1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */ 22 /* All Rights Reserved */ 23 24 25 /* 26 * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 27 * Use is subject to license terms. 28 */ 29 30 #include <sys/types.h> 31 #include <sys/sysmacros.h> 32 #include <sys/param.h> 33 #include <sys/errno.h> 34 #include <sys/signal.h> 35 #include <sys/proc.h> 36 #include <sys/conf.h> 37 #include <sys/cred.h> 38 #include <sys/user.h> 39 #include <sys/vnode.h> 40 #include <sys/file.h> 41 #include <sys/session.h> 42 #include <sys/stream.h> 43 #include <sys/strsubr.h> 44 #include <sys/stropts.h> 45 #include <sys/poll.h> 46 #include <sys/systm.h> 47 #include <sys/cpuvar.h> 48 #include <sys/uio.h> 49 #include <sys/cmn_err.h> 50 #include <sys/priocntl.h> 51 #include <sys/procset.h> 52 #include <sys/vmem.h> 53 #include <sys/bitmap.h> 54 #include <sys/kmem.h> 55 #include <sys/siginfo.h> 56 #include <sys/vtrace.h> 57 #include <sys/callb.h> 58 #include <sys/debug.h> 59 #include <sys/modctl.h> 60 #include <sys/vmsystm.h> 61 #include <vm/page.h> 62 #include <sys/atomic.h> 63 #include <sys/suntpi.h> 64 #include <sys/strlog.h> 65 #include <sys/promif.h> 66 #include <sys/project.h> 67 #include <sys/vm.h> 68 #include <sys/taskq.h> 69 #include <sys/sunddi.h> 70 #include <sys/sunldi_impl.h> 71 #include <sys/strsun.h> 72 #include <sys/isa_defs.h> 73 #include <sys/multidata.h> 74 #include <sys/pattr.h> 75 #include <sys/strft.h> 76 #include <sys/fs/snode.h> 77 #include <sys/zone.h> 78 #include <sys/open.h> 79 #include <sys/sunldi.h> 80 #include <sys/sad.h> 81 #include <sys/netstack.h> 82 83 #define O_SAMESTR(q) (((q)->q_next) && \ 84 (((q)->q_flag & QREADR) == ((q)->q_next->q_flag & QREADR))) 85 86 /* 87 * WARNING: 88 * The variables and routines in this file are private, belonging 89 * to the STREAMS subsystem. These should not be used by modules 90 * or drivers. Compatibility will not be guaranteed. 91 */ 92 93 /* 94 * Id value used to distinguish between different multiplexor links. 95 */ 96 static int32_t lnk_id = 0; 97 98 #define STREAMS_LOPRI MINCLSYSPRI 99 static pri_t streams_lopri = STREAMS_LOPRI; 100 101 #define STRSTAT(x) (str_statistics.x.value.ui64++) 102 typedef struct str_stat { 103 kstat_named_t sqenables; 104 kstat_named_t stenables; 105 kstat_named_t syncqservice; 106 kstat_named_t freebs; 107 kstat_named_t qwr_outer; 108 kstat_named_t rservice; 109 kstat_named_t strwaits; 110 kstat_named_t taskqfails; 111 kstat_named_t bufcalls; 112 kstat_named_t qhelps; 113 kstat_named_t qremoved; 114 kstat_named_t sqremoved; 115 kstat_named_t bcwaits; 116 kstat_named_t sqtoomany; 117 } str_stat_t; 118 119 static str_stat_t str_statistics = { 120 { "sqenables", KSTAT_DATA_UINT64 }, 121 { "stenables", KSTAT_DATA_UINT64 }, 122 { "syncqservice", KSTAT_DATA_UINT64 }, 123 { "freebs", KSTAT_DATA_UINT64 }, 124 { "qwr_outer", KSTAT_DATA_UINT64 }, 125 { "rservice", KSTAT_DATA_UINT64 }, 126 { "strwaits", KSTAT_DATA_UINT64 }, 127 { "taskqfails", KSTAT_DATA_UINT64 }, 128 { "bufcalls", KSTAT_DATA_UINT64 }, 129 { "qhelps", KSTAT_DATA_UINT64 }, 130 { "qremoved", KSTAT_DATA_UINT64 }, 131 { "sqremoved", KSTAT_DATA_UINT64 }, 132 { "bcwaits", KSTAT_DATA_UINT64 }, 133 { "sqtoomany", KSTAT_DATA_UINT64 }, 134 }; 135 136 static kstat_t *str_kstat; 137 138 /* 139 * qrunflag was used previously to control background scheduling of queues. It 140 * is not used anymore, but kept here in case some module still wants to access 141 * it via qready() and setqsched macros. 142 */ 143 char qrunflag; /* Unused */ 144 145 /* 146 * Most of the streams scheduling is done via task queues. Task queues may fail 147 * for non-sleep dispatches, so there are two backup threads servicing failed 148 * requests for queues and syncqs. Both of these threads also service failed 149 * dispatches freebs requests. Queues are put in the list specified by `qhead' 150 * and `qtail' pointers, syncqs use `sqhead' and `sqtail' pointers and freebs 151 * requests are put into `freebs_list' which has no tail pointer. All three 152 * lists are protected by a single `service_queue' lock and use 153 * `services_to_run' condition variable for signaling background threads. Use of 154 * a single lock should not be a problem because it is only used under heavy 155 * loads when task queues start to fail and at that time it may be a good idea 156 * to throttle scheduling requests. 157 * 158 * NOTE: queues and syncqs should be scheduled by two separate threads because 159 * queue servicing may be blocked waiting for a syncq which may be also 160 * scheduled for background execution. This may create a deadlock when only one 161 * thread is used for both. 162 */ 163 164 static taskq_t *streams_taskq; /* Used for most STREAMS scheduling */ 165 166 static kmutex_t service_queue; /* protects all of servicing vars */ 167 static kcondvar_t services_to_run; /* wake up background service thread */ 168 static kcondvar_t syncqs_to_run; /* wake up background service thread */ 169 170 /* 171 * List of queues scheduled for background processing due to lack of resources 172 * in the task queues. Protected by service_queue lock; 173 */ 174 static struct queue *qhead; 175 static struct queue *qtail; 176 177 /* 178 * Same list for syncqs 179 */ 180 static syncq_t *sqhead; 181 static syncq_t *sqtail; 182 183 static mblk_t *freebs_list; /* list of buffers to free */ 184 185 /* 186 * Backup threads for servicing queues and syncqs 187 */ 188 kthread_t *streams_qbkgrnd_thread; 189 kthread_t *streams_sqbkgrnd_thread; 190 191 /* 192 * Bufcalls related variables. 193 */ 194 struct bclist strbcalls; /* list of waiting bufcalls */ 195 kmutex_t strbcall_lock; /* protects bufcall list (strbcalls) */ 196 kcondvar_t strbcall_cv; /* Signaling when a bufcall is added */ 197 kmutex_t bcall_monitor; /* sleep/wakeup style monitor */ 198 kcondvar_t bcall_cv; /* wait 'till executing bufcall completes */ 199 kthread_t *bc_bkgrnd_thread; /* Thread to service bufcall requests */ 200 201 kmutex_t strresources; /* protects global resources */ 202 kmutex_t muxifier; /* single-threads multiplexor creation */ 203 204 static void *str_stack_init(netstackid_t stackid, netstack_t *ns); 205 static void str_stack_shutdown(netstackid_t stackid, void *arg); 206 static void str_stack_fini(netstackid_t stackid, void *arg); 207 208 /* 209 * run_queues is no longer used, but is kept in case some 3rd party 210 * module/driver decides to use it. 211 */ 212 int run_queues = 0; 213 214 /* 215 * sq_max_size is the depth of the syncq (in number of messages) before 216 * qfill_syncq() starts QFULL'ing destination queues. As its primary 217 * consumer - IP is no longer D_MTPERMOD, but there may be other 218 * modules/drivers depend on this syncq flow control, we prefer to 219 * choose a large number as the default value. For potential 220 * performance gain, this value is tunable in /etc/system. 221 */ 222 int sq_max_size = 10000; 223 224 /* 225 * The number of ciputctrl structures per syncq and stream we create when 226 * needed. 227 */ 228 int n_ciputctrl; 229 int max_n_ciputctrl = 16; 230 /* 231 * If n_ciputctrl is < min_n_ciputctrl don't even create ciputctrl_cache. 232 */ 233 int min_n_ciputctrl = 2; 234 235 /* 236 * Per-driver/module syncqs 237 * ======================== 238 * 239 * For drivers/modules that use PERMOD or outer syncqs we keep a list of 240 * perdm structures, new entries being added (and new syncqs allocated) when 241 * setq() encounters a module/driver with a streamtab that it hasn't seen 242 * before. 243 * The reason for this mechanism is that some modules and drivers share a 244 * common streamtab and it is necessary for those modules and drivers to also 245 * share a common PERMOD syncq. 246 * 247 * perdm_list --> dm_str == streamtab_1 248 * dm_sq == syncq_1 249 * dm_ref 250 * dm_next --> dm_str == streamtab_2 251 * dm_sq == syncq_2 252 * dm_ref 253 * dm_next --> ... NULL 254 * 255 * The dm_ref field is incremented for each new driver/module that takes 256 * a reference to the perdm structure and hence shares the syncq. 257 * References are held in the fmodsw_impl_t structure for each STREAMS module 258 * or the dev_impl array (indexed by device major number) for each driver. 259 * 260 * perdm_list -> [dm_ref == 1] -> [dm_ref == 2] -> [dm_ref == 1] -> NULL 261 * ^ ^ ^ ^ 262 * | ______________/ | | 263 * | / | | 264 * dev_impl: ...|x|y|... module A module B 265 * 266 * When a module/driver is unloaded the reference count is decremented and, 267 * when it falls to zero, the perdm structure is removed from the list and 268 * the syncq is freed (see rele_dm()). 269 */ 270 perdm_t *perdm_list = NULL; 271 static krwlock_t perdm_rwlock; 272 cdevsw_impl_t *devimpl; 273 274 extern struct qinit strdata; 275 extern struct qinit stwdata; 276 277 static void runservice(queue_t *); 278 static void streams_bufcall_service(void); 279 static void streams_qbkgrnd_service(void); 280 static void streams_sqbkgrnd_service(void); 281 static syncq_t *new_syncq(void); 282 static void free_syncq(syncq_t *); 283 static void outer_insert(syncq_t *, syncq_t *); 284 static void outer_remove(syncq_t *, syncq_t *); 285 static void write_now(syncq_t *); 286 static void clr_qfull(queue_t *); 287 static void runbufcalls(void); 288 static void sqenable(syncq_t *); 289 static void sqfill_events(syncq_t *, queue_t *, mblk_t *, void (*)()); 290 static void wait_q_syncq(queue_t *); 291 static void backenable_insertedq(queue_t *); 292 293 static void queue_service(queue_t *); 294 static void stream_service(stdata_t *); 295 static void syncq_service(syncq_t *); 296 static void qwriter_outer_service(syncq_t *); 297 static void mblk_free(mblk_t *); 298 #ifdef DEBUG 299 static int qprocsareon(queue_t *); 300 #endif 301 302 static void set_nfsrv_ptr(queue_t *, queue_t *, queue_t *, queue_t *); 303 static void reset_nfsrv_ptr(queue_t *, queue_t *); 304 void set_qfull(queue_t *); 305 306 static void sq_run_events(syncq_t *); 307 static int propagate_syncq(queue_t *); 308 309 static void blocksq(syncq_t *, ushort_t, int); 310 static void unblocksq(syncq_t *, ushort_t, int); 311 static int dropsq(syncq_t *, uint16_t); 312 static void emptysq(syncq_t *); 313 static sqlist_t *sqlist_alloc(struct stdata *, int); 314 static void sqlist_free(sqlist_t *); 315 static sqlist_t *sqlist_build(queue_t *, struct stdata *, boolean_t); 316 static void sqlist_insert(sqlist_t *, syncq_t *); 317 static void sqlist_insertall(sqlist_t *, queue_t *); 318 319 static void strsetuio(stdata_t *); 320 321 struct kmem_cache *stream_head_cache; 322 struct kmem_cache *queue_cache; 323 struct kmem_cache *syncq_cache; 324 struct kmem_cache *qband_cache; 325 struct kmem_cache *linkinfo_cache; 326 struct kmem_cache *ciputctrl_cache = NULL; 327 328 static linkinfo_t *linkinfo_list; 329 330 /* Global esballoc throttling queue */ 331 static esb_queue_t system_esbq; 332 333 /* 334 * esballoc tunable parameters. 335 */ 336 int esbq_max_qlen = 0x16; /* throttled queue length */ 337 clock_t esbq_timeout = 0x8; /* timeout to process esb queue */ 338 339 /* 340 * Routines to handle esballoc queueing. 341 */ 342 static void esballoc_process_queue(esb_queue_t *); 343 static void esballoc_enqueue_mblk(mblk_t *); 344 static void esballoc_timer(void *); 345 static void esballoc_set_timer(esb_queue_t *, clock_t); 346 static void esballoc_mblk_free(mblk_t *); 347 348 /* 349 * Qinit structure and Module_info structures 350 * for passthru read and write queues 351 */ 352 353 static void pass_wput(queue_t *, mblk_t *); 354 static queue_t *link_addpassthru(stdata_t *); 355 static void link_rempassthru(queue_t *); 356 357 struct module_info passthru_info = { 358 0, 359 "passthru", 360 0, 361 INFPSZ, 362 STRHIGH, 363 STRLOW 364 }; 365 366 struct qinit passthru_rinit = { 367 (int (*)())putnext, 368 NULL, 369 NULL, 370 NULL, 371 NULL, 372 &passthru_info, 373 NULL 374 }; 375 376 struct qinit passthru_winit = { 377 (int (*)()) pass_wput, 378 NULL, 379 NULL, 380 NULL, 381 NULL, 382 &passthru_info, 383 NULL 384 }; 385 386 /* 387 * Special form of assertion: verify that X implies Y i.e. when X is true Y 388 * should also be true. 389 */ 390 #define IMPLY(X, Y) ASSERT(!(X) || (Y)) 391 392 /* 393 * Logical equivalence. Verify that both X and Y are either TRUE or FALSE. 394 */ 395 #define EQUIV(X, Y) { IMPLY(X, Y); IMPLY(Y, X); } 396 397 /* 398 * Verify correctness of list head/tail pointers. 399 */ 400 #define LISTCHECK(head, tail, link) { \ 401 EQUIV(head, tail); \ 402 IMPLY(tail != NULL, tail->link == NULL); \ 403 } 404 405 /* 406 * Enqueue a list element `el' in the end of a list denoted by `head' and `tail' 407 * using a `link' field. 408 */ 409 #define ENQUEUE(el, head, tail, link) { \ 410 ASSERT(el->link == NULL); \ 411 LISTCHECK(head, tail, link); \ 412 if (head == NULL) \ 413 head = el; \ 414 else \ 415 tail->link = el; \ 416 tail = el; \ 417 } 418 419 /* 420 * Dequeue the first element of the list denoted by `head' and `tail' pointers 421 * using a `link' field and put result into `el'. 422 */ 423 #define DQ(el, head, tail, link) { \ 424 LISTCHECK(head, tail, link); \ 425 el = head; \ 426 if (head != NULL) { \ 427 head = head->link; \ 428 if (head == NULL) \ 429 tail = NULL; \ 430 el->link = NULL; \ 431 } \ 432 } 433 434 /* 435 * Remove `el' from the list using `chase' and `curr' pointers and return result 436 * in `succeed'. 437 */ 438 #define RMQ(el, head, tail, link, chase, curr, succeed) { \ 439 LISTCHECK(head, tail, link); \ 440 chase = NULL; \ 441 succeed = 0; \ 442 for (curr = head; (curr != el) && (curr != NULL); curr = curr->link) \ 443 chase = curr; \ 444 if (curr != NULL) { \ 445 succeed = 1; \ 446 ASSERT(curr == el); \ 447 if (chase != NULL) \ 448 chase->link = curr->link; \ 449 else \ 450 head = curr->link; \ 451 curr->link = NULL; \ 452 if (curr == tail) \ 453 tail = chase; \ 454 } \ 455 LISTCHECK(head, tail, link); \ 456 } 457 458 /* Handling of delayed messages on the inner syncq. */ 459 460 /* 461 * DEBUG versions should use function versions (to simplify tracing) and 462 * non-DEBUG kernels should use macro versions. 463 */ 464 465 /* 466 * Put a queue on the syncq list of queues. 467 * Assumes SQLOCK held. 468 */ 469 #define SQPUT_Q(sq, qp) \ 470 { \ 471 ASSERT(MUTEX_HELD(SQLOCK(sq))); \ 472 if (!(qp->q_sqflags & Q_SQQUEUED)) { \ 473 /* The queue should not be linked anywhere */ \ 474 ASSERT((qp->q_sqprev == NULL) && (qp->q_sqnext == NULL)); \ 475 /* Head and tail may only be NULL simultaneously */ \ 476 EQUIV(sq->sq_head, sq->sq_tail); \ 477 /* Queue may be only enqueued on its syncq */ \ 478 ASSERT(sq == qp->q_syncq); \ 479 /* Check the correctness of SQ_MESSAGES flag */ \ 480 EQUIV(sq->sq_head, (sq->sq_flags & SQ_MESSAGES)); \ 481 /* Sanity check first/last elements of the list */ \ 482 IMPLY(sq->sq_head != NULL, sq->sq_head->q_sqprev == NULL);\ 483 IMPLY(sq->sq_tail != NULL, sq->sq_tail->q_sqnext == NULL);\ 484 /* \ 485 * Sanity check of priority field: empty queue should \ 486 * have zero priority \ 487 * and nqueues equal to zero. \ 488 */ \ 489 IMPLY(sq->sq_head == NULL, sq->sq_pri == 0); \ 490 /* Sanity check of sq_nqueues field */ \ 491 EQUIV(sq->sq_head, sq->sq_nqueues); \ 492 if (sq->sq_head == NULL) { \ 493 sq->sq_head = sq->sq_tail = qp; \ 494 sq->sq_flags |= SQ_MESSAGES; \ 495 } else if (qp->q_spri == 0) { \ 496 qp->q_sqprev = sq->sq_tail; \ 497 sq->sq_tail->q_sqnext = qp; \ 498 sq->sq_tail = qp; \ 499 } else { \ 500 /* \ 501 * Put this queue in priority order: higher \ 502 * priority gets closer to the head. \ 503 */ \ 504 queue_t **qpp = &sq->sq_tail; \ 505 queue_t *qnext = NULL; \ 506 \ 507 while (*qpp != NULL && qp->q_spri > (*qpp)->q_spri) { \ 508 qnext = *qpp; \ 509 qpp = &(*qpp)->q_sqprev; \ 510 } \ 511 qp->q_sqnext = qnext; \ 512 qp->q_sqprev = *qpp; \ 513 if (*qpp != NULL) { \ 514 (*qpp)->q_sqnext = qp; \ 515 } else { \ 516 sq->sq_head = qp; \ 517 sq->sq_pri = sq->sq_head->q_spri; \ 518 } \ 519 *qpp = qp; \ 520 } \ 521 qp->q_sqflags |= Q_SQQUEUED; \ 522 qp->q_sqtstamp = ddi_get_lbolt(); \ 523 sq->sq_nqueues++; \ 524 } \ 525 } 526 527 /* 528 * Remove a queue from the syncq list 529 * Assumes SQLOCK held. 530 */ 531 #define SQRM_Q(sq, qp) \ 532 { \ 533 ASSERT(MUTEX_HELD(SQLOCK(sq))); \ 534 ASSERT(qp->q_sqflags & Q_SQQUEUED); \ 535 ASSERT(sq->sq_head != NULL && sq->sq_tail != NULL); \ 536 ASSERT((sq->sq_flags & SQ_MESSAGES) != 0); \ 537 /* Check that the queue is actually in the list */ \ 538 ASSERT(qp->q_sqnext != NULL || sq->sq_tail == qp); \ 539 ASSERT(qp->q_sqprev != NULL || sq->sq_head == qp); \ 540 ASSERT(sq->sq_nqueues != 0); \ 541 if (qp->q_sqprev == NULL) { \ 542 /* First queue on list, make head q_sqnext */ \ 543 sq->sq_head = qp->q_sqnext; \ 544 } else { \ 545 /* Make prev->next == next */ \ 546 qp->q_sqprev->q_sqnext = qp->q_sqnext; \ 547 } \ 548 if (qp->q_sqnext == NULL) { \ 549 /* Last queue on list, make tail sqprev */ \ 550 sq->sq_tail = qp->q_sqprev; \ 551 } else { \ 552 /* Make next->prev == prev */ \ 553 qp->q_sqnext->q_sqprev = qp->q_sqprev; \ 554 } \ 555 /* clear out references on this queue */ \ 556 qp->q_sqprev = qp->q_sqnext = NULL; \ 557 qp->q_sqflags &= ~Q_SQQUEUED; \ 558 /* If there is nothing queued, clear SQ_MESSAGES */ \ 559 if (sq->sq_head != NULL) { \ 560 sq->sq_pri = sq->sq_head->q_spri; \ 561 } else { \ 562 sq->sq_flags &= ~SQ_MESSAGES; \ 563 sq->sq_pri = 0; \ 564 } \ 565 sq->sq_nqueues--; \ 566 ASSERT(sq->sq_head != NULL || sq->sq_evhead != NULL || \ 567 (sq->sq_flags & SQ_QUEUED) == 0); \ 568 } 569 570 /* Hide the definition from the header file. */ 571 #ifdef SQPUT_MP 572 #undef SQPUT_MP 573 #endif 574 575 /* 576 * Put a message on the queue syncq. 577 * Assumes QLOCK held. 578 */ 579 #define SQPUT_MP(qp, mp) \ 580 { \ 581 ASSERT(MUTEX_HELD(QLOCK(qp))); \ 582 ASSERT(qp->q_sqhead == NULL || \ 583 (qp->q_sqtail != NULL && \ 584 qp->q_sqtail->b_next == NULL)); \ 585 qp->q_syncqmsgs++; \ 586 ASSERT(qp->q_syncqmsgs != 0); /* Wraparound */ \ 587 if (qp->q_sqhead == NULL) { \ 588 qp->q_sqhead = qp->q_sqtail = mp; \ 589 } else { \ 590 qp->q_sqtail->b_next = mp; \ 591 qp->q_sqtail = mp; \ 592 } \ 593 ASSERT(qp->q_syncqmsgs > 0); \ 594 set_qfull(qp); \ 595 } 596 597 #define SQ_PUTCOUNT_SETFAST_LOCKED(sq) { \ 598 ASSERT(MUTEX_HELD(SQLOCK(sq))); \ 599 if ((sq)->sq_ciputctrl != NULL) { \ 600 int i; \ 601 int nlocks = (sq)->sq_nciputctrl; \ 602 ciputctrl_t *cip = (sq)->sq_ciputctrl; \ 603 ASSERT((sq)->sq_type & SQ_CIPUT); \ 604 for (i = 0; i <= nlocks; i++) { \ 605 ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock)); \ 606 cip[i].ciputctrl_count |= SQ_FASTPUT; \ 607 } \ 608 } \ 609 } 610 611 612 #define SQ_PUTCOUNT_CLRFAST_LOCKED(sq) { \ 613 ASSERT(MUTEX_HELD(SQLOCK(sq))); \ 614 if ((sq)->sq_ciputctrl != NULL) { \ 615 int i; \ 616 int nlocks = (sq)->sq_nciputctrl; \ 617 ciputctrl_t *cip = (sq)->sq_ciputctrl; \ 618 ASSERT((sq)->sq_type & SQ_CIPUT); \ 619 for (i = 0; i <= nlocks; i++) { \ 620 ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock)); \ 621 cip[i].ciputctrl_count &= ~SQ_FASTPUT; \ 622 } \ 623 } \ 624 } 625 626 /* 627 * Run service procedures for all queues in the stream head. 628 */ 629 #define STR_SERVICE(stp, q) { \ 630 ASSERT(MUTEX_HELD(&stp->sd_qlock)); \ 631 while (stp->sd_qhead != NULL) { \ 632 DQ(q, stp->sd_qhead, stp->sd_qtail, q_link); \ 633 ASSERT(stp->sd_nqueues > 0); \ 634 stp->sd_nqueues--; \ 635 ASSERT(!(q->q_flag & QINSERVICE)); \ 636 mutex_exit(&stp->sd_qlock); \ 637 queue_service(q); \ 638 mutex_enter(&stp->sd_qlock); \ 639 } \ 640 ASSERT(stp->sd_nqueues == 0); \ 641 ASSERT((stp->sd_qhead == NULL) && (stp->sd_qtail == NULL)); \ 642 } 643 644 /* 645 * Constructor/destructor routines for the stream head cache 646 */ 647 /* ARGSUSED */ 648 static int 649 stream_head_constructor(void *buf, void *cdrarg, int kmflags) 650 { 651 stdata_t *stp = buf; 652 653 mutex_init(&stp->sd_lock, NULL, MUTEX_DEFAULT, NULL); 654 mutex_init(&stp->sd_reflock, NULL, MUTEX_DEFAULT, NULL); 655 mutex_init(&stp->sd_qlock, NULL, MUTEX_DEFAULT, NULL); 656 cv_init(&stp->sd_monitor, NULL, CV_DEFAULT, NULL); 657 cv_init(&stp->sd_iocmonitor, NULL, CV_DEFAULT, NULL); 658 cv_init(&stp->sd_refmonitor, NULL, CV_DEFAULT, NULL); 659 cv_init(&stp->sd_qcv, NULL, CV_DEFAULT, NULL); 660 cv_init(&stp->sd_zcopy_wait, NULL, CV_DEFAULT, NULL); 661 stp->sd_wrq = NULL; 662 663 return (0); 664 } 665 666 /* ARGSUSED */ 667 static void 668 stream_head_destructor(void *buf, void *cdrarg) 669 { 670 stdata_t *stp = buf; 671 672 mutex_destroy(&stp->sd_lock); 673 mutex_destroy(&stp->sd_reflock); 674 mutex_destroy(&stp->sd_qlock); 675 cv_destroy(&stp->sd_monitor); 676 cv_destroy(&stp->sd_iocmonitor); 677 cv_destroy(&stp->sd_refmonitor); 678 cv_destroy(&stp->sd_qcv); 679 cv_destroy(&stp->sd_zcopy_wait); 680 } 681 682 /* 683 * Constructor/destructor routines for the queue cache 684 */ 685 /* ARGSUSED */ 686 static int 687 queue_constructor(void *buf, void *cdrarg, int kmflags) 688 { 689 queinfo_t *qip = buf; 690 queue_t *qp = &qip->qu_rqueue; 691 queue_t *wqp = &qip->qu_wqueue; 692 syncq_t *sq = &qip->qu_syncq; 693 694 qp->q_first = NULL; 695 qp->q_link = NULL; 696 qp->q_count = 0; 697 qp->q_mblkcnt = 0; 698 qp->q_sqhead = NULL; 699 qp->q_sqtail = NULL; 700 qp->q_sqnext = NULL; 701 qp->q_sqprev = NULL; 702 qp->q_sqflags = 0; 703 qp->q_rwcnt = 0; 704 qp->q_spri = 0; 705 706 mutex_init(QLOCK(qp), NULL, MUTEX_DEFAULT, NULL); 707 cv_init(&qp->q_wait, NULL, CV_DEFAULT, NULL); 708 709 wqp->q_first = NULL; 710 wqp->q_link = NULL; 711 wqp->q_count = 0; 712 wqp->q_mblkcnt = 0; 713 wqp->q_sqhead = NULL; 714 wqp->q_sqtail = NULL; 715 wqp->q_sqnext = NULL; 716 wqp->q_sqprev = NULL; 717 wqp->q_sqflags = 0; 718 wqp->q_rwcnt = 0; 719 wqp->q_spri = 0; 720 721 mutex_init(QLOCK(wqp), NULL, MUTEX_DEFAULT, NULL); 722 cv_init(&wqp->q_wait, NULL, CV_DEFAULT, NULL); 723 724 sq->sq_head = NULL; 725 sq->sq_tail = NULL; 726 sq->sq_evhead = NULL; 727 sq->sq_evtail = NULL; 728 sq->sq_callbpend = NULL; 729 sq->sq_outer = NULL; 730 sq->sq_onext = NULL; 731 sq->sq_oprev = NULL; 732 sq->sq_next = NULL; 733 sq->sq_svcflags = 0; 734 sq->sq_servcount = 0; 735 sq->sq_needexcl = 0; 736 sq->sq_nqueues = 0; 737 sq->sq_pri = 0; 738 739 mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL); 740 cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL); 741 cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL); 742 743 return (0); 744 } 745 746 /* ARGSUSED */ 747 static void 748 queue_destructor(void *buf, void *cdrarg) 749 { 750 queinfo_t *qip = buf; 751 queue_t *qp = &qip->qu_rqueue; 752 queue_t *wqp = &qip->qu_wqueue; 753 syncq_t *sq = &qip->qu_syncq; 754 755 ASSERT(qp->q_sqhead == NULL); 756 ASSERT(wqp->q_sqhead == NULL); 757 ASSERT(qp->q_sqnext == NULL); 758 ASSERT(wqp->q_sqnext == NULL); 759 ASSERT(qp->q_rwcnt == 0); 760 ASSERT(wqp->q_rwcnt == 0); 761 762 mutex_destroy(&qp->q_lock); 763 cv_destroy(&qp->q_wait); 764 765 mutex_destroy(&wqp->q_lock); 766 cv_destroy(&wqp->q_wait); 767 768 mutex_destroy(&sq->sq_lock); 769 cv_destroy(&sq->sq_wait); 770 cv_destroy(&sq->sq_exitwait); 771 } 772 773 /* 774 * Constructor/destructor routines for the syncq cache 775 */ 776 /* ARGSUSED */ 777 static int 778 syncq_constructor(void *buf, void *cdrarg, int kmflags) 779 { 780 syncq_t *sq = buf; 781 782 bzero(buf, sizeof (syncq_t)); 783 784 mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL); 785 cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL); 786 cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL); 787 788 return (0); 789 } 790 791 /* ARGSUSED */ 792 static void 793 syncq_destructor(void *buf, void *cdrarg) 794 { 795 syncq_t *sq = buf; 796 797 ASSERT(sq->sq_head == NULL); 798 ASSERT(sq->sq_tail == NULL); 799 ASSERT(sq->sq_evhead == NULL); 800 ASSERT(sq->sq_evtail == NULL); 801 ASSERT(sq->sq_callbpend == NULL); 802 ASSERT(sq->sq_callbflags == 0); 803 ASSERT(sq->sq_outer == NULL); 804 ASSERT(sq->sq_onext == NULL); 805 ASSERT(sq->sq_oprev == NULL); 806 ASSERT(sq->sq_next == NULL); 807 ASSERT(sq->sq_needexcl == 0); 808 ASSERT(sq->sq_svcflags == 0); 809 ASSERT(sq->sq_servcount == 0); 810 ASSERT(sq->sq_nqueues == 0); 811 ASSERT(sq->sq_pri == 0); 812 ASSERT(sq->sq_count == 0); 813 ASSERT(sq->sq_rmqcount == 0); 814 ASSERT(sq->sq_cancelid == 0); 815 ASSERT(sq->sq_ciputctrl == NULL); 816 ASSERT(sq->sq_nciputctrl == 0); 817 ASSERT(sq->sq_type == 0); 818 ASSERT(sq->sq_flags == 0); 819 820 mutex_destroy(&sq->sq_lock); 821 cv_destroy(&sq->sq_wait); 822 cv_destroy(&sq->sq_exitwait); 823 } 824 825 /* ARGSUSED */ 826 static int 827 ciputctrl_constructor(void *buf, void *cdrarg, int kmflags) 828 { 829 ciputctrl_t *cip = buf; 830 int i; 831 832 for (i = 0; i < n_ciputctrl; i++) { 833 cip[i].ciputctrl_count = SQ_FASTPUT; 834 mutex_init(&cip[i].ciputctrl_lock, NULL, MUTEX_DEFAULT, NULL); 835 } 836 837 return (0); 838 } 839 840 /* ARGSUSED */ 841 static void 842 ciputctrl_destructor(void *buf, void *cdrarg) 843 { 844 ciputctrl_t *cip = buf; 845 int i; 846 847 for (i = 0; i < n_ciputctrl; i++) { 848 ASSERT(cip[i].ciputctrl_count & SQ_FASTPUT); 849 mutex_destroy(&cip[i].ciputctrl_lock); 850 } 851 } 852 853 /* 854 * Init routine run from main at boot time. 855 */ 856 void 857 strinit(void) 858 { 859 int ncpus = ((boot_max_ncpus == -1) ? max_ncpus : boot_max_ncpus); 860 861 stream_head_cache = kmem_cache_create("stream_head_cache", 862 sizeof (stdata_t), 0, 863 stream_head_constructor, stream_head_destructor, NULL, 864 NULL, NULL, 0); 865 866 queue_cache = kmem_cache_create("queue_cache", sizeof (queinfo_t), 0, 867 queue_constructor, queue_destructor, NULL, NULL, NULL, 0); 868 869 syncq_cache = kmem_cache_create("syncq_cache", sizeof (syncq_t), 0, 870 syncq_constructor, syncq_destructor, NULL, NULL, NULL, 0); 871 872 qband_cache = kmem_cache_create("qband_cache", 873 sizeof (qband_t), 0, NULL, NULL, NULL, NULL, NULL, 0); 874 875 linkinfo_cache = kmem_cache_create("linkinfo_cache", 876 sizeof (linkinfo_t), 0, NULL, NULL, NULL, NULL, NULL, 0); 877 878 n_ciputctrl = ncpus; 879 n_ciputctrl = 1 << highbit(n_ciputctrl - 1); 880 ASSERT(n_ciputctrl >= 1); 881 n_ciputctrl = MIN(n_ciputctrl, max_n_ciputctrl); 882 if (n_ciputctrl >= min_n_ciputctrl) { 883 ciputctrl_cache = kmem_cache_create("ciputctrl_cache", 884 sizeof (ciputctrl_t) * n_ciputctrl, 885 sizeof (ciputctrl_t), ciputctrl_constructor, 886 ciputctrl_destructor, NULL, NULL, NULL, 0); 887 } 888 889 streams_taskq = system_taskq; 890 891 if (streams_taskq == NULL) 892 panic("strinit: no memory for streams taskq!"); 893 894 bc_bkgrnd_thread = thread_create(NULL, 0, 895 streams_bufcall_service, NULL, 0, &p0, TS_RUN, streams_lopri); 896 897 streams_qbkgrnd_thread = thread_create(NULL, 0, 898 streams_qbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri); 899 900 streams_sqbkgrnd_thread = thread_create(NULL, 0, 901 streams_sqbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri); 902 903 /* 904 * Create STREAMS kstats. 905 */ 906 str_kstat = kstat_create("streams", 0, "strstat", 907 "net", KSTAT_TYPE_NAMED, 908 sizeof (str_statistics) / sizeof (kstat_named_t), 909 KSTAT_FLAG_VIRTUAL); 910 911 if (str_kstat != NULL) { 912 str_kstat->ks_data = &str_statistics; 913 kstat_install(str_kstat); 914 } 915 916 /* 917 * TPI support routine initialisation. 918 */ 919 tpi_init(); 920 921 /* 922 * Handle to have autopush and persistent link information per 923 * zone. 924 * Note: uses shutdown hook instead of destroy hook so that the 925 * persistent links can be torn down before the destroy hooks 926 * in the TCP/IP stack are called. 927 */ 928 netstack_register(NS_STR, str_stack_init, str_stack_shutdown, 929 str_stack_fini); 930 } 931 932 void 933 str_sendsig(vnode_t *vp, int event, uchar_t band, int error) 934 { 935 struct stdata *stp; 936 937 ASSERT(vp->v_stream); 938 stp = vp->v_stream; 939 /* Have to hold sd_lock to prevent siglist from changing */ 940 mutex_enter(&stp->sd_lock); 941 if (stp->sd_sigflags & event) 942 strsendsig(stp->sd_siglist, event, band, error); 943 mutex_exit(&stp->sd_lock); 944 } 945 946 /* 947 * Send the "sevent" set of signals to a process. 948 * This might send more than one signal if the process is registered 949 * for multiple events. The caller should pass in an sevent that only 950 * includes the events for which the process has registered. 951 */ 952 static void 953 dosendsig(proc_t *proc, int events, int sevent, k_siginfo_t *info, 954 uchar_t band, int error) 955 { 956 ASSERT(MUTEX_HELD(&proc->p_lock)); 957 958 info->si_band = 0; 959 info->si_errno = 0; 960 961 if (sevent & S_ERROR) { 962 sevent &= ~S_ERROR; 963 info->si_code = POLL_ERR; 964 info->si_errno = error; 965 TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG, 966 "strsendsig:proc %p info %p", proc, info); 967 sigaddq(proc, NULL, info, KM_NOSLEEP); 968 info->si_errno = 0; 969 } 970 if (sevent & S_HANGUP) { 971 sevent &= ~S_HANGUP; 972 info->si_code = POLL_HUP; 973 TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG, 974 "strsendsig:proc %p info %p", proc, info); 975 sigaddq(proc, NULL, info, KM_NOSLEEP); 976 } 977 if (sevent & S_HIPRI) { 978 sevent &= ~S_HIPRI; 979 info->si_code = POLL_PRI; 980 TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG, 981 "strsendsig:proc %p info %p", proc, info); 982 sigaddq(proc, NULL, info, KM_NOSLEEP); 983 } 984 if (sevent & S_RDBAND) { 985 sevent &= ~S_RDBAND; 986 if (events & S_BANDURG) 987 sigtoproc(proc, NULL, SIGURG); 988 else 989 sigtoproc(proc, NULL, SIGPOLL); 990 } 991 if (sevent & S_WRBAND) { 992 sevent &= ~S_WRBAND; 993 sigtoproc(proc, NULL, SIGPOLL); 994 } 995 if (sevent & S_INPUT) { 996 sevent &= ~S_INPUT; 997 info->si_code = POLL_IN; 998 info->si_band = band; 999 TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG, 1000 "strsendsig:proc %p info %p", proc, info); 1001 sigaddq(proc, NULL, info, KM_NOSLEEP); 1002 info->si_band = 0; 1003 } 1004 if (sevent & S_OUTPUT) { 1005 sevent &= ~S_OUTPUT; 1006 info->si_code = POLL_OUT; 1007 info->si_band = band; 1008 TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG, 1009 "strsendsig:proc %p info %p", proc, info); 1010 sigaddq(proc, NULL, info, KM_NOSLEEP); 1011 info->si_band = 0; 1012 } 1013 if (sevent & S_MSG) { 1014 sevent &= ~S_MSG; 1015 info->si_code = POLL_MSG; 1016 info->si_band = band; 1017 TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG, 1018 "strsendsig:proc %p info %p", proc, info); 1019 sigaddq(proc, NULL, info, KM_NOSLEEP); 1020 info->si_band = 0; 1021 } 1022 if (sevent & S_RDNORM) { 1023 sevent &= ~S_RDNORM; 1024 sigtoproc(proc, NULL, SIGPOLL); 1025 } 1026 if (sevent != 0) { 1027 panic("strsendsig: unknown event(s) %x", sevent); 1028 } 1029 } 1030 1031 /* 1032 * Send SIGPOLL/SIGURG signal to all processes and process groups 1033 * registered on the given signal list that want a signal for at 1034 * least one of the specified events. 1035 * 1036 * Must be called with exclusive access to siglist (caller holding sd_lock). 1037 * 1038 * strioctl(I_SETSIG/I_ESETSIG) will only change siglist when holding 1039 * sd_lock and the ioctl code maintains a PID_HOLD on the pid structure 1040 * while it is in the siglist. 1041 * 1042 * For performance reasons (MP scalability) the code drops pidlock 1043 * when sending signals to a single process. 1044 * When sending to a process group the code holds 1045 * pidlock to prevent the membership in the process group from changing 1046 * while walking the p_pglink list. 1047 */ 1048 void 1049 strsendsig(strsig_t *siglist, int event, uchar_t band, int error) 1050 { 1051 strsig_t *ssp; 1052 k_siginfo_t info; 1053 struct pid *pidp; 1054 proc_t *proc; 1055 1056 info.si_signo = SIGPOLL; 1057 info.si_errno = 0; 1058 for (ssp = siglist; ssp; ssp = ssp->ss_next) { 1059 int sevent; 1060 1061 sevent = ssp->ss_events & event; 1062 if (sevent == 0) 1063 continue; 1064 1065 if ((pidp = ssp->ss_pidp) == NULL) { 1066 /* pid was released but still on event list */ 1067 continue; 1068 } 1069 1070 1071 if (ssp->ss_pid > 0) { 1072 /* 1073 * XXX This unfortunately still generates 1074 * a signal when a fd is closed but 1075 * the proc is active. 1076 */ 1077 ASSERT(ssp->ss_pid == pidp->pid_id); 1078 1079 mutex_enter(&pidlock); 1080 proc = prfind_zone(pidp->pid_id, ALL_ZONES); 1081 if (proc == NULL) { 1082 mutex_exit(&pidlock); 1083 continue; 1084 } 1085 mutex_enter(&proc->p_lock); 1086 mutex_exit(&pidlock); 1087 dosendsig(proc, ssp->ss_events, sevent, &info, 1088 band, error); 1089 mutex_exit(&proc->p_lock); 1090 } else { 1091 /* 1092 * Send to process group. Hold pidlock across 1093 * calls to dosendsig(). 1094 */ 1095 pid_t pgrp = -ssp->ss_pid; 1096 1097 mutex_enter(&pidlock); 1098 proc = pgfind_zone(pgrp, ALL_ZONES); 1099 while (proc != NULL) { 1100 mutex_enter(&proc->p_lock); 1101 dosendsig(proc, ssp->ss_events, sevent, 1102 &info, band, error); 1103 mutex_exit(&proc->p_lock); 1104 proc = proc->p_pglink; 1105 } 1106 mutex_exit(&pidlock); 1107 } 1108 } 1109 } 1110 1111 /* 1112 * Attach a stream device or module. 1113 * qp is a read queue; the new queue goes in so its next 1114 * read ptr is the argument, and the write queue corresponding 1115 * to the argument points to this queue. Return 0 on success, 1116 * or a non-zero errno on failure. 1117 */ 1118 int 1119 qattach(queue_t *qp, dev_t *devp, int oflag, cred_t *crp, fmodsw_impl_t *fp, 1120 boolean_t is_insert) 1121 { 1122 major_t major; 1123 cdevsw_impl_t *dp; 1124 struct streamtab *str; 1125 queue_t *rq; 1126 queue_t *wrq; 1127 uint32_t qflag; 1128 uint32_t sqtype; 1129 perdm_t *dmp; 1130 int error; 1131 int sflag; 1132 1133 rq = allocq(); 1134 wrq = _WR(rq); 1135 STREAM(rq) = STREAM(wrq) = STREAM(qp); 1136 1137 if (fp != NULL) { 1138 str = fp->f_str; 1139 qflag = fp->f_qflag; 1140 sqtype = fp->f_sqtype; 1141 dmp = fp->f_dmp; 1142 IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL); 1143 sflag = MODOPEN; 1144 1145 /* 1146 * stash away a pointer to the module structure so we can 1147 * unref it in qdetach. 1148 */ 1149 rq->q_fp = fp; 1150 } else { 1151 ASSERT(!is_insert); 1152 1153 major = getmajor(*devp); 1154 dp = &devimpl[major]; 1155 1156 str = dp->d_str; 1157 ASSERT(str == STREAMSTAB(major)); 1158 1159 qflag = dp->d_qflag; 1160 ASSERT(qflag & QISDRV); 1161 sqtype = dp->d_sqtype; 1162 1163 /* create perdm_t if needed */ 1164 if (NEED_DM(dp->d_dmp, qflag)) 1165 dp->d_dmp = hold_dm(str, qflag, sqtype); 1166 1167 dmp = dp->d_dmp; 1168 sflag = 0; 1169 } 1170 1171 TRACE_2(TR_FAC_STREAMS_FR, TR_QATTACH_FLAGS, 1172 "qattach:qflag == %X(%X)", qflag, *devp); 1173 1174 /* setq might sleep in allocator - avoid holding locks. */ 1175 setq(rq, str->st_rdinit, str->st_wrinit, dmp, qflag, sqtype, B_FALSE); 1176 1177 /* 1178 * Before calling the module's open routine, set up the q_next 1179 * pointer for inserting a module in the middle of a stream. 1180 * 1181 * Note that we can always set _QINSERTING and set up q_next 1182 * pointer for both inserting and pushing a module. Then there 1183 * is no need for the is_insert parameter. In insertq(), called 1184 * by qprocson(), assume that q_next of the new module always points 1185 * to the correct queue and use it for insertion. Everything should 1186 * work out fine. But in the first release of _I_INSERT, we 1187 * distinguish between inserting and pushing to make sure that 1188 * pushing a module follows the same code path as before. 1189 */ 1190 if (is_insert) { 1191 rq->q_flag |= _QINSERTING; 1192 rq->q_next = qp; 1193 } 1194 1195 /* 1196 * If there is an outer perimeter get exclusive access during 1197 * the open procedure. Bump up the reference count on the queue. 1198 */ 1199 entersq(rq->q_syncq, SQ_OPENCLOSE); 1200 error = (*rq->q_qinfo->qi_qopen)(rq, devp, oflag, sflag, crp); 1201 if (error != 0) 1202 goto failed; 1203 leavesq(rq->q_syncq, SQ_OPENCLOSE); 1204 ASSERT(qprocsareon(rq)); 1205 return (0); 1206 1207 failed: 1208 rq->q_flag &= ~_QINSERTING; 1209 if (backq(wrq) != NULL && backq(wrq)->q_next == wrq) 1210 qprocsoff(rq); 1211 leavesq(rq->q_syncq, SQ_OPENCLOSE); 1212 rq->q_next = wrq->q_next = NULL; 1213 qdetach(rq, 0, 0, crp, B_FALSE); 1214 return (error); 1215 } 1216 1217 /* 1218 * Handle second open of stream. For modules, set the 1219 * last argument to MODOPEN and do not pass any open flags. 1220 * Ignore dummydev since this is not the first open. 1221 */ 1222 int 1223 qreopen(queue_t *qp, dev_t *devp, int flag, cred_t *crp) 1224 { 1225 int error; 1226 dev_t dummydev; 1227 queue_t *wqp = _WR(qp); 1228 1229 ASSERT(qp->q_flag & QREADR); 1230 entersq(qp->q_syncq, SQ_OPENCLOSE); 1231 1232 dummydev = *devp; 1233 if (error = ((*qp->q_qinfo->qi_qopen)(qp, &dummydev, 1234 (wqp->q_next ? 0 : flag), (wqp->q_next ? MODOPEN : 0), crp))) { 1235 leavesq(qp->q_syncq, SQ_OPENCLOSE); 1236 mutex_enter(&STREAM(qp)->sd_lock); 1237 qp->q_stream->sd_flag |= STREOPENFAIL; 1238 mutex_exit(&STREAM(qp)->sd_lock); 1239 return (error); 1240 } 1241 leavesq(qp->q_syncq, SQ_OPENCLOSE); 1242 1243 /* 1244 * successful open should have done qprocson() 1245 */ 1246 ASSERT(qprocsareon(_RD(qp))); 1247 return (0); 1248 } 1249 1250 /* 1251 * Detach a stream module or device. 1252 * If clmode == 1 then the module or driver was opened and its 1253 * close routine must be called. If clmode == 0, the module 1254 * or driver was never opened or the open failed, and so its close 1255 * should not be called. 1256 */ 1257 void 1258 qdetach(queue_t *qp, int clmode, int flag, cred_t *crp, boolean_t is_remove) 1259 { 1260 queue_t *wqp = _WR(qp); 1261 ASSERT(STREAM(qp)->sd_flag & (STRCLOSE|STWOPEN|STRPLUMB)); 1262 1263 if (STREAM_NEEDSERVICE(STREAM(qp))) 1264 stream_runservice(STREAM(qp)); 1265 1266 if (clmode) { 1267 /* 1268 * Make sure that all the messages on the write side syncq are 1269 * processed and nothing is left. Since we are closing, no new 1270 * messages may appear there. 1271 */ 1272 wait_q_syncq(wqp); 1273 1274 entersq(qp->q_syncq, SQ_OPENCLOSE); 1275 if (is_remove) { 1276 mutex_enter(QLOCK(qp)); 1277 qp->q_flag |= _QREMOVING; 1278 mutex_exit(QLOCK(qp)); 1279 } 1280 (*qp->q_qinfo->qi_qclose)(qp, flag, crp); 1281 /* 1282 * Check that qprocsoff() was actually called. 1283 */ 1284 ASSERT((qp->q_flag & QWCLOSE) && (wqp->q_flag & QWCLOSE)); 1285 1286 leavesq(qp->q_syncq, SQ_OPENCLOSE); 1287 } else { 1288 disable_svc(qp); 1289 } 1290 1291 /* 1292 * Allow any threads blocked in entersq to proceed and discover 1293 * the QWCLOSE is set. 1294 * Note: This assumes that all users of entersq check QWCLOSE. 1295 * Currently runservice is the only entersq that can happen 1296 * after removeq has finished. 1297 * Removeq will have discarded all messages destined to the closing 1298 * pair of queues from the syncq. 1299 * NOTE: Calling a function inside an assert is unconventional. 1300 * However, it does not cause any problem since flush_syncq() does 1301 * not change any state except when it returns non-zero i.e. 1302 * when the assert will trigger. 1303 */ 1304 ASSERT(flush_syncq(qp->q_syncq, qp) == 0); 1305 ASSERT(flush_syncq(wqp->q_syncq, wqp) == 0); 1306 ASSERT((qp->q_flag & QPERMOD) || 1307 ((qp->q_syncq->sq_head == NULL) && 1308 (wqp->q_syncq->sq_head == NULL))); 1309 1310 /* release any fmodsw_impl_t structure held on behalf of the queue */ 1311 ASSERT(qp->q_fp != NULL || qp->q_flag & QISDRV); 1312 if (qp->q_fp != NULL) 1313 fmodsw_rele(qp->q_fp); 1314 1315 /* freeq removes us from the outer perimeter if any */ 1316 freeq(qp); 1317 } 1318 1319 /* Prevent service procedures from being called */ 1320 void 1321 disable_svc(queue_t *qp) 1322 { 1323 queue_t *wqp = _WR(qp); 1324 1325 ASSERT(qp->q_flag & QREADR); 1326 mutex_enter(QLOCK(qp)); 1327 qp->q_flag |= QWCLOSE; 1328 mutex_exit(QLOCK(qp)); 1329 mutex_enter(QLOCK(wqp)); 1330 wqp->q_flag |= QWCLOSE; 1331 mutex_exit(QLOCK(wqp)); 1332 } 1333 1334 /* Allow service procedures to be called again */ 1335 void 1336 enable_svc(queue_t *qp) 1337 { 1338 queue_t *wqp = _WR(qp); 1339 1340 ASSERT(qp->q_flag & QREADR); 1341 mutex_enter(QLOCK(qp)); 1342 qp->q_flag &= ~QWCLOSE; 1343 mutex_exit(QLOCK(qp)); 1344 mutex_enter(QLOCK(wqp)); 1345 wqp->q_flag &= ~QWCLOSE; 1346 mutex_exit(QLOCK(wqp)); 1347 } 1348 1349 /* 1350 * Remove queue from qhead/qtail if it is enabled. 1351 * Only reset QENAB if the queue was removed from the runlist. 1352 * A queue goes through 3 stages: 1353 * It is on the service list and QENAB is set. 1354 * It is removed from the service list but QENAB is still set. 1355 * QENAB gets changed to QINSERVICE. 1356 * QINSERVICE is reset (when the service procedure is done) 1357 * Thus we can not reset QENAB unless we actually removed it from the service 1358 * queue. 1359 */ 1360 void 1361 remove_runlist(queue_t *qp) 1362 { 1363 if (qp->q_flag & QENAB && qhead != NULL) { 1364 queue_t *q_chase; 1365 queue_t *q_curr; 1366 int removed; 1367 1368 mutex_enter(&service_queue); 1369 RMQ(qp, qhead, qtail, q_link, q_chase, q_curr, removed); 1370 mutex_exit(&service_queue); 1371 if (removed) { 1372 STRSTAT(qremoved); 1373 qp->q_flag &= ~QENAB; 1374 } 1375 } 1376 } 1377 1378 1379 /* 1380 * Wait for any pending service processing to complete. 1381 * The removal of queues from the runlist is not atomic with the 1382 * clearing of the QENABLED flag and setting the INSERVICE flag. 1383 * consequently it is possible for remove_runlist in strclose 1384 * to not find the queue on the runlist but for it to be QENABLED 1385 * and not yet INSERVICE -> hence wait_svc needs to check QENABLED 1386 * as well as INSERVICE. 1387 */ 1388 void 1389 wait_svc(queue_t *qp) 1390 { 1391 queue_t *wqp = _WR(qp); 1392 1393 ASSERT(qp->q_flag & QREADR); 1394 1395 /* 1396 * Try to remove queues from qhead/qtail list. 1397 */ 1398 if (qhead != NULL) { 1399 remove_runlist(qp); 1400 remove_runlist(wqp); 1401 } 1402 /* 1403 * Wait till the syncqs associated with the queue disappear from the 1404 * background processing list. 1405 * This only needs to be done for non-PERMOD perimeters since 1406 * for PERMOD perimeters the syncq may be shared and will only be freed 1407 * when the last module/driver is unloaded. 1408 * If for PERMOD perimeters queue was on the syncq list, removeq() 1409 * should call propagate_syncq() or drain_syncq() for it. Both of these 1410 * functions remove the queue from its syncq list, so sqthread will not 1411 * try to access the queue. 1412 */ 1413 if (!(qp->q_flag & QPERMOD)) { 1414 syncq_t *rsq = qp->q_syncq; 1415 syncq_t *wsq = wqp->q_syncq; 1416 1417 /* 1418 * Disable rsq and wsq and wait for any background processing of 1419 * syncq to complete. 1420 */ 1421 wait_sq_svc(rsq); 1422 if (wsq != rsq) 1423 wait_sq_svc(wsq); 1424 } 1425 1426 mutex_enter(QLOCK(qp)); 1427 while (qp->q_flag & (QINSERVICE|QENAB)) 1428 cv_wait(&qp->q_wait, QLOCK(qp)); 1429 mutex_exit(QLOCK(qp)); 1430 mutex_enter(QLOCK(wqp)); 1431 while (wqp->q_flag & (QINSERVICE|QENAB)) 1432 cv_wait(&wqp->q_wait, QLOCK(wqp)); 1433 mutex_exit(QLOCK(wqp)); 1434 } 1435 1436 /* 1437 * Put ioctl data from userland buffer `arg' into the mblk chain `bp'. 1438 * `flag' must always contain either K_TO_K or U_TO_K; STR_NOSIG may 1439 * also be set, and is passed through to allocb_cred_wait(). 1440 * 1441 * Returns errno on failure, zero on success. 1442 */ 1443 int 1444 putiocd(mblk_t *bp, char *arg, int flag, cred_t *cr) 1445 { 1446 mblk_t *tmp; 1447 ssize_t count; 1448 int error = 0; 1449 1450 ASSERT((flag & (U_TO_K | K_TO_K)) == U_TO_K || 1451 (flag & (U_TO_K | K_TO_K)) == K_TO_K); 1452 1453 if (bp->b_datap->db_type == M_IOCTL) { 1454 count = ((struct iocblk *)bp->b_rptr)->ioc_count; 1455 } else { 1456 ASSERT(bp->b_datap->db_type == M_COPYIN); 1457 count = ((struct copyreq *)bp->b_rptr)->cq_size; 1458 } 1459 /* 1460 * strdoioctl validates ioc_count, so if this assert fails it 1461 * cannot be due to user error. 1462 */ 1463 ASSERT(count >= 0); 1464 1465 if ((tmp = allocb_cred_wait(count, (flag & STR_NOSIG), &error, cr, 1466 curproc->p_pid)) == NULL) { 1467 return (error); 1468 } 1469 error = strcopyin(arg, tmp->b_wptr, count, flag & (U_TO_K|K_TO_K)); 1470 if (error != 0) { 1471 freeb(tmp); 1472 return (error); 1473 } 1474 DB_CPID(tmp) = curproc->p_pid; 1475 tmp->b_wptr += count; 1476 bp->b_cont = tmp; 1477 1478 return (0); 1479 } 1480 1481 /* 1482 * Copy ioctl data to user-land. Return non-zero errno on failure, 1483 * 0 for success. 1484 */ 1485 int 1486 getiocd(mblk_t *bp, char *arg, int copymode) 1487 { 1488 ssize_t count; 1489 size_t n; 1490 int error; 1491 1492 if (bp->b_datap->db_type == M_IOCACK) 1493 count = ((struct iocblk *)bp->b_rptr)->ioc_count; 1494 else { 1495 ASSERT(bp->b_datap->db_type == M_COPYOUT); 1496 count = ((struct copyreq *)bp->b_rptr)->cq_size; 1497 } 1498 ASSERT(count >= 0); 1499 1500 for (bp = bp->b_cont; bp && count; 1501 count -= n, bp = bp->b_cont, arg += n) { 1502 n = MIN(count, bp->b_wptr - bp->b_rptr); 1503 error = strcopyout(bp->b_rptr, arg, n, copymode); 1504 if (error) 1505 return (error); 1506 } 1507 ASSERT(count == 0); 1508 return (0); 1509 } 1510 1511 /* 1512 * Allocate a linkinfo entry given the write queue of the 1513 * bottom module of the top stream and the write queue of the 1514 * stream head of the bottom stream. 1515 */ 1516 linkinfo_t * 1517 alloclink(queue_t *qup, queue_t *qdown, file_t *fpdown) 1518 { 1519 linkinfo_t *linkp; 1520 1521 linkp = kmem_cache_alloc(linkinfo_cache, KM_SLEEP); 1522 1523 linkp->li_lblk.l_qtop = qup; 1524 linkp->li_lblk.l_qbot = qdown; 1525 linkp->li_fpdown = fpdown; 1526 1527 mutex_enter(&strresources); 1528 linkp->li_next = linkinfo_list; 1529 linkp->li_prev = NULL; 1530 if (linkp->li_next) 1531 linkp->li_next->li_prev = linkp; 1532 linkinfo_list = linkp; 1533 linkp->li_lblk.l_index = ++lnk_id; 1534 ASSERT(lnk_id != 0); /* this should never wrap in practice */ 1535 mutex_exit(&strresources); 1536 1537 return (linkp); 1538 } 1539 1540 /* 1541 * Free a linkinfo entry. 1542 */ 1543 void 1544 lbfree(linkinfo_t *linkp) 1545 { 1546 mutex_enter(&strresources); 1547 if (linkp->li_next) 1548 linkp->li_next->li_prev = linkp->li_prev; 1549 if (linkp->li_prev) 1550 linkp->li_prev->li_next = linkp->li_next; 1551 else 1552 linkinfo_list = linkp->li_next; 1553 mutex_exit(&strresources); 1554 1555 kmem_cache_free(linkinfo_cache, linkp); 1556 } 1557 1558 /* 1559 * Check for a potential linking cycle. 1560 * Return 1 if a link will result in a cycle, 1561 * and 0 otherwise. 1562 */ 1563 int 1564 linkcycle(stdata_t *upstp, stdata_t *lostp, str_stack_t *ss) 1565 { 1566 struct mux_node *np; 1567 struct mux_edge *ep; 1568 int i; 1569 major_t lomaj; 1570 major_t upmaj; 1571 /* 1572 * if the lower stream is a pipe/FIFO, return, since link 1573 * cycles can not happen on pipes/FIFOs 1574 */ 1575 if (lostp->sd_vnode->v_type == VFIFO) 1576 return (0); 1577 1578 for (i = 0; i < ss->ss_devcnt; i++) { 1579 np = &ss->ss_mux_nodes[i]; 1580 MUX_CLEAR(np); 1581 } 1582 lomaj = getmajor(lostp->sd_vnode->v_rdev); 1583 upmaj = getmajor(upstp->sd_vnode->v_rdev); 1584 np = &ss->ss_mux_nodes[lomaj]; 1585 for (;;) { 1586 if (!MUX_DIDVISIT(np)) { 1587 if (np->mn_imaj == upmaj) 1588 return (1); 1589 if (np->mn_outp == NULL) { 1590 MUX_VISIT(np); 1591 if (np->mn_originp == NULL) 1592 return (0); 1593 np = np->mn_originp; 1594 continue; 1595 } 1596 MUX_VISIT(np); 1597 np->mn_startp = np->mn_outp; 1598 } else { 1599 if (np->mn_startp == NULL) { 1600 if (np->mn_originp == NULL) 1601 return (0); 1602 else { 1603 np = np->mn_originp; 1604 continue; 1605 } 1606 } 1607 /* 1608 * If ep->me_nodep is a FIFO (me_nodep == NULL), 1609 * ignore the edge and move on. ep->me_nodep gets 1610 * set to NULL in mux_addedge() if it is a FIFO. 1611 * 1612 */ 1613 ep = np->mn_startp; 1614 np->mn_startp = ep->me_nextp; 1615 if (ep->me_nodep == NULL) 1616 continue; 1617 ep->me_nodep->mn_originp = np; 1618 np = ep->me_nodep; 1619 } 1620 } 1621 } 1622 1623 /* 1624 * Find linkinfo entry corresponding to the parameters. 1625 */ 1626 linkinfo_t * 1627 findlinks(stdata_t *stp, int index, int type, str_stack_t *ss) 1628 { 1629 linkinfo_t *linkp; 1630 struct mux_edge *mep; 1631 struct mux_node *mnp; 1632 queue_t *qup; 1633 1634 mutex_enter(&strresources); 1635 if ((type & LINKTYPEMASK) == LINKNORMAL) { 1636 qup = getendq(stp->sd_wrq); 1637 for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) { 1638 if ((qup == linkp->li_lblk.l_qtop) && 1639 (!index || (index == linkp->li_lblk.l_index))) { 1640 mutex_exit(&strresources); 1641 return (linkp); 1642 } 1643 } 1644 } else { 1645 ASSERT((type & LINKTYPEMASK) == LINKPERSIST); 1646 mnp = &ss->ss_mux_nodes[getmajor(stp->sd_vnode->v_rdev)]; 1647 mep = mnp->mn_outp; 1648 while (mep) { 1649 if ((index == 0) || (index == mep->me_muxid)) 1650 break; 1651 mep = mep->me_nextp; 1652 } 1653 if (!mep) { 1654 mutex_exit(&strresources); 1655 return (NULL); 1656 } 1657 for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) { 1658 if ((!linkp->li_lblk.l_qtop) && 1659 (mep->me_muxid == linkp->li_lblk.l_index)) { 1660 mutex_exit(&strresources); 1661 return (linkp); 1662 } 1663 } 1664 } 1665 mutex_exit(&strresources); 1666 return (NULL); 1667 } 1668 1669 /* 1670 * Given a queue ptr, follow the chain of q_next pointers until you reach the 1671 * last queue on the chain and return it. 1672 */ 1673 queue_t * 1674 getendq(queue_t *q) 1675 { 1676 ASSERT(q != NULL); 1677 while (_SAMESTR(q)) 1678 q = q->q_next; 1679 return (q); 1680 } 1681 1682 /* 1683 * Wait for the syncq count to drop to zero. 1684 * sq could be either outer or inner. 1685 */ 1686 1687 static void 1688 wait_syncq(syncq_t *sq) 1689 { 1690 uint16_t count; 1691 1692 mutex_enter(SQLOCK(sq)); 1693 count = sq->sq_count; 1694 SQ_PUTLOCKS_ENTER(sq); 1695 SUM_SQ_PUTCOUNTS(sq, count); 1696 while (count != 0) { 1697 sq->sq_flags |= SQ_WANTWAKEUP; 1698 SQ_PUTLOCKS_EXIT(sq); 1699 cv_wait(&sq->sq_wait, SQLOCK(sq)); 1700 count = sq->sq_count; 1701 SQ_PUTLOCKS_ENTER(sq); 1702 SUM_SQ_PUTCOUNTS(sq, count); 1703 } 1704 SQ_PUTLOCKS_EXIT(sq); 1705 mutex_exit(SQLOCK(sq)); 1706 } 1707 1708 /* 1709 * Wait while there are any messages for the queue in its syncq. 1710 */ 1711 static void 1712 wait_q_syncq(queue_t *q) 1713 { 1714 if ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) { 1715 syncq_t *sq = q->q_syncq; 1716 1717 mutex_enter(SQLOCK(sq)); 1718 while ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) { 1719 sq->sq_flags |= SQ_WANTWAKEUP; 1720 cv_wait(&sq->sq_wait, SQLOCK(sq)); 1721 } 1722 mutex_exit(SQLOCK(sq)); 1723 } 1724 } 1725 1726 1727 int 1728 mlink_file(vnode_t *vp, int cmd, struct file *fpdown, cred_t *crp, int *rvalp, 1729 int lhlink) 1730 { 1731 struct stdata *stp; 1732 struct strioctl strioc; 1733 struct linkinfo *linkp; 1734 struct stdata *stpdown; 1735 struct streamtab *str; 1736 queue_t *passq; 1737 syncq_t *passyncq; 1738 queue_t *rq; 1739 cdevsw_impl_t *dp; 1740 uint32_t qflag; 1741 uint32_t sqtype; 1742 perdm_t *dmp; 1743 int error = 0; 1744 netstack_t *ns; 1745 str_stack_t *ss; 1746 1747 stp = vp->v_stream; 1748 TRACE_1(TR_FAC_STREAMS_FR, 1749 TR_I_LINK, "I_LINK/I_PLINK:stp %p", stp); 1750 /* 1751 * Test for invalid upper stream 1752 */ 1753 if (stp->sd_flag & STRHUP) { 1754 return (ENXIO); 1755 } 1756 if (vp->v_type == VFIFO) { 1757 return (EINVAL); 1758 } 1759 if (stp->sd_strtab == NULL) { 1760 return (EINVAL); 1761 } 1762 if (!stp->sd_strtab->st_muxwinit) { 1763 return (EINVAL); 1764 } 1765 if (fpdown == NULL) { 1766 return (EBADF); 1767 } 1768 ns = netstack_find_by_cred(crp); 1769 ASSERT(ns != NULL); 1770 ss = ns->netstack_str; 1771 ASSERT(ss != NULL); 1772 1773 if (getmajor(stp->sd_vnode->v_rdev) >= ss->ss_devcnt) { 1774 netstack_rele(ss->ss_netstack); 1775 return (EINVAL); 1776 } 1777 mutex_enter(&muxifier); 1778 if (stp->sd_flag & STPLEX) { 1779 mutex_exit(&muxifier); 1780 netstack_rele(ss->ss_netstack); 1781 return (ENXIO); 1782 } 1783 1784 /* 1785 * Test for invalid lower stream. 1786 * The check for the v_type != VFIFO and having a major 1787 * number not >= devcnt is done to avoid problems with 1788 * adding mux_node entry past the end of mux_nodes[]. 1789 * For FIFO's we don't add an entry so this isn't a 1790 * problem. 1791 */ 1792 if (((stpdown = fpdown->f_vnode->v_stream) == NULL) || 1793 (stpdown == stp) || (stpdown->sd_flag & 1794 (STPLEX|STRHUP|STRDERR|STWRERR|IOCWAIT|STRPLUMB)) || 1795 ((stpdown->sd_vnode->v_type != VFIFO) && 1796 (getmajor(stpdown->sd_vnode->v_rdev) >= ss->ss_devcnt)) || 1797 linkcycle(stp, stpdown, ss)) { 1798 mutex_exit(&muxifier); 1799 netstack_rele(ss->ss_netstack); 1800 return (EINVAL); 1801 } 1802 TRACE_1(TR_FAC_STREAMS_FR, 1803 TR_STPDOWN, "stpdown:%p", stpdown); 1804 rq = getendq(stp->sd_wrq); 1805 if (cmd == I_PLINK) 1806 rq = NULL; 1807 1808 linkp = alloclink(rq, stpdown->sd_wrq, fpdown); 1809 1810 strioc.ic_cmd = cmd; 1811 strioc.ic_timout = INFTIM; 1812 strioc.ic_len = sizeof (struct linkblk); 1813 strioc.ic_dp = (char *)&linkp->li_lblk; 1814 1815 /* 1816 * STRPLUMB protects plumbing changes and should be set before 1817 * link_addpassthru()/link_rempassthru() are called, so it is set here 1818 * and cleared in the end of mlink when passthru queue is removed. 1819 * Setting of STRPLUMB prevents reopens of the stream while passthru 1820 * queue is in-place (it is not a proper module and doesn't have open 1821 * entry point). 1822 * 1823 * STPLEX prevents any threads from entering the stream from above. It 1824 * can't be set before the call to link_addpassthru() because putnext 1825 * from below may cause stream head I/O routines to be called and these 1826 * routines assert that STPLEX is not set. After link_addpassthru() 1827 * nothing may come from below since the pass queue syncq is blocked. 1828 * Note also that STPLEX should be cleared before the call to 1829 * link_rempassthru() since when messages start flowing to the stream 1830 * head (e.g. because of message propagation from the pass queue) stream 1831 * head I/O routines may be called with STPLEX flag set. 1832 * 1833 * When STPLEX is set, nothing may come into the stream from above and 1834 * it is safe to do a setq which will change stream head. So, the 1835 * correct sequence of actions is: 1836 * 1837 * 1) Set STRPLUMB 1838 * 2) Call link_addpassthru() 1839 * 3) Set STPLEX 1840 * 4) Call setq and update the stream state 1841 * 5) Clear STPLEX 1842 * 6) Call link_rempassthru() 1843 * 7) Clear STRPLUMB 1844 * 1845 * The same sequence applies to munlink() code. 1846 */ 1847 mutex_enter(&stpdown->sd_lock); 1848 stpdown->sd_flag |= STRPLUMB; 1849 mutex_exit(&stpdown->sd_lock); 1850 /* 1851 * Add passthru queue below lower mux. This will block 1852 * syncqs of lower muxs read queue during I_LINK/I_UNLINK. 1853 */ 1854 passq = link_addpassthru(stpdown); 1855 1856 mutex_enter(&stpdown->sd_lock); 1857 stpdown->sd_flag |= STPLEX; 1858 mutex_exit(&stpdown->sd_lock); 1859 1860 rq = _RD(stpdown->sd_wrq); 1861 /* 1862 * There may be messages in the streamhead's syncq due to messages 1863 * that arrived before link_addpassthru() was done. To avoid 1864 * background processing of the syncq happening simultaneous with 1865 * setq processing, we disable the streamhead syncq and wait until 1866 * existing background thread finishes working on it. 1867 */ 1868 wait_sq_svc(rq->q_syncq); 1869 passyncq = passq->q_syncq; 1870 if (!(passyncq->sq_flags & SQ_BLOCKED)) 1871 blocksq(passyncq, SQ_BLOCKED, 0); 1872 1873 ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE); 1874 ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq)); 1875 rq->q_ptr = _WR(rq)->q_ptr = NULL; 1876 1877 /* setq might sleep in allocator - avoid holding locks. */ 1878 /* Note: we are holding muxifier here. */ 1879 1880 str = stp->sd_strtab; 1881 dp = &devimpl[getmajor(vp->v_rdev)]; 1882 ASSERT(dp->d_str == str); 1883 1884 qflag = dp->d_qflag; 1885 sqtype = dp->d_sqtype; 1886 1887 /* create perdm_t if needed */ 1888 if (NEED_DM(dp->d_dmp, qflag)) 1889 dp->d_dmp = hold_dm(str, qflag, sqtype); 1890 1891 dmp = dp->d_dmp; 1892 1893 setq(rq, str->st_muxrinit, str->st_muxwinit, dmp, qflag, sqtype, 1894 B_TRUE); 1895 1896 /* 1897 * XXX Remove any "odd" messages from the queue. 1898 * Keep only M_DATA, M_PROTO, M_PCPROTO. 1899 */ 1900 error = strdoioctl(stp, &strioc, FNATIVE, 1901 K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp); 1902 if (error != 0) { 1903 lbfree(linkp); 1904 1905 if (!(passyncq->sq_flags & SQ_BLOCKED)) 1906 blocksq(passyncq, SQ_BLOCKED, 0); 1907 /* 1908 * Restore the stream head queue and then remove 1909 * the passq. Turn off STPLEX before we turn on 1910 * the stream by removing the passq. 1911 */ 1912 rq->q_ptr = _WR(rq)->q_ptr = stpdown; 1913 setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO, 1914 B_TRUE); 1915 1916 mutex_enter(&stpdown->sd_lock); 1917 stpdown->sd_flag &= ~STPLEX; 1918 mutex_exit(&stpdown->sd_lock); 1919 1920 link_rempassthru(passq); 1921 1922 mutex_enter(&stpdown->sd_lock); 1923 stpdown->sd_flag &= ~STRPLUMB; 1924 /* Wakeup anyone waiting for STRPLUMB to clear. */ 1925 cv_broadcast(&stpdown->sd_monitor); 1926 mutex_exit(&stpdown->sd_lock); 1927 1928 mutex_exit(&muxifier); 1929 netstack_rele(ss->ss_netstack); 1930 return (error); 1931 } 1932 mutex_enter(&fpdown->f_tlock); 1933 fpdown->f_count++; 1934 mutex_exit(&fpdown->f_tlock); 1935 1936 /* 1937 * if we've made it here the linkage is all set up so we should also 1938 * set up the layered driver linkages 1939 */ 1940 1941 ASSERT((cmd == I_LINK) || (cmd == I_PLINK)); 1942 if (cmd == I_LINK) { 1943 ldi_mlink_fp(stp, fpdown, lhlink, LINKNORMAL); 1944 } else { 1945 ldi_mlink_fp(stp, fpdown, lhlink, LINKPERSIST); 1946 } 1947 1948 link_rempassthru(passq); 1949 1950 mux_addedge(stp, stpdown, linkp->li_lblk.l_index, ss); 1951 1952 /* 1953 * Mark the upper stream as having dependent links 1954 * so that strclose can clean it up. 1955 */ 1956 if (cmd == I_LINK) { 1957 mutex_enter(&stp->sd_lock); 1958 stp->sd_flag |= STRHASLINKS; 1959 mutex_exit(&stp->sd_lock); 1960 } 1961 /* 1962 * Wake up any other processes that may have been 1963 * waiting on the lower stream. These will all 1964 * error out. 1965 */ 1966 mutex_enter(&stpdown->sd_lock); 1967 /* The passthru module is removed so we may release STRPLUMB */ 1968 stpdown->sd_flag &= ~STRPLUMB; 1969 cv_broadcast(&rq->q_wait); 1970 cv_broadcast(&_WR(rq)->q_wait); 1971 cv_broadcast(&stpdown->sd_monitor); 1972 mutex_exit(&stpdown->sd_lock); 1973 mutex_exit(&muxifier); 1974 *rvalp = linkp->li_lblk.l_index; 1975 netstack_rele(ss->ss_netstack); 1976 return (0); 1977 } 1978 1979 int 1980 mlink(vnode_t *vp, int cmd, int arg, cred_t *crp, int *rvalp, int lhlink) 1981 { 1982 int ret; 1983 struct file *fpdown; 1984 1985 fpdown = getf(arg); 1986 ret = mlink_file(vp, cmd, fpdown, crp, rvalp, lhlink); 1987 if (fpdown != NULL) 1988 releasef(arg); 1989 return (ret); 1990 } 1991 1992 /* 1993 * Unlink a multiplexor link. Stp is the controlling stream for the 1994 * link, and linkp points to the link's entry in the linkinfo list. 1995 * The muxifier lock must be held on entry and is dropped on exit. 1996 * 1997 * NOTE : Currently it is assumed that mux would process all the messages 1998 * sitting on it's queue before ACKing the UNLINK. It is the responsibility 1999 * of the mux to handle all the messages that arrive before UNLINK. 2000 * If the mux has to send down messages on its lower stream before 2001 * ACKing I_UNLINK, then it *should* know to handle messages even 2002 * after the UNLINK is acked (actually it should be able to handle till we 2003 * re-block the read side of the pass queue here). If the mux does not 2004 * open up the lower stream, any messages that arrive during UNLINK 2005 * will be put in the stream head. In the case of lower stream opening 2006 * up, some messages might land in the stream head depending on when 2007 * the message arrived and when the read side of the pass queue was 2008 * re-blocked. 2009 */ 2010 int 2011 munlink(stdata_t *stp, linkinfo_t *linkp, int flag, cred_t *crp, int *rvalp, 2012 str_stack_t *ss) 2013 { 2014 struct strioctl strioc; 2015 struct stdata *stpdown; 2016 queue_t *rq, *wrq; 2017 queue_t *passq; 2018 syncq_t *passyncq; 2019 int error = 0; 2020 file_t *fpdown; 2021 2022 ASSERT(MUTEX_HELD(&muxifier)); 2023 2024 stpdown = linkp->li_fpdown->f_vnode->v_stream; 2025 2026 /* 2027 * See the comment in mlink() concerning STRPLUMB/STPLEX flags. 2028 */ 2029 mutex_enter(&stpdown->sd_lock); 2030 stpdown->sd_flag |= STRPLUMB; 2031 mutex_exit(&stpdown->sd_lock); 2032 2033 /* 2034 * Add passthru queue below lower mux. This will block 2035 * syncqs of lower muxs read queue during I_LINK/I_UNLINK. 2036 */ 2037 passq = link_addpassthru(stpdown); 2038 2039 if ((flag & LINKTYPEMASK) == LINKNORMAL) 2040 strioc.ic_cmd = I_UNLINK; 2041 else 2042 strioc.ic_cmd = I_PUNLINK; 2043 strioc.ic_timout = INFTIM; 2044 strioc.ic_len = sizeof (struct linkblk); 2045 strioc.ic_dp = (char *)&linkp->li_lblk; 2046 2047 error = strdoioctl(stp, &strioc, FNATIVE, 2048 K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp); 2049 2050 /* 2051 * If there was an error and this is not called via strclose, 2052 * return to the user. Otherwise, pretend there was no error 2053 * and close the link. 2054 */ 2055 if (error) { 2056 if (flag & LINKCLOSE) { 2057 cmn_err(CE_WARN, "KERNEL: munlink: could not perform " 2058 "unlink ioctl, closing anyway (%d)\n", error); 2059 } else { 2060 link_rempassthru(passq); 2061 mutex_enter(&stpdown->sd_lock); 2062 stpdown->sd_flag &= ~STRPLUMB; 2063 cv_broadcast(&stpdown->sd_monitor); 2064 mutex_exit(&stpdown->sd_lock); 2065 mutex_exit(&muxifier); 2066 return (error); 2067 } 2068 } 2069 2070 mux_rmvedge(stp, linkp->li_lblk.l_index, ss); 2071 fpdown = linkp->li_fpdown; 2072 lbfree(linkp); 2073 2074 /* 2075 * We go ahead and drop muxifier here--it's a nasty global lock that 2076 * can slow others down. It's okay to since attempts to mlink() this 2077 * stream will be stopped because STPLEX is still set in the stdata 2078 * structure, and munlink() is stopped because mux_rmvedge() and 2079 * lbfree() have removed it from mux_nodes[] and linkinfo_list, 2080 * respectively. Note that we defer the closef() of fpdown until 2081 * after we drop muxifier since strclose() can call munlinkall(). 2082 */ 2083 mutex_exit(&muxifier); 2084 2085 wrq = stpdown->sd_wrq; 2086 rq = _RD(wrq); 2087 2088 /* 2089 * Get rid of outstanding service procedure runs, before we make 2090 * it a stream head, since a stream head doesn't have any service 2091 * procedure. 2092 */ 2093 disable_svc(rq); 2094 wait_svc(rq); 2095 2096 /* 2097 * Since we don't disable the syncq for QPERMOD, we wait for whatever 2098 * is queued up to be finished. mux should take care that nothing is 2099 * send down to this queue. We should do it now as we're going to block 2100 * passyncq if it was unblocked. 2101 */ 2102 if (wrq->q_flag & QPERMOD) { 2103 syncq_t *sq = wrq->q_syncq; 2104 2105 mutex_enter(SQLOCK(sq)); 2106 while (wrq->q_sqflags & Q_SQQUEUED) { 2107 sq->sq_flags |= SQ_WANTWAKEUP; 2108 cv_wait(&sq->sq_wait, SQLOCK(sq)); 2109 } 2110 mutex_exit(SQLOCK(sq)); 2111 } 2112 passyncq = passq->q_syncq; 2113 if (!(passyncq->sq_flags & SQ_BLOCKED)) { 2114 2115 syncq_t *sq, *outer; 2116 2117 /* 2118 * Messages could be flowing from underneath. We will 2119 * block the read side of the passq. This would be 2120 * sufficient for QPAIR and QPERQ muxes to ensure 2121 * that no data is flowing up into this queue 2122 * and hence no thread active in this instance of 2123 * lower mux. But for QPERMOD and QMTOUTPERIM there 2124 * could be messages on the inner and outer/inner 2125 * syncqs respectively. We will wait for them to drain. 2126 * Because passq is blocked messages end up in the syncq 2127 * And qfill_syncq could possibly end up setting QFULL 2128 * which will access the rq->q_flag. Hence, we have to 2129 * acquire the QLOCK in setq. 2130 * 2131 * XXX Messages can also flow from top into this 2132 * queue though the unlink is over (Ex. some instance 2133 * in putnext() called from top that has still not 2134 * accessed this queue. And also putq(lowerq) ?). 2135 * Solution : How about blocking the l_qtop queue ? 2136 * Do we really care about such pure D_MP muxes ? 2137 */ 2138 2139 blocksq(passyncq, SQ_BLOCKED, 0); 2140 2141 sq = rq->q_syncq; 2142 if ((outer = sq->sq_outer) != NULL) { 2143 2144 /* 2145 * We have to just wait for the outer sq_count 2146 * drop to zero. As this does not prevent new 2147 * messages to enter the outer perimeter, this 2148 * is subject to starvation. 2149 * 2150 * NOTE :Because of blocksq above, messages could 2151 * be in the inner syncq only because of some 2152 * thread holding the outer perimeter exclusively. 2153 * Hence it would be sufficient to wait for the 2154 * exclusive holder of the outer perimeter to drain 2155 * the inner and outer syncqs. But we will not depend 2156 * on this feature and hence check the inner syncqs 2157 * separately. 2158 */ 2159 wait_syncq(outer); 2160 } 2161 2162 2163 /* 2164 * There could be messages destined for 2165 * this queue. Let the exclusive holder 2166 * drain it. 2167 */ 2168 2169 wait_syncq(sq); 2170 ASSERT((rq->q_flag & QPERMOD) || 2171 ((rq->q_syncq->sq_head == NULL) && 2172 (_WR(rq)->q_syncq->sq_head == NULL))); 2173 } 2174 2175 /* 2176 * We haven't taken care of QPERMOD case yet. QPERMOD is a special 2177 * case as we don't disable its syncq or remove it off the syncq 2178 * service list. 2179 */ 2180 if (rq->q_flag & QPERMOD) { 2181 syncq_t *sq = rq->q_syncq; 2182 2183 mutex_enter(SQLOCK(sq)); 2184 while (rq->q_sqflags & Q_SQQUEUED) { 2185 sq->sq_flags |= SQ_WANTWAKEUP; 2186 cv_wait(&sq->sq_wait, SQLOCK(sq)); 2187 } 2188 mutex_exit(SQLOCK(sq)); 2189 } 2190 2191 /* 2192 * flush_syncq changes states only when there are some messages to 2193 * free, i.e. when it returns non-zero value to return. 2194 */ 2195 ASSERT(flush_syncq(rq->q_syncq, rq) == 0); 2196 ASSERT(flush_syncq(wrq->q_syncq, wrq) == 0); 2197 2198 /* 2199 * Nobody else should know about this queue now. 2200 * If the mux did not process the messages before 2201 * acking the I_UNLINK, free them now. 2202 */ 2203 2204 flushq(rq, FLUSHALL); 2205 flushq(_WR(rq), FLUSHALL); 2206 2207 /* 2208 * Convert the mux lower queue into a stream head queue. 2209 * Turn off STPLEX before we turn on the stream by removing the passq. 2210 */ 2211 rq->q_ptr = wrq->q_ptr = stpdown; 2212 setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO, B_TRUE); 2213 2214 ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE); 2215 ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq)); 2216 2217 enable_svc(rq); 2218 2219 /* 2220 * Now it is a proper stream, so STPLEX is cleared. But STRPLUMB still 2221 * needs to be set to prevent reopen() of the stream - such reopen may 2222 * try to call non-existent pass queue open routine and panic. 2223 */ 2224 mutex_enter(&stpdown->sd_lock); 2225 stpdown->sd_flag &= ~STPLEX; 2226 mutex_exit(&stpdown->sd_lock); 2227 2228 ASSERT(((flag & LINKTYPEMASK) == LINKNORMAL) || 2229 ((flag & LINKTYPEMASK) == LINKPERSIST)); 2230 2231 /* clean up the layered driver linkages */ 2232 if ((flag & LINKTYPEMASK) == LINKNORMAL) { 2233 ldi_munlink_fp(stp, fpdown, LINKNORMAL); 2234 } else { 2235 ldi_munlink_fp(stp, fpdown, LINKPERSIST); 2236 } 2237 2238 link_rempassthru(passq); 2239 2240 /* 2241 * Now all plumbing changes are finished and STRPLUMB is no 2242 * longer needed. 2243 */ 2244 mutex_enter(&stpdown->sd_lock); 2245 stpdown->sd_flag &= ~STRPLUMB; 2246 cv_broadcast(&stpdown->sd_monitor); 2247 mutex_exit(&stpdown->sd_lock); 2248 2249 (void) closef(fpdown); 2250 return (0); 2251 } 2252 2253 /* 2254 * Unlink all multiplexor links for which stp is the controlling stream. 2255 * Return 0, or a non-zero errno on failure. 2256 */ 2257 int 2258 munlinkall(stdata_t *stp, int flag, cred_t *crp, int *rvalp, str_stack_t *ss) 2259 { 2260 linkinfo_t *linkp; 2261 int error = 0; 2262 2263 mutex_enter(&muxifier); 2264 while (linkp = findlinks(stp, 0, flag, ss)) { 2265 /* 2266 * munlink() releases the muxifier lock. 2267 */ 2268 if (error = munlink(stp, linkp, flag, crp, rvalp, ss)) 2269 return (error); 2270 mutex_enter(&muxifier); 2271 } 2272 mutex_exit(&muxifier); 2273 return (0); 2274 } 2275 2276 /* 2277 * A multiplexor link has been made. Add an 2278 * edge to the directed graph. 2279 */ 2280 void 2281 mux_addedge(stdata_t *upstp, stdata_t *lostp, int muxid, str_stack_t *ss) 2282 { 2283 struct mux_node *np; 2284 struct mux_edge *ep; 2285 major_t upmaj; 2286 major_t lomaj; 2287 2288 upmaj = getmajor(upstp->sd_vnode->v_rdev); 2289 lomaj = getmajor(lostp->sd_vnode->v_rdev); 2290 np = &ss->ss_mux_nodes[upmaj]; 2291 if (np->mn_outp) { 2292 ep = np->mn_outp; 2293 while (ep->me_nextp) 2294 ep = ep->me_nextp; 2295 ep->me_nextp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP); 2296 ep = ep->me_nextp; 2297 } else { 2298 np->mn_outp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP); 2299 ep = np->mn_outp; 2300 } 2301 ep->me_nextp = NULL; 2302 ep->me_muxid = muxid; 2303 /* 2304 * Save the dev_t for the purposes of str_stack_shutdown. 2305 * str_stack_shutdown assumes that the device allows reopen, since 2306 * this dev_t is the one after any cloning by xx_open(). 2307 * Would prefer finding the dev_t from before any cloning, 2308 * but specfs doesn't retain that. 2309 */ 2310 ep->me_dev = upstp->sd_vnode->v_rdev; 2311 if (lostp->sd_vnode->v_type == VFIFO) 2312 ep->me_nodep = NULL; 2313 else 2314 ep->me_nodep = &ss->ss_mux_nodes[lomaj]; 2315 } 2316 2317 /* 2318 * A multiplexor link has been removed. Remove the 2319 * edge in the directed graph. 2320 */ 2321 void 2322 mux_rmvedge(stdata_t *upstp, int muxid, str_stack_t *ss) 2323 { 2324 struct mux_node *np; 2325 struct mux_edge *ep; 2326 struct mux_edge *pep = NULL; 2327 major_t upmaj; 2328 2329 upmaj = getmajor(upstp->sd_vnode->v_rdev); 2330 np = &ss->ss_mux_nodes[upmaj]; 2331 ASSERT(np->mn_outp != NULL); 2332 ep = np->mn_outp; 2333 while (ep) { 2334 if (ep->me_muxid == muxid) { 2335 if (pep) 2336 pep->me_nextp = ep->me_nextp; 2337 else 2338 np->mn_outp = ep->me_nextp; 2339 kmem_free(ep, sizeof (struct mux_edge)); 2340 return; 2341 } 2342 pep = ep; 2343 ep = ep->me_nextp; 2344 } 2345 ASSERT(0); /* should not reach here */ 2346 } 2347 2348 /* 2349 * Translate the device flags (from conf.h) to the corresponding 2350 * qflag and sq_flag (type) values. 2351 */ 2352 int 2353 devflg_to_qflag(struct streamtab *stp, uint32_t devflag, uint32_t *qflagp, 2354 uint32_t *sqtypep) 2355 { 2356 uint32_t qflag = 0; 2357 uint32_t sqtype = 0; 2358 2359 if (devflag & _D_OLD) 2360 goto bad; 2361 2362 /* Inner perimeter presence and scope */ 2363 switch (devflag & D_MTINNER_MASK) { 2364 case D_MP: 2365 qflag |= QMTSAFE; 2366 sqtype |= SQ_CI; 2367 break; 2368 case D_MTPERQ|D_MP: 2369 qflag |= QPERQ; 2370 break; 2371 case D_MTQPAIR|D_MP: 2372 qflag |= QPAIR; 2373 break; 2374 case D_MTPERMOD|D_MP: 2375 qflag |= QPERMOD; 2376 break; 2377 default: 2378 goto bad; 2379 } 2380 2381 /* Outer perimeter */ 2382 if (devflag & D_MTOUTPERIM) { 2383 switch (devflag & D_MTINNER_MASK) { 2384 case D_MP: 2385 case D_MTPERQ|D_MP: 2386 case D_MTQPAIR|D_MP: 2387 break; 2388 default: 2389 goto bad; 2390 } 2391 qflag |= QMTOUTPERIM; 2392 } 2393 2394 /* Inner perimeter modifiers */ 2395 if (devflag & D_MTINNER_MOD) { 2396 switch (devflag & D_MTINNER_MASK) { 2397 case D_MP: 2398 goto bad; 2399 default: 2400 break; 2401 } 2402 if (devflag & D_MTPUTSHARED) 2403 sqtype |= SQ_CIPUT; 2404 if (devflag & _D_MTOCSHARED) { 2405 /* 2406 * The code in putnext assumes that it has the 2407 * highest concurrency by not checking sq_count. 2408 * Thus _D_MTOCSHARED can only be supported when 2409 * D_MTPUTSHARED is set. 2410 */ 2411 if (!(devflag & D_MTPUTSHARED)) 2412 goto bad; 2413 sqtype |= SQ_CIOC; 2414 } 2415 if (devflag & _D_MTCBSHARED) { 2416 /* 2417 * The code in putnext assumes that it has the 2418 * highest concurrency by not checking sq_count. 2419 * Thus _D_MTCBSHARED can only be supported when 2420 * D_MTPUTSHARED is set. 2421 */ 2422 if (!(devflag & D_MTPUTSHARED)) 2423 goto bad; 2424 sqtype |= SQ_CICB; 2425 } 2426 if (devflag & _D_MTSVCSHARED) { 2427 /* 2428 * The code in putnext assumes that it has the 2429 * highest concurrency by not checking sq_count. 2430 * Thus _D_MTSVCSHARED can only be supported when 2431 * D_MTPUTSHARED is set. Also _D_MTSVCSHARED is 2432 * supported only for QPERMOD. 2433 */ 2434 if (!(devflag & D_MTPUTSHARED) || !(qflag & QPERMOD)) 2435 goto bad; 2436 sqtype |= SQ_CISVC; 2437 } 2438 } 2439 2440 /* Default outer perimeter concurrency */ 2441 sqtype |= SQ_CO; 2442 2443 /* Outer perimeter modifiers */ 2444 if (devflag & D_MTOCEXCL) { 2445 if (!(devflag & D_MTOUTPERIM)) { 2446 /* No outer perimeter */ 2447 goto bad; 2448 } 2449 sqtype &= ~SQ_COOC; 2450 } 2451 2452 /* Synchronous Streams extended qinit structure */ 2453 if (devflag & D_SYNCSTR) 2454 qflag |= QSYNCSTR; 2455 2456 /* 2457 * Private flag used by a transport module to indicate 2458 * to sockfs that it supports direct-access mode without 2459 * having to go through STREAMS. 2460 */ 2461 if (devflag & _D_DIRECT) { 2462 /* Reject unless the module is fully-MT (no perimeter) */ 2463 if ((qflag & QMT_TYPEMASK) != QMTSAFE) 2464 goto bad; 2465 qflag |= _QDIRECT; 2466 } 2467 2468 *qflagp = qflag; 2469 *sqtypep = sqtype; 2470 return (0); 2471 2472 bad: 2473 cmn_err(CE_WARN, 2474 "stropen: bad MT flags (0x%x) in driver '%s'", 2475 (int)(qflag & D_MTSAFETY_MASK), 2476 stp->st_rdinit->qi_minfo->mi_idname); 2477 2478 return (EINVAL); 2479 } 2480 2481 /* 2482 * Set the interface values for a pair of queues (qinit structure, 2483 * packet sizes, water marks). 2484 * setq assumes that the caller does not have a claim (entersq or claimq) 2485 * on the queue. 2486 */ 2487 void 2488 setq(queue_t *rq, struct qinit *rinit, struct qinit *winit, 2489 perdm_t *dmp, uint32_t qflag, uint32_t sqtype, boolean_t lock_needed) 2490 { 2491 queue_t *wq; 2492 syncq_t *sq, *outer; 2493 2494 ASSERT(rq->q_flag & QREADR); 2495 ASSERT((qflag & QMT_TYPEMASK) != 0); 2496 IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL); 2497 2498 wq = _WR(rq); 2499 rq->q_qinfo = rinit; 2500 rq->q_hiwat = rinit->qi_minfo->mi_hiwat; 2501 rq->q_lowat = rinit->qi_minfo->mi_lowat; 2502 rq->q_minpsz = rinit->qi_minfo->mi_minpsz; 2503 rq->q_maxpsz = rinit->qi_minfo->mi_maxpsz; 2504 wq->q_qinfo = winit; 2505 wq->q_hiwat = winit->qi_minfo->mi_hiwat; 2506 wq->q_lowat = winit->qi_minfo->mi_lowat; 2507 wq->q_minpsz = winit->qi_minfo->mi_minpsz; 2508 wq->q_maxpsz = winit->qi_minfo->mi_maxpsz; 2509 2510 /* Remove old syncqs */ 2511 sq = rq->q_syncq; 2512 outer = sq->sq_outer; 2513 if (outer != NULL) { 2514 ASSERT(wq->q_syncq->sq_outer == outer); 2515 outer_remove(outer, rq->q_syncq); 2516 if (wq->q_syncq != rq->q_syncq) 2517 outer_remove(outer, wq->q_syncq); 2518 } 2519 ASSERT(sq->sq_outer == NULL); 2520 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL); 2521 2522 if (sq != SQ(rq)) { 2523 if (!(rq->q_flag & QPERMOD)) 2524 free_syncq(sq); 2525 if (wq->q_syncq == rq->q_syncq) 2526 wq->q_syncq = NULL; 2527 rq->q_syncq = NULL; 2528 } 2529 if (wq->q_syncq != NULL && wq->q_syncq != sq && 2530 wq->q_syncq != SQ(rq)) { 2531 free_syncq(wq->q_syncq); 2532 wq->q_syncq = NULL; 2533 } 2534 ASSERT(rq->q_syncq == NULL || (rq->q_syncq->sq_head == NULL && 2535 rq->q_syncq->sq_tail == NULL)); 2536 ASSERT(wq->q_syncq == NULL || (wq->q_syncq->sq_head == NULL && 2537 wq->q_syncq->sq_tail == NULL)); 2538 2539 if (!(rq->q_flag & QPERMOD) && 2540 rq->q_syncq != NULL && rq->q_syncq->sq_ciputctrl != NULL) { 2541 ASSERT(rq->q_syncq->sq_nciputctrl == n_ciputctrl - 1); 2542 SUMCHECK_CIPUTCTRL_COUNTS(rq->q_syncq->sq_ciputctrl, 2543 rq->q_syncq->sq_nciputctrl, 0); 2544 ASSERT(ciputctrl_cache != NULL); 2545 kmem_cache_free(ciputctrl_cache, rq->q_syncq->sq_ciputctrl); 2546 rq->q_syncq->sq_ciputctrl = NULL; 2547 rq->q_syncq->sq_nciputctrl = 0; 2548 } 2549 2550 if (!(wq->q_flag & QPERMOD) && 2551 wq->q_syncq != NULL && wq->q_syncq->sq_ciputctrl != NULL) { 2552 ASSERT(wq->q_syncq->sq_nciputctrl == n_ciputctrl - 1); 2553 SUMCHECK_CIPUTCTRL_COUNTS(wq->q_syncq->sq_ciputctrl, 2554 wq->q_syncq->sq_nciputctrl, 0); 2555 ASSERT(ciputctrl_cache != NULL); 2556 kmem_cache_free(ciputctrl_cache, wq->q_syncq->sq_ciputctrl); 2557 wq->q_syncq->sq_ciputctrl = NULL; 2558 wq->q_syncq->sq_nciputctrl = 0; 2559 } 2560 2561 sq = SQ(rq); 2562 ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL); 2563 ASSERT(sq->sq_outer == NULL); 2564 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL); 2565 2566 /* 2567 * Create syncqs based on qflag and sqtype. Set the SQ_TYPES_IN_FLAGS 2568 * bits in sq_flag based on the sqtype. 2569 */ 2570 ASSERT((sq->sq_flags & ~SQ_TYPES_IN_FLAGS) == 0); 2571 2572 rq->q_syncq = wq->q_syncq = sq; 2573 sq->sq_type = sqtype; 2574 sq->sq_flags = (sqtype & SQ_TYPES_IN_FLAGS); 2575 2576 /* 2577 * We are making sq_svcflags zero, 2578 * resetting SQ_DISABLED in case it was set by 2579 * wait_svc() in the munlink path. 2580 * 2581 */ 2582 ASSERT((sq->sq_svcflags & SQ_SERVICE) == 0); 2583 sq->sq_svcflags = 0; 2584 2585 /* 2586 * We need to acquire the lock here for the mlink and munlink case, 2587 * where canputnext, backenable, etc can access the q_flag. 2588 */ 2589 if (lock_needed) { 2590 mutex_enter(QLOCK(rq)); 2591 rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2592 mutex_exit(QLOCK(rq)); 2593 mutex_enter(QLOCK(wq)); 2594 wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2595 mutex_exit(QLOCK(wq)); 2596 } else { 2597 rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2598 wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2599 } 2600 2601 if (qflag & QPERQ) { 2602 /* Allocate a separate syncq for the write side */ 2603 sq = new_syncq(); 2604 sq->sq_type = rq->q_syncq->sq_type; 2605 sq->sq_flags = rq->q_syncq->sq_flags; 2606 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL && 2607 sq->sq_oprev == NULL); 2608 wq->q_syncq = sq; 2609 } 2610 if (qflag & QPERMOD) { 2611 sq = dmp->dm_sq; 2612 2613 /* 2614 * Assert that we do have an inner perimeter syncq and that it 2615 * does not have an outer perimeter associated with it. 2616 */ 2617 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL && 2618 sq->sq_oprev == NULL); 2619 rq->q_syncq = wq->q_syncq = sq; 2620 } 2621 if (qflag & QMTOUTPERIM) { 2622 outer = dmp->dm_sq; 2623 2624 ASSERT(outer->sq_outer == NULL); 2625 outer_insert(outer, rq->q_syncq); 2626 if (wq->q_syncq != rq->q_syncq) 2627 outer_insert(outer, wq->q_syncq); 2628 } 2629 ASSERT((rq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) == 2630 (rq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS)); 2631 ASSERT((wq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) == 2632 (wq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS)); 2633 ASSERT((rq->q_flag & QMT_TYPEMASK) == (qflag & QMT_TYPEMASK)); 2634 2635 /* 2636 * Initialize struio() types. 2637 */ 2638 rq->q_struiot = 2639 (rq->q_flag & QSYNCSTR) ? rinit->qi_struiot : STRUIOT_NONE; 2640 wq->q_struiot = 2641 (wq->q_flag & QSYNCSTR) ? winit->qi_struiot : STRUIOT_NONE; 2642 } 2643 2644 perdm_t * 2645 hold_dm(struct streamtab *str, uint32_t qflag, uint32_t sqtype) 2646 { 2647 syncq_t *sq; 2648 perdm_t **pp; 2649 perdm_t *p; 2650 perdm_t *dmp; 2651 2652 ASSERT(str != NULL); 2653 ASSERT(qflag & (QPERMOD | QMTOUTPERIM)); 2654 2655 rw_enter(&perdm_rwlock, RW_READER); 2656 for (p = perdm_list; p != NULL; p = p->dm_next) { 2657 if (p->dm_str == str) { /* found one */ 2658 atomic_add_32(&(p->dm_ref), 1); 2659 rw_exit(&perdm_rwlock); 2660 return (p); 2661 } 2662 } 2663 rw_exit(&perdm_rwlock); 2664 2665 sq = new_syncq(); 2666 if (qflag & QPERMOD) { 2667 sq->sq_type = sqtype | SQ_PERMOD; 2668 sq->sq_flags = sqtype & SQ_TYPES_IN_FLAGS; 2669 } else { 2670 ASSERT(qflag & QMTOUTPERIM); 2671 sq->sq_onext = sq->sq_oprev = sq; 2672 } 2673 2674 dmp = kmem_alloc(sizeof (perdm_t), KM_SLEEP); 2675 dmp->dm_sq = sq; 2676 dmp->dm_str = str; 2677 dmp->dm_ref = 1; 2678 dmp->dm_next = NULL; 2679 2680 rw_enter(&perdm_rwlock, RW_WRITER); 2681 for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next)) { 2682 if (p->dm_str == str) { /* already present */ 2683 p->dm_ref++; 2684 rw_exit(&perdm_rwlock); 2685 free_syncq(sq); 2686 kmem_free(dmp, sizeof (perdm_t)); 2687 return (p); 2688 } 2689 } 2690 2691 *pp = dmp; 2692 rw_exit(&perdm_rwlock); 2693 return (dmp); 2694 } 2695 2696 void 2697 rele_dm(perdm_t *dmp) 2698 { 2699 perdm_t **pp; 2700 perdm_t *p; 2701 2702 rw_enter(&perdm_rwlock, RW_WRITER); 2703 ASSERT(dmp->dm_ref > 0); 2704 2705 if (--dmp->dm_ref > 0) { 2706 rw_exit(&perdm_rwlock); 2707 return; 2708 } 2709 2710 for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next)) 2711 if (p == dmp) 2712 break; 2713 ASSERT(p == dmp); 2714 *pp = p->dm_next; 2715 rw_exit(&perdm_rwlock); 2716 2717 /* 2718 * Wait for any background processing that relies on the 2719 * syncq to complete before it is freed. 2720 */ 2721 wait_sq_svc(p->dm_sq); 2722 free_syncq(p->dm_sq); 2723 kmem_free(p, sizeof (perdm_t)); 2724 } 2725 2726 /* 2727 * Make a protocol message given control and data buffers. 2728 * n.b., this can block; be careful of what locks you hold when calling it. 2729 * 2730 * If sd_maxblk is less than *iosize this routine can fail part way through 2731 * (due to an allocation failure). In this case on return *iosize will contain 2732 * the amount that was consumed. Otherwise *iosize will not be modified 2733 * i.e. it will contain the amount that was consumed. 2734 */ 2735 int 2736 strmakemsg( 2737 struct strbuf *mctl, 2738 ssize_t *iosize, 2739 struct uio *uiop, 2740 stdata_t *stp, 2741 int32_t flag, 2742 mblk_t **mpp) 2743 { 2744 mblk_t *mpctl = NULL; 2745 mblk_t *mpdata = NULL; 2746 int error; 2747 2748 ASSERT(uiop != NULL); 2749 2750 *mpp = NULL; 2751 /* Create control part, if any */ 2752 if ((mctl != NULL) && (mctl->len >= 0)) { 2753 error = strmakectl(mctl, flag, uiop->uio_fmode, &mpctl); 2754 if (error) 2755 return (error); 2756 } 2757 /* Create data part, if any */ 2758 if (*iosize >= 0) { 2759 error = strmakedata(iosize, uiop, stp, flag, &mpdata); 2760 if (error) { 2761 freemsg(mpctl); 2762 return (error); 2763 } 2764 } 2765 if (mpctl != NULL) { 2766 if (mpdata != NULL) 2767 linkb(mpctl, mpdata); 2768 *mpp = mpctl; 2769 } else { 2770 *mpp = mpdata; 2771 } 2772 return (0); 2773 } 2774 2775 /* 2776 * Make the control part of a protocol message given a control buffer. 2777 * n.b., this can block; be careful of what locks you hold when calling it. 2778 */ 2779 int 2780 strmakectl( 2781 struct strbuf *mctl, 2782 int32_t flag, 2783 int32_t fflag, 2784 mblk_t **mpp) 2785 { 2786 mblk_t *bp = NULL; 2787 unsigned char msgtype; 2788 int error = 0; 2789 cred_t *cr = CRED(); 2790 2791 /* We do not support interrupt threads using the stream head to send */ 2792 ASSERT(cr != NULL); 2793 2794 *mpp = NULL; 2795 /* 2796 * Create control part of message, if any. 2797 */ 2798 if ((mctl != NULL) && (mctl->len >= 0)) { 2799 caddr_t base; 2800 int ctlcount; 2801 int allocsz; 2802 2803 if (flag & RS_HIPRI) 2804 msgtype = M_PCPROTO; 2805 else 2806 msgtype = M_PROTO; 2807 2808 ctlcount = mctl->len; 2809 base = mctl->buf; 2810 2811 /* 2812 * Give modules a better chance to reuse M_PROTO/M_PCPROTO 2813 * blocks by increasing the size to something more usable. 2814 */ 2815 allocsz = MAX(ctlcount, 64); 2816 2817 /* 2818 * Range checking has already been done; simply try 2819 * to allocate a message block for the ctl part. 2820 */ 2821 while ((bp = allocb_cred(allocsz, cr, 2822 curproc->p_pid)) == NULL) { 2823 if (fflag & (FNDELAY|FNONBLOCK)) 2824 return (EAGAIN); 2825 if (error = strwaitbuf(allocsz, BPRI_MED)) 2826 return (error); 2827 } 2828 2829 bp->b_datap->db_type = msgtype; 2830 if (copyin(base, bp->b_wptr, ctlcount)) { 2831 freeb(bp); 2832 return (EFAULT); 2833 } 2834 bp->b_wptr += ctlcount; 2835 } 2836 *mpp = bp; 2837 return (0); 2838 } 2839 2840 /* 2841 * Make a protocol message given data buffers. 2842 * n.b., this can block; be careful of what locks you hold when calling it. 2843 * 2844 * If sd_maxblk is less than *iosize this routine can fail part way through 2845 * (due to an allocation failure). In this case on return *iosize will contain 2846 * the amount that was consumed. Otherwise *iosize will not be modified 2847 * i.e. it will contain the amount that was consumed. 2848 */ 2849 int 2850 strmakedata( 2851 ssize_t *iosize, 2852 struct uio *uiop, 2853 stdata_t *stp, 2854 int32_t flag, 2855 mblk_t **mpp) 2856 { 2857 mblk_t *mp = NULL; 2858 mblk_t *bp; 2859 int wroff = (int)stp->sd_wroff; 2860 int tail_len = (int)stp->sd_tail; 2861 int extra = wroff + tail_len; 2862 int error = 0; 2863 ssize_t maxblk; 2864 ssize_t count = *iosize; 2865 cred_t *cr; 2866 2867 *mpp = NULL; 2868 if (count < 0) 2869 return (0); 2870 2871 /* We do not support interrupt threads using the stream head to send */ 2872 cr = CRED(); 2873 ASSERT(cr != NULL); 2874 2875 maxblk = stp->sd_maxblk; 2876 if (maxblk == INFPSZ) 2877 maxblk = count; 2878 2879 /* 2880 * Create data part of message, if any. 2881 */ 2882 do { 2883 ssize_t size; 2884 dblk_t *dp; 2885 2886 ASSERT(uiop); 2887 2888 size = MIN(count, maxblk); 2889 2890 while ((bp = allocb_cred(size + extra, cr, 2891 curproc->p_pid)) == NULL) { 2892 error = EAGAIN; 2893 if ((uiop->uio_fmode & (FNDELAY|FNONBLOCK)) || 2894 (error = strwaitbuf(size + extra, BPRI_MED)) != 0) { 2895 if (count == *iosize) { 2896 freemsg(mp); 2897 return (error); 2898 } else { 2899 *iosize -= count; 2900 *mpp = mp; 2901 return (0); 2902 } 2903 } 2904 } 2905 dp = bp->b_datap; 2906 dp->db_cpid = curproc->p_pid; 2907 ASSERT(wroff <= dp->db_lim - bp->b_wptr); 2908 bp->b_wptr = bp->b_rptr = bp->b_rptr + wroff; 2909 2910 if (flag & STRUIO_POSTPONE) { 2911 /* 2912 * Setup the stream uio portion of the 2913 * dblk for subsequent use by struioget(). 2914 */ 2915 dp->db_struioflag = STRUIO_SPEC; 2916 dp->db_cksumstart = 0; 2917 dp->db_cksumstuff = 0; 2918 dp->db_cksumend = size; 2919 *(long long *)dp->db_struioun.data = 0ll; 2920 bp->b_wptr += size; 2921 } else { 2922 if (stp->sd_copyflag & STRCOPYCACHED) 2923 uiop->uio_extflg |= UIO_COPY_CACHED; 2924 2925 if (size != 0) { 2926 error = uiomove(bp->b_wptr, size, UIO_WRITE, 2927 uiop); 2928 if (error != 0) { 2929 freeb(bp); 2930 freemsg(mp); 2931 return (error); 2932 } 2933 } 2934 bp->b_wptr += size; 2935 2936 if (stp->sd_wputdatafunc != NULL) { 2937 mblk_t *newbp; 2938 2939 newbp = (stp->sd_wputdatafunc)(stp->sd_vnode, 2940 bp, NULL, NULL, NULL, NULL); 2941 if (newbp == NULL) { 2942 freeb(bp); 2943 freemsg(mp); 2944 return (ECOMM); 2945 } 2946 bp = newbp; 2947 } 2948 } 2949 2950 count -= size; 2951 2952 if (mp == NULL) 2953 mp = bp; 2954 else 2955 linkb(mp, bp); 2956 } while (count > 0); 2957 2958 *mpp = mp; 2959 return (0); 2960 } 2961 2962 /* 2963 * Wait for a buffer to become available. Return non-zero errno 2964 * if not able to wait, 0 if buffer is probably there. 2965 */ 2966 int 2967 strwaitbuf(size_t size, int pri) 2968 { 2969 bufcall_id_t id; 2970 2971 mutex_enter(&bcall_monitor); 2972 if ((id = bufcall(size, pri, (void (*)(void *))cv_broadcast, 2973 &ttoproc(curthread)->p_flag_cv)) == 0) { 2974 mutex_exit(&bcall_monitor); 2975 return (ENOSR); 2976 } 2977 if (!cv_wait_sig(&(ttoproc(curthread)->p_flag_cv), &bcall_monitor)) { 2978 unbufcall(id); 2979 mutex_exit(&bcall_monitor); 2980 return (EINTR); 2981 } 2982 unbufcall(id); 2983 mutex_exit(&bcall_monitor); 2984 return (0); 2985 } 2986 2987 /* 2988 * This function waits for a read or write event to happen on a stream. 2989 * fmode can specify FNDELAY and/or FNONBLOCK. 2990 * The timeout is in ms with -1 meaning infinite. 2991 * The flag values work as follows: 2992 * READWAIT Check for read side errors, send M_READ 2993 * GETWAIT Check for read side errors, no M_READ 2994 * WRITEWAIT Check for write side errors. 2995 * NOINTR Do not return error if nonblocking or timeout. 2996 * STR_NOERROR Ignore all errors except STPLEX. 2997 * STR_NOSIG Ignore/hold signals during the duration of the call. 2998 * STR_PEEK Pass through the strgeterr(). 2999 */ 3000 int 3001 strwaitq(stdata_t *stp, int flag, ssize_t count, int fmode, clock_t timout, 3002 int *done) 3003 { 3004 int slpflg, errs; 3005 int error; 3006 kcondvar_t *sleepon; 3007 mblk_t *mp; 3008 ssize_t *rd_count; 3009 clock_t rval; 3010 3011 ASSERT(MUTEX_HELD(&stp->sd_lock)); 3012 if ((flag & READWAIT) || (flag & GETWAIT)) { 3013 slpflg = RSLEEP; 3014 sleepon = &_RD(stp->sd_wrq)->q_wait; 3015 errs = STRDERR|STPLEX; 3016 } else { 3017 slpflg = WSLEEP; 3018 sleepon = &stp->sd_wrq->q_wait; 3019 errs = STWRERR|STRHUP|STPLEX; 3020 } 3021 if (flag & STR_NOERROR) 3022 errs = STPLEX; 3023 3024 if (stp->sd_wakeq & slpflg) { 3025 /* 3026 * A strwakeq() is pending, no need to sleep. 3027 */ 3028 stp->sd_wakeq &= ~slpflg; 3029 *done = 0; 3030 return (0); 3031 } 3032 3033 if (stp->sd_flag & errs) { 3034 /* 3035 * Check for errors before going to sleep since the 3036 * caller might not have checked this while holding 3037 * sd_lock. 3038 */ 3039 error = strgeterr(stp, errs, (flag & STR_PEEK)); 3040 if (error != 0) { 3041 *done = 1; 3042 return (error); 3043 } 3044 } 3045 3046 /* 3047 * If any module downstream has requested read notification 3048 * by setting SNDMREAD flag using M_SETOPTS, send a message 3049 * down stream. 3050 */ 3051 if ((flag & READWAIT) && (stp->sd_flag & SNDMREAD)) { 3052 mutex_exit(&stp->sd_lock); 3053 if (!(mp = allocb_wait(sizeof (ssize_t), BPRI_MED, 3054 (flag & STR_NOSIG), &error))) { 3055 mutex_enter(&stp->sd_lock); 3056 *done = 1; 3057 return (error); 3058 } 3059 mp->b_datap->db_type = M_READ; 3060 rd_count = (ssize_t *)mp->b_wptr; 3061 *rd_count = count; 3062 mp->b_wptr += sizeof (ssize_t); 3063 /* 3064 * Send the number of bytes requested by the 3065 * read as the argument to M_READ. 3066 */ 3067 stream_willservice(stp); 3068 putnext(stp->sd_wrq, mp); 3069 stream_runservice(stp); 3070 mutex_enter(&stp->sd_lock); 3071 3072 /* 3073 * If any data arrived due to inline processing 3074 * of putnext(), don't sleep. 3075 */ 3076 if (_RD(stp->sd_wrq)->q_first != NULL) { 3077 *done = 0; 3078 return (0); 3079 } 3080 } 3081 3082 if (fmode & (FNDELAY|FNONBLOCK)) { 3083 if (!(flag & NOINTR)) 3084 error = EAGAIN; 3085 else 3086 error = 0; 3087 *done = 1; 3088 return (error); 3089 } 3090 3091 stp->sd_flag |= slpflg; 3092 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAIT2, 3093 "strwaitq sleeps (2):%p, %X, %lX, %X, %p", 3094 stp, flag, count, fmode, done); 3095 3096 rval = str_cv_wait(sleepon, &stp->sd_lock, timout, flag & STR_NOSIG); 3097 if (rval > 0) { 3098 /* EMPTY */ 3099 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAKE2, 3100 "strwaitq awakes(2):%X, %X, %X, %X, %X", 3101 stp, flag, count, fmode, done); 3102 } else if (rval == 0) { 3103 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_INTR2, 3104 "strwaitq interrupt #2:%p, %X, %lX, %X, %p", 3105 stp, flag, count, fmode, done); 3106 stp->sd_flag &= ~slpflg; 3107 cv_broadcast(sleepon); 3108 if (!(flag & NOINTR)) 3109 error = EINTR; 3110 else 3111 error = 0; 3112 *done = 1; 3113 return (error); 3114 } else { 3115 /* timeout */ 3116 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_TIME, 3117 "strwaitq timeout:%p, %X, %lX, %X, %p", 3118 stp, flag, count, fmode, done); 3119 *done = 1; 3120 if (!(flag & NOINTR)) 3121 return (ETIME); 3122 else 3123 return (0); 3124 } 3125 /* 3126 * If the caller implements delayed errors (i.e. queued after data) 3127 * we can not check for errors here since data as well as an 3128 * error might have arrived at the stream head. We return to 3129 * have the caller check the read queue before checking for errors. 3130 */ 3131 if ((stp->sd_flag & errs) && !(flag & STR_DELAYERR)) { 3132 error = strgeterr(stp, errs, (flag & STR_PEEK)); 3133 if (error != 0) { 3134 *done = 1; 3135 return (error); 3136 } 3137 } 3138 *done = 0; 3139 return (0); 3140 } 3141 3142 /* 3143 * Perform job control discipline access checks. 3144 * Return 0 for success and the errno for failure. 3145 */ 3146 3147 #define cantsend(p, t, sig) \ 3148 (sigismember(&(p)->p_ignore, sig) || signal_is_blocked((t), sig)) 3149 3150 int 3151 straccess(struct stdata *stp, enum jcaccess mode) 3152 { 3153 extern kcondvar_t lbolt_cv; /* XXX: should be in a header file */ 3154 kthread_t *t = curthread; 3155 proc_t *p = ttoproc(t); 3156 sess_t *sp; 3157 3158 ASSERT(mutex_owned(&stp->sd_lock)); 3159 3160 if (stp->sd_sidp == NULL || stp->sd_vnode->v_type == VFIFO) 3161 return (0); 3162 3163 mutex_enter(&p->p_lock); /* protects p_pgidp */ 3164 3165 for (;;) { 3166 mutex_enter(&p->p_splock); /* protects p->p_sessp */ 3167 sp = p->p_sessp; 3168 mutex_enter(&sp->s_lock); /* protects sp->* */ 3169 3170 /* 3171 * If this is not the calling process's controlling terminal 3172 * or if the calling process is already in the foreground 3173 * then allow access. 3174 */ 3175 if (sp->s_dev != stp->sd_vnode->v_rdev || 3176 p->p_pgidp == stp->sd_pgidp) { 3177 mutex_exit(&sp->s_lock); 3178 mutex_exit(&p->p_splock); 3179 mutex_exit(&p->p_lock); 3180 return (0); 3181 } 3182 3183 /* 3184 * Check to see if controlling terminal has been deallocated. 3185 */ 3186 if (sp->s_vp == NULL) { 3187 if (!cantsend(p, t, SIGHUP)) 3188 sigtoproc(p, t, SIGHUP); 3189 mutex_exit(&sp->s_lock); 3190 mutex_exit(&p->p_splock); 3191 mutex_exit(&p->p_lock); 3192 return (EIO); 3193 } 3194 3195 mutex_exit(&sp->s_lock); 3196 mutex_exit(&p->p_splock); 3197 3198 if (mode == JCGETP) { 3199 mutex_exit(&p->p_lock); 3200 return (0); 3201 } 3202 3203 if (mode == JCREAD) { 3204 if (p->p_detached || cantsend(p, t, SIGTTIN)) { 3205 mutex_exit(&p->p_lock); 3206 return (EIO); 3207 } 3208 mutex_exit(&p->p_lock); 3209 mutex_exit(&stp->sd_lock); 3210 pgsignal(p->p_pgidp, SIGTTIN); 3211 mutex_enter(&stp->sd_lock); 3212 mutex_enter(&p->p_lock); 3213 } else { /* mode == JCWRITE or JCSETP */ 3214 if ((mode == JCWRITE && !(stp->sd_flag & STRTOSTOP)) || 3215 cantsend(p, t, SIGTTOU)) { 3216 mutex_exit(&p->p_lock); 3217 return (0); 3218 } 3219 if (p->p_detached) { 3220 mutex_exit(&p->p_lock); 3221 return (EIO); 3222 } 3223 mutex_exit(&p->p_lock); 3224 mutex_exit(&stp->sd_lock); 3225 pgsignal(p->p_pgidp, SIGTTOU); 3226 mutex_enter(&stp->sd_lock); 3227 mutex_enter(&p->p_lock); 3228 } 3229 3230 /* 3231 * We call cv_wait_sig_swap() to cause the appropriate 3232 * action for the jobcontrol signal to take place. 3233 * If the signal is being caught, we will take the 3234 * EINTR error return. Otherwise, the default action 3235 * of causing the process to stop will take place. 3236 * In this case, we rely on the periodic cv_broadcast() on 3237 * &lbolt_cv to wake us up to loop around and test again. 3238 * We can't get here if the signal is ignored or 3239 * if the current thread is blocking the signal. 3240 */ 3241 mutex_exit(&stp->sd_lock); 3242 if (!cv_wait_sig_swap(&lbolt_cv, &p->p_lock)) { 3243 mutex_exit(&p->p_lock); 3244 mutex_enter(&stp->sd_lock); 3245 return (EINTR); 3246 } 3247 mutex_exit(&p->p_lock); 3248 mutex_enter(&stp->sd_lock); 3249 mutex_enter(&p->p_lock); 3250 } 3251 } 3252 3253 /* 3254 * Return size of message of block type (bp->b_datap->db_type) 3255 */ 3256 size_t 3257 xmsgsize(mblk_t *bp) 3258 { 3259 unsigned char type; 3260 size_t count = 0; 3261 3262 type = bp->b_datap->db_type; 3263 3264 for (; bp; bp = bp->b_cont) { 3265 if (type != bp->b_datap->db_type) 3266 break; 3267 ASSERT(bp->b_wptr >= bp->b_rptr); 3268 count += bp->b_wptr - bp->b_rptr; 3269 } 3270 return (count); 3271 } 3272 3273 /* 3274 * Allocate a stream head. 3275 */ 3276 struct stdata * 3277 shalloc(queue_t *qp) 3278 { 3279 stdata_t *stp; 3280 3281 stp = kmem_cache_alloc(stream_head_cache, KM_SLEEP); 3282 3283 stp->sd_wrq = _WR(qp); 3284 stp->sd_strtab = NULL; 3285 stp->sd_iocid = 0; 3286 stp->sd_mate = NULL; 3287 stp->sd_freezer = NULL; 3288 stp->sd_refcnt = 0; 3289 stp->sd_wakeq = 0; 3290 stp->sd_anchor = 0; 3291 stp->sd_struiowrq = NULL; 3292 stp->sd_struiordq = NULL; 3293 stp->sd_struiodnak = 0; 3294 stp->sd_struionak = NULL; 3295 stp->sd_t_audit_data = NULL; 3296 stp->sd_rput_opt = 0; 3297 stp->sd_wput_opt = 0; 3298 stp->sd_read_opt = 0; 3299 stp->sd_rprotofunc = strrput_proto; 3300 stp->sd_rmiscfunc = strrput_misc; 3301 stp->sd_rderrfunc = stp->sd_wrerrfunc = NULL; 3302 stp->sd_rputdatafunc = stp->sd_wputdatafunc = NULL; 3303 stp->sd_ciputctrl = NULL; 3304 stp->sd_nciputctrl = 0; 3305 stp->sd_qhead = NULL; 3306 stp->sd_qtail = NULL; 3307 stp->sd_servid = NULL; 3308 stp->sd_nqueues = 0; 3309 stp->sd_svcflags = 0; 3310 stp->sd_copyflag = 0; 3311 3312 return (stp); 3313 } 3314 3315 /* 3316 * Free a stream head. 3317 */ 3318 void 3319 shfree(stdata_t *stp) 3320 { 3321 ASSERT(MUTEX_NOT_HELD(&stp->sd_lock)); 3322 3323 stp->sd_wrq = NULL; 3324 3325 mutex_enter(&stp->sd_qlock); 3326 while (stp->sd_svcflags & STRS_SCHEDULED) { 3327 STRSTAT(strwaits); 3328 cv_wait(&stp->sd_qcv, &stp->sd_qlock); 3329 } 3330 mutex_exit(&stp->sd_qlock); 3331 3332 if (stp->sd_ciputctrl != NULL) { 3333 ASSERT(stp->sd_nciputctrl == n_ciputctrl - 1); 3334 SUMCHECK_CIPUTCTRL_COUNTS(stp->sd_ciputctrl, 3335 stp->sd_nciputctrl, 0); 3336 ASSERT(ciputctrl_cache != NULL); 3337 kmem_cache_free(ciputctrl_cache, stp->sd_ciputctrl); 3338 stp->sd_ciputctrl = NULL; 3339 stp->sd_nciputctrl = 0; 3340 } 3341 ASSERT(stp->sd_qhead == NULL); 3342 ASSERT(stp->sd_qtail == NULL); 3343 ASSERT(stp->sd_nqueues == 0); 3344 kmem_cache_free(stream_head_cache, stp); 3345 } 3346 3347 /* 3348 * Allocate a pair of queues and a syncq for the pair 3349 */ 3350 queue_t * 3351 allocq(void) 3352 { 3353 queinfo_t *qip; 3354 queue_t *qp, *wqp; 3355 syncq_t *sq; 3356 3357 qip = kmem_cache_alloc(queue_cache, KM_SLEEP); 3358 3359 qp = &qip->qu_rqueue; 3360 wqp = &qip->qu_wqueue; 3361 sq = &qip->qu_syncq; 3362 3363 qp->q_last = NULL; 3364 qp->q_next = NULL; 3365 qp->q_ptr = NULL; 3366 qp->q_flag = QUSE | QREADR; 3367 qp->q_bandp = NULL; 3368 qp->q_stream = NULL; 3369 qp->q_syncq = sq; 3370 qp->q_nband = 0; 3371 qp->q_nfsrv = NULL; 3372 qp->q_draining = 0; 3373 qp->q_syncqmsgs = 0; 3374 qp->q_spri = 0; 3375 qp->q_qtstamp = 0; 3376 qp->q_sqtstamp = 0; 3377 qp->q_fp = NULL; 3378 3379 wqp->q_last = NULL; 3380 wqp->q_next = NULL; 3381 wqp->q_ptr = NULL; 3382 wqp->q_flag = QUSE; 3383 wqp->q_bandp = NULL; 3384 wqp->q_stream = NULL; 3385 wqp->q_syncq = sq; 3386 wqp->q_nband = 0; 3387 wqp->q_nfsrv = NULL; 3388 wqp->q_draining = 0; 3389 wqp->q_syncqmsgs = 0; 3390 wqp->q_qtstamp = 0; 3391 wqp->q_sqtstamp = 0; 3392 wqp->q_spri = 0; 3393 3394 sq->sq_count = 0; 3395 sq->sq_rmqcount = 0; 3396 sq->sq_flags = 0; 3397 sq->sq_type = 0; 3398 sq->sq_callbflags = 0; 3399 sq->sq_cancelid = 0; 3400 sq->sq_ciputctrl = NULL; 3401 sq->sq_nciputctrl = 0; 3402 sq->sq_needexcl = 0; 3403 sq->sq_svcflags = 0; 3404 3405 return (qp); 3406 } 3407 3408 /* 3409 * Free a pair of queues and the "attached" syncq. 3410 * Discard any messages left on the syncq(s), remove the syncq(s) from the 3411 * outer perimeter, and free the syncq(s) if they are not the "attached" syncq. 3412 */ 3413 void 3414 freeq(queue_t *qp) 3415 { 3416 qband_t *qbp, *nqbp; 3417 syncq_t *sq, *outer; 3418 queue_t *wqp = _WR(qp); 3419 3420 ASSERT(qp->q_flag & QREADR); 3421 3422 /* 3423 * If a previously dispatched taskq job is scheduled to run 3424 * sync_service() or a service routine is scheduled for the 3425 * queues about to be freed, wait here until all service is 3426 * done on the queue and all associated queues and syncqs. 3427 */ 3428 wait_svc(qp); 3429 3430 (void) flush_syncq(qp->q_syncq, qp); 3431 (void) flush_syncq(wqp->q_syncq, wqp); 3432 ASSERT(qp->q_syncqmsgs == 0 && wqp->q_syncqmsgs == 0); 3433 3434 /* 3435 * Flush the queues before q_next is set to NULL This is needed 3436 * in order to backenable any downstream queue before we go away. 3437 * Note: we are already removed from the stream so that the 3438 * backenabling will not cause any messages to be delivered to our 3439 * put procedures. 3440 */ 3441 flushq(qp, FLUSHALL); 3442 flushq(wqp, FLUSHALL); 3443 3444 /* Tidy up - removeq only does a half-remove from stream */ 3445 qp->q_next = wqp->q_next = NULL; 3446 ASSERT(!(qp->q_flag & QENAB)); 3447 ASSERT(!(wqp->q_flag & QENAB)); 3448 3449 outer = qp->q_syncq->sq_outer; 3450 if (outer != NULL) { 3451 outer_remove(outer, qp->q_syncq); 3452 if (wqp->q_syncq != qp->q_syncq) 3453 outer_remove(outer, wqp->q_syncq); 3454 } 3455 /* 3456 * Free any syncqs that are outside what allocq returned. 3457 */ 3458 if (qp->q_syncq != SQ(qp) && !(qp->q_flag & QPERMOD)) 3459 free_syncq(qp->q_syncq); 3460 if (qp->q_syncq != wqp->q_syncq && wqp->q_syncq != SQ(qp)) 3461 free_syncq(wqp->q_syncq); 3462 3463 ASSERT((qp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0); 3464 ASSERT((wqp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0); 3465 ASSERT(MUTEX_NOT_HELD(QLOCK(qp))); 3466 ASSERT(MUTEX_NOT_HELD(QLOCK(wqp))); 3467 sq = SQ(qp); 3468 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq))); 3469 ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL); 3470 ASSERT(sq->sq_outer == NULL); 3471 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL); 3472 ASSERT(sq->sq_callbpend == NULL); 3473 ASSERT(sq->sq_needexcl == 0); 3474 3475 if (sq->sq_ciputctrl != NULL) { 3476 ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1); 3477 SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl, 3478 sq->sq_nciputctrl, 0); 3479 ASSERT(ciputctrl_cache != NULL); 3480 kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl); 3481 sq->sq_ciputctrl = NULL; 3482 sq->sq_nciputctrl = 0; 3483 } 3484 3485 ASSERT(qp->q_first == NULL && wqp->q_first == NULL); 3486 ASSERT(qp->q_count == 0 && wqp->q_count == 0); 3487 ASSERT(qp->q_mblkcnt == 0 && wqp->q_mblkcnt == 0); 3488 3489 qp->q_flag &= ~QUSE; 3490 wqp->q_flag &= ~QUSE; 3491 3492 /* NOTE: Uncomment the assert below once bugid 1159635 is fixed. */ 3493 /* ASSERT((qp->q_flag & QWANTW) == 0 && (wqp->q_flag & QWANTW) == 0); */ 3494 3495 qbp = qp->q_bandp; 3496 while (qbp) { 3497 nqbp = qbp->qb_next; 3498 freeband(qbp); 3499 qbp = nqbp; 3500 } 3501 qbp = wqp->q_bandp; 3502 while (qbp) { 3503 nqbp = qbp->qb_next; 3504 freeband(qbp); 3505 qbp = nqbp; 3506 } 3507 kmem_cache_free(queue_cache, qp); 3508 } 3509 3510 /* 3511 * Allocate a qband structure. 3512 */ 3513 qband_t * 3514 allocband(void) 3515 { 3516 qband_t *qbp; 3517 3518 qbp = kmem_cache_alloc(qband_cache, KM_NOSLEEP); 3519 if (qbp == NULL) 3520 return (NULL); 3521 3522 qbp->qb_next = NULL; 3523 qbp->qb_count = 0; 3524 qbp->qb_mblkcnt = 0; 3525 qbp->qb_first = NULL; 3526 qbp->qb_last = NULL; 3527 qbp->qb_flag = 0; 3528 3529 return (qbp); 3530 } 3531 3532 /* 3533 * Free a qband structure. 3534 */ 3535 void 3536 freeband(qband_t *qbp) 3537 { 3538 kmem_cache_free(qband_cache, qbp); 3539 } 3540 3541 /* 3542 * Just like putnextctl(9F), except that allocb_wait() is used. 3543 * 3544 * Consolidation Private, and of course only callable from the stream head or 3545 * routines that may block. 3546 */ 3547 int 3548 putnextctl_wait(queue_t *q, int type) 3549 { 3550 mblk_t *bp; 3551 int error; 3552 3553 if ((datamsg(type) && (type != M_DELAY)) || 3554 (bp = allocb_wait(0, BPRI_HI, 0, &error)) == NULL) 3555 return (0); 3556 3557 bp->b_datap->db_type = (unsigned char)type; 3558 putnext(q, bp); 3559 return (1); 3560 } 3561 3562 /* 3563 * Run any possible bufcalls. 3564 */ 3565 void 3566 runbufcalls(void) 3567 { 3568 strbufcall_t *bcp; 3569 3570 mutex_enter(&bcall_monitor); 3571 mutex_enter(&strbcall_lock); 3572 3573 if (strbcalls.bc_head) { 3574 size_t count; 3575 int nevent; 3576 3577 /* 3578 * count how many events are on the list 3579 * now so we can check to avoid looping 3580 * in low memory situations 3581 */ 3582 nevent = 0; 3583 for (bcp = strbcalls.bc_head; bcp; bcp = bcp->bc_next) 3584 nevent++; 3585 3586 /* 3587 * get estimate of available memory from kmem_avail(). 3588 * awake all bufcall functions waiting for 3589 * memory whose request could be satisfied 3590 * by 'count' memory and let 'em fight for it. 3591 */ 3592 count = kmem_avail(); 3593 while ((bcp = strbcalls.bc_head) != NULL && nevent) { 3594 STRSTAT(bufcalls); 3595 --nevent; 3596 if (bcp->bc_size <= count) { 3597 bcp->bc_executor = curthread; 3598 mutex_exit(&strbcall_lock); 3599 (*bcp->bc_func)(bcp->bc_arg); 3600 mutex_enter(&strbcall_lock); 3601 bcp->bc_executor = NULL; 3602 cv_broadcast(&bcall_cv); 3603 strbcalls.bc_head = bcp->bc_next; 3604 kmem_free(bcp, sizeof (strbufcall_t)); 3605 } else { 3606 /* 3607 * too big, try again later - note 3608 * that nevent was decremented above 3609 * so we won't retry this one on this 3610 * iteration of the loop 3611 */ 3612 if (bcp->bc_next != NULL) { 3613 strbcalls.bc_head = bcp->bc_next; 3614 bcp->bc_next = NULL; 3615 strbcalls.bc_tail->bc_next = bcp; 3616 strbcalls.bc_tail = bcp; 3617 } 3618 } 3619 } 3620 if (strbcalls.bc_head == NULL) 3621 strbcalls.bc_tail = NULL; 3622 } 3623 3624 mutex_exit(&strbcall_lock); 3625 mutex_exit(&bcall_monitor); 3626 } 3627 3628 3629 /* 3630 * Actually run queue's service routine. 3631 */ 3632 static void 3633 runservice(queue_t *q) 3634 { 3635 qband_t *qbp; 3636 3637 ASSERT(q->q_qinfo->qi_srvp); 3638 again: 3639 entersq(q->q_syncq, SQ_SVC); 3640 TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_START, 3641 "runservice starts:%p", q); 3642 3643 if (!(q->q_flag & QWCLOSE)) 3644 (*q->q_qinfo->qi_srvp)(q); 3645 3646 TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_END, 3647 "runservice ends:(%p)", q); 3648 3649 leavesq(q->q_syncq, SQ_SVC); 3650 3651 mutex_enter(QLOCK(q)); 3652 if (q->q_flag & QENAB) { 3653 q->q_flag &= ~QENAB; 3654 mutex_exit(QLOCK(q)); 3655 goto again; 3656 } 3657 q->q_flag &= ~QINSERVICE; 3658 q->q_flag &= ~QBACK; 3659 for (qbp = q->q_bandp; qbp; qbp = qbp->qb_next) 3660 qbp->qb_flag &= ~QB_BACK; 3661 /* 3662 * Wakeup thread waiting for the service procedure 3663 * to be run (strclose and qdetach). 3664 */ 3665 cv_broadcast(&q->q_wait); 3666 3667 mutex_exit(QLOCK(q)); 3668 } 3669 3670 /* 3671 * Background processing of bufcalls. 3672 */ 3673 void 3674 streams_bufcall_service(void) 3675 { 3676 callb_cpr_t cprinfo; 3677 3678 CALLB_CPR_INIT(&cprinfo, &strbcall_lock, callb_generic_cpr, 3679 "streams_bufcall_service"); 3680 3681 mutex_enter(&strbcall_lock); 3682 3683 for (;;) { 3684 if (strbcalls.bc_head != NULL && kmem_avail() > 0) { 3685 mutex_exit(&strbcall_lock); 3686 runbufcalls(); 3687 mutex_enter(&strbcall_lock); 3688 } 3689 if (strbcalls.bc_head != NULL) { 3690 STRSTAT(bcwaits); 3691 /* Wait for memory to become available */ 3692 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3693 (void) cv_reltimedwait(&memavail_cv, &strbcall_lock, 3694 SEC_TO_TICK(60), TR_CLOCK_TICK); 3695 CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock); 3696 } 3697 3698 /* Wait for new work to arrive */ 3699 if (strbcalls.bc_head == NULL) { 3700 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3701 cv_wait(&strbcall_cv, &strbcall_lock); 3702 CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock); 3703 } 3704 } 3705 } 3706 3707 /* 3708 * Background processing of streams background tasks which failed 3709 * taskq_dispatch. 3710 */ 3711 static void 3712 streams_qbkgrnd_service(void) 3713 { 3714 callb_cpr_t cprinfo; 3715 queue_t *q; 3716 3717 CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr, 3718 "streams_bkgrnd_service"); 3719 3720 mutex_enter(&service_queue); 3721 3722 for (;;) { 3723 /* 3724 * Wait for work to arrive. 3725 */ 3726 while ((freebs_list == NULL) && (qhead == NULL)) { 3727 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3728 cv_wait(&services_to_run, &service_queue); 3729 CALLB_CPR_SAFE_END(&cprinfo, &service_queue); 3730 } 3731 /* 3732 * Handle all pending freebs requests to free memory. 3733 */ 3734 while (freebs_list != NULL) { 3735 mblk_t *mp = freebs_list; 3736 freebs_list = mp->b_next; 3737 mutex_exit(&service_queue); 3738 mblk_free(mp); 3739 mutex_enter(&service_queue); 3740 } 3741 /* 3742 * Run pending queues. 3743 */ 3744 while (qhead != NULL) { 3745 DQ(q, qhead, qtail, q_link); 3746 ASSERT(q != NULL); 3747 mutex_exit(&service_queue); 3748 queue_service(q); 3749 mutex_enter(&service_queue); 3750 } 3751 ASSERT(qhead == NULL && qtail == NULL); 3752 } 3753 } 3754 3755 /* 3756 * Background processing of streams background tasks which failed 3757 * taskq_dispatch. 3758 */ 3759 static void 3760 streams_sqbkgrnd_service(void) 3761 { 3762 callb_cpr_t cprinfo; 3763 syncq_t *sq; 3764 3765 CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr, 3766 "streams_sqbkgrnd_service"); 3767 3768 mutex_enter(&service_queue); 3769 3770 for (;;) { 3771 /* 3772 * Wait for work to arrive. 3773 */ 3774 while (sqhead == NULL) { 3775 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3776 cv_wait(&syncqs_to_run, &service_queue); 3777 CALLB_CPR_SAFE_END(&cprinfo, &service_queue); 3778 } 3779 3780 /* 3781 * Run pending syncqs. 3782 */ 3783 while (sqhead != NULL) { 3784 DQ(sq, sqhead, sqtail, sq_next); 3785 ASSERT(sq != NULL); 3786 ASSERT(sq->sq_svcflags & SQ_BGTHREAD); 3787 mutex_exit(&service_queue); 3788 syncq_service(sq); 3789 mutex_enter(&service_queue); 3790 } 3791 } 3792 } 3793 3794 /* 3795 * Disable the syncq and wait for background syncq processing to complete. 3796 * If the syncq is placed on the sqhead/sqtail queue, try to remove it from the 3797 * list. 3798 */ 3799 void 3800 wait_sq_svc(syncq_t *sq) 3801 { 3802 mutex_enter(SQLOCK(sq)); 3803 sq->sq_svcflags |= SQ_DISABLED; 3804 if (sq->sq_svcflags & SQ_BGTHREAD) { 3805 syncq_t *sq_chase; 3806 syncq_t *sq_curr; 3807 int removed; 3808 3809 ASSERT(sq->sq_servcount == 1); 3810 mutex_enter(&service_queue); 3811 RMQ(sq, sqhead, sqtail, sq_next, sq_chase, sq_curr, removed); 3812 mutex_exit(&service_queue); 3813 if (removed) { 3814 sq->sq_svcflags &= ~SQ_BGTHREAD; 3815 sq->sq_servcount = 0; 3816 STRSTAT(sqremoved); 3817 goto done; 3818 } 3819 } 3820 while (sq->sq_servcount != 0) { 3821 sq->sq_flags |= SQ_WANTWAKEUP; 3822 cv_wait(&sq->sq_wait, SQLOCK(sq)); 3823 } 3824 done: 3825 mutex_exit(SQLOCK(sq)); 3826 } 3827 3828 /* 3829 * Put a syncq on the list of syncq's to be serviced by the sqthread. 3830 * Add the argument to the end of the sqhead list and set the flag 3831 * indicating this syncq has been enabled. If it has already been 3832 * enabled, don't do anything. 3833 * This routine assumes that SQLOCK is held. 3834 * NOTE that the lock order is to have the SQLOCK first, 3835 * so if the service_syncq lock is held, we need to release it 3836 * before acquiring the SQLOCK (mostly relevant for the background 3837 * thread, and this seems to be common among the STREAMS global locks). 3838 * Note that the sq_svcflags are protected by the SQLOCK. 3839 */ 3840 void 3841 sqenable(syncq_t *sq) 3842 { 3843 /* 3844 * This is probably not important except for where I believe it 3845 * is being called. At that point, it should be held (and it 3846 * is a pain to release it just for this routine, so don't do 3847 * it). 3848 */ 3849 ASSERT(MUTEX_HELD(SQLOCK(sq))); 3850 3851 IMPLY(sq->sq_servcount == 0, sq->sq_next == NULL); 3852 IMPLY(sq->sq_next != NULL, sq->sq_svcflags & SQ_BGTHREAD); 3853 3854 /* 3855 * Do not put on list if background thread is scheduled or 3856 * syncq is disabled. 3857 */ 3858 if (sq->sq_svcflags & (SQ_DISABLED | SQ_BGTHREAD)) 3859 return; 3860 3861 /* 3862 * Check whether we should enable sq at all. 3863 * Non PERMOD syncqs may be drained by at most one thread. 3864 * PERMOD syncqs may be drained by several threads but we limit the 3865 * total amount to the lesser of 3866 * Number of queues on the squeue and 3867 * Number of CPUs. 3868 */ 3869 if (sq->sq_servcount != 0) { 3870 if (((sq->sq_type & SQ_PERMOD) == 0) || 3871 (sq->sq_servcount >= MIN(sq->sq_nqueues, ncpus_online))) { 3872 STRSTAT(sqtoomany); 3873 return; 3874 } 3875 } 3876 3877 sq->sq_tstamp = ddi_get_lbolt(); 3878 STRSTAT(sqenables); 3879 3880 /* Attempt a taskq dispatch */ 3881 sq->sq_servid = (void *)taskq_dispatch(streams_taskq, 3882 (task_func_t *)syncq_service, sq, TQ_NOSLEEP | TQ_NOQUEUE); 3883 if (sq->sq_servid != NULL) { 3884 sq->sq_servcount++; 3885 return; 3886 } 3887 3888 /* 3889 * This taskq dispatch failed, but a previous one may have succeeded. 3890 * Don't try to schedule on the background thread whilst there is 3891 * outstanding taskq processing. 3892 */ 3893 if (sq->sq_servcount != 0) 3894 return; 3895 3896 /* 3897 * System is low on resources and can't perform a non-sleeping 3898 * dispatch. Schedule the syncq for a background thread and mark the 3899 * syncq to avoid any further taskq dispatch attempts. 3900 */ 3901 mutex_enter(&service_queue); 3902 STRSTAT(taskqfails); 3903 ENQUEUE(sq, sqhead, sqtail, sq_next); 3904 sq->sq_svcflags |= SQ_BGTHREAD; 3905 sq->sq_servcount = 1; 3906 cv_signal(&syncqs_to_run); 3907 mutex_exit(&service_queue); 3908 } 3909 3910 /* 3911 * Note: fifo_close() depends on the mblk_t on the queue being freed 3912 * asynchronously. The asynchronous freeing of messages breaks the 3913 * recursive call chain of fifo_close() while there are I_SENDFD type of 3914 * messages referring to other file pointers on the queue. Then when 3915 * closing pipes it can avoid stack overflow in case of daisy-chained 3916 * pipes, and also avoid deadlock in case of fifonode_t pairs (which 3917 * share the same fifolock_t). 3918 */ 3919 3920 void 3921 freebs_enqueue(mblk_t *mp, dblk_t *dbp) 3922 { 3923 esb_queue_t *eqp = &system_esbq; 3924 3925 ASSERT(dbp->db_mblk == mp); 3926 3927 /* 3928 * Check data sanity. The dblock should have non-empty free function. 3929 * It is better to panic here then later when the dblock is freed 3930 * asynchronously when the context is lost. 3931 */ 3932 if (dbp->db_frtnp->free_func == NULL) { 3933 panic("freebs_enqueue: dblock %p has a NULL free callback", 3934 (void *)dbp); 3935 } 3936 3937 mutex_enter(&eqp->eq_lock); 3938 /* queue the new mblk on the esballoc queue */ 3939 if (eqp->eq_head == NULL) { 3940 eqp->eq_head = eqp->eq_tail = mp; 3941 } else { 3942 eqp->eq_tail->b_next = mp; 3943 eqp->eq_tail = mp; 3944 } 3945 eqp->eq_len++; 3946 3947 /* If we're the first thread to reach the threshold, process */ 3948 if (eqp->eq_len >= esbq_max_qlen && 3949 !(eqp->eq_flags & ESBQ_PROCESSING)) 3950 esballoc_process_queue(eqp); 3951 3952 esballoc_set_timer(eqp, esbq_timeout); 3953 mutex_exit(&eqp->eq_lock); 3954 } 3955 3956 static void 3957 esballoc_process_queue(esb_queue_t *eqp) 3958 { 3959 mblk_t *mp; 3960 3961 ASSERT(MUTEX_HELD(&eqp->eq_lock)); 3962 3963 eqp->eq_flags |= ESBQ_PROCESSING; 3964 3965 do { 3966 /* 3967 * Detach the message chain for processing. 3968 */ 3969 mp = eqp->eq_head; 3970 eqp->eq_tail->b_next = NULL; 3971 eqp->eq_head = eqp->eq_tail = NULL; 3972 eqp->eq_len = 0; 3973 mutex_exit(&eqp->eq_lock); 3974 3975 /* 3976 * Process the message chain. 3977 */ 3978 esballoc_enqueue_mblk(mp); 3979 mutex_enter(&eqp->eq_lock); 3980 } while ((eqp->eq_len >= esbq_max_qlen) && (eqp->eq_len > 0)); 3981 3982 eqp->eq_flags &= ~ESBQ_PROCESSING; 3983 } 3984 3985 /* 3986 * taskq callback routine to free esballoced mblk's 3987 */ 3988 static void 3989 esballoc_mblk_free(mblk_t *mp) 3990 { 3991 mblk_t *nextmp; 3992 3993 for (; mp != NULL; mp = nextmp) { 3994 nextmp = mp->b_next; 3995 mp->b_next = NULL; 3996 mblk_free(mp); 3997 } 3998 } 3999 4000 static void 4001 esballoc_enqueue_mblk(mblk_t *mp) 4002 { 4003 4004 if (taskq_dispatch(system_taskq, (task_func_t *)esballoc_mblk_free, mp, 4005 TQ_NOSLEEP) == NULL) { 4006 mblk_t *first_mp = mp; 4007 /* 4008 * System is low on resources and can't perform a non-sleeping 4009 * dispatch. Schedule for a background thread. 4010 */ 4011 mutex_enter(&service_queue); 4012 STRSTAT(taskqfails); 4013 4014 while (mp->b_next != NULL) 4015 mp = mp->b_next; 4016 4017 mp->b_next = freebs_list; 4018 freebs_list = first_mp; 4019 cv_signal(&services_to_run); 4020 mutex_exit(&service_queue); 4021 } 4022 } 4023 4024 static void 4025 esballoc_timer(void *arg) 4026 { 4027 esb_queue_t *eqp = arg; 4028 4029 mutex_enter(&eqp->eq_lock); 4030 eqp->eq_flags &= ~ESBQ_TIMER; 4031 4032 if (!(eqp->eq_flags & ESBQ_PROCESSING) && 4033 eqp->eq_len > 0) 4034 esballoc_process_queue(eqp); 4035 4036 esballoc_set_timer(eqp, esbq_timeout); 4037 mutex_exit(&eqp->eq_lock); 4038 } 4039 4040 static void 4041 esballoc_set_timer(esb_queue_t *eqp, clock_t eq_timeout) 4042 { 4043 ASSERT(MUTEX_HELD(&eqp->eq_lock)); 4044 4045 if (eqp->eq_len > 0 && !(eqp->eq_flags & ESBQ_TIMER)) { 4046 (void) timeout(esballoc_timer, eqp, eq_timeout); 4047 eqp->eq_flags |= ESBQ_TIMER; 4048 } 4049 } 4050 4051 void 4052 esballoc_queue_init(void) 4053 { 4054 system_esbq.eq_len = 0; 4055 system_esbq.eq_head = system_esbq.eq_tail = NULL; 4056 system_esbq.eq_flags = 0; 4057 } 4058 4059 /* 4060 * Set the QBACK or QB_BACK flag in the given queue for 4061 * the given priority band. 4062 */ 4063 void 4064 setqback(queue_t *q, unsigned char pri) 4065 { 4066 int i; 4067 qband_t *qbp; 4068 qband_t **qbpp; 4069 4070 ASSERT(MUTEX_HELD(QLOCK(q))); 4071 if (pri != 0) { 4072 if (pri > q->q_nband) { 4073 qbpp = &q->q_bandp; 4074 while (*qbpp) 4075 qbpp = &(*qbpp)->qb_next; 4076 while (pri > q->q_nband) { 4077 if ((*qbpp = allocband()) == NULL) { 4078 cmn_err(CE_WARN, 4079 "setqback: can't allocate qband\n"); 4080 return; 4081 } 4082 (*qbpp)->qb_hiwat = q->q_hiwat; 4083 (*qbpp)->qb_lowat = q->q_lowat; 4084 q->q_nband++; 4085 qbpp = &(*qbpp)->qb_next; 4086 } 4087 } 4088 qbp = q->q_bandp; 4089 i = pri; 4090 while (--i) 4091 qbp = qbp->qb_next; 4092 qbp->qb_flag |= QB_BACK; 4093 } else { 4094 q->q_flag |= QBACK; 4095 } 4096 } 4097 4098 int 4099 strcopyin(void *from, void *to, size_t len, int copyflag) 4100 { 4101 if (copyflag & U_TO_K) { 4102 ASSERT((copyflag & K_TO_K) == 0); 4103 if (copyin(from, to, len)) 4104 return (EFAULT); 4105 } else { 4106 ASSERT(copyflag & K_TO_K); 4107 bcopy(from, to, len); 4108 } 4109 return (0); 4110 } 4111 4112 int 4113 strcopyout(void *from, void *to, size_t len, int copyflag) 4114 { 4115 if (copyflag & U_TO_K) { 4116 if (copyout(from, to, len)) 4117 return (EFAULT); 4118 } else { 4119 ASSERT(copyflag & K_TO_K); 4120 bcopy(from, to, len); 4121 } 4122 return (0); 4123 } 4124 4125 /* 4126 * strsignal_nolock() posts a signal to the process(es) at the stream head. 4127 * It assumes that the stream head lock is already held, whereas strsignal() 4128 * acquires the lock first. This routine was created because a few callers 4129 * release the stream head lock before calling only to re-acquire it after 4130 * it returns. 4131 */ 4132 void 4133 strsignal_nolock(stdata_t *stp, int sig, uchar_t band) 4134 { 4135 ASSERT(MUTEX_HELD(&stp->sd_lock)); 4136 switch (sig) { 4137 case SIGPOLL: 4138 if (stp->sd_sigflags & S_MSG) 4139 strsendsig(stp->sd_siglist, S_MSG, band, 0); 4140 break; 4141 default: 4142 if (stp->sd_pgidp) 4143 pgsignal(stp->sd_pgidp, sig); 4144 break; 4145 } 4146 } 4147 4148 void 4149 strsignal(stdata_t *stp, int sig, int32_t band) 4150 { 4151 TRACE_3(TR_FAC_STREAMS_FR, TR_SENDSIG, 4152 "strsignal:%p, %X, %X", stp, sig, band); 4153 4154 mutex_enter(&stp->sd_lock); 4155 switch (sig) { 4156 case SIGPOLL: 4157 if (stp->sd_sigflags & S_MSG) 4158 strsendsig(stp->sd_siglist, S_MSG, (uchar_t)band, 0); 4159 break; 4160 4161 default: 4162 if (stp->sd_pgidp) { 4163 pgsignal(stp->sd_pgidp, sig); 4164 } 4165 break; 4166 } 4167 mutex_exit(&stp->sd_lock); 4168 } 4169 4170 void 4171 strhup(stdata_t *stp) 4172 { 4173 ASSERT(mutex_owned(&stp->sd_lock)); 4174 pollwakeup(&stp->sd_pollist, POLLHUP); 4175 if (stp->sd_sigflags & S_HANGUP) 4176 strsendsig(stp->sd_siglist, S_HANGUP, 0, 0); 4177 } 4178 4179 /* 4180 * Backenable the first queue upstream from `q' with a service procedure. 4181 */ 4182 void 4183 backenable(queue_t *q, uchar_t pri) 4184 { 4185 queue_t *nq; 4186 4187 /* 4188 * Our presence might not prevent other modules in our own 4189 * stream from popping/pushing since the caller of getq might not 4190 * have a claim on the queue (some drivers do a getq on somebody 4191 * else's queue - they know that the queue itself is not going away 4192 * but the framework has to guarantee q_next in that stream). 4193 */ 4194 claimstr(q); 4195 4196 /* Find nearest back queue with service proc */ 4197 for (nq = backq(q); nq && !nq->q_qinfo->qi_srvp; nq = backq(nq)) { 4198 ASSERT(STRMATED(q->q_stream) || STREAM(q) == STREAM(nq)); 4199 } 4200 4201 if (nq) { 4202 kthread_t *freezer; 4203 /* 4204 * backenable can be called either with no locks held 4205 * or with the stream frozen (the latter occurs when a module 4206 * calls rmvq with the stream frozen). If the stream is frozen 4207 * by the caller the caller will hold all qlocks in the stream. 4208 * Note that a frozen stream doesn't freeze a mated stream, 4209 * so we explicitly check for that. 4210 */ 4211 freezer = STREAM(q)->sd_freezer; 4212 if (freezer != curthread || STREAM(q) != STREAM(nq)) { 4213 mutex_enter(QLOCK(nq)); 4214 } 4215 #ifdef DEBUG 4216 else { 4217 ASSERT(frozenstr(q)); 4218 ASSERT(MUTEX_HELD(QLOCK(q))); 4219 ASSERT(MUTEX_HELD(QLOCK(nq))); 4220 } 4221 #endif 4222 setqback(nq, pri); 4223 qenable_locked(nq); 4224 if (freezer != curthread || STREAM(q) != STREAM(nq)) 4225 mutex_exit(QLOCK(nq)); 4226 } 4227 releasestr(q); 4228 } 4229 4230 /* 4231 * Return the appropriate errno when one of flags_to_check is set 4232 * in sd_flags. Uses the exported error routines if they are set. 4233 * Will return 0 if non error is set (or if the exported error routines 4234 * do not return an error). 4235 * 4236 * If there is both a read and write error to check, we prefer the read error. 4237 * Also, give preference to recorded errno's over the error functions. 4238 * The flags that are handled are: 4239 * STPLEX return EINVAL 4240 * STRDERR return sd_rerror (and clear if STRDERRNONPERSIST) 4241 * STWRERR return sd_werror (and clear if STWRERRNONPERSIST) 4242 * STRHUP return sd_werror 4243 * 4244 * If the caller indicates that the operation is a peek, a nonpersistent error 4245 * is not cleared. 4246 */ 4247 int 4248 strgeterr(stdata_t *stp, int32_t flags_to_check, int ispeek) 4249 { 4250 int32_t sd_flag = stp->sd_flag & flags_to_check; 4251 int error = 0; 4252 4253 ASSERT(MUTEX_HELD(&stp->sd_lock)); 4254 ASSERT((flags_to_check & ~(STRDERR|STWRERR|STRHUP|STPLEX)) == 0); 4255 if (sd_flag & STPLEX) 4256 error = EINVAL; 4257 else if (sd_flag & STRDERR) { 4258 error = stp->sd_rerror; 4259 if ((stp->sd_flag & STRDERRNONPERSIST) && !ispeek) { 4260 /* 4261 * Read errors are non-persistent i.e. discarded once 4262 * returned to a non-peeking caller, 4263 */ 4264 stp->sd_rerror = 0; 4265 stp->sd_flag &= ~STRDERR; 4266 } 4267 if (error == 0 && stp->sd_rderrfunc != NULL) { 4268 int clearerr = 0; 4269 4270 error = (*stp->sd_rderrfunc)(stp->sd_vnode, ispeek, 4271 &clearerr); 4272 if (clearerr) { 4273 stp->sd_flag &= ~STRDERR; 4274 stp->sd_rderrfunc = NULL; 4275 } 4276 } 4277 } else if (sd_flag & STWRERR) { 4278 error = stp->sd_werror; 4279 if ((stp->sd_flag & STWRERRNONPERSIST) && !ispeek) { 4280 /* 4281 * Write errors are non-persistent i.e. discarded once 4282 * returned to a non-peeking caller, 4283 */ 4284 stp->sd_werror = 0; 4285 stp->sd_flag &= ~STWRERR; 4286 } 4287 if (error == 0 && stp->sd_wrerrfunc != NULL) { 4288 int clearerr = 0; 4289 4290 error = (*stp->sd_wrerrfunc)(stp->sd_vnode, ispeek, 4291 &clearerr); 4292 if (clearerr) { 4293 stp->sd_flag &= ~STWRERR; 4294 stp->sd_wrerrfunc = NULL; 4295 } 4296 } 4297 } else if (sd_flag & STRHUP) { 4298 /* sd_werror set when STRHUP */ 4299 error = stp->sd_werror; 4300 } 4301 return (error); 4302 } 4303 4304 4305 /* 4306 * Single-thread open/close/push/pop 4307 * for twisted streams also 4308 */ 4309 int 4310 strstartplumb(stdata_t *stp, int flag, int cmd) 4311 { 4312 int waited = 1; 4313 int error = 0; 4314 4315 if (STRMATED(stp)) { 4316 struct stdata *stmatep = stp->sd_mate; 4317 4318 STRLOCKMATES(stp); 4319 while (waited) { 4320 waited = 0; 4321 while (stmatep->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 4322 if ((cmd == I_POP) && 4323 (flag & (FNDELAY|FNONBLOCK))) { 4324 STRUNLOCKMATES(stp); 4325 return (EAGAIN); 4326 } 4327 waited = 1; 4328 mutex_exit(&stp->sd_lock); 4329 if (!cv_wait_sig(&stmatep->sd_monitor, 4330 &stmatep->sd_lock)) { 4331 mutex_exit(&stmatep->sd_lock); 4332 return (EINTR); 4333 } 4334 mutex_exit(&stmatep->sd_lock); 4335 STRLOCKMATES(stp); 4336 } 4337 while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 4338 if ((cmd == I_POP) && 4339 (flag & (FNDELAY|FNONBLOCK))) { 4340 STRUNLOCKMATES(stp); 4341 return (EAGAIN); 4342 } 4343 waited = 1; 4344 mutex_exit(&stmatep->sd_lock); 4345 if (!cv_wait_sig(&stp->sd_monitor, 4346 &stp->sd_lock)) { 4347 mutex_exit(&stp->sd_lock); 4348 return (EINTR); 4349 } 4350 mutex_exit(&stp->sd_lock); 4351 STRLOCKMATES(stp); 4352 } 4353 if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) { 4354 error = strgeterr(stp, 4355 STRDERR|STWRERR|STRHUP|STPLEX, 0); 4356 if (error != 0) { 4357 STRUNLOCKMATES(stp); 4358 return (error); 4359 } 4360 } 4361 } 4362 stp->sd_flag |= STRPLUMB; 4363 STRUNLOCKMATES(stp); 4364 } else { 4365 mutex_enter(&stp->sd_lock); 4366 while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 4367 if (((cmd == I_POP) || (cmd == _I_REMOVE)) && 4368 (flag & (FNDELAY|FNONBLOCK))) { 4369 mutex_exit(&stp->sd_lock); 4370 return (EAGAIN); 4371 } 4372 if (!cv_wait_sig(&stp->sd_monitor, &stp->sd_lock)) { 4373 mutex_exit(&stp->sd_lock); 4374 return (EINTR); 4375 } 4376 if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) { 4377 error = strgeterr(stp, 4378 STRDERR|STWRERR|STRHUP|STPLEX, 0); 4379 if (error != 0) { 4380 mutex_exit(&stp->sd_lock); 4381 return (error); 4382 } 4383 } 4384 } 4385 stp->sd_flag |= STRPLUMB; 4386 mutex_exit(&stp->sd_lock); 4387 } 4388 return (0); 4389 } 4390 4391 /* 4392 * Complete the plumbing operation associated with stream `stp'. 4393 */ 4394 void 4395 strendplumb(stdata_t *stp) 4396 { 4397 ASSERT(MUTEX_HELD(&stp->sd_lock)); 4398 ASSERT(stp->sd_flag & STRPLUMB); 4399 stp->sd_flag &= ~STRPLUMB; 4400 cv_broadcast(&stp->sd_monitor); 4401 } 4402 4403 /* 4404 * This describes how the STREAMS framework handles synchronization 4405 * during open/push and close/pop. 4406 * The key interfaces for open and close are qprocson and qprocsoff, 4407 * respectively. While the close case in general is harder both open 4408 * have close have significant similarities. 4409 * 4410 * During close the STREAMS framework has to both ensure that there 4411 * are no stale references to the queue pair (and syncq) that 4412 * are being closed and also provide the guarantees that are documented 4413 * in qprocsoff(9F). 4414 * If there are stale references to the queue that is closing it can 4415 * result in kernel memory corruption or kernel panics. 4416 * 4417 * Note that is it up to the module/driver to ensure that it itself 4418 * does not have any stale references to the closing queues once its close 4419 * routine returns. This includes: 4420 * - Cancelling any timeout/bufcall/qtimeout/qbufcall callback routines 4421 * associated with the queues. For timeout and bufcall callbacks the 4422 * module/driver also has to ensure (or wait for) any callbacks that 4423 * are in progress. 4424 * - If the module/driver is using esballoc it has to ensure that any 4425 * esballoc free functions do not refer to a queue that has closed. 4426 * (Note that in general the close routine can not wait for the esballoc'ed 4427 * messages to be freed since that can cause a deadlock.) 4428 * - Cancelling any interrupts that refer to the closing queues and 4429 * also ensuring that there are no interrupts in progress that will 4430 * refer to the closing queues once the close routine returns. 4431 * - For multiplexors removing any driver global state that refers to 4432 * the closing queue and also ensuring that there are no threads in 4433 * the multiplexor that has picked up a queue pointer but not yet 4434 * finished using it. 4435 * 4436 * In addition, a driver/module can only reference the q_next pointer 4437 * in its open, close, put, or service procedures or in a 4438 * qtimeout/qbufcall callback procedure executing "on" the correct 4439 * stream. Thus it can not reference the q_next pointer in an interrupt 4440 * routine or a timeout, bufcall or esballoc callback routine. Likewise 4441 * it can not reference q_next of a different queue e.g. in a mux that 4442 * passes messages from one queues put/service procedure to another queue. 4443 * In all the cases when the driver/module can not access the q_next 4444 * field it must use the *next* versions e.g. canputnext instead of 4445 * canput(q->q_next) and putnextctl instead of putctl(q->q_next, ...). 4446 * 4447 * 4448 * Assuming that the driver/module conforms to the above constraints 4449 * the STREAMS framework has to avoid stale references to q_next for all 4450 * the framework internal cases which include (but are not limited to): 4451 * - Threads in canput/canputnext/backenable and elsewhere that are 4452 * walking q_next. 4453 * - Messages on a syncq that have a reference to the queue through b_queue. 4454 * - Messages on an outer perimeter (syncq) that have a reference to the 4455 * queue through b_queue. 4456 * - Threads that use q_nfsrv (e.g. canput) to find a queue. 4457 * Note that only canput and bcanput use q_nfsrv without any locking. 4458 * 4459 * The STREAMS framework providing the qprocsoff(9F) guarantees means that 4460 * after qprocsoff returns, the framework has to ensure that no threads can 4461 * enter the put or service routines for the closing read or write-side queue. 4462 * In addition to preventing "direct" entry into the put procedures 4463 * the framework also has to prevent messages being drained from 4464 * the syncq or the outer perimeter. 4465 * XXX Note that currently qdetach does relies on D_MTOCEXCL as the only 4466 * mechanism to prevent qwriter(PERIM_OUTER) from running after 4467 * qprocsoff has returned. 4468 * Note that if a module/driver uses put(9F) on one of its own queues 4469 * it is up to the module/driver to ensure that the put() doesn't 4470 * get called when the queue is closing. 4471 * 4472 * 4473 * The framework aspects of the above "contract" is implemented by 4474 * qprocsoff, removeq, and strlock: 4475 * - qprocsoff (disable_svc) sets QWCLOSE to prevent runservice from 4476 * entering the service procedures. 4477 * - strlock acquires the sd_lock and sd_reflock to prevent putnext, 4478 * canputnext, backenable etc from dereferencing the q_next that will 4479 * soon change. 4480 * - strlock waits for sd_refcnt to be zero to wait for e.g. any canputnext 4481 * or other q_next walker that uses claimstr/releasestr to finish. 4482 * - optionally for every syncq in the stream strlock acquires all the 4483 * sq_lock's and waits for all sq_counts to drop to a value that indicates 4484 * that no thread executes in the put or service procedures and that no 4485 * thread is draining into the module/driver. This ensures that no 4486 * open, close, put, service, or qtimeout/qbufcall callback procedure is 4487 * currently executing hence no such thread can end up with the old stale 4488 * q_next value and no canput/backenable can have the old stale 4489 * q_nfsrv/q_next. 4490 * - qdetach (wait_svc) makes sure that any scheduled or running threads 4491 * have either finished or observed the QWCLOSE flag and gone away. 4492 */ 4493 4494 4495 /* 4496 * Get all the locks necessary to change q_next. 4497 * 4498 * Wait for sd_refcnt to reach 0 and, if sqlist is present, wait for the 4499 * sq_count of each syncq in the list to drop to sq_rmqcount, indicating that 4500 * the only threads inside the syncq are threads currently calling removeq(). 4501 * Since threads calling removeq() are in the process of removing their queues 4502 * from the stream, we do not need to worry about them accessing a stale q_next 4503 * pointer and thus we do not need to wait for them to exit (in fact, waiting 4504 * for them can cause deadlock). 4505 * 4506 * This routine is subject to starvation since it does not set any flag to 4507 * prevent threads from entering a module in the stream (i.e. sq_count can 4508 * increase on some syncq while it is waiting on some other syncq). 4509 * 4510 * Assumes that only one thread attempts to call strlock for a given 4511 * stream. If this is not the case the two threads would deadlock. 4512 * This assumption is guaranteed since strlock is only called by insertq 4513 * and removeq and streams plumbing changes are single-threaded for 4514 * a given stream using the STWOPEN, STRCLOSE, and STRPLUMB flags. 4515 * 4516 * For pipes, it is not difficult to atomically designate a pair of streams 4517 * to be mated. Once mated atomically by the framework the twisted pair remain 4518 * configured that way until dismantled atomically by the framework. 4519 * When plumbing takes place on a twisted stream it is necessary to ensure that 4520 * this operation is done exclusively on the twisted stream since two such 4521 * operations, each initiated on different ends of the pipe will deadlock 4522 * waiting for each other to complete. 4523 * 4524 * On entry, no locks should be held. 4525 * The locks acquired and held by strlock depends on a few factors. 4526 * - If sqlist is non-NULL all the syncq locks in the sqlist will be acquired 4527 * and held on exit and all sq_count are at an acceptable level. 4528 * - In all cases, sd_lock and sd_reflock are acquired and held on exit with 4529 * sd_refcnt being zero. 4530 */ 4531 4532 static void 4533 strlock(struct stdata *stp, sqlist_t *sqlist) 4534 { 4535 syncql_t *sql, *sql2; 4536 retry: 4537 /* 4538 * Wait for any claimstr to go away. 4539 */ 4540 if (STRMATED(stp)) { 4541 struct stdata *stp1, *stp2; 4542 4543 STRLOCKMATES(stp); 4544 /* 4545 * Note that the selection of locking order is not 4546 * important, just that they are always acquired in 4547 * the same order. To assure this, we choose this 4548 * order based on the value of the pointer, and since 4549 * the pointer will not change for the life of this 4550 * pair, we will always grab the locks in the same 4551 * order (and hence, prevent deadlocks). 4552 */ 4553 if (&(stp->sd_lock) > &((stp->sd_mate)->sd_lock)) { 4554 stp1 = stp; 4555 stp2 = stp->sd_mate; 4556 } else { 4557 stp2 = stp; 4558 stp1 = stp->sd_mate; 4559 } 4560 mutex_enter(&stp1->sd_reflock); 4561 if (stp1->sd_refcnt > 0) { 4562 STRUNLOCKMATES(stp); 4563 cv_wait(&stp1->sd_refmonitor, &stp1->sd_reflock); 4564 mutex_exit(&stp1->sd_reflock); 4565 goto retry; 4566 } 4567 mutex_enter(&stp2->sd_reflock); 4568 if (stp2->sd_refcnt > 0) { 4569 STRUNLOCKMATES(stp); 4570 mutex_exit(&stp1->sd_reflock); 4571 cv_wait(&stp2->sd_refmonitor, &stp2->sd_reflock); 4572 mutex_exit(&stp2->sd_reflock); 4573 goto retry; 4574 } 4575 STREAM_PUTLOCKS_ENTER(stp1); 4576 STREAM_PUTLOCKS_ENTER(stp2); 4577 } else { 4578 mutex_enter(&stp->sd_lock); 4579 mutex_enter(&stp->sd_reflock); 4580 while (stp->sd_refcnt > 0) { 4581 mutex_exit(&stp->sd_lock); 4582 cv_wait(&stp->sd_refmonitor, &stp->sd_reflock); 4583 if (mutex_tryenter(&stp->sd_lock) == 0) { 4584 mutex_exit(&stp->sd_reflock); 4585 mutex_enter(&stp->sd_lock); 4586 mutex_enter(&stp->sd_reflock); 4587 } 4588 } 4589 STREAM_PUTLOCKS_ENTER(stp); 4590 } 4591 4592 if (sqlist == NULL) 4593 return; 4594 4595 for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) { 4596 syncq_t *sq = sql->sql_sq; 4597 uint16_t count; 4598 4599 mutex_enter(SQLOCK(sq)); 4600 count = sq->sq_count; 4601 ASSERT(sq->sq_rmqcount <= count); 4602 SQ_PUTLOCKS_ENTER(sq); 4603 SUM_SQ_PUTCOUNTS(sq, count); 4604 if (count == sq->sq_rmqcount) 4605 continue; 4606 4607 /* Failed - drop all locks that we have acquired so far */ 4608 if (STRMATED(stp)) { 4609 STREAM_PUTLOCKS_EXIT(stp); 4610 STREAM_PUTLOCKS_EXIT(stp->sd_mate); 4611 STRUNLOCKMATES(stp); 4612 mutex_exit(&stp->sd_reflock); 4613 mutex_exit(&stp->sd_mate->sd_reflock); 4614 } else { 4615 STREAM_PUTLOCKS_EXIT(stp); 4616 mutex_exit(&stp->sd_lock); 4617 mutex_exit(&stp->sd_reflock); 4618 } 4619 for (sql2 = sqlist->sqlist_head; sql2 != sql; 4620 sql2 = sql2->sql_next) { 4621 SQ_PUTLOCKS_EXIT(sql2->sql_sq); 4622 mutex_exit(SQLOCK(sql2->sql_sq)); 4623 } 4624 4625 /* 4626 * The wait loop below may starve when there are many threads 4627 * claiming the syncq. This is especially a problem with permod 4628 * syncqs (IP). To lessen the impact of the problem we increment 4629 * sq_needexcl and clear fastbits so that putnexts will slow 4630 * down and call sqenable instead of draining right away. 4631 */ 4632 sq->sq_needexcl++; 4633 SQ_PUTCOUNT_CLRFAST_LOCKED(sq); 4634 while (count > sq->sq_rmqcount) { 4635 sq->sq_flags |= SQ_WANTWAKEUP; 4636 SQ_PUTLOCKS_EXIT(sq); 4637 cv_wait(&sq->sq_wait, SQLOCK(sq)); 4638 count = sq->sq_count; 4639 SQ_PUTLOCKS_ENTER(sq); 4640 SUM_SQ_PUTCOUNTS(sq, count); 4641 } 4642 sq->sq_needexcl--; 4643 if (sq->sq_needexcl == 0) 4644 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 4645 SQ_PUTLOCKS_EXIT(sq); 4646 ASSERT(count == sq->sq_rmqcount); 4647 mutex_exit(SQLOCK(sq)); 4648 goto retry; 4649 } 4650 } 4651 4652 /* 4653 * Drop all the locks that strlock acquired. 4654 */ 4655 static void 4656 strunlock(struct stdata *stp, sqlist_t *sqlist) 4657 { 4658 syncql_t *sql; 4659 4660 if (STRMATED(stp)) { 4661 STREAM_PUTLOCKS_EXIT(stp); 4662 STREAM_PUTLOCKS_EXIT(stp->sd_mate); 4663 STRUNLOCKMATES(stp); 4664 mutex_exit(&stp->sd_reflock); 4665 mutex_exit(&stp->sd_mate->sd_reflock); 4666 } else { 4667 STREAM_PUTLOCKS_EXIT(stp); 4668 mutex_exit(&stp->sd_lock); 4669 mutex_exit(&stp->sd_reflock); 4670 } 4671 4672 if (sqlist == NULL) 4673 return; 4674 4675 for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) { 4676 SQ_PUTLOCKS_EXIT(sql->sql_sq); 4677 mutex_exit(SQLOCK(sql->sql_sq)); 4678 } 4679 } 4680 4681 /* 4682 * When the module has service procedure, we need check if the next 4683 * module which has service procedure is in flow control to trigger 4684 * the backenable. 4685 */ 4686 static void 4687 backenable_insertedq(queue_t *q) 4688 { 4689 qband_t *qbp; 4690 4691 claimstr(q); 4692 if (q->q_qinfo->qi_srvp != NULL && q->q_next != NULL) { 4693 if (q->q_next->q_nfsrv->q_flag & QWANTW) 4694 backenable(q, 0); 4695 4696 qbp = q->q_next->q_nfsrv->q_bandp; 4697 for (; qbp != NULL; qbp = qbp->qb_next) 4698 if ((qbp->qb_flag & QB_WANTW) && qbp->qb_first != NULL) 4699 backenable(q, qbp->qb_first->b_band); 4700 } 4701 releasestr(q); 4702 } 4703 4704 /* 4705 * Given two read queues, insert a new single one after another. 4706 * 4707 * This routine acquires all the necessary locks in order to change 4708 * q_next and related pointer using strlock(). 4709 * It depends on the stream head ensuring that there are no concurrent 4710 * insertq or removeq on the same stream. The stream head ensures this 4711 * using the flags STWOPEN, STRCLOSE, and STRPLUMB. 4712 * 4713 * Note that no syncq locks are held during the q_next change. This is 4714 * applied to all streams since, unlike removeq, there is no problem of stale 4715 * pointers when adding a module to the stream. Thus drivers/modules that do a 4716 * canput(rq->q_next) would never get a closed/freed queue pointer even if we 4717 * applied this optimization to all streams. 4718 */ 4719 void 4720 insertq(struct stdata *stp, queue_t *new) 4721 { 4722 queue_t *after; 4723 queue_t *wafter; 4724 queue_t *wnew = _WR(new); 4725 boolean_t have_fifo = B_FALSE; 4726 4727 if (new->q_flag & _QINSERTING) { 4728 ASSERT(stp->sd_vnode->v_type != VFIFO); 4729 after = new->q_next; 4730 wafter = _WR(new->q_next); 4731 } else { 4732 after = _RD(stp->sd_wrq); 4733 wafter = stp->sd_wrq; 4734 } 4735 4736 TRACE_2(TR_FAC_STREAMS_FR, TR_INSERTQ, 4737 "insertq:%p, %p", after, new); 4738 ASSERT(after->q_flag & QREADR); 4739 ASSERT(new->q_flag & QREADR); 4740 4741 strlock(stp, NULL); 4742 4743 /* Do we have a FIFO? */ 4744 if (wafter->q_next == after) { 4745 have_fifo = B_TRUE; 4746 wnew->q_next = new; 4747 } else { 4748 wnew->q_next = wafter->q_next; 4749 } 4750 new->q_next = after; 4751 4752 set_nfsrv_ptr(new, wnew, after, wafter); 4753 /* 4754 * set_nfsrv_ptr() needs to know if this is an insertion or not, 4755 * so only reset this flag after calling it. 4756 */ 4757 new->q_flag &= ~_QINSERTING; 4758 4759 if (have_fifo) { 4760 wafter->q_next = wnew; 4761 } else { 4762 if (wafter->q_next) 4763 _OTHERQ(wafter->q_next)->q_next = new; 4764 wafter->q_next = wnew; 4765 } 4766 4767 set_qend(new); 4768 /* The QEND flag might have to be updated for the upstream guy */ 4769 set_qend(after); 4770 4771 ASSERT(_SAMESTR(new) == O_SAMESTR(new)); 4772 ASSERT(_SAMESTR(wnew) == O_SAMESTR(wnew)); 4773 ASSERT(_SAMESTR(after) == O_SAMESTR(after)); 4774 ASSERT(_SAMESTR(wafter) == O_SAMESTR(wafter)); 4775 strsetuio(stp); 4776 4777 /* 4778 * If this was a module insertion, bump the push count. 4779 */ 4780 if (!(new->q_flag & QISDRV)) 4781 stp->sd_pushcnt++; 4782 4783 strunlock(stp, NULL); 4784 4785 /* check if the write Q needs backenable */ 4786 backenable_insertedq(wnew); 4787 4788 /* check if the read Q needs backenable */ 4789 backenable_insertedq(new); 4790 } 4791 4792 /* 4793 * Given a read queue, unlink it from any neighbors. 4794 * 4795 * This routine acquires all the necessary locks in order to 4796 * change q_next and related pointers and also guard against 4797 * stale references (e.g. through q_next) to the queue that 4798 * is being removed. It also plays part of the role in ensuring 4799 * that the module's/driver's put procedure doesn't get called 4800 * after qprocsoff returns. 4801 * 4802 * Removeq depends on the stream head ensuring that there are 4803 * no concurrent insertq or removeq on the same stream. The 4804 * stream head ensures this using the flags STWOPEN, STRCLOSE and 4805 * STRPLUMB. 4806 * 4807 * The set of locks needed to remove the queue is different in 4808 * different cases: 4809 * 4810 * Acquire sd_lock, sd_reflock, and all the syncq locks in the stream after 4811 * waiting for the syncq reference count to drop to 0 indicating that no 4812 * non-close threads are present anywhere in the stream. This ensures that any 4813 * module/driver can reference q_next in its open, close, put, or service 4814 * procedures. 4815 * 4816 * The sq_rmqcount counter tracks the number of threads inside removeq(). 4817 * strlock() ensures that there is either no threads executing inside perimeter 4818 * or there is only a thread calling qprocsoff(). 4819 * 4820 * strlock() compares the value of sq_count with the number of threads inside 4821 * removeq() and waits until sq_count is equal to sq_rmqcount. We need to wakeup 4822 * any threads waiting in strlock() when the sq_rmqcount increases. 4823 */ 4824 4825 void 4826 removeq(queue_t *qp) 4827 { 4828 queue_t *wqp = _WR(qp); 4829 struct stdata *stp = STREAM(qp); 4830 sqlist_t *sqlist = NULL; 4831 boolean_t isdriver; 4832 int moved; 4833 syncq_t *sq = qp->q_syncq; 4834 syncq_t *wsq = wqp->q_syncq; 4835 4836 ASSERT(stp); 4837 4838 TRACE_2(TR_FAC_STREAMS_FR, TR_REMOVEQ, 4839 "removeq:%p %p", qp, wqp); 4840 ASSERT(qp->q_flag&QREADR); 4841 4842 /* 4843 * For queues using Synchronous streams, we must wait for all threads in 4844 * rwnext() to drain out before proceeding. 4845 */ 4846 if (qp->q_flag & QSYNCSTR) { 4847 /* First, we need wakeup any threads blocked in rwnext() */ 4848 mutex_enter(SQLOCK(sq)); 4849 if (sq->sq_flags & SQ_WANTWAKEUP) { 4850 sq->sq_flags &= ~SQ_WANTWAKEUP; 4851 cv_broadcast(&sq->sq_wait); 4852 } 4853 mutex_exit(SQLOCK(sq)); 4854 4855 if (wsq != sq) { 4856 mutex_enter(SQLOCK(wsq)); 4857 if (wsq->sq_flags & SQ_WANTWAKEUP) { 4858 wsq->sq_flags &= ~SQ_WANTWAKEUP; 4859 cv_broadcast(&wsq->sq_wait); 4860 } 4861 mutex_exit(SQLOCK(wsq)); 4862 } 4863 4864 mutex_enter(QLOCK(qp)); 4865 while (qp->q_rwcnt > 0) { 4866 qp->q_flag |= QWANTRMQSYNC; 4867 cv_wait(&qp->q_wait, QLOCK(qp)); 4868 } 4869 mutex_exit(QLOCK(qp)); 4870 4871 mutex_enter(QLOCK(wqp)); 4872 while (wqp->q_rwcnt > 0) { 4873 wqp->q_flag |= QWANTRMQSYNC; 4874 cv_wait(&wqp->q_wait, QLOCK(wqp)); 4875 } 4876 mutex_exit(QLOCK(wqp)); 4877 } 4878 4879 mutex_enter(SQLOCK(sq)); 4880 sq->sq_rmqcount++; 4881 if (sq->sq_flags & SQ_WANTWAKEUP) { 4882 sq->sq_flags &= ~SQ_WANTWAKEUP; 4883 cv_broadcast(&sq->sq_wait); 4884 } 4885 mutex_exit(SQLOCK(sq)); 4886 4887 isdriver = (qp->q_flag & QISDRV); 4888 4889 sqlist = sqlist_build(qp, stp, STRMATED(stp)); 4890 strlock(stp, sqlist); 4891 4892 reset_nfsrv_ptr(qp, wqp); 4893 4894 ASSERT(wqp->q_next == NULL || backq(qp)->q_next == qp); 4895 ASSERT(qp->q_next == NULL || backq(wqp)->q_next == wqp); 4896 /* Do we have a FIFO? */ 4897 if (wqp->q_next == qp) { 4898 stp->sd_wrq->q_next = _RD(stp->sd_wrq); 4899 } else { 4900 if (wqp->q_next) 4901 backq(qp)->q_next = qp->q_next; 4902 if (qp->q_next) 4903 backq(wqp)->q_next = wqp->q_next; 4904 } 4905 4906 /* The QEND flag might have to be updated for the upstream guy */ 4907 if (qp->q_next) 4908 set_qend(qp->q_next); 4909 4910 ASSERT(_SAMESTR(stp->sd_wrq) == O_SAMESTR(stp->sd_wrq)); 4911 ASSERT(_SAMESTR(_RD(stp->sd_wrq)) == O_SAMESTR(_RD(stp->sd_wrq))); 4912 4913 /* 4914 * Move any messages destined for the put procedures to the next 4915 * syncq in line. Otherwise free them. 4916 */ 4917 moved = 0; 4918 /* 4919 * Quick check to see whether there are any messages or events. 4920 */ 4921 if (qp->q_syncqmsgs != 0 || (qp->q_syncq->sq_flags & SQ_EVENTS)) 4922 moved += propagate_syncq(qp); 4923 if (wqp->q_syncqmsgs != 0 || 4924 (wqp->q_syncq->sq_flags & SQ_EVENTS)) 4925 moved += propagate_syncq(wqp); 4926 4927 strsetuio(stp); 4928 4929 /* 4930 * If this was a module removal, decrement the push count. 4931 */ 4932 if (!isdriver) 4933 stp->sd_pushcnt--; 4934 4935 strunlock(stp, sqlist); 4936 sqlist_free(sqlist); 4937 4938 /* 4939 * Make sure any messages that were propagated are drained. 4940 * Also clear any QFULL bit caused by messages that were propagated. 4941 */ 4942 4943 if (qp->q_next != NULL) { 4944 clr_qfull(qp); 4945 /* 4946 * For the driver calling qprocsoff, propagate_syncq 4947 * frees all the messages instead of putting it in 4948 * the stream head 4949 */ 4950 if (!isdriver && (moved > 0)) 4951 emptysq(qp->q_next->q_syncq); 4952 } 4953 if (wqp->q_next != NULL) { 4954 clr_qfull(wqp); 4955 /* 4956 * We come here for any pop of a module except for the 4957 * case of driver being removed. We don't call emptysq 4958 * if we did not move any messages. This will avoid holding 4959 * PERMOD syncq locks in emptysq 4960 */ 4961 if (moved > 0) 4962 emptysq(wqp->q_next->q_syncq); 4963 } 4964 4965 mutex_enter(SQLOCK(sq)); 4966 sq->sq_rmqcount--; 4967 mutex_exit(SQLOCK(sq)); 4968 } 4969 4970 /* 4971 * Prevent further entry by setting a flag (like SQ_FROZEN, SQ_BLOCKED or 4972 * SQ_WRITER) on a syncq. 4973 * If maxcnt is not -1 it assumes that caller has "maxcnt" claim(s) on the 4974 * sync queue and waits until sq_count reaches maxcnt. 4975 * 4976 * If maxcnt is -1 there's no need to grab sq_putlocks since the caller 4977 * does not care about putnext threads that are in the middle of calling put 4978 * entry points. 4979 * 4980 * This routine is used for both inner and outer syncqs. 4981 */ 4982 static void 4983 blocksq(syncq_t *sq, ushort_t flag, int maxcnt) 4984 { 4985 uint16_t count = 0; 4986 4987 mutex_enter(SQLOCK(sq)); 4988 /* 4989 * Wait for SQ_FROZEN/SQ_BLOCKED to be reset. 4990 * SQ_FROZEN will be set if there is a frozen stream that has a 4991 * queue which also refers to this "shared" syncq. 4992 * SQ_BLOCKED will be set if there is "off" queue which also 4993 * refers to this "shared" syncq. 4994 */ 4995 if (maxcnt != -1) { 4996 count = sq->sq_count; 4997 SQ_PUTLOCKS_ENTER(sq); 4998 SQ_PUTCOUNT_CLRFAST_LOCKED(sq); 4999 SUM_SQ_PUTCOUNTS(sq, count); 5000 } 5001 sq->sq_needexcl++; 5002 ASSERT(sq->sq_needexcl != 0); /* wraparound */ 5003 5004 while ((sq->sq_flags & flag) || 5005 (maxcnt != -1 && count > (unsigned)maxcnt)) { 5006 sq->sq_flags |= SQ_WANTWAKEUP; 5007 if (maxcnt != -1) { 5008 SQ_PUTLOCKS_EXIT(sq); 5009 } 5010 cv_wait(&sq->sq_wait, SQLOCK(sq)); 5011 if (maxcnt != -1) { 5012 count = sq->sq_count; 5013 SQ_PUTLOCKS_ENTER(sq); 5014 SUM_SQ_PUTCOUNTS(sq, count); 5015 } 5016 } 5017 sq->sq_needexcl--; 5018 sq->sq_flags |= flag; 5019 ASSERT(maxcnt == -1 || count == maxcnt); 5020 if (maxcnt != -1) { 5021 if (sq->sq_needexcl == 0) { 5022 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 5023 } 5024 SQ_PUTLOCKS_EXIT(sq); 5025 } else if (sq->sq_needexcl == 0) { 5026 SQ_PUTCOUNT_SETFAST(sq); 5027 } 5028 5029 mutex_exit(SQLOCK(sq)); 5030 } 5031 5032 /* 5033 * Reset a flag that was set with blocksq. 5034 * 5035 * Can not use this routine to reset SQ_WRITER. 5036 * 5037 * If "isouter" is set then the syncq is assumed to be an outer perimeter 5038 * and drain_syncq is not called. Instead we rely on the qwriter_outer thread 5039 * to handle the queued qwriter operations. 5040 * 5041 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5042 * sq_putlocks are used. 5043 */ 5044 static void 5045 unblocksq(syncq_t *sq, uint16_t resetflag, int isouter) 5046 { 5047 uint16_t flags; 5048 5049 mutex_enter(SQLOCK(sq)); 5050 ASSERT(resetflag != SQ_WRITER); 5051 ASSERT(sq->sq_flags & resetflag); 5052 flags = sq->sq_flags & ~resetflag; 5053 sq->sq_flags = flags; 5054 if (flags & (SQ_QUEUED | SQ_WANTWAKEUP)) { 5055 if (flags & SQ_WANTWAKEUP) { 5056 flags &= ~SQ_WANTWAKEUP; 5057 cv_broadcast(&sq->sq_wait); 5058 } 5059 sq->sq_flags = flags; 5060 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) { 5061 if (!isouter) { 5062 /* drain_syncq drops SQLOCK */ 5063 drain_syncq(sq); 5064 return; 5065 } 5066 } 5067 } 5068 mutex_exit(SQLOCK(sq)); 5069 } 5070 5071 /* 5072 * Reset a flag that was set with blocksq. 5073 * Does not drain the syncq. Use emptysq() for that. 5074 * Returns 1 if SQ_QUEUED is set. Otherwise 0. 5075 * 5076 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5077 * sq_putlocks are used. 5078 */ 5079 static int 5080 dropsq(syncq_t *sq, uint16_t resetflag) 5081 { 5082 uint16_t flags; 5083 5084 mutex_enter(SQLOCK(sq)); 5085 ASSERT(sq->sq_flags & resetflag); 5086 flags = sq->sq_flags & ~resetflag; 5087 if (flags & SQ_WANTWAKEUP) { 5088 flags &= ~SQ_WANTWAKEUP; 5089 cv_broadcast(&sq->sq_wait); 5090 } 5091 sq->sq_flags = flags; 5092 mutex_exit(SQLOCK(sq)); 5093 if (flags & SQ_QUEUED) 5094 return (1); 5095 return (0); 5096 } 5097 5098 /* 5099 * Empty all the messages on a syncq. 5100 * 5101 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5102 * sq_putlocks are used. 5103 */ 5104 static void 5105 emptysq(syncq_t *sq) 5106 { 5107 uint16_t flags; 5108 5109 mutex_enter(SQLOCK(sq)); 5110 flags = sq->sq_flags; 5111 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) { 5112 /* 5113 * To prevent potential recursive invocation of drain_syncq we 5114 * do not call drain_syncq if count is non-zero. 5115 */ 5116 if (sq->sq_count == 0) { 5117 /* drain_syncq() drops SQLOCK */ 5118 drain_syncq(sq); 5119 return; 5120 } else 5121 sqenable(sq); 5122 } 5123 mutex_exit(SQLOCK(sq)); 5124 } 5125 5126 /* 5127 * Ordered insert while removing duplicates. 5128 */ 5129 static void 5130 sqlist_insert(sqlist_t *sqlist, syncq_t *sqp) 5131 { 5132 syncql_t *sqlp, **prev_sqlpp, *new_sqlp; 5133 5134 prev_sqlpp = &sqlist->sqlist_head; 5135 while ((sqlp = *prev_sqlpp) != NULL) { 5136 if (sqlp->sql_sq >= sqp) { 5137 if (sqlp->sql_sq == sqp) /* duplicate */ 5138 return; 5139 break; 5140 } 5141 prev_sqlpp = &sqlp->sql_next; 5142 } 5143 new_sqlp = &sqlist->sqlist_array[sqlist->sqlist_index++]; 5144 ASSERT((char *)new_sqlp < (char *)sqlist + sqlist->sqlist_size); 5145 new_sqlp->sql_next = sqlp; 5146 new_sqlp->sql_sq = sqp; 5147 *prev_sqlpp = new_sqlp; 5148 } 5149 5150 /* 5151 * Walk the write side queues until we hit either the driver 5152 * or a twist in the stream (_SAMESTR will return false in both 5153 * these cases) then turn around and walk the read side queues 5154 * back up to the stream head. 5155 */ 5156 static void 5157 sqlist_insertall(sqlist_t *sqlist, queue_t *q) 5158 { 5159 while (q != NULL) { 5160 sqlist_insert(sqlist, q->q_syncq); 5161 5162 if (_SAMESTR(q)) 5163 q = q->q_next; 5164 else if (!(q->q_flag & QREADR)) 5165 q = _RD(q); 5166 else 5167 q = NULL; 5168 } 5169 } 5170 5171 /* 5172 * Allocate and build a list of all syncqs in a stream and the syncq(s) 5173 * associated with the "q" parameter. The resulting list is sorted in a 5174 * canonical order and is free of duplicates. 5175 * Assumes the passed queue is a _RD(q). 5176 */ 5177 static sqlist_t * 5178 sqlist_build(queue_t *q, struct stdata *stp, boolean_t do_twist) 5179 { 5180 sqlist_t *sqlist = sqlist_alloc(stp, KM_SLEEP); 5181 5182 /* 5183 * start with the current queue/qpair 5184 */ 5185 ASSERT(q->q_flag & QREADR); 5186 5187 sqlist_insert(sqlist, q->q_syncq); 5188 sqlist_insert(sqlist, _WR(q)->q_syncq); 5189 5190 sqlist_insertall(sqlist, stp->sd_wrq); 5191 if (do_twist) 5192 sqlist_insertall(sqlist, stp->sd_mate->sd_wrq); 5193 5194 return (sqlist); 5195 } 5196 5197 static sqlist_t * 5198 sqlist_alloc(struct stdata *stp, int kmflag) 5199 { 5200 size_t sqlist_size; 5201 sqlist_t *sqlist; 5202 5203 /* 5204 * Allocate 2 syncql_t's for each pushed module. Note that 5205 * the sqlist_t structure already has 4 syncql_t's built in: 5206 * 2 for the stream head, and 2 for the driver/other stream head. 5207 */ 5208 sqlist_size = 2 * sizeof (syncql_t) * stp->sd_pushcnt + 5209 sizeof (sqlist_t); 5210 if (STRMATED(stp)) 5211 sqlist_size += 2 * sizeof (syncql_t) * stp->sd_mate->sd_pushcnt; 5212 sqlist = kmem_alloc(sqlist_size, kmflag); 5213 5214 sqlist->sqlist_head = NULL; 5215 sqlist->sqlist_size = sqlist_size; 5216 sqlist->sqlist_index = 0; 5217 5218 return (sqlist); 5219 } 5220 5221 /* 5222 * Free the list created by sqlist_alloc() 5223 */ 5224 static void 5225 sqlist_free(sqlist_t *sqlist) 5226 { 5227 kmem_free(sqlist, sqlist->sqlist_size); 5228 } 5229 5230 /* 5231 * Prevent any new entries into any syncq in this stream. 5232 * Used by freezestr. 5233 */ 5234 void 5235 strblock(queue_t *q) 5236 { 5237 struct stdata *stp; 5238 syncql_t *sql; 5239 sqlist_t *sqlist; 5240 5241 q = _RD(q); 5242 5243 stp = STREAM(q); 5244 ASSERT(stp != NULL); 5245 5246 /* 5247 * Get a sorted list with all the duplicates removed containing 5248 * all the syncqs referenced by this stream. 5249 */ 5250 sqlist = sqlist_build(q, stp, B_FALSE); 5251 for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next) 5252 blocksq(sql->sql_sq, SQ_FROZEN, -1); 5253 sqlist_free(sqlist); 5254 } 5255 5256 /* 5257 * Release the block on new entries into this stream 5258 */ 5259 void 5260 strunblock(queue_t *q) 5261 { 5262 struct stdata *stp; 5263 syncql_t *sql; 5264 sqlist_t *sqlist; 5265 int drain_needed; 5266 5267 q = _RD(q); 5268 5269 /* 5270 * Get a sorted list with all the duplicates removed containing 5271 * all the syncqs referenced by this stream. 5272 * Have to drop the SQ_FROZEN flag on all the syncqs before 5273 * starting to drain them; otherwise the draining might 5274 * cause a freezestr in some module on the stream (which 5275 * would deadlock). 5276 */ 5277 stp = STREAM(q); 5278 ASSERT(stp != NULL); 5279 sqlist = sqlist_build(q, stp, B_FALSE); 5280 drain_needed = 0; 5281 for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next) 5282 drain_needed += dropsq(sql->sql_sq, SQ_FROZEN); 5283 if (drain_needed) { 5284 for (sql = sqlist->sqlist_head; sql != NULL; 5285 sql = sql->sql_next) 5286 emptysq(sql->sql_sq); 5287 } 5288 sqlist_free(sqlist); 5289 } 5290 5291 #ifdef DEBUG 5292 static int 5293 qprocsareon(queue_t *rq) 5294 { 5295 if (rq->q_next == NULL) 5296 return (0); 5297 return (_WR(rq->q_next)->q_next == _WR(rq)); 5298 } 5299 5300 int 5301 qclaimed(queue_t *q) 5302 { 5303 uint_t count; 5304 5305 count = q->q_syncq->sq_count; 5306 SUM_SQ_PUTCOUNTS(q->q_syncq, count); 5307 return (count != 0); 5308 } 5309 5310 /* 5311 * Check if anyone has frozen this stream with freezestr 5312 */ 5313 int 5314 frozenstr(queue_t *q) 5315 { 5316 return ((q->q_syncq->sq_flags & SQ_FROZEN) != 0); 5317 } 5318 #endif /* DEBUG */ 5319 5320 /* 5321 * Enter a queue. 5322 * Obsoleted interface. Should not be used. 5323 */ 5324 void 5325 enterq(queue_t *q) 5326 { 5327 entersq(q->q_syncq, SQ_CALLBACK); 5328 } 5329 5330 void 5331 leaveq(queue_t *q) 5332 { 5333 leavesq(q->q_syncq, SQ_CALLBACK); 5334 } 5335 5336 /* 5337 * Enter a perimeter. c_inner and c_outer specifies which concurrency bits 5338 * to check. 5339 * Wait if SQ_QUEUED is set to preserve ordering between messages and qwriter 5340 * calls and the running of open, close and service procedures. 5341 * 5342 * If c_inner bit is set no need to grab sq_putlocks since we don't care 5343 * if other threads have entered or are entering put entry point. 5344 * 5345 * If c_inner bit is set it might have been possible to use 5346 * sq_putlocks/sq_putcounts instead of SQLOCK/sq_count (e.g. to optimize 5347 * open/close path for IP) but since the count may need to be decremented in 5348 * qwait() we wouldn't know which counter to decrement. Currently counter is 5349 * selected by current cpu_seqid and current CPU can change at any moment. XXX 5350 * in the future we might use curthread id bits to select the counter and this 5351 * would stay constant across routine calls. 5352 */ 5353 void 5354 entersq(syncq_t *sq, int entrypoint) 5355 { 5356 uint16_t count = 0; 5357 uint16_t flags; 5358 uint16_t waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL; 5359 uint16_t type; 5360 uint_t c_inner = entrypoint & SQ_CI; 5361 uint_t c_outer = entrypoint & SQ_CO; 5362 5363 /* 5364 * Increment ref count to keep closes out of this queue. 5365 */ 5366 ASSERT(sq); 5367 ASSERT(c_inner && c_outer); 5368 mutex_enter(SQLOCK(sq)); 5369 flags = sq->sq_flags; 5370 type = sq->sq_type; 5371 if (!(type & c_inner)) { 5372 /* Make sure all putcounts now use slowlock. */ 5373 count = sq->sq_count; 5374 SQ_PUTLOCKS_ENTER(sq); 5375 SQ_PUTCOUNT_CLRFAST_LOCKED(sq); 5376 SUM_SQ_PUTCOUNTS(sq, count); 5377 sq->sq_needexcl++; 5378 ASSERT(sq->sq_needexcl != 0); /* wraparound */ 5379 waitflags |= SQ_MESSAGES; 5380 } 5381 /* 5382 * Wait until we can enter the inner perimeter. 5383 * If we want exclusive access we wait until sq_count is 0. 5384 * We have to do this before entering the outer perimeter in order 5385 * to preserve put/close message ordering. 5386 */ 5387 while ((flags & waitflags) || (!(type & c_inner) && count != 0)) { 5388 sq->sq_flags = flags | SQ_WANTWAKEUP; 5389 if (!(type & c_inner)) { 5390 SQ_PUTLOCKS_EXIT(sq); 5391 } 5392 cv_wait(&sq->sq_wait, SQLOCK(sq)); 5393 if (!(type & c_inner)) { 5394 count = sq->sq_count; 5395 SQ_PUTLOCKS_ENTER(sq); 5396 SUM_SQ_PUTCOUNTS(sq, count); 5397 } 5398 flags = sq->sq_flags; 5399 } 5400 5401 if (!(type & c_inner)) { 5402 ASSERT(sq->sq_needexcl > 0); 5403 sq->sq_needexcl--; 5404 if (sq->sq_needexcl == 0) { 5405 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 5406 } 5407 } 5408 5409 /* Check if we need to enter the outer perimeter */ 5410 if (!(type & c_outer)) { 5411 /* 5412 * We have to enter the outer perimeter exclusively before 5413 * we can increment sq_count to avoid deadlock. This implies 5414 * that we have to re-check sq_flags and sq_count. 5415 * 5416 * is it possible to have c_inner set when c_outer is not set? 5417 */ 5418 if (!(type & c_inner)) { 5419 SQ_PUTLOCKS_EXIT(sq); 5420 } 5421 mutex_exit(SQLOCK(sq)); 5422 outer_enter(sq->sq_outer, SQ_GOAWAY); 5423 mutex_enter(SQLOCK(sq)); 5424 flags = sq->sq_flags; 5425 /* 5426 * there should be no need to recheck sq_putcounts 5427 * because outer_enter() has already waited for them to clear 5428 * after setting SQ_WRITER. 5429 */ 5430 count = sq->sq_count; 5431 #ifdef DEBUG 5432 /* 5433 * SUMCHECK_SQ_PUTCOUNTS should return the sum instead 5434 * of doing an ASSERT internally. Others should do 5435 * something like 5436 * ASSERT(SUMCHECK_SQ_PUTCOUNTS(sq) == 0); 5437 * without the need to #ifdef DEBUG it. 5438 */ 5439 SUMCHECK_SQ_PUTCOUNTS(sq, 0); 5440 #endif 5441 while ((flags & (SQ_EXCL|SQ_BLOCKED|SQ_FROZEN)) || 5442 (!(type & c_inner) && count != 0)) { 5443 sq->sq_flags = flags | SQ_WANTWAKEUP; 5444 cv_wait(&sq->sq_wait, SQLOCK(sq)); 5445 count = sq->sq_count; 5446 flags = sq->sq_flags; 5447 } 5448 } 5449 5450 sq->sq_count++; 5451 ASSERT(sq->sq_count != 0); /* Wraparound */ 5452 if (!(type & c_inner)) { 5453 /* Exclusive entry */ 5454 ASSERT(sq->sq_count == 1); 5455 sq->sq_flags |= SQ_EXCL; 5456 if (type & c_outer) { 5457 SQ_PUTLOCKS_EXIT(sq); 5458 } 5459 } 5460 mutex_exit(SQLOCK(sq)); 5461 } 5462 5463 /* 5464 * Leave a syncq. Announce to framework that closes may proceed. 5465 * c_inner and c_outer specify which concurrency bits to check. 5466 * 5467 * Must never be called from driver or module put entry point. 5468 * 5469 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5470 * sq_putlocks are used. 5471 */ 5472 void 5473 leavesq(syncq_t *sq, int entrypoint) 5474 { 5475 uint16_t flags; 5476 uint16_t type; 5477 uint_t c_outer = entrypoint & SQ_CO; 5478 #ifdef DEBUG 5479 uint_t c_inner = entrypoint & SQ_CI; 5480 #endif 5481 5482 /* 5483 * Decrement ref count, drain the syncq if possible, and wake up 5484 * any waiting close. 5485 */ 5486 ASSERT(sq); 5487 ASSERT(c_inner && c_outer); 5488 mutex_enter(SQLOCK(sq)); 5489 flags = sq->sq_flags; 5490 type = sq->sq_type; 5491 if (flags & (SQ_QUEUED|SQ_WANTWAKEUP|SQ_WANTEXWAKEUP)) { 5492 5493 if (flags & SQ_WANTWAKEUP) { 5494 flags &= ~SQ_WANTWAKEUP; 5495 cv_broadcast(&sq->sq_wait); 5496 } 5497 if (flags & SQ_WANTEXWAKEUP) { 5498 flags &= ~SQ_WANTEXWAKEUP; 5499 cv_broadcast(&sq->sq_exitwait); 5500 } 5501 5502 if ((flags & SQ_QUEUED) && !(flags & SQ_STAYAWAY)) { 5503 /* 5504 * The syncq needs to be drained. "Exit" the syncq 5505 * before calling drain_syncq. 5506 */ 5507 ASSERT(sq->sq_count != 0); 5508 sq->sq_count--; 5509 ASSERT((flags & SQ_EXCL) || (type & c_inner)); 5510 sq->sq_flags = flags & ~SQ_EXCL; 5511 drain_syncq(sq); 5512 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq))); 5513 /* Check if we need to exit the outer perimeter */ 5514 /* XXX will this ever be true? */ 5515 if (!(type & c_outer)) 5516 outer_exit(sq->sq_outer); 5517 return; 5518 } 5519 } 5520 ASSERT(sq->sq_count != 0); 5521 sq->sq_count--; 5522 ASSERT((flags & SQ_EXCL) || (type & c_inner)); 5523 sq->sq_flags = flags & ~SQ_EXCL; 5524 mutex_exit(SQLOCK(sq)); 5525 5526 /* Check if we need to exit the outer perimeter */ 5527 if (!(sq->sq_type & c_outer)) 5528 outer_exit(sq->sq_outer); 5529 } 5530 5531 /* 5532 * Prevent q_next from changing in this stream by incrementing sq_count. 5533 * 5534 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5535 * sq_putlocks are used. 5536 */ 5537 void 5538 claimq(queue_t *qp) 5539 { 5540 syncq_t *sq = qp->q_syncq; 5541 5542 mutex_enter(SQLOCK(sq)); 5543 sq->sq_count++; 5544 ASSERT(sq->sq_count != 0); /* Wraparound */ 5545 mutex_exit(SQLOCK(sq)); 5546 } 5547 5548 /* 5549 * Undo claimq. 5550 * 5551 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 5552 * sq_putlocks are used. 5553 */ 5554 void 5555 releaseq(queue_t *qp) 5556 { 5557 syncq_t *sq = qp->q_syncq; 5558 uint16_t flags; 5559 5560 mutex_enter(SQLOCK(sq)); 5561 ASSERT(sq->sq_count > 0); 5562 sq->sq_count--; 5563 5564 flags = sq->sq_flags; 5565 if (flags & (SQ_WANTWAKEUP|SQ_QUEUED)) { 5566 if (flags & SQ_WANTWAKEUP) { 5567 flags &= ~SQ_WANTWAKEUP; 5568 cv_broadcast(&sq->sq_wait); 5569 } 5570 sq->sq_flags = flags; 5571 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) { 5572 /* 5573 * To prevent potential recursive invocation of 5574 * drain_syncq we do not call drain_syncq if count is 5575 * non-zero. 5576 */ 5577 if (sq->sq_count == 0) { 5578 drain_syncq(sq); 5579 return; 5580 } else 5581 sqenable(sq); 5582 } 5583 } 5584 mutex_exit(SQLOCK(sq)); 5585 } 5586 5587 /* 5588 * Prevent q_next from changing in this stream by incrementing sd_refcnt. 5589 */ 5590 void 5591 claimstr(queue_t *qp) 5592 { 5593 struct stdata *stp = STREAM(qp); 5594 5595 mutex_enter(&stp->sd_reflock); 5596 stp->sd_refcnt++; 5597 ASSERT(stp->sd_refcnt != 0); /* Wraparound */ 5598 mutex_exit(&stp->sd_reflock); 5599 } 5600 5601 /* 5602 * Undo claimstr. 5603 */ 5604 void 5605 releasestr(queue_t *qp) 5606 { 5607 struct stdata *stp = STREAM(qp); 5608 5609 mutex_enter(&stp->sd_reflock); 5610 ASSERT(stp->sd_refcnt != 0); 5611 if (--stp->sd_refcnt == 0) 5612 cv_broadcast(&stp->sd_refmonitor); 5613 mutex_exit(&stp->sd_reflock); 5614 } 5615 5616 static syncq_t * 5617 new_syncq(void) 5618 { 5619 return (kmem_cache_alloc(syncq_cache, KM_SLEEP)); 5620 } 5621 5622 static void 5623 free_syncq(syncq_t *sq) 5624 { 5625 ASSERT(sq->sq_head == NULL); 5626 ASSERT(sq->sq_outer == NULL); 5627 ASSERT(sq->sq_callbpend == NULL); 5628 ASSERT((sq->sq_onext == NULL && sq->sq_oprev == NULL) || 5629 (sq->sq_onext == sq && sq->sq_oprev == sq)); 5630 5631 if (sq->sq_ciputctrl != NULL) { 5632 ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1); 5633 SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl, 5634 sq->sq_nciputctrl, 0); 5635 ASSERT(ciputctrl_cache != NULL); 5636 kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl); 5637 } 5638 5639 sq->sq_tail = NULL; 5640 sq->sq_evhead = NULL; 5641 sq->sq_evtail = NULL; 5642 sq->sq_ciputctrl = NULL; 5643 sq->sq_nciputctrl = 0; 5644 sq->sq_count = 0; 5645 sq->sq_rmqcount = 0; 5646 sq->sq_callbflags = 0; 5647 sq->sq_cancelid = 0; 5648 sq->sq_next = NULL; 5649 sq->sq_needexcl = 0; 5650 sq->sq_svcflags = 0; 5651 sq->sq_nqueues = 0; 5652 sq->sq_pri = 0; 5653 sq->sq_onext = NULL; 5654 sq->sq_oprev = NULL; 5655 sq->sq_flags = 0; 5656 sq->sq_type = 0; 5657 sq->sq_servcount = 0; 5658 5659 kmem_cache_free(syncq_cache, sq); 5660 } 5661 5662 /* Outer perimeter code */ 5663 5664 /* 5665 * The outer syncq uses the fields and flags in the syncq slightly 5666 * differently from the inner syncqs. 5667 * sq_count Incremented when there are pending or running 5668 * writers at the outer perimeter to prevent the set of 5669 * inner syncqs that belong to the outer perimeter from 5670 * changing. 5671 * sq_head/tail List of deferred qwriter(OUTER) operations. 5672 * 5673 * SQ_BLOCKED Set to prevent traversing of sq_next,sq_prev while 5674 * inner syncqs are added to or removed from the 5675 * outer perimeter. 5676 * SQ_QUEUED sq_head/tail has messages or events queued. 5677 * 5678 * SQ_WRITER A thread is currently traversing all the inner syncqs 5679 * setting the SQ_WRITER flag. 5680 */ 5681 5682 /* 5683 * Get write access at the outer perimeter. 5684 * Note that read access is done by entersq, putnext, and put by simply 5685 * incrementing sq_count in the inner syncq. 5686 * 5687 * Waits until "flags" is no longer set in the outer to prevent multiple 5688 * threads from having write access at the same time. SQ_WRITER has to be part 5689 * of "flags". 5690 * 5691 * Increases sq_count on the outer syncq to keep away outer_insert/remove 5692 * until the outer_exit is finished. 5693 * 5694 * outer_enter is vulnerable to starvation since it does not prevent new 5695 * threads from entering the inner syncqs while it is waiting for sq_count to 5696 * go to zero. 5697 */ 5698 void 5699 outer_enter(syncq_t *outer, uint16_t flags) 5700 { 5701 syncq_t *sq; 5702 int wait_needed; 5703 uint16_t count; 5704 5705 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5706 outer->sq_oprev != NULL); 5707 ASSERT(flags & SQ_WRITER); 5708 5709 retry: 5710 mutex_enter(SQLOCK(outer)); 5711 while (outer->sq_flags & flags) { 5712 outer->sq_flags |= SQ_WANTWAKEUP; 5713 cv_wait(&outer->sq_wait, SQLOCK(outer)); 5714 } 5715 5716 ASSERT(!(outer->sq_flags & SQ_WRITER)); 5717 outer->sq_flags |= SQ_WRITER; 5718 outer->sq_count++; 5719 ASSERT(outer->sq_count != 0); /* wraparound */ 5720 wait_needed = 0; 5721 /* 5722 * Set SQ_WRITER on all the inner syncqs while holding 5723 * the SQLOCK on the outer syncq. This ensures that the changing 5724 * of SQ_WRITER is atomic under the outer SQLOCK. 5725 */ 5726 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) { 5727 mutex_enter(SQLOCK(sq)); 5728 count = sq->sq_count; 5729 SQ_PUTLOCKS_ENTER(sq); 5730 sq->sq_flags |= SQ_WRITER; 5731 SUM_SQ_PUTCOUNTS(sq, count); 5732 if (count != 0) 5733 wait_needed = 1; 5734 SQ_PUTLOCKS_EXIT(sq); 5735 mutex_exit(SQLOCK(sq)); 5736 } 5737 mutex_exit(SQLOCK(outer)); 5738 5739 /* 5740 * Get everybody out of the syncqs sequentially. 5741 * Note that we don't actually need to acquire the PUTLOCKS, since 5742 * we have already cleared the fastbit, and set QWRITER. By 5743 * definition, the count can not increase since putnext will 5744 * take the slowlock path (and the purpose of acquiring the 5745 * putlocks was to make sure it didn't increase while we were 5746 * waiting). 5747 * 5748 * Note that we still acquire the PUTLOCKS to be safe. 5749 */ 5750 if (wait_needed) { 5751 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) { 5752 mutex_enter(SQLOCK(sq)); 5753 count = sq->sq_count; 5754 SQ_PUTLOCKS_ENTER(sq); 5755 SUM_SQ_PUTCOUNTS(sq, count); 5756 while (count != 0) { 5757 sq->sq_flags |= SQ_WANTWAKEUP; 5758 SQ_PUTLOCKS_EXIT(sq); 5759 cv_wait(&sq->sq_wait, SQLOCK(sq)); 5760 count = sq->sq_count; 5761 SQ_PUTLOCKS_ENTER(sq); 5762 SUM_SQ_PUTCOUNTS(sq, count); 5763 } 5764 SQ_PUTLOCKS_EXIT(sq); 5765 mutex_exit(SQLOCK(sq)); 5766 } 5767 /* 5768 * Verify that none of the flags got set while we 5769 * were waiting for the sq_counts to drop. 5770 * If this happens we exit and retry entering the 5771 * outer perimeter. 5772 */ 5773 mutex_enter(SQLOCK(outer)); 5774 if (outer->sq_flags & (flags & ~SQ_WRITER)) { 5775 mutex_exit(SQLOCK(outer)); 5776 outer_exit(outer); 5777 goto retry; 5778 } 5779 mutex_exit(SQLOCK(outer)); 5780 } 5781 } 5782 5783 /* 5784 * Drop the write access at the outer perimeter. 5785 * Read access is dropped implicitly (by putnext, put, and leavesq) by 5786 * decrementing sq_count. 5787 */ 5788 void 5789 outer_exit(syncq_t *outer) 5790 { 5791 syncq_t *sq; 5792 int drain_needed; 5793 uint16_t flags; 5794 5795 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5796 outer->sq_oprev != NULL); 5797 ASSERT(MUTEX_NOT_HELD(SQLOCK(outer))); 5798 5799 /* 5800 * Atomically (from the perspective of threads calling become_writer) 5801 * drop the write access at the outer perimeter by holding 5802 * SQLOCK(outer) across all the dropsq calls and the resetting of 5803 * SQ_WRITER. 5804 * This defines a locking order between the outer perimeter 5805 * SQLOCK and the inner perimeter SQLOCKs. 5806 */ 5807 mutex_enter(SQLOCK(outer)); 5808 flags = outer->sq_flags; 5809 ASSERT(outer->sq_flags & SQ_WRITER); 5810 if (flags & SQ_QUEUED) { 5811 write_now(outer); 5812 flags = outer->sq_flags; 5813 } 5814 5815 /* 5816 * sq_onext is stable since sq_count has not yet been decreased. 5817 * Reset the SQ_WRITER flags in all syncqs. 5818 * After dropping SQ_WRITER on the outer syncq we empty all the 5819 * inner syncqs. 5820 */ 5821 drain_needed = 0; 5822 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) 5823 drain_needed += dropsq(sq, SQ_WRITER); 5824 ASSERT(!(outer->sq_flags & SQ_QUEUED)); 5825 flags &= ~SQ_WRITER; 5826 if (drain_needed) { 5827 outer->sq_flags = flags; 5828 mutex_exit(SQLOCK(outer)); 5829 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) 5830 emptysq(sq); 5831 mutex_enter(SQLOCK(outer)); 5832 flags = outer->sq_flags; 5833 } 5834 if (flags & SQ_WANTWAKEUP) { 5835 flags &= ~SQ_WANTWAKEUP; 5836 cv_broadcast(&outer->sq_wait); 5837 } 5838 outer->sq_flags = flags; 5839 ASSERT(outer->sq_count > 0); 5840 outer->sq_count--; 5841 mutex_exit(SQLOCK(outer)); 5842 } 5843 5844 /* 5845 * Add another syncq to an outer perimeter. 5846 * Block out all other access to the outer perimeter while it is being 5847 * changed using blocksq. 5848 * Assumes that the caller has *not* done an outer_enter. 5849 * 5850 * Vulnerable to starvation in blocksq. 5851 */ 5852 static void 5853 outer_insert(syncq_t *outer, syncq_t *sq) 5854 { 5855 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5856 outer->sq_oprev != NULL); 5857 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL && 5858 sq->sq_oprev == NULL); /* Can't be in an outer perimeter */ 5859 5860 /* Get exclusive access to the outer perimeter list */ 5861 blocksq(outer, SQ_BLOCKED, 0); 5862 ASSERT(outer->sq_flags & SQ_BLOCKED); 5863 ASSERT(!(outer->sq_flags & SQ_WRITER)); 5864 5865 mutex_enter(SQLOCK(sq)); 5866 sq->sq_outer = outer; 5867 outer->sq_onext->sq_oprev = sq; 5868 sq->sq_onext = outer->sq_onext; 5869 outer->sq_onext = sq; 5870 sq->sq_oprev = outer; 5871 mutex_exit(SQLOCK(sq)); 5872 unblocksq(outer, SQ_BLOCKED, 1); 5873 } 5874 5875 /* 5876 * Remove a syncq from an outer perimeter. 5877 * Block out all other access to the outer perimeter while it is being 5878 * changed using blocksq. 5879 * Assumes that the caller has *not* done an outer_enter. 5880 * 5881 * Vulnerable to starvation in blocksq. 5882 */ 5883 static void 5884 outer_remove(syncq_t *outer, syncq_t *sq) 5885 { 5886 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5887 outer->sq_oprev != NULL); 5888 ASSERT(sq->sq_outer == outer); 5889 5890 /* Get exclusive access to the outer perimeter list */ 5891 blocksq(outer, SQ_BLOCKED, 0); 5892 ASSERT(outer->sq_flags & SQ_BLOCKED); 5893 ASSERT(!(outer->sq_flags & SQ_WRITER)); 5894 5895 mutex_enter(SQLOCK(sq)); 5896 sq->sq_outer = NULL; 5897 sq->sq_onext->sq_oprev = sq->sq_oprev; 5898 sq->sq_oprev->sq_onext = sq->sq_onext; 5899 sq->sq_oprev = sq->sq_onext = NULL; 5900 mutex_exit(SQLOCK(sq)); 5901 unblocksq(outer, SQ_BLOCKED, 1); 5902 } 5903 5904 /* 5905 * Queue a deferred qwriter(OUTER) callback for this outer perimeter. 5906 * If this is the first callback for this outer perimeter then add 5907 * this outer perimeter to the list of outer perimeters that 5908 * the qwriter_outer_thread will process. 5909 * 5910 * Increments sq_count in the outer syncq to prevent the membership 5911 * of the outer perimeter (in terms of inner syncqs) to change while 5912 * the callback is pending. 5913 */ 5914 static void 5915 queue_writer(syncq_t *outer, void (*func)(), queue_t *q, mblk_t *mp) 5916 { 5917 ASSERT(MUTEX_HELD(SQLOCK(outer))); 5918 5919 mp->b_prev = (mblk_t *)func; 5920 mp->b_queue = q; 5921 mp->b_next = NULL; 5922 outer->sq_count++; /* Decremented when dequeued */ 5923 ASSERT(outer->sq_count != 0); /* Wraparound */ 5924 if (outer->sq_evhead == NULL) { 5925 /* First message. */ 5926 outer->sq_evhead = outer->sq_evtail = mp; 5927 outer->sq_flags |= SQ_EVENTS; 5928 mutex_exit(SQLOCK(outer)); 5929 STRSTAT(qwr_outer); 5930 (void) taskq_dispatch(streams_taskq, 5931 (task_func_t *)qwriter_outer_service, outer, TQ_SLEEP); 5932 } else { 5933 ASSERT(outer->sq_flags & SQ_EVENTS); 5934 outer->sq_evtail->b_next = mp; 5935 outer->sq_evtail = mp; 5936 mutex_exit(SQLOCK(outer)); 5937 } 5938 } 5939 5940 /* 5941 * Try and upgrade to write access at the outer perimeter. If this can 5942 * not be done without blocking then queue the callback to be done 5943 * by the qwriter_outer_thread. 5944 * 5945 * This routine can only be called from put or service procedures plus 5946 * asynchronous callback routines that have properly entered the queue (with 5947 * entersq). Thus qwriter(OUTER) assumes the caller has one claim on the syncq 5948 * associated with q. 5949 */ 5950 void 5951 qwriter_outer(queue_t *q, mblk_t *mp, void (*func)()) 5952 { 5953 syncq_t *osq, *sq, *outer; 5954 int failed; 5955 uint16_t flags; 5956 5957 osq = q->q_syncq; 5958 outer = osq->sq_outer; 5959 if (outer == NULL) 5960 panic("qwriter(PERIM_OUTER): no outer perimeter"); 5961 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5962 outer->sq_oprev != NULL); 5963 5964 mutex_enter(SQLOCK(outer)); 5965 flags = outer->sq_flags; 5966 /* 5967 * If some thread is traversing sq_next, or if we are blocked by 5968 * outer_insert or outer_remove, or if the we already have queued 5969 * callbacks, then queue this callback for later processing. 5970 * 5971 * Also queue the qwriter for an interrupt thread in order 5972 * to reduce the time spent running at high IPL. 5973 * to identify there are events. 5974 */ 5975 if ((flags & SQ_GOAWAY) || (curthread->t_pri >= kpreemptpri)) { 5976 /* 5977 * Queue the become_writer request. 5978 * The queueing is atomic under SQLOCK(outer) in order 5979 * to synchronize with outer_exit. 5980 * queue_writer will drop the outer SQLOCK 5981 */ 5982 if (flags & SQ_BLOCKED) { 5983 /* Must set SQ_WRITER on inner perimeter */ 5984 mutex_enter(SQLOCK(osq)); 5985 osq->sq_flags |= SQ_WRITER; 5986 mutex_exit(SQLOCK(osq)); 5987 } else { 5988 if (!(flags & SQ_WRITER)) { 5989 /* 5990 * The outer could have been SQ_BLOCKED thus 5991 * SQ_WRITER might not be set on the inner. 5992 */ 5993 mutex_enter(SQLOCK(osq)); 5994 osq->sq_flags |= SQ_WRITER; 5995 mutex_exit(SQLOCK(osq)); 5996 } 5997 ASSERT(osq->sq_flags & SQ_WRITER); 5998 } 5999 queue_writer(outer, func, q, mp); 6000 return; 6001 } 6002 /* 6003 * We are half-way to exclusive access to the outer perimeter. 6004 * Prevent any outer_enter, qwriter(OUTER), or outer_insert/remove 6005 * while the inner syncqs are traversed. 6006 */ 6007 outer->sq_count++; 6008 ASSERT(outer->sq_count != 0); /* wraparound */ 6009 flags |= SQ_WRITER; 6010 /* 6011 * Check if we can run the function immediately. Mark all 6012 * syncqs with the writer flag to prevent new entries into 6013 * put and service procedures. 6014 * 6015 * Set SQ_WRITER on all the inner syncqs while holding 6016 * the SQLOCK on the outer syncq. This ensures that the changing 6017 * of SQ_WRITER is atomic under the outer SQLOCK. 6018 */ 6019 failed = 0; 6020 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) { 6021 uint16_t count; 6022 uint_t maxcnt = (sq == osq) ? 1 : 0; 6023 6024 mutex_enter(SQLOCK(sq)); 6025 count = sq->sq_count; 6026 SQ_PUTLOCKS_ENTER(sq); 6027 SUM_SQ_PUTCOUNTS(sq, count); 6028 if (sq->sq_count > maxcnt) 6029 failed = 1; 6030 sq->sq_flags |= SQ_WRITER; 6031 SQ_PUTLOCKS_EXIT(sq); 6032 mutex_exit(SQLOCK(sq)); 6033 } 6034 if (failed) { 6035 /* 6036 * Some other thread has a read claim on the outer perimeter. 6037 * Queue the callback for deferred processing. 6038 * 6039 * queue_writer will set SQ_QUEUED before we drop SQ_WRITER 6040 * so that other qwriter(OUTER) calls will queue their 6041 * callbacks as well. queue_writer increments sq_count so we 6042 * decrement to compensate for the our increment. 6043 * 6044 * Dropping SQ_WRITER enables the writer thread to work 6045 * on this outer perimeter. 6046 */ 6047 outer->sq_flags = flags; 6048 queue_writer(outer, func, q, mp); 6049 /* queue_writer dropper the lock */ 6050 mutex_enter(SQLOCK(outer)); 6051 ASSERT(outer->sq_count > 0); 6052 outer->sq_count--; 6053 ASSERT(outer->sq_flags & SQ_WRITER); 6054 flags = outer->sq_flags; 6055 flags &= ~SQ_WRITER; 6056 if (flags & SQ_WANTWAKEUP) { 6057 flags &= ~SQ_WANTWAKEUP; 6058 cv_broadcast(&outer->sq_wait); 6059 } 6060 outer->sq_flags = flags; 6061 mutex_exit(SQLOCK(outer)); 6062 return; 6063 } else { 6064 outer->sq_flags = flags; 6065 mutex_exit(SQLOCK(outer)); 6066 } 6067 6068 /* Can run it immediately */ 6069 (*func)(q, mp); 6070 6071 outer_exit(outer); 6072 } 6073 6074 /* 6075 * Dequeue all writer callbacks from the outer perimeter and run them. 6076 */ 6077 static void 6078 write_now(syncq_t *outer) 6079 { 6080 mblk_t *mp; 6081 queue_t *q; 6082 void (*func)(); 6083 6084 ASSERT(MUTEX_HELD(SQLOCK(outer))); 6085 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 6086 outer->sq_oprev != NULL); 6087 while ((mp = outer->sq_evhead) != NULL) { 6088 /* 6089 * queues cannot be placed on the queuelist on the outer 6090 * perimeter. 6091 */ 6092 ASSERT(!(outer->sq_flags & SQ_MESSAGES)); 6093 ASSERT((outer->sq_flags & SQ_EVENTS)); 6094 6095 outer->sq_evhead = mp->b_next; 6096 if (outer->sq_evhead == NULL) { 6097 outer->sq_evtail = NULL; 6098 outer->sq_flags &= ~SQ_EVENTS; 6099 } 6100 ASSERT(outer->sq_count != 0); 6101 outer->sq_count--; /* Incremented when enqueued. */ 6102 mutex_exit(SQLOCK(outer)); 6103 /* 6104 * Drop the message if the queue is closing. 6105 * Make sure that the queue is "claimed" when the callback 6106 * is run in order to satisfy various ASSERTs. 6107 */ 6108 q = mp->b_queue; 6109 func = (void (*)())mp->b_prev; 6110 ASSERT(func != NULL); 6111 mp->b_next = mp->b_prev = NULL; 6112 if (q->q_flag & QWCLOSE) { 6113 freemsg(mp); 6114 } else { 6115 claimq(q); 6116 (*func)(q, mp); 6117 releaseq(q); 6118 } 6119 mutex_enter(SQLOCK(outer)); 6120 } 6121 ASSERT(MUTEX_HELD(SQLOCK(outer))); 6122 } 6123 6124 /* 6125 * The list of messages on the inner syncq is effectively hashed 6126 * by destination queue. These destination queues are doubly 6127 * linked lists (hopefully) in priority order. Messages are then 6128 * put on the queue referenced by the q_sqhead/q_sqtail elements. 6129 * Additional messages are linked together by the b_next/b_prev 6130 * elements in the mblk, with (similar to putq()) the first message 6131 * having a NULL b_prev and the last message having a NULL b_next. 6132 * 6133 * Events, such as qwriter callbacks, are put onto a list in FIFO 6134 * order referenced by sq_evhead, and sq_evtail. This is a singly 6135 * linked list, and messages here MUST be processed in the order queued. 6136 */ 6137 6138 /* 6139 * Run the events on the syncq event list (sq_evhead). 6140 * Assumes there is only one claim on the syncq, it is 6141 * already exclusive (SQ_EXCL set), and the SQLOCK held. 6142 * Messages here are processed in order, with the SQ_EXCL bit 6143 * held all the way through till the last message is processed. 6144 */ 6145 void 6146 sq_run_events(syncq_t *sq) 6147 { 6148 mblk_t *bp; 6149 queue_t *qp; 6150 uint16_t flags = sq->sq_flags; 6151 void (*func)(); 6152 6153 ASSERT(MUTEX_HELD(SQLOCK(sq))); 6154 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL && 6155 sq->sq_oprev == NULL) || 6156 (sq->sq_outer != NULL && sq->sq_onext != NULL && 6157 sq->sq_oprev != NULL)); 6158 6159 ASSERT(flags & SQ_EXCL); 6160 ASSERT(sq->sq_count == 1); 6161 6162 /* 6163 * We need to process all of the events on this list. It 6164 * is possible that new events will be added while we are 6165 * away processing a callback, so on every loop, we start 6166 * back at the beginning of the list. 6167 */ 6168 /* 6169 * We have to reaccess sq_evhead since there is a 6170 * possibility of a new entry while we were running 6171 * the callback. 6172 */ 6173 for (bp = sq->sq_evhead; bp != NULL; bp = sq->sq_evhead) { 6174 ASSERT(bp->b_queue->q_syncq == sq); 6175 ASSERT(sq->sq_flags & SQ_EVENTS); 6176 6177 qp = bp->b_queue; 6178 func = (void (*)())bp->b_prev; 6179 ASSERT(func != NULL); 6180 6181 /* 6182 * Messages from the event queue must be taken off in 6183 * FIFO order. 6184 */ 6185 ASSERT(sq->sq_evhead == bp); 6186 sq->sq_evhead = bp->b_next; 6187 6188 if (bp->b_next == NULL) { 6189 /* Deleting last */ 6190 ASSERT(sq->sq_evtail == bp); 6191 sq->sq_evtail = NULL; 6192 sq->sq_flags &= ~SQ_EVENTS; 6193 } 6194 bp->b_prev = bp->b_next = NULL; 6195 ASSERT(bp->b_datap->db_ref != 0); 6196 6197 mutex_exit(SQLOCK(sq)); 6198 6199 (*func)(qp, bp); 6200 6201 mutex_enter(SQLOCK(sq)); 6202 /* 6203 * re-read the flags, since they could have changed. 6204 */ 6205 flags = sq->sq_flags; 6206 ASSERT(flags & SQ_EXCL); 6207 } 6208 ASSERT(sq->sq_evhead == NULL && sq->sq_evtail == NULL); 6209 ASSERT(!(sq->sq_flags & SQ_EVENTS)); 6210 6211 if (flags & SQ_WANTWAKEUP) { 6212 flags &= ~SQ_WANTWAKEUP; 6213 cv_broadcast(&sq->sq_wait); 6214 } 6215 if (flags & SQ_WANTEXWAKEUP) { 6216 flags &= ~SQ_WANTEXWAKEUP; 6217 cv_broadcast(&sq->sq_exitwait); 6218 } 6219 sq->sq_flags = flags; 6220 } 6221 6222 /* 6223 * Put messages on the event list. 6224 * If we can go exclusive now, do so and process the event list, otherwise 6225 * let the last claim service this list (or wake the sqthread). 6226 * This procedure assumes SQLOCK is held. To run the event list, it 6227 * must be called with no claims. 6228 */ 6229 static void 6230 sqfill_events(syncq_t *sq, queue_t *q, mblk_t *mp, void (*func)()) 6231 { 6232 uint16_t count; 6233 6234 ASSERT(MUTEX_HELD(SQLOCK(sq))); 6235 ASSERT(func != NULL); 6236 6237 /* 6238 * This is a callback. Add it to the list of callbacks 6239 * and see about upgrading. 6240 */ 6241 mp->b_prev = (mblk_t *)func; 6242 mp->b_queue = q; 6243 mp->b_next = NULL; 6244 if (sq->sq_evhead == NULL) { 6245 sq->sq_evhead = sq->sq_evtail = mp; 6246 sq->sq_flags |= SQ_EVENTS; 6247 } else { 6248 ASSERT(sq->sq_evtail != NULL); 6249 ASSERT(sq->sq_evtail->b_next == NULL); 6250 ASSERT(sq->sq_flags & SQ_EVENTS); 6251 sq->sq_evtail->b_next = mp; 6252 sq->sq_evtail = mp; 6253 } 6254 /* 6255 * We have set SQ_EVENTS, so threads will have to 6256 * unwind out of the perimeter, and new entries will 6257 * not grab a putlock. But we still need to know 6258 * how many threads have already made a claim to the 6259 * syncq, so grab the putlocks, and sum the counts. 6260 * If there are no claims on the syncq, we can upgrade 6261 * to exclusive, and run the event list. 6262 * NOTE: We hold the SQLOCK, so we can just grab the 6263 * putlocks. 6264 */ 6265 count = sq->sq_count; 6266 SQ_PUTLOCKS_ENTER(sq); 6267 SUM_SQ_PUTCOUNTS(sq, count); 6268 /* 6269 * We have no claim, so we need to check if there 6270 * are no others, then we can upgrade. 6271 */ 6272 /* 6273 * There are currently no claims on 6274 * the syncq by this thread (at least on this entry). The thread who has 6275 * the claim should drain syncq. 6276 */ 6277 if (count > 0) { 6278 /* 6279 * Can't upgrade - other threads inside. 6280 */ 6281 SQ_PUTLOCKS_EXIT(sq); 6282 mutex_exit(SQLOCK(sq)); 6283 return; 6284 } 6285 /* 6286 * Need to set SQ_EXCL and make a claim on the syncq. 6287 */ 6288 ASSERT((sq->sq_flags & SQ_EXCL) == 0); 6289 sq->sq_flags |= SQ_EXCL; 6290 ASSERT(sq->sq_count == 0); 6291 sq->sq_count++; 6292 SQ_PUTLOCKS_EXIT(sq); 6293 6294 /* Process the events list */ 6295 sq_run_events(sq); 6296 6297 /* 6298 * Release our claim... 6299 */ 6300 sq->sq_count--; 6301 6302 /* 6303 * And release SQ_EXCL. 6304 * We don't need to acquire the putlocks to release 6305 * SQ_EXCL, since we are exclusive, and hold the SQLOCK. 6306 */ 6307 sq->sq_flags &= ~SQ_EXCL; 6308 6309 /* 6310 * sq_run_events should have released SQ_EXCL 6311 */ 6312 ASSERT(!(sq->sq_flags & SQ_EXCL)); 6313 6314 /* 6315 * If anything happened while we were running the 6316 * events (or was there before), we need to process 6317 * them now. We shouldn't be exclusive sine we 6318 * released the perimeter above (plus, we asserted 6319 * for it). 6320 */ 6321 if (!(sq->sq_flags & SQ_STAYAWAY) && (sq->sq_flags & SQ_QUEUED)) 6322 drain_syncq(sq); 6323 else 6324 mutex_exit(SQLOCK(sq)); 6325 } 6326 6327 /* 6328 * Perform delayed processing. The caller has to make sure that it is safe 6329 * to enter the syncq (e.g. by checking that none of the SQ_STAYAWAY bits are 6330 * set). 6331 * 6332 * Assume that the caller has NO claims on the syncq. However, a claim 6333 * on the syncq does not indicate that a thread is draining the syncq. 6334 * There may be more claims on the syncq than there are threads draining 6335 * (i.e. #_threads_draining <= sq_count) 6336 * 6337 * drain_syncq has to terminate when one of the SQ_STAYAWAY bits gets set 6338 * in order to preserve qwriter(OUTER) ordering constraints. 6339 * 6340 * sq_putcount only needs to be checked when dispatching the queued 6341 * writer call for CIPUT sync queue, but this is handled in sq_run_events. 6342 */ 6343 void 6344 drain_syncq(syncq_t *sq) 6345 { 6346 queue_t *qp; 6347 uint16_t count; 6348 uint16_t type = sq->sq_type; 6349 uint16_t flags = sq->sq_flags; 6350 boolean_t bg_service = sq->sq_svcflags & SQ_SERVICE; 6351 6352 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START, 6353 "drain_syncq start:%p", sq); 6354 ASSERT(MUTEX_HELD(SQLOCK(sq))); 6355 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL && 6356 sq->sq_oprev == NULL) || 6357 (sq->sq_outer != NULL && sq->sq_onext != NULL && 6358 sq->sq_oprev != NULL)); 6359 6360 /* 6361 * Drop SQ_SERVICE flag. 6362 */ 6363 if (bg_service) 6364 sq->sq_svcflags &= ~SQ_SERVICE; 6365 6366 /* 6367 * If SQ_EXCL is set, someone else is processing this syncq - let him 6368 * finish the job. 6369 */ 6370 if (flags & SQ_EXCL) { 6371 if (bg_service) { 6372 ASSERT(sq->sq_servcount != 0); 6373 sq->sq_servcount--; 6374 } 6375 mutex_exit(SQLOCK(sq)); 6376 return; 6377 } 6378 6379 /* 6380 * This routine can be called by a background thread if 6381 * it was scheduled by a hi-priority thread. SO, if there are 6382 * NOT messages queued, return (remember, we have the SQLOCK, 6383 * and it cannot change until we release it). Wakeup any waiters also. 6384 */ 6385 if (!(flags & SQ_QUEUED)) { 6386 if (flags & SQ_WANTWAKEUP) { 6387 flags &= ~SQ_WANTWAKEUP; 6388 cv_broadcast(&sq->sq_wait); 6389 } 6390 if (flags & SQ_WANTEXWAKEUP) { 6391 flags &= ~SQ_WANTEXWAKEUP; 6392 cv_broadcast(&sq->sq_exitwait); 6393 } 6394 sq->sq_flags = flags; 6395 if (bg_service) { 6396 ASSERT(sq->sq_servcount != 0); 6397 sq->sq_servcount--; 6398 } 6399 mutex_exit(SQLOCK(sq)); 6400 return; 6401 } 6402 6403 /* 6404 * If this is not a concurrent put perimeter, we need to 6405 * become exclusive to drain. Also, if not CIPUT, we would 6406 * not have acquired a putlock, so we don't need to check 6407 * the putcounts. If not entering with a claim, we test 6408 * for sq_count == 0. 6409 */ 6410 type = sq->sq_type; 6411 if (!(type & SQ_CIPUT)) { 6412 if (sq->sq_count > 1) { 6413 if (bg_service) { 6414 ASSERT(sq->sq_servcount != 0); 6415 sq->sq_servcount--; 6416 } 6417 mutex_exit(SQLOCK(sq)); 6418 return; 6419 } 6420 sq->sq_flags |= SQ_EXCL; 6421 } 6422 6423 /* 6424 * This is where we make a claim to the syncq. 6425 * This can either be done by incrementing a putlock, or 6426 * the sq_count. But since we already have the SQLOCK 6427 * here, we just bump the sq_count. 6428 * 6429 * Note that after we make a claim, we need to let the code 6430 * fall through to the end of this routine to clean itself 6431 * up. A return in the while loop will put the syncq in a 6432 * very bad state. 6433 */ 6434 sq->sq_count++; 6435 ASSERT(sq->sq_count != 0); /* wraparound */ 6436 6437 while ((flags = sq->sq_flags) & SQ_QUEUED) { 6438 /* 6439 * If we are told to stayaway or went exclusive, 6440 * we are done. 6441 */ 6442 if (flags & (SQ_STAYAWAY)) { 6443 break; 6444 } 6445 6446 /* 6447 * If there are events to run, do so. 6448 * We have one claim to the syncq, so if there are 6449 * more than one, other threads are running. 6450 */ 6451 if (sq->sq_evhead != NULL) { 6452 ASSERT(sq->sq_flags & SQ_EVENTS); 6453 6454 count = sq->sq_count; 6455 SQ_PUTLOCKS_ENTER(sq); 6456 SUM_SQ_PUTCOUNTS(sq, count); 6457 if (count > 1) { 6458 SQ_PUTLOCKS_EXIT(sq); 6459 /* Can't upgrade - other threads inside */ 6460 break; 6461 } 6462 ASSERT((flags & SQ_EXCL) == 0); 6463 sq->sq_flags = flags | SQ_EXCL; 6464 SQ_PUTLOCKS_EXIT(sq); 6465 /* 6466 * we have the only claim, run the events, 6467 * sq_run_events will clear the SQ_EXCL flag. 6468 */ 6469 sq_run_events(sq); 6470 6471 /* 6472 * If this is a CIPUT perimeter, we need 6473 * to drop the SQ_EXCL flag so we can properly 6474 * continue draining the syncq. 6475 */ 6476 if (type & SQ_CIPUT) { 6477 ASSERT(sq->sq_flags & SQ_EXCL); 6478 sq->sq_flags &= ~SQ_EXCL; 6479 } 6480 6481 /* 6482 * And go back to the beginning just in case 6483 * anything changed while we were away. 6484 */ 6485 ASSERT((sq->sq_flags & SQ_EXCL) || (type & SQ_CIPUT)); 6486 continue; 6487 } 6488 6489 ASSERT(sq->sq_evhead == NULL); 6490 ASSERT(!(sq->sq_flags & SQ_EVENTS)); 6491 6492 /* 6493 * Find the queue that is not draining. 6494 * 6495 * q_draining is protected by QLOCK which we do not hold. 6496 * But if it was set, then a thread was draining, and if it gets 6497 * cleared, then it was because the thread has successfully 6498 * drained the syncq, or a GOAWAY state occurred. For the GOAWAY 6499 * state to happen, a thread needs the SQLOCK which we hold, and 6500 * if there was such a flag, we would have already seen it. 6501 */ 6502 6503 for (qp = sq->sq_head; 6504 qp != NULL && (qp->q_draining || 6505 (qp->q_sqflags & Q_SQDRAINING)); 6506 qp = qp->q_sqnext) 6507 ; 6508 6509 if (qp == NULL) 6510 break; 6511 6512 /* 6513 * We have a queue to work on, and we hold the 6514 * SQLOCK and one claim, call qdrain_syncq. 6515 * This means we need to release the SQLOCK and 6516 * acquire the QLOCK (OK since we have a claim). 6517 * Note that qdrain_syncq will actually dequeue 6518 * this queue from the sq_head list when it is 6519 * convinced all the work is done and release 6520 * the QLOCK before returning. 6521 */ 6522 qp->q_sqflags |= Q_SQDRAINING; 6523 mutex_exit(SQLOCK(sq)); 6524 mutex_enter(QLOCK(qp)); 6525 qdrain_syncq(sq, qp); 6526 mutex_enter(SQLOCK(sq)); 6527 6528 /* The queue is drained */ 6529 ASSERT(qp->q_sqflags & Q_SQDRAINING); 6530 qp->q_sqflags &= ~Q_SQDRAINING; 6531 /* 6532 * NOTE: After this point qp should not be used since it may be 6533 * closed. 6534 */ 6535 } 6536 6537 ASSERT(MUTEX_HELD(SQLOCK(sq))); 6538 flags = sq->sq_flags; 6539 6540 /* 6541 * sq->sq_head cannot change because we hold the 6542 * sqlock. However, a thread CAN decide that it is no longer 6543 * going to drain that queue. However, this should be due to 6544 * a GOAWAY state, and we should see that here. 6545 * 6546 * This loop is not very efficient. One solution may be adding a second 6547 * pointer to the "draining" queue, but it is difficult to do when 6548 * queues are inserted in the middle due to priority ordering. Another 6549 * possibility is to yank the queue out of the sq list and put it onto 6550 * the "draining list" and then put it back if it can't be drained. 6551 */ 6552 6553 ASSERT((sq->sq_head == NULL) || (flags & SQ_GOAWAY) || 6554 (type & SQ_CI) || sq->sq_head->q_draining); 6555 6556 /* Drop SQ_EXCL for non-CIPUT perimeters */ 6557 if (!(type & SQ_CIPUT)) 6558 flags &= ~SQ_EXCL; 6559 ASSERT((flags & SQ_EXCL) == 0); 6560 6561 /* Wake up any waiters. */ 6562 if (flags & SQ_WANTWAKEUP) { 6563 flags &= ~SQ_WANTWAKEUP; 6564 cv_broadcast(&sq->sq_wait); 6565 } 6566 if (flags & SQ_WANTEXWAKEUP) { 6567 flags &= ~SQ_WANTEXWAKEUP; 6568 cv_broadcast(&sq->sq_exitwait); 6569 } 6570 sq->sq_flags = flags; 6571 6572 ASSERT(sq->sq_count != 0); 6573 /* Release our claim. */ 6574 sq->sq_count--; 6575 6576 if (bg_service) { 6577 ASSERT(sq->sq_servcount != 0); 6578 sq->sq_servcount--; 6579 } 6580 6581 mutex_exit(SQLOCK(sq)); 6582 6583 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END, 6584 "drain_syncq end:%p", sq); 6585 } 6586 6587 6588 /* 6589 * 6590 * qdrain_syncq can be called (currently) from only one of two places: 6591 * drain_syncq 6592 * putnext (or some variation of it). 6593 * and eventually 6594 * qwait(_sig) 6595 * 6596 * If called from drain_syncq, we found it in the list of queues needing 6597 * service, so there is work to be done (or it wouldn't be in the list). 6598 * 6599 * If called from some putnext variation, it was because the 6600 * perimeter is open, but messages are blocking a putnext and 6601 * there is not a thread working on it. Now a thread could start 6602 * working on it while we are getting ready to do so ourself, but 6603 * the thread would set the q_draining flag, and we can spin out. 6604 * 6605 * As for qwait(_sig), I think I shall let it continue to call 6606 * drain_syncq directly (after all, it will get here eventually). 6607 * 6608 * qdrain_syncq has to terminate when: 6609 * - one of the SQ_STAYAWAY bits gets set to preserve qwriter(OUTER) ordering 6610 * - SQ_EVENTS gets set to preserve qwriter(INNER) ordering 6611 * 6612 * ASSUMES: 6613 * One claim 6614 * QLOCK held 6615 * SQLOCK not held 6616 * Will release QLOCK before returning 6617 */ 6618 void 6619 qdrain_syncq(syncq_t *sq, queue_t *q) 6620 { 6621 mblk_t *bp; 6622 #ifdef DEBUG 6623 uint16_t count; 6624 #endif 6625 6626 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START, 6627 "drain_syncq start:%p", sq); 6628 ASSERT(q->q_syncq == sq); 6629 ASSERT(MUTEX_HELD(QLOCK(q))); 6630 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq))); 6631 /* 6632 * For non-CIPUT perimeters, we should be called with the exclusive bit 6633 * set already. For CIPUT perimeters, we will be doing a concurrent 6634 * drain, so it better not be set. 6635 */ 6636 ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT))); 6637 ASSERT(!((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL))); 6638 ASSERT((sq->sq_type & SQ_CIPUT) || (sq->sq_flags & SQ_EXCL)); 6639 /* 6640 * All outer pointers are set, or none of them are 6641 */ 6642 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL && 6643 sq->sq_oprev == NULL) || 6644 (sq->sq_outer != NULL && sq->sq_onext != NULL && 6645 sq->sq_oprev != NULL)); 6646 #ifdef DEBUG 6647 count = sq->sq_count; 6648 /* 6649 * This is OK without the putlocks, because we have one 6650 * claim either from the sq_count, or a putcount. We could 6651 * get an erroneous value from other counts, but ours won't 6652 * change, so one way or another, we will have at least a 6653 * value of one. 6654 */ 6655 SUM_SQ_PUTCOUNTS(sq, count); 6656 ASSERT(count >= 1); 6657 #endif /* DEBUG */ 6658 6659 /* 6660 * The first thing to do is find out if a thread is already draining 6661 * this queue. If so, we are done, just return. 6662 */ 6663 if (q->q_draining) { 6664 mutex_exit(QLOCK(q)); 6665 return; 6666 } 6667 6668 /* 6669 * If the perimeter is exclusive, there is nothing we can do right now, 6670 * go away. Note that there is nothing to prevent this case from 6671 * changing right after this check, but the spin-out will catch it. 6672 */ 6673 6674 /* Tell other threads that we are draining this queue */ 6675 q->q_draining = 1; /* Protected by QLOCK */ 6676 6677 /* 6678 * If there is nothing to do, clear QFULL as necessary. This caters for 6679 * the case where an empty queue was enqueued onto the syncq. 6680 */ 6681 if (q->q_sqhead == NULL) { 6682 ASSERT(q->q_syncqmsgs == 0); 6683 mutex_exit(QLOCK(q)); 6684 clr_qfull(q); 6685 mutex_enter(QLOCK(q)); 6686 } 6687 6688 /* 6689 * Note that q_sqhead must be re-checked here in case another message 6690 * was enqueued whilst QLOCK was dropped during the call to clr_qfull. 6691 */ 6692 for (bp = q->q_sqhead; bp != NULL; bp = q->q_sqhead) { 6693 /* 6694 * Because we can enter this routine just because a putnext is 6695 * blocked, we need to spin out if the perimeter wants to go 6696 * exclusive as well as just blocked. We need to spin out also 6697 * if events are queued on the syncq. 6698 * Don't check for SQ_EXCL, because non-CIPUT perimeters would 6699 * set it, and it can't become exclusive while we hold a claim. 6700 */ 6701 if (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS)) { 6702 break; 6703 } 6704 6705 #ifdef DEBUG 6706 /* 6707 * Since we are in qdrain_syncq, we already know the queue, 6708 * but for sanity, we want to check this against the qp that 6709 * was passed in by bp->b_queue. 6710 */ 6711 6712 ASSERT(bp->b_queue == q); 6713 ASSERT(bp->b_queue->q_syncq == sq); 6714 bp->b_queue = NULL; 6715 6716 /* 6717 * We would have the following check in the DEBUG code: 6718 * 6719 * if (bp->b_prev != NULL) { 6720 * ASSERT(bp->b_prev == (void (*)())q->q_qinfo->qi_putp); 6721 * } 6722 * 6723 * This can't be done, however, since IP modifies qinfo 6724 * structure at run-time (switching between IPv4 qinfo and IPv6 6725 * qinfo), invalidating the check. 6726 * So the assignment to func is left here, but the ASSERT itself 6727 * is removed until the whole issue is resolved. 6728 */ 6729 #endif 6730 ASSERT(q->q_sqhead == bp); 6731 q->q_sqhead = bp->b_next; 6732 bp->b_prev = bp->b_next = NULL; 6733 ASSERT(q->q_syncqmsgs > 0); 6734 mutex_exit(QLOCK(q)); 6735 6736 ASSERT(bp->b_datap->db_ref != 0); 6737 6738 (void) (*q->q_qinfo->qi_putp)(q, bp); 6739 6740 mutex_enter(QLOCK(q)); 6741 6742 /* 6743 * q_syncqmsgs should only be decremented after executing the 6744 * put procedure to avoid message re-ordering. This is due to an 6745 * optimisation in putnext() which can call the put procedure 6746 * directly if it sees q_syncqmsgs == 0 (despite Q_SQQUEUED 6747 * being set). 6748 * 6749 * We also need to clear QFULL in the next service procedure 6750 * queue if this is the last message destined for that queue. 6751 * 6752 * It would make better sense to have some sort of tunable for 6753 * the low water mark, but these semantics are not yet defined. 6754 * So, alas, we use a constant. 6755 */ 6756 if (--q->q_syncqmsgs == 0) { 6757 mutex_exit(QLOCK(q)); 6758 clr_qfull(q); 6759 mutex_enter(QLOCK(q)); 6760 } 6761 6762 /* 6763 * Always clear SQ_EXCL when CIPUT in order to handle 6764 * qwriter(INNER). The putp() can call qwriter and get exclusive 6765 * access IFF this is the only claim. So, we need to test for 6766 * this possibility, acquire the mutex and clear the bit. 6767 */ 6768 if ((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL)) { 6769 mutex_enter(SQLOCK(sq)); 6770 sq->sq_flags &= ~SQ_EXCL; 6771 mutex_exit(SQLOCK(sq)); 6772 } 6773 } 6774 6775 /* 6776 * We should either have no messages on this queue, or we were told to 6777 * goaway by a waiter (which we will wake up at the end of this 6778 * function). 6779 */ 6780 ASSERT((q->q_sqhead == NULL) || 6781 (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS))); 6782 6783 ASSERT(MUTEX_HELD(QLOCK(q))); 6784 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq))); 6785 6786 /* Remove the q from the syncq list if all the messages are drained. */ 6787 if (q->q_sqhead == NULL) { 6788 ASSERT(q->q_syncqmsgs == 0); 6789 mutex_enter(SQLOCK(sq)); 6790 if (q->q_sqflags & Q_SQQUEUED) 6791 SQRM_Q(sq, q); 6792 mutex_exit(SQLOCK(sq)); 6793 /* 6794 * Since the queue is removed from the list, reset its priority. 6795 */ 6796 q->q_spri = 0; 6797 } 6798 6799 /* 6800 * Remember, the q_draining flag is used to let another thread know 6801 * that there is a thread currently draining the messages for a queue. 6802 * Since we are now done with this queue (even if there may be messages 6803 * still there), we need to clear this flag so some thread will work on 6804 * it if needed. 6805 */ 6806 ASSERT(q->q_draining); 6807 q->q_draining = 0; 6808 6809 /* Called with a claim, so OK to drop all locks. */ 6810 mutex_exit(QLOCK(q)); 6811 6812 TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END, 6813 "drain_syncq end:%p", sq); 6814 } 6815 /* END OF QDRAIN_SYNCQ */ 6816 6817 6818 /* 6819 * This is the mate to qdrain_syncq, except that it is putting the message onto 6820 * the queue instead of draining. Since the message is destined for the queue 6821 * that is selected, there is no need to identify the function because the 6822 * message is intended for the put routine for the queue. For debug kernels, 6823 * this routine will do it anyway just in case. 6824 * 6825 * After the message is enqueued on the syncq, it calls putnext_tail() 6826 * which will schedule a background thread to actually process the message. 6827 * 6828 * Assumes that there is a claim on the syncq (sq->sq_count > 0) and 6829 * SQLOCK(sq) and QLOCK(q) are not held. 6830 */ 6831 void 6832 qfill_syncq(syncq_t *sq, queue_t *q, mblk_t *mp) 6833 { 6834 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq))); 6835 ASSERT(MUTEX_NOT_HELD(QLOCK(q))); 6836 ASSERT(sq->sq_count > 0); 6837 ASSERT(q->q_syncq == sq); 6838 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL && 6839 sq->sq_oprev == NULL) || 6840 (sq->sq_outer != NULL && sq->sq_onext != NULL && 6841 sq->sq_oprev != NULL)); 6842 6843 mutex_enter(QLOCK(q)); 6844 6845 #ifdef DEBUG 6846 /* 6847 * This is used for debug in the qfill_syncq/qdrain_syncq case 6848 * to trace the queue that the message is intended for. Note 6849 * that the original use was to identify the queue and function 6850 * to call on the drain. In the new syncq, we have the context 6851 * of the queue that we are draining, so call it's putproc and 6852 * don't rely on the saved values. But for debug this is still 6853 * useful information. 6854 */ 6855 mp->b_prev = (mblk_t *)q->q_qinfo->qi_putp; 6856 mp->b_queue = q; 6857 mp->b_next = NULL; 6858 #endif 6859 ASSERT(q->q_syncq == sq); 6860 /* 6861 * Enqueue the message on the list. 6862 * SQPUT_MP() accesses q_syncqmsgs. We are already holding QLOCK to 6863 * protect it. So it's ok to acquire SQLOCK after SQPUT_MP(). 6864 */ 6865 SQPUT_MP(q, mp); 6866 mutex_enter(SQLOCK(sq)); 6867 6868 /* 6869 * And queue on syncq for scheduling, if not already queued. 6870 * Note that we need the SQLOCK for this, and for testing flags 6871 * at the end to see if we will drain. So grab it now, and 6872 * release it before we call qdrain_syncq or return. 6873 */ 6874 if (!(q->q_sqflags & Q_SQQUEUED)) { 6875 q->q_spri = curthread->t_pri; 6876 SQPUT_Q(sq, q); 6877 } 6878 #ifdef DEBUG 6879 else { 6880 /* 6881 * All of these conditions MUST be true! 6882 */ 6883 ASSERT(sq->sq_tail != NULL); 6884 if (sq->sq_tail == sq->sq_head) { 6885 ASSERT((q->q_sqprev == NULL) && 6886 (q->q_sqnext == NULL)); 6887 } else { 6888 ASSERT((q->q_sqprev != NULL) || 6889 (q->q_sqnext != NULL)); 6890 } 6891 ASSERT(sq->sq_flags & SQ_QUEUED); 6892 ASSERT(q->q_syncqmsgs != 0); 6893 ASSERT(q->q_sqflags & Q_SQQUEUED); 6894 } 6895 #endif 6896 mutex_exit(QLOCK(q)); 6897 /* 6898 * SQLOCK is still held, so sq_count can be safely decremented. 6899 */ 6900 sq->sq_count--; 6901 6902 putnext_tail(sq, q, 0); 6903 /* Should not reference sq or q after this point. */ 6904 } 6905 6906 /* End of qfill_syncq */ 6907 6908 /* 6909 * Remove all messages from a syncq (if qp is NULL) or remove all messages 6910 * that would be put into qp by drain_syncq. 6911 * Used when deleting the syncq (qp == NULL) or when detaching 6912 * a queue (qp != NULL). 6913 * Return non-zero if one or more messages were freed. 6914 * 6915 * No need to grab sq_putlocks here. See comment in strsubr.h that explains when 6916 * sq_putlocks are used. 6917 * 6918 * NOTE: This function assumes that it is called from the close() context and 6919 * that all the queues in the syncq are going away. For this reason it doesn't 6920 * acquire QLOCK for modifying q_sqhead/q_sqtail fields. This assumption is 6921 * currently valid, but it is useful to rethink this function to behave properly 6922 * in other cases. 6923 */ 6924 int 6925 flush_syncq(syncq_t *sq, queue_t *qp) 6926 { 6927 mblk_t *bp, *mp_head, *mp_next, *mp_prev; 6928 queue_t *q; 6929 int ret = 0; 6930 6931 mutex_enter(SQLOCK(sq)); 6932 6933 /* 6934 * Before we leave, we need to make sure there are no 6935 * events listed for this queue. All events for this queue 6936 * will just be freed. 6937 */ 6938 if (qp != NULL && sq->sq_evhead != NULL) { 6939 ASSERT(sq->sq_flags & SQ_EVENTS); 6940 6941 mp_prev = NULL; 6942 for (bp = sq->sq_evhead; bp != NULL; bp = mp_next) { 6943 mp_next = bp->b_next; 6944 if (bp->b_queue == qp) { 6945 /* Delete this message */ 6946 if (mp_prev != NULL) { 6947 mp_prev->b_next = mp_next; 6948 /* 6949 * Update sq_evtail if the last element 6950 * is removed. 6951 */ 6952 if (bp == sq->sq_evtail) { 6953 ASSERT(mp_next == NULL); 6954 sq->sq_evtail = mp_prev; 6955 } 6956 } else 6957 sq->sq_evhead = mp_next; 6958 if (sq->sq_evhead == NULL) 6959 sq->sq_flags &= ~SQ_EVENTS; 6960 bp->b_prev = bp->b_next = NULL; 6961 freemsg(bp); 6962 ret++; 6963 } else { 6964 mp_prev = bp; 6965 } 6966 } 6967 } 6968 6969 /* 6970 * Walk sq_head and: 6971 * - match qp if qp is set, remove it's messages 6972 * - all if qp is not set 6973 */ 6974 q = sq->sq_head; 6975 while (q != NULL) { 6976 ASSERT(q->q_syncq == sq); 6977 if ((qp == NULL) || (qp == q)) { 6978 /* 6979 * Yank the messages as a list off the queue 6980 */ 6981 mp_head = q->q_sqhead; 6982 /* 6983 * We do not have QLOCK(q) here (which is safe due to 6984 * assumptions mentioned above). To obtain the lock we 6985 * need to release SQLOCK which may allow lots of things 6986 * to change upon us. This place requires more analysis. 6987 */ 6988 q->q_sqhead = q->q_sqtail = NULL; 6989 ASSERT(mp_head->b_queue && 6990 mp_head->b_queue->q_syncq == sq); 6991 6992 /* 6993 * Free each of the messages. 6994 */ 6995 for (bp = mp_head; bp != NULL; bp = mp_next) { 6996 mp_next = bp->b_next; 6997 bp->b_prev = bp->b_next = NULL; 6998 freemsg(bp); 6999 ret++; 7000 } 7001 /* 7002 * Now remove the queue from the syncq. 7003 */ 7004 ASSERT(q->q_sqflags & Q_SQQUEUED); 7005 SQRM_Q(sq, q); 7006 q->q_spri = 0; 7007 q->q_syncqmsgs = 0; 7008 7009 /* 7010 * If qp was specified, we are done with it and are 7011 * going to drop SQLOCK(sq) and return. We wakeup syncq 7012 * waiters while we still have the SQLOCK. 7013 */ 7014 if ((qp != NULL) && (sq->sq_flags & SQ_WANTWAKEUP)) { 7015 sq->sq_flags &= ~SQ_WANTWAKEUP; 7016 cv_broadcast(&sq->sq_wait); 7017 } 7018 /* Drop SQLOCK across clr_qfull */ 7019 mutex_exit(SQLOCK(sq)); 7020 7021 /* 7022 * We avoid doing the test that drain_syncq does and 7023 * unconditionally clear qfull for every flushed 7024 * message. Since flush_syncq is only called during 7025 * close this should not be a problem. 7026 */ 7027 clr_qfull(q); 7028 if (qp != NULL) { 7029 return (ret); 7030 } else { 7031 mutex_enter(SQLOCK(sq)); 7032 /* 7033 * The head was removed by SQRM_Q above. 7034 * reread the new head and flush it. 7035 */ 7036 q = sq->sq_head; 7037 } 7038 } else { 7039 q = q->q_sqnext; 7040 } 7041 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7042 } 7043 7044 if (sq->sq_flags & SQ_WANTWAKEUP) { 7045 sq->sq_flags &= ~SQ_WANTWAKEUP; 7046 cv_broadcast(&sq->sq_wait); 7047 } 7048 7049 mutex_exit(SQLOCK(sq)); 7050 return (ret); 7051 } 7052 7053 /* 7054 * Propagate all messages from a syncq to the next syncq that are associated 7055 * with the specified queue. If the queue is attached to a driver or if the 7056 * messages have been added due to a qwriter(PERIM_INNER), free the messages. 7057 * 7058 * Assumes that the stream is strlock()'ed. We don't come here if there 7059 * are no messages to propagate. 7060 * 7061 * NOTE : If the queue is attached to a driver, all the messages are freed 7062 * as there is no point in propagating the messages from the driver syncq 7063 * to the closing stream head which will in turn get freed later. 7064 */ 7065 static int 7066 propagate_syncq(queue_t *qp) 7067 { 7068 mblk_t *bp, *head, *tail, *prev, *next; 7069 syncq_t *sq; 7070 queue_t *nqp; 7071 syncq_t *nsq; 7072 boolean_t isdriver; 7073 int moved = 0; 7074 uint16_t flags; 7075 pri_t priority = curthread->t_pri; 7076 #ifdef DEBUG 7077 void (*func)(); 7078 #endif 7079 7080 sq = qp->q_syncq; 7081 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7082 /* debug macro */ 7083 SQ_PUTLOCKS_HELD(sq); 7084 /* 7085 * As entersq() does not increment the sq_count for 7086 * the write side, check sq_count for non-QPERQ 7087 * perimeters alone. 7088 */ 7089 ASSERT((qp->q_flag & QPERQ) || (sq->sq_count >= 1)); 7090 7091 /* 7092 * propagate_syncq() can be called because of either messages on the 7093 * queue syncq or because on events on the queue syncq. Do actual 7094 * message propagations if there are any messages. 7095 */ 7096 if (qp->q_syncqmsgs) { 7097 isdriver = (qp->q_flag & QISDRV); 7098 7099 if (!isdriver) { 7100 nqp = qp->q_next; 7101 nsq = nqp->q_syncq; 7102 ASSERT(MUTEX_HELD(SQLOCK(nsq))); 7103 /* debug macro */ 7104 SQ_PUTLOCKS_HELD(nsq); 7105 #ifdef DEBUG 7106 func = (void (*)())nqp->q_qinfo->qi_putp; 7107 #endif 7108 } 7109 7110 SQRM_Q(sq, qp); 7111 priority = MAX(qp->q_spri, priority); 7112 qp->q_spri = 0; 7113 head = qp->q_sqhead; 7114 tail = qp->q_sqtail; 7115 qp->q_sqhead = qp->q_sqtail = NULL; 7116 qp->q_syncqmsgs = 0; 7117 7118 /* 7119 * Walk the list of messages, and free them if this is a driver, 7120 * otherwise reset the b_prev and b_queue value to the new putp. 7121 * Afterward, we will just add the head to the end of the next 7122 * syncq, and point the tail to the end of this one. 7123 */ 7124 7125 for (bp = head; bp != NULL; bp = next) { 7126 next = bp->b_next; 7127 if (isdriver) { 7128 bp->b_prev = bp->b_next = NULL; 7129 freemsg(bp); 7130 continue; 7131 } 7132 /* Change the q values for this message */ 7133 bp->b_queue = nqp; 7134 #ifdef DEBUG 7135 bp->b_prev = (mblk_t *)func; 7136 #endif 7137 moved++; 7138 } 7139 /* 7140 * Attach list of messages to the end of the new queue (if there 7141 * is a list of messages). 7142 */ 7143 7144 if (!isdriver && head != NULL) { 7145 ASSERT(tail != NULL); 7146 if (nqp->q_sqhead == NULL) { 7147 nqp->q_sqhead = head; 7148 } else { 7149 ASSERT(nqp->q_sqtail != NULL); 7150 nqp->q_sqtail->b_next = head; 7151 } 7152 nqp->q_sqtail = tail; 7153 /* 7154 * When messages are moved from high priority queue to 7155 * another queue, the destination queue priority is 7156 * upgraded. 7157 */ 7158 7159 if (priority > nqp->q_spri) 7160 nqp->q_spri = priority; 7161 7162 SQPUT_Q(nsq, nqp); 7163 7164 nqp->q_syncqmsgs += moved; 7165 ASSERT(nqp->q_syncqmsgs != 0); 7166 } 7167 } 7168 7169 /* 7170 * Before we leave, we need to make sure there are no 7171 * events listed for this queue. All events for this queue 7172 * will just be freed. 7173 */ 7174 if (sq->sq_evhead != NULL) { 7175 ASSERT(sq->sq_flags & SQ_EVENTS); 7176 prev = NULL; 7177 for (bp = sq->sq_evhead; bp != NULL; bp = next) { 7178 next = bp->b_next; 7179 if (bp->b_queue == qp) { 7180 /* Delete this message */ 7181 if (prev != NULL) { 7182 prev->b_next = next; 7183 /* 7184 * Update sq_evtail if the last element 7185 * is removed. 7186 */ 7187 if (bp == sq->sq_evtail) { 7188 ASSERT(next == NULL); 7189 sq->sq_evtail = prev; 7190 } 7191 } else 7192 sq->sq_evhead = next; 7193 if (sq->sq_evhead == NULL) 7194 sq->sq_flags &= ~SQ_EVENTS; 7195 bp->b_prev = bp->b_next = NULL; 7196 freemsg(bp); 7197 } else { 7198 prev = bp; 7199 } 7200 } 7201 } 7202 7203 flags = sq->sq_flags; 7204 7205 /* Wake up any waiter before leaving. */ 7206 if (flags & SQ_WANTWAKEUP) { 7207 flags &= ~SQ_WANTWAKEUP; 7208 cv_broadcast(&sq->sq_wait); 7209 } 7210 sq->sq_flags = flags; 7211 7212 return (moved); 7213 } 7214 7215 /* 7216 * Try and upgrade to exclusive access at the inner perimeter. If this can 7217 * not be done without blocking then request will be queued on the syncq 7218 * and drain_syncq will run it later. 7219 * 7220 * This routine can only be called from put or service procedures plus 7221 * asynchronous callback routines that have properly entered the queue (with 7222 * entersq). Thus qwriter_inner assumes the caller has one claim on the syncq 7223 * associated with q. 7224 */ 7225 void 7226 qwriter_inner(queue_t *q, mblk_t *mp, void (*func)()) 7227 { 7228 syncq_t *sq = q->q_syncq; 7229 uint16_t count; 7230 7231 mutex_enter(SQLOCK(sq)); 7232 count = sq->sq_count; 7233 SQ_PUTLOCKS_ENTER(sq); 7234 SUM_SQ_PUTCOUNTS(sq, count); 7235 ASSERT(count >= 1); 7236 ASSERT(sq->sq_type & (SQ_CIPUT|SQ_CISVC)); 7237 7238 if (count == 1) { 7239 /* 7240 * Can upgrade. This case also handles nested qwriter calls 7241 * (when the qwriter callback function calls qwriter). In that 7242 * case SQ_EXCL is already set. 7243 */ 7244 sq->sq_flags |= SQ_EXCL; 7245 SQ_PUTLOCKS_EXIT(sq); 7246 mutex_exit(SQLOCK(sq)); 7247 (*func)(q, mp); 7248 /* 7249 * Assumes that leavesq, putnext, and drain_syncq will reset 7250 * SQ_EXCL for SQ_CIPUT/SQ_CISVC queues. We leave SQ_EXCL on 7251 * until putnext, leavesq, or drain_syncq drops it. 7252 * That way we handle nested qwriter(INNER) without dropping 7253 * SQ_EXCL until the outermost qwriter callback routine is 7254 * done. 7255 */ 7256 return; 7257 } 7258 SQ_PUTLOCKS_EXIT(sq); 7259 sqfill_events(sq, q, mp, func); 7260 } 7261 7262 /* 7263 * Synchronous callback support functions 7264 */ 7265 7266 /* 7267 * Allocate a callback parameter structure. 7268 * Assumes that caller initializes the flags and the id. 7269 * Acquires SQLOCK(sq) if non-NULL is returned. 7270 */ 7271 callbparams_t * 7272 callbparams_alloc(syncq_t *sq, void (*func)(void *), void *arg, int kmflags) 7273 { 7274 callbparams_t *cbp; 7275 size_t size = sizeof (callbparams_t); 7276 7277 cbp = kmem_alloc(size, kmflags & ~KM_PANIC); 7278 7279 /* 7280 * Only try tryhard allocation if the caller is ready to panic. 7281 * Otherwise just fail. 7282 */ 7283 if (cbp == NULL) { 7284 if (kmflags & KM_PANIC) 7285 cbp = kmem_alloc_tryhard(sizeof (callbparams_t), 7286 &size, kmflags); 7287 else 7288 return (NULL); 7289 } 7290 7291 ASSERT(size >= sizeof (callbparams_t)); 7292 cbp->cbp_size = size; 7293 cbp->cbp_sq = sq; 7294 cbp->cbp_func = func; 7295 cbp->cbp_arg = arg; 7296 mutex_enter(SQLOCK(sq)); 7297 cbp->cbp_next = sq->sq_callbpend; 7298 sq->sq_callbpend = cbp; 7299 return (cbp); 7300 } 7301 7302 void 7303 callbparams_free(syncq_t *sq, callbparams_t *cbp) 7304 { 7305 callbparams_t **pp, *p; 7306 7307 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7308 7309 for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) { 7310 if (p == cbp) { 7311 *pp = p->cbp_next; 7312 kmem_free(p, p->cbp_size); 7313 return; 7314 } 7315 } 7316 (void) (STRLOG(0, 0, 0, SL_CONSOLE, 7317 "callbparams_free: not found\n")); 7318 } 7319 7320 void 7321 callbparams_free_id(syncq_t *sq, callbparams_id_t id, int32_t flag) 7322 { 7323 callbparams_t **pp, *p; 7324 7325 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7326 7327 for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) { 7328 if (p->cbp_id == id && p->cbp_flags == flag) { 7329 *pp = p->cbp_next; 7330 kmem_free(p, p->cbp_size); 7331 return; 7332 } 7333 } 7334 (void) (STRLOG(0, 0, 0, SL_CONSOLE, 7335 "callbparams_free_id: not found\n")); 7336 } 7337 7338 /* 7339 * Callback wrapper function used by once-only callbacks that can be 7340 * cancelled (qtimeout and qbufcall) 7341 * Contains inline version of entersq(sq, SQ_CALLBACK) that can be 7342 * cancelled by the qun* functions. 7343 */ 7344 void 7345 qcallbwrapper(void *arg) 7346 { 7347 callbparams_t *cbp = arg; 7348 syncq_t *sq; 7349 uint16_t count = 0; 7350 uint16_t waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL; 7351 uint16_t type; 7352 7353 sq = cbp->cbp_sq; 7354 mutex_enter(SQLOCK(sq)); 7355 type = sq->sq_type; 7356 if (!(type & SQ_CICB)) { 7357 count = sq->sq_count; 7358 SQ_PUTLOCKS_ENTER(sq); 7359 SQ_PUTCOUNT_CLRFAST_LOCKED(sq); 7360 SUM_SQ_PUTCOUNTS(sq, count); 7361 sq->sq_needexcl++; 7362 ASSERT(sq->sq_needexcl != 0); /* wraparound */ 7363 waitflags |= SQ_MESSAGES; 7364 } 7365 /* Can not handle exclusive entry at outer perimeter */ 7366 ASSERT(type & SQ_COCB); 7367 7368 while ((sq->sq_flags & waitflags) || (!(type & SQ_CICB) &&count != 0)) { 7369 if ((sq->sq_callbflags & cbp->cbp_flags) && 7370 (sq->sq_cancelid == cbp->cbp_id)) { 7371 /* timeout has been cancelled */ 7372 sq->sq_callbflags |= SQ_CALLB_BYPASSED; 7373 callbparams_free(sq, cbp); 7374 if (!(type & SQ_CICB)) { 7375 ASSERT(sq->sq_needexcl > 0); 7376 sq->sq_needexcl--; 7377 if (sq->sq_needexcl == 0) { 7378 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 7379 } 7380 SQ_PUTLOCKS_EXIT(sq); 7381 } 7382 mutex_exit(SQLOCK(sq)); 7383 return; 7384 } 7385 sq->sq_flags |= SQ_WANTWAKEUP; 7386 if (!(type & SQ_CICB)) { 7387 SQ_PUTLOCKS_EXIT(sq); 7388 } 7389 cv_wait(&sq->sq_wait, SQLOCK(sq)); 7390 if (!(type & SQ_CICB)) { 7391 count = sq->sq_count; 7392 SQ_PUTLOCKS_ENTER(sq); 7393 SUM_SQ_PUTCOUNTS(sq, count); 7394 } 7395 } 7396 7397 sq->sq_count++; 7398 ASSERT(sq->sq_count != 0); /* Wraparound */ 7399 if (!(type & SQ_CICB)) { 7400 ASSERT(count == 0); 7401 sq->sq_flags |= SQ_EXCL; 7402 ASSERT(sq->sq_needexcl > 0); 7403 sq->sq_needexcl--; 7404 if (sq->sq_needexcl == 0) { 7405 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 7406 } 7407 SQ_PUTLOCKS_EXIT(sq); 7408 } 7409 7410 mutex_exit(SQLOCK(sq)); 7411 7412 cbp->cbp_func(cbp->cbp_arg); 7413 7414 /* 7415 * We drop the lock only for leavesq to re-acquire it. 7416 * Possible optimization is inline of leavesq. 7417 */ 7418 mutex_enter(SQLOCK(sq)); 7419 callbparams_free(sq, cbp); 7420 mutex_exit(SQLOCK(sq)); 7421 leavesq(sq, SQ_CALLBACK); 7422 } 7423 7424 /* 7425 * No need to grab sq_putlocks here. See comment in strsubr.h that 7426 * explains when sq_putlocks are used. 7427 * 7428 * sq_count (or one of the sq_putcounts) has already been 7429 * decremented by the caller, and if SQ_QUEUED, we need to call 7430 * drain_syncq (the global syncq drain). 7431 * If putnext_tail is called with the SQ_EXCL bit set, we are in 7432 * one of two states, non-CIPUT perimeter, and we need to clear 7433 * it, or we went exclusive in the put procedure. In any case, 7434 * we want to clear the bit now, and it is probably easier to do 7435 * this at the beginning of this function (remember, we hold 7436 * the SQLOCK). Lastly, if there are other messages queued 7437 * on the syncq (and not for our destination), enable the syncq 7438 * for background work. 7439 */ 7440 7441 /* ARGSUSED */ 7442 void 7443 putnext_tail(syncq_t *sq, queue_t *qp, uint32_t passflags) 7444 { 7445 uint16_t flags = sq->sq_flags; 7446 7447 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7448 ASSERT(MUTEX_NOT_HELD(QLOCK(qp))); 7449 7450 /* Clear SQ_EXCL if set in passflags */ 7451 if (passflags & SQ_EXCL) { 7452 flags &= ~SQ_EXCL; 7453 } 7454 if (flags & SQ_WANTWAKEUP) { 7455 flags &= ~SQ_WANTWAKEUP; 7456 cv_broadcast(&sq->sq_wait); 7457 } 7458 if (flags & SQ_WANTEXWAKEUP) { 7459 flags &= ~SQ_WANTEXWAKEUP; 7460 cv_broadcast(&sq->sq_exitwait); 7461 } 7462 sq->sq_flags = flags; 7463 7464 /* 7465 * We have cleared SQ_EXCL if we were asked to, and started 7466 * the wakeup process for waiters. If there are no writers 7467 * then we need to drain the syncq if we were told to, or 7468 * enable the background thread to do it. 7469 */ 7470 if (!(flags & (SQ_STAYAWAY|SQ_EXCL))) { 7471 if ((passflags & SQ_QUEUED) || 7472 (sq->sq_svcflags & SQ_DISABLED)) { 7473 /* drain_syncq will take care of events in the list */ 7474 drain_syncq(sq); 7475 return; 7476 } else if (flags & SQ_QUEUED) { 7477 sqenable(sq); 7478 } 7479 } 7480 /* Drop the SQLOCK on exit */ 7481 mutex_exit(SQLOCK(sq)); 7482 TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END, 7483 "putnext_end:(%p, %p, %p) done", NULL, qp, sq); 7484 } 7485 7486 void 7487 set_qend(queue_t *q) 7488 { 7489 mutex_enter(QLOCK(q)); 7490 if (!O_SAMESTR(q)) 7491 q->q_flag |= QEND; 7492 else 7493 q->q_flag &= ~QEND; 7494 mutex_exit(QLOCK(q)); 7495 q = _OTHERQ(q); 7496 mutex_enter(QLOCK(q)); 7497 if (!O_SAMESTR(q)) 7498 q->q_flag |= QEND; 7499 else 7500 q->q_flag &= ~QEND; 7501 mutex_exit(QLOCK(q)); 7502 } 7503 7504 /* 7505 * Set QFULL in next service procedure queue (that cares) if not already 7506 * set and if there are already more messages on the syncq than 7507 * sq_max_size. If sq_max_size is 0, no flow control will be asserted on 7508 * any syncq. 7509 * 7510 * The fq here is the next queue with a service procedure. This is where 7511 * we would fail canputnext, so this is where we need to set QFULL. 7512 * In the case when fq != q we need to take QLOCK(fq) to set QFULL flag. 7513 * 7514 * We already have QLOCK at this point. To avoid cross-locks with 7515 * freezestr() which grabs all QLOCKs and with strlock() which grabs both 7516 * SQLOCK and sd_reflock, we need to drop respective locks first. 7517 */ 7518 void 7519 set_qfull(queue_t *q) 7520 { 7521 queue_t *fq = NULL; 7522 7523 ASSERT(MUTEX_HELD(QLOCK(q))); 7524 if ((sq_max_size != 0) && (!(q->q_nfsrv->q_flag & QFULL)) && 7525 (q->q_syncqmsgs > sq_max_size)) { 7526 if ((fq = q->q_nfsrv) == q) { 7527 fq->q_flag |= QFULL; 7528 } else { 7529 mutex_exit(QLOCK(q)); 7530 mutex_enter(QLOCK(fq)); 7531 fq->q_flag |= QFULL; 7532 mutex_exit(QLOCK(fq)); 7533 mutex_enter(QLOCK(q)); 7534 } 7535 } 7536 } 7537 7538 void 7539 clr_qfull(queue_t *q) 7540 { 7541 queue_t *oq = q; 7542 7543 q = q->q_nfsrv; 7544 /* Fast check if there is any work to do before getting the lock. */ 7545 if ((q->q_flag & (QFULL|QWANTW)) == 0) { 7546 return; 7547 } 7548 7549 /* 7550 * Do not reset QFULL (and backenable) if the q_count is the reason 7551 * for QFULL being set. 7552 */ 7553 mutex_enter(QLOCK(q)); 7554 /* 7555 * If queue is empty i.e q_mblkcnt is zero, queue can not be full. 7556 * Hence clear the QFULL. 7557 * If both q_count and q_mblkcnt are less than the hiwat mark, 7558 * clear the QFULL. 7559 */ 7560 if (q->q_mblkcnt == 0 || ((q->q_count < q->q_hiwat) && 7561 (q->q_mblkcnt < q->q_hiwat))) { 7562 q->q_flag &= ~QFULL; 7563 /* 7564 * A little more confusing, how about this way: 7565 * if someone wants to write, 7566 * AND 7567 * both counts are less than the lowat mark 7568 * OR 7569 * the lowat mark is zero 7570 * THEN 7571 * backenable 7572 */ 7573 if ((q->q_flag & QWANTW) && 7574 (((q->q_count < q->q_lowat) && 7575 (q->q_mblkcnt < q->q_lowat)) || q->q_lowat == 0)) { 7576 q->q_flag &= ~QWANTW; 7577 mutex_exit(QLOCK(q)); 7578 backenable(oq, 0); 7579 } else 7580 mutex_exit(QLOCK(q)); 7581 } else 7582 mutex_exit(QLOCK(q)); 7583 } 7584 7585 /* 7586 * Set the forward service procedure pointer. 7587 * 7588 * Called at insert-time to cache a queue's next forward service procedure in 7589 * q_nfsrv; used by canput() and canputnext(). If the queue to be inserted 7590 * has a service procedure then q_nfsrv points to itself. If the queue to be 7591 * inserted does not have a service procedure, then q_nfsrv points to the next 7592 * queue forward that has a service procedure. If the queue is at the logical 7593 * end of the stream (driver for write side, stream head for the read side) 7594 * and does not have a service procedure, then q_nfsrv also points to itself. 7595 */ 7596 void 7597 set_nfsrv_ptr( 7598 queue_t *rnew, /* read queue pointer to new module */ 7599 queue_t *wnew, /* write queue pointer to new module */ 7600 queue_t *prev_rq, /* read queue pointer to the module above */ 7601 queue_t *prev_wq) /* write queue pointer to the module above */ 7602 { 7603 queue_t *qp; 7604 7605 if (prev_wq->q_next == NULL) { 7606 /* 7607 * Insert the driver, initialize the driver and stream head. 7608 * In this case, prev_rq/prev_wq should be the stream head. 7609 * _I_INSERT does not allow inserting a driver. Make sure 7610 * that it is not an insertion. 7611 */ 7612 ASSERT(!(rnew->q_flag & _QINSERTING)); 7613 wnew->q_nfsrv = wnew; 7614 if (rnew->q_qinfo->qi_srvp) 7615 rnew->q_nfsrv = rnew; 7616 else 7617 rnew->q_nfsrv = prev_rq; 7618 prev_rq->q_nfsrv = prev_rq; 7619 prev_wq->q_nfsrv = prev_wq; 7620 } else { 7621 /* 7622 * set up read side q_nfsrv pointer. This MUST be done 7623 * before setting the write side, because the setting of 7624 * the write side for a fifo may depend on it. 7625 * 7626 * Suppose we have a fifo that only has pipemod pushed. 7627 * pipemod has no read or write service procedures, so 7628 * nfsrv for both pipemod queues points to prev_rq (the 7629 * stream read head). Now push bufmod (which has only a 7630 * read service procedure). Doing the write side first, 7631 * wnew->q_nfsrv is set to pipemod's writeq nfsrv, which 7632 * is WRONG; the next queue forward from wnew with a 7633 * service procedure will be rnew, not the stream read head. 7634 * Since the downstream queue (which in the case of a fifo 7635 * is the read queue rnew) can affect upstream queues, it 7636 * needs to be done first. Setting up the read side first 7637 * sets nfsrv for both pipemod queues to rnew and then 7638 * when the write side is set up, wnew-q_nfsrv will also 7639 * point to rnew. 7640 */ 7641 if (rnew->q_qinfo->qi_srvp) { 7642 /* 7643 * use _OTHERQ() because, if this is a pipe, next 7644 * module may have been pushed from other end and 7645 * q_next could be a read queue. 7646 */ 7647 qp = _OTHERQ(prev_wq->q_next); 7648 while (qp && qp->q_nfsrv != qp) { 7649 qp->q_nfsrv = rnew; 7650 qp = backq(qp); 7651 } 7652 rnew->q_nfsrv = rnew; 7653 } else 7654 rnew->q_nfsrv = prev_rq->q_nfsrv; 7655 7656 /* set up write side q_nfsrv pointer */ 7657 if (wnew->q_qinfo->qi_srvp) { 7658 wnew->q_nfsrv = wnew; 7659 7660 /* 7661 * For insertion, need to update nfsrv of the modules 7662 * above which do not have a service routine. 7663 */ 7664 if (rnew->q_flag & _QINSERTING) { 7665 for (qp = prev_wq; 7666 qp != NULL && qp->q_nfsrv != qp; 7667 qp = backq(qp)) { 7668 qp->q_nfsrv = wnew->q_nfsrv; 7669 } 7670 } 7671 } else { 7672 if (prev_wq->q_next == prev_rq) 7673 /* 7674 * Since prev_wq/prev_rq are the middle of a 7675 * fifo, wnew/rnew will also be the middle of 7676 * a fifo and wnew's nfsrv is same as rnew's. 7677 */ 7678 wnew->q_nfsrv = rnew->q_nfsrv; 7679 else 7680 wnew->q_nfsrv = prev_wq->q_next->q_nfsrv; 7681 } 7682 } 7683 } 7684 7685 /* 7686 * Reset the forward service procedure pointer; called at remove-time. 7687 */ 7688 void 7689 reset_nfsrv_ptr(queue_t *rqp, queue_t *wqp) 7690 { 7691 queue_t *tmp_qp; 7692 7693 /* Reset the write side q_nfsrv pointer for _I_REMOVE */ 7694 if ((rqp->q_flag & _QREMOVING) && (wqp->q_qinfo->qi_srvp != NULL)) { 7695 for (tmp_qp = backq(wqp); 7696 tmp_qp != NULL && tmp_qp->q_nfsrv == wqp; 7697 tmp_qp = backq(tmp_qp)) { 7698 tmp_qp->q_nfsrv = wqp->q_nfsrv; 7699 } 7700 } 7701 7702 /* reset the read side q_nfsrv pointer */ 7703 if (rqp->q_qinfo->qi_srvp) { 7704 if (wqp->q_next) { /* non-driver case */ 7705 tmp_qp = _OTHERQ(wqp->q_next); 7706 while (tmp_qp && tmp_qp->q_nfsrv == rqp) { 7707 /* Note that rqp->q_next cannot be NULL */ 7708 ASSERT(rqp->q_next != NULL); 7709 tmp_qp->q_nfsrv = rqp->q_next->q_nfsrv; 7710 tmp_qp = backq(tmp_qp); 7711 } 7712 } 7713 } 7714 } 7715 7716 /* 7717 * This routine should be called after all stream geometry changes to update 7718 * the stream head cached struio() rd/wr queue pointers. Note must be called 7719 * with the streamlock()ed. 7720 * 7721 * Note: only enables Synchronous STREAMS for a side of a Stream which has 7722 * an explicit synchronous barrier module queue. That is, a queue that 7723 * has specified a struio() type. 7724 */ 7725 static void 7726 strsetuio(stdata_t *stp) 7727 { 7728 queue_t *wrq; 7729 7730 if (stp->sd_flag & STPLEX) { 7731 /* 7732 * Not streamhead, but a mux, so no Synchronous STREAMS. 7733 */ 7734 stp->sd_struiowrq = NULL; 7735 stp->sd_struiordq = NULL; 7736 return; 7737 } 7738 /* 7739 * Scan the write queue(s) while synchronous 7740 * until we find a qinfo uio type specified. 7741 */ 7742 wrq = stp->sd_wrq->q_next; 7743 while (wrq) { 7744 if (wrq->q_struiot == STRUIOT_NONE) { 7745 wrq = 0; 7746 break; 7747 } 7748 if (wrq->q_struiot != STRUIOT_DONTCARE) 7749 break; 7750 if (! _SAMESTR(wrq)) { 7751 wrq = 0; 7752 break; 7753 } 7754 wrq = wrq->q_next; 7755 } 7756 stp->sd_struiowrq = wrq; 7757 /* 7758 * Scan the read queue(s) while synchronous 7759 * until we find a qinfo uio type specified. 7760 */ 7761 wrq = stp->sd_wrq->q_next; 7762 while (wrq) { 7763 if (_RD(wrq)->q_struiot == STRUIOT_NONE) { 7764 wrq = 0; 7765 break; 7766 } 7767 if (_RD(wrq)->q_struiot != STRUIOT_DONTCARE) 7768 break; 7769 if (! _SAMESTR(wrq)) { 7770 wrq = 0; 7771 break; 7772 } 7773 wrq = wrq->q_next; 7774 } 7775 stp->sd_struiordq = wrq ? _RD(wrq) : 0; 7776 } 7777 7778 /* 7779 * pass_wput, unblocks the passthru queues, so that 7780 * messages can arrive at muxs lower read queue, before 7781 * I_LINK/I_UNLINK is acked/nacked. 7782 */ 7783 static void 7784 pass_wput(queue_t *q, mblk_t *mp) 7785 { 7786 syncq_t *sq; 7787 7788 sq = _RD(q)->q_syncq; 7789 if (sq->sq_flags & SQ_BLOCKED) 7790 unblocksq(sq, SQ_BLOCKED, 0); 7791 putnext(q, mp); 7792 } 7793 7794 /* 7795 * Set up queues for the link/unlink. 7796 * Create a new queue and block it and then insert it 7797 * below the stream head on the lower stream. 7798 * This prevents any messages from arriving during the setq 7799 * as well as while the mux is processing the LINK/I_UNLINK. 7800 * The blocked passq is unblocked once the LINK/I_UNLINK has 7801 * been acked or nacked or if a message is generated and sent 7802 * down muxs write put procedure. 7803 * See pass_wput(). 7804 * 7805 * After the new queue is inserted, all messages coming from below are 7806 * blocked. The call to strlock will ensure that all activity in the stream head 7807 * read queue syncq is stopped (sq_count drops to zero). 7808 */ 7809 static queue_t * 7810 link_addpassthru(stdata_t *stpdown) 7811 { 7812 queue_t *passq; 7813 sqlist_t sqlist; 7814 7815 passq = allocq(); 7816 STREAM(passq) = STREAM(_WR(passq)) = stpdown; 7817 /* setq might sleep in allocator - avoid holding locks. */ 7818 setq(passq, &passthru_rinit, &passthru_winit, NULL, QPERQ, 7819 SQ_CI|SQ_CO, B_FALSE); 7820 claimq(passq); 7821 blocksq(passq->q_syncq, SQ_BLOCKED, 1); 7822 insertq(STREAM(passq), passq); 7823 7824 /* 7825 * Use strlock() to wait for the stream head sq_count to drop to zero 7826 * since we are going to change q_ptr in the stream head. Note that 7827 * insertq() doesn't wait for any syncq counts to drop to zero. 7828 */ 7829 sqlist.sqlist_head = NULL; 7830 sqlist.sqlist_index = 0; 7831 sqlist.sqlist_size = sizeof (sqlist_t); 7832 sqlist_insert(&sqlist, _RD(stpdown->sd_wrq)->q_syncq); 7833 strlock(stpdown, &sqlist); 7834 strunlock(stpdown, &sqlist); 7835 7836 releaseq(passq); 7837 return (passq); 7838 } 7839 7840 /* 7841 * Let messages flow up into the mux by removing 7842 * the passq. 7843 */ 7844 static void 7845 link_rempassthru(queue_t *passq) 7846 { 7847 claimq(passq); 7848 removeq(passq); 7849 releaseq(passq); 7850 freeq(passq); 7851 } 7852 7853 /* 7854 * Wait for the condition variable pointed to by `cvp' to be signaled, 7855 * or for `tim' milliseconds to elapse, whichever comes first. If `tim' 7856 * is negative, then there is no time limit. If `nosigs' is non-zero, 7857 * then the wait will be non-interruptible. 7858 * 7859 * Returns >0 if signaled, 0 if interrupted, or -1 upon timeout. 7860 */ 7861 clock_t 7862 str_cv_wait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim, int nosigs) 7863 { 7864 clock_t ret; 7865 7866 if (tim < 0) { 7867 if (nosigs) { 7868 cv_wait(cvp, mp); 7869 ret = 1; 7870 } else { 7871 ret = cv_wait_sig(cvp, mp); 7872 } 7873 } else if (tim > 0) { 7874 /* 7875 * convert milliseconds to clock ticks 7876 */ 7877 if (nosigs) { 7878 ret = cv_reltimedwait(cvp, mp, 7879 MSEC_TO_TICK_ROUNDUP(tim), TR_CLOCK_TICK); 7880 } else { 7881 ret = cv_reltimedwait_sig(cvp, mp, 7882 MSEC_TO_TICK_ROUNDUP(tim), TR_CLOCK_TICK); 7883 } 7884 } else { 7885 ret = -1; 7886 } 7887 return (ret); 7888 } 7889 7890 /* 7891 * Wait until the stream head can determine if it is at the mark but 7892 * don't wait forever to prevent a race condition between the "mark" state 7893 * in the stream head and any mark state in the caller/user of this routine. 7894 * 7895 * This is used by sockets and for a socket it would be incorrect 7896 * to return a failure for SIOCATMARK when there is no data in the receive 7897 * queue and the marked urgent data is traveling up the stream. 7898 * 7899 * This routine waits until the mark is known by waiting for one of these 7900 * three events: 7901 * The stream head read queue becoming non-empty (including an EOF). 7902 * The STRATMARK flag being set (due to a MSGMARKNEXT message). 7903 * The STRNOTATMARK flag being set (which indicates that the transport 7904 * has sent a MSGNOTMARKNEXT message to indicate that it is not at 7905 * the mark). 7906 * 7907 * The routine returns 1 if the stream is at the mark; 0 if it can 7908 * be determined that the stream is not at the mark. 7909 * If the wait times out and it can't determine 7910 * whether or not the stream might be at the mark the routine will return -1. 7911 * 7912 * Note: This routine should only be used when a mark is pending i.e., 7913 * in the socket case the SIGURG has been posted. 7914 * Note2: This can not wakeup just because synchronous streams indicate 7915 * that data is available since it is not possible to use the synchronous 7916 * streams interfaces to determine the b_flag value for the data queued below 7917 * the stream head. 7918 */ 7919 int 7920 strwaitmark(vnode_t *vp) 7921 { 7922 struct stdata *stp = vp->v_stream; 7923 queue_t *rq = _RD(stp->sd_wrq); 7924 int mark; 7925 7926 mutex_enter(&stp->sd_lock); 7927 while (rq->q_first == NULL && 7928 !(stp->sd_flag & (STRATMARK|STRNOTATMARK|STREOF))) { 7929 stp->sd_flag |= RSLEEP; 7930 7931 /* Wait for 100 milliseconds for any state change. */ 7932 if (str_cv_wait(&rq->q_wait, &stp->sd_lock, 100, 1) == -1) { 7933 mutex_exit(&stp->sd_lock); 7934 return (-1); 7935 } 7936 } 7937 if (stp->sd_flag & STRATMARK) 7938 mark = 1; 7939 else if (rq->q_first != NULL && (rq->q_first->b_flag & MSGMARK)) 7940 mark = 1; 7941 else 7942 mark = 0; 7943 7944 mutex_exit(&stp->sd_lock); 7945 return (mark); 7946 } 7947 7948 /* 7949 * Set a read side error. If persist is set change the socket error 7950 * to persistent. If errfunc is set install the function as the exported 7951 * error handler. 7952 */ 7953 void 7954 strsetrerror(vnode_t *vp, int error, int persist, errfunc_t errfunc) 7955 { 7956 struct stdata *stp = vp->v_stream; 7957 7958 mutex_enter(&stp->sd_lock); 7959 stp->sd_rerror = error; 7960 if (error == 0 && errfunc == NULL) 7961 stp->sd_flag &= ~STRDERR; 7962 else 7963 stp->sd_flag |= STRDERR; 7964 if (persist) { 7965 stp->sd_flag &= ~STRDERRNONPERSIST; 7966 } else { 7967 stp->sd_flag |= STRDERRNONPERSIST; 7968 } 7969 stp->sd_rderrfunc = errfunc; 7970 if (error != 0 || errfunc != NULL) { 7971 cv_broadcast(&_RD(stp->sd_wrq)->q_wait); /* readers */ 7972 cv_broadcast(&stp->sd_wrq->q_wait); /* writers */ 7973 cv_broadcast(&stp->sd_monitor); /* ioctllers */ 7974 7975 mutex_exit(&stp->sd_lock); 7976 pollwakeup(&stp->sd_pollist, POLLERR); 7977 mutex_enter(&stp->sd_lock); 7978 7979 if (stp->sd_sigflags & S_ERROR) 7980 strsendsig(stp->sd_siglist, S_ERROR, 0, error); 7981 } 7982 mutex_exit(&stp->sd_lock); 7983 } 7984 7985 /* 7986 * Set a write side error. If persist is set change the socket error 7987 * to persistent. 7988 */ 7989 void 7990 strsetwerror(vnode_t *vp, int error, int persist, errfunc_t errfunc) 7991 { 7992 struct stdata *stp = vp->v_stream; 7993 7994 mutex_enter(&stp->sd_lock); 7995 stp->sd_werror = error; 7996 if (error == 0 && errfunc == NULL) 7997 stp->sd_flag &= ~STWRERR; 7998 else 7999 stp->sd_flag |= STWRERR; 8000 if (persist) { 8001 stp->sd_flag &= ~STWRERRNONPERSIST; 8002 } else { 8003 stp->sd_flag |= STWRERRNONPERSIST; 8004 } 8005 stp->sd_wrerrfunc = errfunc; 8006 if (error != 0 || errfunc != NULL) { 8007 cv_broadcast(&_RD(stp->sd_wrq)->q_wait); /* readers */ 8008 cv_broadcast(&stp->sd_wrq->q_wait); /* writers */ 8009 cv_broadcast(&stp->sd_monitor); /* ioctllers */ 8010 8011 mutex_exit(&stp->sd_lock); 8012 pollwakeup(&stp->sd_pollist, POLLERR); 8013 mutex_enter(&stp->sd_lock); 8014 8015 if (stp->sd_sigflags & S_ERROR) 8016 strsendsig(stp->sd_siglist, S_ERROR, 0, error); 8017 } 8018 mutex_exit(&stp->sd_lock); 8019 } 8020 8021 /* 8022 * Make the stream return 0 (EOF) when all data has been read. 8023 * No effect on write side. 8024 */ 8025 void 8026 strseteof(vnode_t *vp, int eof) 8027 { 8028 struct stdata *stp = vp->v_stream; 8029 8030 mutex_enter(&stp->sd_lock); 8031 if (!eof) { 8032 stp->sd_flag &= ~STREOF; 8033 mutex_exit(&stp->sd_lock); 8034 return; 8035 } 8036 stp->sd_flag |= STREOF; 8037 if (stp->sd_flag & RSLEEP) { 8038 stp->sd_flag &= ~RSLEEP; 8039 cv_broadcast(&_RD(stp->sd_wrq)->q_wait); 8040 } 8041 8042 mutex_exit(&stp->sd_lock); 8043 pollwakeup(&stp->sd_pollist, POLLIN|POLLRDNORM); 8044 mutex_enter(&stp->sd_lock); 8045 8046 if (stp->sd_sigflags & (S_INPUT|S_RDNORM)) 8047 strsendsig(stp->sd_siglist, S_INPUT|S_RDNORM, 0, 0); 8048 mutex_exit(&stp->sd_lock); 8049 } 8050 8051 void 8052 strflushrq(vnode_t *vp, int flag) 8053 { 8054 struct stdata *stp = vp->v_stream; 8055 8056 mutex_enter(&stp->sd_lock); 8057 flushq(_RD(stp->sd_wrq), flag); 8058 mutex_exit(&stp->sd_lock); 8059 } 8060 8061 void 8062 strsetrputhooks(vnode_t *vp, uint_t flags, 8063 msgfunc_t protofunc, msgfunc_t miscfunc) 8064 { 8065 struct stdata *stp = vp->v_stream; 8066 8067 mutex_enter(&stp->sd_lock); 8068 8069 if (protofunc == NULL) 8070 stp->sd_rprotofunc = strrput_proto; 8071 else 8072 stp->sd_rprotofunc = protofunc; 8073 8074 if (miscfunc == NULL) 8075 stp->sd_rmiscfunc = strrput_misc; 8076 else 8077 stp->sd_rmiscfunc = miscfunc; 8078 8079 if (flags & SH_CONSOL_DATA) 8080 stp->sd_rput_opt |= SR_CONSOL_DATA; 8081 else 8082 stp->sd_rput_opt &= ~SR_CONSOL_DATA; 8083 8084 if (flags & SH_SIGALLDATA) 8085 stp->sd_rput_opt |= SR_SIGALLDATA; 8086 else 8087 stp->sd_rput_opt &= ~SR_SIGALLDATA; 8088 8089 if (flags & SH_IGN_ZEROLEN) 8090 stp->sd_rput_opt |= SR_IGN_ZEROLEN; 8091 else 8092 stp->sd_rput_opt &= ~SR_IGN_ZEROLEN; 8093 8094 mutex_exit(&stp->sd_lock); 8095 } 8096 8097 void 8098 strsetwputhooks(vnode_t *vp, uint_t flags, clock_t closetime) 8099 { 8100 struct stdata *stp = vp->v_stream; 8101 8102 mutex_enter(&stp->sd_lock); 8103 stp->sd_closetime = closetime; 8104 8105 if (flags & SH_SIGPIPE) 8106 stp->sd_wput_opt |= SW_SIGPIPE; 8107 else 8108 stp->sd_wput_opt &= ~SW_SIGPIPE; 8109 if (flags & SH_RECHECK_ERR) 8110 stp->sd_wput_opt |= SW_RECHECK_ERR; 8111 else 8112 stp->sd_wput_opt &= ~SW_RECHECK_ERR; 8113 8114 mutex_exit(&stp->sd_lock); 8115 } 8116 8117 void 8118 strsetrwputdatahooks(vnode_t *vp, msgfunc_t rdatafunc, msgfunc_t wdatafunc) 8119 { 8120 struct stdata *stp = vp->v_stream; 8121 8122 mutex_enter(&stp->sd_lock); 8123 8124 stp->sd_rputdatafunc = rdatafunc; 8125 stp->sd_wputdatafunc = wdatafunc; 8126 8127 mutex_exit(&stp->sd_lock); 8128 } 8129 8130 /* Used within framework when the queue is already locked */ 8131 void 8132 qenable_locked(queue_t *q) 8133 { 8134 stdata_t *stp = STREAM(q); 8135 8136 ASSERT(MUTEX_HELD(QLOCK(q))); 8137 8138 if (!q->q_qinfo->qi_srvp) 8139 return; 8140 8141 /* 8142 * Do not place on run queue if already enabled or closing. 8143 */ 8144 if (q->q_flag & (QWCLOSE|QENAB)) 8145 return; 8146 8147 /* 8148 * mark queue enabled and place on run list if it is not already being 8149 * serviced. If it is serviced, the runservice() function will detect 8150 * that QENAB is set and call service procedure before clearing 8151 * QINSERVICE flag. 8152 */ 8153 q->q_flag |= QENAB; 8154 if (q->q_flag & QINSERVICE) 8155 return; 8156 8157 /* Record the time of qenable */ 8158 q->q_qtstamp = ddi_get_lbolt(); 8159 8160 /* 8161 * Put the queue in the stp list and schedule it for background 8162 * processing if it is not already scheduled or if stream head does not 8163 * intent to process it in the foreground later by setting 8164 * STRS_WILLSERVICE flag. 8165 */ 8166 mutex_enter(&stp->sd_qlock); 8167 /* 8168 * If there are already something on the list, stp flags should show 8169 * intention to drain it. 8170 */ 8171 IMPLY(STREAM_NEEDSERVICE(stp), 8172 (stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED))); 8173 8174 ENQUEUE(q, stp->sd_qhead, stp->sd_qtail, q_link); 8175 stp->sd_nqueues++; 8176 8177 /* 8178 * If no one will drain this stream we are the first producer and 8179 * need to schedule it for background thread. 8180 */ 8181 if (!(stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED))) { 8182 /* 8183 * No one will service this stream later, so we have to 8184 * schedule it now. 8185 */ 8186 STRSTAT(stenables); 8187 stp->sd_svcflags |= STRS_SCHEDULED; 8188 stp->sd_servid = (void *)taskq_dispatch(streams_taskq, 8189 (task_func_t *)stream_service, stp, TQ_NOSLEEP|TQ_NOQUEUE); 8190 8191 if (stp->sd_servid == NULL) { 8192 /* 8193 * Task queue failed so fail over to the backup 8194 * servicing thread. 8195 */ 8196 STRSTAT(taskqfails); 8197 /* 8198 * It is safe to clear STRS_SCHEDULED flag because it 8199 * was set by this thread above. 8200 */ 8201 stp->sd_svcflags &= ~STRS_SCHEDULED; 8202 8203 /* 8204 * Failover scheduling is protected by service_queue 8205 * lock. 8206 */ 8207 mutex_enter(&service_queue); 8208 ASSERT((stp->sd_qhead == q) && (stp->sd_qtail == q)); 8209 ASSERT(q->q_link == NULL); 8210 /* 8211 * Append the queue to qhead/qtail list. 8212 */ 8213 if (qhead == NULL) 8214 qhead = q; 8215 else 8216 qtail->q_link = q; 8217 qtail = q; 8218 /* 8219 * Clear stp queue list. 8220 */ 8221 stp->sd_qhead = stp->sd_qtail = NULL; 8222 stp->sd_nqueues = 0; 8223 /* 8224 * Wakeup background queue processing thread. 8225 */ 8226 cv_signal(&services_to_run); 8227 mutex_exit(&service_queue); 8228 } 8229 } 8230 mutex_exit(&stp->sd_qlock); 8231 } 8232 8233 static void 8234 queue_service(queue_t *q) 8235 { 8236 /* 8237 * The queue in the list should have 8238 * QENAB flag set and should not have 8239 * QINSERVICE flag set. QINSERVICE is 8240 * set when the queue is dequeued and 8241 * qenable_locked doesn't enqueue a 8242 * queue with QINSERVICE set. 8243 */ 8244 8245 ASSERT(!(q->q_flag & QINSERVICE)); 8246 ASSERT((q->q_flag & QENAB)); 8247 mutex_enter(QLOCK(q)); 8248 q->q_flag &= ~QENAB; 8249 q->q_flag |= QINSERVICE; 8250 mutex_exit(QLOCK(q)); 8251 runservice(q); 8252 } 8253 8254 static void 8255 syncq_service(syncq_t *sq) 8256 { 8257 STRSTAT(syncqservice); 8258 mutex_enter(SQLOCK(sq)); 8259 ASSERT(!(sq->sq_svcflags & SQ_SERVICE)); 8260 ASSERT(sq->sq_servcount != 0); 8261 ASSERT(sq->sq_next == NULL); 8262 8263 /* if we came here from the background thread, clear the flag */ 8264 if (sq->sq_svcflags & SQ_BGTHREAD) 8265 sq->sq_svcflags &= ~SQ_BGTHREAD; 8266 8267 /* let drain_syncq know that it's being called in the background */ 8268 sq->sq_svcflags |= SQ_SERVICE; 8269 drain_syncq(sq); 8270 } 8271 8272 static void 8273 qwriter_outer_service(syncq_t *outer) 8274 { 8275 /* 8276 * Note that SQ_WRITER is used on the outer perimeter 8277 * to signal that a qwriter(OUTER) is either investigating 8278 * running or that it is actually running a function. 8279 */ 8280 outer_enter(outer, SQ_BLOCKED|SQ_WRITER); 8281 8282 /* 8283 * All inner syncq are empty and have SQ_WRITER set 8284 * to block entering the outer perimeter. 8285 * 8286 * We do not need to explicitly call write_now since 8287 * outer_exit does it for us. 8288 */ 8289 outer_exit(outer); 8290 } 8291 8292 static void 8293 mblk_free(mblk_t *mp) 8294 { 8295 dblk_t *dbp = mp->b_datap; 8296 frtn_t *frp = dbp->db_frtnp; 8297 8298 mp->b_next = NULL; 8299 if (dbp->db_fthdr != NULL) 8300 str_ftfree(dbp); 8301 8302 ASSERT(dbp->db_fthdr == NULL); 8303 frp->free_func(frp->free_arg); 8304 ASSERT(dbp->db_mblk == mp); 8305 8306 if (dbp->db_credp != NULL) { 8307 crfree(dbp->db_credp); 8308 dbp->db_credp = NULL; 8309 } 8310 dbp->db_cpid = -1; 8311 dbp->db_struioflag = 0; 8312 dbp->db_struioun.cksum.flags = 0; 8313 8314 kmem_cache_free(dbp->db_cache, dbp); 8315 } 8316 8317 /* 8318 * Background processing of the stream queue list. 8319 */ 8320 static void 8321 stream_service(stdata_t *stp) 8322 { 8323 queue_t *q; 8324 8325 mutex_enter(&stp->sd_qlock); 8326 8327 STR_SERVICE(stp, q); 8328 8329 stp->sd_svcflags &= ~STRS_SCHEDULED; 8330 stp->sd_servid = NULL; 8331 cv_signal(&stp->sd_qcv); 8332 mutex_exit(&stp->sd_qlock); 8333 } 8334 8335 /* 8336 * Foreground processing of the stream queue list. 8337 */ 8338 void 8339 stream_runservice(stdata_t *stp) 8340 { 8341 queue_t *q; 8342 8343 mutex_enter(&stp->sd_qlock); 8344 STRSTAT(rservice); 8345 /* 8346 * We are going to drain this stream queue list, so qenable_locked will 8347 * not schedule it until we finish. 8348 */ 8349 stp->sd_svcflags |= STRS_WILLSERVICE; 8350 8351 STR_SERVICE(stp, q); 8352 8353 stp->sd_svcflags &= ~STRS_WILLSERVICE; 8354 mutex_exit(&stp->sd_qlock); 8355 /* 8356 * Help backup background thread to drain the qhead/qtail list. 8357 */ 8358 while (qhead != NULL) { 8359 STRSTAT(qhelps); 8360 mutex_enter(&service_queue); 8361 DQ(q, qhead, qtail, q_link); 8362 mutex_exit(&service_queue); 8363 if (q != NULL) 8364 queue_service(q); 8365 } 8366 } 8367 8368 void 8369 stream_willservice(stdata_t *stp) 8370 { 8371 mutex_enter(&stp->sd_qlock); 8372 stp->sd_svcflags |= STRS_WILLSERVICE; 8373 mutex_exit(&stp->sd_qlock); 8374 } 8375 8376 /* 8377 * Replace the cred currently in the mblk with a different one. 8378 * Also update db_cpid. 8379 */ 8380 void 8381 mblk_setcred(mblk_t *mp, cred_t *cr, pid_t cpid) 8382 { 8383 dblk_t *dbp = mp->b_datap; 8384 cred_t *ocr = dbp->db_credp; 8385 8386 ASSERT(cr != NULL); 8387 8388 if (cr != ocr) { 8389 crhold(dbp->db_credp = cr); 8390 if (ocr != NULL) 8391 crfree(ocr); 8392 } 8393 /* Don't overwrite with NOPID */ 8394 if (cpid != NOPID) 8395 dbp->db_cpid = cpid; 8396 } 8397 8398 /* 8399 * If the src message has a cred, then replace the cred currently in the mblk 8400 * with it. 8401 * Also update db_cpid. 8402 */ 8403 void 8404 mblk_copycred(mblk_t *mp, const mblk_t *src) 8405 { 8406 dblk_t *dbp = mp->b_datap; 8407 cred_t *cr, *ocr; 8408 pid_t cpid; 8409 8410 cr = msg_getcred(src, &cpid); 8411 if (cr == NULL) 8412 return; 8413 8414 ocr = dbp->db_credp; 8415 if (cr != ocr) { 8416 crhold(dbp->db_credp = cr); 8417 if (ocr != NULL) 8418 crfree(ocr); 8419 } 8420 /* Don't overwrite with NOPID */ 8421 if (cpid != NOPID) 8422 dbp->db_cpid = cpid; 8423 } 8424 8425 int 8426 hcksum_assoc(mblk_t *mp, multidata_t *mmd, pdesc_t *pd, 8427 uint32_t start, uint32_t stuff, uint32_t end, uint32_t value, 8428 uint32_t flags, int km_flags) 8429 { 8430 int rc = 0; 8431 8432 ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA); 8433 if (mp->b_datap->db_type == M_DATA) { 8434 /* Associate values for M_DATA type */ 8435 DB_CKSUMSTART(mp) = (intptr_t)start; 8436 DB_CKSUMSTUFF(mp) = (intptr_t)stuff; 8437 DB_CKSUMEND(mp) = (intptr_t)end; 8438 DB_CKSUMFLAGS(mp) = flags; 8439 DB_CKSUM16(mp) = (uint16_t)value; 8440 8441 } else { 8442 pattrinfo_t pa_info; 8443 8444 ASSERT(mmd != NULL); 8445 8446 pa_info.type = PATTR_HCKSUM; 8447 pa_info.len = sizeof (pattr_hcksum_t); 8448 8449 if (mmd_addpattr(mmd, pd, &pa_info, B_TRUE, km_flags) != NULL) { 8450 pattr_hcksum_t *hck = (pattr_hcksum_t *)pa_info.buf; 8451 8452 hck->hcksum_start_offset = start; 8453 hck->hcksum_stuff_offset = stuff; 8454 hck->hcksum_end_offset = end; 8455 hck->hcksum_cksum_val.inet_cksum = (uint16_t)value; 8456 hck->hcksum_flags = flags; 8457 } else { 8458 rc = -1; 8459 } 8460 } 8461 return (rc); 8462 } 8463 8464 void 8465 hcksum_retrieve(mblk_t *mp, multidata_t *mmd, pdesc_t *pd, 8466 uint32_t *start, uint32_t *stuff, uint32_t *end, 8467 uint32_t *value, uint32_t *flags) 8468 { 8469 ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA); 8470 if (mp->b_datap->db_type == M_DATA) { 8471 if (flags != NULL) { 8472 *flags = DB_CKSUMFLAGS(mp) & HCK_FLAGS; 8473 if ((*flags & (HCK_PARTIALCKSUM | 8474 HCK_FULLCKSUM)) != 0) { 8475 if (value != NULL) 8476 *value = (uint32_t)DB_CKSUM16(mp); 8477 if ((*flags & HCK_PARTIALCKSUM) != 0) { 8478 if (start != NULL) 8479 *start = 8480 (uint32_t)DB_CKSUMSTART(mp); 8481 if (stuff != NULL) 8482 *stuff = 8483 (uint32_t)DB_CKSUMSTUFF(mp); 8484 if (end != NULL) 8485 *end = 8486 (uint32_t)DB_CKSUMEND(mp); 8487 } 8488 } 8489 } 8490 } else { 8491 pattrinfo_t hck_attr = {PATTR_HCKSUM}; 8492 8493 ASSERT(mmd != NULL); 8494 8495 /* get hardware checksum attribute */ 8496 if (mmd_getpattr(mmd, pd, &hck_attr) != NULL) { 8497 pattr_hcksum_t *hck = (pattr_hcksum_t *)hck_attr.buf; 8498 8499 ASSERT(hck_attr.len >= sizeof (pattr_hcksum_t)); 8500 if (flags != NULL) 8501 *flags = hck->hcksum_flags; 8502 if (start != NULL) 8503 *start = hck->hcksum_start_offset; 8504 if (stuff != NULL) 8505 *stuff = hck->hcksum_stuff_offset; 8506 if (end != NULL) 8507 *end = hck->hcksum_end_offset; 8508 if (value != NULL) 8509 *value = (uint32_t) 8510 hck->hcksum_cksum_val.inet_cksum; 8511 } 8512 } 8513 } 8514 8515 void 8516 lso_info_set(mblk_t *mp, uint32_t mss, uint32_t flags) 8517 { 8518 ASSERT(DB_TYPE(mp) == M_DATA); 8519 ASSERT((flags & ~HW_LSO_FLAGS) == 0); 8520 8521 /* Set the flags */ 8522 DB_LSOFLAGS(mp) |= flags; 8523 DB_LSOMSS(mp) = mss; 8524 } 8525 8526 void 8527 lso_info_cleanup(mblk_t *mp) 8528 { 8529 ASSERT(DB_TYPE(mp) == M_DATA); 8530 8531 /* Clear the flags */ 8532 DB_LSOFLAGS(mp) &= ~HW_LSO_FLAGS; 8533 DB_LSOMSS(mp) = 0; 8534 } 8535 8536 void 8537 lso_info_get(mblk_t *mp, uint32_t *mss, uint32_t *flags) 8538 { 8539 ASSERT(DB_TYPE(mp) == M_DATA); 8540 8541 if (flags != NULL) { 8542 *flags = DB_CKSUMFLAGS(mp) & HW_LSO_FLAGS; 8543 if ((*flags != 0) && (mss != NULL)) 8544 *mss = (uint32_t)DB_LSOMSS(mp); 8545 } 8546 } 8547 8548 /* 8549 * Checksum buffer *bp for len bytes with psum partial checksum, 8550 * or 0 if none, and return the 16 bit partial checksum. 8551 */ 8552 unsigned 8553 bcksum(uchar_t *bp, int len, unsigned int psum) 8554 { 8555 int odd = len & 1; 8556 extern unsigned int ip_ocsum(); 8557 8558 if (((intptr_t)bp & 1) == 0 && !odd) { 8559 /* 8560 * Bp is 16 bit aligned and len is multiple of 16 bit word. 8561 */ 8562 return (ip_ocsum((ushort_t *)bp, len >> 1, psum)); 8563 } 8564 if (((intptr_t)bp & 1) != 0) { 8565 /* 8566 * Bp isn't 16 bit aligned. 8567 */ 8568 unsigned int tsum; 8569 8570 #ifdef _LITTLE_ENDIAN 8571 psum += *bp; 8572 #else 8573 psum += *bp << 8; 8574 #endif 8575 len--; 8576 bp++; 8577 tsum = ip_ocsum((ushort_t *)bp, len >> 1, 0); 8578 psum += (tsum << 8) & 0xffff | (tsum >> 8); 8579 if (len & 1) { 8580 bp += len - 1; 8581 #ifdef _LITTLE_ENDIAN 8582 psum += *bp << 8; 8583 #else 8584 psum += *bp; 8585 #endif 8586 } 8587 } else { 8588 /* 8589 * Bp is 16 bit aligned. 8590 */ 8591 psum = ip_ocsum((ushort_t *)bp, len >> 1, psum); 8592 if (odd) { 8593 bp += len - 1; 8594 #ifdef _LITTLE_ENDIAN 8595 psum += *bp; 8596 #else 8597 psum += *bp << 8; 8598 #endif 8599 } 8600 } 8601 /* 8602 * Normalize psum to 16 bits before returning the new partial 8603 * checksum. The max psum value before normalization is 0x3FDFE. 8604 */ 8605 return ((psum >> 16) + (psum & 0xFFFF)); 8606 } 8607 8608 boolean_t 8609 is_vmloaned_mblk(mblk_t *mp, multidata_t *mmd, pdesc_t *pd) 8610 { 8611 boolean_t rc; 8612 8613 ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA); 8614 if (DB_TYPE(mp) == M_DATA) { 8615 rc = (((mp)->b_datap->db_struioflag & STRUIO_ZC) != 0); 8616 } else { 8617 pattrinfo_t zcopy_attr = {PATTR_ZCOPY}; 8618 8619 ASSERT(mmd != NULL); 8620 rc = (mmd_getpattr(mmd, pd, &zcopy_attr) != NULL); 8621 } 8622 return (rc); 8623 } 8624 8625 void 8626 freemsgchain(mblk_t *mp) 8627 { 8628 mblk_t *next; 8629 8630 while (mp != NULL) { 8631 next = mp->b_next; 8632 mp->b_next = NULL; 8633 8634 freemsg(mp); 8635 mp = next; 8636 } 8637 } 8638 8639 mblk_t * 8640 copymsgchain(mblk_t *mp) 8641 { 8642 mblk_t *nmp = NULL; 8643 mblk_t **nmpp = &nmp; 8644 8645 for (; mp != NULL; mp = mp->b_next) { 8646 if ((*nmpp = copymsg(mp)) == NULL) { 8647 freemsgchain(nmp); 8648 return (NULL); 8649 } 8650 8651 nmpp = &((*nmpp)->b_next); 8652 } 8653 8654 return (nmp); 8655 } 8656 8657 /* NOTE: Do not add code after this point. */ 8658 #undef QLOCK 8659 8660 /* 8661 * Replacement for QLOCK macro for those that can't use it. 8662 */ 8663 kmutex_t * 8664 QLOCK(queue_t *q) 8665 { 8666 return (&(q)->q_lock); 8667 } 8668 8669 /* 8670 * Dummy runqueues/queuerun functions functions for backwards compatibility. 8671 */ 8672 #undef runqueues 8673 void 8674 runqueues(void) 8675 { 8676 } 8677 8678 #undef queuerun 8679 void 8680 queuerun(void) 8681 { 8682 } 8683 8684 /* 8685 * Initialize the STR stack instance, which tracks autopush and persistent 8686 * links. 8687 */ 8688 /* ARGSUSED */ 8689 static void * 8690 str_stack_init(netstackid_t stackid, netstack_t *ns) 8691 { 8692 str_stack_t *ss; 8693 int i; 8694 8695 ss = (str_stack_t *)kmem_zalloc(sizeof (*ss), KM_SLEEP); 8696 ss->ss_netstack = ns; 8697 8698 /* 8699 * set up autopush 8700 */ 8701 sad_initspace(ss); 8702 8703 /* 8704 * set up mux_node structures. 8705 */ 8706 ss->ss_devcnt = devcnt; /* In case it should change before free */ 8707 ss->ss_mux_nodes = kmem_zalloc((sizeof (struct mux_node) * 8708 ss->ss_devcnt), KM_SLEEP); 8709 for (i = 0; i < ss->ss_devcnt; i++) 8710 ss->ss_mux_nodes[i].mn_imaj = i; 8711 return (ss); 8712 } 8713 8714 /* 8715 * Note: run at zone shutdown and not destroy so that the PLINKs are 8716 * gone by the time other cleanup happens from the destroy callbacks. 8717 */ 8718 static void 8719 str_stack_shutdown(netstackid_t stackid, void *arg) 8720 { 8721 str_stack_t *ss = (str_stack_t *)arg; 8722 int i; 8723 cred_t *cr; 8724 8725 cr = zone_get_kcred(netstackid_to_zoneid(stackid)); 8726 ASSERT(cr != NULL); 8727 8728 /* Undo all the I_PLINKs for this zone */ 8729 for (i = 0; i < ss->ss_devcnt; i++) { 8730 struct mux_edge *ep; 8731 ldi_handle_t lh; 8732 ldi_ident_t li; 8733 int ret; 8734 int rval; 8735 dev_t rdev; 8736 8737 ep = ss->ss_mux_nodes[i].mn_outp; 8738 if (ep == NULL) 8739 continue; 8740 ret = ldi_ident_from_major((major_t)i, &li); 8741 if (ret != 0) { 8742 continue; 8743 } 8744 rdev = ep->me_dev; 8745 ret = ldi_open_by_dev(&rdev, OTYP_CHR, FREAD|FWRITE, 8746 cr, &lh, li); 8747 if (ret != 0) { 8748 ldi_ident_release(li); 8749 continue; 8750 } 8751 8752 ret = ldi_ioctl(lh, I_PUNLINK, (intptr_t)MUXID_ALL, FKIOCTL, 8753 cr, &rval); 8754 if (ret) { 8755 (void) ldi_close(lh, FREAD|FWRITE, cr); 8756 ldi_ident_release(li); 8757 continue; 8758 } 8759 (void) ldi_close(lh, FREAD|FWRITE, cr); 8760 8761 /* Close layered handles */ 8762 ldi_ident_release(li); 8763 } 8764 crfree(cr); 8765 8766 sad_freespace(ss); 8767 8768 kmem_free(ss->ss_mux_nodes, sizeof (struct mux_node) * ss->ss_devcnt); 8769 ss->ss_mux_nodes = NULL; 8770 } 8771 8772 /* 8773 * Free the structure; str_stack_shutdown did the other cleanup work. 8774 */ 8775 /* ARGSUSED */ 8776 static void 8777 str_stack_fini(netstackid_t stackid, void *arg) 8778 { 8779 str_stack_t *ss = (str_stack_t *)arg; 8780 8781 kmem_free(ss, sizeof (*ss)); 8782 } 8783