/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/signal.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/session.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/stropts.h>
#include <sys/poll.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/uio.h>
#include <sys/cmn_err.h>
#include <sys/priocntl.h>
#include <sys/procset.h>
#include <sys/vmem.h>
#include <sys/bitmap.h>
#include <sys/kmem.h>
#include <sys/siginfo.h>
#include <sys/vtrace.h>
#include <sys/callb.h>
#include <sys/debug.h>
#include <sys/modctl.h>
#include <sys/vmsystm.h>
#include <vm/page.h>
#include <sys/atomic.h>
#include <sys/suntpi.h>
#include <sys/strlog.h>
#include <sys/promif.h>
#include <sys/project.h>
#include <sys/vm.h>
#include <sys/taskq.h>
#include <sys/sunddi.h>
#include <sys/sunldi_impl.h>
#include <sys/strsun.h>
#include <sys/isa_defs.h>
#include <sys/multidata.h>
#include <sys/pattr.h>
#include <sys/strft.h>
#include <sys/fs/snode.h>
#include <sys/zone.h>
#include <sys/open.h>
#include <sys/sunldi.h>
#include <sys/sad.h>
#include <sys/netstack.h>

#define O_SAMESTR(q)	(((q)->q_next) && \
	(((q)->q_flag & QREADR) == ((q)->q_next->q_flag & QREADR)))

/*
 * WARNING:
 * The variables and routines in this file are private, belonging
 * to the STREAMS subsystem. These should not be used by modules
 * or drivers. Compatibility will not be guaranteed.
 */

/*
 * Id value used to distinguish between different multiplexor links.
 */
static int32_t lnk_id = 0;

#define STREAMS_LOPRI	MINCLSYSPRI
static pri_t streams_lopri = STREAMS_LOPRI;

#define STRSTAT(x)	(str_statistics.x.value.ui64++)
typedef struct str_stat {
	kstat_named_t	sqenables;
	kstat_named_t	stenables;
	kstat_named_t	syncqservice;
	kstat_named_t	freebs;
	kstat_named_t	qwr_outer;
	kstat_named_t	rservice;
	kstat_named_t	strwaits;
	kstat_named_t	taskqfails;
	kstat_named_t	bufcalls;
	kstat_named_t	qhelps;
	kstat_named_t	qremoved;
	kstat_named_t	sqremoved;
	kstat_named_t	bcwaits;
	kstat_named_t	sqtoomany;
} str_stat_t;

static str_stat_t str_statistics = {
	{ "sqenables",		KSTAT_DATA_UINT64 },
	{ "stenables",		KSTAT_DATA_UINT64 },
	{ "syncqservice",	KSTAT_DATA_UINT64 },
	{ "freebs",		KSTAT_DATA_UINT64 },
	{ "qwr_outer",		KSTAT_DATA_UINT64 },
	{ "rservice",		KSTAT_DATA_UINT64 },
	{ "strwaits",		KSTAT_DATA_UINT64 },
	{ "taskqfails",		KSTAT_DATA_UINT64 },
	{ "bufcalls",		KSTAT_DATA_UINT64 },
	{ "qhelps",		KSTAT_DATA_UINT64 },
	{ "qremoved",		KSTAT_DATA_UINT64 },
	{ "sqremoved",		KSTAT_DATA_UINT64 },
	{ "bcwaits",		KSTAT_DATA_UINT64 },
	{ "sqtoomany",		KSTAT_DATA_UINT64 },
};

static kstat_t *str_kstat;

/*
 * qrunflag was used previously to control background scheduling of queues. It
 * is not used anymore, but kept here in case some module still wants to access
 * it via qready() and setqsched macros.
 */
char qrunflag;			/* Unused */

/*
 * Most of the streams scheduling is done via task queues. Task queues may fail
 * for non-sleep dispatches, so there are two backup threads servicing failed
 * requests for queues and syncqs. Both of these threads also service failed
 * dispatches of freebs requests. Queues are put in the list specified by
 * `qhead' and `qtail' pointers, syncqs use `sqhead' and `sqtail' pointers and
 * freebs requests are put into `freebs_list' which has no tail pointer. All
 * three lists are protected by a single `service_queue' lock and use
 * `services_to_run' condition variable for signaling background threads. Use
 * of a single lock should not be a problem because it is only used under heavy
 * loads when task queues start to fail and at that time it may be a good idea
 * to throttle scheduling requests.
 *
 * NOTE: queues and syncqs should be scheduled by two separate threads because
 * queue servicing may be blocked waiting for a syncq which may be also
 * scheduled for background execution. This may create a deadlock when only one
 * thread is used for both.
 */

static taskq_t *streams_taskq;		/* Used for most STREAMS scheduling */

static kmutex_t service_queue;		/* protects all of servicing vars */
static kcondvar_t services_to_run;	/* wake up background service thread */
static kcondvar_t syncqs_to_run;	/* wake up background service thread */

/*
 * List of queues scheduled for background processing due to lack of resources
 * in the task queues. Protected by the service_queue lock.
 */
static struct queue *qhead;
static struct queue *qtail;

/*
 * Same list for syncqs
 */
static syncq_t *sqhead;
static syncq_t *sqtail;

static mblk_t *freebs_list;	/* list of buffers to free */

/*
 * Backup threads for servicing queues and syncqs
 */
kthread_t *streams_qbkgrnd_thread;
kthread_t *streams_sqbkgrnd_thread;
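/*
 * Illustrative sketch (not part of the original file): the
 * dispatch-with-fallback pattern described in the comment above.  When a
 * non-sleeping taskq dispatch fails, the queue is appended to the backup
 * list and the background thread is woken via `services_to_run'.  It uses
 * the ENQUEUE macro and queue_service() routine defined later in this
 * file; the STREAMS_EXAMPLES guard is hypothetical and never compiled.
 */
#ifdef STREAMS_EXAMPLES
static void
example_sched_queue(queue_t *q)
{
	if (taskq_dispatch(streams_taskq, (task_func_t *)queue_service,
	    q, TQ_NOSLEEP) == 0) {
		/* Task queue is out of resources; fall back to the list. */
		STRSTAT(taskqfails);
		mutex_enter(&service_queue);
		ENQUEUE(q, qhead, qtail, q_link);
		cv_signal(&services_to_run);
		mutex_exit(&service_queue);
	}
}
#endif /* STREAMS_EXAMPLES */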
/*
 * Bufcalls related variables.
 */
struct bclist	strbcalls;	/* list of waiting bufcalls */
kmutex_t	strbcall_lock;	/* protects bufcall list (strbcalls) */
kcondvar_t	strbcall_cv;	/* Signaling when a bufcall is added */
kmutex_t	bcall_monitor;	/* sleep/wakeup style monitor */
kcondvar_t	bcall_cv;	/* wait until executing bufcall completes */
kthread_t	*bc_bkgrnd_thread; /* Thread to service bufcall requests */

kmutex_t	strresources;	/* protects global resources */
kmutex_t	muxifier;	/* single-threads multiplexor creation */

static void *str_stack_init(netstackid_t stackid, netstack_t *ns);
static void str_stack_shutdown(netstackid_t stackid, void *arg);
static void str_stack_fini(netstackid_t stackid, void *arg);

extern void time_to_wait(clock_t *, clock_t);

/*
 * run_queues is no longer used, but is kept in case some third-party
 * module/driver decides to use it.
 */
int run_queues = 0;

/*
 * sq_max_size is the depth of the syncq (in number of messages) before
 * qfill_syncq() starts QFULL'ing destination queues. Although its primary
 * consumer, IP, is no longer D_MTPERMOD, other modules/drivers may still
 * depend on this syncq flow control, so we prefer a large default value.
 * For potential performance gain, this value is tunable in /etc/system.
 */
int sq_max_size = 10000;
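/*
 * For example, an administrator could raise the limit by adding a line
 * like the following to /etc/system and rebooting (the value shown is
 * purely illustrative, not a recommendation):
 *
 *	set sq_max_size = 25000
 */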
/*
 * the number of ciputctrl structures per syncq and stream we create when
 * needed.
 */
int n_ciputctrl;
int max_n_ciputctrl = 16;
/*
 * if n_ciputctrl is < min_n_ciputctrl don't even create ciputctrl_cache.
 */
int min_n_ciputctrl = 2;

/*
 * Per-driver/module syncqs
 * ========================
 *
 * For drivers/modules that use PERMOD or outer syncqs we keep a list of
 * perdm structures, new entries being added (and new syncqs allocated) when
 * setq() encounters a module/driver with a streamtab that it hasn't seen
 * before.
 * The reason for this mechanism is that some modules and drivers share a
 * common streamtab and it is necessary for those modules and drivers to also
 * share a common PERMOD syncq.
 *
 * perdm_list --> dm_str == streamtab_1
 *                dm_sq == syncq_1
 *                dm_ref
 *                dm_next --> dm_str == streamtab_2
 *                            dm_sq == syncq_2
 *                            dm_ref
 *                            dm_next --> ... NULL
 *
 * The dm_ref field is incremented for each new driver/module that takes
 * a reference to the perdm structure and hence shares the syncq.
 * References are held in the fmodsw_impl_t structure for each STREAMS module
 * or the dev_impl array (indexed by device major number) for each driver.
 *
 * perdm_list -> [dm_ref == 1] -> [dm_ref == 2] -> [dm_ref == 1] -> NULL
 *                     ^                 ^               ^         ^
 *                     |     ___________/                |         |
 *                     |    /                            |         |
 * dev_impl:       ...|x|y|...                       module A   module B
 *
 * When a module/driver is unloaded the reference count is decremented and,
 * when it falls to zero, the perdm structure is removed from the list and
 * the syncq is freed (see rele_dm()).
 */
perdm_t *perdm_list = NULL;
static krwlock_t perdm_rwlock;
cdevsw_impl_t *devimpl;
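/*
 * Illustrative sketch (not part of the original file): how a lookup in
 * perdm_list might proceed, matching on the shared streamtab so that
 * modules/drivers with the same streamtab end up sharing one PERMOD
 * syncq.  This is a simplified rendition of what hold_dm() does; the
 * real routine also allocates a new entry (and syncq) on a miss.  The
 * STREAMS_EXAMPLES guard is hypothetical and never compiled.
 */
#ifdef STREAMS_EXAMPLES
static perdm_t *
example_find_dm(struct streamtab *str)
{
	perdm_t *p;

	rw_enter(&perdm_rwlock, RW_READER);
	for (p = perdm_list; p != NULL; p = p->dm_next) {
		if (p->dm_str == str) {		/* found a match */
			atomic_add_32(&p->dm_ref, 1);
			break;
		}
	}
	rw_exit(&perdm_rwlock);
	return (p);
}
#endif /* STREAMS_EXAMPLES */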
extern struct qinit strdata;
extern struct qinit stwdata;

static void runservice(queue_t *);
static void streams_bufcall_service(void);
static void streams_qbkgrnd_service(void);
static void streams_sqbkgrnd_service(void);
static syncq_t *new_syncq(void);
static void free_syncq(syncq_t *);
static void outer_insert(syncq_t *, syncq_t *);
static void outer_remove(syncq_t *, syncq_t *);
static void write_now(syncq_t *);
static void clr_qfull(queue_t *);
static void enable_svc(queue_t *);
static void runbufcalls(void);
static void sqenable(syncq_t *);
static void sqfill_events(syncq_t *, queue_t *, mblk_t *, void (*)());
static void wait_q_syncq(queue_t *);
static void backenable_insertedq(queue_t *);

static void queue_service(queue_t *);
static void stream_service(stdata_t *);
static void syncq_service(syncq_t *);
static void qwriter_outer_service(syncq_t *);
static void mblk_free(mblk_t *);
#ifdef DEBUG
static int qprocsareon(queue_t *);
#endif

static void set_nfsrv_ptr(queue_t *, queue_t *, queue_t *, queue_t *);
static void reset_nfsrv_ptr(queue_t *, queue_t *);
void set_qfull(queue_t *);

static void sq_run_events(syncq_t *);
static int propagate_syncq(queue_t *);

static void blocksq(syncq_t *, ushort_t, int);
static void unblocksq(syncq_t *, ushort_t, int);
static int dropsq(syncq_t *, uint16_t);
static void emptysq(syncq_t *);
static sqlist_t *sqlist_alloc(struct stdata *, int);
static void sqlist_free(sqlist_t *);
static sqlist_t *sqlist_build(queue_t *, struct stdata *, boolean_t);
static void sqlist_insert(sqlist_t *, syncq_t *);
static void sqlist_insertall(sqlist_t *, queue_t *);

static void strsetuio(stdata_t *);

struct kmem_cache *stream_head_cache;
struct kmem_cache *queue_cache;
struct kmem_cache *syncq_cache;
struct kmem_cache *qband_cache;
struct kmem_cache *linkinfo_cache;
struct kmem_cache *ciputctrl_cache = NULL;

static linkinfo_t *linkinfo_list;

/* global esballoc throttling queue */
static esb_queue_t system_esbq;

/*
 * esballoc tunable parameters.
 */
int esbq_max_qlen = 0x16;	/* throttled queue length */
clock_t esbq_timeout = 0x8;	/* timeout to process esb queue */

/*
 * routines to handle esballoc queuing.
 */
static void esballoc_process_queue(esb_queue_t *);
static void esballoc_enqueue_mblk(mblk_t *);
static void esballoc_timer(void *);
static void esballoc_set_timer(esb_queue_t *, clock_t);
static void esballoc_mblk_free(mblk_t *);

/*
 * Qinit structure and Module_info structures
 * for passthru read and write queues
 */

static void pass_wput(queue_t *, mblk_t *);
static queue_t *link_addpassthru(stdata_t *);
static void link_rempassthru(queue_t *);

struct module_info passthru_info = {
	0,
	"passthru",
	0,
	INFPSZ,
	STRHIGH,
	STRLOW
};

struct qinit passthru_rinit = {
	(int (*)())putnext,
	NULL,
	NULL,
	NULL,
	NULL,
	&passthru_info,
	NULL
};

struct qinit passthru_winit = {
	(int (*)()) pass_wput,
	NULL,
	NULL,
	NULL,
	NULL,
	&passthru_info,
	NULL
};

/*
 * Special form of assertion: verify that X implies Y i.e. when X is true Y
 * should also be true.
 */
#define IMPLY(X, Y)	ASSERT(!(X) || (Y))

/*
 * Logical equivalence. Verify that both X and Y are either TRUE or FALSE.
 */
#define EQUIV(X, Y)	{ IMPLY(X, Y); IMPLY(Y, X); }

/*
 * Verify correctness of list head/tail pointers.
 */
#define LISTCHECK(head, tail, link) { \
	EQUIV(head, tail); \
	IMPLY(tail != NULL, tail->link == NULL); \
}

/*
 * Enqueue a list element `el' at the end of a list denoted by `head' and
 * `tail' using a `link' field.
 */
#define ENQUEUE(el, head, tail, link) { \
	ASSERT(el->link == NULL); \
	LISTCHECK(head, tail, link); \
	if (head == NULL) \
		head = el; \
	else \
		tail->link = el; \
	tail = el; \
}

/*
 * Dequeue the first element of the list denoted by `head' and `tail' pointers
 * using a `link' field and put result into `el'.
 */
#define DQ(el, head, tail, link) { \
	LISTCHECK(head, tail, link); \
	el = head; \
	if (head != NULL) { \
		head = head->link; \
		if (head == NULL) \
			tail = NULL; \
		el->link = NULL; \
	} \
}

/*
 * Remove `el' from the list using `chase' and `curr' pointers and return
 * result in `succeed'.
 */
#define RMQ(el, head, tail, link, chase, curr, succeed) { \
	LISTCHECK(head, tail, link); \
	chase = NULL; \
	succeed = 0; \
	for (curr = head; (curr != el) && (curr != NULL); curr = curr->link) \
		chase = curr; \
	if (curr != NULL) { \
		succeed = 1; \
		ASSERT(curr == el); \
		if (chase != NULL) \
			chase->link = curr->link; \
		else \
			head = curr->link; \
		curr->link = NULL; \
		if (curr == tail) \
			tail = chase; \
	} \
	LISTCHECK(head, tail, link); \
}
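/*
 * Illustrative sketch (not part of the original file): the ENQUEUE/DQ
 * macros above work on any structure with a suitable link field.  Here
 * `q_link' chains queue_t elements between `qhead' and `qtail', exactly
 * as the backup service lists use them.  The STREAMS_EXAMPLES guard is
 * hypothetical and never compiled.
 */
#ifdef STREAMS_EXAMPLES
static void
example_list_usage(queue_t *q1, queue_t *q2)
{
	queue_t *el;

	mutex_enter(&service_queue);
	ENQUEUE(q1, qhead, qtail, q_link);	/* append q1 */
	ENQUEUE(q2, qhead, qtail, q_link);	/* append q2 after q1 */
	DQ(el, qhead, qtail, q_link);		/* el == q1, now unlinked */
	mutex_exit(&service_queue);
}
#endif /* STREAMS_EXAMPLES */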
/* Handling of delayed messages on the inner syncq. */

/*
 * DEBUG versions should use function versions (to simplify tracing) and
 * non-DEBUG kernels should use macro versions.
 */

/*
 * Put a queue on the syncq list of queues.
 * Assumes SQLOCK held.
 */
#define SQPUT_Q(sq, qp) \
{ \
	ASSERT(MUTEX_HELD(SQLOCK(sq))); \
	if (!(qp->q_sqflags & Q_SQQUEUED)) { \
		/* The queue should not be linked anywhere */ \
		ASSERT((qp->q_sqprev == NULL) && (qp->q_sqnext == NULL)); \
		/* Head and tail may only be NULL simultaneously */ \
		EQUIV(sq->sq_head, sq->sq_tail); \
		/* Queue may be only enqueued on its syncq */ \
		ASSERT(sq == qp->q_syncq); \
		/* Check the correctness of SQ_MESSAGES flag */ \
		EQUIV(sq->sq_head, (sq->sq_flags & SQ_MESSAGES)); \
		/* Sanity check first/last elements of the list */ \
		IMPLY(sq->sq_head != NULL, sq->sq_head->q_sqprev == NULL); \
		IMPLY(sq->sq_tail != NULL, sq->sq_tail->q_sqnext == NULL); \
		/* \
		 * Sanity check of priority field: empty queue should \
		 * have zero priority \
		 * and nqueues equal to zero. \
		 */ \
		IMPLY(sq->sq_head == NULL, sq->sq_pri == 0); \
		/* Sanity check of sq_nqueues field */ \
		EQUIV(sq->sq_head, sq->sq_nqueues); \
		if (sq->sq_head == NULL) { \
			sq->sq_head = sq->sq_tail = qp; \
			sq->sq_flags |= SQ_MESSAGES; \
		} else if (qp->q_spri == 0) { \
			qp->q_sqprev = sq->sq_tail; \
			sq->sq_tail->q_sqnext = qp; \
			sq->sq_tail = qp; \
		} else { \
			/* \
			 * Put this queue in priority order: higher \
			 * priority gets closer to the head. \
			 */ \
			queue_t **qpp = &sq->sq_tail; \
			queue_t *qnext = NULL; \
			\
			while (*qpp != NULL && qp->q_spri > (*qpp)->q_spri) { \
				qnext = *qpp; \
				qpp = &(*qpp)->q_sqprev; \
			} \
			qp->q_sqnext = qnext; \
			qp->q_sqprev = *qpp; \
			if (*qpp != NULL) { \
				(*qpp)->q_sqnext = qp; \
			} else { \
				sq->sq_head = qp; \
				sq->sq_pri = sq->sq_head->q_spri; \
			} \
			*qpp = qp; \
		} \
		qp->q_sqflags |= Q_SQQUEUED; \
		qp->q_sqtstamp = lbolt; \
		sq->sq_nqueues++; \
	} \
}
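/*
 * A worked example of the ordering SQPUT_Q maintains (added for
 * illustration): if queues with q_spri values 0, 3 and 1 are inserted in
 * that order, the resulting list is 3 -> 1 -> 0 from head to tail and
 * sq_pri ends up as 3.  Zero-priority queues are simply appended at the
 * tail, while higher-priority queues migrate toward the head.
 */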
/*
 * Remove a queue from the syncq list
 * Assumes SQLOCK held.
 */
#define SQRM_Q(sq, qp) \
{ \
	ASSERT(MUTEX_HELD(SQLOCK(sq))); \
	ASSERT(qp->q_sqflags & Q_SQQUEUED); \
	ASSERT(sq->sq_head != NULL && sq->sq_tail != NULL); \
	ASSERT((sq->sq_flags & SQ_MESSAGES) != 0); \
	/* Check that the queue is actually in the list */ \
	ASSERT(qp->q_sqnext != NULL || sq->sq_tail == qp); \
	ASSERT(qp->q_sqprev != NULL || sq->sq_head == qp); \
	ASSERT(sq->sq_nqueues != 0); \
	if (qp->q_sqprev == NULL) { \
		/* First queue on list, make head q_sqnext */ \
		sq->sq_head = qp->q_sqnext; \
	} else { \
		/* Make prev->next == next */ \
		qp->q_sqprev->q_sqnext = qp->q_sqnext; \
	} \
	if (qp->q_sqnext == NULL) { \
		/* Last queue on list, make tail sqprev */ \
		sq->sq_tail = qp->q_sqprev; \
	} else { \
		/* Make next->prev == prev */ \
		qp->q_sqnext->q_sqprev = qp->q_sqprev; \
	} \
	/* clear out references on this queue */ \
	qp->q_sqprev = qp->q_sqnext = NULL; \
	qp->q_sqflags &= ~Q_SQQUEUED; \
	/* If there is nothing queued, clear SQ_MESSAGES */ \
	if (sq->sq_head != NULL) { \
		sq->sq_pri = sq->sq_head->q_spri; \
	} else { \
		sq->sq_flags &= ~SQ_MESSAGES; \
		sq->sq_pri = 0; \
	} \
	sq->sq_nqueues--; \
	ASSERT(sq->sq_head != NULL || sq->sq_evhead != NULL || \
	    (sq->sq_flags & SQ_QUEUED) == 0); \
}

/* Hide the definition from the header file. */
#ifdef SQPUT_MP
#undef SQPUT_MP
#endif

/*
 * Put a message on the queue syncq.
 * Assumes QLOCK held.
 */
#define SQPUT_MP(qp, mp) \
{ \
	ASSERT(MUTEX_HELD(QLOCK(qp))); \
	ASSERT(qp->q_sqhead == NULL || \
	    (qp->q_sqtail != NULL && \
	    qp->q_sqtail->b_next == NULL)); \
	qp->q_syncqmsgs++; \
	ASSERT(qp->q_syncqmsgs != 0);	/* Wraparound */ \
	if (qp->q_sqhead == NULL) { \
		qp->q_sqhead = qp->q_sqtail = mp; \
	} else { \
		qp->q_sqtail->b_next = mp; \
		qp->q_sqtail = mp; \
	} \
	ASSERT(qp->q_syncqmsgs > 0); \
	set_qfull(qp); \
}

#define SQ_PUTCOUNT_SETFAST_LOCKED(sq) { \
	ASSERT(MUTEX_HELD(SQLOCK(sq))); \
	if ((sq)->sq_ciputctrl != NULL) { \
		int i; \
		int nlocks = (sq)->sq_nciputctrl; \
		ciputctrl_t *cip = (sq)->sq_ciputctrl; \
		ASSERT((sq)->sq_type & SQ_CIPUT); \
		for (i = 0; i <= nlocks; i++) { \
			ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock)); \
			cip[i].ciputctrl_count |= SQ_FASTPUT; \
		} \
	} \
}


#define SQ_PUTCOUNT_CLRFAST_LOCKED(sq) { \
	ASSERT(MUTEX_HELD(SQLOCK(sq))); \
	if ((sq)->sq_ciputctrl != NULL) { \
		int i; \
		int nlocks = (sq)->sq_nciputctrl; \
		ciputctrl_t *cip = (sq)->sq_ciputctrl; \
		ASSERT((sq)->sq_type & SQ_CIPUT); \
		for (i = 0; i <= nlocks; i++) { \
			ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock)); \
			cip[i].ciputctrl_count &= ~SQ_FASTPUT; \
		} \
	} \
}

/*
 * Run service procedures for all queues in the stream head.
 */
#define STR_SERVICE(stp, q) { \
	ASSERT(MUTEX_HELD(&stp->sd_qlock)); \
	while (stp->sd_qhead != NULL) { \
		DQ(q, stp->sd_qhead, stp->sd_qtail, q_link); \
		ASSERT(stp->sd_nqueues > 0); \
		stp->sd_nqueues--; \
		ASSERT(!(q->q_flag & QINSERVICE)); \
		mutex_exit(&stp->sd_qlock); \
		queue_service(q); \
		mutex_enter(&stp->sd_qlock); \
	} \
	ASSERT(stp->sd_nqueues == 0); \
	ASSERT((stp->sd_qhead == NULL) && (stp->sd_qtail == NULL)); \
}
/*
 * constructor/destructor routines for the stream head cache
 */
/* ARGSUSED */
static int
stream_head_constructor(void *buf, void *cdrarg, int kmflags)
{
	stdata_t *stp = buf;

	mutex_init(&stp->sd_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&stp->sd_reflock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&stp->sd_qlock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&stp->sd_monitor, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_iocmonitor, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_refmonitor, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_qcv, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_zcopy_wait, NULL, CV_DEFAULT, NULL);
	stp->sd_wrq = NULL;

	return (0);
}

/* ARGSUSED */
static void
stream_head_destructor(void *buf, void *cdrarg)
{
	stdata_t *stp = buf;

	mutex_destroy(&stp->sd_lock);
	mutex_destroy(&stp->sd_reflock);
	mutex_destroy(&stp->sd_qlock);
	cv_destroy(&stp->sd_monitor);
	cv_destroy(&stp->sd_iocmonitor);
	cv_destroy(&stp->sd_refmonitor);
	cv_destroy(&stp->sd_qcv);
	cv_destroy(&stp->sd_zcopy_wait);
}

/*
 * constructor/destructor routines for the queue cache
 */
/* ARGSUSED */
static int
queue_constructor(void *buf, void *cdrarg, int kmflags)
{
	queinfo_t *qip = buf;
	queue_t *qp = &qip->qu_rqueue;
	queue_t *wqp = &qip->qu_wqueue;
	syncq_t	*sq = &qip->qu_syncq;

	qp->q_first = NULL;
	qp->q_link = NULL;
	qp->q_count = 0;
	qp->q_mblkcnt = 0;
	qp->q_sqhead = NULL;
	qp->q_sqtail = NULL;
	qp->q_sqnext = NULL;
	qp->q_sqprev = NULL;
	qp->q_sqflags = 0;
	qp->q_rwcnt = 0;
	qp->q_spri = 0;

	mutex_init(QLOCK(qp), NULL, MUTEX_DEFAULT, NULL);
	cv_init(&qp->q_wait, NULL, CV_DEFAULT, NULL);

	wqp->q_first = NULL;
	wqp->q_link = NULL;
	wqp->q_count = 0;
	wqp->q_mblkcnt = 0;
	wqp->q_sqhead = NULL;
	wqp->q_sqtail = NULL;
	wqp->q_sqnext = NULL;
	wqp->q_sqprev = NULL;
	wqp->q_sqflags = 0;
	wqp->q_rwcnt = 0;
	wqp->q_spri = 0;

	mutex_init(QLOCK(wqp), NULL, MUTEX_DEFAULT, NULL);
	cv_init(&wqp->q_wait, NULL, CV_DEFAULT, NULL);

	sq->sq_head = NULL;
	sq->sq_tail = NULL;
	sq->sq_evhead = NULL;
	sq->sq_evtail = NULL;
	sq->sq_callbpend = NULL;
	sq->sq_outer = NULL;
	sq->sq_onext = NULL;
	sq->sq_oprev = NULL;
	sq->sq_next = NULL;
	sq->sq_svcflags = 0;
	sq->sq_servcount = 0;
	sq->sq_needexcl = 0;
	sq->sq_nqueues = 0;
	sq->sq_pri = 0;

	mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
	cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);

	return (0);
}

/* ARGSUSED */
static void
queue_destructor(void *buf, void *cdrarg)
{
	queinfo_t *qip = buf;
	queue_t *qp = &qip->qu_rqueue;
	queue_t *wqp = &qip->qu_wqueue;
	syncq_t	*sq = &qip->qu_syncq;

	ASSERT(qp->q_sqhead == NULL);
	ASSERT(wqp->q_sqhead == NULL);
	ASSERT(qp->q_sqnext == NULL);
	ASSERT(wqp->q_sqnext == NULL);
	ASSERT(qp->q_rwcnt == 0);
	ASSERT(wqp->q_rwcnt == 0);

	mutex_destroy(&qp->q_lock);
	cv_destroy(&qp->q_wait);

	mutex_destroy(&wqp->q_lock);
	cv_destroy(&wqp->q_wait);

	mutex_destroy(&sq->sq_lock);
	cv_destroy(&sq->sq_wait);
	cv_destroy(&sq->sq_exitwait);
}

/*
 * constructor/destructor routines for the syncq cache
 */
/* ARGSUSED */
static int
syncq_constructor(void *buf, void *cdrarg, int kmflags)
{
	syncq_t	*sq = buf;

	bzero(buf, sizeof (syncq_t));

	mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
	cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);

	return (0);
}

/* ARGSUSED */
static void
syncq_destructor(void *buf, void *cdrarg)
{
	syncq_t	*sq = buf;

	ASSERT(sq->sq_head == NULL);
	ASSERT(sq->sq_tail == NULL);
	ASSERT(sq->sq_evhead == NULL);
	ASSERT(sq->sq_evtail == NULL);
	ASSERT(sq->sq_callbpend == NULL);
	ASSERT(sq->sq_callbflags == 0);
	ASSERT(sq->sq_outer == NULL);
	ASSERT(sq->sq_onext == NULL);
	ASSERT(sq->sq_oprev == NULL);
	ASSERT(sq->sq_next == NULL);
	ASSERT(sq->sq_needexcl == 0);
	ASSERT(sq->sq_svcflags == 0);
	ASSERT(sq->sq_servcount == 0);
	ASSERT(sq->sq_nqueues == 0);
	ASSERT(sq->sq_pri == 0);
	ASSERT(sq->sq_count == 0);
	ASSERT(sq->sq_rmqcount == 0);
	ASSERT(sq->sq_cancelid == 0);
	ASSERT(sq->sq_ciputctrl == NULL);
	ASSERT(sq->sq_nciputctrl == 0);
	ASSERT(sq->sq_type == 0);
	ASSERT(sq->sq_flags == 0);

	mutex_destroy(&sq->sq_lock);
	cv_destroy(&sq->sq_wait);
	cv_destroy(&sq->sq_exitwait);
}

/* ARGSUSED */
static int
ciputctrl_constructor(void *buf, void *cdrarg, int kmflags)
{
	ciputctrl_t *cip = buf;
	int i;

	for (i = 0; i < n_ciputctrl; i++) {
		cip[i].ciputctrl_count = SQ_FASTPUT;
		mutex_init(&cip[i].ciputctrl_lock, NULL, MUTEX_DEFAULT, NULL);
	}

	return (0);
}

/* ARGSUSED */
static void
ciputctrl_destructor(void *buf, void *cdrarg)
{
	ciputctrl_t *cip = buf;
	int i;

	for (i = 0; i < n_ciputctrl; i++) {
		ASSERT(cip[i].ciputctrl_count & SQ_FASTPUT);
		mutex_destroy(&cip[i].ciputctrl_lock);
	}
}

/*
 * Init routine run from main at boot time.
 */
void
strinit(void)
{
	int ncpus = ((boot_max_ncpus == -1) ? max_ncpus : boot_max_ncpus);

	stream_head_cache = kmem_cache_create("stream_head_cache",
	    sizeof (stdata_t), 0,
	    stream_head_constructor, stream_head_destructor, NULL,
	    NULL, NULL, 0);

	queue_cache = kmem_cache_create("queue_cache", sizeof (queinfo_t), 0,
	    queue_constructor, queue_destructor, NULL, NULL, NULL, 0);

	syncq_cache = kmem_cache_create("syncq_cache", sizeof (syncq_t), 0,
	    syncq_constructor, syncq_destructor, NULL, NULL, NULL, 0);

	qband_cache = kmem_cache_create("qband_cache",
	    sizeof (qband_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	linkinfo_cache = kmem_cache_create("linkinfo_cache",
	    sizeof (linkinfo_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	n_ciputctrl = ncpus;
	n_ciputctrl = 1 << highbit(n_ciputctrl - 1);
	ASSERT(n_ciputctrl >= 1);
	n_ciputctrl = MIN(n_ciputctrl, max_n_ciputctrl);
	if (n_ciputctrl >= min_n_ciputctrl) {
		ciputctrl_cache = kmem_cache_create("ciputctrl_cache",
		    sizeof (ciputctrl_t) * n_ciputctrl,
		    sizeof (ciputctrl_t), ciputctrl_constructor,
		    ciputctrl_destructor, NULL, NULL, NULL, 0);
	}

	streams_taskq = system_taskq;

	if (streams_taskq == NULL)
		panic("strinit: no memory for streams taskq!");

	bc_bkgrnd_thread = thread_create(NULL, 0,
	    streams_bufcall_service, NULL, 0, &p0, TS_RUN, streams_lopri);

	streams_qbkgrnd_thread = thread_create(NULL, 0,
	    streams_qbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);

	streams_sqbkgrnd_thread = thread_create(NULL, 0,
	    streams_sqbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);

	/*
	 * Create STREAMS kstats.
	 */
	str_kstat = kstat_create("streams", 0, "strstat",
	    "net", KSTAT_TYPE_NAMED,
	    sizeof (str_statistics) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (str_kstat != NULL) {
		str_kstat->ks_data = &str_statistics;
		kstat_install(str_kstat);
	}

	/*
	 * TPI support routine initialisation.
	 */
	tpi_init();

	/*
	 * Handle to have autopush and persistent link information per
	 * zone.
	 * Note: uses shutdown hook instead of destroy hook so that the
	 * persistent links can be torn down before the destroy hooks
	 * in the TCP/IP stack are called.
	 */
	netstack_register(NS_STR, str_stack_init, str_stack_shutdown,
	    str_stack_fini);
}
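/*
 * A worked example of the n_ciputctrl sizing in strinit() above (added
 * for illustration): with 6 CPUs, 1 << highbit(6 - 1) rounds up to the
 * power of two 8; with 64 CPUs the result (64) is capped at
 * max_n_ciputctrl (16); and on a uniprocessor the result (1) falls below
 * min_n_ciputctrl (2), so no ciputctrl_cache is created at all.
 */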
void
str_sendsig(vnode_t *vp, int event, uchar_t band, int error)
{
	struct stdata *stp;

	ASSERT(vp->v_stream);
	stp = vp->v_stream;
	/* Have to hold sd_lock to prevent siglist from changing */
	mutex_enter(&stp->sd_lock);
	if (stp->sd_sigflags & event)
		strsendsig(stp->sd_siglist, event, band, error);
	mutex_exit(&stp->sd_lock);
}

/*
 * Send the "sevent" set of signals to a process.
 * This might send more than one signal if the process is registered
 * for multiple events. The caller should pass in an sevent that only
 * includes the events for which the process has registered.
 */
static void
dosendsig(proc_t *proc, int events, int sevent, k_siginfo_t *info,
    uchar_t band, int error)
{
	ASSERT(MUTEX_HELD(&proc->p_lock));

	info->si_band = 0;
	info->si_errno = 0;

	if (sevent & S_ERROR) {
		sevent &= ~S_ERROR;
		info->si_code = POLL_ERR;
		info->si_errno = error;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_errno = 0;
	}
	if (sevent & S_HANGUP) {
		sevent &= ~S_HANGUP;
		info->si_code = POLL_HUP;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
	}
	if (sevent & S_HIPRI) {
		sevent &= ~S_HIPRI;
		info->si_code = POLL_PRI;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
	}
	if (sevent & S_RDBAND) {
		sevent &= ~S_RDBAND;
		if (events & S_BANDURG)
			sigtoproc(proc, NULL, SIGURG);
		else
			sigtoproc(proc, NULL, SIGPOLL);
	}
	if (sevent & S_WRBAND) {
		sevent &= ~S_WRBAND;
		sigtoproc(proc, NULL, SIGPOLL);
	}
	if (sevent & S_INPUT) {
		sevent &= ~S_INPUT;
		info->si_code = POLL_IN;
		info->si_band = band;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_band = 0;
	}
	if (sevent & S_OUTPUT) {
		sevent &= ~S_OUTPUT;
		info->si_code = POLL_OUT;
		info->si_band = band;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_band = 0;
	}
	if (sevent & S_MSG) {
		sevent &= ~S_MSG;
		info->si_code = POLL_MSG;
		info->si_band = band;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_band = 0;
	}
	if (sevent & S_RDNORM) {
		sevent &= ~S_RDNORM;
		sigtoproc(proc, NULL, SIGPOLL);
	}
	if (sevent != 0) {
		panic("strsendsig: unknown event(s) %x", sevent);
	}
}

/*
 * Send SIGPOLL/SIGURG signal to all processes and process groups
 * registered on the given signal list that want a signal for at
 * least one of the specified events.
 *
 * Must be called with exclusive access to siglist (caller holding sd_lock).
 *
 * strioctl(I_SETSIG/I_ESETSIG) will only change siglist when holding
 * sd_lock and the ioctl code maintains a PID_HOLD on the pid structure
 * while it is in the siglist.
 *
 * For performance reasons (MP scalability) the code drops pidlock
 * when sending signals to a single process.
 * When sending to a process group the code holds
 * pidlock to prevent the membership in the process group from changing
 * while walking the p_pglink list.
 */
void
strsendsig(strsig_t *siglist, int event, uchar_t band, int error)
{
	strsig_t *ssp;
	k_siginfo_t info;
	struct pid *pidp;
	proc_t  *proc;

	info.si_signo = SIGPOLL;
	info.si_errno = 0;
	for (ssp = siglist; ssp; ssp = ssp->ss_next) {
		int sevent;

		sevent = ssp->ss_events & event;
		if (sevent == 0)
			continue;

		if ((pidp = ssp->ss_pidp) == NULL) {
			/* pid was released but still on event list */
			continue;
		}

		if (ssp->ss_pid > 0) {
			/*
			 * XXX This unfortunately still generates
			 * a signal when a fd is closed but
			 * the proc is active.
			 */
			ASSERT(ssp->ss_pid == pidp->pid_id);

			mutex_enter(&pidlock);
			proc = prfind_zone(pidp->pid_id, ALL_ZONES);
			if (proc == NULL) {
				mutex_exit(&pidlock);
				continue;
			}
			mutex_enter(&proc->p_lock);
			mutex_exit(&pidlock);
			dosendsig(proc, ssp->ss_events, sevent, &info,
			    band, error);
			mutex_exit(&proc->p_lock);
		} else {
			/*
			 * Send to process group. Hold pidlock across
			 * calls to dosendsig().
			 */
			pid_t pgrp = -ssp->ss_pid;

			mutex_enter(&pidlock);
			proc = pgfind_zone(pgrp, ALL_ZONES);
			while (proc != NULL) {
				mutex_enter(&proc->p_lock);
				dosendsig(proc, ssp->ss_events, sevent,
				    &info, band, error);
				mutex_exit(&proc->p_lock);
				proc = proc->p_pglink;
			}
			mutex_exit(&pidlock);
		}
	}
}
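/*
 * Illustrative sketch (not part of the original file): the userland side
 * of the signal delivery above.  A process registers interest in stream
 * events with I_SETSIG; subsequent events cause strsendsig() to post
 * SIGPOLL (or SIGURG, for S_RDBAND with S_BANDURG):
 *
 *	#include <stropts.h>
 *	#include <signal.h>
 *
 *	void handler(int sig) { ... }
 *
 *	(void) signal(SIGPOLL, handler);
 *	if (ioctl(fd, I_SETSIG, S_INPUT | S_ERROR | S_HANGUP) < 0)
 *		... error ...
 */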
/*
 * Attach a stream device or module.
 * qp is a read queue; the new queue goes in so its next
 * read ptr is the argument, and the write queue corresponding
 * to the argument points to this queue. Return 0 on success,
 * or a non-zero errno on failure.
 */
int
qattach(queue_t *qp, dev_t *devp, int oflag, cred_t *crp, fmodsw_impl_t *fp,
    boolean_t is_insert)
{
	major_t		major;
	cdevsw_impl_t	*dp;
	struct streamtab *str;
	queue_t		*rq;
	queue_t		*wrq;
	uint32_t	qflag;
	uint32_t	sqtype;
	perdm_t		*dmp;
	int		error;
	int		sflag;

	rq = allocq();
	wrq = _WR(rq);
	STREAM(rq) = STREAM(wrq) = STREAM(qp);

	if (fp != NULL) {
		str = fp->f_str;
		qflag = fp->f_qflag;
		sqtype = fp->f_sqtype;
		dmp = fp->f_dmp;
		IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL);
		sflag = MODOPEN;

		/*
		 * stash away a pointer to the module structure so we can
		 * unref it in qdetach.
		 */
		rq->q_fp = fp;
	} else {
		ASSERT(!is_insert);

		major = getmajor(*devp);
		dp = &devimpl[major];

		str = dp->d_str;
		ASSERT(str == STREAMSTAB(major));

		qflag = dp->d_qflag;
		ASSERT(qflag & QISDRV);
		sqtype = dp->d_sqtype;

		/* create perdm_t if needed */
		if (NEED_DM(dp->d_dmp, qflag))
			dp->d_dmp = hold_dm(str, qflag, sqtype);

		dmp = dp->d_dmp;
		sflag = 0;
	}

	TRACE_2(TR_FAC_STREAMS_FR, TR_QATTACH_FLAGS,
	    "qattach:qflag == %X(%X)", qflag, *devp);

	/* setq might sleep in allocator - avoid holding locks. */
	setq(rq, str->st_rdinit, str->st_wrinit, dmp, qflag, sqtype, B_FALSE);

	/*
	 * Before calling the module's open routine, set up the q_next
	 * pointer for inserting a module in the middle of a stream.
	 *
	 * Note that we can always set _QINSERTING and set up q_next
	 * pointer for both inserting and pushing a module. Then there
	 * is no need for the is_insert parameter. In insertq(), called
	 * by qprocson(), assume that q_next of the new module always points
	 * to the correct queue and use it for insertion. Everything should
	 * work out fine. But in the first release of _I_INSERT, we
	 * distinguish between inserting and pushing to make sure that
	 * pushing a module follows the same code path as before.
	 */
	if (is_insert) {
		rq->q_flag |= _QINSERTING;
		rq->q_next = qp;
	}

	/*
	 * If there is an outer perimeter get exclusive access during
	 * the open procedure. Bump up the reference count on the queue.
	 */
	entersq(rq->q_syncq, SQ_OPENCLOSE);
	error = (*rq->q_qinfo->qi_qopen)(rq, devp, oflag, sflag, crp);
	if (error != 0)
		goto failed;
	leavesq(rq->q_syncq, SQ_OPENCLOSE);
	ASSERT(qprocsareon(rq));
	return (0);

failed:
	rq->q_flag &= ~_QINSERTING;
	if (backq(wrq) != NULL && backq(wrq)->q_next == wrq)
		qprocsoff(rq);
	leavesq(rq->q_syncq, SQ_OPENCLOSE);
	rq->q_next = wrq->q_next = NULL;
	qdetach(rq, 0, 0, crp, B_FALSE);
	return (error);
}

/*
 * Handle second open of stream. For modules, set the
 * last argument to MODOPEN and do not pass any open flags.
 * Ignore dummydev since this is not the first open.
 */
int
qreopen(queue_t *qp, dev_t *devp, int flag, cred_t *crp)
{
	int	error;
	dev_t	dummydev;
	queue_t	*wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);
	entersq(qp->q_syncq, SQ_OPENCLOSE);

	dummydev = *devp;
	if (error = ((*qp->q_qinfo->qi_qopen)(qp, &dummydev,
	    (wqp->q_next ? 0 : flag), (wqp->q_next ? MODOPEN : 0), crp))) {
		leavesq(qp->q_syncq, SQ_OPENCLOSE);
		mutex_enter(&STREAM(qp)->sd_lock);
		qp->q_stream->sd_flag |= STREOPENFAIL;
		mutex_exit(&STREAM(qp)->sd_lock);
		return (error);
	}
	leavesq(qp->q_syncq, SQ_OPENCLOSE);

	/*
	 * successful open should have done qprocson()
	 */
	ASSERT(qprocsareon(_RD(qp)));
	return (0);
}

/*
 * Detach a stream module or device.
 * If clmode == 1 then the module or driver was opened and its
 * close routine must be called. If clmode == 0, the module
 * or driver was never opened or the open failed, and so its close
 * should not be called.
 */
void
qdetach(queue_t *qp, int clmode, int flag, cred_t *crp, boolean_t is_remove)
{
	queue_t *wqp = _WR(qp);
	ASSERT(STREAM(qp)->sd_flag & (STRCLOSE|STWOPEN|STRPLUMB));

	if (STREAM_NEEDSERVICE(STREAM(qp)))
		stream_runservice(STREAM(qp));

	if (clmode) {
		/*
		 * Make sure that all the messages on the write side syncq are
		 * processed and nothing is left. Since we are closing, no new
		 * messages may appear there.
		 */
		wait_q_syncq(wqp);

		entersq(qp->q_syncq, SQ_OPENCLOSE);
		if (is_remove) {
			mutex_enter(QLOCK(qp));
			qp->q_flag |= _QREMOVING;
			mutex_exit(QLOCK(qp));
		}
		(*qp->q_qinfo->qi_qclose)(qp, flag, crp);
		/*
		 * Check that qprocsoff() was actually called.
		 */
		ASSERT((qp->q_flag & QWCLOSE) && (wqp->q_flag & QWCLOSE));

		leavesq(qp->q_syncq, SQ_OPENCLOSE);
	} else {
		disable_svc(qp);
	}

	/*
	 * Allow any threads blocked in entersq to proceed and discover
	 * the QWCLOSE is set.
	 * Note: This assumes that all users of entersq check QWCLOSE.
	 * Currently runservice is the only entersq that can happen
	 * after removeq has finished.
	 * Removeq will have discarded all messages destined to the closing
	 * pair of queues from the syncq.
	 * NOTE: Calling a function inside an assert is unconventional.
	 * However, it does not cause any problem since flush_syncq() does
	 * not change any state except when it returns non-zero i.e.
	 * when the assert will trigger.
	 */
	ASSERT(flush_syncq(qp->q_syncq, qp) == 0);
	ASSERT(flush_syncq(wqp->q_syncq, wqp) == 0);
	ASSERT((qp->q_flag & QPERMOD) ||
	    ((qp->q_syncq->sq_head == NULL) &&
	    (wqp->q_syncq->sq_head == NULL)));

	/* release any fmodsw_impl_t structure held on behalf of the queue */
	ASSERT(qp->q_fp != NULL || qp->q_flag & QISDRV);
	if (qp->q_fp != NULL)
		fmodsw_rele(qp->q_fp);

	/* freeq removes us from the outer perimeter if any */
	freeq(qp);
}

/* Prevent service procedures from being called */
void
disable_svc(queue_t *qp)
{
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);
	mutex_enter(QLOCK(qp));
	qp->q_flag |= QWCLOSE;
	mutex_exit(QLOCK(qp));
	mutex_enter(QLOCK(wqp));
	wqp->q_flag |= QWCLOSE;
	mutex_exit(QLOCK(wqp));
}

/* allow service procedures to be called again */
void
enable_svc(queue_t *qp)
{
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);
	mutex_enter(QLOCK(qp));
	qp->q_flag &= ~QWCLOSE;
	mutex_exit(QLOCK(qp));
	mutex_enter(QLOCK(wqp));
	wqp->q_flag &= ~QWCLOSE;
	mutex_exit(QLOCK(wqp));
}

/*
 * Remove queue from qhead/qtail if it is enabled.
 * Only reset QENAB if the queue was removed from the runlist.
 * A queue goes through 3 stages:
 *	It is on the service list and QENAB is set.
 *	It is removed from the service list but QENAB is still set.
 *	QENAB gets changed to QINSERVICE.
 *	QINSERVICE is reset (when the service procedure is done)
 * Thus we can not reset QENAB unless we actually removed it from the service
 * queue.
 */
void
remove_runlist(queue_t *qp)
{
	if (qp->q_flag & QENAB && qhead != NULL) {
		queue_t *q_chase;
		queue_t *q_curr;
		int removed;

		mutex_enter(&service_queue);
		RMQ(qp, qhead, qtail, q_link, q_chase, q_curr, removed);
		mutex_exit(&service_queue);
		if (removed) {
			STRSTAT(qremoved);
			qp->q_flag &= ~QENAB;
		}
	}
}


/*
 * Wait for any pending service processing to complete.
 * The removal of queues from the runlist is not atomic with the
 * clearing of the QENABLED flag and setting the INSERVICE flag.
 * Consequently it is possible for remove_runlist in strclose
 * to not find the queue on the runlist but for it to be QENABLED
 * and not yet INSERVICE -> hence wait_svc needs to check QENABLED
 * as well as INSERVICE.
 */
void
wait_svc(queue_t *qp)
{
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);

	/*
	 * Try to remove queues from qhead/qtail list.
	 */
	if (qhead != NULL) {
		remove_runlist(qp);
		remove_runlist(wqp);
	}
	/*
	 * Wait until the syncqs associated with the queue
	 * disappear from the background processing list.
	 * This only needs to be done for non-PERMOD perimeters since
	 * for PERMOD perimeters the syncq may be shared and will only be freed
	 * when the last module/driver is unloaded.
	 * If for PERMOD perimeters queue was on the syncq list, removeq()
	 * should call propagate_syncq() or drain_syncq() for it. Both of these
	 * functions remove the queue from its syncq list, so sqthread will not
	 * try to access the queue.
	 */
	if (!(qp->q_flag & QPERMOD)) {
		syncq_t *rsq = qp->q_syncq;
		syncq_t *wsq = wqp->q_syncq;

		/*
		 * Disable rsq and wsq and wait for any background processing
		 * of syncq to complete.
		 */
		wait_sq_svc(rsq);
		if (wsq != rsq)
			wait_sq_svc(wsq);
	}

	mutex_enter(QLOCK(qp));
	while (qp->q_flag & (QINSERVICE|QENAB))
		cv_wait(&qp->q_wait, QLOCK(qp));
	mutex_exit(QLOCK(qp));
	mutex_enter(QLOCK(wqp));
	while (wqp->q_flag & (QINSERVICE|QENAB))
		cv_wait(&wqp->q_wait, QLOCK(wqp));
	mutex_exit(QLOCK(wqp));
}

/*
 * Put ioctl data from userland buffer `arg' into the mblk chain `bp'.
 * `flag' must always contain either K_TO_K or U_TO_K; STR_NOSIG may
 * also be set, and is passed through to allocb_cred_wait().
 *
 * Returns errno on failure, zero on success.
 */
int
putiocd(mblk_t *bp, char *arg, int flag, cred_t *cr)
{
	mblk_t *tmp;
	ssize_t  count;
	int error = 0;

	ASSERT((flag & (U_TO_K | K_TO_K)) == U_TO_K ||
	    (flag & (U_TO_K | K_TO_K)) == K_TO_K);

	if (bp->b_datap->db_type == M_IOCTL) {
		count = ((struct iocblk *)bp->b_rptr)->ioc_count;
	} else {
		ASSERT(bp->b_datap->db_type == M_COPYIN);
		count = ((struct copyreq *)bp->b_rptr)->cq_size;
	}
	/*
	 * strdoioctl validates ioc_count, so if this assert fails it
	 * cannot be due to user error.
	 */
	ASSERT(count >= 0);

	if ((tmp = allocb_cred_wait(count, (flag & STR_NOSIG), &error, cr)) ==
	    NULL) {
		return (error);
	}
	error = strcopyin(arg, tmp->b_wptr, count, flag & (U_TO_K|K_TO_K));
	if (error != 0) {
		freeb(tmp);
		return (error);
	}
	DB_CPID(tmp) = curproc->p_pid;
	tmp->b_wptr += count;
	bp->b_cont = tmp;

	return (0);
}

/*
 * Copy ioctl data to user-land. Return non-zero errno on failure,
 * 0 for success.
 */
int
getiocd(mblk_t *bp, char *arg, int copymode)
{
	ssize_t count;
	size_t  n;
	int	error;

	if (bp->b_datap->db_type == M_IOCACK)
		count = ((struct iocblk *)bp->b_rptr)->ioc_count;
	else {
		ASSERT(bp->b_datap->db_type == M_COPYOUT);
		count = ((struct copyreq *)bp->b_rptr)->cq_size;
	}
	ASSERT(count >= 0);

	for (bp = bp->b_cont; bp && count;
	    count -= n, bp = bp->b_cont, arg += n) {
		n = MIN(count, bp->b_wptr - bp->b_rptr);
		error = strcopyout(bp->b_rptr, arg, n, copymode);
		if (error)
			return (error);
	}
	ASSERT(count == 0);
	return (0);
}
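/*
 * Illustrative sketch (not part of the original file): the b_cont walk
 * used by getiocd() above is the standard way to consume a multi-block
 * message.  This example merely counts the M_DATA bytes in a chain, the
 * same computation msgdsize() performs.  The STREAMS_EXAMPLES guard is
 * hypothetical and never compiled.
 */
#ifdef STREAMS_EXAMPLES
static size_t
example_chain_size(mblk_t *bp)
{
	size_t bytes = 0;

	for (; bp != NULL; bp = bp->b_cont) {
		if (bp->b_datap->db_type == M_DATA)
			bytes += MBLKL(bp);	/* b_wptr - b_rptr */
	}
	return (bytes);
}
#endif /* STREAMS_EXAMPLES */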
/*
 * Allocate a linkinfo entry given the write queue of the
 * bottom module of the top stream and the write queue of the
 * stream head of the bottom stream.
 */
linkinfo_t *
alloclink(queue_t *qup, queue_t *qdown, file_t *fpdown)
{
	linkinfo_t *linkp;

	linkp = kmem_cache_alloc(linkinfo_cache, KM_SLEEP);

	linkp->li_lblk.l_qtop = qup;
	linkp->li_lblk.l_qbot = qdown;
	linkp->li_fpdown = fpdown;

	mutex_enter(&strresources);
	linkp->li_next = linkinfo_list;
	linkp->li_prev = NULL;
	if (linkp->li_next)
		linkp->li_next->li_prev = linkp;
	linkinfo_list = linkp;
	linkp->li_lblk.l_index = ++lnk_id;
	ASSERT(lnk_id != 0);	/* this should never wrap in practice */
	mutex_exit(&strresources);

	return (linkp);
}

/*
 * Free a linkinfo entry.
 */
void
lbfree(linkinfo_t *linkp)
{
	mutex_enter(&strresources);
	if (linkp->li_next)
		linkp->li_next->li_prev = linkp->li_prev;
	if (linkp->li_prev)
		linkp->li_prev->li_next = linkp->li_next;
	else
		linkinfo_list = linkp->li_next;
	mutex_exit(&strresources);

	kmem_cache_free(linkinfo_cache, linkp);
}

/*
 * Check for a potential linking cycle.
 * Return 1 if a link will result in a cycle,
 * and 0 otherwise.
 */
int
linkcycle(stdata_t *upstp, stdata_t *lostp, str_stack_t *ss)
{
	struct mux_node *np;
	struct mux_edge *ep;
	int i;
	major_t lomaj;
	major_t upmaj;
	/*
	 * if the lower stream is a pipe/FIFO, return, since link
	 * cycles can not happen on pipes/FIFOs
	 */
	if (lostp->sd_vnode->v_type == VFIFO)
		return (0);

	for (i = 0; i < ss->ss_devcnt; i++) {
		np = &ss->ss_mux_nodes[i];
		MUX_CLEAR(np);
	}
	lomaj = getmajor(lostp->sd_vnode->v_rdev);
	upmaj = getmajor(upstp->sd_vnode->v_rdev);
	np = &ss->ss_mux_nodes[lomaj];
	for (;;) {
		if (!MUX_DIDVISIT(np)) {
			if (np->mn_imaj == upmaj)
				return (1);
			if (np->mn_outp == NULL) {
				MUX_VISIT(np);
				if (np->mn_originp == NULL)
					return (0);
				np = np->mn_originp;
				continue;
			}
			MUX_VISIT(np);
			np->mn_startp = np->mn_outp;
		} else {
			if (np->mn_startp == NULL) {
				if (np->mn_originp == NULL)
					return (0);
				else {
					np = np->mn_originp;
					continue;
				}
			}
			/*
			 * If ep->me_nodep is a FIFO (me_nodep == NULL),
			 * ignore the edge and move on. ep->me_nodep gets
			 * set to NULL in mux_addedge() if it is a FIFO.
			 */
			ep = np->mn_startp;
			np->mn_startp = ep->me_nextp;
			if (ep->me_nodep == NULL)
				continue;
			ep->me_nodep->mn_originp = np;
			np = ep->me_nodep;
		}
	}
}
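/*
 * A worked example of the cycle check above (added for illustration):
 * suppose a stream of mux A has already been linked below mux B, so the
 * graph holds an edge B -> A.  An attempt to then link a stream of B
 * below A calls linkcycle() with upstp == A's stream and lostp == B's;
 * the walk starts at B's mux_node, follows the existing edge to A's
 * node, finds mn_imaj == upmaj, and returns 1, rejecting the link.
 */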
/*
 * Find linkinfo entry corresponding to the parameters.
 */
linkinfo_t *
findlinks(stdata_t *stp, int index, int type, str_stack_t *ss)
{
	linkinfo_t *linkp;
	struct mux_edge *mep;
	struct mux_node *mnp;
	queue_t *qup;

	mutex_enter(&strresources);
	if ((type & LINKTYPEMASK) == LINKNORMAL) {
		qup = getendq(stp->sd_wrq);
		for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
			if ((qup == linkp->li_lblk.l_qtop) &&
			    (!index || (index == linkp->li_lblk.l_index))) {
				mutex_exit(&strresources);
				return (linkp);
			}
		}
	} else {
		ASSERT((type & LINKTYPEMASK) == LINKPERSIST);
		mnp = &ss->ss_mux_nodes[getmajor(stp->sd_vnode->v_rdev)];
		mep = mnp->mn_outp;
		while (mep) {
			if ((index == 0) || (index == mep->me_muxid))
				break;
			mep = mep->me_nextp;
		}
		if (!mep) {
			mutex_exit(&strresources);
			return (NULL);
		}
		for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
			if ((!linkp->li_lblk.l_qtop) &&
			    (mep->me_muxid == linkp->li_lblk.l_index)) {
				mutex_exit(&strresources);
				return (linkp);
			}
		}
	}
	mutex_exit(&strresources);
	return (NULL);
}

/*
 * Given a queue ptr, follow the chain of q_next pointers until you reach the
 * last queue on the chain and return it.
 */
queue_t *
getendq(queue_t *q)
{
	ASSERT(q != NULL);
	while (_SAMESTR(q))
		q = q->q_next;
	return (q);
}

/*
 * Wait for the syncq count to drop to zero.
 * sq could be either outer or inner.
 */
static void
wait_syncq(syncq_t *sq)
{
	uint16_t count;

	mutex_enter(SQLOCK(sq));
	count = sq->sq_count;
	SQ_PUTLOCKS_ENTER(sq);
	SUM_SQ_PUTCOUNTS(sq, count);
	while (count != 0) {
		sq->sq_flags |= SQ_WANTWAKEUP;
		SQ_PUTLOCKS_EXIT(sq);
		cv_wait(&sq->sq_wait, SQLOCK(sq));
		count = sq->sq_count;
		SQ_PUTLOCKS_ENTER(sq);
		SUM_SQ_PUTCOUNTS(sq, count);
	}
	SQ_PUTLOCKS_EXIT(sq);
	mutex_exit(SQLOCK(sq));
}

/*
 * Wait while there are any messages for the queue in its syncq.
 */
static void
wait_q_syncq(queue_t *q)
{
	if ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) {
		syncq_t *sq = q->q_syncq;

		mutex_enter(SQLOCK(sq));
		while ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) {
			sq->sq_flags |= SQ_WANTWAKEUP;
			cv_wait(&sq->sq_wait, SQLOCK(sq));
		}
		mutex_exit(SQLOCK(sq));
	}
}


int
mlink_file(vnode_t *vp, int cmd, struct file *fpdown, cred_t *crp, int *rvalp,
    int lhlink)
{
	struct stdata	*stp;
	struct strioctl strioc;
	struct linkinfo *linkp;
	struct stdata	*stpdown;
	struct streamtab *str;
	queue_t		*passq;
	syncq_t		*passyncq;
	queue_t		*rq;
	cdevsw_impl_t	*dp;
	uint32_t	qflag;
	uint32_t	sqtype;
	perdm_t		*dmp;
	int		error = 0;
	netstack_t	*ns;
	str_stack_t	*ss;

	stp = vp->v_stream;
	TRACE_1(TR_FAC_STREAMS_FR,
	    TR_I_LINK, "I_LINK/I_PLINK:stp %p", stp);
	/*
	 * Test for invalid upper stream
	 */
	if (stp->sd_flag & STRHUP) {
		return (ENXIO);
	}
	if (vp->v_type == VFIFO) {
		return (EINVAL);
	}
	if (stp->sd_strtab == NULL) {
		return (EINVAL);
	}
	if (!stp->sd_strtab->st_muxwinit) {
		return (EINVAL);
	}
	if (fpdown == NULL) {
		return (EBADF);
	}
	ns = netstack_find_by_cred(crp);
	ASSERT(ns != NULL);
	ss = ns->netstack_str;
	ASSERT(ss != NULL);

	if (getmajor(stp->sd_vnode->v_rdev) >= ss->ss_devcnt) {
		netstack_rele(ss->ss_netstack);
		return (EINVAL);
	}
	mutex_enter(&muxifier);
	if (stp->sd_flag & STPLEX) {
		mutex_exit(&muxifier);
		netstack_rele(ss->ss_netstack);
		return (ENXIO);
	}

	/*
	 * Test for invalid lower stream.
	 * The check for the v_type != VFIFO and having a major
	 * number not >= devcnt is done to avoid problems with
	 * adding mux_node entry past the end of mux_nodes[].
	 * For FIFO's we don't add an entry so this isn't a
	 * problem.
	 */
	if (((stpdown = fpdown->f_vnode->v_stream) == NULL) ||
	    (stpdown == stp) || (stpdown->sd_flag &
	    (STPLEX|STRHUP|STRDERR|STWRERR|IOCWAIT|STRPLUMB)) ||
	    ((stpdown->sd_vnode->v_type != VFIFO) &&
	    (getmajor(stpdown->sd_vnode->v_rdev) >= ss->ss_devcnt)) ||
	    linkcycle(stp, stpdown, ss)) {
		mutex_exit(&muxifier);
		netstack_rele(ss->ss_netstack);
		return (EINVAL);
	}
	TRACE_1(TR_FAC_STREAMS_FR,
	    TR_STPDOWN, "stpdown:%p", stpdown);
	rq = getendq(stp->sd_wrq);
	if (cmd == I_PLINK)
		rq = NULL;

	linkp = alloclink(rq, stpdown->sd_wrq, fpdown);

	strioc.ic_cmd = cmd;
	strioc.ic_timout = INFTIM;
	strioc.ic_len = sizeof (struct linkblk);
	strioc.ic_dp = (char *)&linkp->li_lblk;

	/*
	 * STRPLUMB protects plumbing changes and should be set before
	 * link_addpassthru()/link_rempassthru() are called, so it is set here
	 * and cleared at the end of mlink when passthru queue is removed.
	 * Setting of STRPLUMB prevents reopens of the stream while passthru
	 * queue is in-place (it is not a proper module and doesn't have open
	 * entry point).
	 *
	 * STPLEX prevents any threads from entering the stream from above. It
	 * can't be set before the call to link_addpassthru() because putnext
	 * from below may cause stream head I/O routines to be called and these
	 * routines assert that STPLEX is not set.
	 * After link_addpassthru() nothing may come from below since the
	 * pass queue syncq is blocked.
	 * Note also that STPLEX should be cleared before the call to
	 * link_rempassthru() since when messages start flowing to the stream
	 * head (e.g. because of message propagation from the pass queue)
	 * stream head I/O routines may be called with STPLEX flag set.
	 *
	 * When STPLEX is set, nothing may come into the stream from above and
	 * it is safe to do a setq which will change stream head. So, the
	 * correct sequence of actions is:
	 *
	 *	1) Set STRPLUMB
	 *	2) Call link_addpassthru()
	 *	3) Set STPLEX
	 *	4) Call setq and update the stream state
	 *	5) Clear STPLEX
	 *	6) Call link_rempassthru()
	 *	7) Clear STRPLUMB
	 *
	 * The same sequence applies to munlink() code.
	 */
	mutex_enter(&stpdown->sd_lock);
	stpdown->sd_flag |= STRPLUMB;
	mutex_exit(&stpdown->sd_lock);
	/*
	 * Add passthru queue below lower mux. This will block
	 * syncqs of lower mux's read queue during I_LINK/I_UNLINK.
	 */
	passq = link_addpassthru(stpdown);

	mutex_enter(&stpdown->sd_lock);
	stpdown->sd_flag |= STPLEX;
	mutex_exit(&stpdown->sd_lock);

	rq = _RD(stpdown->sd_wrq);
	/*
	 * There may be messages in the streamhead's syncq due to messages
	 * that arrived before link_addpassthru() was done. To avoid
	 * background processing of the syncq happening simultaneous with
	 * setq processing, we disable the streamhead syncq and wait until
	 * existing background thread finishes working on it.
	 */
	wait_sq_svc(rq->q_syncq);
	passyncq = passq->q_syncq;
	if (!(passyncq->sq_flags & SQ_BLOCKED))
		blocksq(passyncq, SQ_BLOCKED, 0);

	ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE);
	ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq));
	rq->q_ptr = _WR(rq)->q_ptr = NULL;

	/* setq might sleep in allocator - avoid holding locks. */
	/* Note: we are holding muxifier here. */

	str = stp->sd_strtab;
	dp = &devimpl[getmajor(vp->v_rdev)];
	ASSERT(dp->d_str == str);

	qflag = dp->d_qflag;
	sqtype = dp->d_sqtype;

	/* create perdm_t if needed */
	if (NEED_DM(dp->d_dmp, qflag))
		dp->d_dmp = hold_dm(str, qflag, sqtype);

	dmp = dp->d_dmp;

	setq(rq, str->st_muxrinit, str->st_muxwinit, dmp, qflag, sqtype,
	    B_TRUE);

	/*
	 * XXX Remove any "odd" messages from the queue.
	 * Keep only M_DATA, M_PROTO, M_PCPROTO.
	 */
	error = strdoioctl(stp, &strioc, FNATIVE,
	    K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp);
	if (error != 0) {
		lbfree(linkp);

		if (!(passyncq->sq_flags & SQ_BLOCKED))
			blocksq(passyncq, SQ_BLOCKED, 0);
		/*
		 * Restore the stream head queue and then remove
		 * the passq. Turn off STPLEX before we turn on
		 * the stream by removing the passq.
		 */
		rq->q_ptr = _WR(rq)->q_ptr = stpdown;
		setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO,
		    B_TRUE);

		mutex_enter(&stpdown->sd_lock);
		stpdown->sd_flag &= ~STPLEX;
		mutex_exit(&stpdown->sd_lock);

		link_rempassthru(passq);

		mutex_enter(&stpdown->sd_lock);
		stpdown->sd_flag &= ~STRPLUMB;
		cv_broadcast(&stpdown->sd_monitor);
		mutex_exit(&stpdown->sd_lock);

		mutex_exit(&muxifier);
		netstack_rele(ss->ss_netstack);
		return (error);
	}
	mutex_enter(&fpdown->f_tlock);
	fpdown->f_count++;
	mutex_exit(&fpdown->f_tlock);

	/*
	 * If we've made it here, the linkage is all set up, so we should
	 * also set up the layered driver linkages.
	 */

	ASSERT((cmd == I_LINK) || (cmd == I_PLINK));
	if (cmd == I_LINK) {
		ldi_mlink_fp(stp, fpdown, lhlink, LINKNORMAL);
	} else {
		ldi_mlink_fp(stp, fpdown, lhlink, LINKPERSIST);
	}

	link_rempassthru(passq);

	mux_addedge(stp, stpdown, linkp->li_lblk.l_index, ss);

	/*
	 * Mark the upper stream as having dependent links
	 * so that strclose can clean it up.
	 */
	if (cmd == I_LINK) {
		mutex_enter(&stp->sd_lock);
		stp->sd_flag |= STRHASLINKS;
		mutex_exit(&stp->sd_lock);
	}
	/*
	 * Wake up any other processes that may have been
	 * waiting on the lower stream. These will all
	 * error out.
	 */
	mutex_enter(&stpdown->sd_lock);
	/* The passthru module is removed so we may release STRPLUMB */
	stpdown->sd_flag &= ~STRPLUMB;
	cv_broadcast(&rq->q_wait);
	cv_broadcast(&_WR(rq)->q_wait);
	cv_broadcast(&stpdown->sd_monitor);
	mutex_exit(&stpdown->sd_lock);
	mutex_exit(&muxifier);
	*rvalp = linkp->li_lblk.l_index;
	netstack_rele(ss->ss_netstack);
	return (0);
}

int
mlink(vnode_t *vp, int cmd, int arg, cred_t *crp, int *rvalp, int lhlink)
{
	int ret;
	struct file *fpdown;

	fpdown = getf(arg);
	ret = mlink_file(vp, cmd, fpdown, crp, rvalp, lhlink);
	if (fpdown != NULL)
		releasef(arg);
	return (ret);
}

/*
 * Unlink a multiplexor link. Stp is the controlling stream for the
 * link, and linkp points to the link's entry in the linkinfo list.
 * The muxifier lock must be held on entry and is dropped on exit.
 *
 * NOTE: Currently it is assumed that the mux will process all the messages
 * sitting on its queue before ACKing the UNLINK. It is the responsibility
 * of the mux to handle all the messages that arrive before UNLINK.
 * If the mux has to send down messages on its lower stream before
 * ACKing I_UNLINK, then it *should* know to handle messages even
 * after the UNLINK is acked (actually it should be able to handle them
 * until we re-block the read side of the pass queue here). If the mux
 * does not open up the lower stream, any messages that arrive during
 * UNLINK will be put in the stream head. In the case of the lower stream
 * opening up, some messages might land in the stream head, depending on
 * when the message arrived and when the read side of the pass queue was
 * re-blocked.
 */
int
munlink(stdata_t *stp, linkinfo_t *linkp, int flag, cred_t *crp, int *rvalp,
    str_stack_t *ss)
{
	struct strioctl strioc;
	struct stdata *stpdown;
	queue_t *rq, *wrq;
	queue_t *passq;
	syncq_t *passyncq;
	int error = 0;
	file_t *fpdown;

	ASSERT(MUTEX_HELD(&muxifier));

	stpdown = linkp->li_fpdown->f_vnode->v_stream;

	/*
	 * See the comment in mlink() concerning STRPLUMB/STPLEX flags.
	 */
	mutex_enter(&stpdown->sd_lock);
	stpdown->sd_flag |= STRPLUMB;
	mutex_exit(&stpdown->sd_lock);

	/*
	 * Add the passthru queue below the lower mux. This will block the
	 * syncqs of the lower mux's read queue during I_LINK/I_UNLINK.
	 */
	passq = link_addpassthru(stpdown);

	if ((flag & LINKTYPEMASK) == LINKNORMAL)
		strioc.ic_cmd = I_UNLINK;
	else
		strioc.ic_cmd = I_PUNLINK;
	strioc.ic_timout = INFTIM;
	strioc.ic_len = sizeof (struct linkblk);
	strioc.ic_dp = (char *)&linkp->li_lblk;

	error = strdoioctl(stp, &strioc, FNATIVE,
	    K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp);

	/*
	 * If there was an error and this is not called via strclose,
	 * return to the user. Otherwise, pretend there was no error
	 * and close the link.
	 */
	if (error) {
		if (flag & LINKCLOSE) {
			cmn_err(CE_WARN, "KERNEL: munlink: could not perform "
			    "unlink ioctl, closing anyway (%d)\n", error);
		} else {
			link_rempassthru(passq);
			mutex_enter(&stpdown->sd_lock);
			stpdown->sd_flag &= ~STRPLUMB;
			cv_broadcast(&stpdown->sd_monitor);
			mutex_exit(&stpdown->sd_lock);
			mutex_exit(&muxifier);
			return (error);
		}
	}

	mux_rmvedge(stp, linkp->li_lblk.l_index, ss);
	fpdown = linkp->li_fpdown;
	lbfree(linkp);

	/*
	 * We go ahead and drop muxifier here--it's a nasty global lock that
	 * can slow others down. It's okay to do so since attempts to mlink()
	 * this stream will be stopped because STPLEX is still set in the
	 * stdata structure, and munlink() is stopped because mux_rmvedge()
	 * and lbfree() have removed it from mux_nodes[] and linkinfo_list,
	 * respectively. Note that we defer the closef() of fpdown until
	 * after we drop muxifier since strclose() can call munlinkall().
	 */
	mutex_exit(&muxifier);

	wrq = stpdown->sd_wrq;
	rq = _RD(wrq);

	/*
	 * Get rid of outstanding service procedure runs, before we make
	 * it a stream head, since a stream head doesn't have any service
	 * procedure.
	 */
	disable_svc(rq);
	wait_svc(rq);

	/*
	 * Since we don't disable the syncq for QPERMOD, we wait for whatever
	 * is queued up to be finished. The mux should take care that nothing
	 * is sent down to this queue. We should do it now as we're going to
	 * block passyncq if it was unblocked.
	 */
	if (wrq->q_flag & QPERMOD) {
		syncq_t	*sq = wrq->q_syncq;

		mutex_enter(SQLOCK(sq));
		while (wrq->q_sqflags & Q_SQQUEUED) {
			sq->sq_flags |= SQ_WANTWAKEUP;
			cv_wait(&sq->sq_wait, SQLOCK(sq));
		}
		mutex_exit(SQLOCK(sq));
	}
	passyncq = passq->q_syncq;
	if (!(passyncq->sq_flags & SQ_BLOCKED)) {

		syncq_t *sq, *outer;

		/*
		 * Messages could be flowing from underneath. We will
		 * block the read side of the passq. This would be
		 * sufficient for QPAIR and QPERQ muxes to ensure
		 * that no data is flowing up into this queue
		 * and hence no thread is active in this instance of
		 * the lower mux. But for QPERMOD and QMTOUTPERIM there
		 * could be messages on the inner and outer/inner
		 * syncqs respectively. We will wait for them to drain.
		 * Because the passq is blocked, messages end up in the
		 * syncq, and qfill_syncq() could possibly end up setting
		 * QFULL, which will access rq->q_flag. Hence, we have to
		 * acquire the QLOCK in setq.
		 *
		 * XXX Messages can also flow from the top into this
		 * queue even though the unlink is over (e.g. some instance
		 * of putnext() called from the top that has still not
		 * accessed this queue; and also putq(lowerq)?).
		 * Solution: how about blocking the l_qtop queue?
		 * Do we really care about such pure D_MP muxes?
		 */

		blocksq(passyncq, SQ_BLOCKED, 0);

		sq = rq->q_syncq;
		if ((outer = sq->sq_outer) != NULL) {

			/*
			 * We just have to wait for the outer sq_count to
			 * drop to zero. As this does not prevent new
			 * messages from entering the outer perimeter, this
			 * is subject to starvation.
			 *
			 * NOTE: Because of the blocksq() above, messages
			 * could be in the inner syncq only because of some
			 * thread holding the outer perimeter exclusively.
			 * Hence it would be sufficient to wait for the
			 * exclusive holder of the outer perimeter to drain
			 * the inner and outer syncqs. But we will not depend
			 * on this feature and hence check the inner syncqs
			 * separately.
			 */
			wait_syncq(outer);
		}

		/*
		 * There could be messages destined for
		 * this queue. Let the exclusive holder
		 * drain it.
		 */

		wait_syncq(sq);
		ASSERT((rq->q_flag & QPERMOD) ||
		    ((rq->q_syncq->sq_head == NULL) &&
		    (_WR(rq)->q_syncq->sq_head == NULL)));
	}

	/*
	 * We haven't taken care of the QPERMOD case yet. QPERMOD is a
	 * special case as we don't disable its syncq or remove it from
	 * the syncq service list.
	 */
	if (rq->q_flag & QPERMOD) {
		syncq_t	*sq = rq->q_syncq;

		mutex_enter(SQLOCK(sq));
		while (rq->q_sqflags & Q_SQQUEUED) {
			sq->sq_flags |= SQ_WANTWAKEUP;
			cv_wait(&sq->sq_wait, SQLOCK(sq));
		}
		mutex_exit(SQLOCK(sq));
	}

	/*
	 * flush_syncq() changes its state only when there are messages to
	 * free, i.e. only when it returns a non-zero value.
	 */
	ASSERT(flush_syncq(rq->q_syncq, rq) == 0);
	ASSERT(flush_syncq(wrq->q_syncq, wrq) == 0);

	/*
	 * Nobody else should know about this queue now.
	 * If the mux did not process the messages before
	 * acking the I_UNLINK, free them now.
	 */

	flushq(rq, FLUSHALL);
	flushq(_WR(rq), FLUSHALL);

	/*
	 * Convert the mux lower queue into a stream head queue.
	 * Turn off STPLEX before we turn on the stream by removing the passq.
	 */
	rq->q_ptr = wrq->q_ptr = stpdown;
	setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO, B_TRUE);

	ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE);
	ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq));

	enable_svc(rq);

	/*
	 * Now it is a proper stream, so STPLEX is cleared. But STRPLUMB
	 * still needs to be set to prevent reopen() of the stream - such
	 * a reopen may try to call the non-existent pass queue open routine
	 * and panic.
	 */
	mutex_enter(&stpdown->sd_lock);
	stpdown->sd_flag &= ~STPLEX;
	mutex_exit(&stpdown->sd_lock);

	ASSERT(((flag & LINKTYPEMASK) == LINKNORMAL) ||
	    ((flag & LINKTYPEMASK) == LINKPERSIST));

	/* clean up the layered driver linkages */
	if ((flag & LINKTYPEMASK) == LINKNORMAL) {
		ldi_munlink_fp(stp, fpdown, LINKNORMAL);
	} else {
		ldi_munlink_fp(stp, fpdown, LINKPERSIST);
	}

	link_rempassthru(passq);

	/*
	 * Now all plumbing changes are finished and STRPLUMB is no
	 * longer needed.
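	 *
	 * Editor's note (illustrative sketch, not part of the original
	 * source): a thread that must wait out a plumbing operation
	 * typically blocks on sd_monitor until STRPLUMB clears, which the
	 * cv_broadcast() below makes possible:
	 *
	 *	mutex_enter(&stpdown->sd_lock);
	 *	while (stpdown->sd_flag & STRPLUMB)
	 *		cv_wait(&stpdown->sd_monitor, &stpdown->sd_lock);
	 *	mutex_exit(&stpdown->sd_lock);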
2248 */ 2249 mutex_enter(&stpdown->sd_lock); 2250 stpdown->sd_flag &= ~STRPLUMB; 2251 cv_broadcast(&stpdown->sd_monitor); 2252 mutex_exit(&stpdown->sd_lock); 2253 2254 (void) closef(fpdown); 2255 return (0); 2256 } 2257 2258 /* 2259 * Unlink all multiplexor links for which stp is the controlling stream. 2260 * Return 0, or a non-zero errno on failure. 2261 */ 2262 int 2263 munlinkall(stdata_t *stp, int flag, cred_t *crp, int *rvalp, str_stack_t *ss) 2264 { 2265 linkinfo_t *linkp; 2266 int error = 0; 2267 2268 mutex_enter(&muxifier); 2269 while (linkp = findlinks(stp, 0, flag, ss)) { 2270 /* 2271 * munlink() releases the muxifier lock. 2272 */ 2273 if (error = munlink(stp, linkp, flag, crp, rvalp, ss)) 2274 return (error); 2275 mutex_enter(&muxifier); 2276 } 2277 mutex_exit(&muxifier); 2278 return (0); 2279 } 2280 2281 /* 2282 * A multiplexor link has been made. Add an 2283 * edge to the directed graph. 2284 */ 2285 void 2286 mux_addedge(stdata_t *upstp, stdata_t *lostp, int muxid, str_stack_t *ss) 2287 { 2288 struct mux_node *np; 2289 struct mux_edge *ep; 2290 major_t upmaj; 2291 major_t lomaj; 2292 2293 upmaj = getmajor(upstp->sd_vnode->v_rdev); 2294 lomaj = getmajor(lostp->sd_vnode->v_rdev); 2295 np = &ss->ss_mux_nodes[upmaj]; 2296 if (np->mn_outp) { 2297 ep = np->mn_outp; 2298 while (ep->me_nextp) 2299 ep = ep->me_nextp; 2300 ep->me_nextp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP); 2301 ep = ep->me_nextp; 2302 } else { 2303 np->mn_outp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP); 2304 ep = np->mn_outp; 2305 } 2306 ep->me_nextp = NULL; 2307 ep->me_muxid = muxid; 2308 /* 2309 * Save the dev_t for the purposes of str_stack_shutdown. 2310 * str_stack_shutdown assumes that the device allows reopen, since 2311 * this dev_t is the one after any cloning by xx_open(). 2312 * Would prefer finding the dev_t from before any cloning, 2313 * but specfs doesn't retain that. 2314 */ 2315 ep->me_dev = upstp->sd_vnode->v_rdev; 2316 if (lostp->sd_vnode->v_type == VFIFO) 2317 ep->me_nodep = NULL; 2318 else 2319 ep->me_nodep = &ss->ss_mux_nodes[lomaj]; 2320 } 2321 2322 /* 2323 * A multiplexor link has been removed. Remove the 2324 * edge in the directed graph. 2325 */ 2326 void 2327 mux_rmvedge(stdata_t *upstp, int muxid, str_stack_t *ss) 2328 { 2329 struct mux_node *np; 2330 struct mux_edge *ep; 2331 struct mux_edge *pep = NULL; 2332 major_t upmaj; 2333 2334 upmaj = getmajor(upstp->sd_vnode->v_rdev); 2335 np = &ss->ss_mux_nodes[upmaj]; 2336 ASSERT(np->mn_outp != NULL); 2337 ep = np->mn_outp; 2338 while (ep) { 2339 if (ep->me_muxid == muxid) { 2340 if (pep) 2341 pep->me_nextp = ep->me_nextp; 2342 else 2343 np->mn_outp = ep->me_nextp; 2344 kmem_free(ep, sizeof (struct mux_edge)); 2345 return; 2346 } 2347 pep = ep; 2348 ep = ep->me_nextp; 2349 } 2350 ASSERT(0); /* should not reach here */ 2351 } 2352 2353 /* 2354 * Translate the device flags (from conf.h) to the corresponding 2355 * qflag and sq_flag (type) values. 
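 *
 * Illustrative example (editor's sketch; the flag combination is
 * hypothetical): a driver declaring D_MP|D_MTPERQ comes out with
 * qflag = QPERQ and, since no inner concurrency modifiers are set, only
 * the default outer concurrency bit SQ_CO in sqtype:
 *
 *	uint32_t qflag, sqtype;
 *
 *	if (devflg_to_qflag(stp, D_MP | D_MTPERQ, &qflag, &sqtype) != 0)
 *		return (EINVAL);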
2356 */ 2357 int 2358 devflg_to_qflag(struct streamtab *stp, uint32_t devflag, uint32_t *qflagp, 2359 uint32_t *sqtypep) 2360 { 2361 uint32_t qflag = 0; 2362 uint32_t sqtype = 0; 2363 2364 if (devflag & _D_OLD) 2365 goto bad; 2366 2367 /* Inner perimeter presence and scope */ 2368 switch (devflag & D_MTINNER_MASK) { 2369 case D_MP: 2370 qflag |= QMTSAFE; 2371 sqtype |= SQ_CI; 2372 break; 2373 case D_MTPERQ|D_MP: 2374 qflag |= QPERQ; 2375 break; 2376 case D_MTQPAIR|D_MP: 2377 qflag |= QPAIR; 2378 break; 2379 case D_MTPERMOD|D_MP: 2380 qflag |= QPERMOD; 2381 break; 2382 default: 2383 goto bad; 2384 } 2385 2386 /* Outer perimeter */ 2387 if (devflag & D_MTOUTPERIM) { 2388 switch (devflag & D_MTINNER_MASK) { 2389 case D_MP: 2390 case D_MTPERQ|D_MP: 2391 case D_MTQPAIR|D_MP: 2392 break; 2393 default: 2394 goto bad; 2395 } 2396 qflag |= QMTOUTPERIM; 2397 } 2398 2399 /* Inner perimeter modifiers */ 2400 if (devflag & D_MTINNER_MOD) { 2401 switch (devflag & D_MTINNER_MASK) { 2402 case D_MP: 2403 goto bad; 2404 default: 2405 break; 2406 } 2407 if (devflag & D_MTPUTSHARED) 2408 sqtype |= SQ_CIPUT; 2409 if (devflag & _D_MTOCSHARED) { 2410 /* 2411 * The code in putnext assumes that it has the 2412 * highest concurrency by not checking sq_count. 2413 * Thus _D_MTOCSHARED can only be supported when 2414 * D_MTPUTSHARED is set. 2415 */ 2416 if (!(devflag & D_MTPUTSHARED)) 2417 goto bad; 2418 sqtype |= SQ_CIOC; 2419 } 2420 if (devflag & _D_MTCBSHARED) { 2421 /* 2422 * The code in putnext assumes that it has the 2423 * highest concurrency by not checking sq_count. 2424 * Thus _D_MTCBSHARED can only be supported when 2425 * D_MTPUTSHARED is set. 2426 */ 2427 if (!(devflag & D_MTPUTSHARED)) 2428 goto bad; 2429 sqtype |= SQ_CICB; 2430 } 2431 if (devflag & _D_MTSVCSHARED) { 2432 /* 2433 * The code in putnext assumes that it has the 2434 * highest concurrency by not checking sq_count. 2435 * Thus _D_MTSVCSHARED can only be supported when 2436 * D_MTPUTSHARED is set. Also _D_MTSVCSHARED is 2437 * supported only for QPERMOD. 2438 */ 2439 if (!(devflag & D_MTPUTSHARED) || !(qflag & QPERMOD)) 2440 goto bad; 2441 sqtype |= SQ_CISVC; 2442 } 2443 } 2444 2445 /* Default outer perimeter concurrency */ 2446 sqtype |= SQ_CO; 2447 2448 /* Outer perimeter modifiers */ 2449 if (devflag & D_MTOCEXCL) { 2450 if (!(devflag & D_MTOUTPERIM)) { 2451 /* No outer perimeter */ 2452 goto bad; 2453 } 2454 sqtype &= ~SQ_COOC; 2455 } 2456 2457 /* Synchronous Streams extended qinit structure */ 2458 if (devflag & D_SYNCSTR) 2459 qflag |= QSYNCSTR; 2460 2461 /* 2462 * Private flag used by a transport module to indicate 2463 * to sockfs that it supports direct-access mode without 2464 * having to go through STREAMS or the transport can use 2465 * sodirect_t sharing to bypass STREAMS for receive-side 2466 * M_DATA processing. 2467 */ 2468 if (devflag & (_D_DIRECT|_D_SODIRECT)) { 2469 /* Reject unless the module is fully-MT (no perimeter) */ 2470 if ((qflag & QMT_TYPEMASK) != QMTSAFE) 2471 goto bad; 2472 if (devflag & _D_DIRECT) 2473 qflag |= _QDIRECT; 2474 if (devflag & _D_SODIRECT) 2475 qflag |= _QSODIRECT; 2476 } 2477 2478 *qflagp = qflag; 2479 *sqtypep = sqtype; 2480 return (0); 2481 2482 bad: 2483 cmn_err(CE_WARN, 2484 "stropen: bad MT flags (0x%x) in driver '%s'", 2485 (int)(qflag & D_MTSAFETY_MASK), 2486 stp->st_rdinit->qi_minfo->mi_idname); 2487 2488 return (EINVAL); 2489 } 2490 2491 /* 2492 * Set the interface values for a pair of queues (qinit structure, 2493 * packet sizes, water marks). 
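 *
 * For illustration (taken from the mlink()/munlink() paths above),
 * restoring a mux lower queue to a plain stream head queue looks like:
 *
 *	setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO, B_TRUE);
 *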
2494 * setq assumes that the caller does not have a claim (entersq or claimq) 2495 * on the queue. 2496 */ 2497 void 2498 setq(queue_t *rq, struct qinit *rinit, struct qinit *winit, 2499 perdm_t *dmp, uint32_t qflag, uint32_t sqtype, boolean_t lock_needed) 2500 { 2501 queue_t *wq; 2502 syncq_t *sq, *outer; 2503 2504 ASSERT(rq->q_flag & QREADR); 2505 ASSERT((qflag & QMT_TYPEMASK) != 0); 2506 IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL); 2507 2508 wq = _WR(rq); 2509 rq->q_qinfo = rinit; 2510 rq->q_hiwat = rinit->qi_minfo->mi_hiwat; 2511 rq->q_lowat = rinit->qi_minfo->mi_lowat; 2512 rq->q_minpsz = rinit->qi_minfo->mi_minpsz; 2513 rq->q_maxpsz = rinit->qi_minfo->mi_maxpsz; 2514 wq->q_qinfo = winit; 2515 wq->q_hiwat = winit->qi_minfo->mi_hiwat; 2516 wq->q_lowat = winit->qi_minfo->mi_lowat; 2517 wq->q_minpsz = winit->qi_minfo->mi_minpsz; 2518 wq->q_maxpsz = winit->qi_minfo->mi_maxpsz; 2519 2520 /* Remove old syncqs */ 2521 sq = rq->q_syncq; 2522 outer = sq->sq_outer; 2523 if (outer != NULL) { 2524 ASSERT(wq->q_syncq->sq_outer == outer); 2525 outer_remove(outer, rq->q_syncq); 2526 if (wq->q_syncq != rq->q_syncq) 2527 outer_remove(outer, wq->q_syncq); 2528 } 2529 ASSERT(sq->sq_outer == NULL); 2530 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL); 2531 2532 if (sq != SQ(rq)) { 2533 if (!(rq->q_flag & QPERMOD)) 2534 free_syncq(sq); 2535 if (wq->q_syncq == rq->q_syncq) 2536 wq->q_syncq = NULL; 2537 rq->q_syncq = NULL; 2538 } 2539 if (wq->q_syncq != NULL && wq->q_syncq != sq && 2540 wq->q_syncq != SQ(rq)) { 2541 free_syncq(wq->q_syncq); 2542 wq->q_syncq = NULL; 2543 } 2544 ASSERT(rq->q_syncq == NULL || (rq->q_syncq->sq_head == NULL && 2545 rq->q_syncq->sq_tail == NULL)); 2546 ASSERT(wq->q_syncq == NULL || (wq->q_syncq->sq_head == NULL && 2547 wq->q_syncq->sq_tail == NULL)); 2548 2549 if (!(rq->q_flag & QPERMOD) && 2550 rq->q_syncq != NULL && rq->q_syncq->sq_ciputctrl != NULL) { 2551 ASSERT(rq->q_syncq->sq_nciputctrl == n_ciputctrl - 1); 2552 SUMCHECK_CIPUTCTRL_COUNTS(rq->q_syncq->sq_ciputctrl, 2553 rq->q_syncq->sq_nciputctrl, 0); 2554 ASSERT(ciputctrl_cache != NULL); 2555 kmem_cache_free(ciputctrl_cache, rq->q_syncq->sq_ciputctrl); 2556 rq->q_syncq->sq_ciputctrl = NULL; 2557 rq->q_syncq->sq_nciputctrl = 0; 2558 } 2559 2560 if (!(wq->q_flag & QPERMOD) && 2561 wq->q_syncq != NULL && wq->q_syncq->sq_ciputctrl != NULL) { 2562 ASSERT(wq->q_syncq->sq_nciputctrl == n_ciputctrl - 1); 2563 SUMCHECK_CIPUTCTRL_COUNTS(wq->q_syncq->sq_ciputctrl, 2564 wq->q_syncq->sq_nciputctrl, 0); 2565 ASSERT(ciputctrl_cache != NULL); 2566 kmem_cache_free(ciputctrl_cache, wq->q_syncq->sq_ciputctrl); 2567 wq->q_syncq->sq_ciputctrl = NULL; 2568 wq->q_syncq->sq_nciputctrl = 0; 2569 } 2570 2571 sq = SQ(rq); 2572 ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL); 2573 ASSERT(sq->sq_outer == NULL); 2574 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL); 2575 2576 /* 2577 * Create syncqs based on qflag and sqtype. Set the SQ_TYPES_IN_FLAGS 2578 * bits in sq_flag based on the sqtype. 2579 */ 2580 ASSERT((sq->sq_flags & ~SQ_TYPES_IN_FLAGS) == 0); 2581 2582 rq->q_syncq = wq->q_syncq = sq; 2583 sq->sq_type = sqtype; 2584 sq->sq_flags = (sqtype & SQ_TYPES_IN_FLAGS); 2585 2586 /* 2587 * We are making sq_svcflags zero, 2588 * resetting SQ_DISABLED in case it was set by 2589 * wait_svc() in the munlink path. 
2590 * 2591 */ 2592 ASSERT((sq->sq_svcflags & SQ_SERVICE) == 0); 2593 sq->sq_svcflags = 0; 2594 2595 /* 2596 * We need to acquire the lock here for the mlink and munlink case, 2597 * where canputnext, backenable, etc can access the q_flag. 2598 */ 2599 if (lock_needed) { 2600 mutex_enter(QLOCK(rq)); 2601 rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2602 mutex_exit(QLOCK(rq)); 2603 mutex_enter(QLOCK(wq)); 2604 wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2605 mutex_exit(QLOCK(wq)); 2606 } else { 2607 rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2608 wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2609 } 2610 2611 if (qflag & QPERQ) { 2612 /* Allocate a separate syncq for the write side */ 2613 sq = new_syncq(); 2614 sq->sq_type = rq->q_syncq->sq_type; 2615 sq->sq_flags = rq->q_syncq->sq_flags; 2616 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL && 2617 sq->sq_oprev == NULL); 2618 wq->q_syncq = sq; 2619 } 2620 if (qflag & QPERMOD) { 2621 sq = dmp->dm_sq; 2622 2623 /* 2624 * Assert that we do have an inner perimeter syncq and that it 2625 * does not have an outer perimeter associated with it. 2626 */ 2627 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL && 2628 sq->sq_oprev == NULL); 2629 rq->q_syncq = wq->q_syncq = sq; 2630 } 2631 if (qflag & QMTOUTPERIM) { 2632 outer = dmp->dm_sq; 2633 2634 ASSERT(outer->sq_outer == NULL); 2635 outer_insert(outer, rq->q_syncq); 2636 if (wq->q_syncq != rq->q_syncq) 2637 outer_insert(outer, wq->q_syncq); 2638 } 2639 ASSERT((rq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) == 2640 (rq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS)); 2641 ASSERT((wq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) == 2642 (wq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS)); 2643 ASSERT((rq->q_flag & QMT_TYPEMASK) == (qflag & QMT_TYPEMASK)); 2644 2645 /* 2646 * Initialize struio() types. 2647 */ 2648 rq->q_struiot = 2649 (rq->q_flag & QSYNCSTR) ? rinit->qi_struiot : STRUIOT_NONE; 2650 wq->q_struiot = 2651 (wq->q_flag & QSYNCSTR) ? 
winit->qi_struiot : STRUIOT_NONE; 2652 } 2653 2654 perdm_t * 2655 hold_dm(struct streamtab *str, uint32_t qflag, uint32_t sqtype) 2656 { 2657 syncq_t *sq; 2658 perdm_t **pp; 2659 perdm_t *p; 2660 perdm_t *dmp; 2661 2662 ASSERT(str != NULL); 2663 ASSERT(qflag & (QPERMOD | QMTOUTPERIM)); 2664 2665 rw_enter(&perdm_rwlock, RW_READER); 2666 for (p = perdm_list; p != NULL; p = p->dm_next) { 2667 if (p->dm_str == str) { /* found one */ 2668 atomic_add_32(&(p->dm_ref), 1); 2669 rw_exit(&perdm_rwlock); 2670 return (p); 2671 } 2672 } 2673 rw_exit(&perdm_rwlock); 2674 2675 sq = new_syncq(); 2676 if (qflag & QPERMOD) { 2677 sq->sq_type = sqtype | SQ_PERMOD; 2678 sq->sq_flags = sqtype & SQ_TYPES_IN_FLAGS; 2679 } else { 2680 ASSERT(qflag & QMTOUTPERIM); 2681 sq->sq_onext = sq->sq_oprev = sq; 2682 } 2683 2684 dmp = kmem_alloc(sizeof (perdm_t), KM_SLEEP); 2685 dmp->dm_sq = sq; 2686 dmp->dm_str = str; 2687 dmp->dm_ref = 1; 2688 dmp->dm_next = NULL; 2689 2690 rw_enter(&perdm_rwlock, RW_WRITER); 2691 for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next)) { 2692 if (p->dm_str == str) { /* already present */ 2693 p->dm_ref++; 2694 rw_exit(&perdm_rwlock); 2695 free_syncq(sq); 2696 kmem_free(dmp, sizeof (perdm_t)); 2697 return (p); 2698 } 2699 } 2700 2701 *pp = dmp; 2702 rw_exit(&perdm_rwlock); 2703 return (dmp); 2704 } 2705 2706 void 2707 rele_dm(perdm_t *dmp) 2708 { 2709 perdm_t **pp; 2710 perdm_t *p; 2711 2712 rw_enter(&perdm_rwlock, RW_WRITER); 2713 ASSERT(dmp->dm_ref > 0); 2714 2715 if (--dmp->dm_ref > 0) { 2716 rw_exit(&perdm_rwlock); 2717 return; 2718 } 2719 2720 for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next)) 2721 if (p == dmp) 2722 break; 2723 ASSERT(p == dmp); 2724 *pp = p->dm_next; 2725 rw_exit(&perdm_rwlock); 2726 2727 /* 2728 * Wait for any background processing that relies on the 2729 * syncq to complete before it is freed. 2730 */ 2731 wait_sq_svc(p->dm_sq); 2732 free_syncq(p->dm_sq); 2733 kmem_free(p, sizeof (perdm_t)); 2734 } 2735 2736 /* 2737 * Make a protocol message given control and data buffers. 2738 * n.b., this can block; be careful of what locks you hold when calling it. 2739 * 2740 * If sd_maxblk is less than *iosize this routine can fail part way through 2741 * (due to an allocation failure). In this case on return *iosize will contain 2742 * the amount that was consumed. Otherwise *iosize will not be modified 2743 * i.e. it will contain the amount that was consumed. 2744 */ 2745 int 2746 strmakemsg( 2747 struct strbuf *mctl, 2748 ssize_t *iosize, 2749 struct uio *uiop, 2750 stdata_t *stp, 2751 int32_t flag, 2752 mblk_t **mpp) 2753 { 2754 mblk_t *mpctl = NULL; 2755 mblk_t *mpdata = NULL; 2756 int error; 2757 2758 ASSERT(uiop != NULL); 2759 2760 *mpp = NULL; 2761 /* Create control part, if any */ 2762 if ((mctl != NULL) && (mctl->len >= 0)) { 2763 error = strmakectl(mctl, flag, uiop->uio_fmode, &mpctl); 2764 if (error) 2765 return (error); 2766 } 2767 /* Create data part, if any */ 2768 if (*iosize >= 0) { 2769 error = strmakedata(iosize, uiop, stp, flag, &mpdata); 2770 if (error) { 2771 freemsg(mpctl); 2772 return (error); 2773 } 2774 } 2775 if (mpctl != NULL) { 2776 if (mpdata != NULL) 2777 linkb(mpctl, mpdata); 2778 *mpp = mpctl; 2779 } else { 2780 *mpp = mpdata; 2781 } 2782 return (0); 2783 } 2784 2785 /* 2786 * Make the control part of a protocol message given a control buffer. 2787 * n.b., this can block; be careful of what locks you hold when calling it. 
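 *
 * Illustrative call (editor's sketch; ctlbuf and uiop are hypothetical
 * locals): building a high-priority M_PCPROTO control block:
 *
 *	mblk_t *mp;
 *	int err;
 *
 *	err = strmakectl(&ctlbuf, RS_HIPRI, uiop->uio_fmode, &mp);
 *
 * Without RS_HIPRI in the flag argument the block is typed M_PROTO
 * instead.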
2788 */ 2789 int 2790 strmakectl( 2791 struct strbuf *mctl, 2792 int32_t flag, 2793 int32_t fflag, 2794 mblk_t **mpp) 2795 { 2796 mblk_t *bp = NULL; 2797 unsigned char msgtype; 2798 int error = 0; 2799 2800 *mpp = NULL; 2801 /* 2802 * Create control part of message, if any. 2803 */ 2804 if ((mctl != NULL) && (mctl->len >= 0)) { 2805 caddr_t base; 2806 int ctlcount; 2807 int allocsz; 2808 2809 if (flag & RS_HIPRI) 2810 msgtype = M_PCPROTO; 2811 else 2812 msgtype = M_PROTO; 2813 2814 ctlcount = mctl->len; 2815 base = mctl->buf; 2816 2817 /* 2818 * Give modules a better chance to reuse M_PROTO/M_PCPROTO 2819 * blocks by increasing the size to something more usable. 2820 */ 2821 allocsz = MAX(ctlcount, 64); 2822 2823 /* 2824 * Range checking has already been done; simply try 2825 * to allocate a message block for the ctl part. 2826 */ 2827 while (!(bp = allocb(allocsz, BPRI_MED))) { 2828 if (fflag & (FNDELAY|FNONBLOCK)) 2829 return (EAGAIN); 2830 if (error = strwaitbuf(allocsz, BPRI_MED)) 2831 return (error); 2832 } 2833 2834 bp->b_datap->db_type = msgtype; 2835 if (copyin(base, bp->b_wptr, ctlcount)) { 2836 freeb(bp); 2837 return (EFAULT); 2838 } 2839 bp->b_wptr += ctlcount; 2840 } 2841 *mpp = bp; 2842 return (0); 2843 } 2844 2845 /* 2846 * Make a protocol message given data buffers. 2847 * n.b., this can block; be careful of what locks you hold when calling it. 2848 * 2849 * If sd_maxblk is less than *iosize this routine can fail part way through 2850 * (due to an allocation failure). In this case on return *iosize will contain 2851 * the amount that was consumed. Otherwise *iosize will not be modified 2852 * i.e. it will contain the amount that was consumed. 2853 */ 2854 int 2855 strmakedata( 2856 ssize_t *iosize, 2857 struct uio *uiop, 2858 stdata_t *stp, 2859 int32_t flag, 2860 mblk_t **mpp) 2861 { 2862 mblk_t *mp = NULL; 2863 mblk_t *bp; 2864 int wroff = (int)stp->sd_wroff; 2865 int tail_len = (int)stp->sd_tail; 2866 int extra = wroff + tail_len; 2867 int error = 0; 2868 ssize_t maxblk; 2869 ssize_t count = *iosize; 2870 cred_t *cr = CRED(); 2871 2872 *mpp = NULL; 2873 if (count < 0) 2874 return (0); 2875 2876 maxblk = stp->sd_maxblk; 2877 if (maxblk == INFPSZ) 2878 maxblk = count; 2879 2880 /* 2881 * Create data part of message, if any. 2882 */ 2883 do { 2884 ssize_t size; 2885 dblk_t *dp; 2886 2887 ASSERT(uiop); 2888 2889 size = MIN(count, maxblk); 2890 2891 while ((bp = allocb_cred(size + extra, cr)) == NULL) { 2892 error = EAGAIN; 2893 if ((uiop->uio_fmode & (FNDELAY|FNONBLOCK)) || 2894 (error = strwaitbuf(size + extra, BPRI_MED)) != 0) { 2895 if (count == *iosize) { 2896 freemsg(mp); 2897 return (error); 2898 } else { 2899 *iosize -= count; 2900 *mpp = mp; 2901 return (0); 2902 } 2903 } 2904 } 2905 dp = bp->b_datap; 2906 dp->db_cpid = curproc->p_pid; 2907 ASSERT(wroff <= dp->db_lim - bp->b_wptr); 2908 bp->b_wptr = bp->b_rptr = bp->b_rptr + wroff; 2909 2910 if (flag & STRUIO_POSTPONE) { 2911 /* 2912 * Setup the stream uio portion of the 2913 * dblk for subsequent use by struioget(). 
2914 */ 2915 dp->db_struioflag = STRUIO_SPEC; 2916 dp->db_cksumstart = 0; 2917 dp->db_cksumstuff = 0; 2918 dp->db_cksumend = size; 2919 *(long long *)dp->db_struioun.data = 0ll; 2920 bp->b_wptr += size; 2921 } else { 2922 if (stp->sd_copyflag & STRCOPYCACHED) 2923 uiop->uio_extflg |= UIO_COPY_CACHED; 2924 2925 if (size != 0) { 2926 error = uiomove(bp->b_wptr, size, UIO_WRITE, 2927 uiop); 2928 if (error != 0) { 2929 freeb(bp); 2930 freemsg(mp); 2931 return (error); 2932 } 2933 } 2934 bp->b_wptr += size; 2935 2936 if (stp->sd_wputdatafunc != NULL) { 2937 mblk_t *newbp; 2938 2939 newbp = (stp->sd_wputdatafunc)(stp->sd_vnode, 2940 bp, NULL, NULL, NULL, NULL); 2941 if (newbp == NULL) { 2942 freeb(bp); 2943 freemsg(mp); 2944 return (ECOMM); 2945 } 2946 bp = newbp; 2947 } 2948 } 2949 2950 count -= size; 2951 2952 if (mp == NULL) 2953 mp = bp; 2954 else 2955 linkb(mp, bp); 2956 } while (count > 0); 2957 2958 *mpp = mp; 2959 return (0); 2960 } 2961 2962 /* 2963 * Wait for a buffer to become available. Return non-zero errno 2964 * if not able to wait, 0 if buffer is probably there. 2965 */ 2966 int 2967 strwaitbuf(size_t size, int pri) 2968 { 2969 bufcall_id_t id; 2970 2971 mutex_enter(&bcall_monitor); 2972 if ((id = bufcall(size, pri, (void (*)(void *))cv_broadcast, 2973 &ttoproc(curthread)->p_flag_cv)) == 0) { 2974 mutex_exit(&bcall_monitor); 2975 return (ENOSR); 2976 } 2977 if (!cv_wait_sig(&(ttoproc(curthread)->p_flag_cv), &bcall_monitor)) { 2978 unbufcall(id); 2979 mutex_exit(&bcall_monitor); 2980 return (EINTR); 2981 } 2982 unbufcall(id); 2983 mutex_exit(&bcall_monitor); 2984 return (0); 2985 } 2986 2987 /* 2988 * This function waits for a read or write event to happen on a stream. 2989 * fmode can specify FNDELAY and/or FNONBLOCK. 2990 * The timeout is in ms with -1 meaning infinite. 2991 * The flag values work as follows: 2992 * READWAIT Check for read side errors, send M_READ 2993 * GETWAIT Check for read side errors, no M_READ 2994 * WRITEWAIT Check for write side errors. 2995 * NOINTR Do not return error if nonblocking or timeout. 2996 * STR_NOERROR Ignore all errors except STPLEX. 2997 * STR_NOSIG Ignore/hold signals during the duration of the call. 2998 * STR_PEEK Pass through the strgeterr(). 2999 */ 3000 int 3001 strwaitq(stdata_t *stp, int flag, ssize_t count, int fmode, clock_t timout, 3002 int *done) 3003 { 3004 int slpflg, errs; 3005 int error; 3006 kcondvar_t *sleepon; 3007 mblk_t *mp; 3008 ssize_t *rd_count; 3009 clock_t rval; 3010 3011 ASSERT(MUTEX_HELD(&stp->sd_lock)); 3012 if ((flag & READWAIT) || (flag & GETWAIT)) { 3013 slpflg = RSLEEP; 3014 sleepon = &_RD(stp->sd_wrq)->q_wait; 3015 errs = STRDERR|STPLEX; 3016 } else { 3017 slpflg = WSLEEP; 3018 sleepon = &stp->sd_wrq->q_wait; 3019 errs = STWRERR|STRHUP|STPLEX; 3020 } 3021 if (flag & STR_NOERROR) 3022 errs = STPLEX; 3023 3024 if (stp->sd_wakeq & slpflg) { 3025 /* 3026 * A strwakeq() is pending, no need to sleep. 3027 */ 3028 stp->sd_wakeq &= ~slpflg; 3029 *done = 0; 3030 return (0); 3031 } 3032 3033 if (fmode & (FNDELAY|FNONBLOCK)) { 3034 if (!(flag & NOINTR)) 3035 error = EAGAIN; 3036 else 3037 error = 0; 3038 *done = 1; 3039 return (error); 3040 } 3041 3042 if (stp->sd_flag & errs) { 3043 /* 3044 * Check for errors before going to sleep since the 3045 * caller might not have checked this while holding 3046 * sd_lock. 
3047 */ 3048 error = strgeterr(stp, errs, (flag & STR_PEEK)); 3049 if (error != 0) { 3050 *done = 1; 3051 return (error); 3052 } 3053 } 3054 3055 /* 3056 * If any module downstream has requested read notification 3057 * by setting SNDMREAD flag using M_SETOPTS, send a message 3058 * down stream. 3059 */ 3060 if ((flag & READWAIT) && (stp->sd_flag & SNDMREAD)) { 3061 mutex_exit(&stp->sd_lock); 3062 if (!(mp = allocb_wait(sizeof (ssize_t), BPRI_MED, 3063 (flag & STR_NOSIG), &error))) { 3064 mutex_enter(&stp->sd_lock); 3065 *done = 1; 3066 return (error); 3067 } 3068 mp->b_datap->db_type = M_READ; 3069 rd_count = (ssize_t *)mp->b_wptr; 3070 *rd_count = count; 3071 mp->b_wptr += sizeof (ssize_t); 3072 /* 3073 * Send the number of bytes requested by the 3074 * read as the argument to M_READ. 3075 */ 3076 stream_willservice(stp); 3077 putnext(stp->sd_wrq, mp); 3078 stream_runservice(stp); 3079 mutex_enter(&stp->sd_lock); 3080 3081 /* 3082 * If any data arrived due to inline processing 3083 * of putnext(), don't sleep. 3084 */ 3085 if (_RD(stp->sd_wrq)->q_first != NULL) { 3086 *done = 0; 3087 return (0); 3088 } 3089 } 3090 3091 stp->sd_flag |= slpflg; 3092 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAIT2, 3093 "strwaitq sleeps (2):%p, %X, %lX, %X, %p", 3094 stp, flag, count, fmode, done); 3095 3096 rval = str_cv_wait(sleepon, &stp->sd_lock, timout, flag & STR_NOSIG); 3097 if (rval > 0) { 3098 /* EMPTY */ 3099 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAKE2, 3100 "strwaitq awakes(2):%X, %X, %X, %X, %X", 3101 stp, flag, count, fmode, done); 3102 } else if (rval == 0) { 3103 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_INTR2, 3104 "strwaitq interrupt #2:%p, %X, %lX, %X, %p", 3105 stp, flag, count, fmode, done); 3106 stp->sd_flag &= ~slpflg; 3107 cv_broadcast(sleepon); 3108 if (!(flag & NOINTR)) 3109 error = EINTR; 3110 else 3111 error = 0; 3112 *done = 1; 3113 return (error); 3114 } else { 3115 /* timeout */ 3116 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_TIME, 3117 "strwaitq timeout:%p, %X, %lX, %X, %p", 3118 stp, flag, count, fmode, done); 3119 *done = 1; 3120 if (!(flag & NOINTR)) 3121 return (ETIME); 3122 else 3123 return (0); 3124 } 3125 /* 3126 * If the caller implements delayed errors (i.e. queued after data) 3127 * we can not check for errors here since data as well as an 3128 * error might have arrived at the stream head. We return to 3129 * have the caller check the read queue before checking for errors. 3130 */ 3131 if ((stp->sd_flag & errs) && !(flag & STR_DELAYERR)) { 3132 error = strgeterr(stp, errs, (flag & STR_PEEK)); 3133 if (error != 0) { 3134 *done = 1; 3135 return (error); 3136 } 3137 } 3138 *done = 0; 3139 return (0); 3140 } 3141 3142 /* 3143 * Perform job control discipline access checks. 3144 * Return 0 for success and the errno for failure. 
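 *
 * Typical use (editor's illustration): callers already holding sd_lock
 * gate an I/O path on the result, e.g.:
 *
 *	if ((error = straccess(stp, JCREAD)) != 0)
 *		return (error);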
3145 */ 3146 3147 #define cantsend(p, t, sig) \ 3148 (sigismember(&(p)->p_ignore, sig) || signal_is_blocked((t), sig)) 3149 3150 int 3151 straccess(struct stdata *stp, enum jcaccess mode) 3152 { 3153 extern kcondvar_t lbolt_cv; /* XXX: should be in a header file */ 3154 kthread_t *t = curthread; 3155 proc_t *p = ttoproc(t); 3156 sess_t *sp; 3157 3158 ASSERT(mutex_owned(&stp->sd_lock)); 3159 3160 if (stp->sd_sidp == NULL || stp->sd_vnode->v_type == VFIFO) 3161 return (0); 3162 3163 mutex_enter(&p->p_lock); /* protects p_pgidp */ 3164 3165 for (;;) { 3166 mutex_enter(&p->p_splock); /* protects p->p_sessp */ 3167 sp = p->p_sessp; 3168 mutex_enter(&sp->s_lock); /* protects sp->* */ 3169 3170 /* 3171 * If this is not the calling process's controlling terminal 3172 * or if the calling process is already in the foreground 3173 * then allow access. 3174 */ 3175 if (sp->s_dev != stp->sd_vnode->v_rdev || 3176 p->p_pgidp == stp->sd_pgidp) { 3177 mutex_exit(&sp->s_lock); 3178 mutex_exit(&p->p_splock); 3179 mutex_exit(&p->p_lock); 3180 return (0); 3181 } 3182 3183 /* 3184 * Check to see if controlling terminal has been deallocated. 3185 */ 3186 if (sp->s_vp == NULL) { 3187 if (!cantsend(p, t, SIGHUP)) 3188 sigtoproc(p, t, SIGHUP); 3189 mutex_exit(&sp->s_lock); 3190 mutex_exit(&p->p_splock); 3191 mutex_exit(&p->p_lock); 3192 return (EIO); 3193 } 3194 3195 mutex_exit(&sp->s_lock); 3196 mutex_exit(&p->p_splock); 3197 3198 if (mode == JCGETP) { 3199 mutex_exit(&p->p_lock); 3200 return (0); 3201 } 3202 3203 if (mode == JCREAD) { 3204 if (p->p_detached || cantsend(p, t, SIGTTIN)) { 3205 mutex_exit(&p->p_lock); 3206 return (EIO); 3207 } 3208 mutex_exit(&p->p_lock); 3209 mutex_exit(&stp->sd_lock); 3210 pgsignal(p->p_pgidp, SIGTTIN); 3211 mutex_enter(&stp->sd_lock); 3212 mutex_enter(&p->p_lock); 3213 } else { /* mode == JCWRITE or JCSETP */ 3214 if ((mode == JCWRITE && !(stp->sd_flag & STRTOSTOP)) || 3215 cantsend(p, t, SIGTTOU)) { 3216 mutex_exit(&p->p_lock); 3217 return (0); 3218 } 3219 if (p->p_detached) { 3220 mutex_exit(&p->p_lock); 3221 return (EIO); 3222 } 3223 mutex_exit(&p->p_lock); 3224 mutex_exit(&stp->sd_lock); 3225 pgsignal(p->p_pgidp, SIGTTOU); 3226 mutex_enter(&stp->sd_lock); 3227 mutex_enter(&p->p_lock); 3228 } 3229 3230 /* 3231 * We call cv_wait_sig_swap() to cause the appropriate 3232 * action for the jobcontrol signal to take place. 3233 * If the signal is being caught, we will take the 3234 * EINTR error return. Otherwise, the default action 3235 * of causing the process to stop will take place. 3236 * In this case, we rely on the periodic cv_broadcast() on 3237 * &lbolt_cv to wake us up to loop around and test again. 3238 * We can't get here if the signal is ignored or 3239 * if the current thread is blocking the signal. 3240 */ 3241 mutex_exit(&stp->sd_lock); 3242 if (!cv_wait_sig_swap(&lbolt_cv, &p->p_lock)) { 3243 mutex_exit(&p->p_lock); 3244 mutex_enter(&stp->sd_lock); 3245 return (EINTR); 3246 } 3247 mutex_exit(&p->p_lock); 3248 mutex_enter(&stp->sd_lock); 3249 mutex_enter(&p->p_lock); 3250 } 3251 } 3252 3253 /* 3254 * Return size of message of block type (bp->b_datap->db_type) 3255 */ 3256 size_t 3257 xmsgsize(mblk_t *bp) 3258 { 3259 unsigned char type; 3260 size_t count = 0; 3261 3262 type = bp->b_datap->db_type; 3263 3264 for (; bp; bp = bp->b_cont) { 3265 if (type != bp->b_datap->db_type) 3266 break; 3267 ASSERT(bp->b_wptr >= bp->b_rptr); 3268 count += bp->b_wptr - bp->b_rptr; 3269 } 3270 return (count); 3271 } 3272 3273 /* 3274 * Allocate a stream head. 
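 *
 * Editor's sketch of the expected pairing with allocq()/shfree()
 * (illustrative only; the real open/close paths do more work):
 *
 *	queue_t *qp = allocq();
 *	stdata_t *stp = shalloc(qp);
 *	...
 *	shfree(stp);
 *	freeq(qp);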
3275 */ 3276 struct stdata * 3277 shalloc(queue_t *qp) 3278 { 3279 stdata_t *stp; 3280 3281 stp = kmem_cache_alloc(stream_head_cache, KM_SLEEP); 3282 3283 stp->sd_wrq = _WR(qp); 3284 stp->sd_strtab = NULL; 3285 stp->sd_iocid = 0; 3286 stp->sd_mate = NULL; 3287 stp->sd_freezer = NULL; 3288 stp->sd_refcnt = 0; 3289 stp->sd_wakeq = 0; 3290 stp->sd_anchor = 0; 3291 stp->sd_struiowrq = NULL; 3292 stp->sd_struiordq = NULL; 3293 stp->sd_struiodnak = 0; 3294 stp->sd_struionak = NULL; 3295 stp->sd_t_audit_data = NULL; 3296 stp->sd_rput_opt = 0; 3297 stp->sd_wput_opt = 0; 3298 stp->sd_read_opt = 0; 3299 stp->sd_rprotofunc = strrput_proto; 3300 stp->sd_rmiscfunc = strrput_misc; 3301 stp->sd_rderrfunc = stp->sd_wrerrfunc = NULL; 3302 stp->sd_rputdatafunc = stp->sd_wputdatafunc = NULL; 3303 stp->sd_ciputctrl = NULL; 3304 stp->sd_nciputctrl = 0; 3305 stp->sd_qhead = NULL; 3306 stp->sd_qtail = NULL; 3307 stp->sd_servid = NULL; 3308 stp->sd_nqueues = 0; 3309 stp->sd_svcflags = 0; 3310 stp->sd_copyflag = 0; 3311 3312 return (stp); 3313 } 3314 3315 /* 3316 * Free a stream head. 3317 */ 3318 void 3319 shfree(stdata_t *stp) 3320 { 3321 ASSERT(MUTEX_NOT_HELD(&stp->sd_lock)); 3322 3323 stp->sd_wrq = NULL; 3324 3325 mutex_enter(&stp->sd_qlock); 3326 while (stp->sd_svcflags & STRS_SCHEDULED) { 3327 STRSTAT(strwaits); 3328 cv_wait(&stp->sd_qcv, &stp->sd_qlock); 3329 } 3330 mutex_exit(&stp->sd_qlock); 3331 3332 if (stp->sd_ciputctrl != NULL) { 3333 ASSERT(stp->sd_nciputctrl == n_ciputctrl - 1); 3334 SUMCHECK_CIPUTCTRL_COUNTS(stp->sd_ciputctrl, 3335 stp->sd_nciputctrl, 0); 3336 ASSERT(ciputctrl_cache != NULL); 3337 kmem_cache_free(ciputctrl_cache, stp->sd_ciputctrl); 3338 stp->sd_ciputctrl = NULL; 3339 stp->sd_nciputctrl = 0; 3340 } 3341 ASSERT(stp->sd_qhead == NULL); 3342 ASSERT(stp->sd_qtail == NULL); 3343 ASSERT(stp->sd_nqueues == 0); 3344 kmem_cache_free(stream_head_cache, stp); 3345 } 3346 3347 /* 3348 * Allocate a pair of queues and a syncq for the pair 3349 */ 3350 queue_t * 3351 allocq(void) 3352 { 3353 queinfo_t *qip; 3354 queue_t *qp, *wqp; 3355 syncq_t *sq; 3356 3357 qip = kmem_cache_alloc(queue_cache, KM_SLEEP); 3358 3359 qp = &qip->qu_rqueue; 3360 wqp = &qip->qu_wqueue; 3361 sq = &qip->qu_syncq; 3362 3363 qp->q_last = NULL; 3364 qp->q_next = NULL; 3365 qp->q_ptr = NULL; 3366 qp->q_flag = QUSE | QREADR; 3367 qp->q_bandp = NULL; 3368 qp->q_stream = NULL; 3369 qp->q_syncq = sq; 3370 qp->q_nband = 0; 3371 qp->q_nfsrv = NULL; 3372 qp->q_draining = 0; 3373 qp->q_syncqmsgs = 0; 3374 qp->q_spri = 0; 3375 qp->q_qtstamp = 0; 3376 qp->q_sqtstamp = 0; 3377 qp->q_fp = NULL; 3378 3379 wqp->q_last = NULL; 3380 wqp->q_next = NULL; 3381 wqp->q_ptr = NULL; 3382 wqp->q_flag = QUSE; 3383 wqp->q_bandp = NULL; 3384 wqp->q_stream = NULL; 3385 wqp->q_syncq = sq; 3386 wqp->q_nband = 0; 3387 wqp->q_nfsrv = NULL; 3388 wqp->q_draining = 0; 3389 wqp->q_syncqmsgs = 0; 3390 wqp->q_qtstamp = 0; 3391 wqp->q_sqtstamp = 0; 3392 wqp->q_spri = 0; 3393 3394 sq->sq_count = 0; 3395 sq->sq_rmqcount = 0; 3396 sq->sq_flags = 0; 3397 sq->sq_type = 0; 3398 sq->sq_callbflags = 0; 3399 sq->sq_cancelid = 0; 3400 sq->sq_ciputctrl = NULL; 3401 sq->sq_nciputctrl = 0; 3402 sq->sq_needexcl = 0; 3403 sq->sq_svcflags = 0; 3404 3405 return (qp); 3406 } 3407 3408 /* 3409 * Free a pair of queues and the "attached" syncq. 3410 * Discard any messages left on the syncq(s), remove the syncq(s) from the 3411 * outer perimeter, and free the syncq(s) if they are not the "attached" syncq. 
 */
void
freeq(queue_t *qp)
{
	qband_t *qbp, *nqbp;
	syncq_t *sq, *outer;
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);

	/*
	 * If a previously dispatched taskq job is scheduled to run
	 * sync_service() or a service routine is scheduled for the
	 * queues about to be freed, wait here until all service is
	 * done on the queue and all associated queues and syncqs.
	 */
	wait_svc(qp);

	(void) flush_syncq(qp->q_syncq, qp);
	(void) flush_syncq(wqp->q_syncq, wqp);
	ASSERT(qp->q_syncqmsgs == 0 && wqp->q_syncqmsgs == 0);

	/*
	 * Flush the queues before q_next is set to NULL. This is needed
	 * in order to backenable any downstream queue before we go away.
	 * Note: we are already removed from the stream so that the
	 * backenabling will not cause any messages to be delivered to our
	 * put procedures.
	 */
	flushq(qp, FLUSHALL);
	flushq(wqp, FLUSHALL);

	/* Tidy up - removeq only does a half-remove from stream */
	qp->q_next = wqp->q_next = NULL;
	ASSERT(!(qp->q_flag & QENAB));
	ASSERT(!(wqp->q_flag & QENAB));

	outer = qp->q_syncq->sq_outer;
	if (outer != NULL) {
		outer_remove(outer, qp->q_syncq);
		if (wqp->q_syncq != qp->q_syncq)
			outer_remove(outer, wqp->q_syncq);
	}
	/*
	 * Free any syncqs that are outside what allocq returned.
	 */
	if (qp->q_syncq != SQ(qp) && !(qp->q_flag & QPERMOD))
		free_syncq(qp->q_syncq);
	if (qp->q_syncq != wqp->q_syncq && wqp->q_syncq != SQ(qp))
		free_syncq(wqp->q_syncq);

	ASSERT((qp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0);
	ASSERT((wqp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0);
	ASSERT(MUTEX_NOT_HELD(QLOCK(qp)));
	ASSERT(MUTEX_NOT_HELD(QLOCK(wqp)));
	sq = SQ(qp);
	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
	ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL);
	ASSERT(sq->sq_outer == NULL);
	ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL);
	ASSERT(sq->sq_callbpend == NULL);
	ASSERT(sq->sq_needexcl == 0);

	if (sq->sq_ciputctrl != NULL) {
		ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1);
		SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl,
		    sq->sq_nciputctrl, 0);
		ASSERT(ciputctrl_cache != NULL);
		kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl);
		sq->sq_ciputctrl = NULL;
		sq->sq_nciputctrl = 0;
	}

	ASSERT(qp->q_first == NULL && wqp->q_first == NULL);
	ASSERT(qp->q_count == 0 && wqp->q_count == 0);
	ASSERT(qp->q_mblkcnt == 0 && wqp->q_mblkcnt == 0);

	qp->q_flag &= ~QUSE;
	wqp->q_flag &= ~QUSE;

	/* NOTE: Uncomment the assert below once bugid 1159635 is fixed. */
	/* ASSERT((qp->q_flag & QWANTW) == 0 && (wqp->q_flag & QWANTW) == 0); */

	qbp = qp->q_bandp;
	while (qbp) {
		nqbp = qbp->qb_next;
		freeband(qbp);
		qbp = nqbp;
	}
	qbp = wqp->q_bandp;
	while (qbp) {
		nqbp = qbp->qb_next;
		freeband(qbp);
		qbp = nqbp;
	}
	kmem_cache_free(queue_cache, qp);
}

/*
 * Allocate a qband structure.
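 *
 * allocband() uses KM_NOSLEEP, so callers must tolerate a NULL return;
 * see setqback() later in this file for the canonical pattern:
 *
 *	if ((*qbpp = allocband()) == NULL) {
 *		cmn_err(CE_WARN, "setqback: can't allocate qband\n");
 *		return;
 *	}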
3512 */ 3513 qband_t * 3514 allocband(void) 3515 { 3516 qband_t *qbp; 3517 3518 qbp = kmem_cache_alloc(qband_cache, KM_NOSLEEP); 3519 if (qbp == NULL) 3520 return (NULL); 3521 3522 qbp->qb_next = NULL; 3523 qbp->qb_count = 0; 3524 qbp->qb_mblkcnt = 0; 3525 qbp->qb_first = NULL; 3526 qbp->qb_last = NULL; 3527 qbp->qb_flag = 0; 3528 3529 return (qbp); 3530 } 3531 3532 /* 3533 * Free a qband structure. 3534 */ 3535 void 3536 freeband(qband_t *qbp) 3537 { 3538 kmem_cache_free(qband_cache, qbp); 3539 } 3540 3541 /* 3542 * Just like putnextctl(9F), except that allocb_wait() is used. 3543 * 3544 * Consolidation Private, and of course only callable from the stream head or 3545 * routines that may block. 3546 */ 3547 int 3548 putnextctl_wait(queue_t *q, int type) 3549 { 3550 mblk_t *bp; 3551 int error; 3552 3553 if ((datamsg(type) && (type != M_DELAY)) || 3554 (bp = allocb_wait(0, BPRI_HI, 0, &error)) == NULL) 3555 return (0); 3556 3557 bp->b_datap->db_type = (unsigned char)type; 3558 putnext(q, bp); 3559 return (1); 3560 } 3561 3562 /* 3563 * run any possible bufcalls. 3564 */ 3565 void 3566 runbufcalls(void) 3567 { 3568 strbufcall_t *bcp; 3569 3570 mutex_enter(&bcall_monitor); 3571 mutex_enter(&strbcall_lock); 3572 3573 if (strbcalls.bc_head) { 3574 size_t count; 3575 int nevent; 3576 3577 /* 3578 * count how many events are on the list 3579 * now so we can check to avoid looping 3580 * in low memory situations 3581 */ 3582 nevent = 0; 3583 for (bcp = strbcalls.bc_head; bcp; bcp = bcp->bc_next) 3584 nevent++; 3585 3586 /* 3587 * get estimate of available memory from kmem_avail(). 3588 * awake all bufcall functions waiting for 3589 * memory whose request could be satisfied 3590 * by 'count' memory and let 'em fight for it. 3591 */ 3592 count = kmem_avail(); 3593 while ((bcp = strbcalls.bc_head) != NULL && nevent) { 3594 STRSTAT(bufcalls); 3595 --nevent; 3596 if (bcp->bc_size <= count) { 3597 bcp->bc_executor = curthread; 3598 mutex_exit(&strbcall_lock); 3599 (*bcp->bc_func)(bcp->bc_arg); 3600 mutex_enter(&strbcall_lock); 3601 bcp->bc_executor = NULL; 3602 cv_broadcast(&bcall_cv); 3603 strbcalls.bc_head = bcp->bc_next; 3604 kmem_free(bcp, sizeof (strbufcall_t)); 3605 } else { 3606 /* 3607 * too big, try again later - note 3608 * that nevent was decremented above 3609 * so we won't retry this one on this 3610 * iteration of the loop 3611 */ 3612 if (bcp->bc_next != NULL) { 3613 strbcalls.bc_head = bcp->bc_next; 3614 bcp->bc_next = NULL; 3615 strbcalls.bc_tail->bc_next = bcp; 3616 strbcalls.bc_tail = bcp; 3617 } 3618 } 3619 } 3620 if (strbcalls.bc_head == NULL) 3621 strbcalls.bc_tail = NULL; 3622 } 3623 3624 mutex_exit(&strbcall_lock); 3625 mutex_exit(&bcall_monitor); 3626 } 3627 3628 3629 /* 3630 * actually run queue's service routine. 
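 *
 * In outline (see the body below; the QWCLOSE and QENAB checks are
 * elided here):
 *
 *	entersq(q->q_syncq, SQ_SVC);
 *	(*q->q_qinfo->qi_srvp)(q);
 *	leavesq(q->q_syncq, SQ_SVC);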
3631 */ 3632 static void 3633 runservice(queue_t *q) 3634 { 3635 qband_t *qbp; 3636 3637 ASSERT(q->q_qinfo->qi_srvp); 3638 again: 3639 entersq(q->q_syncq, SQ_SVC); 3640 TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_START, 3641 "runservice starts:%p", q); 3642 3643 if (!(q->q_flag & QWCLOSE)) 3644 (*q->q_qinfo->qi_srvp)(q); 3645 3646 TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_END, 3647 "runservice ends:(%p)", q); 3648 3649 leavesq(q->q_syncq, SQ_SVC); 3650 3651 mutex_enter(QLOCK(q)); 3652 if (q->q_flag & QENAB) { 3653 q->q_flag &= ~QENAB; 3654 mutex_exit(QLOCK(q)); 3655 goto again; 3656 } 3657 q->q_flag &= ~QINSERVICE; 3658 q->q_flag &= ~QBACK; 3659 for (qbp = q->q_bandp; qbp; qbp = qbp->qb_next) 3660 qbp->qb_flag &= ~QB_BACK; 3661 /* 3662 * Wakeup thread waiting for the service procedure 3663 * to be run (strclose and qdetach). 3664 */ 3665 cv_broadcast(&q->q_wait); 3666 3667 mutex_exit(QLOCK(q)); 3668 } 3669 3670 /* 3671 * Background processing of bufcalls. 3672 */ 3673 void 3674 streams_bufcall_service(void) 3675 { 3676 callb_cpr_t cprinfo; 3677 3678 CALLB_CPR_INIT(&cprinfo, &strbcall_lock, callb_generic_cpr, 3679 "streams_bufcall_service"); 3680 3681 mutex_enter(&strbcall_lock); 3682 3683 for (;;) { 3684 if (strbcalls.bc_head != NULL && kmem_avail() > 0) { 3685 mutex_exit(&strbcall_lock); 3686 runbufcalls(); 3687 mutex_enter(&strbcall_lock); 3688 } 3689 if (strbcalls.bc_head != NULL) { 3690 clock_t wt, tick; 3691 3692 STRSTAT(bcwaits); 3693 /* Wait for memory to become available */ 3694 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3695 tick = SEC_TO_TICK(60); 3696 time_to_wait(&wt, tick); 3697 (void) cv_timedwait(&memavail_cv, &strbcall_lock, wt); 3698 CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock); 3699 } 3700 3701 /* Wait for new work to arrive */ 3702 if (strbcalls.bc_head == NULL) { 3703 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3704 cv_wait(&strbcall_cv, &strbcall_lock); 3705 CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock); 3706 } 3707 } 3708 } 3709 3710 /* 3711 * Background processing of streams background tasks which failed 3712 * taskq_dispatch. 3713 */ 3714 static void 3715 streams_qbkgrnd_service(void) 3716 { 3717 callb_cpr_t cprinfo; 3718 queue_t *q; 3719 3720 CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr, 3721 "streams_bkgrnd_service"); 3722 3723 mutex_enter(&service_queue); 3724 3725 for (;;) { 3726 /* 3727 * Wait for work to arrive. 3728 */ 3729 while ((freebs_list == NULL) && (qhead == NULL)) { 3730 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3731 cv_wait(&services_to_run, &service_queue); 3732 CALLB_CPR_SAFE_END(&cprinfo, &service_queue); 3733 } 3734 /* 3735 * Handle all pending freebs requests to free memory. 3736 */ 3737 while (freebs_list != NULL) { 3738 mblk_t *mp = freebs_list; 3739 freebs_list = mp->b_next; 3740 mutex_exit(&service_queue); 3741 mblk_free(mp); 3742 mutex_enter(&service_queue); 3743 } 3744 /* 3745 * Run pending queues. 3746 */ 3747 while (qhead != NULL) { 3748 DQ(q, qhead, qtail, q_link); 3749 ASSERT(q != NULL); 3750 mutex_exit(&service_queue); 3751 queue_service(q); 3752 mutex_enter(&service_queue); 3753 } 3754 ASSERT(qhead == NULL && qtail == NULL); 3755 } 3756 } 3757 3758 /* 3759 * Background processing of streams background tasks which failed 3760 * taskq_dispatch. 3761 */ 3762 static void 3763 streams_sqbkgrnd_service(void) 3764 { 3765 callb_cpr_t cprinfo; 3766 syncq_t *sq; 3767 3768 CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr, 3769 "streams_sqbkgrnd_service"); 3770 3771 mutex_enter(&service_queue); 3772 3773 for (;;) { 3774 /* 3775 * Wait for work to arrive. 
		 */
		while (sqhead == NULL) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&syncqs_to_run, &service_queue);
			CALLB_CPR_SAFE_END(&cprinfo, &service_queue);
		}

		/*
		 * Run pending syncqs.
		 */
		while (sqhead != NULL) {
			DQ(sq, sqhead, sqtail, sq_next);
			ASSERT(sq != NULL);
			ASSERT(sq->sq_svcflags & SQ_BGTHREAD);
			mutex_exit(&service_queue);
			syncq_service(sq);
			mutex_enter(&service_queue);
		}
	}
}

/*
 * Disable the syncq and wait for background syncq processing to complete.
 * If the syncq is placed on the sqhead/sqtail queue, try to remove it from
 * the list.
 */
void
wait_sq_svc(syncq_t *sq)
{
	mutex_enter(SQLOCK(sq));
	sq->sq_svcflags |= SQ_DISABLED;
	if (sq->sq_svcflags & SQ_BGTHREAD) {
		syncq_t *sq_chase;
		syncq_t *sq_curr;
		int removed;

		ASSERT(sq->sq_servcount == 1);
		mutex_enter(&service_queue);
		RMQ(sq, sqhead, sqtail, sq_next, sq_chase, sq_curr, removed);
		mutex_exit(&service_queue);
		if (removed) {
			sq->sq_svcflags &= ~SQ_BGTHREAD;
			sq->sq_servcount = 0;
			STRSTAT(sqremoved);
			goto done;
		}
	}
	while (sq->sq_servcount != 0) {
		sq->sq_flags |= SQ_WANTWAKEUP;
		cv_wait(&sq->sq_wait, SQLOCK(sq));
	}
done:
	mutex_exit(SQLOCK(sq));
}

/*
 * Put a syncq on the list of syncqs to be serviced by the sqthread.
 * Add the argument to the end of the sqhead list and set the flag
 * indicating this syncq has been enabled. If it has already been
 * enabled, don't do anything.
 * This routine assumes that SQLOCK is held.
 * NOTE that the lock order is to have the SQLOCK first,
 * so if the service_queue lock is held, we need to release it
 * before acquiring the SQLOCK (mostly relevant for the background
 * thread, and this seems to be common among the STREAMS global locks).
 * Note that the sq_svcflags are protected by the SQLOCK.
 */
void
sqenable(syncq_t *sq)
{
	/*
	 * This is probably not important except for where it is being
	 * called from; at that point the SQLOCK should be held (and it
	 * is a pain to release it just for this routine, so don't do
	 * it).
	 */
	ASSERT(MUTEX_HELD(SQLOCK(sq)));

	IMPLY(sq->sq_servcount == 0, sq->sq_next == NULL);
	IMPLY(sq->sq_next != NULL, sq->sq_svcflags & SQ_BGTHREAD);

	/*
	 * Do not put on list if background thread is scheduled or
	 * syncq is disabled.
	 */
	if (sq->sq_svcflags & (SQ_DISABLED | SQ_BGTHREAD))
		return;

	/*
	 * Check whether we should enable sq at all.
	 * Non-PERMOD syncqs may be drained by at most one thread.
	 * PERMOD syncqs may be drained by several threads, but we limit the
	 * total amount to the lesser of
	 *	the number of queues on the syncq, and
	 *	the number of online CPUs.
	 */
	if (sq->sq_servcount != 0) {
		if (((sq->sq_type & SQ_PERMOD) == 0) ||
		    (sq->sq_servcount >= MIN(sq->sq_nqueues, ncpus_online))) {
			STRSTAT(sqtoomany);
			return;
		}
	}

	sq->sq_tstamp = lbolt;
	STRSTAT(sqenables);

	/* Attempt a taskq dispatch */
	sq->sq_servid = (void *)taskq_dispatch(streams_taskq,
	    (task_func_t *)syncq_service, sq, TQ_NOSLEEP | TQ_NOQUEUE);
	if (sq->sq_servid != NULL) {
		sq->sq_servcount++;
		return;
	}

	/*
	 * This taskq dispatch failed, but a previous one may have succeeded.
	 * Don't try to schedule on the background thread whilst there is
	 * outstanding taskq processing.
	 */
	if (sq->sq_servcount != 0)
		return;

	/*
	 * System is low on resources and can't perform a non-sleeping
	 * dispatch. Schedule the syncq for a background thread and mark the
	 * syncq to avoid any further taskq dispatch attempts.
	 */
	mutex_enter(&service_queue);
	STRSTAT(taskqfails);
	ENQUEUE(sq, sqhead, sqtail, sq_next);
	sq->sq_svcflags |= SQ_BGTHREAD;
	sq->sq_servcount = 1;
	cv_signal(&syncqs_to_run);
	mutex_exit(&service_queue);
}

/*
 * Note: fifo_close() depends on the mblk_t on the queue being freed
 * asynchronously. The asynchronous freeing of messages breaks the
 * recursive call chain of fifo_close() while there are I_SENDFD-type
 * messages referring to other file pointers on the queue. Thus, when
 * closing pipes, it avoids stack overflow in the case of daisy-chained
 * pipes, and also avoids deadlock in the case of fifonode_t pairs (which
 * share the same fifolock_t).
 */

void
freebs_enqueue(mblk_t *mp, dblk_t *dbp)
{
	esb_queue_t *eqp = &system_esbq;

	ASSERT(dbp->db_mblk == mp);

	/*
	 * Check data sanity. The dblk should have a non-NULL free function.
	 * It is better to panic here than later, when the dblk is freed
	 * asynchronously and the context is lost.
	 */
	if (dbp->db_frtnp->free_func == NULL) {
		panic("freebs_enqueue: dblock %p has a NULL free callback",
		    (void *)dbp);
	}

	mutex_enter(&eqp->eq_lock);
	/* queue the new mblk on the esballoc queue */
	if (eqp->eq_head == NULL) {
		eqp->eq_head = eqp->eq_tail = mp;
	} else {
		eqp->eq_tail->b_next = mp;
		eqp->eq_tail = mp;
	}
	eqp->eq_len++;

	/* If we're the first thread to reach the threshold, process */
	if (eqp->eq_len >= esbq_max_qlen &&
	    !(eqp->eq_flags & ESBQ_PROCESSING))
		esballoc_process_queue(eqp);

	esballoc_set_timer(eqp, esbq_timeout);
	mutex_exit(&eqp->eq_lock);
}

static void
esballoc_process_queue(esb_queue_t *eqp)
{
	mblk_t *mp;

	ASSERT(MUTEX_HELD(&eqp->eq_lock));

	eqp->eq_flags |= ESBQ_PROCESSING;

	do {
		/*
		 * Detach the message chain for processing.
		 */
		mp = eqp->eq_head;
		eqp->eq_tail->b_next = NULL;
		eqp->eq_head = eqp->eq_tail = NULL;
		eqp->eq_len = 0;
		mutex_exit(&eqp->eq_lock);

		/*
		 * Process the message chain.
		 */
		esballoc_enqueue_mblk(mp);
		mutex_enter(&eqp->eq_lock);
	} while ((eqp->eq_len >= esbq_max_qlen) && (eqp->eq_len > 0));

	eqp->eq_flags &= ~ESBQ_PROCESSING;
}

/*
 * taskq callback routine to free esballoc'ed mblks
 */
static void
esballoc_mblk_free(mblk_t *mp)
{
	mblk_t *nextmp;

	for (; mp != NULL; mp = nextmp) {
		nextmp = mp->b_next;
		mp->b_next = NULL;
		mblk_free(mp);
	}
}

static void
esballoc_enqueue_mblk(mblk_t *mp)
{

	if (taskq_dispatch(system_taskq, (task_func_t *)esballoc_mblk_free, mp,
	    TQ_NOSLEEP) == NULL) {
		mblk_t *first_mp = mp;
		/*
		 * System is low on resources and can't perform a non-sleeping
		 * dispatch. Schedule the chain for the background thread.
4013 */ 4014 mutex_enter(&service_queue); 4015 STRSTAT(taskqfails); 4016 4017 while (mp->b_next != NULL) 4018 mp = mp->b_next; 4019 4020 mp->b_next = freebs_list; 4021 freebs_list = first_mp; 4022 cv_signal(&services_to_run); 4023 mutex_exit(&service_queue); 4024 } 4025 } 4026 4027 static void 4028 esballoc_timer(void *arg) 4029 { 4030 esb_queue_t *eqp = arg; 4031 4032 mutex_enter(&eqp->eq_lock); 4033 eqp->eq_flags &= ~ESBQ_TIMER; 4034 4035 if (!(eqp->eq_flags & ESBQ_PROCESSING) && 4036 eqp->eq_len > 0) 4037 esballoc_process_queue(eqp); 4038 4039 esballoc_set_timer(eqp, esbq_timeout); 4040 mutex_exit(&eqp->eq_lock); 4041 } 4042 4043 static void 4044 esballoc_set_timer(esb_queue_t *eqp, clock_t eq_timeout) 4045 { 4046 ASSERT(MUTEX_HELD(&eqp->eq_lock)); 4047 4048 if (eqp->eq_len > 0 && !(eqp->eq_flags & ESBQ_TIMER)) { 4049 (void) timeout(esballoc_timer, eqp, eq_timeout); 4050 eqp->eq_flags |= ESBQ_TIMER; 4051 } 4052 } 4053 4054 void 4055 esballoc_queue_init(void) 4056 { 4057 system_esbq.eq_len = 0; 4058 system_esbq.eq_head = system_esbq.eq_tail = NULL; 4059 system_esbq.eq_flags = 0; 4060 } 4061 4062 /* 4063 * Set the QBACK or QB_BACK flag in the given queue for 4064 * the given priority band. 4065 */ 4066 void 4067 setqback(queue_t *q, unsigned char pri) 4068 { 4069 int i; 4070 qband_t *qbp; 4071 qband_t **qbpp; 4072 4073 ASSERT(MUTEX_HELD(QLOCK(q))); 4074 if (pri != 0) { 4075 if (pri > q->q_nband) { 4076 qbpp = &q->q_bandp; 4077 while (*qbpp) 4078 qbpp = &(*qbpp)->qb_next; 4079 while (pri > q->q_nband) { 4080 if ((*qbpp = allocband()) == NULL) { 4081 cmn_err(CE_WARN, 4082 "setqback: can't allocate qband\n"); 4083 return; 4084 } 4085 (*qbpp)->qb_hiwat = q->q_hiwat; 4086 (*qbpp)->qb_lowat = q->q_lowat; 4087 q->q_nband++; 4088 qbpp = &(*qbpp)->qb_next; 4089 } 4090 } 4091 qbp = q->q_bandp; 4092 i = pri; 4093 while (--i) 4094 qbp = qbp->qb_next; 4095 qbp->qb_flag |= QB_BACK; 4096 } else { 4097 q->q_flag |= QBACK; 4098 } 4099 } 4100 4101 int 4102 strcopyin(void *from, void *to, size_t len, int copyflag) 4103 { 4104 if (copyflag & U_TO_K) { 4105 ASSERT((copyflag & K_TO_K) == 0); 4106 if (copyin(from, to, len)) 4107 return (EFAULT); 4108 } else { 4109 ASSERT(copyflag & K_TO_K); 4110 bcopy(from, to, len); 4111 } 4112 return (0); 4113 } 4114 4115 int 4116 strcopyout(void *from, void *to, size_t len, int copyflag) 4117 { 4118 if (copyflag & U_TO_K) { 4119 if (copyout(from, to, len)) 4120 return (EFAULT); 4121 } else { 4122 ASSERT(copyflag & K_TO_K); 4123 bcopy(from, to, len); 4124 } 4125 return (0); 4126 } 4127 4128 /* 4129 * strsignal_nolock() posts a signal to the process(es) at the stream head. 4130 * It assumes that the stream head lock is already held, whereas strsignal() 4131 * acquires the lock first. This routine was created because a few callers 4132 * release the stream head lock before calling only to re-acquire it after 4133 * it returns. 
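 *
 * For illustration (an assumed call pattern, not taken from this file):
 *
 *	mutex_enter(&stp->sd_lock);
 *	...
 *	strsignal_nolock(stp, SIGPOLL, band);	(sd_lock already held)
 *	...
 *	mutex_exit(&stp->sd_lock);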
4134 */ 4135 void 4136 strsignal_nolock(stdata_t *stp, int sig, int32_t band) 4137 { 4138 ASSERT(MUTEX_HELD(&stp->sd_lock)); 4139 switch (sig) { 4140 case SIGPOLL: 4141 if (stp->sd_sigflags & S_MSG) 4142 strsendsig(stp->sd_siglist, S_MSG, (uchar_t)band, 0); 4143 break; 4144 4145 default: 4146 if (stp->sd_pgidp) { 4147 pgsignal(stp->sd_pgidp, sig); 4148 } 4149 break; 4150 } 4151 } 4152 4153 void 4154 strsignal(stdata_t *stp, int sig, int32_t band) 4155 { 4156 TRACE_3(TR_FAC_STREAMS_FR, TR_SENDSIG, 4157 "strsignal:%p, %X, %X", stp, sig, band); 4158 4159 mutex_enter(&stp->sd_lock); 4160 switch (sig) { 4161 case SIGPOLL: 4162 if (stp->sd_sigflags & S_MSG) 4163 strsendsig(stp->sd_siglist, S_MSG, (uchar_t)band, 0); 4164 break; 4165 4166 default: 4167 if (stp->sd_pgidp) { 4168 pgsignal(stp->sd_pgidp, sig); 4169 } 4170 break; 4171 } 4172 mutex_exit(&stp->sd_lock); 4173 } 4174 4175 void 4176 strhup(stdata_t *stp) 4177 { 4178 ASSERT(mutex_owned(&stp->sd_lock)); 4179 pollwakeup(&stp->sd_pollist, POLLHUP); 4180 if (stp->sd_sigflags & S_HANGUP) 4181 strsendsig(stp->sd_siglist, S_HANGUP, 0, 0); 4182 } 4183 4184 /* 4185 * Backenable the first queue upstream from `q' with a service procedure. 4186 */ 4187 void 4188 backenable(queue_t *q, uchar_t pri) 4189 { 4190 queue_t *nq; 4191 4192 /* 4193 * our presence might not prevent other modules in our own 4194 * stream from popping/pushing since the caller of getq might not 4195 * have a claim on the queue (some drivers do a getq on somebody 4196 * else's queue - they know that the queue itself is not going away 4197 * but the framework has to guarantee q_next in that stream.) 4198 */ 4199 claimstr(q); 4200 4201 /* find nearest back queue with service proc */ 4202 for (nq = backq(q); nq && !nq->q_qinfo->qi_srvp; nq = backq(nq)) { 4203 ASSERT(STRMATED(q->q_stream) || STREAM(q) == STREAM(nq)); 4204 } 4205 4206 if (nq) { 4207 kthread_t *freezer; 4208 /* 4209 * backenable can be called either with no locks held 4210 * or with the stream frozen (the latter occurs when a module 4211 * calls rmvq with the stream frozen.) If the stream is frozen 4212 * by the caller the caller will hold all qlocks in the stream. 4213 * Note that a frozen stream doesn't freeze a mated stream, 4214 * so we explicitly check for that. 4215 */ 4216 freezer = STREAM(q)->sd_freezer; 4217 if (freezer != curthread || STREAM(q) != STREAM(nq)) { 4218 mutex_enter(QLOCK(nq)); 4219 } 4220 #ifdef DEBUG 4221 else { 4222 ASSERT(frozenstr(q)); 4223 ASSERT(MUTEX_HELD(QLOCK(q))); 4224 ASSERT(MUTEX_HELD(QLOCK(nq))); 4225 } 4226 #endif 4227 setqback(nq, pri); 4228 qenable_locked(nq); 4229 if (freezer != curthread || STREAM(q) != STREAM(nq)) 4230 mutex_exit(QLOCK(nq)); 4231 } 4232 releasestr(q); 4233 } 4234 4235 /* 4236 * Return the appropriate errno when one of flags_to_check is set 4237 * in sd_flags. Uses the exported error routines if they are set. 4238 * Will return 0 if non error is set (or if the exported error routines 4239 * do not return an error). 4240 * 4241 * If there is both a read and write error to check we prefer the read error. 4242 * Also, give preference to recorded errno's over the error functions. 4243 * The flags that are handled are: 4244 * STPLEX return EINVAL 4245 * STRDERR return sd_rerror (and clear if STRDERRNONPERSIST) 4246 * STWRERR return sd_werror (and clear if STWRERRNONPERSIST) 4247 * STRHUP return sd_werror 4248 * 4249 * If the caller indicates that the operation is a peek a nonpersistent error 4250 * is not cleared. 
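 *
 * For illustration (the same call pattern used by strstartplumb() below):
 *
 *	mutex_enter(&stp->sd_lock);
 *	if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX))
 *		error = strgeterr(stp, STRDERR|STWRERR|STRHUP|STPLEX, 0);
 *	mutex_exit(&stp->sd_lock);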
4251 */ 4252 int 4253 strgeterr(stdata_t *stp, int32_t flags_to_check, int ispeek) 4254 { 4255 int32_t sd_flag = stp->sd_flag & flags_to_check; 4256 int error = 0; 4257 4258 ASSERT(MUTEX_HELD(&stp->sd_lock)); 4259 ASSERT((flags_to_check & ~(STRDERR|STWRERR|STRHUP|STPLEX)) == 0); 4260 if (sd_flag & STPLEX) 4261 error = EINVAL; 4262 else if (sd_flag & STRDERR) { 4263 error = stp->sd_rerror; 4264 if ((stp->sd_flag & STRDERRNONPERSIST) && !ispeek) { 4265 /* 4266 * Read errors are non-persistent i.e. discarded once 4267 * returned to a non-peeking caller, 4268 */ 4269 stp->sd_rerror = 0; 4270 stp->sd_flag &= ~STRDERR; 4271 } 4272 if (error == 0 && stp->sd_rderrfunc != NULL) { 4273 int clearerr = 0; 4274 4275 error = (*stp->sd_rderrfunc)(stp->sd_vnode, ispeek, 4276 &clearerr); 4277 if (clearerr) { 4278 stp->sd_flag &= ~STRDERR; 4279 stp->sd_rderrfunc = NULL; 4280 } 4281 } 4282 } else if (sd_flag & STWRERR) { 4283 error = stp->sd_werror; 4284 if ((stp->sd_flag & STWRERRNONPERSIST) && !ispeek) { 4285 /* 4286 * Write errors are non-persistent i.e. discarded once 4287 * returned to a non-peeking caller, 4288 */ 4289 stp->sd_werror = 0; 4290 stp->sd_flag &= ~STWRERR; 4291 } 4292 if (error == 0 && stp->sd_wrerrfunc != NULL) { 4293 int clearerr = 0; 4294 4295 error = (*stp->sd_wrerrfunc)(stp->sd_vnode, ispeek, 4296 &clearerr); 4297 if (clearerr) { 4298 stp->sd_flag &= ~STWRERR; 4299 stp->sd_wrerrfunc = NULL; 4300 } 4301 } 4302 } else if (sd_flag & STRHUP) { 4303 /* sd_werror set when STRHUP */ 4304 error = stp->sd_werror; 4305 } 4306 return (error); 4307 } 4308 4309 4310 /* 4311 * single-thread open/close/push/pop 4312 * for twisted streams also 4313 */ 4314 int 4315 strstartplumb(stdata_t *stp, int flag, int cmd) 4316 { 4317 int waited = 1; 4318 int error = 0; 4319 4320 if (STRMATED(stp)) { 4321 struct stdata *stmatep = stp->sd_mate; 4322 4323 STRLOCKMATES(stp); 4324 while (waited) { 4325 waited = 0; 4326 while (stmatep->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 4327 if ((cmd == I_POP) && 4328 (flag & (FNDELAY|FNONBLOCK))) { 4329 STRUNLOCKMATES(stp); 4330 return (EAGAIN); 4331 } 4332 waited = 1; 4333 mutex_exit(&stp->sd_lock); 4334 if (!cv_wait_sig(&stmatep->sd_monitor, 4335 &stmatep->sd_lock)) { 4336 mutex_exit(&stmatep->sd_lock); 4337 return (EINTR); 4338 } 4339 mutex_exit(&stmatep->sd_lock); 4340 STRLOCKMATES(stp); 4341 } 4342 while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 4343 if ((cmd == I_POP) && 4344 (flag & (FNDELAY|FNONBLOCK))) { 4345 STRUNLOCKMATES(stp); 4346 return (EAGAIN); 4347 } 4348 waited = 1; 4349 mutex_exit(&stmatep->sd_lock); 4350 if (!cv_wait_sig(&stp->sd_monitor, 4351 &stp->sd_lock)) { 4352 mutex_exit(&stp->sd_lock); 4353 return (EINTR); 4354 } 4355 mutex_exit(&stp->sd_lock); 4356 STRLOCKMATES(stp); 4357 } 4358 if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) { 4359 error = strgeterr(stp, 4360 STRDERR|STWRERR|STRHUP|STPLEX, 0); 4361 if (error != 0) { 4362 STRUNLOCKMATES(stp); 4363 return (error); 4364 } 4365 } 4366 } 4367 stp->sd_flag |= STRPLUMB; 4368 STRUNLOCKMATES(stp); 4369 } else { 4370 mutex_enter(&stp->sd_lock); 4371 while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 4372 if (((cmd == I_POP) || (cmd == _I_REMOVE)) && 4373 (flag & (FNDELAY|FNONBLOCK))) { 4374 mutex_exit(&stp->sd_lock); 4375 return (EAGAIN); 4376 } 4377 if (!cv_wait_sig(&stp->sd_monitor, &stp->sd_lock)) { 4378 mutex_exit(&stp->sd_lock); 4379 return (EINTR); 4380 } 4381 if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) { 4382 error = strgeterr(stp, 4383 STRDERR|STWRERR|STRHUP|STPLEX, 0); 4384 if 
(error != 0) {
                    mutex_exit(&stp->sd_lock);
                    return (error);
                }
            }
        }
        stp->sd_flag |= STRPLUMB;
        mutex_exit(&stp->sd_lock);
    }
    return (0);
}

/*
 * Complete the plumbing operation associated with stream `stp'.
 */
void
strendplumb(stdata_t *stp)
{
    ASSERT(MUTEX_HELD(&stp->sd_lock));
    ASSERT(stp->sd_flag & STRPLUMB);
    stp->sd_flag &= ~STRPLUMB;
    cv_broadcast(&stp->sd_monitor);
}

/*
 * This describes how the STREAMS framework handles synchronization
 * during open/push and close/pop.
 * The key interfaces for open and close are qprocson and qprocsoff,
 * respectively. While the close case in general is harder, both open
 * and close have significant similarities.
 *
 * During close the STREAMS framework has to both ensure that there
 * are no stale references to the queue pair (and syncq) that
 * are being closed and also provide the guarantees that are documented
 * in qprocsoff(9F).
 * If there are stale references to the queue that is closing it can
 * result in kernel memory corruption or kernel panics.
 *
 * Note that it is up to the module/driver to ensure that it itself
 * does not have any stale references to the closing queues once its close
 * routine returns. This includes:
 * - Cancelling any timeout/bufcall/qtimeout/qbufcall callback routines
 *   associated with the queues. For timeout and bufcall callbacks the
 *   module/driver also has to wait for any callbacks that are already
 *   in progress to complete.
 * - If the module/driver is using esballoc it has to ensure that any
 *   esballoc free functions do not refer to a queue that has closed.
 *   (Note that in general the close routine can not wait for the esballoc'ed
 *   messages to be freed since that can cause a deadlock.)
 * - Cancelling any interrupts that refer to the closing queues and
 *   also ensuring that there are no interrupts in progress that will
 *   refer to the closing queues once the close routine returns.
 * - For multiplexors, removing any driver global state that refers to
 *   the closing queue and also ensuring that there are no threads in
 *   the multiplexor that have picked up a queue pointer but not yet
 *   finished using it.
 *
 * In addition, a driver/module can only reference the q_next pointer
 * in its open, close, put, or service procedures or in a
 * qtimeout/qbufcall callback procedure executing "on" the correct
 * stream. Thus it can not reference the q_next pointer in an interrupt
 * routine or a timeout, bufcall or esballoc callback routine. Likewise
 * it can not reference q_next of a different queue e.g. in a mux that
 * passes messages from one queue's put/service procedure to another queue.
 * In all the cases when the driver/module can not access the q_next
 * field it must use the *next* versions e.g. canputnext instead of
 * canput(q->q_next) and putnextctl instead of putctl(q->q_next, ...).
 *
 *
 * Assuming that the driver/module conforms to the above constraints
 * the STREAMS framework has to avoid stale references to q_next for all
 * the framework internal cases which include (but are not limited to):
 * - Threads in canput/canputnext/backenable and elsewhere that are
 *   walking q_next.
 * - Messages on a syncq that have a reference to the queue through b_queue.
 * - Messages on an outer perimeter (syncq) that have a reference to the
 *   queue through b_queue.
 * - Threads that use q_nfsrv (e.g. canput) to find a queue.
 *   Note that only canput and bcanput use q_nfsrv without any locking.
 *
 * To provide the qprocsoff(9F) guarantees, the framework has to ensure
 * that after qprocsoff returns no thread can enter the put or service
 * routines for the closing read or write-side queue.
 * In addition to preventing "direct" entry into the put procedures
 * the framework also has to prevent messages being drained from
 * the syncq or the outer perimeter.
 * XXX Note that currently qdetach relies on D_MTOCEXCL as the only
 * mechanism to prevent qwriter(PERIM_OUTER) from running after
 * qprocsoff has returned.
 * Note that if a module/driver uses put(9F) on one of its own queues
 * it is up to the module/driver to ensure that the put() doesn't
 * get called when the queue is closing.
 *
 *
 * The framework aspects of the above "contract" are implemented by
 * qprocsoff, removeq, and strlock:
 * - qprocsoff (disable_svc) sets QWCLOSE to prevent runservice from
 *   entering the service procedures.
 * - strlock acquires the sd_lock and sd_reflock to prevent putnext,
 *   canputnext, backenable etc from dereferencing the q_next that will
 *   soon change.
 * - strlock waits for sd_refcnt to be zero to wait for e.g. any canputnext
 *   or other q_next walker that uses claimstr/releasestr to finish.
 * - optionally for every syncq in the stream strlock acquires all the
 *   sq_lock's and waits for all sq_counts to drop to a value that indicates
 *   that no thread executes in the put or service procedures and that no
 *   thread is draining into the module/driver. This ensures that no
 *   open, close, put, service, or qtimeout/qbufcall callback procedure is
 *   currently executing hence no such thread can end up with the old stale
 *   q_next value and no canput/backenable can have the old stale
 *   q_nfsrv/q_next.
 * - qdetach (wait_svc) makes sure that any scheduled or running threads
 *   have either finished or observed the QWCLOSE flag and gone away.
 */


/*
 * Get all the locks necessary to change q_next.
 *
 * Wait for sd_refcnt to reach 0 and, if sqlist is present, wait for the
 * sq_count of each syncq in the list to drop to sq_rmqcount, indicating that
 * the only threads inside the syncq are threads currently calling removeq().
 * Since threads calling removeq() are in the process of removing their queues
 * from the stream, we do not need to worry about them accessing a stale q_next
 * pointer and thus we do not need to wait for them to exit (in fact, waiting
 * for them can cause deadlock).
 *
 * This routine is subject to starvation since it does not set any flag to
 * prevent threads from entering a module in the stream (i.e. sq_count can
 * increase on some syncq while it is waiting on some other syncq).
 *
 * Assumes that only one thread attempts to call strlock for a given
 * stream. If this is not the case the two threads would deadlock.
 * This assumption is guaranteed since strlock is only called by insertq
 * and removeq and streams plumbing changes are single-threaded for
 * a given stream using the STWOPEN, STRCLOSE, and STRPLUMB flags.
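 *
 * A plumbing operation is therefore bracketed roughly as follows (a
 * sketch of the pattern used by removeq() below, not additional API):
 *
 *	sqlist = sqlist_build(q, stp, STRMATED(stp));
 *	strlock(stp, sqlist);
 *	... change q_next and related pointers ...
 *	strunlock(stp, sqlist);
 *	sqlist_free(sqlist);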
 *
 * For pipes, it is not difficult to atomically designate a pair of streams
 * to be mated. Once mated atomically by the framework the twisted pair remain
 * configured that way until dismantled atomically by the framework.
 * When plumbing takes place on a twisted stream it is necessary to ensure that
 * this operation is done exclusively on the twisted stream since two such
 * operations, each initiated on different ends of the pipe will deadlock
 * waiting for each other to complete.
 *
 * On entry, no locks should be held.
 * The locks acquired and held by strlock depend on a few factors.
 * - If sqlist is non-NULL all the syncq locks in the sqlist will be acquired
 *   and held on exit and all sq_count are at an acceptable level.
 * - In all cases, sd_lock and sd_reflock are acquired and held on exit with
 *   sd_refcnt being zero.
 */

static void
strlock(struct stdata *stp, sqlist_t *sqlist)
{
    syncql_t *sql, *sql2;
retry:
    /*
     * Wait for any claimstr to go away.
     */
    if (STRMATED(stp)) {
        struct stdata *stp1, *stp2;

        STRLOCKMATES(stp);
        /*
         * Note that the selection of locking order is not
         * important, just that they are always acquired in
         * the same order. To assure this, we choose this
         * order based on the value of the pointer, and since
         * the pointer will not change for the life of this
         * pair, we will always grab the locks in the same
         * order (and hence, prevent deadlocks).
         */
        if (&(stp->sd_lock) > &((stp->sd_mate)->sd_lock)) {
            stp1 = stp;
            stp2 = stp->sd_mate;
        } else {
            stp2 = stp;
            stp1 = stp->sd_mate;
        }
        mutex_enter(&stp1->sd_reflock);
        if (stp1->sd_refcnt > 0) {
            STRUNLOCKMATES(stp);
            cv_wait(&stp1->sd_refmonitor, &stp1->sd_reflock);
            mutex_exit(&stp1->sd_reflock);
            goto retry;
        }
        mutex_enter(&stp2->sd_reflock);
        if (stp2->sd_refcnt > 0) {
            STRUNLOCKMATES(stp);
            mutex_exit(&stp1->sd_reflock);
            cv_wait(&stp2->sd_refmonitor, &stp2->sd_reflock);
            mutex_exit(&stp2->sd_reflock);
            goto retry;
        }
        STREAM_PUTLOCKS_ENTER(stp1);
        STREAM_PUTLOCKS_ENTER(stp2);
    } else {
        mutex_enter(&stp->sd_lock);
        mutex_enter(&stp->sd_reflock);
        while (stp->sd_refcnt > 0) {
            mutex_exit(&stp->sd_lock);
            cv_wait(&stp->sd_refmonitor, &stp->sd_reflock);
            if (mutex_tryenter(&stp->sd_lock) == 0) {
                mutex_exit(&stp->sd_reflock);
                mutex_enter(&stp->sd_lock);
                mutex_enter(&stp->sd_reflock);
            }
        }
        STREAM_PUTLOCKS_ENTER(stp);
    }

    if (sqlist == NULL)
        return;

    for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) {
        syncq_t *sq = sql->sql_sq;
        uint16_t count;

        mutex_enter(SQLOCK(sq));
        count = sq->sq_count;
        ASSERT(sq->sq_rmqcount <= count);
        SQ_PUTLOCKS_ENTER(sq);
        SUM_SQ_PUTCOUNTS(sq, count);
        if (count == sq->sq_rmqcount)
            continue;

        /* Failed - drop all locks that we have acquired so far */
        if (STRMATED(stp)) {
            STREAM_PUTLOCKS_EXIT(stp);
            STREAM_PUTLOCKS_EXIT(stp->sd_mate);
            STRUNLOCKMATES(stp);
            mutex_exit(&stp->sd_reflock);
            mutex_exit(&stp->sd_mate->sd_reflock);
        } else {
            STREAM_PUTLOCKS_EXIT(stp);
            mutex_exit(&stp->sd_lock);
            mutex_exit(&stp->sd_reflock);
        }
        for (sql2 = sqlist->sqlist_head; sql2 != sql;
            sql2 = sql2->sql_next) {
            SQ_PUTLOCKS_EXIT(sql2->sql_sq);
            mutex_exit(SQLOCK(sql2->sql_sq));
        }

        /*
         * The wait loop below may starve when there are many threads
         * claiming the syncq. This is especially a problem with permod
         * syncqs (IP). To lessen the impact of the problem we increment
         * sq_needexcl and clear fastbits so that putnexts will slow
         * down and call sqenable instead of draining right away.
         */
        sq->sq_needexcl++;
        SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
        while (count > sq->sq_rmqcount) {
            sq->sq_flags |= SQ_WANTWAKEUP;
            SQ_PUTLOCKS_EXIT(sq);
            cv_wait(&sq->sq_wait, SQLOCK(sq));
            count = sq->sq_count;
            SQ_PUTLOCKS_ENTER(sq);
            SUM_SQ_PUTCOUNTS(sq, count);
        }
        sq->sq_needexcl--;
        if (sq->sq_needexcl == 0)
            SQ_PUTCOUNT_SETFAST_LOCKED(sq);
        SQ_PUTLOCKS_EXIT(sq);
        ASSERT(count == sq->sq_rmqcount);
        mutex_exit(SQLOCK(sq));
        goto retry;
    }
}

/*
 * Drop all the locks that strlock acquired.
 */
static void
strunlock(struct stdata *stp, sqlist_t *sqlist)
{
    syncql_t *sql;

    if (STRMATED(stp)) {
        STREAM_PUTLOCKS_EXIT(stp);
        STREAM_PUTLOCKS_EXIT(stp->sd_mate);
        STRUNLOCKMATES(stp);
        mutex_exit(&stp->sd_reflock);
        mutex_exit(&stp->sd_mate->sd_reflock);
    } else {
        STREAM_PUTLOCKS_EXIT(stp);
        mutex_exit(&stp->sd_lock);
        mutex_exit(&stp->sd_reflock);
    }

    if (sqlist == NULL)
        return;

    for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) {
        SQ_PUTLOCKS_EXIT(sql->sql_sq);
        mutex_exit(SQLOCK(sql->sql_sq));
    }
}

/*
 * When the module has a service procedure, we need to check whether the
 * next module with a service procedure is in flow control, in order to
 * trigger the backenable.
 */
static void
backenable_insertedq(queue_t *q)
{
    qband_t *qbp;

    claimstr(q);
    if (q->q_qinfo->qi_srvp != NULL && q->q_next != NULL) {
        if (q->q_next->q_nfsrv->q_flag & QWANTW)
            backenable(q, 0);

        qbp = q->q_next->q_nfsrv->q_bandp;
        for (; qbp != NULL; qbp = qbp->qb_next)
            if ((qbp->qb_flag & QB_WANTW) && qbp->qb_first != NULL)
                backenable(q, qbp->qb_first->b_band);
    }
    releasestr(q);
}

/*
 * Given two read queues, insert a new one after the other.
 *
 * This routine acquires all the necessary locks in order to change
 * q_next and related pointers using strlock().
 * It depends on the stream head ensuring that there are no concurrent
 * insertq or removeq on the same stream. The stream head ensures this
 * using the flags STWOPEN, STRCLOSE, and STRPLUMB.
 *
 * Note that no syncq locks are held during the q_next change. This is
 * applied to all streams since, unlike removeq, there is no problem of stale
 * pointers when adding a module to the stream. Thus drivers/modules that do a
 * canput(rq->q_next) would never get a closed/freed queue pointer even if we
 * applied this optimization to all streams.
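 *
 * For orientation (restating the splice performed below, non-FIFO case):
 *
 *	wnew->q_next = wafter->q_next;
 *	new->q_next = after;
 *	wafter->q_next = wnew;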
 */
void
insertq(struct stdata *stp, queue_t *new)
{
    queue_t *after;
    queue_t *wafter;
    queue_t *wnew = _WR(new);
    boolean_t have_fifo = B_FALSE;

    if (new->q_flag & _QINSERTING) {
        ASSERT(stp->sd_vnode->v_type != VFIFO);
        after = new->q_next;
        wafter = _WR(new->q_next);
    } else {
        after = _RD(stp->sd_wrq);
        wafter = stp->sd_wrq;
    }

    TRACE_2(TR_FAC_STREAMS_FR, TR_INSERTQ,
        "insertq:%p, %p", after, new);
    ASSERT(after->q_flag & QREADR);
    ASSERT(new->q_flag & QREADR);

    strlock(stp, NULL);

    /* Do we have a FIFO? */
    if (wafter->q_next == after) {
        have_fifo = B_TRUE;
        wnew->q_next = new;
    } else {
        wnew->q_next = wafter->q_next;
    }
    new->q_next = after;

    set_nfsrv_ptr(new, wnew, after, wafter);
    /*
     * set_nfsrv_ptr() needs to know if this is an insertion or not,
     * so only reset this flag after calling it.
     */
    new->q_flag &= ~_QINSERTING;

    if (have_fifo) {
        wafter->q_next = wnew;
    } else {
        if (wafter->q_next)
            _OTHERQ(wafter->q_next)->q_next = new;
        wafter->q_next = wnew;
    }

    set_qend(new);
    /* The QEND flag might have to be updated for the upstream guy */
    set_qend(after);

    ASSERT(_SAMESTR(new) == O_SAMESTR(new));
    ASSERT(_SAMESTR(wnew) == O_SAMESTR(wnew));
    ASSERT(_SAMESTR(after) == O_SAMESTR(after));
    ASSERT(_SAMESTR(wafter) == O_SAMESTR(wafter));
    strsetuio(stp);

    /*
     * If this was a module insertion, bump the push count.
     */
    if (!(new->q_flag & QISDRV))
        stp->sd_pushcnt++;

    strunlock(stp, NULL);

    /* check if the write Q needs backenable */
    backenable_insertedq(wnew);

    /* check if the read Q needs backenable */
    backenable_insertedq(new);
}

/*
 * Given a read queue, unlink it from any neighbors.
 *
 * This routine acquires all the necessary locks in order to
 * change q_next and related pointers and also guard against
 * stale references (e.g. through q_next) to the queue that
 * is being removed. It also plays a part in ensuring
 * that the module's/driver's put procedure doesn't get called
 * after qprocsoff returns.
 *
 * Removeq depends on the stream head ensuring that there are
 * no concurrent insertq or removeq on the same stream. The
 * stream head ensures this using the flags STWOPEN, STRCLOSE and
 * STRPLUMB.
 *
 * The set of locks needed to remove the queue is different in
 * different cases:
 *
 * Acquire sd_lock, sd_reflock, and all the syncq locks in the stream after
 * waiting for the syncq reference count to drop to 0 indicating that no
 * non-close threads are present anywhere in the stream. This ensures that any
 * module/driver can reference q_next in its open, close, put, or service
 * procedures.
 *
 * The sq_rmqcount counter tracks the number of threads inside removeq().
 * strlock() ensures that there are either no threads executing inside the
 * perimeter or only a thread calling qprocsoff().
 *
 * strlock() compares the value of sq_count with the number of threads inside
 * removeq() and waits until sq_count is equal to sq_rmqcount. We need to wake
 * up any threads waiting in strlock() when the sq_rmqcount increases.
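 *
 * The corresponding handshake in removeq() below is simply (shown here
 * for orientation):
 *
 *	mutex_enter(SQLOCK(sq));
 *	sq->sq_rmqcount++;
 *	if (sq->sq_flags & SQ_WANTWAKEUP)
 *		cv_broadcast(&sq->sq_wait);
 *	mutex_exit(SQLOCK(sq));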
4828 */ 4829 4830 void 4831 removeq(queue_t *qp) 4832 { 4833 queue_t *wqp = _WR(qp); 4834 struct stdata *stp = STREAM(qp); 4835 sqlist_t *sqlist = NULL; 4836 boolean_t isdriver; 4837 int moved; 4838 syncq_t *sq = qp->q_syncq; 4839 syncq_t *wsq = wqp->q_syncq; 4840 4841 ASSERT(stp); 4842 4843 TRACE_2(TR_FAC_STREAMS_FR, TR_REMOVEQ, 4844 "removeq:%p %p", qp, wqp); 4845 ASSERT(qp->q_flag&QREADR); 4846 4847 /* 4848 * For queues using Synchronous streams, we must wait for all threads in 4849 * rwnext() to drain out before proceeding. 4850 */ 4851 if (qp->q_flag & QSYNCSTR) { 4852 /* First, we need wakeup any threads blocked in rwnext() */ 4853 mutex_enter(SQLOCK(sq)); 4854 if (sq->sq_flags & SQ_WANTWAKEUP) { 4855 sq->sq_flags &= ~SQ_WANTWAKEUP; 4856 cv_broadcast(&sq->sq_wait); 4857 } 4858 mutex_exit(SQLOCK(sq)); 4859 4860 if (wsq != sq) { 4861 mutex_enter(SQLOCK(wsq)); 4862 if (wsq->sq_flags & SQ_WANTWAKEUP) { 4863 wsq->sq_flags &= ~SQ_WANTWAKEUP; 4864 cv_broadcast(&wsq->sq_wait); 4865 } 4866 mutex_exit(SQLOCK(wsq)); 4867 } 4868 4869 mutex_enter(QLOCK(qp)); 4870 while (qp->q_rwcnt > 0) { 4871 qp->q_flag |= QWANTRMQSYNC; 4872 cv_wait(&qp->q_wait, QLOCK(qp)); 4873 } 4874 mutex_exit(QLOCK(qp)); 4875 4876 mutex_enter(QLOCK(wqp)); 4877 while (wqp->q_rwcnt > 0) { 4878 wqp->q_flag |= QWANTRMQSYNC; 4879 cv_wait(&wqp->q_wait, QLOCK(wqp)); 4880 } 4881 mutex_exit(QLOCK(wqp)); 4882 } 4883 4884 mutex_enter(SQLOCK(sq)); 4885 sq->sq_rmqcount++; 4886 if (sq->sq_flags & SQ_WANTWAKEUP) { 4887 sq->sq_flags &= ~SQ_WANTWAKEUP; 4888 cv_broadcast(&sq->sq_wait); 4889 } 4890 mutex_exit(SQLOCK(sq)); 4891 4892 isdriver = (qp->q_flag & QISDRV); 4893 4894 sqlist = sqlist_build(qp, stp, STRMATED(stp)); 4895 strlock(stp, sqlist); 4896 4897 reset_nfsrv_ptr(qp, wqp); 4898 4899 ASSERT(wqp->q_next == NULL || backq(qp)->q_next == qp); 4900 ASSERT(qp->q_next == NULL || backq(wqp)->q_next == wqp); 4901 /* Do we have a FIFO? */ 4902 if (wqp->q_next == qp) { 4903 stp->sd_wrq->q_next = _RD(stp->sd_wrq); 4904 } else { 4905 if (wqp->q_next) 4906 backq(qp)->q_next = qp->q_next; 4907 if (qp->q_next) 4908 backq(wqp)->q_next = wqp->q_next; 4909 } 4910 4911 /* The QEND flag might have to be updated for the upstream guy */ 4912 if (qp->q_next) 4913 set_qend(qp->q_next); 4914 4915 ASSERT(_SAMESTR(stp->sd_wrq) == O_SAMESTR(stp->sd_wrq)); 4916 ASSERT(_SAMESTR(_RD(stp->sd_wrq)) == O_SAMESTR(_RD(stp->sd_wrq))); 4917 4918 /* 4919 * Move any messages destined for the put procedures to the next 4920 * syncq in line. Otherwise free them. 4921 */ 4922 moved = 0; 4923 /* 4924 * Quick check to see whether there are any messages or events. 4925 */ 4926 if (qp->q_syncqmsgs != 0 || (qp->q_syncq->sq_flags & SQ_EVENTS)) 4927 moved += propagate_syncq(qp); 4928 if (wqp->q_syncqmsgs != 0 || 4929 (wqp->q_syncq->sq_flags & SQ_EVENTS)) 4930 moved += propagate_syncq(wqp); 4931 4932 strsetuio(stp); 4933 4934 /* 4935 * If this was a module removal, decrement the push count. 4936 */ 4937 if (!isdriver) 4938 stp->sd_pushcnt--; 4939 4940 strunlock(stp, sqlist); 4941 sqlist_free(sqlist); 4942 4943 /* 4944 * Make sure any messages that were propagated are drained. 4945 * Also clear any QFULL bit caused by messages that were propagated. 
4946 */ 4947 4948 if (qp->q_next != NULL) { 4949 clr_qfull(qp); 4950 /* 4951 * For the driver calling qprocsoff, propagate_syncq 4952 * frees all the messages instead of putting it in 4953 * the stream head 4954 */ 4955 if (!isdriver && (moved > 0)) 4956 emptysq(qp->q_next->q_syncq); 4957 } 4958 if (wqp->q_next != NULL) { 4959 clr_qfull(wqp); 4960 /* 4961 * We come here for any pop of a module except for the 4962 * case of driver being removed. We don't call emptysq 4963 * if we did not move any messages. This will avoid holding 4964 * PERMOD syncq locks in emptysq 4965 */ 4966 if (moved > 0) 4967 emptysq(wqp->q_next->q_syncq); 4968 } 4969 4970 mutex_enter(SQLOCK(sq)); 4971 sq->sq_rmqcount--; 4972 mutex_exit(SQLOCK(sq)); 4973 } 4974 4975 /* 4976 * Prevent further entry by setting a flag (like SQ_FROZEN, SQ_BLOCKED or 4977 * SQ_WRITER) on a syncq. 4978 * If maxcnt is not -1 it assumes that caller has "maxcnt" claim(s) on the 4979 * sync queue and waits until sq_count reaches maxcnt. 4980 * 4981 * if maxcnt is -1 there's no need to grab sq_putlocks since the caller 4982 * does not care about putnext threads that are in the middle of calling put 4983 * entry points. 4984 * 4985 * This routine is used for both inner and outer syncqs. 4986 */ 4987 static void 4988 blocksq(syncq_t *sq, ushort_t flag, int maxcnt) 4989 { 4990 uint16_t count = 0; 4991 4992 mutex_enter(SQLOCK(sq)); 4993 /* 4994 * Wait for SQ_FROZEN/SQ_BLOCKED to be reset. 4995 * SQ_FROZEN will be set if there is a frozen stream that has a 4996 * queue which also refers to this "shared" syncq. 4997 * SQ_BLOCKED will be set if there is "off" queue which also 4998 * refers to this "shared" syncq. 4999 */ 5000 if (maxcnt != -1) { 5001 count = sq->sq_count; 5002 SQ_PUTLOCKS_ENTER(sq); 5003 SQ_PUTCOUNT_CLRFAST_LOCKED(sq); 5004 SUM_SQ_PUTCOUNTS(sq, count); 5005 } 5006 sq->sq_needexcl++; 5007 ASSERT(sq->sq_needexcl != 0); /* wraparound */ 5008 5009 while ((sq->sq_flags & flag) || 5010 (maxcnt != -1 && count > (unsigned)maxcnt)) { 5011 sq->sq_flags |= SQ_WANTWAKEUP; 5012 if (maxcnt != -1) { 5013 SQ_PUTLOCKS_EXIT(sq); 5014 } 5015 cv_wait(&sq->sq_wait, SQLOCK(sq)); 5016 if (maxcnt != -1) { 5017 count = sq->sq_count; 5018 SQ_PUTLOCKS_ENTER(sq); 5019 SUM_SQ_PUTCOUNTS(sq, count); 5020 } 5021 } 5022 sq->sq_needexcl--; 5023 sq->sq_flags |= flag; 5024 ASSERT(maxcnt == -1 || count == maxcnt); 5025 if (maxcnt != -1) { 5026 if (sq->sq_needexcl == 0) { 5027 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 5028 } 5029 SQ_PUTLOCKS_EXIT(sq); 5030 } else if (sq->sq_needexcl == 0) { 5031 SQ_PUTCOUNT_SETFAST(sq); 5032 } 5033 5034 mutex_exit(SQLOCK(sq)); 5035 } 5036 5037 /* 5038 * Reset a flag that was set with blocksq. 5039 * 5040 * Can not use this routine to reset SQ_WRITER. 5041 * 5042 * If "isouter" is set then the syncq is assumed to be an outer perimeter 5043 * and drain_syncq is not called. Instead we rely on the qwriter_outer thread 5044 * to handle the queued qwriter operations. 5045 * 5046 * no need to grab sq_putlocks here. See comment in strsubr.h that explains when 5047 * sq_putlocks are used. 
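 *
 * For illustration (the assumed pairing, as used by outer_insert() and
 * outer_remove() below):
 *
 *	blocksq(outer, SQ_BLOCKED, 0);
 *	... change the outer perimeter's membership ...
 *	unblocksq(outer, SQ_BLOCKED, 1);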
5048 */ 5049 static void 5050 unblocksq(syncq_t *sq, uint16_t resetflag, int isouter) 5051 { 5052 uint16_t flags; 5053 5054 mutex_enter(SQLOCK(sq)); 5055 ASSERT(resetflag != SQ_WRITER); 5056 ASSERT(sq->sq_flags & resetflag); 5057 flags = sq->sq_flags & ~resetflag; 5058 sq->sq_flags = flags; 5059 if (flags & (SQ_QUEUED | SQ_WANTWAKEUP)) { 5060 if (flags & SQ_WANTWAKEUP) { 5061 flags &= ~SQ_WANTWAKEUP; 5062 cv_broadcast(&sq->sq_wait); 5063 } 5064 sq->sq_flags = flags; 5065 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) { 5066 if (!isouter) { 5067 /* drain_syncq drops SQLOCK */ 5068 drain_syncq(sq); 5069 return; 5070 } 5071 } 5072 } 5073 mutex_exit(SQLOCK(sq)); 5074 } 5075 5076 /* 5077 * Reset a flag that was set with blocksq. 5078 * Does not drain the syncq. Use emptysq() for that. 5079 * Returns 1 if SQ_QUEUED is set. Otherwise 0. 5080 * 5081 * no need to grab sq_putlocks here. See comment in strsubr.h that explains when 5082 * sq_putlocks are used. 5083 */ 5084 static int 5085 dropsq(syncq_t *sq, uint16_t resetflag) 5086 { 5087 uint16_t flags; 5088 5089 mutex_enter(SQLOCK(sq)); 5090 ASSERT(sq->sq_flags & resetflag); 5091 flags = sq->sq_flags & ~resetflag; 5092 if (flags & SQ_WANTWAKEUP) { 5093 flags &= ~SQ_WANTWAKEUP; 5094 cv_broadcast(&sq->sq_wait); 5095 } 5096 sq->sq_flags = flags; 5097 mutex_exit(SQLOCK(sq)); 5098 if (flags & SQ_QUEUED) 5099 return (1); 5100 return (0); 5101 } 5102 5103 /* 5104 * Empty all the messages on a syncq. 5105 * 5106 * no need to grab sq_putlocks here. See comment in strsubr.h that explains when 5107 * sq_putlocks are used. 5108 */ 5109 static void 5110 emptysq(syncq_t *sq) 5111 { 5112 uint16_t flags; 5113 5114 mutex_enter(SQLOCK(sq)); 5115 flags = sq->sq_flags; 5116 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) { 5117 /* 5118 * To prevent potential recursive invocation of drain_syncq we 5119 * do not call drain_syncq if count is non-zero. 5120 */ 5121 if (sq->sq_count == 0) { 5122 /* drain_syncq() drops SQLOCK */ 5123 drain_syncq(sq); 5124 return; 5125 } else 5126 sqenable(sq); 5127 } 5128 mutex_exit(SQLOCK(sq)); 5129 } 5130 5131 /* 5132 * Ordered insert while removing duplicates. 5133 */ 5134 static void 5135 sqlist_insert(sqlist_t *sqlist, syncq_t *sqp) 5136 { 5137 syncql_t *sqlp, **prev_sqlpp, *new_sqlp; 5138 5139 prev_sqlpp = &sqlist->sqlist_head; 5140 while ((sqlp = *prev_sqlpp) != NULL) { 5141 if (sqlp->sql_sq >= sqp) { 5142 if (sqlp->sql_sq == sqp) /* duplicate */ 5143 return; 5144 break; 5145 } 5146 prev_sqlpp = &sqlp->sql_next; 5147 } 5148 new_sqlp = &sqlist->sqlist_array[sqlist->sqlist_index++]; 5149 ASSERT((char *)new_sqlp < (char *)sqlist + sqlist->sqlist_size); 5150 new_sqlp->sql_next = sqlp; 5151 new_sqlp->sql_sq = sqp; 5152 *prev_sqlpp = new_sqlp; 5153 } 5154 5155 /* 5156 * Walk the write side queues until we hit either the driver 5157 * or a twist in the stream (_SAMESTR will return false in both 5158 * these cases) then turn around and walk the read side queues 5159 * back up to the stream head. 5160 */ 5161 static void 5162 sqlist_insertall(sqlist_t *sqlist, queue_t *q) 5163 { 5164 while (q != NULL) { 5165 sqlist_insert(sqlist, q->q_syncq); 5166 5167 if (_SAMESTR(q)) 5168 q = q->q_next; 5169 else if (!(q->q_flag & QREADR)) 5170 q = _RD(q); 5171 else 5172 q = NULL; 5173 } 5174 } 5175 5176 /* 5177 * Allocate and build a list of all syncqs in a stream and the syncq(s) 5178 * associated with the "q" parameter. The resulting list is sorted in a 5179 * canonical order and is free of duplicates. 
5180 * Assumes the passed queue is a _RD(q). 5181 */ 5182 static sqlist_t * 5183 sqlist_build(queue_t *q, struct stdata *stp, boolean_t do_twist) 5184 { 5185 sqlist_t *sqlist = sqlist_alloc(stp, KM_SLEEP); 5186 5187 /* 5188 * start with the current queue/qpair 5189 */ 5190 ASSERT(q->q_flag & QREADR); 5191 5192 sqlist_insert(sqlist, q->q_syncq); 5193 sqlist_insert(sqlist, _WR(q)->q_syncq); 5194 5195 sqlist_insertall(sqlist, stp->sd_wrq); 5196 if (do_twist) 5197 sqlist_insertall(sqlist, stp->sd_mate->sd_wrq); 5198 5199 return (sqlist); 5200 } 5201 5202 static sqlist_t * 5203 sqlist_alloc(struct stdata *stp, int kmflag) 5204 { 5205 size_t sqlist_size; 5206 sqlist_t *sqlist; 5207 5208 /* 5209 * Allocate 2 syncql_t's for each pushed module. Note that 5210 * the sqlist_t structure already has 4 syncql_t's built in: 5211 * 2 for the stream head, and 2 for the driver/other stream head. 5212 */ 5213 sqlist_size = 2 * sizeof (syncql_t) * stp->sd_pushcnt + 5214 sizeof (sqlist_t); 5215 if (STRMATED(stp)) 5216 sqlist_size += 2 * sizeof (syncql_t) * stp->sd_mate->sd_pushcnt; 5217 sqlist = kmem_alloc(sqlist_size, kmflag); 5218 5219 sqlist->sqlist_head = NULL; 5220 sqlist->sqlist_size = sqlist_size; 5221 sqlist->sqlist_index = 0; 5222 5223 return (sqlist); 5224 } 5225 5226 /* 5227 * Free the list created by sqlist_alloc() 5228 */ 5229 static void 5230 sqlist_free(sqlist_t *sqlist) 5231 { 5232 kmem_free(sqlist, sqlist->sqlist_size); 5233 } 5234 5235 /* 5236 * Prevent any new entries into any syncq in this stream. 5237 * Used by freezestr. 5238 */ 5239 void 5240 strblock(queue_t *q) 5241 { 5242 struct stdata *stp; 5243 syncql_t *sql; 5244 sqlist_t *sqlist; 5245 5246 q = _RD(q); 5247 5248 stp = STREAM(q); 5249 ASSERT(stp != NULL); 5250 5251 /* 5252 * Get a sorted list with all the duplicates removed containing 5253 * all the syncqs referenced by this stream. 5254 */ 5255 sqlist = sqlist_build(q, stp, B_FALSE); 5256 for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next) 5257 blocksq(sql->sql_sq, SQ_FROZEN, -1); 5258 sqlist_free(sqlist); 5259 } 5260 5261 /* 5262 * Release the block on new entries into this stream 5263 */ 5264 void 5265 strunblock(queue_t *q) 5266 { 5267 struct stdata *stp; 5268 syncql_t *sql; 5269 sqlist_t *sqlist; 5270 int drain_needed; 5271 5272 q = _RD(q); 5273 5274 /* 5275 * Get a sorted list with all the duplicates removed containing 5276 * all the syncqs referenced by this stream. 5277 * Have to drop the SQ_FROZEN flag on all the syncqs before 5278 * starting to drain them; otherwise the draining might 5279 * cause a freezestr in some module on the stream (which 5280 * would deadlock.) 
5281 */ 5282 stp = STREAM(q); 5283 ASSERT(stp != NULL); 5284 sqlist = sqlist_build(q, stp, B_FALSE); 5285 drain_needed = 0; 5286 for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next) 5287 drain_needed += dropsq(sql->sql_sq, SQ_FROZEN); 5288 if (drain_needed) { 5289 for (sql = sqlist->sqlist_head; sql != NULL; 5290 sql = sql->sql_next) 5291 emptysq(sql->sql_sq); 5292 } 5293 sqlist_free(sqlist); 5294 } 5295 5296 #ifdef DEBUG 5297 static int 5298 qprocsareon(queue_t *rq) 5299 { 5300 if (rq->q_next == NULL) 5301 return (0); 5302 return (_WR(rq->q_next)->q_next == _WR(rq)); 5303 } 5304 5305 int 5306 qclaimed(queue_t *q) 5307 { 5308 uint_t count; 5309 5310 count = q->q_syncq->sq_count; 5311 SUM_SQ_PUTCOUNTS(q->q_syncq, count); 5312 return (count != 0); 5313 } 5314 5315 /* 5316 * Check if anyone has frozen this stream with freezestr 5317 */ 5318 int 5319 frozenstr(queue_t *q) 5320 { 5321 return ((q->q_syncq->sq_flags & SQ_FROZEN) != 0); 5322 } 5323 #endif /* DEBUG */ 5324 5325 /* 5326 * Enter a queue. 5327 * Obsoleted interface. Should not be used. 5328 */ 5329 void 5330 enterq(queue_t *q) 5331 { 5332 entersq(q->q_syncq, SQ_CALLBACK); 5333 } 5334 5335 void 5336 leaveq(queue_t *q) 5337 { 5338 leavesq(q->q_syncq, SQ_CALLBACK); 5339 } 5340 5341 /* 5342 * Enter a perimeter. c_inner and c_outer specifies which concurrency bits 5343 * to check. 5344 * Wait if SQ_QUEUED is set to preserve ordering between messages and qwriter 5345 * calls and the running of open, close and service procedures. 5346 * 5347 * if c_inner bit is set no need to grab sq_putlocks since we don't care 5348 * if other threads have entered or are entering put entry point. 5349 * 5350 * if c_inner bit is set it might have been posible to use 5351 * sq_putlocks/sq_putcounts instead of SQLOCK/sq_count (e.g. to optimize 5352 * open/close path for IP) but since the count may need to be decremented in 5353 * qwait() we wouldn't know which counter to decrement. Currently counter is 5354 * selected by current cpu_seqid and current CPU can change at any moment. XXX 5355 * in the future we might use curthread id bits to select the counter and this 5356 * would stay constant across routine calls. 5357 */ 5358 void 5359 entersq(syncq_t *sq, int entrypoint) 5360 { 5361 uint16_t count = 0; 5362 uint16_t flags; 5363 uint16_t waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL; 5364 uint16_t type; 5365 uint_t c_inner = entrypoint & SQ_CI; 5366 uint_t c_outer = entrypoint & SQ_CO; 5367 5368 /* 5369 * Increment ref count to keep closes out of this queue. 5370 */ 5371 ASSERT(sq); 5372 ASSERT(c_inner && c_outer); 5373 mutex_enter(SQLOCK(sq)); 5374 flags = sq->sq_flags; 5375 type = sq->sq_type; 5376 if (!(type & c_inner)) { 5377 /* Make sure all putcounts now use slowlock. */ 5378 count = sq->sq_count; 5379 SQ_PUTLOCKS_ENTER(sq); 5380 SQ_PUTCOUNT_CLRFAST_LOCKED(sq); 5381 SUM_SQ_PUTCOUNTS(sq, count); 5382 sq->sq_needexcl++; 5383 ASSERT(sq->sq_needexcl != 0); /* wraparound */ 5384 waitflags |= SQ_MESSAGES; 5385 } 5386 /* 5387 * Wait until we can enter the inner perimeter. 5388 * If we want exclusive access we wait until sq_count is 0. 5389 * We have to do this before entering the outer perimeter in order 5390 * to preserve put/close message ordering. 
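 *
 * Spelled out (a restatement of the test below, not new logic):
 *
 *	wait while (sq_flags & (SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL)) != 0,
 *	or, for exclusive (non-c_inner) entry, while SQ_MESSAGES is set
 *	or the summed sq_count is non-zero.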
5391 */ 5392 while ((flags & waitflags) || (!(type & c_inner) && count != 0)) { 5393 sq->sq_flags = flags | SQ_WANTWAKEUP; 5394 if (!(type & c_inner)) { 5395 SQ_PUTLOCKS_EXIT(sq); 5396 } 5397 cv_wait(&sq->sq_wait, SQLOCK(sq)); 5398 if (!(type & c_inner)) { 5399 count = sq->sq_count; 5400 SQ_PUTLOCKS_ENTER(sq); 5401 SUM_SQ_PUTCOUNTS(sq, count); 5402 } 5403 flags = sq->sq_flags; 5404 } 5405 5406 if (!(type & c_inner)) { 5407 ASSERT(sq->sq_needexcl > 0); 5408 sq->sq_needexcl--; 5409 if (sq->sq_needexcl == 0) { 5410 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 5411 } 5412 } 5413 5414 /* Check if we need to enter the outer perimeter */ 5415 if (!(type & c_outer)) { 5416 /* 5417 * We have to enter the outer perimeter exclusively before 5418 * we can increment sq_count to avoid deadlock. This implies 5419 * that we have to re-check sq_flags and sq_count. 5420 * 5421 * is it possible to have c_inner set when c_outer is not set? 5422 */ 5423 if (!(type & c_inner)) { 5424 SQ_PUTLOCKS_EXIT(sq); 5425 } 5426 mutex_exit(SQLOCK(sq)); 5427 outer_enter(sq->sq_outer, SQ_GOAWAY); 5428 mutex_enter(SQLOCK(sq)); 5429 flags = sq->sq_flags; 5430 /* 5431 * there should be no need to recheck sq_putcounts 5432 * because outer_enter() has already waited for them to clear 5433 * after setting SQ_WRITER. 5434 */ 5435 count = sq->sq_count; 5436 #ifdef DEBUG 5437 /* 5438 * SUMCHECK_SQ_PUTCOUNTS should return the sum instead 5439 * of doing an ASSERT internally. Others should do 5440 * something like 5441 * ASSERT(SUMCHECK_SQ_PUTCOUNTS(sq) == 0); 5442 * without the need to #ifdef DEBUG it. 5443 */ 5444 SUMCHECK_SQ_PUTCOUNTS(sq, 0); 5445 #endif 5446 while ((flags & (SQ_EXCL|SQ_BLOCKED|SQ_FROZEN)) || 5447 (!(type & c_inner) && count != 0)) { 5448 sq->sq_flags = flags | SQ_WANTWAKEUP; 5449 cv_wait(&sq->sq_wait, SQLOCK(sq)); 5450 count = sq->sq_count; 5451 flags = sq->sq_flags; 5452 } 5453 } 5454 5455 sq->sq_count++; 5456 ASSERT(sq->sq_count != 0); /* Wraparound */ 5457 if (!(type & c_inner)) { 5458 /* Exclusive entry */ 5459 ASSERT(sq->sq_count == 1); 5460 sq->sq_flags |= SQ_EXCL; 5461 if (type & c_outer) { 5462 SQ_PUTLOCKS_EXIT(sq); 5463 } 5464 } 5465 mutex_exit(SQLOCK(sq)); 5466 } 5467 5468 /* 5469 * leave a syncq. announce to framework that closes may proceed. 5470 * c_inner and c_outer specifies which concurrency bits 5471 * to check. 5472 * 5473 * must never be called from driver or module put entry point. 5474 * 5475 * no need to grab sq_putlocks here. See comment in strsubr.h that explains when 5476 * sq_putlocks are used. 5477 */ 5478 void 5479 leavesq(syncq_t *sq, int entrypoint) 5480 { 5481 uint16_t flags; 5482 uint16_t type; 5483 uint_t c_outer = entrypoint & SQ_CO; 5484 #ifdef DEBUG 5485 uint_t c_inner = entrypoint & SQ_CI; 5486 #endif 5487 5488 /* 5489 * decrement ref count, drain the syncq if possible, and wake up 5490 * any waiting close. 5491 */ 5492 ASSERT(sq); 5493 ASSERT(c_inner && c_outer); 5494 mutex_enter(SQLOCK(sq)); 5495 flags = sq->sq_flags; 5496 type = sq->sq_type; 5497 if (flags & (SQ_QUEUED|SQ_WANTWAKEUP|SQ_WANTEXWAKEUP)) { 5498 5499 if (flags & SQ_WANTWAKEUP) { 5500 flags &= ~SQ_WANTWAKEUP; 5501 cv_broadcast(&sq->sq_wait); 5502 } 5503 if (flags & SQ_WANTEXWAKEUP) { 5504 flags &= ~SQ_WANTEXWAKEUP; 5505 cv_broadcast(&sq->sq_exitwait); 5506 } 5507 5508 if ((flags & SQ_QUEUED) && !(flags & SQ_STAYAWAY)) { 5509 /* 5510 * The syncq needs to be drained. "Exit" the syncq 5511 * before calling drain_syncq. 
5512 */ 5513 ASSERT(sq->sq_count != 0); 5514 sq->sq_count--; 5515 ASSERT((flags & SQ_EXCL) || (type & c_inner)); 5516 sq->sq_flags = flags & ~SQ_EXCL; 5517 drain_syncq(sq); 5518 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq))); 5519 /* Check if we need to exit the outer perimeter */ 5520 /* XXX will this ever be true? */ 5521 if (!(type & c_outer)) 5522 outer_exit(sq->sq_outer); 5523 return; 5524 } 5525 } 5526 ASSERT(sq->sq_count != 0); 5527 sq->sq_count--; 5528 ASSERT((flags & SQ_EXCL) || (type & c_inner)); 5529 sq->sq_flags = flags & ~SQ_EXCL; 5530 mutex_exit(SQLOCK(sq)); 5531 5532 /* Check if we need to exit the outer perimeter */ 5533 if (!(sq->sq_type & c_outer)) 5534 outer_exit(sq->sq_outer); 5535 } 5536 5537 /* 5538 * Prevent q_next from changing in this stream by incrementing sq_count. 5539 * 5540 * no need to grab sq_putlocks here. See comment in strsubr.h that explains when 5541 * sq_putlocks are used. 5542 */ 5543 void 5544 claimq(queue_t *qp) 5545 { 5546 syncq_t *sq = qp->q_syncq; 5547 5548 mutex_enter(SQLOCK(sq)); 5549 sq->sq_count++; 5550 ASSERT(sq->sq_count != 0); /* Wraparound */ 5551 mutex_exit(SQLOCK(sq)); 5552 } 5553 5554 /* 5555 * Undo claimq. 5556 * 5557 * no need to grab sq_putlocks here. See comment in strsubr.h that explains when 5558 * sq_putlocks are used. 5559 */ 5560 void 5561 releaseq(queue_t *qp) 5562 { 5563 syncq_t *sq = qp->q_syncq; 5564 uint16_t flags; 5565 5566 mutex_enter(SQLOCK(sq)); 5567 ASSERT(sq->sq_count > 0); 5568 sq->sq_count--; 5569 5570 flags = sq->sq_flags; 5571 if (flags & (SQ_WANTWAKEUP|SQ_QUEUED)) { 5572 if (flags & SQ_WANTWAKEUP) { 5573 flags &= ~SQ_WANTWAKEUP; 5574 cv_broadcast(&sq->sq_wait); 5575 } 5576 sq->sq_flags = flags; 5577 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) { 5578 /* 5579 * To prevent potential recursive invocation of 5580 * drain_syncq we do not call drain_syncq if count is 5581 * non-zero. 5582 */ 5583 if (sq->sq_count == 0) { 5584 drain_syncq(sq); 5585 return; 5586 } else 5587 sqenable(sq); 5588 } 5589 } 5590 mutex_exit(SQLOCK(sq)); 5591 } 5592 5593 /* 5594 * Prevent q_next from changing in this stream by incrementing sd_refcnt. 5595 */ 5596 void 5597 claimstr(queue_t *qp) 5598 { 5599 struct stdata *stp = STREAM(qp); 5600 5601 mutex_enter(&stp->sd_reflock); 5602 stp->sd_refcnt++; 5603 ASSERT(stp->sd_refcnt != 0); /* Wraparound */ 5604 mutex_exit(&stp->sd_reflock); 5605 } 5606 5607 /* 5608 * Undo claimstr. 
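 *
 * Callers bracket any walk of q_next with the claim, as backenable()
 * above does (shown here for orientation):
 *
 *	claimstr(q);
 *	for (nq = backq(q); nq && !nq->q_qinfo->qi_srvp; nq = backq(nq))
 *		;
 *	releasestr(q);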
 */
void
releasestr(queue_t *qp)
{
    struct stdata *stp = STREAM(qp);

    mutex_enter(&stp->sd_reflock);
    ASSERT(stp->sd_refcnt != 0);
    if (--stp->sd_refcnt == 0)
        cv_broadcast(&stp->sd_refmonitor);
    mutex_exit(&stp->sd_reflock);
}

static syncq_t *
new_syncq(void)
{
    return (kmem_cache_alloc(syncq_cache, KM_SLEEP));
}

static void
free_syncq(syncq_t *sq)
{
    ASSERT(sq->sq_head == NULL);
    ASSERT(sq->sq_outer == NULL);
    ASSERT(sq->sq_callbpend == NULL);
    ASSERT((sq->sq_onext == NULL && sq->sq_oprev == NULL) ||
        (sq->sq_onext == sq && sq->sq_oprev == sq));

    if (sq->sq_ciputctrl != NULL) {
        ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1);
        SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl,
            sq->sq_nciputctrl, 0);
        ASSERT(ciputctrl_cache != NULL);
        kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl);
    }

    sq->sq_tail = NULL;
    sq->sq_evhead = NULL;
    sq->sq_evtail = NULL;
    sq->sq_ciputctrl = NULL;
    sq->sq_nciputctrl = 0;
    sq->sq_count = 0;
    sq->sq_rmqcount = 0;
    sq->sq_callbflags = 0;
    sq->sq_cancelid = 0;
    sq->sq_next = NULL;
    sq->sq_needexcl = 0;
    sq->sq_svcflags = 0;
    sq->sq_nqueues = 0;
    sq->sq_pri = 0;
    sq->sq_onext = NULL;
    sq->sq_oprev = NULL;
    sq->sq_flags = 0;
    sq->sq_type = 0;
    sq->sq_servcount = 0;

    kmem_cache_free(syncq_cache, sq);
}

/* Outer perimeter code */

/*
 * The outer syncq uses the fields and flags in the syncq slightly
 * differently from the inner syncqs.
 *	sq_count	Incremented when there are pending or running
 *			writers at the outer perimeter to prevent the set of
 *			inner syncqs that belong to the outer perimeter from
 *			changing.
 *	sq_head/tail	List of deferred qwriter(OUTER) operations.
 *
 *	SQ_BLOCKED	Set to prevent traversing of sq_next/sq_prev while
 *			inner syncqs are added to or removed from the
 *			outer perimeter.
 *	SQ_QUEUED	sq_head/tail has messages or events queued.
 *
 *	SQ_WRITER	A thread is currently traversing all the inner syncqs
 *			setting the SQ_WRITER flag.
 */

/*
 * Get write access at the outer perimeter.
 * Note that read access is done by entersq, putnext, and put by simply
 * incrementing sq_count in the inner syncq.
 *
 * Waits until "flags" is no longer set in the outer to prevent multiple
 * threads from having write access at the same time. SQ_WRITER has to be part
 * of "flags".
 *
 * Increases sq_count on the outer syncq to keep away outer_insert/remove
 * until the outer_exit is finished.
 *
 * outer_enter is vulnerable to starvation since it does not prevent new
 * threads from entering the inner syncqs while it is waiting for sq_count to
 * go to zero.
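 *
 * For illustration (the assumed usage, cf. entersq() above and
 * qwriter_outer() below):
 *
 *	outer_enter(sq->sq_outer, SQ_GOAWAY);
 *	... exclusive access at the outer perimeter ...
 *	outer_exit(sq->sq_outer);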
 */
void
outer_enter(syncq_t *outer, uint16_t flags)
{
    syncq_t *sq;
    int wait_needed;
    uint16_t count;

    ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
        outer->sq_oprev != NULL);
    ASSERT(flags & SQ_WRITER);

retry:
    mutex_enter(SQLOCK(outer));
    while (outer->sq_flags & flags) {
        outer->sq_flags |= SQ_WANTWAKEUP;
        cv_wait(&outer->sq_wait, SQLOCK(outer));
    }

    ASSERT(!(outer->sq_flags & SQ_WRITER));
    outer->sq_flags |= SQ_WRITER;
    outer->sq_count++;
    ASSERT(outer->sq_count != 0);	/* wraparound */
    wait_needed = 0;
    /*
     * Set SQ_WRITER on all the inner syncqs while holding
     * the SQLOCK on the outer syncq. This ensures that the changing
     * of SQ_WRITER is atomic under the outer SQLOCK.
     */
    for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
        mutex_enter(SQLOCK(sq));
        count = sq->sq_count;
        SQ_PUTLOCKS_ENTER(sq);
        sq->sq_flags |= SQ_WRITER;
        SUM_SQ_PUTCOUNTS(sq, count);
        if (count != 0)
            wait_needed = 1;
        SQ_PUTLOCKS_EXIT(sq);
        mutex_exit(SQLOCK(sq));
    }
    mutex_exit(SQLOCK(outer));

    /*
     * Get everybody out of the syncqs sequentially.
     * Note that we don't actually need to acquire the PUTLOCKS, since
     * we have already cleared the fastbit, and set SQ_WRITER. By
     * definition, the count can not increase since putnext will
     * take the slowlock path (and the purpose of acquiring the
     * putlocks was to make sure it didn't increase while we were
     * waiting).
     *
     * Note that we still acquire the PUTLOCKS to be safe.
     */
    if (wait_needed) {
        for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
            mutex_enter(SQLOCK(sq));
            count = sq->sq_count;
            SQ_PUTLOCKS_ENTER(sq);
            SUM_SQ_PUTCOUNTS(sq, count);
            while (count != 0) {
                sq->sq_flags |= SQ_WANTWAKEUP;
                SQ_PUTLOCKS_EXIT(sq);
                cv_wait(&sq->sq_wait, SQLOCK(sq));
                count = sq->sq_count;
                SQ_PUTLOCKS_ENTER(sq);
                SUM_SQ_PUTCOUNTS(sq, count);
            }
            SQ_PUTLOCKS_EXIT(sq);
            mutex_exit(SQLOCK(sq));
        }
        /*
         * Verify that none of the flags got set while we
         * were waiting for the sq_counts to drop.
         * If this happens we exit and retry entering the
         * outer perimeter.
         */
        mutex_enter(SQLOCK(outer));
        if (outer->sq_flags & (flags & ~SQ_WRITER)) {
            mutex_exit(SQLOCK(outer));
            outer_exit(outer);
            goto retry;
        }
        mutex_exit(SQLOCK(outer));
    }
}

/*
 * Drop the write access at the outer perimeter.
 * Read access is dropped implicitly (by putnext, put, and leavesq) by
 * decrementing sq_count.
 */
void
outer_exit(syncq_t *outer)
{
    syncq_t *sq;
    int drain_needed;
    uint16_t flags;

    ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
        outer->sq_oprev != NULL);
    ASSERT(MUTEX_NOT_HELD(SQLOCK(outer)));

    /*
     * Atomically (from the perspective of threads calling become_writer)
     * drop the write access at the outer perimeter by holding
     * SQLOCK(outer) across all the dropsq calls and the resetting of
     * SQ_WRITER.
     * This defines a locking order between the outer perimeter
     * SQLOCK and the inner perimeter SQLOCKs.
5812 */ 5813 mutex_enter(SQLOCK(outer)); 5814 flags = outer->sq_flags; 5815 ASSERT(outer->sq_flags & SQ_WRITER); 5816 if (flags & SQ_QUEUED) { 5817 write_now(outer); 5818 flags = outer->sq_flags; 5819 } 5820 5821 /* 5822 * sq_onext is stable since sq_count has not yet been decreased. 5823 * Reset the SQ_WRITER flags in all syncqs. 5824 * After dropping SQ_WRITER on the outer syncq we empty all the 5825 * inner syncqs. 5826 */ 5827 drain_needed = 0; 5828 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) 5829 drain_needed += dropsq(sq, SQ_WRITER); 5830 ASSERT(!(outer->sq_flags & SQ_QUEUED)); 5831 flags &= ~SQ_WRITER; 5832 if (drain_needed) { 5833 outer->sq_flags = flags; 5834 mutex_exit(SQLOCK(outer)); 5835 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) 5836 emptysq(sq); 5837 mutex_enter(SQLOCK(outer)); 5838 flags = outer->sq_flags; 5839 } 5840 if (flags & SQ_WANTWAKEUP) { 5841 flags &= ~SQ_WANTWAKEUP; 5842 cv_broadcast(&outer->sq_wait); 5843 } 5844 outer->sq_flags = flags; 5845 ASSERT(outer->sq_count > 0); 5846 outer->sq_count--; 5847 mutex_exit(SQLOCK(outer)); 5848 } 5849 5850 /* 5851 * Add another syncq to an outer perimeter. 5852 * Block out all other access to the outer perimeter while it is being 5853 * changed using blocksq. 5854 * Assumes that the caller has *not* done an outer_enter. 5855 * 5856 * Vulnerable to starvation in blocksq. 5857 */ 5858 static void 5859 outer_insert(syncq_t *outer, syncq_t *sq) 5860 { 5861 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5862 outer->sq_oprev != NULL); 5863 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL && 5864 sq->sq_oprev == NULL); /* Can't be in an outer perimeter */ 5865 5866 /* Get exclusive access to the outer perimeter list */ 5867 blocksq(outer, SQ_BLOCKED, 0); 5868 ASSERT(outer->sq_flags & SQ_BLOCKED); 5869 ASSERT(!(outer->sq_flags & SQ_WRITER)); 5870 5871 mutex_enter(SQLOCK(sq)); 5872 sq->sq_outer = outer; 5873 outer->sq_onext->sq_oprev = sq; 5874 sq->sq_onext = outer->sq_onext; 5875 outer->sq_onext = sq; 5876 sq->sq_oprev = outer; 5877 mutex_exit(SQLOCK(sq)); 5878 unblocksq(outer, SQ_BLOCKED, 1); 5879 } 5880 5881 /* 5882 * Remove a syncq from an outer perimeter. 5883 * Block out all other access to the outer perimeter while it is being 5884 * changed using blocksq. 5885 * Assumes that the caller has *not* done an outer_enter. 5886 * 5887 * Vulnerable to starvation in blocksq. 5888 */ 5889 static void 5890 outer_remove(syncq_t *outer, syncq_t *sq) 5891 { 5892 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5893 outer->sq_oprev != NULL); 5894 ASSERT(sq->sq_outer == outer); 5895 5896 /* Get exclusive access to the outer perimeter list */ 5897 blocksq(outer, SQ_BLOCKED, 0); 5898 ASSERT(outer->sq_flags & SQ_BLOCKED); 5899 ASSERT(!(outer->sq_flags & SQ_WRITER)); 5900 5901 mutex_enter(SQLOCK(sq)); 5902 sq->sq_outer = NULL; 5903 sq->sq_onext->sq_oprev = sq->sq_oprev; 5904 sq->sq_oprev->sq_onext = sq->sq_onext; 5905 sq->sq_oprev = sq->sq_onext = NULL; 5906 mutex_exit(SQLOCK(sq)); 5907 unblocksq(outer, SQ_BLOCKED, 1); 5908 } 5909 5910 /* 5911 * Queue a deferred qwriter(OUTER) callback for this outer perimeter. 5912 * If this is the first callback for this outer perimeter then add 5913 * this outer perimeter to the list of outer perimeters that 5914 * the qwriter_outer_thread will process. 5915 * 5916 * Increments sq_count in the outer syncq to prevent the membership 5917 * of the outer perimeter (in terms of inner syncqs) to change while 5918 * the callback is pending. 
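 *
 * The deferred request is encoded in the mblk itself (as the code below
 * does; shown here for orientation):
 *
 *	mp->b_prev = (mblk_t *)func;	- callback to run
 *	mp->b_queue = q;		- queue to run it on
 *	mp->b_next = NULL;		- event list linkage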
5919  */
5920 static void
5921 queue_writer(syncq_t *outer, void (*func)(), queue_t *q, mblk_t *mp)
5922 {
5923     ASSERT(MUTEX_HELD(SQLOCK(outer)));
5924 
5925     mp->b_prev = (mblk_t *)func;
5926     mp->b_queue = q;
5927     mp->b_next = NULL;
5928     outer->sq_count++;  /* Decremented when dequeued */
5929     ASSERT(outer->sq_count != 0);   /* Wraparound */
5930     if (outer->sq_evhead == NULL) {
5931         /* First message. */
5932         outer->sq_evhead = outer->sq_evtail = mp;
5933         outer->sq_flags |= SQ_EVENTS;
5934         mutex_exit(SQLOCK(outer));
5935         STRSTAT(qwr_outer);
5936         (void) taskq_dispatch(streams_taskq,
5937             (task_func_t *)qwriter_outer_service, outer, TQ_SLEEP);
5938     } else {
5939         ASSERT(outer->sq_flags & SQ_EVENTS);
5940         outer->sq_evtail->b_next = mp;
5941         outer->sq_evtail = mp;
5942         mutex_exit(SQLOCK(outer));
5943     }
5944 }
5945 
5946 /*
5947  * Try to upgrade to write access at the outer perimeter. If this can
5948  * not be done without blocking then queue the callback to be done
5949  * by the qwriter_outer_thread.
5950  *
5951  * This routine can only be called from put or service procedures plus
5952  * asynchronous callback routines that have properly entered the
5953  * syncq (via entersq). Thus qwriter(OUTER) assumes the caller has one claim
5954  * on the syncq associated with q.
5955  */
5956 void
5957 qwriter_outer(queue_t *q, mblk_t *mp, void (*func)())
5958 {
5959     syncq_t *osq, *sq, *outer;
5960     int failed;
5961     uint16_t flags;
5962 
5963     osq = q->q_syncq;
5964     outer = osq->sq_outer;
5965     if (outer == NULL)
5966         panic("qwriter(PERIM_OUTER): no outer perimeter");
5967     ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
5968         outer->sq_oprev != NULL);
5969 
5970     mutex_enter(SQLOCK(outer));
5971     flags = outer->sq_flags;
5972     /*
5973      * If some thread is traversing sq_next, or if we are blocked by
5974      * outer_insert or outer_remove, or if we already have queued
5975      * callbacks, then queue this callback for later processing.
5976      *
5977      * Also queue the qwriter for an interrupt thread in order
5978      * to reduce the time spent running at high IPL.
5979      *
5980      */
5981     if ((flags & SQ_GOAWAY) || (curthread->t_pri >= kpreemptpri)) {
5982         /*
5983          * Queue the become_writer request.
5984          * The queueing is atomic under SQLOCK(outer) in order
5985          * to synchronize with outer_exit.
5986          * queue_writer will drop the outer SQLOCK.
5987          */
5988         if (flags & SQ_BLOCKED) {
5989             /* Must set SQ_WRITER on inner perimeter */
5990             mutex_enter(SQLOCK(osq));
5991             osq->sq_flags |= SQ_WRITER;
5992             mutex_exit(SQLOCK(osq));
5993         } else {
5994             if (!(flags & SQ_WRITER)) {
5995                 /*
5996                  * The outer could have been SQ_BLOCKED thus
5997                  * SQ_WRITER might not be set on the inner.
5998                  */
5999                 mutex_enter(SQLOCK(osq));
6000                 osq->sq_flags |= SQ_WRITER;
6001                 mutex_exit(SQLOCK(osq));
6002             }
6003             ASSERT(osq->sq_flags & SQ_WRITER);
6004         }
6005         queue_writer(outer, func, q, mp);
6006         return;
6007     }
6008     /*
6009      * We are half-way to exclusive access to the outer perimeter.
6010      * Prevent any outer_enter, qwriter(OUTER), or outer_insert/remove
6011      * while the inner syncqs are traversed.
6012      */
6013     outer->sq_count++;
6014     ASSERT(outer->sq_count != 0);   /* wraparound */
6015     flags |= SQ_WRITER;
6016     /*
6017      * Check if we can run the function immediately. Mark all
6018      * syncqs with the writer flag to prevent new entries into
6019      * put and service procedures.
6020      *
6021      * Set SQ_WRITER on all the inner syncqs while holding
6022      * the SQLOCK on the outer syncq. This ensures that the changing
6023      * of SQ_WRITER is atomic under the outer SQLOCK.
6024      */
6025     failed = 0;
6026     for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
6027         uint16_t count;
6028         uint_t  maxcnt = (sq == osq) ? 1 : 0;
6029 
6030         mutex_enter(SQLOCK(sq));
6031         count = sq->sq_count;
6032         SQ_PUTLOCKS_ENTER(sq);
6033         SUM_SQ_PUTCOUNTS(sq, count);
6034         if (sq->sq_count > maxcnt)
6035             failed = 1;
6036         sq->sq_flags |= SQ_WRITER;
6037         SQ_PUTLOCKS_EXIT(sq);
6038         mutex_exit(SQLOCK(sq));
6039     }
6040     if (failed) {
6041         /*
6042          * Some other thread has a read claim on the outer perimeter.
6043          * Queue the callback for deferred processing.
6044          *
6045          * queue_writer will set SQ_QUEUED before we drop SQ_WRITER
6046          * so that other qwriter(OUTER) calls will queue their
6047          * callbacks as well. queue_writer increments sq_count so we
6048          * decrement to compensate for our increment.
6049          *
6050          * Dropping SQ_WRITER enables the writer thread to work
6051          * on this outer perimeter.
6052          */
6053         outer->sq_flags = flags;
6054         queue_writer(outer, func, q, mp);
6055         /* queue_writer dropped the lock */
6056         mutex_enter(SQLOCK(outer));
6057         ASSERT(outer->sq_count > 0);
6058         outer->sq_count--;
6059         ASSERT(outer->sq_flags & SQ_WRITER);
6060         flags = outer->sq_flags;
6061         flags &= ~SQ_WRITER;
6062         if (flags & SQ_WANTWAKEUP) {
6063             flags &= ~SQ_WANTWAKEUP;
6064             cv_broadcast(&outer->sq_wait);
6065         }
6066         outer->sq_flags = flags;
6067         mutex_exit(SQLOCK(outer));
6068         return;
6069     } else {
6070         outer->sq_flags = flags;
6071         mutex_exit(SQLOCK(outer));
6072     }
6073 
6074     /* Can run it immediately */
6075     (*func)(q, mp);
6076 
6077     outer_exit(outer);
6078 }
6079 
6080 /*
6081  * Dequeue all writer callbacks from the outer perimeter and run them.
6082  */
6083 static void
6084 write_now(syncq_t *outer)
6085 {
6086     mblk_t      *mp;
6087     queue_t     *q;
6088     void        (*func)();
6089 
6090     ASSERT(MUTEX_HELD(SQLOCK(outer)));
6091     ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
6092         outer->sq_oprev != NULL);
6093     while ((mp = outer->sq_evhead) != NULL) {
6094         /*
6095          * Queues cannot be placed on the queuelist on the outer
6096          * perimeter.
6097          */
6098         ASSERT(!(outer->sq_flags & SQ_MESSAGES));
6099         ASSERT((outer->sq_flags & SQ_EVENTS));
6100 
6101         outer->sq_evhead = mp->b_next;
6102         if (outer->sq_evhead == NULL) {
6103             outer->sq_evtail = NULL;
6104             outer->sq_flags &= ~SQ_EVENTS;
6105         }
6106         ASSERT(outer->sq_count != 0);
6107         outer->sq_count--;  /* Incremented when enqueued. */
6108         mutex_exit(SQLOCK(outer));
6109         /*
6110          * Drop the message if the queue is closing.
6111          * Make sure that the queue is "claimed" when the callback
6112          * is run in order to satisfy various ASSERTs.
6113          */
6114         q = mp->b_queue;
6115         func = (void (*)())mp->b_prev;
6116         ASSERT(func != NULL);
6117         mp->b_next = mp->b_prev = NULL;
6118         if (q->q_flag & QWCLOSE) {
6119             freemsg(mp);
6120         } else {
6121             claimq(q);
6122             (*func)(q, mp);
6123             releaseq(q);
6124         }
6125         mutex_enter(SQLOCK(outer));
6126     }
6127     ASSERT(MUTEX_HELD(SQLOCK(outer)));
6128 }
6129 
6130 /*
6131  * The list of messages on the inner syncq is effectively hashed
6132  * by destination queue. These destination queues are doubly
6133  * linked lists (hopefully) in priority order. Messages are then
6134  * put on the queue referenced by the q_sqhead/q_sqtail elements.
6135  * Additional messages are linked together by the b_next/b_prev
6136  * elements in the mblk, with (similar to putq()) the first message
6137  * having a NULL b_prev and the last message having a NULL b_next.
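 *
 * An illustrative sketch of the structure described above:
 *
 *	sq_head --> q1 <-q_sqnext/q_sqprev-> q2 <-> ... <-- sq_tail
 *	             |                        |
 *	         q_sqhead                 q_sqhead
 *	             |                        |
 *	            mp -b_next-> mp -> NULL  mp -> NULL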
6138 * 6139 * Events, such as qwriter callbacks, are put onto a list in FIFO 6140 * order referenced by sq_evhead, and sq_evtail. This is a singly 6141 * linked list, and messages here MUST be processed in the order queued. 6142 */ 6143 6144 /* 6145 * Run the events on the syncq event list (sq_evhead). 6146 * Assumes there is only one claim on the syncq, it is 6147 * already exclusive (SQ_EXCL set), and the SQLOCK held. 6148 * Messages here are processed in order, with the SQ_EXCL bit 6149 * held all the way through till the last message is processed. 6150 */ 6151 void 6152 sq_run_events(syncq_t *sq) 6153 { 6154 mblk_t *bp; 6155 queue_t *qp; 6156 uint16_t flags = sq->sq_flags; 6157 void (*func)(); 6158 6159 ASSERT(MUTEX_HELD(SQLOCK(sq))); 6160 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL && 6161 sq->sq_oprev == NULL) || 6162 (sq->sq_outer != NULL && sq->sq_onext != NULL && 6163 sq->sq_oprev != NULL)); 6164 6165 ASSERT(flags & SQ_EXCL); 6166 ASSERT(sq->sq_count == 1); 6167 6168 /* 6169 * We need to process all of the events on this list. It 6170 * is possible that new events will be added while we are 6171 * away processing a callback, so on every loop, we start 6172 * back at the beginning of the list. 6173 */ 6174 /* 6175 * We have to reaccess sq_evhead since there is a 6176 * possibility of a new entry while we were running 6177 * the callback. 6178 */ 6179 for (bp = sq->sq_evhead; bp != NULL; bp = sq->sq_evhead) { 6180 ASSERT(bp->b_queue->q_syncq == sq); 6181 ASSERT(sq->sq_flags & SQ_EVENTS); 6182 6183 qp = bp->b_queue; 6184 func = (void (*)())bp->b_prev; 6185 ASSERT(func != NULL); 6186 6187 /* 6188 * Messages from the event queue must be taken off in 6189 * FIFO order. 6190 */ 6191 ASSERT(sq->sq_evhead == bp); 6192 sq->sq_evhead = bp->b_next; 6193 6194 if (bp->b_next == NULL) { 6195 /* Deleting last */ 6196 ASSERT(sq->sq_evtail == bp); 6197 sq->sq_evtail = NULL; 6198 sq->sq_flags &= ~SQ_EVENTS; 6199 } 6200 bp->b_prev = bp->b_next = NULL; 6201 ASSERT(bp->b_datap->db_ref != 0); 6202 6203 mutex_exit(SQLOCK(sq)); 6204 6205 (*func)(qp, bp); 6206 6207 mutex_enter(SQLOCK(sq)); 6208 /* 6209 * re-read the flags, since they could have changed. 6210 */ 6211 flags = sq->sq_flags; 6212 ASSERT(flags & SQ_EXCL); 6213 } 6214 ASSERT(sq->sq_evhead == NULL && sq->sq_evtail == NULL); 6215 ASSERT(!(sq->sq_flags & SQ_EVENTS)); 6216 6217 if (flags & SQ_WANTWAKEUP) { 6218 flags &= ~SQ_WANTWAKEUP; 6219 cv_broadcast(&sq->sq_wait); 6220 } 6221 if (flags & SQ_WANTEXWAKEUP) { 6222 flags &= ~SQ_WANTEXWAKEUP; 6223 cv_broadcast(&sq->sq_exitwait); 6224 } 6225 sq->sq_flags = flags; 6226 } 6227 6228 /* 6229 * Put messages on the event list. 6230 * If we can go exclusive now, do so and process the event list, otherwise 6231 * let the last claim service this list (or wake the sqthread). 6232 * This procedure assumes SQLOCK is held. To run the event list, it 6233 * must be called with no claims. 6234 */ 6235 static void 6236 sqfill_events(syncq_t *sq, queue_t *q, mblk_t *mp, void (*func)()) 6237 { 6238 uint16_t count; 6239 6240 ASSERT(MUTEX_HELD(SQLOCK(sq))); 6241 ASSERT(func != NULL); 6242 6243 /* 6244 * This is a callback. Add it to the list of callbacks 6245 * and see about upgrading. 
6246      */
6247     mp->b_prev = (mblk_t *)func;
6248     mp->b_queue = q;
6249     mp->b_next = NULL;
6250     if (sq->sq_evhead == NULL) {
6251         sq->sq_evhead = sq->sq_evtail = mp;
6252         sq->sq_flags |= SQ_EVENTS;
6253     } else {
6254         ASSERT(sq->sq_evtail != NULL);
6255         ASSERT(sq->sq_evtail->b_next == NULL);
6256         ASSERT(sq->sq_flags & SQ_EVENTS);
6257         sq->sq_evtail->b_next = mp;
6258         sq->sq_evtail = mp;
6259     }
6260     /*
6261      * We have set SQ_EVENTS, so threads will have to
6262      * unwind out of the perimeter, and new entries will
6263      * not grab a putlock. But we still need to know
6264      * how many threads have already made a claim to the
6265      * syncq, so grab the putlocks, and sum the counts.
6266      * If there are no claims on the syncq, we can upgrade
6267      * to exclusive, and run the event list.
6268      * NOTE: We hold the SQLOCK, so we can just grab the
6269      * putlocks.
6270      */
6271     count = sq->sq_count;
6272     SQ_PUTLOCKS_ENTER(sq);
6273     SUM_SQ_PUTCOUNTS(sq, count);
6274     /*
6275      * This thread has no claim on the syncq (at least on this
6276      * entry), so we can upgrade only if there are no other
6277      * claims. If another thread does hold a claim, that
6278      * claim-holder (the last one to leave) is responsible for
6279      * servicing the event list, so we just leave the message
6280      * queued and return.
6281      *
6282      */
6283     if (count > 0) {
6284         /*
6285          * Can't upgrade - other threads inside.
6286          */
6287         SQ_PUTLOCKS_EXIT(sq);
6288         mutex_exit(SQLOCK(sq));
6289         return;
6290     }
6291     /*
6292      * Need to set SQ_EXCL and make a claim on the syncq.
6293      */
6294     ASSERT((sq->sq_flags & SQ_EXCL) == 0);
6295     sq->sq_flags |= SQ_EXCL;
6296     ASSERT(sq->sq_count == 0);
6297     sq->sq_count++;
6298     SQ_PUTLOCKS_EXIT(sq);
6299 
6300     /* Process the events list */
6301     sq_run_events(sq);
6302 
6303     /*
6304      * Release our claim...
6305      */
6306     sq->sq_count--;
6307 
6308     /*
6309      * And release SQ_EXCL.
6310      * We don't need to acquire the putlocks to release
6311      * SQ_EXCL, since we are exclusive, and hold the SQLOCK.
6312      */
6313     sq->sq_flags &= ~SQ_EXCL;
6314 
6315     /*
6316      * SQ_EXCL should now be clear.
6317      */
6318     ASSERT(!(sq->sq_flags & SQ_EXCL));
6319 
6320     /*
6321      * If anything happened while we were running the
6322      * events (or was there before), we need to process
6323      * them now. We shouldn't be exclusive since we
6324      * released the perimeter above (plus, we asserted
6325      * for it).
6326      */
6327     if (!(sq->sq_flags & SQ_STAYAWAY) && (sq->sq_flags & SQ_QUEUED))
6328         drain_syncq(sq);
6329     else
6330         mutex_exit(SQLOCK(sq));
6331 }
6332 
6333 /*
6334  * Perform delayed processing. The caller has to make sure that it is safe
6335  * to enter the syncq (e.g. by checking that none of the SQ_STAYAWAY bits are
6336  * set.)
6337  *
6338  * Assume that the caller has NO claims on the syncq. However, a claim
6339  * on the syncq does not indicate that a thread is draining the syncq.
6340  * There may be more claims on the syncq than there are threads draining
6341  * (i.e. #_threads_draining <= sq_count)
6342  *
6343  * drain_syncq has to terminate when one of the SQ_STAYAWAY bits gets set
6344  * in order to preserve qwriter(OUTER) ordering constraints.
6345  *
6346  * sq_putcount only needs to be checked when dispatching the queued
6347  * writer call for CIPUT sync queue, but this is handled in sq_run_events.
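 *
 * A typical call sequence (see putnext_tail() below for a real caller;
 * note that drain_syncq drops SQLOCK before returning):
 *
 *	mutex_enter(SQLOCK(sq));
 *	...conclude there is queued work and no SQ_STAYAWAY bit set...
 *	drain_syncq(sq);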
6348  */
6349 void
6350 drain_syncq(syncq_t *sq)
6351 {
6352     queue_t     *qp;
6353     uint16_t    count;
6354     uint16_t    type = sq->sq_type;
6355     uint16_t    flags = sq->sq_flags;
6356     boolean_t   bg_service = sq->sq_svcflags & SQ_SERVICE;
6357 
6358     TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START,
6359         "drain_syncq start:%p", sq);
6360     ASSERT(MUTEX_HELD(SQLOCK(sq)));
6361     ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6362         sq->sq_oprev == NULL) ||
6363         (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6364         sq->sq_oprev != NULL));
6365 
6366     /*
6367      * Drop SQ_SERVICE flag.
6368      */
6369     if (bg_service)
6370         sq->sq_svcflags &= ~SQ_SERVICE;
6371 
6372     /*
6373      * If SQ_EXCL is set, someone else is processing this syncq - let it
6374      * finish the job.
6375      */
6376     if (flags & SQ_EXCL) {
6377         if (bg_service) {
6378             ASSERT(sq->sq_servcount != 0);
6379             sq->sq_servcount--;
6380         }
6381         mutex_exit(SQLOCK(sq));
6382         return;
6383     }
6384 
6385     /*
6386      * This routine can be called by a background thread if
6387      * it was scheduled by a hi-priority thread. So, if there are
6388      * no messages queued, return (remember, we have the SQLOCK,
6389      * and it cannot change until we release it). Also wake up any waiters.
6390      */
6391     if (!(flags & SQ_QUEUED)) {
6392         if (flags & SQ_WANTWAKEUP) {
6393             flags &= ~SQ_WANTWAKEUP;
6394             cv_broadcast(&sq->sq_wait);
6395         }
6396         if (flags & SQ_WANTEXWAKEUP) {
6397             flags &= ~SQ_WANTEXWAKEUP;
6398             cv_broadcast(&sq->sq_exitwait);
6399         }
6400         sq->sq_flags = flags;
6401         if (bg_service) {
6402             ASSERT(sq->sq_servcount != 0);
6403             sq->sq_servcount--;
6404         }
6405         mutex_exit(SQLOCK(sq));
6406         return;
6407     }
6408 
6409     /*
6410      * If this is not a concurrent put perimeter, we need to
6411      * become exclusive to drain. Also, if not CIPUT, we would
6412      * not have acquired a putlock, so we don't need to check
6413      * the putcounts. If not entering with a claim, we test
6414      * for sq_count == 0.
6415      */
6416     type = sq->sq_type;
6417     if (!(type & SQ_CIPUT)) {
6418         if (sq->sq_count > 1) {
6419             if (bg_service) {
6420                 ASSERT(sq->sq_servcount != 0);
6421                 sq->sq_servcount--;
6422             }
6423             mutex_exit(SQLOCK(sq));
6424             return;
6425         }
6426         sq->sq_flags |= SQ_EXCL;
6427     }
6428 
6429     /*
6430      * This is where we make a claim to the syncq.
6431      * This can either be done by incrementing a putlock, or
6432      * the sq_count. But since we already have the SQLOCK
6433      * here, we just bump the sq_count.
6434      *
6435      * Note that after we make a claim, we need to let the code
6436      * fall through to the end of this routine to clean itself
6437      * up. A return in the while loop will put the syncq in a
6438      * very bad state.
6439      */
6440     sq->sq_count++;
6441     ASSERT(sq->sq_count != 0);  /* wraparound */
6442 
6443     while ((flags = sq->sq_flags) & SQ_QUEUED) {
6444         /*
6445          * If we are told to stayaway or went exclusive,
6446          * we are done.
6447          */
6448         if (flags & (SQ_STAYAWAY)) {
6449             break;
6450         }
6451 
6452         /*
6453          * If there are events to run, do so.
6454          * We have one claim to the syncq, so if there are
6455          * more than one, other threads are running.
6456          */
6457         if (sq->sq_evhead != NULL) {
6458             ASSERT(sq->sq_flags & SQ_EVENTS);
6459 
6460             count = sq->sq_count;
6461             SQ_PUTLOCKS_ENTER(sq);
6462             SUM_SQ_PUTCOUNTS(sq, count);
6463             if (count > 1) {
6464                 SQ_PUTLOCKS_EXIT(sq);
6465                 /* Can't upgrade - other threads inside */
6466                 break;
6467             }
6468             ASSERT((flags & SQ_EXCL) == 0);
6469             sq->sq_flags = flags | SQ_EXCL;
6470             SQ_PUTLOCKS_EXIT(sq);
6471             /*
6472              * We have the only claim; run the events.
6473              * Note that SQ_EXCL stays set across sq_run_events.
6474              */
6475             sq_run_events(sq);
6476 
6477             /*
6478              * If this is a CIPUT perimeter, we need
6479              * to drop the SQ_EXCL flag so we can properly
6480              * continue draining the syncq.
6481              */
6482             if (type & SQ_CIPUT) {
6483                 ASSERT(sq->sq_flags & SQ_EXCL);
6484                 sq->sq_flags &= ~SQ_EXCL;
6485             }
6486 
6487             /*
6488              * And go back to the beginning just in case
6489              * anything changed while we were away.
6490              */
6491             ASSERT((sq->sq_flags & SQ_EXCL) || (type & SQ_CIPUT));
6492             continue;
6493         }
6494 
6495         ASSERT(sq->sq_evhead == NULL);
6496         ASSERT(!(sq->sq_flags & SQ_EVENTS));
6497 
6498         /*
6499          * Find the queue that is not draining.
6500          *
6501          * q_draining is protected by QLOCK which we do not hold.
6502          * But if it was set, then a thread was draining, and if it gets
6503          * cleared, then it was because the thread has successfully
6504          * drained the syncq, or a GOAWAY state occurred. For the GOAWAY
6505          * state to happen, a thread needs the SQLOCK which we hold, and
6506          * if there was such a flag, we would have already seen it.
6507          */
6508 
6509         for (qp = sq->sq_head;
6510             qp != NULL && (qp->q_draining ||
6511             (qp->q_sqflags & Q_SQDRAINING));
6512             qp = qp->q_sqnext)
6513             ;
6514 
6515         if (qp == NULL)
6516             break;
6517 
6518         /*
6519          * We have a queue to work on, and we hold the
6520          * SQLOCK and one claim, call qdrain_syncq.
6521          * This means we need to release the SQLOCK and
6522          * acquire the QLOCK (OK since we have a claim).
6523          * Note that qdrain_syncq will actually dequeue
6524          * this queue from the sq_head list when it is
6525          * convinced all the work is done and release
6526          * the QLOCK before returning.
6527          */
6528         qp->q_sqflags |= Q_SQDRAINING;
6529         mutex_exit(SQLOCK(sq));
6530         mutex_enter(QLOCK(qp));
6531         qdrain_syncq(sq, qp);
6532         mutex_enter(SQLOCK(sq));
6533 
6534         /* The queue is drained */
6535         ASSERT(qp->q_sqflags & Q_SQDRAINING);
6536         qp->q_sqflags &= ~Q_SQDRAINING;
6537         /*
6538          * NOTE: After this point qp should not be used since it may be
6539          * closed.
6540          */
6541     }
6542 
6543     ASSERT(MUTEX_HELD(SQLOCK(sq)));
6544     flags = sq->sq_flags;
6545 
6546     /*
6547      * sq->sq_head cannot change because we hold the
6548      * sqlock. However, a thread CAN decide that it is no longer
6549      * going to drain that queue. But that should be due to
6550      * a GOAWAY state, and we should see that here.
6551      *
6552      * This loop is not very efficient. One solution may be adding a second
6553      * pointer to the "draining" queue, but it is difficult to do when
6554      * queues are inserted in the middle due to priority ordering. Another
6555      * possibility is to yank the queue out of the sq list and put it onto
6556      * the "draining list" and then put it back if it can't be drained.
6557      */
6558 
6559     ASSERT((sq->sq_head == NULL) || (flags & SQ_GOAWAY) ||
6560         (type & SQ_CI) || sq->sq_head->q_draining);
6561 
6562     /* Drop SQ_EXCL for non-CIPUT perimeters */
6563     if (!(type & SQ_CIPUT))
6564         flags &= ~SQ_EXCL;
6565     ASSERT((flags & SQ_EXCL) == 0);
6566 
6567     /* Wake up any waiters. */
6568     if (flags & SQ_WANTWAKEUP) {
6569         flags &= ~SQ_WANTWAKEUP;
6570         cv_broadcast(&sq->sq_wait);
6571     }
6572     if (flags & SQ_WANTEXWAKEUP) {
6573         flags &= ~SQ_WANTEXWAKEUP;
6574         cv_broadcast(&sq->sq_exitwait);
6575     }
6576     sq->sq_flags = flags;
6577 
6578     ASSERT(sq->sq_count != 0);
6579     /* Release our claim. */
6580     sq->sq_count--;
6581 
6582     if (bg_service) {
6583         ASSERT(sq->sq_servcount != 0);
6584         sq->sq_servcount--;
6585     }
6586 
6587     mutex_exit(SQLOCK(sq));
6588 
6589     TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END,
6590         "drain_syncq end:%p", sq);
6591 }
6592 
6593 
6594 /*
6595  *
6596  * qdrain_syncq can be called (currently) from only one of two places:
6597  *	drain_syncq
6598  *	putnext (or some variation of it).
6599  * and eventually
6600  *	qwait(_sig)
6601  *
6602  * If called from drain_syncq, we found it in the list
6603  * of queues needing service, so there is work to be done (or it
6604  * wouldn't be on the list).
6605  *
6606  * If called from some putnext variation, it was because the
6607  * perimeter is open, but messages are blocking a putnext and
6608  * there is not a thread working on it. Now a thread could start
6609  * working on it while we are getting ready to do so ourselves, but
6610  * the thread would set the q_draining flag, and we can spin out.
6611  *
6612  * As for qwait(_sig), I think I shall let it continue to call
6613  * drain_syncq directly (after all, it will get here eventually).
6614  *
6615  * qdrain_syncq has to terminate when:
6616  * - one of the SQ_STAYAWAY bits gets set to preserve qwriter(OUTER) ordering
6617  * - SQ_EVENTS gets set to preserve qwriter(INNER) ordering
6618  *
6619  * ASSUMES:
6620  *	One claim
6621  *	QLOCK held
6622  *	SQLOCK not held
6623  *	Will release QLOCK before returning
6624  */
6625 void
6626 qdrain_syncq(syncq_t *sq, queue_t *q)
6627 {
6628     mblk_t      *bp;
6629     boolean_t   do_clr;
6630 #ifdef DEBUG
6631     uint16_t    count;
6632 #endif
6633 
6634     TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START,
6635         "drain_syncq start:%p", sq);
6636     ASSERT(q->q_syncq == sq);
6637     ASSERT(MUTEX_HELD(QLOCK(q)));
6638     ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6639     /*
6640      * For non-CIPUT perimeters, we should be called with the
6641      * exclusive bit set already. For CIPUT perimeters we
6642      * will be doing a concurrent drain, so it better not be set.
6643      */
6644     ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)));
6645     ASSERT(!((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL)));
6646     ASSERT((sq->sq_type & SQ_CIPUT) || (sq->sq_flags & SQ_EXCL));
6647     /*
6648      * All outer pointers are set, or none of them are.
6649      */
6650     ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6651         sq->sq_oprev == NULL) ||
6652         (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6653         sq->sq_oprev != NULL));
6654 #ifdef DEBUG
6655     count = sq->sq_count;
6656     /*
6657      * This is OK without the putlocks, because we have one
6658      * claim either from the sq_count, or a putcount. We could
6659      * get an erroneous value from other counts, but ours won't
6660      * change, so one way or another, we will have at least a
6661      * value of one.
6662      */
6663     SUM_SQ_PUTCOUNTS(sq, count);
6664     ASSERT(count >= 1);
6665 #endif /* DEBUG */
6666 
6667     /*
6668      * The first thing to do here, is find out if a thread is already
6669      * draining this queue or the queue is closing. If so, we are done,
6670      * just return. Also, if there are no messages, we are done as well.
6671      * Note that we check the q_sqhead since there is a window of
6672      * opportunity for us to enter here because Q_SQQUEUED was set, but is
6673      * not anymore.
6674      */
6675     if (q->q_draining || (q->q_sqhead == NULL)) {
6676         mutex_exit(QLOCK(q));
6677         return;
6678     }
6679 
6680     /*
6681      * If the perimeter is exclusive, there is nothing we can
6682      * do right now, go away.
6683      * Note that there is nothing to prevent this case from changing
6684      * right after this check, but the spin-out will catch it.
6685      */
6686 
6687     /* Tell other threads that we are draining this queue */
6688     q->q_draining = 1;  /* Protected by QLOCK */
6689 
6690     for (bp = q->q_sqhead; bp != NULL; bp = q->q_sqhead) {
6691 
6692         /*
6693          * Because we can enter this routine just because
6694          * a putnext is blocked, we need to spin out if
6695          * the perimeter wants to go exclusive as well
6696          * as just blocked. We need to spin out also if
6697          * events are queued on the syncq.
6698          * Don't check for SQ_EXCL, because non-CIPUT
6699          * perimeters would set it, and it can't become
6700          * exclusive while we hold a claim.
6701          */
6702         if (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS)) {
6703             break;
6704         }
6705 
6706 #ifdef DEBUG
6707         /*
6708          * Since we are in qdrain_syncq, we already know the queue,
6709          * but for sanity, we want to check this against the qp that
6710          * was passed in by bp->b_queue.
6711          */
6712 
6713         ASSERT(bp->b_queue == q);
6714         ASSERT(bp->b_queue->q_syncq == sq);
6715         bp->b_queue = NULL;
6716 
6717         /*
6718          * We would have the following check in the DEBUG code:
6719          *
6720          * if (bp->b_prev != NULL) {
6721          *	ASSERT(bp->b_prev == (void (*)())q->q_qinfo->qi_putp);
6722          * }
6723          *
6724          * This can't be done, however, since IP modifies qinfo
6725          * structure at run-time (switching between IPv4 qinfo and IPv6
6726          * qinfo), invalidating the check.
6727          * So the assignment to func is left here, but the ASSERT itself
6728          * is removed until the whole issue is resolved.
6729          */
6730 #endif
6731         ASSERT(q->q_sqhead == bp);
6732         q->q_sqhead = bp->b_next;
6733         bp->b_prev = bp->b_next = NULL;
6734         ASSERT(q->q_syncqmsgs > 0);
6735         mutex_exit(QLOCK(q));
6736 
6737         ASSERT(bp->b_datap->db_ref != 0);
6738 
6739         (void) (*q->q_qinfo->qi_putp)(q, bp);
6740 
6741         mutex_enter(QLOCK(q));
6742         /*
6743          * We should decrement q_syncqmsgs only after executing the
6744          * put procedure to avoid a possible race with putnext().
6745          * In putnext() though it sees Q_SQQUEUED is set, there is
6746          * an optimization which allows putnext to call the put
6747          * procedure directly if (q_syncqmsgs == 0) and thus
6748          * a message reordering could otherwise occur.
6749          */
6750         q->q_syncqmsgs--;
6751 
6752         /*
6753          * Clear QFULL in the next service procedure queue if
6754          * this is the last message destined to that queue.
6755          *
6756          * It would make better sense to have some sort of
6757          * tunable for the low water mark, but these semantics
6758          * are not yet defined. So, alas, we use a constant.
6759          */
6760         do_clr = (q->q_syncqmsgs == 0);
6761         mutex_exit(QLOCK(q));
6762 
6763         if (do_clr)
6764             clr_qfull(q);
6765 
6766         mutex_enter(QLOCK(q));
6767         /*
6768          * Always clear SQ_EXCL when CIPUT in order to handle
6769          * qwriter(INNER).
6770          */
6771         /*
6772          * The putp() can call qwriter and get exclusive access
6773          * IFF this is the only claim. So, we need to test for
6774          * this possibility, so we can acquire the mutex and clear
6775          * the bit.
6776          */
6777         if ((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL)) {
6778             mutex_enter(SQLOCK(sq));
6779             sq->sq_flags &= ~SQ_EXCL;
6780             mutex_exit(SQLOCK(sq));
6781         }
6782     }
6783 
6784     /*
6785      * We should either have no messages on this queue, or we were
6786      * told to goaway by a waiter (which we will wake up at the
6787      * end of this function).
6788      */
6789     ASSERT((q->q_sqhead == NULL) ||
6790         (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS)));
6791 
6792     ASSERT(MUTEX_HELD(QLOCK(q)));
6793     ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6794 
6795     /*
6796      * Remove the q from the syncq list if all the messages are
6797      * drained.
6798      */
6799     if (q->q_sqhead == NULL) {
6800         mutex_enter(SQLOCK(sq));
6801         if (q->q_sqflags & Q_SQQUEUED)
6802             SQRM_Q(sq, q);
6803         mutex_exit(SQLOCK(sq));
6804         /*
6805          * Since the queue is removed from the list, reset its priority.
6806          */
6807         q->q_spri = 0;
6808     }
6809 
6810     /*
6811      * Remember, the q_draining flag is used to let another
6812      * thread know that there is a thread currently draining
6813      * the messages for a queue. Since we are now done with
6814      * this queue (even if there may be messages still there),
6815      * we need to clear this flag so some thread will work
6816      * on it if needed.
6817      */
6818     ASSERT(q->q_draining);
6819     q->q_draining = 0;
6820 
6821     /* Called with a claim, so OK to drop all locks. */
6822     mutex_exit(QLOCK(q));
6823 
6824     TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END,
6825         "drain_syncq end:%p", sq);
6826 }
6827 /* END OF QDRAIN_SYNCQ */
6828 
6829 
6830 /*
6831  * This is the mate to qdrain_syncq, except that it is putting the
6832  * message onto the queue instead of draining. Since the
6833  * message is destined for the queue that is selected, there is
6834  * no need to identify the function because the message is
6835  * intended for the put routine for the queue. But this
6836  * routine will do it anyway just in case (but only for debug kernels).
6837  *
6838  * After the message is enqueued on the syncq, it calls putnext_tail()
6839  * which will schedule a background thread to actually process the message.
6840  *
6841  * Assumes that there is a claim on the syncq (sq->sq_count > 0) and
6842  * SQLOCK(sq) and QLOCK(q) are not held.
6843  */
6844 void
6845 qfill_syncq(syncq_t *sq, queue_t *q, mblk_t *mp)
6846 {
6847     ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6848     ASSERT(MUTEX_NOT_HELD(QLOCK(q)));
6849     ASSERT(sq->sq_count > 0);
6850     ASSERT(q->q_syncq == sq);
6851     ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6852         sq->sq_oprev == NULL) ||
6853         (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6854         sq->sq_oprev != NULL));
6855 
6856     mutex_enter(QLOCK(q));
6857 
6858 #ifdef DEBUG
6859     /*
6860      * This is used for debug in the qfill_syncq/qdrain_syncq case
6861      * to trace the queue that the message is intended for. Note
6862      * that the original use was to identify the queue and function
6863      * to call on the drain. In the new syncq, we have the context
6864      * of the queue that we are draining, so call its putproc and
6865      * don't rely on the saved values. But for debug this is still
6866      * useful information.
6867      */
6868     mp->b_prev = (mblk_t *)q->q_qinfo->qi_putp;
6869     mp->b_queue = q;
6870     mp->b_next = NULL;
6871 #endif
6872     ASSERT(q->q_syncq == sq);
6873     /*
6874      * Enqueue the message on the list.
6875      * SQPUT_MP() accesses q_syncqmsgs. We are already holding QLOCK to
6876      * protect it. So it's OK to acquire SQLOCK after SQPUT_MP().
6877      */
6878     SQPUT_MP(q, mp);
6879     mutex_enter(SQLOCK(sq));
6880 
6881     /*
6882      * And queue on syncq for scheduling, if not already queued.
6883      * Note that we need the SQLOCK for this, and for testing flags
6884      * at the end to see if we will drain. So grab it now, and
6885      * release it before we call qdrain_syncq or return.
6886      */
6887     if (!(q->q_sqflags & Q_SQQUEUED)) {
6888         q->q_spri = curthread->t_pri;
6889         SQPUT_Q(sq, q);
6890     }
6891 #ifdef DEBUG
6892     else {
6893         /*
6894          * All of these conditions MUST be true!
6895          */
6896         ASSERT(sq->sq_tail != NULL);
6897         if (sq->sq_tail == sq->sq_head) {
6898             ASSERT((q->q_sqprev == NULL) &&
6899                 (q->q_sqnext == NULL));
6900         } else {
6901             ASSERT((q->q_sqprev != NULL) ||
6902                 (q->q_sqnext != NULL));
6903         }
6904         ASSERT(sq->sq_flags & SQ_QUEUED);
6905         ASSERT(q->q_syncqmsgs != 0);
6906         ASSERT(q->q_sqflags & Q_SQQUEUED);
6907     }
6908 #endif
6909     mutex_exit(QLOCK(q));
6910     /*
6911      * SQLOCK is still held, so sq_count can be safely decremented.
6912      */
6913     sq->sq_count--;
6914 
6915     putnext_tail(sq, q, 0);
6916     /* Should not reference sq or q after this point. */
6917 }
6918 
6919 /* End of qfill_syncq */
6920 
6921 /*
6922  * Remove all messages from a syncq (if qp is NULL) or remove all messages
6923  * that would be put into qp by drain_syncq.
6924  * Used when deleting the syncq (qp == NULL) or when detaching
6925  * a queue (qp != NULL).
6926  * Return non-zero if one or more messages were freed.
6927  *
6928  * No need to grab sq_putlocks here. See comment in strsubr.h that explains
6929  * when sq_putlocks are used.
6930  *
6931  * NOTE: This function assumes that it is called from the close() context and
6932  * that all the queues in the syncq are going away. For this reason it doesn't
6933  * acquire QLOCK for modifying q_sqhead/q_sqtail fields. This assumption is
6934  * currently valid, but it would be useful to rethink this function so that it
6935  * behaves properly in other cases.
6936  */
6937 int
6938 flush_syncq(syncq_t *sq, queue_t *qp)
6939 {
6940     mblk_t      *bp, *mp_head, *mp_next, *mp_prev;
6941     queue_t     *q;
6942     int         ret = 0;
6943 
6944     mutex_enter(SQLOCK(sq));
6945 
6946     /*
6947      * Before we leave, we need to make sure there are no
6948      * events listed for this queue. All events for this queue
6949      * will just be freed.
6950      */
6951     if (qp != NULL && sq->sq_evhead != NULL) {
6952         ASSERT(sq->sq_flags & SQ_EVENTS);
6953 
6954         mp_prev = NULL;
6955         for (bp = sq->sq_evhead; bp != NULL; bp = mp_next) {
6956             mp_next = bp->b_next;
6957             if (bp->b_queue == qp) {
6958                 /* Delete this message */
6959                 if (mp_prev != NULL) {
6960                     mp_prev->b_next = mp_next;
6961                     /*
6962                      * Update sq_evtail if the last element
6963                      * is removed.
6964                      */
6965                     if (bp == sq->sq_evtail) {
6966                         ASSERT(mp_next == NULL);
6967                         sq->sq_evtail = mp_prev;
6968                     }
6969                 } else
6970                     sq->sq_evhead = mp_next;
6971                 if (sq->sq_evhead == NULL)
6972                     sq->sq_flags &= ~SQ_EVENTS;
6973                 bp->b_prev = bp->b_next = NULL;
6974                 freemsg(bp);
6975                 ret++;
6976             } else {
6977                 mp_prev = bp;
6978             }
6979         }
6980     }
6981 
6982     /*
6983      * Walk sq_head and:
6984      * - if qp is set, remove its messages
6985      * - if qp is not set, remove all messages
6986      */
6987     q = sq->sq_head;
6988     while (q != NULL) {
6989         ASSERT(q->q_syncq == sq);
6990         if ((qp == NULL) || (qp == q)) {
6991             /*
6992              * Yank the messages as a list off the queue
6993              */
6994             mp_head = q->q_sqhead;
6995             /*
6996              * We do not have QLOCK(q) here (which is safe due to
6997              * assumptions mentioned above). To obtain the lock we
6998              * need to release SQLOCK which may allow lots of things
6999              * to change upon us. This place requires more analysis.
7000              */
7001             q->q_sqhead = q->q_sqtail = NULL;
7002             ASSERT(mp_head->b_queue &&
7003                 mp_head->b_queue->q_syncq == sq);
7004 
7005             /*
7006              * Free each of the messages.
7007              */
7008             for (bp = mp_head; bp != NULL; bp = mp_next) {
7009                 mp_next = bp->b_next;
7010                 bp->b_prev = bp->b_next = NULL;
7011                 freemsg(bp);
7012                 ret++;
7013             }
7014             /*
7015              * Now remove the queue from the syncq.
7016              */
7017             ASSERT(q->q_sqflags & Q_SQQUEUED);
7018             SQRM_Q(sq, q);
7019             q->q_spri = 0;
7020             q->q_syncqmsgs = 0;
7021 
7022             /*
7023              * If qp was specified, we are done with it and are
7024              * going to drop SQLOCK(sq) and return. We wakeup syncq
7025              * waiters while we still have the SQLOCK.
7026              */
7027             if ((qp != NULL) && (sq->sq_flags & SQ_WANTWAKEUP)) {
7028                 sq->sq_flags &= ~SQ_WANTWAKEUP;
7029                 cv_broadcast(&sq->sq_wait);
7030             }
7031             /* Drop SQLOCK across clr_qfull */
7032             mutex_exit(SQLOCK(sq));
7033 
7034             /*
7035              * We avoid doing the test that drain_syncq does and
7036              * unconditionally clear qfull for every flushed
7037              * message. Since flush_syncq is only called during
7038              * close this should not be a problem.
7039              */
7040             clr_qfull(q);
7041             if (qp != NULL) {
7042                 return (ret);
7043             } else {
7044                 mutex_enter(SQLOCK(sq));
7045                 /*
7046                  * The head was removed by SQRM_Q above.
7047                  * Reread the new head and flush it.
7048                  */
7049                 q = sq->sq_head;
7050             }
7051         } else {
7052             q = q->q_sqnext;
7053         }
7054         ASSERT(MUTEX_HELD(SQLOCK(sq)));
7055     }
7056 
7057     if (sq->sq_flags & SQ_WANTWAKEUP) {
7058         sq->sq_flags &= ~SQ_WANTWAKEUP;
7059         cv_broadcast(&sq->sq_wait);
7060     }
7061 
7062     mutex_exit(SQLOCK(sq));
7063     return (ret);
7064 }
7065 
7066 /*
7067  * Propagate all messages from a syncq to the next syncq that are associated
7068  * with the specified queue. If the queue is attached to a driver or if the
7069  * messages have been added due to a qwriter(PERIM_INNER), free the messages.
7070  *
7071  * Assumes that the stream is strlock()'ed. We don't come here if there
7072  * are no messages to propagate.
7073  *
7074  * NOTE : If the queue is attached to a driver, all the messages are freed
7075  * as there is no point in propagating the messages from the driver syncq
7076  * to the closing stream head which will in turn get freed later.
7077  */
7078 static int
7079 propagate_syncq(queue_t *qp)
7080 {
7081     mblk_t      *bp, *head, *tail, *prev, *next;
7082     syncq_t     *sq;
7083     queue_t     *nqp;
7084     syncq_t     *nsq;
7085     boolean_t   isdriver;
7086     int         moved = 0;
7087     uint16_t    flags;
7088     pri_t       priority = curthread->t_pri;
7089 #ifdef DEBUG
7090     void        (*func)();
7091 #endif
7092 
7093     sq = qp->q_syncq;
7094     ASSERT(MUTEX_HELD(SQLOCK(sq)));
7095     /* debug macro */
7096     SQ_PUTLOCKS_HELD(sq);
7097     /*
7098      * As entersq() does not increment the sq_count for
7099      * the write side, check sq_count for non-QPERQ
7100      * perimeters alone.
7101      */
7102     ASSERT((qp->q_flag & QPERQ) || (sq->sq_count >= 1));
7103 
7104     /*
7105      * propagate_syncq() can be called because of either messages on the
7106      * queue syncq or events on the queue syncq. Do actual
7107      * message propagations if there are any messages.
7108      */
7109     if (qp->q_syncqmsgs) {
7110         isdriver = (qp->q_flag & QISDRV);
7111 
7112         if (!isdriver) {
7113             nqp = qp->q_next;
7114             nsq = nqp->q_syncq;
7115             ASSERT(MUTEX_HELD(SQLOCK(nsq)));
7116             /* debug macro */
7117             SQ_PUTLOCKS_HELD(nsq);
7118 #ifdef DEBUG
7119             func = (void (*)())nqp->q_qinfo->qi_putp;
7120 #endif
7121         }
7122 
7123         SQRM_Q(sq, qp);
7124         priority = MAX(qp->q_spri, priority);
7125         qp->q_spri = 0;
7126         head = qp->q_sqhead;
7127         tail = qp->q_sqtail;
7128         qp->q_sqhead = qp->q_sqtail = NULL;
7129         qp->q_syncqmsgs = 0;
7130 
7131         /*
7132          * Walk the list of messages, and free them if this is a driver,
7133          * otherwise reset the b_prev and b_queue value to the new putp.
7134          * Afterward, we will just add the head to the end of the next
7135          * syncq, and point the tail to the end of this one.
7136          */
7137 
7138         for (bp = head; bp != NULL; bp = next) {
7139             next = bp->b_next;
7140             if (isdriver) {
7141                 bp->b_prev = bp->b_next = NULL;
7142                 freemsg(bp);
7143                 continue;
7144             }
7145             /* Change the q values for this message */
7146             bp->b_queue = nqp;
7147 #ifdef DEBUG
7148             bp->b_prev = (mblk_t *)func;
7149 #endif
7150             moved++;
7151         }
7152         /*
7153          * Attach list of messages to the end of the new queue (if there
7154          * is a list of messages).
7155          */
7156 
7157         if (!isdriver && head != NULL) {
7158             ASSERT(tail != NULL);
7159             if (nqp->q_sqhead == NULL) {
7160                 nqp->q_sqhead = head;
7161             } else {
7162                 ASSERT(nqp->q_sqtail != NULL);
7163                 nqp->q_sqtail->b_next = head;
7164             }
7165             nqp->q_sqtail = tail;
7166             /*
7167              * When messages are moved from high priority queue to
7168              * another queue, the destination queue priority is
7169              * upgraded.
7170              */
7171 
7172             if (priority > nqp->q_spri)
7173                 nqp->q_spri = priority;
7174 
7175             SQPUT_Q(nsq, nqp);
7176 
7177             nqp->q_syncqmsgs += moved;
7178             ASSERT(nqp->q_syncqmsgs != 0);
7179         }
7180     }
7181 
7182     /*
7183      * Before we leave, we need to make sure there are no
7184      * events listed for this queue. All events for this queue
7185      * will just be freed.
7186      */
7187     if (sq->sq_evhead != NULL) {
7188         ASSERT(sq->sq_flags & SQ_EVENTS);
7189         prev = NULL;
7190         for (bp = sq->sq_evhead; bp != NULL; bp = next) {
7191             next = bp->b_next;
7192             if (bp->b_queue == qp) {
7193                 /* Delete this message */
7194                 if (prev != NULL) {
7195                     prev->b_next = next;
7196                     /*
7197                      * Update sq_evtail if the last element
7198                      * is removed.
7199                      */
7200                     if (bp == sq->sq_evtail) {
7201                         ASSERT(next == NULL);
7202                         sq->sq_evtail = prev;
7203                     }
7204                 } else
7205                     sq->sq_evhead = next;
7206                 if (sq->sq_evhead == NULL)
7207                     sq->sq_flags &= ~SQ_EVENTS;
7208                 bp->b_prev = bp->b_next = NULL;
7209                 freemsg(bp);
7210             } else {
7211                 prev = bp;
7212             }
7213         }
7214     }
7215 
7216     flags = sq->sq_flags;
7217 
7218     /* Wake up any waiter before leaving. */
7219     if (flags & SQ_WANTWAKEUP) {
7220         flags &= ~SQ_WANTWAKEUP;
7221         cv_broadcast(&sq->sq_wait);
7222     }
7223     sq->sq_flags = flags;
7224 
7225     return (moved);
7226 }
7227 
7228 /*
7229  * Try to upgrade to exclusive access at the inner perimeter. If this can
7230  * not be done without blocking then the request will be queued on the syncq
7231  * and drain_syncq will run it later.
7232  *
7233  * This routine can only be called from put or service procedures plus
7234  * asynchronous callback routines that have properly entered the
7235  * syncq (via entersq). Thus qwriter_inner assumes the caller has one claim
7236  * on the syncq associated with q.
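 *
 * For illustration, a hypothetical module would reach this routine
 * through the qwriter(9F) entry point with PERIM_INNER (the xxx names
 * below are invented for the example):
 *
 *	static void
 *	xxx_apply_option(queue_t *q, mblk_t *mp)
 *	{
 *		...runs exclusive (writer) at the inner perimeter...
 *	}
 *
 *	static void
 *	xxxwput(queue_t *q, mblk_t *mp)
 *	{
 *		if (DB_TYPE(mp) == M_IOCTL) {
 *			qwriter(q, mp, xxx_apply_option, PERIM_INNER);
 *			return;
 *		}
 *		putnext(q, mp);
 *	}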
7237 */ 7238 void 7239 qwriter_inner(queue_t *q, mblk_t *mp, void (*func)()) 7240 { 7241 syncq_t *sq = q->q_syncq; 7242 uint16_t count; 7243 7244 mutex_enter(SQLOCK(sq)); 7245 count = sq->sq_count; 7246 SQ_PUTLOCKS_ENTER(sq); 7247 SUM_SQ_PUTCOUNTS(sq, count); 7248 ASSERT(count >= 1); 7249 ASSERT(sq->sq_type & (SQ_CIPUT|SQ_CISVC)); 7250 7251 if (count == 1) { 7252 /* 7253 * Can upgrade. This case also handles nested qwriter calls 7254 * (when the qwriter callback function calls qwriter). In that 7255 * case SQ_EXCL is already set. 7256 */ 7257 sq->sq_flags |= SQ_EXCL; 7258 SQ_PUTLOCKS_EXIT(sq); 7259 mutex_exit(SQLOCK(sq)); 7260 (*func)(q, mp); 7261 /* 7262 * Assumes that leavesq, putnext, and drain_syncq will reset 7263 * SQ_EXCL for SQ_CIPUT/SQ_CISVC queues. We leave SQ_EXCL on 7264 * until putnext, leavesq, or drain_syncq drops it. 7265 * That way we handle nested qwriter(INNER) without dropping 7266 * SQ_EXCL until the outermost qwriter callback routine is 7267 * done. 7268 */ 7269 return; 7270 } 7271 SQ_PUTLOCKS_EXIT(sq); 7272 sqfill_events(sq, q, mp, func); 7273 } 7274 7275 /* 7276 * Synchronous callback support functions 7277 */ 7278 7279 /* 7280 * Allocate a callback parameter structure. 7281 * Assumes that caller initializes the flags and the id. 7282 * Acquires SQLOCK(sq) if non-NULL is returned. 7283 */ 7284 callbparams_t * 7285 callbparams_alloc(syncq_t *sq, void (*func)(void *), void *arg, int kmflags) 7286 { 7287 callbparams_t *cbp; 7288 size_t size = sizeof (callbparams_t); 7289 7290 cbp = kmem_alloc(size, kmflags & ~KM_PANIC); 7291 7292 /* 7293 * Only try tryhard allocation if the caller is ready to panic. 7294 * Otherwise just fail. 7295 */ 7296 if (cbp == NULL) { 7297 if (kmflags & KM_PANIC) 7298 cbp = kmem_alloc_tryhard(sizeof (callbparams_t), 7299 &size, kmflags); 7300 else 7301 return (NULL); 7302 } 7303 7304 ASSERT(size >= sizeof (callbparams_t)); 7305 cbp->cbp_size = size; 7306 cbp->cbp_sq = sq; 7307 cbp->cbp_func = func; 7308 cbp->cbp_arg = arg; 7309 mutex_enter(SQLOCK(sq)); 7310 cbp->cbp_next = sq->sq_callbpend; 7311 sq->sq_callbpend = cbp; 7312 return (cbp); 7313 } 7314 7315 void 7316 callbparams_free(syncq_t *sq, callbparams_t *cbp) 7317 { 7318 callbparams_t **pp, *p; 7319 7320 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7321 7322 for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) { 7323 if (p == cbp) { 7324 *pp = p->cbp_next; 7325 kmem_free(p, p->cbp_size); 7326 return; 7327 } 7328 } 7329 (void) (STRLOG(0, 0, 0, SL_CONSOLE, 7330 "callbparams_free: not found\n")); 7331 } 7332 7333 void 7334 callbparams_free_id(syncq_t *sq, callbparams_id_t id, int32_t flag) 7335 { 7336 callbparams_t **pp, *p; 7337 7338 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7339 7340 for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) { 7341 if (p->cbp_id == id && p->cbp_flags == flag) { 7342 *pp = p->cbp_next; 7343 kmem_free(p, p->cbp_size); 7344 return; 7345 } 7346 } 7347 (void) (STRLOG(0, 0, 0, SL_CONSOLE, 7348 "callbparams_free_id: not found\n")); 7349 } 7350 7351 /* 7352 * Callback wrapper function used by once-only callbacks that can be 7353 * cancelled (qtimeout and qbufcall) 7354 * Contains inline version of entersq(sq, SQ_CALLBACK) that can be 7355 * cancelled by the qun* functions. 
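 *
 * For illustration, the cancellable pairing this wrapper supports
 * looks like the following (xxx_expire is a hypothetical expiry
 * routine; qbufcall/qunbufcall behave analogously):
 *
 *	tid = qtimeout(q, xxx_expire, arg, drv_usectohz(1000000));
 *	...
 *	(void) quntimeout(q, tid);
 *
 * quntimeout marks the pending callback cancelled; when qcallbwrapper
 * runs it notices the cancellation, frees the callbparams, and returns
 * without invoking the function.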
7356  */
7357 void
7358 qcallbwrapper(void *arg)
7359 {
7360     callbparams_t *cbp = arg;
7361     syncq_t *sq;
7362     uint16_t count = 0;
7363     uint16_t waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL;
7364     uint16_t type;
7365 
7366     sq = cbp->cbp_sq;
7367     mutex_enter(SQLOCK(sq));
7368     type = sq->sq_type;
7369     if (!(type & SQ_CICB)) {
7370         count = sq->sq_count;
7371         SQ_PUTLOCKS_ENTER(sq);
7372         SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
7373         SUM_SQ_PUTCOUNTS(sq, count);
7374         sq->sq_needexcl++;
7375         ASSERT(sq->sq_needexcl != 0);   /* wraparound */
7376         waitflags |= SQ_MESSAGES;
7377     }
7378     /* Cannot handle exclusive entry at the outer perimeter */
7379     ASSERT(type & SQ_COCB);
7380 
7381     while ((sq->sq_flags & waitflags) || (!(type & SQ_CICB) && count != 0)) {
7382         if ((sq->sq_callbflags & cbp->cbp_flags) &&
7383             (sq->sq_cancelid == cbp->cbp_id)) {
7384             /* timeout has been cancelled */
7385             sq->sq_callbflags |= SQ_CALLB_BYPASSED;
7386             callbparams_free(sq, cbp);
7387             if (!(type & SQ_CICB)) {
7388                 ASSERT(sq->sq_needexcl > 0);
7389                 sq->sq_needexcl--;
7390                 if (sq->sq_needexcl == 0) {
7391                     SQ_PUTCOUNT_SETFAST_LOCKED(sq);
7392                 }
7393                 SQ_PUTLOCKS_EXIT(sq);
7394             }
7395             mutex_exit(SQLOCK(sq));
7396             return;
7397         }
7398         sq->sq_flags |= SQ_WANTWAKEUP;
7399         if (!(type & SQ_CICB)) {
7400             SQ_PUTLOCKS_EXIT(sq);
7401         }
7402         cv_wait(&sq->sq_wait, SQLOCK(sq));
7403         if (!(type & SQ_CICB)) {
7404             count = sq->sq_count;
7405             SQ_PUTLOCKS_ENTER(sq);
7406             SUM_SQ_PUTCOUNTS(sq, count);
7407         }
7408     }
7409 
7410     sq->sq_count++;
7411     ASSERT(sq->sq_count != 0);  /* Wraparound */
7412     if (!(type & SQ_CICB)) {
7413         ASSERT(count == 0);
7414         sq->sq_flags |= SQ_EXCL;
7415         ASSERT(sq->sq_needexcl > 0);
7416         sq->sq_needexcl--;
7417         if (sq->sq_needexcl == 0) {
7418             SQ_PUTCOUNT_SETFAST_LOCKED(sq);
7419         }
7420         SQ_PUTLOCKS_EXIT(sq);
7421     }
7422 
7423     mutex_exit(SQLOCK(sq));
7424 
7425     cbp->cbp_func(cbp->cbp_arg);
7426 
7427     /*
7428      * We drop the lock only for leavesq to re-acquire it.
7429      * Possible optimization is inline of leavesq.
7430      */
7431     mutex_enter(SQLOCK(sq));
7432     callbparams_free(sq, cbp);
7433     mutex_exit(SQLOCK(sq));
7434     leavesq(sq, SQ_CALLBACK);
7435 }
7436 
7437 /*
7438  * No need to grab sq_putlocks here. See comment in strsubr.h that
7439  * explains when sq_putlocks are used.
7440  *
7441  * sq_count (or one of the sq_putcounts) has already been
7442  * decremented by the caller, and if SQ_QUEUED, we need to call
7443  * drain_syncq (the global syncq drain).
7444  * If putnext_tail is called with the SQ_EXCL bit set, we are in
7445  * one of two states: a non-CIPUT perimeter, where we need to clear
7446  * the bit, or we went exclusive in the put procedure. In either case,
7447  * we want to clear the bit now, and it is probably easier to do
7448  * this at the beginning of this function (remember, we hold
7449  * the SQLOCK). Lastly, if there are other messages queued
7450  * on the syncq (and not for our destination), enable the syncq
7451  * for background work.
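 *
 * The calling convention, as used by qfill_syncq() above (putnext_tail
 * drops SQLOCK before returning):
 *
 *	mutex_enter(SQLOCK(sq));
 *	sq->sq_count--;			drop the caller's claim first
 *	putnext_tail(sq, q, 0);
 *	...must not reference sq or q after this point...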
7452 */ 7453 7454 /* ARGSUSED */ 7455 void 7456 putnext_tail(syncq_t *sq, queue_t *qp, uint32_t passflags) 7457 { 7458 uint16_t flags = sq->sq_flags; 7459 7460 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7461 ASSERT(MUTEX_NOT_HELD(QLOCK(qp))); 7462 7463 /* Clear SQ_EXCL if set in passflags */ 7464 if (passflags & SQ_EXCL) { 7465 flags &= ~SQ_EXCL; 7466 } 7467 if (flags & SQ_WANTWAKEUP) { 7468 flags &= ~SQ_WANTWAKEUP; 7469 cv_broadcast(&sq->sq_wait); 7470 } 7471 if (flags & SQ_WANTEXWAKEUP) { 7472 flags &= ~SQ_WANTEXWAKEUP; 7473 cv_broadcast(&sq->sq_exitwait); 7474 } 7475 sq->sq_flags = flags; 7476 7477 /* 7478 * We have cleared SQ_EXCL if we were asked to, and started 7479 * the wakeup process for waiters. If there are no writers 7480 * then we need to drain the syncq if we were told to, or 7481 * enable the background thread to do it. 7482 */ 7483 if (!(flags & (SQ_STAYAWAY|SQ_EXCL))) { 7484 if ((passflags & SQ_QUEUED) || 7485 (sq->sq_svcflags & SQ_DISABLED)) { 7486 /* drain_syncq will take care of events in the list */ 7487 drain_syncq(sq); 7488 return; 7489 } else if (flags & SQ_QUEUED) { 7490 sqenable(sq); 7491 } 7492 } 7493 /* Drop the SQLOCK on exit */ 7494 mutex_exit(SQLOCK(sq)); 7495 TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END, 7496 "putnext_end:(%p, %p, %p) done", NULL, qp, sq); 7497 } 7498 7499 void 7500 set_qend(queue_t *q) 7501 { 7502 mutex_enter(QLOCK(q)); 7503 if (!O_SAMESTR(q)) 7504 q->q_flag |= QEND; 7505 else 7506 q->q_flag &= ~QEND; 7507 mutex_exit(QLOCK(q)); 7508 q = _OTHERQ(q); 7509 mutex_enter(QLOCK(q)); 7510 if (!O_SAMESTR(q)) 7511 q->q_flag |= QEND; 7512 else 7513 q->q_flag &= ~QEND; 7514 mutex_exit(QLOCK(q)); 7515 } 7516 7517 /* 7518 * Set QFULL in next service procedure queue (that cares) if not already 7519 * set and if there are already more messages on the syncq than 7520 * sq_max_size. If sq_max_size is 0, no flow control will be asserted on 7521 * any syncq. 7522 * 7523 * The fq here is the next queue with a service procedure. This is where 7524 * we would fail canputnext, so this is where we need to set QFULL. 7525 * In the case when fq != q we need to take QLOCK(fq) to set QFULL flag. 7526 * 7527 * We already have QLOCK at this point. To avoid cross-locks with 7528 * freezestr() which grabs all QLOCKs and with strlock() which grabs both 7529 * SQLOCK and sd_reflock, we need to drop respective locks first. 7530 */ 7531 void 7532 set_qfull(queue_t *q) 7533 { 7534 queue_t *fq = NULL; 7535 7536 ASSERT(MUTEX_HELD(QLOCK(q))); 7537 if ((sq_max_size != 0) && (!(q->q_nfsrv->q_flag & QFULL)) && 7538 (q->q_syncqmsgs > sq_max_size)) { 7539 if ((fq = q->q_nfsrv) == q) { 7540 fq->q_flag |= QFULL; 7541 } else { 7542 mutex_exit(QLOCK(q)); 7543 mutex_enter(QLOCK(fq)); 7544 fq->q_flag |= QFULL; 7545 mutex_exit(QLOCK(fq)); 7546 mutex_enter(QLOCK(q)); 7547 } 7548 } 7549 } 7550 7551 void 7552 clr_qfull(queue_t *q) 7553 { 7554 queue_t *oq = q; 7555 7556 q = q->q_nfsrv; 7557 /* Fast check if there is any work to do before getting the lock. */ 7558 if ((q->q_flag & (QFULL|QWANTW)) == 0) { 7559 return; 7560 } 7561 7562 /* 7563 * Do not reset QFULL (and backenable) if the q_count is the reason 7564 * for QFULL being set. 7565 */ 7566 mutex_enter(QLOCK(q)); 7567 /* 7568 * If queue is empty i.e q_mblkcnt is zero, queue can not be full. 7569 * Hence clear the QFULL. 7570 * If both q_count and q_mblkcnt are less than the hiwat mark, 7571 * clear the QFULL. 
7572      */
7573     if (q->q_mblkcnt == 0 || ((q->q_count < q->q_hiwat) &&
7574         (q->q_mblkcnt < q->q_hiwat))) {
7575         q->q_flag &= ~QFULL;
7576         /*
7577          * Stated another way:
7578          *	if someone wants to write,
7579          *	AND
7580          *	both counts are less than the lowat mark
7581          *	OR
7582          *	the lowat mark is zero
7583          *	THEN
7584          *	backenable
7585          */
7586         if ((q->q_flag & QWANTW) &&
7587             (((q->q_count < q->q_lowat) &&
7588             (q->q_mblkcnt < q->q_lowat)) || q->q_lowat == 0)) {
7589             q->q_flag &= ~QWANTW;
7590             mutex_exit(QLOCK(q));
7591             backenable(oq, 0);
7592         } else
7593             mutex_exit(QLOCK(q));
7594     } else
7595         mutex_exit(QLOCK(q));
7596 }
7597 
7598 /*
7599  * Set the forward service procedure pointer.
7600  *
7601  * Called at insert-time to cache a queue's next forward service procedure in
7602  * q_nfsrv; used by canput() and canputnext(). If the queue to be inserted
7603  * has a service procedure then q_nfsrv points to itself. If the queue to be
7604  * inserted does not have a service procedure, then q_nfsrv points to the next
7605  * queue forward that has a service procedure. If the queue is at the logical
7606  * end of the stream (driver for write side, stream head for the read side)
7607  * and does not have a service procedure, then q_nfsrv also points to itself.
7608  */
7609 void
7610 set_nfsrv_ptr(
7611     queue_t  *rnew,     /* read queue pointer to new module */
7612     queue_t  *wnew,     /* write queue pointer to new module */
7613     queue_t  *prev_rq,  /* read queue pointer to the module above */
7614     queue_t  *prev_wq)  /* write queue pointer to the module above */
7615 {
7616     queue_t *qp;
7617 
7618     if (prev_wq->q_next == NULL) {
7619         /*
7620          * Insert the driver, initialize the driver and stream head.
7621          * In this case, prev_rq/prev_wq should be the stream head.
7622          * _I_INSERT does not allow inserting a driver. Make sure
7623          * that it is not an insertion.
7624          */
7625         ASSERT(!(rnew->q_flag & _QINSERTING));
7626         wnew->q_nfsrv = wnew;
7627         if (rnew->q_qinfo->qi_srvp)
7628             rnew->q_nfsrv = rnew;
7629         else
7630             rnew->q_nfsrv = prev_rq;
7631         prev_rq->q_nfsrv = prev_rq;
7632         prev_wq->q_nfsrv = prev_wq;
7633     } else {
7634         /*
7635          * set up read side q_nfsrv pointer. This MUST be done
7636          * before setting the write side, because the setting of
7637          * the write side for a fifo may depend on it.
7638          *
7639          * Suppose we have a fifo that only has pipemod pushed.
7640          * pipemod has no read or write service procedures, so
7641          * nfsrv for both pipemod queues points to prev_rq (the
7642          * stream read head). Now push bufmod (which has only a
7643          * read service procedure). Doing the write side first,
7644          * wnew->q_nfsrv is set to pipemod's writeq nfsrv, which
7645          * is WRONG; the next queue forward from wnew with a
7646          * service procedure will be rnew, not the stream read head.
7647          * Since the downstream queue (which in the case of a fifo
7648          * is the read queue rnew) can affect upstream queues, it
7649          * needs to be done first. Setting up the read side first
7650          * sets nfsrv for both pipemod queues to rnew and then
7651          * when the write side is set up, wnew->q_nfsrv will also
7652          * point to rnew.
7653          */
7654         if (rnew->q_qinfo->qi_srvp) {
7655             /*
7656              * use _OTHERQ() because, if this is a pipe, next
7657              * module may have been pushed from other end and
7658              * q_next could be a read queue.
7659              */
7660             qp = _OTHERQ(prev_wq->q_next);
7661             while (qp && qp->q_nfsrv != qp) {
7662                 qp->q_nfsrv = rnew;
7663                 qp = backq(qp);
7664             }
7665             rnew->q_nfsrv = rnew;
7666         } else
7667             rnew->q_nfsrv = prev_rq->q_nfsrv;
7668 
7669         /* set up write side q_nfsrv pointer */
7670         if (wnew->q_qinfo->qi_srvp) {
7671             wnew->q_nfsrv = wnew;
7672 
7673             /*
7674              * For insertion, need to update nfsrv of the modules
7675              * above which do not have a service routine.
7676              */
7677             if (rnew->q_flag & _QINSERTING) {
7678                 for (qp = prev_wq;
7679                     qp != NULL && qp->q_nfsrv != qp;
7680                     qp = backq(qp)) {
7681                     qp->q_nfsrv = wnew->q_nfsrv;
7682                 }
7683             }
7684         } else {
7685             if (prev_wq->q_next == prev_rq)
7686                 /*
7687                  * Since prev_wq/prev_rq are the middle of a
7688                  * fifo, wnew/rnew will also be the middle of
7689                  * a fifo and wnew's nfsrv is same as rnew's.
7690                  */
7691                 wnew->q_nfsrv = rnew->q_nfsrv;
7692             else
7693                 wnew->q_nfsrv = prev_wq->q_next->q_nfsrv;
7694         }
7695     }
7696 }
7697 
7698 /*
7699  * Reset the forward service procedure pointer; called at remove-time.
7700  */
7701 void
7702 reset_nfsrv_ptr(queue_t *rqp, queue_t *wqp)
7703 {
7704     queue_t *tmp_qp;
7705 
7706     /* Reset the write side q_nfsrv pointer for _I_REMOVE */
7707     if ((rqp->q_flag & _QREMOVING) && (wqp->q_qinfo->qi_srvp != NULL)) {
7708         for (tmp_qp = backq(wqp);
7709             tmp_qp != NULL && tmp_qp->q_nfsrv == wqp;
7710             tmp_qp = backq(tmp_qp)) {
7711             tmp_qp->q_nfsrv = wqp->q_nfsrv;
7712         }
7713     }
7714 
7715     /* reset the read side q_nfsrv pointer */
7716     if (rqp->q_qinfo->qi_srvp) {
7717         if (wqp->q_next) {  /* non-driver case */
7718             tmp_qp = _OTHERQ(wqp->q_next);
7719             while (tmp_qp && tmp_qp->q_nfsrv == rqp) {
7720                 /* Note that rqp->q_next cannot be NULL */
7721                 ASSERT(rqp->q_next != NULL);
7722                 tmp_qp->q_nfsrv = rqp->q_next->q_nfsrv;
7723                 tmp_qp = backq(tmp_qp);
7724             }
7725         }
7726     }
7727 }
7728 
7729 /*
7730  * This routine should be called after all stream geometry changes to update
7731  * the stream head cached struio() rd/wr queue pointers. Note: must be called
7732  * with the stream strlock()'ed.
7733  *
7734  * Note: only enables Synchronous STREAMS for a side of a Stream which has
7735  * an explicit synchronous barrier module queue. That is, a queue that
7736  * has specified a struio() type.
7737  */
7738 static void
7739 strsetuio(stdata_t *stp)
7740 {
7741     queue_t *wrq;
7742 
7743     if (stp->sd_flag & STPLEX) {
7744         /*
7745          * Not a stream head, but a mux, so no Synchronous STREAMS.
7746          */
7747         stp->sd_struiowrq = NULL;
7748         stp->sd_struiordq = NULL;
7749         return;
7750     }
7751     /*
7752      * Scan the write queue(s) while synchronous
7753      * until we find a qinfo uio type specified.
7754      */
7755     wrq = stp->sd_wrq->q_next;
7756     while (wrq) {
7757         if (wrq->q_struiot == STRUIOT_NONE) {
7758             wrq = 0;
7759             break;
7760         }
7761         if (wrq->q_struiot != STRUIOT_DONTCARE)
7762             break;
7763         if (! _SAMESTR(wrq)) {
7764             wrq = 0;
7765             break;
7766         }
7767         wrq = wrq->q_next;
7768     }
7769     stp->sd_struiowrq = wrq;
7770     /*
7771      * Scan the read queue(s) while synchronous
7772      * until we find a qinfo uio type specified.
7773      */
7774     wrq = stp->sd_wrq->q_next;
7775     while (wrq) {
7776         if (_RD(wrq)->q_struiot == STRUIOT_NONE) {
7777             wrq = 0;
7778             break;
7779         }
7780         if (_RD(wrq)->q_struiot != STRUIOT_DONTCARE)
7781             break;
7782         if (! _SAMESTR(wrq)) {
7783             wrq = 0;
7784             break;
7785         }
7786         wrq = wrq->q_next;
7787     }
7788     stp->sd_struiordq = wrq ? _RD(wrq) : 0;
7789 }
7790 
7791 /*
7792  * pass_wput unblocks the passthru queues, so that
7793  * messages can arrive at the mux's lower read queue, before
7794  * the I_LINK/I_UNLINK is acked/nacked.
7795  */
7796 static void
7797 pass_wput(queue_t *q, mblk_t *mp)
7798 {
7799     syncq_t *sq;
7800 
7801     sq = _RD(q)->q_syncq;
7802     if (sq->sq_flags & SQ_BLOCKED)
7803         unblocksq(sq, SQ_BLOCKED, 0);
7804     putnext(q, mp);
7805 }
7806 
7807 /*
7808  * Set up queues for the link/unlink.
7809  * Create a new queue and block it and then insert it
7810  * below the stream head on the lower stream.
7811  * This prevents any messages from arriving during the setq
7812  * as well as while the mux is processing the I_LINK/I_UNLINK.
7813  * The blocked passq is unblocked once the I_LINK/I_UNLINK has
7814  * been acked or nacked, or if a message is generated and sent
7815  * down the mux's write put procedure.
7816  * See pass_wput().
7817  *
7818  * After the new queue is inserted, all messages coming from below are
7819  * blocked. The call to strlock will ensure that all activity in the stream
7820  * head read queue syncq is stopped (sq_count drops to zero).
7821  */
7822 static queue_t *
7823 link_addpassthru(stdata_t *stpdown)
7824 {
7825     queue_t *passq;
7826     sqlist_t sqlist;
7827 
7828     passq = allocq();
7829     STREAM(passq) = STREAM(_WR(passq)) = stpdown;
7830     /* setq might sleep in allocator - avoid holding locks. */
7831     setq(passq, &passthru_rinit, &passthru_winit, NULL, QPERQ,
7832         SQ_CI|SQ_CO, B_FALSE);
7833     claimq(passq);
7834     blocksq(passq->q_syncq, SQ_BLOCKED, 1);
7835     insertq(STREAM(passq), passq);
7836 
7837     /*
7838      * Use strlock() to wait for the stream head sq_count to drop to zero
7839      * since we are going to change q_ptr in the stream head. Note that
7840      * insertq() doesn't wait for any syncq counts to drop to zero.
7841      */
7842     sqlist.sqlist_head = NULL;
7843     sqlist.sqlist_index = 0;
7844     sqlist.sqlist_size = sizeof (sqlist_t);
7845     sqlist_insert(&sqlist, _RD(stpdown->sd_wrq)->q_syncq);
7846     strlock(stpdown, &sqlist);
7847     strunlock(stpdown, &sqlist);
7848 
7849     releaseq(passq);
7850     return (passq);
7851 }
7852 
7853 /*
7854  * Let messages flow up into the mux by removing
7855  * the passq.
7856  */
7857 static void
7858 link_rempassthru(queue_t *passq)
7859 {
7860     claimq(passq);
7861     removeq(passq);
7862     releaseq(passq);
7863     freeq(passq);
7864 }
7865 
7866 /*
7867  * Wait for the condition variable pointed to by `cvp' to be signaled,
7868  * or for `tim' milliseconds to elapse, whichever comes first. If `tim'
7869  * is negative, then there is no time limit. If `nosigs' is non-zero,
7870  * then the wait will be non-interruptible.
7871  *
7872  * Returns >0 if signaled, 0 if interrupted, or -1 upon timeout.
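 *
 * Example (as used by strwaitmark() below): wait up to 100 milliseconds
 * for a state change, without allowing signals to interrupt the wait:
 *
 *	if (str_cv_wait(&rq->q_wait, &stp->sd_lock, 100, 1) == -1)
 *		...timed out...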
/*
 * Wait for the condition variable pointed to by `cvp' to be signaled,
 * or for `tim' milliseconds to elapse, whichever comes first. If `tim'
 * is negative, then there is no time limit. If `nosigs' is non-zero,
 * then the wait will be non-interruptible.
 *
 * Returns >0 if signaled, 0 if interrupted, or -1 upon timeout.
 */
clock_t
str_cv_wait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim, int nosigs)
{
	clock_t ret, now, tick;

	if (tim < 0) {
		if (nosigs) {
			cv_wait(cvp, mp);
			ret = 1;
		} else {
			ret = cv_wait_sig(cvp, mp);
		}
	} else if (tim > 0) {
		/*
		 * convert milliseconds to clock ticks
		 */
		tick = MSEC_TO_TICK_ROUNDUP(tim);
		time_to_wait(&now, tick);
		if (nosigs) {
			ret = cv_timedwait(cvp, mp, now);
		} else {
			ret = cv_timedwait_sig(cvp, mp, now);
		}
	} else {
		ret = -1;
	}
	return (ret);
}

/*
 * Wait until the stream head can determine if it is at the mark but
 * don't wait forever to prevent a race condition between the "mark" state
 * in the stream head and any mark state in the caller/user of this routine.
 *
 * This is used by sockets; for a socket it would be incorrect
 * to return a failure for SIOCATMARK when there is no data in the receive
 * queue and the marked urgent data is traveling up the stream.
 *
 * This routine waits until the mark is known by waiting for one of these
 * three events:
 *	The stream head read queue becoming non-empty (including an EOF).
 *	The STRATMARK flag being set (due to a MSGMARKNEXT message).
 *	The STRNOTATMARK flag being set (which indicates that the transport
 *	has sent a MSGNOTMARKNEXT message to indicate that it is not at
 *	the mark).
 *
 * The routine returns 1 if the stream is at the mark; 0 if it can
 * be determined that the stream is not at the mark.
 * If the wait times out and it can't determine
 * whether or not the stream might be at the mark, the routine returns -1.
 *
 * Note: This routine should only be used when a mark is pending, i.e.,
 * in the socket case after the SIGURG has been posted.
 * Note2: This cannot wake up just because synchronous streams indicate
 * that data is available, since it is not possible to use the synchronous
 * streams interfaces to determine the b_flag value for the data queued below
 * the stream head.
 */
int
strwaitmark(vnode_t *vp)
{
	struct stdata *stp = vp->v_stream;
	queue_t *rq = _RD(stp->sd_wrq);
	int mark;

	mutex_enter(&stp->sd_lock);
	while (rq->q_first == NULL &&
	    !(stp->sd_flag & (STRATMARK|STRNOTATMARK|STREOF))) {
		stp->sd_flag |= RSLEEP;

		/* Wait for 100 milliseconds for any state change. */
		if (str_cv_wait(&rq->q_wait, &stp->sd_lock, 100, 1) == -1) {
			mutex_exit(&stp->sd_lock);
			return (-1);
		}
	}
	if (stp->sd_flag & STRATMARK)
		mark = 1;
	else if (rq->q_first != NULL && (rq->q_first->b_flag & MSGMARK))
		mark = 1;
	else
		mark = 0;

	mutex_exit(&stp->sd_lock);
	return (mark);
}
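/*
 * Illustrative sketch (hypothetical caller, not part of this file): how a
 * SIOCATMARK-style ioctl handler might use strwaitmark() once a mark is
 * pending. The EAGAIN choice for the "unknown" case is an assumption of
 * the example only.
 */
static int
sock_atmark_example(vnode_t *vp, int *atmarkp)
{
	int ret = strwaitmark(vp);

	if (ret == -1) {
		/* Timed out; the mark state is still unknown. */
		return (EAGAIN);
	}
	*atmarkp = ret;		/* 1 if at the mark, 0 if not */
	return (0);
}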
/*
 * Set a read side error. If persist is set, change the socket error
 * to persistent. If errfunc is set, install the function as the exported
 * error handler.
 */
void
strsetrerror(vnode_t *vp, int error, int persist, errfunc_t errfunc)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	stp->sd_rerror = error;
	if (error == 0 && errfunc == NULL)
		stp->sd_flag &= ~STRDERR;
	else
		stp->sd_flag |= STRDERR;
	if (persist) {
		stp->sd_flag &= ~STRDERRNONPERSIST;
	} else {
		stp->sd_flag |= STRDERRNONPERSIST;
	}
	stp->sd_rderrfunc = errfunc;
	if (error != 0 || errfunc != NULL) {
		cv_broadcast(&_RD(stp->sd_wrq)->q_wait);	/* readers */
		cv_broadcast(&stp->sd_wrq->q_wait);		/* writers */
		cv_broadcast(&stp->sd_monitor);			/* ioctllers */

		mutex_exit(&stp->sd_lock);
		pollwakeup(&stp->sd_pollist, POLLERR);
		mutex_enter(&stp->sd_lock);

		if (stp->sd_sigflags & S_ERROR)
			strsendsig(stp->sd_siglist, S_ERROR, 0, error);
	}
	mutex_exit(&stp->sd_lock);
}

/*
 * Set a write side error. If persist is set, change the socket error
 * to persistent. If errfunc is set, install the function as the exported
 * error handler.
 */
void
strsetwerror(vnode_t *vp, int error, int persist, errfunc_t errfunc)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	stp->sd_werror = error;
	if (error == 0 && errfunc == NULL)
		stp->sd_flag &= ~STWRERR;
	else
		stp->sd_flag |= STWRERR;
	if (persist) {
		stp->sd_flag &= ~STWRERRNONPERSIST;
	} else {
		stp->sd_flag |= STWRERRNONPERSIST;
	}
	stp->sd_wrerrfunc = errfunc;
	if (error != 0 || errfunc != NULL) {
		cv_broadcast(&_RD(stp->sd_wrq)->q_wait);	/* readers */
		cv_broadcast(&stp->sd_wrq->q_wait);		/* writers */
		cv_broadcast(&stp->sd_monitor);			/* ioctllers */

		mutex_exit(&stp->sd_lock);
		pollwakeup(&stp->sd_pollist, POLLERR);
		mutex_enter(&stp->sd_lock);

		if (stp->sd_sigflags & S_ERROR)
			strsendsig(stp->sd_siglist, S_ERROR, 0, error);
	}
	mutex_exit(&stp->sd_lock);
}
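/*
 * Illustrative sketch (hypothetical caller): a transport marking a stream
 * with errors after a connection reset. The choice of ECONNRESET/EPIPE and
 * the persistence settings are assumptions of the example only.
 */
static void
transport_seterror_example(vnode_t *vp)
{
	/* Readers see ECONNRESET; marked persistent here. */
	strsetrerror(vp, ECONNRESET, 1, NULL);
	/* Writers see EPIPE; marked non-persistent here. */
	strsetwerror(vp, EPIPE, 0, NULL);
}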
/*
 * Make the stream return 0 (EOF) when all data has been read.
 * No effect on write side.
 */
void
strseteof(vnode_t *vp, int eof)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	if (!eof) {
		stp->sd_flag &= ~STREOF;
		mutex_exit(&stp->sd_lock);
		return;
	}
	stp->sd_flag |= STREOF;
	if (stp->sd_flag & RSLEEP) {
		stp->sd_flag &= ~RSLEEP;
		cv_broadcast(&_RD(stp->sd_wrq)->q_wait);
	}

	mutex_exit(&stp->sd_lock);
	pollwakeup(&stp->sd_pollist, POLLIN|POLLRDNORM);
	mutex_enter(&stp->sd_lock);

	if (stp->sd_sigflags & (S_INPUT|S_RDNORM))
		strsendsig(stp->sd_siglist, S_INPUT|S_RDNORM, 0, 0);
	mutex_exit(&stp->sd_lock);
}

void
strflushrq(vnode_t *vp, int flag)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	flushq(_RD(stp->sd_wrq), flag);
	mutex_exit(&stp->sd_lock);
}

void
strsetrputhooks(vnode_t *vp, uint_t flags,
    msgfunc_t protofunc, msgfunc_t miscfunc)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);

	if (protofunc == NULL)
		stp->sd_rprotofunc = strrput_proto;
	else
		stp->sd_rprotofunc = protofunc;

	if (miscfunc == NULL)
		stp->sd_rmiscfunc = strrput_misc;
	else
		stp->sd_rmiscfunc = miscfunc;

	if (flags & SH_CONSOL_DATA)
		stp->sd_rput_opt |= SR_CONSOL_DATA;
	else
		stp->sd_rput_opt &= ~SR_CONSOL_DATA;

	if (flags & SH_SIGALLDATA)
		stp->sd_rput_opt |= SR_SIGALLDATA;
	else
		stp->sd_rput_opt &= ~SR_SIGALLDATA;

	if (flags & SH_IGN_ZEROLEN)
		stp->sd_rput_opt |= SR_IGN_ZEROLEN;
	else
		stp->sd_rput_opt &= ~SR_IGN_ZEROLEN;

	mutex_exit(&stp->sd_lock);
}

void
strsetwputhooks(vnode_t *vp, uint_t flags, clock_t closetime)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	stp->sd_closetime = closetime;

	if (flags & SH_SIGPIPE)
		stp->sd_wput_opt |= SW_SIGPIPE;
	else
		stp->sd_wput_opt &= ~SW_SIGPIPE;
	if (flags & SH_RECHECK_ERR)
		stp->sd_wput_opt |= SW_RECHECK_ERR;
	else
		stp->sd_wput_opt &= ~SW_RECHECK_ERR;

	mutex_exit(&stp->sd_lock);
}

void
strsetrwputdatahooks(vnode_t *vp, msgfunc_t rdatafunc, msgfunc_t wdatafunc)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);

	stp->sd_rputdatafunc = rdatafunc;
	stp->sd_wputdatafunc = wdatafunc;

	mutex_exit(&stp->sd_lock);
}
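/*
 * Illustrative sketch (hypothetical caller): a socket-style stream head
 * consumer installing its read/write side hooks. my_proto and my_misc
 * stand in for real msgfunc_t handlers, and the particular flag
 * combination is an assumption of the example only.
 */
static void
sock_sethooks_example(vnode_t *vp, msgfunc_t my_proto, msgfunc_t my_misc)
{
	/* Consolidate M_DATA and signal on all data arrivals. */
	strsetrputhooks(vp, SH_CONSOL_DATA | SH_SIGALLDATA, my_proto, my_misc);
	/* Post SIGPIPE on write-side errors; no close timeout. */
	strsetwputhooks(vp, SH_SIGPIPE, 0);
}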
/* Used within framework when the queue is already locked */
void
qenable_locked(queue_t *q)
{
	stdata_t *stp = STREAM(q);

	ASSERT(MUTEX_HELD(QLOCK(q)));

	if (!q->q_qinfo->qi_srvp)
		return;

	/*
	 * Do not place on run queue if already enabled or closing.
	 */
	if (q->q_flag & (QWCLOSE|QENAB))
		return;

	/*
	 * Mark the queue enabled and place it on the run list if it is not
	 * already being serviced. If it is being serviced, the runservice()
	 * function will detect that QENAB is set and call the service
	 * procedure before clearing the QINSERVICE flag.
	 */
	q->q_flag |= QENAB;
	if (q->q_flag & QINSERVICE)
		return;

	/* Record the time of qenable */
	q->q_qtstamp = lbolt;

	/*
	 * Put the queue in the stp list and schedule it for background
	 * processing if it is not already scheduled or if the stream head
	 * does not intend to process it in the foreground later (indicated
	 * by the STRS_WILLSERVICE flag).
	 */
	mutex_enter(&stp->sd_qlock);
	/*
	 * If there is already something on the list, the stp flags should
	 * show the intention to drain it.
	 */
	IMPLY(STREAM_NEEDSERVICE(stp),
	    (stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED)));

	ENQUEUE(q, stp->sd_qhead, stp->sd_qtail, q_link);
	stp->sd_nqueues++;

	/*
	 * If no one will drain this stream, we are the first producer and
	 * need to schedule it for the background thread.
	 */
	if (!(stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED))) {
		/*
		 * No one will service this stream later, so we have to
		 * schedule it now.
		 */
		STRSTAT(stenables);
		stp->sd_svcflags |= STRS_SCHEDULED;
		stp->sd_servid = (void *)taskq_dispatch(streams_taskq,
		    (task_func_t *)stream_service, stp, TQ_NOSLEEP|TQ_NOQUEUE);

		if (stp->sd_servid == NULL) {
			/*
			 * Task queue failed so fail over to the backup
			 * servicing thread.
			 */
			STRSTAT(taskqfails);
			/*
			 * It is safe to clear the STRS_SCHEDULED flag because
			 * it was set by this thread above.
			 */
			stp->sd_svcflags &= ~STRS_SCHEDULED;

			/*
			 * Failover scheduling is protected by the
			 * service_queue lock.
			 */
			mutex_enter(&service_queue);
			ASSERT((stp->sd_qhead == q) && (stp->sd_qtail == q));
			ASSERT(q->q_link == NULL);
			/*
			 * Append the queue to the qhead/qtail list.
			 */
			if (qhead == NULL)
				qhead = q;
			else
				qtail->q_link = q;
			qtail = q;
			/*
			 * Clear the stp queue list.
			 */
			stp->sd_qhead = stp->sd_qtail = NULL;
			stp->sd_nqueues = 0;
			/*
			 * Wake up the background queue processing thread.
			 */
			cv_signal(&services_to_run);
			mutex_exit(&service_queue);
		}
	}
	mutex_exit(&stp->sd_qlock);
}

static void
queue_service(queue_t *q)
{
	/*
	 * A queue on the list should have the QENAB flag set and should not
	 * have the QINSERVICE flag set. QINSERVICE is set when the queue is
	 * dequeued, and qenable_locked() doesn't enqueue a queue with
	 * QINSERVICE set.
	 */
	ASSERT(!(q->q_flag & QINSERVICE));
	ASSERT((q->q_flag & QENAB));
	mutex_enter(QLOCK(q));
	q->q_flag &= ~QENAB;
	q->q_flag |= QINSERVICE;
	mutex_exit(QLOCK(q));
	runservice(q);
}

static void
syncq_service(syncq_t *sq)
{
	STRSTAT(syncqservice);
	mutex_enter(SQLOCK(sq));
	ASSERT(!(sq->sq_svcflags & SQ_SERVICE));
	ASSERT(sq->sq_servcount != 0);
	ASSERT(sq->sq_next == NULL);

	/* if we came here from the background thread, clear the flag */
	if (sq->sq_svcflags & SQ_BGTHREAD)
		sq->sq_svcflags &= ~SQ_BGTHREAD;

	/* let drain_syncq know that it's being called in the background */
	sq->sq_svcflags |= SQ_SERVICE;
	drain_syncq(sq);
}
static void
qwriter_outer_service(syncq_t *outer)
{
	/*
	 * Note that SQ_WRITER is used on the outer perimeter
	 * to signal that a qwriter(OUTER) is either waiting to run
	 * or actually running a function.
	 */
	outer_enter(outer, SQ_BLOCKED|SQ_WRITER);

	/*
	 * All inner syncqs are empty and have SQ_WRITER set
	 * to block entering the outer perimeter.
	 *
	 * We do not need to explicitly call write_now since
	 * outer_exit does it for us.
	 */
	outer_exit(outer);
}

static void
mblk_free(mblk_t *mp)
{
	dblk_t *dbp = mp->b_datap;
	frtn_t *frp = dbp->db_frtnp;

	mp->b_next = NULL;
	if (dbp->db_fthdr != NULL)
		str_ftfree(dbp);

	ASSERT(dbp->db_fthdr == NULL);
	frp->free_func(frp->free_arg);
	ASSERT(dbp->db_mblk == mp);

	if (dbp->db_credp != NULL) {
		crfree(dbp->db_credp);
		dbp->db_credp = NULL;
	}
	dbp->db_cpid = -1;
	dbp->db_struioflag = 0;
	dbp->db_struioun.cksum.flags = 0;

	kmem_cache_free(dbp->db_cache, dbp);
}

/*
 * Background processing of the stream queue list.
 */
static void
stream_service(stdata_t *stp)
{
	queue_t *q;

	mutex_enter(&stp->sd_qlock);

	STR_SERVICE(stp, q);

	stp->sd_svcflags &= ~STRS_SCHEDULED;
	stp->sd_servid = NULL;
	cv_signal(&stp->sd_qcv);
	mutex_exit(&stp->sd_qlock);
}

/*
 * Foreground processing of the stream queue list.
 */
void
stream_runservice(stdata_t *stp)
{
	queue_t *q;

	mutex_enter(&stp->sd_qlock);
	STRSTAT(rservice);
	/*
	 * We are going to drain this stream queue list, so qenable_locked
	 * will not schedule it until we finish.
	 */
	stp->sd_svcflags |= STRS_WILLSERVICE;

	STR_SERVICE(stp, q);

	stp->sd_svcflags &= ~STRS_WILLSERVICE;
	mutex_exit(&stp->sd_qlock);
	/*
	 * Help the backup background thread drain the qhead/qtail list.
	 */
	while (qhead != NULL) {
		STRSTAT(qhelps);
		mutex_enter(&service_queue);
		DQ(q, qhead, qtail, q_link);
		mutex_exit(&service_queue);
		if (q != NULL)
			queue_service(q);
	}
}

void
stream_willservice(stdata_t *stp)
{
	mutex_enter(&stp->sd_qlock);
	stp->sd_svcflags |= STRS_WILLSERVICE;
	mutex_exit(&stp->sd_qlock);
}
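/*
 * Illustrative sketch (hypothetical caller): bracketing message delivery
 * with stream_willservice()/stream_runservice() so that qenable_locked()
 * defers any enabled queues to this thread rather than dispatching to the
 * taskq, as described in the comments above.
 */
static void
put_then_service_example(stdata_t *stp, queue_t *q, mblk_t *mp)
{
	stream_willservice(stp);	/* announce foreground servicing */
	putnext(q, mp);			/* may enable queues on this stream */
	stream_runservice(stp);		/* drain the deferred queue list */
}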
/*
 * Replace the cred currently in the mblk with a different one.
 */
void
mblk_setcred(mblk_t *mp, cred_t *cr)
{
	cred_t *ocr = DB_CRED(mp);

	ASSERT(cr != NULL);

	if (cr != ocr) {
		crhold(mp->b_datap->db_credp = cr);
		if (ocr != NULL)
			crfree(ocr);
	}
}

int
hcksum_assoc(mblk_t *mp, multidata_t *mmd, pdesc_t *pd,
    uint32_t start, uint32_t stuff, uint32_t end, uint32_t value,
    uint32_t flags, int km_flags)
{
	int rc = 0;

	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
	if (mp->b_datap->db_type == M_DATA) {
		/* Associate values for M_DATA type */
		DB_CKSUMSTART(mp) = (intptr_t)start;
		DB_CKSUMSTUFF(mp) = (intptr_t)stuff;
		DB_CKSUMEND(mp) = (intptr_t)end;
		DB_CKSUMFLAGS(mp) = flags;
		DB_CKSUM16(mp) = (uint16_t)value;

	} else {
		pattrinfo_t pa_info;

		ASSERT(mmd != NULL);

		pa_info.type = PATTR_HCKSUM;
		pa_info.len = sizeof (pattr_hcksum_t);

		if (mmd_addpattr(mmd, pd, &pa_info, B_TRUE, km_flags) != NULL) {
			pattr_hcksum_t *hck = (pattr_hcksum_t *)pa_info.buf;

			hck->hcksum_start_offset = start;
			hck->hcksum_stuff_offset = stuff;
			hck->hcksum_end_offset = end;
			hck->hcksum_cksum_val.inet_cksum = (uint16_t)value;
			hck->hcksum_flags = flags;
		} else {
			rc = -1;
		}
	}
	return (rc);
}

void
hcksum_retrieve(mblk_t *mp, multidata_t *mmd, pdesc_t *pd,
    uint32_t *start, uint32_t *stuff, uint32_t *end,
    uint32_t *value, uint32_t *flags)
{
	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
	if (mp->b_datap->db_type == M_DATA) {
		if (flags != NULL) {
			*flags = DB_CKSUMFLAGS(mp);
			if (*flags & HCK_PARTIALCKSUM) {
				if (start != NULL)
					*start = (uint32_t)DB_CKSUMSTART(mp);
				if (stuff != NULL)
					*stuff = (uint32_t)DB_CKSUMSTUFF(mp);
				if (end != NULL)
					*end = (uint32_t)DB_CKSUMEND(mp);
				if (value != NULL)
					*value = (uint32_t)DB_CKSUM16(mp);
			} else if ((*flags & HW_LSO) && (value != NULL))
				*value = (uint32_t)DB_LSOMSS(mp);
		}
	} else {
		pattrinfo_t hck_attr = {PATTR_HCKSUM};

		ASSERT(mmd != NULL);

		/* get hardware checksum attribute */
		if (mmd_getpattr(mmd, pd, &hck_attr) != NULL) {
			pattr_hcksum_t *hck = (pattr_hcksum_t *)hck_attr.buf;

			ASSERT(hck_attr.len >= sizeof (pattr_hcksum_t));
			if (flags != NULL)
				*flags = hck->hcksum_flags;
			if (start != NULL)
				*start = hck->hcksum_start_offset;
			if (stuff != NULL)
				*stuff = hck->hcksum_stuff_offset;
			if (end != NULL)
				*end = hck->hcksum_end_offset;
			if (value != NULL)
				*value = (uint32_t)
				    hck->hcksum_cksum_val.inet_cksum;
		}
	}
}
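/*
 * Illustrative sketch (hypothetical caller): a NIC transmit path fetching
 * partial-checksum offload metadata from an M_DATA block. For M_DATA the
 * mmd/pd arguments are unused, so NULL is passed for both.
 */
static void
nic_tx_cksum_example(mblk_t *mp)
{
	uint32_t start, stuff, end, value, flags;

	hcksum_retrieve(mp, NULL, NULL, &start, &stuff, &end, &value, &flags);
	if (flags & HCK_PARTIALCKSUM) {
		/*
		 * Hardware is expected to compute the 16-bit checksum
		 * over [start, end) and insert it at offset `stuff'.
		 */
	}
}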
/*
 * Checksum buffer *bp for len bytes with psum partial checksum,
 * or 0 if none, and return the 16 bit partial checksum.
 */
unsigned
bcksum(uchar_t *bp, int len, unsigned int psum)
{
	int odd = len & 1;
	extern unsigned int ip_ocsum();

	if (((intptr_t)bp & 1) == 0 && !odd) {
		/*
		 * Bp is 16 bit aligned and len is a multiple of 16 bits.
		 */
		return (ip_ocsum((ushort_t *)bp, len >> 1, psum));
	}
	if (((intptr_t)bp & 1) != 0) {
		/*
		 * Bp isn't 16 bit aligned.
		 */
		unsigned int tsum;

#ifdef _LITTLE_ENDIAN
		psum += *bp;
#else
		psum += *bp << 8;
#endif
		len--;
		bp++;
		tsum = ip_ocsum((ushort_t *)bp, len >> 1, 0);
		psum += ((tsum << 8) & 0xffff) | (tsum >> 8);
		if (len & 1) {
			bp += len - 1;
#ifdef _LITTLE_ENDIAN
			psum += *bp << 8;
#else
			psum += *bp;
#endif
		}
	} else {
		/*
		 * Bp is 16 bit aligned.
		 */
		psum = ip_ocsum((ushort_t *)bp, len >> 1, psum);
		if (odd) {
			bp += len - 1;
#ifdef _LITTLE_ENDIAN
			psum += *bp;
#else
			psum += *bp << 8;
#endif
		}
	}
	/*
	 * Normalize psum to 16 bits before returning the new partial
	 * checksum. The max psum value before normalization is 0x3FDFE;
	 * e.g., 0x3FDFE folds to 0x3 + 0xFDFE = 0xFE01, which fits in
	 * 16 bits.
	 */
	return ((psum >> 16) + (psum & 0xFFFF));
}

boolean_t
is_vmloaned_mblk(mblk_t *mp, multidata_t *mmd, pdesc_t *pd)
{
	boolean_t rc;

	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
	if (DB_TYPE(mp) == M_DATA) {
		rc = (((mp)->b_datap->db_struioflag & STRUIO_ZC) != 0);
	} else {
		pattrinfo_t zcopy_attr = {PATTR_ZCOPY};

		ASSERT(mmd != NULL);
		rc = (mmd_getpattr(mmd, pd, &zcopy_attr) != NULL);
	}
	return (rc);
}

void
freemsgchain(mblk_t *mp)
{
	mblk_t *next;

	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;

		freemsg(mp);
		mp = next;
	}
}

mblk_t *
copymsgchain(mblk_t *mp)
{
	mblk_t *nmp = NULL;
	mblk_t **nmpp = &nmp;

	for (; mp != NULL; mp = mp->b_next) {
		if ((*nmpp = copymsg(mp)) == NULL) {
			freemsgchain(nmp);
			return (NULL);
		}

		nmpp = &((*nmpp)->b_next);
	}

	return (nmp);
}

/* NOTE: Do not add code after this point. */
#undef QLOCK

/*
 * Replacement for the QLOCK macro for those that can't use it.
 */
kmutex_t *
QLOCK(queue_t *q)
{
	return (&(q)->q_lock);
}

/*
 * Dummy runqueues/queuerun functions for backwards compatibility.
 */
#undef runqueues
void
runqueues(void)
{
}

#undef queuerun
void
queuerun(void)
{
}

/*
 * Initialize the STR stack instance, which tracks autopush and persistent
 * links.
 */
/* ARGSUSED */
static void *
str_stack_init(netstackid_t stackid, netstack_t *ns)
{
	str_stack_t *ss;
	int i;

	ss = (str_stack_t *)kmem_zalloc(sizeof (*ss), KM_SLEEP);
	ss->ss_netstack = ns;

	/*
	 * set up autopush
	 */
	sad_initspace(ss);

	/*
	 * set up mux_node structures.
	 */
	ss->ss_devcnt = devcnt;	/* In case it should change before free */
	ss->ss_mux_nodes = kmem_zalloc((sizeof (struct mux_node) *
	    ss->ss_devcnt), KM_SLEEP);
	for (i = 0; i < ss->ss_devcnt; i++)
		ss->ss_mux_nodes[i].mn_imaj = i;
	return (ss);
}
/*
 * Note: this runs at zone shutdown, not destroy, so that the PLINKs are
 * gone by the time other cleanup happens from the destroy callbacks.
 */
static void
str_stack_shutdown(netstackid_t stackid, void *arg)
{
	str_stack_t *ss = (str_stack_t *)arg;
	int i;
	cred_t *cr;

	cr = zone_get_kcred(netstackid_to_zoneid(stackid));
	ASSERT(cr != NULL);

	/* Undo all the I_PLINKs for this zone */
	for (i = 0; i < ss->ss_devcnt; i++) {
		struct mux_edge	*ep;
		ldi_handle_t	lh;
		ldi_ident_t	li;
		int		ret;
		int		rval;
		dev_t		rdev;

		ep = ss->ss_mux_nodes[i].mn_outp;
		if (ep == NULL)
			continue;
		ret = ldi_ident_from_major((major_t)i, &li);
		if (ret != 0) {
			continue;
		}
		rdev = ep->me_dev;
		ret = ldi_open_by_dev(&rdev, OTYP_CHR, FREAD|FWRITE,
		    cr, &lh, li);
		if (ret != 0) {
			ldi_ident_release(li);
			continue;
		}

		ret = ldi_ioctl(lh, I_PUNLINK, (intptr_t)MUXID_ALL, FKIOCTL,
		    cr, &rval);
		if (ret) {
			(void) ldi_close(lh, FREAD|FWRITE, cr);
			ldi_ident_release(li);
			continue;
		}
		(void) ldi_close(lh, FREAD|FWRITE, cr);

		/* Release the layered ident */
		ldi_ident_release(li);
	}
	crfree(cr);

	sad_freespace(ss);

	kmem_free(ss->ss_mux_nodes, sizeof (struct mux_node) * ss->ss_devcnt);
	ss->ss_mux_nodes = NULL;
}

/*
 * Free the structure; str_stack_shutdown did the other cleanup work.
 */
/* ARGSUSED */
static void
str_stack_fini(netstackid_t stackid, void *arg)
{
	str_stack_t *ss = (str_stack_t *)arg;

	kmem_free(ss, sizeof (*ss));
}
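/*
 * Illustrative sketch: how the three per-stack callbacks above are intended
 * to be registered with the netstack framework at STREAMS initialization
 * time. The enclosing function is hypothetical; the real registration is
 * done during subsystem init.
 */
static void
str_netstack_register_example(void)
{
	netstack_register(NS_STR, str_stack_init, str_stack_shutdown,
	    str_stack_fini);
}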