/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/signal.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/session.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/stropts.h>
#include <sys/poll.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/uio.h>
#include <sys/cmn_err.h>
#include <sys/priocntl.h>
#include <sys/procset.h>
#include <sys/vmem.h>
#include <sys/bitmap.h>
#include <sys/kmem.h>
#include <sys/siginfo.h>
#include <sys/vtrace.h>
#include <sys/callb.h>
#include <sys/debug.h>
#include <sys/modctl.h>
#include <sys/vmsystm.h>
#include <vm/page.h>
#include <sys/atomic.h>
#include <sys/suntpi.h>
#include <sys/strlog.h>
#include <sys/promif.h>
#include <sys/project.h>
#include <sys/vm.h>
#include <sys/taskq.h>
#include <sys/sunddi.h>
#include <sys/sunldi_impl.h>
#include <sys/strsun.h>
#include <sys/isa_defs.h>
#include <sys/multidata.h>
#include <sys/pattr.h>
#include <sys/strft.h>
#include <sys/zone.h>

#define	O_SAMESTR(q)	(((q)->q_next) && \
	(((q)->q_flag & QREADR) == ((q)->q_next->q_flag & QREADR)))

/*
 * WARNING:
 * The variables and routines in this file are private, belonging
 * to the STREAMS subsystem.  These should not be used by modules
 * or drivers.  Compatibility will not be guaranteed.
 */
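/*
 * Illustrative note (not in the original source): O_SAMESTR(q) is true when
 * q and q->q_next face the same direction, i.e. both are read queues or both
 * are write queues, which holds only while both sit inside the same stream.
 * A hypothetical walk of the write side of a stream using it:
 *
 *	queue_t *q = stp->sd_wrq;
 *	while (O_SAMESTR(q))
 *		q = q->q_next;
 *	(q now points at the last write queue of this stream)
 */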
/*
 * Id value used to distinguish between different multiplexor links.
 */
static int32_t lnk_id = 0;

#define	STREAMS_LOPRI	MINCLSYSPRI
static pri_t streams_lopri = STREAMS_LOPRI;

#define	STRSTAT(x)	(str_statistics.x.value.ui64++)
typedef struct str_stat {
	kstat_named_t	sqenables;
	kstat_named_t	stenables;
	kstat_named_t	syncqservice;
	kstat_named_t	freebs;
	kstat_named_t	qwr_outer;
	kstat_named_t	rservice;
	kstat_named_t	strwaits;
	kstat_named_t	taskqfails;
	kstat_named_t	bufcalls;
	kstat_named_t	qhelps;
	kstat_named_t	qremoved;
	kstat_named_t	sqremoved;
	kstat_named_t	bcwaits;
	kstat_named_t	sqtoomany;
} str_stat_t;

static str_stat_t str_statistics = {
	{ "sqenables",		KSTAT_DATA_UINT64 },
	{ "stenables",		KSTAT_DATA_UINT64 },
	{ "syncqservice",	KSTAT_DATA_UINT64 },
	{ "freebs",		KSTAT_DATA_UINT64 },
	{ "qwr_outer",		KSTAT_DATA_UINT64 },
	{ "rservice",		KSTAT_DATA_UINT64 },
	{ "strwaits",		KSTAT_DATA_UINT64 },
	{ "taskqfails",		KSTAT_DATA_UINT64 },
	{ "bufcalls",		KSTAT_DATA_UINT64 },
	{ "qhelps",		KSTAT_DATA_UINT64 },
	{ "qremoved",		KSTAT_DATA_UINT64 },
	{ "sqremoved",		KSTAT_DATA_UINT64 },
	{ "bcwaits",		KSTAT_DATA_UINT64 },
	{ "sqtoomany",		KSTAT_DATA_UINT64 },
};

static kstat_t *str_kstat;

/*
 * qrunflag was used previously to control background scheduling of queues.
 * It is no longer used, but is kept here in case some module still wants to
 * access it via the qready() and setqsched macros.
 */
char qrunflag;			/* Unused */

/*
 * Most of the STREAMS scheduling is done via task queues. Task queues may
 * fail for non-sleep dispatches, so there are two backup threads servicing
 * failed requests for queues and syncqs. Both of these threads also service
 * freebs requests whose dispatch failed. Queues are put in the list specified
 * by the `qhead' and `qtail' pointers, syncqs use the `sqhead' and `sqtail'
 * pointers, and freebs requests are put into `freebs_list' which has no tail
 * pointer. All three lists are protected by a single `service_queue' lock and
 * use the `services_to_run' condition variable for signaling the background
 * threads. Use of a single lock should not be a problem because it is only
 * used under heavy loads when task queues start to fail, and at that time it
 * may be a good idea to throttle scheduling requests.
 *
 * NOTE: queues and syncqs should be scheduled by two separate threads because
 * queue servicing may be blocked waiting for a syncq which may also be
 * scheduled for background execution. This may create a deadlock when only
 * one thread is used for both.
 */

static taskq_t *streams_taskq;		/* Used for most STREAMS scheduling */

static kmutex_t service_queue;		/* protects all of servicing vars */
static kcondvar_t services_to_run;	/* wake up background service thread */
static kcondvar_t syncqs_to_run;	/* wake up background service thread */

/*
 * List of queues scheduled for background processing due to lack of
 * resources in the task queues. Protected by the service_queue lock.
 */
static struct queue *qhead;
static struct queue *qtail;

/*
 * Same list for syncqs
 */
static syncq_t *sqhead;
static syncq_t *sqtail;

static mblk_t *freebs_list;	/* list of buffers to free */

/*
 * Backup threads for servicing queues and syncqs
 */
kthread_t *streams_qbkgrnd_thread;
kthread_t *streams_sqbkgrnd_thread;
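/*
 * Illustrative sketch (not in the original source) of the dispatch-with-
 * fallback pattern described above: a hypothetical scheduling path first
 * tries a non-sleeping taskq dispatch and, on failure, links the queue onto
 * the backup list and wakes the background thread:
 *
 *	if (taskq_dispatch(streams_taskq, (task_func_t *)queue_service,
 *	    q, TQ_NOSLEEP) == NULL) {
 *		mutex_enter(&service_queue);
 *		STRSTAT(taskqfails);
 *		ENQUEUE(q, qhead, qtail, q_link);
 *		cv_signal(&services_to_run);
 *		mutex_exit(&service_queue);
 *	}
 */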
/*
 * Bufcall related variables.
 */
struct bclist	strbcalls;	/* list of waiting bufcalls */
kmutex_t	strbcall_lock;	/* protects bufcall list (strbcalls) */
kcondvar_t	strbcall_cv;	/* Signaling when a bufcall is added */
kmutex_t	bcall_monitor;	/* sleep/wakeup style monitor */
kcondvar_t	bcall_cv;	/* wait until executing bufcall completes */
kthread_t	*bc_bkgrnd_thread; /* Thread to service bufcall requests */

kmutex_t	strresources;	/* protects global resources */
kmutex_t	muxifier;	/* single-threads multiplexor creation */

extern void	time_to_wait(clock_t *, clock_t);

/*
 * run_queues is no longer used, but is kept in case some third-party
 * module/driver decides to use it.
 */
int run_queues = 0;

/*
 * sq_max_size is the depth of the syncq (in number of messages) before
 * qfill_syncq() starts QFULL'ing destination queues. Although its primary
 * consumer, IP, is no longer D_MTPERMOD, other modules/drivers may depend
 * on this syncq flow control, so we prefer to choose a large number as the
 * default value. For potential performance gain, this value is tunable in
 * /etc/system.
 */
int sq_max_size = 10000;

/*
 * The number of ciputctrl structures per syncq and stream we create when
 * needed.
 */
int n_ciputctrl;
int max_n_ciputctrl = 16;
/*
 * If n_ciputctrl is < min_n_ciputctrl don't even create ciputctrl_cache.
 */
int min_n_ciputctrl = 2;

static struct mux_node *mux_nodes;	/* mux info for cycle checking */

/*
 * Per-driver/module syncqs
 * ========================
 *
 * For drivers/modules that use PERMOD or outer syncqs we keep a list of
 * perdm structures, new entries being added (and new syncqs allocated) when
 * setq() encounters a module/driver with a streamtab that it hasn't seen
 * before.
 * The reason for this mechanism is that some modules and drivers share a
 * common streamtab and it is necessary for those modules and drivers to also
 * share a common PERMOD syncq.
 *
 * perdm_list --> dm_str == streamtab_1
 *		  dm_sq == syncq_1
 *		  dm_ref
 *		  dm_next --> dm_str == streamtab_2
 *			      dm_sq == syncq_2
 *			      dm_ref
 *			      dm_next --> ... NULL
 *
 * The dm_ref field is incremented for each new driver/module that takes
 * a reference to the perdm structure and hence shares the syncq.
 * References are held in the fmodsw_impl_t structure for each STREAMS module
 * or the dev_impl array (indexed by device major number) for each driver.
 *
 * perdm_list -> [dm_ref == 1] -> [dm_ref == 2] -> [dm_ref == 1] -> NULL
 *		     ^                 ^               ^            ^
 *		     |  ______________/                |            |
 *		     | /                               |            |
 * dev_impl:    ...|x|y|...          module A        module B
 *
 * When a module/driver is unloaded the reference count is decremented and,
 * when it falls to zero, the perdm structure is removed from the list and
 * the syncq is freed (see rele_dm()).
 */
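/*
 * Illustrative sketch (not in the original source): the lookup implied by
 * the perdm mechanism above. A hypothetical search of perdm_list for an
 * entry with a matching streamtab, taking a reference on a hit:
 *
 *	perdm_t *p;
 *
 *	rw_enter(&perdm_rwlock, RW_READER);
 *	for (p = perdm_list; p != NULL; p = p->dm_next) {
 *		if (p->dm_str == str) {		(streamtab seen before)
 *			p->dm_ref++;		(share its PERMOD syncq)
 *			break;
 *		}
 *	}
 *	rw_exit(&perdm_rwlock);
 *
 * (The real hold_dm()/rele_dm() must of course make the reference-count
 * update safe against concurrent updaters.)
 */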
perdm_t *perdm_list = NULL;
static krwlock_t perdm_rwlock;
cdevsw_impl_t *devimpl;

extern struct qinit strdata;
extern struct qinit stwdata;

static void runservice(queue_t *);
static void streams_bufcall_service(void);
static void streams_qbkgrnd_service(void);
static void streams_sqbkgrnd_service(void);
static syncq_t *new_syncq(void);
static void free_syncq(syncq_t *);
static void outer_insert(syncq_t *, syncq_t *);
static void outer_remove(syncq_t *, syncq_t *);
static void write_now(syncq_t *);
static void clr_qfull(queue_t *);
static void enable_svc(queue_t *);
static void runbufcalls(void);
static void sqenable(syncq_t *);
static void sqfill_events(syncq_t *, queue_t *, mblk_t *, void (*)());
static void wait_q_syncq(queue_t *);

static void queue_service(queue_t *);
static void stream_service(stdata_t *);
static void syncq_service(syncq_t *);
static void qwriter_outer_service(syncq_t *);
static void mblk_free(mblk_t *);
#ifdef DEBUG
static int qprocsareon(queue_t *);
#endif

static void set_nfsrv_ptr(queue_t *, queue_t *, queue_t *, queue_t *);
static void reset_nfsrv_ptr(queue_t *, queue_t *);

static void sq_run_events(syncq_t *);
static int propagate_syncq(queue_t *);

static void blocksq(syncq_t *, ushort_t, int);
static void unblocksq(syncq_t *, ushort_t, int);
static int dropsq(syncq_t *, uint16_t);
static void emptysq(syncq_t *);
static sqlist_t *sqlist_alloc(struct stdata *, int);
static void sqlist_free(sqlist_t *);
static sqlist_t *sqlist_build(queue_t *, struct stdata *, boolean_t);
static void sqlist_insert(sqlist_t *, syncq_t *);
static void sqlist_insertall(sqlist_t *, queue_t *);

static void strsetuio(stdata_t *);

struct kmem_cache *stream_head_cache;
struct kmem_cache *queue_cache;
struct kmem_cache *syncq_cache;
struct kmem_cache *qband_cache;
struct kmem_cache *linkinfo_cache;
struct kmem_cache *ciputctrl_cache = NULL;

static linkinfo_t *linkinfo_list;

/*
 * Qinit structure and Module_info structures
 * for passthru read and write queues
 */

static void pass_wput(queue_t *, mblk_t *);
static queue_t *link_addpassthru(stdata_t *);
static void link_rempassthru(queue_t *);

struct module_info passthru_info = {
	0,
	"passthru",
	0,
	INFPSZ,
	STRHIGH,
	STRLOW
};

struct qinit passthru_rinit = {
	(int (*)())putnext,
	NULL,
	NULL,
	NULL,
	NULL,
	&passthru_info,
	NULL
};

struct qinit passthru_winit = {
	(int (*)()) pass_wput,
	NULL,
	NULL,
	NULL,
	NULL,
	&passthru_info,
	NULL
};

/*
 * Special form of assertion: verify that X implies Y, i.e. when X is true Y
 * should also be true.
 */
#define	IMPLY(X, Y)	ASSERT(!(X) || (Y))

/*
 * Logical equivalence. Verify that both X and Y are either TRUE or FALSE.
 */
#define	EQUIV(X, Y)	{ IMPLY(X, Y); IMPLY(Y, X); }

/*
 * Verify correctness of list head/tail pointers.
 */
#define	LISTCHECK(head, tail, link) {		\
	EQUIV(head, tail);			\
	IMPLY(tail != NULL, tail->link == NULL);	\
}
/*
 * Enqueue a list element `el' at the end of a list denoted by `head' and
 * `tail' using a `link' field.
 */
#define	ENQUEUE(el, head, tail, link) {		\
	ASSERT(el->link == NULL);		\
	LISTCHECK(head, tail, link);		\
	if (head == NULL)			\
		head = el;			\
	else					\
		tail->link = el;		\
	tail = el;				\
}

/*
 * Dequeue the first element of the list denoted by `head' and `tail'
 * pointers using a `link' field and put the result into `el'.
 */
#define	DQ(el, head, tail, link) {		\
	LISTCHECK(head, tail, link);		\
	el = head;				\
	if (head != NULL) {			\
		head = head->link;		\
		if (head == NULL)		\
			tail = NULL;		\
		el->link = NULL;		\
	}					\
}

/*
 * Remove `el' from the list using `chase' and `curr' pointers and return
 * the result in `succeed'.
 */
#define	RMQ(el, head, tail, link, chase, curr, succeed) {	\
	LISTCHECK(head, tail, link);		\
	chase = NULL;				\
	succeed = 0;				\
	for (curr = head; (curr != el) && (curr != NULL); curr = curr->link) \
		chase = curr;			\
	if (curr != NULL) {			\
		succeed = 1;			\
		ASSERT(curr == el);		\
		if (chase != NULL)		\
			chase->link = curr->link;	\
		else				\
			head = curr->link;	\
		curr->link = NULL;		\
		if (curr == tail)		\
			tail = chase;		\
	}					\
	LISTCHECK(head, tail, link);		\
}

/* Handling of delayed messages on the inner syncq. */

/*
 * DEBUG versions should use function versions (to simplify tracing) and
 * non-DEBUG kernels should use macro versions.
 */

/*
 * Put a queue on the syncq list of queues.
 * Assumes SQLOCK held.
 */
#define	SQPUT_Q(sq, qp)						\
{								\
	ASSERT(MUTEX_HELD(SQLOCK(sq)));				\
	if (!(qp->q_sqflags & Q_SQQUEUED)) {			\
		/* The queue should not be linked anywhere */	\
		ASSERT((qp->q_sqprev == NULL) && (qp->q_sqnext == NULL)); \
		/* Head and tail may only be NULL simultaneously */	\
		EQUIV(sq->sq_head, sq->sq_tail);		\
		/* A queue may only be enqueued on its own syncq */	\
		ASSERT(sq == qp->q_syncq);			\
		/* Check the correctness of SQ_MESSAGES flag */	\
		EQUIV(sq->sq_head, (sq->sq_flags & SQ_MESSAGES)); \
		/* Sanity check first/last elements of the list */	\
		IMPLY(sq->sq_head != NULL, sq->sq_head->q_sqprev == NULL);\
		IMPLY(sq->sq_tail != NULL, sq->sq_tail->q_sqnext == NULL);\
		/*						\
		 * Sanity check of priority field: an empty	\
		 * queue should have zero priority		\
		 * and nqueues equal to zero.			\
		 */						\
		IMPLY(sq->sq_head == NULL, sq->sq_pri == 0);	\
		/* Sanity check of sq_nqueues field */		\
		EQUIV(sq->sq_head, sq->sq_nqueues);		\
		if (sq->sq_head == NULL) {			\
			sq->sq_head = sq->sq_tail = qp;		\
			sq->sq_flags |= SQ_MESSAGES;		\
		} else if (qp->q_spri == 0) {			\
			qp->q_sqprev = sq->sq_tail;		\
			sq->sq_tail->q_sqnext = qp;		\
			sq->sq_tail = qp;			\
		} else {					\
			/*					\
			 * Put this queue in priority order: higher	\
			 * priority gets closer to the head.	\
			 */					\
			queue_t **qpp = &sq->sq_tail;		\
			queue_t *qnext = NULL;			\
							\
			while (*qpp != NULL && qp->q_spri > (*qpp)->q_spri) { \
				qnext = *qpp;			\
				qpp = &(*qpp)->q_sqprev;	\
			}					\
			qp->q_sqnext = qnext;			\
			qp->q_sqprev = *qpp;			\
			if (*qpp != NULL) {			\
				(*qpp)->q_sqnext = qp;		\
			} else {				\
				sq->sq_head = qp;		\
				sq->sq_pri = sq->sq_head->q_spri;	\
			}					\
			*qpp = qp;				\
		}						\
		qp->q_sqflags |= Q_SQQUEUED;			\
		qp->q_sqtstamp = lbolt;				\
		sq->sq_nqueues++;				\
	}							\
}
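/*
 * Illustrative sketch (not in the original source): a consumer loop built
 * from DQ(), in the style of the background service threads:
 *
 *	queue_t *q;
 *
 *	mutex_enter(&service_queue);
 *	for (;;) {
 *		DQ(q, qhead, qtail, q_link);
 *		if (q == NULL)
 *			break;			(list drained)
 *		mutex_exit(&service_queue);
 *		queue_service(q);		(run outside the list lock)
 *		mutex_enter(&service_queue);
 *	}
 *	mutex_exit(&service_queue);
 */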
/*
 * Remove a queue from the syncq list.
 * Assumes SQLOCK held.
 */
#define	SQRM_Q(sq, qp)						\
{								\
	ASSERT(MUTEX_HELD(SQLOCK(sq)));				\
	ASSERT(qp->q_sqflags & Q_SQQUEUED);			\
	ASSERT(sq->sq_head != NULL && sq->sq_tail != NULL);	\
	ASSERT((sq->sq_flags & SQ_MESSAGES) != 0);		\
	/* Check that the queue is actually in the list */	\
	ASSERT(qp->q_sqnext != NULL || sq->sq_tail == qp);	\
	ASSERT(qp->q_sqprev != NULL || sq->sq_head == qp);	\
	ASSERT(sq->sq_nqueues != 0);				\
	if (qp->q_sqprev == NULL) {				\
		/* First queue on list, make head q_sqnext */	\
		sq->sq_head = qp->q_sqnext;			\
	} else {						\
		/* Make prev->next == next */			\
		qp->q_sqprev->q_sqnext = qp->q_sqnext;		\
	}							\
	if (qp->q_sqnext == NULL) {				\
		/* Last queue on list, make tail sqprev */	\
		sq->sq_tail = qp->q_sqprev;			\
	} else {						\
		/* Make next->prev == prev */			\
		qp->q_sqnext->q_sqprev = qp->q_sqprev;		\
	}							\
	/* clear out references on this queue */		\
	qp->q_sqprev = qp->q_sqnext = NULL;			\
	qp->q_sqflags &= ~Q_SQQUEUED;				\
	/* If there is nothing queued, clear SQ_MESSAGES */	\
	if (sq->sq_head != NULL) {				\
		sq->sq_pri = sq->sq_head->q_spri;		\
	} else {						\
		sq->sq_flags &= ~SQ_MESSAGES;			\
		sq->sq_pri = 0;					\
	}							\
	sq->sq_nqueues--;					\
	ASSERT(sq->sq_head != NULL || sq->sq_evhead != NULL ||	\
	    (sq->sq_flags & SQ_QUEUED) == 0);			\
}

/* Hide the definition from the header file. */
#ifdef SQPUT_MP
#undef SQPUT_MP
#endif

/*
 * Put a message on the queue syncq.
 * Assumes QLOCK held.
 */
#define	SQPUT_MP(qp, mp)					\
	{							\
		ASSERT(MUTEX_HELD(QLOCK(qp)));			\
		ASSERT(qp->q_sqhead == NULL ||			\
		    (qp->q_sqtail != NULL &&			\
		    qp->q_sqtail->b_next == NULL));		\
		qp->q_syncqmsgs++;				\
		ASSERT(qp->q_syncqmsgs != 0);	/* Wraparound */	\
		if (qp->q_sqhead == NULL) {			\
			qp->q_sqhead = qp->q_sqtail = mp;	\
		} else {					\
			qp->q_sqtail->b_next = mp;		\
			qp->q_sqtail = mp;			\
		}						\
		ASSERT(qp->q_syncqmsgs > 0);			\
	}

#define	SQ_PUTCOUNT_SETFAST_LOCKED(sq) {			\
		ASSERT(MUTEX_HELD(SQLOCK(sq)));			\
		if ((sq)->sq_ciputctrl != NULL) {		\
			int i;					\
			int nlocks = (sq)->sq_nciputctrl;	\
			ciputctrl_t *cip = (sq)->sq_ciputctrl;	\
			ASSERT((sq)->sq_type & SQ_CIPUT);	\
			for (i = 0; i <= nlocks; i++) {		\
				ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock)); \
				cip[i].ciputctrl_count |= SQ_FASTPUT;	\
			}					\
		}						\
}


#define	SQ_PUTCOUNT_CLRFAST_LOCKED(sq) {			\
		ASSERT(MUTEX_HELD(SQLOCK(sq)));			\
		if ((sq)->sq_ciputctrl != NULL) {		\
			int i;					\
			int nlocks = (sq)->sq_nciputctrl;	\
			ciputctrl_t *cip = (sq)->sq_ciputctrl;	\
			ASSERT((sq)->sq_type & SQ_CIPUT);	\
			for (i = 0; i <= nlocks; i++) {		\
				ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock)); \
				cip[i].ciputctrl_count &= ~SQ_FASTPUT;	\
			}					\
		}						\
}
/*
 * Run service procedures for all queues in the stream head.
 */
#define	STR_SERVICE(stp, q) {					\
	ASSERT(MUTEX_HELD(&stp->sd_qlock));			\
	while (stp->sd_qhead != NULL) {				\
		DQ(q, stp->sd_qhead, stp->sd_qtail, q_link);	\
		ASSERT(stp->sd_nqueues > 0);			\
		stp->sd_nqueues--;				\
		ASSERT(!(q->q_flag & QINSERVICE));		\
		mutex_exit(&stp->sd_qlock);			\
		queue_service(q);				\
		mutex_enter(&stp->sd_qlock);			\
	}							\
	ASSERT(stp->sd_nqueues == 0);				\
	ASSERT((stp->sd_qhead == NULL) && (stp->sd_qtail == NULL));	\
}

/*
 * constructor/destructor routines for the stream head cache
 */
/* ARGSUSED */
static int
stream_head_constructor(void *buf, void *cdrarg, int kmflags)
{
	stdata_t *stp = buf;

	mutex_init(&stp->sd_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&stp->sd_reflock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&stp->sd_qlock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&stp->sd_monitor, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_iocmonitor, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_refmonitor, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_qcv, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_zcopy_wait, NULL, CV_DEFAULT, NULL);
	stp->sd_wrq = NULL;

	return (0);
}

/* ARGSUSED */
static void
stream_head_destructor(void *buf, void *cdrarg)
{
	stdata_t *stp = buf;

	mutex_destroy(&stp->sd_lock);
	mutex_destroy(&stp->sd_reflock);
	mutex_destroy(&stp->sd_qlock);
	cv_destroy(&stp->sd_monitor);
	cv_destroy(&stp->sd_iocmonitor);
	cv_destroy(&stp->sd_refmonitor);
	cv_destroy(&stp->sd_qcv);
	cv_destroy(&stp->sd_zcopy_wait);
}

/*
 * constructor/destructor routines for the queue cache
 */
/* ARGSUSED */
static int
queue_constructor(void *buf, void *cdrarg, int kmflags)
{
	queinfo_t *qip = buf;
	queue_t *qp = &qip->qu_rqueue;
	queue_t *wqp = &qip->qu_wqueue;
	syncq_t *sq = &qip->qu_syncq;

	qp->q_first = NULL;
	qp->q_link = NULL;
	qp->q_count = 0;
	qp->q_mblkcnt = 0;
	qp->q_sqhead = NULL;
	qp->q_sqtail = NULL;
	qp->q_sqnext = NULL;
	qp->q_sqprev = NULL;
	qp->q_sqflags = 0;
	qp->q_rwcnt = 0;
	qp->q_spri = 0;

	mutex_init(QLOCK(qp), NULL, MUTEX_DEFAULT, NULL);
	cv_init(&qp->q_wait, NULL, CV_DEFAULT, NULL);

	wqp->q_first = NULL;
	wqp->q_link = NULL;
	wqp->q_count = 0;
	wqp->q_mblkcnt = 0;
	wqp->q_sqhead = NULL;
	wqp->q_sqtail = NULL;
	wqp->q_sqnext = NULL;
	wqp->q_sqprev = NULL;
	wqp->q_sqflags = 0;
	wqp->q_rwcnt = 0;
	wqp->q_spri = 0;

	mutex_init(QLOCK(wqp), NULL, MUTEX_DEFAULT, NULL);
	cv_init(&wqp->q_wait, NULL, CV_DEFAULT, NULL);

	sq->sq_head = NULL;
	sq->sq_tail = NULL;
	sq->sq_evhead = NULL;
	sq->sq_evtail = NULL;
	sq->sq_callbpend = NULL;
	sq->sq_outer = NULL;
	sq->sq_onext = NULL;
	sq->sq_oprev = NULL;
	sq->sq_next = NULL;
	sq->sq_svcflags = 0;
	sq->sq_servcount = 0;
	sq->sq_needexcl = 0;
	sq->sq_nqueues = 0;
	sq->sq_pri = 0;

	mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
	cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);

	return (0);
}
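/*
 * Note (not in the original source): kmem cache constructors run once per
 * object when the cache grows, not on every kmem_cache_alloc(), so the locks
 * and condition variables initialized above survive alloc/free cycles. The
 * matching destructors, and the ASSERTs in them, therefore rely on every
 * object being freed back to the cache in its pristine constructed state.
 */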
/* ARGSUSED */
static void
queue_destructor(void *buf, void *cdrarg)
{
	queinfo_t *qip = buf;
	queue_t *qp = &qip->qu_rqueue;
	queue_t *wqp = &qip->qu_wqueue;
	syncq_t *sq = &qip->qu_syncq;

	ASSERT(qp->q_sqhead == NULL);
	ASSERT(wqp->q_sqhead == NULL);
	ASSERT(qp->q_sqnext == NULL);
	ASSERT(wqp->q_sqnext == NULL);
	ASSERT(qp->q_rwcnt == 0);
	ASSERT(wqp->q_rwcnt == 0);

	mutex_destroy(&qp->q_lock);
	cv_destroy(&qp->q_wait);

	mutex_destroy(&wqp->q_lock);
	cv_destroy(&wqp->q_wait);

	mutex_destroy(&sq->sq_lock);
	cv_destroy(&sq->sq_wait);
	cv_destroy(&sq->sq_exitwait);
}

/*
 * constructor/destructor routines for the syncq cache
 */
/* ARGSUSED */
static int
syncq_constructor(void *buf, void *cdrarg, int kmflags)
{
	syncq_t *sq = buf;

	bzero(buf, sizeof (syncq_t));

	mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
	cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);

	return (0);
}

/* ARGSUSED */
static void
syncq_destructor(void *buf, void *cdrarg)
{
	syncq_t *sq = buf;

	ASSERT(sq->sq_head == NULL);
	ASSERT(sq->sq_tail == NULL);
	ASSERT(sq->sq_evhead == NULL);
	ASSERT(sq->sq_evtail == NULL);
	ASSERT(sq->sq_callbpend == NULL);
	ASSERT(sq->sq_callbflags == 0);
	ASSERT(sq->sq_outer == NULL);
	ASSERT(sq->sq_onext == NULL);
	ASSERT(sq->sq_oprev == NULL);
	ASSERT(sq->sq_next == NULL);
	ASSERT(sq->sq_needexcl == 0);
	ASSERT(sq->sq_svcflags == 0);
	ASSERT(sq->sq_servcount == 0);
	ASSERT(sq->sq_nqueues == 0);
	ASSERT(sq->sq_pri == 0);
	ASSERT(sq->sq_count == 0);
	ASSERT(sq->sq_rmqcount == 0);
	ASSERT(sq->sq_cancelid == 0);
	ASSERT(sq->sq_ciputctrl == NULL);
	ASSERT(sq->sq_nciputctrl == 0);
	ASSERT(sq->sq_type == 0);
	ASSERT(sq->sq_flags == 0);

	mutex_destroy(&sq->sq_lock);
	cv_destroy(&sq->sq_wait);
	cv_destroy(&sq->sq_exitwait);
}

/* ARGSUSED */
static int
ciputctrl_constructor(void *buf, void *cdrarg, int kmflags)
{
	ciputctrl_t *cip = buf;
	int i;

	for (i = 0; i < n_ciputctrl; i++) {
		cip[i].ciputctrl_count = SQ_FASTPUT;
		mutex_init(&cip[i].ciputctrl_lock, NULL, MUTEX_DEFAULT, NULL);
	}

	return (0);
}

/* ARGSUSED */
static void
ciputctrl_destructor(void *buf, void *cdrarg)
{
	ciputctrl_t *cip = buf;
	int i;

	for (i = 0; i < n_ciputctrl; i++) {
		ASSERT(cip[i].ciputctrl_count & SQ_FASTPUT);
		mutex_destroy(&cip[i].ciputctrl_lock);
	}
}

/*
 * Init routine run from main at boot time.
 */
void
strinit(void)
{
	int i;
	int ncpus = ((boot_max_ncpus == -1) ? max_ncpus : boot_max_ncpus);
	/*
	 * Set up mux_node structures.
	 */
	mux_nodes = kmem_zalloc((sizeof (struct mux_node) * devcnt), KM_SLEEP);
	for (i = 0; i < devcnt; i++)
		mux_nodes[i].mn_imaj = i;

	stream_head_cache = kmem_cache_create("stream_head_cache",
	    sizeof (stdata_t), 0,
	    stream_head_constructor, stream_head_destructor, NULL,
	    NULL, NULL, 0);

	queue_cache = kmem_cache_create("queue_cache", sizeof (queinfo_t), 0,
	    queue_constructor, queue_destructor, NULL, NULL, NULL, 0);

	syncq_cache = kmem_cache_create("syncq_cache", sizeof (syncq_t), 0,
	    syncq_constructor, syncq_destructor, NULL, NULL, NULL, 0);

	qband_cache = kmem_cache_create("qband_cache",
	    sizeof (qband_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	linkinfo_cache = kmem_cache_create("linkinfo_cache",
	    sizeof (linkinfo_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	n_ciputctrl = ncpus;
	n_ciputctrl = 1 << highbit(n_ciputctrl - 1);
	ASSERT(n_ciputctrl >= 1);
	n_ciputctrl = MIN(n_ciputctrl, max_n_ciputctrl);
	if (n_ciputctrl >= min_n_ciputctrl) {
		ciputctrl_cache = kmem_cache_create("ciputctrl_cache",
		    sizeof (ciputctrl_t) * n_ciputctrl,
		    sizeof (ciputctrl_t), ciputctrl_constructor,
		    ciputctrl_destructor, NULL, NULL, NULL, 0);
	}

	streams_taskq = system_taskq;

	if (streams_taskq == NULL)
		panic("strinit: no memory for streams taskq!");

	bc_bkgrnd_thread = thread_create(NULL, 0,
	    streams_bufcall_service, NULL, 0, &p0, TS_RUN, streams_lopri);

	streams_qbkgrnd_thread = thread_create(NULL, 0,
	    streams_qbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);

	streams_sqbkgrnd_thread = thread_create(NULL, 0,
	    streams_sqbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);

	/*
	 * Create STREAMS kstats.
	 */
	str_kstat = kstat_create("streams", 0, "strstat",
	    "net", KSTAT_TYPE_NAMED,
	    sizeof (str_statistics) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (str_kstat != NULL) {
		str_kstat->ks_data = &str_statistics;
		kstat_install(str_kstat);
	}

	/*
	 * TPI support routine initialisation.
	 */
	tpi_init();
}

void
str_sendsig(vnode_t *vp, int event, uchar_t band, int error)
{
	struct stdata *stp;

	ASSERT(vp->v_stream);
	stp = vp->v_stream;
	/* Have to hold sd_lock to prevent siglist from changing */
	mutex_enter(&stp->sd_lock);
	if (stp->sd_sigflags & event)
		strsendsig(stp->sd_siglist, event, band, error);
	mutex_exit(&stp->sd_lock);
}
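/*
 * Illustrative note (not in the original source): stream head routines are
 * the typical callers, e.g. on arrival of an ordinary message a read-side
 * path might issue
 *
 *	str_sendsig(vp, S_INPUT | S_RDNORM, (uchar_t)mp->b_band, 0);
 *
 * The sd_sigflags test above is a cheap aggregate filter; strsendsig()
 * below still matches each registration's ss_events individually.
 */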
/*
 * Send the "sevent" set of signals to a process.
 * This might send more than one signal if the process is registered
 * for multiple events. The caller should pass in an sevent that only
 * includes the events for which the process has registered.
 */
static void
dosendsig(proc_t *proc, int events, int sevent, k_siginfo_t *info,
    uchar_t band, int error)
{
	ASSERT(MUTEX_HELD(&proc->p_lock));

	info->si_band = 0;
	info->si_errno = 0;

	if (sevent & S_ERROR) {
		sevent &= ~S_ERROR;
		info->si_code = POLL_ERR;
		info->si_errno = error;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_errno = 0;
	}
	if (sevent & S_HANGUP) {
		sevent &= ~S_HANGUP;
		info->si_code = POLL_HUP;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
	}
	if (sevent & S_HIPRI) {
		sevent &= ~S_HIPRI;
		info->si_code = POLL_PRI;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
	}
	if (sevent & S_RDBAND) {
		sevent &= ~S_RDBAND;
		if (events & S_BANDURG)
			sigtoproc(proc, NULL, SIGURG);
		else
			sigtoproc(proc, NULL, SIGPOLL);
	}
	if (sevent & S_WRBAND) {
		sevent &= ~S_WRBAND;
		sigtoproc(proc, NULL, SIGPOLL);
	}
	if (sevent & S_INPUT) {
		sevent &= ~S_INPUT;
		info->si_code = POLL_IN;
		info->si_band = band;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_band = 0;
	}
	if (sevent & S_OUTPUT) {
		sevent &= ~S_OUTPUT;
		info->si_code = POLL_OUT;
		info->si_band = band;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_band = 0;
	}
	if (sevent & S_MSG) {
		sevent &= ~S_MSG;
		info->si_code = POLL_MSG;
		info->si_band = band;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_band = 0;
	}
	if (sevent & S_RDNORM) {
		sevent &= ~S_RDNORM;
		sigtoproc(proc, NULL, SIGPOLL);
	}
	if (sevent != 0) {
		panic("strsendsig: unknown event(s) %x", sevent);
	}
}
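/*
 * Summary (not in the original source) of the event-to-signal mapping in
 * dosendsig() above: S_ERROR, S_HANGUP, S_HIPRI, S_INPUT, S_OUTPUT and
 * S_MSG are queued via sigaddq() as SIGPOLL with si_code POLL_ERR,
 * POLL_HUP, POLL_PRI, POLL_IN, POLL_OUT and POLL_MSG respectively;
 * S_RDBAND sends SIGURG if the process registered with S_BANDURG and
 * SIGPOLL otherwise; S_WRBAND and S_RDNORM send a plain SIGPOLL with no
 * accompanying siginfo code.
 */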
/*
 * Send SIGPOLL/SIGURG signal to all processes and process groups
 * registered on the given signal list that want a signal for at
 * least one of the specified events.
 *
 * Must be called with exclusive access to siglist (caller holding sd_lock).
 *
 * strioctl(I_SETSIG/I_ESETSIG) will only change siglist when holding
 * sd_lock and the ioctl code maintains a PID_HOLD on the pid structure
 * while it is in the siglist.
 *
 * For performance reasons (MP scalability) the code drops pidlock
 * when sending signals to a single process.
 * When sending to a process group the code holds
 * pidlock to prevent the membership in the process group from changing
 * while walking the p_pglink list.
 */
void
strsendsig(strsig_t *siglist, int event, uchar_t band, int error)
{
	strsig_t *ssp;
	k_siginfo_t info;
	struct pid *pidp;
	proc_t *proc;

	info.si_signo = SIGPOLL;
	info.si_errno = 0;
	for (ssp = siglist; ssp; ssp = ssp->ss_next) {
		int sevent;

		sevent = ssp->ss_events & event;
		if (sevent == 0)
			continue;

		if ((pidp = ssp->ss_pidp) == NULL) {
			/* pid was released but still on event list */
			continue;
		}


		if (ssp->ss_pid > 0) {
			/*
			 * XXX This unfortunately still generates
			 * a signal when a fd is closed but
			 * the proc is active.
			 */
			ASSERT(ssp->ss_pid == pidp->pid_id);

			mutex_enter(&pidlock);
			proc = prfind_zone(pidp->pid_id, ALL_ZONES);
			if (proc == NULL) {
				mutex_exit(&pidlock);
				continue;
			}
			mutex_enter(&proc->p_lock);
			mutex_exit(&pidlock);
			dosendsig(proc, ssp->ss_events, sevent, &info,
			    band, error);
			mutex_exit(&proc->p_lock);
		} else {
			/*
			 * Send to process group. Hold pidlock across
			 * calls to dosendsig().
			 */
			pid_t pgrp = -ssp->ss_pid;

			mutex_enter(&pidlock);
			proc = pgfind_zone(pgrp, ALL_ZONES);
			while (proc != NULL) {
				mutex_enter(&proc->p_lock);
				dosendsig(proc, ssp->ss_events, sevent,
				    &info, band, error);
				mutex_exit(&proc->p_lock);
				proc = proc->p_pglink;
			}
			mutex_exit(&pidlock);
		}
	}
}
/*
 * Attach a stream device or module.
 * qp is a read queue; the new queue goes in so its next
 * read ptr is the argument, and the write queue corresponding
 * to the argument points to this queue. Return 0 on success,
 * or a non-zero errno on failure.
 */
int
qattach(queue_t *qp, dev_t *devp, int oflag, cred_t *crp, fmodsw_impl_t *fp,
    boolean_t is_insert)
{
	major_t			major;
	cdevsw_impl_t		*dp;
	struct streamtab	*str;
	queue_t			*rq;
	queue_t			*wrq;
	uint32_t		qflag;
	uint32_t		sqtype;
	perdm_t			*dmp;
	int			error;
	int			sflag;

	rq = allocq();
	wrq = _WR(rq);
	STREAM(rq) = STREAM(wrq) = STREAM(qp);

	if (fp != NULL) {
		str = fp->f_str;
		qflag = fp->f_qflag;
		sqtype = fp->f_sqtype;
		dmp = fp->f_dmp;
		IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL);
		sflag = MODOPEN;

		/*
		 * Stash away a pointer to the module structure so we can
		 * unref it in qdetach.
		 */
		rq->q_fp = fp;
	} else {
		ASSERT(!is_insert);

		major = getmajor(*devp);
		dp = &devimpl[major];

		str = dp->d_str;
		ASSERT(str == STREAMSTAB(major));

		qflag = dp->d_qflag;
		ASSERT(qflag & QISDRV);
		sqtype = dp->d_sqtype;

		/* create perdm_t if needed */
		if (NEED_DM(dp->d_dmp, qflag))
			dp->d_dmp = hold_dm(str, qflag, sqtype);

		dmp = dp->d_dmp;
		sflag = 0;
	}

	TRACE_2(TR_FAC_STREAMS_FR, TR_QATTACH_FLAGS,
	    "qattach:qflag == %X(%X)", qflag, *devp);

	/* setq might sleep in allocator - avoid holding locks. */
	setq(rq, str->st_rdinit, str->st_wrinit, dmp, qflag, sqtype, B_FALSE);

	/*
	 * Before calling the module's open routine, set up the q_next
	 * pointer for inserting a module in the middle of a stream.
	 *
	 * Note that we could always set _QINSERTING and set up the q_next
	 * pointer for both inserting and pushing a module. Then there
	 * would be no need for the is_insert parameter. In insertq(), called
	 * by qprocson(), assume that q_next of the new module always points
	 * to the correct queue and use it for insertion. Everything should
	 * work out fine. But in the first release of _I_INSERT, we
	 * distinguish between inserting and pushing to make sure that
	 * pushing a module follows the same code path as before.
	 */
	if (is_insert) {
		rq->q_flag |= _QINSERTING;
		rq->q_next = qp;
	}

	/*
	 * If there is an outer perimeter get exclusive access during
	 * the open procedure. Bump up the reference count on the queue.
	 */
	entersq(rq->q_syncq, SQ_OPENCLOSE);
	error = (*rq->q_qinfo->qi_qopen)(rq, devp, oflag, sflag, crp);
	if (error != 0)
		goto failed;
	leavesq(rq->q_syncq, SQ_OPENCLOSE);
	ASSERT(qprocsareon(rq));
	return (0);

failed:
	rq->q_flag &= ~_QINSERTING;
	if (backq(wrq) != NULL && backq(wrq)->q_next == wrq)
		qprocsoff(rq);
	leavesq(rq->q_syncq, SQ_OPENCLOSE);
	rq->q_next = wrq->q_next = NULL;
	qdetach(rq, 0, 0, crp, B_FALSE);
	return (error);
}

/*
 * Handle second open of stream. For modules, set the
 * last argument to MODOPEN and do not pass any open flags.
 * Ignore dummydev since this is not the first open.
 */
int
qreopen(queue_t *qp, dev_t *devp, int flag, cred_t *crp)
{
	int error;
	dev_t dummydev;
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);
	entersq(qp->q_syncq, SQ_OPENCLOSE);

	dummydev = *devp;
	if (error = ((*qp->q_qinfo->qi_qopen)(qp, &dummydev,
	    (wqp->q_next ? 0 : flag), (wqp->q_next ? MODOPEN : 0), crp))) {
		leavesq(qp->q_syncq, SQ_OPENCLOSE);
		mutex_enter(&STREAM(qp)->sd_lock);
		qp->q_stream->sd_flag |= STREOPENFAIL;
		mutex_exit(&STREAM(qp)->sd_lock);
		return (error);
	}
	leavesq(qp->q_syncq, SQ_OPENCLOSE);

	/*
	 * A successful open should have done qprocson().
	 */
	ASSERT(qprocsareon(_RD(qp)));
	return (0);
}
/*
 * Detach a stream module or device.
 * If clmode == 1 then the module or driver was opened and its
 * close routine must be called. If clmode == 0, the module
 * or driver was never opened or the open failed, and so its close
 * should not be called.
 */
void
qdetach(queue_t *qp, int clmode, int flag, cred_t *crp, boolean_t is_remove)
{
	queue_t *wqp = _WR(qp);
	ASSERT(STREAM(qp)->sd_flag & (STRCLOSE|STWOPEN|STRPLUMB));

	if (STREAM_NEEDSERVICE(STREAM(qp)))
		stream_runservice(STREAM(qp));

	if (clmode) {
		/*
		 * Make sure that all the messages on the write side syncq are
		 * processed and nothing is left. Since we are closing, no new
		 * messages may appear there.
		 */
		wait_q_syncq(wqp);

		entersq(qp->q_syncq, SQ_OPENCLOSE);
		if (is_remove) {
			mutex_enter(QLOCK(qp));
			qp->q_flag |= _QREMOVING;
			mutex_exit(QLOCK(qp));
		}
		(*qp->q_qinfo->qi_qclose)(qp, flag, crp);
		/*
		 * Check that qprocsoff() was actually called.
		 */
		ASSERT((qp->q_flag & QWCLOSE) && (wqp->q_flag & QWCLOSE));

		leavesq(qp->q_syncq, SQ_OPENCLOSE);
	} else {
		disable_svc(qp);
	}

	/*
	 * Allow any threads blocked in entersq to proceed and discover
	 * that QWCLOSE is set.
	 * Note: This assumes that all users of entersq check QWCLOSE.
	 * Currently runservice is the only entersq that can happen
	 * after removeq has finished.
	 * Removeq will have discarded all messages destined to the closing
	 * pair of queues from the syncq.
	 * NOTE: Calling a function inside an assert is unconventional.
	 * However, it does not cause any problem since flush_syncq() does
	 * not change any state except when it returns non-zero, i.e.
	 * when the assert will trigger.
	 */
	ASSERT(flush_syncq(qp->q_syncq, qp) == 0);
	ASSERT(flush_syncq(wqp->q_syncq, wqp) == 0);
	ASSERT((qp->q_flag & QPERMOD) ||
	    ((qp->q_syncq->sq_head == NULL) &&
	    (wqp->q_syncq->sq_head == NULL)));

	/*
	 * Flush the queues before q_next is set to NULL. This is needed
	 * in order to backenable any downstream queue before we go away.
	 * Note: we are already removed from the stream so that the
	 * backenabling will not cause any messages to be delivered to our
	 * put procedures.
	 */
	flushq(qp, FLUSHALL);
	flushq(wqp, FLUSHALL);

	/*
	 * Wait for any pending service processing to complete.
	 */
	wait_svc(qp);

	/* Tidy up - removeq only does a half-remove from stream */
	qp->q_next = wqp->q_next = NULL;
	ASSERT(!(qp->q_flag & QENAB));
	ASSERT(!(wqp->q_flag & QENAB));

	/* release any fmodsw_impl_t structure held on behalf of the queue */

	ASSERT(qp->q_fp != NULL || qp->q_flag & QISDRV);
	if (qp->q_fp != NULL)
		fmodsw_rele(qp->q_fp);

	/* freeq removes us from the outer perimeter if any */
	freeq(qp);
}

/* Prevent service procedures from being called */
void
disable_svc(queue_t *qp)
{
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);
	mutex_enter(QLOCK(qp));
	qp->q_flag |= QWCLOSE;
	mutex_exit(QLOCK(qp));
	mutex_enter(QLOCK(wqp));
	wqp->q_flag |= QWCLOSE;
	mutex_exit(QLOCK(wqp));
}

/* Allow service procedures to be called again */
void
enable_svc(queue_t *qp)
{
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);
	mutex_enter(QLOCK(qp));
	qp->q_flag &= ~QWCLOSE;
	mutex_exit(QLOCK(qp));
	mutex_enter(QLOCK(wqp));
	wqp->q_flag &= ~QWCLOSE;
	mutex_exit(QLOCK(wqp));
}

/*
 * Remove queue from qhead/qtail if it is enabled.
 * Only reset QENAB if the queue was removed from the runlist.
 * A queue goes through the following stages:
 *	It is on the service list and QENAB is set.
 *	It is removed from the service list but QENAB is still set.
 *	QENAB gets changed to QINSERVICE.
 *	QINSERVICE is reset (when the service procedure is done).
 * Thus we cannot reset QENAB unless we actually removed the queue from the
 * service list.
 */
void
remove_runlist(queue_t *qp)
{
	if (qp->q_flag & QENAB && qhead != NULL) {
		queue_t *q_chase;
		queue_t *q_curr;
		int removed;

		mutex_enter(&service_queue);
		RMQ(qp, qhead, qtail, q_link, q_chase, q_curr, removed);
		mutex_exit(&service_queue);
		if (removed) {
			STRSTAT(qremoved);
			qp->q_flag &= ~QENAB;
		}
	}
}
/*
 * Wait for any pending service processing to complete.
 * The removal of queues from the runlist is not atomic with the
 * clearing of the QENAB flag and the setting of the QINSERVICE flag.
 * Consequently it is possible for remove_runlist in strclose
 * to not find the queue on the runlist, yet for the queue to be QENAB
 * and not yet QINSERVICE -> hence wait_svc needs to check QENAB
 * as well as QINSERVICE.
 */
void
wait_svc(queue_t *qp)
{
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);

	/*
	 * Try to remove queues from qhead/qtail list.
	 */
	if (qhead != NULL) {
		remove_runlist(qp);
		remove_runlist(wqp);
	}
	/*
	 * Wait until the syncqs associated with the queue disappear from the
	 * background processing list.
	 * This only needs to be done for non-PERMOD perimeters since
	 * for PERMOD perimeters the syncq may be shared and will only be
	 * freed when the last module/driver is unloaded.
	 * If for PERMOD perimeters the queue was on the syncq list, removeq()
	 * should call propagate_syncq() or drain_syncq() for it. Both of
	 * these functions remove the queue from its syncq list, so sqthread
	 * will not try to access the queue.
	 */
	if (!(qp->q_flag & QPERMOD)) {
		syncq_t *rsq = qp->q_syncq;
		syncq_t *wsq = wqp->q_syncq;

		/*
		 * Disable rsq and wsq and wait for any background processing
		 * of the syncqs to complete.
		 */
		wait_sq_svc(rsq);
		if (wsq != rsq)
			wait_sq_svc(wsq);
	}

	mutex_enter(QLOCK(qp));
	while (qp->q_flag & (QINSERVICE|QENAB))
		cv_wait(&qp->q_wait, QLOCK(qp));
	mutex_exit(QLOCK(qp));
	mutex_enter(QLOCK(wqp));
	while (wqp->q_flag & (QINSERVICE|QENAB))
		cv_wait(&wqp->q_wait, QLOCK(wqp));
	mutex_exit(QLOCK(wqp));
}

/*
 * Put ioctl data from userland buffer `arg' into the mblk chain `bp'.
 * `flag' must always contain either K_TO_K or U_TO_K; STR_NOSIG may
 * also be set, and is passed through to allocb_cred_wait().
 *
 * Returns errno on failure, zero on success.
 */
int
putiocd(mblk_t *bp, char *arg, int flag, cred_t *cr)
{
	mblk_t *tmp;
	ssize_t count;
	size_t n;
	int error = 0;

	ASSERT((flag & (U_TO_K | K_TO_K)) == U_TO_K ||
	    (flag & (U_TO_K | K_TO_K)) == K_TO_K);

	if (bp->b_datap->db_type == M_IOCTL) {
		count = ((struct iocblk *)bp->b_rptr)->ioc_count;
	} else {
		ASSERT(bp->b_datap->db_type == M_COPYIN);
		count = ((struct copyreq *)bp->b_rptr)->cq_size;
	}
	/*
	 * strdoioctl validates ioc_count, so if this assert fails it
	 * cannot be due to user error.
	 */
	ASSERT(count >= 0);

	while (count > 0) {
		n = MIN(MAXIOCBSZ, count);
		if ((tmp = allocb_cred_wait(n, (flag & STR_NOSIG), &error,
		    cr)) == NULL) {
			return (error);
		}
		error = strcopyin(arg, tmp->b_wptr, n, flag & (U_TO_K|K_TO_K));
		if (error != 0) {
			freeb(tmp);
			return (error);
		}
		arg += n;
		DB_CPID(tmp) = curproc->p_pid;
		tmp->b_wptr += n;
		count -= n;
		bp = (bp->b_cont = tmp);
	}

	return (0);
}
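/*
 * Illustrative note (not in the original source): strdoioctl() is the
 * natural caller of putiocd(). Given an M_IOCTL message whose iocblk
 * advertises ioc_count bytes of user data at `arg', a call such as
 *
 *	error = putiocd(bp, arg, U_TO_K | STR_NOSIG, crp);
 *
 * grows bp->b_cont with MAXIOCBSZ-sized mblks until ioc_count bytes have
 * been copied in, and getiocd() below performs the mirror-image copyout.
 */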
/*
 * Copy ioctl data to userland. Return non-zero errno on failure,
 * 0 for success.
 */
int
getiocd(mblk_t *bp, char *arg, int copymode)
{
	ssize_t count;
	size_t n;
	int error;

	if (bp->b_datap->db_type == M_IOCACK)
		count = ((struct iocblk *)bp->b_rptr)->ioc_count;
	else {
		ASSERT(bp->b_datap->db_type == M_COPYOUT);
		count = ((struct copyreq *)bp->b_rptr)->cq_size;
	}
	ASSERT(count >= 0);

	for (bp = bp->b_cont; bp && count;
	    count -= n, bp = bp->b_cont, arg += n) {
		n = MIN(count, bp->b_wptr - bp->b_rptr);
		error = strcopyout(bp->b_rptr, arg, n, copymode);
		if (error)
			return (error);
	}
	ASSERT(count == 0);
	return (0);
}

/*
 * Allocate a linkinfo entry given the write queue of the
 * bottom module of the top stream and the write queue of the
 * stream head of the bottom stream.
 */
linkinfo_t *
alloclink(queue_t *qup, queue_t *qdown, file_t *fpdown)
{
	linkinfo_t *linkp;

	linkp = kmem_cache_alloc(linkinfo_cache, KM_SLEEP);

	linkp->li_lblk.l_qtop = qup;
	linkp->li_lblk.l_qbot = qdown;
	linkp->li_fpdown = fpdown;

	mutex_enter(&strresources);
	linkp->li_next = linkinfo_list;
	linkp->li_prev = NULL;
	if (linkp->li_next)
		linkp->li_next->li_prev = linkp;
	linkinfo_list = linkp;
	linkp->li_lblk.l_index = ++lnk_id;
	ASSERT(lnk_id != 0);	/* this should never wrap in practice */
	mutex_exit(&strresources);

	return (linkp);
}

/*
 * Free a linkinfo entry.
 */
void
lbfree(linkinfo_t *linkp)
{
	mutex_enter(&strresources);
	if (linkp->li_next)
		linkp->li_next->li_prev = linkp->li_prev;
	if (linkp->li_prev)
		linkp->li_prev->li_next = linkp->li_next;
	else
		linkinfo_list = linkp->li_next;
	mutex_exit(&strresources);

	kmem_cache_free(linkinfo_cache, linkp);
}

/*
 * Check for a potential linking cycle.
 * Return 1 if a link will result in a cycle,
 * and 0 otherwise.
 */
int
linkcycle(stdata_t *upstp, stdata_t *lostp)
{
	struct mux_node *np;
	struct mux_edge *ep;
	int i;
	major_t lomaj;
	major_t upmaj;
	/*
	 * If the lower stream is a pipe/FIFO, return, since link
	 * cycles cannot happen on pipes/FIFOs.
	 */
	if (lostp->sd_vnode->v_type == VFIFO)
		return (0);

	for (i = 0; i < devcnt; i++) {
		np = &mux_nodes[i];
		MUX_CLEAR(np);
	}
	lomaj = getmajor(lostp->sd_vnode->v_rdev);
	upmaj = getmajor(upstp->sd_vnode->v_rdev);
	np = &mux_nodes[lomaj];
	for (;;) {
		if (!MUX_DIDVISIT(np)) {
			if (np->mn_imaj == upmaj)
				return (1);
			if (np->mn_outp == NULL) {
				MUX_VISIT(np);
				if (np->mn_originp == NULL)
					return (0);
				np = np->mn_originp;
				continue;
			}
			MUX_VISIT(np);
			np->mn_startp = np->mn_outp;
		} else {
			if (np->mn_startp == NULL) {
				if (np->mn_originp == NULL)
					return (0);
				else {
					np = np->mn_originp;
					continue;
				}
			}
			/*
			 * If ep->me_nodep is a FIFO (me_nodep == NULL),
			 * ignore the edge and move on. ep->me_nodep gets
			 * set to NULL in mux_addedge() if it is a FIFO.
			 */
			ep = np->mn_startp;
			np->mn_startp = ep->me_nextp;
			if (ep->me_nodep == NULL)
				continue;
			ep->me_nodep->mn_originp = np;
			np = ep->me_nodep;
		}
	}
}
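/*
 * Note (not in the original source): linkcycle() above is an iterative
 * depth-first search over the mux_node graph. MUX_VISIT marks a node
 * visited, mn_startp serves as the per-node cursor over its outgoing edge
 * list (mn_outp), and mn_originp records the DFS parent so the walk can
 * backtrack without an explicit stack. A cycle would be created exactly
 * when the upper stream's major number is reachable from the lower
 * stream's, hence the (np->mn_imaj == upmaj) test.
 */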
/*
 * Find the linkinfo entry corresponding to the parameters.
 */
linkinfo_t *
findlinks(stdata_t *stp, int index, int type)
{
	linkinfo_t *linkp;
	struct mux_edge *mep;
	struct mux_node *mnp;
	queue_t *qup;

	mutex_enter(&strresources);
	if ((type & LINKTYPEMASK) == LINKNORMAL) {
		qup = getendq(stp->sd_wrq);
		for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
			if ((qup == linkp->li_lblk.l_qtop) &&
			    (!index || (index == linkp->li_lblk.l_index))) {
				mutex_exit(&strresources);
				return (linkp);
			}
		}
	} else {
		ASSERT((type & LINKTYPEMASK) == LINKPERSIST);
		mnp = &mux_nodes[getmajor(stp->sd_vnode->v_rdev)];
		mep = mnp->mn_outp;
		while (mep) {
			if ((index == 0) || (index == mep->me_muxid))
				break;
			mep = mep->me_nextp;
		}
		if (!mep) {
			mutex_exit(&strresources);
			return (NULL);
		}
		for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
			if ((!linkp->li_lblk.l_qtop) &&
			    (mep->me_muxid == linkp->li_lblk.l_index)) {
				mutex_exit(&strresources);
				return (linkp);
			}
		}
	}
	mutex_exit(&strresources);
	return (NULL);
}

/*
 * Given a queue ptr, follow the chain of q_next pointers until you reach the
 * last queue on the chain and return it.
 */
queue_t *
getendq(queue_t *q)
{
	ASSERT(q != NULL);
	while (_SAMESTR(q))
		q = q->q_next;
	return (q);
}

/*
 * Wait for the syncq count to drop to zero.
 * sq could be either outer or inner.
 */
static void
wait_syncq(syncq_t *sq)
{
	uint16_t count;

	mutex_enter(SQLOCK(sq));
	count = sq->sq_count;
	SQ_PUTLOCKS_ENTER(sq);
	SUM_SQ_PUTCOUNTS(sq, count);
	while (count != 0) {
		sq->sq_flags |= SQ_WANTWAKEUP;
		SQ_PUTLOCKS_EXIT(sq);
		cv_wait(&sq->sq_wait, SQLOCK(sq));
		count = sq->sq_count;
		SQ_PUTLOCKS_ENTER(sq);
		SUM_SQ_PUTCOUNTS(sq, count);
	}
	SQ_PUTLOCKS_EXIT(sq);
	mutex_exit(SQLOCK(sq));
}
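/*
 * Note (not in the original source): sq_count alone undercounts when the
 * syncq uses per-CPU ciputctrl put counters, so wait_syncq() above folds
 * those counters in with SUM_SQ_PUTCOUNTS() while holding all the put
 * locks, and re-sums after every wakeup before concluding that the syncq
 * has actually drained.
 */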
/*
 * Wait while there are any messages for the queue in its syncq.
 */
static void
wait_q_syncq(queue_t *q)
{
	if ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) {
		syncq_t *sq = q->q_syncq;

		mutex_enter(SQLOCK(sq));
		while ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) {
			sq->sq_flags |= SQ_WANTWAKEUP;
			cv_wait(&sq->sq_wait, SQLOCK(sq));
		}
		mutex_exit(SQLOCK(sq));
	}
}
int
mlink_file(vnode_t *vp, int cmd, struct file *fpdown, cred_t *crp, int *rvalp,
    int lhlink)
{
	struct stdata *stp;
	struct strioctl strioc;
	struct linkinfo *linkp;
	struct stdata *stpdown;
	struct streamtab *str;
	queue_t *passq;
	syncq_t *passyncq;
	queue_t *rq;
	cdevsw_impl_t *dp;
	uint32_t qflag;
	uint32_t sqtype;
	perdm_t *dmp;
	int error = 0;

	stp = vp->v_stream;
	TRACE_1(TR_FAC_STREAMS_FR,
	    TR_I_LINK, "I_LINK/I_PLINK:stp %p", stp);
	/*
	 * Test for invalid upper stream
	 */
	if (stp->sd_flag & STRHUP) {
		return (ENXIO);
	}
	if (vp->v_type == VFIFO) {
		return (EINVAL);
	}
	if (stp->sd_strtab == NULL) {
		return (EINVAL);
	}
	if (!stp->sd_strtab->st_muxwinit) {
		return (EINVAL);
	}
	if (fpdown == NULL) {
		return (EBADF);
	}
	if (getmajor(stp->sd_vnode->v_rdev) >= devcnt) {
		return (EINVAL);
	}
	mutex_enter(&muxifier);
	if (stp->sd_flag & STPLEX) {
		mutex_exit(&muxifier);
		return (ENXIO);
	}

	/*
	 * Test for invalid lower stream.
	 * The check for the v_type != VFIFO and having a major
	 * number not >= devcnt is done to avoid problems with
	 * adding mux_node entries past the end of mux_nodes[].
	 * For FIFOs we don't add an entry so this isn't a
	 * problem.
	 */
	if (((stpdown = fpdown->f_vnode->v_stream) == NULL) ||
	    (stpdown == stp) || (stpdown->sd_flag &
	    (STPLEX|STRHUP|STRDERR|STWRERR|IOCWAIT|STRPLUMB)) ||
	    ((stpdown->sd_vnode->v_type != VFIFO) &&
	    (getmajor(stpdown->sd_vnode->v_rdev) >= devcnt)) ||
	    linkcycle(stp, stpdown)) {
		mutex_exit(&muxifier);
		return (EINVAL);
	}
	TRACE_1(TR_FAC_STREAMS_FR,
	    TR_STPDOWN, "stpdown:%p", stpdown);
	rq = getendq(stp->sd_wrq);
	if (cmd == I_PLINK)
		rq = NULL;

	linkp = alloclink(rq, stpdown->sd_wrq, fpdown);

	strioc.ic_cmd = cmd;
	strioc.ic_timout = INFTIM;
	strioc.ic_len = sizeof (struct linkblk);
	strioc.ic_dp = (char *)&linkp->li_lblk;

	/*
	 * STRPLUMB protects plumbing changes and should be set before
	 * link_addpassthru()/link_rempassthru() are called, so it is set here
	 * and cleared at the end of mlink when the passthru queue is removed.
	 * Setting of STRPLUMB prevents reopens of the stream while the
	 * passthru queue is in place (it is not a proper module and doesn't
	 * have an open entry point).
	 *
	 * STPLEX prevents any threads from entering the stream from above. It
	 * can't be set before the call to link_addpassthru() because putnext
	 * from below may cause stream head I/O routines to be called and
	 * these routines assert that STPLEX is not set. After
	 * link_addpassthru() nothing may come from below since the pass queue
	 * syncq is blocked.
	 * Note also that STPLEX should be cleared before the call to
	 * link_rempassthru() since when messages start flowing to the stream
	 * head (e.g. because of message propagation from the pass queue)
	 * stream head I/O routines may be called with the STPLEX flag set.
	 *
	 * When STPLEX is set, nothing may come into the stream from above and
	 * it is safe to do a setq which will change the stream head. So, the
	 * correct sequence of actions is:
	 *
	 *	1) Set STRPLUMB
	 *	2) Call link_addpassthru()
	 *	3) Set STPLEX
	 *	4) Call setq and update the stream state
	 *	5) Clear STPLEX
	 *	6) Call link_rempassthru()
	 *	7) Clear STRPLUMB
	 *
	 * The same sequence applies to munlink() code.
	 */
	mutex_enter(&stpdown->sd_lock);
	stpdown->sd_flag |= STRPLUMB;
	mutex_exit(&stpdown->sd_lock);
	/*
	 * Add passthru queue below lower mux. This will block
	 * the syncqs of the lower mux's read queue during I_LINK/I_UNLINK.
	 */
	passq = link_addpassthru(stpdown);

	mutex_enter(&stpdown->sd_lock);
	stpdown->sd_flag |= STPLEX;
	mutex_exit(&stpdown->sd_lock);

	rq = _RD(stpdown->sd_wrq);
	/*
	 * There may be messages in the streamhead's syncq due to messages
	 * that arrived before link_addpassthru() was done. To avoid
	 * background processing of the syncq happening simultaneously with
	 * setq processing, we disable the streamhead syncq and wait until
	 * the existing background thread finishes working on it.
	 */
	wait_sq_svc(rq->q_syncq);
	passyncq = passq->q_syncq;
	if (!(passyncq->sq_flags & SQ_BLOCKED))
		blocksq(passyncq, SQ_BLOCKED, 0);

	ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE);
	ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq));
	rq->q_ptr = _WR(rq)->q_ptr = NULL;

	/* setq might sleep in allocator - avoid holding locks. */
	/* Note: we are holding muxifier here. */

	str = stp->sd_strtab;
	dp = &devimpl[getmajor(vp->v_rdev)];
	ASSERT(dp->d_str == str);

	qflag = dp->d_qflag;
	sqtype = dp->d_sqtype;

	/* create perdm_t if needed */
	if (NEED_DM(dp->d_dmp, qflag))
		dp->d_dmp = hold_dm(str, qflag, sqtype);

	dmp = dp->d_dmp;

	setq(rq, str->st_muxrinit, str->st_muxwinit, dmp, qflag, sqtype,
	    B_TRUE);

	/*
	 * XXX Remove any "odd" messages from the queue.
	 * Keep only M_DATA, M_PROTO, M_PCPROTO.
	 */
	error = strdoioctl(stp, &strioc, FNATIVE,
	    K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp);
	if (error != 0) {
		lbfree(linkp);

		if (!(passyncq->sq_flags & SQ_BLOCKED))
			blocksq(passyncq, SQ_BLOCKED, 0);
		/*
		 * Restore the stream head queue and then remove
		 * the passq. Turn off STPLEX before we turn on
		 * the stream by removing the passq.
		 */
		rq->q_ptr = _WR(rq)->q_ptr = stpdown;
		setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO,
		    B_TRUE);

		mutex_enter(&stpdown->sd_lock);
		stpdown->sd_flag &= ~STPLEX;
		mutex_exit(&stpdown->sd_lock);

		link_rempassthru(passq);

		mutex_enter(&stpdown->sd_lock);
		stpdown->sd_flag &= ~STRPLUMB;
		/* Wakeup anyone waiting for STRPLUMB to clear. */
		cv_broadcast(&stpdown->sd_monitor);
		mutex_exit(&stpdown->sd_lock);

		mutex_exit(&muxifier);
		return (error);
	}
	mutex_enter(&fpdown->f_tlock);
	fpdown->f_count++;
	mutex_exit(&fpdown->f_tlock);

	/*
	 * If we've made it here the linkage is all set up so we should also
	 * set up the layered driver linkages.
	 */

	ASSERT((cmd == I_LINK) || (cmd == I_PLINK));
	if (cmd == I_LINK) {
		ldi_mlink_fp(stp, fpdown, lhlink, LINKNORMAL);
	} else {
		ldi_mlink_fp(stp, fpdown, lhlink, LINKPERSIST);
	}

	link_rempassthru(passq);

	mux_addedge(stp, stpdown, linkp->li_lblk.l_index);

	/*
	 * Mark the upper stream as having dependent links
	 * so that strclose can clean it up.
	 */
	if (cmd == I_LINK) {
		mutex_enter(&stp->sd_lock);
		stp->sd_flag |= STRHASLINKS;
		mutex_exit(&stp->sd_lock);
	}
	/*
	 * Wake up any other processes that may have been
	 * waiting on the lower stream. These will all
	 * error out.
	 */
	mutex_enter(&stpdown->sd_lock);
	/* The passthru module is removed so we may release STRPLUMB */
	stpdown->sd_flag &= ~STRPLUMB;
	cv_broadcast(&rq->q_wait);
	cv_broadcast(&_WR(rq)->q_wait);
	cv_broadcast(&stpdown->sd_monitor);
	mutex_exit(&stpdown->sd_lock);
	mutex_exit(&muxifier);
	*rvalp = linkp->li_lblk.l_index;
	return (0);
}

int
mlink(vnode_t *vp, int cmd, int arg, cred_t *crp, int *rvalp, int lhlink)
{
	int ret;
	struct file *fpdown;

	fpdown = getf(arg);
	ret = mlink_file(vp, cmd, fpdown, crp, rvalp, lhlink);
	if (fpdown != NULL)
		releasef(arg);
	return (ret);
}

/*
 * Unlink a multiplexor link. Stp is the controlling stream for the
 * link, and linkp points to the link's entry in the linkinfo list.
 * The muxifier lock must be held on entry and is dropped on exit.
 *
 * NOTE: Currently it is assumed that the mux would process all the messages
 * sitting on its queue before ACKing the UNLINK. It is the responsibility
 * of the mux to handle all the messages that arrive before UNLINK.
 * If the mux has to send down messages on its lower stream before
 * ACKing I_UNLINK, then it *should* know to handle messages even
 * after the UNLINK is acked (actually it should be able to handle them
 * until we re-block the read side of the pass queue here). If the mux does
 * not open up the lower stream, any messages that arrive during UNLINK
 * will be put in the stream head. In the case of the lower stream opening
 * up, some messages might land in the stream head depending on when
 * the message arrived and when the read side of the pass queue was
 * re-blocked.
 */
int
munlink(stdata_t *stp, linkinfo_t *linkp, int flag, cred_t *crp, int *rvalp)
{
	struct strioctl strioc;
	struct stdata *stpdown;
	queue_t *rq, *wrq;
	queue_t	*passq;
	syncq_t *passyncq;
	int error = 0;
	file_t *fpdown;

	ASSERT(MUTEX_HELD(&muxifier));

	stpdown = linkp->li_fpdown->f_vnode->v_stream;

	/*
	 * See the comment in mlink() concerning STRPLUMB/STPLEX flags.
	 */
	mutex_enter(&stpdown->sd_lock);
	stpdown->sd_flag |= STRPLUMB;
	mutex_exit(&stpdown->sd_lock);
2026 */ 2027 passq = link_addpassthru(stpdown); 2028 2029 if ((flag & LINKTYPEMASK) == LINKNORMAL) 2030 strioc.ic_cmd = I_UNLINK; 2031 else 2032 strioc.ic_cmd = I_PUNLINK; 2033 strioc.ic_timout = INFTIM; 2034 strioc.ic_len = sizeof (struct linkblk); 2035 strioc.ic_dp = (char *)&linkp->li_lblk; 2036 2037 error = strdoioctl(stp, &strioc, FNATIVE, 2038 K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp); 2039 2040 /* 2041 * If there was an error and this is not called via strclose, 2042 * return to the user. Otherwise, pretend there was no error 2043 * and close the link. 2044 */ 2045 if (error) { 2046 if (flag & LINKCLOSE) { 2047 cmn_err(CE_WARN, "KERNEL: munlink: could not perform " 2048 "unlink ioctl, closing anyway (%d)\n", error); 2049 } else { 2050 link_rempassthru(passq); 2051 mutex_enter(&stpdown->sd_lock); 2052 stpdown->sd_flag &= ~STRPLUMB; 2053 cv_broadcast(&stpdown->sd_monitor); 2054 mutex_exit(&stpdown->sd_lock); 2055 mutex_exit(&muxifier); 2056 return (error); 2057 } 2058 } 2059 2060 mux_rmvedge(stp, linkp->li_lblk.l_index); 2061 fpdown = linkp->li_fpdown; 2062 lbfree(linkp); 2063 2064 /* 2065 * We go ahead and drop muxifier here--it's a nasty global lock that 2066 * can slow others down. It's okay to do so since attempts to mlink() this 2067 * stream will be stopped because STPLEX is still set in the stdata 2068 * structure, and munlink() is stopped because mux_rmvedge() and 2069 * lbfree() have removed it from mux_nodes[] and linkinfo_list, 2070 * respectively. Note that we defer the closef() of fpdown until 2071 * after we drop muxifier since strclose() can call munlinkall(). 2072 */ 2073 mutex_exit(&muxifier); 2074 2075 wrq = stpdown->sd_wrq; 2076 rq = _RD(wrq); 2077 2078 /* 2079 * Get rid of outstanding service procedure runs, before we make 2080 * it a stream head, since a stream head doesn't have any service 2081 * procedure. 2082 */ 2083 disable_svc(rq); 2084 wait_svc(rq); 2085 2086 /* 2087 * Since we don't disable the syncq for QPERMOD, we wait for whatever 2088 * is queued up to be finished. The mux should take care that nothing is 2089 * sent down to this queue. We should do this now, as we are about to block 2090 * passyncq if it was unblocked. 2091 */ 2092 if (wrq->q_flag & QPERMOD) { 2093 syncq_t *sq = wrq->q_syncq; 2094 2095 mutex_enter(SQLOCK(sq)); 2096 while (wrq->q_sqflags & Q_SQQUEUED) { 2097 sq->sq_flags |= SQ_WANTWAKEUP; 2098 cv_wait(&sq->sq_wait, SQLOCK(sq)); 2099 } 2100 mutex_exit(SQLOCK(sq)); 2101 } 2102 passyncq = passq->q_syncq; 2103 if (!(passyncq->sq_flags & SQ_BLOCKED)) { 2104 2105 syncq_t *sq, *outer; 2106 2107 /* 2108 * Messages could be flowing from underneath. We will 2109 * block the read side of the passq. This would be 2110 * sufficient for QPAIR and QPERQ muxes to ensure 2111 * that no data is flowing up into this queue 2112 * and hence no thread is active in this instance of 2113 * the lower mux. But for QPERMOD and QMTOUTPERIM there 2114 * could be messages on the inner and outer/inner 2115 * syncqs respectively. We will wait for them to drain. 2116 * Because the passq is blocked, messages end up in the syncq, 2117 * and qfill_syncq() could possibly end up setting QFULL, 2118 * which accesses rq->q_flag. Hence, we have to 2119 * acquire the QLOCK in setq. 2120 * 2121 * XXX Messages can also flow into this queue from the top 2122 * even though the unlink is over (e.g. some instance 2123 * of putnext() called from the top that has still not 2124 * accessed this queue, and also putq(lowerq)?). 2125 * Solution: how about blocking the l_qtop queue? 2126 * Do we really care about such pure D_MP muxes? 2127 */ 2128 2129 blocksq(passyncq, SQ_BLOCKED, 0); 2130 2131 sq = rq->q_syncq; 2132 if ((outer = sq->sq_outer) != NULL) { 2133 2134 /* 2135 * We just have to wait for the outer sq_count 2136 * to drop to zero. As this does not prevent new 2137 * messages from entering the outer perimeter, this 2138 * is subject to starvation. 2139 * 2140 * NOTE: Because of the blocksq above, messages could 2141 * be in the inner syncq only because some 2142 * thread is holding the outer perimeter exclusively. 2143 * Hence it would be sufficient to wait for the 2144 * exclusive holder of the outer perimeter to drain 2145 * the inner and outer syncqs. But we do not depend 2146 * on this behavior and hence check the inner syncqs 2147 * separately. 2148 */ 2149 wait_syncq(outer); 2150 } 2151 2152 2153 /* 2154 * There could be messages destined for 2155 * this queue. Let the exclusive holder 2156 * drain it. 2157 */ 2158 2159 wait_syncq(sq); 2160 ASSERT((rq->q_flag & QPERMOD) || 2161 ((rq->q_syncq->sq_head == NULL) && 2162 (_WR(rq)->q_syncq->sq_head == NULL))); 2163 } 2164 2165 /* 2166 * We haven't taken care of the QPERMOD case yet. QPERMOD is a special 2167 * case, as we don't disable its syncq or remove it from the syncq 2168 * service list. 2169 */ 2170 if (rq->q_flag & QPERMOD) { 2171 syncq_t *sq = rq->q_syncq; 2172 2173 mutex_enter(SQLOCK(sq)); 2174 while (rq->q_sqflags & Q_SQQUEUED) { 2175 sq->sq_flags |= SQ_WANTWAKEUP; 2176 cv_wait(&sq->sq_wait, SQLOCK(sq)); 2177 } 2178 mutex_exit(SQLOCK(sq)); 2179 } 2180 2181 /* 2182 * flush_syncq() changes state only when there are messages to 2183 * free, i.e. when it returns a non-zero value. 2184 */ 2185 ASSERT(flush_syncq(rq->q_syncq, rq) == 0); 2186 ASSERT(flush_syncq(wrq->q_syncq, wrq) == 0); 2187 2188 /* 2189 * Nobody else should know about this queue now. 2190 * If the mux did not process the messages before 2191 * acking the I_UNLINK, free them now. 2192 */ 2193 2194 flushq(rq, FLUSHALL); 2195 flushq(_WR(rq), FLUSHALL); 2196 2197 /* 2198 * Convert the mux lower queue into a stream head queue. 2199 * Turn off STPLEX before we turn on the stream by removing the passq. 2200 */ 2201 rq->q_ptr = wrq->q_ptr = stpdown; 2202 setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO, B_TRUE); 2203 2204 ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE); 2205 ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq)); 2206 2207 enable_svc(rq); 2208 2209 /* 2210 * Now it is a proper stream, so STPLEX is cleared. But STRPLUMB still 2211 * needs to be set to prevent a reopen() of the stream - such a reopen may 2212 * try to call the non-existent pass queue open routine and panic. 2213 */ 2214 mutex_enter(&stpdown->sd_lock); 2215 stpdown->sd_flag &= ~STPLEX; 2216 mutex_exit(&stpdown->sd_lock); 2217 2218 ASSERT(((flag & LINKTYPEMASK) == LINKNORMAL) || 2219 ((flag & LINKTYPEMASK) == LINKPERSIST)); 2220 2221 /* clean up the layered driver linkages */ 2222 if ((flag & LINKTYPEMASK) == LINKNORMAL) { 2223 ldi_munlink_fp(stp, fpdown, LINKNORMAL); 2224 } else { 2225 ldi_munlink_fp(stp, fpdown, LINKPERSIST); 2226 } 2227 2228 link_rempassthru(passq); 2229 2230 /* 2231 * Now all plumbing changes are finished and STRPLUMB is no 2232 * longer needed. 2233 */ 2234 mutex_enter(&stpdown->sd_lock); 2235 stpdown->sd_flag &= ~STRPLUMB; 2236 cv_broadcast(&stpdown->sd_monitor); 2237 mutex_exit(&stpdown->sd_lock); 2238 2239 (void) closef(fpdown); 2240 return (0); 2241 } 2242 2243 /* 2244 * Unlink all multiplexor links for which stp is the controlling stream.
2245 * Return 0, or a non-zero errno on failure. 2246 */ 2247 int 2248 munlinkall(stdata_t *stp, int flag, cred_t *crp, int *rvalp) 2249 { 2250 linkinfo_t *linkp; 2251 int error = 0; 2252 2253 mutex_enter(&muxifier); 2254 while (linkp = findlinks(stp, 0, flag)) { 2255 /* 2256 * munlink() releases the muxifier lock. 2257 */ 2258 if (error = munlink(stp, linkp, flag, crp, rvalp)) 2259 return (error); 2260 mutex_enter(&muxifier); 2261 } 2262 mutex_exit(&muxifier); 2263 return (0); 2264 } 2265 2266 /* 2267 * A multiplexor link has been made. Add an 2268 * edge to the directed graph. 2269 */ 2270 void 2271 mux_addedge(stdata_t *upstp, stdata_t *lostp, int muxid) 2272 { 2273 struct mux_node *np; 2274 struct mux_edge *ep; 2275 major_t upmaj; 2276 major_t lomaj; 2277 2278 upmaj = getmajor(upstp->sd_vnode->v_rdev); 2279 lomaj = getmajor(lostp->sd_vnode->v_rdev); 2280 np = &mux_nodes[upmaj]; 2281 if (np->mn_outp) { 2282 ep = np->mn_outp; 2283 while (ep->me_nextp) 2284 ep = ep->me_nextp; 2285 ep->me_nextp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP); 2286 ep = ep->me_nextp; 2287 } else { 2288 np->mn_outp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP); 2289 ep = np->mn_outp; 2290 } 2291 ep->me_nextp = NULL; 2292 ep->me_muxid = muxid; 2293 if (lostp->sd_vnode->v_type == VFIFO) 2294 ep->me_nodep = NULL; 2295 else 2296 ep->me_nodep = &mux_nodes[lomaj]; 2297 } 2298 2299 /* 2300 * A multiplexor link has been removed. Remove the 2301 * edge in the directed graph. 2302 */ 2303 void 2304 mux_rmvedge(stdata_t *upstp, int muxid) 2305 { 2306 struct mux_node *np; 2307 struct mux_edge *ep; 2308 struct mux_edge *pep = NULL; 2309 major_t upmaj; 2310 2311 upmaj = getmajor(upstp->sd_vnode->v_rdev); 2312 np = &mux_nodes[upmaj]; 2313 ASSERT(np->mn_outp != NULL); 2314 ep = np->mn_outp; 2315 while (ep) { 2316 if (ep->me_muxid == muxid) { 2317 if (pep) 2318 pep->me_nextp = ep->me_nextp; 2319 else 2320 np->mn_outp = ep->me_nextp; 2321 kmem_free(ep, sizeof (struct mux_edge)); 2322 return; 2323 } 2324 pep = ep; 2325 ep = ep->me_nextp; 2326 } 2327 ASSERT(0); /* should not reach here */ 2328 } 2329 2330 /* 2331 * Translate the device flags (from conf.h) to the corresponding 2332 * qflag and sq_flag (type) values. 2333 */ 2334 int 2335 devflg_to_qflag(struct streamtab *stp, uint32_t devflag, uint32_t *qflagp, 2336 uint32_t *sqtypep) 2337 { 2338 uint32_t qflag = 0; 2339 uint32_t sqtype = 0; 2340 2341 if (devflag & _D_OLD) 2342 goto bad; 2343 2344 /* Inner perimeter presence and scope */ 2345 switch (devflag & D_MTINNER_MASK) { 2346 case D_MP: 2347 qflag |= QMTSAFE; 2348 sqtype |= SQ_CI; 2349 break; 2350 case D_MTPERQ|D_MP: 2351 qflag |= QPERQ; 2352 break; 2353 case D_MTQPAIR|D_MP: 2354 qflag |= QPAIR; 2355 break; 2356 case D_MTPERMOD|D_MP: 2357 qflag |= QPERMOD; 2358 break; 2359 default: 2360 goto bad; 2361 } 2362 2363 /* Outer perimeter */ 2364 if (devflag & D_MTOUTPERIM) { 2365 switch (devflag & D_MTINNER_MASK) { 2366 case D_MP: 2367 case D_MTPERQ|D_MP: 2368 case D_MTQPAIR|D_MP: 2369 break; 2370 default: 2371 goto bad; 2372 } 2373 qflag |= QMTOUTPERIM; 2374 } 2375 2376 /* Inner perimeter modifiers */ 2377 if (devflag & D_MTINNER_MOD) { 2378 switch (devflag & D_MTINNER_MASK) { 2379 case D_MP: 2380 goto bad; 2381 default: 2382 break; 2383 } 2384 if (devflag & D_MTPUTSHARED) 2385 sqtype |= SQ_CIPUT; 2386 if (devflag & _D_MTOCSHARED) { 2387 /* 2388 * The code in putnext assumes that it has the 2389 * highest concurrency by not checking sq_count. 2390 * Thus _D_MTOCSHARED can only be supported when 2391 * D_MTPUTSHARED is set. 
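* For example (an illustrative flag combination, not one taken from a real driver): D_MTPERQ|D_MP|D_MTPUTSHARED|_D_MTOCSHARED maps to qflag QPERQ and sqtype SQ_CIPUT|SQ_CIOC (SQ_CO is added further below), while _D_MTOCSHARED without D_MTPUTSHARED is rejected just below.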
2392 */ 2393 if (!(devflag & D_MTPUTSHARED)) 2394 goto bad; 2395 sqtype |= SQ_CIOC; 2396 } 2397 if (devflag & _D_MTCBSHARED) { 2398 /* 2399 * The code in putnext assumes that it has the 2400 * highest concurrency by not checking sq_count. 2401 * Thus _D_MTCBSHARED can only be supported when 2402 * D_MTPUTSHARED is set. 2403 */ 2404 if (!(devflag & D_MTPUTSHARED)) 2405 goto bad; 2406 sqtype |= SQ_CICB; 2407 } 2408 if (devflag & _D_MTSVCSHARED) { 2409 /* 2410 * The code in putnext assumes that it has the 2411 * highest concurrency by not checking sq_count. 2412 * Thus _D_MTSVCSHARED can only be supported when 2413 * D_MTPUTSHARED is set. Also _D_MTSVCSHARED is 2414 * supported only for QPERMOD. 2415 */ 2416 if (!(devflag & D_MTPUTSHARED) || !(qflag & QPERMOD)) 2417 goto bad; 2418 sqtype |= SQ_CISVC; 2419 } 2420 } 2421 2422 /* Default outer perimeter concurrency */ 2423 sqtype |= SQ_CO; 2424 2425 /* Outer perimeter modifiers */ 2426 if (devflag & D_MTOCEXCL) { 2427 if (!(devflag & D_MTOUTPERIM)) { 2428 /* No outer perimeter */ 2429 goto bad; 2430 } 2431 sqtype &= ~SQ_COOC; 2432 } 2433 2434 /* Synchronous Streams extended qinit structure */ 2435 if (devflag & D_SYNCSTR) 2436 qflag |= QSYNCSTR; 2437 2438 *qflagp = qflag; 2439 *sqtypep = sqtype; 2440 return (0); 2441 2442 bad: 2443 cmn_err(CE_WARN, 2444 "stropen: bad MT flags (0x%x) in driver '%s'", 2445 (int)(qflag & D_MTSAFETY_MASK), 2446 stp->st_rdinit->qi_minfo->mi_idname); 2447 2448 return (EINVAL); 2449 } 2450 2451 /* 2452 * Set the interface values for a pair of queues (qinit structure, 2453 * packet sizes, water marks). 2454 * setq assumes that the caller does not have a claim (entersq or claimq) 2455 * on the queue. 2456 */ 2457 void 2458 setq(queue_t *rq, struct qinit *rinit, struct qinit *winit, 2459 perdm_t *dmp, uint32_t qflag, uint32_t sqtype, boolean_t lock_needed) 2460 { 2461 queue_t *wq; 2462 syncq_t *sq, *outer; 2463 2464 ASSERT(rq->q_flag & QREADR); 2465 ASSERT((qflag & QMT_TYPEMASK) != 0); 2466 IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL); 2467 2468 wq = _WR(rq); 2469 rq->q_qinfo = rinit; 2470 rq->q_hiwat = rinit->qi_minfo->mi_hiwat; 2471 rq->q_lowat = rinit->qi_minfo->mi_lowat; 2472 rq->q_minpsz = rinit->qi_minfo->mi_minpsz; 2473 rq->q_maxpsz = rinit->qi_minfo->mi_maxpsz; 2474 wq->q_qinfo = winit; 2475 wq->q_hiwat = winit->qi_minfo->mi_hiwat; 2476 wq->q_lowat = winit->qi_minfo->mi_lowat; 2477 wq->q_minpsz = winit->qi_minfo->mi_minpsz; 2478 wq->q_maxpsz = winit->qi_minfo->mi_maxpsz; 2479 2480 /* Remove old syncqs */ 2481 sq = rq->q_syncq; 2482 outer = sq->sq_outer; 2483 if (outer != NULL) { 2484 ASSERT(wq->q_syncq->sq_outer == outer); 2485 outer_remove(outer, rq->q_syncq); 2486 if (wq->q_syncq != rq->q_syncq) 2487 outer_remove(outer, wq->q_syncq); 2488 } 2489 ASSERT(sq->sq_outer == NULL); 2490 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL); 2491 2492 if (sq != SQ(rq)) { 2493 if (!(rq->q_flag & QPERMOD)) 2494 free_syncq(sq); 2495 if (wq->q_syncq == rq->q_syncq) 2496 wq->q_syncq = NULL; 2497 rq->q_syncq = NULL; 2498 } 2499 if (wq->q_syncq != NULL && wq->q_syncq != sq && 2500 wq->q_syncq != SQ(rq)) { 2501 free_syncq(wq->q_syncq); 2502 wq->q_syncq = NULL; 2503 } 2504 ASSERT(rq->q_syncq == NULL || (rq->q_syncq->sq_head == NULL && 2505 rq->q_syncq->sq_tail == NULL)); 2506 ASSERT(wq->q_syncq == NULL || (wq->q_syncq->sq_head == NULL && 2507 wq->q_syncq->sq_tail == NULL)); 2508 2509 if (!(rq->q_flag & QPERMOD) && 2510 rq->q_syncq != NULL && rq->q_syncq->sq_ciputctrl != NULL) { 2511 ASSERT(rq->q_syncq->sq_nciputctrl == 
n_ciputctrl - 1); 2512 SUMCHECK_CIPUTCTRL_COUNTS(rq->q_syncq->sq_ciputctrl, 2513 rq->q_syncq->sq_nciputctrl, 0); 2514 ASSERT(ciputctrl_cache != NULL); 2515 kmem_cache_free(ciputctrl_cache, rq->q_syncq->sq_ciputctrl); 2516 rq->q_syncq->sq_ciputctrl = NULL; 2517 rq->q_syncq->sq_nciputctrl = 0; 2518 } 2519 2520 if (!(wq->q_flag & QPERMOD) && 2521 wq->q_syncq != NULL && wq->q_syncq->sq_ciputctrl != NULL) { 2522 ASSERT(wq->q_syncq->sq_nciputctrl == n_ciputctrl - 1); 2523 SUMCHECK_CIPUTCTRL_COUNTS(wq->q_syncq->sq_ciputctrl, 2524 wq->q_syncq->sq_nciputctrl, 0); 2525 ASSERT(ciputctrl_cache != NULL); 2526 kmem_cache_free(ciputctrl_cache, wq->q_syncq->sq_ciputctrl); 2527 wq->q_syncq->sq_ciputctrl = NULL; 2528 wq->q_syncq->sq_nciputctrl = 0; 2529 } 2530 2531 sq = SQ(rq); 2532 ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL); 2533 ASSERT(sq->sq_outer == NULL); 2534 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL); 2535 2536 /* 2537 * Create syncqs based on qflag and sqtype. Set the SQ_TYPES_IN_FLAGS 2538 * bits in sq_flag based on the sqtype. 2539 */ 2540 ASSERT((sq->sq_flags & ~SQ_TYPES_IN_FLAGS) == 0); 2541 2542 rq->q_syncq = wq->q_syncq = sq; 2543 sq->sq_type = sqtype; 2544 sq->sq_flags = (sqtype & SQ_TYPES_IN_FLAGS); 2545 2546 /* 2547 * We are making sq_svcflags zero, 2548 * resetting SQ_DISABLED in case it was set by 2549 * wait_svc() in the munlink path. 2550 * 2551 */ 2552 ASSERT((sq->sq_svcflags & SQ_SERVICE) == 0); 2553 sq->sq_svcflags = 0; 2554 2555 /* 2556 * We need to acquire the lock here for the mlink and munlink case, 2557 * where canputnext, backenable, etc can access the q_flag. 2558 */ 2559 if (lock_needed) { 2560 mutex_enter(QLOCK(rq)); 2561 rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2562 mutex_exit(QLOCK(rq)); 2563 mutex_enter(QLOCK(wq)); 2564 wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2565 mutex_exit(QLOCK(wq)); 2566 } else { 2567 rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2568 wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2569 } 2570 2571 if (qflag & QPERQ) { 2572 /* Allocate a separate syncq for the write side */ 2573 sq = new_syncq(); 2574 sq->sq_type = rq->q_syncq->sq_type; 2575 sq->sq_flags = rq->q_syncq->sq_flags; 2576 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL && 2577 sq->sq_oprev == NULL); 2578 wq->q_syncq = sq; 2579 } 2580 if (qflag & QPERMOD) { 2581 sq = dmp->dm_sq; 2582 2583 /* 2584 * Assert that we do have an inner perimeter syncq and that it 2585 * does not have an outer perimeter associated with it. 2586 */ 2587 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL && 2588 sq->sq_oprev == NULL); 2589 rq->q_syncq = wq->q_syncq = sq; 2590 } 2591 if (qflag & QMTOUTPERIM) { 2592 outer = dmp->dm_sq; 2593 2594 ASSERT(outer->sq_outer == NULL); 2595 outer_insert(outer, rq->q_syncq); 2596 if (wq->q_syncq != rq->q_syncq) 2597 outer_insert(outer, wq->q_syncq); 2598 } 2599 ASSERT((rq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) == 2600 (rq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS)); 2601 ASSERT((wq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) == 2602 (wq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS)); 2603 ASSERT((rq->q_flag & QMT_TYPEMASK) == (qflag & QMT_TYPEMASK)); 2604 2605 /* 2606 * Initialize struio() types. 2607 */ 2608 rq->q_struiot = 2609 (rq->q_flag & QSYNCSTR) ? rinit->qi_struiot : STRUIOT_NONE; 2610 wq->q_struiot = 2611 (wq->q_flag & QSYNCSTR) ? 
winit->qi_struiot : STRUIOT_NONE; 2612 } 2613 2614 perdm_t * 2615 hold_dm(struct streamtab *str, uint32_t qflag, uint32_t sqtype) 2616 { 2617 syncq_t *sq; 2618 perdm_t **pp; 2619 perdm_t *p; 2620 perdm_t *dmp; 2621 2622 ASSERT(str != NULL); 2623 ASSERT(qflag & (QPERMOD | QMTOUTPERIM)); 2624 2625 rw_enter(&perdm_rwlock, RW_READER); 2626 for (p = perdm_list; p != NULL; p = p->dm_next) { 2627 if (p->dm_str == str) { /* found one */ 2628 atomic_add_32(&(p->dm_ref), 1); 2629 rw_exit(&perdm_rwlock); 2630 return (p); 2631 } 2632 } 2633 rw_exit(&perdm_rwlock); 2634 2635 sq = new_syncq(); 2636 if (qflag & QPERMOD) { 2637 sq->sq_type = sqtype | SQ_PERMOD; 2638 sq->sq_flags = sqtype & SQ_TYPES_IN_FLAGS; 2639 } else { 2640 ASSERT(qflag & QMTOUTPERIM); 2641 sq->sq_onext = sq->sq_oprev = sq; 2642 } 2643 2644 dmp = kmem_alloc(sizeof (perdm_t), KM_SLEEP); 2645 dmp->dm_sq = sq; 2646 dmp->dm_str = str; 2647 dmp->dm_ref = 1; 2648 dmp->dm_next = NULL; 2649 2650 rw_enter(&perdm_rwlock, RW_WRITER); 2651 for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next)) { 2652 if (p->dm_str == str) { /* already present */ 2653 p->dm_ref++; 2654 rw_exit(&perdm_rwlock); 2655 free_syncq(sq); 2656 kmem_free(dmp, sizeof (perdm_t)); 2657 return (p); 2658 } 2659 } 2660 2661 *pp = dmp; 2662 rw_exit(&perdm_rwlock); 2663 return (dmp); 2664 } 2665 2666 void 2667 rele_dm(perdm_t *dmp) 2668 { 2669 perdm_t **pp; 2670 perdm_t *p; 2671 2672 rw_enter(&perdm_rwlock, RW_WRITER); 2673 ASSERT(dmp->dm_ref > 0); 2674 2675 if (--dmp->dm_ref > 0) { 2676 rw_exit(&perdm_rwlock); 2677 return; 2678 } 2679 2680 for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next)) 2681 if (p == dmp) 2682 break; 2683 ASSERT(p == dmp); 2684 *pp = p->dm_next; 2685 rw_exit(&perdm_rwlock); 2686 2687 /* 2688 * Wait for any background processing that relies on the 2689 * syncq to complete before it is freed. 2690 */ 2691 wait_sq_svc(p->dm_sq); 2692 free_syncq(p->dm_sq); 2693 kmem_free(p, sizeof (perdm_t)); 2694 } 2695 2696 /* 2697 * Make a protocol message given control and data buffers. 2698 * n.b., this can block; be careful of what locks you hold when calling it. 2699 * 2700 * If sd_maxblk is less than *iosize this routine can fail part way through 2701 * (due to an allocation failure). In this case on return *iosize will contain 2702 * the amount that was consumed. Otherwise *iosize will not be modified 2703 * i.e. it will contain the amount that was consumed. 2704 */ 2705 int 2706 strmakemsg( 2707 struct strbuf *mctl, 2708 ssize_t *iosize, 2709 struct uio *uiop, 2710 stdata_t *stp, 2711 int32_t flag, 2712 mblk_t **mpp) 2713 { 2714 mblk_t *mpctl = NULL; 2715 mblk_t *mpdata = NULL; 2716 int error; 2717 2718 ASSERT(uiop != NULL); 2719 2720 *mpp = NULL; 2721 /* Create control part, if any */ 2722 if ((mctl != NULL) && (mctl->len >= 0)) { 2723 error = strmakectl(mctl, flag, uiop->uio_fmode, &mpctl); 2724 if (error) 2725 return (error); 2726 } 2727 /* Create data part, if any */ 2728 if (*iosize >= 0) { 2729 error = strmakedata(iosize, uiop, stp, flag, &mpdata); 2730 if (error) { 2731 freemsg(mpctl); 2732 return (error); 2733 } 2734 } 2735 if (mpctl != NULL) { 2736 if (mpdata != NULL) 2737 linkb(mpctl, mpdata); 2738 *mpp = mpctl; 2739 } else { 2740 *mpp = mpdata; 2741 } 2742 return (0); 2743 } 2744 2745 /* 2746 * Make the control part of a protocol message given a control buffer. 2747 * n.b., this can block; be careful of what locks you hold when calling it. 
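* An illustrative calling sketch: strmakemsg() above passes the user's strbuf straight through, as in error = strmakectl(mctl, flag, uiop->uio_fmode, &mpctl); with RS_HIPRI in flag the resulting block is typed M_PCPROTO, otherwise M_PROTO.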
2748 */ 2749 int 2750 strmakectl( 2751 struct strbuf *mctl, 2752 int32_t flag, 2753 int32_t fflag, 2754 mblk_t **mpp) 2755 { 2756 mblk_t *bp = NULL; 2757 unsigned char msgtype; 2758 int error = 0; 2759 2760 *mpp = NULL; 2761 /* 2762 * Create control part of message, if any. 2763 */ 2764 if ((mctl != NULL) && (mctl->len >= 0)) { 2765 caddr_t base; 2766 int ctlcount; 2767 int allocsz; 2768 2769 if (flag & RS_HIPRI) 2770 msgtype = M_PCPROTO; 2771 else 2772 msgtype = M_PROTO; 2773 2774 ctlcount = mctl->len; 2775 base = mctl->buf; 2776 2777 /* 2778 * Give modules a better chance to reuse M_PROTO/M_PCPROTO 2779 * blocks by increasing the size to something more usable. 2780 */ 2781 allocsz = MAX(ctlcount, 64); 2782 2783 /* 2784 * Range checking has already been done; simply try 2785 * to allocate a message block for the ctl part. 2786 */ 2787 while (!(bp = allocb(allocsz, BPRI_MED))) { 2788 if (fflag & (FNDELAY|FNONBLOCK)) 2789 return (EAGAIN); 2790 if (error = strwaitbuf(allocsz, BPRI_MED)) 2791 return (error); 2792 } 2793 2794 bp->b_datap->db_type = msgtype; 2795 if (copyin(base, bp->b_wptr, ctlcount)) { 2796 freeb(bp); 2797 return (EFAULT); 2798 } 2799 bp->b_wptr += ctlcount; 2800 } 2801 *mpp = bp; 2802 return (0); 2803 } 2804 2805 /* 2806 * Make a protocol message given data buffers. 2807 * n.b., this can block; be careful of what locks you hold when calling it. 2808 * 2809 * If sd_maxblk is less than *iosize this routine can fail part way through 2810 * (due to an allocation failure). In this case on return *iosize will contain 2811 * the amount that was consumed. Otherwise *iosize will not be modified 2812 * i.e. it will contain the amount that was consumed. 2813 */ 2814 int 2815 strmakedata( 2816 ssize_t *iosize, 2817 struct uio *uiop, 2818 stdata_t *stp, 2819 int32_t flag, 2820 mblk_t **mpp) 2821 { 2822 mblk_t *mp = NULL; 2823 mblk_t *bp; 2824 int wroff = (int)stp->sd_wroff; 2825 int error = 0; 2826 ssize_t maxblk; 2827 ssize_t count = *iosize; 2828 cred_t *cr = CRED(); 2829 2830 *mpp = NULL; 2831 if (count < 0) 2832 return (0); 2833 2834 maxblk = stp->sd_maxblk; 2835 if (maxblk == INFPSZ) 2836 maxblk = count; 2837 2838 /* 2839 * Create data part of message, if any. 2840 */ 2841 do { 2842 ssize_t size; 2843 dblk_t *dp; 2844 2845 ASSERT(uiop); 2846 2847 size = MIN(count, maxblk); 2848 2849 while ((bp = allocb_cred(size + wroff, cr)) == NULL) { 2850 error = EAGAIN; 2851 if ((uiop->uio_fmode & (FNDELAY|FNONBLOCK)) || 2852 (error = strwaitbuf(size + wroff, BPRI_MED)) != 0) { 2853 if (count == *iosize) { 2854 freemsg(mp); 2855 return (error); 2856 } else { 2857 *iosize -= count; 2858 *mpp = mp; 2859 return (0); 2860 } 2861 } 2862 } 2863 dp = bp->b_datap; 2864 dp->db_cpid = curproc->p_pid; 2865 ASSERT(wroff <= dp->db_lim - bp->b_wptr); 2866 bp->b_wptr = bp->b_rptr = bp->b_rptr + wroff; 2867 2868 if (flag & STRUIO_POSTPONE) { 2869 /* 2870 * Setup the stream uio portion of the 2871 * dblk for subsequent use by struioget(). 
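* (STRUIO_SPEC and the zeroed checksum fields only mark the dblk; no user data is copied in this branch - a synchronous STREAMS module pulls the postponed uio data in later via struioget().)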
2872 */ 2873 dp->db_struioflag = STRUIO_SPEC; 2874 dp->db_cksumstart = 0; 2875 dp->db_cksumstuff = 0; 2876 dp->db_cksumend = size; 2877 *(long long *)dp->db_struioun.data = 0ll; 2878 } else { 2879 if (stp->sd_copyflag & STRCOPYCACHED) 2880 uiop->uio_extflg |= UIO_COPY_CACHED; 2881 2882 if (size != 0) { 2883 error = uiomove(bp->b_wptr, size, UIO_WRITE, 2884 uiop); 2885 if (error != 0) { 2886 freeb(bp); 2887 freemsg(mp); 2888 return (error); 2889 } 2890 } 2891 } 2892 2893 bp->b_wptr += size; 2894 count -= size; 2895 2896 if (mp == NULL) 2897 mp = bp; 2898 else 2899 linkb(mp, bp); 2900 } while (count > 0); 2901 2902 *mpp = mp; 2903 return (0); 2904 } 2905 2906 /* 2907 * Wait for a buffer to become available. Return non-zero errno 2908 * if not able to wait, 0 if buffer is probably there. 2909 */ 2910 int 2911 strwaitbuf(size_t size, int pri) 2912 { 2913 bufcall_id_t id; 2914 2915 mutex_enter(&bcall_monitor); 2916 if ((id = bufcall(size, pri, (void (*)(void *))cv_broadcast, 2917 &ttoproc(curthread)->p_flag_cv)) == 0) { 2918 mutex_exit(&bcall_monitor); 2919 return (ENOSR); 2920 } 2921 if (!cv_wait_sig(&(ttoproc(curthread)->p_flag_cv), &bcall_monitor)) { 2922 unbufcall(id); 2923 mutex_exit(&bcall_monitor); 2924 return (EINTR); 2925 } 2926 unbufcall(id); 2927 mutex_exit(&bcall_monitor); 2928 return (0); 2929 } 2930 2931 /* 2932 * This function waits for a read or write event to happen on a stream. 2933 * fmode can specify FNDELAY and/or FNONBLOCK. 2934 * The timeout is in ms with -1 meaning infinite. 2935 * The flag values work as follows: 2936 * READWAIT Check for read side errors, send M_READ 2937 * GETWAIT Check for read side errors, no M_READ 2938 * WRITEWAIT Check for write side errors. 2939 * NOINTR Do not return error if nonblocking or timeout. 2940 * STR_NOERROR Ignore all errors except STPLEX. 2941 * STR_NOSIG Ignore/hold signals during the duration of the call. 2942 * STR_PEEK Pass through the strgeterr(). 2943 */ 2944 int 2945 strwaitq(stdata_t *stp, int flag, ssize_t count, int fmode, clock_t timout, 2946 int *done) 2947 { 2948 int slpflg, errs; 2949 int error; 2950 kcondvar_t *sleepon; 2951 mblk_t *mp; 2952 ssize_t *rd_count; 2953 clock_t rval; 2954 2955 ASSERT(MUTEX_HELD(&stp->sd_lock)); 2956 if ((flag & READWAIT) || (flag & GETWAIT)) { 2957 slpflg = RSLEEP; 2958 sleepon = &_RD(stp->sd_wrq)->q_wait; 2959 errs = STRDERR|STPLEX; 2960 } else { 2961 slpflg = WSLEEP; 2962 sleepon = &stp->sd_wrq->q_wait; 2963 errs = STWRERR|STRHUP|STPLEX; 2964 } 2965 if (flag & STR_NOERROR) 2966 errs = STPLEX; 2967 2968 if (stp->sd_wakeq & slpflg) { 2969 /* 2970 * A strwakeq() is pending, no need to sleep. 2971 */ 2972 stp->sd_wakeq &= ~slpflg; 2973 *done = 0; 2974 return (0); 2975 } 2976 2977 if (fmode & (FNDELAY|FNONBLOCK)) { 2978 if (!(flag & NOINTR)) 2979 error = EAGAIN; 2980 else 2981 error = 0; 2982 *done = 1; 2983 return (error); 2984 } 2985 2986 if (stp->sd_flag & errs) { 2987 /* 2988 * Check for errors before going to sleep since the 2989 * caller might not have checked this while holding 2990 * sd_lock. 2991 */ 2992 error = strgeterr(stp, errs, (flag & STR_PEEK)); 2993 if (error != 0) { 2994 *done = 1; 2995 return (error); 2996 } 2997 } 2998 2999 /* 3000 * If any module downstream has requested read notification 3001 * by setting SNDMREAD flag using M_SETOPTS, send a message 3002 * down stream. 
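* The M_READ message built below is a single mblk whose payload is the byte count requested by the reader; e.g. a read() for 100 bytes sends an M_READ carrying *(ssize_t *)mp->b_rptr == 100 downstream.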
3003 */ 3004 if ((flag & READWAIT) && (stp->sd_flag & SNDMREAD)) { 3005 mutex_exit(&stp->sd_lock); 3006 if (!(mp = allocb_wait(sizeof (ssize_t), BPRI_MED, 3007 (flag & STR_NOSIG), &error))) { 3008 mutex_enter(&stp->sd_lock); 3009 *done = 1; 3010 return (error); 3011 } 3012 mp->b_datap->db_type = M_READ; 3013 rd_count = (ssize_t *)mp->b_wptr; 3014 *rd_count = count; 3015 mp->b_wptr += sizeof (ssize_t); 3016 /* 3017 * Send the number of bytes requested by the 3018 * read as the argument to M_READ. 3019 */ 3020 stream_willservice(stp); 3021 putnext(stp->sd_wrq, mp); 3022 stream_runservice(stp); 3023 mutex_enter(&stp->sd_lock); 3024 3025 /* 3026 * If any data arrived due to inline processing 3027 * of putnext(), don't sleep. 3028 */ 3029 if (_RD(stp->sd_wrq)->q_first != NULL) { 3030 *done = 0; 3031 return (0); 3032 } 3033 } 3034 3035 stp->sd_flag |= slpflg; 3036 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAIT2, 3037 "strwaitq sleeps (2):%p, %X, %lX, %X, %p", 3038 stp, flag, count, fmode, done); 3039 3040 rval = str_cv_wait(sleepon, &stp->sd_lock, timout, flag & STR_NOSIG); 3041 if (rval > 0) { 3042 /* EMPTY */ 3043 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAKE2, 3044 "strwaitq awakes(2):%X, %X, %X, %X, %X", 3045 stp, flag, count, fmode, done); 3046 } else if (rval == 0) { 3047 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_INTR2, 3048 "strwaitq interrupt #2:%p, %X, %lX, %X, %p", 3049 stp, flag, count, fmode, done); 3050 stp->sd_flag &= ~slpflg; 3051 cv_broadcast(sleepon); 3052 if (!(flag & NOINTR)) 3053 error = EINTR; 3054 else 3055 error = 0; 3056 *done = 1; 3057 return (error); 3058 } else { 3059 /* timeout */ 3060 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_TIME, 3061 "strwaitq timeout:%p, %X, %lX, %X, %p", 3062 stp, flag, count, fmode, done); 3063 *done = 1; 3064 if (!(flag & NOINTR)) 3065 return (ETIME); 3066 else 3067 return (0); 3068 } 3069 /* 3070 * If the caller implements delayed errors (i.e. queued after data) 3071 * we can not check for errors here since data as well as an 3072 * error might have arrived at the stream head. We return to 3073 * have the caller check the read queue before checking for errors. 3074 */ 3075 if ((stp->sd_flag & errs) && !(flag & STR_DELAYERR)) { 3076 error = strgeterr(stp, errs, (flag & STR_PEEK)); 3077 if (error != 0) { 3078 *done = 1; 3079 return (error); 3080 } 3081 } 3082 *done = 0; 3083 return (0); 3084 } 3085 3086 /* 3087 * Perform job control discipline access checks. 3088 * Return 0 for success and the errno for failure. 3089 */ 3090 3091 #define cantsend(p, t, sig) \ 3092 (sigismember(&(p)->p_ignore, sig) || signal_is_blocked((t), sig)) 3093 3094 int 3095 straccess(struct stdata *stp, enum jcaccess mode) 3096 { 3097 extern kcondvar_t lbolt_cv; /* XXX: should be in a header file */ 3098 kthread_t *t = curthread; 3099 proc_t *p = ttoproc(t); 3100 sess_t *sp; 3101 3102 if (stp->sd_sidp == NULL || stp->sd_vnode->v_type == VFIFO) 3103 return (0); 3104 3105 mutex_enter(&p->p_lock); 3106 sp = p->p_sessp; 3107 3108 for (;;) { 3109 /* 3110 * If this is not the calling process's controlling terminal 3111 * or if the calling process is already in the foreground 3112 * then allow access. 3113 */ 3114 if (sp->s_dev != stp->sd_vnode->v_rdev || 3115 p->p_pgidp == stp->sd_pgidp) { 3116 mutex_exit(&p->p_lock); 3117 return (0); 3118 } 3119 3120 /* 3121 * Check to see if controlling terminal has been deallocated. 
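* (s_vp == NULL means the terminal was deallocated; the caller is sent SIGHUP, unless it ignores or blocks that signal, and gets EIO.)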
3122 */ 3123 if (sp->s_vp == NULL) { 3124 if (!cantsend(p, t, SIGHUP)) 3125 sigtoproc(p, t, SIGHUP); 3126 mutex_exit(&p->p_lock); 3127 return (EIO); 3128 } 3129 3130 if (mode == JCGETP) { 3131 mutex_exit(&p->p_lock); 3132 return (0); 3133 } 3134 3135 if (mode == JCREAD) { 3136 if (p->p_detached || cantsend(p, t, SIGTTIN)) { 3137 mutex_exit(&p->p_lock); 3138 return (EIO); 3139 } 3140 mutex_exit(&p->p_lock); 3141 pgsignal(p->p_pgidp, SIGTTIN); 3142 mutex_enter(&p->p_lock); 3143 } else { /* mode == JCWRITE or JCSETP */ 3144 if ((mode == JCWRITE && !(stp->sd_flag & STRTOSTOP)) || 3145 cantsend(p, t, SIGTTOU)) { 3146 mutex_exit(&p->p_lock); 3147 return (0); 3148 } 3149 if (p->p_detached) { 3150 mutex_exit(&p->p_lock); 3151 return (EIO); 3152 } 3153 mutex_exit(&p->p_lock); 3154 pgsignal(p->p_pgidp, SIGTTOU); 3155 mutex_enter(&p->p_lock); 3156 } 3157 3158 /* 3159 * We call cv_wait_sig_swap() to cause the appropriate 3160 * action for the jobcontrol signal to take place. 3161 * If the signal is being caught, we will take the 3162 * EINTR error return. Otherwise, the default action 3163 * of causing the process to stop will take place. 3164 * In this case, we rely on the periodic cv_broadcast() on 3165 * &lbolt_cv to wake us up to loop around and test again. 3166 * We can't get here if the signal is ignored or 3167 * if the current thread is blocking the signal. 3168 */ 3169 if (!cv_wait_sig_swap(&lbolt_cv, &p->p_lock)) { 3170 mutex_exit(&p->p_lock); 3171 return (EINTR); 3172 } 3173 } 3174 } 3175 3176 /* 3177 * Return size of message of block type (bp->b_datap->db_type) 3178 */ 3179 size_t 3180 xmsgsize(mblk_t *bp) 3181 { 3182 unsigned char type; 3183 size_t count = 0; 3184 3185 type = bp->b_datap->db_type; 3186 3187 for (; bp; bp = bp->b_cont) { 3188 if (type != bp->b_datap->db_type) 3189 break; 3190 ASSERT(bp->b_wptr >= bp->b_rptr); 3191 count += bp->b_wptr - bp->b_rptr; 3192 } 3193 return (count); 3194 } 3195 3196 /* 3197 * Allocate a stream head. 3198 */ 3199 struct stdata * 3200 shalloc(queue_t *qp) 3201 { 3202 stdata_t *stp; 3203 3204 stp = kmem_cache_alloc(stream_head_cache, KM_SLEEP); 3205 3206 stp->sd_wrq = _WR(qp); 3207 stp->sd_strtab = NULL; 3208 stp->sd_iocid = 0; 3209 stp->sd_mate = NULL; 3210 stp->sd_freezer = NULL; 3211 stp->sd_refcnt = 0; 3212 stp->sd_wakeq = 0; 3213 stp->sd_anchor = 0; 3214 stp->sd_struiowrq = NULL; 3215 stp->sd_struiordq = NULL; 3216 stp->sd_struiodnak = 0; 3217 stp->sd_struionak = NULL; 3218 #ifdef C2_AUDIT 3219 stp->sd_t_audit_data = NULL; 3220 #endif 3221 stp->sd_rput_opt = 0; 3222 stp->sd_wput_opt = 0; 3223 stp->sd_read_opt = 0; 3224 stp->sd_rprotofunc = strrput_proto; 3225 stp->sd_rmiscfunc = strrput_misc; 3226 stp->sd_rderrfunc = stp->sd_wrerrfunc = NULL; 3227 stp->sd_ciputctrl = NULL; 3228 stp->sd_nciputctrl = 0; 3229 stp->sd_qhead = NULL; 3230 stp->sd_qtail = NULL; 3231 stp->sd_servid = NULL; 3232 stp->sd_nqueues = 0; 3233 stp->sd_svcflags = 0; 3234 stp->sd_copyflag = 0; 3235 return (stp); 3236 } 3237 3238 /* 3239 * Free a stream head. 
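* The caller must not hold sd_lock; any scheduled stream head service (STRS_SCHEDULED) is waited for below before the memory is freed.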
3240 */ 3241 void 3242 shfree(stdata_t *stp) 3243 { 3244 ASSERT(MUTEX_NOT_HELD(&stp->sd_lock)); 3245 3246 stp->sd_wrq = NULL; 3247 3248 mutex_enter(&stp->sd_qlock); 3249 while (stp->sd_svcflags & STRS_SCHEDULED) { 3250 STRSTAT(strwaits); 3251 cv_wait(&stp->sd_qcv, &stp->sd_qlock); 3252 } 3253 mutex_exit(&stp->sd_qlock); 3254 3255 if (stp->sd_ciputctrl != NULL) { 3256 ASSERT(stp->sd_nciputctrl == n_ciputctrl - 1); 3257 SUMCHECK_CIPUTCTRL_COUNTS(stp->sd_ciputctrl, 3258 stp->sd_nciputctrl, 0); 3259 ASSERT(ciputctrl_cache != NULL); 3260 kmem_cache_free(ciputctrl_cache, stp->sd_ciputctrl); 3261 stp->sd_ciputctrl = NULL; 3262 stp->sd_nciputctrl = 0; 3263 } 3264 ASSERT(stp->sd_qhead == NULL); 3265 ASSERT(stp->sd_qtail == NULL); 3266 ASSERT(stp->sd_nqueues == 0); 3267 kmem_cache_free(stream_head_cache, stp); 3268 } 3269 3270 /* 3271 * Allocate a pair of queues and a syncq for the pair 3272 */ 3273 queue_t * 3274 allocq(void) 3275 { 3276 queinfo_t *qip; 3277 queue_t *qp, *wqp; 3278 syncq_t *sq; 3279 3280 qip = kmem_cache_alloc(queue_cache, KM_SLEEP); 3281 3282 qp = &qip->qu_rqueue; 3283 wqp = &qip->qu_wqueue; 3284 sq = &qip->qu_syncq; 3285 3286 qp->q_last = NULL; 3287 qp->q_next = NULL; 3288 qp->q_ptr = NULL; 3289 qp->q_flag = QUSE | QREADR; 3290 qp->q_bandp = NULL; 3291 qp->q_stream = NULL; 3292 qp->q_syncq = sq; 3293 qp->q_nband = 0; 3294 qp->q_nfsrv = NULL; 3295 qp->q_draining = 0; 3296 qp->q_syncqmsgs = 0; 3297 qp->q_spri = 0; 3298 qp->q_qtstamp = 0; 3299 qp->q_sqtstamp = 0; 3300 qp->q_fp = NULL; 3301 3302 wqp->q_last = NULL; 3303 wqp->q_next = NULL; 3304 wqp->q_ptr = NULL; 3305 wqp->q_flag = QUSE; 3306 wqp->q_bandp = NULL; 3307 wqp->q_stream = NULL; 3308 wqp->q_syncq = sq; 3309 wqp->q_nband = 0; 3310 wqp->q_nfsrv = NULL; 3311 wqp->q_draining = 0; 3312 wqp->q_syncqmsgs = 0; 3313 wqp->q_qtstamp = 0; 3314 wqp->q_sqtstamp = 0; 3315 wqp->q_spri = 0; 3316 3317 sq->sq_count = 0; 3318 sq->sq_rmqcount = 0; 3319 sq->sq_flags = 0; 3320 sq->sq_type = 0; 3321 sq->sq_callbflags = 0; 3322 sq->sq_cancelid = 0; 3323 sq->sq_ciputctrl = NULL; 3324 sq->sq_nciputctrl = 0; 3325 sq->sq_needexcl = 0; 3326 sq->sq_svcflags = 0; 3327 3328 return (qp); 3329 } 3330 3331 /* 3332 * Free a pair of queues and the "attached" syncq. 3333 * Discard any messages left on the syncq(s), remove the syncq(s) from the 3334 * outer perimeter, and free the syncq(s) if they are not the "attached" syncq. 3335 */ 3336 void 3337 freeq(queue_t *qp) 3338 { 3339 qband_t *qbp, *nqbp; 3340 syncq_t *sq, *outer; 3341 queue_t *wqp = _WR(qp); 3342 3343 ASSERT(qp->q_flag & QREADR); 3344 3345 (void) flush_syncq(qp->q_syncq, qp); 3346 (void) flush_syncq(wqp->q_syncq, wqp); 3347 ASSERT(qp->q_syncqmsgs == 0 && wqp->q_syncqmsgs == 0); 3348 3349 outer = qp->q_syncq->sq_outer; 3350 if (outer != NULL) { 3351 outer_remove(outer, qp->q_syncq); 3352 if (wqp->q_syncq != qp->q_syncq) 3353 outer_remove(outer, wqp->q_syncq); 3354 } 3355 /* 3356 * Free any syncqs that are outside what allocq returned. 
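* (allocq() embeds a single syncq in the queinfo_t; any extra syncqs, e.g. the separate write-side syncq that setq() creates for QPERQ drivers, are freed here.)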
3357 */ 3358 if (qp->q_syncq != SQ(qp) && !(qp->q_flag & QPERMOD)) 3359 free_syncq(qp->q_syncq); 3360 if (qp->q_syncq != wqp->q_syncq && wqp->q_syncq != SQ(qp)) 3361 free_syncq(wqp->q_syncq); 3362 3363 ASSERT((qp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0); 3364 ASSERT((wqp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0); 3365 ASSERT(MUTEX_NOT_HELD(QLOCK(qp))); 3366 ASSERT(MUTEX_NOT_HELD(QLOCK(wqp))); 3367 sq = SQ(qp); 3368 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq))); 3369 ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL); 3370 ASSERT(sq->sq_outer == NULL); 3371 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL); 3372 ASSERT(sq->sq_callbpend == NULL); 3373 ASSERT(sq->sq_needexcl == 0); 3374 3375 if (sq->sq_ciputctrl != NULL) { 3376 ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1); 3377 SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl, 3378 sq->sq_nciputctrl, 0); 3379 ASSERT(ciputctrl_cache != NULL); 3380 kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl); 3381 sq->sq_ciputctrl = NULL; 3382 sq->sq_nciputctrl = 0; 3383 } 3384 3385 ASSERT(qp->q_first == NULL && wqp->q_first == NULL); 3386 ASSERT(qp->q_count == 0 && wqp->q_count == 0); 3387 ASSERT(qp->q_mblkcnt == 0 && wqp->q_mblkcnt == 0); 3388 3389 qp->q_flag &= ~QUSE; 3390 wqp->q_flag &= ~QUSE; 3391 3392 /* NOTE: Uncomment the assert below once bugid 1159635 is fixed. */ 3393 /* ASSERT((qp->q_flag & QWANTW) == 0 && (wqp->q_flag & QWANTW) == 0); */ 3394 3395 qbp = qp->q_bandp; 3396 while (qbp) { 3397 nqbp = qbp->qb_next; 3398 freeband(qbp); 3399 qbp = nqbp; 3400 } 3401 qbp = wqp->q_bandp; 3402 while (qbp) { 3403 nqbp = qbp->qb_next; 3404 freeband(qbp); 3405 qbp = nqbp; 3406 } 3407 kmem_cache_free(queue_cache, qp); 3408 } 3409 3410 /* 3411 * Allocate a qband structure. 3412 */ 3413 qband_t * 3414 allocband(void) 3415 { 3416 qband_t *qbp; 3417 3418 qbp = kmem_cache_alloc(qband_cache, KM_NOSLEEP); 3419 if (qbp == NULL) 3420 return (NULL); 3421 3422 qbp->qb_next = NULL; 3423 qbp->qb_count = 0; 3424 qbp->qb_mblkcnt = 0; 3425 qbp->qb_first = NULL; 3426 qbp->qb_last = NULL; 3427 qbp->qb_flag = 0; 3428 3429 return (qbp); 3430 } 3431 3432 /* 3433 * Free a qband structure. 3434 */ 3435 void 3436 freeband(qband_t *qbp) 3437 { 3438 kmem_cache_free(qband_cache, qbp); 3439 } 3440 3441 /* 3442 * Just like putnextctl(9F), except that allocb_wait() is used. 3443 * 3444 * Consolidation Private, and of course only callable from the stream head or 3445 * routines that may block. 3446 */ 3447 int 3448 putnextctl_wait(queue_t *q, int type) 3449 { 3450 mblk_t *bp; 3451 int error; 3452 3453 if ((datamsg(type) && (type != M_DELAY)) || 3454 (bp = allocb_wait(0, BPRI_HI, 0, &error)) == NULL) 3455 return (0); 3456 3457 bp->b_datap->db_type = (unsigned char)type; 3458 putnext(q, bp); 3459 return (1); 3460 } 3461 3462 /* 3463 * run any possible bufcalls. 3464 */ 3465 void 3466 runbufcalls(void) 3467 { 3468 strbufcall_t *bcp; 3469 3470 mutex_enter(&bcall_monitor); 3471 mutex_enter(&strbcall_lock); 3472 3473 if (strbcalls.bc_head) { 3474 size_t count; 3475 int nevent; 3476 3477 /* 3478 * count how many events are on the list 3479 * now so we can check to avoid looping 3480 * in low memory situations 3481 */ 3482 nevent = 0; 3483 for (bcp = strbcalls.bc_head; bcp; bcp = bcp->bc_next) 3484 nevent++; 3485 3486 /* 3487 * get estimate of available memory from kmem_avail(). 3488 * awake all bufcall functions waiting for 3489 * memory whose request could be satisfied 3490 * by 'count' memory and let 'em fight for it. 
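* For example, if kmem_avail() reports 64K, a queued bufcall for 4K runs now, while a 1M request is deferred (requeued at the tail when other requests follow it) and retried on a later invocation.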
3491 */ 3492 count = kmem_avail(); 3493 while ((bcp = strbcalls.bc_head) != NULL && nevent) { 3494 STRSTAT(bufcalls); 3495 --nevent; 3496 if (bcp->bc_size <= count) { 3497 bcp->bc_executor = curthread; 3498 mutex_exit(&strbcall_lock); 3499 (*bcp->bc_func)(bcp->bc_arg); 3500 mutex_enter(&strbcall_lock); 3501 bcp->bc_executor = NULL; 3502 cv_broadcast(&bcall_cv); 3503 strbcalls.bc_head = bcp->bc_next; 3504 kmem_free(bcp, sizeof (strbufcall_t)); 3505 } else { 3506 /* 3507 * too big, try again later - note 3508 * that nevent was decremented above 3509 * so we won't retry this one on this 3510 * iteration of the loop 3511 */ 3512 if (bcp->bc_next != NULL) { 3513 strbcalls.bc_head = bcp->bc_next; 3514 bcp->bc_next = NULL; 3515 strbcalls.bc_tail->bc_next = bcp; 3516 strbcalls.bc_tail = bcp; 3517 } 3518 } 3519 } 3520 if (strbcalls.bc_head == NULL) 3521 strbcalls.bc_tail = NULL; 3522 } 3523 3524 mutex_exit(&strbcall_lock); 3525 mutex_exit(&bcall_monitor); 3526 } 3527 3528 3529 /* 3530 * actually run queue's service routine. 3531 */ 3532 static void 3533 runservice(queue_t *q) 3534 { 3535 qband_t *qbp; 3536 3537 ASSERT(q->q_qinfo->qi_srvp); 3538 again: 3539 entersq(q->q_syncq, SQ_SVC); 3540 TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_START, 3541 "runservice starts:%p", q); 3542 3543 if (!(q->q_flag & QWCLOSE)) 3544 (*q->q_qinfo->qi_srvp)(q); 3545 3546 TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_END, 3547 "runservice ends:(%p)", q); 3548 3549 leavesq(q->q_syncq, SQ_SVC); 3550 3551 mutex_enter(QLOCK(q)); 3552 if (q->q_flag & QENAB) { 3553 q->q_flag &= ~QENAB; 3554 mutex_exit(QLOCK(q)); 3555 goto again; 3556 } 3557 q->q_flag &= ~QINSERVICE; 3558 q->q_flag &= ~QBACK; 3559 for (qbp = q->q_bandp; qbp; qbp = qbp->qb_next) 3560 qbp->qb_flag &= ~QB_BACK; 3561 /* 3562 * Wakeup thread waiting for the service procedure 3563 * to be run (strclose and qdetach). 3564 */ 3565 cv_broadcast(&q->q_wait); 3566 3567 mutex_exit(QLOCK(q)); 3568 } 3569 3570 /* 3571 * Background processing of bufcalls. 3572 */ 3573 void 3574 streams_bufcall_service(void) 3575 { 3576 callb_cpr_t cprinfo; 3577 3578 CALLB_CPR_INIT(&cprinfo, &strbcall_lock, callb_generic_cpr, 3579 "streams_bufcall_service"); 3580 3581 mutex_enter(&strbcall_lock); 3582 3583 for (;;) { 3584 if (strbcalls.bc_head != NULL && kmem_avail() > 0) { 3585 mutex_exit(&strbcall_lock); 3586 runbufcalls(); 3587 mutex_enter(&strbcall_lock); 3588 } 3589 if (strbcalls.bc_head != NULL) { 3590 clock_t wt, tick; 3591 3592 STRSTAT(bcwaits); 3593 /* Wait for memory to become available */ 3594 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3595 tick = SEC_TO_TICK(60); 3596 time_to_wait(&wt, tick); 3597 (void) cv_timedwait(&memavail_cv, &strbcall_lock, wt); 3598 CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock); 3599 } 3600 3601 /* Wait for new work to arrive */ 3602 if (strbcalls.bc_head == NULL) { 3603 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3604 cv_wait(&strbcall_cv, &strbcall_lock); 3605 CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock); 3606 } 3607 } 3608 } 3609 3610 /* 3611 * Background processing of streams background tasks which failed 3612 * taskq_dispatch. 3613 */ 3614 static void 3615 streams_qbkgrnd_service(void) 3616 { 3617 callb_cpr_t cprinfo; 3618 queue_t *q; 3619 3620 CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr, 3621 "streams_bkgrnd_service"); 3622 3623 mutex_enter(&service_queue); 3624 3625 for (;;) { 3626 /* 3627 * Wait for work to arrive. 
3628 */ 3629 while ((freebs_list == NULL) && (qhead == NULL)) { 3630 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3631 cv_wait(&services_to_run, &service_queue); 3632 CALLB_CPR_SAFE_END(&cprinfo, &service_queue); 3633 } 3634 /* 3635 * Handle all pending freebs requests to free memory. 3636 */ 3637 while (freebs_list != NULL) { 3638 mblk_t *mp = freebs_list; 3639 freebs_list = mp->b_next; 3640 mutex_exit(&service_queue); 3641 mblk_free(mp); 3642 mutex_enter(&service_queue); 3643 } 3644 /* 3645 * Run pending queues. 3646 */ 3647 while (qhead != NULL) { 3648 DQ(q, qhead, qtail, q_link); 3649 ASSERT(q != NULL); 3650 mutex_exit(&service_queue); 3651 queue_service(q); 3652 mutex_enter(&service_queue); 3653 } 3654 ASSERT(qhead == NULL && qtail == NULL); 3655 } 3656 } 3657 3658 /* 3659 * Background processing of streams background tasks which failed 3660 * taskq_dispatch. 3661 */ 3662 static void 3663 streams_sqbkgrnd_service(void) 3664 { 3665 callb_cpr_t cprinfo; 3666 syncq_t *sq; 3667 3668 CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr, 3669 "streams_sqbkgrnd_service"); 3670 3671 mutex_enter(&service_queue); 3672 3673 for (;;) { 3674 /* 3675 * Wait for work to arrive. 3676 */ 3677 while (sqhead == NULL) { 3678 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3679 cv_wait(&syncqs_to_run, &service_queue); 3680 CALLB_CPR_SAFE_END(&cprinfo, &service_queue); 3681 } 3682 3683 /* 3684 * Run pending syncqs. 3685 */ 3686 while (sqhead != NULL) { 3687 DQ(sq, sqhead, sqtail, sq_next); 3688 ASSERT(sq != NULL); 3689 ASSERT(sq->sq_svcflags & SQ_BGTHREAD); 3690 mutex_exit(&service_queue); 3691 syncq_service(sq); 3692 mutex_enter(&service_queue); 3693 } 3694 } 3695 } 3696 3697 /* 3698 * Disable the syncq and wait for background syncq processing to complete. 3699 * If the syncq is placed on the sqhead/sqtail queue, try to remove it from the 3700 * list. 3701 */ 3702 void 3703 wait_sq_svc(syncq_t *sq) 3704 { 3705 mutex_enter(SQLOCK(sq)); 3706 sq->sq_svcflags |= SQ_DISABLED; 3707 if (sq->sq_svcflags & SQ_BGTHREAD) { 3708 syncq_t *sq_chase; 3709 syncq_t *sq_curr; 3710 int removed; 3711 3712 ASSERT(sq->sq_servcount == 1); 3713 mutex_enter(&service_queue); 3714 RMQ(sq, sqhead, sqtail, sq_next, sq_chase, sq_curr, removed); 3715 mutex_exit(&service_queue); 3716 if (removed) { 3717 sq->sq_svcflags &= ~SQ_BGTHREAD; 3718 sq->sq_servcount = 0; 3719 STRSTAT(sqremoved); 3720 goto done; 3721 } 3722 } 3723 while (sq->sq_servcount != 0) { 3724 sq->sq_flags |= SQ_WANTWAKEUP; 3725 cv_wait(&sq->sq_wait, SQLOCK(sq)); 3726 } 3727 done: 3728 mutex_exit(SQLOCK(sq)); 3729 } 3730 3731 /* 3732 * Put a syncq on the list of syncqs to be serviced by the sqthread. 3733 * Add the argument to the end of the sqhead list and set the flag 3734 * indicating this syncq has been enabled. If it has already been 3735 * enabled, don't do anything. 3736 * This routine assumes that SQLOCK is held. 3737 * NOTE that the lock order is to have the SQLOCK first, 3738 * so if the service_queue lock is held, we need to release it 3739 * before acquiring the SQLOCK (mostly relevant for the background 3740 * thread, and this seems to be common among the STREAMS global locks). 3741 * Note that the sq_svcflags are protected by the SQLOCK. 3742 */ 3743 void 3744 sqenable(syncq_t *sq) 3745 { 3746 /* 3747 * This is probably not important except for where I believe it 3748 * is being called. At that point, SQLOCK should be held (and it 3749 * is a pain to release it just for this routine, so don't do 3750 * it).
3751 */ 3752 ASSERT(MUTEX_HELD(SQLOCK(sq))); 3753 3754 IMPLY(sq->sq_servcount == 0, sq->sq_next == NULL); 3755 IMPLY(sq->sq_next != NULL, sq->sq_svcflags & SQ_BGTHREAD); 3756 3757 /* 3758 * Do not put on list if background thread is scheduled or 3759 * syncq is disabled. 3760 */ 3761 if (sq->sq_svcflags & (SQ_DISABLED | SQ_BGTHREAD)) 3762 return; 3763 3764 /* 3765 * Check whether we should enable sq at all. 3766 * Non-PERMOD syncqs may be drained by at most one thread. 3767 * PERMOD syncqs may be drained by several threads, but we limit the 3768 * total number to the lesser of 3769 * the number of queues on the syncq and 3770 * the number of online CPUs. 3771 */ 3772 if (sq->sq_servcount != 0) { 3773 if (((sq->sq_type & SQ_PERMOD) == 0) || 3774 (sq->sq_servcount >= MIN(sq->sq_nqueues, ncpus_online))) { 3775 STRSTAT(sqtoomany); 3776 return; 3777 } 3778 } 3779 3780 sq->sq_tstamp = lbolt; 3781 STRSTAT(sqenables); 3782 3783 /* Attempt a taskq dispatch */ 3784 sq->sq_servid = (void *)taskq_dispatch(streams_taskq, 3785 (task_func_t *)syncq_service, sq, TQ_NOSLEEP | TQ_NOQUEUE); 3786 if (sq->sq_servid != NULL) { 3787 sq->sq_servcount++; 3788 return; 3789 } 3790 3791 /* 3792 * This taskq dispatch failed, but a previous one may have succeeded. 3793 * Don't try to schedule on the background thread whilst there is 3794 * outstanding taskq processing. 3795 */ 3796 if (sq->sq_servcount != 0) 3797 return; 3798 3799 /* 3800 * System is low on resources and can't perform a non-sleeping 3801 * dispatch. Schedule the syncq for a background thread and mark the 3802 * syncq to avoid any further taskq dispatch attempts. 3803 */ 3804 mutex_enter(&service_queue); 3805 STRSTAT(taskqfails); 3806 ENQUEUE(sq, sqhead, sqtail, sq_next); 3807 sq->sq_svcflags |= SQ_BGTHREAD; 3808 sq->sq_servcount = 1; 3809 cv_signal(&syncqs_to_run); 3810 mutex_exit(&service_queue); 3811 } 3812 3813 /* 3814 * Note: fifo_close() depends on the mblk_t on the queue being freed 3815 * asynchronously. The asynchronous freeing of messages breaks the 3816 * recursive call chain of fifo_close() while there are I_SENDFD type 3817 * messages referring to other file pointers on the queue. When 3818 * closing pipes this avoids stack overflow in the case of daisy-chained 3819 * pipes, and also avoids deadlock in the case of fifonode_t pairs (which 3820 * share the same fifolock_t). 3821 */ 3822 3823 /* ARGSUSED */ 3824 void 3825 freebs_enqueue(mblk_t *mp, dblk_t *dbp) 3826 { 3827 ASSERT(dbp->db_mblk == mp); 3828 3829 /* 3830 * Check data sanity. The dblock should have a non-empty free function. 3831 * It is better to panic here than later, when the dblock is freed 3832 * asynchronously and the context is lost. 3833 */ 3834 if (dbp->db_frtnp->free_func == NULL) { 3835 panic("freebs_enqueue: dblock %p has a NULL free callback", 3836 (void *) dbp); 3837 } 3838 3839 STRSTAT(freebs); 3840 if (taskq_dispatch(streams_taskq, (task_func_t *)mblk_free, mp, 3841 TQ_NOSLEEP) == NULL) { 3842 /* 3843 * System is low on resources and can't perform a non-sleeping 3844 * dispatch. Schedule for a background thread. 3845 */ 3846 mutex_enter(&service_queue); 3847 STRSTAT(taskqfails); 3848 mp->b_next = freebs_list; 3849 freebs_list = mp; 3850 cv_signal(&services_to_run); 3851 mutex_exit(&service_queue); 3852 } 3853 } 3854 3855 /* 3856 * Set the QBACK or QB_BACK flag in the given queue for 3857 * the given priority band.
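* For example, pri == 3 on a queue with q_nband == 1 first allocates qband structures for bands 2 and 3 (inheriting q_hiwat/q_lowat) and then sets QB_BACK on band 3; pri == 0 just sets QBACK on the queue itself.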
3858 */ 3859 void 3860 setqback(queue_t *q, unsigned char pri) 3861 { 3862 int i; 3863 qband_t *qbp; 3864 qband_t **qbpp; 3865 3866 ASSERT(MUTEX_HELD(QLOCK(q))); 3867 if (pri != 0) { 3868 if (pri > q->q_nband) { 3869 qbpp = &q->q_bandp; 3870 while (*qbpp) 3871 qbpp = &(*qbpp)->qb_next; 3872 while (pri > q->q_nband) { 3873 if ((*qbpp = allocband()) == NULL) { 3874 cmn_err(CE_WARN, 3875 "setqback: can't allocate qband\n"); 3876 return; 3877 } 3878 (*qbpp)->qb_hiwat = q->q_hiwat; 3879 (*qbpp)->qb_lowat = q->q_lowat; 3880 q->q_nband++; 3881 qbpp = &(*qbpp)->qb_next; 3882 } 3883 } 3884 qbp = q->q_bandp; 3885 i = pri; 3886 while (--i) 3887 qbp = qbp->qb_next; 3888 qbp->qb_flag |= QB_BACK; 3889 } else { 3890 q->q_flag |= QBACK; 3891 } 3892 } 3893 3894 int 3895 strcopyin(void *from, void *to, size_t len, int copyflag) 3896 { 3897 if (copyflag & U_TO_K) { 3898 ASSERT((copyflag & K_TO_K) == 0); 3899 if (copyin(from, to, len)) 3900 return (EFAULT); 3901 } else { 3902 ASSERT(copyflag & K_TO_K); 3903 bcopy(from, to, len); 3904 } 3905 return (0); 3906 } 3907 3908 int 3909 strcopyout(void *from, void *to, size_t len, int copyflag) 3910 { 3911 if (copyflag & U_TO_K) { 3912 if (copyout(from, to, len)) 3913 return (EFAULT); 3914 } else { 3915 ASSERT(copyflag & K_TO_K); 3916 bcopy(from, to, len); 3917 } 3918 return (0); 3919 } 3920 3921 /* 3922 * strsignal_nolock() posts a signal to the process(es) at the stream head. 3923 * It assumes that the stream head lock is already held, whereas strsignal() 3924 * acquires the lock first. This routine was created because a few callers 3925 * release the stream head lock before calling only to re-acquire it after 3926 * it returns. 3927 */ 3928 void 3929 strsignal_nolock(stdata_t *stp, int sig, int32_t band) 3930 { 3931 ASSERT(MUTEX_HELD(&stp->sd_lock)); 3932 switch (sig) { 3933 case SIGPOLL: 3934 if (stp->sd_sigflags & S_MSG) 3935 strsendsig(stp->sd_siglist, S_MSG, (uchar_t)band, 0); 3936 break; 3937 3938 default: 3939 if (stp->sd_pgidp) { 3940 pgsignal(stp->sd_pgidp, sig); 3941 } 3942 break; 3943 } 3944 } 3945 3946 void 3947 strsignal(stdata_t *stp, int sig, int32_t band) 3948 { 3949 TRACE_3(TR_FAC_STREAMS_FR, TR_SENDSIG, 3950 "strsignal:%p, %X, %X", stp, sig, band); 3951 3952 mutex_enter(&stp->sd_lock); 3953 switch (sig) { 3954 case SIGPOLL: 3955 if (stp->sd_sigflags & S_MSG) 3956 strsendsig(stp->sd_siglist, S_MSG, (uchar_t)band, 0); 3957 break; 3958 3959 default: 3960 if (stp->sd_pgidp) { 3961 pgsignal(stp->sd_pgidp, sig); 3962 } 3963 break; 3964 } 3965 mutex_exit(&stp->sd_lock); 3966 } 3967 3968 void 3969 strhup(stdata_t *stp) 3970 { 3971 pollwakeup(&stp->sd_pollist, POLLHUP); 3972 mutex_enter(&stp->sd_lock); 3973 if (stp->sd_sigflags & S_HANGUP) 3974 strsendsig(stp->sd_siglist, S_HANGUP, 0, 0); 3975 mutex_exit(&stp->sd_lock); 3976 } 3977 3978 void 3979 stralloctty(sess_t *sp, stdata_t *stp) 3980 { 3981 mutex_enter(&stp->sd_lock); 3982 mutex_enter(&pidlock); 3983 stp->sd_sidp = sp->s_sidp; 3984 stp->sd_pgidp = sp->s_sidp; 3985 PID_HOLD(stp->sd_pgidp); 3986 PID_HOLD(stp->sd_sidp); 3987 mutex_exit(&pidlock); 3988 mutex_exit(&stp->sd_lock); 3989 } 3990 3991 void 3992 strfreectty(stdata_t *stp) 3993 { 3994 mutex_enter(&stp->sd_lock); 3995 pgsignal(stp->sd_pgidp, SIGHUP); 3996 mutex_enter(&pidlock); 3997 PID_RELE(stp->sd_pgidp); 3998 PID_RELE(stp->sd_sidp); 3999 stp->sd_pgidp = NULL; 4000 stp->sd_sidp = NULL; 4001 mutex_exit(&pidlock); 4002 mutex_exit(&stp->sd_lock); 4003 if (!(stp->sd_flag & STRHUP)) 4004 strhup(stp); 4005 } 4006 4007 void 4008 strctty(stdata_t *stp) 
4009 { 4010 extern vnode_t *makectty(); 4011 proc_t *p = curproc; 4012 sess_t *sp = p->p_sessp; 4013 4014 mutex_enter(&stp->sd_lock); 4015 /* 4016 * No need to hold the session lock or do a TTYHOLD, 4017 * because this is the only thread that can be the 4018 * session leader and not have a controlling tty. 4019 */ 4020 if ((stp->sd_flag & (STRHUP|STRDERR|STWRERR|STPLEX)) == 0 && 4021 stp->sd_sidp == NULL && /* not allocated as ctty */ 4022 sp->s_sidp == p->p_pidp && /* session leader */ 4023 sp->s_flag != SESS_CLOSE && /* session is not closing */ 4024 sp->s_vp == NULL) { /* without ctty */ 4025 mutex_exit(&stp->sd_lock); 4026 ASSERT(stp->sd_pgidp == NULL); 4027 alloctty(p, makectty(stp->sd_vnode)); 4028 stralloctty(sp, stp); 4029 mutex_enter(&stp->sd_lock); 4030 stp->sd_flag |= STRISTTY; /* just to be sure */ 4031 } 4032 mutex_exit(&stp->sd_lock); 4033 } 4034 4035 /* 4036 * Enable the first back queue with a service procedure. 4037 * Use pri == -1 to avoid the setqback. 4038 */ 4039 void 4040 backenable(queue_t *q, int pri) 4041 { 4042 queue_t *nq; 4043 4044 /* 4045 * Our presence might not prevent other modules in our own 4046 * stream from popping/pushing since the caller of getq might not 4047 * have a claim on the queue (some drivers do a getq on somebody 4048 * else's queue - they know that the queue itself is not going away, 4049 * but the framework has to guarantee q_next in that stream.) 4050 */ 4051 claimstr(q); 4052 4053 /* find nearest back queue with service proc */ 4054 for (nq = backq(q); nq && !nq->q_qinfo->qi_srvp; nq = backq(nq)) { 4055 ASSERT(STRMATED(q->q_stream) || STREAM(q) == STREAM(nq)); 4056 } 4057 4058 if (nq) { 4059 kthread_t *freezer; 4060 /* 4061 * backenable can be called either with no locks held 4062 * or with the stream frozen (the latter occurs when a module 4063 * calls rmvq with the stream frozen.) If the stream is frozen 4064 * by the caller the caller will hold all qlocks in the stream. 4065 */ 4066 freezer = STREAM(q)->sd_freezer; 4067 if (freezer != curthread) { 4068 mutex_enter(QLOCK(nq)); 4069 } 4070 #ifdef DEBUG 4071 else { 4072 ASSERT(frozenstr(q)); 4073 ASSERT(MUTEX_HELD(QLOCK(q))); 4074 ASSERT(MUTEX_HELD(QLOCK(nq))); 4075 } 4076 #endif 4077 if (pri != -1) 4078 setqback(nq, pri); 4079 qenable_locked(nq); 4080 if (freezer != curthread) 4081 mutex_exit(QLOCK(nq)); 4082 } 4083 releasestr(q); 4084 } 4085 4086 /* 4087 * Return the appropriate errno when one of flags_to_check is set 4088 * in sd_flag. Uses the exported error routines if they are set. 4089 * Will return 0 if no error is set (or if the exported error routines 4090 * do not return an error). 4091 * 4092 * If there are both a read and a write error to check, we prefer the read 4093 * error. Also, give preference to recorded errnos over the error functions. 4094 * The flags that are handled are: 4095 * STPLEX return EINVAL 4096 * STRDERR return sd_rerror (and clear if STRDERRNONPERSIST) 4097 * STWRERR return sd_werror (and clear if STWRERRNONPERSIST) 4098 * STRHUP return sd_werror 4099 * 4100 * If the caller indicates that the operation is a peek, a nonpersistent error 4101 * is not cleared.
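* For example, if both STRDERR and STWRERR are set, sd_rerror is returned and the write-side error is left untouched.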
4102 */ 4103 int 4104 strgeterr(stdata_t *stp, int32_t flags_to_check, int ispeek) 4105 { 4106 int32_t sd_flag = stp->sd_flag & flags_to_check; 4107 int error = 0; 4108 4109 ASSERT(MUTEX_HELD(&stp->sd_lock)); 4110 ASSERT((flags_to_check & ~(STRDERR|STWRERR|STRHUP|STPLEX)) == 0); 4111 if (sd_flag & STPLEX) 4112 error = EINVAL; 4113 else if (sd_flag & STRDERR) { 4114 error = stp->sd_rerror; 4115 if ((stp->sd_flag & STRDERRNONPERSIST) && !ispeek) { 4116 /* 4117 * Read errors are non-persistent, i.e. discarded once 4118 * returned to a non-peeking caller. 4119 */ 4120 stp->sd_rerror = 0; 4121 stp->sd_flag &= ~STRDERR; 4122 } 4123 if (error == 0 && stp->sd_rderrfunc != NULL) { 4124 int clearerr = 0; 4125 4126 error = (*stp->sd_rderrfunc)(stp->sd_vnode, ispeek, 4127 &clearerr); 4128 if (clearerr) { 4129 stp->sd_flag &= ~STRDERR; 4130 stp->sd_rderrfunc = NULL; 4131 } 4132 } 4133 } else if (sd_flag & STWRERR) { 4134 error = stp->sd_werror; 4135 if ((stp->sd_flag & STWRERRNONPERSIST) && !ispeek) { 4136 /* 4137 * Write errors are non-persistent, i.e. discarded once 4138 * returned to a non-peeking caller. 4139 */ 4140 stp->sd_werror = 0; 4141 stp->sd_flag &= ~STWRERR; 4142 } 4143 if (error == 0 && stp->sd_wrerrfunc != NULL) { 4144 int clearerr = 0; 4145 4146 error = (*stp->sd_wrerrfunc)(stp->sd_vnode, ispeek, 4147 &clearerr); 4148 if (clearerr) { 4149 stp->sd_flag &= ~STWRERR; 4150 stp->sd_wrerrfunc = NULL; 4151 } 4152 } 4153 } else if (sd_flag & STRHUP) { 4154 /* sd_werror set when STRHUP */ 4155 error = stp->sd_werror; 4156 } 4157 return (error); 4158 } 4159 4160 4161 /* 4162 * Single-thread open/close/push/pop, 4163 * also for twisted streams. 4164 */ 4165 int 4166 strstartplumb(stdata_t *stp, int flag, int cmd) 4167 { 4168 int waited = 1; 4169 int error = 0; 4170 4171 if (STRMATED(stp)) { 4172 struct stdata *stmatep = stp->sd_mate; 4173 4174 STRLOCKMATES(stp); 4175 while (waited) { 4176 waited = 0; 4177 while (stmatep->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 4178 if ((cmd == I_POP) && 4179 (flag & (FNDELAY|FNONBLOCK))) { 4180 STRUNLOCKMATES(stp); 4181 return (EAGAIN); 4182 } 4183 waited = 1; 4184 mutex_exit(&stp->sd_lock); 4185 if (!cv_wait_sig(&stmatep->sd_monitor, 4186 &stmatep->sd_lock)) { 4187 mutex_exit(&stmatep->sd_lock); 4188 return (EINTR); 4189 } 4190 mutex_exit(&stmatep->sd_lock); 4191 STRLOCKMATES(stp); 4192 } 4193 while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 4194 if ((cmd == I_POP) && 4195 (flag & (FNDELAY|FNONBLOCK))) { 4196 STRUNLOCKMATES(stp); 4197 return (EAGAIN); 4198 } 4199 waited = 1; 4200 mutex_exit(&stmatep->sd_lock); 4201 if (!cv_wait_sig(&stp->sd_monitor, 4202 &stp->sd_lock)) { 4203 mutex_exit(&stp->sd_lock); 4204 return (EINTR); 4205 } 4206 mutex_exit(&stp->sd_lock); 4207 STRLOCKMATES(stp); 4208 } 4209 if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) { 4210 error = strgeterr(stp, 4211 STRDERR|STWRERR|STRHUP|STPLEX, 0); 4212 if (error != 0) { 4213 STRUNLOCKMATES(stp); 4214 return (error); 4215 } 4216 } 4217 } 4218 stp->sd_flag |= STRPLUMB; 4219 STRUNLOCKMATES(stp); 4220 } else { 4221 mutex_enter(&stp->sd_lock); 4222 while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 4223 if (((cmd == I_POP) || (cmd == _I_REMOVE)) && 4224 (flag & (FNDELAY|FNONBLOCK))) { 4225 mutex_exit(&stp->sd_lock); 4226 return (EAGAIN); 4227 } 4228 if (!cv_wait_sig(&stp->sd_monitor, &stp->sd_lock)) { 4229 mutex_exit(&stp->sd_lock); 4230 return (EINTR); 4231 } 4232 if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) { 4233 error = strgeterr(stp, 4234 STRDERR|STWRERR|STRHUP|STPLEX, 0); 4235 if
(error != 0) { 4236 mutex_exit(&stp->sd_lock); 4237 return (error); 4238 } 4239 } 4240 } 4241 stp->sd_flag |= STRPLUMB; 4242 mutex_exit(&stp->sd_lock); 4243 } 4244 return (0); 4245 } 4246 4247 /* 4248 * Complete the plumbing operation associated with stream `stp'. 4249 */ 4250 void 4251 strendplumb(stdata_t *stp) 4252 { 4253 ASSERT(MUTEX_HELD(&stp->sd_lock)); 4254 ASSERT(stp->sd_flag & STRPLUMB); 4255 stp->sd_flag &= ~STRPLUMB; 4256 cv_broadcast(&stp->sd_monitor); 4257 } 4258 4259 /* 4260 * This describes how the STREAMS framework handles synchronization 4261 * during open/push and close/pop. 4262 * The key interfaces for open and close are qprocson and qprocsoff, 4263 * respectively. While the close case in general is harder, both open 4264 * and close have significant similarities. 4265 * 4266 * During close the STREAMS framework has to both ensure that there 4267 * are no stale references to the queue pair (and syncq) that 4268 * are being closed and also provide the guarantees that are documented 4269 * in qprocsoff(9F). 4270 * If there are stale references to the queue that is closing it can 4271 * result in kernel memory corruption or kernel panics. 4272 * 4273 * Note that it is up to the module/driver to ensure that it itself 4274 * does not have any stale references to the closing queues once its close 4275 * routine returns. This includes: 4276 * - Cancelling any timeout/bufcall/qtimeout/qbufcall callback routines 4277 * associated with the queues. For timeout and bufcall callbacks the 4278 * module/driver also has to wait for any callbacks that 4279 * are still in progress. 4280 * - If the module/driver is using esballoc it has to ensure that any 4281 * esballoc free functions do not refer to a queue that has closed. 4282 * (Note that in general the close routine can not wait for the esballoc'ed 4283 * messages to be freed since that can cause a deadlock.) 4284 * - Cancelling any interrupts that refer to the closing queues and 4285 * also ensuring that there are no interrupts in progress that will 4286 * refer to the closing queues once the close routine returns. 4287 * - For multiplexors removing any driver global state that refers to 4288 * the closing queue and also ensuring that there are no threads in 4289 * the multiplexor that have picked up a queue pointer but not yet 4290 * finished using it. 4291 * 4292 * In addition, a driver/module can only reference the q_next pointer 4293 * in its open, close, put, or service procedures or in a 4294 * qtimeout/qbufcall callback procedure executing "on" the correct 4295 * stream. Thus it can not reference the q_next pointer in an interrupt 4296 * routine or a timeout, bufcall or esballoc callback routine. Likewise 4297 * it can not reference q_next of a different queue e.g. in a mux that 4298 * passes messages from one queue's put/service procedure to another queue. 4299 * In all the cases when the driver/module can not access the q_next 4300 * field it must use the *next* versions e.g. canputnext instead of 4301 * canput(q->q_next) and putnextctl instead of putctl(q->q_next, ...). 4302 * 4303 * 4304 * Assuming that the driver/module conforms to the above constraints 4305 * the STREAMS framework has to avoid stale references to q_next for all 4306 * the framework internal cases which include (but are not limited to): 4307 * - Threads in canput/canputnext/backenable and elsewhere that are 4308 * walking q_next. 4309 * - Messages on a syncq that have a reference to the queue through b_queue.
4310 * - Messages on an outer perimeter (syncq) that have a reference to the 4311 * queue through b_queue. 4312 * - Threads that use q_nfsrv (e.g. canput) to find a queue. 4313 * Note that only canput and bcanput use q_nfsrv without any locking. 4314 * 4315 * Providing the qprocsoff(9F) guarantees means that the framework has to 4316 * ensure that, after qprocsoff returns, no threads can 4317 * enter the put or service routines for the closing read or write-side queue. 4318 * In addition to preventing "direct" entry into the put procedures 4319 * the framework also has to prevent messages being drained from 4320 * the syncq or the outer perimeter. 4321 * XXX Note that currently qdetach relies on D_MTOCEXCL as the only 4322 * mechanism to prevent qwriter(PERIM_OUTER) from running after 4323 * qprocsoff has returned. 4324 * Note that if a module/driver uses put(9F) on one of its own queues 4325 * it is up to the module/driver to ensure that the put() doesn't 4326 * get called when the queue is closing. 4327 * 4328 * 4329 * The framework aspects of the above "contract" are implemented by 4330 * qprocsoff, removeq, and strlock: 4331 * - qprocsoff (disable_svc) sets QWCLOSE to prevent runservice from 4332 * entering the service procedures. 4333 * - strlock acquires the sd_lock and sd_reflock to prevent putnext, 4334 * canputnext, backenable etc from dereferencing the q_next that will 4335 * soon change. 4336 * - strlock waits for sd_refcnt to drop to zero so that e.g. any canputnext 4337 * or other q_next walker that uses claimstr/releasestr has finished. 4338 * - optionally for every syncq in the stream strlock acquires all the 4339 * sq_lock's and waits for all sq_counts to drop to a value that indicates 4340 * that no thread executes in the put or service procedures and that no 4341 * thread is draining into the module/driver. This ensures that no 4342 * open, close, put, service, or qtimeout/qbufcall callback procedure is 4343 * currently executing, hence no such thread can end up with the old stale 4344 * q_next value and no canput/backenable can have the old stale 4345 * q_nfsrv/q_next. 4346 * - qdetach (wait_svc) makes sure that any scheduled or running threads 4347 * have either finished or observed the QWCLOSE flag and gone away. 4348 */ 4349 4350 4351 /* 4352 * Get all the locks necessary to change q_next. 4353 * 4354 * Wait for sd_refcnt to reach 0 and, if sqlist is present, wait for the 4355 * sq_count of each syncq in the list to drop to sq_rmqcount, indicating that 4356 * the only threads inside the syncq are threads currently calling removeq(). 4357 * Since threads calling removeq() are in the process of removing their queues 4358 * from the stream, we do not need to worry about them accessing a stale q_next 4359 * pointer and thus we do not need to wait for them to exit (in fact, waiting 4360 * for them can cause deadlock). 4361 * 4362 * This routine is subject to starvation since it does not set any flag to 4363 * prevent threads from entering a module in the stream (i.e. sq_count can 4364 * increase on some syncq while it is waiting on some other syncq.) 4365 * 4366 * Assumes that only one thread attempts to call strlock for a given 4367 * stream. If this is not the case the two threads would deadlock. 4368 * This assumption is guaranteed since strlock is only called by insertq 4369 * and removeq and streams plumbing changes are single-threaded for 4370 * a given stream using the STWOPEN, STRCLOSE, and STRPLUMB flags.
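 *
 * A sketch of the expected calling pattern (cf. removeq() below):
 *
 *	sqlist = sqlist_build(qp, stp, STRMATED(stp));
 *	strlock(stp, sqlist);
 *	... change q_next and related pointers ...
 *	strunlock(stp, sqlist);
 *	sqlist_free(sqlist);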
4371 * 4372 * For pipes, it is not difficult to atomically designate a pair of streams 4373 * to be mated. Once mated atomically by the framework the twisted pair remain 4374 * configured that way until dismantled atomically by the framework. 4375 * When plumbing takes place on a twisted stream it is necessary to ensure that 4376 * this operation is done exclusively on the twisted stream since two such 4377 * operations, each initiated on different ends of the pipe, will deadlock 4378 * waiting for each other to complete. 4379 * 4380 * On entry, no locks should be held. 4381 * The locks acquired and held by strlock depend on a few factors. 4382 * - If sqlist is non-NULL all the syncq locks in the sqlist will be acquired 4383 * and held on exit and all sq_counts are at an acceptable level. 4384 * - In all cases, sd_lock and sd_reflock are acquired and held on exit with 4385 * sd_refcnt being zero. 4386 */ 4387 4388 static void 4389 strlock(struct stdata *stp, sqlist_t *sqlist) 4390 { 4391 syncql_t *sql, *sql2; 4392 retry: 4393 /* 4394 * Wait for any claimstr to go away. 4395 */ 4396 if (STRMATED(stp)) { 4397 struct stdata *stp1, *stp2; 4398 4399 STRLOCKMATES(stp); 4400 /* 4401 * Note that the selection of locking order is not 4402 * important, just that they are always acquired in 4403 * the same order. To ensure this, we choose this 4404 * order based on the value of the pointer, and since 4405 * the pointer will not change for the life of this 4406 * pair, we will always grab the locks in the same 4407 * order (and hence, prevent deadlocks). 4408 */ 4409 if (&(stp->sd_lock) > &((stp->sd_mate)->sd_lock)) { 4410 stp1 = stp; 4411 stp2 = stp->sd_mate; 4412 } else { 4413 stp2 = stp; 4414 stp1 = stp->sd_mate; 4415 } 4416 mutex_enter(&stp1->sd_reflock); 4417 if (stp1->sd_refcnt > 0) { 4418 STRUNLOCKMATES(stp); 4419 cv_wait(&stp1->sd_refmonitor, &stp1->sd_reflock); 4420 mutex_exit(&stp1->sd_reflock); 4421 goto retry; 4422 } 4423 mutex_enter(&stp2->sd_reflock); 4424 if (stp2->sd_refcnt > 0) { 4425 STRUNLOCKMATES(stp); 4426 mutex_exit(&stp1->sd_reflock); 4427 cv_wait(&stp2->sd_refmonitor, &stp2->sd_reflock); 4428 mutex_exit(&stp2->sd_reflock); 4429 goto retry; 4430 } 4431 STREAM_PUTLOCKS_ENTER(stp1); 4432 STREAM_PUTLOCKS_ENTER(stp2); 4433 } else { 4434 mutex_enter(&stp->sd_lock); 4435 mutex_enter(&stp->sd_reflock); 4436 while (stp->sd_refcnt > 0) { 4437 mutex_exit(&stp->sd_lock); 4438 cv_wait(&stp->sd_refmonitor, &stp->sd_reflock); 4439 if (mutex_tryenter(&stp->sd_lock) == 0) { 4440 mutex_exit(&stp->sd_reflock); 4441 mutex_enter(&stp->sd_lock); 4442 mutex_enter(&stp->sd_reflock); 4443 } 4444 } 4445 STREAM_PUTLOCKS_ENTER(stp); 4446 } 4447 4448 if (sqlist == NULL) 4449 return; 4450 4451 for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) { 4452 syncq_t *sq = sql->sql_sq; 4453 uint16_t count; 4454 4455 mutex_enter(SQLOCK(sq)); 4456 count = sq->sq_count; 4457 ASSERT(sq->sq_rmqcount <= count); 4458 SQ_PUTLOCKS_ENTER(sq); 4459 SUM_SQ_PUTCOUNTS(sq, count); 4460 if (count == sq->sq_rmqcount) 4461 continue; 4462 4463 /* Failed - drop all locks that we have acquired so far */ 4464 if (STRMATED(stp)) { 4465 STREAM_PUTLOCKS_EXIT(stp); 4466 STREAM_PUTLOCKS_EXIT(stp->sd_mate); 4467 STRUNLOCKMATES(stp); 4468 mutex_exit(&stp->sd_reflock); 4469 mutex_exit(&stp->sd_mate->sd_reflock); 4470 } else { 4471 STREAM_PUTLOCKS_EXIT(stp); 4472 mutex_exit(&stp->sd_lock); 4473 mutex_exit(&stp->sd_reflock); 4474 } 4475 for (sql2 = sqlist->sqlist_head; sql2 != sql; 4476 sql2 = sql2->sql_next) { 4477
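/* Drop the putlocks and SQLOCKs of the syncqs locked before the failing one. */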
SQ_PUTLOCKS_EXIT(sql2->sql_sq); 4478 mutex_exit(SQLOCK(sql2->sql_sq)); 4479 } 4480 4481 /* 4482 * The wait loop below may starve when there are many threads 4483 * claiming the syncq. This is especially a problem with permod 4484 * syncqs (IP). To lessen the impact of the problem we increment 4485 * sq_needexcl and clear fastbits so that putnexts will slow 4486 * down and call sqenable instead of draining right away. 4487 */ 4488 sq->sq_needexcl++; 4489 SQ_PUTCOUNT_CLRFAST_LOCKED(sq); 4490 while (count > sq->sq_rmqcount) { 4491 sq->sq_flags |= SQ_WANTWAKEUP; 4492 SQ_PUTLOCKS_EXIT(sq); 4493 cv_wait(&sq->sq_wait, SQLOCK(sq)); 4494 count = sq->sq_count; 4495 SQ_PUTLOCKS_ENTER(sq); 4496 SUM_SQ_PUTCOUNTS(sq, count); 4497 } 4498 sq->sq_needexcl--; 4499 if (sq->sq_needexcl == 0) 4500 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 4501 SQ_PUTLOCKS_EXIT(sq); 4502 ASSERT(count == sq->sq_rmqcount); 4503 mutex_exit(SQLOCK(sq)); 4504 goto retry; 4505 } 4506 } 4507 4508 /* 4509 * Drop all the locks that strlock acquired. 4510 */ 4511 static void 4512 strunlock(struct stdata *stp, sqlist_t *sqlist) 4513 { 4514 syncql_t *sql; 4515 4516 if (STRMATED(stp)) { 4517 STREAM_PUTLOCKS_EXIT(stp); 4518 STREAM_PUTLOCKS_EXIT(stp->sd_mate); 4519 STRUNLOCKMATES(stp); 4520 mutex_exit(&stp->sd_reflock); 4521 mutex_exit(&stp->sd_mate->sd_reflock); 4522 } else { 4523 STREAM_PUTLOCKS_EXIT(stp); 4524 mutex_exit(&stp->sd_lock); 4525 mutex_exit(&stp->sd_reflock); 4526 } 4527 4528 if (sqlist == NULL) 4529 return; 4530 4531 for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) { 4532 SQ_PUTLOCKS_EXIT(sql->sql_sq); 4533 mutex_exit(SQLOCK(sql->sql_sq)); 4534 } 4535 } 4536 4537 4538 /* 4539 * Given two read queues, insert a new single one after another. 4540 * 4541 * This routine acquires all the necessary locks in order to change 4542 * q_next and related pointer using strlock(). 4543 * It depends on the stream head ensuring that there are no concurrent 4544 * insertq or removeq on the same stream. The stream head ensures this 4545 * using the flags STWOPEN, STRCLOSE, and STRPLUMB. 4546 * 4547 * Note that no syncq locks are held during the q_next change. This is 4548 * applied to all streams since, unlike removeq, there is no problem of stale 4549 * pointers when adding a module to the stream. Thus drivers/modules that do a 4550 * canput(rq->q_next) would never get a closed/freed queue pointer even if we 4551 * applied this optimization to all streams. 4552 */ 4553 void 4554 insertq(struct stdata *stp, queue_t *new) 4555 { 4556 queue_t *after; 4557 queue_t *wafter; 4558 queue_t *wnew = _WR(new); 4559 boolean_t have_fifo = B_FALSE; 4560 4561 if (new->q_flag & _QINSERTING) { 4562 ASSERT(stp->sd_vnode->v_type != VFIFO); 4563 after = new->q_next; 4564 wafter = _WR(new->q_next); 4565 } else { 4566 after = _RD(stp->sd_wrq); 4567 wafter = stp->sd_wrq; 4568 } 4569 4570 TRACE_2(TR_FAC_STREAMS_FR, TR_INSERTQ, 4571 "insertq:%p, %p", after, new); 4572 ASSERT(after->q_flag & QREADR); 4573 ASSERT(new->q_flag & QREADR); 4574 4575 strlock(stp, NULL); 4576 4577 /* Do we have a FIFO? */ 4578 if (wafter->q_next == after) { 4579 have_fifo = B_TRUE; 4580 wnew->q_next = new; 4581 } else { 4582 wnew->q_next = wafter->q_next; 4583 } 4584 new->q_next = after; 4585 4586 set_nfsrv_ptr(new, wnew, after, wafter); 4587 /* 4588 * set_nfsrv_ptr() needs to know if this is an insertion or not, 4589 * so only reset this flag after calling it. 
4590 */ 4591 new->q_flag &= ~_QINSERTING; 4592 4593 if (have_fifo) { 4594 wafter->q_next = wnew; 4595 } else { 4596 if (wafter->q_next) 4597 _OTHERQ(wafter->q_next)->q_next = new; 4598 wafter->q_next = wnew; 4599 } 4600 4601 set_qend(new); 4602 /* The QEND flag might have to be updated for the upstream guy */ 4603 set_qend(after); 4604 4605 ASSERT(_SAMESTR(new) == O_SAMESTR(new)); 4606 ASSERT(_SAMESTR(wnew) == O_SAMESTR(wnew)); 4607 ASSERT(_SAMESTR(after) == O_SAMESTR(after)); 4608 ASSERT(_SAMESTR(wafter) == O_SAMESTR(wafter)); 4609 strsetuio(stp); 4610 4611 /* 4612 * If this was a module insertion, bump the push count. 4613 */ 4614 if (!(new->q_flag & QISDRV)) 4615 stp->sd_pushcnt++; 4616 4617 strunlock(stp, NULL); 4618 } 4619 4620 /* 4621 * Given a read queue, unlink it from any neighbors. 4622 * 4623 * This routine acquires all the necessary locks in order to 4624 * change q_next and related pointers and also guard against 4625 * stale references (e.g. through q_next) to the queue that 4626 * is being removed. It also plays a part in ensuring 4627 * that the module's/driver's put procedure doesn't get called 4628 * after qprocsoff returns. 4629 * 4630 * Removeq depends on the stream head ensuring that there are 4631 * no concurrent insertq or removeq on the same stream. The 4632 * stream head ensures this using the flags STWOPEN, STRCLOSE and 4633 * STRPLUMB. 4634 * 4635 * The set of locks needed to remove the queue is different in 4636 * different cases: 4637 * 4638 * Acquire sd_lock, sd_reflock, and all the syncq locks in the stream after 4639 * waiting for the syncq reference count to drop to 0 indicating that no 4640 * non-close threads are present anywhere in the stream. This ensures that any 4641 * module/driver can reference q_next in its open, close, put, or service 4642 * procedures. 4643 * 4644 * The sq_rmqcount counter tracks the number of threads inside removeq(). 4645 * strlock() ensures that either there are no threads executing inside the 4646 * perimeter or there is only a thread calling qprocsoff(). 4647 * 4648 * strlock() compares the value of sq_count with the number of threads inside 4649 * removeq() and waits until sq_count is equal to sq_rmqcount. We need to wake up 4650 * any threads waiting in strlock() when sq_rmqcount increases. 4651 */ 4652 4653 void 4654 removeq(queue_t *qp) 4655 { 4656 queue_t *wqp = _WR(qp); 4657 struct stdata *stp = STREAM(qp); 4658 sqlist_t *sqlist = NULL; 4659 boolean_t isdriver; 4660 int moved; 4661 syncq_t *sq = qp->q_syncq; 4662 syncq_t *wsq = wqp->q_syncq; 4663 4664 ASSERT(stp); 4665 4666 TRACE_2(TR_FAC_STREAMS_FR, TR_REMOVEQ, 4667 "removeq:%p %p", qp, wqp); 4668 ASSERT(qp->q_flag&QREADR); 4669 4670 /* 4671 * For queues using Synchronous streams, we must wait for all threads in 4672 * rwnext() to drain out before proceeding.
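 * (Such threads hold q_rwcnt; the code below sets QWANTRMQSYNC and blocks
 * on q_wait until the count drops to zero.)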
4673 */ 4674 if (qp->q_flag & QSYNCSTR) { 4675 /* First, we need to wake up any threads blocked in rwnext() */ 4676 mutex_enter(SQLOCK(sq)); 4677 if (sq->sq_flags & SQ_WANTWAKEUP) { 4678 sq->sq_flags &= ~SQ_WANTWAKEUP; 4679 cv_broadcast(&sq->sq_wait); 4680 } 4681 mutex_exit(SQLOCK(sq)); 4682 4683 if (wsq != sq) { 4684 mutex_enter(SQLOCK(wsq)); 4685 if (wsq->sq_flags & SQ_WANTWAKEUP) { 4686 wsq->sq_flags &= ~SQ_WANTWAKEUP; 4687 cv_broadcast(&wsq->sq_wait); 4688 } 4689 mutex_exit(SQLOCK(wsq)); 4690 } 4691 4692 mutex_enter(QLOCK(qp)); 4693 while (qp->q_rwcnt > 0) { 4694 qp->q_flag |= QWANTRMQSYNC; 4695 cv_wait(&qp->q_wait, QLOCK(qp)); 4696 } 4697 mutex_exit(QLOCK(qp)); 4698 4699 mutex_enter(QLOCK(wqp)); 4700 while (wqp->q_rwcnt > 0) { 4701 wqp->q_flag |= QWANTRMQSYNC; 4702 cv_wait(&wqp->q_wait, QLOCK(wqp)); 4703 } 4704 mutex_exit(QLOCK(wqp)); 4705 } 4706 4707 mutex_enter(SQLOCK(sq)); 4708 sq->sq_rmqcount++; 4709 if (sq->sq_flags & SQ_WANTWAKEUP) { 4710 sq->sq_flags &= ~SQ_WANTWAKEUP; 4711 cv_broadcast(&sq->sq_wait); 4712 } 4713 mutex_exit(SQLOCK(sq)); 4714 4715 isdriver = (qp->q_flag & QISDRV); 4716 4717 sqlist = sqlist_build(qp, stp, STRMATED(stp)); 4718 strlock(stp, sqlist); 4719 4720 reset_nfsrv_ptr(qp, wqp); 4721 4722 ASSERT(wqp->q_next == NULL || backq(qp)->q_next == qp); 4723 ASSERT(qp->q_next == NULL || backq(wqp)->q_next == wqp); 4724 /* Do we have a FIFO? */ 4725 if (wqp->q_next == qp) { 4726 stp->sd_wrq->q_next = _RD(stp->sd_wrq); 4727 } else { 4728 if (wqp->q_next) 4729 backq(qp)->q_next = qp->q_next; 4730 if (qp->q_next) 4731 backq(wqp)->q_next = wqp->q_next; 4732 } 4733 4734 /* The QEND flag might have to be updated for the upstream guy */ 4735 if (qp->q_next) 4736 set_qend(qp->q_next); 4737 4738 ASSERT(_SAMESTR(stp->sd_wrq) == O_SAMESTR(stp->sd_wrq)); 4739 ASSERT(_SAMESTR(_RD(stp->sd_wrq)) == O_SAMESTR(_RD(stp->sd_wrq))); 4740 4741 /* 4742 * Move any messages destined for the put procedures to the next 4743 * syncq in line. Otherwise free them. 4744 */ 4745 moved = 0; 4746 /* 4747 * Quick check to see whether there are any messages or events. 4748 */ 4749 if (qp->q_syncqmsgs != 0 || (qp->q_syncq->sq_flags & SQ_EVENTS)) 4750 moved += propagate_syncq(qp); 4751 if (wqp->q_syncqmsgs != 0 || 4752 (wqp->q_syncq->sq_flags & SQ_EVENTS)) 4753 moved += propagate_syncq(wqp); 4754 4755 strsetuio(stp); 4756 4757 /* 4758 * If this was a module removal, decrement the push count. 4759 */ 4760 if (!isdriver) 4761 stp->sd_pushcnt--; 4762 4763 strunlock(stp, sqlist); 4764 sqlist_free(sqlist); 4765 4766 /* 4767 * Make sure any messages that were propagated are drained. 4768 * Also clear any QFULL bit caused by messages that were propagated. 4769 */ 4770 4771 if (qp->q_next != NULL) { 4772 clr_qfull(qp); 4773 /* 4774 * For the driver calling qprocsoff, propagate_syncq 4775 * frees all the messages instead of putting them in 4776 * the stream head 4777 */ 4778 if (!isdriver && (moved > 0)) 4779 emptysq(qp->q_next->q_syncq); 4780 } 4781 if (wqp->q_next != NULL) { 4782 clr_qfull(wqp); 4783 /* 4784 * We come here for any pop of a module except for the 4785 * case of a driver being removed. We don't call emptysq 4786 * if we did not move any messages. This avoids holding 4787 * PERMOD syncq locks in emptysq. 4788 */ 4789 if (moved > 0) 4790 emptysq(wqp->q_next->q_syncq); 4791 } 4792 4793 mutex_enter(SQLOCK(sq)); 4794 sq->sq_rmqcount--; 4795 mutex_exit(SQLOCK(sq)); 4796 } 4797 4798 /* 4799 * Prevent further entry by setting a flag (like SQ_FROZEN, SQ_BLOCKED or 4800 * SQ_WRITER) on a syncq.
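 * For example, strblock() below calls blocksq(sq, SQ_FROZEN, -1) on every
 * syncq in the stream, while outer_insert()/outer_remove() use
 * blocksq(outer, SQ_BLOCKED, 0).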
4801 * If maxcnt is not -1 it assumes that the caller has "maxcnt" claim(s) on the 4802 * sync queue and waits until sq_count reaches maxcnt. 4803 * 4804 * If maxcnt is -1 there's no need to grab sq_putlocks since the caller 4805 * does not care about putnext threads that are in the middle of calling put 4806 * entry points. 4807 * 4808 * This routine is used for both inner and outer syncqs. 4809 */ 4810 static void 4811 blocksq(syncq_t *sq, ushort_t flag, int maxcnt) 4812 { 4813 uint16_t count = 0; 4814 4815 mutex_enter(SQLOCK(sq)); 4816 /* 4817 * Wait for SQ_FROZEN/SQ_BLOCKED to be reset. 4818 * SQ_FROZEN will be set if there is a frozen stream that has a 4819 * queue which also refers to this "shared" syncq. 4820 * SQ_BLOCKED will be set if there is an "off" queue which also 4821 * refers to this "shared" syncq. 4822 */ 4823 if (maxcnt != -1) { 4824 count = sq->sq_count; 4825 SQ_PUTLOCKS_ENTER(sq); 4826 SQ_PUTCOUNT_CLRFAST_LOCKED(sq); 4827 SUM_SQ_PUTCOUNTS(sq, count); 4828 } 4829 sq->sq_needexcl++; 4830 ASSERT(sq->sq_needexcl != 0); /* wraparound */ 4831 4832 while ((sq->sq_flags & flag) || 4833 (maxcnt != -1 && count > (unsigned)maxcnt)) { 4834 sq->sq_flags |= SQ_WANTWAKEUP; 4835 if (maxcnt != -1) { 4836 SQ_PUTLOCKS_EXIT(sq); 4837 } 4838 cv_wait(&sq->sq_wait, SQLOCK(sq)); 4839 if (maxcnt != -1) { 4840 count = sq->sq_count; 4841 SQ_PUTLOCKS_ENTER(sq); 4842 SUM_SQ_PUTCOUNTS(sq, count); 4843 } 4844 } 4845 sq->sq_needexcl--; 4846 sq->sq_flags |= flag; 4847 ASSERT(maxcnt == -1 || count == maxcnt); 4848 if (maxcnt != -1) { 4849 if (sq->sq_needexcl == 0) { 4850 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 4851 } 4852 SQ_PUTLOCKS_EXIT(sq); 4853 } else if (sq->sq_needexcl == 0) { 4854 SQ_PUTCOUNT_SETFAST(sq); 4855 } 4856 4857 mutex_exit(SQLOCK(sq)); 4858 } 4859 4860 /* 4861 * Reset a flag that was set with blocksq. 4862 * 4863 * Can not use this routine to reset SQ_WRITER. 4864 * 4865 * If "isouter" is set then the syncq is assumed to be an outer perimeter 4866 * and drain_syncq is not called. Instead we rely on the qwriter_outer thread 4867 * to handle the queued qwriter operations. 4868 * 4869 * no need to grab sq_putlocks here. See comment in strsubr.h that explains when 4870 * sq_putlocks are used. 4871 */ 4872 static void 4873 unblocksq(syncq_t *sq, uint16_t resetflag, int isouter) 4874 { 4875 uint16_t flags; 4876 4877 mutex_enter(SQLOCK(sq)); 4878 ASSERT(resetflag != SQ_WRITER); 4879 ASSERT(sq->sq_flags & resetflag); 4880 flags = sq->sq_flags & ~resetflag; 4881 sq->sq_flags = flags; 4882 if (flags & (SQ_QUEUED | SQ_WANTWAKEUP)) { 4883 if (flags & SQ_WANTWAKEUP) { 4884 flags &= ~SQ_WANTWAKEUP; 4885 cv_broadcast(&sq->sq_wait); 4886 } 4887 sq->sq_flags = flags; 4888 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) { 4889 if (!isouter) { 4890 /* drain_syncq drops SQLOCK */ 4891 drain_syncq(sq); 4892 return; 4893 } 4894 } 4895 } 4896 mutex_exit(SQLOCK(sq)); 4897 } 4898 4899 /* 4900 * Reset a flag that was set with blocksq. 4901 * Does not drain the syncq. Use emptysq() for that. 4902 * Returns 1 if SQ_QUEUED is set. Otherwise 0. 4903 * 4904 * no need to grab sq_putlocks here. See comment in strsubr.h that explains when 4905 * sq_putlocks are used.
4906 */ 4907 static int 4908 dropsq(syncq_t *sq, uint16_t resetflag) 4909 { 4910 uint16_t flags; 4911 4912 mutex_enter(SQLOCK(sq)); 4913 ASSERT(sq->sq_flags & resetflag); 4914 flags = sq->sq_flags & ~resetflag; 4915 if (flags & SQ_WANTWAKEUP) { 4916 flags &= ~SQ_WANTWAKEUP; 4917 cv_broadcast(&sq->sq_wait); 4918 } 4919 sq->sq_flags = flags; 4920 mutex_exit(SQLOCK(sq)); 4921 if (flags & SQ_QUEUED) 4922 return (1); 4923 return (0); 4924 } 4925 4926 /* 4927 * Empty all the messages on a syncq. 4928 * 4929 * no need to grab sq_putlocks here. See comment in strsubr.h that explains when 4930 * sq_putlocks are used. 4931 */ 4932 static void 4933 emptysq(syncq_t *sq) 4934 { 4935 uint16_t flags; 4936 4937 mutex_enter(SQLOCK(sq)); 4938 flags = sq->sq_flags; 4939 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) { 4940 /* 4941 * To prevent potential recursive invocation of drain_syncq we 4942 * do not call drain_syncq if count is non-zero. 4943 */ 4944 if (sq->sq_count == 0) { 4945 /* drain_syncq() drops SQLOCK */ 4946 drain_syncq(sq); 4947 return; 4948 } else 4949 sqenable(sq); 4950 } 4951 mutex_exit(SQLOCK(sq)); 4952 } 4953 4954 /* 4955 * Ordered insert while removing duplicates. 4956 */ 4957 static void 4958 sqlist_insert(sqlist_t *sqlist, syncq_t *sqp) 4959 { 4960 syncql_t *sqlp, **prev_sqlpp, *new_sqlp; 4961 4962 prev_sqlpp = &sqlist->sqlist_head; 4963 while ((sqlp = *prev_sqlpp) != NULL) { 4964 if (sqlp->sql_sq >= sqp) { 4965 if (sqlp->sql_sq == sqp) /* duplicate */ 4966 return; 4967 break; 4968 } 4969 prev_sqlpp = &sqlp->sql_next; 4970 } 4971 new_sqlp = &sqlist->sqlist_array[sqlist->sqlist_index++]; 4972 ASSERT((char *)new_sqlp < (char *)sqlist + sqlist->sqlist_size); 4973 new_sqlp->sql_next = sqlp; 4974 new_sqlp->sql_sq = sqp; 4975 *prev_sqlpp = new_sqlp; 4976 } 4977 4978 /* 4979 * Walk the write side queues until we hit either the driver 4980 * or a twist in the stream (_SAMESTR will return false in both 4981 * these cases) then turn around and walk the read side queues 4982 * back up to the stream head. 4983 */ 4984 static void 4985 sqlist_insertall(sqlist_t *sqlist, queue_t *q) 4986 { 4987 while (q != NULL) { 4988 sqlist_insert(sqlist, q->q_syncq); 4989 4990 if (_SAMESTR(q)) 4991 q = q->q_next; 4992 else if (!(q->q_flag & QREADR)) 4993 q = _RD(q); 4994 else 4995 q = NULL; 4996 } 4997 } 4998 4999 /* 5000 * Allocate and build a list of all syncqs in a stream and the syncq(s) 5001 * associated with the "q" parameter. The resulting list is sorted in a 5002 * canonical order and is free of duplicates. 5003 * Assumes the passed queue is a _RD(q). 5004 */ 5005 static sqlist_t * 5006 sqlist_build(queue_t *q, struct stdata *stp, boolean_t do_twist) 5007 { 5008 sqlist_t *sqlist = sqlist_alloc(stp, KM_SLEEP); 5009 5010 /* 5011 * start with the current queue/qpair 5012 */ 5013 ASSERT(q->q_flag & QREADR); 5014 5015 sqlist_insert(sqlist, q->q_syncq); 5016 sqlist_insert(sqlist, _WR(q)->q_syncq); 5017 5018 sqlist_insertall(sqlist, stp->sd_wrq); 5019 if (do_twist) 5020 sqlist_insertall(sqlist, stp->sd_mate->sd_wrq); 5021 5022 return (sqlist); 5023 } 5024 5025 static sqlist_t * 5026 sqlist_alloc(struct stdata *stp, int kmflag) 5027 { 5028 size_t sqlist_size; 5029 sqlist_t *sqlist; 5030 5031 /* 5032 * Allocate 2 syncql_t's for each pushed module. Note that 5033 * the sqlist_t structure already has 4 syncql_t's built in: 5034 * 2 for the stream head, and 2 for the driver/other stream head. 
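 * E.g. with two pushed modules (sd_pushcnt == 2) this allocates room for
 * 2 * 2 = 4 additional syncql_t's on top of the 4 built-in ones.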
5035 */ 5036 sqlist_size = 2 * sizeof (syncql_t) * stp->sd_pushcnt + 5037 sizeof (sqlist_t); 5038 if (STRMATED(stp)) 5039 sqlist_size += 2 * sizeof (syncql_t) * stp->sd_mate->sd_pushcnt; 5040 sqlist = kmem_alloc(sqlist_size, kmflag); 5041 5042 sqlist->sqlist_head = NULL; 5043 sqlist->sqlist_size = sqlist_size; 5044 sqlist->sqlist_index = 0; 5045 5046 return (sqlist); 5047 } 5048 5049 /* 5050 * Free the list created by sqlist_alloc() 5051 */ 5052 static void 5053 sqlist_free(sqlist_t *sqlist) 5054 { 5055 kmem_free(sqlist, sqlist->sqlist_size); 5056 } 5057 5058 /* 5059 * Prevent any new entries into any syncq in this stream. 5060 * Used by freezestr. 5061 */ 5062 void 5063 strblock(queue_t *q) 5064 { 5065 struct stdata *stp; 5066 syncql_t *sql; 5067 sqlist_t *sqlist; 5068 5069 q = _RD(q); 5070 5071 stp = STREAM(q); 5072 ASSERT(stp != NULL); 5073 5074 /* 5075 * Get a sorted list with all the duplicates removed containing 5076 * all the syncqs referenced by this stream. 5077 */ 5078 sqlist = sqlist_build(q, stp, B_FALSE); 5079 for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next) 5080 blocksq(sql->sql_sq, SQ_FROZEN, -1); 5081 sqlist_free(sqlist); 5082 } 5083 5084 /* 5085 * Release the block on new entries into this stream 5086 */ 5087 void 5088 strunblock(queue_t *q) 5089 { 5090 struct stdata *stp; 5091 syncql_t *sql; 5092 sqlist_t *sqlist; 5093 int drain_needed; 5094 5095 q = _RD(q); 5096 5097 /* 5098 * Get a sorted list with all the duplicates removed containing 5099 * all the syncqs referenced by this stream. 5100 * Have to drop the SQ_FROZEN flag on all the syncqs before 5101 * starting to drain them; otherwise the draining might 5102 * cause a freezestr in some module on the stream (which 5103 * would deadlock.) 5104 */ 5105 stp = STREAM(q); 5106 ASSERT(stp != NULL); 5107 sqlist = sqlist_build(q, stp, B_FALSE); 5108 drain_needed = 0; 5109 for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next) 5110 drain_needed += dropsq(sql->sql_sq, SQ_FROZEN); 5111 if (drain_needed) { 5112 for (sql = sqlist->sqlist_head; sql != NULL; 5113 sql = sql->sql_next) 5114 emptysq(sql->sql_sq); 5115 } 5116 sqlist_free(sqlist); 5117 } 5118 5119 #ifdef DEBUG 5120 static int 5121 qprocsareon(queue_t *rq) 5122 { 5123 if (rq->q_next == NULL) 5124 return (0); 5125 return (_WR(rq->q_next)->q_next == _WR(rq)); 5126 } 5127 5128 int 5129 qclaimed(queue_t *q) 5130 { 5131 uint_t count; 5132 5133 count = q->q_syncq->sq_count; 5134 SUM_SQ_PUTCOUNTS(q->q_syncq, count); 5135 return (count != 0); 5136 } 5137 5138 /* 5139 * Check if anyone has frozen this stream with freezestr 5140 */ 5141 int 5142 frozenstr(queue_t *q) 5143 { 5144 return ((q->q_syncq->sq_flags & SQ_FROZEN) != 0); 5145 } 5146 #endif /* DEBUG */ 5147 5148 /* 5149 * Enter a queue. 5150 * Obsolete interface. Should not be used. 5151 */ 5152 void 5153 enterq(queue_t *q) 5154 { 5155 entersq(q->q_syncq, SQ_CALLBACK); 5156 } 5157 5158 void 5159 leaveq(queue_t *q) 5160 { 5161 leavesq(q->q_syncq, SQ_CALLBACK); 5162 } 5163 5164 /* 5165 * Enter a perimeter. c_inner and c_outer specify which concurrency bits 5166 * to check. 5167 * Wait if SQ_QUEUED is set to preserve ordering between messages and qwriter 5168 * calls and the running of open, close and service procedures. 5169 * 5170 * if the c_inner bit is set there is no need to grab sq_putlocks since we don't care 5171 * if other threads have entered or are entering the put entry point.
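 * (The entrypoint argument carries the concurrency bits for the entry
 * point being used; e.g. enterq() above passes SQ_CALLBACK.)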
5172 * 5173 * if the c_inner bit is set it might have been possible to use 5174 * sq_putlocks/sq_putcounts instead of SQLOCK/sq_count (e.g. to optimize 5175 * open/close path for IP) but since the count may need to be decremented in 5176 * qwait() we wouldn't know which counter to decrement. Currently the counter is 5177 * selected by the current cpu_seqid and the current CPU can change at any moment. XXX 5178 * in the future we might use curthread id bits to select the counter and this 5179 * would stay constant across routine calls. 5180 */ 5181 void 5182 entersq(syncq_t *sq, int entrypoint) 5183 { 5184 uint16_t count = 0; 5185 uint16_t flags; 5186 uint16_t waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL; 5187 uint16_t type; 5188 uint_t c_inner = entrypoint & SQ_CI; 5189 uint_t c_outer = entrypoint & SQ_CO; 5190 5191 /* 5192 * Increment ref count to keep closes out of this queue. 5193 */ 5194 ASSERT(sq); 5195 ASSERT(c_inner && c_outer); 5196 mutex_enter(SQLOCK(sq)); 5197 flags = sq->sq_flags; 5198 type = sq->sq_type; 5199 if (!(type & c_inner)) { 5200 /* Make sure all putcounts now use slowlock. */ 5201 count = sq->sq_count; 5202 SQ_PUTLOCKS_ENTER(sq); 5203 SQ_PUTCOUNT_CLRFAST_LOCKED(sq); 5204 SUM_SQ_PUTCOUNTS(sq, count); 5205 sq->sq_needexcl++; 5206 ASSERT(sq->sq_needexcl != 0); /* wraparound */ 5207 waitflags |= SQ_MESSAGES; 5208 } 5209 /* 5210 * Wait until we can enter the inner perimeter. 5211 * If we want exclusive access we wait until sq_count is 0. 5212 * We have to do this before entering the outer perimeter in order 5213 * to preserve put/close message ordering. 5214 */ 5215 while ((flags & waitflags) || (!(type & c_inner) && count != 0)) { 5216 sq->sq_flags = flags | SQ_WANTWAKEUP; 5217 if (!(type & c_inner)) { 5218 SQ_PUTLOCKS_EXIT(sq); 5219 } 5220 cv_wait(&sq->sq_wait, SQLOCK(sq)); 5221 if (!(type & c_inner)) { 5222 count = sq->sq_count; 5223 SQ_PUTLOCKS_ENTER(sq); 5224 SUM_SQ_PUTCOUNTS(sq, count); 5225 } 5226 flags = sq->sq_flags; 5227 } 5228 5229 if (!(type & c_inner)) { 5230 ASSERT(sq->sq_needexcl > 0); 5231 sq->sq_needexcl--; 5232 if (sq->sq_needexcl == 0) { 5233 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 5234 } 5235 } 5236 5237 /* Check if we need to enter the outer perimeter */ 5238 if (!(type & c_outer)) { 5239 /* 5240 * We have to enter the outer perimeter exclusively before 5241 * we can increment sq_count to avoid deadlock. This implies 5242 * that we have to re-check sq_flags and sq_count. 5243 * 5244 * is it possible to have c_inner set when c_outer is not set? 5245 */ 5246 if (!(type & c_inner)) { 5247 SQ_PUTLOCKS_EXIT(sq); 5248 } 5249 mutex_exit(SQLOCK(sq)); 5250 outer_enter(sq->sq_outer, SQ_GOAWAY); 5251 mutex_enter(SQLOCK(sq)); 5252 flags = sq->sq_flags; 5253 /* 5254 * there should be no need to recheck sq_putcounts 5255 * because outer_enter() has already waited for them to clear 5256 * after setting SQ_WRITER. 5257 */ 5258 count = sq->sq_count; 5259 #ifdef DEBUG 5260 /* 5261 * SUMCHECK_SQ_PUTCOUNTS should return the sum instead 5262 * of doing an ASSERT internally. Others should do 5263 * something like 5264 * ASSERT(SUMCHECK_SQ_PUTCOUNTS(sq) == 0); 5265 * without the need to #ifdef DEBUG it.
5266 */ 5267 SUMCHECK_SQ_PUTCOUNTS(sq, 0); 5268 #endif 5269 while ((flags & (SQ_EXCL|SQ_BLOCKED|SQ_FROZEN)) || 5270 (!(type & c_inner) && count != 0)) { 5271 sq->sq_flags = flags | SQ_WANTWAKEUP; 5272 cv_wait(&sq->sq_wait, SQLOCK(sq)); 5273 count = sq->sq_count; 5274 flags = sq->sq_flags; 5275 } 5276 } 5277 5278 sq->sq_count++; 5279 ASSERT(sq->sq_count != 0); /* Wraparound */ 5280 if (!(type & c_inner)) { 5281 /* Exclusive entry */ 5282 ASSERT(sq->sq_count == 1); 5283 sq->sq_flags |= SQ_EXCL; 5284 if (type & c_outer) { 5285 SQ_PUTLOCKS_EXIT(sq); 5286 } 5287 } 5288 mutex_exit(SQLOCK(sq)); 5289 } 5290 5291 /* 5292 * Leave a syncq. Announce to the framework that closes may proceed. 5293 * c_inner and c_outer specify which concurrency bits 5294 * to check. 5295 * 5296 * Must never be called from a driver or module put entry point. 5297 * 5298 * no need to grab sq_putlocks here. See comment in strsubr.h that explains when 5299 * sq_putlocks are used. 5300 */ 5301 void 5302 leavesq(syncq_t *sq, int entrypoint) 5303 { 5304 uint16_t flags; 5305 uint16_t type; 5306 uint_t c_outer = entrypoint & SQ_CO; 5307 #ifdef DEBUG 5308 uint_t c_inner = entrypoint & SQ_CI; 5309 #endif 5310 5311 /* 5312 * Decrement the ref count, drain the syncq if possible, and wake up 5313 * any waiting close. 5314 */ 5315 ASSERT(sq); 5316 ASSERT(c_inner && c_outer); 5317 mutex_enter(SQLOCK(sq)); 5318 flags = sq->sq_flags; 5319 type = sq->sq_type; 5320 if (flags & (SQ_QUEUED|SQ_WANTWAKEUP|SQ_WANTEXWAKEUP)) { 5321 5322 if (flags & SQ_WANTWAKEUP) { 5323 flags &= ~SQ_WANTWAKEUP; 5324 cv_broadcast(&sq->sq_wait); 5325 } 5326 if (flags & SQ_WANTEXWAKEUP) { 5327 flags &= ~SQ_WANTEXWAKEUP; 5328 cv_broadcast(&sq->sq_exitwait); 5329 } 5330 5331 if ((flags & SQ_QUEUED) && !(flags & SQ_STAYAWAY)) { 5332 /* 5333 * The syncq needs to be drained. "Exit" the syncq 5334 * before calling drain_syncq. 5335 */ 5336 ASSERT(sq->sq_count != 0); 5337 sq->sq_count--; 5338 ASSERT((flags & SQ_EXCL) || (type & c_inner)); 5339 sq->sq_flags = flags & ~SQ_EXCL; 5340 drain_syncq(sq); 5341 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq))); 5342 /* Check if we need to exit the outer perimeter */ 5343 /* XXX will this ever be true? */ 5344 if (!(type & c_outer)) 5345 outer_exit(sq->sq_outer); 5346 return; 5347 } 5348 } 5349 ASSERT(sq->sq_count != 0); 5350 sq->sq_count--; 5351 ASSERT((flags & SQ_EXCL) || (type & c_inner)); 5352 sq->sq_flags = flags & ~SQ_EXCL; 5353 mutex_exit(SQLOCK(sq)); 5354 5355 /* Check if we need to exit the outer perimeter */ 5356 if (!(sq->sq_type & c_outer)) 5357 outer_exit(sq->sq_outer); 5358 } 5359 5360 /* 5361 * Prevent q_next from changing in this stream by incrementing sq_count. 5362 * 5363 * no need to grab sq_putlocks here. See comment in strsubr.h that explains when 5364 * sq_putlocks are used. 5365 */ 5366 void 5367 claimq(queue_t *qp) 5368 { 5369 syncq_t *sq = qp->q_syncq; 5370 5371 mutex_enter(SQLOCK(sq)); 5372 sq->sq_count++; 5373 ASSERT(sq->sq_count != 0); /* Wraparound */ 5374 mutex_exit(SQLOCK(sq)); 5375 } 5376 5377 /* 5378 * Undo claimq. 5379 * 5380 * no need to grab sq_putlocks here. See comment in strsubr.h that explains when 5381 * sq_putlocks are used.
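 *
 * Typical usage brackets a callback, as in write_now() below:
 *
 *	claimq(q);
 *	(*func)(q, mp);
 *	releaseq(q);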
5382 */ 5383 void 5384 releaseq(queue_t *qp) 5385 { 5386 syncq_t *sq = qp->q_syncq; 5387 uint16_t flags; 5388 5389 mutex_enter(SQLOCK(sq)); 5390 ASSERT(sq->sq_count > 0); 5391 sq->sq_count--; 5392 5393 flags = sq->sq_flags; 5394 if (flags & (SQ_WANTWAKEUP|SQ_QUEUED)) { 5395 if (flags & SQ_WANTWAKEUP) { 5396 flags &= ~SQ_WANTWAKEUP; 5397 cv_broadcast(&sq->sq_wait); 5398 } 5399 sq->sq_flags = flags; 5400 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) { 5401 /* 5402 * To prevent potential recursive invocation of 5403 * drain_syncq we do not call drain_syncq if count is 5404 * non-zero. 5405 */ 5406 if (sq->sq_count == 0) { 5407 drain_syncq(sq); 5408 return; 5409 } else 5410 sqenable(sq); 5411 } 5412 } 5413 mutex_exit(SQLOCK(sq)); 5414 } 5415 5416 /* 5417 * Prevent q_next from changing in this stream by incrementing sd_refcnt. 5418 */ 5419 void 5420 claimstr(queue_t *qp) 5421 { 5422 struct stdata *stp = STREAM(qp); 5423 5424 mutex_enter(&stp->sd_reflock); 5425 stp->sd_refcnt++; 5426 ASSERT(stp->sd_refcnt != 0); /* Wraparound */ 5427 mutex_exit(&stp->sd_reflock); 5428 } 5429 5430 /* 5431 * Undo claimstr. 5432 */ 5433 void 5434 releasestr(queue_t *qp) 5435 { 5436 struct stdata *stp = STREAM(qp); 5437 5438 mutex_enter(&stp->sd_reflock); 5439 ASSERT(stp->sd_refcnt != 0); 5440 if (--stp->sd_refcnt == 0) 5441 cv_broadcast(&stp->sd_refmonitor); 5442 mutex_exit(&stp->sd_reflock); 5443 } 5444 5445 static syncq_t * 5446 new_syncq(void) 5447 { 5448 return (kmem_cache_alloc(syncq_cache, KM_SLEEP)); 5449 } 5450 5451 static void 5452 free_syncq(syncq_t *sq) 5453 { 5454 ASSERT(sq->sq_head == NULL); 5455 ASSERT(sq->sq_outer == NULL); 5456 ASSERT(sq->sq_callbpend == NULL); 5457 ASSERT((sq->sq_onext == NULL && sq->sq_oprev == NULL) || 5458 (sq->sq_onext == sq && sq->sq_oprev == sq)); 5459 5460 if (sq->sq_ciputctrl != NULL) { 5461 ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1); 5462 SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl, 5463 sq->sq_nciputctrl, 0); 5464 ASSERT(ciputctrl_cache != NULL); 5465 kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl); 5466 } 5467 5468 sq->sq_tail = NULL; 5469 sq->sq_evhead = NULL; 5470 sq->sq_evtail = NULL; 5471 sq->sq_ciputctrl = NULL; 5472 sq->sq_nciputctrl = 0; 5473 sq->sq_count = 0; 5474 sq->sq_rmqcount = 0; 5475 sq->sq_callbflags = 0; 5476 sq->sq_cancelid = 0; 5477 sq->sq_next = NULL; 5478 sq->sq_needexcl = 0; 5479 sq->sq_svcflags = 0; 5480 sq->sq_nqueues = 0; 5481 sq->sq_pri = 0; 5482 sq->sq_onext = NULL; 5483 sq->sq_oprev = NULL; 5484 sq->sq_flags = 0; 5485 sq->sq_type = 0; 5486 sq->sq_servcount = 0; 5487 5488 kmem_cache_free(syncq_cache, sq); 5489 } 5490 5491 /* Outer perimeter code */ 5492 5493 /* 5494 * The outer syncq uses the fields and flags in the syncq slightly 5495 * differently from the inner syncqs. 5496 * sq_count Incremented when there are pending or running 5497 * writers at the outer perimeter to prevent the set of 5498 * inner syncqs that belong to the outer perimeter from 5499 * changing. 5500 * sq_head/tail List of deferred qwriter(OUTER) operations. 5501 * 5502 * SQ_BLOCKED Set to prevent traversing of sq_next/sq_prev while 5503 * inner syncqs are added to or removed from the 5504 * outer perimeter. 5505 * SQ_QUEUED sq_head/tail has messages or events queued. 5506 * 5507 * SQ_WRITER A thread is currently traversing all the inner syncqs 5508 * setting the SQ_WRITER flag. 5509 */ 5510 5511 /* 5512 * Get write access at the outer perimeter.
5513 * Note that read access is done by entersq, putnext, and put by simply 5514 * incrementing sq_count in the inner syncq. 5515 * 5516 * Waits until "flags" is no longer set in the outer to prevent multiple 5517 * threads from having write access at the same time. SQ_WRITER has to be part 5518 * of "flags". 5519 * 5520 * Increases sq_count on the outer syncq to keep away outer_insert/remove 5521 * until the outer_exit is finished. 5522 * 5523 * outer_enter is vulnerable to starvation since it does not prevent new 5524 * threads from entering the inner syncqs while it is waiting for sq_count to 5525 * go to zero. 5526 */ 5527 void 5528 outer_enter(syncq_t *outer, uint16_t flags) 5529 { 5530 syncq_t *sq; 5531 int wait_needed; 5532 uint16_t count; 5533 5534 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5535 outer->sq_oprev != NULL); 5536 ASSERT(flags & SQ_WRITER); 5537 5538 retry: 5539 mutex_enter(SQLOCK(outer)); 5540 while (outer->sq_flags & flags) { 5541 outer->sq_flags |= SQ_WANTWAKEUP; 5542 cv_wait(&outer->sq_wait, SQLOCK(outer)); 5543 } 5544 5545 ASSERT(!(outer->sq_flags & SQ_WRITER)); 5546 outer->sq_flags |= SQ_WRITER; 5547 outer->sq_count++; 5548 ASSERT(outer->sq_count != 0); /* wraparound */ 5549 wait_needed = 0; 5550 /* 5551 * Set SQ_WRITER on all the inner syncqs while holding 5552 * the SQLOCK on the outer syncq. This ensures that the changing 5553 * of SQ_WRITER is atomic under the outer SQLOCK. 5554 */ 5555 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) { 5556 mutex_enter(SQLOCK(sq)); 5557 count = sq->sq_count; 5558 SQ_PUTLOCKS_ENTER(sq); 5559 sq->sq_flags |= SQ_WRITER; 5560 SUM_SQ_PUTCOUNTS(sq, count); 5561 if (count != 0) 5562 wait_needed = 1; 5563 SQ_PUTLOCKS_EXIT(sq); 5564 mutex_exit(SQLOCK(sq)); 5565 } 5566 mutex_exit(SQLOCK(outer)); 5567 5568 /* 5569 * Get everybody out of the syncqs sequentially. 5570 * Note that we don't actually need to acquire the PUTLOCKS, since 5571 * we have already cleared the fastbit, and set SQ_WRITER. By 5572 * definition, the count can not increase since putnext will 5573 * take the slowlock path (and the purpose of acquiring the 5574 * putlocks was to make sure it didn't increase while we were 5575 * waiting). 5576 * 5577 * Note that we still acquire the PUTLOCKS to be safe. 5578 */ 5579 if (wait_needed) { 5580 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) { 5581 mutex_enter(SQLOCK(sq)); 5582 count = sq->sq_count; 5583 SQ_PUTLOCKS_ENTER(sq); 5584 SUM_SQ_PUTCOUNTS(sq, count); 5585 while (count != 0) { 5586 sq->sq_flags |= SQ_WANTWAKEUP; 5587 SQ_PUTLOCKS_EXIT(sq); 5588 cv_wait(&sq->sq_wait, SQLOCK(sq)); 5589 count = sq->sq_count; 5590 SQ_PUTLOCKS_ENTER(sq); 5591 SUM_SQ_PUTCOUNTS(sq, count); 5592 } 5593 SQ_PUTLOCKS_EXIT(sq); 5594 mutex_exit(SQLOCK(sq)); 5595 } 5596 /* 5597 * Verify that none of the flags got set while we 5598 * were waiting for the sq_counts to drop. 5599 * If this happens we exit and retry entering the 5600 * outer perimeter. 5601 */ 5602 mutex_enter(SQLOCK(outer)); 5603 if (outer->sq_flags & (flags & ~SQ_WRITER)) { 5604 mutex_exit(SQLOCK(outer)); 5605 outer_exit(outer); 5606 goto retry; 5607 } 5608 mutex_exit(SQLOCK(outer)); 5609 } 5610 } 5611 5612 /* 5613 * Drop the write access at the outer perimeter. 5614 * Read access is dropped implicitly (by putnext, put, and leavesq) by 5615 * decrementing sq_count.
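 * Every successful outer_enter() must be balanced by an outer_exit();
 * qwriter_outer() below shows the pattern.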
5616 */ 5617 void 5618 outer_exit(syncq_t *outer) 5619 { 5620 syncq_t *sq; 5621 int drain_needed; 5622 uint16_t flags; 5623 5624 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5625 outer->sq_oprev != NULL); 5626 ASSERT(MUTEX_NOT_HELD(SQLOCK(outer))); 5627 5628 /* 5629 * Atomically (from the perspective of threads calling become_writer) 5630 * drop the write access at the outer perimeter by holding 5631 * SQLOCK(outer) across all the dropsq calls and the resetting of 5632 * SQ_WRITER. 5633 * This defines a locking order between the outer perimeter 5634 * SQLOCK and the inner perimeter SQLOCKs. 5635 */ 5636 mutex_enter(SQLOCK(outer)); 5637 flags = outer->sq_flags; 5638 ASSERT(outer->sq_flags & SQ_WRITER); 5639 if (flags & SQ_QUEUED) { 5640 write_now(outer); 5641 flags = outer->sq_flags; 5642 } 5643 5644 /* 5645 * sq_onext is stable since sq_count has not yet been decreased. 5646 * Reset the SQ_WRITER flags in all syncqs. 5647 * After dropping SQ_WRITER on the outer syncq we empty all the 5648 * inner syncqs. 5649 */ 5650 drain_needed = 0; 5651 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) 5652 drain_needed += dropsq(sq, SQ_WRITER); 5653 ASSERT(!(outer->sq_flags & SQ_QUEUED)); 5654 flags &= ~SQ_WRITER; 5655 if (drain_needed) { 5656 outer->sq_flags = flags; 5657 mutex_exit(SQLOCK(outer)); 5658 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) 5659 emptysq(sq); 5660 mutex_enter(SQLOCK(outer)); 5661 flags = outer->sq_flags; 5662 } 5663 if (flags & SQ_WANTWAKEUP) { 5664 flags &= ~SQ_WANTWAKEUP; 5665 cv_broadcast(&outer->sq_wait); 5666 } 5667 outer->sq_flags = flags; 5668 ASSERT(outer->sq_count > 0); 5669 outer->sq_count--; 5670 mutex_exit(SQLOCK(outer)); 5671 } 5672 5673 /* 5674 * Add another syncq to an outer perimeter. 5675 * Block out all other access to the outer perimeter while it is being 5676 * changed using blocksq. 5677 * Assumes that the caller has *not* done an outer_enter. 5678 * 5679 * Vulnerable to starvation in blocksq. 5680 */ 5681 static void 5682 outer_insert(syncq_t *outer, syncq_t *sq) 5683 { 5684 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5685 outer->sq_oprev != NULL); 5686 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL && 5687 sq->sq_oprev == NULL); /* Can't be in an outer perimeter */ 5688 5689 /* Get exclusive access to the outer perimeter list */ 5690 blocksq(outer, SQ_BLOCKED, 0); 5691 ASSERT(outer->sq_flags & SQ_BLOCKED); 5692 ASSERT(!(outer->sq_flags & SQ_WRITER)); 5693 5694 mutex_enter(SQLOCK(sq)); 5695 sq->sq_outer = outer; 5696 outer->sq_onext->sq_oprev = sq; 5697 sq->sq_onext = outer->sq_onext; 5698 outer->sq_onext = sq; 5699 sq->sq_oprev = outer; 5700 mutex_exit(SQLOCK(sq)); 5701 unblocksq(outer, SQ_BLOCKED, 1); 5702 } 5703 5704 /* 5705 * Remove a syncq from an outer perimeter. 5706 * Block out all other access to the outer perimeter while it is being 5707 * changed using blocksq. 5708 * Assumes that the caller has *not* done an outer_enter. 5709 * 5710 * Vulnerable to starvation in blocksq. 
5711 */ 5712 static void 5713 outer_remove(syncq_t *outer, syncq_t *sq) 5714 { 5715 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5716 outer->sq_oprev != NULL); 5717 ASSERT(sq->sq_outer == outer); 5718 5719 /* Get exclusive access to the outer perimeter list */ 5720 blocksq(outer, SQ_BLOCKED, 0); 5721 ASSERT(outer->sq_flags & SQ_BLOCKED); 5722 ASSERT(!(outer->sq_flags & SQ_WRITER)); 5723 5724 mutex_enter(SQLOCK(sq)); 5725 sq->sq_outer = NULL; 5726 sq->sq_onext->sq_oprev = sq->sq_oprev; 5727 sq->sq_oprev->sq_onext = sq->sq_onext; 5728 sq->sq_oprev = sq->sq_onext = NULL; 5729 mutex_exit(SQLOCK(sq)); 5730 unblocksq(outer, SQ_BLOCKED, 1); 5731 } 5732 5733 /* 5734 * Queue a deferred qwriter(OUTER) callback for this outer perimeter. 5735 * If this is the first callback for this outer perimeter then add 5736 * this outer perimeter to the list of outer perimeters that 5737 * the qwriter_outer_thread will process. 5738 * 5739 * Increments sq_count in the outer syncq to prevent the membership 5740 * of the outer perimeter (in terms of inner syncqs) from changing while 5741 * the callback is pending. 5742 */ 5743 static void 5744 queue_writer(syncq_t *outer, void (*func)(), queue_t *q, mblk_t *mp) 5745 { 5746 ASSERT(MUTEX_HELD(SQLOCK(outer))); 5747 5748 mp->b_prev = (mblk_t *)func; 5749 mp->b_queue = q; 5750 mp->b_next = NULL; 5751 outer->sq_count++; /* Decremented when dequeued */ 5752 ASSERT(outer->sq_count != 0); /* Wraparound */ 5753 if (outer->sq_evhead == NULL) { 5754 /* First message. */ 5755 outer->sq_evhead = outer->sq_evtail = mp; 5756 outer->sq_flags |= SQ_EVENTS; 5757 mutex_exit(SQLOCK(outer)); 5758 STRSTAT(qwr_outer); 5759 (void) taskq_dispatch(streams_taskq, 5760 (task_func_t *)qwriter_outer_service, outer, TQ_SLEEP); 5761 } else { 5762 ASSERT(outer->sq_flags & SQ_EVENTS); 5763 outer->sq_evtail->b_next = mp; 5764 outer->sq_evtail = mp; 5765 mutex_exit(SQLOCK(outer)); 5766 } 5767 } 5768 5769 /* 5770 * Try to upgrade to write access at the outer perimeter. If this can 5771 * not be done without blocking then queue the callback to be done 5772 * by the qwriter_outer_thread. 5773 * 5774 * This routine can only be called from put or service procedures plus 5775 * asynchronous callback routines that have properly entered the 5776 * queue (with entersq). Thus qwriter(OUTER) assumes the caller has one claim 5777 * on the syncq associated with q. 5778 */ 5779 void 5780 qwriter_outer(queue_t *q, mblk_t *mp, void (*func)()) 5781 { 5782 syncq_t *osq, *sq, *outer; 5783 int failed; 5784 uint16_t flags; 5785 5786 osq = q->q_syncq; 5787 outer = osq->sq_outer; 5788 if (outer == NULL) 5789 panic("qwriter(PERIM_OUTER): no outer perimeter"); 5790 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5791 outer->sq_oprev != NULL); 5792 5793 mutex_enter(SQLOCK(outer)); 5794 flags = outer->sq_flags; 5795 /* 5796 * If some thread is traversing sq_next, or if we are blocked by 5797 * outer_insert or outer_remove, or if we already have queued 5798 * callbacks, then queue this callback for later processing. 5799 * 5800 * Also queue the qwriter for an interrupt thread in order 5801 * to reduce the time spent running at high IPL. 5802 * 5803 */ 5804 if ((flags & SQ_GOAWAY) || (curthread->t_pri >= kpreemptpri)) { 5805 /* 5806 * Queue the become_writer request. 5807 * The queueing is atomic under SQLOCK(outer) in order 5808 * to synchronize with outer_exit.
5809 * queue_writer will drop the outer SQLOCK 5810 */ 5811 if (flags & SQ_BLOCKED) { 5812 /* Must set SQ_WRITER on inner perimeter */ 5813 mutex_enter(SQLOCK(osq)); 5814 osq->sq_flags |= SQ_WRITER; 5815 mutex_exit(SQLOCK(osq)); 5816 } else { 5817 if (!(flags & SQ_WRITER)) { 5818 /* 5819 * The outer could have been SQ_BLOCKED, thus 5820 * SQ_WRITER might not be set on the inner. 5821 */ 5822 mutex_enter(SQLOCK(osq)); 5823 osq->sq_flags |= SQ_WRITER; 5824 mutex_exit(SQLOCK(osq)); 5825 } 5826 ASSERT(osq->sq_flags & SQ_WRITER); 5827 } 5828 queue_writer(outer, func, q, mp); 5829 return; 5830 } 5831 /* 5832 * We are half-way to exclusive access to the outer perimeter. 5833 * Prevent any outer_enter, qwriter(OUTER), or outer_insert/remove 5834 * while the inner syncqs are traversed. 5835 */ 5836 outer->sq_count++; 5837 ASSERT(outer->sq_count != 0); /* wraparound */ 5838 flags |= SQ_WRITER; 5839 /* 5840 * Check if we can run the function immediately. Mark all 5841 * syncqs with the writer flag to prevent new entries into 5842 * put and service procedures. 5843 * 5844 * Set SQ_WRITER on all the inner syncqs while holding 5845 * the SQLOCK on the outer syncq. This ensures that the changing 5846 * of SQ_WRITER is atomic under the outer SQLOCK. 5847 */ 5848 failed = 0; 5849 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) { 5850 uint16_t count; 5851 uint_t maxcnt = (sq == osq) ? 1 : 0; 5852 5853 mutex_enter(SQLOCK(sq)); 5854 count = sq->sq_count; 5855 SQ_PUTLOCKS_ENTER(sq); 5856 SUM_SQ_PUTCOUNTS(sq, count); 5857 if (sq->sq_count > maxcnt) 5858 failed = 1; 5859 sq->sq_flags |= SQ_WRITER; 5860 SQ_PUTLOCKS_EXIT(sq); 5861 mutex_exit(SQLOCK(sq)); 5862 } 5863 if (failed) { 5864 /* 5865 * Some other thread has a read claim on the outer perimeter. 5866 * Queue the callback for deferred processing. 5867 * 5868 * queue_writer will set SQ_QUEUED before we drop SQ_WRITER 5869 * so that other qwriter(OUTER) calls will queue their 5870 * callbacks as well. queue_writer increments sq_count so we 5871 * decrement to compensate for our increment. 5872 * 5873 * Dropping SQ_WRITER enables the writer thread to work 5874 * on this outer perimeter. 5875 */ 5876 outer->sq_flags = flags; 5877 queue_writer(outer, func, q, mp); 5878 /* queue_writer dropped the lock */ 5879 mutex_enter(SQLOCK(outer)); 5880 ASSERT(outer->sq_count > 0); 5881 outer->sq_count--; 5882 ASSERT(outer->sq_flags & SQ_WRITER); 5883 flags = outer->sq_flags; 5884 flags &= ~SQ_WRITER; 5885 if (flags & SQ_WANTWAKEUP) { 5886 flags &= ~SQ_WANTWAKEUP; 5887 cv_broadcast(&outer->sq_wait); 5888 } 5889 outer->sq_flags = flags; 5890 mutex_exit(SQLOCK(outer)); 5891 return; 5892 } else { 5893 outer->sq_flags = flags; 5894 mutex_exit(SQLOCK(outer)); 5895 } 5896 5897 /* Can run it immediately */ 5898 (*func)(q, mp); 5899 5900 outer_exit(outer); 5901 } 5902 5903 /* 5904 * Dequeue all writer callbacks from the outer perimeter and run them. 5905 */ 5906 static void 5907 write_now(syncq_t *outer) 5908 { 5909 mblk_t *mp; 5910 queue_t *q; 5911 void (*func)(); 5912 5913 ASSERT(MUTEX_HELD(SQLOCK(outer))); 5914 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5915 outer->sq_oprev != NULL); 5916 while ((mp = outer->sq_evhead) != NULL) { 5917 /* 5918 * queues cannot be placed on the queuelist on the outer 5919 * perimeter.
5920 */ 5921 ASSERT(!(outer->sq_flags & SQ_MESSAGES)); 5922 ASSERT((outer->sq_flags & SQ_EVENTS)); 5923 5924 outer->sq_evhead = mp->b_next; 5925 if (outer->sq_evhead == NULL) { 5926 outer->sq_evtail = NULL; 5927 outer->sq_flags &= ~SQ_EVENTS; 5928 } 5929 ASSERT(outer->sq_count != 0); 5930 outer->sq_count--; /* Incremented when enqueued. */ 5931 mutex_exit(SQLOCK(outer)); 5932 /* 5933 * Drop the message if the queue is closing. 5934 * Make sure that the queue is "claimed" when the callback 5935 * is run in order to satisfy various ASSERTs. 5936 */ 5937 q = mp->b_queue; 5938 func = (void (*)())mp->b_prev; 5939 ASSERT(func != NULL); 5940 mp->b_next = mp->b_prev = NULL; 5941 if (q->q_flag & QWCLOSE) { 5942 freemsg(mp); 5943 } else { 5944 claimq(q); 5945 (*func)(q, mp); 5946 releaseq(q); 5947 } 5948 mutex_enter(SQLOCK(outer)); 5949 } 5950 ASSERT(MUTEX_HELD(SQLOCK(outer))); 5951 } 5952 5953 /* 5954 * The list of messages on the inner syncq is effectively hashed 5955 * by destination queue. These destination queues are doubly 5956 * linked lists (hopefully) in priority order. Messages are then 5957 * put on the queue referenced by the q_sqhead/q_sqtail elements. 5958 * Additional messages are linked together by the b_next/b_prev 5959 * elements in the mblk, with (similar to putq()) the first message 5960 * having a NULL b_prev and the last message having a NULL b_next. 5961 * 5962 * Events, such as qwriter callbacks, are put onto a list in FIFO 5963 * order referenced by sq_evhead, and sq_evtail. This is a singly 5964 * linked list, and messages here MUST be processed in the order queued. 5965 */ 5966 5967 /* 5968 * Run the events on the syncq event list (sq_evhead). 5969 * Assumes there is only one claim on the syncq, it is 5970 * already exclusive (SQ_EXCL set), and the SQLOCK held. 5971 * Messages here are processed in order, with the SQ_EXCL bit 5972 * held all the way through till the last message is processed. 5973 */ 5974 void 5975 sq_run_events(syncq_t *sq) 5976 { 5977 mblk_t *bp; 5978 queue_t *qp; 5979 uint16_t flags = sq->sq_flags; 5980 void (*func)(); 5981 5982 ASSERT(MUTEX_HELD(SQLOCK(sq))); 5983 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL && 5984 sq->sq_oprev == NULL) || 5985 (sq->sq_outer != NULL && sq->sq_onext != NULL && 5986 sq->sq_oprev != NULL)); 5987 5988 ASSERT(flags & SQ_EXCL); 5989 ASSERT(sq->sq_count == 1); 5990 5991 /* 5992 * We need to process all of the events on this list. It 5993 * is possible that new events will be added while we are 5994 * away processing a callback, so on every loop, we start 5995 * back at the beginning of the list. 5996 */ 5997 /* 5998 * We have to reaccess sq_evhead since there is a 5999 * possibility of a new entry while we were running 6000 * the callback. 6001 */ 6002 for (bp = sq->sq_evhead; bp != NULL; bp = sq->sq_evhead) { 6003 ASSERT(bp->b_queue->q_syncq == sq); 6004 ASSERT(sq->sq_flags & SQ_EVENTS); 6005 6006 qp = bp->b_queue; 6007 func = (void (*)())bp->b_prev; 6008 ASSERT(func != NULL); 6009 6010 /* 6011 * Messages from the event queue must be taken off in 6012 * FIFO order. 
6013 */ 6014 ASSERT(sq->sq_evhead == bp); 6015 sq->sq_evhead = bp->b_next; 6016 6017 if (bp->b_next == NULL) { 6018 /* Deleting last */ 6019 ASSERT(sq->sq_evtail == bp); 6020 sq->sq_evtail = NULL; 6021 sq->sq_flags &= ~SQ_EVENTS; 6022 } 6023 bp->b_prev = bp->b_next = NULL; 6024 ASSERT(bp->b_datap->db_ref != 0); 6025 6026 mutex_exit(SQLOCK(sq)); 6027 6028 (*func)(qp, bp); 6029 6030 mutex_enter(SQLOCK(sq)); 6031 /* 6032 * re-read the flags, since they could have changed. 6033 */ 6034 flags = sq->sq_flags; 6035 ASSERT(flags & SQ_EXCL); 6036 } 6037 ASSERT(sq->sq_evhead == NULL && sq->sq_evtail == NULL); 6038 ASSERT(!(sq->sq_flags & SQ_EVENTS)); 6039 6040 if (flags & SQ_WANTWAKEUP) { 6041 flags &= ~SQ_WANTWAKEUP; 6042 cv_broadcast(&sq->sq_wait); 6043 } 6044 if (flags & SQ_WANTEXWAKEUP) { 6045 flags &= ~SQ_WANTEXWAKEUP; 6046 cv_broadcast(&sq->sq_exitwait); 6047 } 6048 sq->sq_flags = flags; 6049 } 6050 6051 /* 6052 * Put messages on the event list. 6053 * If we can go exclusive now, do so and process the event list, otherwise 6054 * let the last claim service this list (or wake the sqthread). 6055 * This procedure assumes SQLOCK is held. To run the event list, it 6056 * must be called with no claims. 6057 */ 6058 static void 6059 sqfill_events(syncq_t *sq, queue_t *q, mblk_t *mp, void (*func)()) 6060 { 6061 uint16_t count; 6062 6063 ASSERT(MUTEX_HELD(SQLOCK(sq))); 6064 ASSERT(func != NULL); 6065 6066 /* 6067 * This is a callback. Add it to the list of callbacks 6068 * and see about upgrading. 6069 */ 6070 mp->b_prev = (mblk_t *)func; 6071 mp->b_queue = q; 6072 mp->b_next = NULL; 6073 if (sq->sq_evhead == NULL) { 6074 sq->sq_evhead = sq->sq_evtail = mp; 6075 sq->sq_flags |= SQ_EVENTS; 6076 } else { 6077 ASSERT(sq->sq_evtail != NULL); 6078 ASSERT(sq->sq_evtail->b_next == NULL); 6079 ASSERT(sq->sq_flags & SQ_EVENTS); 6080 sq->sq_evtail->b_next = mp; 6081 sq->sq_evtail = mp; 6082 } 6083 /* 6084 * We have set SQ_EVENTS, so threads will have to 6085 * unwind out of the perimiter, and new entries will 6086 * not grab a putlock. But we still need to know 6087 * how many threads have already made a claim to the 6088 * syncq, so grab the putlocks, and sum the counts. 6089 * If there are no claims on the syncq, we can upgrade 6090 * to exclusive, and run the event list. 6091 * NOTE: We hold the SQLOCK, so we can just grab the 6092 * putlocks. 6093 */ 6094 count = sq->sq_count; 6095 SQ_PUTLOCKS_ENTER(sq); 6096 SUM_SQ_PUTCOUNTS(sq, count); 6097 /* 6098 * We have no claim, so we need to check if there 6099 * are no others, then we can upgrade. 6100 */ 6101 /* 6102 * There are currently no claims on 6103 * the syncq by this thread (at least on this entry). The thread who has 6104 * the claim should drain syncq. 6105 */ 6106 if (count > 0) { 6107 /* 6108 * Can't upgrade - other threads inside. 6109 */ 6110 SQ_PUTLOCKS_EXIT(sq); 6111 mutex_exit(SQLOCK(sq)); 6112 return; 6113 } 6114 /* 6115 * Need to set SQ_EXCL and make a claim on the syncq. 6116 */ 6117 ASSERT((sq->sq_flags & SQ_EXCL) == 0); 6118 sq->sq_flags |= SQ_EXCL; 6119 ASSERT(sq->sq_count == 0); 6120 sq->sq_count++; 6121 SQ_PUTLOCKS_EXIT(sq); 6122 6123 /* Process the events list */ 6124 sq_run_events(sq); 6125 6126 /* 6127 * Release our claim... 6128 */ 6129 sq->sq_count--; 6130 6131 /* 6132 * And release SQ_EXCL. 6133 * We don't need to acquire the putlocks to release 6134 * SQ_EXCL, since we are exclusive, and hold the SQLOCK. 
6135 	 */
6136 	sq->sq_flags &= ~SQ_EXCL;
6137 
6138 	/*
6139 	 * sq_run_events should have released SQ_EXCL
6140 	 */
6141 	ASSERT(!(sq->sq_flags & SQ_EXCL));
6142 
6143 	/*
6144 	 * If anything happened while we were running the
6145 	 * events (or was there before), we need to process
6146 	 * them now. We shouldn't be exclusive since we
6147 	 * released the perimeter above (plus, we asserted
6148 	 * for it).
6149 	 */
6150 	if (!(sq->sq_flags & SQ_STAYAWAY) && (sq->sq_flags & SQ_QUEUED))
6151 		drain_syncq(sq);
6152 	else
6153 		mutex_exit(SQLOCK(sq));
6154 }
6155 
6156 /*
6157  * Perform delayed processing. The caller has to make sure that it is safe
6158  * to enter the syncq (e.g. by checking that none of the SQ_STAYAWAY bits are
6159  * set).
6160  *
6161  * Assume that the caller has NO claims on the syncq. However, a claim
6162  * on the syncq does not indicate that a thread is draining the syncq.
6163  * There may be more claims on the syncq than there are threads draining
6164  * (i.e. #_threads_draining <= sq_count).
6165  *
6166  * drain_syncq has to terminate when one of the SQ_STAYAWAY bits gets set
6167  * in order to preserve qwriter(OUTER) ordering constraints.
6168  *
6169  * sq_putcount only needs to be checked when dispatching the queued
6170  * writer call for a CIPUT sync queue, but this is handled in sq_run_events.
6171  */
6172 void
6173 drain_syncq(syncq_t *sq)
6174 {
6175 	queue_t *qp;
6176 	uint16_t count;
6177 	uint16_t type = sq->sq_type;
6178 	uint16_t flags = sq->sq_flags;
6179 	boolean_t bg_service = sq->sq_svcflags & SQ_SERVICE;
6180 
6181 	TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START,
6182 	    "drain_syncq start:%p", sq);
6183 	ASSERT(MUTEX_HELD(SQLOCK(sq)));
6184 	ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6185 	    sq->sq_oprev == NULL) ||
6186 	    (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6187 	    sq->sq_oprev != NULL));
6188 
6189 	/*
6190 	 * Drop SQ_SERVICE flag.
6191 	 */
6192 	if (bg_service)
6193 		sq->sq_svcflags &= ~SQ_SERVICE;
6194 
6195 	/*
6196 	 * If SQ_EXCL is set, someone else is processing this syncq -
6197 	 * let them finish the job.
6198 	 */
6199 	if (flags & SQ_EXCL) {
6200 		if (bg_service) {
6201 			ASSERT(sq->sq_servcount != 0);
6202 			sq->sq_servcount--;
6203 		}
6204 		mutex_exit(SQLOCK(sq));
6205 		return;
6206 	}
6207 
6208 	/*
6209 	 * This routine can be called by a background thread if
6210 	 * it was scheduled by a high-priority thread. So, if there are
6211 	 * no messages queued, return (remember, we have the SQLOCK,
6212 	 * and it cannot change until we release it). Also wake up any
6213 	 * waiters.
6214 	 */
6215 	if (!(flags & SQ_QUEUED)) {
6216 		if (flags & SQ_WANTWAKEUP) {
6217 			flags &= ~SQ_WANTWAKEUP;
6218 			cv_broadcast(&sq->sq_wait);
6219 		}
6220 		if (flags & SQ_WANTEXWAKEUP) {
6221 			flags &= ~SQ_WANTEXWAKEUP;
6222 			cv_broadcast(&sq->sq_exitwait);
6223 		}
6224 		sq->sq_flags = flags;
6225 		if (bg_service) {
6226 			ASSERT(sq->sq_servcount != 0);
6227 			sq->sq_servcount--;
6228 		}
6229 		mutex_exit(SQLOCK(sq));
6230 		return;
6231 	}
6232 
6233 	/*
6234 	 * If this is not a concurrent put perimeter, we need to
6235 	 * become exclusive to drain. Also, if not CIPUT, we would
6236 	 * not have acquired a putlock, so we don't need to check
6237 	 * the putcounts. If not entering with a claim, we test
6238 	 * for sq_count == 0.
6239 	 */
6240 	type = sq->sq_type;
6241 	if (!(type & SQ_CIPUT)) {
6242 		if (sq->sq_count > 1) {
6243 			if (bg_service) {
6244 				ASSERT(sq->sq_servcount != 0);
6245 				sq->sq_servcount--;
6246 			}
6247 			mutex_exit(SQLOCK(sq));
6248 			return;
6249 		}
6250 		sq->sq_flags |= SQ_EXCL;
6251 	}
6252 
6253 	/*
6254 	 * This is where we make a claim to the syncq.
6255 	 * This can either be done by incrementing a putlock, or
6256 	 * the sq_count. But since we already have the SQLOCK
6257 	 * here, we just bump the sq_count.
6258 	 *
6259 	 * Note that after we make a claim, we need to let the code
6260 	 * fall through to the end of this routine to clean itself
6261 	 * up. A return in the while loop will put the syncq in a
6262 	 * very bad state.
6263 	 */
6264 	sq->sq_count++;
6265 	ASSERT(sq->sq_count != 0);	/* wraparound */
6266 
6267 	while ((flags = sq->sq_flags) & SQ_QUEUED) {
6268 		/*
6269 		 * If we are told to stay away or went exclusive,
6270 		 * we are done.
6271 		 */
6272 		if (flags & (SQ_STAYAWAY)) {
6273 			break;
6274 		}
6275 
6276 		/*
6277 		 * If there are events to run, do so.
6278 		 * We have one claim to the syncq, so if there are
6279 		 * more than one, other threads are running.
6280 		 */
6281 		if (sq->sq_evhead != NULL) {
6282 			ASSERT(sq->sq_flags & SQ_EVENTS);
6283 
6284 			count = sq->sq_count;
6285 			SQ_PUTLOCKS_ENTER(sq);
6286 			SUM_SQ_PUTCOUNTS(sq, count);
6287 			if (count > 1) {
6288 				SQ_PUTLOCKS_EXIT(sq);
6289 				/* Can't upgrade - other threads inside */
6290 				break;
6291 			}
6292 			ASSERT((flags & SQ_EXCL) == 0);
6293 			sq->sq_flags = flags | SQ_EXCL;
6294 			SQ_PUTLOCKS_EXIT(sq);
6295 			/*
6296 			 * We have the only claim, run the events;
6297 			 * sq_run_events will clear the SQ_EXCL flag.
6298 			 */
6299 			sq_run_events(sq);
6300 
6301 			/*
6302 			 * If this is a CIPUT perimeter, we need
6303 			 * to drop the SQ_EXCL flag so we can properly
6304 			 * continue draining the syncq.
6305 			 */
6306 			if (type & SQ_CIPUT) {
6307 				ASSERT(sq->sq_flags & SQ_EXCL);
6308 				sq->sq_flags &= ~SQ_EXCL;
6309 			}
6310 
6311 			/*
6312 			 * And go back to the beginning just in case
6313 			 * anything changed while we were away.
6314 			 */
6315 			ASSERT((sq->sq_flags & SQ_EXCL) || (type & SQ_CIPUT));
6316 			continue;
6317 		}
6318 
6319 		ASSERT(sq->sq_evhead == NULL);
6320 		ASSERT(!(sq->sq_flags & SQ_EVENTS));
6321 
6322 		/*
6323 		 * Find the queue that is not draining.
6324 		 *
6325 		 * q_draining is protected by QLOCK which we do not hold.
6326 		 * But if it was set, then a thread was draining, and if it gets
6327 		 * cleared, then it was because the thread has successfully
6328 		 * drained the syncq, or a GOAWAY state occurred. For the GOAWAY
6329 		 * state to happen, a thread needs the SQLOCK which we hold, and
6330 		 * if there was such a flag, we would have already seen it.
6331 		 */
6332 		for (qp = sq->sq_head;
6333 		    qp != NULL && (qp->q_draining ||
6334 		    (qp->q_sqflags & Q_SQDRAINING));
6335 		    qp = qp->q_sqnext)
6336 			;
6337 
6338 		if (qp == NULL)
6339 			break;
6340 
6341 		/*
6342 		 * We have a queue to work on, and we hold the
6343 		 * SQLOCK and one claim, call qdrain_syncq.
6344 		 * This means we need to release the SQLOCK and
6345 		 * acquire the QLOCK (OK since we have a claim).
6346 		 * Note that qdrain_syncq will actually dequeue
6347 		 * this queue from the sq_head list when it is
6348 		 * convinced all the work is done and release
6349 		 * the QLOCK before returning.
6350 		 */
6351 		qp->q_sqflags |= Q_SQDRAINING;
6352 		mutex_exit(SQLOCK(sq));
6353 		mutex_enter(QLOCK(qp));
6354 		qdrain_syncq(sq, qp);
6355 		mutex_enter(SQLOCK(sq));
6356 
6357 		/* The queue is drained */
6358 		ASSERT(qp->q_sqflags & Q_SQDRAINING);
6359 		qp->q_sqflags &= ~Q_SQDRAINING;
6360 		/*
6361 		 * NOTE: After this point qp should not be used since it may be
6362 		 * closed.
6363 		 */
6364 	}
6365 
6366 	ASSERT(MUTEX_HELD(SQLOCK(sq)));
6367 	flags = sq->sq_flags;
6368 
6369 	/*
6370 	 * sq->sq_head cannot change because we hold the
6371 	 * sqlock. However, a thread CAN decide that it is no longer
6372 	 * going to drain that queue. That should be due to
6373 	 * a GOAWAY state, and we should see that here.
6374 	 *
6375 	 * This loop is not very efficient. One solution may be adding a second
6376 	 * pointer to the "draining" queue, but it is difficult to do when
6377 	 * queues are inserted in the middle due to priority ordering. Another
6378 	 * possibility is to yank the queue out of the sq list and put it onto
6379 	 * the "draining list" and then put it back if it can't be drained.
6380 	 */
6381 
6382 	ASSERT((sq->sq_head == NULL) || (flags & SQ_GOAWAY) ||
6383 	    (type & SQ_CI) || sq->sq_head->q_draining);
6384 
6385 	/* Drop SQ_EXCL for non-CIPUT perimeters */
6386 	if (!(type & SQ_CIPUT))
6387 		flags &= ~SQ_EXCL;
6388 	ASSERT((flags & SQ_EXCL) == 0);
6389 
6390 	/* Wake up any waiters. */
6391 	if (flags & SQ_WANTWAKEUP) {
6392 		flags &= ~SQ_WANTWAKEUP;
6393 		cv_broadcast(&sq->sq_wait);
6394 	}
6395 	if (flags & SQ_WANTEXWAKEUP) {
6396 		flags &= ~SQ_WANTEXWAKEUP;
6397 		cv_broadcast(&sq->sq_exitwait);
6398 	}
6399 	sq->sq_flags = flags;
6400 
6401 	ASSERT(sq->sq_count != 0);
6402 	/* Release our claim. */
6403 	sq->sq_count--;
6404 
6405 	if (bg_service) {
6406 		ASSERT(sq->sq_servcount != 0);
6407 		sq->sq_servcount--;
6408 	}
6409 
6410 	mutex_exit(SQLOCK(sq));
6411 
6412 	TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END,
6413 	    "drain_syncq end:%p", sq);
6414 }
6415 
6416 
6417 /*
6418  *
6419  * qdrain_syncq can be called (currently) from only one of two places:
6420  *	drain_syncq
6421  *	putnext (or some variation of it).
6422  * and eventually
6423  *	qwait(_sig)
6424  *
6425  * If called from drain_syncq, we found it in the list
6426  * of queues needing service, so there is work to be done (or it
6427  * wouldn't be on the list).
6428  *
6429  * If called from some putnext variation, it was because the
6430  * perimeter is open, but messages are blocking a putnext and
6431  * there is not a thread working on it. Now a thread could start
6432  * working on it while we are getting ready to do so ourselves, but
6433  * the thread would set the q_draining flag, and we can spin out.
6434  *
6435  * As for qwait(_sig), I think I shall let it continue to call
6436  * drain_syncq directly (after all, it will get here eventually).
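 *
 * (Editor's note: the illustrative sketch below is not part of the
 * original source; it only makes the q_sqhead/b_next message layout
 * described above concrete.)
 */
#if 0	/* illustrative example only */
/*
 * A minimal sketch, assuming QLOCK(q) (or an equivalent stability
 * guarantee) is held: walk the per-queue syncq message chain that
 * qdrain_syncq consumes. "sqmsg_count" is a hypothetical helper.
 */
static int
sqmsg_count(queue_t *q)
{
	mblk_t *bp;
	int n = 0;

	/* The first message has a NULL b_prev, the last a NULL b_next. */
	for (bp = q->q_sqhead; bp != NULL; bp = bp->b_next)
		n++;
	ASSERT(n == q->q_syncqmsgs);
	return (n);
}
#endif

/*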
6437  *
6438  * qdrain_syncq has to terminate when:
6439  * - one of the SQ_STAYAWAY bits gets set to preserve qwriter(OUTER) ordering
6440  * - SQ_EVENTS gets set to preserve qwriter(INNER) ordering
6441  *
6442  * ASSUMES:
6443  *	One claim
6444  *	QLOCK held
6445  *	SQLOCK not held
6446  *	Will release QLOCK before returning
6447  */
6448 void
6449 qdrain_syncq(syncq_t *sq, queue_t *q)
6450 {
6451 	mblk_t *bp;
6452 	boolean_t do_clr;
6453 #ifdef DEBUG
6454 	uint16_t count;
6455 #endif
6456 
6457 	TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START,
6458 	    "drain_syncq start:%p", sq);
6459 	ASSERT(q->q_syncq == sq);
6460 	ASSERT(MUTEX_HELD(QLOCK(q)));
6461 	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6462 	/*
6463 	 * For non-CIPUT perimeters, we should be called with the
6464 	 * exclusive bit set already. For CIPUT perimeters we
6465 	 * will be doing a concurrent drain, so it better not be set.
6466 	 */
6467 	ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)));
6468 	ASSERT(!((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL)));
6469 	ASSERT((sq->sq_type & SQ_CIPUT) || (sq->sq_flags & SQ_EXCL));
6470 	/*
6471 	 * All outer pointers are set, or none of them are.
6472 	 */
6473 	ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6474 	    sq->sq_oprev == NULL) ||
6475 	    (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6476 	    sq->sq_oprev != NULL));
6477 #ifdef DEBUG
6478 	count = sq->sq_count;
6479 	/*
6480 	 * This is OK without the putlocks, because we have one
6481 	 * claim either from the sq_count, or a putcount. We could
6482 	 * get an erroneous value from other counts, but ours won't
6483 	 * change, so one way or another, we will have at least a
6484 	 * value of one.
6485 	 */
6486 	SUM_SQ_PUTCOUNTS(sq, count);
6487 	ASSERT(count >= 1);
6488 #endif /* DEBUG */
6489 
6490 	/*
6491 	 * The first thing to do here is find out if a thread is already
6492 	 * draining this queue or the queue is closing. If so, we are done,
6493 	 * just return. Also, if there are no messages, we are done as well.
6494 	 * Note that we check the q_sqhead since there is a window of
6495 	 * opportunity for us to enter here because Q_SQQUEUED was set, but is
6496 	 * not anymore.
6497 	 */
6498 	if (q->q_draining || (q->q_sqhead == NULL)) {
6499 		mutex_exit(QLOCK(q));
6500 		return;
6501 	}
6502 
6503 	/*
6504 	 * If the perimeter is exclusive, there is nothing we can
6505 	 * do right now, go away.
6506 	 * Note that there is nothing to prevent this case from changing
6507 	 * right after this check, but the spin-out will catch it.
6508 	 */
6509 
6510 	/* Tell other threads that we are draining this queue */
6511 	q->q_draining = 1;	/* Protected by QLOCK */
6512 
6513 	for (bp = q->q_sqhead; bp != NULL; bp = q->q_sqhead) {
6514 
6515 		/*
6516 		 * Because we can enter this routine just because
6517 		 * a putnext is blocked, we need to spin out if
6518 		 * the perimeter wants to go exclusive as well
6519 		 * as just blocked. We need to spin out also if
6520 		 * events are queued on the syncq.
6521 		 * Don't check for SQ_EXCL, because non-CIPUT
6522 		 * perimeters would set it, and it can't become
6523 		 * exclusive while we hold a claim.
6524 		 */
6525 		if (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS)) {
6526 			break;
6527 		}
6528 
6529 #ifdef DEBUG
6530 		/*
6531 		 * Since we are in qdrain_syncq, we already know the queue,
6532 		 * but for sanity, we want to check this against the qp that
6533 		 * was passed in by bp->b_queue.
6534 		 */
6535 
6536 		ASSERT(bp->b_queue == q);
6537 		ASSERT(bp->b_queue->q_syncq == sq);
6538 		bp->b_queue = NULL;
6539 
6540 		/*
6541 		 * We would have the following check in the DEBUG code:
6542 		 *
6543 		 * if (bp->b_prev != NULL) {
6544 		 *	ASSERT(bp->b_prev == (void (*)())q->q_qinfo->qi_putp);
6545 		 * }
6546 		 *
6547 		 * This can't be done, however, since IP modifies the qinfo
6548 		 * structure at run-time (switching between IPv4 qinfo and IPv6
6549 		 * qinfo), invalidating the check.
6550 		 * So the assignment to func is left here, but the ASSERT itself
6551 		 * is removed until the whole issue is resolved.
6552 		 */
6553 #endif
6554 		ASSERT(q->q_sqhead == bp);
6555 		q->q_sqhead = bp->b_next;
6556 		bp->b_prev = bp->b_next = NULL;
6557 		ASSERT(q->q_syncqmsgs > 0);
6558 		mutex_exit(QLOCK(q));
6559 
6560 		ASSERT(bp->b_datap->db_ref != 0);
6561 
6562 		(void) (*q->q_qinfo->qi_putp)(q, bp);
6563 
6564 		mutex_enter(QLOCK(q));
6565 		/*
6566 		 * We should decrement q_syncqmsgs only after executing the
6567 		 * put procedure to avoid a possible race with putnext().
6568 		 * Even though putnext() sees Q_SQQUEUED set, there is
6569 		 * an optimization which allows putnext to call the put
6570 		 * procedure directly if (q_syncqmsgs == 0), and thus
6571 		 * message reordering could otherwise occur.
6572 		 */
6573 		q->q_syncqmsgs--;
6574 
6575 		/*
6576 		 * Clear QFULL in the next service procedure queue if
6577 		 * this is the last message destined for that queue.
6578 		 *
6579 		 * It would make better sense to have some sort of
6580 		 * tunable for the low water mark, but these semantics
6581 		 * are not yet defined. So, alas, we use a constant.
6582 		 */
6583 		do_clr = (q->q_syncqmsgs == 0);
6584 		mutex_exit(QLOCK(q));
6585 
6586 		if (do_clr)
6587 			clr_qfull(q);
6588 
6589 		mutex_enter(QLOCK(q));
6590 		/*
6591 		 * Always clear SQ_EXCL when CIPUT in order to handle
6592 		 * qwriter(INNER).
6593 		 */
6594 		/*
6595 		 * The putp() can call qwriter and get exclusive access
6596 		 * IFF this is the only claim. So, we need to test for
6597 		 * this possibility so we can acquire the mutex and clear
6598 		 * the bit.
6599 		 */
6600 		if ((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL)) {
6601 			mutex_enter(SQLOCK(sq));
6602 			sq->sq_flags &= ~SQ_EXCL;
6603 			mutex_exit(SQLOCK(sq));
6604 		}
6605 	}
6606 
6607 	/*
6608 	 * We should either have no queues on the syncq, or we were
6609 	 * told to go away by a waiter (which we will wake up at the
6610 	 * end of this function).
6611 	 */
6612 	ASSERT((q->q_sqhead == NULL) ||
6613 	    (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS)));
6614 
6615 	ASSERT(MUTEX_HELD(QLOCK(q)));
6616 	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6617 
6618 	/*
6619 	 * Remove the q from the syncq list if all the messages are
6620 	 * drained.
6621 	 */
6622 	if (q->q_sqhead == NULL) {
6623 		mutex_enter(SQLOCK(sq));
6624 		if (q->q_sqflags & Q_SQQUEUED)
6625 			SQRM_Q(sq, q);
6626 		mutex_exit(SQLOCK(sq));
6627 		/*
6628 		 * Since the queue is removed from the list, reset its priority.
6629 		 */
6630 		q->q_spri = 0;
6631 	}
6632 
6633 	/*
6634 	 * Remember, the q_draining flag is used to let another
6635 	 * thread know that there is a thread currently draining
6636 	 * the messages for a queue. Since we are now done with
6637 	 * this queue (even if there may be messages still there),
6638 	 * we need to clear this flag so some thread will work
6639 	 * on it if needed.
6640 	 */
6641 	ASSERT(q->q_draining);
6642 	q->q_draining = 0;
6643 
6644 	/* Called with a claim, so OK to drop all locks.
 */
6645 	mutex_exit(QLOCK(q));
6646 
6647 	TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END,
6648 	    "drain_syncq end:%p", sq);
6649 }
6650 /* END OF QDRAIN_SYNCQ */
6651 
6652 
6653 /*
6654  * This is the mate to qdrain_syncq, except that it is putting the
6655  * message onto the queue instead of draining. Since the
6656  * message is destined for the queue that is selected, there is
6657  * no need to identify the function because the message is
6658  * intended for the put routine for the queue. But this
6659  * routine will do it anyway just in case (but only for debug kernels).
6660  *
6661  * After the message is enqueued on the syncq, it calls putnext_tail()
6662  * which will schedule a background thread to actually process the message.
6663  *
6664  * Assumes that there is a claim on the syncq (sq->sq_count > 0) and
6665  * SQLOCK(sq) and QLOCK(q) are not held.
6666  */
6667 void
6668 qfill_syncq(syncq_t *sq, queue_t *q, mblk_t *mp)
6669 {
6670 	queue_t *fq = NULL;
6671 
6672 	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
6673 	ASSERT(MUTEX_NOT_HELD(QLOCK(q)));
6674 	ASSERT(sq->sq_count > 0);
6675 	ASSERT(q->q_syncq == sq);
6676 	ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
6677 	    sq->sq_oprev == NULL) ||
6678 	    (sq->sq_outer != NULL && sq->sq_onext != NULL &&
6679 	    sq->sq_oprev != NULL));
6680 
6681 	mutex_enter(QLOCK(q));
6682 
6683 	/*
6684 	 * Set QFULL in the next service procedure queue (that cares) if not
6685 	 * already set and if there are already more messages on the syncq
6686 	 * than sq_max_size. If sq_max_size is 0, no flow control will be
6687 	 * asserted on any syncq.
6688 	 *
6689 	 * The fq here is the next queue with a service procedure.
6690 	 * This is where we would fail canputnext, so this is where we
6691 	 * need to set QFULL.
6692 	 *
6693 	 * LOCKING HIERARCHY: In the case when fq != q we need to
6694 	 *  a) Take QLOCK(fq) to set the QFULL flag and
6695 	 *  b) Take sd_reflock in the case of the hot stream to update
6696 	 *	sd_refcnt.
6697 	 * We already have QLOCK at this point. To avoid cross-locks with
6698 	 * freezestr() which grabs all QLOCKs and with strlock() which grabs
6699 	 * both SQLOCK and sd_reflock, we need to drop the respective locks
6700 	 * first.
6701 	 */
6702 	if ((sq_max_size != 0) && (!(q->q_nfsrv->q_flag & QFULL)) &&
6703 	    (q->q_syncqmsgs > sq_max_size)) {
6704 		if ((fq = q->q_nfsrv) == q) {
6705 			fq->q_flag |= QFULL;
6706 		} else {
6707 			mutex_exit(QLOCK(q));
6708 			mutex_enter(QLOCK(fq));
6709 			fq->q_flag |= QFULL;
6710 			mutex_exit(QLOCK(fq));
6711 			mutex_enter(QLOCK(q));
6712 		}
6713 	}
6714 
6715 #ifdef DEBUG
6716 	/*
6717 	 * This is used for debug in the qfill_syncq/qdrain_syncq case
6718 	 * to trace the queue that the message is intended for. Note
6719 	 * that the original use was to identify the queue and function
6720 	 * to call on the drain. In the new syncq, we have the context
6721 	 * of the queue that we are draining, so call its putproc and
6722 	 * don't rely on the saved values. But for debug this is still
6723 	 * useful information.
6724 	 */
6725 	mp->b_prev = (mblk_t *)q->q_qinfo->qi_putp;
6726 	mp->b_queue = q;
6727 	mp->b_next = NULL;
6728 #endif
6729 	ASSERT(q->q_syncq == sq);
6730 	/*
6731 	 * Enqueue the message on the list.
6732 	 * SQPUT_MP() accesses q_syncqmsgs. We are already holding QLOCK to
6733 	 * protect it. So it's OK to acquire SQLOCK after SQPUT_MP().
6734 	 */
6735 	SQPUT_MP(q, mp);
6736 	mutex_enter(SQLOCK(sq));
6737 
6738 	/*
6739 	 * And queue on the syncq for scheduling, if not already queued.
6740 	 * Note that we need the SQLOCK for this, and for testing flags
	 * at the end to see if we will drain.
 * So grab it now, and
6741 	 * release it before we call qdrain_syncq or return.
6742 	 */
6743 	if (!(q->q_sqflags & Q_SQQUEUED)) {
6744 		q->q_spri = curthread->t_pri;
6745 		SQPUT_Q(sq, q);
6746 	}
6747 #ifdef DEBUG
6748 	else {
6749 		/*
6750 		 * All of these conditions MUST be true!
6751 		 */
6752 		ASSERT(sq->sq_tail != NULL);
6753 		if (sq->sq_tail == sq->sq_head) {
6754 			ASSERT((q->q_sqprev == NULL) &&
6755 			    (q->q_sqnext == NULL));
6756 		} else {
6757 			ASSERT((q->q_sqprev != NULL) ||
6758 			    (q->q_sqnext != NULL));
6759 		}
6760 		ASSERT(sq->sq_flags & SQ_QUEUED);
6761 		ASSERT(q->q_syncqmsgs != 0);
6762 		ASSERT(q->q_sqflags & Q_SQQUEUED);
6763 	}
6764 #endif
6765 	mutex_exit(QLOCK(q));
6766 	/*
6767 	 * SQLOCK is still held, so sq_count can be safely decremented.
6768 	 */
6769 	sq->sq_count--;
6770 
6771 	putnext_tail(sq, q, 0);
6772 	/* Should not reference sq or q after this point. */
6773 }
6774 
6775 /* End of qfill_syncq */
6776 
6777 /*
6778  * Remove all messages from a syncq (if qp is NULL) or remove all messages
6779  * that would be put into qp by drain_syncq.
6780  * Used when deleting the syncq (qp == NULL) or when detaching
6781  * a queue (qp != NULL).
6782  * Return non-zero if one or more messages were freed.
6783  *
6784  * No need to grab sq_putlocks here. See the comment in strsubr.h that
6785  * explains when sq_putlocks are used.
6786  *
6787  * NOTE: This function assumes that it is called from the close() context and
6788  * that all the queues in the syncq are going away. For this reason it doesn't
6789  * acquire QLOCK for modifying q_sqhead/q_sqtail fields. This assumption is
6790  * currently valid, but it is useful to rethink this function to make it
6791  * behave properly in other cases.
6792  */
6793 int
6794 flush_syncq(syncq_t *sq, queue_t *qp)
6795 {
6796 	mblk_t *bp, *mp_head, *mp_next, *mp_prev;
6797 	queue_t *q;
6798 	int ret = 0;
6799 
6800 	mutex_enter(SQLOCK(sq));
6801 
6802 	/*
6803 	 * Before we leave, we need to make sure there are no
6804 	 * events listed for this queue. All events for this queue
6805 	 * will just be freed.
6806 	 */
6807 	if (qp != NULL && sq->sq_evhead != NULL) {
6808 		ASSERT(sq->sq_flags & SQ_EVENTS);
6809 
6810 		mp_prev = NULL;
6811 		for (bp = sq->sq_evhead; bp != NULL; bp = mp_next) {
6812 			mp_next = bp->b_next;
6813 			if (bp->b_queue == qp) {
6814 				/* Delete this message */
6815 				if (mp_prev != NULL) {
6816 					mp_prev->b_next = mp_next;
6817 					/*
6818 					 * Update sq_evtail if the last element
6819 					 * is removed.
6820 					 */
6821 					if (bp == sq->sq_evtail) {
6822 						ASSERT(mp_next == NULL);
6823 						sq->sq_evtail = mp_prev;
6824 					}
6825 				} else
6826 					sq->sq_evhead = mp_next;
6827 				if (sq->sq_evhead == NULL)
6828 					sq->sq_flags &= ~SQ_EVENTS;
6829 				bp->b_prev = bp->b_next = NULL;
6830 				freemsg(bp);
6831 				ret++;
6832 			} else {
6833 				mp_prev = bp;
6834 			}
6835 		}
6836 	}
6837 
6838 	/*
6839 	 * Walk sq_head and:
6840 	 *	- if qp is set, remove its messages
6841 	 *	- if qp is not set, remove all messages
6842 	 */
6843 	q = sq->sq_head;
6844 	while (q != NULL) {
6845 		ASSERT(q->q_syncq == sq);
6846 		if ((qp == NULL) || (qp == q)) {
6847 			/*
6848 			 * Yank the messages as a list off the queue.
6849 			 */
6850 			mp_head = q->q_sqhead;
6851 			/*
6852 			 * We do not have QLOCK(q) here (which is safe due to
6853 			 * assumptions mentioned above). To obtain the lock we
6854 			 * need to release SQLOCK which may allow lots of things
6855 			 * to change upon us. This place requires more analysis.
6856 			 */
6857 			q->q_sqhead = q->q_sqtail = NULL;
6858 			ASSERT(mp_head->b_queue &&
6859 			    mp_head->b_queue->q_syncq == sq);
6860 
6861 			/*
6862 			 * Free each of the messages.
6863 */ 6864 for (bp = mp_head; bp != NULL; bp = mp_next) { 6865 mp_next = bp->b_next; 6866 bp->b_prev = bp->b_next = NULL; 6867 freemsg(bp); 6868 ret++; 6869 } 6870 /* 6871 * Now remove the queue from the syncq. 6872 */ 6873 ASSERT(q->q_sqflags & Q_SQQUEUED); 6874 SQRM_Q(sq, q); 6875 q->q_spri = 0; 6876 q->q_syncqmsgs = 0; 6877 6878 /* 6879 * If qp was specified, we are done with it and are 6880 * going to drop SQLOCK(sq) and return. We wakeup syncq 6881 * waiters while we still have the SQLOCK. 6882 */ 6883 if ((qp != NULL) && (sq->sq_flags & SQ_WANTWAKEUP)) { 6884 sq->sq_flags &= ~SQ_WANTWAKEUP; 6885 cv_broadcast(&sq->sq_wait); 6886 } 6887 /* Drop SQLOCK across clr_qfull */ 6888 mutex_exit(SQLOCK(sq)); 6889 6890 /* 6891 * We avoid doing the test that drain_syncq does and 6892 * unconditionally clear qfull for every flushed 6893 * message. Since flush_syncq is only called during 6894 * close this should not be a problem. 6895 */ 6896 clr_qfull(q); 6897 if (qp != NULL) { 6898 return (ret); 6899 } else { 6900 mutex_enter(SQLOCK(sq)); 6901 /* 6902 * The head was removed by SQRM_Q above. 6903 * reread the new head and flush it. 6904 */ 6905 q = sq->sq_head; 6906 } 6907 } else { 6908 q = q->q_sqnext; 6909 } 6910 ASSERT(MUTEX_HELD(SQLOCK(sq))); 6911 } 6912 6913 if (sq->sq_flags & SQ_WANTWAKEUP) { 6914 sq->sq_flags &= ~SQ_WANTWAKEUP; 6915 cv_broadcast(&sq->sq_wait); 6916 } 6917 6918 mutex_exit(SQLOCK(sq)); 6919 return (ret); 6920 } 6921 6922 /* 6923 * Propagate all messages from a syncq to the next syncq that are associated 6924 * with the specified queue. If the queue is attached to a driver or if the 6925 * messages have been added due to a qwriter(PERIM_INNER), free the messages. 6926 * 6927 * Assumes that the stream is strlock()'ed. We don't come here if there 6928 * are no messages to propagate. 6929 * 6930 * NOTE : If the queue is attached to a driver, all the messages are freed 6931 * as there is no point in propagating the messages from the driver syncq 6932 * to the closing stream head which will in turn get freed later. 6933 */ 6934 static int 6935 propagate_syncq(queue_t *qp) 6936 { 6937 mblk_t *bp, *head, *tail, *prev, *next; 6938 syncq_t *sq; 6939 queue_t *nqp; 6940 syncq_t *nsq; 6941 boolean_t isdriver; 6942 int moved = 0; 6943 uint16_t flags; 6944 pri_t priority = curthread->t_pri; 6945 #ifdef DEBUG 6946 void (*func)(); 6947 #endif 6948 6949 sq = qp->q_syncq; 6950 ASSERT(MUTEX_HELD(SQLOCK(sq))); 6951 /* debug macro */ 6952 SQ_PUTLOCKS_HELD(sq); 6953 /* 6954 * As entersq() does not increment the sq_count for 6955 * the write side, check sq_count for non-QPERQ 6956 * perimeters alone. 6957 */ 6958 ASSERT((qp->q_flag & QPERQ) || (sq->sq_count >= 1)); 6959 6960 /* 6961 * propagate_syncq() can be called because of either messages on the 6962 * queue syncq or because on events on the queue syncq. Do actual 6963 * message propagations if there are any messages. 
6964 */ 6965 if (qp->q_syncqmsgs) { 6966 isdriver = (qp->q_flag & QISDRV); 6967 6968 if (!isdriver) { 6969 nqp = qp->q_next; 6970 nsq = nqp->q_syncq; 6971 ASSERT(MUTEX_HELD(SQLOCK(nsq))); 6972 /* debug macro */ 6973 SQ_PUTLOCKS_HELD(nsq); 6974 #ifdef DEBUG 6975 func = (void (*)())nqp->q_qinfo->qi_putp; 6976 #endif 6977 } 6978 6979 SQRM_Q(sq, qp); 6980 priority = MAX(qp->q_spri, priority); 6981 qp->q_spri = 0; 6982 head = qp->q_sqhead; 6983 tail = qp->q_sqtail; 6984 qp->q_sqhead = qp->q_sqtail = NULL; 6985 qp->q_syncqmsgs = 0; 6986 6987 /* 6988 * Walk the list of messages, and free them if this is a driver, 6989 * otherwise reset the b_prev and b_queue value to the new putp. 6990 * Afterward, we will just add the head to the end of the next 6991 * syncq, and point the tail to the end of this one. 6992 */ 6993 6994 for (bp = head; bp != NULL; bp = next) { 6995 next = bp->b_next; 6996 if (isdriver) { 6997 bp->b_prev = bp->b_next = NULL; 6998 freemsg(bp); 6999 continue; 7000 } 7001 /* Change the q values for this message */ 7002 bp->b_queue = nqp; 7003 #ifdef DEBUG 7004 bp->b_prev = (mblk_t *)func; 7005 #endif 7006 moved++; 7007 } 7008 /* 7009 * Attach list of messages to the end of the new queue (if there 7010 * is a list of messages). 7011 */ 7012 7013 if (!isdriver && head != NULL) { 7014 ASSERT(tail != NULL); 7015 if (nqp->q_sqhead == NULL) { 7016 nqp->q_sqhead = head; 7017 } else { 7018 ASSERT(nqp->q_sqtail != NULL); 7019 nqp->q_sqtail->b_next = head; 7020 } 7021 nqp->q_sqtail = tail; 7022 /* 7023 * When messages are moved from high priority queue to 7024 * another queue, the destination queue priority is 7025 * upgraded. 7026 */ 7027 7028 if (priority > nqp->q_spri) 7029 nqp->q_spri = priority; 7030 7031 SQPUT_Q(nsq, nqp); 7032 7033 nqp->q_syncqmsgs += moved; 7034 ASSERT(nqp->q_syncqmsgs != 0); 7035 } 7036 } 7037 7038 /* 7039 * Before we leave, we need to make sure there are no 7040 * events listed for this queue. All events for this queue 7041 * will just be freed. 7042 */ 7043 if (sq->sq_evhead != NULL) { 7044 ASSERT(sq->sq_flags & SQ_EVENTS); 7045 prev = NULL; 7046 for (bp = sq->sq_evhead; bp != NULL; bp = next) { 7047 next = bp->b_next; 7048 if (bp->b_queue == qp) { 7049 /* Delete this message */ 7050 if (prev != NULL) { 7051 prev->b_next = next; 7052 /* 7053 * Update sq_evtail if the last element 7054 * is removed. 7055 */ 7056 if (bp == sq->sq_evtail) { 7057 ASSERT(next == NULL); 7058 sq->sq_evtail = prev; 7059 } 7060 } else 7061 sq->sq_evhead = next; 7062 if (sq->sq_evhead == NULL) 7063 sq->sq_flags &= ~SQ_EVENTS; 7064 bp->b_prev = bp->b_next = NULL; 7065 freemsg(bp); 7066 } else { 7067 prev = bp; 7068 } 7069 } 7070 } 7071 7072 flags = sq->sq_flags; 7073 7074 /* Wake up any waiter before leaving. */ 7075 if (flags & SQ_WANTWAKEUP) { 7076 flags &= ~SQ_WANTWAKEUP; 7077 cv_broadcast(&sq->sq_wait); 7078 } 7079 sq->sq_flags = flags; 7080 7081 return (moved); 7082 } 7083 7084 /* 7085 * Try and upgrade to exclusive access at the inner perimeter. If this can 7086 * not be done without blocking then request will be queued on the syncq 7087 * and drain_syncq will run it later. 7088 * 7089 * This routine can only be called from put or service procedures plus 7090 * asynchronous callback routines that have properly entered to 7091 * queue (with entersq.) Thus qwriter_inner assumes the caller has one claim 7092 * on the syncq associated with q. 
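 *
 * (Editor's note: the illustrative sketch below is not part of the
 * original source.)
 */
#if 0	/* illustrative example only */
/*
 * A minimal sketch of how a module reaches this code through the
 * public qwriter(9F) wrapper. The xx_* names, per-instance state and
 * XX_CONFIGURED flag are hypothetical; the DDI calls are real.
 */
static void
xx_set_state(queue_t *q, mblk_t *mp)
{
	/* Runs with exclusive (writer) access to the perimeter. */
	struct xx_state *xsp = q->q_ptr;

	xsp->xx_flags |= XX_CONFIGURED;	/* hypothetical state change */
	miocack(q, mp, 0, 0);
}

static int
xx_wput(queue_t *q, mblk_t *mp)
{
	if (DB_TYPE(mp) == M_IOCTL) {
		/*
		 * Upgrade to writer; if that cannot be done without
		 * blocking, the callback is queued and run later via
		 * the syncq event list as described above.
		 */
		qwriter(q, mp, xx_set_state, PERIM_INNER);
		return (0);
	}
	putnext(q, mp);
	return (0);
}
#endif

/*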
7093 */ 7094 void 7095 qwriter_inner(queue_t *q, mblk_t *mp, void (*func)()) 7096 { 7097 syncq_t *sq = q->q_syncq; 7098 uint16_t count; 7099 7100 mutex_enter(SQLOCK(sq)); 7101 count = sq->sq_count; 7102 SQ_PUTLOCKS_ENTER(sq); 7103 SUM_SQ_PUTCOUNTS(sq, count); 7104 ASSERT(count >= 1); 7105 ASSERT(sq->sq_type & (SQ_CIPUT|SQ_CISVC)); 7106 7107 if (count == 1) { 7108 /* 7109 * Can upgrade. This case also handles nested qwriter calls 7110 * (when the qwriter callback function calls qwriter). In that 7111 * case SQ_EXCL is already set. 7112 */ 7113 sq->sq_flags |= SQ_EXCL; 7114 SQ_PUTLOCKS_EXIT(sq); 7115 mutex_exit(SQLOCK(sq)); 7116 (*func)(q, mp); 7117 /* 7118 * Assumes that leavesq, putnext, and drain_syncq will reset 7119 * SQ_EXCL for SQ_CIPUT/SQ_CISVC queues. We leave SQ_EXCL on 7120 * until putnext, leavesq, or drain_syncq drops it. 7121 * That way we handle nested qwriter(INNER) without dropping 7122 * SQ_EXCL until the outermost qwriter callback routine is 7123 * done. 7124 */ 7125 return; 7126 } 7127 SQ_PUTLOCKS_EXIT(sq); 7128 sqfill_events(sq, q, mp, func); 7129 } 7130 7131 /* 7132 * Synchronous callback support functions 7133 */ 7134 7135 /* 7136 * Allocate a callback parameter structure. 7137 * Assumes that caller initializes the flags and the id. 7138 * Acquires SQLOCK(sq) if non-NULL is returned. 7139 */ 7140 callbparams_t * 7141 callbparams_alloc(syncq_t *sq, void (*func)(void *), void *arg, int kmflags) 7142 { 7143 callbparams_t *cbp; 7144 size_t size = sizeof (callbparams_t); 7145 7146 cbp = kmem_alloc(size, kmflags & ~KM_PANIC); 7147 7148 /* 7149 * Only try tryhard allocation if the caller is ready to panic. 7150 * Otherwise just fail. 7151 */ 7152 if (cbp == NULL) { 7153 if (kmflags & KM_PANIC) 7154 cbp = kmem_alloc_tryhard(sizeof (callbparams_t), 7155 &size, kmflags); 7156 else 7157 return (NULL); 7158 } 7159 7160 ASSERT(size >= sizeof (callbparams_t)); 7161 cbp->cbp_size = size; 7162 cbp->cbp_sq = sq; 7163 cbp->cbp_func = func; 7164 cbp->cbp_arg = arg; 7165 mutex_enter(SQLOCK(sq)); 7166 cbp->cbp_next = sq->sq_callbpend; 7167 sq->sq_callbpend = cbp; 7168 return (cbp); 7169 } 7170 7171 void 7172 callbparams_free(syncq_t *sq, callbparams_t *cbp) 7173 { 7174 callbparams_t **pp, *p; 7175 7176 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7177 7178 for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) { 7179 if (p == cbp) { 7180 *pp = p->cbp_next; 7181 kmem_free(p, p->cbp_size); 7182 return; 7183 } 7184 } 7185 (void) (STRLOG(0, 0, 0, SL_CONSOLE, 7186 "callbparams_free: not found\n")); 7187 } 7188 7189 void 7190 callbparams_free_id(syncq_t *sq, callbparams_id_t id, int32_t flag) 7191 { 7192 callbparams_t **pp, *p; 7193 7194 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7195 7196 for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) { 7197 if (p->cbp_id == id && p->cbp_flags == flag) { 7198 *pp = p->cbp_next; 7199 kmem_free(p, p->cbp_size); 7200 return; 7201 } 7202 } 7203 (void) (STRLOG(0, 0, 0, SL_CONSOLE, 7204 "callbparams_free_id: not found\n")); 7205 } 7206 7207 /* 7208 * Callback wrapper function used by once-only callbacks that can be 7209 * cancelled (qtimeout and qbufcall) 7210 * Contains inline version of entersq(sq, SQ_CALLBACK) that can be 7211 * cancelled by the qun* functions. 
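 *
 * (Editor's note: the illustrative sketch below is not part of the
 * original source.)
 */
#if 0	/* illustrative example only */
/*
 * A minimal sketch of the module-side pattern these wrappers support:
 * arm a cancellable callback with qtimeout(9F) and cancel it from
 * close with quntimeout(9F). The xx_* names are hypothetical.
 *
 * Arming, somewhere in the module:
 *	xsp->xx_tid = qtimeout(q, xx_tick, q, drv_usectohz(100000));
 */
static void
xx_tick(void *arg)
{
	queue_t *q = arg;
	struct xx_state *xsp = q->q_ptr;

	xsp->xx_tid = 0;	/* the timeout has fired */
	qenable(q);		/* kick the service procedure */
}

static int
xx_close(queue_t *q, int flag, cred_t *crp)
{
	struct xx_state *xsp = q->q_ptr;

	qprocsoff(q);
	if (xsp->xx_tid != 0)
		(void) quntimeout(q, xsp->xx_tid);
	kmem_free(xsp, sizeof (*xsp));
	q->q_ptr = NULL;
	return (0);
}
#endif

/*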
7212  */
7213 void
7214 qcallbwrapper(void *arg)
7215 {
7216 	callbparams_t *cbp = arg;
7217 	syncq_t *sq;
7218 	uint16_t count = 0;
7219 	uint16_t waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL;
7220 	uint16_t type;
7221 
7222 	sq = cbp->cbp_sq;
7223 	mutex_enter(SQLOCK(sq));
7224 	type = sq->sq_type;
7225 	if (!(type & SQ_CICB)) {
7226 		count = sq->sq_count;
7227 		SQ_PUTLOCKS_ENTER(sq);
7228 		SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
7229 		SUM_SQ_PUTCOUNTS(sq, count);
7230 		sq->sq_needexcl++;
7231 		ASSERT(sq->sq_needexcl != 0);	/* wraparound */
7232 		waitflags |= SQ_MESSAGES;
7233 	}
7234 	/* Cannot handle exclusive entry at the outer perimeter */
7235 	ASSERT(type & SQ_COCB);
7236 
7237 	while ((sq->sq_flags & waitflags) ||
7238 	    (!(type & SQ_CICB) && count != 0)) {
7239 		if ((sq->sq_callbflags & cbp->cbp_flags) &&
7240 		    (sq->sq_cancelid == cbp->cbp_id)) {
7241 			/* timeout has been cancelled */
7242 			sq->sq_callbflags |= SQ_CALLB_BYPASSED;
7243 			callbparams_free(sq, cbp);
7244 			if (!(type & SQ_CICB)) {
7245 				ASSERT(sq->sq_needexcl > 0);
7246 				sq->sq_needexcl--;
7247 				if (sq->sq_needexcl == 0) {
7248 					SQ_PUTCOUNT_SETFAST_LOCKED(sq);
7249 				}
7250 				SQ_PUTLOCKS_EXIT(sq);
7251 			}
7252 			mutex_exit(SQLOCK(sq));
7253 			return;
7254 		}
7255 		sq->sq_flags |= SQ_WANTWAKEUP;
7256 		if (!(type & SQ_CICB)) {
7257 			SQ_PUTLOCKS_EXIT(sq);
7258 		}
7259 		cv_wait(&sq->sq_wait, SQLOCK(sq));
7260 		if (!(type & SQ_CICB)) {
7261 			count = sq->sq_count;
7262 			SQ_PUTLOCKS_ENTER(sq);
7263 			SUM_SQ_PUTCOUNTS(sq, count);
7264 		}
7265 	}
7266 
7267 	sq->sq_count++;
7268 	ASSERT(sq->sq_count != 0);	/* Wraparound */
7269 	if (!(type & SQ_CICB)) {
7270 		ASSERT(count == 0);
7271 		sq->sq_flags |= SQ_EXCL;
7272 		ASSERT(sq->sq_needexcl > 0);
7273 		sq->sq_needexcl--;
7274 		if (sq->sq_needexcl == 0) {
7275 			SQ_PUTCOUNT_SETFAST_LOCKED(sq);
7276 		}
7277 		SQ_PUTLOCKS_EXIT(sq);
7278 	}
7279 
7280 	mutex_exit(SQLOCK(sq));
7281 
7282 	cbp->cbp_func(cbp->cbp_arg);
7283 
7284 	/*
7285 	 * We drop the lock only for leavesq to re-acquire it.
7286 	 * A possible optimization is to inline leavesq.
7287 	 */
7288 	mutex_enter(SQLOCK(sq));
7289 	callbparams_free(sq, cbp);
7290 	mutex_exit(SQLOCK(sq));
7291 	leavesq(sq, SQ_CALLBACK);
7292 }
7293 
7294 /*
7295  * No need to grab sq_putlocks here. See the comment in strsubr.h that
7296  * explains when sq_putlocks are used.
7297  *
7298  * sq_count (or one of the sq_putcounts) has already been
7299  * decremented by the caller, and if SQ_QUEUED, we need to call
7300  * drain_syncq (the global syncq drain).
7301  * If putnext_tail is called with the SQ_EXCL bit set, we are in
7302  * one of two states: a non-CIPUT perimeter, where we need to clear
7303  * the bit, or we went exclusive in the put procedure. In either case,
7304  * we want to clear the bit now, and it is probably easier to do
7305  * this at the beginning of this function (remember, we hold
7306  * the SQLOCK). Lastly, if there are other messages queued
7307  * on the syncq (and not for our destination), enable the syncq
7308  * for background work.
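 *
 * (Editor's note: the illustrative sketch below is not part of the
 * original source.)
 */
#if 0	/* illustrative example only */
/*
 * The wakeup handshake putnext_tail performs, reduced to its
 * essentials. Both helper names are hypothetical; the idiom is the
 * one used throughout this file, always under SQLOCK(sq).
 */
static void
sq_wait_unblocked(syncq_t *sq)
{
	ASSERT(MUTEX_HELD(SQLOCK(sq)));
	while (sq->sq_flags & SQ_BLOCKED) {
		/* Advertise interest, then sleep on sq_wait. */
		sq->sq_flags |= SQ_WANTWAKEUP;
		cv_wait(&sq->sq_wait, SQLOCK(sq));
	}
}

static void
sq_wakeup_waiters(syncq_t *sq)
{
	ASSERT(MUTEX_HELD(SQLOCK(sq)));
	if (sq->sq_flags & SQ_WANTWAKEUP) {
		sq->sq_flags &= ~SQ_WANTWAKEUP;
		cv_broadcast(&sq->sq_wait);
	}
}
#endif

/*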
7308 */ 7309 7310 /* ARGSUSED */ 7311 void 7312 putnext_tail(syncq_t *sq, queue_t *qp, uint32_t passflags) 7313 { 7314 uint16_t flags = sq->sq_flags; 7315 7316 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7317 ASSERT(MUTEX_NOT_HELD(QLOCK(qp))); 7318 7319 /* Clear SQ_EXCL if set in passflags */ 7320 if (passflags & SQ_EXCL) { 7321 flags &= ~SQ_EXCL; 7322 } 7323 if (flags & SQ_WANTWAKEUP) { 7324 flags &= ~SQ_WANTWAKEUP; 7325 cv_broadcast(&sq->sq_wait); 7326 } 7327 if (flags & SQ_WANTEXWAKEUP) { 7328 flags &= ~SQ_WANTEXWAKEUP; 7329 cv_broadcast(&sq->sq_exitwait); 7330 } 7331 sq->sq_flags = flags; 7332 7333 /* 7334 * We have cleared SQ_EXCL if we were asked to, and started 7335 * the wakeup process for waiters. If there are no writers 7336 * then we need to drain the syncq if we were told to, or 7337 * enable the background thread to do it. 7338 */ 7339 if (!(flags & (SQ_STAYAWAY|SQ_EXCL))) { 7340 if ((passflags & SQ_QUEUED) || 7341 (sq->sq_svcflags & SQ_DISABLED)) { 7342 /* drain_syncq will take care of events in the list */ 7343 drain_syncq(sq); 7344 return; 7345 } else if (flags & SQ_QUEUED) { 7346 sqenable(sq); 7347 } 7348 } 7349 /* Drop the SQLOCK on exit */ 7350 mutex_exit(SQLOCK(sq)); 7351 TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END, 7352 "putnext_end:(%p, %p, %p) done", NULL, qp, sq); 7353 } 7354 7355 void 7356 set_qend(queue_t *q) 7357 { 7358 mutex_enter(QLOCK(q)); 7359 if (!O_SAMESTR(q)) 7360 q->q_flag |= QEND; 7361 else 7362 q->q_flag &= ~QEND; 7363 mutex_exit(QLOCK(q)); 7364 q = _OTHERQ(q); 7365 mutex_enter(QLOCK(q)); 7366 if (!O_SAMESTR(q)) 7367 q->q_flag |= QEND; 7368 else 7369 q->q_flag &= ~QEND; 7370 mutex_exit(QLOCK(q)); 7371 } 7372 7373 7374 void 7375 clr_qfull(queue_t *q) 7376 { 7377 queue_t *oq = q; 7378 7379 q = q->q_nfsrv; 7380 /* Fast check if there is any work to do before getting the lock. */ 7381 if ((q->q_flag & (QFULL|QWANTW)) == 0) { 7382 return; 7383 } 7384 7385 /* 7386 * Do not reset QFULL (and backenable) if the q_count is the reason 7387 * for QFULL being set. 7388 */ 7389 mutex_enter(QLOCK(q)); 7390 /* 7391 * If both q_count and q_mblkcnt are less than the hiwat mark 7392 */ 7393 if ((q->q_count < q->q_hiwat) && (q->q_mblkcnt < q->q_hiwat)) { 7394 q->q_flag &= ~QFULL; 7395 /* 7396 * A little more confusing, how about this way: 7397 * if someone wants to write, 7398 * AND 7399 * both counts are less than the lowat mark 7400 * OR 7401 * the lowat mark is zero 7402 * THEN 7403 * backenable 7404 */ 7405 if ((q->q_flag & QWANTW) && 7406 (((q->q_count < q->q_lowat) && 7407 (q->q_mblkcnt < q->q_lowat)) || q->q_lowat == 0)) { 7408 q->q_flag &= ~QWANTW; 7409 mutex_exit(QLOCK(q)); 7410 backenable(oq, 0); 7411 } else 7412 mutex_exit(QLOCK(q)); 7413 } else 7414 mutex_exit(QLOCK(q)); 7415 } 7416 7417 /* 7418 * Set the forward service procedure pointer. 7419 * 7420 * Called at insert-time to cache a queue's next forward service procedure in 7421 * q_nfsrv; used by canput() and canputnext(). If the queue to be inserted 7422 * has a service procedure then q_nfsrv points to itself. If the queue to be 7423 * inserted does not have a service procedure, then q_nfsrv points to the next 7424 * queue forward that has a service procedure. If the queue is at the logical 7425 * end of the stream (driver for write side, stream head for the read side) 7426 * and does not have a service procedure, then q_nfsrv also points to itself. 
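 *
 * (Editor's note: the illustrative sketch below is not part of the
 * original source.)
 */
#if 0	/* illustrative example only */
/*
 * A minimal sketch of why q_nfsrv matters to modules: canputnext(9F)
 * tests flow control against the cached next-forward-service queue
 * instead of walking the stream. Module names are hypothetical.
 */
static int
xx_rput(queue_t *q, mblk_t *mp)
{
	if (DB_TYPE(mp) == M_DATA && !canputnext(q)) {
		/* Downstream is full; queue and let srv(9E) retry. */
		(void) putq(q, mp);
		return (0);
	}
	putnext(q, mp);
	return (0);
}
#endif

/*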
7427 */ 7428 void 7429 set_nfsrv_ptr( 7430 queue_t *rnew, /* read queue pointer to new module */ 7431 queue_t *wnew, /* write queue pointer to new module */ 7432 queue_t *prev_rq, /* read queue pointer to the module above */ 7433 queue_t *prev_wq) /* write queue pointer to the module above */ 7434 { 7435 queue_t *qp; 7436 7437 if (prev_wq->q_next == NULL) { 7438 /* 7439 * Insert the driver, initialize the driver and stream head. 7440 * In this case, prev_rq/prev_wq should be the stream head. 7441 * _I_INSERT does not allow inserting a driver. Make sure 7442 * that it is not an insertion. 7443 */ 7444 ASSERT(!(rnew->q_flag & _QINSERTING)); 7445 wnew->q_nfsrv = wnew; 7446 if (rnew->q_qinfo->qi_srvp) 7447 rnew->q_nfsrv = rnew; 7448 else 7449 rnew->q_nfsrv = prev_rq; 7450 prev_rq->q_nfsrv = prev_rq; 7451 prev_wq->q_nfsrv = prev_wq; 7452 } else { 7453 /* 7454 * set up read side q_nfsrv pointer. This MUST be done 7455 * before setting the write side, because the setting of 7456 * the write side for a fifo may depend on it. 7457 * 7458 * Suppose we have a fifo that only has pipemod pushed. 7459 * pipemod has no read or write service procedures, so 7460 * nfsrv for both pipemod queues points to prev_rq (the 7461 * stream read head). Now push bufmod (which has only a 7462 * read service procedure). Doing the write side first, 7463 * wnew->q_nfsrv is set to pipemod's writeq nfsrv, which 7464 * is WRONG; the next queue forward from wnew with a 7465 * service procedure will be rnew, not the stream read head. 7466 * Since the downstream queue (which in the case of a fifo 7467 * is the read queue rnew) can affect upstream queues, it 7468 * needs to be done first. Setting up the read side first 7469 * sets nfsrv for both pipemod queues to rnew and then 7470 * when the write side is set up, wnew-q_nfsrv will also 7471 * point to rnew. 7472 */ 7473 if (rnew->q_qinfo->qi_srvp) { 7474 /* 7475 * use _OTHERQ() because, if this is a pipe, next 7476 * module may have been pushed from other end and 7477 * q_next could be a read queue. 7478 */ 7479 qp = _OTHERQ(prev_wq->q_next); 7480 while (qp && qp->q_nfsrv != qp) { 7481 qp->q_nfsrv = rnew; 7482 qp = backq(qp); 7483 } 7484 rnew->q_nfsrv = rnew; 7485 } else 7486 rnew->q_nfsrv = prev_rq->q_nfsrv; 7487 7488 /* set up write side q_nfsrv pointer */ 7489 if (wnew->q_qinfo->qi_srvp) { 7490 wnew->q_nfsrv = wnew; 7491 7492 /* 7493 * For insertion, need to update nfsrv of the modules 7494 * above which do not have a service routine. 7495 */ 7496 if (rnew->q_flag & _QINSERTING) { 7497 for (qp = prev_wq; 7498 qp != NULL && qp->q_nfsrv != qp; 7499 qp = backq(qp)) { 7500 qp->q_nfsrv = wnew->q_nfsrv; 7501 } 7502 } 7503 } else { 7504 if (prev_wq->q_next == prev_rq) 7505 /* 7506 * Since prev_wq/prev_rq are the middle of a 7507 * fifo, wnew/rnew will also be the middle of 7508 * a fifo and wnew's nfsrv is same as rnew's. 7509 */ 7510 wnew->q_nfsrv = rnew->q_nfsrv; 7511 else 7512 wnew->q_nfsrv = prev_wq->q_next->q_nfsrv; 7513 } 7514 } 7515 } 7516 7517 /* 7518 * Reset the forward service procedure pointer; called at remove-time. 
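 *
 * (Editor's note: the illustrative sketch below is not part of the
 * original source.)
 */
#if 0	/* illustrative example only */
/*
 * The shape of the upstream walk that both set_nfsrv_ptr() and
 * reset_nfsrv_ptr() perform: starting above a queue, rewrite q_nfsrv
 * in every queue that lacks its own service procedure (q_nfsrv != qp)
 * until one that has one is reached. Hypothetical helper.
 */
static void
nfsrv_rewrite_upstream(queue_t *from, queue_t *newsrv)
{
	queue_t *qp;

	for (qp = backq(from); qp != NULL && qp->q_nfsrv != qp;
	    qp = backq(qp))
		qp->q_nfsrv = newsrv;
}
#endif

/*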
7519 */ 7520 void 7521 reset_nfsrv_ptr(queue_t *rqp, queue_t *wqp) 7522 { 7523 queue_t *tmp_qp; 7524 7525 /* Reset the write side q_nfsrv pointer for _I_REMOVE */ 7526 if ((rqp->q_flag & _QREMOVING) && (wqp->q_qinfo->qi_srvp != NULL)) { 7527 for (tmp_qp = backq(wqp); 7528 tmp_qp != NULL && tmp_qp->q_nfsrv == wqp; 7529 tmp_qp = backq(tmp_qp)) { 7530 tmp_qp->q_nfsrv = wqp->q_nfsrv; 7531 } 7532 } 7533 7534 /* reset the read side q_nfsrv pointer */ 7535 if (rqp->q_qinfo->qi_srvp) { 7536 if (wqp->q_next) { /* non-driver case */ 7537 tmp_qp = _OTHERQ(wqp->q_next); 7538 while (tmp_qp && tmp_qp->q_nfsrv == rqp) { 7539 /* Note that rqp->q_next cannot be NULL */ 7540 ASSERT(rqp->q_next != NULL); 7541 tmp_qp->q_nfsrv = rqp->q_next->q_nfsrv; 7542 tmp_qp = backq(tmp_qp); 7543 } 7544 } 7545 } 7546 } 7547 7548 /* 7549 * This routine should be called after all stream geometry changes to update 7550 * the stream head cached struio() rd/wr queue pointers. Note must be called 7551 * with the streamlock()ed. 7552 * 7553 * Note: only enables Synchronous STREAMS for a side of a Stream which has 7554 * an explicit synchronous barrier module queue. That is, a queue that 7555 * has specified a struio() type. 7556 */ 7557 static void 7558 strsetuio(stdata_t *stp) 7559 { 7560 queue_t *wrq; 7561 7562 if (stp->sd_flag & STPLEX) { 7563 /* 7564 * Not stremahead, but a mux, so no Synchronous STREAMS. 7565 */ 7566 stp->sd_struiowrq = NULL; 7567 stp->sd_struiordq = NULL; 7568 return; 7569 } 7570 /* 7571 * Scan the write queue(s) while synchronous 7572 * until we find a qinfo uio type specified. 7573 */ 7574 wrq = stp->sd_wrq->q_next; 7575 while (wrq) { 7576 if (wrq->q_struiot == STRUIOT_NONE) { 7577 wrq = 0; 7578 break; 7579 } 7580 if (wrq->q_struiot != STRUIOT_DONTCARE) 7581 break; 7582 if (! _SAMESTR(wrq)) { 7583 wrq = 0; 7584 break; 7585 } 7586 wrq = wrq->q_next; 7587 } 7588 stp->sd_struiowrq = wrq; 7589 /* 7590 * Scan the read queue(s) while synchronous 7591 * until we find a qinfo uio type specified. 7592 */ 7593 wrq = stp->sd_wrq->q_next; 7594 while (wrq) { 7595 if (_RD(wrq)->q_struiot == STRUIOT_NONE) { 7596 wrq = 0; 7597 break; 7598 } 7599 if (_RD(wrq)->q_struiot != STRUIOT_DONTCARE) 7600 break; 7601 if (! _SAMESTR(wrq)) { 7602 wrq = 0; 7603 break; 7604 } 7605 wrq = wrq->q_next; 7606 } 7607 stp->sd_struiordq = wrq ? _RD(wrq) : 0; 7608 } 7609 7610 /* 7611 * pass_wput, unblocks the passthru queues, so that 7612 * messages can arrive at muxs lower read queue, before 7613 * I_LINK/I_UNLINK is acked/nacked. 7614 */ 7615 static void 7616 pass_wput(queue_t *q, mblk_t *mp) 7617 { 7618 syncq_t *sq; 7619 7620 sq = _RD(q)->q_syncq; 7621 if (sq->sq_flags & SQ_BLOCKED) 7622 unblocksq(sq, SQ_BLOCKED, 0); 7623 putnext(q, mp); 7624 } 7625 7626 /* 7627 * Set up queues for the link/unlink. 7628 * Create a new queue and block it and then insert it 7629 * below the stream head on the lower stream. 7630 * This prevents any messages from arriving during the setq 7631 * as well as while the mux is processing the LINK/I_UNLINK. 7632 * The blocked passq is unblocked once the LINK/I_UNLINK has 7633 * been acked or nacked or if a message is generated and sent 7634 * down muxs write put procedure. 7635 * see pass_wput(). 7636 * 7637 * After the new queue is inserted, all messages coming from below are 7638 * blocked. The call to strlock will ensure that all activity in the stream head 7639 * read queue syncq is stopped (sq_count drops to zero). 
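 *
 * (Editor's note: the illustrative sketch below is not part of the
 * original source.)
 */
#if 0	/* illustrative example only */
/*
 * The user-level operation this machinery serves: while the I_LINK
 * ioctl below is processed, link_addpassthru() keeps a blocked
 * passthru queue inserted under the lower stream head. The device
 * paths are hypothetical.
 */
#include <fcntl.h>
#include <stropts.h>
#include <unistd.h>

int
link_lower_stream(void)
{
	int muxfd = open("/dev/mux", O_RDWR);	/* hypothetical mux driver */
	int lowfd = open("/dev/low", O_RDWR);	/* hypothetical lower stream */
	int muxid;

	if (muxfd < 0 || lowfd < 0)
		return (-1);
	muxid = ioctl(muxfd, I_LINK, lowfd);	/* lower stream joins the mux */
	if (muxid < 0)
		return (-1);
	/* later, tear the link down: */
	(void) ioctl(muxfd, I_UNLINK, muxid);
	return (0);
}
#endif

/*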
7640 */ 7641 static queue_t * 7642 link_addpassthru(stdata_t *stpdown) 7643 { 7644 queue_t *passq; 7645 sqlist_t sqlist; 7646 7647 passq = allocq(); 7648 STREAM(passq) = STREAM(_WR(passq)) = stpdown; 7649 /* setq might sleep in allocator - avoid holding locks. */ 7650 setq(passq, &passthru_rinit, &passthru_winit, NULL, QPERQ, 7651 SQ_CI|SQ_CO, B_FALSE); 7652 claimq(passq); 7653 blocksq(passq->q_syncq, SQ_BLOCKED, 1); 7654 insertq(STREAM(passq), passq); 7655 7656 /* 7657 * Use strlock() to wait for the stream head sq_count to drop to zero 7658 * since we are going to change q_ptr in the stream head. Note that 7659 * insertq() doesn't wait for any syncq counts to drop to zero. 7660 */ 7661 sqlist.sqlist_head = NULL; 7662 sqlist.sqlist_index = 0; 7663 sqlist.sqlist_size = sizeof (sqlist_t); 7664 sqlist_insert(&sqlist, _RD(stpdown->sd_wrq)->q_syncq); 7665 strlock(stpdown, &sqlist); 7666 strunlock(stpdown, &sqlist); 7667 7668 releaseq(passq); 7669 return (passq); 7670 } 7671 7672 /* 7673 * Let messages flow up into the mux by removing 7674 * the passq. 7675 */ 7676 static void 7677 link_rempassthru(queue_t *passq) 7678 { 7679 claimq(passq); 7680 removeq(passq); 7681 releaseq(passq); 7682 freeq(passq); 7683 } 7684 7685 /* 7686 * wait for an event with optional timeout and optional return if 7687 * a signal is sent to the thread 7688 * tim: -1 : no timeout 7689 * otherwise the value is relative time in milliseconds to wait 7690 * nosig: if 0 then signals will be ignored, otherwise signals 7691 * will terminate wait 7692 * returns >0 on success, 0 if signal was encountered, -1 if timeout 7693 * was reached. 7694 */ 7695 clock_t 7696 str_cv_wait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim, int nosigs) 7697 { 7698 clock_t ret, now, tick; 7699 7700 if (tim < 0) { 7701 if (nosigs) { 7702 cv_wait(cvp, mp); 7703 ret = 1; 7704 } else { 7705 ret = cv_wait_sig(cvp, mp); 7706 } 7707 } else if (tim > 0) { 7708 /* 7709 * convert milliseconds to clock ticks 7710 */ 7711 tick = MSEC_TO_TICK_ROUNDUP(tim); 7712 time_to_wait(&now, tick); 7713 if (nosigs) { 7714 ret = cv_timedwait(cvp, mp, now); 7715 } else { 7716 ret = cv_timedwait_sig(cvp, mp, now); 7717 } 7718 } else { 7719 ret = -1; 7720 } 7721 return (ret); 7722 } 7723 7724 /* 7725 * Wait until the stream head can determine if it is at the mark but 7726 * don't wait forever to prevent a race condition between the "mark" state 7727 * in the stream head and any mark state in the caller/user of this routine. 7728 * 7729 * This is used by sockets and for a socket it would be incorrect 7730 * to return a failure for SIOCATMARK when there is no data in the receive 7731 * queue and the marked urgent data is traveling up the stream. 7732 * 7733 * This routine waits until the mark is known by waiting for one of these 7734 * three events: 7735 * The stream head read queue becoming non-empty (including an EOF) 7736 * The STRATMARK flag being set. (Due to a MSGMARKNEXT message.) 7737 * The STRNOTATMARK flag being set (which indicates that the transport 7738 * has sent a MSGNOTMARKNEXT message to indicate that it is not at 7739 * the mark). 7740 * 7741 * The routine returns 1 if the stream is at the mark; 0 if it can 7742 * be determined that the stream is not at the mark. 7743 * If the wait times out and it can't determine 7744 * whether or not the stream might be at the mark the routine will return -1. 7745 * 7746 * Note: This routine should only be used when a mark is pending i.e., 7747 * in the socket case the SIGURG has been posted. 
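 *
 * (Editor's note: the illustrative sketch below is not part of the
 * original source.)
 */
#if 0	/* illustrative example only */
/*
 * The consumer-side pattern strwaitmark() supports: after SIGURG a
 * socket user checks SIOCATMARK before deciding whether to pull the
 * out-of-band byte. Purely illustrative user-level code.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/ioctl.h>
#include <unistd.h>

ssize_t
read_at_mark(int s, char *buf, size_t len)
{
	int atmark = 0;

	if (ioctl(s, SIOCATMARK, &atmark) < 0)
		return (-1);
	if (atmark)
		return (recv(s, buf, 1, MSG_OOB));	/* fetch the OOB byte */
	return (read(s, buf, len));	/* normal data up to the mark */
}
#endif

/*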
7748 * Note2: This can not wakeup just because synchronous streams indicate 7749 * that data is available since it is not possible to use the synchronous 7750 * streams interfaces to determine the b_flag value for the data queued below 7751 * the stream head. 7752 */ 7753 int 7754 strwaitmark(vnode_t *vp) 7755 { 7756 struct stdata *stp = vp->v_stream; 7757 queue_t *rq = _RD(stp->sd_wrq); 7758 int mark; 7759 7760 mutex_enter(&stp->sd_lock); 7761 while (rq->q_first == NULL && 7762 !(stp->sd_flag & (STRATMARK|STRNOTATMARK|STREOF))) { 7763 stp->sd_flag |= RSLEEP; 7764 7765 /* Wait for 100 milliseconds for any state change. */ 7766 if (str_cv_wait(&rq->q_wait, &stp->sd_lock, 100, 1) == -1) { 7767 mutex_exit(&stp->sd_lock); 7768 return (-1); 7769 } 7770 } 7771 if (stp->sd_flag & STRATMARK) 7772 mark = 1; 7773 else if (rq->q_first != NULL && (rq->q_first->b_flag & MSGMARK)) 7774 mark = 1; 7775 else 7776 mark = 0; 7777 7778 mutex_exit(&stp->sd_lock); 7779 return (mark); 7780 } 7781 7782 /* 7783 * Set a read side error. If persist is set change the socket error 7784 * to persistent. If errfunc is set install the function as the exported 7785 * error handler. 7786 */ 7787 void 7788 strsetrerror(vnode_t *vp, int error, int persist, errfunc_t errfunc) 7789 { 7790 struct stdata *stp = vp->v_stream; 7791 7792 mutex_enter(&stp->sd_lock); 7793 stp->sd_rerror = error; 7794 if (error == 0 && errfunc == NULL) 7795 stp->sd_flag &= ~STRDERR; 7796 else 7797 stp->sd_flag |= STRDERR; 7798 if (persist) { 7799 stp->sd_flag &= ~STRDERRNONPERSIST; 7800 } else { 7801 stp->sd_flag |= STRDERRNONPERSIST; 7802 } 7803 stp->sd_rderrfunc = errfunc; 7804 if (error != 0 || errfunc != NULL) { 7805 cv_broadcast(&_RD(stp->sd_wrq)->q_wait); /* readers */ 7806 cv_broadcast(&stp->sd_wrq->q_wait); /* writers */ 7807 cv_broadcast(&stp->sd_monitor); /* ioctllers */ 7808 7809 mutex_exit(&stp->sd_lock); 7810 pollwakeup(&stp->sd_pollist, POLLERR); 7811 mutex_enter(&stp->sd_lock); 7812 7813 if (stp->sd_sigflags & S_ERROR) 7814 strsendsig(stp->sd_siglist, S_ERROR, 0, error); 7815 } 7816 mutex_exit(&stp->sd_lock); 7817 } 7818 7819 /* 7820 * Set a write side error. If persist is set change the socket error 7821 * to persistent. 7822 */ 7823 void 7824 strsetwerror(vnode_t *vp, int error, int persist, errfunc_t errfunc) 7825 { 7826 struct stdata *stp = vp->v_stream; 7827 7828 mutex_enter(&stp->sd_lock); 7829 stp->sd_werror = error; 7830 if (error == 0 && errfunc == NULL) 7831 stp->sd_flag &= ~STWRERR; 7832 else 7833 stp->sd_flag |= STWRERR; 7834 if (persist) { 7835 stp->sd_flag &= ~STWRERRNONPERSIST; 7836 } else { 7837 stp->sd_flag |= STWRERRNONPERSIST; 7838 } 7839 stp->sd_wrerrfunc = errfunc; 7840 if (error != 0 || errfunc != NULL) { 7841 cv_broadcast(&_RD(stp->sd_wrq)->q_wait); /* readers */ 7842 cv_broadcast(&stp->sd_wrq->q_wait); /* writers */ 7843 cv_broadcast(&stp->sd_monitor); /* ioctllers */ 7844 7845 mutex_exit(&stp->sd_lock); 7846 pollwakeup(&stp->sd_pollist, POLLERR); 7847 mutex_enter(&stp->sd_lock); 7848 7849 if (stp->sd_sigflags & S_ERROR) 7850 strsendsig(stp->sd_siglist, S_ERROR, 0, error); 7851 } 7852 mutex_exit(&stp->sd_lock); 7853 } 7854 7855 /* 7856 * Make the stream return 0 (EOF) when all data has been read. 7857 * No effect on write side. 
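 *
 * (Editor's note: the illustrative sketch below is not part of the
 * original source.)
 */
#if 0	/* illustrative example only */
/*
 * What STREOF means to a reader: once strseteof() has marked the
 * stream, read(2) returns 0 instead of sleeping. A conventional
 * drain loop, purely illustrative.
 */
#include <unistd.h>

ssize_t
drain_until_eof(int fd, char *buf, size_t len)
{
	ssize_t n, total = 0;

	while ((n = read(fd, buf, len)) > 0)
		total += n;	/* data */
	return (n < 0 ? -1 : total);	/* -1 on error, else bytes before EOF */
}
#endif

/*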

/*
 * Make the stream return 0 (EOF) when all data has been read.
 * No effect on the write side.
 */
void
strseteof(vnode_t *vp, int eof)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	if (!eof) {
		stp->sd_flag &= ~STREOF;
		mutex_exit(&stp->sd_lock);
		return;
	}
	stp->sd_flag |= STREOF;
	if (stp->sd_flag & RSLEEP) {
		stp->sd_flag &= ~RSLEEP;
		cv_broadcast(&_RD(stp->sd_wrq)->q_wait);
	}

	mutex_exit(&stp->sd_lock);
	pollwakeup(&stp->sd_pollist, POLLIN|POLLRDNORM);
	mutex_enter(&stp->sd_lock);

	if (stp->sd_sigflags & (S_INPUT|S_RDNORM))
		strsendsig(stp->sd_siglist, S_INPUT|S_RDNORM, 0, 0);
	mutex_exit(&stp->sd_lock);
}

void
strflushrq(vnode_t *vp, int flag)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	flushq(_RD(stp->sd_wrq), flag);
	mutex_exit(&stp->sd_lock);
}

void
strsetrputhooks(vnode_t *vp, uint_t flags,
    msgfunc_t protofunc, msgfunc_t miscfunc)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);

	if (protofunc == NULL)
		stp->sd_rprotofunc = strrput_proto;
	else
		stp->sd_rprotofunc = protofunc;

	if (miscfunc == NULL)
		stp->sd_rmiscfunc = strrput_misc;
	else
		stp->sd_rmiscfunc = miscfunc;

	if (flags & SH_CONSOL_DATA)
		stp->sd_rput_opt |= SR_CONSOL_DATA;
	else
		stp->sd_rput_opt &= ~SR_CONSOL_DATA;

	if (flags & SH_SIGALLDATA)
		stp->sd_rput_opt |= SR_SIGALLDATA;
	else
		stp->sd_rput_opt &= ~SR_SIGALLDATA;

	if (flags & SH_IGN_ZEROLEN)
		stp->sd_rput_opt |= SR_IGN_ZEROLEN;
	else
		stp->sd_rput_opt &= ~SR_IGN_ZEROLEN;

	mutex_exit(&stp->sd_lock);
}

void
strsetwputhooks(vnode_t *vp, uint_t flags, clock_t closetime)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	stp->sd_closetime = closetime;

	if (flags & SH_SIGPIPE)
		stp->sd_wput_opt |= SW_SIGPIPE;
	else
		stp->sd_wput_opt &= ~SW_SIGPIPE;
	if (flags & SH_RECHECK_ERR)
		stp->sd_wput_opt |= SW_RECHECK_ERR;
	else
		stp->sd_wput_opt &= ~SW_RECHECK_ERR;

	mutex_exit(&stp->sd_lock);
}
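
/*
 * Example (sketch): how a stream-head client such as a socket layer
 * might install the hooks above.  example_sethooks(), the particular
 * flag combination, and the closetime value are hypothetical choices,
 * not taken from this file; NULL hook pointers select the built-in
 * strrput_proto()/strrput_misc() defaults.
 */
static void
example_sethooks(vnode_t *vp)
{
	/* Consolidate data and drop zero-length messages on the read side. */
	strsetrputhooks(vp, SH_CONSOL_DATA | SH_IGN_ZEROLEN, NULL, NULL);

	/*
	 * Post SIGPIPE on write-side errors and ask the write side to
	 * re-check stream-head errors; a closetime (in ticks) of 0 is a
	 * hypothetical choice here.
	 */
	strsetwputhooks(vp, SH_SIGPIPE | SH_RECHECK_ERR, 0);
}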

/* Used within the framework when the queue is already locked */
void
qenable_locked(queue_t *q)
{
	stdata_t *stp = STREAM(q);

	ASSERT(MUTEX_HELD(QLOCK(q)));

	if (!q->q_qinfo->qi_srvp)
		return;

	/*
	 * Do not place on the run queue if already enabled or closing.
	 */
	if (q->q_flag & (QWCLOSE|QENAB))
		return;

	/*
	 * Mark the queue enabled and place it on the run list if it is
	 * not already being serviced.  If it is serviced, the runservice()
	 * function will detect that QENAB is set and call the service
	 * procedure before clearing the QINSERVICE flag.
	 */
	q->q_flag |= QENAB;
	if (q->q_flag & QINSERVICE)
		return;

	/* Record the time of qenable */
	q->q_qtstamp = lbolt;

	/*
	 * Put the queue in the stp list and schedule it for background
	 * processing if it is not already scheduled, or if the stream head
	 * does not intend to process it in the foreground later by setting
	 * the STRS_WILLSERVICE flag.
	 */
	mutex_enter(&stp->sd_qlock);
	/*
	 * If there is already something on the list, the stp flags should
	 * show the intention to drain it.
	 */
	IMPLY(STREAM_NEEDSERVICE(stp),
	    (stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED)));

	ENQUEUE(q, stp->sd_qhead, stp->sd_qtail, q_link);
	stp->sd_nqueues++;

	/*
	 * If no one will drain this stream, we are the first producer and
	 * need to schedule it for the background thread.
	 */
	if (!(stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED))) {
		/*
		 * No one will service this stream later, so we have to
		 * schedule it now.
		 */
		STRSTAT(stenables);
		stp->sd_svcflags |= STRS_SCHEDULED;
		stp->sd_servid = (void *)taskq_dispatch(streams_taskq,
		    (task_func_t *)stream_service, stp, TQ_NOSLEEP|TQ_NOQUEUE);

		if (stp->sd_servid == NULL) {
			/*
			 * The task queue failed, so fail over to the backup
			 * servicing thread.
			 */
			STRSTAT(taskqfails);
			/*
			 * It is safe to clear the STRS_SCHEDULED flag because
			 * it was set by this thread above.
			 */
			stp->sd_svcflags &= ~STRS_SCHEDULED;

			/*
			 * Failover scheduling is protected by the
			 * service_queue lock.
			 */
			mutex_enter(&service_queue);
			ASSERT((stp->sd_qhead == q) && (stp->sd_qtail == q));
			ASSERT(q->q_link == NULL);
			/*
			 * Append the queue to the qhead/qtail list.
			 */
			if (qhead == NULL)
				qhead = q;
			else
				qtail->q_link = q;
			qtail = q;
			/*
			 * Clear the stp queue list.
			 */
			stp->sd_qhead = stp->sd_qtail = NULL;
			stp->sd_nqueues = 0;
			/*
			 * Wake up the background queue-processing thread.
			 */
			cv_signal(&services_to_run);
			mutex_exit(&service_queue);
		}
	}
	mutex_exit(&stp->sd_qlock);
}

static void
queue_service(queue_t *q)
{
	/*
	 * The queue in the list should have the QENAB flag set and should
	 * not have the QINSERVICE flag set.  QINSERVICE is set when the
	 * queue is dequeued, and qenable_locked() doesn't enqueue a queue
	 * with QINSERVICE set.
	 */
	ASSERT(!(q->q_flag & QINSERVICE));
	ASSERT((q->q_flag & QENAB));
	mutex_enter(QLOCK(q));
	q->q_flag &= ~QENAB;
	q->q_flag |= QINSERVICE;
	mutex_exit(QLOCK(q));
	runservice(q);
}

static void
syncq_service(syncq_t *sq)
{
	STRSTAT(syncqservice);
	mutex_enter(SQLOCK(sq));
	ASSERT(!(sq->sq_svcflags & SQ_SERVICE));
	ASSERT(sq->sq_servcount != 0);
	ASSERT(sq->sq_next == NULL);

	/* If we came here from the background thread, clear the flag. */
	if (sq->sq_svcflags & SQ_BGTHREAD)
		sq->sq_svcflags &= ~SQ_BGTHREAD;

	/* Let drain_syncq() know that it's being called in the background. */
	sq->sq_svcflags |= SQ_SERVICE;
	drain_syncq(sq);
}
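
/*
 * Example (sketch): the classic service procedure that the scheduling
 * machinery above ultimately runs.  When a queue marked QENAB by
 * qenable_locked() is dequeued, queue_service()/runservice() invoke a
 * routine of this shape.  example_rsrv() is hypothetical; getq(9F),
 * canputnext(9F), putnext(9F) and putbq(9F) are standard DDI routines.
 */
static int
example_rsrv(queue_t *q)
{
	mblk_t *mp;

	while ((mp = getq(q)) != NULL) {
		if (canputnext(q)) {
			putnext(q, mp);
		} else {
			/*
			 * Flow controlled: put the message back.
			 * Back-enabling will later re-enable this queue,
			 * which ends up in qenable_locked() again.
			 */
			(void) putbq(q, mp);
			break;
		}
	}
	return (0);
}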

static void
qwriter_outer_service(syncq_t *outer)
{
	/*
	 * Note that SQ_WRITER is used on the outer perimeter to signal
	 * that a qwriter(OUTER) is either investigating whether it can
	 * run or is actually running a function.
	 */
	outer_enter(outer, SQ_BLOCKED|SQ_WRITER);

	/*
	 * All inner syncqs are empty and have SQ_WRITER set to block
	 * entering the outer perimeter.
	 *
	 * We do not need to explicitly call write_now() since
	 * outer_exit() does it for us.
	 */
	outer_exit(outer);
}

static void
mblk_free(mblk_t *mp)
{
	dblk_t *dbp = mp->b_datap;
	frtn_t *frp = dbp->db_frtnp;

	mp->b_next = NULL;
	if (dbp->db_fthdr != NULL)
		str_ftfree(dbp);

	ASSERT(dbp->db_fthdr == NULL);
	frp->free_func(frp->free_arg);
	ASSERT(dbp->db_mblk == mp);

	if (dbp->db_credp != NULL) {
		crfree(dbp->db_credp);
		dbp->db_credp = NULL;
	}
	dbp->db_cpid = -1;
	dbp->db_struioflag = 0;
	dbp->db_struioun.cksum.flags = 0;

	kmem_cache_free(dbp->db_cache, dbp);
}

/*
 * Background processing of the stream queue list.
 */
static void
stream_service(stdata_t *stp)
{
	queue_t *q;

	mutex_enter(&stp->sd_qlock);

	STR_SERVICE(stp, q);

	stp->sd_svcflags &= ~STRS_SCHEDULED;
	stp->sd_servid = NULL;
	cv_signal(&stp->sd_qcv);
	mutex_exit(&stp->sd_qlock);
}

/*
 * Foreground processing of the stream queue list.
 */
void
stream_runservice(stdata_t *stp)
{
	queue_t *q;

	mutex_enter(&stp->sd_qlock);
	STRSTAT(rservice);
	/*
	 * We are going to drain this stream queue list, so qenable_locked()
	 * will not schedule it until we finish.
	 */
	stp->sd_svcflags |= STRS_WILLSERVICE;

	STR_SERVICE(stp, q);

	stp->sd_svcflags &= ~STRS_WILLSERVICE;
	mutex_exit(&stp->sd_qlock);
	/*
	 * Help the backup background thread to drain the qhead/qtail list.
	 */
	while (qhead != NULL) {
		STRSTAT(qhelps);
		mutex_enter(&service_queue);
		DQ(q, qhead, qtail, q_link);
		mutex_exit(&service_queue);
		if (q != NULL)
			queue_service(q);
	}
}

void
stream_willservice(stdata_t *stp)
{
	mutex_enter(&stp->sd_qlock);
	stp->sd_svcflags |= STRS_WILLSERVICE;
	mutex_exit(&stp->sd_qlock);
}
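
/*
 * Example (sketch): the intended bracket usage of stream_willservice()
 * and stream_runservice().  A caller that is about to run code which
 * may enable stream-head queues (via qenable_locked()) announces that
 * it will drain them itself, does its work, then drains.
 * example_deliver() and the elided work in the middle are hypothetical.
 */
static void
example_deliver(stdata_t *stp)
{
	stream_willservice(stp);	/* promise: we will drain sd_qhead */

	/*
	 * ... deliver messages; any queues enabled here are appended to
	 * stp->sd_qhead instead of being dispatched to the task queue ...
	 */

	stream_runservice(stp);		/* drain the list we promised to */
}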

/*
 * Replace the cred currently in the mblk with a different one.
 */
void
mblk_setcred(mblk_t *mp, cred_t *cr)
{
	cred_t *ocr = DB_CRED(mp);

	ASSERT(cr != NULL);

	if (cr != ocr) {
		crhold(mp->b_datap->db_credp = cr);
		if (ocr != NULL)
			crfree(ocr);
	}
}

int
hcksum_assoc(mblk_t *mp, multidata_t *mmd, pdesc_t *pd,
    uint32_t start, uint32_t stuff, uint32_t end, uint32_t value,
    uint32_t flags, int km_flags)
{
	int rc = 0;

	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
	if (mp->b_datap->db_type == M_DATA) {
		/* Associate values for the M_DATA type */
		mp->b_datap->db_cksumstart = (intptr_t)start;
		mp->b_datap->db_cksumstuff = (intptr_t)stuff;
		mp->b_datap->db_cksumend = (intptr_t)end;
		mp->b_datap->db_struioun.cksum.flags = flags;
		mp->b_datap->db_cksum16 = (uint16_t)value;
	} else {
		pattrinfo_t pa_info;

		ASSERT(mmd != NULL);

		pa_info.type = PATTR_HCKSUM;
		pa_info.len = sizeof (pattr_hcksum_t);

		if (mmd_addpattr(mmd, pd, &pa_info, B_TRUE, km_flags) != NULL) {
			pattr_hcksum_t *hck = (pattr_hcksum_t *)pa_info.buf;

			hck->hcksum_start_offset = start;
			hck->hcksum_stuff_offset = stuff;
			hck->hcksum_end_offset = end;
			hck->hcksum_cksum_val.inet_cksum = (uint16_t)value;
			hck->hcksum_flags = flags;
		} else {
			/*
			 * Report the attribute-association failure to the
			 * caller; rc was otherwise never set.
			 */
			rc = -1;
		}
	}
	return (rc);
}

void
hcksum_retrieve(mblk_t *mp, multidata_t *mmd, pdesc_t *pd,
    uint32_t *start, uint32_t *stuff, uint32_t *end,
    uint32_t *value, uint32_t *flags)
{
	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
	if (mp->b_datap->db_type == M_DATA) {
		if (flags != NULL) {
			*flags = mp->b_datap->db_struioun.cksum.flags;
			if (*flags & HCK_PARTIALCKSUM) {
				if (start != NULL)
					*start = (uint32_t)
					    mp->b_datap->db_cksumstart;
				if (stuff != NULL)
					*stuff = (uint32_t)
					    mp->b_datap->db_cksumstuff;
				if (end != NULL)
					*end = (uint32_t)
					    mp->b_datap->db_cksumend;
				if (value != NULL)
					*value = (uint32_t)
					    mp->b_datap->db_cksum16;
			}
		}
	} else {
		pattrinfo_t hck_attr = {PATTR_HCKSUM};

		ASSERT(mmd != NULL);

		/* get the hardware checksum attribute */
		if (mmd_getpattr(mmd, pd, &hck_attr) != NULL) {
			pattr_hcksum_t *hck = (pattr_hcksum_t *)hck_attr.buf;

			ASSERT(hck_attr.len >= sizeof (pattr_hcksum_t));
			if (flags != NULL)
				*flags = hck->hcksum_flags;
			if (start != NULL)
				*start = hck->hcksum_start_offset;
			if (stuff != NULL)
				*stuff = hck->hcksum_stuff_offset;
			if (end != NULL)
				*end = hck->hcksum_end_offset;
			if (value != NULL)
				*value = (uint32_t)
				    hck->hcksum_cksum_val.inet_cksum;
		}
	}
}
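
/*
 * Example (sketch): how the checksum-offload metadata managed by
 * hcksum_assoc()/hcksum_retrieve() is typically used.  A transport
 * requests offload on an M_DATA message and a driver retrieves the
 * request at transmit time.  example_request_cksum(),
 * example_tx_cksum() and the offset arguments are hypothetical.
 */
static void
example_request_cksum(mblk_t *mp, uint32_t start, uint32_t stuff,
    uint32_t end)
{
	/* mp is assumed to be an M_DATA message, so mmd/pd may be NULL. */
	(void) hcksum_assoc(mp, NULL, NULL, start, stuff, end, 0,
	    HCK_PARTIALCKSUM, KM_NOSLEEP);
}

static void
example_tx_cksum(mblk_t *mp)
{
	uint32_t start = 0, stuff = 0, end = 0, value = 0, flags = 0;

	hcksum_retrieve(mp, NULL, NULL, &start, &stuff, &end, &value, &flags);

	if (flags & HCK_FULLCKSUM) {
		/* Hardware is asked to compute the complete checksum. */
	} else if (flags & HCK_PARTIALCKSUM) {
		/*
		 * Hardware sums the bytes from offset `start' through
		 * `end' and deposits the 16-bit result at offset `stuff';
		 * `value' seeds the sum.
		 */
	}
}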

/*
 * Checksum buffer *bp for len bytes, starting with the partial checksum
 * psum (0 if none), and return the 16-bit partial checksum.
 */
unsigned
bcksum(uchar_t *bp, int len, unsigned int psum)
{
	int odd = len & 1;
	extern unsigned int ip_ocsum();

	if (((intptr_t)bp & 1) == 0 && !odd) {
		/*
		 * bp is 16-bit aligned and len is a multiple of the
		 * 16-bit word size.
		 */
		return (ip_ocsum((ushort_t *)bp, len >> 1, psum));
	}
	if (((intptr_t)bp & 1) != 0) {
		/*
		 * bp isn't 16-bit aligned.
		 */
		unsigned int tsum;

#ifdef _LITTLE_ENDIAN
		psum += *bp;
#else
		psum += *bp << 8;
#endif
		len--;
		bp++;
		tsum = ip_ocsum((ushort_t *)bp, len >> 1, 0);
		psum += ((tsum << 8) & 0xffff) | (tsum >> 8);
		if (len & 1) {
			bp += len - 1;
#ifdef _LITTLE_ENDIAN
			psum += *bp << 8;
#else
			psum += *bp;
#endif
		}
	} else {
		/*
		 * bp is 16-bit aligned.
		 */
		psum = ip_ocsum((ushort_t *)bp, len >> 1, psum);
		if (odd) {
			bp += len - 1;
#ifdef _LITTLE_ENDIAN
			psum += *bp;
#else
			psum += *bp << 8;
#endif
		}
	}
	/*
	 * Normalize psum to 16 bits before returning the new partial
	 * checksum.  The max psum value before normalization is 0x3FDFE.
	 */
	return ((psum >> 16) + (psum & 0xFFFF));
}

boolean_t
is_vmloaned_mblk(mblk_t *mp, multidata_t *mmd, pdesc_t *pd)
{
	boolean_t rc;

	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
	if (DB_TYPE(mp) == M_DATA) {
		rc = (((mp)->b_datap->db_struioflag & STRUIO_ZC) != 0);
	} else {
		pattrinfo_t zcopy_attr = {PATTR_ZCOPY};

		ASSERT(mmd != NULL);
		rc = (mmd_getpattr(mmd, pd, &zcopy_attr) != NULL);
	}
	return (rc);
}

void
freemsgchain(mblk_t *mp)
{
	mblk_t *next;

	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;

		freemsg(mp);
		mp = next;
	}
}

mblk_t *
copymsgchain(mblk_t *mp)
{
	mblk_t *nmp = NULL;
	mblk_t **nmpp = &nmp;

	for (; mp != NULL; mp = mp->b_next) {
		if ((*nmpp = copymsg(mp)) == NULL) {
			freemsgchain(nmp);
			return (NULL);
		}

		nmpp = &((*nmpp)->b_next);
	}

	return (nmp);
}

/* NOTE: Do not add code after this point. */
#undef QLOCK

/*
 * Replacement for the QLOCK macro for those that can't use it.
 */
kmutex_t *
QLOCK(queue_t *q)
{
	return (&(q)->q_lock);
}

/*
 * Dummy runqueues()/queuerun() functions for backwards compatibility.
 */
#undef runqueues
void
runqueues(void)
{
}

#undef queuerun
void
queuerun(void)
{
}
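
/*
 * Example (sketch) of copymsgchain()/freemsgchain() usage, kept inside
 * this comment because of the NOTE above that no code should be added
 * after this point.  The names are hypothetical:
 *
 *	mblk_t *copy;
 *
 *	if ((copy = copymsgchain(chain)) == NULL) {
 *		freemsgchain(chain);	(allocation failed; drop the chain)
 *		return;
 *	}
 *	(hand `chain' to one consumer and `copy' to the other; each
 *	consumer eventually releases its b_next-linked chain with
 *	freemsgchain())
 */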