/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/signal.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/session.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/stropts.h>
#include <sys/poll.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/uio.h>
#include <sys/cmn_err.h>
#include <sys/priocntl.h>
#include <sys/procset.h>
#include <sys/vmem.h>
#include <sys/bitmap.h>
#include <sys/kmem.h>
#include <sys/siginfo.h>
#include <sys/vtrace.h>
#include <sys/callb.h>
#include <sys/debug.h>
#include <sys/modctl.h>
#include <sys/vmsystm.h>
#include <vm/page.h>
#include <sys/atomic.h>
#include <sys/suntpi.h>
#include <sys/strlog.h>
#include <sys/promif.h>
#include <sys/project.h>
#include <sys/vm.h>
#include <sys/taskq.h>
#include <sys/sunddi.h>
#include <sys/sunldi_impl.h>
#include <sys/strsun.h>
#include <sys/isa_defs.h>
#include <sys/multidata.h>
#include <sys/pattr.h>
#include <sys/strft.h>
#include <sys/fs/snode.h>
#include <sys/zone.h>

#define	O_SAMESTR(q)	(((q)->q_next) && \
	(((q)->q_flag & QREADR) == ((q)->q_next->q_flag & QREADR)))

/*
 * WARNING:
 * The variables and routines in this file are private, belonging
 * to the STREAMS subsystem. These should not be used by modules
 * or drivers. Compatibility will not be guaranteed.
 */
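/*
 * Illustrative note (not part of the original source): O_SAMESTR()
 * decides whether q and q->q_next are on the same side of the same
 * stream by comparing their QREADR bits. For a write queue in the
 * middle of a stream, q_next is the next module's write queue, so
 * both QREADR bits are clear and O_SAMESTR() is true; across a pipe
 * twist the write queue's q_next is the other end's read queue, the
 * bits differ, and O_SAMESTR() is false.
 */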
/*
 * Id value used to distinguish between different multiplexor links.
 */
static int32_t lnk_id = 0;

#define	STREAMS_LOPRI	MINCLSYSPRI
static pri_t streams_lopri = STREAMS_LOPRI;

#define	STRSTAT(x)	(str_statistics.x.value.ui64++)
typedef struct str_stat {
	kstat_named_t	sqenables;
	kstat_named_t	stenables;
	kstat_named_t	syncqservice;
	kstat_named_t	freebs;
	kstat_named_t	qwr_outer;
	kstat_named_t	rservice;
	kstat_named_t	strwaits;
	kstat_named_t	taskqfails;
	kstat_named_t	bufcalls;
	kstat_named_t	qhelps;
	kstat_named_t	qremoved;
	kstat_named_t	sqremoved;
	kstat_named_t	bcwaits;
	kstat_named_t	sqtoomany;
} str_stat_t;

static str_stat_t str_statistics = {
	{ "sqenables",		KSTAT_DATA_UINT64 },
	{ "stenables",		KSTAT_DATA_UINT64 },
	{ "syncqservice",	KSTAT_DATA_UINT64 },
	{ "freebs",		KSTAT_DATA_UINT64 },
	{ "qwr_outer",		KSTAT_DATA_UINT64 },
	{ "rservice",		KSTAT_DATA_UINT64 },
	{ "strwaits",		KSTAT_DATA_UINT64 },
	{ "taskqfails",		KSTAT_DATA_UINT64 },
	{ "bufcalls",		KSTAT_DATA_UINT64 },
	{ "qhelps",		KSTAT_DATA_UINT64 },
	{ "qremoved",		KSTAT_DATA_UINT64 },
	{ "sqremoved",		KSTAT_DATA_UINT64 },
	{ "bcwaits",		KSTAT_DATA_UINT64 },
	{ "sqtoomany",		KSTAT_DATA_UINT64 },
};

static kstat_t *str_kstat;

/*
 * qrunflag was used previously to control background scheduling of queues. It
 * is no longer used, but is kept here in case some module still wants to
 * access it via the qready() and setqsched macros.
 */
char qrunflag;			/* Unused */

/*
 * Most of the STREAMS scheduling is done via task queues. Task queues may fail
 * for non-sleep dispatches, so there are two backup threads servicing failed
 * requests for queues and syncqs. Both of these threads also service failed
 * freebs dispatch requests. Queues are put in the list specified by the
 * `qhead' and `qtail' pointers, syncqs use the `sqhead' and `sqtail' pointers,
 * and freebs requests are put into `freebs_list', which has no tail pointer.
 * All three lists are protected by a single `service_queue' lock and use the
 * `services_to_run' condition variable for signaling the background threads.
 * Use of a single lock should not be a problem because it is only used under
 * heavy loads when task queues start to fail, and at that time it may be a
 * good idea to throttle scheduling requests.
 *
 * NOTE: queues and syncqs should be scheduled by two separate threads because
 * queue servicing may be blocked waiting for a syncq which may also be
 * scheduled for background execution. This may create a deadlock when only one
 * thread is used for both.
 */

static taskq_t *streams_taskq;		/* Used for most STREAMS scheduling */

static kmutex_t service_queue;		/* protects all of the servicing vars */
static kcondvar_t services_to_run;	/* wake up background service thread */
static kcondvar_t syncqs_to_run;	/* wake up background service thread */

/*
 * List of queues scheduled for background processing due to lack of resources
 * in the task queues. Protected by the service_queue lock.
 */
static struct queue *qhead;
static struct queue *qtail;

/*
 * Same list for syncqs
 */
static syncq_t *sqhead;
static syncq_t *sqtail;

static mblk_t *freebs_list;	/* list of buffers to free */

/*
 * Backup threads for servicing queues and syncqs
 */
kthread_t *streams_qbkgrnd_thread;
kthread_t *streams_sqbkgrnd_thread;
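/*
 * Illustrative sketch (not part of the original source): a dispatcher
 * that fails to hand a queue to streams_taskq falls back to the lists
 * above and wakes the backup thread, roughly:
 *
 *	if (taskq_dispatch(streams_taskq, (task_func_t *)queue_service,
 *	    q, TQ_NOSLEEP) == NULL) {
 *		mutex_enter(&service_queue);
 *		STRSTAT(taskqfails);
 *		ENQUEUE(q, qhead, qtail, q_link);
 *		cv_signal(&services_to_run);
 *		mutex_exit(&service_queue);
 *	}
 *
 * ENQUEUE() is defined later in this file; the actual dispatch and
 * failure paths live in the queue scheduling code of the STREAMS
 * subsystem.
 */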
/*
 * Bufcall-related variables.
 */
struct bclist	strbcalls;	/* list of waiting bufcalls */
kmutex_t	strbcall_lock;	/* protects bufcall list (strbcalls) */
kcondvar_t	strbcall_cv;	/* signaled when a bufcall is added */
kmutex_t	bcall_monitor;	/* sleep/wakeup style monitor */
kcondvar_t	bcall_cv;	/* wait until executing bufcall completes */
kthread_t	*bc_bkgrnd_thread; /* thread to service bufcall requests */

kmutex_t	strresources;	/* protects global resources */
kmutex_t	muxifier;	/* single-threads multiplexor creation */

extern void	time_to_wait(clock_t *, clock_t);

/*
 * run_queues is no longer used, but is kept in case some third-party
 * module/driver decides to use it.
 */
int run_queues = 0;

/*
 * sq_max_size is the depth of the syncq (in number of messages) before
 * qfill_syncq() starts QFULL'ing destination queues. Although its primary
 * consumer, IP, is no longer D_MTPERMOD, other modules/drivers may still
 * depend on this syncq flow control, so we prefer a large default value.
 * For potential performance gain, this value is tunable in /etc/system.
 */
int sq_max_size = 10000;

/*
 * The number of ciputctrl structures per syncq and stream we create when
 * needed.
 */
int n_ciputctrl;
int max_n_ciputctrl = 16;
/*
 * If n_ciputctrl is < min_n_ciputctrl, don't even create ciputctrl_cache.
 */
int min_n_ciputctrl = 2;

static struct mux_node *mux_nodes;	/* mux info for cycle checking */

/*
 * Per-driver/module syncqs
 * ========================
 *
 * For drivers/modules that use PERMOD or outer syncqs we keep a list of
 * perdm structures, new entries being added (and new syncqs allocated) when
 * setq() encounters a module/driver with a streamtab that it hasn't seen
 * before.
 * The reason for this mechanism is that some modules and drivers share a
 * common streamtab and it is necessary for those modules and drivers to also
 * share a common PERMOD syncq.
 *
 * perdm_list --> dm_str == streamtab_1
 *                dm_sq == syncq_1
 *                dm_ref
 *                dm_next --> dm_str == streamtab_2
 *                            dm_sq == syncq_2
 *                            dm_ref
 *                            dm_next --> ... NULL
 *
 * The dm_ref field is incremented for each new driver/module that takes
 * a reference to the perdm structure and hence shares the syncq.
 * References are held in the fmodsw_impl_t structure for each STREAMS module
 * or the dev_impl array (indexed by device major number) for each driver.
 *
 * perdm_list -> [dm_ref == 1] -> [dm_ref == 2] -> [dm_ref == 1] -> NULL
 *                    ^                 ^               ^            ^
 *                    |   ______________/               |            |
 *                    |  /                              |            |
 * dev_impl:       ...|x|y|...                      module A      module B
 *
 * When a module/driver is unloaded the reference count is decremented and,
 * when it falls to zero, the perdm structure is removed from the list and
 * the syncq is freed (see rele_dm()).
 */
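/*
 * Illustrative sketch (not part of the original source) of the
 * lookup-and-hold half of the scheme described above; the actual
 * logic lives in hold_dm() and rele_dm() elsewhere in this file:
 *
 *	rw_enter(&perdm_rwlock, RW_READER);
 *	for (p = perdm_list; p != NULL; p = p->dm_next) {
 *		if (p->dm_str == str) {
 *			atomic_add_32(&p->dm_ref, 1);
 *			rw_exit(&perdm_rwlock);
 *			return (p);
 *		}
 *	}
 *	rw_exit(&perdm_rwlock);
 *
 * and, on a miss, a new perdm_t plus syncq is allocated and inserted
 * at the head of perdm_list under the RW_WRITER lock.
 */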
perdm_t *perdm_list = NULL;
static krwlock_t perdm_rwlock;
cdevsw_impl_t *devimpl;

extern struct qinit strdata;
extern struct qinit stwdata;

static void runservice(queue_t *);
static void streams_bufcall_service(void);
static void streams_qbkgrnd_service(void);
static void streams_sqbkgrnd_service(void);
static syncq_t *new_syncq(void);
static void free_syncq(syncq_t *);
static void outer_insert(syncq_t *, syncq_t *);
static void outer_remove(syncq_t *, syncq_t *);
static void write_now(syncq_t *);
static void clr_qfull(queue_t *);
static void enable_svc(queue_t *);
static void runbufcalls(void);
static void sqenable(syncq_t *);
static void sqfill_events(syncq_t *, queue_t *, mblk_t *, void (*)());
static void wait_q_syncq(queue_t *);
static void backenable_insertedq(queue_t *);

static void queue_service(queue_t *);
static void stream_service(stdata_t *);
static void syncq_service(syncq_t *);
static void qwriter_outer_service(syncq_t *);
static void mblk_free(mblk_t *);
#ifdef DEBUG
static int qprocsareon(queue_t *);
#endif

static void set_nfsrv_ptr(queue_t *, queue_t *, queue_t *, queue_t *);
static void reset_nfsrv_ptr(queue_t *, queue_t *);

static void sq_run_events(syncq_t *);
static int propagate_syncq(queue_t *);

static void blocksq(syncq_t *, ushort_t, int);
static void unblocksq(syncq_t *, ushort_t, int);
static int dropsq(syncq_t *, uint16_t);
static void emptysq(syncq_t *);
static sqlist_t *sqlist_alloc(struct stdata *, int);
static void sqlist_free(sqlist_t *);
static sqlist_t *sqlist_build(queue_t *, struct stdata *, boolean_t);
static void sqlist_insert(sqlist_t *, syncq_t *);
static void sqlist_insertall(sqlist_t *, queue_t *);

static void strsetuio(stdata_t *);

struct kmem_cache *stream_head_cache;
struct kmem_cache *queue_cache;
struct kmem_cache *syncq_cache;
struct kmem_cache *qband_cache;
struct kmem_cache *linkinfo_cache;
struct kmem_cache *ciputctrl_cache = NULL;

static linkinfo_t *linkinfo_list;

/*
 * Qinit structure and Module_info structures
 * for passthru read and write queues
 */

static void pass_wput(queue_t *, mblk_t *);
static queue_t *link_addpassthru(stdata_t *);
static void link_rempassthru(queue_t *);

struct module_info passthru_info = {
	0,
	"passthru",
	0,
	INFPSZ,
	STRHIGH,
	STRLOW
};

struct qinit passthru_rinit = {
	(int (*)())putnext,
	NULL,
	NULL,
	NULL,
	NULL,
	&passthru_info,
	NULL
};

struct qinit passthru_winit = {
	(int (*)()) pass_wput,
	NULL,
	NULL,
	NULL,
	NULL,
	&passthru_info,
	NULL
};

/*
 * Special form of assertion: verify that X implies Y, i.e. when X is true,
 * Y should also be true.
 */
#define	IMPLY(X, Y)	ASSERT(!(X) || (Y))

/*
 * Logical equivalence. Verify that both X and Y are either TRUE or FALSE.
 */
#define	EQUIV(X, Y)	{ IMPLY(X, Y); IMPLY(Y, X); }

/*
 * Verify correctness of list head/tail pointers.
 */
#define	LISTCHECK(head, tail, link) {				\
	EQUIV(head, tail);					\
	IMPLY(tail != NULL, tail->link == NULL);		\
}

/*
 * Enqueue a list element `el' at the end of a list denoted by `head' and
 * `tail', using a `link' field.
 */
#define	ENQUEUE(el, head, tail, link) {				\
	ASSERT(el->link == NULL);				\
	LISTCHECK(head, tail, link);				\
	if (head == NULL)					\
		head = el;					\
	else							\
		tail->link = el;				\
	tail = el;						\
}

/*
 * Dequeue the first element of the list denoted by `head' and `tail' pointers
 * using a `link' field and put the result into `el'.
 */
#define	DQ(el, head, tail, link) {				\
	LISTCHECK(head, tail, link);				\
	el = head;						\
	if (head != NULL) {					\
		head = head->link;				\
		if (head == NULL)				\
			tail = NULL;				\
		el->link = NULL;				\
	}							\
}

/*
 * Remove `el' from the list using `chase' and `curr' pointers and return
 * the result in `succeed'.
 */
#define	RMQ(el, head, tail, link, chase, curr, succeed) {	\
	LISTCHECK(head, tail, link);				\
	chase = NULL;						\
	succeed = 0;						\
	for (curr = head; (curr != el) && (curr != NULL); curr = curr->link) \
		chase = curr;					\
	if (curr != NULL) {					\
		succeed = 1;					\
		ASSERT(curr == el);				\
		if (chase != NULL)				\
			chase->link = curr->link;		\
		else						\
			head = curr->link;			\
		curr->link = NULL;				\
		if (curr == tail)				\
			tail = chase;				\
	}							\
	LISTCHECK(head, tail, link);				\
}

/* Handling of delayed messages on the inner syncq. */

/*
 * DEBUG versions should use function versions (to simplify tracing) and
 * non-DEBUG kernels should use macro versions.
 */

/*
 * Put a queue on the syncq list of queues.
 * Assumes SQLOCK held.
 */
#define	SQPUT_Q(sq, qp)							\
{									\
	ASSERT(MUTEX_HELD(SQLOCK(sq)));					\
	if (!(qp->q_sqflags & Q_SQQUEUED)) {				\
		/* The queue should not be linked anywhere */		\
		ASSERT((qp->q_sqprev == NULL) && (qp->q_sqnext == NULL)); \
		/* Head and tail may only be NULL simultaneously */	\
		EQUIV(sq->sq_head, sq->sq_tail);			\
		/* Queue may only be enqueued on its syncq */		\
		ASSERT(sq == qp->q_syncq);				\
		/* Check the correctness of SQ_MESSAGES flag */		\
		EQUIV(sq->sq_head, (sq->sq_flags & SQ_MESSAGES));	\
		/* Sanity check first/last elements of the list */	\
		IMPLY(sq->sq_head != NULL, sq->sq_head->q_sqprev == NULL); \
		IMPLY(sq->sq_tail != NULL, sq->sq_tail->q_sqnext == NULL); \
		/*							\
		 * Sanity check of priority field: an empty queue should \
		 * have zero priority and nqueues equal to zero.	\
		 */							\
		IMPLY(sq->sq_head == NULL, sq->sq_pri == 0);		\
		/* Sanity check of sq_nqueues field */			\
		EQUIV(sq->sq_head, sq->sq_nqueues);			\
		if (sq->sq_head == NULL) {				\
			sq->sq_head = sq->sq_tail = qp;			\
			sq->sq_flags |= SQ_MESSAGES;			\
		} else if (qp->q_spri == 0) {				\
			qp->q_sqprev = sq->sq_tail;			\
			sq->sq_tail->q_sqnext = qp;			\
			sq->sq_tail = qp;				\
		} else {						\
			/*						\
			 * Put this queue in priority order: higher	\
			 * priority gets closer to the head.		\
			 */						\
			queue_t **qpp = &sq->sq_tail;			\
			queue_t *qnext = NULL;				\
									\
			while (*qpp != NULL && qp->q_spri > (*qpp)->q_spri) { \
				qnext = *qpp;				\
				qpp = &(*qpp)->q_sqprev;		\
			}						\
			qp->q_sqnext = qnext;				\
			qp->q_sqprev = *qpp;				\
			if (*qpp != NULL) {				\
				(*qpp)->q_sqnext = qp;			\
			} else {					\
				sq->sq_head = qp;			\
				sq->sq_pri = sq->sq_head->q_spri;	\
			}						\
			*qpp = qp;					\
		}							\
		qp->q_sqflags |= Q_SQQUEUED;				\
		qp->q_sqtstamp = lbolt;					\
		sq->sq_nqueues++;					\
	}								\
}
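/*
 * Worked example (not part of the original source): if queues with
 * q_spri values 0, 3 and 1 are SQPUT_Q'd onto an empty syncq in that
 * order, the resulting list is 3 -> 1 -> 0 (head to tail) and sq_pri
 * is 3; zero-priority queues are simply appended at the tail, while
 * prioritized queues are inserted by scanning back from the tail.
 */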
/*
 * Remove a queue from the syncq list.
 * Assumes SQLOCK held.
 */
#define	SQRM_Q(sq, qp)							\
{									\
	ASSERT(MUTEX_HELD(SQLOCK(sq)));					\
	ASSERT(qp->q_sqflags & Q_SQQUEUED);				\
	ASSERT(sq->sq_head != NULL && sq->sq_tail != NULL);		\
	ASSERT((sq->sq_flags & SQ_MESSAGES) != 0);			\
	/* Check that the queue is actually in the list */		\
	ASSERT(qp->q_sqnext != NULL || sq->sq_tail == qp);		\
	ASSERT(qp->q_sqprev != NULL || sq->sq_head == qp);		\
	ASSERT(sq->sq_nqueues != 0);					\
	if (qp->q_sqprev == NULL) {					\
		/* First queue on list, make head q_sqnext */		\
		sq->sq_head = qp->q_sqnext;				\
	} else {							\
		/* Make prev->next == next */				\
		qp->q_sqprev->q_sqnext = qp->q_sqnext;			\
	}								\
	if (qp->q_sqnext == NULL) {					\
		/* Last queue on list, make tail sqprev */		\
		sq->sq_tail = qp->q_sqprev;				\
	} else {							\
		/* Make next->prev == prev */				\
		qp->q_sqnext->q_sqprev = qp->q_sqprev;			\
	}								\
	/* clear out references on this queue */			\
	qp->q_sqprev = qp->q_sqnext = NULL;				\
	qp->q_sqflags &= ~Q_SQQUEUED;					\
	/* If there is nothing queued, clear SQ_MESSAGES */		\
	if (sq->sq_head != NULL) {					\
		sq->sq_pri = sq->sq_head->q_spri;			\
	} else {							\
		sq->sq_flags &= ~SQ_MESSAGES;				\
		sq->sq_pri = 0;						\
	}								\
	sq->sq_nqueues--;						\
	ASSERT(sq->sq_head != NULL || sq->sq_evhead != NULL ||		\
	    (sq->sq_flags & SQ_QUEUED) == 0);				\
}

/* Hide the definition from the header file. */
#ifdef SQPUT_MP
#undef SQPUT_MP
#endif

/*
 * Put a message on the queue syncq.
 * Assumes QLOCK held.
 */
#define	SQPUT_MP(qp, mp)						\
	{								\
		ASSERT(MUTEX_HELD(QLOCK(qp)));				\
		ASSERT(qp->q_sqhead == NULL ||				\
		    (qp->q_sqtail != NULL &&				\
		    qp->q_sqtail->b_next == NULL));			\
		qp->q_syncqmsgs++;					\
		ASSERT(qp->q_syncqmsgs != 0);	/* Wraparound */	\
		if (qp->q_sqhead == NULL) {				\
			qp->q_sqhead = qp->q_sqtail = mp;		\
		} else {						\
			qp->q_sqtail->b_next = mp;			\
			qp->q_sqtail = mp;				\
		}							\
		ASSERT(qp->q_syncqmsgs > 0);				\
	}

#define	SQ_PUTCOUNT_SETFAST_LOCKED(sq) {				\
	ASSERT(MUTEX_HELD(SQLOCK(sq)));					\
	if ((sq)->sq_ciputctrl != NULL) {				\
		int i;							\
		int nlocks = (sq)->sq_nciputctrl;			\
		ciputctrl_t *cip = (sq)->sq_ciputctrl;			\
		ASSERT((sq)->sq_type & SQ_CIPUT);			\
		/* note the inclusive bound: nlocks is the last index */ \
		for (i = 0; i <= nlocks; i++) {				\
			ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock));	\
			cip[i].ciputctrl_count |= SQ_FASTPUT;		\
		}							\
	}								\
}


#define	SQ_PUTCOUNT_CLRFAST_LOCKED(sq) {				\
	ASSERT(MUTEX_HELD(SQLOCK(sq)));					\
	if ((sq)->sq_ciputctrl != NULL) {				\
		int i;							\
		int nlocks = (sq)->sq_nciputctrl;			\
		ciputctrl_t *cip = (sq)->sq_ciputctrl;			\
		ASSERT((sq)->sq_type & SQ_CIPUT);			\
		for (i = 0; i <= nlocks; i++) {				\
			ASSERT(MUTEX_HELD(&cip[i].ciputctrl_lock));	\
			cip[i].ciputctrl_count &= ~SQ_FASTPUT;		\
		}							\
	}								\
}
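/*
 * Illustrative note (not part of the original source): SQ_FASTPUT in
 * each ciputctrl_count marks the put-side fast path as usable, letting
 * a concurrent caller entering a SQ_CIPUT syncq take only one per-CPU
 * ciputctrl_lock and bump its count there instead of contending on
 * SQLOCK; the two macros above set and clear that bit across the whole
 * array while all of the ciputctrl locks are held.
 */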
/*
 * Run service procedures for all queues in the stream head.
 */
#define	STR_SERVICE(stp, q) {						\
	ASSERT(MUTEX_HELD(&stp->sd_qlock));				\
	while (stp->sd_qhead != NULL) {					\
		DQ(q, stp->sd_qhead, stp->sd_qtail, q_link);		\
		ASSERT(stp->sd_nqueues > 0);				\
		stp->sd_nqueues--;					\
		ASSERT(!(q->q_flag & QINSERVICE));			\
		mutex_exit(&stp->sd_qlock);				\
		queue_service(q);					\
		mutex_enter(&stp->sd_qlock);				\
	}								\
	ASSERT(stp->sd_nqueues == 0);					\
	ASSERT((stp->sd_qhead == NULL) && (stp->sd_qtail == NULL));	\
}

/*
 * constructor/destructor routines for the stream head cache
 */
/* ARGSUSED */
static int
stream_head_constructor(void *buf, void *cdrarg, int kmflags)
{
	stdata_t *stp = buf;

	mutex_init(&stp->sd_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&stp->sd_reflock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&stp->sd_qlock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&stp->sd_monitor, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_iocmonitor, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_refmonitor, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_qcv, NULL, CV_DEFAULT, NULL);
	cv_init(&stp->sd_zcopy_wait, NULL, CV_DEFAULT, NULL);
	stp->sd_wrq = NULL;

	return (0);
}

/* ARGSUSED */
static void
stream_head_destructor(void *buf, void *cdrarg)
{
	stdata_t *stp = buf;

	mutex_destroy(&stp->sd_lock);
	mutex_destroy(&stp->sd_reflock);
	mutex_destroy(&stp->sd_qlock);
	cv_destroy(&stp->sd_monitor);
	cv_destroy(&stp->sd_iocmonitor);
	cv_destroy(&stp->sd_refmonitor);
	cv_destroy(&stp->sd_qcv);
	cv_destroy(&stp->sd_zcopy_wait);
}

/*
 * constructor/destructor routines for the queue cache
 */
/* ARGSUSED */
static int
queue_constructor(void *buf, void *cdrarg, int kmflags)
{
	queinfo_t *qip = buf;
	queue_t *qp = &qip->qu_rqueue;
	queue_t *wqp = &qip->qu_wqueue;
	syncq_t *sq = &qip->qu_syncq;

	qp->q_first = NULL;
	qp->q_link = NULL;
	qp->q_count = 0;
	qp->q_mblkcnt = 0;
	qp->q_sqhead = NULL;
	qp->q_sqtail = NULL;
	qp->q_sqnext = NULL;
	qp->q_sqprev = NULL;
	qp->q_sqflags = 0;
	qp->q_rwcnt = 0;
	qp->q_spri = 0;

	mutex_init(QLOCK(qp), NULL, MUTEX_DEFAULT, NULL);
	cv_init(&qp->q_wait, NULL, CV_DEFAULT, NULL);

	wqp->q_first = NULL;
	wqp->q_link = NULL;
	wqp->q_count = 0;
	wqp->q_mblkcnt = 0;
	wqp->q_sqhead = NULL;
	wqp->q_sqtail = NULL;
	wqp->q_sqnext = NULL;
	wqp->q_sqprev = NULL;
	wqp->q_sqflags = 0;
	wqp->q_rwcnt = 0;
	wqp->q_spri = 0;

	mutex_init(QLOCK(wqp), NULL, MUTEX_DEFAULT, NULL);
	cv_init(&wqp->q_wait, NULL, CV_DEFAULT, NULL);

	sq->sq_head = NULL;
	sq->sq_tail = NULL;
	sq->sq_evhead = NULL;
	sq->sq_evtail = NULL;
	sq->sq_callbpend = NULL;
	sq->sq_outer = NULL;
	sq->sq_onext = NULL;
	sq->sq_oprev = NULL;
	sq->sq_next = NULL;
	sq->sq_svcflags = 0;
	sq->sq_servcount = 0;
	sq->sq_needexcl = 0;
	sq->sq_nqueues = 0;
	sq->sq_pri = 0;

	mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
	cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);

	return (0);
}
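/*
 * Illustrative note (not part of the original source): queue_cache
 * objects are whole queinfo_t structures, so one allocation yields the
 * read queue, its write queue and their default syncq together; this
 * is roughly what allocq() does:
 *
 *	queinfo_t *qip = kmem_cache_alloc(queue_cache, KM_SLEEP);
 *	queue_t *qp = &qip->qu_rqueue;
 *	queue_t *wqp = &qip->qu_wqueue;
 *	qp->q_flag = QUSE | QREADR;
 *	wqp->q_flag = QUSE;
 */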
/* ARGSUSED */
static void
queue_destructor(void *buf, void *cdrarg)
{
	queinfo_t *qip = buf;
	queue_t *qp = &qip->qu_rqueue;
	queue_t *wqp = &qip->qu_wqueue;
	syncq_t *sq = &qip->qu_syncq;

	ASSERT(qp->q_sqhead == NULL);
	ASSERT(wqp->q_sqhead == NULL);
	ASSERT(qp->q_sqnext == NULL);
	ASSERT(wqp->q_sqnext == NULL);
	ASSERT(qp->q_rwcnt == 0);
	ASSERT(wqp->q_rwcnt == 0);

	mutex_destroy(&qp->q_lock);
	cv_destroy(&qp->q_wait);

	mutex_destroy(&wqp->q_lock);
	cv_destroy(&wqp->q_wait);

	mutex_destroy(&sq->sq_lock);
	cv_destroy(&sq->sq_wait);
	cv_destroy(&sq->sq_exitwait);
}

/*
 * constructor/destructor routines for the syncq cache
 */
/* ARGSUSED */
static int
syncq_constructor(void *buf, void *cdrarg, int kmflags)
{
	syncq_t *sq = buf;

	bzero(buf, sizeof (syncq_t));

	mutex_init(&sq->sq_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sq->sq_wait, NULL, CV_DEFAULT, NULL);
	cv_init(&sq->sq_exitwait, NULL, CV_DEFAULT, NULL);

	return (0);
}

/* ARGSUSED */
static void
syncq_destructor(void *buf, void *cdrarg)
{
	syncq_t *sq = buf;

	ASSERT(sq->sq_head == NULL);
	ASSERT(sq->sq_tail == NULL);
	ASSERT(sq->sq_evhead == NULL);
	ASSERT(sq->sq_evtail == NULL);
	ASSERT(sq->sq_callbpend == NULL);
	ASSERT(sq->sq_callbflags == 0);
	ASSERT(sq->sq_outer == NULL);
	ASSERT(sq->sq_onext == NULL);
	ASSERT(sq->sq_oprev == NULL);
	ASSERT(sq->sq_next == NULL);
	ASSERT(sq->sq_needexcl == 0);
	ASSERT(sq->sq_svcflags == 0);
	ASSERT(sq->sq_servcount == 0);
	ASSERT(sq->sq_nqueues == 0);
	ASSERT(sq->sq_pri == 0);
	ASSERT(sq->sq_count == 0);
	ASSERT(sq->sq_rmqcount == 0);
	ASSERT(sq->sq_cancelid == 0);
	ASSERT(sq->sq_ciputctrl == NULL);
	ASSERT(sq->sq_nciputctrl == 0);
	ASSERT(sq->sq_type == 0);
	ASSERT(sq->sq_flags == 0);

	mutex_destroy(&sq->sq_lock);
	cv_destroy(&sq->sq_wait);
	cv_destroy(&sq->sq_exitwait);
}

/* ARGSUSED */
static int
ciputctrl_constructor(void *buf, void *cdrarg, int kmflags)
{
	ciputctrl_t *cip = buf;
	int i;

	for (i = 0; i < n_ciputctrl; i++) {
		cip[i].ciputctrl_count = SQ_FASTPUT;
		mutex_init(&cip[i].ciputctrl_lock, NULL, MUTEX_DEFAULT, NULL);
	}

	return (0);
}

/* ARGSUSED */
static void
ciputctrl_destructor(void *buf, void *cdrarg)
{
	ciputctrl_t *cip = buf;
	int i;

	for (i = 0; i < n_ciputctrl; i++) {
		ASSERT(cip[i].ciputctrl_count & SQ_FASTPUT);
		mutex_destroy(&cip[i].ciputctrl_lock);
	}
}

/*
 * Init routine run from main at boot time.
 */
void
strinit(void)
{
	int i;
	int ncpus = ((boot_max_ncpus == -1) ? max_ncpus : boot_max_ncpus);

	/*
	 * Set up mux_node structures.
	 */
	mux_nodes = kmem_zalloc((sizeof (struct mux_node) * devcnt), KM_SLEEP);
	for (i = 0; i < devcnt; i++)
		mux_nodes[i].mn_imaj = i;

	stream_head_cache = kmem_cache_create("stream_head_cache",
	    sizeof (stdata_t), 0,
	    stream_head_constructor, stream_head_destructor, NULL,
	    NULL, NULL, 0);

	queue_cache = kmem_cache_create("queue_cache", sizeof (queinfo_t), 0,
	    queue_constructor, queue_destructor, NULL, NULL, NULL, 0);

	syncq_cache = kmem_cache_create("syncq_cache", sizeof (syncq_t), 0,
	    syncq_constructor, syncq_destructor, NULL, NULL, NULL, 0);

	qband_cache = kmem_cache_create("qband_cache",
	    sizeof (qband_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	linkinfo_cache = kmem_cache_create("linkinfo_cache",
	    sizeof (linkinfo_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	/* round the CPU count up to a power of two, e.g. 6 CPUs -> 8 */
	n_ciputctrl = ncpus;
	n_ciputctrl = 1 << highbit(n_ciputctrl - 1);
	ASSERT(n_ciputctrl >= 1);
	n_ciputctrl = MIN(n_ciputctrl, max_n_ciputctrl);
	if (n_ciputctrl >= min_n_ciputctrl) {
		ciputctrl_cache = kmem_cache_create("ciputctrl_cache",
		    sizeof (ciputctrl_t) * n_ciputctrl,
		    sizeof (ciputctrl_t), ciputctrl_constructor,
		    ciputctrl_destructor, NULL, NULL, NULL, 0);
	}

	streams_taskq = system_taskq;

	if (streams_taskq == NULL)
		panic("strinit: no memory for streams taskq!");

	bc_bkgrnd_thread = thread_create(NULL, 0,
	    streams_bufcall_service, NULL, 0, &p0, TS_RUN, streams_lopri);

	streams_qbkgrnd_thread = thread_create(NULL, 0,
	    streams_qbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);

	streams_sqbkgrnd_thread = thread_create(NULL, 0,
	    streams_sqbkgrnd_service, NULL, 0, &p0, TS_RUN, streams_lopri);

	/*
	 * Create STREAMS kstats.
	 */
	str_kstat = kstat_create("streams", 0, "strstat",
	    "net", KSTAT_TYPE_NAMED,
	    sizeof (str_statistics) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (str_kstat != NULL) {
		str_kstat->ks_data = &str_statistics;
		kstat_install(str_kstat);
	}

	/*
	 * TPI support routine initialisation.
	 */
	tpi_init();
}

void
str_sendsig(vnode_t *vp, int event, uchar_t band, int error)
{
	struct stdata *stp;

	ASSERT(vp->v_stream);
	stp = vp->v_stream;
	/* Have to hold sd_lock to prevent siglist from changing */
	mutex_enter(&stp->sd_lock);
	if (stp->sd_sigflags & event)
		strsendsig(stp->sd_siglist, event, band, error);
	mutex_exit(&stp->sd_lock);
}
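/*
 * Illustrative sketch (not part of the original source): the siglist
 * consulted by str_sendsig() is populated from userland via the
 * I_SETSIG ioctl, e.g.
 *
 *	if (ioctl(fd, I_SETSIG, S_INPUT | S_RDBAND | S_BANDURG) < 0)
 *		perror("I_SETSIG");
 *
 * after which the arrival of a normal-priority message leads, via
 * strsendsig()/dosendsig() below, to a SIGPOLL with si_code == POLL_IN
 * (or to SIGURG instead of SIGPOLL for banded data when S_BANDURG is
 * set).
 */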
/*
 * Send the "sevent" set of signals to a process.
 * This might send more than one signal if the process is registered
 * for multiple events. The caller should pass in an sevent that only
 * includes the events for which the process has registered.
 */
static void
dosendsig(proc_t *proc, int events, int sevent, k_siginfo_t *info,
	uchar_t band, int error)
{
	ASSERT(MUTEX_HELD(&proc->p_lock));

	info->si_band = 0;
	info->si_errno = 0;

	if (sevent & S_ERROR) {
		sevent &= ~S_ERROR;
		info->si_code = POLL_ERR;
		info->si_errno = error;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_errno = 0;
	}
	if (sevent & S_HANGUP) {
		sevent &= ~S_HANGUP;
		info->si_code = POLL_HUP;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
	}
	if (sevent & S_HIPRI) {
		sevent &= ~S_HIPRI;
		info->si_code = POLL_PRI;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
	}
	if (sevent & S_RDBAND) {
		sevent &= ~S_RDBAND;
		if (events & S_BANDURG)
			sigtoproc(proc, NULL, SIGURG);
		else
			sigtoproc(proc, NULL, SIGPOLL);
	}
	if (sevent & S_WRBAND) {
		sevent &= ~S_WRBAND;
		sigtoproc(proc, NULL, SIGPOLL);
	}
	if (sevent & S_INPUT) {
		sevent &= ~S_INPUT;
		info->si_code = POLL_IN;
		info->si_band = band;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_band = 0;
	}
	if (sevent & S_OUTPUT) {
		sevent &= ~S_OUTPUT;
		info->si_code = POLL_OUT;
		info->si_band = band;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_band = 0;
	}
	if (sevent & S_MSG) {
		sevent &= ~S_MSG;
		info->si_code = POLL_MSG;
		info->si_band = band;
		TRACE_2(TR_FAC_STREAMS_FR, TR_STRSENDSIG,
		    "strsendsig:proc %p info %p", proc, info);
		sigaddq(proc, NULL, info, KM_NOSLEEP);
		info->si_band = 0;
	}
	if (sevent & S_RDNORM) {
		sevent &= ~S_RDNORM;
		sigtoproc(proc, NULL, SIGPOLL);
	}
	if (sevent != 0) {
		panic("strsendsig: unknown event(s) %x", sevent);
	}
}

/*
 * Send SIGPOLL/SIGURG signals to all processes and process groups
 * registered on the given signal list that want a signal for at
 * least one of the specified events.
 *
 * Must be called with exclusive access to siglist (caller holding sd_lock).
 *
 * strioctl(I_SETSIG/I_ESETSIG) will only change siglist when holding
 * sd_lock and the ioctl code maintains a PID_HOLD on the pid structure
 * while it is in the siglist.
 *
 * For performance reasons (MP scalability) the code drops pidlock
 * when sending signals to a single process.
 * When sending to a process group the code holds
 * pidlock to prevent the membership in the process group from changing
 * while walking the p_pglink list.
 */
void
strsendsig(strsig_t *siglist, int event, uchar_t band, int error)
{
	strsig_t *ssp;
	k_siginfo_t info;
	struct pid *pidp;
	proc_t *proc;

	info.si_signo = SIGPOLL;
	info.si_errno = 0;
	for (ssp = siglist; ssp; ssp = ssp->ss_next) {
		int sevent;

		sevent = ssp->ss_events & event;
		if (sevent == 0)
			continue;

		if ((pidp = ssp->ss_pidp) == NULL) {
			/* pid was released but still on event list */
			continue;
		}

		if (ssp->ss_pid > 0) {
			/*
			 * XXX This unfortunately still generates
			 * a signal when a fd is closed but
			 * the proc is active.
			 */
			ASSERT(ssp->ss_pid == pidp->pid_id);

			mutex_enter(&pidlock);
			proc = prfind_zone(pidp->pid_id, ALL_ZONES);
			if (proc == NULL) {
				mutex_exit(&pidlock);
				continue;
			}
			mutex_enter(&proc->p_lock);
			mutex_exit(&pidlock);
			dosendsig(proc, ssp->ss_events, sevent, &info,
			    band, error);
			mutex_exit(&proc->p_lock);
		} else {
			/*
			 * Send to process group. Hold pidlock across
			 * calls to dosendsig().
			 */
			pid_t pgrp = -ssp->ss_pid;

			mutex_enter(&pidlock);
			proc = pgfind_zone(pgrp, ALL_ZONES);
			while (proc != NULL) {
				mutex_enter(&proc->p_lock);
				dosendsig(proc, ssp->ss_events, sevent,
				    &info, band, error);
				mutex_exit(&proc->p_lock);
				proc = proc->p_pglink;
			}
			mutex_exit(&pidlock);
		}
	}
}

/*
 * Attach a stream device or module.
 * qp is a read queue; the new queue goes in so its next
 * read ptr is the argument, and the write queue corresponding
 * to the argument points to this queue. Return 0 on success,
 * or a non-zero errno on failure.
 */
int
qattach(queue_t *qp, dev_t *devp, int oflag, cred_t *crp, fmodsw_impl_t *fp,
    boolean_t is_insert)
{
	major_t major;
	cdevsw_impl_t *dp;
	struct streamtab *str;
	queue_t *rq;
	queue_t *wrq;
	uint32_t qflag;
	uint32_t sqtype;
	perdm_t *dmp;
	int error;
	int sflag;

	rq = allocq();
	wrq = _WR(rq);
	STREAM(rq) = STREAM(wrq) = STREAM(qp);

	if (fp != NULL) {
		str = fp->f_str;
		qflag = fp->f_qflag;
		sqtype = fp->f_sqtype;
		dmp = fp->f_dmp;
		IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL);
		sflag = MODOPEN;

		/*
		 * stash away a pointer to the module structure so we can
		 * unref it in qdetach.
		 */
		rq->q_fp = fp;
	} else {
		ASSERT(!is_insert);

		major = getmajor(*devp);
		dp = &devimpl[major];

		str = dp->d_str;
		ASSERT(str == STREAMSTAB(major));

		qflag = dp->d_qflag;
		ASSERT(qflag & QISDRV);
		sqtype = dp->d_sqtype;

		/* create perdm_t if needed */
		if (NEED_DM(dp->d_dmp, qflag))
			dp->d_dmp = hold_dm(str, qflag, sqtype);

		dmp = dp->d_dmp;
		sflag = 0;
	}

	TRACE_2(TR_FAC_STREAMS_FR, TR_QATTACH_FLAGS,
	    "qattach:qflag == %X(%X)", qflag, *devp);

	/* setq might sleep in allocator - avoid holding locks. */
	setq(rq, str->st_rdinit, str->st_wrinit, dmp, qflag, sqtype, B_FALSE);

	/*
	 * Before calling the module's open routine, set up the q_next
	 * pointer for inserting a module in the middle of a stream.
	 *
	 * Note that we can always set _QINSERTING and set up the q_next
	 * pointer for both inserting and pushing a module. Then there
	 * is no need for the is_insert parameter. In insertq(), called
	 * by qprocson(), assume that q_next of the new module always points
	 * to the correct queue and use it for insertion. Everything should
	 * work out fine. But in the first release of _I_INSERT, we
	 * distinguish between inserting and pushing to make sure that
	 * pushing a module follows the same code path as before.
	 */
	if (is_insert) {
		rq->q_flag |= _QINSERTING;
		rq->q_next = qp;
	}

	/*
	 * If there is an outer perimeter get exclusive access during
	 * the open procedure. Bump up the reference count on the queue.
	 */
	entersq(rq->q_syncq, SQ_OPENCLOSE);
	error = (*rq->q_qinfo->qi_qopen)(rq, devp, oflag, sflag, crp);
	if (error != 0)
		goto failed;
	leavesq(rq->q_syncq, SQ_OPENCLOSE);
	ASSERT(qprocsareon(rq));
	return (0);

failed:
	rq->q_flag &= ~_QINSERTING;
	if (backq(wrq) != NULL && backq(wrq)->q_next == wrq)
		qprocsoff(rq);
	leavesq(rq->q_syncq, SQ_OPENCLOSE);
	rq->q_next = wrq->q_next = NULL;
	qdetach(rq, 0, 0, crp, B_FALSE);
	return (error);
}

/*
 * Handle second open of stream. For modules, set the
 * last argument to MODOPEN and do not pass any open flags.
 * Ignore dummydev since this is not the first open.
 */
int
qreopen(queue_t *qp, dev_t *devp, int flag, cred_t *crp)
{
	int error;
	dev_t dummydev;
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);
	entersq(qp->q_syncq, SQ_OPENCLOSE);

	dummydev = *devp;
	if (error = ((*qp->q_qinfo->qi_qopen)(qp, &dummydev,
	    (wqp->q_next ? 0 : flag), (wqp->q_next ? MODOPEN : 0), crp))) {
		leavesq(qp->q_syncq, SQ_OPENCLOSE);
		mutex_enter(&STREAM(qp)->sd_lock);
		qp->q_stream->sd_flag |= STREOPENFAIL;
		mutex_exit(&STREAM(qp)->sd_lock);
		return (error);
	}
	leavesq(qp->q_syncq, SQ_OPENCLOSE);

	/*
	 * A successful open should have done qprocson().
	 */
	ASSERT(qprocsareon(_RD(qp)));
	return (0);
}

/*
 * Detach a stream module or device.
 * If clmode == 1 then the module or driver was opened and its
 * close routine must be called. If clmode == 0, the module
 * or driver was never opened or the open failed, and so its close
 * should not be called.
 */
void
qdetach(queue_t *qp, int clmode, int flag, cred_t *crp, boolean_t is_remove)
{
	queue_t *wqp = _WR(qp);
	ASSERT(STREAM(qp)->sd_flag & (STRCLOSE|STWOPEN|STRPLUMB));

	if (STREAM_NEEDSERVICE(STREAM(qp)))
		stream_runservice(STREAM(qp));

	if (clmode) {
		/*
		 * Make sure that all the messages on the write side syncq are
		 * processed and nothing is left. Since we are closing, no new
		 * messages may appear there.
		 */
		wait_q_syncq(wqp);

		entersq(qp->q_syncq, SQ_OPENCLOSE);
		if (is_remove) {
			mutex_enter(QLOCK(qp));
			qp->q_flag |= _QREMOVING;
			mutex_exit(QLOCK(qp));
		}
		(*qp->q_qinfo->qi_qclose)(qp, flag, crp);
		/*
		 * Check that qprocsoff() was actually called.
		 */
		ASSERT((qp->q_flag & QWCLOSE) && (wqp->q_flag & QWCLOSE));

		leavesq(qp->q_syncq, SQ_OPENCLOSE);
	} else {
		disable_svc(qp);
	}

	/*
	 * Allow any threads blocked in entersq to proceed and discover
	 * that QWCLOSE is set.
	 * Note: This assumes that all users of entersq check QWCLOSE.
	 * Currently runservice is the only entersq that can happen
	 * after removeq has finished.
	 * Removeq will have discarded all messages destined to the closing
	 * pair of queues from the syncq.
	 * NOTE: Calling a function inside an assert is unconventional.
	 * However, it does not cause any problem since flush_syncq() does
	 * not change any state except when it returns non-zero, i.e.
	 * when the assert will trigger.
	 */
	ASSERT(flush_syncq(qp->q_syncq, qp) == 0);
	ASSERT(flush_syncq(wqp->q_syncq, wqp) == 0);
	ASSERT((qp->q_flag & QPERMOD) ||
	    ((qp->q_syncq->sq_head == NULL) &&
	    (wqp->q_syncq->sq_head == NULL)));

	/* release any fmodsw_impl_t structure held on behalf of the queue */
	ASSERT(qp->q_fp != NULL || qp->q_flag & QISDRV);
	if (qp->q_fp != NULL)
		fmodsw_rele(qp->q_fp);

	/* freeq removes us from the outer perimeter if any */
	freeq(qp);
}

/* Prevent service procedures from being called */
void
disable_svc(queue_t *qp)
{
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);
	mutex_enter(QLOCK(qp));
	qp->q_flag |= QWCLOSE;
	mutex_exit(QLOCK(qp));
	mutex_enter(QLOCK(wqp));
	wqp->q_flag |= QWCLOSE;
	mutex_exit(QLOCK(wqp));
}

/* Allow service procedures to be called again */
void
enable_svc(queue_t *qp)
{
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);
	mutex_enter(QLOCK(qp));
	qp->q_flag &= ~QWCLOSE;
	mutex_exit(QLOCK(qp));
	mutex_enter(QLOCK(wqp));
	wqp->q_flag &= ~QWCLOSE;
	mutex_exit(QLOCK(wqp));
}

/*
 * Remove queue from qhead/qtail if it is enabled.
 * Only reset QENAB if the queue was removed from the runlist.
 * A queue goes through 3 stages:
 *	It is on the service list and QENAB is set.
 *	It is removed from the service list but QENAB is still set.
 *	QENAB gets changed to QINSERVICE.
 *	QINSERVICE is reset (when the service procedure is done).
 * Thus we cannot reset QENAB unless we actually removed it from the service
 * queue.
 */
void
remove_runlist(queue_t *qp)
{
	if (qp->q_flag & QENAB && qhead != NULL) {
		queue_t *q_chase;
		queue_t *q_curr;
		int removed;

		mutex_enter(&service_queue);
		RMQ(qp, qhead, qtail, q_link, q_chase, q_curr, removed);
		mutex_exit(&service_queue);
		if (removed) {
			STRSTAT(qremoved);
			qp->q_flag &= ~QENAB;
		}
	}
}


/*
 * Wait for any pending service processing to complete.
 * The removal of queues from the runlist is not atomic with the
 * clearing of the QENABLED flag and setting the INSERVICE flag.
 * Consequently it is possible for remove_runlist in strclose
 * to not find the queue on the runlist but for it to be QENABLED
 * and not yet INSERVICE -> hence wait_svc needs to check QENABLED
 * as well as INSERVICE.
 */
void
wait_svc(queue_t *qp)
{
	queue_t *wqp = _WR(qp);

	ASSERT(qp->q_flag & QREADR);

	/*
	 * Try to remove queues from the qhead/qtail list.
	 */
	if (qhead != NULL) {
		remove_runlist(qp);
		remove_runlist(wqp);
	}
	/*
	 * Wait until the syncqs associated with the queue
	 * disappear from the background processing list.
	 * This only needs to be done for non-PERMOD perimeters since
	 * for PERMOD perimeters the syncq may be shared and will only be freed
	 * when the last module/driver is unloaded.
	 * If for PERMOD perimeters the queue was on the syncq list, removeq()
	 * should call propagate_syncq() or drain_syncq() for it. Both of these
	 * functions remove the queue from its syncq list, so sqthread will not
	 * try to access the queue.
	 */
	if (!(qp->q_flag & QPERMOD)) {
		syncq_t *rsq = qp->q_syncq;
		syncq_t *wsq = wqp->q_syncq;

		/*
		 * Disable rsq and wsq and wait for any background processing
		 * of syncq to complete.
		 */
		wait_sq_svc(rsq);
		if (wsq != rsq)
			wait_sq_svc(wsq);
	}

	mutex_enter(QLOCK(qp));
	while (qp->q_flag & (QINSERVICE|QENAB))
		cv_wait(&qp->q_wait, QLOCK(qp));
	mutex_exit(QLOCK(qp));
	mutex_enter(QLOCK(wqp));
	while (wqp->q_flag & (QINSERVICE|QENAB))
		cv_wait(&wqp->q_wait, QLOCK(wqp));
	mutex_exit(QLOCK(wqp));
}

/*
 * Put ioctl data from userland buffer `arg' into the mblk chain `bp'.
 * `flag' must always contain either K_TO_K or U_TO_K; STR_NOSIG may
 * also be set, and is passed through to allocb_cred_wait().
 *
 * Returns errno on failure, zero on success.
 */
int
putiocd(mblk_t *bp, char *arg, int flag, cred_t *cr)
{
	mblk_t *tmp;
	ssize_t count;
	size_t n;
	int error = 0;

	ASSERT((flag & (U_TO_K | K_TO_K)) == U_TO_K ||
	    (flag & (U_TO_K | K_TO_K)) == K_TO_K);

	if (bp->b_datap->db_type == M_IOCTL) {
		count = ((struct iocblk *)bp->b_rptr)->ioc_count;
	} else {
		ASSERT(bp->b_datap->db_type == M_COPYIN);
		count = ((struct copyreq *)bp->b_rptr)->cq_size;
	}
	/*
	 * strdoioctl validates ioc_count, so if this assert fails it
	 * cannot be due to user error.
	 */
	ASSERT(count >= 0);

	while (count > 0) {
		n = MIN(MAXIOCBSZ, count);
		if ((tmp = allocb_cred_wait(n, (flag & STR_NOSIG), &error,
		    cr)) == NULL) {
			return (error);
		}
		error = strcopyin(arg, tmp->b_wptr, n, flag & (U_TO_K|K_TO_K));
		if (error != 0) {
			freeb(tmp);
			return (error);
		}
		arg += n;
		DB_CPID(tmp) = curproc->p_pid;
		tmp->b_wptr += n;
		count -= n;
		bp = (bp->b_cont = tmp);
	}

	return (0);
}

/*
 * Copy ioctl data to user-land. Return non-zero errno on failure,
 * 0 for success.
 */
int
getiocd(mblk_t *bp, char *arg, int copymode)
{
	ssize_t count;
	size_t n;
	int error;

	if (bp->b_datap->db_type == M_IOCACK)
		count = ((struct iocblk *)bp->b_rptr)->ioc_count;
	else {
		ASSERT(bp->b_datap->db_type == M_COPYOUT);
		count = ((struct copyreq *)bp->b_rptr)->cq_size;
	}
	ASSERT(count >= 0);

	for (bp = bp->b_cont; bp && count;
	    count -= n, bp = bp->b_cont, arg += n) {
		n = MIN(count, bp->b_wptr - bp->b_rptr);
		error = strcopyout(bp->b_rptr, arg, n, copymode);
		if (error)
			return (error);
	}
	ASSERT(count == 0);
	return (0);
}
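/*
 * Worked example (not part of the original source): putiocd() copies
 * the payload in chunks of at most MAXIOCBSZ bytes, so an ioc_count of
 * 2500 with a MAXIOCBSZ of 1024 would be linked onto bp as three
 * b_cont mblks of 1024, 1024 and 452 bytes; getiocd() walks the same
 * b_cont chain back out to the user buffer.
 */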
/*
 * Allocate a linkinfo entry given the write queue of the
 * bottom module of the top stream and the write queue of the
 * stream head of the bottom stream.
 */
linkinfo_t *
alloclink(queue_t *qup, queue_t *qdown, file_t *fpdown)
{
	linkinfo_t *linkp;

	linkp = kmem_cache_alloc(linkinfo_cache, KM_SLEEP);

	linkp->li_lblk.l_qtop = qup;
	linkp->li_lblk.l_qbot = qdown;
	linkp->li_fpdown = fpdown;

	mutex_enter(&strresources);
	linkp->li_next = linkinfo_list;
	linkp->li_prev = NULL;
	if (linkp->li_next)
		linkp->li_next->li_prev = linkp;
	linkinfo_list = linkp;
	linkp->li_lblk.l_index = ++lnk_id;
	ASSERT(lnk_id != 0);	/* this should never wrap in practice */
	mutex_exit(&strresources);

	return (linkp);
}

/*
 * Free a linkinfo entry.
 */
void
lbfree(linkinfo_t *linkp)
{
	mutex_enter(&strresources);
	if (linkp->li_next)
		linkp->li_next->li_prev = linkp->li_prev;
	if (linkp->li_prev)
		linkp->li_prev->li_next = linkp->li_next;
	else
		linkinfo_list = linkp->li_next;
	mutex_exit(&strresources);

	kmem_cache_free(linkinfo_cache, linkp);
}

/*
 * Check for a potential linking cycle.
 * Return 1 if a link will result in a cycle,
 * and 0 otherwise.
 */
int
linkcycle(stdata_t *upstp, stdata_t *lostp)
{
	struct mux_node *np;
	struct mux_edge *ep;
	int i;
	major_t lomaj;
	major_t upmaj;
	/*
	 * If the lower stream is a pipe/FIFO, return, since link
	 * cycles cannot happen on pipes/FIFOs.
	 */
	if (lostp->sd_vnode->v_type == VFIFO)
		return (0);

	for (i = 0; i < devcnt; i++) {
		np = &mux_nodes[i];
		MUX_CLEAR(np);
	}
	lomaj = getmajor(lostp->sd_vnode->v_rdev);
	upmaj = getmajor(upstp->sd_vnode->v_rdev);
	np = &mux_nodes[lomaj];
	for (;;) {
		if (!MUX_DIDVISIT(np)) {
			if (np->mn_imaj == upmaj)
				return (1);
			if (np->mn_outp == NULL) {
				MUX_VISIT(np);
				if (np->mn_originp == NULL)
					return (0);
				np = np->mn_originp;
				continue;
			}
			MUX_VISIT(np);
			np->mn_startp = np->mn_outp;
		} else {
			if (np->mn_startp == NULL) {
				if (np->mn_originp == NULL)
					return (0);
				else {
					np = np->mn_originp;
					continue;
				}
			}
			/*
			 * If ep->me_nodep is a FIFO (me_nodep == NULL),
			 * ignore the edge and move on. ep->me_nodep gets
			 * set to NULL in mux_addedge() if it is a FIFO.
			 */
			ep = np->mn_startp;
			np->mn_startp = ep->me_nextp;
			if (ep->me_nodep == NULL)
				continue;
			ep->me_nodep->mn_originp = np;
			np = ep->me_nodep;
		}
	}
}
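/*
 * Worked example (not part of the original source): suppose a stream
 * of driver B (major 20) is already linked under mux A (major 10),
 * giving mux_nodes[10] an edge to mux_nodes[20]. An attempt to I_LINK
 * a stream of A under a stream of B makes lomaj 10 and upmaj 20; the
 * walk above starts at mux_nodes[10], follows the existing edge to the
 * node whose mn_imaj == 20 == upmaj, and linkcycle() returns 1,
 * rejecting the link.
 */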
/*
 * Find the linkinfo entry corresponding to the parameters.
 */
linkinfo_t *
findlinks(stdata_t *stp, int index, int type)
{
	linkinfo_t *linkp;
	struct mux_edge *mep;
	struct mux_node *mnp;
	queue_t *qup;

	mutex_enter(&strresources);
	if ((type & LINKTYPEMASK) == LINKNORMAL) {
		qup = getendq(stp->sd_wrq);
		for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
			if ((qup == linkp->li_lblk.l_qtop) &&
			    (!index || (index == linkp->li_lblk.l_index))) {
				mutex_exit(&strresources);
				return (linkp);
			}
		}
	} else {
		ASSERT((type & LINKTYPEMASK) == LINKPERSIST);
		mnp = &mux_nodes[getmajor(stp->sd_vnode->v_rdev)];
		mep = mnp->mn_outp;
		while (mep) {
			if ((index == 0) || (index == mep->me_muxid))
				break;
			mep = mep->me_nextp;
		}
		if (!mep) {
			mutex_exit(&strresources);
			return (NULL);
		}
		for (linkp = linkinfo_list; linkp; linkp = linkp->li_next) {
			if ((!linkp->li_lblk.l_qtop) &&
			    (mep->me_muxid == linkp->li_lblk.l_index)) {
				mutex_exit(&strresources);
				return (linkp);
			}
		}
	}
	mutex_exit(&strresources);
	return (NULL);
}

/*
 * Given a queue ptr, follow the chain of q_next pointers until you reach the
 * last queue on the chain and return it.
 */
queue_t *
getendq(queue_t *q)
{
	ASSERT(q != NULL);
	while (_SAMESTR(q))
		q = q->q_next;
	return (q);
}

/*
 * Wait for the syncq count to drop to zero.
 * sq could be either outer or inner.
 */
static void
wait_syncq(syncq_t *sq)
{
	uint16_t count;

	mutex_enter(SQLOCK(sq));
	count = sq->sq_count;
	SQ_PUTLOCKS_ENTER(sq);
	SUM_SQ_PUTCOUNTS(sq, count);
	while (count != 0) {
		sq->sq_flags |= SQ_WANTWAKEUP;
		SQ_PUTLOCKS_EXIT(sq);
		cv_wait(&sq->sq_wait, SQLOCK(sq));
		count = sq->sq_count;
		SQ_PUTLOCKS_ENTER(sq);
		SUM_SQ_PUTCOUNTS(sq, count);
	}
	SQ_PUTLOCKS_EXIT(sq);
	mutex_exit(SQLOCK(sq));
}
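/*
 * Illustrative note (not part of the original source): the total
 * number of threads inside a syncq is sq_count plus, when ciputctrl
 * is in use, the per-CPU put counts, which is why SUM_SQ_PUTCOUNTS()
 * is taken with the SQ_PUTLOCKS held. Setting SQ_WANTWAKEUP asks the
 * last thread leaving the perimeter to cv_broadcast(&sq->sq_wait).
 */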
/*
 * Wait while there are any messages for the queue in its syncq.
 */
static void
wait_q_syncq(queue_t *q)
{
	if ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) {
		syncq_t *sq = q->q_syncq;

		mutex_enter(SQLOCK(sq));
		while ((q->q_sqflags & Q_SQQUEUED) || (q->q_syncqmsgs > 0)) {
			sq->sq_flags |= SQ_WANTWAKEUP;
			cv_wait(&sq->sq_wait, SQLOCK(sq));
		}
		mutex_exit(SQLOCK(sq));
	}
}


int
mlink_file(vnode_t *vp, int cmd, struct file *fpdown, cred_t *crp, int *rvalp,
    int lhlink)
{
	struct stdata *stp;
	struct strioctl strioc;
	struct linkinfo *linkp;
	struct stdata *stpdown;
	struct streamtab *str;
	queue_t *passq;
	syncq_t *passyncq;
	queue_t *rq;
	cdevsw_impl_t *dp;
	uint32_t qflag;
	uint32_t sqtype;
	perdm_t *dmp;
	int error = 0;

	stp = vp->v_stream;
	TRACE_1(TR_FAC_STREAMS_FR,
	    TR_I_LINK, "I_LINK/I_PLINK:stp %p", stp);
	/*
	 * Test for invalid upper stream
	 */
	if (stp->sd_flag & STRHUP) {
		return (ENXIO);
	}
	if (vp->v_type == VFIFO) {
		return (EINVAL);
	}
	if (stp->sd_strtab == NULL) {
		return (EINVAL);
	}
	if (!stp->sd_strtab->st_muxwinit) {
		return (EINVAL);
	}
	if (fpdown == NULL) {
		return (EBADF);
	}
	if (getmajor(stp->sd_vnode->v_rdev) >= devcnt) {
		return (EINVAL);
	}
	mutex_enter(&muxifier);
	if (stp->sd_flag & STPLEX) {
		mutex_exit(&muxifier);
		return (ENXIO);
	}

	/*
	 * Test for invalid lower stream.
	 * The check for v_type != VFIFO and for a major number
	 * not >= devcnt is done to avoid problems with adding a
	 * mux_node entry past the end of mux_nodes[].
	 * For FIFOs we don't add an entry, so this isn't a problem.
	 */
	if (((stpdown = fpdown->f_vnode->v_stream) == NULL) ||
	    (stpdown == stp) || (stpdown->sd_flag &
	    (STPLEX|STRHUP|STRDERR|STWRERR|IOCWAIT|STRPLUMB)) ||
	    ((stpdown->sd_vnode->v_type != VFIFO) &&
	    (getmajor(stpdown->sd_vnode->v_rdev) >= devcnt)) ||
	    linkcycle(stp, stpdown)) {
		mutex_exit(&muxifier);
		return (EINVAL);
	}
	TRACE_1(TR_FAC_STREAMS_FR,
	    TR_STPDOWN, "stpdown:%p", stpdown);
	rq = getendq(stp->sd_wrq);
	if (cmd == I_PLINK)
		rq = NULL;

	linkp = alloclink(rq, stpdown->sd_wrq, fpdown);

	strioc.ic_cmd = cmd;
	strioc.ic_timout = INFTIM;
	strioc.ic_len = sizeof (struct linkblk);
	strioc.ic_dp = (char *)&linkp->li_lblk;

	/*
	 * STRPLUMB protects plumbing changes and should be set before
	 * link_addpassthru()/link_rempassthru() are called, so it is set here
	 * and cleared at the end of mlink when the passthru queue is removed.
	 * Setting STRPLUMB prevents reopens of the stream while the passthru
	 * queue is in place (it is not a proper module and doesn't have an
	 * open entry point).
	 *
	 * STPLEX prevents any threads from entering the stream from above. It
	 * can't be set before the call to link_addpassthru() because putnext
	 * from below may cause stream head I/O routines to be called and these
	 * routines assert that STPLEX is not set. After link_addpassthru()
	 * nothing may come from below since the pass queue syncq is blocked.
	 * Note also that STPLEX should be cleared before the call to
	 * link_rempassthru() since when messages start flowing to the stream
	 * head (e.g. because of message propagation from the pass queue)
	 * stream head I/O routines may be called with the STPLEX flag set.
	 *
	 * When STPLEX is set, nothing may come into the stream from above and
	 * it is safe to do a setq which will change the stream head. So, the
	 * correct sequence of actions is:
	 *
	 * 1) Set STRPLUMB
	 * 2) Call link_addpassthru()
	 * 3) Set STPLEX
	 * 4) Call setq and update the stream state
	 * 5) Clear STPLEX
	 * 6) Call link_rempassthru()
	 * 7) Clear STRPLUMB
	 *
	 * The same sequence applies to the munlink() code.
	 */
	mutex_enter(&stpdown->sd_lock);
	stpdown->sd_flag |= STRPLUMB;
	mutex_exit(&stpdown->sd_lock);
	/*
	 * Add the passthru queue below the lower mux. This will block
	 * the syncqs of the lower mux's read queue during I_LINK/I_UNLINK.
	 */
	passq = link_addpassthru(stpdown);

	mutex_enter(&stpdown->sd_lock);
	stpdown->sd_flag |= STPLEX;
	mutex_exit(&stpdown->sd_lock);

	rq = _RD(stpdown->sd_wrq);
	/*
	 * There may be messages in the streamhead's syncq due to messages
	 * that arrived before link_addpassthru() was done. To avoid
	 * background processing of the syncq happening simultaneously with
	 * setq processing, we disable the streamhead syncq and wait until
	 * the existing background thread finishes working on it.
	 */
	wait_sq_svc(rq->q_syncq);
	passyncq = passq->q_syncq;
	if (!(passyncq->sq_flags & SQ_BLOCKED))
		blocksq(passyncq, SQ_BLOCKED, 0);

	ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE);
	ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq));
	rq->q_ptr = _WR(rq)->q_ptr = NULL;

	/* setq might sleep in allocator - avoid holding locks. */
	/* Note: we are holding muxifier here. */

	str = stp->sd_strtab;
	dp = &devimpl[getmajor(vp->v_rdev)];
	ASSERT(dp->d_str == str);

	qflag = dp->d_qflag;
	sqtype = dp->d_sqtype;

	/* create perdm_t if needed */
	if (NEED_DM(dp->d_dmp, qflag))
		dp->d_dmp = hold_dm(str, qflag, sqtype);

	dmp = dp->d_dmp;

	setq(rq, str->st_muxrinit, str->st_muxwinit, dmp, qflag, sqtype,
	    B_TRUE);

	/*
	 * XXX Remove any "odd" messages from the queue.
	 * Keep only M_DATA, M_PROTO, M_PCPROTO.
	 */
	error = strdoioctl(stp, &strioc, FNATIVE,
	    K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp);
	if (error != 0) {
		lbfree(linkp);

		if (!(passyncq->sq_flags & SQ_BLOCKED))
			blocksq(passyncq, SQ_BLOCKED, 0);
		/*
		 * Restore the stream head queue and then remove
		 * the passq. Turn off STPLEX before we turn on
		 * the stream by removing the passq.
		 */
		rq->q_ptr = _WR(rq)->q_ptr = stpdown;
		setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO,
		    B_TRUE);

		mutex_enter(&stpdown->sd_lock);
		stpdown->sd_flag &= ~STPLEX;
		mutex_exit(&stpdown->sd_lock);

		link_rempassthru(passq);

		mutex_enter(&stpdown->sd_lock);
		stpdown->sd_flag &= ~STRPLUMB;
		/* Wakeup anyone waiting for STRPLUMB to clear. */
		cv_broadcast(&stpdown->sd_monitor);
		mutex_exit(&stpdown->sd_lock);

		mutex_exit(&muxifier);
		return (error);
	}
	mutex_enter(&fpdown->f_tlock);
	fpdown->f_count++;
	mutex_exit(&fpdown->f_tlock);

	/*
	 * If we've made it here the linkage is all set up, so we should also
	 * set up the layered driver linkages.
	 */

	ASSERT((cmd == I_LINK) || (cmd == I_PLINK));
	if (cmd == I_LINK) {
		ldi_mlink_fp(stp, fpdown, lhlink, LINKNORMAL);
	} else {
		ldi_mlink_fp(stp, fpdown, lhlink, LINKPERSIST);
	}

	link_rempassthru(passq);

	mux_addedge(stp, stpdown, linkp->li_lblk.l_index);

	/*
	 * Mark the upper stream as having dependent links
	 * so that strclose can clean it up.
	 */
	if (cmd == I_LINK) {
		mutex_enter(&stp->sd_lock);
		stp->sd_flag |= STRHASLINKS;
		mutex_exit(&stp->sd_lock);
	}
	/*
	 * Wake up any other processes that may have been
	 * waiting on the lower stream. These will all
	 * error out.
	 */
	mutex_enter(&stpdown->sd_lock);
	/* The passthru module is removed so we may release STRPLUMB */
	stpdown->sd_flag &= ~STRPLUMB;
	cv_broadcast(&rq->q_wait);
	cv_broadcast(&_WR(rq)->q_wait);
	cv_broadcast(&stpdown->sd_monitor);
	mutex_exit(&stpdown->sd_lock);
	mutex_exit(&muxifier);
	*rvalp = linkp->li_lblk.l_index;
	return (0);
}

int
mlink(vnode_t *vp, int cmd, int arg, cred_t *crp, int *rvalp, int lhlink)
{
	int ret;
	struct file *fpdown;

	fpdown = getf(arg);
	ret = mlink_file(vp, cmd, fpdown, crp, rvalp, lhlink);
	if (fpdown != NULL)
		releasef(arg);
	return (ret);
}

/*
 * Unlink a multiplexor link. Stp is the controlling stream for the
 * link, and linkp points to the link's entry in the linkinfo list.
 * The muxifier lock must be held on entry and is dropped on exit.
 *
 * NOTE: Currently it is assumed that the mux would process all the messages
 * sitting on its queue before ACKing the UNLINK. It is the responsibility
 * of the mux to handle all the messages that arrive before UNLINK.
 * If the mux has to send down messages on its lower stream before
 * ACKing I_UNLINK, then it *should* know to handle messages even
 * after the UNLINK is acked (actually it should be able to handle them
 * until we re-block the read side of the pass queue here). If the mux
 * does not open up the lower stream, any messages that arrive during
 * UNLINK will be put in the stream head. In the case of the lower stream
 * opening up, some messages might land in the stream head depending on
 * when the message arrived and when the read side of the pass queue was
 * re-blocked.
 */
int
munlink(stdata_t *stp, linkinfo_t *linkp, int flag, cred_t *crp, int *rvalp)
{
	struct strioctl strioc;
	struct stdata *stpdown;
	queue_t *rq, *wrq;
	queue_t *passq;
	syncq_t *passyncq;
	int error = 0;
	file_t *fpdown;

	ASSERT(MUTEX_HELD(&muxifier));

	stpdown = linkp->li_fpdown->f_vnode->v_stream;

	/*
	 * See the comment in mlink() concerning STRPLUMB/STPLEX flags.
	 */
	mutex_enter(&stpdown->sd_lock);
	stpdown->sd_flag |= STRPLUMB;
	mutex_exit(&stpdown->sd_lock);

	/*
	 * Add the passthru queue below the lower mux. This will block
	 * the syncqs of the lower mux's read queue during I_LINK/I_UNLINK.
2006 */ 2007 passq = link_addpassthru(stpdown); 2008 2009 if ((flag & LINKTYPEMASK) == LINKNORMAL) 2010 strioc.ic_cmd = I_UNLINK; 2011 else 2012 strioc.ic_cmd = I_PUNLINK; 2013 strioc.ic_timout = INFTIM; 2014 strioc.ic_len = sizeof (struct linkblk); 2015 strioc.ic_dp = (char *)&linkp->li_lblk; 2016 2017 error = strdoioctl(stp, &strioc, FNATIVE, 2018 K_TO_K | STR_NOERROR | STR_NOSIG, crp, rvalp); 2019 2020 /* 2021 * If there was an error and this is not called via strclose, 2022 * return to the user. Otherwise, pretend there was no error 2023 * and close the link. 2024 */ 2025 if (error) { 2026 if (flag & LINKCLOSE) { 2027 cmn_err(CE_WARN, "KERNEL: munlink: could not perform " 2028 "unlink ioctl, closing anyway (%d)\n", error); 2029 } else { 2030 link_rempassthru(passq); 2031 mutex_enter(&stpdown->sd_lock); 2032 stpdown->sd_flag &= ~STRPLUMB; 2033 cv_broadcast(&stpdown->sd_monitor); 2034 mutex_exit(&stpdown->sd_lock); 2035 mutex_exit(&muxifier); 2036 return (error); 2037 } 2038 } 2039 2040 mux_rmvedge(stp, linkp->li_lblk.l_index); 2041 fpdown = linkp->li_fpdown; 2042 lbfree(linkp); 2043 2044 /* 2045 * We go ahead and drop muxifier here--it's a nasty global lock that 2046 * can slow others down. It's okay to do so since attempts to mlink() 2047 * this stream will be stopped because STPLEX is still set in the stdata 2048 * structure, and munlink() is stopped because mux_rmvedge() and 2049 * lbfree() have removed it from mux_nodes[] and linkinfo_list, 2050 * respectively. Note that we defer the closef() of fpdown until 2051 * after we drop muxifier since strclose() can call munlinkall(). 2052 */ 2053 mutex_exit(&muxifier); 2054 2055 wrq = stpdown->sd_wrq; 2056 rq = _RD(wrq); 2057 2058 /* 2059 * Get rid of outstanding service procedure runs, before we make 2060 * it a stream head, since a stream head doesn't have any service 2061 * procedure. 2062 */ 2063 disable_svc(rq); 2064 wait_svc(rq); 2065 2066 /* 2067 * Since we don't disable the syncq for QPERMOD, we wait for whatever 2068 * is queued up to be finished. The mux should take care that nothing is 2069 * sent down to this queue. We should do it now as we're going to block 2070 * passyncq if it was unblocked. 2071 */ 2072 if (wrq->q_flag & QPERMOD) { 2073 syncq_t *sq = wrq->q_syncq; 2074 2075 mutex_enter(SQLOCK(sq)); 2076 while (wrq->q_sqflags & Q_SQQUEUED) { 2077 sq->sq_flags |= SQ_WANTWAKEUP; 2078 cv_wait(&sq->sq_wait, SQLOCK(sq)); 2079 } 2080 mutex_exit(SQLOCK(sq)); 2081 } 2082 passyncq = passq->q_syncq; 2083 if (!(passyncq->sq_flags & SQ_BLOCKED)) { 2084 2085 syncq_t *sq, *outer; 2086 2087 /* 2088 * Messages could be flowing from underneath. We will 2089 * block the read side of the passq. This would be 2090 * sufficient for QPAIR and QPERQ muxes to ensure 2091 * that no data is flowing up into this queue 2092 * and hence no thread is active in this instance of the 2093 * lower mux. But for QPERMOD and QMTOUTPERIM there 2094 * could be messages on the inner and outer/inner 2095 * syncqs respectively. We will wait for them to drain. 2096 * Because passq is blocked, messages end up in the syncq, 2097 * and qfill_syncq could possibly end up setting QFULL, 2098 * which will access the rq->q_flag. Hence, we have to 2099 * acquire the QLOCK in setq. 2100 * 2101 * XXX Messages can also flow from top into this 2102 * queue even though the unlink is over (Ex. some instance 2103 * in putnext() called from top that has still not 2104 * accessed this queue. And also putq(lowerq) ?). 2105 * Solution: How about blocking the l_qtop queue?
* Do we really care about such pure D_MP muxes? 2107 */ 2108 2109 blocksq(passyncq, SQ_BLOCKED, 0); 2110 2111 sq = rq->q_syncq; 2112 if ((outer = sq->sq_outer) != NULL) { 2113 2114 /* 2115 * We just have to wait for the outer sq_count to 2116 * drop to zero. As this does not prevent new 2117 * messages from entering the outer perimeter, this 2118 * is subject to starvation. 2119 * 2120 * NOTE: Because of the blocksq above, messages could 2121 * be in the inner syncq only because of some 2122 * thread holding the outer perimeter exclusively. 2123 * Hence it would be sufficient to wait for the 2124 * exclusive holder of the outer perimeter to drain 2125 * the inner and outer syncqs. But we will not depend 2126 * on this feature and hence check the inner syncqs 2127 * separately. 2128 */ 2129 wait_syncq(outer); 2130 } 2131 2132 2133 /* 2134 * There could be messages destined for 2135 * this queue. Let the exclusive holder 2136 * drain it. 2137 */ 2138 2139 wait_syncq(sq); 2140 ASSERT((rq->q_flag & QPERMOD) || 2141 ((rq->q_syncq->sq_head == NULL) && 2142 (_WR(rq)->q_syncq->sq_head == NULL))); 2143 } 2144 2145 /* 2146 * We haven't taken care of the QPERMOD case yet. QPERMOD is a special 2147 * case as we don't disable its syncq or remove it from the syncq 2148 * service list. 2149 */ 2150 if (rq->q_flag & QPERMOD) { 2151 syncq_t *sq = rq->q_syncq; 2152 2153 mutex_enter(SQLOCK(sq)); 2154 while (rq->q_sqflags & Q_SQQUEUED) { 2155 sq->sq_flags |= SQ_WANTWAKEUP; 2156 cv_wait(&sq->sq_wait, SQLOCK(sq)); 2157 } 2158 mutex_exit(SQLOCK(sq)); 2159 } 2160 2161 /* 2162 * flush_syncq changes state only when there are messages to 2163 * free, i.e., when it returns a non-zero value. 2164 */ 2165 ASSERT(flush_syncq(rq->q_syncq, rq) == 0); 2166 ASSERT(flush_syncq(wrq->q_syncq, wrq) == 0); 2167 2168 /* 2169 * Nobody else should know about this queue now. 2170 * If the mux did not process the messages before 2171 * acking the I_UNLINK, free them now. 2172 */ 2173 2174 flushq(rq, FLUSHALL); 2175 flushq(_WR(rq), FLUSHALL); 2176 2177 /* 2178 * Convert the mux lower queue into a stream head queue. 2179 * Turn off STPLEX before we turn on the stream by removing the passq. 2180 */ 2181 rq->q_ptr = wrq->q_ptr = stpdown; 2182 setq(rq, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO, B_TRUE); 2183 2184 ASSERT((rq->q_flag & QMT_TYPEMASK) == QMTSAFE); 2185 ASSERT(rq->q_syncq == SQ(rq) && _WR(rq)->q_syncq == SQ(rq)); 2186 2187 enable_svc(rq); 2188 2189 /* 2190 * Now it is a proper stream, so STPLEX is cleared. But STRPLUMB still 2191 * needs to be set to prevent reopen() of the stream - such a reopen may 2192 * try to call the non-existent pass queue open routine and panic. 2193 */ 2194 mutex_enter(&stpdown->sd_lock); 2195 stpdown->sd_flag &= ~STPLEX; 2196 mutex_exit(&stpdown->sd_lock); 2197 2198 ASSERT(((flag & LINKTYPEMASK) == LINKNORMAL) || 2199 ((flag & LINKTYPEMASK) == LINKPERSIST)); 2200 2201 /* clean up the layered driver linkages */ 2202 if ((flag & LINKTYPEMASK) == LINKNORMAL) { 2203 ldi_munlink_fp(stp, fpdown, LINKNORMAL); 2204 } else { 2205 ldi_munlink_fp(stp, fpdown, LINKPERSIST); 2206 } 2207 2208 link_rempassthru(passq); 2209 2210 /* 2211 * Now all plumbing changes are finished and STRPLUMB is no 2212 * longer needed. 2213 */ 2214 mutex_enter(&stpdown->sd_lock); 2215 stpdown->sd_flag &= ~STRPLUMB; 2216 cv_broadcast(&stpdown->sd_monitor); 2217 mutex_exit(&stpdown->sd_lock); 2218 2219 (void) closef(fpdown); 2220 return (0); 2221 } 2222 2223 /* 2224 * Unlink all multiplexor links for which stp is the controlling stream.
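* (Hedged example: strclose() is expected to tear down its remaining links through this path with a call along the lines of munlinkall(stp, LINKCLOSE|LINKNORMAL, crp, &rval); the exact arguments are an assumption here, not taken from the original.)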
2225 * Return 0, or a non-zero errno on failure. 2226 */ 2227 int 2228 munlinkall(stdata_t *stp, int flag, cred_t *crp, int *rvalp) 2229 { 2230 linkinfo_t *linkp; 2231 int error = 0; 2232 2233 mutex_enter(&muxifier); 2234 while (linkp = findlinks(stp, 0, flag)) { 2235 /* 2236 * munlink() releases the muxifier lock. 2237 */ 2238 if (error = munlink(stp, linkp, flag, crp, rvalp)) 2239 return (error); 2240 mutex_enter(&muxifier); 2241 } 2242 mutex_exit(&muxifier); 2243 return (0); 2244 } 2245 2246 /* 2247 * A multiplexor link has been made. Add an 2248 * edge to the directed graph. 2249 */ 2250 void 2251 mux_addedge(stdata_t *upstp, stdata_t *lostp, int muxid) 2252 { 2253 struct mux_node *np; 2254 struct mux_edge *ep; 2255 major_t upmaj; 2256 major_t lomaj; 2257 2258 upmaj = getmajor(upstp->sd_vnode->v_rdev); 2259 lomaj = getmajor(lostp->sd_vnode->v_rdev); 2260 np = &mux_nodes[upmaj]; 2261 if (np->mn_outp) { 2262 ep = np->mn_outp; 2263 while (ep->me_nextp) 2264 ep = ep->me_nextp; 2265 ep->me_nextp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP); 2266 ep = ep->me_nextp; 2267 } else { 2268 np->mn_outp = kmem_alloc(sizeof (struct mux_edge), KM_SLEEP); 2269 ep = np->mn_outp; 2270 } 2271 ep->me_nextp = NULL; 2272 ep->me_muxid = muxid; 2273 if (lostp->sd_vnode->v_type == VFIFO) 2274 ep->me_nodep = NULL; 2275 else 2276 ep->me_nodep = &mux_nodes[lomaj]; 2277 } 2278 2279 /* 2280 * A multiplexor link has been removed. Remove the 2281 * edge in the directed graph. 2282 */ 2283 void 2284 mux_rmvedge(stdata_t *upstp, int muxid) 2285 { 2286 struct mux_node *np; 2287 struct mux_edge *ep; 2288 struct mux_edge *pep = NULL; 2289 major_t upmaj; 2290 2291 upmaj = getmajor(upstp->sd_vnode->v_rdev); 2292 np = &mux_nodes[upmaj]; 2293 ASSERT(np->mn_outp != NULL); 2294 ep = np->mn_outp; 2295 while (ep) { 2296 if (ep->me_muxid == muxid) { 2297 if (pep) 2298 pep->me_nextp = ep->me_nextp; 2299 else 2300 np->mn_outp = ep->me_nextp; 2301 kmem_free(ep, sizeof (struct mux_edge)); 2302 return; 2303 } 2304 pep = ep; 2305 ep = ep->me_nextp; 2306 } 2307 ASSERT(0); /* should not reach here */ 2308 } 2309 2310 /* 2311 * Translate the device flags (from conf.h) to the corresponding 2312 * qflag and sq_flag (type) values. 2313 */ 2314 int 2315 devflg_to_qflag(struct streamtab *stp, uint32_t devflag, uint32_t *qflagp, 2316 uint32_t *sqtypep) 2317 { 2318 uint32_t qflag = 0; 2319 uint32_t sqtype = 0; 2320 2321 if (devflag & _D_OLD) 2322 goto bad; 2323 2324 /* Inner perimeter presence and scope */ 2325 switch (devflag & D_MTINNER_MASK) { 2326 case D_MP: 2327 qflag |= QMTSAFE; 2328 sqtype |= SQ_CI; 2329 break; 2330 case D_MTPERQ|D_MP: 2331 qflag |= QPERQ; 2332 break; 2333 case D_MTQPAIR|D_MP: 2334 qflag |= QPAIR; 2335 break; 2336 case D_MTPERMOD|D_MP: 2337 qflag |= QPERMOD; 2338 break; 2339 default: 2340 goto bad; 2341 } 2342 2343 /* Outer perimeter */ 2344 if (devflag & D_MTOUTPERIM) { 2345 switch (devflag & D_MTINNER_MASK) { 2346 case D_MP: 2347 case D_MTPERQ|D_MP: 2348 case D_MTQPAIR|D_MP: 2349 break; 2350 default: 2351 goto bad; 2352 } 2353 qflag |= QMTOUTPERIM; 2354 } 2355 2356 /* Inner perimeter modifiers */ 2357 if (devflag & D_MTINNER_MOD) { 2358 switch (devflag & D_MTINNER_MASK) { 2359 case D_MP: 2360 goto bad; 2361 default: 2362 break; 2363 } 2364 if (devflag & D_MTPUTSHARED) 2365 sqtype |= SQ_CIPUT; 2366 if (devflag & _D_MTOCSHARED) { 2367 /* 2368 * The code in putnext assumes that it has the 2369 * highest concurrency by not checking sq_count. 2370 * Thus _D_MTOCSHARED can only be supported when 2371 * D_MTPUTSHARED is set. 
2372 */ 2373 if (!(devflag & D_MTPUTSHARED)) 2374 goto bad; 2375 sqtype |= SQ_CIOC; 2376 } 2377 if (devflag & _D_MTCBSHARED) { 2378 /* 2379 * The code in putnext assumes that it has the 2380 * highest concurrency by not checking sq_count. 2381 * Thus _D_MTCBSHARED can only be supported when 2382 * D_MTPUTSHARED is set. 2383 */ 2384 if (!(devflag & D_MTPUTSHARED)) 2385 goto bad; 2386 sqtype |= SQ_CICB; 2387 } 2388 if (devflag & _D_MTSVCSHARED) { 2389 /* 2390 * The code in putnext assumes that it has the 2391 * highest concurrency by not checking sq_count. 2392 * Thus _D_MTSVCSHARED can only be supported when 2393 * D_MTPUTSHARED is set. Also _D_MTSVCSHARED is 2394 * supported only for QPERMOD. 2395 */ 2396 if (!(devflag & D_MTPUTSHARED) || !(qflag & QPERMOD)) 2397 goto bad; 2398 sqtype |= SQ_CISVC; 2399 } 2400 } 2401 2402 /* Default outer perimeter concurrency */ 2403 sqtype |= SQ_CO; 2404 2405 /* Outer perimeter modifiers */ 2406 if (devflag & D_MTOCEXCL) { 2407 if (!(devflag & D_MTOUTPERIM)) { 2408 /* No outer perimeter */ 2409 goto bad; 2410 } 2411 sqtype &= ~SQ_COOC; 2412 } 2413 2414 /* Synchronous Streams extended qinit structure */ 2415 if (devflag & D_SYNCSTR) 2416 qflag |= QSYNCSTR; 2417 2418 /* 2419 * Private flag used by a transport module to indicate 2420 * to sockfs that it supports direct-access mode without 2421 * having to go through STREAMS. 2422 */ 2423 if (devflag & _D_DIRECT) { 2424 /* Reject unless the module is fully-MT (no perimeter) */ 2425 if ((qflag & QMT_TYPEMASK) != QMTSAFE) 2426 goto bad; 2427 qflag |= _QDIRECT; 2428 } 2429 2430 *qflagp = qflag; 2431 *sqtypep = sqtype; 2432 return (0); 2433 2434 bad: 2435 cmn_err(CE_WARN, 2436 "stropen: bad MT flags (0x%x) in driver '%s'", 2437 (int)(qflag & D_MTSAFETY_MASK), 2438 stp->st_rdinit->qi_minfo->mi_idname); 2439 2440 return (EINVAL); 2441 } 2442 2443 /* 2444 * Set the interface values for a pair of queues (qinit structure, 2445 * packet sizes, water marks). 2446 * setq assumes that the caller does not have a claim (entersq or claimq) 2447 * on the queue. 
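* (Illustrative sketch of the flow; names other than setq/devflg_to_qflag are assumed: for a plain D_MP driver, devflg_to_qflag() above yields qflag == QMTSAFE and sqtype == SQ_CI|SQ_CO, after which a caller would do something like setq(rq, rinit, winit, NULL, qflag, sqtype, B_FALSE); the perdm_t argument is only required for QPERMOD/QMTOUTPERIM, as the IMPLY() below asserts.)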
2448 */ 2449 void 2450 setq(queue_t *rq, struct qinit *rinit, struct qinit *winit, 2451 perdm_t *dmp, uint32_t qflag, uint32_t sqtype, boolean_t lock_needed) 2452 { 2453 queue_t *wq; 2454 syncq_t *sq, *outer; 2455 2456 ASSERT(rq->q_flag & QREADR); 2457 ASSERT((qflag & QMT_TYPEMASK) != 0); 2458 IMPLY((qflag & (QPERMOD | QMTOUTPERIM)), dmp != NULL); 2459 2460 wq = _WR(rq); 2461 rq->q_qinfo = rinit; 2462 rq->q_hiwat = rinit->qi_minfo->mi_hiwat; 2463 rq->q_lowat = rinit->qi_minfo->mi_lowat; 2464 rq->q_minpsz = rinit->qi_minfo->mi_minpsz; 2465 rq->q_maxpsz = rinit->qi_minfo->mi_maxpsz; 2466 wq->q_qinfo = winit; 2467 wq->q_hiwat = winit->qi_minfo->mi_hiwat; 2468 wq->q_lowat = winit->qi_minfo->mi_lowat; 2469 wq->q_minpsz = winit->qi_minfo->mi_minpsz; 2470 wq->q_maxpsz = winit->qi_minfo->mi_maxpsz; 2471 2472 /* Remove old syncqs */ 2473 sq = rq->q_syncq; 2474 outer = sq->sq_outer; 2475 if (outer != NULL) { 2476 ASSERT(wq->q_syncq->sq_outer == outer); 2477 outer_remove(outer, rq->q_syncq); 2478 if (wq->q_syncq != rq->q_syncq) 2479 outer_remove(outer, wq->q_syncq); 2480 } 2481 ASSERT(sq->sq_outer == NULL); 2482 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL); 2483 2484 if (sq != SQ(rq)) { 2485 if (!(rq->q_flag & QPERMOD)) 2486 free_syncq(sq); 2487 if (wq->q_syncq == rq->q_syncq) 2488 wq->q_syncq = NULL; 2489 rq->q_syncq = NULL; 2490 } 2491 if (wq->q_syncq != NULL && wq->q_syncq != sq && 2492 wq->q_syncq != SQ(rq)) { 2493 free_syncq(wq->q_syncq); 2494 wq->q_syncq = NULL; 2495 } 2496 ASSERT(rq->q_syncq == NULL || (rq->q_syncq->sq_head == NULL && 2497 rq->q_syncq->sq_tail == NULL)); 2498 ASSERT(wq->q_syncq == NULL || (wq->q_syncq->sq_head == NULL && 2499 wq->q_syncq->sq_tail == NULL)); 2500 2501 if (!(rq->q_flag & QPERMOD) && 2502 rq->q_syncq != NULL && rq->q_syncq->sq_ciputctrl != NULL) { 2503 ASSERT(rq->q_syncq->sq_nciputctrl == n_ciputctrl - 1); 2504 SUMCHECK_CIPUTCTRL_COUNTS(rq->q_syncq->sq_ciputctrl, 2505 rq->q_syncq->sq_nciputctrl, 0); 2506 ASSERT(ciputctrl_cache != NULL); 2507 kmem_cache_free(ciputctrl_cache, rq->q_syncq->sq_ciputctrl); 2508 rq->q_syncq->sq_ciputctrl = NULL; 2509 rq->q_syncq->sq_nciputctrl = 0; 2510 } 2511 2512 if (!(wq->q_flag & QPERMOD) && 2513 wq->q_syncq != NULL && wq->q_syncq->sq_ciputctrl != NULL) { 2514 ASSERT(wq->q_syncq->sq_nciputctrl == n_ciputctrl - 1); 2515 SUMCHECK_CIPUTCTRL_COUNTS(wq->q_syncq->sq_ciputctrl, 2516 wq->q_syncq->sq_nciputctrl, 0); 2517 ASSERT(ciputctrl_cache != NULL); 2518 kmem_cache_free(ciputctrl_cache, wq->q_syncq->sq_ciputctrl); 2519 wq->q_syncq->sq_ciputctrl = NULL; 2520 wq->q_syncq->sq_nciputctrl = 0; 2521 } 2522 2523 sq = SQ(rq); 2524 ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL); 2525 ASSERT(sq->sq_outer == NULL); 2526 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL); 2527 2528 /* 2529 * Create syncqs based on qflag and sqtype. Set the SQ_TYPES_IN_FLAGS 2530 * bits in sq_flag based on the sqtype. 2531 */ 2532 ASSERT((sq->sq_flags & ~SQ_TYPES_IN_FLAGS) == 0); 2533 2534 rq->q_syncq = wq->q_syncq = sq; 2535 sq->sq_type = sqtype; 2536 sq->sq_flags = (sqtype & SQ_TYPES_IN_FLAGS); 2537 2538 /* 2539 * We are making sq_svcflags zero, 2540 * resetting SQ_DISABLED in case it was set by 2541 * wait_svc() in the munlink path. 2542 * 2543 */ 2544 ASSERT((sq->sq_svcflags & SQ_SERVICE) == 0); 2545 sq->sq_svcflags = 0; 2546 2547 /* 2548 * We need to acquire the lock here for the mlink and munlink case, 2549 * where canputnext, backenable, etc can access the q_flag. 
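* (Editorial inference, not stated in the original: when the queue pair is freshly allocated and not yet visible to any other thread, callers pass lock_needed == B_FALSE and the flags are set without holding QLOCK, as in the else branch below.)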
2550 */ 2551 if (lock_needed) { 2552 mutex_enter(QLOCK(rq)); 2553 rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2554 mutex_exit(QLOCK(rq)); 2555 mutex_enter(QLOCK(wq)); 2556 wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2557 mutex_exit(QLOCK(wq)); 2558 } else { 2559 rq->q_flag = (rq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2560 wq->q_flag = (wq->q_flag & ~QMT_TYPEMASK) | QWANTR | qflag; 2561 } 2562 2563 if (qflag & QPERQ) { 2564 /* Allocate a separate syncq for the write side */ 2565 sq = new_syncq(); 2566 sq->sq_type = rq->q_syncq->sq_type; 2567 sq->sq_flags = rq->q_syncq->sq_flags; 2568 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL && 2569 sq->sq_oprev == NULL); 2570 wq->q_syncq = sq; 2571 } 2572 if (qflag & QPERMOD) { 2573 sq = dmp->dm_sq; 2574 2575 /* 2576 * Assert that we do have an inner perimeter syncq and that it 2577 * does not have an outer perimeter associated with it. 2578 */ 2579 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL && 2580 sq->sq_oprev == NULL); 2581 rq->q_syncq = wq->q_syncq = sq; 2582 } 2583 if (qflag & QMTOUTPERIM) { 2584 outer = dmp->dm_sq; 2585 2586 ASSERT(outer->sq_outer == NULL); 2587 outer_insert(outer, rq->q_syncq); 2588 if (wq->q_syncq != rq->q_syncq) 2589 outer_insert(outer, wq->q_syncq); 2590 } 2591 ASSERT((rq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) == 2592 (rq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS)); 2593 ASSERT((wq->q_syncq->sq_flags & SQ_TYPES_IN_FLAGS) == 2594 (wq->q_syncq->sq_type & SQ_TYPES_IN_FLAGS)); 2595 ASSERT((rq->q_flag & QMT_TYPEMASK) == (qflag & QMT_TYPEMASK)); 2596 2597 /* 2598 * Initialize struio() types. 2599 */ 2600 rq->q_struiot = 2601 (rq->q_flag & QSYNCSTR) ? rinit->qi_struiot : STRUIOT_NONE; 2602 wq->q_struiot = 2603 (wq->q_flag & QSYNCSTR) ? winit->qi_struiot : STRUIOT_NONE; 2604 } 2605 2606 perdm_t * 2607 hold_dm(struct streamtab *str, uint32_t qflag, uint32_t sqtype) 2608 { 2609 syncq_t *sq; 2610 perdm_t **pp; 2611 perdm_t *p; 2612 perdm_t *dmp; 2613 2614 ASSERT(str != NULL); 2615 ASSERT(qflag & (QPERMOD | QMTOUTPERIM)); 2616 2617 rw_enter(&perdm_rwlock, RW_READER); 2618 for (p = perdm_list; p != NULL; p = p->dm_next) { 2619 if (p->dm_str == str) { /* found one */ 2620 atomic_add_32(&(p->dm_ref), 1); 2621 rw_exit(&perdm_rwlock); 2622 return (p); 2623 } 2624 } 2625 rw_exit(&perdm_rwlock); 2626 2627 sq = new_syncq(); 2628 if (qflag & QPERMOD) { 2629 sq->sq_type = sqtype | SQ_PERMOD; 2630 sq->sq_flags = sqtype & SQ_TYPES_IN_FLAGS; 2631 } else { 2632 ASSERT(qflag & QMTOUTPERIM); 2633 sq->sq_onext = sq->sq_oprev = sq; 2634 } 2635 2636 dmp = kmem_alloc(sizeof (perdm_t), KM_SLEEP); 2637 dmp->dm_sq = sq; 2638 dmp->dm_str = str; 2639 dmp->dm_ref = 1; 2640 dmp->dm_next = NULL; 2641 2642 rw_enter(&perdm_rwlock, RW_WRITER); 2643 for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next)) { 2644 if (p->dm_str == str) { /* already present */ 2645 p->dm_ref++; 2646 rw_exit(&perdm_rwlock); 2647 free_syncq(sq); 2648 kmem_free(dmp, sizeof (perdm_t)); 2649 return (p); 2650 } 2651 } 2652 2653 *pp = dmp; 2654 rw_exit(&perdm_rwlock); 2655 return (dmp); 2656 } 2657 2658 void 2659 rele_dm(perdm_t *dmp) 2660 { 2661 perdm_t **pp; 2662 perdm_t *p; 2663 2664 rw_enter(&perdm_rwlock, RW_WRITER); 2665 ASSERT(dmp->dm_ref > 0); 2666 2667 if (--dmp->dm_ref > 0) { 2668 rw_exit(&perdm_rwlock); 2669 return; 2670 } 2671 2672 for (pp = &perdm_list; (p = *pp) != NULL; pp = &(p->dm_next)) 2673 if (p == dmp) 2674 break; 2675 ASSERT(p == dmp); 2676 *pp = p->dm_next; 2677 rw_exit(&perdm_rwlock); 2678 2679 /* 2680 * Wait for any 
background processing that relies on the 2681 * syncq to complete before it is freed. 2682 */ 2683 wait_sq_svc(p->dm_sq); 2684 free_syncq(p->dm_sq); 2685 kmem_free(p, sizeof (perdm_t)); 2686 } 2687 2688 /* 2689 * Make a protocol message given control and data buffers. 2690 * n.b., this can block; be careful of what locks you hold when calling it. 2691 * 2692 * If sd_maxblk is less than *iosize this routine can fail part way through 2693 * (due to an allocation failure). In this case on return *iosize will contain 2694 * the amount that was consumed. Otherwise *iosize will not be modified 2695 * i.e. it will contain the amount that was consumed. 2696 */ 2697 int 2698 strmakemsg( 2699 struct strbuf *mctl, 2700 ssize_t *iosize, 2701 struct uio *uiop, 2702 stdata_t *stp, 2703 int32_t flag, 2704 mblk_t **mpp) 2705 { 2706 mblk_t *mpctl = NULL; 2707 mblk_t *mpdata = NULL; 2708 int error; 2709 2710 ASSERT(uiop != NULL); 2711 2712 *mpp = NULL; 2713 /* Create control part, if any */ 2714 if ((mctl != NULL) && (mctl->len >= 0)) { 2715 error = strmakectl(mctl, flag, uiop->uio_fmode, &mpctl); 2716 if (error) 2717 return (error); 2718 } 2719 /* Create data part, if any */ 2720 if (*iosize >= 0) { 2721 error = strmakedata(iosize, uiop, stp, flag, &mpdata); 2722 if (error) { 2723 freemsg(mpctl); 2724 return (error); 2725 } 2726 } 2727 if (mpctl != NULL) { 2728 if (mpdata != NULL) 2729 linkb(mpctl, mpdata); 2730 *mpp = mpctl; 2731 } else { 2732 *mpp = mpdata; 2733 } 2734 return (0); 2735 } 2736 2737 /* 2738 * Make the control part of a protocol message given a control buffer. 2739 * n.b., this can block; be careful of what locks you hold when calling it. 2740 */ 2741 int 2742 strmakectl( 2743 struct strbuf *mctl, 2744 int32_t flag, 2745 int32_t fflag, 2746 mblk_t **mpp) 2747 { 2748 mblk_t *bp = NULL; 2749 unsigned char msgtype; 2750 int error = 0; 2751 2752 *mpp = NULL; 2753 /* 2754 * Create control part of message, if any. 2755 */ 2756 if ((mctl != NULL) && (mctl->len >= 0)) { 2757 caddr_t base; 2758 int ctlcount; 2759 int allocsz; 2760 2761 if (flag & RS_HIPRI) 2762 msgtype = M_PCPROTO; 2763 else 2764 msgtype = M_PROTO; 2765 2766 ctlcount = mctl->len; 2767 base = mctl->buf; 2768 2769 /* 2770 * Give modules a better chance to reuse M_PROTO/M_PCPROTO 2771 * blocks by increasing the size to something more usable. 2772 */ 2773 allocsz = MAX(ctlcount, 64); 2774 2775 /* 2776 * Range checking has already been done; simply try 2777 * to allocate a message block for the ctl part. 2778 */ 2779 while (!(bp = allocb(allocsz, BPRI_MED))) { 2780 if (fflag & (FNDELAY|FNONBLOCK)) 2781 return (EAGAIN); 2782 if (error = strwaitbuf(allocsz, BPRI_MED)) 2783 return (error); 2784 } 2785 2786 bp->b_datap->db_type = msgtype; 2787 if (copyin(base, bp->b_wptr, ctlcount)) { 2788 freeb(bp); 2789 return (EFAULT); 2790 } 2791 bp->b_wptr += ctlcount; 2792 } 2793 *mpp = bp; 2794 return (0); 2795 } 2796 2797 /* 2798 * Make a protocol message given data buffers. 2799 * n.b., this can block; be careful of what locks you hold when calling it. 2800 * 2801 * If sd_maxblk is less than *iosize this routine can fail part way through 2802 * (due to an allocation failure). In this case on return *iosize will contain 2803 * the amount that was consumed. Otherwise *iosize will not be modified 2804 * i.e. it will contain the amount that was consumed. 
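* (Worked example, illustrative: a 16384-byte uio with sd_maxblk == 4096 that hits an allocb_cred() failure after two successful 4096-byte blocks returns 0 with *iosize == 8192 and *mpp pointing at the two linked mblks; only a failure on the very first block returns the error itself.)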
2805 */ 2806 int 2807 strmakedata( 2808 ssize_t *iosize, 2809 struct uio *uiop, 2810 stdata_t *stp, 2811 int32_t flag, 2812 mblk_t **mpp) 2813 { 2814 mblk_t *mp = NULL; 2815 mblk_t *bp; 2816 int wroff = (int)stp->sd_wroff; 2817 int tail_len = (int)stp->sd_tail; 2818 int extra = wroff + tail_len; 2819 int error = 0; 2820 ssize_t maxblk; 2821 ssize_t count = *iosize; 2822 cred_t *cr = CRED(); 2823 2824 *mpp = NULL; 2825 if (count < 0) 2826 return (0); 2827 2828 maxblk = stp->sd_maxblk; 2829 if (maxblk == INFPSZ) 2830 maxblk = count; 2831 2832 /* 2833 * Create data part of message, if any. 2834 */ 2835 do { 2836 ssize_t size; 2837 dblk_t *dp; 2838 2839 ASSERT(uiop); 2840 2841 size = MIN(count, maxblk); 2842 2843 while ((bp = allocb_cred(size + extra, cr)) == NULL) { 2844 error = EAGAIN; 2845 if ((uiop->uio_fmode & (FNDELAY|FNONBLOCK)) || 2846 (error = strwaitbuf(size + extra, BPRI_MED)) != 0) { 2847 if (count == *iosize) { 2848 freemsg(mp); 2849 return (error); 2850 } else { 2851 *iosize -= count; 2852 *mpp = mp; 2853 return (0); 2854 } 2855 } 2856 } 2857 dp = bp->b_datap; 2858 dp->db_cpid = curproc->p_pid; 2859 ASSERT(wroff <= dp->db_lim - bp->b_wptr); 2860 bp->b_wptr = bp->b_rptr = bp->b_rptr + wroff; 2861 2862 if (flag & STRUIO_POSTPONE) { 2863 /* 2864 * Setup the stream uio portion of the 2865 * dblk for subsequent use by struioget(). 2866 */ 2867 dp->db_struioflag = STRUIO_SPEC; 2868 dp->db_cksumstart = 0; 2869 dp->db_cksumstuff = 0; 2870 dp->db_cksumend = size; 2871 *(long long *)dp->db_struioun.data = 0ll; 2872 bp->b_wptr += size; 2873 } else { 2874 if (stp->sd_copyflag & STRCOPYCACHED) 2875 uiop->uio_extflg |= UIO_COPY_CACHED; 2876 2877 if (size != 0) { 2878 error = uiomove(bp->b_wptr, size, UIO_WRITE, 2879 uiop); 2880 if (error != 0) { 2881 freeb(bp); 2882 freemsg(mp); 2883 return (error); 2884 } 2885 } 2886 bp->b_wptr += size; 2887 2888 if (stp->sd_wputdatafunc != NULL) { 2889 mblk_t *newbp; 2890 2891 newbp = (stp->sd_wputdatafunc)(stp->sd_vnode, 2892 bp, NULL, NULL, NULL, NULL); 2893 if (newbp == NULL) { 2894 freeb(bp); 2895 freemsg(mp); 2896 return (ECOMM); 2897 } 2898 bp = newbp; 2899 } 2900 } 2901 2902 count -= size; 2903 2904 if (mp == NULL) 2905 mp = bp; 2906 else 2907 linkb(mp, bp); 2908 } while (count > 0); 2909 2910 *mpp = mp; 2911 return (0); 2912 } 2913 2914 /* 2915 * Wait for a buffer to become available. Return non-zero errno 2916 * if not able to wait, 0 if buffer is probably there. 2917 */ 2918 int 2919 strwaitbuf(size_t size, int pri) 2920 { 2921 bufcall_id_t id; 2922 2923 mutex_enter(&bcall_monitor); 2924 if ((id = bufcall(size, pri, (void (*)(void *))cv_broadcast, 2925 &ttoproc(curthread)->p_flag_cv)) == 0) { 2926 mutex_exit(&bcall_monitor); 2927 return (ENOSR); 2928 } 2929 if (!cv_wait_sig(&(ttoproc(curthread)->p_flag_cv), &bcall_monitor)) { 2930 unbufcall(id); 2931 mutex_exit(&bcall_monitor); 2932 return (EINTR); 2933 } 2934 unbufcall(id); 2935 mutex_exit(&bcall_monitor); 2936 return (0); 2937 } 2938 2939 /* 2940 * This function waits for a read or write event to happen on a stream. 2941 * fmode can specify FNDELAY and/or FNONBLOCK. 2942 * The timeout is in ms with -1 meaning infinite. 2943 * The flag values work as follows: 2944 * READWAIT Check for read side errors, send M_READ 2945 * GETWAIT Check for read side errors, no M_READ 2946 * WRITEWAIT Check for write side errors. 2947 * NOINTR Do not return error if nonblocking or timeout. 2948 * STR_NOERROR Ignore all errors except STPLEX. 2949 * STR_NOSIG Ignore/hold signals during the duration of the call. 
2950 * STR_PEEK Pass through the strgeterr(). 2951 */ 2952 int 2953 strwaitq(stdata_t *stp, int flag, ssize_t count, int fmode, clock_t timout, 2954 int *done) 2955 { 2956 int slpflg, errs; 2957 int error; 2958 kcondvar_t *sleepon; 2959 mblk_t *mp; 2960 ssize_t *rd_count; 2961 clock_t rval; 2962 2963 ASSERT(MUTEX_HELD(&stp->sd_lock)); 2964 if ((flag & READWAIT) || (flag & GETWAIT)) { 2965 slpflg = RSLEEP; 2966 sleepon = &_RD(stp->sd_wrq)->q_wait; 2967 errs = STRDERR|STPLEX; 2968 } else { 2969 slpflg = WSLEEP; 2970 sleepon = &stp->sd_wrq->q_wait; 2971 errs = STWRERR|STRHUP|STPLEX; 2972 } 2973 if (flag & STR_NOERROR) 2974 errs = STPLEX; 2975 2976 if (stp->sd_wakeq & slpflg) { 2977 /* 2978 * A strwakeq() is pending, no need to sleep. 2979 */ 2980 stp->sd_wakeq &= ~slpflg; 2981 *done = 0; 2982 return (0); 2983 } 2984 2985 if (fmode & (FNDELAY|FNONBLOCK)) { 2986 if (!(flag & NOINTR)) 2987 error = EAGAIN; 2988 else 2989 error = 0; 2990 *done = 1; 2991 return (error); 2992 } 2993 2994 if (stp->sd_flag & errs) { 2995 /* 2996 * Check for errors before going to sleep since the 2997 * caller might not have checked this while holding 2998 * sd_lock. 2999 */ 3000 error = strgeterr(stp, errs, (flag & STR_PEEK)); 3001 if (error != 0) { 3002 *done = 1; 3003 return (error); 3004 } 3005 } 3006 3007 /* 3008 * If any module downstream has requested read notification 3009 * by setting SNDMREAD flag using M_SETOPTS, send a message 3010 * down stream. 3011 */ 3012 if ((flag & READWAIT) && (stp->sd_flag & SNDMREAD)) { 3013 mutex_exit(&stp->sd_lock); 3014 if (!(mp = allocb_wait(sizeof (ssize_t), BPRI_MED, 3015 (flag & STR_NOSIG), &error))) { 3016 mutex_enter(&stp->sd_lock); 3017 *done = 1; 3018 return (error); 3019 } 3020 mp->b_datap->db_type = M_READ; 3021 rd_count = (ssize_t *)mp->b_wptr; 3022 *rd_count = count; 3023 mp->b_wptr += sizeof (ssize_t); 3024 /* 3025 * Send the number of bytes requested by the 3026 * read as the argument to M_READ. 3027 */ 3028 stream_willservice(stp); 3029 putnext(stp->sd_wrq, mp); 3030 stream_runservice(stp); 3031 mutex_enter(&stp->sd_lock); 3032 3033 /* 3034 * If any data arrived due to inline processing 3035 * of putnext(), don't sleep. 3036 */ 3037 if (_RD(stp->sd_wrq)->q_first != NULL) { 3038 *done = 0; 3039 return (0); 3040 } 3041 } 3042 3043 stp->sd_flag |= slpflg; 3044 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAIT2, 3045 "strwaitq sleeps (2):%p, %X, %lX, %X, %p", 3046 stp, flag, count, fmode, done); 3047 3048 rval = str_cv_wait(sleepon, &stp->sd_lock, timout, flag & STR_NOSIG); 3049 if (rval > 0) { 3050 /* EMPTY */ 3051 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_WAKE2, 3052 "strwaitq awakes(2):%X, %X, %X, %X, %X", 3053 stp, flag, count, fmode, done); 3054 } else if (rval == 0) { 3055 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_INTR2, 3056 "strwaitq interrupt #2:%p, %X, %lX, %X, %p", 3057 stp, flag, count, fmode, done); 3058 stp->sd_flag &= ~slpflg; 3059 cv_broadcast(sleepon); 3060 if (!(flag & NOINTR)) 3061 error = EINTR; 3062 else 3063 error = 0; 3064 *done = 1; 3065 return (error); 3066 } else { 3067 /* timeout */ 3068 TRACE_5(TR_FAC_STREAMS_FR, TR_STRWAITQ_TIME, 3069 "strwaitq timeout:%p, %X, %lX, %X, %p", 3070 stp, flag, count, fmode, done); 3071 *done = 1; 3072 if (!(flag & NOINTR)) 3073 return (ETIME); 3074 else 3075 return (0); 3076 } 3077 /* 3078 * If the caller implements delayed errors (i.e. queued after data) 3079 * we can not check for errors here since data as well as an 3080 * error might have arrived at the stream head. 
We return to 3081 * have the caller check the read queue before checking for errors. 3082 */ 3083 if ((stp->sd_flag & errs) && !(flag & STR_DELAYERR)) { 3084 error = strgeterr(stp, errs, (flag & STR_PEEK)); 3085 if (error != 0) { 3086 *done = 1; 3087 return (error); 3088 } 3089 } 3090 *done = 0; 3091 return (0); 3092 } 3093 3094 /* 3095 * Perform job control discipline access checks. 3096 * Return 0 for success and the errno for failure. 3097 */ 3098 3099 #define cantsend(p, t, sig) \ 3100 (sigismember(&(p)->p_ignore, sig) || signal_is_blocked((t), sig)) 3101 3102 int 3103 straccess(struct stdata *stp, enum jcaccess mode) 3104 { 3105 extern kcondvar_t lbolt_cv; /* XXX: should be in a header file */ 3106 kthread_t *t = curthread; 3107 proc_t *p = ttoproc(t); 3108 sess_t *sp; 3109 3110 ASSERT(mutex_owned(&stp->sd_lock)); 3111 3112 if (stp->sd_sidp == NULL || stp->sd_vnode->v_type == VFIFO) 3113 return (0); 3114 3115 mutex_enter(&p->p_lock); /* protects p_pgidp */ 3116 3117 for (;;) { 3118 mutex_enter(&p->p_splock); /* protects p->p_sessp */ 3119 sp = p->p_sessp; 3120 mutex_enter(&sp->s_lock); /* protects sp->* */ 3121 3122 /* 3123 * If this is not the calling process's controlling terminal 3124 * or if the calling process is already in the foreground 3125 * then allow access. 3126 */ 3127 if (sp->s_dev != stp->sd_vnode->v_rdev || 3128 p->p_pgidp == stp->sd_pgidp) { 3129 mutex_exit(&sp->s_lock); 3130 mutex_exit(&p->p_splock); 3131 mutex_exit(&p->p_lock); 3132 return (0); 3133 } 3134 3135 /* 3136 * Check to see if controlling terminal has been deallocated. 3137 */ 3138 if (sp->s_vp == NULL) { 3139 if (!cantsend(p, t, SIGHUP)) 3140 sigtoproc(p, t, SIGHUP); 3141 mutex_exit(&sp->s_lock); 3142 mutex_exit(&p->p_splock); 3143 mutex_exit(&p->p_lock); 3144 return (EIO); 3145 } 3146 3147 mutex_exit(&sp->s_lock); 3148 mutex_exit(&p->p_splock); 3149 3150 if (mode == JCGETP) { 3151 mutex_exit(&p->p_lock); 3152 return (0); 3153 } 3154 3155 if (mode == JCREAD) { 3156 if (p->p_detached || cantsend(p, t, SIGTTIN)) { 3157 mutex_exit(&p->p_lock); 3158 return (EIO); 3159 } 3160 mutex_exit(&p->p_lock); 3161 mutex_exit(&stp->sd_lock); 3162 pgsignal(p->p_pgidp, SIGTTIN); 3163 mutex_enter(&stp->sd_lock); 3164 mutex_enter(&p->p_lock); 3165 } else { /* mode == JCWRITE or JCSETP */ 3166 if ((mode == JCWRITE && !(stp->sd_flag & STRTOSTOP)) || 3167 cantsend(p, t, SIGTTOU)) { 3168 mutex_exit(&p->p_lock); 3169 return (0); 3170 } 3171 if (p->p_detached) { 3172 mutex_exit(&p->p_lock); 3173 return (EIO); 3174 } 3175 mutex_exit(&p->p_lock); 3176 mutex_exit(&stp->sd_lock); 3177 pgsignal(p->p_pgidp, SIGTTOU); 3178 mutex_enter(&stp->sd_lock); 3179 mutex_enter(&p->p_lock); 3180 } 3181 3182 /* 3183 * We call cv_wait_sig_swap() to cause the appropriate 3184 * action for the jobcontrol signal to take place. 3185 * If the signal is being caught, we will take the 3186 * EINTR error return. Otherwise, the default action 3187 * of causing the process to stop will take place. 3188 * In this case, we rely on the periodic cv_broadcast() on 3189 * &lbolt_cv to wake us up to loop around and test again. 3190 * We can't get here if the signal is ignored or 3191 * if the current thread is blocking the signal. 
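* (This is guaranteed by the cantsend() checks above, which return EIO or success before any job control signal is posted.)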
3192 */ 3193 mutex_exit(&stp->sd_lock); 3194 if (!cv_wait_sig_swap(&lbolt_cv, &p->p_lock)) { 3195 mutex_exit(&p->p_lock); 3196 mutex_enter(&stp->sd_lock); 3197 return (EINTR); 3198 } 3199 mutex_exit(&p->p_lock); 3200 mutex_enter(&stp->sd_lock); 3201 mutex_enter(&p->p_lock); 3202 } 3203 } 3204 3205 /* 3206 * Return size of message of block type (bp->b_datap->db_type) 3207 */ 3208 size_t 3209 xmsgsize(mblk_t *bp) 3210 { 3211 unsigned char type; 3212 size_t count = 0; 3213 3214 type = bp->b_datap->db_type; 3215 3216 for (; bp; bp = bp->b_cont) { 3217 if (type != bp->b_datap->db_type) 3218 break; 3219 ASSERT(bp->b_wptr >= bp->b_rptr); 3220 count += bp->b_wptr - bp->b_rptr; 3221 } 3222 return (count); 3223 } 3224 3225 /* 3226 * Allocate a stream head. 3227 */ 3228 struct stdata * 3229 shalloc(queue_t *qp) 3230 { 3231 stdata_t *stp; 3232 3233 stp = kmem_cache_alloc(stream_head_cache, KM_SLEEP); 3234 3235 stp->sd_wrq = _WR(qp); 3236 stp->sd_strtab = NULL; 3237 stp->sd_iocid = 0; 3238 stp->sd_mate = NULL; 3239 stp->sd_freezer = NULL; 3240 stp->sd_refcnt = 0; 3241 stp->sd_wakeq = 0; 3242 stp->sd_anchor = 0; 3243 stp->sd_struiowrq = NULL; 3244 stp->sd_struiordq = NULL; 3245 stp->sd_struiodnak = 0; 3246 stp->sd_struionak = NULL; 3247 #ifdef C2_AUDIT 3248 stp->sd_t_audit_data = NULL; 3249 #endif 3250 stp->sd_rput_opt = 0; 3251 stp->sd_wput_opt = 0; 3252 stp->sd_read_opt = 0; 3253 stp->sd_rprotofunc = strrput_proto; 3254 stp->sd_rmiscfunc = strrput_misc; 3255 stp->sd_rderrfunc = stp->sd_wrerrfunc = NULL; 3256 stp->sd_rputdatafunc = stp->sd_wputdatafunc = NULL; 3257 stp->sd_ciputctrl = NULL; 3258 stp->sd_nciputctrl = 0; 3259 stp->sd_qhead = NULL; 3260 stp->sd_qtail = NULL; 3261 stp->sd_servid = NULL; 3262 stp->sd_nqueues = 0; 3263 stp->sd_svcflags = 0; 3264 stp->sd_copyflag = 0; 3265 3266 return (stp); 3267 } 3268 3269 /* 3270 * Free a stream head. 
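* Editorial note: sd_lock must not be held on entry, and shfree() waits for any scheduled stream head service (STRS_SCHEDULED) to drain before freeing, as the assertion and loop below show.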
3271 */ 3272 void 3273 shfree(stdata_t *stp) 3274 { 3275 ASSERT(MUTEX_NOT_HELD(&stp->sd_lock)); 3276 3277 stp->sd_wrq = NULL; 3278 3279 mutex_enter(&stp->sd_qlock); 3280 while (stp->sd_svcflags & STRS_SCHEDULED) { 3281 STRSTAT(strwaits); 3282 cv_wait(&stp->sd_qcv, &stp->sd_qlock); 3283 } 3284 mutex_exit(&stp->sd_qlock); 3285 3286 if (stp->sd_ciputctrl != NULL) { 3287 ASSERT(stp->sd_nciputctrl == n_ciputctrl - 1); 3288 SUMCHECK_CIPUTCTRL_COUNTS(stp->sd_ciputctrl, 3289 stp->sd_nciputctrl, 0); 3290 ASSERT(ciputctrl_cache != NULL); 3291 kmem_cache_free(ciputctrl_cache, stp->sd_ciputctrl); 3292 stp->sd_ciputctrl = NULL; 3293 stp->sd_nciputctrl = 0; 3294 } 3295 ASSERT(stp->sd_qhead == NULL); 3296 ASSERT(stp->sd_qtail == NULL); 3297 ASSERT(stp->sd_nqueues == 0); 3298 kmem_cache_free(stream_head_cache, stp); 3299 } 3300 3301 /* 3302 * Allocate a pair of queues and a syncq for the pair 3303 */ 3304 queue_t * 3305 allocq(void) 3306 { 3307 queinfo_t *qip; 3308 queue_t *qp, *wqp; 3309 syncq_t *sq; 3310 3311 qip = kmem_cache_alloc(queue_cache, KM_SLEEP); 3312 3313 qp = &qip->qu_rqueue; 3314 wqp = &qip->qu_wqueue; 3315 sq = &qip->qu_syncq; 3316 3317 qp->q_last = NULL; 3318 qp->q_next = NULL; 3319 qp->q_ptr = NULL; 3320 qp->q_flag = QUSE | QREADR; 3321 qp->q_bandp = NULL; 3322 qp->q_stream = NULL; 3323 qp->q_syncq = sq; 3324 qp->q_nband = 0; 3325 qp->q_nfsrv = NULL; 3326 qp->q_draining = 0; 3327 qp->q_syncqmsgs = 0; 3328 qp->q_spri = 0; 3329 qp->q_qtstamp = 0; 3330 qp->q_sqtstamp = 0; 3331 qp->q_fp = NULL; 3332 3333 wqp->q_last = NULL; 3334 wqp->q_next = NULL; 3335 wqp->q_ptr = NULL; 3336 wqp->q_flag = QUSE; 3337 wqp->q_bandp = NULL; 3338 wqp->q_stream = NULL; 3339 wqp->q_syncq = sq; 3340 wqp->q_nband = 0; 3341 wqp->q_nfsrv = NULL; 3342 wqp->q_draining = 0; 3343 wqp->q_syncqmsgs = 0; 3344 wqp->q_qtstamp = 0; 3345 wqp->q_sqtstamp = 0; 3346 wqp->q_spri = 0; 3347 3348 sq->sq_count = 0; 3349 sq->sq_rmqcount = 0; 3350 sq->sq_flags = 0; 3351 sq->sq_type = 0; 3352 sq->sq_callbflags = 0; 3353 sq->sq_cancelid = 0; 3354 sq->sq_ciputctrl = NULL; 3355 sq->sq_nciputctrl = 0; 3356 sq->sq_needexcl = 0; 3357 sq->sq_svcflags = 0; 3358 3359 return (qp); 3360 } 3361 3362 /* 3363 * Free a pair of queues and the "attached" syncq. 3364 * Discard any messages left on the syncq(s), remove the syncq(s) from the 3365 * outer perimeter, and free the syncq(s) if they are not the "attached" syncq. 3366 */ 3367 void 3368 freeq(queue_t *qp) 3369 { 3370 qband_t *qbp, *nqbp; 3371 syncq_t *sq, *outer; 3372 queue_t *wqp = _WR(qp); 3373 3374 ASSERT(qp->q_flag & QREADR); 3375 3376 /* 3377 * If a previously dispatched taskq job is scheduled to run 3378 * sync_service() or a service routine is scheduled for the 3379 * queues about to be freed, wait here until all service is 3380 * done on the queue and all associated queues and syncqs. 3381 */ 3382 wait_svc(qp); 3383 3384 (void) flush_syncq(qp->q_syncq, qp); 3385 (void) flush_syncq(wqp->q_syncq, wqp); 3386 ASSERT(qp->q_syncqmsgs == 0 && wqp->q_syncqmsgs == 0); 3387 3388 /* 3389 * Flush the queues before q_next is set to NULL This is needed 3390 * in order to backenable any downstream queue before we go away. 3391 * Note: we are already removed from the stream so that the 3392 * backenabling will not cause any messages to be delivered to our 3393 * put procedures. 
3394 */ 3395 flushq(qp, FLUSHALL); 3396 flushq(wqp, FLUSHALL); 3397 3398 /* Tidy up - removeq only does a half-remove from stream */ 3399 qp->q_next = wqp->q_next = NULL; 3400 ASSERT(!(qp->q_flag & QENAB)); 3401 ASSERT(!(wqp->q_flag & QENAB)); 3402 3403 outer = qp->q_syncq->sq_outer; 3404 if (outer != NULL) { 3405 outer_remove(outer, qp->q_syncq); 3406 if (wqp->q_syncq != qp->q_syncq) 3407 outer_remove(outer, wqp->q_syncq); 3408 } 3409 /* 3410 * Free any syncqs that are outside what allocq returned. 3411 */ 3412 if (qp->q_syncq != SQ(qp) && !(qp->q_flag & QPERMOD)) 3413 free_syncq(qp->q_syncq); 3414 if (qp->q_syncq != wqp->q_syncq && wqp->q_syncq != SQ(qp)) 3415 free_syncq(wqp->q_syncq); 3416 3417 ASSERT((qp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0); 3418 ASSERT((wqp->q_sqflags & (Q_SQQUEUED | Q_SQDRAINING)) == 0); 3419 ASSERT(MUTEX_NOT_HELD(QLOCK(qp))); 3420 ASSERT(MUTEX_NOT_HELD(QLOCK(wqp))); 3421 sq = SQ(qp); 3422 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq))); 3423 ASSERT(sq->sq_head == NULL && sq->sq_tail == NULL); 3424 ASSERT(sq->sq_outer == NULL); 3425 ASSERT(sq->sq_onext == NULL && sq->sq_oprev == NULL); 3426 ASSERT(sq->sq_callbpend == NULL); 3427 ASSERT(sq->sq_needexcl == 0); 3428 3429 if (sq->sq_ciputctrl != NULL) { 3430 ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1); 3431 SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl, 3432 sq->sq_nciputctrl, 0); 3433 ASSERT(ciputctrl_cache != NULL); 3434 kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl); 3435 sq->sq_ciputctrl = NULL; 3436 sq->sq_nciputctrl = 0; 3437 } 3438 3439 ASSERT(qp->q_first == NULL && wqp->q_first == NULL); 3440 ASSERT(qp->q_count == 0 && wqp->q_count == 0); 3441 ASSERT(qp->q_mblkcnt == 0 && wqp->q_mblkcnt == 0); 3442 3443 qp->q_flag &= ~QUSE; 3444 wqp->q_flag &= ~QUSE; 3445 3446 /* NOTE: Uncomment the assert below once bugid 1159635 is fixed. */ 3447 /* ASSERT((qp->q_flag & QWANTW) == 0 && (wqp->q_flag & QWANTW) == 0); */ 3448 3449 qbp = qp->q_bandp; 3450 while (qbp) { 3451 nqbp = qbp->qb_next; 3452 freeband(qbp); 3453 qbp = nqbp; 3454 } 3455 qbp = wqp->q_bandp; 3456 while (qbp) { 3457 nqbp = qbp->qb_next; 3458 freeband(qbp); 3459 qbp = nqbp; 3460 } 3461 kmem_cache_free(queue_cache, qp); 3462 } 3463 3464 /* 3465 * Allocate a qband structure. 3466 */ 3467 qband_t * 3468 allocband(void) 3469 { 3470 qband_t *qbp; 3471 3472 qbp = kmem_cache_alloc(qband_cache, KM_NOSLEEP); 3473 if (qbp == NULL) 3474 return (NULL); 3475 3476 qbp->qb_next = NULL; 3477 qbp->qb_count = 0; 3478 qbp->qb_mblkcnt = 0; 3479 qbp->qb_first = NULL; 3480 qbp->qb_last = NULL; 3481 qbp->qb_flag = 0; 3482 3483 return (qbp); 3484 } 3485 3486 /* 3487 * Free a qband structure. 3488 */ 3489 void 3490 freeband(qband_t *qbp) 3491 { 3492 kmem_cache_free(qband_cache, qbp); 3493 } 3494 3495 /* 3496 * Just like putnextctl(9F), except that allocb_wait() is used. 3497 * 3498 * Consolidation Private, and of course only callable from the stream head or 3499 * routines that may block. 3500 */ 3501 int 3502 putnextctl_wait(queue_t *q, int type) 3503 { 3504 mblk_t *bp; 3505 int error; 3506 3507 if ((datamsg(type) && (type != M_DELAY)) || 3508 (bp = allocb_wait(0, BPRI_HI, 0, &error)) == NULL) 3509 return (0); 3510 3511 bp->b_datap->db_type = (unsigned char)type; 3512 putnext(q, bp); 3513 return (1); 3514 } 3515 3516 /* 3517 * run any possible bufcalls. 
3518 */ 3519 void 3520 runbufcalls(void) 3521 { 3522 strbufcall_t *bcp; 3523 3524 mutex_enter(&bcall_monitor); 3525 mutex_enter(&strbcall_lock); 3526 3527 if (strbcalls.bc_head) { 3528 size_t count; 3529 int nevent; 3530 3531 /* 3532 * count how many events are on the list 3533 * now so we can check to avoid looping 3534 * in low memory situations 3535 */ 3536 nevent = 0; 3537 for (bcp = strbcalls.bc_head; bcp; bcp = bcp->bc_next) 3538 nevent++; 3539 3540 /* 3541 * get estimate of available memory from kmem_avail(). 3542 * awake all bufcall functions waiting for 3543 * memory whose request could be satisfied 3544 * by 'count' memory and let 'em fight for it. 3545 */ 3546 count = kmem_avail(); 3547 while ((bcp = strbcalls.bc_head) != NULL && nevent) { 3548 STRSTAT(bufcalls); 3549 --nevent; 3550 if (bcp->bc_size <= count) { 3551 bcp->bc_executor = curthread; 3552 mutex_exit(&strbcall_lock); 3553 (*bcp->bc_func)(bcp->bc_arg); 3554 mutex_enter(&strbcall_lock); 3555 bcp->bc_executor = NULL; 3556 cv_broadcast(&bcall_cv); 3557 strbcalls.bc_head = bcp->bc_next; 3558 kmem_free(bcp, sizeof (strbufcall_t)); 3559 } else { 3560 /* 3561 * too big, try again later - note 3562 * that nevent was decremented above 3563 * so we won't retry this one on this 3564 * iteration of the loop 3565 */ 3566 if (bcp->bc_next != NULL) { 3567 strbcalls.bc_head = bcp->bc_next; 3568 bcp->bc_next = NULL; 3569 strbcalls.bc_tail->bc_next = bcp; 3570 strbcalls.bc_tail = bcp; 3571 } 3572 } 3573 } 3574 if (strbcalls.bc_head == NULL) 3575 strbcalls.bc_tail = NULL; 3576 } 3577 3578 mutex_exit(&strbcall_lock); 3579 mutex_exit(&bcall_monitor); 3580 } 3581 3582 3583 /* 3584 * actually run queue's service routine. 3585 */ 3586 static void 3587 runservice(queue_t *q) 3588 { 3589 qband_t *qbp; 3590 3591 ASSERT(q->q_qinfo->qi_srvp); 3592 again: 3593 entersq(q->q_syncq, SQ_SVC); 3594 TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_START, 3595 "runservice starts:%p", q); 3596 3597 if (!(q->q_flag & QWCLOSE)) 3598 (*q->q_qinfo->qi_srvp)(q); 3599 3600 TRACE_1(TR_FAC_STREAMS_FR, TR_QRUNSERVICE_END, 3601 "runservice ends:(%p)", q); 3602 3603 leavesq(q->q_syncq, SQ_SVC); 3604 3605 mutex_enter(QLOCK(q)); 3606 if (q->q_flag & QENAB) { 3607 q->q_flag &= ~QENAB; 3608 mutex_exit(QLOCK(q)); 3609 goto again; 3610 } 3611 q->q_flag &= ~QINSERVICE; 3612 q->q_flag &= ~QBACK; 3613 for (qbp = q->q_bandp; qbp; qbp = qbp->qb_next) 3614 qbp->qb_flag &= ~QB_BACK; 3615 /* 3616 * Wakeup thread waiting for the service procedure 3617 * to be run (strclose and qdetach). 3618 */ 3619 cv_broadcast(&q->q_wait); 3620 3621 mutex_exit(QLOCK(q)); 3622 } 3623 3624 /* 3625 * Background processing of bufcalls. 
3626 */ 3627 void 3628 streams_bufcall_service(void) 3629 { 3630 callb_cpr_t cprinfo; 3631 3632 CALLB_CPR_INIT(&cprinfo, &strbcall_lock, callb_generic_cpr, 3633 "streams_bufcall_service"); 3634 3635 mutex_enter(&strbcall_lock); 3636 3637 for (;;) { 3638 if (strbcalls.bc_head != NULL && kmem_avail() > 0) { 3639 mutex_exit(&strbcall_lock); 3640 runbufcalls(); 3641 mutex_enter(&strbcall_lock); 3642 } 3643 if (strbcalls.bc_head != NULL) { 3644 clock_t wt, tick; 3645 3646 STRSTAT(bcwaits); 3647 /* Wait for memory to become available */ 3648 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3649 tick = SEC_TO_TICK(60); 3650 time_to_wait(&wt, tick); 3651 (void) cv_timedwait(&memavail_cv, &strbcall_lock, wt); 3652 CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock); 3653 } 3654 3655 /* Wait for new work to arrive */ 3656 if (strbcalls.bc_head == NULL) { 3657 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3658 cv_wait(&strbcall_cv, &strbcall_lock); 3659 CALLB_CPR_SAFE_END(&cprinfo, &strbcall_lock); 3660 } 3661 } 3662 } 3663 3664 /* 3665 * Background processing of streams background tasks which failed 3666 * taskq_dispatch. 3667 */ 3668 static void 3669 streams_qbkgrnd_service(void) 3670 { 3671 callb_cpr_t cprinfo; 3672 queue_t *q; 3673 3674 CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr, 3675 "streams_bkgrnd_service"); 3676 3677 mutex_enter(&service_queue); 3678 3679 for (;;) { 3680 /* 3681 * Wait for work to arrive. 3682 */ 3683 while ((freebs_list == NULL) && (qhead == NULL)) { 3684 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3685 cv_wait(&services_to_run, &service_queue); 3686 CALLB_CPR_SAFE_END(&cprinfo, &service_queue); 3687 } 3688 /* 3689 * Handle all pending freebs requests to free memory. 3690 */ 3691 while (freebs_list != NULL) { 3692 mblk_t *mp = freebs_list; 3693 freebs_list = mp->b_next; 3694 mutex_exit(&service_queue); 3695 mblk_free(mp); 3696 mutex_enter(&service_queue); 3697 } 3698 /* 3699 * Run pending queues. 3700 */ 3701 while (qhead != NULL) { 3702 DQ(q, qhead, qtail, q_link); 3703 ASSERT(q != NULL); 3704 mutex_exit(&service_queue); 3705 queue_service(q); 3706 mutex_enter(&service_queue); 3707 } 3708 ASSERT(qhead == NULL && qtail == NULL); 3709 } 3710 } 3711 3712 /* 3713 * Background processing of streams background tasks which failed 3714 * taskq_dispatch. 3715 */ 3716 static void 3717 streams_sqbkgrnd_service(void) 3718 { 3719 callb_cpr_t cprinfo; 3720 syncq_t *sq; 3721 3722 CALLB_CPR_INIT(&cprinfo, &service_queue, callb_generic_cpr, 3723 "streams_sqbkgrnd_service"); 3724 3725 mutex_enter(&service_queue); 3726 3727 for (;;) { 3728 /* 3729 * Wait for work to arrive. 3730 */ 3731 while (sqhead == NULL) { 3732 CALLB_CPR_SAFE_BEGIN(&cprinfo); 3733 cv_wait(&syncqs_to_run, &service_queue); 3734 CALLB_CPR_SAFE_END(&cprinfo, &service_queue); 3735 } 3736 3737 /* 3738 * Run pending syncqs. 3739 */ 3740 while (sqhead != NULL) { 3741 DQ(sq, sqhead, sqtail, sq_next); 3742 ASSERT(sq != NULL); 3743 ASSERT(sq->sq_svcflags & SQ_BGTHREAD); 3744 mutex_exit(&service_queue); 3745 syncq_service(sq); 3746 mutex_enter(&service_queue); 3747 } 3748 } 3749 } 3750 3751 /* 3752 * Disable the syncq and wait for background syncq processing to complete. 3753 * If the syncq is placed on the sqhead/sqtail queue, try to remove it from the 3754 * list. 
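* (Editorial cross-reference: mlink() above quiesces the stream head syncq this way before calling setq(), and rele_dm() waits here before freeing a perdm syncq.)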
3755 */ 3756 void 3757 wait_sq_svc(syncq_t *sq) 3758 { 3759 mutex_enter(SQLOCK(sq)); 3760 sq->sq_svcflags |= SQ_DISABLED; 3761 if (sq->sq_svcflags & SQ_BGTHREAD) { 3762 syncq_t *sq_chase; 3763 syncq_t *sq_curr; 3764 int removed; 3765 3766 ASSERT(sq->sq_servcount == 1); 3767 mutex_enter(&service_queue); 3768 RMQ(sq, sqhead, sqtail, sq_next, sq_chase, sq_curr, removed); 3769 mutex_exit(&service_queue); 3770 if (removed) { 3771 sq->sq_svcflags &= ~SQ_BGTHREAD; 3772 sq->sq_servcount = 0; 3773 STRSTAT(sqremoved); 3774 goto done; 3775 } 3776 } 3777 while (sq->sq_servcount != 0) { 3778 sq->sq_flags |= SQ_WANTWAKEUP; 3779 cv_wait(&sq->sq_wait, SQLOCK(sq)); 3780 } 3781 done: 3782 mutex_exit(SQLOCK(sq)); 3783 } 3784 3785 /* 3786 * Put a syncq on the list of syncqs to be serviced by the sqthread. 3787 * Add the argument to the end of the sqhead list and set the flag 3788 * indicating this syncq has been enabled. If it has already been 3789 * enabled, don't do anything. 3790 * This routine assumes that SQLOCK is held. 3791 * NOTE that the lock order is to have the SQLOCK first, 3792 * so if the service_queue lock is held, we need to release it 3793 * before acquiring the SQLOCK (mostly relevant for the background 3794 * thread, and this seems to be common among the STREAMS global locks). 3795 * Note that the sq_svcflags are protected by the SQLOCK. 3796 */ 3797 void 3798 sqenable(syncq_t *sq) 3799 { 3800 /* 3801 * This is probably not important except for where I believe it 3802 * is being called. At that point, it should be held (and it 3803 * is a pain to release it just for this routine, so don't do 3804 * it). 3805 */ 3806 ASSERT(MUTEX_HELD(SQLOCK(sq))); 3807 3808 IMPLY(sq->sq_servcount == 0, sq->sq_next == NULL); 3809 IMPLY(sq->sq_next != NULL, sq->sq_svcflags & SQ_BGTHREAD); 3810 3811 /* 3812 * Do not put on list if background thread is scheduled or 3813 * syncq is disabled. 3814 */ 3815 if (sq->sq_svcflags & (SQ_DISABLED | SQ_BGTHREAD)) 3816 return; 3817 3818 /* 3819 * Check whether we should enable sq at all. 3820 * Non PERMOD syncqs may be drained by at most one thread. 3821 * PERMOD syncqs may be drained by several threads but we limit the 3822 * total amount to the lesser of 3823 * the number of queues on the syncq and 3824 * the number of CPUs. 3825 */ 3826 if (sq->sq_servcount != 0) { 3827 if (((sq->sq_type & SQ_PERMOD) == 0) || 3828 (sq->sq_servcount >= MIN(sq->sq_nqueues, ncpus_online))) { 3829 STRSTAT(sqtoomany); 3830 return; 3831 } 3832 } 3833 3834 sq->sq_tstamp = lbolt; 3835 STRSTAT(sqenables); 3836 3837 /* Attempt a taskq dispatch */ 3838 sq->sq_servid = (void *)taskq_dispatch(streams_taskq, 3839 (task_func_t *)syncq_service, sq, TQ_NOSLEEP | TQ_NOQUEUE); 3840 if (sq->sq_servid != NULL) { 3841 sq->sq_servcount++; 3842 return; 3843 } 3844 3845 /* 3846 * This taskq dispatch failed, but a previous one may have succeeded. 3847 * Don't try to schedule on the background thread whilst there is 3848 * outstanding taskq processing. 3849 */ 3850 if (sq->sq_servcount != 0) 3851 return; 3852 3853 /* 3854 * System is low on resources and can't perform a non-sleeping 3855 * dispatch. Schedule the syncq for a background thread and mark the 3856 * syncq to avoid any further taskq dispatch attempts.
3857 */ 3858 mutex_enter(&service_queue); 3859 STRSTAT(taskqfails); 3860 ENQUEUE(sq, sqhead, sqtail, sq_next); 3861 sq->sq_svcflags |= SQ_BGTHREAD; 3862 sq->sq_servcount = 1; 3863 cv_signal(&syncqs_to_run); 3864 mutex_exit(&service_queue); 3865 } 3866 3867 /* 3868 * Note: fifo_close() depends on the mblk_t on the queue being freed 3869 * asynchronously. The asynchronous freeing of messages breaks the 3870 * recursive call chain of fifo_close() while there are I_SENDFD type 3871 * messages referring to other file pointers on the queue. This way, 3872 * closing pipes avoids stack overflow in the case of daisy-chained 3873 * pipes, and also avoids deadlock in the case of fifonode_t pairs (which 3874 * share the same fifolock_t). 3875 */ 3876 3877 /* ARGSUSED */ 3878 void 3879 freebs_enqueue(mblk_t *mp, dblk_t *dbp) 3880 { 3881 ASSERT(dbp->db_mblk == mp); 3882 3883 /* 3884 * Check data sanity. The dblock should have a non-empty free function. 3885 * It is better to panic here than later, when the dblock is freed 3886 * asynchronously and the context is lost. 3887 */ 3888 if (dbp->db_frtnp->free_func == NULL) { 3889 panic("freebs_enqueue: dblock %p has a NULL free callback", 3890 (void *) dbp); 3891 } 3892 3893 STRSTAT(freebs); 3894 if (taskq_dispatch(streams_taskq, (task_func_t *)mblk_free, mp, 3895 TQ_NOSLEEP) == NULL) { 3896 /* 3897 * System is low on resources and can't perform a non-sleeping 3898 * dispatch. Schedule for a background thread. 3899 */ 3900 mutex_enter(&service_queue); 3901 STRSTAT(taskqfails); 3902 mp->b_next = freebs_list; 3903 freebs_list = mp; 3904 cv_signal(&services_to_run); 3905 mutex_exit(&service_queue); 3906 } 3907 } 3908 3909 /* 3910 * Set the QBACK or QB_BACK flag in the given queue for 3911 * the given priority band. 3912 */ 3913 void 3914 setqback(queue_t *q, unsigned char pri) 3915 { 3916 int i; 3917 qband_t *qbp; 3918 qband_t **qbpp; 3919 3920 ASSERT(MUTEX_HELD(QLOCK(q))); 3921 if (pri != 0) { 3922 if (pri > q->q_nband) { 3923 qbpp = &q->q_bandp; 3924 while (*qbpp) 3925 qbpp = &(*qbpp)->qb_next; 3926 while (pri > q->q_nband) { 3927 if ((*qbpp = allocband()) == NULL) { 3928 cmn_err(CE_WARN, 3929 "setqback: can't allocate qband\n"); 3930 return; 3931 } 3932 (*qbpp)->qb_hiwat = q->q_hiwat; 3933 (*qbpp)->qb_lowat = q->q_lowat; 3934 q->q_nband++; 3935 qbpp = &(*qbpp)->qb_next; 3936 } 3937 } 3938 qbp = q->q_bandp; 3939 i = pri; 3940 while (--i) 3941 qbp = qbp->qb_next; 3942 qbp->qb_flag |= QB_BACK; 3943 } else { 3944 q->q_flag |= QBACK; 3945 } 3946 } 3947 3948 int 3949 strcopyin(void *from, void *to, size_t len, int copyflag) 3950 { 3951 if (copyflag & U_TO_K) { 3952 ASSERT((copyflag & K_TO_K) == 0); 3953 if (copyin(from, to, len)) 3954 return (EFAULT); 3955 } else { 3956 ASSERT(copyflag & K_TO_K); 3957 bcopy(from, to, len); 3958 } 3959 return (0); 3960 } 3961 3962 int 3963 strcopyout(void *from, void *to, size_t len, int copyflag) 3964 { 3965 if (copyflag & U_TO_K) { 3966 if (copyout(from, to, len)) 3967 return (EFAULT); 3968 } else { 3969 ASSERT(copyflag & K_TO_K); 3970 bcopy(from, to, len); 3971 } 3972 return (0); 3973 } 3974 3975 /* 3976 * strsignal_nolock() posts a signal to the process(es) at the stream head. 3977 * It assumes that the stream head lock is already held, whereas strsignal() 3978 * acquires the lock first. This routine was created because a few callers 3979 * would otherwise have to release the stream head lock before calling 3980 * strsignal(), only to re-acquire it after it returns.
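* (Illustrative use, sketch only: mutex_enter(&stp->sd_lock); ... strsignal_nolock(stp, SIGPOLL, band); mutex_exit(&stp->sd_lock); whereas strsignal() below takes and drops sd_lock itself.)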
3981 */ 3982 void 3983 strsignal_nolock(stdata_t *stp, int sig, int32_t band) 3984 { 3985 ASSERT(MUTEX_HELD(&stp->sd_lock)); 3986 switch (sig) { 3987 case SIGPOLL: 3988 if (stp->sd_sigflags & S_MSG) 3989 strsendsig(stp->sd_siglist, S_MSG, (uchar_t)band, 0); 3990 break; 3991 3992 default: 3993 if (stp->sd_pgidp) { 3994 pgsignal(stp->sd_pgidp, sig); 3995 } 3996 break; 3997 } 3998 } 3999 4000 void 4001 strsignal(stdata_t *stp, int sig, int32_t band) 4002 { 4003 TRACE_3(TR_FAC_STREAMS_FR, TR_SENDSIG, 4004 "strsignal:%p, %X, %X", stp, sig, band); 4005 4006 mutex_enter(&stp->sd_lock); 4007 switch (sig) { 4008 case SIGPOLL: 4009 if (stp->sd_sigflags & S_MSG) 4010 strsendsig(stp->sd_siglist, S_MSG, (uchar_t)band, 0); 4011 break; 4012 4013 default: 4014 if (stp->sd_pgidp) { 4015 pgsignal(stp->sd_pgidp, sig); 4016 } 4017 break; 4018 } 4019 mutex_exit(&stp->sd_lock); 4020 } 4021 4022 void 4023 strhup(stdata_t *stp) 4024 { 4025 ASSERT(mutex_owned(&stp->sd_lock)); 4026 pollwakeup(&stp->sd_pollist, POLLHUP); 4027 if (stp->sd_sigflags & S_HANGUP) 4028 strsendsig(stp->sd_siglist, S_HANGUP, 0, 0); 4029 } 4030 4031 /* 4032 * Backenable the first queue upstream from `q' with a service procedure. 4033 */ 4034 void 4035 backenable(queue_t *q, uchar_t pri) 4036 { 4037 queue_t *nq; 4038 4039 /* 4040 * Our presence might not prevent other modules in our own 4041 * stream from popping/pushing since the caller of getq might not 4042 * have a claim on the queue (some drivers do a getq on somebody 4043 * else's queue - they know that the queue itself is not going away 4044 * but the framework has to guarantee q_next in that stream). 4045 */ 4046 claimstr(q); 4047 4048 /* find nearest back queue with service proc */ 4049 for (nq = backq(q); nq && !nq->q_qinfo->qi_srvp; nq = backq(nq)) { 4050 ASSERT(STRMATED(q->q_stream) || STREAM(q) == STREAM(nq)); 4051 } 4052 4053 if (nq) { 4054 kthread_t *freezer; 4055 /* 4056 * backenable can be called either with no locks held 4057 * or with the stream frozen (the latter occurs when a module 4058 * calls rmvq with the stream frozen.) If the stream is frozen 4059 * by the caller the caller will hold all qlocks in the stream. 4060 */ 4061 freezer = STREAM(q)->sd_freezer; 4062 if (freezer != curthread) { 4063 mutex_enter(QLOCK(nq)); 4064 } 4065 #ifdef DEBUG 4066 else { 4067 ASSERT(frozenstr(q)); 4068 ASSERT(MUTEX_HELD(QLOCK(q))); 4069 ASSERT(MUTEX_HELD(QLOCK(nq))); 4070 } 4071 #endif 4072 setqback(nq, pri); 4073 qenable_locked(nq); 4074 if (freezer != curthread) 4075 mutex_exit(QLOCK(nq)); 4076 } 4077 releasestr(q); 4078 } 4079 4080 /* 4081 * Return the appropriate errno when one of flags_to_check is set 4082 * in sd_flag. Uses the exported error routines if they are set. 4083 * Will return 0 if no error is set (or if the exported error routines 4084 * do not return an error). 4085 * 4086 * If there is both a read and a write error to check we prefer the read error. 4087 * Also, give preference to recorded errnos over the error functions. 4088 * The flags that are handled are: 4089 * STPLEX return EINVAL 4090 * STRDERR return sd_rerror (and clear if STRDERRNONPERSIST) 4091 * STWRERR return sd_werror (and clear if STWRERRNONPERSIST) 4092 * STRHUP return sd_werror 4093 * 4094 * If the caller indicates that the operation is a peek, a nonpersistent error 4095 * is not cleared.
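* (Worked example, illustrative: with sd_flag containing STRDERR|STRHUP, sd_rerror == ECONNRESET and sd_werror == ENXIO, a check of STRDERR|STWRERR|STRHUP|STPLEX returns ECONNRESET; the read error takes precedence over the hangup's sd_werror.)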
4096 */ 4097 int 4098 strgeterr(stdata_t *stp, int32_t flags_to_check, int ispeek) 4099 { 4100 int32_t sd_flag = stp->sd_flag & flags_to_check; 4101 int error = 0; 4102 4103 ASSERT(MUTEX_HELD(&stp->sd_lock)); 4104 ASSERT((flags_to_check & ~(STRDERR|STWRERR|STRHUP|STPLEX)) == 0); 4105 if (sd_flag & STPLEX) 4106 error = EINVAL; 4107 else if (sd_flag & STRDERR) { 4108 error = stp->sd_rerror; 4109 if ((stp->sd_flag & STRDERRNONPERSIST) && !ispeek) { 4110 /* 4111 * Read errors are non-persistent i.e. discarded once 4112 * returned to a non-peeking caller, 4113 */ 4114 stp->sd_rerror = 0; 4115 stp->sd_flag &= ~STRDERR; 4116 } 4117 if (error == 0 && stp->sd_rderrfunc != NULL) { 4118 int clearerr = 0; 4119 4120 error = (*stp->sd_rderrfunc)(stp->sd_vnode, ispeek, 4121 &clearerr); 4122 if (clearerr) { 4123 stp->sd_flag &= ~STRDERR; 4124 stp->sd_rderrfunc = NULL; 4125 } 4126 } 4127 } else if (sd_flag & STWRERR) { 4128 error = stp->sd_werror; 4129 if ((stp->sd_flag & STWRERRNONPERSIST) && !ispeek) { 4130 /* 4131 * Write errors are non-persistent i.e. discarded once 4132 * returned to a non-peeking caller, 4133 */ 4134 stp->sd_werror = 0; 4135 stp->sd_flag &= ~STWRERR; 4136 } 4137 if (error == 0 && stp->sd_wrerrfunc != NULL) { 4138 int clearerr = 0; 4139 4140 error = (*stp->sd_wrerrfunc)(stp->sd_vnode, ispeek, 4141 &clearerr); 4142 if (clearerr) { 4143 stp->sd_flag &= ~STWRERR; 4144 stp->sd_wrerrfunc = NULL; 4145 } 4146 } 4147 } else if (sd_flag & STRHUP) { 4148 /* sd_werror set when STRHUP */ 4149 error = stp->sd_werror; 4150 } 4151 return (error); 4152 } 4153 4154 4155 /* 4156 * single-thread open/close/push/pop 4157 * for twisted streams also 4158 */ 4159 int 4160 strstartplumb(stdata_t *stp, int flag, int cmd) 4161 { 4162 int waited = 1; 4163 int error = 0; 4164 4165 if (STRMATED(stp)) { 4166 struct stdata *stmatep = stp->sd_mate; 4167 4168 STRLOCKMATES(stp); 4169 while (waited) { 4170 waited = 0; 4171 while (stmatep->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 4172 if ((cmd == I_POP) && 4173 (flag & (FNDELAY|FNONBLOCK))) { 4174 STRUNLOCKMATES(stp); 4175 return (EAGAIN); 4176 } 4177 waited = 1; 4178 mutex_exit(&stp->sd_lock); 4179 if (!cv_wait_sig(&stmatep->sd_monitor, 4180 &stmatep->sd_lock)) { 4181 mutex_exit(&stmatep->sd_lock); 4182 return (EINTR); 4183 } 4184 mutex_exit(&stmatep->sd_lock); 4185 STRLOCKMATES(stp); 4186 } 4187 while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 4188 if ((cmd == I_POP) && 4189 (flag & (FNDELAY|FNONBLOCK))) { 4190 STRUNLOCKMATES(stp); 4191 return (EAGAIN); 4192 } 4193 waited = 1; 4194 mutex_exit(&stmatep->sd_lock); 4195 if (!cv_wait_sig(&stp->sd_monitor, 4196 &stp->sd_lock)) { 4197 mutex_exit(&stp->sd_lock); 4198 return (EINTR); 4199 } 4200 mutex_exit(&stp->sd_lock); 4201 STRLOCKMATES(stp); 4202 } 4203 if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) { 4204 error = strgeterr(stp, 4205 STRDERR|STWRERR|STRHUP|STPLEX, 0); 4206 if (error != 0) { 4207 STRUNLOCKMATES(stp); 4208 return (error); 4209 } 4210 } 4211 } 4212 stp->sd_flag |= STRPLUMB; 4213 STRUNLOCKMATES(stp); 4214 } else { 4215 mutex_enter(&stp->sd_lock); 4216 while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 4217 if (((cmd == I_POP) || (cmd == _I_REMOVE)) && 4218 (flag & (FNDELAY|FNONBLOCK))) { 4219 mutex_exit(&stp->sd_lock); 4220 return (EAGAIN); 4221 } 4222 if (!cv_wait_sig(&stp->sd_monitor, &stp->sd_lock)) { 4223 mutex_exit(&stp->sd_lock); 4224 return (EINTR); 4225 } 4226 if (stp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) { 4227 error = strgeterr(stp, 4228 STRDERR|STWRERR|STRHUP|STPLEX, 0); 4229 if 
(error != 0) {
					mutex_exit(&stp->sd_lock);
					return (error);
				}
			}
		}
		stp->sd_flag |= STRPLUMB;
		mutex_exit(&stp->sd_lock);
	}
	return (0);
}

/*
 * Complete the plumbing operation associated with stream `stp'.
 */
void
strendplumb(stdata_t *stp)
{
	ASSERT(MUTEX_HELD(&stp->sd_lock));
	ASSERT(stp->sd_flag & STRPLUMB);
	stp->sd_flag &= ~STRPLUMB;
	cv_broadcast(&stp->sd_monitor);
}

/*
 * This describes how the STREAMS framework handles synchronization
 * during open/push and close/pop.
 * The key interfaces for open and close are qprocson and qprocsoff,
 * respectively. While the close case in general is harder, open and
 * close have significant similarities.
 *
 * During close the STREAMS framework has to both ensure that there
 * are no stale references to the queue pair (and syncq) that
 * are being closed and also provide the guarantees that are documented
 * in qprocsoff(9F).
 * If there are stale references to the queue that is closing it can
 * result in kernel memory corruption or kernel panics.
 *
 * Note that it is up to the module/driver to ensure that it itself
 * does not have any stale references to the closing queues once its close
 * routine returns. This includes:
 * - Cancelling any timeout/bufcall/qtimeout/qbufcall callback routines
 *   associated with the queues. For timeout and bufcall callbacks the
 *   module/driver also has to wait for any such callbacks that are in
 *   progress to complete.
 * - If the module/driver is using esballoc it has to ensure that any
 *   esballoc free functions do not refer to a queue that has closed.
 *   (Note that in general the close routine can not wait for the esballoc'ed
 *   messages to be freed since that can cause a deadlock.)
 * - Cancelling any interrupts that refer to the closing queues and
 *   also ensuring that there are no interrupts in progress that will
 *   refer to the closing queues once the close routine returns.
 * - For multiplexors, removing any driver global state that refers to
 *   the closing queue and also ensuring that there are no threads in
 *   the multiplexor that have picked up a queue pointer but not yet
 *   finished using it.
 *
 * In addition, a driver/module can only reference the q_next pointer
 * in its open, close, put, or service procedures or in a
 * qtimeout/qbufcall callback procedure executing "on" the correct
 * stream. Thus it can not reference the q_next pointer in an interrupt
 * routine or a timeout, bufcall or esballoc callback routine. Likewise
 * it can not reference q_next of a different queue e.g. in a mux that
 * passes messages from one queue's put/service procedure to another queue.
 * In all the cases where the driver/module can not access the q_next
 * field it must use the *next* versions e.g. canputnext instead of
 * canput(q->q_next) and putnextctl instead of putctl(q->q_next, ...).
 *
 *
 * Assuming that the driver/module conforms to the above constraints
 * the STREAMS framework has to avoid stale references to q_next for all
 * the framework internal cases which include (but are not limited to):
 * - Threads in canput/canputnext/backenable and elsewhere that are
 *   walking q_next.
 * - Messages on a syncq that have a reference to the queue through b_queue.
 * - Messages on an outer perimeter (syncq) that have a reference to the
 *   queue through b_queue.
 * - Threads that use q_nfsrv (e.g. canput) to find a queue.
 *   Note that only canput and bcanput use q_nfsrv without any locking.
 *
 * The STREAMS framework providing the qprocsoff(9F) guarantees means that
 * after qprocsoff returns, the framework has to ensure that no threads can
 * enter the put or service routines for the closing read or write-side queue.
 * In addition to preventing "direct" entry into the put procedures
 * the framework also has to prevent messages being drained from
 * the syncq or the outer perimeter.
 * XXX Note that currently qdetach relies on D_MTOCEXCL as the only
 * mechanism to prevent qwriter(PERIM_OUTER) from running after
 * qprocsoff has returned.
 * Note that if a module/driver uses put(9F) on one of its own queues
 * it is up to the module/driver to ensure that the put() doesn't
 * get called when the queue is closing.
 *
 *
 * The framework aspects of the above "contract" are implemented by
 * qprocsoff, removeq, and strlock:
 * - qprocsoff (disable_svc) sets QWCLOSE to prevent runservice from
 *   entering the service procedures.
 * - strlock acquires the sd_lock and sd_reflock to prevent putnext,
 *   canputnext, backenable etc. from dereferencing the q_next that will
 *   soon change.
 * - strlock waits for sd_refcnt to be zero to wait for e.g. any canputnext
 *   or other q_next walker that uses claimstr/releasestr to finish.
 * - optionally for every syncq in the stream strlock acquires all the
 *   sq_lock's and waits for all sq_counts to drop to a value that indicates
 *   that no thread executes in the put or service procedures and that no
 *   thread is draining into the module/driver. This ensures that no
 *   open, close, put, service, or qtimeout/qbufcall callback procedure is
 *   currently executing, hence no such thread can end up with the old stale
 *   q_next value and no canput/backenable can have the old stale
 *   q_nfsrv/q_next.
 * - qdetach (wait_svc) makes sure that any scheduled or running threads
 *   have either finished or observed the QWCLOSE flag and gone away.
 */


/*
 * Get all the locks necessary to change q_next.
 *
 * Wait for sd_refcnt to reach 0 and, if sqlist is present, wait for the
 * sq_count of each syncq in the list to drop to sq_rmqcount, indicating that
 * the only threads inside the syncq are threads currently calling removeq().
 * Since threads calling removeq() are in the process of removing their queues
 * from the stream, we do not need to worry about them accessing a stale q_next
 * pointer and thus we do not need to wait for them to exit (in fact, waiting
 * for them can cause deadlock).
 *
 * This routine is subject to starvation since it does not set any flag to
 * prevent threads from entering a module in the stream (i.e. sq_count can
 * increase on some syncq while it is waiting on some other syncq).
 *
 * Assumes that only one thread attempts to call strlock for a given
 * stream. If this is not the case the two threads would deadlock.
 * This assumption is guaranteed since strlock is only called by insertq
 * and removeq and streams plumbing changes are single-threaded for
 * a given stream using the STWOPEN, STRCLOSE, and STRPLUMB flags.
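 *
 * Illustrative bracketing (this mirrors how removeq() below uses these
 * routines; insertq() passes a NULL sqlist):
 *
 *	sqlist = sqlist_build(qp, stp, STRMATED(stp));
 *	strlock(stp, sqlist);
 *	...change q_next and related pointers...
 *	strunlock(stp, sqlist);
 *	sqlist_free(sqlist);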
 *
 * For pipes, it is not difficult to atomically designate a pair of streams
 * to be mated. Once mated atomically by the framework the twisted pair remain
 * configured that way until dismantled atomically by the framework.
 * When plumbing takes place on a twisted stream it is necessary to ensure that
 * this operation is done exclusively on the twisted stream since two such
 * operations, each initiated on different ends of the pipe, will deadlock
 * waiting for each other to complete.
 *
 * On entry, no locks should be held.
 * The locks acquired and held by strlock depend on a few factors.
 * - If sqlist is non-NULL all the syncq locks in the sqlist will be acquired
 *   and held on exit and all sq_count are at an acceptable level.
 * - In all cases, sd_lock and sd_reflock are acquired and held on exit with
 *   sd_refcnt being zero.
 */

static void
strlock(struct stdata *stp, sqlist_t *sqlist)
{
	syncql_t *sql, *sql2;
retry:
	/*
	 * Wait for any claimstr to go away.
	 */
	if (STRMATED(stp)) {
		struct stdata *stp1, *stp2;

		STRLOCKMATES(stp);
		/*
		 * Note that the selection of locking order is not
		 * important, just that they are always acquired in
		 * the same order. To ensure this, we choose this
		 * order based on the value of the pointer, and since
		 * the pointer will not change for the life of this
		 * pair, we will always grab the locks in the same
		 * order (and hence, prevent deadlocks).
		 */
		if (&(stp->sd_lock) > &((stp->sd_mate)->sd_lock)) {
			stp1 = stp;
			stp2 = stp->sd_mate;
		} else {
			stp2 = stp;
			stp1 = stp->sd_mate;
		}
		mutex_enter(&stp1->sd_reflock);
		if (stp1->sd_refcnt > 0) {
			STRUNLOCKMATES(stp);
			cv_wait(&stp1->sd_refmonitor, &stp1->sd_reflock);
			mutex_exit(&stp1->sd_reflock);
			goto retry;
		}
		mutex_enter(&stp2->sd_reflock);
		if (stp2->sd_refcnt > 0) {
			STRUNLOCKMATES(stp);
			mutex_exit(&stp1->sd_reflock);
			cv_wait(&stp2->sd_refmonitor, &stp2->sd_reflock);
			mutex_exit(&stp2->sd_reflock);
			goto retry;
		}
		STREAM_PUTLOCKS_ENTER(stp1);
		STREAM_PUTLOCKS_ENTER(stp2);
	} else {
		mutex_enter(&stp->sd_lock);
		mutex_enter(&stp->sd_reflock);
		while (stp->sd_refcnt > 0) {
			mutex_exit(&stp->sd_lock);
			cv_wait(&stp->sd_refmonitor, &stp->sd_reflock);
			if (mutex_tryenter(&stp->sd_lock) == 0) {
				mutex_exit(&stp->sd_reflock);
				mutex_enter(&stp->sd_lock);
				mutex_enter(&stp->sd_reflock);
			}
		}
		STREAM_PUTLOCKS_ENTER(stp);
	}

	if (sqlist == NULL)
		return;

	for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) {
		syncq_t *sq = sql->sql_sq;
		uint16_t count;

		mutex_enter(SQLOCK(sq));
		count = sq->sq_count;
		ASSERT(sq->sq_rmqcount <= count);
		SQ_PUTLOCKS_ENTER(sq);
		SUM_SQ_PUTCOUNTS(sq, count);
		if (count == sq->sq_rmqcount)
			continue;

		/* Failed - drop all locks that we have acquired so far */
		if (STRMATED(stp)) {
			STREAM_PUTLOCKS_EXIT(stp);
			STREAM_PUTLOCKS_EXIT(stp->sd_mate);
			STRUNLOCKMATES(stp);
			mutex_exit(&stp->sd_reflock);
			mutex_exit(&stp->sd_mate->sd_reflock);
		} else {
			STREAM_PUTLOCKS_EXIT(stp);
			mutex_exit(&stp->sd_lock);
			mutex_exit(&stp->sd_reflock);
		}
		for (sql2 = sqlist->sqlist_head; sql2 != sql;
		    sql2 = sql2->sql_next) {
			SQ_PUTLOCKS_EXIT(sql2->sql_sq);
			mutex_exit(SQLOCK(sql2->sql_sq));
		}

		/*
		 * The wait loop below may starve when there are many threads
		 * claiming the syncq. This is especially a problem with permod
		 * syncqs (IP). To lessen the impact of the problem we increment
		 * sq_needexcl and clear fastbits so that putnexts will slow
		 * down and call sqenable instead of draining right away.
		 */
		sq->sq_needexcl++;
		SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
		while (count > sq->sq_rmqcount) {
			sq->sq_flags |= SQ_WANTWAKEUP;
			SQ_PUTLOCKS_EXIT(sq);
			cv_wait(&sq->sq_wait, SQLOCK(sq));
			count = sq->sq_count;
			SQ_PUTLOCKS_ENTER(sq);
			SUM_SQ_PUTCOUNTS(sq, count);
		}
		sq->sq_needexcl--;
		if (sq->sq_needexcl == 0)
			SQ_PUTCOUNT_SETFAST_LOCKED(sq);
		SQ_PUTLOCKS_EXIT(sq);
		ASSERT(count == sq->sq_rmqcount);
		mutex_exit(SQLOCK(sq));
		goto retry;
	}
}

/*
 * Drop all the locks that strlock acquired.
 */
static void
strunlock(struct stdata *stp, sqlist_t *sqlist)
{
	syncql_t *sql;

	if (STRMATED(stp)) {
		STREAM_PUTLOCKS_EXIT(stp);
		STREAM_PUTLOCKS_EXIT(stp->sd_mate);
		STRUNLOCKMATES(stp);
		mutex_exit(&stp->sd_reflock);
		mutex_exit(&stp->sd_mate->sd_reflock);
	} else {
		STREAM_PUTLOCKS_EXIT(stp);
		mutex_exit(&stp->sd_lock);
		mutex_exit(&stp->sd_reflock);
	}

	if (sqlist == NULL)
		return;

	for (sql = sqlist->sqlist_head; sql; sql = sql->sql_next) {
		SQ_PUTLOCKS_EXIT(sql->sql_sq);
		mutex_exit(SQLOCK(sql->sql_sq));
	}
}

/*
 * When the inserted module has a service procedure, we need to check
 * whether the next module with a service procedure is in flow control,
 * and if so trigger the backenable.
 */
static void
backenable_insertedq(queue_t *q)
{
	qband_t *qbp;

	claimstr(q);
	if (q->q_qinfo->qi_srvp != NULL && q->q_next != NULL) {
		if (q->q_next->q_nfsrv->q_flag & QWANTW)
			backenable(q, 0);

		qbp = q->q_next->q_nfsrv->q_bandp;
		for (; qbp != NULL; qbp = qbp->qb_next)
			if ((qbp->qb_flag & QB_WANTW) && qbp->qb_first != NULL)
				backenable(q, qbp->qb_first->b_band);
	}
	releasestr(q);
}

/*
 * Given two read queues, insert a new one after the other.
 *
 * This routine acquires all the necessary locks in order to change
 * q_next and related pointers using strlock().
 * It depends on the stream head ensuring that there are no concurrent
 * insertq or removeq on the same stream. The stream head ensures this
 * using the flags STWOPEN, STRCLOSE, and STRPLUMB.
 *
 * Note that no syncq locks are held during the q_next change. This is
 * applied to all streams since, unlike removeq, there is no problem of stale
 * pointers when adding a module to the stream. Thus drivers/modules that do a
 * canput(rq->q_next) would never get a closed/freed queue pointer even if we
 * applied this optimization to all streams.
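 *
 * A rough sketch of the push path as it relates to this routine (the
 * exact call chain through the stream head code lives elsewhere; the
 * ordering shown here is an assumption for illustration):
 *
 *	strstartplumb(stp, flag, I_PUSH);	(single-threads plumbing)
 *	...allocate and open the new queue pair...
 *	insertq(stp, new);			(splice it into the stream)
 *	mutex_enter(&stp->sd_lock);
 *	strendplumb(stp);			(wake up waiting plumbers)
 *	mutex_exit(&stp->sd_lock);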
4568 */ 4569 void 4570 insertq(struct stdata *stp, queue_t *new) 4571 { 4572 queue_t *after; 4573 queue_t *wafter; 4574 queue_t *wnew = _WR(new); 4575 boolean_t have_fifo = B_FALSE; 4576 4577 if (new->q_flag & _QINSERTING) { 4578 ASSERT(stp->sd_vnode->v_type != VFIFO); 4579 after = new->q_next; 4580 wafter = _WR(new->q_next); 4581 } else { 4582 after = _RD(stp->sd_wrq); 4583 wafter = stp->sd_wrq; 4584 } 4585 4586 TRACE_2(TR_FAC_STREAMS_FR, TR_INSERTQ, 4587 "insertq:%p, %p", after, new); 4588 ASSERT(after->q_flag & QREADR); 4589 ASSERT(new->q_flag & QREADR); 4590 4591 strlock(stp, NULL); 4592 4593 /* Do we have a FIFO? */ 4594 if (wafter->q_next == after) { 4595 have_fifo = B_TRUE; 4596 wnew->q_next = new; 4597 } else { 4598 wnew->q_next = wafter->q_next; 4599 } 4600 new->q_next = after; 4601 4602 set_nfsrv_ptr(new, wnew, after, wafter); 4603 /* 4604 * set_nfsrv_ptr() needs to know if this is an insertion or not, 4605 * so only reset this flag after calling it. 4606 */ 4607 new->q_flag &= ~_QINSERTING; 4608 4609 if (have_fifo) { 4610 wafter->q_next = wnew; 4611 } else { 4612 if (wafter->q_next) 4613 _OTHERQ(wafter->q_next)->q_next = new; 4614 wafter->q_next = wnew; 4615 } 4616 4617 set_qend(new); 4618 /* The QEND flag might have to be updated for the upstream guy */ 4619 set_qend(after); 4620 4621 ASSERT(_SAMESTR(new) == O_SAMESTR(new)); 4622 ASSERT(_SAMESTR(wnew) == O_SAMESTR(wnew)); 4623 ASSERT(_SAMESTR(after) == O_SAMESTR(after)); 4624 ASSERT(_SAMESTR(wafter) == O_SAMESTR(wafter)); 4625 strsetuio(stp); 4626 4627 /* 4628 * If this was a module insertion, bump the push count. 4629 */ 4630 if (!(new->q_flag & QISDRV)) 4631 stp->sd_pushcnt++; 4632 4633 strunlock(stp, NULL); 4634 4635 /* check if the write Q needs backenable */ 4636 backenable_insertedq(wnew); 4637 4638 /* check if the read Q needs backenable */ 4639 backenable_insertedq(new); 4640 } 4641 4642 /* 4643 * Given a read queue, unlink it from any neighbors. 4644 * 4645 * This routine acquires all the necessary locks in order to 4646 * change q_next and related pointers and also guard against 4647 * stale references (e.g. through q_next) to the queue that 4648 * is being removed. It also plays part of the role in ensuring 4649 * that the module's/driver's put procedure doesn't get called 4650 * after qprocsoff returns. 4651 * 4652 * Removeq depends on the stream head ensuring that there are 4653 * no concurrent insertq or removeq on the same stream. The 4654 * stream head ensures this using the flags STWOPEN, STRCLOSE and 4655 * STRPLUMB. 4656 * 4657 * The set of locks needed to remove the queue is different in 4658 * different cases: 4659 * 4660 * Acquire sd_lock, sd_reflock, and all the syncq locks in the stream after 4661 * waiting for the syncq reference count to drop to 0 indicating that no 4662 * non-close threads are present anywhere in the stream. This ensures that any 4663 * module/driver can reference q_next in its open, close, put, or service 4664 * procedures. 4665 * 4666 * The sq_rmqcount counter tracks the number of threads inside removeq(). 4667 * strlock() ensures that there is either no threads executing inside perimeter 4668 * or there is only a thread calling qprocsoff(). 4669 * 4670 * strlock() compares the value of sq_count with the number of threads inside 4671 * removeq() and waits until sq_count is equal to sq_rmqcount. We need to wakeup 4672 * any threads waiting in strlock() when the sq_rmqcount increases. 
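 *
 * A rough sketch of where this routine sits in the close path (the exact
 * call chain through qprocsoff/qdetach lives elsewhere and is shown here
 * only as an assumption for illustration):
 *
 *	qprocsoff(rq);		(disables service procedures and unlinks
 *				the queue pair via removeq(rq))
 *	...			(module close routine cancels callbacks)
 *	qdetach(rq, ...);	(waits for scheduled threads, frees queues)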
4673 */ 4674 4675 void 4676 removeq(queue_t *qp) 4677 { 4678 queue_t *wqp = _WR(qp); 4679 struct stdata *stp = STREAM(qp); 4680 sqlist_t *sqlist = NULL; 4681 boolean_t isdriver; 4682 int moved; 4683 syncq_t *sq = qp->q_syncq; 4684 syncq_t *wsq = wqp->q_syncq; 4685 4686 ASSERT(stp); 4687 4688 TRACE_2(TR_FAC_STREAMS_FR, TR_REMOVEQ, 4689 "removeq:%p %p", qp, wqp); 4690 ASSERT(qp->q_flag&QREADR); 4691 4692 /* 4693 * For queues using Synchronous streams, we must wait for all threads in 4694 * rwnext() to drain out before proceeding. 4695 */ 4696 if (qp->q_flag & QSYNCSTR) { 4697 /* First, we need wakeup any threads blocked in rwnext() */ 4698 mutex_enter(SQLOCK(sq)); 4699 if (sq->sq_flags & SQ_WANTWAKEUP) { 4700 sq->sq_flags &= ~SQ_WANTWAKEUP; 4701 cv_broadcast(&sq->sq_wait); 4702 } 4703 mutex_exit(SQLOCK(sq)); 4704 4705 if (wsq != sq) { 4706 mutex_enter(SQLOCK(wsq)); 4707 if (wsq->sq_flags & SQ_WANTWAKEUP) { 4708 wsq->sq_flags &= ~SQ_WANTWAKEUP; 4709 cv_broadcast(&wsq->sq_wait); 4710 } 4711 mutex_exit(SQLOCK(wsq)); 4712 } 4713 4714 mutex_enter(QLOCK(qp)); 4715 while (qp->q_rwcnt > 0) { 4716 qp->q_flag |= QWANTRMQSYNC; 4717 cv_wait(&qp->q_wait, QLOCK(qp)); 4718 } 4719 mutex_exit(QLOCK(qp)); 4720 4721 mutex_enter(QLOCK(wqp)); 4722 while (wqp->q_rwcnt > 0) { 4723 wqp->q_flag |= QWANTRMQSYNC; 4724 cv_wait(&wqp->q_wait, QLOCK(wqp)); 4725 } 4726 mutex_exit(QLOCK(wqp)); 4727 } 4728 4729 mutex_enter(SQLOCK(sq)); 4730 sq->sq_rmqcount++; 4731 if (sq->sq_flags & SQ_WANTWAKEUP) { 4732 sq->sq_flags &= ~SQ_WANTWAKEUP; 4733 cv_broadcast(&sq->sq_wait); 4734 } 4735 mutex_exit(SQLOCK(sq)); 4736 4737 isdriver = (qp->q_flag & QISDRV); 4738 4739 sqlist = sqlist_build(qp, stp, STRMATED(stp)); 4740 strlock(stp, sqlist); 4741 4742 reset_nfsrv_ptr(qp, wqp); 4743 4744 ASSERT(wqp->q_next == NULL || backq(qp)->q_next == qp); 4745 ASSERT(qp->q_next == NULL || backq(wqp)->q_next == wqp); 4746 /* Do we have a FIFO? */ 4747 if (wqp->q_next == qp) { 4748 stp->sd_wrq->q_next = _RD(stp->sd_wrq); 4749 } else { 4750 if (wqp->q_next) 4751 backq(qp)->q_next = qp->q_next; 4752 if (qp->q_next) 4753 backq(wqp)->q_next = wqp->q_next; 4754 } 4755 4756 /* The QEND flag might have to be updated for the upstream guy */ 4757 if (qp->q_next) 4758 set_qend(qp->q_next); 4759 4760 ASSERT(_SAMESTR(stp->sd_wrq) == O_SAMESTR(stp->sd_wrq)); 4761 ASSERT(_SAMESTR(_RD(stp->sd_wrq)) == O_SAMESTR(_RD(stp->sd_wrq))); 4762 4763 /* 4764 * Move any messages destined for the put procedures to the next 4765 * syncq in line. Otherwise free them. 4766 */ 4767 moved = 0; 4768 /* 4769 * Quick check to see whether there are any messages or events. 4770 */ 4771 if (qp->q_syncqmsgs != 0 || (qp->q_syncq->sq_flags & SQ_EVENTS)) 4772 moved += propagate_syncq(qp); 4773 if (wqp->q_syncqmsgs != 0 || 4774 (wqp->q_syncq->sq_flags & SQ_EVENTS)) 4775 moved += propagate_syncq(wqp); 4776 4777 strsetuio(stp); 4778 4779 /* 4780 * If this was a module removal, decrement the push count. 4781 */ 4782 if (!isdriver) 4783 stp->sd_pushcnt--; 4784 4785 strunlock(stp, sqlist); 4786 sqlist_free(sqlist); 4787 4788 /* 4789 * Make sure any messages that were propagated are drained. 4790 * Also clear any QFULL bit caused by messages that were propagated. 
4791 */ 4792 4793 if (qp->q_next != NULL) { 4794 clr_qfull(qp); 4795 /* 4796 * For the driver calling qprocsoff, propagate_syncq 4797 * frees all the messages instead of putting it in 4798 * the stream head 4799 */ 4800 if (!isdriver && (moved > 0)) 4801 emptysq(qp->q_next->q_syncq); 4802 } 4803 if (wqp->q_next != NULL) { 4804 clr_qfull(wqp); 4805 /* 4806 * We come here for any pop of a module except for the 4807 * case of driver being removed. We don't call emptysq 4808 * if we did not move any messages. This will avoid holding 4809 * PERMOD syncq locks in emptysq 4810 */ 4811 if (moved > 0) 4812 emptysq(wqp->q_next->q_syncq); 4813 } 4814 4815 mutex_enter(SQLOCK(sq)); 4816 sq->sq_rmqcount--; 4817 mutex_exit(SQLOCK(sq)); 4818 } 4819 4820 /* 4821 * Prevent further entry by setting a flag (like SQ_FROZEN, SQ_BLOCKED or 4822 * SQ_WRITER) on a syncq. 4823 * If maxcnt is not -1 it assumes that caller has "maxcnt" claim(s) on the 4824 * sync queue and waits until sq_count reaches maxcnt. 4825 * 4826 * if maxcnt is -1 there's no need to grab sq_putlocks since the caller 4827 * does not care about putnext threads that are in the middle of calling put 4828 * entry points. 4829 * 4830 * This routine is used for both inner and outer syncqs. 4831 */ 4832 static void 4833 blocksq(syncq_t *sq, ushort_t flag, int maxcnt) 4834 { 4835 uint16_t count = 0; 4836 4837 mutex_enter(SQLOCK(sq)); 4838 /* 4839 * Wait for SQ_FROZEN/SQ_BLOCKED to be reset. 4840 * SQ_FROZEN will be set if there is a frozen stream that has a 4841 * queue which also refers to this "shared" syncq. 4842 * SQ_BLOCKED will be set if there is "off" queue which also 4843 * refers to this "shared" syncq. 4844 */ 4845 if (maxcnt != -1) { 4846 count = sq->sq_count; 4847 SQ_PUTLOCKS_ENTER(sq); 4848 SQ_PUTCOUNT_CLRFAST_LOCKED(sq); 4849 SUM_SQ_PUTCOUNTS(sq, count); 4850 } 4851 sq->sq_needexcl++; 4852 ASSERT(sq->sq_needexcl != 0); /* wraparound */ 4853 4854 while ((sq->sq_flags & flag) || 4855 (maxcnt != -1 && count > (unsigned)maxcnt)) { 4856 sq->sq_flags |= SQ_WANTWAKEUP; 4857 if (maxcnt != -1) { 4858 SQ_PUTLOCKS_EXIT(sq); 4859 } 4860 cv_wait(&sq->sq_wait, SQLOCK(sq)); 4861 if (maxcnt != -1) { 4862 count = sq->sq_count; 4863 SQ_PUTLOCKS_ENTER(sq); 4864 SUM_SQ_PUTCOUNTS(sq, count); 4865 } 4866 } 4867 sq->sq_needexcl--; 4868 sq->sq_flags |= flag; 4869 ASSERT(maxcnt == -1 || count == maxcnt); 4870 if (maxcnt != -1) { 4871 if (sq->sq_needexcl == 0) { 4872 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 4873 } 4874 SQ_PUTLOCKS_EXIT(sq); 4875 } else if (sq->sq_needexcl == 0) { 4876 SQ_PUTCOUNT_SETFAST(sq); 4877 } 4878 4879 mutex_exit(SQLOCK(sq)); 4880 } 4881 4882 /* 4883 * Reset a flag that was set with blocksq. 4884 * 4885 * Can not use this routine to reset SQ_WRITER. 4886 * 4887 * If "isouter" is set then the syncq is assumed to be an outer perimeter 4888 * and drain_syncq is not called. Instead we rely on the qwriter_outer thread 4889 * to handle the queued qwriter operations. 4890 * 4891 * no need to grab sq_putlocks here. See comment in strsubr.h that explains when 4892 * sq_putlocks are used. 
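 *
 * An illustrative pairing, modeled on outer_insert() below (not a new
 * interface):
 *
 *	blocksq(outer, SQ_BLOCKED, 0);		(keep everyone else out)
 *	...modify the outer perimeter's list of inner syncqs...
 *	unblocksq(outer, SQ_BLOCKED, 1);	(isouter = 1: let the
 *						qwriter_outer thread drain)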
4893 */ 4894 static void 4895 unblocksq(syncq_t *sq, uint16_t resetflag, int isouter) 4896 { 4897 uint16_t flags; 4898 4899 mutex_enter(SQLOCK(sq)); 4900 ASSERT(resetflag != SQ_WRITER); 4901 ASSERT(sq->sq_flags & resetflag); 4902 flags = sq->sq_flags & ~resetflag; 4903 sq->sq_flags = flags; 4904 if (flags & (SQ_QUEUED | SQ_WANTWAKEUP)) { 4905 if (flags & SQ_WANTWAKEUP) { 4906 flags &= ~SQ_WANTWAKEUP; 4907 cv_broadcast(&sq->sq_wait); 4908 } 4909 sq->sq_flags = flags; 4910 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) { 4911 if (!isouter) { 4912 /* drain_syncq drops SQLOCK */ 4913 drain_syncq(sq); 4914 return; 4915 } 4916 } 4917 } 4918 mutex_exit(SQLOCK(sq)); 4919 } 4920 4921 /* 4922 * Reset a flag that was set with blocksq. 4923 * Does not drain the syncq. Use emptysq() for that. 4924 * Returns 1 if SQ_QUEUED is set. Otherwise 0. 4925 * 4926 * no need to grab sq_putlocks here. See comment in strsubr.h that explains when 4927 * sq_putlocks are used. 4928 */ 4929 static int 4930 dropsq(syncq_t *sq, uint16_t resetflag) 4931 { 4932 uint16_t flags; 4933 4934 mutex_enter(SQLOCK(sq)); 4935 ASSERT(sq->sq_flags & resetflag); 4936 flags = sq->sq_flags & ~resetflag; 4937 if (flags & SQ_WANTWAKEUP) { 4938 flags &= ~SQ_WANTWAKEUP; 4939 cv_broadcast(&sq->sq_wait); 4940 } 4941 sq->sq_flags = flags; 4942 mutex_exit(SQLOCK(sq)); 4943 if (flags & SQ_QUEUED) 4944 return (1); 4945 return (0); 4946 } 4947 4948 /* 4949 * Empty all the messages on a syncq. 4950 * 4951 * no need to grab sq_putlocks here. See comment in strsubr.h that explains when 4952 * sq_putlocks are used. 4953 */ 4954 static void 4955 emptysq(syncq_t *sq) 4956 { 4957 uint16_t flags; 4958 4959 mutex_enter(SQLOCK(sq)); 4960 flags = sq->sq_flags; 4961 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) { 4962 /* 4963 * To prevent potential recursive invocation of drain_syncq we 4964 * do not call drain_syncq if count is non-zero. 4965 */ 4966 if (sq->sq_count == 0) { 4967 /* drain_syncq() drops SQLOCK */ 4968 drain_syncq(sq); 4969 return; 4970 } else 4971 sqenable(sq); 4972 } 4973 mutex_exit(SQLOCK(sq)); 4974 } 4975 4976 /* 4977 * Ordered insert while removing duplicates. 4978 */ 4979 static void 4980 sqlist_insert(sqlist_t *sqlist, syncq_t *sqp) 4981 { 4982 syncql_t *sqlp, **prev_sqlpp, *new_sqlp; 4983 4984 prev_sqlpp = &sqlist->sqlist_head; 4985 while ((sqlp = *prev_sqlpp) != NULL) { 4986 if (sqlp->sql_sq >= sqp) { 4987 if (sqlp->sql_sq == sqp) /* duplicate */ 4988 return; 4989 break; 4990 } 4991 prev_sqlpp = &sqlp->sql_next; 4992 } 4993 new_sqlp = &sqlist->sqlist_array[sqlist->sqlist_index++]; 4994 ASSERT((char *)new_sqlp < (char *)sqlist + sqlist->sqlist_size); 4995 new_sqlp->sql_next = sqlp; 4996 new_sqlp->sql_sq = sqp; 4997 *prev_sqlpp = new_sqlp; 4998 } 4999 5000 /* 5001 * Walk the write side queues until we hit either the driver 5002 * or a twist in the stream (_SAMESTR will return false in both 5003 * these cases) then turn around and walk the read side queues 5004 * back up to the stream head. 5005 */ 5006 static void 5007 sqlist_insertall(sqlist_t *sqlist, queue_t *q) 5008 { 5009 while (q != NULL) { 5010 sqlist_insert(sqlist, q->q_syncq); 5011 5012 if (_SAMESTR(q)) 5013 q = q->q_next; 5014 else if (!(q->q_flag & QREADR)) 5015 q = _RD(q); 5016 else 5017 q = NULL; 5018 } 5019 } 5020 5021 /* 5022 * Allocate and build a list of all syncqs in a stream and the syncq(s) 5023 * associated with the "q" parameter. The resulting list is sorted in a 5024 * canonical order and is free of duplicates. 
5025 * Assumes the passed queue is a _RD(q). 5026 */ 5027 static sqlist_t * 5028 sqlist_build(queue_t *q, struct stdata *stp, boolean_t do_twist) 5029 { 5030 sqlist_t *sqlist = sqlist_alloc(stp, KM_SLEEP); 5031 5032 /* 5033 * start with the current queue/qpair 5034 */ 5035 ASSERT(q->q_flag & QREADR); 5036 5037 sqlist_insert(sqlist, q->q_syncq); 5038 sqlist_insert(sqlist, _WR(q)->q_syncq); 5039 5040 sqlist_insertall(sqlist, stp->sd_wrq); 5041 if (do_twist) 5042 sqlist_insertall(sqlist, stp->sd_mate->sd_wrq); 5043 5044 return (sqlist); 5045 } 5046 5047 static sqlist_t * 5048 sqlist_alloc(struct stdata *stp, int kmflag) 5049 { 5050 size_t sqlist_size; 5051 sqlist_t *sqlist; 5052 5053 /* 5054 * Allocate 2 syncql_t's for each pushed module. Note that 5055 * the sqlist_t structure already has 4 syncql_t's built in: 5056 * 2 for the stream head, and 2 for the driver/other stream head. 5057 */ 5058 sqlist_size = 2 * sizeof (syncql_t) * stp->sd_pushcnt + 5059 sizeof (sqlist_t); 5060 if (STRMATED(stp)) 5061 sqlist_size += 2 * sizeof (syncql_t) * stp->sd_mate->sd_pushcnt; 5062 sqlist = kmem_alloc(sqlist_size, kmflag); 5063 5064 sqlist->sqlist_head = NULL; 5065 sqlist->sqlist_size = sqlist_size; 5066 sqlist->sqlist_index = 0; 5067 5068 return (sqlist); 5069 } 5070 5071 /* 5072 * Free the list created by sqlist_alloc() 5073 */ 5074 static void 5075 sqlist_free(sqlist_t *sqlist) 5076 { 5077 kmem_free(sqlist, sqlist->sqlist_size); 5078 } 5079 5080 /* 5081 * Prevent any new entries into any syncq in this stream. 5082 * Used by freezestr. 5083 */ 5084 void 5085 strblock(queue_t *q) 5086 { 5087 struct stdata *stp; 5088 syncql_t *sql; 5089 sqlist_t *sqlist; 5090 5091 q = _RD(q); 5092 5093 stp = STREAM(q); 5094 ASSERT(stp != NULL); 5095 5096 /* 5097 * Get a sorted list with all the duplicates removed containing 5098 * all the syncqs referenced by this stream. 5099 */ 5100 sqlist = sqlist_build(q, stp, B_FALSE); 5101 for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next) 5102 blocksq(sql->sql_sq, SQ_FROZEN, -1); 5103 sqlist_free(sqlist); 5104 } 5105 5106 /* 5107 * Release the block on new entries into this stream 5108 */ 5109 void 5110 strunblock(queue_t *q) 5111 { 5112 struct stdata *stp; 5113 syncql_t *sql; 5114 sqlist_t *sqlist; 5115 int drain_needed; 5116 5117 q = _RD(q); 5118 5119 /* 5120 * Get a sorted list with all the duplicates removed containing 5121 * all the syncqs referenced by this stream. 5122 * Have to drop the SQ_FROZEN flag on all the syncqs before 5123 * starting to drain them; otherwise the draining might 5124 * cause a freezestr in some module on the stream (which 5125 * would deadlock.) 
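 *
 * An illustrative freeze/thaw bracket (strblock is documented above as
 * being used by freezestr; the pairing shown is a sketch):
 *
 *	strblock(q);	(blocksq(..., SQ_FROZEN, -1) on every syncq)
 *	...caller inspects or updates queue state while quiesced...
 *	strunblock(q);	(dropsq(..., SQ_FROZEN), then emptysq if needed)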
 */
	stp = STREAM(q);
	ASSERT(stp != NULL);
	sqlist = sqlist_build(q, stp, B_FALSE);
	drain_needed = 0;
	for (sql = sqlist->sqlist_head; sql != NULL; sql = sql->sql_next)
		drain_needed += dropsq(sql->sql_sq, SQ_FROZEN);
	if (drain_needed) {
		for (sql = sqlist->sqlist_head; sql != NULL;
		    sql = sql->sql_next)
			emptysq(sql->sql_sq);
	}
	sqlist_free(sqlist);
}

#ifdef DEBUG
static int
qprocsareon(queue_t *rq)
{
	if (rq->q_next == NULL)
		return (0);
	return (_WR(rq->q_next)->q_next == _WR(rq));
}

int
qclaimed(queue_t *q)
{
	uint_t count;

	count = q->q_syncq->sq_count;
	SUM_SQ_PUTCOUNTS(q->q_syncq, count);
	return (count != 0);
}

/*
 * Check if anyone has frozen this stream with freezestr
 */
int
frozenstr(queue_t *q)
{
	return ((q->q_syncq->sq_flags & SQ_FROZEN) != 0);
}
#endif /* DEBUG */

/*
 * Enter a queue.
 * Obsolete interface. Should not be used.
 */
void
enterq(queue_t *q)
{
	entersq(q->q_syncq, SQ_CALLBACK);
}

void
leaveq(queue_t *q)
{
	leavesq(q->q_syncq, SQ_CALLBACK);
}

/*
 * Enter a perimeter. c_inner and c_outer specify which concurrency bits
 * to check.
 * Wait if SQ_QUEUED is set to preserve ordering between messages and qwriter
 * calls and the running of open, close and service procedures.
 *
 * If the c_inner bit is set there is no need to grab sq_putlocks since we
 * don't care if other threads have entered or are entering the put entry
 * point.
 *
 * If the c_inner bit is set it might have been possible to use
 * sq_putlocks/sq_putcounts instead of SQLOCK/sq_count (e.g. to optimize
 * the open/close path for IP) but since the count may need to be decremented
 * in qwait() we wouldn't know which counter to decrement. Currently the
 * counter is selected by the current cpu_seqid and the current CPU can change
 * at any moment. XXX in the future we might use curthread id bits to select
 * the counter and this would stay constant across routine calls.
 */
void
entersq(syncq_t *sq, int entrypoint)
{
	uint16_t count = 0;
	uint16_t flags;
	uint16_t waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL;
	uint16_t type;
	uint_t c_inner = entrypoint & SQ_CI;
	uint_t c_outer = entrypoint & SQ_CO;

	/*
	 * Increment ref count to keep closes out of this queue.
	 */
	ASSERT(sq);
	ASSERT(c_inner && c_outer);
	mutex_enter(SQLOCK(sq));
	flags = sq->sq_flags;
	type = sq->sq_type;
	if (!(type & c_inner)) {
		/* Make sure all putcounts now use slowlock. */
		count = sq->sq_count;
		SQ_PUTLOCKS_ENTER(sq);
		SQ_PUTCOUNT_CLRFAST_LOCKED(sq);
		SUM_SQ_PUTCOUNTS(sq, count);
		sq->sq_needexcl++;
		ASSERT(sq->sq_needexcl != 0);	/* wraparound */
		waitflags |= SQ_MESSAGES;
	}
	/*
	 * Wait until we can enter the inner perimeter.
	 * If we want exclusive access we wait until sq_count is 0.
	 * We have to do this before entering the outer perimeter in order
	 * to preserve put/close message ordering.
5236 */ 5237 while ((flags & waitflags) || (!(type & c_inner) && count != 0)) { 5238 sq->sq_flags = flags | SQ_WANTWAKEUP; 5239 if (!(type & c_inner)) { 5240 SQ_PUTLOCKS_EXIT(sq); 5241 } 5242 cv_wait(&sq->sq_wait, SQLOCK(sq)); 5243 if (!(type & c_inner)) { 5244 count = sq->sq_count; 5245 SQ_PUTLOCKS_ENTER(sq); 5246 SUM_SQ_PUTCOUNTS(sq, count); 5247 } 5248 flags = sq->sq_flags; 5249 } 5250 5251 if (!(type & c_inner)) { 5252 ASSERT(sq->sq_needexcl > 0); 5253 sq->sq_needexcl--; 5254 if (sq->sq_needexcl == 0) { 5255 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 5256 } 5257 } 5258 5259 /* Check if we need to enter the outer perimeter */ 5260 if (!(type & c_outer)) { 5261 /* 5262 * We have to enter the outer perimeter exclusively before 5263 * we can increment sq_count to avoid deadlock. This implies 5264 * that we have to re-check sq_flags and sq_count. 5265 * 5266 * is it possible to have c_inner set when c_outer is not set? 5267 */ 5268 if (!(type & c_inner)) { 5269 SQ_PUTLOCKS_EXIT(sq); 5270 } 5271 mutex_exit(SQLOCK(sq)); 5272 outer_enter(sq->sq_outer, SQ_GOAWAY); 5273 mutex_enter(SQLOCK(sq)); 5274 flags = sq->sq_flags; 5275 /* 5276 * there should be no need to recheck sq_putcounts 5277 * because outer_enter() has already waited for them to clear 5278 * after setting SQ_WRITER. 5279 */ 5280 count = sq->sq_count; 5281 #ifdef DEBUG 5282 /* 5283 * SUMCHECK_SQ_PUTCOUNTS should return the sum instead 5284 * of doing an ASSERT internally. Others should do 5285 * something like 5286 * ASSERT(SUMCHECK_SQ_PUTCOUNTS(sq) == 0); 5287 * without the need to #ifdef DEBUG it. 5288 */ 5289 SUMCHECK_SQ_PUTCOUNTS(sq, 0); 5290 #endif 5291 while ((flags & (SQ_EXCL|SQ_BLOCKED|SQ_FROZEN)) || 5292 (!(type & c_inner) && count != 0)) { 5293 sq->sq_flags = flags | SQ_WANTWAKEUP; 5294 cv_wait(&sq->sq_wait, SQLOCK(sq)); 5295 count = sq->sq_count; 5296 flags = sq->sq_flags; 5297 } 5298 } 5299 5300 sq->sq_count++; 5301 ASSERT(sq->sq_count != 0); /* Wraparound */ 5302 if (!(type & c_inner)) { 5303 /* Exclusive entry */ 5304 ASSERT(sq->sq_count == 1); 5305 sq->sq_flags |= SQ_EXCL; 5306 if (type & c_outer) { 5307 SQ_PUTLOCKS_EXIT(sq); 5308 } 5309 } 5310 mutex_exit(SQLOCK(sq)); 5311 } 5312 5313 /* 5314 * leave a syncq. announce to framework that closes may proceed. 5315 * c_inner and c_outer specifies which concurrency bits 5316 * to check. 5317 * 5318 * must never be called from driver or module put entry point. 5319 * 5320 * no need to grab sq_putlocks here. See comment in strsubr.h that explains when 5321 * sq_putlocks are used. 5322 */ 5323 void 5324 leavesq(syncq_t *sq, int entrypoint) 5325 { 5326 uint16_t flags; 5327 uint16_t type; 5328 uint_t c_outer = entrypoint & SQ_CO; 5329 #ifdef DEBUG 5330 uint_t c_inner = entrypoint & SQ_CI; 5331 #endif 5332 5333 /* 5334 * decrement ref count, drain the syncq if possible, and wake up 5335 * any waiting close. 5336 */ 5337 ASSERT(sq); 5338 ASSERT(c_inner && c_outer); 5339 mutex_enter(SQLOCK(sq)); 5340 flags = sq->sq_flags; 5341 type = sq->sq_type; 5342 if (flags & (SQ_QUEUED|SQ_WANTWAKEUP|SQ_WANTEXWAKEUP)) { 5343 5344 if (flags & SQ_WANTWAKEUP) { 5345 flags &= ~SQ_WANTWAKEUP; 5346 cv_broadcast(&sq->sq_wait); 5347 } 5348 if (flags & SQ_WANTEXWAKEUP) { 5349 flags &= ~SQ_WANTEXWAKEUP; 5350 cv_broadcast(&sq->sq_exitwait); 5351 } 5352 5353 if ((flags & SQ_QUEUED) && !(flags & SQ_STAYAWAY)) { 5354 /* 5355 * The syncq needs to be drained. "Exit" the syncq 5356 * before calling drain_syncq. 
5357 */ 5358 ASSERT(sq->sq_count != 0); 5359 sq->sq_count--; 5360 ASSERT((flags & SQ_EXCL) || (type & c_inner)); 5361 sq->sq_flags = flags & ~SQ_EXCL; 5362 drain_syncq(sq); 5363 ASSERT(MUTEX_NOT_HELD(SQLOCK(sq))); 5364 /* Check if we need to exit the outer perimeter */ 5365 /* XXX will this ever be true? */ 5366 if (!(type & c_outer)) 5367 outer_exit(sq->sq_outer); 5368 return; 5369 } 5370 } 5371 ASSERT(sq->sq_count != 0); 5372 sq->sq_count--; 5373 ASSERT((flags & SQ_EXCL) || (type & c_inner)); 5374 sq->sq_flags = flags & ~SQ_EXCL; 5375 mutex_exit(SQLOCK(sq)); 5376 5377 /* Check if we need to exit the outer perimeter */ 5378 if (!(sq->sq_type & c_outer)) 5379 outer_exit(sq->sq_outer); 5380 } 5381 5382 /* 5383 * Prevent q_next from changing in this stream by incrementing sq_count. 5384 * 5385 * no need to grab sq_putlocks here. See comment in strsubr.h that explains when 5386 * sq_putlocks are used. 5387 */ 5388 void 5389 claimq(queue_t *qp) 5390 { 5391 syncq_t *sq = qp->q_syncq; 5392 5393 mutex_enter(SQLOCK(sq)); 5394 sq->sq_count++; 5395 ASSERT(sq->sq_count != 0); /* Wraparound */ 5396 mutex_exit(SQLOCK(sq)); 5397 } 5398 5399 /* 5400 * Undo claimq. 5401 * 5402 * no need to grab sq_putlocks here. See comment in strsubr.h that explains when 5403 * sq_putlocks are used. 5404 */ 5405 void 5406 releaseq(queue_t *qp) 5407 { 5408 syncq_t *sq = qp->q_syncq; 5409 uint16_t flags; 5410 5411 mutex_enter(SQLOCK(sq)); 5412 ASSERT(sq->sq_count > 0); 5413 sq->sq_count--; 5414 5415 flags = sq->sq_flags; 5416 if (flags & (SQ_WANTWAKEUP|SQ_QUEUED)) { 5417 if (flags & SQ_WANTWAKEUP) { 5418 flags &= ~SQ_WANTWAKEUP; 5419 cv_broadcast(&sq->sq_wait); 5420 } 5421 sq->sq_flags = flags; 5422 if ((flags & SQ_QUEUED) && !(flags & (SQ_STAYAWAY|SQ_EXCL))) { 5423 /* 5424 * To prevent potential recursive invocation of 5425 * drain_syncq we do not call drain_syncq if count is 5426 * non-zero. 5427 */ 5428 if (sq->sq_count == 0) { 5429 drain_syncq(sq); 5430 return; 5431 } else 5432 sqenable(sq); 5433 } 5434 } 5435 mutex_exit(SQLOCK(sq)); 5436 } 5437 5438 /* 5439 * Prevent q_next from changing in this stream by incrementing sd_refcnt. 5440 */ 5441 void 5442 claimstr(queue_t *qp) 5443 { 5444 struct stdata *stp = STREAM(qp); 5445 5446 mutex_enter(&stp->sd_reflock); 5447 stp->sd_refcnt++; 5448 ASSERT(stp->sd_refcnt != 0); /* Wraparound */ 5449 mutex_exit(&stp->sd_reflock); 5450 } 5451 5452 /* 5453 * Undo claimstr. 
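 *
 * An illustrative pairing, modeled on backenable() above:
 *
 *	claimstr(q);		(q_next in this stream is now stable)
 *	for (nq = backq(q); nq != NULL; nq = backq(nq))
 *		...walk the stream safely...
 *	releasestr(q);		(last reference wakes up strlock())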
 */
void
releasestr(queue_t *qp)
{
	struct stdata *stp = STREAM(qp);

	mutex_enter(&stp->sd_reflock);
	ASSERT(stp->sd_refcnt != 0);
	if (--stp->sd_refcnt == 0)
		cv_broadcast(&stp->sd_refmonitor);
	mutex_exit(&stp->sd_reflock);
}

static syncq_t *
new_syncq(void)
{
	return (kmem_cache_alloc(syncq_cache, KM_SLEEP));
}

static void
free_syncq(syncq_t *sq)
{
	ASSERT(sq->sq_head == NULL);
	ASSERT(sq->sq_outer == NULL);
	ASSERT(sq->sq_callbpend == NULL);
	ASSERT((sq->sq_onext == NULL && sq->sq_oprev == NULL) ||
	    (sq->sq_onext == sq && sq->sq_oprev == sq));

	if (sq->sq_ciputctrl != NULL) {
		ASSERT(sq->sq_nciputctrl == n_ciputctrl - 1);
		SUMCHECK_CIPUTCTRL_COUNTS(sq->sq_ciputctrl,
		    sq->sq_nciputctrl, 0);
		ASSERT(ciputctrl_cache != NULL);
		kmem_cache_free(ciputctrl_cache, sq->sq_ciputctrl);
	}

	sq->sq_tail = NULL;
	sq->sq_evhead = NULL;
	sq->sq_evtail = NULL;
	sq->sq_ciputctrl = NULL;
	sq->sq_nciputctrl = 0;
	sq->sq_count = 0;
	sq->sq_rmqcount = 0;
	sq->sq_callbflags = 0;
	sq->sq_cancelid = 0;
	sq->sq_next = NULL;
	sq->sq_needexcl = 0;
	sq->sq_svcflags = 0;
	sq->sq_nqueues = 0;
	sq->sq_pri = 0;
	sq->sq_onext = NULL;
	sq->sq_oprev = NULL;
	sq->sq_flags = 0;
	sq->sq_type = 0;
	sq->sq_servcount = 0;

	kmem_cache_free(syncq_cache, sq);
}

/* Outer perimeter code */

/*
 * The outer syncq uses the fields and flags in the syncq slightly
 * differently from the inner syncqs.
 *	sq_count	Incremented when there are pending or running
 *			writers at the outer perimeter to prevent the set of
 *			inner syncqs that belong to the outer perimeter from
 *			changing.
 *	sq_head/tail	List of deferred qwriter(OUTER) operations.
 *
 *	SQ_BLOCKED	Set to prevent traversing of sq_next, sq_prev while
 *			inner syncqs are added to or removed from the
 *			outer perimeter.
 *	SQ_QUEUED	sq_head/tail has messages or events queued.
 *
 *	SQ_WRITER	A thread is currently traversing all the inner syncqs
 *			setting the SQ_WRITER flag.
 */

/*
 * Get write access at the outer perimeter.
 * Note that read access is done by entersq, putnext, and put by simply
 * incrementing sq_count in the inner syncq.
 *
 * Waits until "flags" is no longer set in the outer to prevent multiple
 * threads from having write access at the same time. SQ_WRITER has to be part
 * of "flags".
 *
 * Increases sq_count on the outer syncq to keep away outer_insert/remove
 * until the outer_exit is finished.
 *
 * outer_enter is vulnerable to starvation since it does not prevent new
 * threads from entering the inner syncqs while it is waiting for sq_count to
 * go to zero.
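 *
 * An illustrative write-access bracket (this mirrors what the deferred
 * qwriter service path does; the service routine itself lives elsewhere):
 *
 *	outer_enter(outer, SQ_BLOCKED | SQ_WRITER);
 *	...			(inner syncqs are now quiesced)
 *	outer_exit(outer);	(runs deferred callbacks via write_now())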
 */
void
outer_enter(syncq_t *outer, uint16_t flags)
{
	syncq_t *sq;
	int wait_needed;
	uint16_t count;

	ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
	    outer->sq_oprev != NULL);
	ASSERT(flags & SQ_WRITER);

retry:
	mutex_enter(SQLOCK(outer));
	while (outer->sq_flags & flags) {
		outer->sq_flags |= SQ_WANTWAKEUP;
		cv_wait(&outer->sq_wait, SQLOCK(outer));
	}

	ASSERT(!(outer->sq_flags & SQ_WRITER));
	outer->sq_flags |= SQ_WRITER;
	outer->sq_count++;
	ASSERT(outer->sq_count != 0);	/* wraparound */
	wait_needed = 0;
	/*
	 * Set SQ_WRITER on all the inner syncqs while holding
	 * the SQLOCK on the outer syncq. This ensures that the changing
	 * of SQ_WRITER is atomic under the outer SQLOCK.
	 */
	for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
		mutex_enter(SQLOCK(sq));
		count = sq->sq_count;
		SQ_PUTLOCKS_ENTER(sq);
		sq->sq_flags |= SQ_WRITER;
		SUM_SQ_PUTCOUNTS(sq, count);
		if (count != 0)
			wait_needed = 1;
		SQ_PUTLOCKS_EXIT(sq);
		mutex_exit(SQLOCK(sq));
	}
	mutex_exit(SQLOCK(outer));

	/*
	 * Get everybody out of the syncqs sequentially.
	 * Note that we don't actually need to acquire the PUTLOCKS, since
	 * we have already cleared the fastbit, and set SQ_WRITER. By
	 * definition, the count can not increase since putnext will
	 * take the slowlock path (and the purpose of acquiring the
	 * putlocks was to make sure it didn't increase while we were
	 * waiting).
	 *
	 * Note that we still acquire the PUTLOCKS to be safe.
	 */
	if (wait_needed) {
		for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
			mutex_enter(SQLOCK(sq));
			count = sq->sq_count;
			SQ_PUTLOCKS_ENTER(sq);
			SUM_SQ_PUTCOUNTS(sq, count);
			while (count != 0) {
				sq->sq_flags |= SQ_WANTWAKEUP;
				SQ_PUTLOCKS_EXIT(sq);
				cv_wait(&sq->sq_wait, SQLOCK(sq));
				count = sq->sq_count;
				SQ_PUTLOCKS_ENTER(sq);
				SUM_SQ_PUTCOUNTS(sq, count);
			}
			SQ_PUTLOCKS_EXIT(sq);
			mutex_exit(SQLOCK(sq));
		}
		/*
		 * Verify that none of the flags got set while we
		 * were waiting for the sq_counts to drop.
		 * If this happens we exit and retry entering the
		 * outer perimeter.
		 */
		mutex_enter(SQLOCK(outer));
		if (outer->sq_flags & (flags & ~SQ_WRITER)) {
			mutex_exit(SQLOCK(outer));
			outer_exit(outer);
			goto retry;
		}
		mutex_exit(SQLOCK(outer));
	}
}

/*
 * Drop the write access at the outer perimeter.
 * Read access is dropped implicitly (by putnext, put, and leavesq) by
 * decrementing sq_count.
 */
void
outer_exit(syncq_t *outer)
{
	syncq_t *sq;
	int drain_needed;
	uint16_t flags;

	ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
	    outer->sq_oprev != NULL);
	ASSERT(MUTEX_NOT_HELD(SQLOCK(outer)));

	/*
	 * Atomically (from the perspective of threads calling become_writer)
	 * drop the write access at the outer perimeter by holding
	 * SQLOCK(outer) across all the dropsq calls and the resetting of
	 * SQ_WRITER.
	 * This defines a locking order between the outer perimeter
	 * SQLOCK and the inner perimeter SQLOCKs.
5657 */ 5658 mutex_enter(SQLOCK(outer)); 5659 flags = outer->sq_flags; 5660 ASSERT(outer->sq_flags & SQ_WRITER); 5661 if (flags & SQ_QUEUED) { 5662 write_now(outer); 5663 flags = outer->sq_flags; 5664 } 5665 5666 /* 5667 * sq_onext is stable since sq_count has not yet been decreased. 5668 * Reset the SQ_WRITER flags in all syncqs. 5669 * After dropping SQ_WRITER on the outer syncq we empty all the 5670 * inner syncqs. 5671 */ 5672 drain_needed = 0; 5673 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) 5674 drain_needed += dropsq(sq, SQ_WRITER); 5675 ASSERT(!(outer->sq_flags & SQ_QUEUED)); 5676 flags &= ~SQ_WRITER; 5677 if (drain_needed) { 5678 outer->sq_flags = flags; 5679 mutex_exit(SQLOCK(outer)); 5680 for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) 5681 emptysq(sq); 5682 mutex_enter(SQLOCK(outer)); 5683 flags = outer->sq_flags; 5684 } 5685 if (flags & SQ_WANTWAKEUP) { 5686 flags &= ~SQ_WANTWAKEUP; 5687 cv_broadcast(&outer->sq_wait); 5688 } 5689 outer->sq_flags = flags; 5690 ASSERT(outer->sq_count > 0); 5691 outer->sq_count--; 5692 mutex_exit(SQLOCK(outer)); 5693 } 5694 5695 /* 5696 * Add another syncq to an outer perimeter. 5697 * Block out all other access to the outer perimeter while it is being 5698 * changed using blocksq. 5699 * Assumes that the caller has *not* done an outer_enter. 5700 * 5701 * Vulnerable to starvation in blocksq. 5702 */ 5703 static void 5704 outer_insert(syncq_t *outer, syncq_t *sq) 5705 { 5706 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5707 outer->sq_oprev != NULL); 5708 ASSERT(sq->sq_outer == NULL && sq->sq_onext == NULL && 5709 sq->sq_oprev == NULL); /* Can't be in an outer perimeter */ 5710 5711 /* Get exclusive access to the outer perimeter list */ 5712 blocksq(outer, SQ_BLOCKED, 0); 5713 ASSERT(outer->sq_flags & SQ_BLOCKED); 5714 ASSERT(!(outer->sq_flags & SQ_WRITER)); 5715 5716 mutex_enter(SQLOCK(sq)); 5717 sq->sq_outer = outer; 5718 outer->sq_onext->sq_oprev = sq; 5719 sq->sq_onext = outer->sq_onext; 5720 outer->sq_onext = sq; 5721 sq->sq_oprev = outer; 5722 mutex_exit(SQLOCK(sq)); 5723 unblocksq(outer, SQ_BLOCKED, 1); 5724 } 5725 5726 /* 5727 * Remove a syncq from an outer perimeter. 5728 * Block out all other access to the outer perimeter while it is being 5729 * changed using blocksq. 5730 * Assumes that the caller has *not* done an outer_enter. 5731 * 5732 * Vulnerable to starvation in blocksq. 5733 */ 5734 static void 5735 outer_remove(syncq_t *outer, syncq_t *sq) 5736 { 5737 ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL && 5738 outer->sq_oprev != NULL); 5739 ASSERT(sq->sq_outer == outer); 5740 5741 /* Get exclusive access to the outer perimeter list */ 5742 blocksq(outer, SQ_BLOCKED, 0); 5743 ASSERT(outer->sq_flags & SQ_BLOCKED); 5744 ASSERT(!(outer->sq_flags & SQ_WRITER)); 5745 5746 mutex_enter(SQLOCK(sq)); 5747 sq->sq_outer = NULL; 5748 sq->sq_onext->sq_oprev = sq->sq_oprev; 5749 sq->sq_oprev->sq_onext = sq->sq_onext; 5750 sq->sq_oprev = sq->sq_onext = NULL; 5751 mutex_exit(SQLOCK(sq)); 5752 unblocksq(outer, SQ_BLOCKED, 1); 5753 } 5754 5755 /* 5756 * Queue a deferred qwriter(OUTER) callback for this outer perimeter. 5757 * If this is the first callback for this outer perimeter then add 5758 * this outer perimeter to the list of outer perimeters that 5759 * the qwriter_outer_thread will process. 5760 * 5761 * Increments sq_count in the outer syncq to prevent the membership 5762 * of the outer perimeter (in terms of inner syncqs) to change while 5763 * the callback is pending. 
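 *
 * The deferred request is encoded in the mblk itself (see the code below):
 * b_prev holds the callback function and b_queue the target queue. A sketch
 * of the life cycle, with dequeueing done by write_now():
 *
 *	queue_writer(outer, func, q, mp);	(enqueue, kick the taskq)
 *	...
 *	write_now(outer);			(dequeue, claimq(q),
 *						(*func)(q, mp), releaseq(q))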
 */
static void
queue_writer(syncq_t *outer, void (*func)(), queue_t *q, mblk_t *mp)
{
	ASSERT(MUTEX_HELD(SQLOCK(outer)));

	mp->b_prev = (mblk_t *)func;
	mp->b_queue = q;
	mp->b_next = NULL;
	outer->sq_count++;	/* Decremented when dequeued */
	ASSERT(outer->sq_count != 0);	/* Wraparound */
	if (outer->sq_evhead == NULL) {
		/* First message. */
		outer->sq_evhead = outer->sq_evtail = mp;
		outer->sq_flags |= SQ_EVENTS;
		mutex_exit(SQLOCK(outer));
		STRSTAT(qwr_outer);
		(void) taskq_dispatch(streams_taskq,
		    (task_func_t *)qwriter_outer_service, outer, TQ_SLEEP);
	} else {
		ASSERT(outer->sq_flags & SQ_EVENTS);
		outer->sq_evtail->b_next = mp;
		outer->sq_evtail = mp;
		mutex_exit(SQLOCK(outer));
	}
}

/*
 * Try and upgrade to write access at the outer perimeter. If this can
 * not be done without blocking then queue the callback to be done
 * by the qwriter_outer_thread.
 *
 * This routine can only be called from put or service procedures plus
 * asynchronous callback routines that have properly entered the syncq
 * (with entersq). Thus qwriter(OUTER) assumes the caller has one claim
 * on the syncq associated with q.
 */
void
qwriter_outer(queue_t *q, mblk_t *mp, void (*func)())
{
	syncq_t *osq, *sq, *outer;
	int failed;
	uint16_t flags;

	osq = q->q_syncq;
	outer = osq->sq_outer;
	if (outer == NULL)
		panic("qwriter(PERIM_OUTER): no outer perimeter");
	ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
	    outer->sq_oprev != NULL);

	mutex_enter(SQLOCK(outer));
	flags = outer->sq_flags;
	/*
	 * If some thread is traversing sq_next, or if we are blocked by
	 * outer_insert or outer_remove, or if we already have queued
	 * callbacks, then queue this callback for later processing.
	 *
	 * Also queue the qwriter for an interrupt thread in order
	 * to reduce the time spent running at high IPL.
	 */
	if ((flags & SQ_GOAWAY) || (curthread->t_pri >= kpreemptpri)) {
		/*
		 * Queue the become_writer request.
		 * The queueing is atomic under SQLOCK(outer) in order
		 * to synchronize with outer_exit.
		 * queue_writer will drop the outer SQLOCK.
		 */
		if (flags & SQ_BLOCKED) {
			/* Must set SQ_WRITER on inner perimeter */
			mutex_enter(SQLOCK(osq));
			osq->sq_flags |= SQ_WRITER;
			mutex_exit(SQLOCK(osq));
		} else {
			if (!(flags & SQ_WRITER)) {
				/*
				 * The outer could have been SQ_BLOCKED thus
				 * SQ_WRITER might not be set on the inner.
				 */
				mutex_enter(SQLOCK(osq));
				osq->sq_flags |= SQ_WRITER;
				mutex_exit(SQLOCK(osq));
			}
			ASSERT(osq->sq_flags & SQ_WRITER);
		}
		queue_writer(outer, func, q, mp);
		return;
	}
	/*
	 * We are half-way to exclusive access to the outer perimeter.
	 * Prevent any outer_enter, qwriter(OUTER), or outer_insert/remove
	 * while the inner syncqs are traversed.
	 */
	outer->sq_count++;
	ASSERT(outer->sq_count != 0);	/* wraparound */
	flags |= SQ_WRITER;
	/*
	 * Check if we can run the function immediately. Mark all
	 * syncqs with the writer flag to prevent new entries into
	 * put and service procedures.
	 *
	 * Set SQ_WRITER on all the inner syncqs while holding
	 * the SQLOCK on the outer syncq.
	 * This ensures that the changing
	 * of SQ_WRITER is atomic under the outer SQLOCK.
	 */
	failed = 0;
	for (sq = outer->sq_onext; sq != outer; sq = sq->sq_onext) {
		uint16_t count;
		uint_t maxcnt = (sq == osq) ? 1 : 0;

		mutex_enter(SQLOCK(sq));
		count = sq->sq_count;
		SQ_PUTLOCKS_ENTER(sq);
		SUM_SQ_PUTCOUNTS(sq, count);
		if (sq->sq_count > maxcnt)
			failed = 1;
		sq->sq_flags |= SQ_WRITER;
		SQ_PUTLOCKS_EXIT(sq);
		mutex_exit(SQLOCK(sq));
	}
	if (failed) {
		/*
		 * Some other thread has a read claim on the outer perimeter.
		 * Queue the callback for deferred processing.
		 *
		 * queue_writer will set SQ_QUEUED before we drop SQ_WRITER
		 * so that other qwriter(OUTER) calls will queue their
		 * callbacks as well. queue_writer increments sq_count so we
		 * decrement to compensate for our increment.
		 *
		 * Dropping SQ_WRITER enables the writer thread to work
		 * on this outer perimeter.
		 */
		outer->sq_flags = flags;
		queue_writer(outer, func, q, mp);
		/* queue_writer dropped the lock */
		mutex_enter(SQLOCK(outer));
		ASSERT(outer->sq_count > 0);
		outer->sq_count--;
		ASSERT(outer->sq_flags & SQ_WRITER);
		flags = outer->sq_flags;
		flags &= ~SQ_WRITER;
		if (flags & SQ_WANTWAKEUP) {
			flags &= ~SQ_WANTWAKEUP;
			cv_broadcast(&outer->sq_wait);
		}
		outer->sq_flags = flags;
		mutex_exit(SQLOCK(outer));
		return;
	} else {
		outer->sq_flags = flags;
		mutex_exit(SQLOCK(outer));
	}

	/* Can run it immediately */
	(*func)(q, mp);

	outer_exit(outer);
}

/*
 * Dequeue all writer callbacks from the outer perimeter and run them.
 */
static void
write_now(syncq_t *outer)
{
	mblk_t *mp;
	queue_t *q;
	void (*func)();

	ASSERT(MUTEX_HELD(SQLOCK(outer)));
	ASSERT(outer->sq_outer == NULL && outer->sq_onext != NULL &&
	    outer->sq_oprev != NULL);
	while ((mp = outer->sq_evhead) != NULL) {
		/*
		 * Queues cannot be placed on the queuelist on the outer
		 * perimeter.
		 */
		ASSERT(!(outer->sq_flags & SQ_MESSAGES));
		ASSERT((outer->sq_flags & SQ_EVENTS));

		outer->sq_evhead = mp->b_next;
		if (outer->sq_evhead == NULL) {
			outer->sq_evtail = NULL;
			outer->sq_flags &= ~SQ_EVENTS;
		}
		ASSERT(outer->sq_count != 0);
		outer->sq_count--;	/* Incremented when enqueued. */
		mutex_exit(SQLOCK(outer));
		/*
		 * Drop the message if the queue is closing.
		 * Make sure that the queue is "claimed" when the callback
		 * is run in order to satisfy various ASSERTs.
		 */
		q = mp->b_queue;
		func = (void (*)())mp->b_prev;
		ASSERT(func != NULL);
		mp->b_next = mp->b_prev = NULL;
		if (q->q_flag & QWCLOSE) {
			freemsg(mp);
		} else {
			claimq(q);
			(*func)(q, mp);
			releaseq(q);
		}
		mutex_enter(SQLOCK(outer));
	}
	ASSERT(MUTEX_HELD(SQLOCK(outer)));
}

/*
 * The list of messages on the inner syncq is effectively hashed
 * by destination queue. These destination queues are doubly
 * linked lists (hopefully) in priority order. Messages are then
 * put on the queue referenced by the q_sqhead/q_sqtail elements.
 * Additional messages are linked together by the b_next/b_prev
 * elements in the mblk, with (similar to putq()) the first message
 * having a NULL b_prev and the last message having a NULL b_next.
5983 * 5984 * Events, such as qwriter callbacks, are put onto a list in FIFO 5985 * order referenced by sq_evhead, and sq_evtail. This is a singly 5986 * linked list, and messages here MUST be processed in the order queued. 5987 */ 5988 5989 /* 5990 * Run the events on the syncq event list (sq_evhead). 5991 * Assumes there is only one claim on the syncq, it is 5992 * already exclusive (SQ_EXCL set), and the SQLOCK held. 5993 * Messages here are processed in order, with the SQ_EXCL bit 5994 * held all the way through till the last message is processed. 5995 */ 5996 void 5997 sq_run_events(syncq_t *sq) 5998 { 5999 mblk_t *bp; 6000 queue_t *qp; 6001 uint16_t flags = sq->sq_flags; 6002 void (*func)(); 6003 6004 ASSERT(MUTEX_HELD(SQLOCK(sq))); 6005 ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL && 6006 sq->sq_oprev == NULL) || 6007 (sq->sq_outer != NULL && sq->sq_onext != NULL && 6008 sq->sq_oprev != NULL)); 6009 6010 ASSERT(flags & SQ_EXCL); 6011 ASSERT(sq->sq_count == 1); 6012 6013 /* 6014 * We need to process all of the events on this list. It 6015 * is possible that new events will be added while we are 6016 * away processing a callback, so on every loop, we start 6017 * back at the beginning of the list. 6018 */ 6019 /* 6020 * We have to reaccess sq_evhead since there is a 6021 * possibility of a new entry while we were running 6022 * the callback. 6023 */ 6024 for (bp = sq->sq_evhead; bp != NULL; bp = sq->sq_evhead) { 6025 ASSERT(bp->b_queue->q_syncq == sq); 6026 ASSERT(sq->sq_flags & SQ_EVENTS); 6027 6028 qp = bp->b_queue; 6029 func = (void (*)())bp->b_prev; 6030 ASSERT(func != NULL); 6031 6032 /* 6033 * Messages from the event queue must be taken off in 6034 * FIFO order. 6035 */ 6036 ASSERT(sq->sq_evhead == bp); 6037 sq->sq_evhead = bp->b_next; 6038 6039 if (bp->b_next == NULL) { 6040 /* Deleting last */ 6041 ASSERT(sq->sq_evtail == bp); 6042 sq->sq_evtail = NULL; 6043 sq->sq_flags &= ~SQ_EVENTS; 6044 } 6045 bp->b_prev = bp->b_next = NULL; 6046 ASSERT(bp->b_datap->db_ref != 0); 6047 6048 mutex_exit(SQLOCK(sq)); 6049 6050 (*func)(qp, bp); 6051 6052 mutex_enter(SQLOCK(sq)); 6053 /* 6054 * re-read the flags, since they could have changed. 6055 */ 6056 flags = sq->sq_flags; 6057 ASSERT(flags & SQ_EXCL); 6058 } 6059 ASSERT(sq->sq_evhead == NULL && sq->sq_evtail == NULL); 6060 ASSERT(!(sq->sq_flags & SQ_EVENTS)); 6061 6062 if (flags & SQ_WANTWAKEUP) { 6063 flags &= ~SQ_WANTWAKEUP; 6064 cv_broadcast(&sq->sq_wait); 6065 } 6066 if (flags & SQ_WANTEXWAKEUP) { 6067 flags &= ~SQ_WANTEXWAKEUP; 6068 cv_broadcast(&sq->sq_exitwait); 6069 } 6070 sq->sq_flags = flags; 6071 } 6072 6073 /* 6074 * Put messages on the event list. 6075 * If we can go exclusive now, do so and process the event list, otherwise 6076 * let the last claim service this list (or wake the sqthread). 6077 * This procedure assumes SQLOCK is held. To run the event list, it 6078 * must be called with no claims. 6079 */ 6080 static void 6081 sqfill_events(syncq_t *sq, queue_t *q, mblk_t *mp, void (*func)()) 6082 { 6083 uint16_t count; 6084 6085 ASSERT(MUTEX_HELD(SQLOCK(sq))); 6086 ASSERT(func != NULL); 6087 6088 /* 6089 * This is a callback. Add it to the list of callbacks 6090 * and see about upgrading. 
 */
	mp->b_prev = (mblk_t *)func;
	mp->b_queue = q;
	mp->b_next = NULL;
	if (sq->sq_evhead == NULL) {
		sq->sq_evhead = sq->sq_evtail = mp;
		sq->sq_flags |= SQ_EVENTS;
	} else {
		ASSERT(sq->sq_evtail != NULL);
		ASSERT(sq->sq_evtail->b_next == NULL);
		ASSERT(sq->sq_flags & SQ_EVENTS);
		sq->sq_evtail->b_next = mp;
		sq->sq_evtail = mp;
	}
	/*
	 * We have set SQ_EVENTS, so threads will have to
	 * unwind out of the perimeter, and new entries will
	 * not grab a putlock. But we still need to know
	 * how many threads have already made a claim to the
	 * syncq, so grab the putlocks, and sum the counts.
	 * If there are no claims on the syncq, we can upgrade
	 * to exclusive, and run the event list.
	 * NOTE: We hold the SQLOCK, so we can just grab the
	 * putlocks.
	 */
	count = sq->sq_count;
	SQ_PUTLOCKS_ENTER(sq);
	SUM_SQ_PUTCOUNTS(sq, count);
	/*
	 * If there are any claims on the syncq (this thread holds none at
	 * this point), we cannot upgrade; the thread holding the claim
	 * should drain the syncq instead.
	 */
	if (count > 0) {
		/*
		 * Can't upgrade - other threads inside.
		 */
		SQ_PUTLOCKS_EXIT(sq);
		mutex_exit(SQLOCK(sq));
		return;
	}
	/*
	 * Need to set SQ_EXCL and make a claim on the syncq.
	 */
	ASSERT((sq->sq_flags & SQ_EXCL) == 0);
	sq->sq_flags |= SQ_EXCL;
	ASSERT(sq->sq_count == 0);
	sq->sq_count++;
	SQ_PUTLOCKS_EXIT(sq);

	/* Process the events list */
	sq_run_events(sq);

	/*
	 * Release our claim...
	 */
	sq->sq_count--;

	/*
	 * And release SQ_EXCL.
	 * We don't need to acquire the putlocks to release
	 * SQ_EXCL, since we are exclusive, and hold the SQLOCK.
	 */
	sq->sq_flags &= ~SQ_EXCL;

	/*
	 * We should no longer be exclusive.
	 */
	ASSERT(!(sq->sq_flags & SQ_EXCL));

	/*
	 * If anything happened while we were running the
	 * events (or was there before), we need to process
	 * it now. We shouldn't be exclusive since we
	 * released the perimeter above (plus, we asserted
	 * for it).
	 */
	if (!(sq->sq_flags & SQ_STAYAWAY) && (sq->sq_flags & SQ_QUEUED))
		drain_syncq(sq);
	else
		mutex_exit(SQLOCK(sq));
}

/*
 * Perform delayed processing. The caller has to make sure that it is safe
 * to enter the syncq (e.g. by checking that none of the SQ_STAYAWAY bits are
 * set).
 *
 * Assume that the caller has NO claims on the syncq. However, a claim
 * on the syncq does not indicate that a thread is draining the syncq.
 * There may be more claims on the syncq than there are threads draining
 * (i.e. #_threads_draining <= sq_count).
 *
 * drain_syncq has to terminate when one of the SQ_STAYAWAY bits gets set
 * in order to preserve qwriter(OUTER) ordering constraints.
 *
 * sq_putcount only needs to be checked when dispatching the queued
 * writer call for a CIPUT sync queue, but this is handled in sq_run_events.
 */
void
drain_syncq(syncq_t *sq)
{
	queue_t *qp;
	uint16_t count;
	uint16_t type = sq->sq_type;
	uint16_t flags = sq->sq_flags;
	boolean_t bg_service = sq->sq_svcflags & SQ_SERVICE;

	TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START,
	    "drain_syncq start:%p", sq);
	ASSERT(MUTEX_HELD(SQLOCK(sq)));
	ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
	    sq->sq_oprev == NULL) ||
	    (sq->sq_outer != NULL && sq->sq_onext != NULL &&
	    sq->sq_oprev != NULL));

	/*
	 * Drop SQ_SERVICE flag.
	 */
	if (bg_service)
		sq->sq_svcflags &= ~SQ_SERVICE;

	/*
	 * If SQ_EXCL is set, someone else is processing this syncq - let it
	 * finish the job.
	 */
	if (flags & SQ_EXCL) {
		if (bg_service) {
			ASSERT(sq->sq_servcount != 0);
			sq->sq_servcount--;
		}
		mutex_exit(SQLOCK(sq));
		return;
	}

	/*
	 * This routine can be called by a background thread if
	 * it was scheduled by a high-priority thread. So, if there are
	 * no messages queued, return (remember, we have the SQLOCK,
	 * and it cannot change until we release it). Also wake up any
	 * waiters.
	 */
	if (!(flags & SQ_QUEUED)) {
		if (flags & SQ_WANTWAKEUP) {
			flags &= ~SQ_WANTWAKEUP;
			cv_broadcast(&sq->sq_wait);
		}
		if (flags & SQ_WANTEXWAKEUP) {
			flags &= ~SQ_WANTEXWAKEUP;
			cv_broadcast(&sq->sq_exitwait);
		}
		sq->sq_flags = flags;
		if (bg_service) {
			ASSERT(sq->sq_servcount != 0);
			sq->sq_servcount--;
		}
		mutex_exit(SQLOCK(sq));
		return;
	}

	/*
	 * If this is not a concurrent put perimeter, we need to
	 * become exclusive to drain. Also, if not CIPUT, we would
	 * not have acquired a putlock, so we don't need to check
	 * the putcounts. If not entering with a claim, we test
	 * for sq_count == 0.
	 */
	type = sq->sq_type;
	if (!(type & SQ_CIPUT)) {
		if (sq->sq_count > 1) {
			if (bg_service) {
				ASSERT(sq->sq_servcount != 0);
				sq->sq_servcount--;
			}
			mutex_exit(SQLOCK(sq));
			return;
		}
		sq->sq_flags |= SQ_EXCL;
	}

	/*
	 * This is where we make a claim to the syncq.
	 * This can either be done by incrementing a putlock, or
	 * the sq_count. But since we already have the SQLOCK
	 * here, we just bump the sq_count.
	 *
	 * Note that after we make a claim, we need to let the code
	 * fall through to the end of this routine to clean itself
	 * up. A return in the while loop will put the syncq in a
	 * very bad state.
	 */
	sq->sq_count++;
	ASSERT(sq->sq_count != 0);	/* wraparound */

	while ((flags = sq->sq_flags) & SQ_QUEUED) {
		/*
		 * If we are told to stayaway or went exclusive,
		 * we are done.
		 */
		if (flags & (SQ_STAYAWAY)) {
			break;
		}

		/*
		 * If there are events to run, do so.
		 * We have one claim to the syncq, so if there are
		 * more than one, other threads are running.
		 */
		if (sq->sq_evhead != NULL) {
			ASSERT(sq->sq_flags & SQ_EVENTS);

			count = sq->sq_count;
			SQ_PUTLOCKS_ENTER(sq);
			SUM_SQ_PUTCOUNTS(sq, count);
			if (count > 1) {
				SQ_PUTLOCKS_EXIT(sq);
				/* Can't upgrade - other threads inside */
				break;
			}
			ASSERT((flags & SQ_EXCL) == 0);
			sq->sq_flags = flags | SQ_EXCL;
			SQ_PUTLOCKS_EXIT(sq);
			/*
			 * We have the only claim; run the events,
			 * sq_run_events will clear the SQ_EXCL flag.
			 */
			sq_run_events(sq);

			/*
			 * If this is a CIPUT perimeter, we need
			 * to drop the SQ_EXCL flag so we can properly
			 * continue draining the syncq.
			 */
			if (type & SQ_CIPUT) {
				ASSERT(sq->sq_flags & SQ_EXCL);
				sq->sq_flags &= ~SQ_EXCL;
			}

			/*
			 * And go back to the beginning just in case
			 * anything changed while we were away.
			 */
			ASSERT((sq->sq_flags & SQ_EXCL) || (type & SQ_CIPUT));
			continue;
		}

		ASSERT(sq->sq_evhead == NULL);
		ASSERT(!(sq->sq_flags & SQ_EVENTS));

		/*
		 * Find the queue that is not draining.
		 *
		 * q_draining is protected by QLOCK which we do not hold.
		 * But if it was set, then a thread was draining, and if it
		 * gets cleared, then it was because the thread has
		 * successfully drained the syncq, or a GOAWAY state occurred.
		 * For the GOAWAY state to happen, a thread needs the SQLOCK
		 * which we hold, and if there was such a flag, we would have
		 * already seen it.
		 */

		for (qp = sq->sq_head;
		    qp != NULL && (qp->q_draining ||
		    (qp->q_sqflags & Q_SQDRAINING));
		    qp = qp->q_sqnext)
			;

		if (qp == NULL)
			break;

		/*
		 * We have a queue to work on, and we hold the
		 * SQLOCK and one claim, call qdrain_syncq.
		 * This means we need to release the SQLOCK and
		 * acquire the QLOCK (OK since we have a claim).
		 * Note that qdrain_syncq will actually dequeue
		 * this queue from the sq_head list when it is
		 * convinced all the work is done and release
		 * the QLOCK before returning.
		 */
		qp->q_sqflags |= Q_SQDRAINING;
		mutex_exit(SQLOCK(sq));
		mutex_enter(QLOCK(qp));
		qdrain_syncq(sq, qp);
		mutex_enter(SQLOCK(sq));

		/* The queue is drained */
		ASSERT(qp->q_sqflags & Q_SQDRAINING);
		qp->q_sqflags &= ~Q_SQDRAINING;
		/*
		 * NOTE: After this point qp should not be used since it may
		 * be closed.
		 */
	}

	ASSERT(MUTEX_HELD(SQLOCK(sq)));
	flags = sq->sq_flags;

	/*
	 * sq->sq_head cannot change because we hold the
	 * SQLOCK. However, a thread CAN decide that it is no longer
	 * going to drain that queue. If so, it should be due to
	 * a GOAWAY state, and we should see that here.
	 *
	 * This loop is not very efficient. One solution may be adding a
	 * second pointer to the "draining" queue, but it is difficult to do
	 * when queues are inserted in the middle due to priority ordering.
	 * Another possibility is to yank the queue out of the sq list and
	 * put it onto the "draining list" and then put it back if it can't
	 * be drained.
	 */

	ASSERT((sq->sq_head == NULL) || (flags & SQ_GOAWAY) ||
	    (type & SQ_CI) || sq->sq_head->q_draining);

	/* Drop SQ_EXCL for non-CIPUT perimeters */
	if (!(type & SQ_CIPUT))
		flags &= ~SQ_EXCL;
	ASSERT((flags & SQ_EXCL) == 0);

	/* Wake up any waiters. */
	if (flags & SQ_WANTWAKEUP) {
		flags &= ~SQ_WANTWAKEUP;
		cv_broadcast(&sq->sq_wait);
	}
	if (flags & SQ_WANTEXWAKEUP) {
		flags &= ~SQ_WANTEXWAKEUP;
		cv_broadcast(&sq->sq_exitwait);
	}
	sq->sq_flags = flags;

	ASSERT(sq->sq_count != 0);
	/* Release our claim. */
	sq->sq_count--;

	if (bg_service) {
		ASSERT(sq->sq_servcount != 0);
		sq->sq_servcount--;
	}

	mutex_exit(SQLOCK(sq));

	TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END,
	    "drain_syncq end:%p", sq);
}


/*
 * qdrain_syncq can be called (currently) from only one of two places:
 *	drain_syncq
 *	putnext (or some variation of it).
 * and eventually
 *	qwait(_sig)
 *
 * If called from drain_syncq, we found it in the list of queues needing
 * service, so there is work to be done (or it wouldn't be on the list).
 *
 * If called from some putnext variation, it was because the
 * perimeter is open, but messages are blocking a putnext and
 * there is not a thread working on it. Now a thread could start
 * working on it while we are getting ready to do so ourselves, but
 * the thread would set the q_draining flag, and we can spin out.
 *
 * As for qwait(_sig), I think I shall let it continue to call
 * drain_syncq directly (after all, it will get here eventually).
 *
 * qdrain_syncq has to terminate when:
 * - one of the SQ_STAYAWAY bits gets set to preserve qwriter(OUTER) ordering
 * - SQ_EVENTS gets set to preserve qwriter(INNER) ordering
 *
 * ASSUMES:
 *	One claim
 *	QLOCK held
 *	SQLOCK not held
 *	Will release QLOCK before returning
 */
void
qdrain_syncq(syncq_t *sq, queue_t *q)
{
	mblk_t *bp;
	boolean_t do_clr;
#ifdef DEBUG
	uint16_t count;
#endif

	TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_START,
	    "drain_syncq start:%p", sq);
	ASSERT(q->q_syncq == sq);
	ASSERT(MUTEX_HELD(QLOCK(q)));
	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
	/*
	 * For non-CIPUT perimeters, we should be called with the
	 * exclusive bit set already. For CIPUT perimeters we
	 * will be doing a concurrent drain, so it better not be set.
	 */
	ASSERT((sq->sq_flags & (SQ_EXCL|SQ_CIPUT)));
	ASSERT(!((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL)));
	ASSERT((sq->sq_type & SQ_CIPUT) || (sq->sq_flags & SQ_EXCL));
	/*
	 * All outer pointers are set, or none of them are.
	 */
	ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
	    sq->sq_oprev == NULL) ||
	    (sq->sq_outer != NULL && sq->sq_onext != NULL &&
	    sq->sq_oprev != NULL));
#ifdef DEBUG
	count = sq->sq_count;
	/*
	 * This is OK without the putlocks, because we have one
	 * claim either from the sq_count, or a putcount. We could
	 * get an erroneous value from other counts, but ours won't
	 * change, so one way or another, we will have at least a
	 * value of one.
	 */
	SUM_SQ_PUTCOUNTS(sq, count);
	ASSERT(count >= 1);
#endif /* DEBUG */

	/*
	 * The first thing to do here is find out if a thread is already
	 * draining this queue or the queue is closing. If so, we are done,
	 * just return. Also, if there are no messages, we are done as well.
	 * Note that we check the q_sqhead since there is a window of
	 * opportunity for us to enter here because Q_SQQUEUED was set, but
	 * is not anymore.
	 */
	if (q->q_draining || (q->q_sqhead == NULL)) {
		mutex_exit(QLOCK(q));
		return;
	}

	/*
	 * If the perimeter is exclusive, there is nothing we can
	 * do right now, go away.
	 * Note that there is nothing to prevent this case from changing
	 * right after this check, but the spin-out will catch it.
	 */

	/* Tell other threads that we are draining this queue */
	q->q_draining = 1;	/* Protected by QLOCK */

	for (bp = q->q_sqhead; bp != NULL; bp = q->q_sqhead) {

		/*
		 * Because we can enter this routine just because
		 * a putnext is blocked, we need to spin out if
		 * the perimeter wants to go exclusive as well
		 * as just blocked. We need to spin out also if
		 * events are queued on the syncq.
		 * Don't check for SQ_EXCL, because non-CIPUT
		 * perimeters would set it, and it can't become
		 * exclusive while we hold a claim.
		 */
		if (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS)) {
			break;
		}

#ifdef DEBUG
		/*
		 * Since we are in qdrain_syncq, we already know the queue,
		 * but for sanity, we want to check this against the qp that
		 * was passed in by bp->b_queue.
		 */

		ASSERT(bp->b_queue == q);
		ASSERT(bp->b_queue->q_syncq == sq);
		bp->b_queue = NULL;

		/*
		 * We would have the following check in the DEBUG code:
		 *
		 * if (bp->b_prev != NULL) {
		 *	ASSERT(bp->b_prev == (void (*)())q->q_qinfo->qi_putp);
		 * }
		 *
		 * This can't be done, however, since IP modifies qinfo
		 * structure at run-time (switching between IPv4 qinfo and
		 * IPv6 qinfo), invalidating the check.
		 * So the assignment to func is left here, but the ASSERT
		 * itself is removed until the whole issue is resolved.
		 */
#endif
		ASSERT(q->q_sqhead == bp);
		q->q_sqhead = bp->b_next;
		bp->b_prev = bp->b_next = NULL;
		ASSERT(q->q_syncqmsgs > 0);
		mutex_exit(QLOCK(q));

		ASSERT(bp->b_datap->db_ref != 0);

		(void) (*q->q_qinfo->qi_putp)(q, bp);

		mutex_enter(QLOCK(q));
		/*
		 * We should decrement q_syncqmsgs only after executing the
		 * put procedure to avoid a possible race with putnext().
		 * Although putnext() sees that Q_SQQUEUED is set, there is
		 * an optimization which allows putnext to call the put
		 * procedure directly if (q_syncqmsgs == 0), and thus
		 * a message reordering could otherwise occur.
		 */
		q->q_syncqmsgs--;

		/*
		 * Clear QFULL in the next service procedure queue if
		 * this is the last message destined for that queue.
		 *
		 * It would make better sense to have some sort of
		 * tunable for the low water mark, but these semantics
		 * are not yet defined. So, alas, we use a constant.
		 */
		do_clr = (q->q_syncqmsgs == 0);
		mutex_exit(QLOCK(q));

		if (do_clr)
			clr_qfull(q);

		mutex_enter(QLOCK(q));
		/*
		 * Always clear SQ_EXCL when CIPUT in order to handle
		 * qwriter(INNER). The put procedure can call qwriter and
		 * get exclusive access IFF this is the only claim. So, we
		 * need to test for this possibility, acquire the mutex,
		 * and clear the bit.
		 */
		if ((sq->sq_type & SQ_CIPUT) && (sq->sq_flags & SQ_EXCL)) {
			mutex_enter(SQLOCK(sq));
			sq->sq_flags &= ~SQ_EXCL;
			mutex_exit(SQLOCK(sq));
		}
	}

	/*
	 * We should either have no messages left on this queue, or we were
	 * told to goaway by a waiter (which we will wake up at the
	 * end of this function).
 */
	ASSERT((q->q_sqhead == NULL) ||
	    (sq->sq_flags & (SQ_STAYAWAY | SQ_EVENTS)));

	ASSERT(MUTEX_HELD(QLOCK(q)));
	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));

	/*
	 * Remove the q from the syncq list if all the messages are
	 * drained.
	 */
	if (q->q_sqhead == NULL) {
		mutex_enter(SQLOCK(sq));
		if (q->q_sqflags & Q_SQQUEUED)
			SQRM_Q(sq, q);
		mutex_exit(SQLOCK(sq));
		/*
		 * Since the queue is removed from the list, reset its
		 * priority.
		 */
		q->q_spri = 0;
	}

	/*
	 * Remember, the q_draining flag is used to let another
	 * thread know that there is a thread currently draining
	 * the messages for a queue. Since we are now done with
	 * this queue (even if there may be messages still there),
	 * we need to clear this flag so some thread will work
	 * on it if needed.
	 */
	ASSERT(q->q_draining);
	q->q_draining = 0;

	/* Called with a claim, so OK to drop all locks. */
	mutex_exit(QLOCK(q));

	TRACE_1(TR_FAC_STREAMS_FR, TR_DRAIN_SYNCQ_END,
	    "drain_syncq end:%p", sq);
}
/* END OF QDRAIN_SYNCQ */


/*
 * This is the mate to qdrain_syncq, except that it is putting the
 * message onto the queue instead of draining. Since the
 * message is destined for the queue that is selected, there is
 * no need to identify the function because the message is
 * intended for the put routine for the queue. But this
 * routine will do it anyway just in case (but only for debug kernels).
 *
 * After the message is enqueued on the syncq, it calls putnext_tail()
 * which will schedule a background thread to actually process the message.
 *
 * Assumes that there is a claim on the syncq (sq->sq_count > 0) and
 * SQLOCK(sq) and QLOCK(q) are not held.
 */
void
qfill_syncq(syncq_t *sq, queue_t *q, mblk_t *mp)
{
	queue_t *fq = NULL;

	ASSERT(MUTEX_NOT_HELD(SQLOCK(sq)));
	ASSERT(MUTEX_NOT_HELD(QLOCK(q)));
	ASSERT(sq->sq_count > 0);
	ASSERT(q->q_syncq == sq);
	ASSERT((sq->sq_outer == NULL && sq->sq_onext == NULL &&
	    sq->sq_oprev == NULL) ||
	    (sq->sq_outer != NULL && sq->sq_onext != NULL &&
	    sq->sq_oprev != NULL));

	mutex_enter(QLOCK(q));

	/*
	 * Set QFULL in next service procedure queue (that cares) if not
	 * already set and if there are already more messages on the syncq
	 * than sq_max_size. If sq_max_size is 0, no flow control will be
	 * asserted on any syncq.
	 *
	 * The fq here is the next queue with a service procedure.
	 * This is where we would fail canputnext, so this is where we
	 * need to set QFULL.
	 *
	 * LOCKING HIERARCHY: In the case when fq != q we need to
	 *  a) Take QLOCK(fq) to set QFULL flag and
	 *  b) Take sd_reflock in the case of the hot stream to update
	 *	sd_refcnt.
	 * We already have QLOCK at this point. To avoid cross-locks with
	 * freezestr() which grabs all QLOCKs and with strlock() which grabs
	 * both SQLOCK and sd_reflock, we need to drop respective locks first.
 */
	if ((sq_max_size != 0) && (!(q->q_nfsrv->q_flag & QFULL)) &&
	    (q->q_syncqmsgs > sq_max_size)) {
		if ((fq = q->q_nfsrv) == q) {
			fq->q_flag |= QFULL;
		} else {
			mutex_exit(QLOCK(q));
			mutex_enter(QLOCK(fq));
			fq->q_flag |= QFULL;
			mutex_exit(QLOCK(fq));
			mutex_enter(QLOCK(q));
		}
	}

#ifdef DEBUG
	/*
	 * This is used for debug in the qfill_syncq/qdrain_syncq case
	 * to trace the queue that the message is intended for. Note
	 * that the original use was to identify the queue and function
	 * to call on the drain. In the new syncq, we have the context
	 * of the queue that we are draining, so call its putproc and
	 * don't rely on the saved values. But for debug this is still
	 * useful information.
	 */
	mp->b_prev = (mblk_t *)q->q_qinfo->qi_putp;
	mp->b_queue = q;
	mp->b_next = NULL;
#endif
	ASSERT(q->q_syncq == sq);
	/*
	 * Enqueue the message on the list.
	 * SQPUT_MP() accesses q_syncqmsgs. We are already holding QLOCK to
	 * protect it, so it's OK to acquire SQLOCK after SQPUT_MP().
	 */
	SQPUT_MP(q, mp);
	mutex_enter(SQLOCK(sq));

	/*
	 * And queue on syncq for scheduling, if not already queued.
	 * Note that we need the SQLOCK for this, and for testing flags
	 * at the end to see if we will drain. So grab it now, and
	 * release it before we call qdrain_syncq or return.
	 */
	if (!(q->q_sqflags & Q_SQQUEUED)) {
		q->q_spri = curthread->t_pri;
		SQPUT_Q(sq, q);
	}
#ifdef DEBUG
	else {
		/*
		 * All of these conditions MUST be true!
		 */
		ASSERT(sq->sq_tail != NULL);
		if (sq->sq_tail == sq->sq_head) {
			ASSERT((q->q_sqprev == NULL) &&
			    (q->q_sqnext == NULL));
		} else {
			ASSERT((q->q_sqprev != NULL) ||
			    (q->q_sqnext != NULL));
		}
		ASSERT(sq->sq_flags & SQ_QUEUED);
		ASSERT(q->q_syncqmsgs != 0);
		ASSERT(q->q_sqflags & Q_SQQUEUED);
	}
#endif
	mutex_exit(QLOCK(q));
	/*
	 * SQLOCK is still held, so sq_count can be safely decremented.
	 */
	sq->sq_count--;

	putnext_tail(sq, q, 0);
	/* Should not reference sq or q after this point. */
}

/* End of qfill_syncq */

/*
 * Remove all messages from a syncq (if qp is NULL) or remove all messages
 * that would be put into qp by drain_syncq.
 * Used when deleting the syncq (qp == NULL) or when detaching
 * a queue (qp != NULL).
 * Return non-zero if one or more messages were freed.
 *
 * No need to grab sq_putlocks here. See the comment in strsubr.h that
 * explains when sq_putlocks are used.
 *
 * NOTE: This function assumes that it is called from the close() context
 * and that all the queues in the syncq are going away. For this reason it
 * doesn't acquire QLOCK for modifying q_sqhead/q_sqtail fields. This
 * assumption is currently valid, but it is useful to rethink this function
 * to behave properly in other cases.
 */
int
flush_syncq(syncq_t *sq, queue_t *qp)
{
	mblk_t *bp, *mp_head, *mp_next, *mp_prev;
	queue_t *q;
	int ret = 0;

	mutex_enter(SQLOCK(sq));

	/*
	 * Before we leave, we need to make sure there are no
	 * events listed for this queue. All events for this queue
	 * will just be freed.
6828 */ 6829 if (qp != NULL && sq->sq_evhead != NULL) { 6830 ASSERT(sq->sq_flags & SQ_EVENTS); 6831 6832 mp_prev = NULL; 6833 for (bp = sq->sq_evhead; bp != NULL; bp = mp_next) { 6834 mp_next = bp->b_next; 6835 if (bp->b_queue == qp) { 6836 /* Delete this message */ 6837 if (mp_prev != NULL) { 6838 mp_prev->b_next = mp_next; 6839 /* 6840 * Update sq_evtail if the last element 6841 * is removed. 6842 */ 6843 if (bp == sq->sq_evtail) { 6844 ASSERT(mp_next == NULL); 6845 sq->sq_evtail = mp_prev; 6846 } 6847 } else 6848 sq->sq_evhead = mp_next; 6849 if (sq->sq_evhead == NULL) 6850 sq->sq_flags &= ~SQ_EVENTS; 6851 bp->b_prev = bp->b_next = NULL; 6852 freemsg(bp); 6853 ret++; 6854 } else { 6855 mp_prev = bp; 6856 } 6857 } 6858 } 6859 6860 /* 6861 * Walk sq_head and: 6862 * - match qp if qp is set, remove it's messages 6863 * - all if qp is not set 6864 */ 6865 q = sq->sq_head; 6866 while (q != NULL) { 6867 ASSERT(q->q_syncq == sq); 6868 if ((qp == NULL) || (qp == q)) { 6869 /* 6870 * Yank the messages as a list off the queue 6871 */ 6872 mp_head = q->q_sqhead; 6873 /* 6874 * We do not have QLOCK(q) here (which is safe due to 6875 * assumptions mentioned above). To obtain the lock we 6876 * need to release SQLOCK which may allow lots of things 6877 * to change upon us. This place requires more analysis. 6878 */ 6879 q->q_sqhead = q->q_sqtail = NULL; 6880 ASSERT(mp_head->b_queue && 6881 mp_head->b_queue->q_syncq == sq); 6882 6883 /* 6884 * Free each of the messages. 6885 */ 6886 for (bp = mp_head; bp != NULL; bp = mp_next) { 6887 mp_next = bp->b_next; 6888 bp->b_prev = bp->b_next = NULL; 6889 freemsg(bp); 6890 ret++; 6891 } 6892 /* 6893 * Now remove the queue from the syncq. 6894 */ 6895 ASSERT(q->q_sqflags & Q_SQQUEUED); 6896 SQRM_Q(sq, q); 6897 q->q_spri = 0; 6898 q->q_syncqmsgs = 0; 6899 6900 /* 6901 * If qp was specified, we are done with it and are 6902 * going to drop SQLOCK(sq) and return. We wakeup syncq 6903 * waiters while we still have the SQLOCK. 6904 */ 6905 if ((qp != NULL) && (sq->sq_flags & SQ_WANTWAKEUP)) { 6906 sq->sq_flags &= ~SQ_WANTWAKEUP; 6907 cv_broadcast(&sq->sq_wait); 6908 } 6909 /* Drop SQLOCK across clr_qfull */ 6910 mutex_exit(SQLOCK(sq)); 6911 6912 /* 6913 * We avoid doing the test that drain_syncq does and 6914 * unconditionally clear qfull for every flushed 6915 * message. Since flush_syncq is only called during 6916 * close this should not be a problem. 6917 */ 6918 clr_qfull(q); 6919 if (qp != NULL) { 6920 return (ret); 6921 } else { 6922 mutex_enter(SQLOCK(sq)); 6923 /* 6924 * The head was removed by SQRM_Q above. 6925 * reread the new head and flush it. 6926 */ 6927 q = sq->sq_head; 6928 } 6929 } else { 6930 q = q->q_sqnext; 6931 } 6932 ASSERT(MUTEX_HELD(SQLOCK(sq))); 6933 } 6934 6935 if (sq->sq_flags & SQ_WANTWAKEUP) { 6936 sq->sq_flags &= ~SQ_WANTWAKEUP; 6937 cv_broadcast(&sq->sq_wait); 6938 } 6939 6940 mutex_exit(SQLOCK(sq)); 6941 return (ret); 6942 } 6943 6944 /* 6945 * Propagate all messages from a syncq to the next syncq that are associated 6946 * with the specified queue. If the queue is attached to a driver or if the 6947 * messages have been added due to a qwriter(PERIM_INNER), free the messages. 6948 * 6949 * Assumes that the stream is strlock()'ed. We don't come here if there 6950 * are no messages to propagate. 6951 * 6952 * NOTE : If the queue is attached to a driver, all the messages are freed 6953 * as there is no point in propagating the messages from the driver syncq 6954 * to the closing stream head which will in turn get freed later. 
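 *
 * In rough sketch form, the non-driver case below splices the queue's
 * pending list onto the tail of the next queue's list (non-empty
 * destination shown; see the actual code for the empty case):
 *
 *	nqp->q_sqtail->b_next = qp->q_sqhead;	-- append old list
 *	nqp->q_sqtail = qp->q_sqtail;
 *	nqp->q_syncqmsgs += moved;		-- account for the move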
6955 */ 6956 static int 6957 propagate_syncq(queue_t *qp) 6958 { 6959 mblk_t *bp, *head, *tail, *prev, *next; 6960 syncq_t *sq; 6961 queue_t *nqp; 6962 syncq_t *nsq; 6963 boolean_t isdriver; 6964 int moved = 0; 6965 uint16_t flags; 6966 pri_t priority = curthread->t_pri; 6967 #ifdef DEBUG 6968 void (*func)(); 6969 #endif 6970 6971 sq = qp->q_syncq; 6972 ASSERT(MUTEX_HELD(SQLOCK(sq))); 6973 /* debug macro */ 6974 SQ_PUTLOCKS_HELD(sq); 6975 /* 6976 * As entersq() does not increment the sq_count for 6977 * the write side, check sq_count for non-QPERQ 6978 * perimeters alone. 6979 */ 6980 ASSERT((qp->q_flag & QPERQ) || (sq->sq_count >= 1)); 6981 6982 /* 6983 * propagate_syncq() can be called because of either messages on the 6984 * queue syncq or because on events on the queue syncq. Do actual 6985 * message propagations if there are any messages. 6986 */ 6987 if (qp->q_syncqmsgs) { 6988 isdriver = (qp->q_flag & QISDRV); 6989 6990 if (!isdriver) { 6991 nqp = qp->q_next; 6992 nsq = nqp->q_syncq; 6993 ASSERT(MUTEX_HELD(SQLOCK(nsq))); 6994 /* debug macro */ 6995 SQ_PUTLOCKS_HELD(nsq); 6996 #ifdef DEBUG 6997 func = (void (*)())nqp->q_qinfo->qi_putp; 6998 #endif 6999 } 7000 7001 SQRM_Q(sq, qp); 7002 priority = MAX(qp->q_spri, priority); 7003 qp->q_spri = 0; 7004 head = qp->q_sqhead; 7005 tail = qp->q_sqtail; 7006 qp->q_sqhead = qp->q_sqtail = NULL; 7007 qp->q_syncqmsgs = 0; 7008 7009 /* 7010 * Walk the list of messages, and free them if this is a driver, 7011 * otherwise reset the b_prev and b_queue value to the new putp. 7012 * Afterward, we will just add the head to the end of the next 7013 * syncq, and point the tail to the end of this one. 7014 */ 7015 7016 for (bp = head; bp != NULL; bp = next) { 7017 next = bp->b_next; 7018 if (isdriver) { 7019 bp->b_prev = bp->b_next = NULL; 7020 freemsg(bp); 7021 continue; 7022 } 7023 /* Change the q values for this message */ 7024 bp->b_queue = nqp; 7025 #ifdef DEBUG 7026 bp->b_prev = (mblk_t *)func; 7027 #endif 7028 moved++; 7029 } 7030 /* 7031 * Attach list of messages to the end of the new queue (if there 7032 * is a list of messages). 7033 */ 7034 7035 if (!isdriver && head != NULL) { 7036 ASSERT(tail != NULL); 7037 if (nqp->q_sqhead == NULL) { 7038 nqp->q_sqhead = head; 7039 } else { 7040 ASSERT(nqp->q_sqtail != NULL); 7041 nqp->q_sqtail->b_next = head; 7042 } 7043 nqp->q_sqtail = tail; 7044 /* 7045 * When messages are moved from high priority queue to 7046 * another queue, the destination queue priority is 7047 * upgraded. 7048 */ 7049 7050 if (priority > nqp->q_spri) 7051 nqp->q_spri = priority; 7052 7053 SQPUT_Q(nsq, nqp); 7054 7055 nqp->q_syncqmsgs += moved; 7056 ASSERT(nqp->q_syncqmsgs != 0); 7057 } 7058 } 7059 7060 /* 7061 * Before we leave, we need to make sure there are no 7062 * events listed for this queue. All events for this queue 7063 * will just be freed. 7064 */ 7065 if (sq->sq_evhead != NULL) { 7066 ASSERT(sq->sq_flags & SQ_EVENTS); 7067 prev = NULL; 7068 for (bp = sq->sq_evhead; bp != NULL; bp = next) { 7069 next = bp->b_next; 7070 if (bp->b_queue == qp) { 7071 /* Delete this message */ 7072 if (prev != NULL) { 7073 prev->b_next = next; 7074 /* 7075 * Update sq_evtail if the last element 7076 * is removed. 
7077 */ 7078 if (bp == sq->sq_evtail) { 7079 ASSERT(next == NULL); 7080 sq->sq_evtail = prev; 7081 } 7082 } else 7083 sq->sq_evhead = next; 7084 if (sq->sq_evhead == NULL) 7085 sq->sq_flags &= ~SQ_EVENTS; 7086 bp->b_prev = bp->b_next = NULL; 7087 freemsg(bp); 7088 } else { 7089 prev = bp; 7090 } 7091 } 7092 } 7093 7094 flags = sq->sq_flags; 7095 7096 /* Wake up any waiter before leaving. */ 7097 if (flags & SQ_WANTWAKEUP) { 7098 flags &= ~SQ_WANTWAKEUP; 7099 cv_broadcast(&sq->sq_wait); 7100 } 7101 sq->sq_flags = flags; 7102 7103 return (moved); 7104 } 7105 7106 /* 7107 * Try and upgrade to exclusive access at the inner perimeter. If this can 7108 * not be done without blocking then request will be queued on the syncq 7109 * and drain_syncq will run it later. 7110 * 7111 * This routine can only be called from put or service procedures plus 7112 * asynchronous callback routines that have properly entered to 7113 * queue (with entersq.) Thus qwriter_inner assumes the caller has one claim 7114 * on the syncq associated with q. 7115 */ 7116 void 7117 qwriter_inner(queue_t *q, mblk_t *mp, void (*func)()) 7118 { 7119 syncq_t *sq = q->q_syncq; 7120 uint16_t count; 7121 7122 mutex_enter(SQLOCK(sq)); 7123 count = sq->sq_count; 7124 SQ_PUTLOCKS_ENTER(sq); 7125 SUM_SQ_PUTCOUNTS(sq, count); 7126 ASSERT(count >= 1); 7127 ASSERT(sq->sq_type & (SQ_CIPUT|SQ_CISVC)); 7128 7129 if (count == 1) { 7130 /* 7131 * Can upgrade. This case also handles nested qwriter calls 7132 * (when the qwriter callback function calls qwriter). In that 7133 * case SQ_EXCL is already set. 7134 */ 7135 sq->sq_flags |= SQ_EXCL; 7136 SQ_PUTLOCKS_EXIT(sq); 7137 mutex_exit(SQLOCK(sq)); 7138 (*func)(q, mp); 7139 /* 7140 * Assumes that leavesq, putnext, and drain_syncq will reset 7141 * SQ_EXCL for SQ_CIPUT/SQ_CISVC queues. We leave SQ_EXCL on 7142 * until putnext, leavesq, or drain_syncq drops it. 7143 * That way we handle nested qwriter(INNER) without dropping 7144 * SQ_EXCL until the outermost qwriter callback routine is 7145 * done. 7146 */ 7147 return; 7148 } 7149 SQ_PUTLOCKS_EXIT(sq); 7150 sqfill_events(sq, q, mp, func); 7151 } 7152 7153 /* 7154 * Synchronous callback support functions 7155 */ 7156 7157 /* 7158 * Allocate a callback parameter structure. 7159 * Assumes that caller initializes the flags and the id. 7160 * Acquires SQLOCK(sq) if non-NULL is returned. 7161 */ 7162 callbparams_t * 7163 callbparams_alloc(syncq_t *sq, void (*func)(void *), void *arg, int kmflags) 7164 { 7165 callbparams_t *cbp; 7166 size_t size = sizeof (callbparams_t); 7167 7168 cbp = kmem_alloc(size, kmflags & ~KM_PANIC); 7169 7170 /* 7171 * Only try tryhard allocation if the caller is ready to panic. 7172 * Otherwise just fail. 
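	 *
	 * Caller-side sketch (my_cb and my_arg are hypothetical names,
	 * not identifiers from this file):
	 *
	 *	cbp = callbparams_alloc(sq, my_cb, my_arg, KM_NOSLEEP);
	 *	if (cbp == NULL)
	 *		return;		-- failed, SQLOCK(sq) is not held
	 *	-- success: SQLOCK(sq) is held; caller sets cbp_flags/cbp_id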
7173 */ 7174 if (cbp == NULL) { 7175 if (kmflags & KM_PANIC) 7176 cbp = kmem_alloc_tryhard(sizeof (callbparams_t), 7177 &size, kmflags); 7178 else 7179 return (NULL); 7180 } 7181 7182 ASSERT(size >= sizeof (callbparams_t)); 7183 cbp->cbp_size = size; 7184 cbp->cbp_sq = sq; 7185 cbp->cbp_func = func; 7186 cbp->cbp_arg = arg; 7187 mutex_enter(SQLOCK(sq)); 7188 cbp->cbp_next = sq->sq_callbpend; 7189 sq->sq_callbpend = cbp; 7190 return (cbp); 7191 } 7192 7193 void 7194 callbparams_free(syncq_t *sq, callbparams_t *cbp) 7195 { 7196 callbparams_t **pp, *p; 7197 7198 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7199 7200 for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) { 7201 if (p == cbp) { 7202 *pp = p->cbp_next; 7203 kmem_free(p, p->cbp_size); 7204 return; 7205 } 7206 } 7207 (void) (STRLOG(0, 0, 0, SL_CONSOLE, 7208 "callbparams_free: not found\n")); 7209 } 7210 7211 void 7212 callbparams_free_id(syncq_t *sq, callbparams_id_t id, int32_t flag) 7213 { 7214 callbparams_t **pp, *p; 7215 7216 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7217 7218 for (pp = &sq->sq_callbpend; (p = *pp) != NULL; pp = &p->cbp_next) { 7219 if (p->cbp_id == id && p->cbp_flags == flag) { 7220 *pp = p->cbp_next; 7221 kmem_free(p, p->cbp_size); 7222 return; 7223 } 7224 } 7225 (void) (STRLOG(0, 0, 0, SL_CONSOLE, 7226 "callbparams_free_id: not found\n")); 7227 } 7228 7229 /* 7230 * Callback wrapper function used by once-only callbacks that can be 7231 * cancelled (qtimeout and qbufcall) 7232 * Contains inline version of entersq(sq, SQ_CALLBACK) that can be 7233 * cancelled by the qun* functions. 7234 */ 7235 void 7236 qcallbwrapper(void *arg) 7237 { 7238 callbparams_t *cbp = arg; 7239 syncq_t *sq; 7240 uint16_t count = 0; 7241 uint16_t waitflags = SQ_STAYAWAY | SQ_EVENTS | SQ_EXCL; 7242 uint16_t type; 7243 7244 sq = cbp->cbp_sq; 7245 mutex_enter(SQLOCK(sq)); 7246 type = sq->sq_type; 7247 if (!(type & SQ_CICB)) { 7248 count = sq->sq_count; 7249 SQ_PUTLOCKS_ENTER(sq); 7250 SQ_PUTCOUNT_CLRFAST_LOCKED(sq); 7251 SUM_SQ_PUTCOUNTS(sq, count); 7252 sq->sq_needexcl++; 7253 ASSERT(sq->sq_needexcl != 0); /* wraparound */ 7254 waitflags |= SQ_MESSAGES; 7255 } 7256 /* Can not handle exlusive entry at outer perimeter */ 7257 ASSERT(type & SQ_COCB); 7258 7259 while ((sq->sq_flags & waitflags) || (!(type & SQ_CICB) &&count != 0)) { 7260 if ((sq->sq_callbflags & cbp->cbp_flags) && 7261 (sq->sq_cancelid == cbp->cbp_id)) { 7262 /* timeout has been cancelled */ 7263 sq->sq_callbflags |= SQ_CALLB_BYPASSED; 7264 callbparams_free(sq, cbp); 7265 if (!(type & SQ_CICB)) { 7266 ASSERT(sq->sq_needexcl > 0); 7267 sq->sq_needexcl--; 7268 if (sq->sq_needexcl == 0) { 7269 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 7270 } 7271 SQ_PUTLOCKS_EXIT(sq); 7272 } 7273 mutex_exit(SQLOCK(sq)); 7274 return; 7275 } 7276 sq->sq_flags |= SQ_WANTWAKEUP; 7277 if (!(type & SQ_CICB)) { 7278 SQ_PUTLOCKS_EXIT(sq); 7279 } 7280 cv_wait(&sq->sq_wait, SQLOCK(sq)); 7281 if (!(type & SQ_CICB)) { 7282 count = sq->sq_count; 7283 SQ_PUTLOCKS_ENTER(sq); 7284 SUM_SQ_PUTCOUNTS(sq, count); 7285 } 7286 } 7287 7288 sq->sq_count++; 7289 ASSERT(sq->sq_count != 0); /* Wraparound */ 7290 if (!(type & SQ_CICB)) { 7291 ASSERT(count == 0); 7292 sq->sq_flags |= SQ_EXCL; 7293 ASSERT(sq->sq_needexcl > 0); 7294 sq->sq_needexcl--; 7295 if (sq->sq_needexcl == 0) { 7296 SQ_PUTCOUNT_SETFAST_LOCKED(sq); 7297 } 7298 SQ_PUTLOCKS_EXIT(sq); 7299 } 7300 7301 mutex_exit(SQLOCK(sq)); 7302 7303 cbp->cbp_func(cbp->cbp_arg); 7304 7305 /* 7306 * We drop the lock only for leavesq to re-acquire it. 
7307 * Possible optimization is inline of leavesq. 7308 */ 7309 mutex_enter(SQLOCK(sq)); 7310 callbparams_free(sq, cbp); 7311 mutex_exit(SQLOCK(sq)); 7312 leavesq(sq, SQ_CALLBACK); 7313 } 7314 7315 /* 7316 * no need to grab sq_putlocks here. See comment in strsubr.h that 7317 * explains when sq_putlocks are used. 7318 * 7319 * sq_count (or one of the sq_putcounts) has already been 7320 * decremented by the caller, and if SQ_QUEUED, we need to call 7321 * drain_syncq (the global syncq drain). 7322 * If putnext_tail is called with the SQ_EXCL bit set, we are in 7323 * one of two states, non-CIPUT perimiter, and we need to clear 7324 * it, or we went exclusive in the put procedure. In any case, 7325 * we want to clear the bit now, and it is probably easier to do 7326 * this at the beginning of this function (remember, we hold 7327 * the SQLOCK). Lastly, if there are other messages queued 7328 * on the syncq (and not for our destination), enable the syncq 7329 * for background work. 7330 */ 7331 7332 /* ARGSUSED */ 7333 void 7334 putnext_tail(syncq_t *sq, queue_t *qp, uint32_t passflags) 7335 { 7336 uint16_t flags = sq->sq_flags; 7337 7338 ASSERT(MUTEX_HELD(SQLOCK(sq))); 7339 ASSERT(MUTEX_NOT_HELD(QLOCK(qp))); 7340 7341 /* Clear SQ_EXCL if set in passflags */ 7342 if (passflags & SQ_EXCL) { 7343 flags &= ~SQ_EXCL; 7344 } 7345 if (flags & SQ_WANTWAKEUP) { 7346 flags &= ~SQ_WANTWAKEUP; 7347 cv_broadcast(&sq->sq_wait); 7348 } 7349 if (flags & SQ_WANTEXWAKEUP) { 7350 flags &= ~SQ_WANTEXWAKEUP; 7351 cv_broadcast(&sq->sq_exitwait); 7352 } 7353 sq->sq_flags = flags; 7354 7355 /* 7356 * We have cleared SQ_EXCL if we were asked to, and started 7357 * the wakeup process for waiters. If there are no writers 7358 * then we need to drain the syncq if we were told to, or 7359 * enable the background thread to do it. 7360 */ 7361 if (!(flags & (SQ_STAYAWAY|SQ_EXCL))) { 7362 if ((passflags & SQ_QUEUED) || 7363 (sq->sq_svcflags & SQ_DISABLED)) { 7364 /* drain_syncq will take care of events in the list */ 7365 drain_syncq(sq); 7366 return; 7367 } else if (flags & SQ_QUEUED) { 7368 sqenable(sq); 7369 } 7370 } 7371 /* Drop the SQLOCK on exit */ 7372 mutex_exit(SQLOCK(sq)); 7373 TRACE_3(TR_FAC_STREAMS_FR, TR_PUTNEXT_END, 7374 "putnext_end:(%p, %p, %p) done", NULL, qp, sq); 7375 } 7376 7377 void 7378 set_qend(queue_t *q) 7379 { 7380 mutex_enter(QLOCK(q)); 7381 if (!O_SAMESTR(q)) 7382 q->q_flag |= QEND; 7383 else 7384 q->q_flag &= ~QEND; 7385 mutex_exit(QLOCK(q)); 7386 q = _OTHERQ(q); 7387 mutex_enter(QLOCK(q)); 7388 if (!O_SAMESTR(q)) 7389 q->q_flag |= QEND; 7390 else 7391 q->q_flag &= ~QEND; 7392 mutex_exit(QLOCK(q)); 7393 } 7394 7395 7396 void 7397 clr_qfull(queue_t *q) 7398 { 7399 queue_t *oq = q; 7400 7401 q = q->q_nfsrv; 7402 /* Fast check if there is any work to do before getting the lock. */ 7403 if ((q->q_flag & (QFULL|QWANTW)) == 0) { 7404 return; 7405 } 7406 7407 /* 7408 * Do not reset QFULL (and backenable) if the q_count is the reason 7409 * for QFULL being set. 
7410 */ 7411 mutex_enter(QLOCK(q)); 7412 /* 7413 * If both q_count and q_mblkcnt are less than the hiwat mark 7414 */ 7415 if ((q->q_count < q->q_hiwat) && (q->q_mblkcnt < q->q_hiwat)) { 7416 q->q_flag &= ~QFULL; 7417 /* 7418 * A little more confusing, how about this way: 7419 * if someone wants to write, 7420 * AND 7421 * both counts are less than the lowat mark 7422 * OR 7423 * the lowat mark is zero 7424 * THEN 7425 * backenable 7426 */ 7427 if ((q->q_flag & QWANTW) && 7428 (((q->q_count < q->q_lowat) && 7429 (q->q_mblkcnt < q->q_lowat)) || q->q_lowat == 0)) { 7430 q->q_flag &= ~QWANTW; 7431 mutex_exit(QLOCK(q)); 7432 backenable(oq, 0); 7433 } else 7434 mutex_exit(QLOCK(q)); 7435 } else 7436 mutex_exit(QLOCK(q)); 7437 } 7438 7439 /* 7440 * Set the forward service procedure pointer. 7441 * 7442 * Called at insert-time to cache a queue's next forward service procedure in 7443 * q_nfsrv; used by canput() and canputnext(). If the queue to be inserted 7444 * has a service procedure then q_nfsrv points to itself. If the queue to be 7445 * inserted does not have a service procedure, then q_nfsrv points to the next 7446 * queue forward that has a service procedure. If the queue is at the logical 7447 * end of the stream (driver for write side, stream head for the read side) 7448 * and does not have a service procedure, then q_nfsrv also points to itself. 7449 */ 7450 void 7451 set_nfsrv_ptr( 7452 queue_t *rnew, /* read queue pointer to new module */ 7453 queue_t *wnew, /* write queue pointer to new module */ 7454 queue_t *prev_rq, /* read queue pointer to the module above */ 7455 queue_t *prev_wq) /* write queue pointer to the module above */ 7456 { 7457 queue_t *qp; 7458 7459 if (prev_wq->q_next == NULL) { 7460 /* 7461 * Insert the driver, initialize the driver and stream head. 7462 * In this case, prev_rq/prev_wq should be the stream head. 7463 * _I_INSERT does not allow inserting a driver. Make sure 7464 * that it is not an insertion. 7465 */ 7466 ASSERT(!(rnew->q_flag & _QINSERTING)); 7467 wnew->q_nfsrv = wnew; 7468 if (rnew->q_qinfo->qi_srvp) 7469 rnew->q_nfsrv = rnew; 7470 else 7471 rnew->q_nfsrv = prev_rq; 7472 prev_rq->q_nfsrv = prev_rq; 7473 prev_wq->q_nfsrv = prev_wq; 7474 } else { 7475 /* 7476 * set up read side q_nfsrv pointer. This MUST be done 7477 * before setting the write side, because the setting of 7478 * the write side for a fifo may depend on it. 7479 * 7480 * Suppose we have a fifo that only has pipemod pushed. 7481 * pipemod has no read or write service procedures, so 7482 * nfsrv for both pipemod queues points to prev_rq (the 7483 * stream read head). Now push bufmod (which has only a 7484 * read service procedure). Doing the write side first, 7485 * wnew->q_nfsrv is set to pipemod's writeq nfsrv, which 7486 * is WRONG; the next queue forward from wnew with a 7487 * service procedure will be rnew, not the stream read head. 7488 * Since the downstream queue (which in the case of a fifo 7489 * is the read queue rnew) can affect upstream queues, it 7490 * needs to be done first. Setting up the read side first 7491 * sets nfsrv for both pipemod queues to rnew and then 7492 * when the write side is set up, wnew-q_nfsrv will also 7493 * point to rnew. 7494 */ 7495 if (rnew->q_qinfo->qi_srvp) { 7496 /* 7497 * use _OTHERQ() because, if this is a pipe, next 7498 * module may have been pushed from other end and 7499 * q_next could be a read queue. 
7500 */ 7501 qp = _OTHERQ(prev_wq->q_next); 7502 while (qp && qp->q_nfsrv != qp) { 7503 qp->q_nfsrv = rnew; 7504 qp = backq(qp); 7505 } 7506 rnew->q_nfsrv = rnew; 7507 } else 7508 rnew->q_nfsrv = prev_rq->q_nfsrv; 7509 7510 /* set up write side q_nfsrv pointer */ 7511 if (wnew->q_qinfo->qi_srvp) { 7512 wnew->q_nfsrv = wnew; 7513 7514 /* 7515 * For insertion, need to update nfsrv of the modules 7516 * above which do not have a service routine. 7517 */ 7518 if (rnew->q_flag & _QINSERTING) { 7519 for (qp = prev_wq; 7520 qp != NULL && qp->q_nfsrv != qp; 7521 qp = backq(qp)) { 7522 qp->q_nfsrv = wnew->q_nfsrv; 7523 } 7524 } 7525 } else { 7526 if (prev_wq->q_next == prev_rq) 7527 /* 7528 * Since prev_wq/prev_rq are the middle of a 7529 * fifo, wnew/rnew will also be the middle of 7530 * a fifo and wnew's nfsrv is same as rnew's. 7531 */ 7532 wnew->q_nfsrv = rnew->q_nfsrv; 7533 else 7534 wnew->q_nfsrv = prev_wq->q_next->q_nfsrv; 7535 } 7536 } 7537 } 7538 7539 /* 7540 * Reset the forward service procedure pointer; called at remove-time. 7541 */ 7542 void 7543 reset_nfsrv_ptr(queue_t *rqp, queue_t *wqp) 7544 { 7545 queue_t *tmp_qp; 7546 7547 /* Reset the write side q_nfsrv pointer for _I_REMOVE */ 7548 if ((rqp->q_flag & _QREMOVING) && (wqp->q_qinfo->qi_srvp != NULL)) { 7549 for (tmp_qp = backq(wqp); 7550 tmp_qp != NULL && tmp_qp->q_nfsrv == wqp; 7551 tmp_qp = backq(tmp_qp)) { 7552 tmp_qp->q_nfsrv = wqp->q_nfsrv; 7553 } 7554 } 7555 7556 /* reset the read side q_nfsrv pointer */ 7557 if (rqp->q_qinfo->qi_srvp) { 7558 if (wqp->q_next) { /* non-driver case */ 7559 tmp_qp = _OTHERQ(wqp->q_next); 7560 while (tmp_qp && tmp_qp->q_nfsrv == rqp) { 7561 /* Note that rqp->q_next cannot be NULL */ 7562 ASSERT(rqp->q_next != NULL); 7563 tmp_qp->q_nfsrv = rqp->q_next->q_nfsrv; 7564 tmp_qp = backq(tmp_qp); 7565 } 7566 } 7567 } 7568 } 7569 7570 /* 7571 * This routine should be called after all stream geometry changes to update 7572 * the stream head cached struio() rd/wr queue pointers. Note must be called 7573 * with the streamlock()ed. 7574 * 7575 * Note: only enables Synchronous STREAMS for a side of a Stream which has 7576 * an explicit synchronous barrier module queue. That is, a queue that 7577 * has specified a struio() type. 7578 */ 7579 static void 7580 strsetuio(stdata_t *stp) 7581 { 7582 queue_t *wrq; 7583 7584 if (stp->sd_flag & STPLEX) { 7585 /* 7586 * Not stremahead, but a mux, so no Synchronous STREAMS. 7587 */ 7588 stp->sd_struiowrq = NULL; 7589 stp->sd_struiordq = NULL; 7590 return; 7591 } 7592 /* 7593 * Scan the write queue(s) while synchronous 7594 * until we find a qinfo uio type specified. 7595 */ 7596 wrq = stp->sd_wrq->q_next; 7597 while (wrq) { 7598 if (wrq->q_struiot == STRUIOT_NONE) { 7599 wrq = 0; 7600 break; 7601 } 7602 if (wrq->q_struiot != STRUIOT_DONTCARE) 7603 break; 7604 if (! _SAMESTR(wrq)) { 7605 wrq = 0; 7606 break; 7607 } 7608 wrq = wrq->q_next; 7609 } 7610 stp->sd_struiowrq = wrq; 7611 /* 7612 * Scan the read queue(s) while synchronous 7613 * until we find a qinfo uio type specified. 7614 */ 7615 wrq = stp->sd_wrq->q_next; 7616 while (wrq) { 7617 if (_RD(wrq)->q_struiot == STRUIOT_NONE) { 7618 wrq = 0; 7619 break; 7620 } 7621 if (_RD(wrq)->q_struiot != STRUIOT_DONTCARE) 7622 break; 7623 if (! _SAMESTR(wrq)) { 7624 wrq = 0; 7625 break; 7626 } 7627 wrq = wrq->q_next; 7628 } 7629 stp->sd_struiordq = wrq ? 
_RD(wrq) : 0; 7630 } 7631 7632 /* 7633 * pass_wput, unblocks the passthru queues, so that 7634 * messages can arrive at muxs lower read queue, before 7635 * I_LINK/I_UNLINK is acked/nacked. 7636 */ 7637 static void 7638 pass_wput(queue_t *q, mblk_t *mp) 7639 { 7640 syncq_t *sq; 7641 7642 sq = _RD(q)->q_syncq; 7643 if (sq->sq_flags & SQ_BLOCKED) 7644 unblocksq(sq, SQ_BLOCKED, 0); 7645 putnext(q, mp); 7646 } 7647 7648 /* 7649 * Set up queues for the link/unlink. 7650 * Create a new queue and block it and then insert it 7651 * below the stream head on the lower stream. 7652 * This prevents any messages from arriving during the setq 7653 * as well as while the mux is processing the LINK/I_UNLINK. 7654 * The blocked passq is unblocked once the LINK/I_UNLINK has 7655 * been acked or nacked or if a message is generated and sent 7656 * down muxs write put procedure. 7657 * see pass_wput(). 7658 * 7659 * After the new queue is inserted, all messages coming from below are 7660 * blocked. The call to strlock will ensure that all activity in the stream head 7661 * read queue syncq is stopped (sq_count drops to zero). 7662 */ 7663 static queue_t * 7664 link_addpassthru(stdata_t *stpdown) 7665 { 7666 queue_t *passq; 7667 sqlist_t sqlist; 7668 7669 passq = allocq(); 7670 STREAM(passq) = STREAM(_WR(passq)) = stpdown; 7671 /* setq might sleep in allocator - avoid holding locks. */ 7672 setq(passq, &passthru_rinit, &passthru_winit, NULL, QPERQ, 7673 SQ_CI|SQ_CO, B_FALSE); 7674 claimq(passq); 7675 blocksq(passq->q_syncq, SQ_BLOCKED, 1); 7676 insertq(STREAM(passq), passq); 7677 7678 /* 7679 * Use strlock() to wait for the stream head sq_count to drop to zero 7680 * since we are going to change q_ptr in the stream head. Note that 7681 * insertq() doesn't wait for any syncq counts to drop to zero. 7682 */ 7683 sqlist.sqlist_head = NULL; 7684 sqlist.sqlist_index = 0; 7685 sqlist.sqlist_size = sizeof (sqlist_t); 7686 sqlist_insert(&sqlist, _RD(stpdown->sd_wrq)->q_syncq); 7687 strlock(stpdown, &sqlist); 7688 strunlock(stpdown, &sqlist); 7689 7690 releaseq(passq); 7691 return (passq); 7692 } 7693 7694 /* 7695 * Let messages flow up into the mux by removing 7696 * the passq. 7697 */ 7698 static void 7699 link_rempassthru(queue_t *passq) 7700 { 7701 claimq(passq); 7702 removeq(passq); 7703 releaseq(passq); 7704 freeq(passq); 7705 } 7706 7707 /* 7708 * Wait for the condition variable pointed to by `cvp' to be signaled, 7709 * or for `tim' milliseconds to elapse, whichever comes first. If `tim' 7710 * is negative, then there is no time limit. If `nosigs' is non-zero, 7711 * then the wait will be non-interruptible. 7712 * 7713 * Returns >0 if signaled, 0 if interrupted, or -1 upon timeout. 
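 *
 * For example (sketch; this mirrors strwaitmark() below), waiting up
 * to 100 milliseconds without allowing signals to interrupt:
 *
 *	if (str_cv_wait(&rq->q_wait, &stp->sd_lock, 100, 1) == -1)
 *		-- timed out; the state is still unknown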
7714 */ 7715 clock_t 7716 str_cv_wait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim, int nosigs) 7717 { 7718 clock_t ret, now, tick; 7719 7720 if (tim < 0) { 7721 if (nosigs) { 7722 cv_wait(cvp, mp); 7723 ret = 1; 7724 } else { 7725 ret = cv_wait_sig(cvp, mp); 7726 } 7727 } else if (tim > 0) { 7728 /* 7729 * convert milliseconds to clock ticks 7730 */ 7731 tick = MSEC_TO_TICK_ROUNDUP(tim); 7732 time_to_wait(&now, tick); 7733 if (nosigs) { 7734 ret = cv_timedwait(cvp, mp, now); 7735 } else { 7736 ret = cv_timedwait_sig(cvp, mp, now); 7737 } 7738 } else { 7739 ret = -1; 7740 } 7741 return (ret); 7742 } 7743 7744 /* 7745 * Wait until the stream head can determine if it is at the mark but 7746 * don't wait forever to prevent a race condition between the "mark" state 7747 * in the stream head and any mark state in the caller/user of this routine. 7748 * 7749 * This is used by sockets and for a socket it would be incorrect 7750 * to return a failure for SIOCATMARK when there is no data in the receive 7751 * queue and the marked urgent data is traveling up the stream. 7752 * 7753 * This routine waits until the mark is known by waiting for one of these 7754 * three events: 7755 * The stream head read queue becoming non-empty (including an EOF) 7756 * The STRATMARK flag being set. (Due to a MSGMARKNEXT message.) 7757 * The STRNOTATMARK flag being set (which indicates that the transport 7758 * has sent a MSGNOTMARKNEXT message to indicate that it is not at 7759 * the mark). 7760 * 7761 * The routine returns 1 if the stream is at the mark; 0 if it can 7762 * be determined that the stream is not at the mark. 7763 * If the wait times out and it can't determine 7764 * whether or not the stream might be at the mark the routine will return -1. 7765 * 7766 * Note: This routine should only be used when a mark is pending i.e., 7767 * in the socket case the SIGURG has been posted. 7768 * Note2: This can not wakeup just because synchronous streams indicate 7769 * that data is available since it is not possible to use the synchronous 7770 * streams interfaces to determine the b_flag value for the data queued below 7771 * the stream head. 7772 */ 7773 int 7774 strwaitmark(vnode_t *vp) 7775 { 7776 struct stdata *stp = vp->v_stream; 7777 queue_t *rq = _RD(stp->sd_wrq); 7778 int mark; 7779 7780 mutex_enter(&stp->sd_lock); 7781 while (rq->q_first == NULL && 7782 !(stp->sd_flag & (STRATMARK|STRNOTATMARK|STREOF))) { 7783 stp->sd_flag |= RSLEEP; 7784 7785 /* Wait for 100 milliseconds for any state change. */ 7786 if (str_cv_wait(&rq->q_wait, &stp->sd_lock, 100, 1) == -1) { 7787 mutex_exit(&stp->sd_lock); 7788 return (-1); 7789 } 7790 } 7791 if (stp->sd_flag & STRATMARK) 7792 mark = 1; 7793 else if (rq->q_first != NULL && (rq->q_first->b_flag & MSGMARK)) 7794 mark = 1; 7795 else 7796 mark = 0; 7797 7798 mutex_exit(&stp->sd_lock); 7799 return (mark); 7800 } 7801 7802 /* 7803 * Set a read side error. If persist is set change the socket error 7804 * to persistent. If errfunc is set install the function as the exported 7805 * error handler. 
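 *
 * Hedged usage sketch (illustrative values only): a transport that
 * wants reads to fail persistently with ECONNRESET would call
 *
 *	strsetrerror(vp, ECONNRESET, 1, NULL);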
7806 */ 7807 void 7808 strsetrerror(vnode_t *vp, int error, int persist, errfunc_t errfunc) 7809 { 7810 struct stdata *stp = vp->v_stream; 7811 7812 mutex_enter(&stp->sd_lock); 7813 stp->sd_rerror = error; 7814 if (error == 0 && errfunc == NULL) 7815 stp->sd_flag &= ~STRDERR; 7816 else 7817 stp->sd_flag |= STRDERR; 7818 if (persist) { 7819 stp->sd_flag &= ~STRDERRNONPERSIST; 7820 } else { 7821 stp->sd_flag |= STRDERRNONPERSIST; 7822 } 7823 stp->sd_rderrfunc = errfunc; 7824 if (error != 0 || errfunc != NULL) { 7825 cv_broadcast(&_RD(stp->sd_wrq)->q_wait); /* readers */ 7826 cv_broadcast(&stp->sd_wrq->q_wait); /* writers */ 7827 cv_broadcast(&stp->sd_monitor); /* ioctllers */ 7828 7829 mutex_exit(&stp->sd_lock); 7830 pollwakeup(&stp->sd_pollist, POLLERR); 7831 mutex_enter(&stp->sd_lock); 7832 7833 if (stp->sd_sigflags & S_ERROR) 7834 strsendsig(stp->sd_siglist, S_ERROR, 0, error); 7835 } 7836 mutex_exit(&stp->sd_lock); 7837 } 7838 7839 /* 7840 * Set a write side error. If persist is set change the socket error 7841 * to persistent. 7842 */ 7843 void 7844 strsetwerror(vnode_t *vp, int error, int persist, errfunc_t errfunc) 7845 { 7846 struct stdata *stp = vp->v_stream; 7847 7848 mutex_enter(&stp->sd_lock); 7849 stp->sd_werror = error; 7850 if (error == 0 && errfunc == NULL) 7851 stp->sd_flag &= ~STWRERR; 7852 else 7853 stp->sd_flag |= STWRERR; 7854 if (persist) { 7855 stp->sd_flag &= ~STWRERRNONPERSIST; 7856 } else { 7857 stp->sd_flag |= STWRERRNONPERSIST; 7858 } 7859 stp->sd_wrerrfunc = errfunc; 7860 if (error != 0 || errfunc != NULL) { 7861 cv_broadcast(&_RD(stp->sd_wrq)->q_wait); /* readers */ 7862 cv_broadcast(&stp->sd_wrq->q_wait); /* writers */ 7863 cv_broadcast(&stp->sd_monitor); /* ioctllers */ 7864 7865 mutex_exit(&stp->sd_lock); 7866 pollwakeup(&stp->sd_pollist, POLLERR); 7867 mutex_enter(&stp->sd_lock); 7868 7869 if (stp->sd_sigflags & S_ERROR) 7870 strsendsig(stp->sd_siglist, S_ERROR, 0, error); 7871 } 7872 mutex_exit(&stp->sd_lock); 7873 } 7874 7875 /* 7876 * Make the stream return 0 (EOF) when all data has been read. 7877 * No effect on write side. 
/*
 * Make the stream return 0 (EOF) when all data has been read.
 * No effect on the write side.
 */
void
strseteof(vnode_t *vp, int eof)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	if (!eof) {
		stp->sd_flag &= ~STREOF;
		mutex_exit(&stp->sd_lock);
		return;
	}
	stp->sd_flag |= STREOF;
	if (stp->sd_flag & RSLEEP) {
		stp->sd_flag &= ~RSLEEP;
		cv_broadcast(&_RD(stp->sd_wrq)->q_wait);
	}

	mutex_exit(&stp->sd_lock);
	pollwakeup(&stp->sd_pollist, POLLIN|POLLRDNORM);
	mutex_enter(&stp->sd_lock);

	if (stp->sd_sigflags & (S_INPUT|S_RDNORM))
		strsendsig(stp->sd_siglist, S_INPUT|S_RDNORM, 0, 0);
	mutex_exit(&stp->sd_lock);
}

void
strflushrq(vnode_t *vp, int flag)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	flushq(_RD(stp->sd_wrq), flag);
	mutex_exit(&stp->sd_lock);
}

void
strsetrputhooks(vnode_t *vp, uint_t flags,
    msgfunc_t protofunc, msgfunc_t miscfunc)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);

	if (protofunc == NULL)
		stp->sd_rprotofunc = strrput_proto;
	else
		stp->sd_rprotofunc = protofunc;

	if (miscfunc == NULL)
		stp->sd_rmiscfunc = strrput_misc;
	else
		stp->sd_rmiscfunc = miscfunc;

	if (flags & SH_CONSOL_DATA)
		stp->sd_rput_opt |= SR_CONSOL_DATA;
	else
		stp->sd_rput_opt &= ~SR_CONSOL_DATA;

	if (flags & SH_SIGALLDATA)
		stp->sd_rput_opt |= SR_SIGALLDATA;
	else
		stp->sd_rput_opt &= ~SR_SIGALLDATA;

	if (flags & SH_IGN_ZEROLEN)
		stp->sd_rput_opt |= SR_IGN_ZEROLEN;
	else
		stp->sd_rput_opt &= ~SR_IGN_ZEROLEN;

	mutex_exit(&stp->sd_lock);
}

void
strsetwputhooks(vnode_t *vp, uint_t flags, clock_t closetime)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);
	stp->sd_closetime = closetime;

	if (flags & SH_SIGPIPE)
		stp->sd_wput_opt |= SW_SIGPIPE;
	else
		stp->sd_wput_opt &= ~SW_SIGPIPE;
	if (flags & SH_RECHECK_ERR)
		stp->sd_wput_opt |= SW_RECHECK_ERR;
	else
		stp->sd_wput_opt &= ~SW_RECHECK_ERR;

	mutex_exit(&stp->sd_lock);
}

void
strsetrwputdatahooks(vnode_t *vp, msgfunc_t rdatafunc, msgfunc_t wdatafunc)
{
	struct stdata *stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);

	stp->sd_rputdatafunc = rdatafunc;
	stp->sd_wputdatafunc = wdatafunc;

	mutex_exit(&stp->sd_lock);
}
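/*
 * Illustrative sketch (not part of the original file): a stream-head
 * consumer, such as a socket module, installing its put hooks.  The
 * function name and the particular flag combination are assumptions;
 * passing NULL for either message function keeps the default
 * strrput_proto()/strrput_misc() handlers, as the code above shows,
 * and the closetime of 0 is arbitrary for this sketch.
 */
#ifdef STREAMS_EXAMPLES
static void
example_install_hooks(vnode_t *vp)
{
	/*
	 * Consolidate M_DATA messages and signal on every data
	 * arrival; keep the default M_PROTO and miscellaneous
	 * message handlers.
	 */
	strsetrputhooks(vp, SH_CONSOL_DATA | SH_SIGALLDATA, NULL, NULL);

	/* Generate SIGPIPE and recheck errors on the write side. */
	strsetwputhooks(vp, SH_SIGPIPE | SH_RECHECK_ERR, 0);
}
#endif	/* STREAMS_EXAMPLES */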
/* Used within the framework when the queue is already locked */
void
qenable_locked(queue_t *q)
{
	stdata_t *stp = STREAM(q);

	ASSERT(MUTEX_HELD(QLOCK(q)));

	if (!q->q_qinfo->qi_srvp)
		return;

	/*
	 * Do not place on the run queue if already enabled or closing.
	 */
	if (q->q_flag & (QWCLOSE|QENAB))
		return;

	/*
	 * Mark the queue enabled and place it on the run list if it is
	 * not already being serviced.  If it is being serviced, the
	 * runservice() function will detect that QENAB is set and call
	 * the service procedure before clearing the QINSERVICE flag.
	 */
	q->q_flag |= QENAB;
	if (q->q_flag & QINSERVICE)
		return;

	/* Record the time of qenable */
	q->q_qtstamp = lbolt;

	/*
	 * Put the queue in the stp list and schedule it for background
	 * processing unless it is already scheduled, or unless the
	 * stream head intends to process it in the foreground later
	 * (indicated by the STRS_WILLSERVICE flag).
	 */
	mutex_enter(&stp->sd_qlock);
	/*
	 * If there is already something on the list, the stp flags
	 * should show the intention to drain it.
	 */
	IMPLY(STREAM_NEEDSERVICE(stp),
	    (stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED)));

	ENQUEUE(q, stp->sd_qhead, stp->sd_qtail, q_link);
	stp->sd_nqueues++;

	/*
	 * If no one will drain this stream, we are the first producer
	 * and need to schedule it for the background thread.
	 */
	if (!(stp->sd_svcflags & (STRS_WILLSERVICE | STRS_SCHEDULED))) {
		/*
		 * No one will service this stream later, so we have to
		 * schedule it now.
		 */
		STRSTAT(stenables);
		stp->sd_svcflags |= STRS_SCHEDULED;
		stp->sd_servid = (void *)taskq_dispatch(streams_taskq,
		    (task_func_t *)stream_service, stp,
		    TQ_NOSLEEP|TQ_NOQUEUE);

		if (stp->sd_servid == NULL) {
			/*
			 * The task queue dispatch failed, so fail over
			 * to the backup servicing thread.
			 */
			STRSTAT(taskqfails);
			/*
			 * It is safe to clear the STRS_SCHEDULED flag
			 * because it was set by this thread above.
			 */
			stp->sd_svcflags &= ~STRS_SCHEDULED;

			/*
			 * Failover scheduling is protected by the
			 * service_queue lock.
			 */
			mutex_enter(&service_queue);
			ASSERT((stp->sd_qhead == q) && (stp->sd_qtail == q));
			ASSERT(q->q_link == NULL);
			/*
			 * Append the queue to the qhead/qtail list.
			 */
			if (qhead == NULL)
				qhead = q;
			else
				qtail->q_link = q;
			qtail = q;
			/*
			 * Clear the stp queue list.
			 */
			stp->sd_qhead = stp->sd_qtail = NULL;
			stp->sd_nqueues = 0;
			/*
			 * Wake up the background queue processing
			 * thread.
			 */
			cv_signal(&services_to_run);
			mutex_exit(&service_queue);
		}
	}
	mutex_exit(&stp->sd_qlock);
}

static void
queue_service(queue_t *q)
{
	/*
	 * A queue in the list should have the QENAB flag set and
	 * should not have the QINSERVICE flag set.  QINSERVICE is set
	 * when the queue is dequeued, and qenable_locked() doesn't
	 * enqueue a queue with QINSERVICE set.
	 */
	ASSERT(!(q->q_flag & QINSERVICE));
	ASSERT((q->q_flag & QENAB));
	mutex_enter(QLOCK(q));
	q->q_flag &= ~QENAB;
	q->q_flag |= QINSERVICE;
	mutex_exit(QLOCK(q));
	runservice(q);
}

static void
syncq_service(syncq_t *sq)
{
	STRSTAT(syncqservice);
	mutex_enter(SQLOCK(sq));
	ASSERT(!(sq->sq_svcflags & SQ_SERVICE));
	ASSERT(sq->sq_servcount != 0);
	ASSERT(sq->sq_next == NULL);

	/* If we came here from the background thread, clear the flag. */
	if (sq->sq_svcflags & SQ_BGTHREAD)
		sq->sq_svcflags &= ~SQ_BGTHREAD;

	/* Let drain_syncq() know that it's being called in the background. */
	sq->sq_svcflags |= SQ_SERVICE;
	drain_syncq(sq);
}
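/*
 * Illustrative sketch (not part of the original file): the driver-side
 * view of queue enabling.  A service routine that fails an allocation
 * can put the message back and arrange to be re-enabled later; the
 * callback runs qenable(9F), which takes QLOCK and lands in
 * qenable_locked() above.  EX_HDRLEN, the header handling, and the
 * function names are assumptions; a real driver would also save the
 * bufcall id so it can qunbufcall() it in close.
 */
#ifdef STREAMS_EXAMPLES
#define	EX_HDRLEN	16	/* hypothetical header size */

static void
example_reenable(void *arg)
{
	qenable((queue_t *)arg);
}

static void
example_rsrv(queue_t *q)
{
	mblk_t *mp, *hp;

	while ((mp = getq(q)) != NULL) {
		if ((hp = allocb(EX_HDRLEN, BPRI_MED)) == NULL) {
			/*
			 * Out of buffers: requeue the message and ask
			 * to be re-enabled when allocb() may succeed.
			 */
			(void) putbq(q, mp);
			(void) qbufcall(q, EX_HDRLEN, BPRI_MED,
			    example_reenable, q);
			return;
		}
		hp->b_wptr += EX_HDRLEN;	/* header contents omitted */
		hp->b_cont = mp;
		putnext(q, hp);
	}
}
#endif	/* STREAMS_EXAMPLES */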
/*
 * Note that SQ_WRITER is used on the outer perimeter to signal that a
 * qwriter(OUTER) is either investigating whether it can run or is
 * actually running a function.
 */
static void
qwriter_outer_service(syncq_t *outer)
{
	outer_enter(outer, SQ_BLOCKED|SQ_WRITER);

	/*
	 * All inner syncqs are empty and have SQ_WRITER set to block
	 * entering the outer perimeter.
	 *
	 * We do not need to call write_now() explicitly since
	 * outer_exit() does it for us.
	 */
	outer_exit(outer);
}

static void
mblk_free(mblk_t *mp)
{
	dblk_t *dbp = mp->b_datap;
	frtn_t *frp = dbp->db_frtnp;

	mp->b_next = NULL;
	if (dbp->db_fthdr != NULL)
		str_ftfree(dbp);

	ASSERT(dbp->db_fthdr == NULL);
	frp->free_func(frp->free_arg);
	ASSERT(dbp->db_mblk == mp);

	if (dbp->db_credp != NULL) {
		crfree(dbp->db_credp);
		dbp->db_credp = NULL;
	}
	dbp->db_cpid = -1;
	dbp->db_struioflag = 0;
	dbp->db_struioun.cksum.flags = 0;

	kmem_cache_free(dbp->db_cache, dbp);
}

/*
 * Background processing of the stream queue list.
 */
static void
stream_service(stdata_t *stp)
{
	queue_t *q;

	mutex_enter(&stp->sd_qlock);

	STR_SERVICE(stp, q);

	stp->sd_svcflags &= ~STRS_SCHEDULED;
	stp->sd_servid = NULL;
	cv_signal(&stp->sd_qcv);
	mutex_exit(&stp->sd_qlock);
}

/*
 * Foreground processing of the stream queue list.
 */
void
stream_runservice(stdata_t *stp)
{
	queue_t *q;

	mutex_enter(&stp->sd_qlock);
	STRSTAT(rservice);
	/*
	 * We are going to drain this stream queue list, so
	 * qenable_locked() will not schedule it until we finish.
	 */
	stp->sd_svcflags |= STRS_WILLSERVICE;

	STR_SERVICE(stp, q);

	stp->sd_svcflags &= ~STRS_WILLSERVICE;
	mutex_exit(&stp->sd_qlock);
	/*
	 * Help the backup background thread to drain the qhead/qtail
	 * list.
	 */
	while (qhead != NULL) {
		STRSTAT(qhelps);
		mutex_enter(&service_queue);
		DQ(q, qhead, qtail, q_link);
		mutex_exit(&service_queue);
		if (q != NULL)
			queue_service(q);
	}
}

void
stream_willservice(stdata_t *stp)
{
	mutex_enter(&stp->sd_qlock);
	stp->sd_svcflags |= STRS_WILLSERVICE;
	mutex_exit(&stp->sd_qlock);
}
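/*
 * Illustrative sketch (not part of the original file): a stream-head
 * caller bracketing a burst of put operations so that any queues
 * enabled in between are drained in the foreground rather than being
 * dispatched to the taskq.  The function name and the b_next-linked
 * message list are assumptions; the willservice/runservice pairing
 * follows the comments above.
 */
#ifdef STREAMS_EXAMPLES
static void
example_put_burst(stdata_t *stp, mblk_t *mplist)
{
	mblk_t *mp, *next;

	stream_willservice(stp);	/* defer qenable scheduling */
	for (mp = mplist; mp != NULL; mp = next) {
		next = mp->b_next;
		mp->b_next = NULL;
		putnext(stp->sd_wrq, mp);
	}
	stream_runservice(stp);		/* drain what we enabled */
}
#endif	/* STREAMS_EXAMPLES */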
/*
 * Replace the cred currently in the mblk with a different one.
 */
void
mblk_setcred(mblk_t *mp, cred_t *cr)
{
	cred_t *ocr = DB_CRED(mp);

	ASSERT(cr != NULL);

	if (cr != ocr) {
		crhold(mp->b_datap->db_credp = cr);
		if (ocr != NULL)
			crfree(ocr);
	}
}

int
hcksum_assoc(mblk_t *mp, multidata_t *mmd, pdesc_t *pd,
    uint32_t start, uint32_t stuff, uint32_t end, uint32_t value,
    uint32_t flags, int km_flags)
{
	int rc = 0;

	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
	if (mp->b_datap->db_type == M_DATA) {
		/* Associate the values for the M_DATA type */
		DB_CKSUMSTART(mp) = (intptr_t)start;
		DB_CKSUMSTUFF(mp) = (intptr_t)stuff;
		DB_CKSUMEND(mp) = (intptr_t)end;
		DB_CKSUMFLAGS(mp) = flags;
		DB_CKSUM16(mp) = (uint16_t)value;

	} else {
		pattrinfo_t pa_info;

		ASSERT(mmd != NULL);

		pa_info.type = PATTR_HCKSUM;
		pa_info.len = sizeof (pattr_hcksum_t);

		if (mmd_addpattr(mmd, pd, &pa_info, B_TRUE, km_flags) != NULL) {
			pattr_hcksum_t *hck = (pattr_hcksum_t *)pa_info.buf;

			hck->hcksum_start_offset = start;
			hck->hcksum_stuff_offset = stuff;
			hck->hcksum_end_offset = end;
			hck->hcksum_cksum_val.inet_cksum = (uint16_t)value;
			hck->hcksum_flags = flags;
		} else {
			rc = -1;
		}
	}
	return (rc);
}

void
hcksum_retrieve(mblk_t *mp, multidata_t *mmd, pdesc_t *pd,
    uint32_t *start, uint32_t *stuff, uint32_t *end,
    uint32_t *value, uint32_t *flags)
{
	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
	if (mp->b_datap->db_type == M_DATA) {
		if (flags != NULL) {
			*flags = DB_CKSUMFLAGS(mp);
			if (*flags & HCK_PARTIALCKSUM) {
				if (start != NULL)
					*start = (uint32_t)DB_CKSUMSTART(mp);
				if (stuff != NULL)
					*stuff = (uint32_t)DB_CKSUMSTUFF(mp);
				if (end != NULL)
					*end = (uint32_t)DB_CKSUMEND(mp);
				if (value != NULL)
					*value = (uint32_t)DB_CKSUM16(mp);
			} else if ((*flags & HW_LSO) && (value != NULL))
				*value = (uint32_t)DB_LSOMSS(mp);
		}
	} else {
		pattrinfo_t hck_attr = {PATTR_HCKSUM};

		ASSERT(mmd != NULL);

		/* get the hardware checksum attribute */
		if (mmd_getpattr(mmd, pd, &hck_attr) != NULL) {
			pattr_hcksum_t *hck = (pattr_hcksum_t *)hck_attr.buf;

			ASSERT(hck_attr.len >= sizeof (pattr_hcksum_t));
			if (flags != NULL)
				*flags = hck->hcksum_flags;
			if (start != NULL)
				*start = hck->hcksum_start_offset;
			if (stuff != NULL)
				*stuff = hck->hcksum_stuff_offset;
			if (end != NULL)
				*end = hck->hcksum_end_offset;
			if (value != NULL)
				*value = (uint32_t)
				    hck->hcksum_cksum_val.inet_cksum;
		}
	}
}
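/*
 * Illustrative sketch (not part of the original file): how a network
 * driver's transmit path might retrieve the partial-checksum metadata
 * that the stack attached with hcksum_assoc().  The function name and
 * the hardware-programming details are assumptions; only the M_DATA
 * case is shown, for which mmd and pd may be passed as NULL since the
 * routine above does not use them on that path.
 */
#ifdef STREAMS_EXAMPLES
static void
example_tx_cksum(mblk_t *mp)
{
	uint32_t start, stuff, end, value, flags = 0;

	hcksum_retrieve(mp, NULL, NULL, &start, &stuff, &end,
	    &value, &flags);

	if (flags & HCK_PARTIALCKSUM) {
		/*
		 * Program the hardware to insert the 16-bit checksum
		 * at offset `stuff', summing from offset `start' to
		 * the end of the packet.
		 */
	} else if (flags & HCK_FULLCKSUM) {
		/* The hardware computes the complete checksum itself. */
	}
}
#endif	/* STREAMS_EXAMPLES */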
/*
 * Checksum buffer *bp for len bytes, folding in the partial checksum
 * psum (or 0 if none), and return the new 16-bit partial checksum.
 */
unsigned
bcksum(uchar_t *bp, int len, unsigned int psum)
{
	int odd = len & 1;
	extern unsigned int ip_ocsum();

	if (((intptr_t)bp & 1) == 0 && !odd) {
		/*
		 * bp is 16-bit aligned and len is a multiple of the
		 * 16-bit word size.
		 */
		return (ip_ocsum((ushort_t *)bp, len >> 1, psum));
	}
	if (((intptr_t)bp & 1) != 0) {
		/*
		 * bp isn't 16-bit aligned.
		 */
		unsigned int tsum;

#ifdef _LITTLE_ENDIAN
		psum += *bp;
#else
		psum += *bp << 8;
#endif
		len--;
		bp++;
		tsum = ip_ocsum((ushort_t *)bp, len >> 1, 0);
		/*
		 * The sum was computed one byte out of position, so
		 * swap the bytes of tsum before folding it in.
		 */
		psum += ((tsum << 8) & 0xffff) | (tsum >> 8);
		if (len & 1) {
			bp += len - 1;
#ifdef _LITTLE_ENDIAN
			psum += *bp << 8;
#else
			psum += *bp;
#endif
		}
	} else {
		/*
		 * bp is 16-bit aligned.
		 */
		psum = ip_ocsum((ushort_t *)bp, len >> 1, psum);
		if (odd) {
			bp += len - 1;
#ifdef _LITTLE_ENDIAN
			psum += *bp;
#else
			psum += *bp << 8;
#endif
		}
	}
	/*
	 * Normalize psum to 16 bits before returning the new partial
	 * checksum.  The max psum value before normalization is 0x3FDFE.
	 */
	return ((psum >> 16) + (psum & 0xFFFF));
}

boolean_t
is_vmloaned_mblk(mblk_t *mp, multidata_t *mmd, pdesc_t *pd)
{
	boolean_t rc;

	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_MULTIDATA);
	if (DB_TYPE(mp) == M_DATA) {
		rc = (((mp)->b_datap->db_struioflag & STRUIO_ZC) != 0);
	} else {
		pattrinfo_t zcopy_attr = {PATTR_ZCOPY};

		ASSERT(mmd != NULL);
		rc = (mmd_getpattr(mmd, pd, &zcopy_attr) != NULL);
	}
	return (rc);
}

void
freemsgchain(mblk_t *mp)
{
	mblk_t *next;

	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;

		freemsg(mp);
		mp = next;
	}
}

mblk_t *
copymsgchain(mblk_t *mp)
{
	mblk_t *nmp = NULL;
	mblk_t **nmpp = &nmp;

	for (; mp != NULL; mp = mp->b_next) {
		if ((*nmpp = copymsg(mp)) == NULL) {
			freemsgchain(nmp);
			return (NULL);
		}

		nmpp = &((*nmpp)->b_next);
	}

	return (nmp);
}

/* NOTE: Do not add code after this point. */
#undef	QLOCK

/*
 * Replacement for the QLOCK macro for those that can't use it.
 */
kmutex_t *
QLOCK(queue_t *q)
{
	return (&(q)->q_lock);
}

/*
 * Dummy runqueues/queuerun functions for backwards compatibility.
 */
#undef	runqueues
void
runqueues(void)
{
}

#undef	queuerun
void
queuerun(void)
{
}