/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


/*
 * Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2017 Joyent, Inc.
 * Copyright 2020 OmniOS Community Edition (OmniOSce) Association.
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/signal.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/stropts.h>
#include <sys/tihdr.h>
#include <sys/var.h>
#include <sys/poll.h>
#include <sys/termio.h>
#include <sys/ttold.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/cmn_err.h>
#include <sys/sad.h>
#include <sys/netstack.h>
#include <sys/priocntl.h>
#include <sys/jioctl.h>
#include <sys/procset.h>
#include <sys/session.h>
#include <sys/kmem.h>
#include <sys/filio.h>
#include <sys/vtrace.h>
#include <sys/debug.h>
#include <sys/strredir.h>
#include <sys/fs/fifonode.h>
#include <sys/fs/snode.h>
#include <sys/strlog.h>
#include <sys/strsun.h>
#include <sys/project.h>
#include <sys/kbio.h>
#include <sys/msio.h>
#include <sys/tty.h>
#include <sys/ptyvar.h>
#include <sys/vuid_event.h>
#include <sys/modctl.h>
#include <sys/sunddi.h>
#include <sys/sunldi_impl.h>
#include <sys/autoconf.h>
#include <sys/policy.h>
#include <sys/dld.h>
#include <sys/zone.h>
#include <sys/ptms.h>
#include <sys/limits.h>
#include <c2/audit.h>

/*
 * This define helps improve the readability of streams code while
 * still maintaining a very old streams performance enhancement.  The
 * performance enhancement basically involved having all callers
 * of straccess() perform the first check that straccess() will do
 * locally before actually calling straccess().  (Thereby reducing
 * the number of unnecessary calls to straccess().)
 */
#define	i_straccess(x, y)	((stp->sd_sidp == NULL) ? 0 : \
				    (stp->sd_vnode->v_type == VFIFO) ? 0 : \
				    straccess((x), (y)))

/*
 * what is mblk_pull_len?
 *
 * If a streams message consists of many short messages,
 * a performance degradation occurs from copyout overhead.
 * To decrease the per mblk overhead, messages that are
 * likely to consist of many small mblks are pulled up into
 * one continuous chunk of memory.
 *
 * To avoid the processing overhead of examining every
 * mblk, a quick heuristic is used.
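 *
 * (Illustrative sketch only -- this is not the actual consuming code.
 * A consumer of the tunable might apply the heuristic roughly like:
 *
 *	if (DB_TYPE(bp) == M_DATA && bp->b_cont != NULL &&
 *	    MBLKL(bp) < mblk_pull_len)
 *		(void) pullupmsg(bp, -1);
 *
 * i.e. when the leading mblk of a chain is short, concatenate the
 * whole chain into one data block before copying it out.)
 *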
 * If the first mblk in
 * the message is shorter than mblk_pull_len, it is likely
 * that the rest of the mblk will be short.
 *
 * This heuristic was decided upon after performance tests
 * indicated that anything more complex slowed down the main
 * code path.
 */
#define	MBLK_PULL_LEN 64
uint32_t mblk_pull_len = MBLK_PULL_LEN;

/*
 * The sgttyb_handling flag controls the handling of the old BSD
 * TIOCGETP, TIOCSETP, and TIOCSETN ioctls as follows:
 *
 * 0 - Emit no warnings at all and retain old, broken behavior.
 * 1 - Emit no warnings and silently handle new semantics.
 * 2 - Send cmn_err(CE_NOTE) when either TIOCSETP or TIOCSETN is used
 *     (once per system invocation).  Handle with new semantics.
 * 3 - Send SIGSYS when any TIOCGETP, TIOCSETP, or TIOCSETN call is
 *     made (so that offenders drop core and are easy to debug).
 *
 * The "new semantics" are that TIOCGETP returns B38400 for
 * sg_[io]speed if the corresponding value is over B38400, and that
 * TIOCSET[PN] accept B38400 in these cases to mean "retain current
 * bit rate."
 */
int sgttyb_handling = 1;
static boolean_t sgttyb_complaint;

/* don't push drcompat module by default on Style-2 streams */
static int push_drcompat = 0;

/*
 * id value used to distinguish between different ioctl messages
 */
static uint32_t ioc_id;

static void putback(struct stdata *, queue_t *, mblk_t *, int);
static void strcleanall(struct vnode *);
static int strwsrv(queue_t *);
static int strdocmd(struct stdata *, struct strcmd *, cred_t *);

/*
 * qinit and module_info structures for stream head read and write queues
 */
struct module_info strm_info = { 0, "strrhead", 0, INFPSZ, STRHIGH, STRLOW };
struct module_info stwm_info = { 0, "strwhead", 0, 0, 0, 0 };
struct qinit strdata = { strrput, NULL, NULL, NULL, NULL, &strm_info };
struct qinit stwdata = { NULL, strwsrv, NULL, NULL, NULL, &stwm_info };
struct module_info fiform_info = { 0, "fifostrrhead", 0, PIPE_BUF, FIFOHIWAT,
    FIFOLOWAT };
struct module_info fifowm_info = { 0, "fifostrwhead", 0, 0, 0, 0 };
struct qinit fifo_strdata = { strrput, NULL, NULL, NULL, NULL, &fiform_info };
struct qinit fifo_stwdata = { NULL, strwsrv, NULL, NULL, NULL, &fifowm_info };

extern kmutex_t strresources;	/* protects global resources */
extern kmutex_t muxifier;	/* single-threads multiplexor creation */

static boolean_t msghasdata(mblk_t *bp);
#define	msgnodata(bp) (!msghasdata(bp))

/*
 * Stream head locking notes:
 *	There are four monitors associated with the stream head:
 *	1. v_stream monitor: in stropen() and strclose() v_lock
 *	   is held while the association of vnode and stream
 *	   head is established or tested for.
 *	2. open/close/push/pop monitor: sd_lock is held while each
 *	   thread bids for exclusive access to this monitor
 *	   for opening or closing a stream.  In addition, this
 *	   monitor is entered during pushes and pops.  This
 *	   guarantees that during plumbing operations there
 *	   is only one thread trying to change the plumbing.
 *	   Any other threads present in the stream are only
 *	   using the plumbing.
 *	3. read/write monitor: in the case of read, a thread holds
 *	   sd_lock while trying to get data from the stream
 *	   head queue.
if there is none to fulfill a read 186 * request, it sets RSLEEP and calls cv_wait_sig() down 187 * in strwaitq() to await the arrival of new data. 188 * when new data arrives in strrput(), sd_lock is acquired 189 * before testing for RSLEEP and calling cv_broadcast(). 190 * the behavior of strwrite(), strwsrv(), and WSLEEP 191 * mirror this. 192 * 4. ioctl monitor: sd_lock is gotten to ensure that only one 193 * thread is doing an ioctl at a time. 194 */ 195 196 static int 197 push_mod(queue_t *qp, dev_t *devp, struct stdata *stp, const char *name, 198 int anchor, cred_t *crp, uint_t anchor_zoneid) 199 { 200 int error; 201 fmodsw_impl_t *fp; 202 203 if (stp->sd_flag & (STRHUP|STRDERR|STWRERR)) { 204 error = (stp->sd_flag & STRHUP) ? ENXIO : EIO; 205 return (error); 206 } 207 if (stp->sd_pushcnt >= nstrpush) { 208 return (EINVAL); 209 } 210 211 if ((fp = fmodsw_find(name, FMODSW_HOLD | FMODSW_LOAD)) == NULL) { 212 stp->sd_flag |= STREOPENFAIL; 213 return (EINVAL); 214 } 215 216 /* 217 * push new module and call its open routine via qattach 218 */ 219 if ((error = qattach(qp, devp, 0, crp, fp, B_FALSE)) != 0) 220 return (error); 221 222 /* 223 * Check to see if caller wants a STREAMS anchor 224 * put at this place in the stream, and add if so. 225 */ 226 mutex_enter(&stp->sd_lock); 227 if (anchor == stp->sd_pushcnt) { 228 stp->sd_anchor = stp->sd_pushcnt; 229 stp->sd_anchorzone = anchor_zoneid; 230 } 231 mutex_exit(&stp->sd_lock); 232 233 return (0); 234 } 235 236 static int 237 xpg4_fixup(queue_t *qp, dev_t *devp, struct stdata *stp, cred_t *crp) 238 { 239 static const char *ptsmods[] = { 240 "ptem", "ldterm", "ttcompat" 241 }; 242 dev_t dummydev = *devp; 243 struct strioctl strioc; 244 zoneid_t zoneid; 245 int32_t rval; 246 uint_t i; 247 248 /* 249 * Push modules required for the slave PTY to have terminal 250 * semantics out of the box; this is required by XPG4v2. 251 * These three modules are flagged as single-instance so that 252 * the system will never end up with duplicate copies pushed 253 * onto a stream. 254 */ 255 256 zoneid = crgetzoneid(crp); 257 for (i = 0; i < ARRAY_SIZE(ptsmods); i++) { 258 int error; 259 260 error = push_mod(qp, &dummydev, stp, ptsmods[i], 0, 261 crp, zoneid); 262 if (error != 0) 263 return (error); 264 } 265 266 /* 267 * Send PTSSTTY down the stream 268 */ 269 270 strioc.ic_cmd = PTSSTTY; 271 strioc.ic_timout = 0; 272 strioc.ic_len = 0; 273 strioc.ic_dp = NULL; 274 275 (void) strdoioctl(stp, &strioc, FNATIVE, K_TO_K, crp, &rval); 276 277 return (0); 278 } 279 280 /* 281 * Open a stream device. 282 */ 283 int 284 stropen(vnode_t *vp, dev_t *devp, int flag, cred_t *crp) 285 { 286 struct stdata *stp; 287 queue_t *qp; 288 int s; 289 dev_t dummydev, savedev; 290 struct autopush *ap; 291 struct dlautopush dlap; 292 int error = 0; 293 ssize_t rmin, rmax; 294 int cloneopen; 295 queue_t *brq; 296 major_t major; 297 str_stack_t *ss; 298 zoneid_t zoneid; 299 uint_t anchor; 300 301 /* 302 * If the stream already exists, wait for any open in progress 303 * to complete, then call the open function of each module and 304 * driver in the stream. Otherwise create the stream. 305 */ 306 TRACE_1(TR_FAC_STREAMS_FR, TR_STROPEN, "stropen:%p", vp); 307 retry: 308 mutex_enter(&vp->v_lock); 309 if ((stp = vp->v_stream) != NULL) { 310 311 /* 312 * Waiting for stream to be created to device 313 * due to another open. 
314 */ 315 mutex_exit(&vp->v_lock); 316 317 if (STRMATED(stp)) { 318 struct stdata *strmatep = stp->sd_mate; 319 320 STRLOCKMATES(stp); 321 if (strmatep->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 322 if (flag & (FNDELAY|FNONBLOCK)) { 323 error = EAGAIN; 324 mutex_exit(&strmatep->sd_lock); 325 goto ckreturn; 326 } 327 mutex_exit(&stp->sd_lock); 328 if (!cv_wait_sig(&strmatep->sd_monitor, 329 &strmatep->sd_lock)) { 330 error = EINTR; 331 mutex_exit(&strmatep->sd_lock); 332 mutex_enter(&stp->sd_lock); 333 goto ckreturn; 334 } 335 mutex_exit(&strmatep->sd_lock); 336 goto retry; 337 } 338 if (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 339 if (flag & (FNDELAY|FNONBLOCK)) { 340 error = EAGAIN; 341 mutex_exit(&strmatep->sd_lock); 342 goto ckreturn; 343 } 344 mutex_exit(&strmatep->sd_lock); 345 if (!cv_wait_sig(&stp->sd_monitor, 346 &stp->sd_lock)) { 347 error = EINTR; 348 goto ckreturn; 349 } 350 mutex_exit(&stp->sd_lock); 351 goto retry; 352 } 353 354 if (stp->sd_flag & (STRDERR|STWRERR)) { 355 error = EIO; 356 mutex_exit(&strmatep->sd_lock); 357 goto ckreturn; 358 } 359 360 stp->sd_flag |= STWOPEN; 361 STRUNLOCKMATES(stp); 362 } else { 363 mutex_enter(&stp->sd_lock); 364 if (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 365 if (flag & (FNDELAY|FNONBLOCK)) { 366 error = EAGAIN; 367 goto ckreturn; 368 } 369 if (!cv_wait_sig(&stp->sd_monitor, 370 &stp->sd_lock)) { 371 error = EINTR; 372 goto ckreturn; 373 } 374 mutex_exit(&stp->sd_lock); 375 goto retry; /* could be clone! */ 376 } 377 378 if (stp->sd_flag & (STRDERR|STWRERR)) { 379 error = EIO; 380 goto ckreturn; 381 } 382 383 stp->sd_flag |= STWOPEN; 384 mutex_exit(&stp->sd_lock); 385 } 386 387 /* 388 * Open all modules and devices down stream to notify 389 * that another user is streaming. For modules, set the 390 * last argument to MODOPEN and do not pass any open flags. 391 * Ignore dummydev since this is not the first open. 392 */ 393 claimstr(stp->sd_wrq); 394 qp = stp->sd_wrq; 395 while (_SAMESTR(qp)) { 396 qp = qp->q_next; 397 if ((error = qreopen(_RD(qp), devp, flag, crp)) != 0) 398 break; 399 } 400 releasestr(stp->sd_wrq); 401 mutex_enter(&stp->sd_lock); 402 stp->sd_flag &= ~(STRHUP|STWOPEN|STRDERR|STWRERR); 403 stp->sd_rerror = 0; 404 stp->sd_werror = 0; 405 ckreturn: 406 cv_broadcast(&stp->sd_monitor); 407 mutex_exit(&stp->sd_lock); 408 return (error); 409 } 410 411 /* 412 * This vnode isn't streaming. SPECFS already 413 * checked for multiple vnodes pointing to the 414 * same stream, so create a stream to the driver. 415 */ 416 qp = allocq(); 417 stp = shalloc(qp); 418 419 /* 420 * Initialize stream head. shalloc() has given us 421 * exclusive access, and we have the vnode locked; 422 * we can do whatever we want with stp. 
423 */ 424 stp->sd_flag = STWOPEN; 425 stp->sd_siglist = NULL; 426 stp->sd_pollist.ph_list = NULL; 427 stp->sd_sigflags = 0; 428 stp->sd_mark = NULL; 429 stp->sd_closetime = STRTIMOUT; 430 stp->sd_sidp = NULL; 431 stp->sd_pgidp = NULL; 432 stp->sd_vnode = vp; 433 stp->sd_pvnode = NULL; 434 stp->sd_rerror = 0; 435 stp->sd_werror = 0; 436 stp->sd_wroff = 0; 437 stp->sd_tail = 0; 438 stp->sd_iocblk = NULL; 439 stp->sd_cmdblk = NULL; 440 stp->sd_pushcnt = 0; 441 stp->sd_qn_minpsz = 0; 442 stp->sd_qn_maxpsz = INFPSZ - 1; /* used to check for initialization */ 443 stp->sd_maxblk = INFPSZ; 444 qp->q_ptr = _WR(qp)->q_ptr = stp; 445 STREAM(qp) = STREAM(_WR(qp)) = stp; 446 vp->v_stream = stp; 447 mutex_exit(&vp->v_lock); 448 if (vp->v_type == VFIFO) { 449 stp->sd_flag |= OLDNDELAY; 450 /* 451 * This means, both for pipes and fifos 452 * strwrite will send SIGPIPE if the other 453 * end is closed. For putmsg it depends 454 * on whether it is a XPG4_2 application 455 * or not 456 */ 457 stp->sd_wput_opt = SW_SIGPIPE; 458 459 /* setq might sleep in kmem_alloc - avoid holding locks. */ 460 setq(qp, &fifo_strdata, &fifo_stwdata, NULL, QMTSAFE, 461 SQ_CI|SQ_CO, B_FALSE); 462 463 set_qend(qp); 464 stp->sd_strtab = fifo_getinfo(); 465 _WR(qp)->q_nfsrv = _WR(qp); 466 qp->q_nfsrv = qp; 467 /* 468 * Wake up others that are waiting for stream to be created. 469 */ 470 mutex_enter(&stp->sd_lock); 471 /* 472 * nothing is be pushed on stream yet, so 473 * optimized stream head packetsizes are just that 474 * of the read queue 475 */ 476 stp->sd_qn_minpsz = qp->q_minpsz; 477 stp->sd_qn_maxpsz = qp->q_maxpsz; 478 stp->sd_flag &= ~STWOPEN; 479 goto fifo_opendone; 480 } 481 /* setq might sleep in kmem_alloc - avoid holding locks. */ 482 setq(qp, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO, B_FALSE); 483 484 set_qend(qp); 485 486 /* 487 * Open driver and create stream to it (via qattach). 488 */ 489 savedev = *devp; 490 cloneopen = (getmajor(*devp) == clone_major); 491 if ((error = qattach(qp, devp, flag, crp, NULL, B_FALSE)) != 0) { 492 mutex_enter(&vp->v_lock); 493 vp->v_stream = NULL; 494 mutex_exit(&vp->v_lock); 495 mutex_enter(&stp->sd_lock); 496 cv_broadcast(&stp->sd_monitor); 497 mutex_exit(&stp->sd_lock); 498 freeq(_RD(qp)); 499 shfree(stp); 500 return (error); 501 } 502 /* 503 * Set sd_strtab after open in order to handle clonable drivers 504 */ 505 stp->sd_strtab = STREAMSTAB(getmajor(*devp)); 506 507 /* 508 * Historical note: dummydev used to be be prior to the initial 509 * open (via qattach above), which made the value seen 510 * inconsistent between an I_PUSH and an autopush of a module. 511 */ 512 dummydev = *devp; 513 514 /* 515 * For clone open of old style (Q not associated) network driver, 516 * push DRMODNAME module to handle DL_ATTACH/DL_DETACH 517 */ 518 brq = _RD(_WR(qp)->q_next); 519 major = getmajor(*devp); 520 if (push_drcompat && cloneopen && NETWORK_DRV(major) && 521 ((brq->q_flag & _QASSOCIATED) == 0)) { 522 if (push_mod(qp, &dummydev, stp, DRMODNAME, 0, crp, 0) != 0) 523 cmn_err(CE_WARN, "cannot push " DRMODNAME 524 " streams module"); 525 } 526 527 if (!NETWORK_DRV(major)) { 528 savedev = *devp; 529 } else { 530 /* 531 * For network devices, process differently based on the 532 * return value from dld_autopush(): 533 * 534 * 0: the passed-in device points to a GLDv3 datalink with 535 * per-link autopush configuration; use that configuration 536 * and ignore any per-driver autopush configuration. 
537 * 538 * 1: the passed-in device points to a physical GLDv3 539 * datalink without per-link autopush configuration. The 540 * passed in device was changed to refer to the actual 541 * physical device (if it's not already); we use that new 542 * device to look up any per-driver autopush configuration. 543 * 544 * -1: neither of the above cases applied; use the initial 545 * device to look up any per-driver autopush configuration. 546 */ 547 switch (dld_autopush(&savedev, &dlap)) { 548 case 0: 549 zoneid = crgetzoneid(crp); 550 for (s = 0; s < dlap.dap_npush; s++) { 551 error = push_mod(qp, &dummydev, stp, 552 dlap.dap_aplist[s], dlap.dap_anchor, crp, 553 zoneid); 554 if (error != 0) 555 break; 556 } 557 goto opendone; 558 case 1: 559 break; 560 case -1: 561 savedev = *devp; 562 break; 563 } 564 } 565 /* 566 * Find the autopush configuration based on "savedev". Start with the 567 * global zone. If not found check in the local zone. 568 */ 569 zoneid = GLOBAL_ZONEID; 570 retryap: 571 ss = netstack_find_by_stackid(zoneid_to_netstackid(zoneid))-> 572 netstack_str; 573 if ((ap = sad_ap_find_by_dev(savedev, ss)) == NULL) { 574 netstack_rele(ss->ss_netstack); 575 if (zoneid == GLOBAL_ZONEID) { 576 /* 577 * None found. Also look in the zone's autopush table. 578 */ 579 zoneid = crgetzoneid(crp); 580 if (zoneid != GLOBAL_ZONEID) 581 goto retryap; 582 } 583 goto opendone; 584 } 585 anchor = ap->ap_anchor; 586 zoneid = crgetzoneid(crp); 587 for (s = 0; s < ap->ap_npush; s++) { 588 error = push_mod(qp, &dummydev, stp, ap->ap_list[s], 589 anchor, crp, zoneid); 590 if (error != 0) 591 break; 592 } 593 sad_ap_rele(ap, ss); 594 netstack_rele(ss->ss_netstack); 595 596 opendone: 597 598 if (error == 0 && 599 (stp->sd_flag & (STRISTTY|STRXPG4TTY)) == (STRISTTY|STRXPG4TTY)) { 600 error = xpg4_fixup(qp, devp, stp, crp); 601 } 602 603 /* 604 * let specfs know that open failed part way through 605 */ 606 if (error != 0) { 607 mutex_enter(&stp->sd_lock); 608 stp->sd_flag |= STREOPENFAIL; 609 mutex_exit(&stp->sd_lock); 610 } 611 612 /* 613 * Wake up others that are waiting for stream to be created. 614 */ 615 mutex_enter(&stp->sd_lock); 616 stp->sd_flag &= ~STWOPEN; 617 618 /* 619 * As a performance concern we are caching the values of 620 * q_minpsz and q_maxpsz of the module below the stream 621 * head in the stream head. 622 */ 623 mutex_enter(QLOCK(stp->sd_wrq->q_next)); 624 rmin = stp->sd_wrq->q_next->q_minpsz; 625 rmax = stp->sd_wrq->q_next->q_maxpsz; 626 mutex_exit(QLOCK(stp->sd_wrq->q_next)); 627 628 /* do this processing here as a performance concern */ 629 if (strmsgsz != 0) { 630 if (rmax == INFPSZ) 631 rmax = strmsgsz; 632 else 633 rmax = MIN(strmsgsz, rmax); 634 } 635 636 mutex_enter(QLOCK(stp->sd_wrq)); 637 stp->sd_qn_minpsz = rmin; 638 stp->sd_qn_maxpsz = rmax; 639 mutex_exit(QLOCK(stp->sd_wrq)); 640 641 fifo_opendone: 642 cv_broadcast(&stp->sd_monitor); 643 mutex_exit(&stp->sd_lock); 644 return (error); 645 } 646 647 static int strsink(queue_t *, mblk_t *); 648 static struct qinit deadrend = { 649 strsink, NULL, NULL, NULL, NULL, &strm_info, NULL 650 }; 651 static struct qinit deadwend = { 652 NULL, NULL, NULL, NULL, NULL, &stwm_info, NULL 653 }; 654 655 /* 656 * Close a stream. 657 * This is called from closef() on the last close of an open stream. 658 * Strclean() will already have removed the siglist and pollist 659 * information, so all that remains is to remove all multiplexor links 660 * for the stream, pop all the modules (and the driver), and free the 661 * stream structure. 
662 */ 663 664 int 665 strclose(struct vnode *vp, int flag, cred_t *crp) 666 { 667 struct stdata *stp; 668 queue_t *qp; 669 int rval; 670 int freestp = 1; 671 queue_t *rmq; 672 673 TRACE_1(TR_FAC_STREAMS_FR, 674 TR_STRCLOSE, "strclose:%p", vp); 675 ASSERT(vp->v_stream); 676 677 stp = vp->v_stream; 678 ASSERT(!(stp->sd_flag & STPLEX)); 679 qp = stp->sd_wrq; 680 681 /* 682 * Needed so that strpoll will return non-zero for this fd. 683 * Note that with POLLNOERR STRHUP does still cause POLLHUP. 684 */ 685 mutex_enter(&stp->sd_lock); 686 stp->sd_flag |= STRHUP; 687 mutex_exit(&stp->sd_lock); 688 689 /* 690 * If the registered process or process group did not have an 691 * open instance of this stream then strclean would not be 692 * called. Thus at the time of closing all remaining siglist entries 693 * are removed. 694 */ 695 if (stp->sd_siglist != NULL) 696 strcleanall(vp); 697 698 ASSERT(stp->sd_siglist == NULL); 699 ASSERT(stp->sd_sigflags == 0); 700 701 if (STRMATED(stp)) { 702 struct stdata *strmatep = stp->sd_mate; 703 int waited = 1; 704 705 STRLOCKMATES(stp); 706 while (waited) { 707 waited = 0; 708 while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 709 mutex_exit(&strmatep->sd_lock); 710 cv_wait(&stp->sd_monitor, &stp->sd_lock); 711 mutex_exit(&stp->sd_lock); 712 STRLOCKMATES(stp); 713 waited = 1; 714 } 715 while (strmatep->sd_flag & 716 (STWOPEN|STRCLOSE|STRPLUMB)) { 717 mutex_exit(&stp->sd_lock); 718 cv_wait(&strmatep->sd_monitor, 719 &strmatep->sd_lock); 720 mutex_exit(&strmatep->sd_lock); 721 STRLOCKMATES(stp); 722 waited = 1; 723 } 724 } 725 stp->sd_flag |= STRCLOSE; 726 STRUNLOCKMATES(stp); 727 } else { 728 mutex_enter(&stp->sd_lock); 729 stp->sd_flag |= STRCLOSE; 730 mutex_exit(&stp->sd_lock); 731 } 732 733 ASSERT(qp->q_first == NULL); /* No more delayed write */ 734 735 /* Check if an I_LINK was ever done on this stream */ 736 if (stp->sd_flag & STRHASLINKS) { 737 netstack_t *ns; 738 str_stack_t *ss; 739 740 ns = netstack_find_by_cred(crp); 741 ASSERT(ns != NULL); 742 ss = ns->netstack_str; 743 ASSERT(ss != NULL); 744 745 (void) munlinkall(stp, LINKCLOSE|LINKNORMAL, crp, &rval, ss); 746 netstack_rele(ss->ss_netstack); 747 } 748 749 while (_SAMESTR(qp)) { 750 /* 751 * Holding sd_lock prevents q_next from changing in 752 * this stream. 753 */ 754 mutex_enter(&stp->sd_lock); 755 if (!(flag & (FNDELAY|FNONBLOCK)) && (stp->sd_closetime > 0)) { 756 757 /* 758 * sleep until awakened by strwsrv() or timeout 759 */ 760 for (;;) { 761 mutex_enter(QLOCK(qp->q_next)); 762 if (!(qp->q_next->q_mblkcnt)) { 763 mutex_exit(QLOCK(qp->q_next)); 764 break; 765 } 766 stp->sd_flag |= WSLEEP; 767 768 /* ensure strwsrv gets enabled */ 769 qp->q_next->q_flag |= QWANTW; 770 mutex_exit(QLOCK(qp->q_next)); 771 /* get out if we timed out or recv'd a signal */ 772 if (str_cv_wait(&qp->q_wait, &stp->sd_lock, 773 stp->sd_closetime, 0) <= 0) { 774 break; 775 } 776 } 777 stp->sd_flag &= ~WSLEEP; 778 } 779 mutex_exit(&stp->sd_lock); 780 781 rmq = qp->q_next; 782 if (rmq->q_flag & QISDRV) { 783 ASSERT(!_SAMESTR(rmq)); 784 wait_sq_svc(_RD(qp)->q_syncq); 785 } 786 787 qdetach(_RD(rmq), 1, flag, crp, B_FALSE); 788 } 789 790 /* 791 * Since we call pollwakeup in close() now, the poll list should 792 * be empty in most cases. The only exception is the layered devices 793 * (e.g. the console drivers with redirection modules pushed on top 794 * of it). 
We have to do this after calling qdetach() because 795 * the redirection module won't have torn down the console 796 * redirection until after qdetach() has been invoked. 797 */ 798 if (stp->sd_pollist.ph_list != NULL) { 799 pollwakeup(&stp->sd_pollist, POLLERR); 800 pollhead_clean(&stp->sd_pollist); 801 } 802 ASSERT(stp->sd_pollist.ph_list == NULL); 803 ASSERT(stp->sd_sidp == NULL); 804 ASSERT(stp->sd_pgidp == NULL); 805 806 /* Prevent qenable from re-enabling the stream head queue */ 807 disable_svc(_RD(qp)); 808 809 /* 810 * Wait until service procedure of each queue is 811 * run, if QINSERVICE is set. 812 */ 813 wait_svc(_RD(qp)); 814 815 /* 816 * Now, flush both queues. 817 */ 818 flushq(_RD(qp), FLUSHALL); 819 flushq(qp, FLUSHALL); 820 821 /* 822 * If the write queue of the stream head is pointing to a 823 * read queue, we have a twisted stream. If the read queue 824 * is alive, convert the stream head queues into a dead end. 825 * If the read queue is dead, free the dead pair. 826 */ 827 if (qp->q_next && !_SAMESTR(qp)) { 828 if (qp->q_next->q_qinfo == &deadrend) { /* half-closed pipe */ 829 flushq(qp->q_next, FLUSHALL); /* ensure no message */ 830 shfree(qp->q_next->q_stream); 831 freeq(qp->q_next); 832 freeq(_RD(qp)); 833 } else if (qp->q_next == _RD(qp)) { /* fifo */ 834 freeq(_RD(qp)); 835 } else { /* pipe */ 836 freestp = 0; 837 /* 838 * The q_info pointers are never accessed when 839 * SQLOCK is held. 840 */ 841 ASSERT(qp->q_syncq == _RD(qp)->q_syncq); 842 mutex_enter(SQLOCK(qp->q_syncq)); 843 qp->q_qinfo = &deadwend; 844 _RD(qp)->q_qinfo = &deadrend; 845 mutex_exit(SQLOCK(qp->q_syncq)); 846 } 847 } else { 848 freeq(_RD(qp)); /* free stream head queue pair */ 849 } 850 851 mutex_enter(&vp->v_lock); 852 if (stp->sd_iocblk) { 853 if (stp->sd_iocblk != (mblk_t *)-1) { 854 freemsg(stp->sd_iocblk); 855 } 856 stp->sd_iocblk = NULL; 857 } 858 stp->sd_vnode = stp->sd_pvnode = NULL; 859 vp->v_stream = NULL; 860 mutex_exit(&vp->v_lock); 861 mutex_enter(&stp->sd_lock); 862 freemsg(stp->sd_cmdblk); 863 stp->sd_cmdblk = NULL; 864 stp->sd_flag &= ~STRCLOSE; 865 cv_broadcast(&stp->sd_monitor); 866 mutex_exit(&stp->sd_lock); 867 868 if (freestp) 869 shfree(stp); 870 return (0); 871 } 872 873 static int 874 strsink(queue_t *q, mblk_t *bp) 875 { 876 struct copyresp *resp; 877 878 switch (bp->b_datap->db_type) { 879 case M_FLUSH: 880 if ((*bp->b_rptr & FLUSHW) && !(bp->b_flag & MSGNOLOOP)) { 881 *bp->b_rptr &= ~FLUSHR; 882 bp->b_flag |= MSGNOLOOP; 883 /* 884 * Protect against the driver passing up 885 * messages after it has done a qprocsoff. 886 */ 887 if (_OTHERQ(q)->q_next == NULL) 888 freemsg(bp); 889 else 890 qreply(q, bp); 891 } else { 892 freemsg(bp); 893 } 894 break; 895 896 case M_COPYIN: 897 case M_COPYOUT: 898 if (bp->b_cont) { 899 freemsg(bp->b_cont); 900 bp->b_cont = NULL; 901 } 902 bp->b_datap->db_type = M_IOCDATA; 903 bp->b_wptr = bp->b_rptr + sizeof (struct copyresp); 904 resp = (struct copyresp *)bp->b_rptr; 905 resp->cp_rval = (caddr_t)1; /* failure */ 906 /* 907 * Protect against the driver passing up 908 * messages after it has done a qprocsoff. 909 */ 910 if (_OTHERQ(q)->q_next == NULL) 911 freemsg(bp); 912 else 913 qreply(q, bp); 914 break; 915 916 case M_IOCTL: 917 if (bp->b_cont) { 918 freemsg(bp->b_cont); 919 bp->b_cont = NULL; 920 } 921 bp->b_datap->db_type = M_IOCNAK; 922 /* 923 * Protect against the driver passing up 924 * messages after it has done a qprocsoff. 
		 */
		if (_OTHERQ(q)->q_next == NULL)
			freemsg(bp);
		else
			qreply(q, bp);
		break;

	default:
		freemsg(bp);
		break;
	}

	return (0);
}

/*
 * Clean up after a process when it closes a stream.  This is called
 * from closef for all closes, whereas strclose is called only for the
 * last close on a stream.  The siglist is scanned for entries for the
 * current process, and these are removed.
 */
void
strclean(struct vnode *vp)
{
	strsig_t *ssp, *pssp, *tssp;
	stdata_t *stp;
	int update = 0;

	TRACE_1(TR_FAC_STREAMS_FR,
	    TR_STRCLEAN, "strclean:%p", vp);
	stp = vp->v_stream;
	pssp = NULL;
	mutex_enter(&stp->sd_lock);
	ssp = stp->sd_siglist;
	while (ssp) {
		if (ssp->ss_pidp == curproc->p_pidp) {
			tssp = ssp->ss_next;
			if (pssp)
				pssp->ss_next = tssp;
			else
				stp->sd_siglist = tssp;
			mutex_enter(&pidlock);
			PID_RELE(ssp->ss_pidp);
			mutex_exit(&pidlock);
			kmem_free(ssp, sizeof (strsig_t));
			update = 1;
			ssp = tssp;
		} else {
			pssp = ssp;
			ssp = ssp->ss_next;
		}
	}
	if (update) {
		stp->sd_sigflags = 0;
		for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next)
			stp->sd_sigflags |= ssp->ss_events;
	}
	mutex_exit(&stp->sd_lock);
}

/*
 * Used on the last close to remove any remaining items on the siglist.
 * These could be present on the siglist due to I_ESETSIG calls that
 * use process groups or processes that do not have an open file descriptor
 * for this stream (Such entries would not be removed by strclean).
 */
static void
strcleanall(struct vnode *vp)
{
	strsig_t *ssp, *nssp;
	stdata_t *stp;

	stp = vp->v_stream;
	mutex_enter(&stp->sd_lock);
	ssp = stp->sd_siglist;
	stp->sd_siglist = NULL;
	while (ssp) {
		nssp = ssp->ss_next;
		mutex_enter(&pidlock);
		PID_RELE(ssp->ss_pidp);
		mutex_exit(&pidlock);
		kmem_free(ssp, sizeof (strsig_t));
		ssp = nssp;
	}
	stp->sd_sigflags = 0;
	mutex_exit(&stp->sd_lock);
}

/*
 * Retrieve the next message from the logical stream head read queue
 * using either rwnext (if sync stream) or getq_noenab.
 * It is the caller's responsibility to call qbackenable after
 * it is finished with the message.  The caller should not call
 * qbackenable until after any putback calls to avoid spurious backenabling.
 */
mblk_t *
strget(struct stdata *stp, queue_t *q, struct uio *uiop, int first,
    int *errorp)
{
	mblk_t *bp;
	int error;
	ssize_t rbytes = 0;

	/* Holding sd_lock prevents the read queue from changing */
	ASSERT(MUTEX_HELD(&stp->sd_lock));

	if (uiop != NULL && stp->sd_struiordq != NULL &&
	    q->q_first == NULL &&
	    (!first || (stp->sd_wakeq & RSLEEP))) {
		/*
		 * Stream supports rwnext() for the read side.
		 * If this is the first time we're called by e.g. strread
		 * only do the downcall if there is a deferred wakeup
		 * (registered in sd_wakeq).
		 */
		struiod_t uiod;
		struct iovec buf[IOV_MAX_STACK];
		int iovlen = 0;

		if (first)
			stp->sd_wakeq &= ~RSLEEP;

		if (uiop->uio_iovcnt > IOV_MAX_STACK) {
			iovlen = uiop->uio_iovcnt * sizeof (iovec_t);
			uiod.d_iov = kmem_alloc(iovlen, KM_SLEEP);
		} else {
			uiod.d_iov = buf;
		}

		(void) uiodup(uiop, &uiod.d_uio, uiod.d_iov, uiop->uio_iovcnt);
		uiod.d_mp = 0;
		/*
		 * Mark that a thread is in rwnext on the read side
		 * to prevent strrput from nacking ioctls immediately.
		 * When the last concurrent rwnext returns
		 * the ioctls are nack'ed.
		 */
		ASSERT(MUTEX_HELD(&stp->sd_lock));
		stp->sd_struiodnak++;
		/*
		 * Note: rwnext will drop sd_lock.
		 */
		error = rwnext(q, &uiod);
		ASSERT(MUTEX_NOT_HELD(&stp->sd_lock));
		mutex_enter(&stp->sd_lock);
		stp->sd_struiodnak--;
		while (stp->sd_struiodnak == 0 &&
		    ((bp = stp->sd_struionak) != NULL)) {
			stp->sd_struionak = bp->b_next;
			bp->b_next = NULL;
			bp->b_datap->db_type = M_IOCNAK;
			/*
			 * Protect against the driver passing up
			 * messages after it has done a qprocsoff.
			 */
			if (_OTHERQ(q)->q_next == NULL)
				freemsg(bp);
			else {
				mutex_exit(&stp->sd_lock);
				qreply(q, bp);
				mutex_enter(&stp->sd_lock);
			}
		}
		ASSERT(MUTEX_HELD(&stp->sd_lock));
		if (error == 0 || error == EWOULDBLOCK) {
			if ((bp = uiod.d_mp) != NULL) {
				*errorp = 0;
				ASSERT(MUTEX_HELD(&stp->sd_lock));
				if (iovlen != 0)
					kmem_free(uiod.d_iov, iovlen);
				return (bp);
			}
			error = 0;
		} else if (error == EINVAL) {
			/*
			 * The stream plumbing must have
			 * changed while we were away, so
			 * just turn off rwnext()s.
			 */
			error = 0;
		} else if (error == EBUSY) {
			/*
			 * The module might have data in transit using putnext.
			 * Fall back on waiting + getq.
			 */
			error = 0;
		} else {
			*errorp = error;
			ASSERT(MUTEX_HELD(&stp->sd_lock));
			if (iovlen != 0)
				kmem_free(uiod.d_iov, iovlen);
			return (NULL);
		}

		if (iovlen != 0)
			kmem_free(uiod.d_iov, iovlen);

		/*
		 * Try a getq in case a rwnext() generated mblk
		 * has bubbled up via strrput().
		 */
	}
	*errorp = 0;
	ASSERT(MUTEX_HELD(&stp->sd_lock));

	/*
	 * If we have a valid uio, try and use this as a guide for how
	 * many bytes to retrieve from the queue via getq_noenab().
	 * Doing this can avoid unnecessary counting of overlong
	 * messages in putback().  We currently only do this for sockets
	 * and only if there is no sd_rputdatafunc hook.
	 *
	 * The sd_rputdatafunc hook transforms the entire message
	 * before any bytes in it can be given to a client.  So, rbytes
	 * must be 0 if there is a hook.
	 */
	if ((uiop != NULL) && (stp->sd_vnode->v_type == VSOCK) &&
	    (stp->sd_rputdatafunc == NULL))
		rbytes = uiop->uio_resid;

	return (getq_noenab(q, rbytes));
}

/*
 * Copy out the message pointed to by `bp' into the uio pointed to by `uiop'.
 * If the message does not fit in the uio the remainder of it is returned;
 * otherwise NULL is returned.  Any embedded zero-length mblk_t's are
 * consumed, even if uio_resid reaches zero.  On error, `*errorp' is set to
 * the error code, the message is consumed, and NULL is returned.
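 *
 * A typical caller consumes a message roughly as follows (sketch only;
 * as in strread, sd_lock is dropped around the copyout and re-taken
 * before requeueing any remainder):
 *
 *	bp = struiocopyout(bp, uiop, &error);
 *	if (error == 0 && bp != NULL) {
 *		mutex_enter(&stp->sd_lock);
 *		putback(stp, q, bp, bp->b_band);
 *		mutex_exit(&stp->sd_lock);
 *	}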
 */
static mblk_t *
struiocopyout(mblk_t *bp, struct uio *uiop, int *errorp)
{
	int error;
	ptrdiff_t n;
	mblk_t *nbp;

	ASSERT(bp->b_wptr >= bp->b_rptr);

	do {
		if ((n = MIN(uiop->uio_resid, MBLKL(bp))) != 0) {
			ASSERT(n > 0);

			error = uiomove(bp->b_rptr, n, UIO_READ, uiop);
			if (error != 0) {
				freemsg(bp);
				*errorp = error;
				return (NULL);
			}
		}

		bp->b_rptr += n;
		while (bp != NULL && (bp->b_rptr >= bp->b_wptr)) {
			nbp = bp;
			bp = bp->b_cont;
			freeb(nbp);
		}
	} while (bp != NULL && uiop->uio_resid > 0);

	*errorp = 0;
	return (bp);
}

/*
 * Read a stream according to the mode flags in sd_flag:
 *
 * (default mode)		- Byte stream, msg boundaries are ignored
 * RD_MSGDIS (msg discard)	- Read on msg boundaries and throw away
 *				  any data remaining in msg
 * RD_MSGNODIS (msg non-discard) - Read on msg boundaries and put back
 *				  any remaining data on head of read queue
 *
 * Consume readable messages on the front of the queue until
 * ttolwp(curthread)->lwp_count
 * is satisfied, the readable messages are exhausted, or a message
 * boundary is reached in a message mode.  If no data was read and
 * the stream was not opened with the NDELAY flag, block until data arrives.
 * Otherwise return the data read and update the count.
 *
 * In default mode a 0 length message signifies end-of-file and terminates
 * a read in progress.  The 0 length message is removed from the queue
 * only if it is the only message read (no data is read).
 *
 * An attempt to read an M_PROTO or M_PCPROTO message results in an
 * EBADMSG error return, unless either RD_PROTDAT or RD_PROTDIS are set.
 * If RD_PROTDAT is set, M_PROTO and M_PCPROTO messages are read as data.
 * If RD_PROTDIS is set, the M_PROTO and M_PCPROTO parts of the message
 * are unlinked from any M_DATA blocks in the message, the protos are
 * thrown away, and the data is read.
 */
/* ARGSUSED */
int
strread(struct vnode *vp, struct uio *uiop, cred_t *crp)
{
	struct stdata *stp;
	mblk_t *bp, *nbp;
	queue_t *q;
	int error = 0;
	uint_t old_sd_flag;
	int first;
	char rflg;
	uint_t mark;		/* Contains MSG*MARK and _LASTMARK */
#define	_LASTMARK	0x8000	/* Distinct from MSG*MARK */
	short delim;
	unsigned char pri = 0;
	char waitflag;
	unsigned char type;

	TRACE_1(TR_FAC_STREAMS_FR,
	    TR_STRREAD_ENTER, "strread:%p", vp);
	ASSERT(vp->v_stream);
	stp = vp->v_stream;

	mutex_enter(&stp->sd_lock);

	if ((error = i_straccess(stp, JCREAD)) != 0) {
		mutex_exit(&stp->sd_lock);
		return (error);
	}

	if (stp->sd_flag & (STRDERR|STPLEX)) {
		error = strgeterr(stp, STRDERR|STPLEX, 0);
		if (error != 0) {
			mutex_exit(&stp->sd_lock);
			return (error);
		}
	}

	/*
	 * Loop terminates when uiop->uio_resid == 0.
1255 */ 1256 rflg = 0; 1257 waitflag = READWAIT; 1258 q = _RD(stp->sd_wrq); 1259 for (;;) { 1260 ASSERT(MUTEX_HELD(&stp->sd_lock)); 1261 old_sd_flag = stp->sd_flag; 1262 mark = 0; 1263 delim = 0; 1264 first = 1; 1265 while ((bp = strget(stp, q, uiop, first, &error)) == NULL) { 1266 int done = 0; 1267 1268 ASSERT(MUTEX_HELD(&stp->sd_lock)); 1269 1270 if (error != 0) 1271 goto oops; 1272 1273 if (stp->sd_flag & (STRHUP|STREOF)) { 1274 goto oops; 1275 } 1276 if (rflg && !(stp->sd_flag & STRDELIM)) { 1277 goto oops; 1278 } 1279 /* 1280 * If a read(fd,buf,0) has been done, there is no 1281 * need to sleep. We always have zero bytes to 1282 * return. 1283 */ 1284 if (uiop->uio_resid == 0) { 1285 goto oops; 1286 } 1287 1288 qbackenable(q, 0); 1289 1290 TRACE_3(TR_FAC_STREAMS_FR, TR_STRREAD_WAIT, 1291 "strread calls strwaitq:%p, %p, %p", 1292 vp, uiop, crp); 1293 if ((error = strwaitq(stp, waitflag, uiop->uio_resid, 1294 uiop->uio_fmode, -1, &done)) != 0 || done) { 1295 TRACE_3(TR_FAC_STREAMS_FR, TR_STRREAD_DONE, 1296 "strread error or done:%p, %p, %p", 1297 vp, uiop, crp); 1298 if ((uiop->uio_fmode & FNDELAY) && 1299 (stp->sd_flag & OLDNDELAY) && 1300 (error == EAGAIN)) 1301 error = 0; 1302 goto oops; 1303 } 1304 TRACE_3(TR_FAC_STREAMS_FR, TR_STRREAD_AWAKE, 1305 "strread awakes:%p, %p, %p", vp, uiop, crp); 1306 if ((error = i_straccess(stp, JCREAD)) != 0) { 1307 goto oops; 1308 } 1309 first = 0; 1310 } 1311 1312 ASSERT(MUTEX_HELD(&stp->sd_lock)); 1313 ASSERT(bp); 1314 pri = bp->b_band; 1315 /* 1316 * Extract any mark information. If the message is not 1317 * completely consumed this information will be put in the mblk 1318 * that is putback. 1319 * If MSGMARKNEXT is set and the message is completely consumed 1320 * the STRATMARK flag will be set below. Likewise, if 1321 * MSGNOTMARKNEXT is set and the message is 1322 * completely consumed STRNOTATMARK will be set. 1323 * 1324 * For some unknown reason strread only breaks the read at the 1325 * last mark. 1326 */ 1327 mark = bp->b_flag & (MSGMARK | MSGMARKNEXT | MSGNOTMARKNEXT); 1328 ASSERT((mark & (MSGMARKNEXT|MSGNOTMARKNEXT)) != 1329 (MSGMARKNEXT|MSGNOTMARKNEXT)); 1330 if (mark != 0 && bp == stp->sd_mark) { 1331 if (rflg) { 1332 putback(stp, q, bp, pri); 1333 goto oops; 1334 } 1335 mark |= _LASTMARK; 1336 stp->sd_mark = NULL; 1337 } 1338 if ((stp->sd_flag & STRDELIM) && (bp->b_flag & MSGDELIM)) 1339 delim = 1; 1340 mutex_exit(&stp->sd_lock); 1341 1342 if (STREAM_NEEDSERVICE(stp)) 1343 stream_runservice(stp); 1344 1345 type = bp->b_datap->db_type; 1346 1347 switch (type) { 1348 1349 case M_DATA: 1350 ismdata: 1351 if (msgnodata(bp)) { 1352 if (mark || delim) { 1353 freemsg(bp); 1354 } else if (rflg) { 1355 1356 /* 1357 * If already read data put zero 1358 * length message back on queue else 1359 * free msg and return 0. 1360 */ 1361 bp->b_band = pri; 1362 mutex_enter(&stp->sd_lock); 1363 putback(stp, q, bp, pri); 1364 mutex_exit(&stp->sd_lock); 1365 } else { 1366 freemsg(bp); 1367 } 1368 error = 0; 1369 goto oops1; 1370 } 1371 1372 rflg = 1; 1373 waitflag |= NOINTR; 1374 bp = struiocopyout(bp, uiop, &error); 1375 if (error != 0) 1376 goto oops1; 1377 1378 mutex_enter(&stp->sd_lock); 1379 if (bp) { 1380 /* 1381 * Have remaining data in message. 1382 * Free msg if in discard mode. 
1383 */ 1384 if (stp->sd_read_opt & RD_MSGDIS) { 1385 freemsg(bp); 1386 } else { 1387 bp->b_band = pri; 1388 if ((mark & _LASTMARK) && 1389 (stp->sd_mark == NULL)) 1390 stp->sd_mark = bp; 1391 bp->b_flag |= mark & ~_LASTMARK; 1392 if (delim) 1393 bp->b_flag |= MSGDELIM; 1394 if (msgnodata(bp)) 1395 freemsg(bp); 1396 else 1397 putback(stp, q, bp, pri); 1398 } 1399 } else { 1400 /* 1401 * Consumed the complete message. 1402 * Move the MSG*MARKNEXT information 1403 * to the stream head just in case 1404 * the read queue becomes empty. 1405 * 1406 * If the stream head was at the mark 1407 * (STRATMARK) before we dropped sd_lock above 1408 * and some data was consumed then we have 1409 * moved past the mark thus STRATMARK is 1410 * cleared. However, if a message arrived in 1411 * strrput during the copyout above causing 1412 * STRATMARK to be set we can not clear that 1413 * flag. 1414 */ 1415 if (mark & 1416 (MSGMARKNEXT|MSGNOTMARKNEXT|MSGMARK)) { 1417 if (mark & MSGMARKNEXT) { 1418 stp->sd_flag &= ~STRNOTATMARK; 1419 stp->sd_flag |= STRATMARK; 1420 } else if (mark & MSGNOTMARKNEXT) { 1421 stp->sd_flag &= ~STRATMARK; 1422 stp->sd_flag |= STRNOTATMARK; 1423 } else { 1424 stp->sd_flag &= 1425 ~(STRATMARK|STRNOTATMARK); 1426 } 1427 } else if (rflg && (old_sd_flag & STRATMARK)) { 1428 stp->sd_flag &= ~STRATMARK; 1429 } 1430 } 1431 1432 /* 1433 * Check for signal messages at the front of the read 1434 * queue and generate the signal(s) if appropriate. 1435 * The only signal that can be on queue is M_SIG at 1436 * this point. 1437 */ 1438 while ((((bp = q->q_first)) != NULL) && 1439 (bp->b_datap->db_type == M_SIG)) { 1440 bp = getq_noenab(q, 0); 1441 /* 1442 * sd_lock is held so the content of the 1443 * read queue can not change. 1444 */ 1445 ASSERT(bp != NULL && DB_TYPE(bp) == M_SIG); 1446 strsignal_nolock(stp, *bp->b_rptr, bp->b_band); 1447 mutex_exit(&stp->sd_lock); 1448 freemsg(bp); 1449 if (STREAM_NEEDSERVICE(stp)) 1450 stream_runservice(stp); 1451 mutex_enter(&stp->sd_lock); 1452 } 1453 1454 if ((uiop->uio_resid == 0) || (mark & _LASTMARK) || 1455 delim || 1456 (stp->sd_read_opt & (RD_MSGDIS|RD_MSGNODIS))) { 1457 goto oops; 1458 } 1459 continue; 1460 1461 case M_SIG: 1462 strsignal(stp, *bp->b_rptr, (int32_t)bp->b_band); 1463 freemsg(bp); 1464 mutex_enter(&stp->sd_lock); 1465 continue; 1466 1467 case M_PROTO: 1468 case M_PCPROTO: 1469 /* 1470 * Only data messages are readable. 1471 * Any others generate an error, unless 1472 * RD_PROTDIS or RD_PROTDAT is set. 
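			 *
			 * These read options normally reach the stream head
			 * via the I_SRDOPT ioctl; e.g. a user process might
			 * ask for protocol parts to be delivered as data
			 * with (illustrative only):
			 *
			 *	(void) ioctl(fd, I_SRDOPT, RPROTDAT);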
1473 */ 1474 if (stp->sd_read_opt & RD_PROTDAT) { 1475 for (nbp = bp; nbp; nbp = nbp->b_next) { 1476 if ((nbp->b_datap->db_type == 1477 M_PROTO) || 1478 (nbp->b_datap->db_type == 1479 M_PCPROTO)) { 1480 nbp->b_datap->db_type = M_DATA; 1481 } else { 1482 break; 1483 } 1484 } 1485 /* 1486 * clear stream head hi pri flag based on 1487 * first message 1488 */ 1489 if (type == M_PCPROTO) { 1490 mutex_enter(&stp->sd_lock); 1491 stp->sd_flag &= ~STRPRI; 1492 mutex_exit(&stp->sd_lock); 1493 } 1494 goto ismdata; 1495 } else if (stp->sd_read_opt & RD_PROTDIS) { 1496 /* 1497 * discard non-data messages 1498 */ 1499 while (bp && 1500 ((bp->b_datap->db_type == M_PROTO) || 1501 (bp->b_datap->db_type == M_PCPROTO))) { 1502 nbp = unlinkb(bp); 1503 freeb(bp); 1504 bp = nbp; 1505 } 1506 /* 1507 * clear stream head hi pri flag based on 1508 * first message 1509 */ 1510 if (type == M_PCPROTO) { 1511 mutex_enter(&stp->sd_lock); 1512 stp->sd_flag &= ~STRPRI; 1513 mutex_exit(&stp->sd_lock); 1514 } 1515 if (bp) { 1516 bp->b_band = pri; 1517 goto ismdata; 1518 } else { 1519 break; 1520 } 1521 } 1522 /* FALLTHRU */ 1523 case M_PASSFP: 1524 if ((bp->b_datap->db_type == M_PASSFP) && 1525 (stp->sd_read_opt & RD_PROTDIS)) { 1526 freemsg(bp); 1527 break; 1528 } 1529 mutex_enter(&stp->sd_lock); 1530 putback(stp, q, bp, pri); 1531 mutex_exit(&stp->sd_lock); 1532 if (rflg == 0) 1533 error = EBADMSG; 1534 goto oops1; 1535 1536 default: 1537 /* 1538 * Garbage on stream head read queue. 1539 */ 1540 cmn_err(CE_WARN, "bad %x found at stream head\n", 1541 bp->b_datap->db_type); 1542 freemsg(bp); 1543 goto oops1; 1544 } 1545 mutex_enter(&stp->sd_lock); 1546 } 1547 oops: 1548 mutex_exit(&stp->sd_lock); 1549 oops1: 1550 qbackenable(q, pri); 1551 return (error); 1552 #undef _LASTMARK 1553 } 1554 1555 /* 1556 * Default processing of M_PROTO/M_PCPROTO messages. 1557 * Determine which wakeups and signals are needed. 1558 * This can be replaced by a user-specified procedure for kernel users 1559 * of STREAMS. 1560 */ 1561 /* ARGSUSED */ 1562 mblk_t * 1563 strrput_proto(vnode_t *vp, mblk_t *mp, 1564 strwakeup_t *wakeups, strsigset_t *firstmsgsigs, 1565 strsigset_t *allmsgsigs, strpollset_t *pollwakeups) 1566 { 1567 *wakeups = RSLEEP; 1568 *allmsgsigs = 0; 1569 1570 switch (mp->b_datap->db_type) { 1571 case M_PROTO: 1572 if (mp->b_band == 0) { 1573 *firstmsgsigs = S_INPUT | S_RDNORM; 1574 *pollwakeups = POLLIN | POLLRDNORM; 1575 } else { 1576 *firstmsgsigs = S_INPUT | S_RDBAND; 1577 *pollwakeups = POLLIN | POLLRDBAND; 1578 } 1579 break; 1580 case M_PCPROTO: 1581 *firstmsgsigs = S_HIPRI; 1582 *pollwakeups = POLLPRI; 1583 break; 1584 } 1585 return (mp); 1586 } 1587 1588 /* 1589 * Default processing of everything but M_DATA, M_PROTO, M_PCPROTO and 1590 * M_PASSFP messages. 1591 * Determine which wakeups and signals are needed. 1592 * This can be replaced by a user-specified procedure for kernel users 1593 * of STREAMS. 1594 */ 1595 /* ARGSUSED */ 1596 mblk_t * 1597 strrput_misc(vnode_t *vp, mblk_t *mp, 1598 strwakeup_t *wakeups, strsigset_t *firstmsgsigs, 1599 strsigset_t *allmsgsigs, strpollset_t *pollwakeups) 1600 { 1601 *wakeups = 0; 1602 *firstmsgsigs = 0; 1603 *allmsgsigs = 0; 1604 *pollwakeups = 0; 1605 return (mp); 1606 } 1607 1608 /* 1609 * Stream read put procedure. Called from downstream driver/module 1610 * with messages for the stream head. Data, protocol, and in-stream 1611 * signal messages are placed on the queue, others are handled directly. 
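 *
 * Messages arrive here when the module or driver immediately below the
 * stream head calls putnext() on its read-side queue.  An illustrative
 * sketch of such a module read put procedure (the name xxrput is
 * hypothetical):
 *
 *	static int
 *	xxrput(queue_t *q, mblk_t *mp)
 *	{
 *		putnext(q, mp);
 *		return (0);
 *	}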
1612 */ 1613 int 1614 strrput(queue_t *q, mblk_t *bp) 1615 { 1616 struct stdata *stp; 1617 ulong_t rput_opt; 1618 strwakeup_t wakeups; 1619 strsigset_t firstmsgsigs; /* Signals if first message on queue */ 1620 strsigset_t allmsgsigs; /* Signals for all messages */ 1621 strsigset_t signals; /* Signals events to generate */ 1622 strpollset_t pollwakeups; 1623 mblk_t *nextbp; 1624 uchar_t band = 0; 1625 int hipri_sig; 1626 1627 stp = (struct stdata *)q->q_ptr; 1628 /* 1629 * Use rput_opt for optimized access to the SR_ flags except 1630 * SR_POLLIN. That flag has to be checked under sd_lock since it 1631 * is modified by strpoll(). 1632 */ 1633 rput_opt = stp->sd_rput_opt; 1634 1635 ASSERT(qclaimed(q)); 1636 TRACE_2(TR_FAC_STREAMS_FR, TR_STRRPUT_ENTER, 1637 "strrput called with message type:q %p bp %p", q, bp); 1638 1639 /* 1640 * Perform initial processing and pass to the parameterized functions. 1641 */ 1642 ASSERT(bp->b_next == NULL); 1643 1644 switch (bp->b_datap->db_type) { 1645 case M_DATA: 1646 /* 1647 * sockfs is the only consumer of STREOF and when it is set, 1648 * it implies that the receiver is not interested in receiving 1649 * any more data, hence the mblk is freed to prevent unnecessary 1650 * message queueing at the stream head. 1651 */ 1652 if (stp->sd_flag == STREOF) { 1653 freemsg(bp); 1654 return (0); 1655 } 1656 if ((rput_opt & SR_IGN_ZEROLEN) && 1657 bp->b_rptr == bp->b_wptr && msgnodata(bp)) { 1658 /* 1659 * Ignore zero-length M_DATA messages. These might be 1660 * generated by some transports. 1661 * The zero-length M_DATA messages, even if they 1662 * are ignored, should effect the atmark tracking and 1663 * should wake up a thread sleeping in strwaitmark. 1664 */ 1665 mutex_enter(&stp->sd_lock); 1666 if (bp->b_flag & MSGMARKNEXT) { 1667 /* 1668 * Record the position of the mark either 1669 * in q_last or in STRATMARK. 1670 */ 1671 if (q->q_last != NULL) { 1672 q->q_last->b_flag &= ~MSGNOTMARKNEXT; 1673 q->q_last->b_flag |= MSGMARKNEXT; 1674 } else { 1675 stp->sd_flag &= ~STRNOTATMARK; 1676 stp->sd_flag |= STRATMARK; 1677 } 1678 } else if (bp->b_flag & MSGNOTMARKNEXT) { 1679 /* 1680 * Record that this is not the position of 1681 * the mark either in q_last or in 1682 * STRNOTATMARK. 1683 */ 1684 if (q->q_last != NULL) { 1685 q->q_last->b_flag &= ~MSGMARKNEXT; 1686 q->q_last->b_flag |= MSGNOTMARKNEXT; 1687 } else { 1688 stp->sd_flag &= ~STRATMARK; 1689 stp->sd_flag |= STRNOTATMARK; 1690 } 1691 } 1692 if (stp->sd_flag & RSLEEP) { 1693 stp->sd_flag &= ~RSLEEP; 1694 cv_broadcast(&q->q_wait); 1695 } 1696 mutex_exit(&stp->sd_lock); 1697 freemsg(bp); 1698 return (0); 1699 } 1700 wakeups = RSLEEP; 1701 if (bp->b_band == 0) { 1702 firstmsgsigs = S_INPUT | S_RDNORM; 1703 pollwakeups = POLLIN | POLLRDNORM; 1704 } else { 1705 firstmsgsigs = S_INPUT | S_RDBAND; 1706 pollwakeups = POLLIN | POLLRDBAND; 1707 } 1708 if (rput_opt & SR_SIGALLDATA) 1709 allmsgsigs = firstmsgsigs; 1710 else 1711 allmsgsigs = 0; 1712 1713 mutex_enter(&stp->sd_lock); 1714 if ((rput_opt & SR_CONSOL_DATA) && 1715 (q->q_last != NULL) && 1716 (bp->b_flag & (MSGMARK|MSGDELIM)) == 0) { 1717 /* 1718 * Consolidate an M_DATA message onto an M_DATA, 1719 * M_PROTO, or M_PCPROTO by merging it with q_last. 1720 * The consolidation does not take place if 1721 * the old message is marked with either of the 1722 * marks or the delim flag or if the new 1723 * message is marked with MSGMARK. 
The MSGMARK 1724 * check is needed to handle the odd semantics of 1725 * MSGMARK where essentially the whole message 1726 * is to be treated as marked. 1727 * Carry any MSGMARKNEXT and MSGNOTMARKNEXT from the 1728 * new message to the front of the b_cont chain. 1729 */ 1730 mblk_t *lbp = q->q_last; 1731 unsigned char db_type = lbp->b_datap->db_type; 1732 1733 if ((db_type == M_DATA || db_type == M_PROTO || 1734 db_type == M_PCPROTO) && 1735 !(lbp->b_flag & (MSGDELIM|MSGMARK|MSGMARKNEXT))) { 1736 rmvq_noenab(q, lbp); 1737 /* 1738 * The first message in the b_cont list 1739 * tracks MSGMARKNEXT and MSGNOTMARKNEXT. 1740 * We need to handle the case where we 1741 * are appending: 1742 * 1743 * 1) a MSGMARKNEXT to a MSGNOTMARKNEXT. 1744 * 2) a MSGMARKNEXT to a plain message. 1745 * 3) a MSGNOTMARKNEXT to a plain message 1746 * 4) a MSGNOTMARKNEXT to a MSGNOTMARKNEXT 1747 * message. 1748 * 1749 * Thus we never append a MSGMARKNEXT or 1750 * MSGNOTMARKNEXT to a MSGMARKNEXT message. 1751 */ 1752 if (bp->b_flag & MSGMARKNEXT) { 1753 lbp->b_flag |= MSGMARKNEXT; 1754 lbp->b_flag &= ~MSGNOTMARKNEXT; 1755 bp->b_flag &= ~MSGMARKNEXT; 1756 } else if (bp->b_flag & MSGNOTMARKNEXT) { 1757 lbp->b_flag |= MSGNOTMARKNEXT; 1758 bp->b_flag &= ~MSGNOTMARKNEXT; 1759 } 1760 1761 linkb(lbp, bp); 1762 bp = lbp; 1763 /* 1764 * The new message logically isn't the first 1765 * even though the q_first check below thinks 1766 * it is. Clear the firstmsgsigs to make it 1767 * not appear to be first. 1768 */ 1769 firstmsgsigs = 0; 1770 } 1771 } 1772 break; 1773 1774 case M_PASSFP: 1775 wakeups = RSLEEP; 1776 allmsgsigs = 0; 1777 if (bp->b_band == 0) { 1778 firstmsgsigs = S_INPUT | S_RDNORM; 1779 pollwakeups = POLLIN | POLLRDNORM; 1780 } else { 1781 firstmsgsigs = S_INPUT | S_RDBAND; 1782 pollwakeups = POLLIN | POLLRDBAND; 1783 } 1784 mutex_enter(&stp->sd_lock); 1785 break; 1786 1787 case M_PROTO: 1788 case M_PCPROTO: 1789 ASSERT(stp->sd_rprotofunc != NULL); 1790 bp = (stp->sd_rprotofunc)(stp->sd_vnode, bp, 1791 &wakeups, &firstmsgsigs, &allmsgsigs, &pollwakeups); 1792 #define ALLSIG (S_INPUT|S_HIPRI|S_OUTPUT|S_MSG|S_ERROR|S_HANGUP|S_RDNORM|\ 1793 S_WRNORM|S_RDBAND|S_WRBAND|S_BANDURG) 1794 #define ALLPOLL (POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLWRNORM|POLLRDBAND|\ 1795 POLLWRBAND) 1796 1797 ASSERT((wakeups & ~(RSLEEP|WSLEEP)) == 0); 1798 ASSERT((firstmsgsigs & ~ALLSIG) == 0); 1799 ASSERT((allmsgsigs & ~ALLSIG) == 0); 1800 ASSERT((pollwakeups & ~ALLPOLL) == 0); 1801 1802 mutex_enter(&stp->sd_lock); 1803 break; 1804 1805 default: 1806 ASSERT(stp->sd_rmiscfunc != NULL); 1807 bp = (stp->sd_rmiscfunc)(stp->sd_vnode, bp, 1808 &wakeups, &firstmsgsigs, &allmsgsigs, &pollwakeups); 1809 ASSERT((wakeups & ~(RSLEEP|WSLEEP)) == 0); 1810 ASSERT((firstmsgsigs & ~ALLSIG) == 0); 1811 ASSERT((allmsgsigs & ~ALLSIG) == 0); 1812 ASSERT((pollwakeups & ~ALLPOLL) == 0); 1813 #undef ALLSIG 1814 #undef ALLPOLL 1815 mutex_enter(&stp->sd_lock); 1816 break; 1817 } 1818 ASSERT(MUTEX_HELD(&stp->sd_lock)); 1819 1820 /* By default generate superset of signals */ 1821 signals = (firstmsgsigs | allmsgsigs); 1822 1823 /* 1824 * The proto and misc functions can return multiple messages 1825 * as a b_next chain. Such messages are processed separately. 1826 */ 1827 one_more: 1828 hipri_sig = 0; 1829 if (bp == NULL) { 1830 nextbp = NULL; 1831 } else { 1832 nextbp = bp->b_next; 1833 bp->b_next = NULL; 1834 1835 switch (bp->b_datap->db_type) { 1836 case M_PCPROTO: 1837 /* 1838 * Only one priority protocol message is allowed at the 1839 * stream head at a time. 
1840 */ 1841 if (stp->sd_flag & STRPRI) { 1842 TRACE_0(TR_FAC_STREAMS_FR, TR_STRRPUT_PROTERR, 1843 "M_PCPROTO already at head"); 1844 freemsg(bp); 1845 mutex_exit(&stp->sd_lock); 1846 goto done; 1847 } 1848 stp->sd_flag |= STRPRI; 1849 hipri_sig = 1; 1850 /* FALLTHRU */ 1851 case M_DATA: 1852 case M_PROTO: 1853 case M_PASSFP: 1854 band = bp->b_band; 1855 /* 1856 * Marking doesn't work well when messages 1857 * are marked in more than one band. We only 1858 * remember the last message received, even if 1859 * it is placed on the queue ahead of other 1860 * marked messages. 1861 */ 1862 if (bp->b_flag & MSGMARK) 1863 stp->sd_mark = bp; 1864 (void) putq(q, bp); 1865 1866 /* 1867 * If message is a PCPROTO message, always use 1868 * firstmsgsigs to determine if a signal should be 1869 * sent as strrput is the only place to send 1870 * signals for PCPROTO. Other messages are based on 1871 * the STRGETINPROG flag. The flag determines if 1872 * strrput or (k)strgetmsg will be responsible for 1873 * sending the signals, in the firstmsgsigs case. 1874 */ 1875 if ((hipri_sig == 1) || 1876 (((stp->sd_flag & STRGETINPROG) == 0) && 1877 (q->q_first == bp))) 1878 signals = (firstmsgsigs | allmsgsigs); 1879 else 1880 signals = allmsgsigs; 1881 break; 1882 1883 default: 1884 mutex_exit(&stp->sd_lock); 1885 (void) strrput_nondata(q, bp); 1886 mutex_enter(&stp->sd_lock); 1887 break; 1888 } 1889 } 1890 ASSERT(MUTEX_HELD(&stp->sd_lock)); 1891 /* 1892 * Wake sleeping read/getmsg and cancel deferred wakeup 1893 */ 1894 if (wakeups & RSLEEP) 1895 stp->sd_wakeq &= ~RSLEEP; 1896 1897 wakeups &= stp->sd_flag; 1898 if (wakeups & RSLEEP) { 1899 stp->sd_flag &= ~RSLEEP; 1900 cv_broadcast(&q->q_wait); 1901 } 1902 if (wakeups & WSLEEP) { 1903 stp->sd_flag &= ~WSLEEP; 1904 cv_broadcast(&_WR(q)->q_wait); 1905 } 1906 1907 if (pollwakeups != 0) { 1908 if (pollwakeups == (POLLIN | POLLRDNORM)) { 1909 /* 1910 * Can't use rput_opt since it was not 1911 * read when sd_lock was held and SR_POLLIN is changed 1912 * by strpoll() under sd_lock. 1913 */ 1914 if (!(stp->sd_rput_opt & SR_POLLIN)) 1915 goto no_pollwake; 1916 stp->sd_rput_opt &= ~SR_POLLIN; 1917 } 1918 mutex_exit(&stp->sd_lock); 1919 pollwakeup(&stp->sd_pollist, pollwakeups); 1920 mutex_enter(&stp->sd_lock); 1921 } 1922 no_pollwake: 1923 1924 /* 1925 * strsendsig can handle multiple signals with a 1926 * single call. 1927 */ 1928 if (stp->sd_sigflags & signals) 1929 strsendsig(stp->sd_siglist, signals, band, 0); 1930 mutex_exit(&stp->sd_lock); 1931 1932 1933 done: 1934 if (nextbp == NULL) 1935 return (0); 1936 1937 /* 1938 * Any signals were handled the first time. 1939 * Wakeups and pollwakeups are redone to avoid any race 1940 * conditions - all the messages are not queued until the 1941 * last message has been processed by strrput. 1942 */ 1943 bp = nextbp; 1944 signals = firstmsgsigs = allmsgsigs = 0; 1945 mutex_enter(&stp->sd_lock); 1946 goto one_more; 1947 } 1948 1949 static void 1950 log_dupioc(queue_t *rq, mblk_t *bp) 1951 { 1952 queue_t *wq, *qp; 1953 char *modnames, *mnp, *dname; 1954 size_t maxmodstr; 1955 boolean_t islast; 1956 1957 /* 1958 * Allocate a buffer large enough to hold the names of nstrpush modules 1959 * and one driver, with spaces between and NUL terminator. If we can't 1960 * get memory, then we'll just log the driver name. 
1961 */ 1962 maxmodstr = nstrpush * (FMNAMESZ + 1); 1963 mnp = modnames = kmem_alloc(maxmodstr, KM_NOSLEEP); 1964 1965 /* march down write side to print log message down to the driver */ 1966 wq = WR(rq); 1967 1968 /* make sure q_next doesn't shift around while we're grabbing data */ 1969 claimstr(wq); 1970 qp = wq->q_next; 1971 do { 1972 dname = Q2NAME(qp); 1973 islast = !SAMESTR(qp) || qp->q_next == NULL; 1974 if (modnames == NULL) { 1975 /* 1976 * If we don't have memory, then get the driver name in 1977 * the log where we can see it. Note that memory 1978 * pressure is a possible cause of these sorts of bugs. 1979 */ 1980 if (islast) { 1981 modnames = dname; 1982 maxmodstr = 0; 1983 } 1984 } else { 1985 mnp += snprintf(mnp, FMNAMESZ + 1, "%s", dname); 1986 if (!islast) 1987 *mnp++ = ' '; 1988 } 1989 qp = qp->q_next; 1990 } while (!islast); 1991 releasestr(wq); 1992 /* Cannot happen unless stream head is corrupt. */ 1993 ASSERT(modnames != NULL); 1994 (void) strlog(rq->q_qinfo->qi_minfo->mi_idnum, 0, 1, 1995 SL_CONSOLE|SL_TRACE|SL_ERROR, 1996 "Warning: stream %p received duplicate %X M_IOC%s; module list: %s", 1997 rq->q_ptr, ((struct iocblk *)bp->b_rptr)->ioc_cmd, 1998 (DB_TYPE(bp) == M_IOCACK ? "ACK" : "NAK"), modnames); 1999 if (maxmodstr != 0) 2000 kmem_free(modnames, maxmodstr); 2001 } 2002 2003 int 2004 strrput_nondata(queue_t *q, mblk_t *bp) 2005 { 2006 struct stdata *stp; 2007 struct iocblk *iocbp; 2008 struct stroptions *sop; 2009 struct copyreq *reqp; 2010 struct copyresp *resp; 2011 unsigned char bpri; 2012 unsigned char flushed_already = 0; 2013 2014 stp = (struct stdata *)q->q_ptr; 2015 2016 ASSERT(!(stp->sd_flag & STPLEX)); 2017 ASSERT(qclaimed(q)); 2018 2019 switch (bp->b_datap->db_type) { 2020 case M_ERROR: 2021 /* 2022 * An error has occurred downstream, the errno is in the first 2023 * bytes of the message. 2024 */ 2025 if ((bp->b_wptr - bp->b_rptr) == 2) { /* New flavor */ 2026 unsigned char rw = 0; 2027 2028 mutex_enter(&stp->sd_lock); 2029 if (*bp->b_rptr != NOERROR) { /* read error */ 2030 if (*bp->b_rptr != 0) { 2031 if (stp->sd_flag & STRDERR) 2032 flushed_already |= FLUSHR; 2033 stp->sd_flag |= STRDERR; 2034 rw |= FLUSHR; 2035 } else { 2036 stp->sd_flag &= ~STRDERR; 2037 } 2038 stp->sd_rerror = *bp->b_rptr; 2039 } 2040 bp->b_rptr++; 2041 if (*bp->b_rptr != NOERROR) { /* write error */ 2042 if (*bp->b_rptr != 0) { 2043 if (stp->sd_flag & STWRERR) 2044 flushed_already |= FLUSHW; 2045 stp->sd_flag |= STWRERR; 2046 rw |= FLUSHW; 2047 } else { 2048 stp->sd_flag &= ~STWRERR; 2049 } 2050 stp->sd_werror = *bp->b_rptr; 2051 } 2052 if (rw) { 2053 TRACE_2(TR_FAC_STREAMS_FR, TR_STRRPUT_WAKE, 2054 "strrput cv_broadcast:q %p, bp %p", 2055 q, bp); 2056 cv_broadcast(&q->q_wait); /* readers */ 2057 cv_broadcast(&_WR(q)->q_wait); /* writers */ 2058 cv_broadcast(&stp->sd_monitor); /* ioctllers */ 2059 2060 mutex_exit(&stp->sd_lock); 2061 pollwakeup(&stp->sd_pollist, POLLERR); 2062 mutex_enter(&stp->sd_lock); 2063 2064 if (stp->sd_sigflags & S_ERROR) 2065 strsendsig(stp->sd_siglist, S_ERROR, 0, 2066 ((rw & FLUSHR) ? 
stp->sd_rerror : 2067 stp->sd_werror)); 2068 mutex_exit(&stp->sd_lock); 2069 /* 2070 * Send the M_FLUSH only 2071 * for the first M_ERROR 2072 * message on the stream 2073 */ 2074 if (flushed_already == rw) { 2075 freemsg(bp); 2076 return (0); 2077 } 2078 2079 bp->b_datap->db_type = M_FLUSH; 2080 *bp->b_rptr = rw; 2081 bp->b_wptr = bp->b_rptr + 1; 2082 /* 2083 * Protect against the driver 2084 * passing up messages after 2085 * it has done a qprocsoff 2086 */ 2087 if (_OTHERQ(q)->q_next == NULL) 2088 freemsg(bp); 2089 else 2090 qreply(q, bp); 2091 return (0); 2092 } else 2093 mutex_exit(&stp->sd_lock); 2094 } else if (*bp->b_rptr != 0) { /* Old flavor */ 2095 if (stp->sd_flag & (STRDERR|STWRERR)) 2096 flushed_already = FLUSHRW; 2097 mutex_enter(&stp->sd_lock); 2098 stp->sd_flag |= (STRDERR|STWRERR); 2099 stp->sd_rerror = *bp->b_rptr; 2100 stp->sd_werror = *bp->b_rptr; 2101 TRACE_2(TR_FAC_STREAMS_FR, 2102 TR_STRRPUT_WAKE2, 2103 "strrput wakeup #2:q %p, bp %p", q, bp); 2104 cv_broadcast(&q->q_wait); /* the readers */ 2105 cv_broadcast(&_WR(q)->q_wait); /* the writers */ 2106 cv_broadcast(&stp->sd_monitor); /* ioctllers */ 2107 2108 mutex_exit(&stp->sd_lock); 2109 pollwakeup(&stp->sd_pollist, POLLERR); 2110 mutex_enter(&stp->sd_lock); 2111 2112 if (stp->sd_sigflags & S_ERROR) 2113 strsendsig(stp->sd_siglist, S_ERROR, 0, 2114 (stp->sd_werror ? stp->sd_werror : 2115 stp->sd_rerror)); 2116 mutex_exit(&stp->sd_lock); 2117 2118 /* 2119 * Send the M_FLUSH only 2120 * for the first M_ERROR 2121 * message on the stream 2122 */ 2123 if (flushed_already != FLUSHRW) { 2124 bp->b_datap->db_type = M_FLUSH; 2125 *bp->b_rptr = FLUSHRW; 2126 /* 2127 * Protect against the driver passing up 2128 * messages after it has done a 2129 * qprocsoff. 2130 */ 2131 if (_OTHERQ(q)->q_next == NULL) 2132 freemsg(bp); 2133 else 2134 qreply(q, bp); 2135 return (0); 2136 } 2137 } 2138 freemsg(bp); 2139 return (0); 2140 2141 case M_HANGUP: 2142 2143 freemsg(bp); 2144 mutex_enter(&stp->sd_lock); 2145 stp->sd_werror = ENXIO; 2146 stp->sd_flag |= STRHUP; 2147 stp->sd_flag &= ~(WSLEEP|RSLEEP); 2148 2149 /* 2150 * send signal if controlling tty 2151 */ 2152 2153 if (stp->sd_sidp) { 2154 prsignal(stp->sd_sidp, SIGHUP); 2155 if (stp->sd_sidp != stp->sd_pgidp) 2156 pgsignal(stp->sd_pgidp, SIGTSTP); 2157 } 2158 2159 /* 2160 * wake up read, write, and exception pollers and 2161 * reset wakeup mechanism. 2162 */ 2163 cv_broadcast(&q->q_wait); /* the readers */ 2164 cv_broadcast(&_WR(q)->q_wait); /* the writers */ 2165 cv_broadcast(&stp->sd_monitor); /* the ioctllers */ 2166 strhup(stp); 2167 mutex_exit(&stp->sd_lock); 2168 return (0); 2169 2170 case M_UNHANGUP: 2171 freemsg(bp); 2172 mutex_enter(&stp->sd_lock); 2173 stp->sd_werror = 0; 2174 stp->sd_flag &= ~STRHUP; 2175 mutex_exit(&stp->sd_lock); 2176 return (0); 2177 2178 case M_SIG: 2179 /* 2180 * Someone downstream wants to post a signal. The 2181 * signal to post is contained in the first byte of the 2182 * message. If the message would go on the front of 2183 * the queue, send a signal to the process group 2184 * (if not SIGPOLL) or to the siglist processes 2185 * (SIGPOLL). If something is already on the queue, 2186 * OR if we are delivering a delayed suspend (*sigh* 2187 * another "tty" hack) and there's no one sleeping already, 2188 * just enqueue the message. 
2189 */ 2190 mutex_enter(&stp->sd_lock); 2191 if (q->q_first || (*bp->b_rptr == SIGTSTP && 2192 !(stp->sd_flag & RSLEEP))) { 2193 (void) putq(q, bp); 2194 mutex_exit(&stp->sd_lock); 2195 return (0); 2196 } 2197 mutex_exit(&stp->sd_lock); 2198 /* FALLTHRU */ 2199 2200 case M_PCSIG: 2201 /* 2202 * Don't enqueue, just post the signal. 2203 */ 2204 strsignal(stp, *bp->b_rptr, 0L); 2205 freemsg(bp); 2206 return (0); 2207 2208 case M_CMD: 2209 if (MBLKL(bp) != sizeof (cmdblk_t)) { 2210 freemsg(bp); 2211 return (0); 2212 } 2213 2214 mutex_enter(&stp->sd_lock); 2215 if (stp->sd_flag & STRCMDWAIT) { 2216 ASSERT(stp->sd_cmdblk == NULL); 2217 stp->sd_cmdblk = bp; 2218 cv_broadcast(&stp->sd_monitor); 2219 mutex_exit(&stp->sd_lock); 2220 } else { 2221 mutex_exit(&stp->sd_lock); 2222 freemsg(bp); 2223 } 2224 return (0); 2225 2226 case M_FLUSH: 2227 /* 2228 * Flush queues. The indication of which queues to flush 2229 * is in the first byte of the message. If the read queue 2230 * is specified, then flush it. If FLUSHBAND is set, just 2231 * flush the band specified by the second byte of the message. 2232 * 2233 * If a module has issued a M_SETOPT to not flush hi 2234 * priority messages off of the stream head, then pass this 2235 * flag into the flushq code to preserve such messages. 2236 */ 2237 2238 if (*bp->b_rptr & FLUSHR) { 2239 mutex_enter(&stp->sd_lock); 2240 if (*bp->b_rptr & FLUSHBAND) { 2241 ASSERT((bp->b_wptr - bp->b_rptr) >= 2); 2242 flushband(q, *(bp->b_rptr + 1), FLUSHALL); 2243 } else 2244 flushq_common(q, FLUSHALL, 2245 stp->sd_read_opt & RFLUSHPCPROT); 2246 if ((q->q_first == NULL) || 2247 (q->q_first->b_datap->db_type < QPCTL)) 2248 stp->sd_flag &= ~STRPRI; 2249 else { 2250 ASSERT(stp->sd_flag & STRPRI); 2251 } 2252 mutex_exit(&stp->sd_lock); 2253 } 2254 if ((*bp->b_rptr & FLUSHW) && !(bp->b_flag & MSGNOLOOP)) { 2255 *bp->b_rptr &= ~FLUSHR; 2256 bp->b_flag |= MSGNOLOOP; 2257 /* 2258 * Protect against the driver passing up 2259 * messages after it has done a qprocsoff. 2260 */ 2261 if (_OTHERQ(q)->q_next == NULL) 2262 freemsg(bp); 2263 else 2264 qreply(q, bp); 2265 return (0); 2266 } 2267 freemsg(bp); 2268 return (0); 2269 2270 case M_IOCACK: 2271 case M_IOCNAK: 2272 iocbp = (struct iocblk *)bp->b_rptr; 2273 /* 2274 * If not waiting for ACK or NAK then just free msg. 2275 * If incorrect id sequence number then just free msg. 2276 * If already have ACK or NAK for user then this is a 2277 * duplicate, display a warning and free the msg. 2278 */ 2279 mutex_enter(&stp->sd_lock); 2280 if ((stp->sd_flag & IOCWAIT) == 0 || stp->sd_iocblk || 2281 (stp->sd_iocid != iocbp->ioc_id)) { 2282 /* 2283 * If the ACK/NAK is a dup, display a message 2284 * Dup is when sd_iocid == ioc_id, and 2285 * sd_iocblk == <valid ptr> or -1 (the former 2286 * is when an ioctl has been put on the stream 2287 * head, but has not yet been consumed, the 2288 * later is when it has been consumed). 2289 */ 2290 if ((stp->sd_iocid == iocbp->ioc_id) && 2291 (stp->sd_iocblk != NULL)) { 2292 log_dupioc(q, bp); 2293 } 2294 freemsg(bp); 2295 mutex_exit(&stp->sd_lock); 2296 return (0); 2297 } 2298 2299 /* 2300 * Assign ACK or NAK to user and wake up. 2301 */ 2302 stp->sd_iocblk = bp; 2303 cv_broadcast(&stp->sd_monitor); 2304 mutex_exit(&stp->sd_lock); 2305 return (0); 2306 2307 case M_COPYIN: 2308 case M_COPYOUT: 2309 reqp = (struct copyreq *)bp->b_rptr; 2310 2311 /* 2312 * If not waiting for ACK or NAK then just fail request. 2313 * If already have ACK, NAK, or copy request, then just 2314 * fail request. 
2315 * If incorrect id sequence number then just fail request. 2316 */ 2317 mutex_enter(&stp->sd_lock); 2318 if ((stp->sd_flag & IOCWAIT) == 0 || stp->sd_iocblk || 2319 (stp->sd_iocid != reqp->cq_id)) { 2320 if (bp->b_cont) { 2321 freemsg(bp->b_cont); 2322 bp->b_cont = NULL; 2323 } 2324 bp->b_datap->db_type = M_IOCDATA; 2325 bp->b_wptr = bp->b_rptr + sizeof (struct copyresp); 2326 resp = (struct copyresp *)bp->b_rptr; 2327 resp->cp_rval = (caddr_t)1; /* failure */ 2328 mutex_exit(&stp->sd_lock); 2329 putnext(stp->sd_wrq, bp); 2330 return (0); 2331 } 2332 2333 /* 2334 * Assign copy request to user and wake up. 2335 */ 2336 stp->sd_iocblk = bp; 2337 cv_broadcast(&stp->sd_monitor); 2338 mutex_exit(&stp->sd_lock); 2339 return (0); 2340 2341 case M_SETOPTS: 2342 /* 2343 * Set stream head options (read option, write offset, 2344 * min/max packet size, and/or high/low water marks for 2345 * the read side only). 2346 */ 2347 2348 bpri = 0; 2349 sop = (struct stroptions *)bp->b_rptr; 2350 mutex_enter(&stp->sd_lock); 2351 if (sop->so_flags & SO_READOPT) { 2352 switch (sop->so_readopt & RMODEMASK) { 2353 case RNORM: 2354 stp->sd_read_opt &= ~(RD_MSGDIS | RD_MSGNODIS); 2355 break; 2356 2357 case RMSGD: 2358 stp->sd_read_opt = 2359 ((stp->sd_read_opt & ~RD_MSGNODIS) | 2360 RD_MSGDIS); 2361 break; 2362 2363 case RMSGN: 2364 stp->sd_read_opt = 2365 ((stp->sd_read_opt & ~RD_MSGDIS) | 2366 RD_MSGNODIS); 2367 break; 2368 } 2369 switch (sop->so_readopt & RPROTMASK) { 2370 case RPROTNORM: 2371 stp->sd_read_opt &= ~(RD_PROTDAT | RD_PROTDIS); 2372 break; 2373 2374 case RPROTDAT: 2375 stp->sd_read_opt = 2376 ((stp->sd_read_opt & ~RD_PROTDIS) | 2377 RD_PROTDAT); 2378 break; 2379 2380 case RPROTDIS: 2381 stp->sd_read_opt = 2382 ((stp->sd_read_opt & ~RD_PROTDAT) | 2383 RD_PROTDIS); 2384 break; 2385 } 2386 switch (sop->so_readopt & RFLUSHMASK) { 2387 case RFLUSHPCPROT: 2388 /* 2389 * This sets the stream head to NOT flush 2390 * M_PCPROTO messages. 
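 * The flag is honored by the M_FLUSH handling earlier in this
 * switch, which passes RFLUSHPCPROT to flushq_common() so that
 * M_PCPROTO messages already queued at the stream head survive
 * a FLUSHR.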
2391 */ 2392 stp->sd_read_opt |= RFLUSHPCPROT; 2393 break; 2394 } 2395 } 2396 if (sop->so_flags & SO_ERROPT) { 2397 switch (sop->so_erropt & RERRMASK) { 2398 case RERRNORM: 2399 stp->sd_flag &= ~STRDERRNONPERSIST; 2400 break; 2401 case RERRNONPERSIST: 2402 stp->sd_flag |= STRDERRNONPERSIST; 2403 break; 2404 } 2405 switch (sop->so_erropt & WERRMASK) { 2406 case WERRNORM: 2407 stp->sd_flag &= ~STWRERRNONPERSIST; 2408 break; 2409 case WERRNONPERSIST: 2410 stp->sd_flag |= STWRERRNONPERSIST; 2411 break; 2412 } 2413 } 2414 if (sop->so_flags & SO_COPYOPT) { 2415 if (sop->so_copyopt & ZCVMSAFE) { 2416 stp->sd_copyflag |= STZCVMSAFE; 2417 stp->sd_copyflag &= ~STZCVMUNSAFE; 2418 } else if (sop->so_copyopt & ZCVMUNSAFE) { 2419 stp->sd_copyflag |= STZCVMUNSAFE; 2420 stp->sd_copyflag &= ~STZCVMSAFE; 2421 } 2422 2423 if (sop->so_copyopt & COPYCACHED) { 2424 stp->sd_copyflag |= STRCOPYCACHED; 2425 } 2426 } 2427 if (sop->so_flags & SO_WROFF) 2428 stp->sd_wroff = sop->so_wroff; 2429 if (sop->so_flags & SO_TAIL) 2430 stp->sd_tail = sop->so_tail; 2431 if (sop->so_flags & SO_MINPSZ) 2432 q->q_minpsz = sop->so_minpsz; 2433 if (sop->so_flags & SO_MAXPSZ) 2434 q->q_maxpsz = sop->so_maxpsz; 2435 if (sop->so_flags & SO_MAXBLK) 2436 stp->sd_maxblk = sop->so_maxblk; 2437 if (sop->so_flags & SO_HIWAT) { 2438 if (sop->so_flags & SO_BAND) { 2439 if (strqset(q, QHIWAT, 2440 sop->so_band, sop->so_hiwat)) { 2441 cmn_err(CE_WARN, "strrput: could not " 2442 "allocate qband\n"); 2443 } else { 2444 bpri = sop->so_band; 2445 } 2446 } else { 2447 q->q_hiwat = sop->so_hiwat; 2448 } 2449 } 2450 if (sop->so_flags & SO_LOWAT) { 2451 if (sop->so_flags & SO_BAND) { 2452 if (strqset(q, QLOWAT, 2453 sop->so_band, sop->so_lowat)) { 2454 cmn_err(CE_WARN, "strrput: could not " 2455 "allocate qband\n"); 2456 } else { 2457 bpri = sop->so_band; 2458 } 2459 } else { 2460 q->q_lowat = sop->so_lowat; 2461 } 2462 } 2463 if (sop->so_flags & SO_MREADON) 2464 stp->sd_flag |= SNDMREAD; 2465 if (sop->so_flags & SO_MREADOFF) 2466 stp->sd_flag &= ~SNDMREAD; 2467 if (sop->so_flags & SO_NDELON) 2468 stp->sd_flag |= OLDNDELAY; 2469 if (sop->so_flags & SO_NDELOFF) 2470 stp->sd_flag &= ~OLDNDELAY; 2471 if (sop->so_flags & SO_ISTTY) 2472 stp->sd_flag |= STRISTTY; 2473 if (sop->so_flags & SO_ISNTTY) 2474 stp->sd_flag &= ~STRISTTY; 2475 if (sop->so_flags & SO_TOSTOP) 2476 stp->sd_flag |= STRTOSTOP; 2477 if (sop->so_flags & SO_TONSTOP) 2478 stp->sd_flag &= ~STRTOSTOP; 2479 if (sop->so_flags & SO_DELIM) 2480 stp->sd_flag |= STRDELIM; 2481 if (sop->so_flags & SO_NODELIM) 2482 stp->sd_flag &= ~STRDELIM; 2483 2484 mutex_exit(&stp->sd_lock); 2485 freemsg(bp); 2486 2487 /* Check backenable in case the water marks changed */ 2488 qbackenable(q, bpri); 2489 return (0); 2490 2491 /* 2492 * The following set of cases deal with situations where two stream 2493 * heads are connected to each other (twisted streams). These messages 2494 * have no meaning at the stream head. 2495 */ 2496 case M_BREAK: 2497 case M_CTL: 2498 case M_DELAY: 2499 case M_START: 2500 case M_STOP: 2501 case M_IOCDATA: 2502 case M_STARTI: 2503 case M_STOPI: 2504 freemsg(bp); 2505 return (0); 2506 2507 case M_IOCTL: 2508 /* 2509 * Always NAK this condition 2510 * (makes no sense) 2511 * If there is one or more threads in the read side 2512 * rwnext we have to defer the nacking until that thread 2513 * returns (in strget). 2514 */ 2515 mutex_enter(&stp->sd_lock); 2516 if (stp->sd_struiodnak != 0) { 2517 /* 2518 * Defer NAK to the streamhead. Queue at the end 2519 * the list. 
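 * The message is appended to the tail of sd_struionak; the list
 * is drained and each entry NAKed after the deferring read-side
 * threads have returned (see strget()).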
2520 */ 2521 mblk_t *mp = stp->sd_struionak; 2522 2523 while (mp && mp->b_next) 2524 mp = mp->b_next; 2525 if (mp) 2526 mp->b_next = bp; 2527 else 2528 stp->sd_struionak = bp; 2529 bp->b_next = NULL; 2530 mutex_exit(&stp->sd_lock); 2531 return (0); 2532 } 2533 mutex_exit(&stp->sd_lock); 2534 2535 bp->b_datap->db_type = M_IOCNAK; 2536 /* 2537 * Protect against the driver passing up 2538 * messages after it has done a qprocsoff. 2539 */ 2540 if (_OTHERQ(q)->q_next == NULL) 2541 freemsg(bp); 2542 else 2543 qreply(q, bp); 2544 return (0); 2545 2546 default: 2547 #ifdef DEBUG 2548 cmn_err(CE_WARN, 2549 "bad message type %x received at stream head\n", 2550 bp->b_datap->db_type); 2551 #endif 2552 freemsg(bp); 2553 return (0); 2554 } 2555 2556 /* NOTREACHED */ 2557 } 2558 2559 /* 2560 * Check if the stream pointed to by `stp' can be written to, and return an 2561 * error code if not. If `eiohup' is set, then return EIO if STRHUP is set. 2562 * If `sigpipeok' is set and the SW_SIGPIPE option is enabled on the stream, 2563 * then always return EPIPE and send a SIGPIPE to the invoking thread. 2564 */ 2565 static int 2566 strwriteable(struct stdata *stp, boolean_t eiohup, boolean_t sigpipeok) 2567 { 2568 int error; 2569 2570 ASSERT(MUTEX_HELD(&stp->sd_lock)); 2571 2572 /* 2573 * For modem support, POSIX states that on writes, EIO should 2574 * be returned if the stream has been hung up. 2575 */ 2576 if (eiohup && (stp->sd_flag & (STPLEX|STRHUP)) == STRHUP) 2577 error = EIO; 2578 else 2579 error = strgeterr(stp, STRHUP|STPLEX|STWRERR, 0); 2580 2581 if (error != 0) { 2582 if (!(stp->sd_flag & STPLEX) && 2583 (stp->sd_wput_opt & SW_SIGPIPE) && sigpipeok) { 2584 tsignal(curthread, SIGPIPE); 2585 error = EPIPE; 2586 } 2587 } 2588 2589 return (error); 2590 } 2591 2592 /* 2593 * Copyin and send data down a stream. 2594 * The caller will allocate and copyin any control part that precedes the 2595 * message and pass that in as mctl. 2596 * 2597 * Caller should *not* hold sd_lock. 2598 * When EWOULDBLOCK is returned the caller has to redo the canputnext 2599 * under sd_lock in order to avoid missing a backenabling wakeup. 2600 * 2601 * Use iosize = -1 to not send any M_DATA. iosize = 0 sends zero-length M_DATA. 2602 * 2603 * Set MSG_IGNFLOW in flags to ignore flow control for hipri messages. 2604 * For sync streams we can only ignore flow control by reverting to using 2605 * putnext. 2606 * 2607 * If sd_maxblk is less than *iosize this routine might return without 2608 * transferring all of *iosize. In all cases, on return *iosize will contain 2609 * the amount of data that was transferred. 2610 */ 2611 static int 2612 strput(struct stdata *stp, mblk_t *mctl, struct uio *uiop, ssize_t *iosize, 2613 int b_flag, int pri, int flags) 2614 { 2615 struiod_t uiod; 2616 struct iovec buf[IOV_MAX_STACK]; 2617 int iovlen = 0; 2618 mblk_t *mp; 2619 queue_t *wqp = stp->sd_wrq; 2620 int error = 0; 2621 ssize_t count = *iosize; 2622 2623 ASSERT(MUTEX_NOT_HELD(&stp->sd_lock)); 2624 2625 if (uiop != NULL && count >= 0) 2626 flags |= stp->sd_struiowrq ? STRUIO_POSTPONE : 0; 2627 2628 if (!(flags & STRUIO_POSTPONE)) { 2629 /* 2630 * Use regular canputnext, strmakedata, putnext sequence. 
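 * Flow control is checked with canputnext()/bcanputnext() before
 * strmakedata() copies anything from the uio, so on EWOULDBLOCK
 * the uio is left untouched (although any mctl passed in has
 * been freed).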
2631 */ 2632 if (pri == 0) { 2633 if (!canputnext(wqp) && !(flags & MSG_IGNFLOW)) { 2634 freemsg(mctl); 2635 return (EWOULDBLOCK); 2636 } 2637 } else { 2638 if (!(flags & MSG_IGNFLOW) && !bcanputnext(wqp, pri)) { 2639 freemsg(mctl); 2640 return (EWOULDBLOCK); 2641 } 2642 } 2643 2644 if ((error = strmakedata(iosize, uiop, stp, flags, 2645 &mp)) != 0) { 2646 freemsg(mctl); 2647 /* 2648 * need to change return code to ENOMEM 2649 * so that this is not confused with 2650 * flow control, EAGAIN. 2651 */ 2652 2653 if (error == EAGAIN) 2654 return (ENOMEM); 2655 else 2656 return (error); 2657 } 2658 if (mctl != NULL) { 2659 if (mctl->b_cont == NULL) 2660 mctl->b_cont = mp; 2661 else if (mp != NULL) 2662 linkb(mctl, mp); 2663 mp = mctl; 2664 } else if (mp == NULL) 2665 return (0); 2666 2667 mp->b_flag |= b_flag; 2668 mp->b_band = (uchar_t)pri; 2669 2670 if (flags & MSG_IGNFLOW) { 2671 /* 2672 * XXX Hack: Don't get stuck running service 2673 * procedures. This is needed for sockfs when 2674 * sending the unbind message out of the rput 2675 * procedure - we don't want a put procedure 2676 * to run service procedures. 2677 */ 2678 putnext(wqp, mp); 2679 } else { 2680 stream_willservice(stp); 2681 putnext(wqp, mp); 2682 stream_runservice(stp); 2683 } 2684 return (0); 2685 } 2686 /* 2687 * Stream supports rwnext() for the write side. 2688 */ 2689 if ((error = strmakedata(iosize, uiop, stp, flags, &mp)) != 0) { 2690 freemsg(mctl); 2691 /* 2692 * map EAGAIN to ENOMEM since EAGAIN means "flow controlled". 2693 */ 2694 return (error == EAGAIN ? ENOMEM : error); 2695 } 2696 if (mctl != NULL) { 2697 if (mctl->b_cont == NULL) 2698 mctl->b_cont = mp; 2699 else if (mp != NULL) 2700 linkb(mctl, mp); 2701 mp = mctl; 2702 } else if (mp == NULL) { 2703 return (0); 2704 } 2705 2706 mp->b_flag |= b_flag; 2707 mp->b_band = (uchar_t)pri; 2708 2709 if (uiop->uio_iovcnt > IOV_MAX_STACK) { 2710 iovlen = uiop->uio_iovcnt * sizeof (iovec_t); 2711 uiod.d_iov = (struct iovec *)kmem_alloc(iovlen, KM_SLEEP); 2712 } else { 2713 uiod.d_iov = buf; 2714 } 2715 2716 (void) uiodup(uiop, &uiod.d_uio, uiod.d_iov, uiop->uio_iovcnt); 2717 uiod.d_uio.uio_offset = 0; 2718 uiod.d_mp = mp; 2719 error = rwnext(wqp, &uiod); 2720 if (! uiod.d_mp) { 2721 uioskip(uiop, *iosize); 2722 if (iovlen != 0) 2723 kmem_free(uiod.d_iov, iovlen); 2724 return (error); 2725 } 2726 ASSERT(mp == uiod.d_mp); 2727 if (error == EINVAL) { 2728 /* 2729 * The stream plumbing must have changed while 2730 * we were away, so just turn off rwnext()s. 2731 */ 2732 error = 0; 2733 } else if (error == EBUSY || error == EWOULDBLOCK) { 2734 /* 2735 * Couldn't enter a perimeter or took a page fault, 2736 * so fall-back to putnext(). 
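 * error is cleared and control falls through to the regular
 * canputnext/struioget/putnext path below.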
2737 */ 2738 error = 0; 2739 } else { 2740 freemsg(mp); 2741 if (iovlen != 0) 2742 kmem_free(uiod.d_iov, iovlen); 2743 return (error); 2744 } 2745 /* Have to check canput before consuming data from the uio */ 2746 if (pri == 0) { 2747 if (!canputnext(wqp) && !(flags & MSG_IGNFLOW)) { 2748 freemsg(mp); 2749 if (iovlen != 0) 2750 kmem_free(uiod.d_iov, iovlen); 2751 return (EWOULDBLOCK); 2752 } 2753 } else { 2754 if (!bcanputnext(wqp, pri) && !(flags & MSG_IGNFLOW)) { 2755 freemsg(mp); 2756 if (iovlen != 0) 2757 kmem_free(uiod.d_iov, iovlen); 2758 return (EWOULDBLOCK); 2759 } 2760 } 2761 ASSERT(mp == uiod.d_mp); 2762 /* Copyin data from the uio */ 2763 if ((error = struioget(wqp, mp, &uiod, 0)) != 0) { 2764 freemsg(mp); 2765 if (iovlen != 0) 2766 kmem_free(uiod.d_iov, iovlen); 2767 return (error); 2768 } 2769 uioskip(uiop, *iosize); 2770 if (flags & MSG_IGNFLOW) { 2771 /* 2772 * XXX Hack: Don't get stuck running service procedures. 2773 * This is needed for sockfs when sending the unbind message 2774 * out of the rput procedure - we don't want a put procedure 2775 * to run service procedures. 2776 */ 2777 putnext(wqp, mp); 2778 } else { 2779 stream_willservice(stp); 2780 putnext(wqp, mp); 2781 stream_runservice(stp); 2782 } 2783 if (iovlen != 0) 2784 kmem_free(uiod.d_iov, iovlen); 2785 return (0); 2786 } 2787 2788 /* 2789 * Write attempts to break the write request into messages conforming 2790 * with the minimum and maximum packet sizes set downstream. 2791 * 2792 * Write will not block if downstream queue is full and 2793 * O_NDELAY is set, otherwise it will block waiting for the queue to get room. 2794 * 2795 * A write of zero bytes gets packaged into a zero length message and sent 2796 * downstream like any other message. 2797 * 2798 * If buffers of the requested sizes are not available, the write will 2799 * sleep until the buffers become available. 2800 * 2801 * Write (if specified) will supply a write offset in a message if it 2802 * makes sense. This can be specified by downstream modules as part of 2803 * a M_SETOPTS message. Write will not supply the write offset if it 2804 * cannot supply any data in a buffer. In other words, write will never 2805 * send down an empty packet due to a write offset. 2806 */ 2807 /* ARGSUSED2 */ 2808 int 2809 strwrite(struct vnode *vp, struct uio *uiop, cred_t *crp) 2810 { 2811 return (strwrite_common(vp, uiop, crp, 0)); 2812 } 2813 2814 /* ARGSUSED2 */ 2815 int 2816 strwrite_common(struct vnode *vp, struct uio *uiop, cred_t *crp, int wflag) 2817 { 2818 struct stdata *stp; 2819 struct queue *wqp; 2820 ssize_t rmin, rmax; 2821 ssize_t iosize; 2822 int waitflag; 2823 int tempmode; 2824 int error = 0; 2825 int b_flag; 2826 2827 ASSERT(vp->v_stream); 2828 stp = vp->v_stream; 2829 2830 mutex_enter(&stp->sd_lock); 2831 2832 if ((error = i_straccess(stp, JCWRITE)) != 0) { 2833 mutex_exit(&stp->sd_lock); 2834 return (error); 2835 } 2836 2837 if (stp->sd_flag & (STWRERR|STRHUP|STPLEX)) { 2838 error = strwriteable(stp, B_TRUE, B_TRUE); 2839 if (error != 0) { 2840 mutex_exit(&stp->sd_lock); 2841 return (error); 2842 } 2843 } 2844 2845 mutex_exit(&stp->sd_lock); 2846 2847 wqp = stp->sd_wrq; 2848 2849 /* get these values from them cached in the stream head */ 2850 rmin = stp->sd_qn_minpsz; 2851 rmax = stp->sd_qn_maxpsz; 2852 2853 /* 2854 * Check the min/max packet size constraints. If min packet size 2855 * is non-zero, the write cannot be split into multiple messages 2856 * and still guarantee the size constraints. 
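 * When rmin is non-zero, a request smaller than rmin or larger
 * than a finite rmax fails with ERANGE; a conforming request is
 * broken into messages of at most rmax bytes by the loop below.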
2857 */ 2858 TRACE_1(TR_FAC_STREAMS_FR, TR_STRWRITE_IN, "strwrite in:q %p", wqp); 2859 2860 ASSERT((rmax >= 0) || (rmax == INFPSZ)); 2861 if (rmax == 0) { 2862 return (0); 2863 } 2864 if (rmin > 0) { 2865 if (uiop->uio_resid < rmin) { 2866 TRACE_3(TR_FAC_STREAMS_FR, TR_STRWRITE_OUT, 2867 "strwrite out:q %p out %d error %d", 2868 wqp, 0, ERANGE); 2869 return (ERANGE); 2870 } 2871 if ((rmax != INFPSZ) && (uiop->uio_resid > rmax)) { 2872 TRACE_3(TR_FAC_STREAMS_FR, TR_STRWRITE_OUT, 2873 "strwrite out:q %p out %d error %d", 2874 wqp, 1, ERANGE); 2875 return (ERANGE); 2876 } 2877 } 2878 2879 /* 2880 * Do until count satisfied or error. 2881 */ 2882 waitflag = WRITEWAIT | wflag; 2883 if (stp->sd_flag & OLDNDELAY) 2884 tempmode = uiop->uio_fmode & ~FNDELAY; 2885 else 2886 tempmode = uiop->uio_fmode; 2887 2888 if (rmax == INFPSZ) 2889 rmax = uiop->uio_resid; 2890 2891 /* 2892 * Note that tempmode does not get used in strput/strmakedata 2893 * but only in strwaitq. The other routines use uio_fmode 2894 * unmodified. 2895 */ 2896 2897 /* LINTED: constant in conditional context */ 2898 while (1) { /* breaks when uio_resid reaches zero */ 2899 /* 2900 * Determine the size of the next message to be 2901 * packaged. May have to break write into several 2902 * messages based on max packet size. 2903 */ 2904 iosize = MIN(uiop->uio_resid, rmax); 2905 2906 /* 2907 * Put block downstream when flow control allows it. 2908 */ 2909 if ((stp->sd_flag & STRDELIM) && (uiop->uio_resid == iosize)) 2910 b_flag = MSGDELIM; 2911 else 2912 b_flag = 0; 2913 2914 for (;;) { 2915 int done = 0; 2916 2917 error = strput(stp, NULL, uiop, &iosize, b_flag, 0, 0); 2918 if (error == 0) 2919 break; 2920 if (error != EWOULDBLOCK) 2921 goto out; 2922 2923 mutex_enter(&stp->sd_lock); 2924 /* 2925 * Check for a missed wakeup. 2926 * Needed since strput did not hold sd_lock across 2927 * the canputnext. 2928 */ 2929 if (canputnext(wqp)) { 2930 /* Try again */ 2931 mutex_exit(&stp->sd_lock); 2932 continue; 2933 } 2934 TRACE_1(TR_FAC_STREAMS_FR, TR_STRWRITE_WAIT, 2935 "strwrite wait:q %p wait", wqp); 2936 if ((error = strwaitq(stp, waitflag, (ssize_t)0, 2937 tempmode, -1, &done)) != 0 || done) { 2938 mutex_exit(&stp->sd_lock); 2939 if ((vp->v_type == VFIFO) && 2940 (uiop->uio_fmode & FNDELAY) && 2941 (error == EAGAIN)) 2942 error = 0; 2943 goto out; 2944 } 2945 TRACE_1(TR_FAC_STREAMS_FR, TR_STRWRITE_WAKE, 2946 "strwrite wake:q %p awakes", wqp); 2947 if ((error = i_straccess(stp, JCWRITE)) != 0) { 2948 mutex_exit(&stp->sd_lock); 2949 goto out; 2950 } 2951 mutex_exit(&stp->sd_lock); 2952 } 2953 waitflag |= NOINTR; 2954 TRACE_2(TR_FAC_STREAMS_FR, TR_STRWRITE_RESID, 2955 "strwrite resid:q %p uiop %p", wqp, uiop); 2956 if (uiop->uio_resid) { 2957 /* Recheck for errors - needed for sockets */ 2958 if ((stp->sd_wput_opt & SW_RECHECK_ERR) && 2959 (stp->sd_flag & (STWRERR|STRHUP|STPLEX))) { 2960 mutex_enter(&stp->sd_lock); 2961 error = strwriteable(stp, B_FALSE, B_TRUE); 2962 mutex_exit(&stp->sd_lock); 2963 if (error != 0) 2964 return (error); 2965 } 2966 continue; 2967 } 2968 break; 2969 } 2970 out: 2971 /* 2972 * For historical reasons, applications expect EAGAIN when a data 2973 * mblk_t cannot be allocated, so change ENOMEM back to EAGAIN. 2974 */ 2975 if (error == ENOMEM) 2976 error = EAGAIN; 2977 TRACE_3(TR_FAC_STREAMS_FR, TR_STRWRITE_OUT, 2978 "strwrite out:q %p out %d error %d", wqp, 2, error); 2979 return (error); 2980 } 2981 2982 /* 2983 * Stream head write service routine. 
2984 * Its job is to wake up any sleeping writers when a queue 2985 * downstream needs data (part of the flow control in putq and getq). 2986 * It also must wake anyone sleeping on a poll(). 2987 * For stream head right below mux module, it must also invoke put procedure 2988 * of next downstream module. 2989 */ 2990 int 2991 strwsrv(queue_t *q) 2992 { 2993 struct stdata *stp; 2994 queue_t *tq; 2995 qband_t *qbp; 2996 int i; 2997 qband_t *myqbp; 2998 int isevent; 2999 unsigned char qbf[NBAND]; /* band flushing backenable flags */ 3000 3001 TRACE_1(TR_FAC_STREAMS_FR, 3002 TR_STRWSRV, "strwsrv:q %p", q); 3003 stp = (struct stdata *)q->q_ptr; 3004 ASSERT(qclaimed(q)); 3005 mutex_enter(&stp->sd_lock); 3006 ASSERT(!(stp->sd_flag & STPLEX)); 3007 3008 if (stp->sd_flag & WSLEEP) { 3009 stp->sd_flag &= ~WSLEEP; 3010 cv_broadcast(&q->q_wait); 3011 } 3012 mutex_exit(&stp->sd_lock); 3013 3014 /* The other end of a stream pipe went away. */ 3015 if ((tq = q->q_next) == NULL) { 3016 return (0); 3017 } 3018 3019 /* Find the next module forward that has a service procedure */ 3020 claimstr(q); 3021 tq = q->q_nfsrv; 3022 ASSERT(tq != NULL); 3023 3024 if ((q->q_flag & QBACK)) { 3025 if ((tq->q_flag & QFULL)) { 3026 mutex_enter(QLOCK(tq)); 3027 if (!(tq->q_flag & QFULL)) { 3028 mutex_exit(QLOCK(tq)); 3029 goto wakeup; 3030 } 3031 /* 3032 * The queue must have become full again. Set QWANTW 3033 * again so strwsrv will be back enabled when 3034 * the queue becomes non-full next time. 3035 */ 3036 tq->q_flag |= QWANTW; 3037 mutex_exit(QLOCK(tq)); 3038 } else { 3039 wakeup: 3040 pollwakeup(&stp->sd_pollist, POLLWRNORM); 3041 mutex_enter(&stp->sd_lock); 3042 if (stp->sd_sigflags & S_WRNORM) 3043 strsendsig(stp->sd_siglist, S_WRNORM, 0, 0); 3044 mutex_exit(&stp->sd_lock); 3045 } 3046 } 3047 3048 isevent = 0; 3049 i = 1; 3050 bzero((caddr_t)qbf, NBAND); 3051 mutex_enter(QLOCK(tq)); 3052 if ((myqbp = q->q_bandp) != NULL) 3053 for (qbp = tq->q_bandp; qbp && myqbp; qbp = qbp->qb_next) { 3054 ASSERT(myqbp); 3055 if ((myqbp->qb_flag & QB_BACK)) { 3056 if (qbp->qb_flag & QB_FULL) { 3057 /* 3058 * The band must have become full again. 3059 * Set QB_WANTW again so strwsrv will 3060 * be back enabled when the band becomes 3061 * non-full next time. 3062 */ 3063 qbp->qb_flag |= QB_WANTW; 3064 } else { 3065 isevent = 1; 3066 qbf[i] = 1; 3067 } 3068 } 3069 myqbp = myqbp->qb_next; 3070 i++; 3071 } 3072 mutex_exit(QLOCK(tq)); 3073 3074 if (isevent) { 3075 for (i = tq->q_nband; i; i--) { 3076 if (qbf[i]) { 3077 pollwakeup(&stp->sd_pollist, POLLWRBAND); 3078 mutex_enter(&stp->sd_lock); 3079 if (stp->sd_sigflags & S_WRBAND) 3080 strsendsig(stp->sd_siglist, S_WRBAND, 3081 (uchar_t)i, 0); 3082 mutex_exit(&stp->sd_lock); 3083 } 3084 } 3085 } 3086 3087 releasestr(q); 3088 return (0); 3089 } 3090 3091 /* 3092 * Special case of strcopyin/strcopyout for copying 3093 * struct strioctl that can deal with both data 3094 * models. 
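 * Under _LP64 an ILP32 caller presents a struct strioctl32 whose
 * ic_dp is a 32-bit caddr32_t, which is converted to and from the
 * native struct strioctl here; K_TO_K copies are always native
 * and simply use bcopy().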
3095 */ 3096 3097 #ifdef _LP64 3098 3099 static int 3100 strcopyin_strioctl(void *from, void *to, int flag, int copyflag) 3101 { 3102 struct strioctl32 strioc32; 3103 struct strioctl *striocp; 3104 3105 if (copyflag & U_TO_K) { 3106 ASSERT((copyflag & K_TO_K) == 0); 3107 3108 if ((flag & FMODELS) == DATAMODEL_ILP32) { 3109 if (copyin(from, &strioc32, sizeof (strioc32))) 3110 return (EFAULT); 3111 3112 striocp = (struct strioctl *)to; 3113 striocp->ic_cmd = strioc32.ic_cmd; 3114 striocp->ic_timout = strioc32.ic_timout; 3115 striocp->ic_len = strioc32.ic_len; 3116 striocp->ic_dp = (char *)(uintptr_t)strioc32.ic_dp; 3117 3118 } else { /* NATIVE data model */ 3119 if (copyin(from, to, sizeof (struct strioctl))) { 3120 return (EFAULT); 3121 } else { 3122 return (0); 3123 } 3124 } 3125 } else { 3126 ASSERT(copyflag & K_TO_K); 3127 bcopy(from, to, sizeof (struct strioctl)); 3128 } 3129 return (0); 3130 } 3131 3132 static int 3133 strcopyout_strioctl(void *from, void *to, int flag, int copyflag) 3134 { 3135 struct strioctl32 strioc32; 3136 struct strioctl *striocp; 3137 3138 if (copyflag & U_TO_K) { 3139 ASSERT((copyflag & K_TO_K) == 0); 3140 3141 if ((flag & FMODELS) == DATAMODEL_ILP32) { 3142 striocp = (struct strioctl *)from; 3143 strioc32.ic_cmd = striocp->ic_cmd; 3144 strioc32.ic_timout = striocp->ic_timout; 3145 strioc32.ic_len = striocp->ic_len; 3146 strioc32.ic_dp = (caddr32_t)(uintptr_t)striocp->ic_dp; 3147 ASSERT((char *)(uintptr_t)strioc32.ic_dp == 3148 striocp->ic_dp); 3149 3150 if (copyout(&strioc32, to, sizeof (strioc32))) 3151 return (EFAULT); 3152 3153 } else { /* NATIVE data model */ 3154 if (copyout(from, to, sizeof (struct strioctl))) { 3155 return (EFAULT); 3156 } else { 3157 return (0); 3158 } 3159 } 3160 } else { 3161 ASSERT(copyflag & K_TO_K); 3162 bcopy(from, to, sizeof (struct strioctl)); 3163 } 3164 return (0); 3165 } 3166 3167 #else /* ! _LP64 */ 3168 3169 /* ARGSUSED2 */ 3170 static int 3171 strcopyin_strioctl(void *from, void *to, int flag, int copyflag) 3172 { 3173 return (strcopyin(from, to, sizeof (struct strioctl), copyflag)); 3174 } 3175 3176 /* ARGSUSED2 */ 3177 static int 3178 strcopyout_strioctl(void *from, void *to, int flag, int copyflag) 3179 { 3180 return (strcopyout(from, to, sizeof (struct strioctl), copyflag)); 3181 } 3182 3183 #endif /* _LP64 */ 3184 3185 /* 3186 * Determine type of job control semantics expected by user. The 3187 * possibilities are: 3188 * JCREAD - Behaves like read() on fd; send SIGTTIN 3189 * JCWRITE - Behaves like write() on fd; send SIGTTOU if TOSTOP set 3190 * JCSETP - Sets a value in the stream; send SIGTTOU, ignore TOSTOP 3191 * JCGETP - Gets a value in the stream; no signals. 3192 * See straccess in strsubr.c for usage of these values. 3193 * 3194 * This routine also returns -1 for I_STR as a special case; the 3195 * caller must call again with the real ioctl number for 3196 * classification. 
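 * strioctl() does exactly that for I_STR: it copies in the
 * strioctl structure, re-classifies strioc.ic_cmd, and enforces
 * the result through i_straccess().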
3197 */ 3198 static int 3199 job_control_type(int cmd) 3200 { 3201 switch (cmd) { 3202 case I_STR: 3203 return (-1); 3204 3205 case I_RECVFD: 3206 case I_E_RECVFD: 3207 return (JCREAD); 3208 3209 case I_FDINSERT: 3210 case I_SENDFD: 3211 return (JCWRITE); 3212 3213 case TCSETA: 3214 case TCSETAW: 3215 case TCSETAF: 3216 case TCSBRK: 3217 case TCXONC: 3218 case TCFLSH: 3219 case TCDSET: /* Obsolete */ 3220 case TIOCSWINSZ: 3221 case TCSETS: 3222 case TCSETSW: 3223 case TCSETSF: 3224 case TIOCSETD: 3225 case TIOCHPCL: 3226 case TIOCSETP: 3227 case TIOCSETN: 3228 case TIOCEXCL: 3229 case TIOCNXCL: 3230 case TIOCFLUSH: 3231 case TIOCSETC: 3232 case TIOCLBIS: 3233 case TIOCLBIC: 3234 case TIOCLSET: 3235 case TIOCSBRK: 3236 case TIOCCBRK: 3237 case TIOCSDTR: 3238 case TIOCCDTR: 3239 case TIOCSLTC: 3240 case TIOCSTOP: 3241 case TIOCSTART: 3242 case TIOCSTI: 3243 case TIOCSPGRP: 3244 case TIOCMSET: 3245 case TIOCMBIS: 3246 case TIOCMBIC: 3247 case TIOCREMOTE: 3248 case TIOCSIGNAL: 3249 case LDSETT: 3250 case LDSMAP: /* Obsolete */ 3251 case DIOCSETP: 3252 case I_FLUSH: 3253 case I_SRDOPT: 3254 case I_SETSIG: 3255 case I_SWROPT: 3256 case I_FLUSHBAND: 3257 case I_SETCLTIME: 3258 case I_SERROPT: 3259 case I_ESETSIG: 3260 case FIONBIO: 3261 case FIOASYNC: 3262 case FIOSETOWN: 3263 case JBOOT: /* Obsolete */ 3264 case JTERM: /* Obsolete */ 3265 case JTIMOM: /* Obsolete */ 3266 case JZOMBOOT: /* Obsolete */ 3267 case JAGENT: /* Obsolete */ 3268 case JTRUN: /* Obsolete */ 3269 case JXTPROTO: /* Obsolete */ 3270 return (JCSETP); 3271 } 3272 3273 return (JCGETP); 3274 } 3275 3276 /* 3277 * ioctl for streams 3278 */ 3279 int 3280 strioctl(struct vnode *vp, int cmd, intptr_t arg, int flag, int copyflag, 3281 cred_t *crp, int *rvalp) 3282 { 3283 struct stdata *stp; 3284 struct strcmd *scp; 3285 struct strioctl strioc; 3286 struct uio uio; 3287 struct iovec iov; 3288 int access; 3289 mblk_t *mp; 3290 int error = 0; 3291 int done = 0; 3292 ssize_t rmin, rmax; 3293 queue_t *wrq; 3294 queue_t *rdq; 3295 boolean_t kioctl = B_FALSE; 3296 uint32_t auditing = AU_AUDITING(); 3297 3298 if (flag & FKIOCTL) { 3299 copyflag = K_TO_K; 3300 kioctl = B_TRUE; 3301 } 3302 ASSERT(vp->v_stream); 3303 ASSERT(copyflag == U_TO_K || copyflag == K_TO_K); 3304 stp = vp->v_stream; 3305 3306 TRACE_3(TR_FAC_STREAMS_FR, TR_IOCTL_ENTER, 3307 "strioctl:stp %p cmd %X arg %lX", stp, cmd, arg); 3308 3309 /* 3310 * If the copy is kernel to kernel, make sure that the FNATIVE 3311 * flag is set. After this it would be a serious error to have 3312 * no model flag. 3313 */ 3314 if (copyflag == K_TO_K) 3315 flag = (flag & ~FMODELS) | FNATIVE; 3316 3317 ASSERT((flag & FMODELS) != 0); 3318 3319 wrq = stp->sd_wrq; 3320 rdq = _RD(wrq); 3321 3322 access = job_control_type(cmd); 3323 3324 /* We should never see these here, should be handled by iwscn */ 3325 if (cmd == SRIOCSREDIR || cmd == SRIOCISREDIR) 3326 return (EINVAL); 3327 3328 mutex_enter(&stp->sd_lock); 3329 if ((access != -1) && ((error = i_straccess(stp, access)) != 0)) { 3330 mutex_exit(&stp->sd_lock); 3331 return (error); 3332 } 3333 mutex_exit(&stp->sd_lock); 3334 3335 /* 3336 * Check for sgttyb-related ioctls first, and complain as 3337 * necessary. 
3338 */ 3339 switch (cmd) { 3340 case TIOCGETP: 3341 case TIOCSETP: 3342 case TIOCSETN: 3343 if (sgttyb_handling >= 2 && !sgttyb_complaint) { 3344 sgttyb_complaint = B_TRUE; 3345 cmn_err(CE_NOTE, 3346 "application used obsolete TIOC[GS]ET"); 3347 } 3348 if (sgttyb_handling >= 3) { 3349 tsignal(curthread, SIGSYS); 3350 return (EIO); 3351 } 3352 break; 3353 } 3354 3355 mutex_enter(&stp->sd_lock); 3356 3357 switch (cmd) { 3358 case I_RECVFD: 3359 case I_E_RECVFD: 3360 case I_PEEK: 3361 case I_NREAD: 3362 case FIONREAD: 3363 case FIORDCHK: 3364 case I_ATMARK: 3365 case FIONBIO: 3366 case FIOASYNC: 3367 if (stp->sd_flag & (STRDERR|STPLEX)) { 3368 error = strgeterr(stp, STRDERR|STPLEX, 0); 3369 if (error != 0) { 3370 mutex_exit(&stp->sd_lock); 3371 return (error); 3372 } 3373 } 3374 break; 3375 3376 default: 3377 if (stp->sd_flag & (STRDERR|STWRERR|STPLEX)) { 3378 error = strgeterr(stp, STRDERR|STWRERR|STPLEX, 0); 3379 if (error != 0) { 3380 mutex_exit(&stp->sd_lock); 3381 return (error); 3382 } 3383 } 3384 } 3385 3386 mutex_exit(&stp->sd_lock); 3387 3388 switch (cmd) { 3389 default: 3390 /* 3391 * The stream head has hardcoded knowledge of a 3392 * miscellaneous collection of terminal-, keyboard- and 3393 * mouse-related ioctls, enumerated below. This hardcoded 3394 * knowledge allows the stream head to automatically 3395 * convert transparent ioctl requests made by userland 3396 * programs into I_STR ioctls which many old STREAMS 3397 * modules and drivers require. 3398 * 3399 * No new ioctls should ever be added to this list. 3400 * Instead, the STREAMS module or driver should be written 3401 * to either handle transparent ioctls or require any 3402 * userland programs to use I_STR ioctls (by returning 3403 * EINVAL to any transparent ioctl requests). 3404 * 3405 * More importantly, removing ioctls from this list should 3406 * be done with the utmost care, since our STREAMS modules 3407 * and drivers *count* on the stream head performing this 3408 * conversion, and thus may panic while processing 3409 * transparent ioctl request for one of these ioctls (keep 3410 * in mind that third party modules and drivers may have 3411 * similar problems). 3412 */ 3413 if (((cmd & IOCTYPE) == LDIOC) || 3414 ((cmd & IOCTYPE) == tIOC) || 3415 ((cmd & IOCTYPE) == TIOC) || 3416 ((cmd & IOCTYPE) == KIOC) || 3417 ((cmd & IOCTYPE) == MSIOC) || 3418 ((cmd & IOCTYPE) == VUIOC)) { 3419 /* 3420 * The ioctl is a tty ioctl - set up strioc buffer 3421 * and call strdoioctl() to do the work. 
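 * For "set" style commands ic_len is the size of the data to
 * send down and ic_dp points at it (the user buffer at arg, or
 * a local copy for the int-valued TC* commands); for "get" style
 * commands ic_len is zero and ic_dp records where the driver's
 * reply must be copied out later.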
3422 */ 3423 if (stp->sd_flag & STRHUP) 3424 return (ENXIO); 3425 strioc.ic_cmd = cmd; 3426 strioc.ic_timout = INFTIM; 3427 3428 switch (cmd) { 3429 3430 case TCXONC: 3431 case TCSBRK: 3432 case TCFLSH: 3433 case TCDSET: 3434 { 3435 int native_arg = (int)arg; 3436 strioc.ic_len = sizeof (int); 3437 strioc.ic_dp = (char *)&native_arg; 3438 return (strdoioctl(stp, &strioc, flag, 3439 K_TO_K, crp, rvalp)); 3440 } 3441 3442 case TCSETA: 3443 case TCSETAW: 3444 case TCSETAF: 3445 strioc.ic_len = sizeof (struct termio); 3446 strioc.ic_dp = (char *)arg; 3447 return (strdoioctl(stp, &strioc, flag, 3448 copyflag, crp, rvalp)); 3449 3450 case TCSETS: 3451 case TCSETSW: 3452 case TCSETSF: 3453 strioc.ic_len = sizeof (struct termios); 3454 strioc.ic_dp = (char *)arg; 3455 return (strdoioctl(stp, &strioc, flag, 3456 copyflag, crp, rvalp)); 3457 3458 case LDSETT: 3459 strioc.ic_len = sizeof (struct termcb); 3460 strioc.ic_dp = (char *)arg; 3461 return (strdoioctl(stp, &strioc, flag, 3462 copyflag, crp, rvalp)); 3463 3464 case TIOCSETP: 3465 strioc.ic_len = sizeof (struct sgttyb); 3466 strioc.ic_dp = (char *)arg; 3467 return (strdoioctl(stp, &strioc, flag, 3468 copyflag, crp, rvalp)); 3469 3470 case TIOCSTI: 3471 if ((flag & FREAD) == 0 && 3472 secpolicy_sti(crp) != 0) { 3473 return (EPERM); 3474 } 3475 mutex_enter(&stp->sd_lock); 3476 mutex_enter(&curproc->p_splock); 3477 if (stp->sd_sidp != curproc->p_sessp->s_sidp && 3478 secpolicy_sti(crp) != 0) { 3479 mutex_exit(&curproc->p_splock); 3480 mutex_exit(&stp->sd_lock); 3481 return (EACCES); 3482 } 3483 mutex_exit(&curproc->p_splock); 3484 mutex_exit(&stp->sd_lock); 3485 3486 strioc.ic_len = sizeof (char); 3487 strioc.ic_dp = (char *)arg; 3488 return (strdoioctl(stp, &strioc, flag, 3489 copyflag, crp, rvalp)); 3490 3491 case TIOCSWINSZ: 3492 strioc.ic_len = sizeof (struct winsize); 3493 strioc.ic_dp = (char *)arg; 3494 return (strdoioctl(stp, &strioc, flag, 3495 copyflag, crp, rvalp)); 3496 3497 case TIOCSSIZE: 3498 strioc.ic_len = sizeof (struct ttysize); 3499 strioc.ic_dp = (char *)arg; 3500 return (strdoioctl(stp, &strioc, flag, 3501 copyflag, crp, rvalp)); 3502 3503 case TIOCSSOFTCAR: 3504 case KIOCTRANS: 3505 case KIOCTRANSABLE: 3506 case KIOCCMD: 3507 case KIOCSDIRECT: 3508 case KIOCSCOMPAT: 3509 case KIOCSKABORTEN: 3510 case KIOCSRPTCOUNT: 3511 case KIOCSRPTDELAY: 3512 case KIOCSRPTRATE: 3513 case VUIDSFORMAT: 3514 case TIOCSPPS: 3515 strioc.ic_len = sizeof (int); 3516 strioc.ic_dp = (char *)arg; 3517 return (strdoioctl(stp, &strioc, flag, 3518 copyflag, crp, rvalp)); 3519 3520 case KIOCSETKEY: 3521 case KIOCGETKEY: 3522 strioc.ic_len = sizeof (struct kiockey); 3523 strioc.ic_dp = (char *)arg; 3524 return (strdoioctl(stp, &strioc, flag, 3525 copyflag, crp, rvalp)); 3526 3527 case KIOCSKEY: 3528 case KIOCGKEY: 3529 strioc.ic_len = sizeof (struct kiockeymap); 3530 strioc.ic_dp = (char *)arg; 3531 return (strdoioctl(stp, &strioc, flag, 3532 copyflag, crp, rvalp)); 3533 3534 case KIOCSLED: 3535 /* arg is a pointer to char */ 3536 strioc.ic_len = sizeof (char); 3537 strioc.ic_dp = (char *)arg; 3538 return (strdoioctl(stp, &strioc, flag, 3539 copyflag, crp, rvalp)); 3540 3541 case MSIOSETPARMS: 3542 strioc.ic_len = sizeof (Ms_parms); 3543 strioc.ic_dp = (char *)arg; 3544 return (strdoioctl(stp, &strioc, flag, 3545 copyflag, crp, rvalp)); 3546 3547 case VUIDSADDR: 3548 case VUIDGADDR: 3549 strioc.ic_len = sizeof (struct vuid_addr_probe); 3550 strioc.ic_dp = (char *)arg; 3551 return (strdoioctl(stp, &strioc, flag, 3552 copyflag, crp, rvalp)); 3553 3554 /* 
3555 * These M_IOCTL's don't require any data to be sent 3556 * downstream, and the driver will allocate and link 3557 * on its own mblk_t upon M_IOCACK -- thus we set 3558 * ic_len to zero and set ic_dp to arg so we know 3559 * where to copyout to later. 3560 */ 3561 case TIOCGSOFTCAR: 3562 case TIOCGWINSZ: 3563 case TIOCGSIZE: 3564 case KIOCGTRANS: 3565 case KIOCGTRANSABLE: 3566 case KIOCTYPE: 3567 case KIOCGDIRECT: 3568 case KIOCGCOMPAT: 3569 case KIOCLAYOUT: 3570 case KIOCGLED: 3571 case MSIOGETPARMS: 3572 case MSIOBUTTONS: 3573 case VUIDGFORMAT: 3574 case TIOCGPPS: 3575 case TIOCGPPSEV: 3576 case TCGETA: 3577 case TCGETS: 3578 case LDGETT: 3579 case TIOCGETP: 3580 case KIOCGRPTCOUNT: 3581 case KIOCGRPTDELAY: 3582 case KIOCGRPTRATE: 3583 strioc.ic_len = 0; 3584 strioc.ic_dp = (char *)arg; 3585 return (strdoioctl(stp, &strioc, flag, 3586 copyflag, crp, rvalp)); 3587 } 3588 } 3589 3590 /* 3591 * Unknown cmd - send it down as a transparent ioctl. 3592 */ 3593 strioc.ic_cmd = cmd; 3594 strioc.ic_timout = INFTIM; 3595 strioc.ic_len = TRANSPARENT; 3596 strioc.ic_dp = (char *)&arg; 3597 3598 return (strdoioctl(stp, &strioc, flag, copyflag, crp, rvalp)); 3599 3600 case I_STR: 3601 /* 3602 * Stream ioctl. Read in an strioctl buffer from the user 3603 * along with any data specified and send it downstream. 3604 * Strdoioctl will wait allow only one ioctl message at 3605 * a time, and waits for the acknowledgement. 3606 */ 3607 3608 if (stp->sd_flag & STRHUP) 3609 return (ENXIO); 3610 3611 error = strcopyin_strioctl((void *)arg, &strioc, flag, 3612 copyflag); 3613 if (error != 0) 3614 return (error); 3615 3616 if ((strioc.ic_len < 0) || (strioc.ic_timout < -1)) 3617 return (EINVAL); 3618 3619 access = job_control_type(strioc.ic_cmd); 3620 mutex_enter(&stp->sd_lock); 3621 if ((access != -1) && 3622 ((error = i_straccess(stp, access)) != 0)) { 3623 mutex_exit(&stp->sd_lock); 3624 return (error); 3625 } 3626 mutex_exit(&stp->sd_lock); 3627 3628 /* 3629 * The I_STR facility provides a trap door for malicious 3630 * code to send down bogus streamio(7I) ioctl commands to 3631 * unsuspecting STREAMS modules and drivers which expect to 3632 * only get these messages from the stream head. 3633 * Explicitly prohibit any streamio ioctls which can be 3634 * passed downstream by the stream head. Note that we do 3635 * not block all streamio ioctls because the ioctl 3636 * numberspace is not well managed and thus it's possible 3637 * that a module or driver's ioctl numbers may accidentally 3638 * collide with them. 3639 */ 3640 switch (strioc.ic_cmd) { 3641 case I_LINK: 3642 case I_PLINK: 3643 case I_UNLINK: 3644 case I_PUNLINK: 3645 case _I_GETPEERCRED: 3646 case _I_PLINK_LH: 3647 return (EINVAL); 3648 } 3649 3650 error = strdoioctl(stp, &strioc, flag, copyflag, crp, rvalp); 3651 if (error == 0) { 3652 error = strcopyout_strioctl(&strioc, (void *)arg, 3653 flag, copyflag); 3654 } 3655 return (error); 3656 3657 case _I_CMD: 3658 /* 3659 * Like I_STR, but without using M_IOC* messages and without 3660 * copyins/copyouts beyond the passed-in argument. 
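 * The strcmd_t is handed to strdocmd(); the downstream reply is
 * expected back as the M_CMD message handled in strrput_nondata()
 * above, which parks it in sd_cmdblk while STRCMDWAIT is set.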
3661 */ 3662 if (stp->sd_flag & STRHUP) 3663 return (ENXIO); 3664 3665 if (copyflag == U_TO_K) { 3666 if ((scp = kmem_alloc(sizeof (strcmd_t), 3667 KM_NOSLEEP)) == NULL) { 3668 return (ENOMEM); 3669 } 3670 3671 if (copyin((void *)arg, scp, sizeof (strcmd_t))) { 3672 kmem_free(scp, sizeof (strcmd_t)); 3673 return (EFAULT); 3674 } 3675 } else { 3676 scp = (strcmd_t *)arg; 3677 } 3678 3679 access = job_control_type(scp->sc_cmd); 3680 mutex_enter(&stp->sd_lock); 3681 if (access != -1 && (error = i_straccess(stp, access)) != 0) { 3682 mutex_exit(&stp->sd_lock); 3683 if (copyflag == U_TO_K) 3684 kmem_free(scp, sizeof (strcmd_t)); 3685 return (error); 3686 } 3687 mutex_exit(&stp->sd_lock); 3688 3689 *rvalp = 0; 3690 if ((error = strdocmd(stp, scp, crp)) == 0) { 3691 if (copyflag == U_TO_K && 3692 copyout(scp, (void *)arg, sizeof (strcmd_t))) { 3693 error = EFAULT; 3694 } 3695 } 3696 if (copyflag == U_TO_K) 3697 kmem_free(scp, sizeof (strcmd_t)); 3698 return (error); 3699 3700 case I_NREAD: 3701 /* 3702 * Return number of bytes of data in first message 3703 * in queue in "arg" and return the number of messages 3704 * in queue in return value. 3705 */ 3706 { 3707 size_t size; 3708 int retval; 3709 int count = 0; 3710 3711 mutex_enter(QLOCK(rdq)); 3712 3713 size = msgdsize(rdq->q_first); 3714 for (mp = rdq->q_first; mp != NULL; mp = mp->b_next) 3715 count++; 3716 3717 mutex_exit(QLOCK(rdq)); 3718 if (stp->sd_struiordq) { 3719 infod_t infod; 3720 3721 infod.d_cmd = INFOD_COUNT; 3722 infod.d_count = 0; 3723 if (count == 0) { 3724 infod.d_cmd |= INFOD_FIRSTBYTES; 3725 infod.d_bytes = 0; 3726 } 3727 infod.d_res = 0; 3728 (void) infonext(rdq, &infod); 3729 count += infod.d_count; 3730 if (infod.d_res & INFOD_FIRSTBYTES) 3731 size = infod.d_bytes; 3732 } 3733 3734 /* 3735 * Drop down from size_t to the "int" required by the 3736 * interface. Cap at INT_MAX. 3737 */ 3738 retval = MIN(size, INT_MAX); 3739 error = strcopyout(&retval, (void *)arg, sizeof (retval), 3740 copyflag); 3741 if (!error) 3742 *rvalp = count; 3743 return (error); 3744 } 3745 3746 case FIONREAD: 3747 /* 3748 * Return number of bytes of data in all data messages 3749 * in queue in "arg". 3750 */ 3751 { 3752 size_t size = 0; 3753 int retval; 3754 3755 mutex_enter(QLOCK(rdq)); 3756 for (mp = rdq->q_first; mp != NULL; mp = mp->b_next) 3757 size += msgdsize(mp); 3758 mutex_exit(QLOCK(rdq)); 3759 3760 if (stp->sd_struiordq) { 3761 infod_t infod; 3762 3763 infod.d_cmd = INFOD_BYTES; 3764 infod.d_res = 0; 3765 infod.d_bytes = 0; 3766 (void) infonext(rdq, &infod); 3767 size += infod.d_bytes; 3768 } 3769 3770 /* 3771 * Drop down from size_t to the "int" required by the 3772 * interface. Cap at INT_MAX. 3773 */ 3774 retval = MIN(size, INT_MAX); 3775 error = strcopyout(&retval, (void *)arg, sizeof (retval), 3776 copyflag); 3777 3778 *rvalp = 0; 3779 return (error); 3780 } 3781 case FIORDCHK: 3782 /* 3783 * FIORDCHK does not use arg value (like FIONREAD), 3784 * instead a count is returned. I_NREAD value may 3785 * not be accurate but safe. The real thing to do is 3786 * to add the msgdsizes of all data messages until 3787 * a non-data message. 
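 * Unlike FIONREAD, the byte count is returned through *rvalp
 * (capped at INT_MAX) rather than copied out through arg.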
3788 */ 3789 { 3790 size_t size = 0; 3791 3792 mutex_enter(QLOCK(rdq)); 3793 for (mp = rdq->q_first; mp != NULL; mp = mp->b_next) 3794 size += msgdsize(mp); 3795 mutex_exit(QLOCK(rdq)); 3796 3797 if (stp->sd_struiordq) { 3798 infod_t infod; 3799 3800 infod.d_cmd = INFOD_BYTES; 3801 infod.d_res = 0; 3802 infod.d_bytes = 0; 3803 (void) infonext(rdq, &infod); 3804 size += infod.d_bytes; 3805 } 3806 3807 /* 3808 * Since ioctl returns an int, and memory sizes under 3809 * LP64 may not fit, we return INT_MAX if the count was 3810 * actually greater. 3811 */ 3812 *rvalp = MIN(size, INT_MAX); 3813 return (0); 3814 } 3815 3816 case I_FIND: 3817 /* 3818 * Get module name. 3819 */ 3820 { 3821 char mname[FMNAMESZ + 1]; 3822 queue_t *q; 3823 3824 error = (copyflag & U_TO_K ? copyinstr : copystr)((void *)arg, 3825 mname, FMNAMESZ + 1, NULL); 3826 if (error) 3827 return ((error == ENAMETOOLONG) ? EINVAL : EFAULT); 3828 3829 /* 3830 * Return EINVAL if we're handed a bogus module name. 3831 */ 3832 if (fmodsw_find(mname, FMODSW_LOAD) == NULL) { 3833 TRACE_0(TR_FAC_STREAMS_FR, 3834 TR_I_CANT_FIND, "couldn't I_FIND"); 3835 return (EINVAL); 3836 } 3837 3838 *rvalp = 0; 3839 3840 /* Look downstream to see if module is there. */ 3841 claimstr(stp->sd_wrq); 3842 for (q = stp->sd_wrq->q_next; q; q = q->q_next) { 3843 if (q->q_flag & QREADR) { 3844 q = NULL; 3845 break; 3846 } 3847 if (strcmp(mname, Q2NAME(q)) == 0) 3848 break; 3849 } 3850 releasestr(stp->sd_wrq); 3851 3852 *rvalp = (q ? 1 : 0); 3853 return (error); 3854 } 3855 3856 case I_PUSH: 3857 case __I_PUSH_NOCTTY: 3858 /* 3859 * Push a module. 3860 * For the case __I_PUSH_NOCTTY push a module but 3861 * do not allocate controlling tty. See bugid 4025044 3862 */ 3863 3864 { 3865 char mname[FMNAMESZ + 1]; 3866 fmodsw_impl_t *fp; 3867 dev_t dummydev; 3868 3869 if (stp->sd_flag & STRHUP) 3870 return (ENXIO); 3871 3872 /* 3873 * Get module name and look up in fmodsw. 3874 */ 3875 error = (copyflag & U_TO_K ? copyinstr : copystr)((void *)arg, 3876 mname, FMNAMESZ + 1, NULL); 3877 if (error) 3878 return ((error == ENAMETOOLONG) ? EINVAL : EFAULT); 3879 3880 if ((fp = fmodsw_find(mname, FMODSW_HOLD | FMODSW_LOAD)) == 3881 NULL) 3882 return (EINVAL); 3883 3884 TRACE_2(TR_FAC_STREAMS_FR, TR_I_PUSH, 3885 "I_PUSH:fp %p stp %p", fp, stp); 3886 3887 /* 3888 * If the module is flagged as single-instance, then check 3889 * to see if the module is already pushed. If it is, return 3890 * as if the push was successful. 3891 */ 3892 if (fp->f_qflag & _QSINGLE_INSTANCE) { 3893 queue_t *q; 3894 3895 claimstr(stp->sd_wrq); 3896 for (q = stp->sd_wrq->q_next; q; q = q->q_next) { 3897 if (q->q_flag & QREADR) { 3898 q = NULL; 3899 break; 3900 } 3901 if (strcmp(mname, Q2NAME(q)) == 0) 3902 break; 3903 } 3904 releasestr(stp->sd_wrq); 3905 if (q != NULL) { 3906 fmodsw_rele(fp); 3907 return (0); 3908 } 3909 } 3910 3911 if (error = strstartplumb(stp, flag, cmd)) { 3912 fmodsw_rele(fp); 3913 return (error); 3914 } 3915 3916 /* 3917 * See if any more modules can be pushed on this stream. 3918 * Note that this check must be done after strstartplumb() 3919 * since otherwise multiple threads issuing I_PUSHes on 3920 * the same stream will be able to exceed nstrpush. 3921 */ 3922 mutex_enter(&stp->sd_lock); 3923 if (stp->sd_pushcnt >= nstrpush) { 3924 fmodsw_rele(fp); 3925 strendplumb(stp); 3926 mutex_exit(&stp->sd_lock); 3927 return (EINVAL); 3928 } 3929 mutex_exit(&stp->sd_lock); 3930 3931 /* 3932 * Push new module and call its open routine 3933 * via qattach(). 
Modules don't change device 3934 * numbers, so just ignore dummydev here. 3935 */ 3936 dummydev = vp->v_rdev; 3937 if ((error = qattach(rdq, &dummydev, 0, crp, fp, 3938 B_FALSE)) == 0) { 3939 if (vp->v_type == VCHR && /* sorry, no pipes allowed */ 3940 (cmd == I_PUSH) && (stp->sd_flag & STRISTTY)) { 3941 /* 3942 * try to allocate it as a controlling terminal 3943 */ 3944 (void) strctty(stp); 3945 } 3946 } 3947 3948 mutex_enter(&stp->sd_lock); 3949 3950 /* 3951 * As a performance concern we are caching the values of 3952 * q_minpsz and q_maxpsz of the module below the stream 3953 * head in the stream head. 3954 */ 3955 mutex_enter(QLOCK(stp->sd_wrq->q_next)); 3956 rmin = stp->sd_wrq->q_next->q_minpsz; 3957 rmax = stp->sd_wrq->q_next->q_maxpsz; 3958 mutex_exit(QLOCK(stp->sd_wrq->q_next)); 3959 3960 /* Do this processing here as a performance concern */ 3961 if (strmsgsz != 0) { 3962 if (rmax == INFPSZ) 3963 rmax = strmsgsz; 3964 else { 3965 if (vp->v_type == VFIFO) 3966 rmax = MIN(PIPE_BUF, rmax); 3967 else rmax = MIN(strmsgsz, rmax); 3968 } 3969 } 3970 3971 mutex_enter(QLOCK(wrq)); 3972 stp->sd_qn_minpsz = rmin; 3973 stp->sd_qn_maxpsz = rmax; 3974 mutex_exit(QLOCK(wrq)); 3975 3976 strendplumb(stp); 3977 mutex_exit(&stp->sd_lock); 3978 return (error); 3979 } 3980 3981 case I_POP: 3982 { 3983 queue_t *q; 3984 3985 if (stp->sd_flag & STRHUP) 3986 return (ENXIO); 3987 if (!wrq->q_next) /* for broken pipes */ 3988 return (EINVAL); 3989 3990 if (error = strstartplumb(stp, flag, cmd)) 3991 return (error); 3992 3993 /* 3994 * If there is an anchor on this stream and popping 3995 * the current module would attempt to pop through the 3996 * anchor, then disallow the pop unless we have sufficient 3997 * privileges; take the cheapest (non-locking) check 3998 * first. 3999 */ 4000 if (secpolicy_ip_config(crp, B_TRUE) != 0 || 4001 (stp->sd_anchorzone != crgetzoneid(crp))) { 4002 mutex_enter(&stp->sd_lock); 4003 /* 4004 * Anchors only apply if there's at least one 4005 * module on the stream (sd_pushcnt > 0). 4006 */ 4007 if (stp->sd_pushcnt > 0 && 4008 stp->sd_pushcnt == stp->sd_anchor && 4009 stp->sd_vnode->v_type != VFIFO) { 4010 strendplumb(stp); 4011 mutex_exit(&stp->sd_lock); 4012 if (stp->sd_anchorzone != crgetzoneid(crp)) 4013 return (EINVAL); 4014 /* Audit and report error */ 4015 return (secpolicy_ip_config(crp, B_FALSE)); 4016 } 4017 mutex_exit(&stp->sd_lock); 4018 } 4019 4020 q = wrq->q_next; 4021 TRACE_2(TR_FAC_STREAMS_FR, TR_I_POP, 4022 "I_POP:%p from %p", q, stp); 4023 if (q->q_next == NULL || (q->q_flag & (QREADR|QISDRV))) { 4024 error = EINVAL; 4025 } else { 4026 qdetach(_RD(q), 1, flag, crp, B_FALSE); 4027 error = 0; 4028 } 4029 mutex_enter(&stp->sd_lock); 4030 4031 /* 4032 * As a performance concern we are caching the values of 4033 * q_minpsz and q_maxpsz of the module below the stream 4034 * head in the stream head. 4035 */ 4036 mutex_enter(QLOCK(wrq->q_next)); 4037 rmin = wrq->q_next->q_minpsz; 4038 rmax = wrq->q_next->q_maxpsz; 4039 mutex_exit(QLOCK(wrq->q_next)); 4040 4041 /* Do this processing here as a performance concern */ 4042 if (strmsgsz != 0) { 4043 if (rmax == INFPSZ) 4044 rmax = strmsgsz; 4045 else { 4046 if (vp->v_type == VFIFO) 4047 rmax = MIN(PIPE_BUF, rmax); 4048 else rmax = MIN(strmsgsz, rmax); 4049 } 4050 } 4051 4052 mutex_enter(QLOCK(wrq)); 4053 stp->sd_qn_minpsz = rmin; 4054 stp->sd_qn_maxpsz = rmax; 4055 mutex_exit(QLOCK(wrq)); 4056 4057 /* If we popped through the anchor, then reset the anchor. 
*/ 4058 if (stp->sd_pushcnt < stp->sd_anchor) { 4059 stp->sd_anchor = 0; 4060 stp->sd_anchorzone = 0; 4061 } 4062 strendplumb(stp); 4063 mutex_exit(&stp->sd_lock); 4064 return (error); 4065 } 4066 4067 case _I_MUXID2FD: 4068 { 4069 /* 4070 * Create a fd for a I_PLINK'ed lower stream with a given 4071 * muxid. With the fd, application can send down ioctls, 4072 * like I_LIST, to the previously I_PLINK'ed stream. Note 4073 * that after getting the fd, the application has to do an 4074 * I_PUNLINK on the muxid before it can do any operation 4075 * on the lower stream. This is required by spec1170. 4076 * 4077 * The fd used to do this ioctl should point to the same 4078 * controlling device used to do the I_PLINK. If it uses 4079 * a different stream or an invalid muxid, I_MUXID2FD will 4080 * fail. The error code is set to EINVAL. 4081 * 4082 * The intended use of this interface is the following. 4083 * An application I_PLINK'ed a stream and exits. The fd 4084 * to the lower stream is gone. Another application 4085 * wants to get a fd to the lower stream, it uses I_MUXID2FD. 4086 */ 4087 int muxid = (int)arg; 4088 int fd; 4089 linkinfo_t *linkp; 4090 struct file *fp; 4091 netstack_t *ns; 4092 str_stack_t *ss; 4093 4094 /* 4095 * Do not allow the wildcard muxid. This ioctl is not 4096 * intended to find arbitrary link. 4097 */ 4098 if (muxid == 0) { 4099 return (EINVAL); 4100 } 4101 4102 ns = netstack_find_by_cred(crp); 4103 ASSERT(ns != NULL); 4104 ss = ns->netstack_str; 4105 ASSERT(ss != NULL); 4106 4107 mutex_enter(&muxifier); 4108 linkp = findlinks(vp->v_stream, muxid, LINKPERSIST, ss); 4109 if (linkp == NULL) { 4110 mutex_exit(&muxifier); 4111 netstack_rele(ss->ss_netstack); 4112 return (EINVAL); 4113 } 4114 4115 if ((fd = ufalloc(0)) == -1) { 4116 mutex_exit(&muxifier); 4117 netstack_rele(ss->ss_netstack); 4118 return (EMFILE); 4119 } 4120 fp = linkp->li_fpdown; 4121 mutex_enter(&fp->f_tlock); 4122 fp->f_count++; 4123 mutex_exit(&fp->f_tlock); 4124 mutex_exit(&muxifier); 4125 setf(fd, fp); 4126 *rvalp = fd; 4127 netstack_rele(ss->ss_netstack); 4128 return (0); 4129 } 4130 4131 case _I_INSERT: 4132 { 4133 /* 4134 * To insert a module to a given position in a stream. 4135 * In the first release, only allow privileged user 4136 * to use this ioctl. Furthermore, the insert is only allowed 4137 * below an anchor if the zoneid is the same as the zoneid 4138 * which created the anchor. 4139 * 4140 * Note that we do not plan to support this ioctl 4141 * on pipes in the first release. We want to learn more 4142 * about the implications of these ioctls before extending 4143 * their support. And we do not think these features are 4144 * valuable for pipes. 4145 */ 4146 STRUCT_DECL(strmodconf, strmodinsert); 4147 char mod_name[FMNAMESZ + 1]; 4148 fmodsw_impl_t *fp; 4149 dev_t dummydev; 4150 queue_t *tmp_wrq; 4151 int pos; 4152 boolean_t is_insert; 4153 4154 STRUCT_INIT(strmodinsert, flag); 4155 if (stp->sd_flag & STRHUP) 4156 return (ENXIO); 4157 if (STRMATED(stp)) 4158 return (EINVAL); 4159 if ((error = secpolicy_net_config(crp, B_FALSE)) != 0) 4160 return (error); 4161 if (stp->sd_anchor != 0 && 4162 stp->sd_anchorzone != crgetzoneid(crp)) 4163 return (EINVAL); 4164 4165 error = strcopyin((void *)arg, STRUCT_BUF(strmodinsert), 4166 STRUCT_SIZE(strmodinsert), copyflag); 4167 if (error) 4168 return (error); 4169 4170 /* 4171 * Get module name and look up in fmodsw. 4172 */ 4173 error = (copyflag & U_TO_K ? 
copyinstr : 4174 copystr)(STRUCT_FGETP(strmodinsert, mod_name), 4175 mod_name, FMNAMESZ + 1, NULL); 4176 if (error) 4177 return ((error == ENAMETOOLONG) ? EINVAL : EFAULT); 4178 4179 if ((fp = fmodsw_find(mod_name, FMODSW_HOLD | FMODSW_LOAD)) == 4180 NULL) 4181 return (EINVAL); 4182 4183 if (error = strstartplumb(stp, flag, cmd)) { 4184 fmodsw_rele(fp); 4185 return (error); 4186 } 4187 4188 /* 4189 * Is this _I_INSERT just like an I_PUSH? We need to know 4190 * this because we do some optimizations if this is a 4191 * module being pushed. 4192 */ 4193 pos = STRUCT_FGET(strmodinsert, pos); 4194 is_insert = (pos != 0); 4195 4196 /* 4197 * Make sure pos is valid. Even though it is not an I_PUSH, 4198 * we impose the same limit on the number of modules in a 4199 * stream. 4200 */ 4201 mutex_enter(&stp->sd_lock); 4202 if (stp->sd_pushcnt >= nstrpush || pos < 0 || 4203 pos > stp->sd_pushcnt) { 4204 fmodsw_rele(fp); 4205 strendplumb(stp); 4206 mutex_exit(&stp->sd_lock); 4207 return (EINVAL); 4208 } 4209 if (stp->sd_anchor != 0) { 4210 /* 4211 * Is this insert below the anchor? 4212 * Pushcnt hasn't been increased yet hence 4213 * we test for greater than here, and greater or 4214 * equal after qattach. 4215 */ 4216 if (pos > (stp->sd_pushcnt - stp->sd_anchor) && 4217 stp->sd_anchorzone != crgetzoneid(crp)) { 4218 fmodsw_rele(fp); 4219 strendplumb(stp); 4220 mutex_exit(&stp->sd_lock); 4221 return (EPERM); 4222 } 4223 } 4224 4225 mutex_exit(&stp->sd_lock); 4226 4227 /* 4228 * First find the correct position this module to 4229 * be inserted. We don't need to call claimstr() 4230 * as the stream should not be changing at this point. 4231 * 4232 * Insert new module and call its open routine 4233 * via qattach(). Modules don't change device 4234 * numbers, so just ignore dummydev here. 4235 */ 4236 for (tmp_wrq = stp->sd_wrq; pos > 0; 4237 tmp_wrq = tmp_wrq->q_next, pos--) { 4238 ASSERT(SAMESTR(tmp_wrq)); 4239 } 4240 dummydev = vp->v_rdev; 4241 if ((error = qattach(_RD(tmp_wrq), &dummydev, 0, crp, 4242 fp, is_insert)) != 0) { 4243 mutex_enter(&stp->sd_lock); 4244 strendplumb(stp); 4245 mutex_exit(&stp->sd_lock); 4246 return (error); 4247 } 4248 4249 mutex_enter(&stp->sd_lock); 4250 4251 /* 4252 * As a performance concern we are caching the values of 4253 * q_minpsz and q_maxpsz of the module below the stream 4254 * head in the stream head. 4255 */ 4256 if (!is_insert) { 4257 mutex_enter(QLOCK(stp->sd_wrq->q_next)); 4258 rmin = stp->sd_wrq->q_next->q_minpsz; 4259 rmax = stp->sd_wrq->q_next->q_maxpsz; 4260 mutex_exit(QLOCK(stp->sd_wrq->q_next)); 4261 4262 /* Do this processing here as a performance concern */ 4263 if (strmsgsz != 0) { 4264 if (rmax == INFPSZ) { 4265 rmax = strmsgsz; 4266 } else { 4267 rmax = MIN(strmsgsz, rmax); 4268 } 4269 } 4270 4271 mutex_enter(QLOCK(wrq)); 4272 stp->sd_qn_minpsz = rmin; 4273 stp->sd_qn_maxpsz = rmax; 4274 mutex_exit(QLOCK(wrq)); 4275 } 4276 4277 /* 4278 * Need to update the anchor value if this module is 4279 * inserted below the anchor point. 4280 */ 4281 if (stp->sd_anchor != 0) { 4282 pos = STRUCT_FGET(strmodinsert, pos); 4283 if (pos >= (stp->sd_pushcnt - stp->sd_anchor)) 4284 stp->sd_anchor++; 4285 } 4286 4287 strendplumb(stp); 4288 mutex_exit(&stp->sd_lock); 4289 return (0); 4290 } 4291 4292 case _I_REMOVE: 4293 { 4294 /* 4295 * To remove a module with a given name in a stream. The 4296 * caller of this ioctl needs to provide both the name and 4297 * the position of the module to be removed. 
This eliminates 4298 * the ambiguity of removal if a module is inserted/pushed 4299 * multiple times in a stream. In the first release, only 4300 * allow privileged user to use this ioctl. 4301 * Furthermore, the remove is only allowed 4302 * below an anchor if the zoneid is the same as the zoneid 4303 * which created the anchor. 4304 * 4305 * Note that we do not plan to support this ioctl 4306 * on pipes in the first release. We want to learn more 4307 * about the implications of these ioctls before extending 4308 * their support. And we do not think these features are 4309 * valuable for pipes. 4310 * 4311 * Also note that _I_REMOVE cannot be used to remove a 4312 * driver or the stream head. 4313 */ 4314 STRUCT_DECL(strmodconf, strmodremove); 4315 queue_t *q; 4316 int pos; 4317 char mod_name[FMNAMESZ + 1]; 4318 boolean_t is_remove; 4319 4320 STRUCT_INIT(strmodremove, flag); 4321 if (stp->sd_flag & STRHUP) 4322 return (ENXIO); 4323 if (STRMATED(stp)) 4324 return (EINVAL); 4325 if ((error = secpolicy_net_config(crp, B_FALSE)) != 0) 4326 return (error); 4327 if (stp->sd_anchor != 0 && 4328 stp->sd_anchorzone != crgetzoneid(crp)) 4329 return (EINVAL); 4330 4331 error = strcopyin((void *)arg, STRUCT_BUF(strmodremove), 4332 STRUCT_SIZE(strmodremove), copyflag); 4333 if (error) 4334 return (error); 4335 4336 error = (copyflag & U_TO_K ? copyinstr : 4337 copystr)(STRUCT_FGETP(strmodremove, mod_name), 4338 mod_name, FMNAMESZ + 1, NULL); 4339 if (error) 4340 return ((error == ENAMETOOLONG) ? EINVAL : EFAULT); 4341 4342 if ((error = strstartplumb(stp, flag, cmd)) != 0) 4343 return (error); 4344 4345 /* 4346 * Match the name of given module to the name of module at 4347 * the given position. 4348 */ 4349 pos = STRUCT_FGET(strmodremove, pos); 4350 4351 is_remove = (pos != 0); 4352 for (q = stp->sd_wrq->q_next; SAMESTR(q) && pos > 0; 4353 q = q->q_next, pos--) 4354 ; 4355 if (pos > 0 || !SAMESTR(q) || 4356 strcmp(Q2NAME(q), mod_name) != 0) { 4357 mutex_enter(&stp->sd_lock); 4358 strendplumb(stp); 4359 mutex_exit(&stp->sd_lock); 4360 return (EINVAL); 4361 } 4362 4363 /* 4364 * If the position is at or below an anchor, then the zoneid 4365 * must match the zoneid that created the anchor. 4366 */ 4367 if (stp->sd_anchor != 0) { 4368 pos = STRUCT_FGET(strmodremove, pos); 4369 if (pos >= (stp->sd_pushcnt - stp->sd_anchor) && 4370 stp->sd_anchorzone != crgetzoneid(crp)) { 4371 mutex_enter(&stp->sd_lock); 4372 strendplumb(stp); 4373 mutex_exit(&stp->sd_lock); 4374 return (EPERM); 4375 } 4376 } 4377 4378 4379 ASSERT(!(q->q_flag & QREADR)); 4380 qdetach(_RD(q), 1, flag, crp, is_remove); 4381 4382 mutex_enter(&stp->sd_lock); 4383 4384 /* 4385 * As a performance concern we are caching the values of 4386 * q_minpsz and q_maxpsz of the module below the stream 4387 * head in the stream head. 4388 */ 4389 if (!is_remove) { 4390 mutex_enter(QLOCK(wrq->q_next)); 4391 rmin = wrq->q_next->q_minpsz; 4392 rmax = wrq->q_next->q_maxpsz; 4393 mutex_exit(QLOCK(wrq->q_next)); 4394 4395 /* Do this processing here as a performance concern */ 4396 if (strmsgsz != 0) { 4397 if (rmax == INFPSZ) 4398 rmax = strmsgsz; 4399 else { 4400 if (vp->v_type == VFIFO) 4401 rmax = MIN(PIPE_BUF, rmax); 4402 else rmax = MIN(strmsgsz, rmax); 4403 } 4404 } 4405 4406 mutex_enter(QLOCK(wrq)); 4407 stp->sd_qn_minpsz = rmin; 4408 stp->sd_qn_maxpsz = rmax; 4409 mutex_exit(QLOCK(wrq)); 4410 } 4411 4412 /* 4413 * Need to update the anchor value if this module is removed 4414 * at or below the anchor point. 
If the removed module is at 4415 * the anchor point, remove the anchor for this stream if 4416 * there is no module above the anchor point. Otherwise, if 4417 * the removed module is below the anchor point, decrement the 4418 * anchor point by 1. 4419 */ 4420 if (stp->sd_anchor != 0) { 4421 pos = STRUCT_FGET(strmodremove, pos); 4422 if (pos == stp->sd_pushcnt - stp->sd_anchor + 1) 4423 stp->sd_anchor = 0; 4424 else if (pos > (stp->sd_pushcnt - stp->sd_anchor + 1)) 4425 stp->sd_anchor--; 4426 } 4427 4428 strendplumb(stp); 4429 mutex_exit(&stp->sd_lock); 4430 return (0); 4431 } 4432 4433 case I_ANCHOR: 4434 /* 4435 * Set the anchor position on the stream to reside at 4436 * the top module (in other words, the top module 4437 * cannot be popped). Anchors with a FIFO make no 4438 * obvious sense, so they're not allowed. 4439 */ 4440 mutex_enter(&stp->sd_lock); 4441 4442 if (stp->sd_vnode->v_type == VFIFO) { 4443 mutex_exit(&stp->sd_lock); 4444 return (EINVAL); 4445 } 4446 /* Only allow the same zoneid to update the anchor */ 4447 if (stp->sd_anchor != 0 && 4448 stp->sd_anchorzone != crgetzoneid(crp)) { 4449 mutex_exit(&stp->sd_lock); 4450 return (EINVAL); 4451 } 4452 stp->sd_anchor = stp->sd_pushcnt; 4453 stp->sd_anchorzone = crgetzoneid(crp); 4454 mutex_exit(&stp->sd_lock); 4455 return (0); 4456 4457 case I_LOOK: 4458 /* 4459 * Get name of first module downstream. 4460 * If no module, return an error. 4461 */ 4462 claimstr(wrq); 4463 if (_SAMESTR(wrq) && wrq->q_next->q_next != NULL) { 4464 char *name = Q2NAME(wrq->q_next); 4465 4466 error = strcopyout(name, (void *)arg, strlen(name) + 1, 4467 copyflag); 4468 releasestr(wrq); 4469 return (error); 4470 } 4471 releasestr(wrq); 4472 return (EINVAL); 4473 4474 case I_LINK: 4475 case I_PLINK: 4476 /* 4477 * Link a multiplexor. 4478 */ 4479 return (mlink(vp, cmd, (int)arg, crp, rvalp, 0)); 4480 4481 case _I_PLINK_LH: 4482 /* 4483 * Link a multiplexor: Call must originate from kernel. 4484 */ 4485 if (kioctl) 4486 return (ldi_mlink_lh(vp, cmd, arg, crp, rvalp)); 4487 4488 return (EINVAL); 4489 case I_UNLINK: 4490 case I_PUNLINK: 4491 /* 4492 * Unlink a multiplexor. 4493 * If arg is -1, unlink all links for which this is the 4494 * controlling stream. Otherwise, arg is an index number 4495 * for a link to be removed. 
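 *
 * Illustrative only (a sketch of the usual userland pairing, not
 * part of this code path; ctlfd and lowerfd are the application's
 * own descriptors): a persistent link set up with
 *
 *	muxid = ioctl(ctlfd, I_PLINK, lowerfd);
 *
 * is later torn down with
 *
 *	(void) ioctl(ctlfd, I_PUNLINK, muxid);
 *
 * and passing MUXID_ALL (-1) as the argument removes every link of
 * the requested type for which this is the controlling stream.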
4496 */ 4497 { 4498 struct linkinfo *linkp; 4499 int native_arg = (int)arg; 4500 int type; 4501 netstack_t *ns; 4502 str_stack_t *ss; 4503 4504 TRACE_1(TR_FAC_STREAMS_FR, 4505 TR_I_UNLINK, "I_UNLINK/I_PUNLINK:%p", stp); 4506 if (vp->v_type == VFIFO) { 4507 return (EINVAL); 4508 } 4509 if (cmd == I_UNLINK) 4510 type = LINKNORMAL; 4511 else /* I_PUNLINK */ 4512 type = LINKPERSIST; 4513 if (native_arg == 0) { 4514 return (EINVAL); 4515 } 4516 ns = netstack_find_by_cred(crp); 4517 ASSERT(ns != NULL); 4518 ss = ns->netstack_str; 4519 ASSERT(ss != NULL); 4520 4521 if (native_arg == MUXID_ALL) 4522 error = munlinkall(stp, type, crp, rvalp, ss); 4523 else { 4524 mutex_enter(&muxifier); 4525 if (!(linkp = findlinks(stp, (int)arg, type, ss))) { 4526 /* invalid user supplied index number */ 4527 mutex_exit(&muxifier); 4528 netstack_rele(ss->ss_netstack); 4529 return (EINVAL); 4530 } 4531 /* munlink drops the muxifier lock */ 4532 error = munlink(stp, linkp, type, crp, rvalp, ss); 4533 } 4534 netstack_rele(ss->ss_netstack); 4535 return (error); 4536 } 4537 4538 case I_FLUSH: 4539 /* 4540 * send a flush message downstream 4541 * flush message can indicate 4542 * FLUSHR - flush read queue 4543 * FLUSHW - flush write queue 4544 * FLUSHRW - flush read/write queue 4545 */ 4546 if (stp->sd_flag & STRHUP) 4547 return (ENXIO); 4548 if (arg & ~FLUSHRW) 4549 return (EINVAL); 4550 4551 for (;;) { 4552 if (putnextctl1(stp->sd_wrq, M_FLUSH, (int)arg)) { 4553 break; 4554 } 4555 if (error = strwaitbuf(1, BPRI_HI)) { 4556 return (error); 4557 } 4558 } 4559 4560 /* 4561 * Send down an unsupported ioctl and wait for the nack 4562 * in order to allow the M_FLUSH to propagate back 4563 * up to the stream head. 4564 * Replaces if (qready()) runqueues(); 4565 */ 4566 strioc.ic_cmd = -1; /* The unsupported ioctl */ 4567 strioc.ic_timout = 0; 4568 strioc.ic_len = 0; 4569 strioc.ic_dp = NULL; 4570 (void) strdoioctl(stp, &strioc, flag, K_TO_K, crp, rvalp); 4571 *rvalp = 0; 4572 return (0); 4573 4574 case I_FLUSHBAND: 4575 { 4576 struct bandinfo binfo; 4577 4578 error = strcopyin((void *)arg, &binfo, sizeof (binfo), 4579 copyflag); 4580 if (error) 4581 return (error); 4582 if (stp->sd_flag & STRHUP) 4583 return (ENXIO); 4584 if (binfo.bi_flag & ~FLUSHRW) 4585 return (EINVAL); 4586 while (!(mp = allocb(2, BPRI_HI))) { 4587 if (error = strwaitbuf(2, BPRI_HI)) 4588 return (error); 4589 } 4590 mp->b_datap->db_type = M_FLUSH; 4591 *mp->b_wptr++ = binfo.bi_flag | FLUSHBAND; 4592 *mp->b_wptr++ = binfo.bi_pri; 4593 putnext(stp->sd_wrq, mp); 4594 /* 4595 * Send down an unsupported ioctl and wait for the nack 4596 * in order to allow the M_FLUSH to propagate back 4597 * up to the stream head. 
4598 * Replaces if (qready()) runqueues(); 4599 */ 4600 strioc.ic_cmd = -1; /* The unsupported ioctl */ 4601 strioc.ic_timout = 0; 4602 strioc.ic_len = 0; 4603 strioc.ic_dp = NULL; 4604 (void) strdoioctl(stp, &strioc, flag, K_TO_K, crp, rvalp); 4605 *rvalp = 0; 4606 return (0); 4607 } 4608 4609 case I_SRDOPT: 4610 /* 4611 * Set read options 4612 * 4613 * RNORM - default stream mode 4614 * RMSGN - message no discard 4615 * RMSGD - message discard 4616 * RPROTNORM - fail read with EBADMSG for M_[PC]PROTOs 4617 * RPROTDAT - convert M_[PC]PROTOs to M_DATAs 4618 * RPROTDIS - discard M_[PC]PROTOs and retain M_DATAs 4619 */ 4620 if (arg & ~(RMODEMASK | RPROTMASK)) 4621 return (EINVAL); 4622 4623 if ((arg & (RMSGD|RMSGN)) == (RMSGD|RMSGN)) 4624 return (EINVAL); 4625 4626 mutex_enter(&stp->sd_lock); 4627 switch (arg & RMODEMASK) { 4628 case RNORM: 4629 stp->sd_read_opt &= ~(RD_MSGDIS | RD_MSGNODIS); 4630 break; 4631 case RMSGD: 4632 stp->sd_read_opt = (stp->sd_read_opt & ~RD_MSGNODIS) | 4633 RD_MSGDIS; 4634 break; 4635 case RMSGN: 4636 stp->sd_read_opt = (stp->sd_read_opt & ~RD_MSGDIS) | 4637 RD_MSGNODIS; 4638 break; 4639 } 4640 4641 switch (arg & RPROTMASK) { 4642 case RPROTNORM: 4643 stp->sd_read_opt &= ~(RD_PROTDAT | RD_PROTDIS); 4644 break; 4645 4646 case RPROTDAT: 4647 stp->sd_read_opt = ((stp->sd_read_opt & ~RD_PROTDIS) | 4648 RD_PROTDAT); 4649 break; 4650 4651 case RPROTDIS: 4652 stp->sd_read_opt = ((stp->sd_read_opt & ~RD_PROTDAT) | 4653 RD_PROTDIS); 4654 break; 4655 } 4656 mutex_exit(&stp->sd_lock); 4657 return (0); 4658 4659 case I_GRDOPT: 4660 /* 4661 * Get read option and return the value 4662 * to spot pointed to by arg 4663 */ 4664 { 4665 int rdopt; 4666 4667 rdopt = ((stp->sd_read_opt & RD_MSGDIS) ? RMSGD : 4668 ((stp->sd_read_opt & RD_MSGNODIS) ? RMSGN : RNORM)); 4669 rdopt |= ((stp->sd_read_opt & RD_PROTDAT) ? RPROTDAT : 4670 ((stp->sd_read_opt & RD_PROTDIS) ? RPROTDIS : RPROTNORM)); 4671 4672 return (strcopyout(&rdopt, (void *)arg, sizeof (int), 4673 copyflag)); 4674 } 4675 4676 case I_SERROPT: 4677 /* 4678 * Set error options 4679 * 4680 * RERRNORM - persistent read errors 4681 * RERRNONPERSIST - non-persistent read errors 4682 * WERRNORM - persistent write errors 4683 * WERRNONPERSIST - non-persistent write errors 4684 */ 4685 if (arg & ~(RERRMASK | WERRMASK)) 4686 return (EINVAL); 4687 4688 mutex_enter(&stp->sd_lock); 4689 switch (arg & RERRMASK) { 4690 case RERRNORM: 4691 stp->sd_flag &= ~STRDERRNONPERSIST; 4692 break; 4693 case RERRNONPERSIST: 4694 stp->sd_flag |= STRDERRNONPERSIST; 4695 break; 4696 } 4697 switch (arg & WERRMASK) { 4698 case WERRNORM: 4699 stp->sd_flag &= ~STWRERRNONPERSIST; 4700 break; 4701 case WERRNONPERSIST: 4702 stp->sd_flag |= STWRERRNONPERSIST; 4703 break; 4704 } 4705 mutex_exit(&stp->sd_lock); 4706 return (0); 4707 4708 case I_GERROPT: 4709 /* 4710 * Get error option and return the value 4711 * to spot pointed to by arg 4712 */ 4713 { 4714 int erropt = 0; 4715 4716 erropt |= (stp->sd_flag & STRDERRNONPERSIST) ? RERRNONPERSIST : 4717 RERRNORM; 4718 erropt |= (stp->sd_flag & STWRERRNONPERSIST) ? WERRNONPERSIST : 4719 WERRNORM; 4720 return (strcopyout(&erropt, (void *)arg, sizeof (int), 4721 copyflag)); 4722 } 4723 4724 case I_SETSIG: 4725 /* 4726 * Register the calling proc to receive the SIGPOLL 4727 * signal based on the events given in arg. If 4728 * arg is zero, remove the proc from register list. 
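 *
 * Illustrative only (not part of this code path): a process that
 * wants SIGPOLL on ordinary input would typically register with
 *
 *	(void) ioctl(fd, I_SETSIG, S_RDNORM | S_RDBAND);
 *
 * and later cancel that registration with ioctl(fd, I_SETSIG, 0).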
4729 */ 4730 { 4731 strsig_t *ssp, *pssp; 4732 struct pid *pidp; 4733 4734 pssp = NULL; 4735 pidp = curproc->p_pidp; 4736 /* 4737 * Hold sd_lock to prevent traversal of sd_siglist while 4738 * it is modified. 4739 */ 4740 mutex_enter(&stp->sd_lock); 4741 for (ssp = stp->sd_siglist; ssp && (ssp->ss_pidp != pidp); 4742 pssp = ssp, ssp = ssp->ss_next) 4743 ; 4744 4745 if (arg) { 4746 if (arg & ~(S_INPUT|S_HIPRI|S_MSG|S_HANGUP|S_ERROR| 4747 S_RDNORM|S_WRNORM|S_RDBAND|S_WRBAND|S_BANDURG)) { 4748 mutex_exit(&stp->sd_lock); 4749 return (EINVAL); 4750 } 4751 if ((arg & S_BANDURG) && !(arg & S_RDBAND)) { 4752 mutex_exit(&stp->sd_lock); 4753 return (EINVAL); 4754 } 4755 4756 /* 4757 * If proc not already registered, add it 4758 * to list. 4759 */ 4760 if (!ssp) { 4761 ssp = kmem_alloc(sizeof (strsig_t), KM_SLEEP); 4762 ssp->ss_pidp = pidp; 4763 ssp->ss_pid = pidp->pid_id; 4764 ssp->ss_next = NULL; 4765 if (pssp) 4766 pssp->ss_next = ssp; 4767 else 4768 stp->sd_siglist = ssp; 4769 mutex_enter(&pidlock); 4770 PID_HOLD(pidp); 4771 mutex_exit(&pidlock); 4772 } 4773 4774 /* 4775 * Set events. 4776 */ 4777 ssp->ss_events = (int)arg; 4778 } else { 4779 /* 4780 * Remove proc from register list. 4781 */ 4782 if (ssp) { 4783 mutex_enter(&pidlock); 4784 PID_RELE(pidp); 4785 mutex_exit(&pidlock); 4786 if (pssp) 4787 pssp->ss_next = ssp->ss_next; 4788 else 4789 stp->sd_siglist = ssp->ss_next; 4790 kmem_free(ssp, sizeof (strsig_t)); 4791 } else { 4792 mutex_exit(&stp->sd_lock); 4793 return (EINVAL); 4794 } 4795 } 4796 4797 /* 4798 * Recalculate OR of sig events. 4799 */ 4800 stp->sd_sigflags = 0; 4801 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next) 4802 stp->sd_sigflags |= ssp->ss_events; 4803 mutex_exit(&stp->sd_lock); 4804 return (0); 4805 } 4806 4807 case I_GETSIG: 4808 /* 4809 * Return (in arg) the current registration of events 4810 * for which the calling proc is to be signaled. 4811 */ 4812 { 4813 struct strsig *ssp; 4814 struct pid *pidp; 4815 4816 pidp = curproc->p_pidp; 4817 mutex_enter(&stp->sd_lock); 4818 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next) 4819 if (ssp->ss_pidp == pidp) { 4820 error = strcopyout(&ssp->ss_events, (void *)arg, 4821 sizeof (int), copyflag); 4822 mutex_exit(&stp->sd_lock); 4823 return (error); 4824 } 4825 mutex_exit(&stp->sd_lock); 4826 return (EINVAL); 4827 } 4828 4829 case I_ESETSIG: 4830 /* 4831 * Register the ss_pid to receive the SIGPOLL 4832 * signal based on the events is ss_events arg. If 4833 * ss_events is zero, remove the proc from register list. 4834 */ 4835 { 4836 struct strsig *ssp, *pssp; 4837 struct proc *proc; 4838 struct pid *pidp; 4839 pid_t pid; 4840 struct strsigset ss; 4841 4842 error = strcopyin((void *)arg, &ss, sizeof (ss), copyflag); 4843 if (error) 4844 return (error); 4845 4846 pid = ss.ss_pid; 4847 4848 if (ss.ss_events != 0) { 4849 /* 4850 * Permissions check by sending signal 0. 4851 * Note that when kill fails it does a set_errno 4852 * causing the system call to fail. 4853 */ 4854 error = kill(pid, 0); 4855 if (error) { 4856 return (error); 4857 } 4858 } 4859 mutex_enter(&pidlock); 4860 if (pid == 0) 4861 proc = curproc; 4862 else if (pid < 0) 4863 proc = pgfind(-pid); 4864 else 4865 proc = prfind(pid); 4866 if (proc == NULL) { 4867 mutex_exit(&pidlock); 4868 return (ESRCH); 4869 } 4870 if (pid < 0) 4871 pidp = proc->p_pgidp; 4872 else 4873 pidp = proc->p_pidp; 4874 ASSERT(pidp); 4875 /* 4876 * Get a hold on the pid structure while referencing it. 4877 * There is a separate PID_HOLD should it be inserted 4878 * in the list below. 
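 * Roughly: the hold taken here lasts only for the duration of this
 * ioctl and is dropped on every return path below; the separate
 * PID_HOLD further down is the one that persists for as long as
 * the strsig_t entry remains on sd_siglist.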
4879 */ 4880 PID_HOLD(pidp); 4881 mutex_exit(&pidlock); 4882 4883 pssp = NULL; 4884 /* 4885 * Hold sd_lock to prevent traversal of sd_siglist while 4886 * it is modified. 4887 */ 4888 mutex_enter(&stp->sd_lock); 4889 for (ssp = stp->sd_siglist; ssp && (ssp->ss_pid != pid); 4890 pssp = ssp, ssp = ssp->ss_next) 4891 ; 4892 4893 if (ss.ss_events) { 4894 if (ss.ss_events & 4895 ~(S_INPUT|S_HIPRI|S_MSG|S_HANGUP|S_ERROR| 4896 S_RDNORM|S_WRNORM|S_RDBAND|S_WRBAND|S_BANDURG)) { 4897 mutex_exit(&stp->sd_lock); 4898 mutex_enter(&pidlock); 4899 PID_RELE(pidp); 4900 mutex_exit(&pidlock); 4901 return (EINVAL); 4902 } 4903 if ((ss.ss_events & S_BANDURG) && 4904 !(ss.ss_events & S_RDBAND)) { 4905 mutex_exit(&stp->sd_lock); 4906 mutex_enter(&pidlock); 4907 PID_RELE(pidp); 4908 mutex_exit(&pidlock); 4909 return (EINVAL); 4910 } 4911 4912 /* 4913 * If proc not already registered, add it 4914 * to list. 4915 */ 4916 if (!ssp) { 4917 ssp = kmem_alloc(sizeof (strsig_t), KM_SLEEP); 4918 ssp->ss_pidp = pidp; 4919 ssp->ss_pid = pid; 4920 ssp->ss_next = NULL; 4921 if (pssp) 4922 pssp->ss_next = ssp; 4923 else 4924 stp->sd_siglist = ssp; 4925 mutex_enter(&pidlock); 4926 PID_HOLD(pidp); 4927 mutex_exit(&pidlock); 4928 } 4929 4930 /* 4931 * Set events. 4932 */ 4933 ssp->ss_events = ss.ss_events; 4934 } else { 4935 /* 4936 * Remove proc from register list. 4937 */ 4938 if (ssp) { 4939 mutex_enter(&pidlock); 4940 PID_RELE(pidp); 4941 mutex_exit(&pidlock); 4942 if (pssp) 4943 pssp->ss_next = ssp->ss_next; 4944 else 4945 stp->sd_siglist = ssp->ss_next; 4946 kmem_free(ssp, sizeof (strsig_t)); 4947 } else { 4948 mutex_exit(&stp->sd_lock); 4949 mutex_enter(&pidlock); 4950 PID_RELE(pidp); 4951 mutex_exit(&pidlock); 4952 return (EINVAL); 4953 } 4954 } 4955 4956 /* 4957 * Recalculate OR of sig events. 4958 */ 4959 stp->sd_sigflags = 0; 4960 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next) 4961 stp->sd_sigflags |= ssp->ss_events; 4962 mutex_exit(&stp->sd_lock); 4963 mutex_enter(&pidlock); 4964 PID_RELE(pidp); 4965 mutex_exit(&pidlock); 4966 return (0); 4967 } 4968 4969 case I_EGETSIG: 4970 /* 4971 * Return (in arg) the current registration of events 4972 * for which the calling proc is to be signaled. 
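 * (More precisely, for the process or process group named by the
 * ss_pid member of the strsigset passed through arg, which need
 * not be the calling process.)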
4973 */ 4974 { 4975 struct strsig *ssp; 4976 struct proc *proc; 4977 pid_t pid; 4978 struct pid *pidp; 4979 struct strsigset ss; 4980 4981 error = strcopyin((void *)arg, &ss, sizeof (ss), copyflag); 4982 if (error) 4983 return (error); 4984 4985 pid = ss.ss_pid; 4986 mutex_enter(&pidlock); 4987 if (pid == 0) 4988 proc = curproc; 4989 else if (pid < 0) 4990 proc = pgfind(-pid); 4991 else 4992 proc = prfind(pid); 4993 if (proc == NULL) { 4994 mutex_exit(&pidlock); 4995 return (ESRCH); 4996 } 4997 if (pid < 0) 4998 pidp = proc->p_pgidp; 4999 else 5000 pidp = proc->p_pidp; 5001 5002 /* Prevent the pidp from being reassigned */ 5003 PID_HOLD(pidp); 5004 mutex_exit(&pidlock); 5005 5006 mutex_enter(&stp->sd_lock); 5007 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next) 5008 if (ssp->ss_pid == pid) { 5009 ss.ss_pid = ssp->ss_pid; 5010 ss.ss_events = ssp->ss_events; 5011 error = strcopyout(&ss, (void *)arg, 5012 sizeof (struct strsigset), copyflag); 5013 mutex_exit(&stp->sd_lock); 5014 mutex_enter(&pidlock); 5015 PID_RELE(pidp); 5016 mutex_exit(&pidlock); 5017 return (error); 5018 } 5019 mutex_exit(&stp->sd_lock); 5020 mutex_enter(&pidlock); 5021 PID_RELE(pidp); 5022 mutex_exit(&pidlock); 5023 return (EINVAL); 5024 } 5025 5026 case I_PEEK: 5027 { 5028 STRUCT_DECL(strpeek, strpeek); 5029 size_t n; 5030 mblk_t *fmp, *tmp_mp = NULL; 5031 5032 STRUCT_INIT(strpeek, flag); 5033 5034 error = strcopyin((void *)arg, STRUCT_BUF(strpeek), 5035 STRUCT_SIZE(strpeek), copyflag); 5036 if (error) 5037 return (error); 5038 5039 mutex_enter(QLOCK(rdq)); 5040 /* 5041 * Skip the invalid messages 5042 */ 5043 for (mp = rdq->q_first; mp != NULL; mp = mp->b_next) 5044 if (mp->b_datap->db_type != M_SIG) 5045 break; 5046 5047 /* 5048 * If user has requested to peek at a high priority message 5049 * and first message is not, return 0 5050 */ 5051 if (mp != NULL) { 5052 if ((STRUCT_FGET(strpeek, flags) & RS_HIPRI) && 5053 queclass(mp) == QNORM) { 5054 *rvalp = 0; 5055 mutex_exit(QLOCK(rdq)); 5056 return (0); 5057 } 5058 } else if (stp->sd_struiordq == NULL || 5059 (STRUCT_FGET(strpeek, flags) & RS_HIPRI)) { 5060 /* 5061 * No mblks to look at at the streamhead and 5062 * 1). This isn't a synch stream or 5063 * 2). This is a synch stream but caller wants high 5064 * priority messages which is not supported by 5065 * the synch stream. (it only supports QNORM) 5066 */ 5067 *rvalp = 0; 5068 mutex_exit(QLOCK(rdq)); 5069 return (0); 5070 } 5071 5072 fmp = mp; 5073 5074 if (mp && mp->b_datap->db_type == M_PASSFP) { 5075 mutex_exit(QLOCK(rdq)); 5076 return (EBADMSG); 5077 } 5078 5079 ASSERT(mp == NULL || mp->b_datap->db_type == M_PCPROTO || 5080 mp->b_datap->db_type == M_PROTO || 5081 mp->b_datap->db_type == M_DATA); 5082 5083 if (mp && mp->b_datap->db_type == M_PCPROTO) { 5084 STRUCT_FSET(strpeek, flags, RS_HIPRI); 5085 } else { 5086 STRUCT_FSET(strpeek, flags, 0); 5087 } 5088 5089 5090 if (mp && ((tmp_mp = dupmsg(mp)) == NULL)) { 5091 mutex_exit(QLOCK(rdq)); 5092 return (ENOSR); 5093 } 5094 mutex_exit(QLOCK(rdq)); 5095 5096 /* 5097 * set mp = tmp_mp, so that I_PEEK processing can continue. 5098 * tmp_mp is used to free the dup'd message. 5099 */ 5100 mp = tmp_mp; 5101 5102 uio.uio_fmode = 0; 5103 uio.uio_extflg = UIO_COPY_CACHED; 5104 uio.uio_segflg = (copyflag == U_TO_K) ? UIO_USERSPACE : 5105 UIO_SYSSPACE; 5106 uio.uio_limit = 0; 5107 /* 5108 * First process PROTO blocks, if any. 5109 * If user doesn't want to get ctl info by setting maxlen <= 0, 5110 * then set len to -1/0 and skip control blocks part. 
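 * Note that ctlbuf.len is also forced to -1 further down if the
 * peeked message turns out to contain no control part at all, even
 * when maxlen was positive.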
5111 */ 5112 if (STRUCT_FGET(strpeek, ctlbuf.maxlen) < 0) 5113 STRUCT_FSET(strpeek, ctlbuf.len, -1); 5114 else if (STRUCT_FGET(strpeek, ctlbuf.maxlen) == 0) 5115 STRUCT_FSET(strpeek, ctlbuf.len, 0); 5116 else { 5117 int ctl_part = 0; 5118 5119 iov.iov_base = STRUCT_FGETP(strpeek, ctlbuf.buf); 5120 iov.iov_len = STRUCT_FGET(strpeek, ctlbuf.maxlen); 5121 uio.uio_iov = &iov; 5122 uio.uio_resid = iov.iov_len; 5123 uio.uio_loffset = 0; 5124 uio.uio_iovcnt = 1; 5125 while (mp && mp->b_datap->db_type != M_DATA && 5126 uio.uio_resid >= 0) { 5127 ASSERT(STRUCT_FGET(strpeek, flags) == 0 ? 5128 mp->b_datap->db_type == M_PROTO : 5129 mp->b_datap->db_type == M_PCPROTO); 5130 5131 if ((n = MIN(uio.uio_resid, 5132 mp->b_wptr - mp->b_rptr)) != 0 && 5133 (error = uiomove((char *)mp->b_rptr, n, 5134 UIO_READ, &uio)) != 0) { 5135 freemsg(tmp_mp); 5136 return (error); 5137 } 5138 ctl_part = 1; 5139 mp = mp->b_cont; 5140 } 5141 /* No ctl message */ 5142 if (ctl_part == 0) 5143 STRUCT_FSET(strpeek, ctlbuf.len, -1); 5144 else 5145 STRUCT_FSET(strpeek, ctlbuf.len, 5146 STRUCT_FGET(strpeek, ctlbuf.maxlen) - 5147 uio.uio_resid); 5148 } 5149 5150 /* 5151 * Now process DATA blocks, if any. 5152 * If user doesn't want to get data info by setting maxlen <= 0, 5153 * then set len to -1/0 and skip data blocks part. 5154 */ 5155 if (STRUCT_FGET(strpeek, databuf.maxlen) < 0) 5156 STRUCT_FSET(strpeek, databuf.len, -1); 5157 else if (STRUCT_FGET(strpeek, databuf.maxlen) == 0) 5158 STRUCT_FSET(strpeek, databuf.len, 0); 5159 else { 5160 int data_part = 0; 5161 5162 iov.iov_base = STRUCT_FGETP(strpeek, databuf.buf); 5163 iov.iov_len = STRUCT_FGET(strpeek, databuf.maxlen); 5164 uio.uio_iov = &iov; 5165 uio.uio_resid = iov.iov_len; 5166 uio.uio_loffset = 0; 5167 uio.uio_iovcnt = 1; 5168 while (mp && uio.uio_resid) { 5169 if (mp->b_datap->db_type == M_DATA) { 5170 if ((n = MIN(uio.uio_resid, 5171 mp->b_wptr - mp->b_rptr)) != 0 && 5172 (error = uiomove((char *)mp->b_rptr, 5173 n, UIO_READ, &uio)) != 0) { 5174 freemsg(tmp_mp); 5175 return (error); 5176 } 5177 data_part = 1; 5178 } 5179 ASSERT(data_part == 0 || 5180 mp->b_datap->db_type == M_DATA); 5181 mp = mp->b_cont; 5182 } 5183 /* No data message */ 5184 if (data_part == 0) 5185 STRUCT_FSET(strpeek, databuf.len, -1); 5186 else 5187 STRUCT_FSET(strpeek, databuf.len, 5188 STRUCT_FGET(strpeek, databuf.maxlen) - 5189 uio.uio_resid); 5190 } 5191 freemsg(tmp_mp); 5192 5193 /* 5194 * It is a synch stream and user wants to get 5195 * data (maxlen > 0). 5196 * uio setup is done by the codes that process DATA 5197 * blocks above. 5198 */ 5199 if ((fmp == NULL) && STRUCT_FGET(strpeek, databuf.maxlen) > 0) { 5200 infod_t infod; 5201 5202 infod.d_cmd = INFOD_COPYOUT; 5203 infod.d_res = 0; 5204 infod.d_uiop = &uio; 5205 error = infonext(rdq, &infod); 5206 if (error == EINVAL || error == EBUSY) 5207 error = 0; 5208 if (error) 5209 return (error); 5210 STRUCT_FSET(strpeek, databuf.len, STRUCT_FGET(strpeek, 5211 databuf.maxlen) - uio.uio_resid); 5212 if (STRUCT_FGET(strpeek, databuf.len) == 0) { 5213 /* 5214 * No data found by the infonext(). 5215 */ 5216 STRUCT_FSET(strpeek, databuf.len, -1); 5217 } 5218 } 5219 error = strcopyout(STRUCT_BUF(strpeek), (void *)arg, 5220 STRUCT_SIZE(strpeek), copyflag); 5221 if (error) { 5222 return (error); 5223 } 5224 /* 5225 * If there is no message retrieved, set return code to 0 5226 * otherwise, set it to 1. 
5227 */ 5228 if (STRUCT_FGET(strpeek, ctlbuf.len) == -1 && 5229 STRUCT_FGET(strpeek, databuf.len) == -1) 5230 *rvalp = 0; 5231 else 5232 *rvalp = 1; 5233 return (0); 5234 } 5235 5236 case I_FDINSERT: 5237 { 5238 STRUCT_DECL(strfdinsert, strfdinsert); 5239 struct file *resftp; 5240 struct stdata *resstp; 5241 t_uscalar_t ival; 5242 ssize_t msgsize; 5243 struct strbuf mctl; 5244 5245 STRUCT_INIT(strfdinsert, flag); 5246 if (stp->sd_flag & STRHUP) 5247 return (ENXIO); 5248 /* 5249 * STRDERR, STWRERR and STPLEX tested above. 5250 */ 5251 error = strcopyin((void *)arg, STRUCT_BUF(strfdinsert), 5252 STRUCT_SIZE(strfdinsert), copyflag); 5253 if (error) 5254 return (error); 5255 5256 if (STRUCT_FGET(strfdinsert, offset) < 0 || 5257 (STRUCT_FGET(strfdinsert, offset) % 5258 sizeof (t_uscalar_t)) != 0) 5259 return (EINVAL); 5260 if ((resftp = getf(STRUCT_FGET(strfdinsert, fildes))) != NULL) { 5261 if ((resstp = resftp->f_vnode->v_stream) == NULL) { 5262 releasef(STRUCT_FGET(strfdinsert, fildes)); 5263 return (EINVAL); 5264 } 5265 } else 5266 return (EINVAL); 5267 5268 mutex_enter(&resstp->sd_lock); 5269 if (resstp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) { 5270 error = strgeterr(resstp, 5271 STRDERR|STWRERR|STRHUP|STPLEX, 0); 5272 if (error != 0) { 5273 mutex_exit(&resstp->sd_lock); 5274 releasef(STRUCT_FGET(strfdinsert, fildes)); 5275 return (error); 5276 } 5277 } 5278 mutex_exit(&resstp->sd_lock); 5279 5280 #ifdef _ILP32 5281 { 5282 queue_t *q; 5283 queue_t *mate = NULL; 5284 5285 /* get read queue of stream terminus */ 5286 claimstr(resstp->sd_wrq); 5287 for (q = resstp->sd_wrq->q_next; q->q_next != NULL; 5288 q = q->q_next) 5289 if (!STRMATED(resstp) && STREAM(q) != resstp && 5290 mate == NULL) { 5291 ASSERT(q->q_qinfo->qi_srvp); 5292 ASSERT(_OTHERQ(q)->q_qinfo->qi_srvp); 5293 claimstr(q); 5294 mate = q; 5295 } 5296 q = _RD(q); 5297 if (mate) 5298 releasestr(mate); 5299 releasestr(resstp->sd_wrq); 5300 ival = (t_uscalar_t)q; 5301 } 5302 #else 5303 ival = (t_uscalar_t)getminor(resftp->f_vnode->v_rdev); 5304 #endif /* _ILP32 */ 5305 5306 if (STRUCT_FGET(strfdinsert, ctlbuf.len) < 5307 STRUCT_FGET(strfdinsert, offset) + sizeof (t_uscalar_t)) { 5308 releasef(STRUCT_FGET(strfdinsert, fildes)); 5309 return (EINVAL); 5310 } 5311 5312 /* 5313 * Check for legal flag value. 5314 */ 5315 if (STRUCT_FGET(strfdinsert, flags) & ~RS_HIPRI) { 5316 releasef(STRUCT_FGET(strfdinsert, fildes)); 5317 return (EINVAL); 5318 } 5319 5320 /* get these values from those cached in the stream head */ 5321 mutex_enter(QLOCK(stp->sd_wrq)); 5322 rmin = stp->sd_qn_minpsz; 5323 rmax = stp->sd_qn_maxpsz; 5324 mutex_exit(QLOCK(stp->sd_wrq)); 5325 5326 /* 5327 * Make sure ctl and data sizes together fall within 5328 * the limits of the max and min receive packet sizes 5329 * and do not exceed system limit. A negative data 5330 * length means that no data part is to be sent. 
5331 */ 5332 ASSERT((rmax >= 0) || (rmax == INFPSZ)); 5333 if (rmax == 0) { 5334 releasef(STRUCT_FGET(strfdinsert, fildes)); 5335 return (ERANGE); 5336 } 5337 if ((msgsize = STRUCT_FGET(strfdinsert, databuf.len)) < 0) 5338 msgsize = 0; 5339 if ((msgsize < rmin) || 5340 ((msgsize > rmax) && (rmax != INFPSZ)) || 5341 (STRUCT_FGET(strfdinsert, ctlbuf.len) > strctlsz)) { 5342 releasef(STRUCT_FGET(strfdinsert, fildes)); 5343 return (ERANGE); 5344 } 5345 5346 mutex_enter(&stp->sd_lock); 5347 while (!(STRUCT_FGET(strfdinsert, flags) & RS_HIPRI) && 5348 !canputnext(stp->sd_wrq)) { 5349 if ((error = strwaitq(stp, WRITEWAIT, (ssize_t)0, 5350 flag, -1, &done)) != 0 || done) { 5351 mutex_exit(&stp->sd_lock); 5352 releasef(STRUCT_FGET(strfdinsert, fildes)); 5353 return (error); 5354 } 5355 if ((error = i_straccess(stp, access)) != 0) { 5356 mutex_exit(&stp->sd_lock); 5357 releasef( 5358 STRUCT_FGET(strfdinsert, fildes)); 5359 return (error); 5360 } 5361 } 5362 mutex_exit(&stp->sd_lock); 5363 5364 /* 5365 * Copy strfdinsert.ctlbuf into native form of 5366 * ctlbuf to pass down into strmakemsg(). 5367 */ 5368 mctl.maxlen = STRUCT_FGET(strfdinsert, ctlbuf.maxlen); 5369 mctl.len = STRUCT_FGET(strfdinsert, ctlbuf.len); 5370 mctl.buf = STRUCT_FGETP(strfdinsert, ctlbuf.buf); 5371 5372 iov.iov_base = STRUCT_FGETP(strfdinsert, databuf.buf); 5373 iov.iov_len = STRUCT_FGET(strfdinsert, databuf.len); 5374 uio.uio_iov = &iov; 5375 uio.uio_iovcnt = 1; 5376 uio.uio_loffset = 0; 5377 uio.uio_segflg = (copyflag == U_TO_K) ? UIO_USERSPACE : 5378 UIO_SYSSPACE; 5379 uio.uio_fmode = 0; 5380 uio.uio_extflg = UIO_COPY_CACHED; 5381 uio.uio_resid = iov.iov_len; 5382 if ((error = strmakemsg(&mctl, 5383 &msgsize, &uio, stp, 5384 STRUCT_FGET(strfdinsert, flags), &mp)) != 0 || !mp) { 5385 STRUCT_FSET(strfdinsert, databuf.len, msgsize); 5386 releasef(STRUCT_FGET(strfdinsert, fildes)); 5387 return (error); 5388 } 5389 5390 STRUCT_FSET(strfdinsert, databuf.len, msgsize); 5391 5392 /* 5393 * Place the possibly reencoded queue pointer 'offset' bytes 5394 * from the start of the control portion of the message. 5395 */ 5396 *((t_uscalar_t *)(mp->b_rptr + 5397 STRUCT_FGET(strfdinsert, offset))) = ival; 5398 5399 /* 5400 * Put message downstream. 
5401 */ 5402 stream_willservice(stp); 5403 putnext(stp->sd_wrq, mp); 5404 stream_runservice(stp); 5405 releasef(STRUCT_FGET(strfdinsert, fildes)); 5406 return (error); 5407 } 5408 5409 case I_SENDFD: 5410 { 5411 struct file *fp; 5412 5413 if ((fp = getf((int)arg)) == NULL) 5414 return (EBADF); 5415 error = do_sendfp(stp, fp, crp); 5416 if (auditing) { 5417 audit_fdsend((int)arg, fp, error); 5418 } 5419 releasef((int)arg); 5420 return (error); 5421 } 5422 5423 case I_RECVFD: 5424 case I_E_RECVFD: 5425 { 5426 struct k_strrecvfd *srf; 5427 int i, fd; 5428 5429 mutex_enter(&stp->sd_lock); 5430 while (!(mp = getq(rdq))) { 5431 if (stp->sd_flag & (STRHUP|STREOF)) { 5432 mutex_exit(&stp->sd_lock); 5433 return (ENXIO); 5434 } 5435 if ((error = strwaitq(stp, GETWAIT, (ssize_t)0, 5436 flag, -1, &done)) != 0 || done) { 5437 mutex_exit(&stp->sd_lock); 5438 return (error); 5439 } 5440 if ((error = i_straccess(stp, access)) != 0) { 5441 mutex_exit(&stp->sd_lock); 5442 return (error); 5443 } 5444 } 5445 if (mp->b_datap->db_type != M_PASSFP) { 5446 putback(stp, rdq, mp, mp->b_band); 5447 mutex_exit(&stp->sd_lock); 5448 return (EBADMSG); 5449 } 5450 mutex_exit(&stp->sd_lock); 5451 5452 srf = (struct k_strrecvfd *)mp->b_rptr; 5453 if ((fd = ufalloc(0)) == -1) { 5454 mutex_enter(&stp->sd_lock); 5455 putback(stp, rdq, mp, mp->b_band); 5456 mutex_exit(&stp->sd_lock); 5457 return (EMFILE); 5458 } 5459 if (cmd == I_RECVFD) { 5460 struct o_strrecvfd ostrfd; 5461 5462 /* check to see if uid/gid values are too large. */ 5463 5464 if (srf->uid > (o_uid_t)USHRT_MAX || 5465 srf->gid > (o_gid_t)USHRT_MAX) { 5466 mutex_enter(&stp->sd_lock); 5467 putback(stp, rdq, mp, mp->b_band); 5468 mutex_exit(&stp->sd_lock); 5469 setf(fd, NULL); /* release fd entry */ 5470 return (EOVERFLOW); 5471 } 5472 5473 ostrfd.fd = fd; 5474 ostrfd.uid = (o_uid_t)srf->uid; 5475 ostrfd.gid = (o_gid_t)srf->gid; 5476 5477 /* Null the filler bits */ 5478 for (i = 0; i < 8; i++) 5479 ostrfd.fill[i] = 0; 5480 5481 error = strcopyout(&ostrfd, (void *)arg, 5482 sizeof (struct o_strrecvfd), copyflag); 5483 } else { /* I_E_RECVFD */ 5484 struct strrecvfd strfd; 5485 5486 strfd.fd = fd; 5487 strfd.uid = srf->uid; 5488 strfd.gid = srf->gid; 5489 5490 /* null the filler bits */ 5491 for (i = 0; i < 8; i++) 5492 strfd.fill[i] = 0; 5493 5494 error = strcopyout(&strfd, (void *)arg, 5495 sizeof (struct strrecvfd), copyflag); 5496 } 5497 5498 if (error) { 5499 setf(fd, NULL); /* release fd entry */ 5500 mutex_enter(&stp->sd_lock); 5501 putback(stp, rdq, mp, mp->b_band); 5502 mutex_exit(&stp->sd_lock); 5503 return (error); 5504 } 5505 if (auditing) { 5506 audit_fdrecv(fd, srf->fp); 5507 } 5508 5509 /* 5510 * Always increment f_count since the freemsg() below will 5511 * always call free_passfp() which performs a closef(). 5512 */ 5513 mutex_enter(&srf->fp->f_tlock); 5514 srf->fp->f_count++; 5515 mutex_exit(&srf->fp->f_tlock); 5516 setf(fd, srf->fp); 5517 freemsg(mp); 5518 return (0); 5519 } 5520 5521 case I_SWROPT: 5522 /* 5523 * Set/clear the write options. arg is a bit 5524 * mask with any of the following bits set... 5525 * SNDZERO - send zero length message 5526 * SNDPIPE - send sigpipe to process if 5527 * sd_werror is set and process is 5528 * doing a write or putmsg. 5529 * The new stream head write options should reflect 5530 * what is in arg. 
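 *
 * Illustrative only: ioctl(fd, I_SWROPT, SNDZERO) enables
 * zero-length writes on the stream, and ioctl(fd, I_SWROPT, 0)
 * clears both options again.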
5531 */ 5532 if (arg & ~(SNDZERO|SNDPIPE)) 5533 return (EINVAL); 5534 5535 mutex_enter(&stp->sd_lock); 5536 stp->sd_wput_opt &= ~(SW_SIGPIPE|SW_SNDZERO); 5537 if (arg & SNDZERO) 5538 stp->sd_wput_opt |= SW_SNDZERO; 5539 if (arg & SNDPIPE) 5540 stp->sd_wput_opt |= SW_SIGPIPE; 5541 mutex_exit(&stp->sd_lock); 5542 return (0); 5543 5544 case I_GWROPT: 5545 { 5546 int wropt = 0; 5547 5548 if (stp->sd_wput_opt & SW_SNDZERO) 5549 wropt |= SNDZERO; 5550 if (stp->sd_wput_opt & SW_SIGPIPE) 5551 wropt |= SNDPIPE; 5552 return (strcopyout(&wropt, (void *)arg, sizeof (wropt), 5553 copyflag)); 5554 } 5555 5556 case I_LIST: 5557 /* 5558 * Returns all the modules found on this stream, 5559 * upto the driver. If argument is NULL, return the 5560 * number of modules (including driver). If argument 5561 * is not NULL, copy the names into the structure 5562 * provided. 5563 */ 5564 5565 { 5566 queue_t *q; 5567 char *qname; 5568 int i, nmods; 5569 struct str_mlist *mlist; 5570 STRUCT_DECL(str_list, strlist); 5571 5572 if (arg == 0) { /* Return number of modules plus driver */ 5573 if (stp->sd_vnode->v_type == VFIFO) 5574 *rvalp = stp->sd_pushcnt; 5575 else 5576 *rvalp = stp->sd_pushcnt + 1; 5577 return (0); 5578 } 5579 5580 STRUCT_INIT(strlist, flag); 5581 5582 error = strcopyin((void *)arg, STRUCT_BUF(strlist), 5583 STRUCT_SIZE(strlist), copyflag); 5584 if (error != 0) 5585 return (error); 5586 5587 mlist = STRUCT_FGETP(strlist, sl_modlist); 5588 nmods = STRUCT_FGET(strlist, sl_nmods); 5589 if (nmods <= 0) 5590 return (EINVAL); 5591 5592 claimstr(stp->sd_wrq); 5593 q = stp->sd_wrq; 5594 for (i = 0; i < nmods && _SAMESTR(q); i++, q = q->q_next) { 5595 qname = Q2NAME(q->q_next); 5596 error = strcopyout(qname, &mlist[i], strlen(qname) + 1, 5597 copyflag); 5598 if (error != 0) { 5599 releasestr(stp->sd_wrq); 5600 return (error); 5601 } 5602 } 5603 releasestr(stp->sd_wrq); 5604 return (strcopyout(&i, (void *)arg, sizeof (int), copyflag)); 5605 } 5606 5607 case I_CKBAND: 5608 { 5609 queue_t *q; 5610 qband_t *qbp; 5611 5612 if ((arg < 0) || (arg >= NBAND)) 5613 return (EINVAL); 5614 q = _RD(stp->sd_wrq); 5615 mutex_enter(QLOCK(q)); 5616 if (arg > (int)q->q_nband) { 5617 *rvalp = 0; 5618 } else { 5619 if (arg == 0) { 5620 if (q->q_first) 5621 *rvalp = 1; 5622 else 5623 *rvalp = 0; 5624 } else { 5625 qbp = q->q_bandp; 5626 while (--arg > 0) 5627 qbp = qbp->qb_next; 5628 if (qbp->qb_first) 5629 *rvalp = 1; 5630 else 5631 *rvalp = 0; 5632 } 5633 } 5634 mutex_exit(QLOCK(q)); 5635 return (0); 5636 } 5637 5638 case I_GETBAND: 5639 { 5640 int intpri; 5641 queue_t *q; 5642 5643 q = _RD(stp->sd_wrq); 5644 mutex_enter(QLOCK(q)); 5645 mp = q->q_first; 5646 if (!mp) { 5647 mutex_exit(QLOCK(q)); 5648 return (ENODATA); 5649 } 5650 intpri = (int)mp->b_band; 5651 error = strcopyout(&intpri, (void *)arg, sizeof (int), 5652 copyflag); 5653 mutex_exit(QLOCK(q)); 5654 return (error); 5655 } 5656 5657 case I_ATMARK: 5658 { 5659 queue_t *q; 5660 5661 if (arg & ~(ANYMARK|LASTMARK)) 5662 return (EINVAL); 5663 q = _RD(stp->sd_wrq); 5664 mutex_enter(&stp->sd_lock); 5665 if ((stp->sd_flag & STRATMARK) && (arg == ANYMARK)) { 5666 *rvalp = 1; 5667 } else { 5668 mutex_enter(QLOCK(q)); 5669 mp = q->q_first; 5670 5671 if (mp == NULL) 5672 *rvalp = 0; 5673 else if ((arg == ANYMARK) && (mp->b_flag & MSGMARK)) 5674 *rvalp = 1; 5675 else if ((arg == LASTMARK) && (mp == stp->sd_mark)) 5676 *rvalp = 1; 5677 else 5678 *rvalp = 0; 5679 mutex_exit(QLOCK(q)); 5680 } 5681 mutex_exit(&stp->sd_lock); 5682 return (0); 5683 } 5684 5685 case I_CANPUT: 5686 { 5687 
char band; 5688 5689 if ((arg < 0) || (arg >= NBAND)) 5690 return (EINVAL); 5691 band = (char)arg; 5692 *rvalp = bcanputnext(stp->sd_wrq, band); 5693 return (0); 5694 } 5695 5696 case I_SETCLTIME: 5697 { 5698 int closetime; 5699 5700 error = strcopyin((void *)arg, &closetime, sizeof (int), 5701 copyflag); 5702 if (error) 5703 return (error); 5704 if (closetime < 0) 5705 return (EINVAL); 5706 5707 stp->sd_closetime = closetime; 5708 return (0); 5709 } 5710 5711 case I_GETCLTIME: 5712 { 5713 int closetime; 5714 5715 closetime = stp->sd_closetime; 5716 return (strcopyout(&closetime, (void *)arg, sizeof (int), 5717 copyflag)); 5718 } 5719 5720 case TIOCGSID: 5721 { 5722 pid_t sid; 5723 5724 mutex_enter(&stp->sd_lock); 5725 if (stp->sd_sidp == NULL) { 5726 mutex_exit(&stp->sd_lock); 5727 return (ENOTTY); 5728 } 5729 sid = stp->sd_sidp->pid_id; 5730 mutex_exit(&stp->sd_lock); 5731 return (strcopyout(&sid, (void *)arg, sizeof (pid_t), 5732 copyflag)); 5733 } 5734 5735 case TIOCSPGRP: 5736 { 5737 pid_t pgrp; 5738 proc_t *q; 5739 pid_t sid, fg_pgid, bg_pgid; 5740 5741 if (error = strcopyin((void *)arg, &pgrp, sizeof (pid_t), 5742 copyflag)) 5743 return (error); 5744 mutex_enter(&stp->sd_lock); 5745 mutex_enter(&pidlock); 5746 if (stp->sd_sidp != ttoproc(curthread)->p_sessp->s_sidp) { 5747 mutex_exit(&pidlock); 5748 mutex_exit(&stp->sd_lock); 5749 return (ENOTTY); 5750 } 5751 if (pgrp == stp->sd_pgidp->pid_id) { 5752 mutex_exit(&pidlock); 5753 mutex_exit(&stp->sd_lock); 5754 return (0); 5755 } 5756 if (pgrp <= 0 || pgrp >= maxpid) { 5757 mutex_exit(&pidlock); 5758 mutex_exit(&stp->sd_lock); 5759 return (EINVAL); 5760 } 5761 if ((q = pgfind(pgrp)) == NULL || 5762 q->p_sessp != ttoproc(curthread)->p_sessp) { 5763 mutex_exit(&pidlock); 5764 mutex_exit(&stp->sd_lock); 5765 return (EPERM); 5766 } 5767 sid = stp->sd_sidp->pid_id; 5768 fg_pgid = q->p_pgrp; 5769 bg_pgid = stp->sd_pgidp->pid_id; 5770 CL_SET_PROCESS_GROUP(curthread, sid, bg_pgid, fg_pgid); 5771 PID_RELE(stp->sd_pgidp); 5772 ctty_clear_sighuped(); 5773 stp->sd_pgidp = q->p_pgidp; 5774 PID_HOLD(stp->sd_pgidp); 5775 mutex_exit(&pidlock); 5776 mutex_exit(&stp->sd_lock); 5777 return (0); 5778 } 5779 5780 case TIOCGPGRP: 5781 { 5782 pid_t pgrp; 5783 5784 mutex_enter(&stp->sd_lock); 5785 if (stp->sd_sidp == NULL) { 5786 mutex_exit(&stp->sd_lock); 5787 return (ENOTTY); 5788 } 5789 pgrp = stp->sd_pgidp->pid_id; 5790 mutex_exit(&stp->sd_lock); 5791 return (strcopyout(&pgrp, (void *)arg, sizeof (pid_t), 5792 copyflag)); 5793 } 5794 5795 case TIOCSCTTY: 5796 { 5797 return (strctty(stp)); 5798 } 5799 5800 case TIOCNOTTY: 5801 { 5802 /* freectty() always assumes curproc. */ 5803 if (freectty(B_FALSE) != 0) 5804 return (0); 5805 return (ENOTTY); 5806 } 5807 5808 case FIONBIO: 5809 case FIOASYNC: 5810 return (0); /* handled by the upper layer */ 5811 } 5812 } 5813 5814 /* 5815 * Custom free routine used for M_PASSFP messages. 5816 */ 5817 static void 5818 free_passfp(struct k_strrecvfd *srf) 5819 { 5820 (void) closef(srf->fp); 5821 kmem_free(srf, sizeof (struct k_strrecvfd) + sizeof (frtn_t)); 5822 } 5823 5824 /* ARGSUSED */ 5825 int 5826 do_sendfp(struct stdata *stp, struct file *fp, struct cred *cr) 5827 { 5828 queue_t *qp, *nextqp; 5829 struct k_strrecvfd *srf; 5830 mblk_t *mp; 5831 frtn_t *frtnp; 5832 size_t bufsize; 5833 queue_t *mate = NULL; 5834 syncq_t *sq = NULL; 5835 int retval = 0; 5836 5837 if (stp->sd_flag & STRHUP) 5838 return (ENXIO); 5839 5840 claimstr(stp->sd_wrq); 5841 5842 /* Fastpath, we have a pipe, and we are already mated, use it. 
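 * (For a mated pipe, sd_mate is the stream head at the other end,
 * so its read queue is where the M_PASSFP message belongs.)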
*/ 5843 if (STRMATED(stp)) { 5844 qp = _RD(stp->sd_mate->sd_wrq); 5845 claimstr(qp); 5846 mate = qp; 5847 } else { /* Not already mated. */ 5848 5849 /* 5850 * Walk the stream to the end of this one. 5851 * assumes that the claimstr() will prevent 5852 * plumbing between the stream head and the 5853 * driver from changing 5854 */ 5855 qp = stp->sd_wrq; 5856 5857 /* 5858 * Loop until we reach the end of this stream. 5859 * On completion, qp points to the write queue 5860 * at the end of the stream, or the read queue 5861 * at the stream head if this is a fifo. 5862 */ 5863 while (((qp = qp->q_next) != NULL) && _SAMESTR(qp)) 5864 ; 5865 5866 /* 5867 * Just in case we get a q_next which is NULL, but 5868 * not at the end of the stream. This is actually 5869 * broken, so we set an assert to catch it in 5870 * debug, and set an error and return if not debug. 5871 */ 5872 ASSERT(qp); 5873 if (qp == NULL) { 5874 releasestr(stp->sd_wrq); 5875 return (EINVAL); 5876 } 5877 5878 /* 5879 * Enter the syncq for the driver, so (hopefully) 5880 * the queue values will not change on us. 5881 * XXXX - This will only prevent the race IFF only 5882 * the write side modifies the q_next member, and 5883 * the put procedure is protected by at least 5884 * MT_PERQ. 5885 */ 5886 if ((sq = qp->q_syncq) != NULL) 5887 entersq(sq, SQ_PUT); 5888 5889 /* Now get the q_next value from this qp. */ 5890 nextqp = qp->q_next; 5891 5892 /* 5893 * If nextqp exists and the other stream is different 5894 * from this one claim the stream, set the mate, and 5895 * get the read queue at the stream head of the other 5896 * stream. Assumes that nextqp was at least valid when 5897 * we got it. Hopefully the entersq of the driver 5898 * will prevent it from changing on us. 5899 */ 5900 if ((nextqp != NULL) && (STREAM(nextqp) != stp)) { 5901 ASSERT(qp->q_qinfo->qi_srvp); 5902 ASSERT(_OTHERQ(qp)->q_qinfo->qi_srvp); 5903 ASSERT(_OTHERQ(qp->q_next)->q_qinfo->qi_srvp); 5904 claimstr(nextqp); 5905 5906 /* Make sure we still have a q_next */ 5907 if (nextqp != qp->q_next) { 5908 releasestr(stp->sd_wrq); 5909 releasestr(nextqp); 5910 return (EINVAL); 5911 } 5912 5913 qp = _RD(STREAM(nextqp)->sd_wrq); 5914 mate = qp; 5915 } 5916 /* If we entered the synq above, leave it. */ 5917 if (sq != NULL) 5918 leavesq(sq, SQ_PUT); 5919 } /* STRMATED(STP) */ 5920 5921 /* XXX prevents substitution of the ops vector */ 5922 if (qp->q_qinfo != &strdata && qp->q_qinfo != &fifo_strdata) { 5923 retval = EINVAL; 5924 goto out; 5925 } 5926 5927 if (qp->q_flag & QFULL) { 5928 retval = EAGAIN; 5929 goto out; 5930 } 5931 5932 /* 5933 * Since M_PASSFP messages include a file descriptor, we use 5934 * esballoc() and specify a custom free routine (free_passfp()) that 5935 * will close the descriptor as part of freeing the message. For 5936 * convenience, we stash the frtn_t right after the data block. 
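 * The single kmem allocation therefore holds the k_strrecvfd
 * followed immediately by the frtn_t, and frtnp below is simply
 * (frtn_t *)(srf + 1).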
5937 */ 5938 bufsize = sizeof (struct k_strrecvfd) + sizeof (frtn_t); 5939 srf = kmem_alloc(bufsize, KM_NOSLEEP); 5940 if (srf == NULL) { 5941 retval = EAGAIN; 5942 goto out; 5943 } 5944 5945 frtnp = (frtn_t *)(srf + 1); 5946 frtnp->free_arg = (caddr_t)srf; 5947 frtnp->free_func = free_passfp; 5948 5949 mp = esballoc((uchar_t *)srf, bufsize, BPRI_MED, frtnp); 5950 if (mp == NULL) { 5951 kmem_free(srf, bufsize); 5952 retval = EAGAIN; 5953 goto out; 5954 } 5955 mp->b_wptr += sizeof (struct k_strrecvfd); 5956 mp->b_datap->db_type = M_PASSFP; 5957 5958 srf->fp = fp; 5959 srf->uid = crgetuid(curthread->t_cred); 5960 srf->gid = crgetgid(curthread->t_cred); 5961 mutex_enter(&fp->f_tlock); 5962 fp->f_count++; 5963 mutex_exit(&fp->f_tlock); 5964 5965 put(qp, mp); 5966 out: 5967 releasestr(stp->sd_wrq); 5968 if (mate) 5969 releasestr(mate); 5970 return (retval); 5971 } 5972 5973 /* 5974 * Send an ioctl message downstream and wait for acknowledgement. 5975 * flags may be set to either U_TO_K or K_TO_K and a combination 5976 * of STR_NOERROR or STR_NOSIG 5977 * STR_NOSIG: Signals are essentially ignored or held and have 5978 * no effect for the duration of the call. 5979 * STR_NOERROR: Ignores stream head read, write and hup errors. 5980 * Additionally, if an existing ioctl times out, it is assumed 5981 * lost and and this ioctl will continue as if the previous ioctl had 5982 * finished. ETIME may be returned if this ioctl times out (i.e. 5983 * ic_timout is not INFTIM). Non-stream head errors may be returned if 5984 * the ioc_error indicates that the driver/module had problems, 5985 * an EFAULT was found when accessing user data, a lack of 5986 * resources, etc. 5987 */ 5988 int 5989 strdoioctl( 5990 struct stdata *stp, 5991 struct strioctl *strioc, 5992 int fflags, /* file flags with model info */ 5993 int flag, 5994 cred_t *crp, 5995 int *rvalp) 5996 { 5997 mblk_t *bp; 5998 struct iocblk *iocbp; 5999 struct copyreq *reqp; 6000 struct copyresp *resp; 6001 int id; 6002 int transparent = 0; 6003 int error = 0; 6004 int len = 0; 6005 caddr_t taddr; 6006 int copyflag = (flag & (U_TO_K | K_TO_K)); 6007 int sigflag = (flag & STR_NOSIG); 6008 int errs; 6009 uint_t waitflags; 6010 boolean_t set_iocwaitne = B_FALSE; 6011 6012 ASSERT(copyflag == U_TO_K || copyflag == K_TO_K); 6013 ASSERT((fflags & FMODELS) != 0); 6014 6015 TRACE_2(TR_FAC_STREAMS_FR, 6016 TR_STRDOIOCTL, 6017 "strdoioctl:stp %p strioc %p", stp, strioc); 6018 if (strioc->ic_len == TRANSPARENT) { /* send arg in M_DATA block */ 6019 transparent = 1; 6020 strioc->ic_len = sizeof (intptr_t); 6021 } 6022 6023 if (strioc->ic_len < 0 || (strmsgsz > 0 && strioc->ic_len > strmsgsz)) 6024 return (EINVAL); 6025 6026 if ((bp = allocb_cred_wait(sizeof (union ioctypes), sigflag, &error, 6027 crp, curproc->p_pid)) == NULL) 6028 return (error); 6029 6030 bzero(bp->b_wptr, sizeof (union ioctypes)); 6031 6032 iocbp = (struct iocblk *)bp->b_wptr; 6033 iocbp->ioc_count = strioc->ic_len; 6034 iocbp->ioc_cmd = strioc->ic_cmd; 6035 iocbp->ioc_flag = (fflags & FMODELS); 6036 6037 crhold(crp); 6038 iocbp->ioc_cr = crp; 6039 DB_TYPE(bp) = M_IOCTL; 6040 bp->b_wptr += sizeof (struct iocblk); 6041 6042 if (flag & STR_NOERROR) 6043 errs = STPLEX; 6044 else 6045 errs = STRHUP|STRDERR|STWRERR|STPLEX; 6046 6047 /* 6048 * If there is data to copy into ioctl block, do so. 
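 * Roughly: for a transparent ioctl, ic_dp points at a kernel copy
 * of the single intptr_t argument (set up by the caller), which is
 * why the copy below is done K_TO_K regardless of copyflag.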
6049 */ 6050 if (iocbp->ioc_count > 0) { 6051 if (transparent) 6052 /* 6053 * Note: STR_NOERROR does not have an effect 6054 * in putiocd() 6055 */ 6056 id = K_TO_K | sigflag; 6057 else 6058 id = flag; 6059 if ((error = putiocd(bp, strioc->ic_dp, id, crp)) != 0) { 6060 freemsg(bp); 6061 crfree(crp); 6062 return (error); 6063 } 6064 6065 /* 6066 * We could have slept copying in user pages. 6067 * Recheck the stream head state (the other end 6068 * of a pipe could have gone away). 6069 */ 6070 if (stp->sd_flag & errs) { 6071 mutex_enter(&stp->sd_lock); 6072 error = strgeterr(stp, errs, 0); 6073 mutex_exit(&stp->sd_lock); 6074 if (error != 0) { 6075 freemsg(bp); 6076 crfree(crp); 6077 return (error); 6078 } 6079 } 6080 } 6081 if (transparent) 6082 iocbp->ioc_count = TRANSPARENT; 6083 6084 /* 6085 * Block for up to STRTIMOUT milliseconds if there is an outstanding 6086 * ioctl for this stream already running. All processes 6087 * sleeping here will be awakened as a result of an ACK 6088 * or NAK being received for the outstanding ioctl, or 6089 * as a result of the timer expiring on the outstanding 6090 * ioctl (a failure), or as a result of any waiting 6091 * process's timer expiring (also a failure). 6092 */ 6093 6094 error = 0; 6095 mutex_enter(&stp->sd_lock); 6096 while ((stp->sd_flag & IOCWAIT) || 6097 (!set_iocwaitne && (stp->sd_flag & IOCWAITNE))) { 6098 clock_t cv_rval; 6099 6100 TRACE_0(TR_FAC_STREAMS_FR, 6101 TR_STRDOIOCTL_WAIT, 6102 "strdoioctl sleeps - IOCWAIT"); 6103 cv_rval = str_cv_wait(&stp->sd_iocmonitor, &stp->sd_lock, 6104 STRTIMOUT, sigflag); 6105 if (cv_rval <= 0) { 6106 if (cv_rval == 0) { 6107 error = EINTR; 6108 } else { 6109 if (flag & STR_NOERROR) { 6110 /* 6111 * Terminating current ioctl in 6112 * progress -- assume it got lost and 6113 * wake up the other thread so that the 6114 * operation completes. 6115 */ 6116 if (!(stp->sd_flag & IOCWAITNE)) { 6117 set_iocwaitne = B_TRUE; 6118 stp->sd_flag |= IOCWAITNE; 6119 cv_broadcast(&stp->sd_monitor); 6120 } 6121 /* 6122 * Otherwise, there's a running 6123 * STR_NOERROR -- we have no choice 6124 * here but to wait forever (or until 6125 * interrupted). 6126 */ 6127 } else { 6128 /* 6129 * pending ioctl has caused 6130 * us to time out 6131 */ 6132 error = ETIME; 6133 } 6134 } 6135 } else if ((stp->sd_flag & errs)) { 6136 error = strgeterr(stp, errs, 0); 6137 } 6138 if (error) { 6139 mutex_exit(&stp->sd_lock); 6140 freemsg(bp); 6141 crfree(crp); 6142 return (error); 6143 } 6144 } 6145 6146 /* 6147 * Have control of ioctl mechanism. 6148 * Send down ioctl packet and wait for response. 6149 */ 6150 if (stp->sd_iocblk != (mblk_t *)-1) { 6151 freemsg(stp->sd_iocblk); 6152 } 6153 stp->sd_iocblk = NULL; 6154 6155 /* 6156 * If this is marked with 'noerror' (internal; mostly 6157 * I_{P,}{UN,}LINK), then make sure nobody else is able to get 6158 * in here by setting IOCWAITNE. 6159 */ 6160 waitflags = IOCWAIT; 6161 if (flag & STR_NOERROR) 6162 waitflags |= IOCWAITNE; 6163 6164 stp->sd_flag |= waitflags; 6165 6166 /* 6167 * Assign sequence number. 6168 */ 6169 iocbp->ioc_id = stp->sd_iocid = getiocseqno(); 6170 6171 mutex_exit(&stp->sd_lock); 6172 6173 TRACE_1(TR_FAC_STREAMS_FR, 6174 TR_STRDOIOCTL_PUT, "strdoioctl put: stp %p", stp); 6175 stream_willservice(stp); 6176 putnext(stp->sd_wrq, bp); 6177 stream_runservice(stp); 6178 6179 /* 6180 * Timed wait for acknowledgment. 
The wait time is limited by the 6181 * timeout value, which must be a positive integer (number of 6182 * milliseconds) to wait, or 0 (use default value of STRTIMOUT 6183 * milliseconds), or -1 (wait forever). This will be awakened 6184 * either by an ACK/NAK message arriving, the timer expiring, or 6185 * the timer expiring on another ioctl waiting for control of the 6186 * mechanism. 6187 */ 6188 waitioc: 6189 mutex_enter(&stp->sd_lock); 6190 6191 6192 /* 6193 * If the reply has already arrived, don't sleep. If awakened from 6194 * the sleep, fail only if the reply has not arrived by then. 6195 * Otherwise, process the reply. 6196 */ 6197 while (!stp->sd_iocblk) { 6198 clock_t cv_rval; 6199 6200 if (stp->sd_flag & errs) { 6201 error = strgeterr(stp, errs, 0); 6202 if (error != 0) { 6203 stp->sd_flag &= ~waitflags; 6204 cv_broadcast(&stp->sd_iocmonitor); 6205 mutex_exit(&stp->sd_lock); 6206 crfree(crp); 6207 return (error); 6208 } 6209 } 6210 6211 TRACE_0(TR_FAC_STREAMS_FR, 6212 TR_STRDOIOCTL_WAIT2, 6213 "strdoioctl sleeps awaiting reply"); 6214 ASSERT(error == 0); 6215 6216 cv_rval = str_cv_wait(&stp->sd_monitor, &stp->sd_lock, 6217 (strioc->ic_timout ? 6218 strioc->ic_timout * 1000 : STRTIMOUT), sigflag); 6219 6220 /* 6221 * There are four possible cases here: interrupt, timeout, 6222 * wakeup by IOCWAITNE (above), or wakeup by strrput_nondata (a 6223 * valid M_IOCTL reply). 6224 * 6225 * If we've been awakened by a STR_NOERROR ioctl on some other 6226 * thread, then sd_iocblk will still be NULL, and IOCWAITNE 6227 * will be set. Pretend as if we just timed out. Note that 6228 * this other thread waited at least STRTIMOUT before trying to 6229 * awaken our thread, so this is indistinguishable (even for 6230 * INFTIM) from the case where we failed with ETIME waiting on 6231 * IOCWAIT in the prior loop. 6232 */ 6233 if (cv_rval > 0 && !(flag & STR_NOERROR) && 6234 stp->sd_iocblk == NULL && (stp->sd_flag & IOCWAITNE)) { 6235 cv_rval = -1; 6236 } 6237 6238 /* 6239 * note: STR_NOERROR does not protect 6240 * us here.. use ic_timout < 0 6241 */ 6242 if (cv_rval <= 0) { 6243 if (cv_rval == 0) { 6244 error = EINTR; 6245 } else { 6246 error = ETIME; 6247 } 6248 /* 6249 * A message could have come in after we were scheduled 6250 * but before we were actually run. 6251 */ 6252 bp = stp->sd_iocblk; 6253 stp->sd_iocblk = NULL; 6254 if (bp != NULL) { 6255 if ((bp->b_datap->db_type == M_COPYIN) || 6256 (bp->b_datap->db_type == M_COPYOUT)) { 6257 mutex_exit(&stp->sd_lock); 6258 if (bp->b_cont) { 6259 freemsg(bp->b_cont); 6260 bp->b_cont = NULL; 6261 } 6262 bp->b_datap->db_type = M_IOCDATA; 6263 bp->b_wptr = bp->b_rptr + 6264 sizeof (struct copyresp); 6265 resp = (struct copyresp *)bp->b_rptr; 6266 resp->cp_rval = 6267 (caddr_t)1; /* failure */ 6268 stream_willservice(stp); 6269 putnext(stp->sd_wrq, bp); 6270 stream_runservice(stp); 6271 mutex_enter(&stp->sd_lock); 6272 } else { 6273 freemsg(bp); 6274 } 6275 } 6276 stp->sd_flag &= ~waitflags; 6277 cv_broadcast(&stp->sd_iocmonitor); 6278 mutex_exit(&stp->sd_lock); 6279 crfree(crp); 6280 return (error); 6281 } 6282 } 6283 bp = stp->sd_iocblk; 6284 /* 6285 * Note: it is strictly impossible to get here with sd_iocblk set to 6286 * -1. This is because the initial loop above doesn't allow any new 6287 * ioctls into the fray until all others have passed this point. 
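 * (The -1 sentinel is stored below only once an M_IOCACK or
 * M_IOCNAK has been consumed; it marks the ioctl as already
 * answered so that a late duplicate reply can be detected and
 * discarded rather than mistaken for a new one.)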
6288 */ 6289 ASSERT(bp != NULL && bp != (mblk_t *)-1); 6290 TRACE_1(TR_FAC_STREAMS_FR, 6291 TR_STRDOIOCTL_ACK, "strdoioctl got reply: bp %p", bp); 6292 if ((bp->b_datap->db_type == M_IOCACK) || 6293 (bp->b_datap->db_type == M_IOCNAK)) { 6294 /* for detection of duplicate ioctl replies */ 6295 stp->sd_iocblk = (mblk_t *)-1; 6296 stp->sd_flag &= ~waitflags; 6297 cv_broadcast(&stp->sd_iocmonitor); 6298 mutex_exit(&stp->sd_lock); 6299 } else { 6300 /* 6301 * flags not cleared here because we're still doing 6302 * copy in/out for ioctl. 6303 */ 6304 stp->sd_iocblk = NULL; 6305 mutex_exit(&stp->sd_lock); 6306 } 6307 6308 6309 /* 6310 * Have received acknowledgment. 6311 */ 6312 6313 switch (bp->b_datap->db_type) { 6314 case M_IOCACK: 6315 /* 6316 * Positive ack. 6317 */ 6318 iocbp = (struct iocblk *)bp->b_rptr; 6319 6320 /* 6321 * Set error if indicated. 6322 */ 6323 if (iocbp->ioc_error) { 6324 error = iocbp->ioc_error; 6325 break; 6326 } 6327 6328 /* 6329 * Set return value. 6330 */ 6331 *rvalp = iocbp->ioc_rval; 6332 6333 /* 6334 * Data may have been returned in ACK message (ioc_count > 0). 6335 * If so, copy it out to the user's buffer. 6336 */ 6337 if (iocbp->ioc_count && !transparent) { 6338 if (error = getiocd(bp, strioc->ic_dp, copyflag)) 6339 break; 6340 } 6341 if (!transparent) { 6342 if (len) /* an M_COPYOUT was used with I_STR */ 6343 strioc->ic_len = len; 6344 else 6345 strioc->ic_len = (int)iocbp->ioc_count; 6346 } 6347 break; 6348 6349 case M_IOCNAK: 6350 /* 6351 * Negative ack. 6352 * 6353 * The only thing to do is set error as specified 6354 * in neg ack packet. 6355 */ 6356 iocbp = (struct iocblk *)bp->b_rptr; 6357 6358 error = (iocbp->ioc_error ? iocbp->ioc_error : EINVAL); 6359 break; 6360 6361 case M_COPYIN: 6362 /* 6363 * Driver or module has requested user ioctl data. 6364 */ 6365 reqp = (struct copyreq *)bp->b_rptr; 6366 6367 /* 6368 * M_COPYIN should *never* have a message attached, though 6369 * it's harmless if it does -- thus, panic on a DEBUG 6370 * kernel and just free it on a non-DEBUG build. 6371 */ 6372 ASSERT(bp->b_cont == NULL); 6373 if (bp->b_cont != NULL) { 6374 freemsg(bp->b_cont); 6375 bp->b_cont = NULL; 6376 } 6377 6378 error = putiocd(bp, reqp->cq_addr, flag, crp); 6379 if (error && bp->b_cont) { 6380 freemsg(bp->b_cont); 6381 bp->b_cont = NULL; 6382 } 6383 6384 bp->b_wptr = bp->b_rptr + sizeof (struct copyresp); 6385 bp->b_datap->db_type = M_IOCDATA; 6386 6387 mblk_setcred(bp, crp, curproc->p_pid); 6388 resp = (struct copyresp *)bp->b_rptr; 6389 resp->cp_rval = (caddr_t)(uintptr_t)error; 6390 resp->cp_flag = (fflags & FMODELS); 6391 6392 stream_willservice(stp); 6393 putnext(stp->sd_wrq, bp); 6394 stream_runservice(stp); 6395 6396 if (error) { 6397 mutex_enter(&stp->sd_lock); 6398 stp->sd_flag &= ~waitflags; 6399 cv_broadcast(&stp->sd_iocmonitor); 6400 mutex_exit(&stp->sd_lock); 6401 crfree(crp); 6402 return (error); 6403 } 6404 6405 goto waitioc; 6406 6407 case M_COPYOUT: 6408 /* 6409 * Driver or module has ioctl data for a user. 
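 * The attached b_cont data is copied out to cq_addr and the same
 * message is then turned around as an M_IOCDATA whose cp_rval
 * reports the success or failure of that copy, after which, barring
 * an error, we resume waiting for the next request or the final
 * ACK/NAK.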
6410 */ 6411 reqp = (struct copyreq *)bp->b_rptr; 6412 ASSERT(bp->b_cont != NULL); 6413 6414 /* 6415 * Always (transparent or non-transparent ) 6416 * use the address specified in the request 6417 */ 6418 taddr = reqp->cq_addr; 6419 if (!transparent) 6420 len = (int)reqp->cq_size; 6421 6422 /* copyout data to the provided address */ 6423 error = getiocd(bp, taddr, copyflag); 6424 6425 freemsg(bp->b_cont); 6426 bp->b_cont = NULL; 6427 6428 bp->b_wptr = bp->b_rptr + sizeof (struct copyresp); 6429 bp->b_datap->db_type = M_IOCDATA; 6430 6431 mblk_setcred(bp, crp, curproc->p_pid); 6432 resp = (struct copyresp *)bp->b_rptr; 6433 resp->cp_rval = (caddr_t)(uintptr_t)error; 6434 resp->cp_flag = (fflags & FMODELS); 6435 6436 stream_willservice(stp); 6437 putnext(stp->sd_wrq, bp); 6438 stream_runservice(stp); 6439 6440 if (error) { 6441 mutex_enter(&stp->sd_lock); 6442 stp->sd_flag &= ~waitflags; 6443 cv_broadcast(&stp->sd_iocmonitor); 6444 mutex_exit(&stp->sd_lock); 6445 crfree(crp); 6446 return (error); 6447 } 6448 goto waitioc; 6449 6450 default: 6451 ASSERT(0); 6452 mutex_enter(&stp->sd_lock); 6453 stp->sd_flag &= ~waitflags; 6454 cv_broadcast(&stp->sd_iocmonitor); 6455 mutex_exit(&stp->sd_lock); 6456 break; 6457 } 6458 6459 freemsg(bp); 6460 crfree(crp); 6461 return (error); 6462 } 6463 6464 /* 6465 * Send an M_CMD message downstream and wait for a reply. This is a ptools 6466 * special used to retrieve information from modules/drivers a stream without 6467 * being subjected to flow control or interfering with pending messages on the 6468 * stream (e.g. an ioctl in flight). 6469 */ 6470 int 6471 strdocmd(struct stdata *stp, struct strcmd *scp, cred_t *crp) 6472 { 6473 mblk_t *mp; 6474 struct cmdblk *cmdp; 6475 int error = 0; 6476 int errs = STRHUP|STRDERR|STWRERR|STPLEX; 6477 clock_t rval, timeout = STRTIMOUT; 6478 6479 if (scp->sc_len < 0 || scp->sc_len > sizeof (scp->sc_buf) || 6480 scp->sc_timeout < -1) 6481 return (EINVAL); 6482 6483 if (scp->sc_timeout > 0) 6484 timeout = scp->sc_timeout * MILLISEC; 6485 6486 if ((mp = allocb_cred(sizeof (struct cmdblk), crp, 6487 curproc->p_pid)) == NULL) 6488 return (ENOMEM); 6489 6490 crhold(crp); 6491 6492 cmdp = (struct cmdblk *)mp->b_wptr; 6493 cmdp->cb_cr = crp; 6494 cmdp->cb_cmd = scp->sc_cmd; 6495 cmdp->cb_len = scp->sc_len; 6496 cmdp->cb_error = 0; 6497 mp->b_wptr += sizeof (struct cmdblk); 6498 6499 DB_TYPE(mp) = M_CMD; 6500 DB_CPID(mp) = curproc->p_pid; 6501 6502 /* 6503 * Copy in the payload. 6504 */ 6505 if (cmdp->cb_len > 0) { 6506 mp->b_cont = allocb_cred(sizeof (scp->sc_buf), crp, 6507 curproc->p_pid); 6508 if (mp->b_cont == NULL) { 6509 error = ENOMEM; 6510 goto out; 6511 } 6512 6513 /* cb_len comes from sc_len, which has already been checked */ 6514 ASSERT(cmdp->cb_len <= sizeof (scp->sc_buf)); 6515 (void) bcopy(scp->sc_buf, mp->b_cont->b_wptr, cmdp->cb_len); 6516 mp->b_cont->b_wptr += cmdp->cb_len; 6517 DB_CPID(mp->b_cont) = curproc->p_pid; 6518 } 6519 6520 /* 6521 * Since this mechanism is strictly for ptools, and since only one 6522 * process can be grabbed at a time, we simply fail if there's 6523 * currently an operation pending. 6524 */ 6525 mutex_enter(&stp->sd_lock); 6526 if (stp->sd_flag & STRCMDWAIT) { 6527 mutex_exit(&stp->sd_lock); 6528 error = EBUSY; 6529 goto out; 6530 } 6531 stp->sd_flag |= STRCMDWAIT; 6532 ASSERT(stp->sd_cmdblk == NULL); 6533 mutex_exit(&stp->sd_lock); 6534 6535 putnext(stp->sd_wrq, mp); 6536 mp = NULL; 6537 6538 /* 6539 * Timed wait for acknowledgment. If the reply has already arrived, 6540 * don't sleep. 
If awakened from the sleep, fail only if the reply 6541 * has not arrived by then. Otherwise, process the reply. 6542 */ 6543 mutex_enter(&stp->sd_lock); 6544 while (stp->sd_cmdblk == NULL) { 6545 if (stp->sd_flag & errs) { 6546 if ((error = strgeterr(stp, errs, 0)) != 0) 6547 goto waitout; 6548 } 6549 6550 rval = str_cv_wait(&stp->sd_monitor, &stp->sd_lock, timeout, 0); 6551 if (stp->sd_cmdblk != NULL) 6552 break; 6553 6554 if (rval <= 0) { 6555 error = (rval == 0) ? EINTR : ETIME; 6556 goto waitout; 6557 } 6558 } 6559 6560 /* 6561 * We received a reply. 6562 */ 6563 mp = stp->sd_cmdblk; 6564 stp->sd_cmdblk = NULL; 6565 ASSERT(mp != NULL && DB_TYPE(mp) == M_CMD); 6566 ASSERT(stp->sd_flag & STRCMDWAIT); 6567 stp->sd_flag &= ~STRCMDWAIT; 6568 mutex_exit(&stp->sd_lock); 6569 6570 cmdp = (struct cmdblk *)mp->b_rptr; 6571 if ((error = cmdp->cb_error) != 0) 6572 goto out; 6573 6574 /* 6575 * Data may have been returned in the reply (cb_len > 0). 6576 * If so, copy it out to the user's buffer. 6577 */ 6578 if (cmdp->cb_len > 0) { 6579 if (mp->b_cont == NULL || MBLKL(mp->b_cont) < cmdp->cb_len) { 6580 error = EPROTO; 6581 goto out; 6582 } 6583 6584 cmdp->cb_len = MIN(cmdp->cb_len, sizeof (scp->sc_buf)); 6585 (void) bcopy(mp->b_cont->b_rptr, scp->sc_buf, cmdp->cb_len); 6586 } 6587 scp->sc_len = cmdp->cb_len; 6588 out: 6589 freemsg(mp); 6590 crfree(crp); 6591 return (error); 6592 waitout: 6593 ASSERT(stp->sd_cmdblk == NULL); 6594 stp->sd_flag &= ~STRCMDWAIT; 6595 mutex_exit(&stp->sd_lock); 6596 crfree(crp); 6597 return (error); 6598 } 6599 6600 /* 6601 * For the SunOS keyboard driver. 6602 * Return the next available "ioctl" sequence number. 6603 * Exported, so that streams modules can send "ioctl" messages 6604 * downstream from their open routine. 6605 */ 6606 int 6607 getiocseqno(void) 6608 { 6609 int i; 6610 6611 mutex_enter(&strresources); 6612 i = ++ioc_id; 6613 mutex_exit(&strresources); 6614 return (i); 6615 } 6616 6617 /* 6618 * Get the next message from the read queue. If the message is 6619 * priority, STRPRI will have been set by strrput(). This flag 6620 * should be reset only when the entire message at the front of the 6621 * queue as been consumed. 6622 * 6623 * NOTE: strgetmsg and kstrgetmsg have much of the logic in common. 
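 *
 * strgetmsg() is the kernel half of getmsg(2).  A minimal sketch of
 * the caller's view (illustrative only; "fd" is assumed to be an open
 * stream):
 *
 *        struct strbuf ctl, dat;
 *        char cbuf[64], dbuf[1024];
 *        int flags = 0, rc;
 *
 *        ctl.maxlen = sizeof (cbuf);     ctl.buf = cbuf;
 *        dat.maxlen = sizeof (dbuf);     dat.buf = dbuf;
 *        rc = getmsg(fd, &ctl, &dat, &flags);
 *
 * A non-negative return may have MORECTL and/or MOREDATA set when the
 * corresponding part did not fit; ctl.len or dat.len is -1 when that
 * part was absent, and both are 0 on hangup or EOF.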
6624 */ 6625 int 6626 strgetmsg( 6627 struct vnode *vp, 6628 struct strbuf *mctl, 6629 struct strbuf *mdata, 6630 unsigned char *prip, 6631 int *flagsp, 6632 int fmode, 6633 rval_t *rvp) 6634 { 6635 struct stdata *stp; 6636 mblk_t *bp, *nbp; 6637 mblk_t *savemp = NULL; 6638 mblk_t *savemptail = NULL; 6639 uint_t old_sd_flag; 6640 int flg = MSG_BAND; 6641 int more = 0; 6642 int error = 0; 6643 char first = 1; 6644 uint_t mark; /* Contains MSG*MARK and _LASTMARK */ 6645 #define _LASTMARK 0x8000 /* Distinct from MSG*MARK */ 6646 unsigned char pri = 0; 6647 queue_t *q; 6648 int pr = 0; /* Partial read successful */ 6649 struct uio uios; 6650 struct uio *uiop = &uios; 6651 struct iovec iovs; 6652 unsigned char type; 6653 6654 TRACE_1(TR_FAC_STREAMS_FR, TR_STRGETMSG_ENTER, 6655 "strgetmsg:%p", vp); 6656 6657 ASSERT(vp->v_stream); 6658 stp = vp->v_stream; 6659 rvp->r_val1 = 0; 6660 6661 mutex_enter(&stp->sd_lock); 6662 6663 if ((error = i_straccess(stp, JCREAD)) != 0) { 6664 mutex_exit(&stp->sd_lock); 6665 return (error); 6666 } 6667 6668 if (stp->sd_flag & (STRDERR|STPLEX)) { 6669 error = strgeterr(stp, STRDERR|STPLEX, 0); 6670 if (error != 0) { 6671 mutex_exit(&stp->sd_lock); 6672 return (error); 6673 } 6674 } 6675 mutex_exit(&stp->sd_lock); 6676 6677 switch (*flagsp) { 6678 case MSG_HIPRI: 6679 if (*prip != 0) 6680 return (EINVAL); 6681 break; 6682 6683 case MSG_ANY: 6684 case MSG_BAND: 6685 break; 6686 6687 default: 6688 return (EINVAL); 6689 } 6690 /* 6691 * Setup uio and iov for data part 6692 */ 6693 iovs.iov_base = mdata->buf; 6694 iovs.iov_len = mdata->maxlen; 6695 uios.uio_iov = &iovs; 6696 uios.uio_iovcnt = 1; 6697 uios.uio_loffset = 0; 6698 uios.uio_segflg = UIO_USERSPACE; 6699 uios.uio_fmode = 0; 6700 uios.uio_extflg = UIO_COPY_CACHED; 6701 uios.uio_resid = mdata->maxlen; 6702 uios.uio_offset = 0; 6703 6704 q = _RD(stp->sd_wrq); 6705 mutex_enter(&stp->sd_lock); 6706 old_sd_flag = stp->sd_flag; 6707 mark = 0; 6708 for (;;) { 6709 int done = 0; 6710 mblk_t *q_first = q->q_first; 6711 6712 /* 6713 * Get the next message of appropriate priority 6714 * from the stream head. If the caller is interested 6715 * in band or hipri messages, then they should already 6716 * be enqueued at the stream head. On the other hand 6717 * if the caller wants normal (band 0) messages, they 6718 * might be deferred in a synchronous stream and they 6719 * will need to be pulled up. 6720 * 6721 * After we have dequeued a message, we might find that 6722 * it was a deferred M_SIG that was enqueued at the 6723 * stream head. It must now be posted as part of the 6724 * read by calling strsignal_nolock(). 6725 * 6726 * Also note that strrput does not enqueue an M_PCSIG, 6727 * and there cannot be more than one hipri message, 6728 * so there was no need to have the M_PCSIG case. 6729 * 6730 * At some time it might be nice to try and wrap the 6731 * functionality of kstrgetmsg() and strgetmsg() into 6732 * a common routine so to reduce the amount of replicated 6733 * code (since they are extremely similar). 
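 *
 * The MSG_HIPRI and MSG_BAND cases below implement the band selection
 * that getpmsg(2) exposes; e.g. (illustrative sketch only, reusing the
 * ctl/dat buffers from a getmsg-style setup):
 *
 *        int band = 1, flags = MSG_BAND;
 *
 *        rc = getpmsg(fd, &ctl, &dat, &band, &flags);
 *
 * retrieves the next message from band 1 or above (or a pending
 * high-priority message); on return "flags" is MSG_HIPRI or MSG_BAND
 * and "band" holds the band of the message actually read, mirroring
 * the *flagsp and *prip updates at the end of this routine.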
6734 */ 6735 if (!(*flagsp & (MSG_HIPRI|MSG_BAND))) { 6736 /* Asking for normal, band0 data */ 6737 bp = strget(stp, q, uiop, first, &error); 6738 ASSERT(MUTEX_HELD(&stp->sd_lock)); 6739 if (bp != NULL) { 6740 if (DB_TYPE(bp) == M_SIG) { 6741 strsignal_nolock(stp, *bp->b_rptr, 6742 bp->b_band); 6743 freemsg(bp); 6744 continue; 6745 } else { 6746 break; 6747 } 6748 } 6749 if (error != 0) 6750 goto getmout; 6751 6752 /* 6753 * We can't depend on the value of STRPRI here because 6754 * the stream head may be in transit. Therefore, we 6755 * must look at the type of the first message to 6756 * determine if a high priority messages is waiting 6757 */ 6758 } else if ((*flagsp & MSG_HIPRI) && q_first != NULL && 6759 DB_TYPE(q_first) >= QPCTL && 6760 (bp = getq_noenab(q, 0)) != NULL) { 6761 /* Asked for HIPRI and got one */ 6762 ASSERT(DB_TYPE(bp) >= QPCTL); 6763 break; 6764 } else if ((*flagsp & MSG_BAND) && q_first != NULL && 6765 ((q_first->b_band >= *prip) || DB_TYPE(q_first) >= QPCTL) && 6766 (bp = getq_noenab(q, 0)) != NULL) { 6767 /* 6768 * Asked for at least band "prip" and got either at 6769 * least that band or a hipri message. 6770 */ 6771 ASSERT(bp->b_band >= *prip || DB_TYPE(bp) >= QPCTL); 6772 if (DB_TYPE(bp) == M_SIG) { 6773 strsignal_nolock(stp, *bp->b_rptr, bp->b_band); 6774 freemsg(bp); 6775 continue; 6776 } else { 6777 break; 6778 } 6779 } 6780 6781 /* No data. Time to sleep? */ 6782 qbackenable(q, 0); 6783 6784 /* 6785 * If STRHUP or STREOF, return 0 length control and data. 6786 * If resid is 0, then a read(fd,buf,0) was done. Do not 6787 * sleep to satisfy this request because by default we have 6788 * zero bytes to return. 6789 */ 6790 if ((stp->sd_flag & (STRHUP|STREOF)) || (mctl->maxlen == 0 && 6791 mdata->maxlen == 0)) { 6792 mctl->len = mdata->len = 0; 6793 *flagsp = 0; 6794 mutex_exit(&stp->sd_lock); 6795 return (0); 6796 } 6797 TRACE_2(TR_FAC_STREAMS_FR, TR_STRGETMSG_WAIT, 6798 "strgetmsg calls strwaitq:%p, %p", 6799 vp, uiop); 6800 if (((error = strwaitq(stp, GETWAIT, (ssize_t)0, fmode, -1, 6801 &done)) != 0) || done) { 6802 TRACE_2(TR_FAC_STREAMS_FR, TR_STRGETMSG_DONE, 6803 "strgetmsg error or done:%p, %p", 6804 vp, uiop); 6805 mutex_exit(&stp->sd_lock); 6806 return (error); 6807 } 6808 TRACE_2(TR_FAC_STREAMS_FR, TR_STRGETMSG_AWAKE, 6809 "strgetmsg awakes:%p, %p", vp, uiop); 6810 if ((error = i_straccess(stp, JCREAD)) != 0) { 6811 mutex_exit(&stp->sd_lock); 6812 return (error); 6813 } 6814 first = 0; 6815 } 6816 ASSERT(bp != NULL); 6817 /* 6818 * Extract any mark information. If the message is not completely 6819 * consumed this information will be put in the mblk 6820 * that is putback. 6821 * If MSGMARKNEXT is set and the message is completely consumed 6822 * the STRATMARK flag will be set below. Likewise, if 6823 * MSGNOTMARKNEXT is set and the message is 6824 * completely consumed STRNOTATMARK will be set. 
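 *
 * STRATMARK/STRNOTATMARK is what SIOCATMARK reports; sockfs relies on
 * it for TCP out-of-band data.  A consumer checking the mark might do
 * (illustrative sketch only):
 *
 *        int atmark;
 *
 *        if (ioctl(fd, SIOCATMARK, &atmark) == 0 && atmark)
 *                ... the read pointer is at the mark ...
 *
 * The streamio I_ATMARK ioctl (ANYMARK/LASTMARK) instead tests the
 * MSGMARK flag on the message at the front of the queue.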
6825 */ 6826 mark = bp->b_flag & (MSGMARK | MSGMARKNEXT | MSGNOTMARKNEXT); 6827 ASSERT((mark & (MSGMARKNEXT|MSGNOTMARKNEXT)) != 6828 (MSGMARKNEXT|MSGNOTMARKNEXT)); 6829 if (mark != 0 && bp == stp->sd_mark) { 6830 mark |= _LASTMARK; 6831 stp->sd_mark = NULL; 6832 } 6833 /* 6834 * keep track of the original message type and priority 6835 */ 6836 pri = bp->b_band; 6837 type = bp->b_datap->db_type; 6838 if (type == M_PASSFP) { 6839 if ((mark & _LASTMARK) && (stp->sd_mark == NULL)) 6840 stp->sd_mark = bp; 6841 bp->b_flag |= mark & ~_LASTMARK; 6842 putback(stp, q, bp, pri); 6843 qbackenable(q, pri); 6844 mutex_exit(&stp->sd_lock); 6845 return (EBADMSG); 6846 } 6847 ASSERT(type != M_SIG); 6848 6849 /* 6850 * Set this flag so strrput will not generate signals. Need to 6851 * make sure this flag is cleared before leaving this routine 6852 * else signals will stop being sent. 6853 */ 6854 stp->sd_flag |= STRGETINPROG; 6855 mutex_exit(&stp->sd_lock); 6856 6857 if (STREAM_NEEDSERVICE(stp)) 6858 stream_runservice(stp); 6859 6860 /* 6861 * Set HIPRI flag if message is priority. 6862 */ 6863 if (type >= QPCTL) 6864 flg = MSG_HIPRI; 6865 else 6866 flg = MSG_BAND; 6867 6868 /* 6869 * First process PROTO or PCPROTO blocks, if any. 6870 */ 6871 if (mctl->maxlen >= 0 && type != M_DATA) { 6872 size_t n, bcnt; 6873 char *ubuf; 6874 6875 bcnt = mctl->maxlen; 6876 ubuf = mctl->buf; 6877 while (bp != NULL && bp->b_datap->db_type != M_DATA) { 6878 if ((n = MIN(bcnt, bp->b_wptr - bp->b_rptr)) != 0 && 6879 copyout(bp->b_rptr, ubuf, n)) { 6880 error = EFAULT; 6881 mutex_enter(&stp->sd_lock); 6882 /* 6883 * clear stream head pri flag based on 6884 * first message type 6885 */ 6886 if (type >= QPCTL) { 6887 ASSERT(type == M_PCPROTO); 6888 stp->sd_flag &= ~STRPRI; 6889 } 6890 more = 0; 6891 freemsg(bp); 6892 goto getmout; 6893 } 6894 ubuf += n; 6895 bp->b_rptr += n; 6896 if (bp->b_rptr >= bp->b_wptr) { 6897 nbp = bp; 6898 bp = bp->b_cont; 6899 freeb(nbp); 6900 } 6901 ASSERT(n <= bcnt); 6902 bcnt -= n; 6903 if (bcnt == 0) 6904 break; 6905 } 6906 mctl->len = mctl->maxlen - bcnt; 6907 } else 6908 mctl->len = -1; 6909 6910 if (bp && bp->b_datap->db_type != M_DATA) { 6911 /* 6912 * More PROTO blocks in msg. 6913 */ 6914 more |= MORECTL; 6915 savemp = bp; 6916 while (bp && bp->b_datap->db_type != M_DATA) { 6917 savemptail = bp; 6918 bp = bp->b_cont; 6919 } 6920 savemptail->b_cont = NULL; 6921 } 6922 6923 /* 6924 * Now process DATA blocks, if any. 6925 */ 6926 if (mdata->maxlen >= 0 && bp) { 6927 /* 6928 * struiocopyout will consume a potential zero-length 6929 * M_DATA even if uio_resid is zero. 6930 */ 6931 size_t oldresid = uiop->uio_resid; 6932 6933 bp = struiocopyout(bp, uiop, &error); 6934 if (error != 0) { 6935 mutex_enter(&stp->sd_lock); 6936 /* 6937 * clear stream head hi pri flag based on 6938 * first message 6939 */ 6940 if (type >= QPCTL) { 6941 ASSERT(type == M_PCPROTO); 6942 stp->sd_flag &= ~STRPRI; 6943 } 6944 more = 0; 6945 freemsg(savemp); 6946 goto getmout; 6947 } 6948 /* 6949 * (pr == 1) indicates a partial read. 
6950 */ 6951 if (oldresid > uiop->uio_resid) 6952 pr = 1; 6953 mdata->len = mdata->maxlen - uiop->uio_resid; 6954 } else 6955 mdata->len = -1; 6956 6957 if (bp) { /* more data blocks in msg */ 6958 more |= MOREDATA; 6959 if (savemp) 6960 savemptail->b_cont = bp; 6961 else 6962 savemp = bp; 6963 } 6964 6965 mutex_enter(&stp->sd_lock); 6966 if (savemp) { 6967 if (pr && (savemp->b_datap->db_type == M_DATA) && 6968 msgnodata(savemp)) { 6969 /* 6970 * Avoid queuing a zero-length tail part of 6971 * a message. pr=1 indicates that we read some of 6972 * the message. 6973 */ 6974 freemsg(savemp); 6975 more &= ~MOREDATA; 6976 /* 6977 * clear stream head hi pri flag based on 6978 * first message 6979 */ 6980 if (type >= QPCTL) { 6981 ASSERT(type == M_PCPROTO); 6982 stp->sd_flag &= ~STRPRI; 6983 } 6984 } else { 6985 savemp->b_band = pri; 6986 /* 6987 * If the first message was HIPRI and the one we're 6988 * putting back isn't, then clear STRPRI, otherwise 6989 * set STRPRI again. Note that we must set STRPRI 6990 * again since the flush logic in strrput_nondata() 6991 * may have cleared it while we had sd_lock dropped. 6992 */ 6993 if (type >= QPCTL) { 6994 ASSERT(type == M_PCPROTO); 6995 if (queclass(savemp) < QPCTL) 6996 stp->sd_flag &= ~STRPRI; 6997 else 6998 stp->sd_flag |= STRPRI; 6999 } else if (queclass(savemp) >= QPCTL) { 7000 /* 7001 * The first message was not a HIPRI message, 7002 * but the one we are about to putback is. 7003 * For simplicitly, we do not allow for HIPRI 7004 * messages to be embedded in the message 7005 * body, so just force it to same type as 7006 * first message. 7007 */ 7008 ASSERT(type == M_DATA || type == M_PROTO); 7009 ASSERT(savemp->b_datap->db_type == M_PCPROTO); 7010 savemp->b_datap->db_type = type; 7011 } 7012 if (mark != 0) { 7013 savemp->b_flag |= mark & ~_LASTMARK; 7014 if ((mark & _LASTMARK) && 7015 (stp->sd_mark == NULL)) { 7016 /* 7017 * If another marked message arrived 7018 * while sd_lock was not held sd_mark 7019 * would be non-NULL. 7020 */ 7021 stp->sd_mark = savemp; 7022 } 7023 } 7024 putback(stp, q, savemp, pri); 7025 } 7026 } else { 7027 /* 7028 * The complete message was consumed. 7029 * 7030 * If another M_PCPROTO arrived while sd_lock was not held 7031 * it would have been discarded since STRPRI was still set. 7032 * 7033 * Move the MSG*MARKNEXT information 7034 * to the stream head just in case 7035 * the read queue becomes empty. 7036 * clear stream head hi pri flag based on 7037 * first message 7038 * 7039 * If the stream head was at the mark 7040 * (STRATMARK) before we dropped sd_lock above 7041 * and some data was consumed then we have 7042 * moved past the mark thus STRATMARK is 7043 * cleared. However, if a message arrived in 7044 * strrput during the copyout above causing 7045 * STRATMARK to be set we can not clear that 7046 * flag. 
7047 */ 7048 if (type >= QPCTL) { 7049 ASSERT(type == M_PCPROTO); 7050 stp->sd_flag &= ~STRPRI; 7051 } 7052 if (mark & (MSGMARKNEXT|MSGNOTMARKNEXT|MSGMARK)) { 7053 if (mark & MSGMARKNEXT) { 7054 stp->sd_flag &= ~STRNOTATMARK; 7055 stp->sd_flag |= STRATMARK; 7056 } else if (mark & MSGNOTMARKNEXT) { 7057 stp->sd_flag &= ~STRATMARK; 7058 stp->sd_flag |= STRNOTATMARK; 7059 } else { 7060 stp->sd_flag &= ~(STRATMARK|STRNOTATMARK); 7061 } 7062 } else if (pr && (old_sd_flag & STRATMARK)) { 7063 stp->sd_flag &= ~STRATMARK; 7064 } 7065 } 7066 7067 *flagsp = flg; 7068 *prip = pri; 7069 7070 /* 7071 * Getmsg cleanup processing - if the state of the queue has changed 7072 * some signals may need to be sent and/or poll awakened. 7073 */ 7074 getmout: 7075 qbackenable(q, pri); 7076 7077 /* 7078 * We dropped the stream head lock above. Send all M_SIG messages 7079 * before processing stream head for SIGPOLL messages. 7080 */ 7081 ASSERT(MUTEX_HELD(&stp->sd_lock)); 7082 while ((bp = q->q_first) != NULL && 7083 (bp->b_datap->db_type == M_SIG)) { 7084 /* 7085 * sd_lock is held so the content of the read queue can not 7086 * change. 7087 */ 7088 bp = getq(q); 7089 ASSERT(bp != NULL && bp->b_datap->db_type == M_SIG); 7090 7091 strsignal_nolock(stp, *bp->b_rptr, bp->b_band); 7092 mutex_exit(&stp->sd_lock); 7093 freemsg(bp); 7094 if (STREAM_NEEDSERVICE(stp)) 7095 stream_runservice(stp); 7096 mutex_enter(&stp->sd_lock); 7097 } 7098 7099 /* 7100 * stream head cannot change while we make the determination 7101 * whether or not to send a signal. Drop the flag to allow strrput 7102 * to send firstmsgsigs again. 7103 */ 7104 stp->sd_flag &= ~STRGETINPROG; 7105 7106 /* 7107 * If the type of message at the front of the queue changed 7108 * due to the receive the appropriate signals and pollwakeup events 7109 * are generated. The type of changes are: 7110 * Processed a hipri message, q_first is not hipri. 7111 * Processed a band X message, and q_first is band Y. 7112 * The generated signals and pollwakeups are identical to what 7113 * strrput() generates should the message that is now on q_first 7114 * arrive to an empty read queue. 7115 * 7116 * Note: only strrput will send a signal for a hipri message. 7117 */ 7118 if ((bp = q->q_first) != NULL && !(stp->sd_flag & STRPRI)) { 7119 strsigset_t signals = 0; 7120 strpollset_t pollwakeups = 0; 7121 7122 if (flg & MSG_HIPRI) { 7123 /* 7124 * Removed a hipri message. Regular data at 7125 * the front of the queue. 7126 */ 7127 if (bp->b_band == 0) { 7128 signals = S_INPUT | S_RDNORM; 7129 pollwakeups = POLLIN | POLLRDNORM; 7130 } else { 7131 signals = S_INPUT | S_RDBAND; 7132 pollwakeups = POLLIN | POLLRDBAND; 7133 } 7134 } else if (pri != bp->b_band) { 7135 /* 7136 * The band is different for the new q_first. 
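 *
 * These signals only reach processes that registered interest with
 * I_SETSIG; e.g. a consumer wanting SIGPOLL on incoming data would
 * have done (illustrative sketch only):
 *
 *        sigset(SIGPOLL, handler);
 *        if (ioctl(fd, I_SETSIG, S_INPUT | S_RDNORM | S_RDBAND) < 0)
 *                ... error ...
 *
 * which is what populates the sd_siglist/sd_sigflags consulted below
 * by strsendsig().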
7137 */ 7138 if (bp->b_band == 0) { 7139 signals = S_RDNORM; 7140 pollwakeups = POLLIN | POLLRDNORM; 7141 } else { 7142 signals = S_RDBAND; 7143 pollwakeups = POLLIN | POLLRDBAND; 7144 } 7145 } 7146 7147 if (pollwakeups != 0) { 7148 if (pollwakeups == (POLLIN | POLLRDNORM)) { 7149 if (!(stp->sd_rput_opt & SR_POLLIN)) 7150 goto no_pollwake; 7151 stp->sd_rput_opt &= ~SR_POLLIN; 7152 } 7153 mutex_exit(&stp->sd_lock); 7154 pollwakeup(&stp->sd_pollist, pollwakeups); 7155 mutex_enter(&stp->sd_lock); 7156 } 7157 no_pollwake: 7158 7159 if (stp->sd_sigflags & signals) 7160 strsendsig(stp->sd_siglist, signals, bp->b_band, 0); 7161 } 7162 mutex_exit(&stp->sd_lock); 7163 7164 rvp->r_val1 = more; 7165 return (error); 7166 #undef _LASTMARK 7167 } 7168 7169 /* 7170 * Get the next message from the read queue. If the message is 7171 * priority, STRPRI will have been set by strrput(). This flag 7172 * should be reset only when the entire message at the front of the 7173 * queue as been consumed. 7174 * 7175 * If uiop is NULL all data is returned in mctlp. 7176 * Note that a NULL uiop implies that FNDELAY and FNONBLOCK are assumed 7177 * not enabled. 7178 * The timeout parameter is in milliseconds; -1 for infinity. 7179 * This routine handles the consolidation private flags: 7180 * MSG_IGNERROR Ignore any stream head error except STPLEX. 7181 * MSG_DELAYERROR Defer the error check until the queue is empty. 7182 * MSG_HOLDSIG Hold signals while waiting for data. 7183 * MSG_IPEEK Only peek at messages. 7184 * MSG_DISCARDTAIL Discard the tail M_DATA part of the message 7185 * that doesn't fit. 7186 * MSG_NOMARK If the message is marked leave it on the queue. 7187 * 7188 * NOTE: strgetmsg and kstrgetmsg have much of the logic in common. 7189 */ 7190 int 7191 kstrgetmsg( 7192 struct vnode *vp, 7193 mblk_t **mctlp, 7194 struct uio *uiop, 7195 unsigned char *prip, 7196 int *flagsp, 7197 clock_t timout, 7198 rval_t *rvp) 7199 { 7200 struct stdata *stp; 7201 mblk_t *bp, *nbp; 7202 mblk_t *savemp = NULL; 7203 mblk_t *savemptail = NULL; 7204 int flags; 7205 uint_t old_sd_flag; 7206 int flg = MSG_BAND; 7207 int more = 0; 7208 int error = 0; 7209 char first = 1; 7210 uint_t mark; /* Contains MSG*MARK and _LASTMARK */ 7211 #define _LASTMARK 0x8000 /* Distinct from MSG*MARK */ 7212 unsigned char pri = 0; 7213 queue_t *q; 7214 int pr = 0; /* Partial read successful */ 7215 unsigned char type; 7216 7217 TRACE_1(TR_FAC_STREAMS_FR, TR_KSTRGETMSG_ENTER, 7218 "kstrgetmsg:%p", vp); 7219 7220 ASSERT(vp->v_stream); 7221 stp = vp->v_stream; 7222 rvp->r_val1 = 0; 7223 7224 mutex_enter(&stp->sd_lock); 7225 7226 if ((error = i_straccess(stp, JCREAD)) != 0) { 7227 mutex_exit(&stp->sd_lock); 7228 return (error); 7229 } 7230 7231 flags = *flagsp; 7232 if (stp->sd_flag & (STRDERR|STPLEX)) { 7233 if ((stp->sd_flag & STPLEX) || 7234 (flags & (MSG_IGNERROR|MSG_DELAYERROR)) == 0) { 7235 error = strgeterr(stp, STRDERR|STPLEX, 7236 (flags & MSG_IPEEK)); 7237 if (error != 0) { 7238 mutex_exit(&stp->sd_lock); 7239 return (error); 7240 } 7241 } 7242 } 7243 mutex_exit(&stp->sd_lock); 7244 7245 switch (flags & (MSG_HIPRI|MSG_ANY|MSG_BAND)) { 7246 case MSG_HIPRI: 7247 if (*prip != 0) 7248 return (EINVAL); 7249 break; 7250 7251 case MSG_ANY: 7252 case MSG_BAND: 7253 break; 7254 7255 default: 7256 return (EINVAL); 7257 } 7258 7259 retry: 7260 q = _RD(stp->sd_wrq); 7261 mutex_enter(&stp->sd_lock); 7262 old_sd_flag = stp->sd_flag; 7263 mark = 0; 7264 for (;;) { 7265 int done = 0; 7266 int waitflag; 7267 int fmode; 7268 mblk_t *q_first = q->q_first; 7269 
7270 /* 7271 * This section of the code operates just like the code 7272 * in strgetmsg(). There is a comment there about what 7273 * is going on here. 7274 */ 7275 if (!(flags & (MSG_HIPRI|MSG_BAND))) { 7276 /* Asking for normal, band0 data */ 7277 bp = strget(stp, q, uiop, first, &error); 7278 ASSERT(MUTEX_HELD(&stp->sd_lock)); 7279 if (bp != NULL) { 7280 if (DB_TYPE(bp) == M_SIG) { 7281 strsignal_nolock(stp, *bp->b_rptr, 7282 bp->b_band); 7283 freemsg(bp); 7284 continue; 7285 } else { 7286 break; 7287 } 7288 } 7289 if (error != 0) { 7290 goto getmout; 7291 } 7292 /* 7293 * We can't depend on the value of STRPRI here because 7294 * the stream head may be in transit. Therefore, we 7295 * must look at the type of the first message to 7296 * determine if a high priority messages is waiting 7297 */ 7298 } else if ((flags & MSG_HIPRI) && q_first != NULL && 7299 DB_TYPE(q_first) >= QPCTL && 7300 (bp = getq_noenab(q, 0)) != NULL) { 7301 ASSERT(DB_TYPE(bp) >= QPCTL); 7302 break; 7303 } else if ((flags & MSG_BAND) && q_first != NULL && 7304 ((q_first->b_band >= *prip) || DB_TYPE(q_first) >= QPCTL) && 7305 (bp = getq_noenab(q, 0)) != NULL) { 7306 /* 7307 * Asked for at least band "prip" and got either at 7308 * least that band or a hipri message. 7309 */ 7310 ASSERT(bp->b_band >= *prip || DB_TYPE(bp) >= QPCTL); 7311 if (DB_TYPE(bp) == M_SIG) { 7312 strsignal_nolock(stp, *bp->b_rptr, bp->b_band); 7313 freemsg(bp); 7314 continue; 7315 } else { 7316 break; 7317 } 7318 } 7319 7320 /* No data. Time to sleep? */ 7321 qbackenable(q, 0); 7322 7323 /* 7324 * Delayed error notification? 7325 */ 7326 if ((stp->sd_flag & (STRDERR|STPLEX)) && 7327 (flags & (MSG_IGNERROR|MSG_DELAYERROR)) == MSG_DELAYERROR) { 7328 error = strgeterr(stp, STRDERR|STPLEX, 7329 (flags & MSG_IPEEK)); 7330 if (error != 0) { 7331 mutex_exit(&stp->sd_lock); 7332 return (error); 7333 } 7334 } 7335 7336 /* 7337 * If STRHUP or STREOF, return 0 length control and data. 7338 * If a read(fd,buf,0) has been done, do not sleep, just 7339 * return. 7340 * 7341 * If mctlp == NULL and uiop == NULL, then the code will 7342 * do the strwaitq. This is an understood way of saying 7343 * sleep "polling" until a message is received. 7344 */ 7345 if ((stp->sd_flag & (STRHUP|STREOF)) || 7346 (uiop != NULL && uiop->uio_resid == 0)) { 7347 if (mctlp != NULL) 7348 *mctlp = NULL; 7349 *flagsp = 0; 7350 mutex_exit(&stp->sd_lock); 7351 return (0); 7352 } 7353 7354 waitflag = GETWAIT; 7355 if (flags & 7356 (MSG_HOLDSIG|MSG_IGNERROR|MSG_IPEEK|MSG_DELAYERROR)) { 7357 if (flags & MSG_HOLDSIG) 7358 waitflag |= STR_NOSIG; 7359 if (flags & MSG_IGNERROR) 7360 waitflag |= STR_NOERROR; 7361 if (flags & MSG_IPEEK) 7362 waitflag |= STR_PEEK; 7363 if (flags & MSG_DELAYERROR) 7364 waitflag |= STR_DELAYERR; 7365 } 7366 if (uiop != NULL) 7367 fmode = uiop->uio_fmode; 7368 else 7369 fmode = 0; 7370 7371 TRACE_2(TR_FAC_STREAMS_FR, TR_KSTRGETMSG_WAIT, 7372 "kstrgetmsg calls strwaitq:%p, %p", 7373 vp, uiop); 7374 if (((error = strwaitq(stp, waitflag, (ssize_t)0, 7375 fmode, timout, &done))) != 0 || done) { 7376 TRACE_2(TR_FAC_STREAMS_FR, TR_KSTRGETMSG_DONE, 7377 "kstrgetmsg error or done:%p, %p", 7378 vp, uiop); 7379 mutex_exit(&stp->sd_lock); 7380 return (error); 7381 } 7382 TRACE_2(TR_FAC_STREAMS_FR, TR_KSTRGETMSG_AWAKE, 7383 "kstrgetmsg awakes:%p, %p", vp, uiop); 7384 if ((error = i_straccess(stp, JCREAD)) != 0) { 7385 mutex_exit(&stp->sd_lock); 7386 return (error); 7387 } 7388 first = 0; 7389 } 7390 ASSERT(bp != NULL); 7391 /* 7392 * Extract any mark information. 
If the message is not completely 7393 * consumed this information will be put in the mblk 7394 * that is putback. 7395 * If MSGMARKNEXT is set and the message is completely consumed 7396 * the STRATMARK flag will be set below. Likewise, if 7397 * MSGNOTMARKNEXT is set and the message is 7398 * completely consumed STRNOTATMARK will be set. 7399 */ 7400 mark = bp->b_flag & (MSGMARK | MSGMARKNEXT | MSGNOTMARKNEXT); 7401 ASSERT((mark & (MSGMARKNEXT|MSGNOTMARKNEXT)) != 7402 (MSGMARKNEXT|MSGNOTMARKNEXT)); 7403 pri = bp->b_band; 7404 if (mark != 0) { 7405 /* 7406 * If the caller doesn't want the mark return. 7407 * Used to implement MSG_WAITALL in sockets. 7408 */ 7409 if (flags & MSG_NOMARK) { 7410 putback(stp, q, bp, pri); 7411 qbackenable(q, pri); 7412 mutex_exit(&stp->sd_lock); 7413 return (EWOULDBLOCK); 7414 } 7415 if (bp == stp->sd_mark) { 7416 mark |= _LASTMARK; 7417 stp->sd_mark = NULL; 7418 } 7419 } 7420 7421 /* 7422 * keep track of the first message type 7423 */ 7424 type = bp->b_datap->db_type; 7425 7426 if (bp->b_datap->db_type == M_PASSFP) { 7427 if ((mark & _LASTMARK) && (stp->sd_mark == NULL)) 7428 stp->sd_mark = bp; 7429 bp->b_flag |= mark & ~_LASTMARK; 7430 putback(stp, q, bp, pri); 7431 qbackenable(q, pri); 7432 mutex_exit(&stp->sd_lock); 7433 return (EBADMSG); 7434 } 7435 ASSERT(type != M_SIG); 7436 7437 if (flags & MSG_IPEEK) { 7438 /* 7439 * Clear any struioflag - we do the uiomove over again 7440 * when peeking since it simplifies the code. 7441 * 7442 * Dup the message and put the original back on the queue. 7443 * If dupmsg() fails, try again with copymsg() to see if 7444 * there is indeed a shortage of memory. dupmsg() may fail 7445 * if db_ref in any of the messages reaches its limit. 7446 */ 7447 7448 if ((nbp = dupmsg(bp)) == NULL && (nbp = copymsg(bp)) == NULL) { 7449 /* 7450 * Restore the state of the stream head since we 7451 * need to drop sd_lock (strwaitbuf is sleeping). 7452 */ 7453 size_t size = msgdsize(bp); 7454 7455 if ((mark & _LASTMARK) && (stp->sd_mark == NULL)) 7456 stp->sd_mark = bp; 7457 bp->b_flag |= mark & ~_LASTMARK; 7458 putback(stp, q, bp, pri); 7459 mutex_exit(&stp->sd_lock); 7460 error = strwaitbuf(size, BPRI_HI); 7461 if (error) { 7462 /* 7463 * There is no net change to the queue thus 7464 * no need to qbackenable. 7465 */ 7466 return (error); 7467 } 7468 goto retry; 7469 } 7470 7471 if ((mark & _LASTMARK) && (stp->sd_mark == NULL)) 7472 stp->sd_mark = bp; 7473 bp->b_flag |= mark & ~_LASTMARK; 7474 putback(stp, q, bp, pri); 7475 bp = nbp; 7476 } 7477 7478 /* 7479 * Set this flag so strrput will not generate signals. Need to 7480 * make sure this flag is cleared before leaving this routine 7481 * else signals will stop being sent. 7482 */ 7483 stp->sd_flag |= STRGETINPROG; 7484 mutex_exit(&stp->sd_lock); 7485 7486 if ((stp->sd_rputdatafunc != NULL) && (DB_TYPE(bp) == M_DATA)) { 7487 mblk_t *tmp, *prevmp; 7488 7489 /* 7490 * Put first non-data mblk back to stream head and 7491 * cut the mblk chain so sd_rputdatafunc only sees 7492 * M_DATA mblks. We can skip the first mblk since it 7493 * is M_DATA according to the condition above. 
7494 */ 7495 for (prevmp = bp, tmp = bp->b_cont; tmp != NULL; 7496 prevmp = tmp, tmp = tmp->b_cont) { 7497 if (DB_TYPE(tmp) != M_DATA) { 7498 prevmp->b_cont = NULL; 7499 mutex_enter(&stp->sd_lock); 7500 putback(stp, q, tmp, tmp->b_band); 7501 mutex_exit(&stp->sd_lock); 7502 break; 7503 } 7504 } 7505 7506 bp = (stp->sd_rputdatafunc)(stp->sd_vnode, bp, 7507 NULL, NULL, NULL, NULL); 7508 7509 if (bp == NULL) 7510 goto retry; 7511 } 7512 7513 if (STREAM_NEEDSERVICE(stp)) 7514 stream_runservice(stp); 7515 7516 /* 7517 * Set HIPRI flag if message is priority. 7518 */ 7519 if (type >= QPCTL) 7520 flg = MSG_HIPRI; 7521 else 7522 flg = MSG_BAND; 7523 7524 /* 7525 * First process PROTO or PCPROTO blocks, if any. 7526 */ 7527 if (mctlp != NULL && type != M_DATA) { 7528 mblk_t *nbp; 7529 7530 *mctlp = bp; 7531 while (bp->b_cont && bp->b_cont->b_datap->db_type != M_DATA) 7532 bp = bp->b_cont; 7533 nbp = bp->b_cont; 7534 bp->b_cont = NULL; 7535 bp = nbp; 7536 } 7537 7538 if (bp && bp->b_datap->db_type != M_DATA) { 7539 /* 7540 * More PROTO blocks in msg. Will only happen if mctlp is NULL. 7541 */ 7542 more |= MORECTL; 7543 savemp = bp; 7544 while (bp && bp->b_datap->db_type != M_DATA) { 7545 savemptail = bp; 7546 bp = bp->b_cont; 7547 } 7548 savemptail->b_cont = NULL; 7549 } 7550 7551 /* 7552 * Now process DATA blocks, if any. 7553 */ 7554 if (uiop == NULL) { 7555 /* Append data to tail of mctlp */ 7556 7557 if (mctlp != NULL) { 7558 mblk_t **mpp = mctlp; 7559 7560 while (*mpp != NULL) 7561 mpp = &((*mpp)->b_cont); 7562 *mpp = bp; 7563 bp = NULL; 7564 } 7565 } else if (uiop->uio_resid >= 0 && bp) { 7566 size_t oldresid = uiop->uio_resid; 7567 7568 /* 7569 * If a streams message is likely to consist 7570 * of many small mblks, it is pulled up into 7571 * one continuous chunk of memory. 7572 * The size of the first mblk may be bogus because 7573 * successive read() calls on the socket reduce 7574 * the size of this mblk until it is exhausted 7575 * and then the code walks on to the next. Thus 7576 * the size of the mblk may not be the original size 7577 * that was passed up, it's simply a remainder 7578 * and hence can be very small without any 7579 * implication that the packet is badly fragmented. 7580 * So the size of the possible second mblk is 7581 * used to spot a badly fragmented packet. 7582 * see longer comment at top of page 7583 * by mblk_pull_len declaration. 7584 */ 7585 7586 if (bp->b_cont != NULL && MBLKL(bp->b_cont) < mblk_pull_len) { 7587 (void) pullupmsg(bp, -1); 7588 } 7589 7590 bp = struiocopyout(bp, uiop, &error); 7591 if (error != 0) { 7592 if (mctlp != NULL) { 7593 freemsg(*mctlp); 7594 *mctlp = NULL; 7595 } else 7596 freemsg(savemp); 7597 mutex_enter(&stp->sd_lock); 7598 /* 7599 * clear stream head hi pri flag based on 7600 * first message 7601 */ 7602 if (!(flags & MSG_IPEEK) && (type >= QPCTL)) { 7603 ASSERT(type == M_PCPROTO); 7604 stp->sd_flag &= ~STRPRI; 7605 } 7606 more = 0; 7607 goto getmout; 7608 } 7609 /* 7610 * (pr == 1) indicates a partial read. 7611 */ 7612 if (oldresid > uiop->uio_resid) 7613 pr = 1; 7614 } 7615 7616 if (bp) { /* more data blocks in msg */ 7617 more |= MOREDATA; 7618 if (savemp) 7619 savemptail->b_cont = bp; 7620 else 7621 savemp = bp; 7622 } 7623 7624 mutex_enter(&stp->sd_lock); 7625 if (savemp) { 7626 if (flags & (MSG_IPEEK|MSG_DISCARDTAIL)) { 7627 /* 7628 * When MSG_DISCARDTAIL is set or 7629 * when peeking discard any tail. 
When peeking this 7630 * is the tail of the dup that was copied out - the 7631 * message has already been putback on the queue. 7632 * Return MOREDATA to the caller even though the data 7633 * is discarded. This is used by sockets (to 7634 * set MSG_TRUNC). 7635 */ 7636 freemsg(savemp); 7637 if (!(flags & MSG_IPEEK) && (type >= QPCTL)) { 7638 ASSERT(type == M_PCPROTO); 7639 stp->sd_flag &= ~STRPRI; 7640 } 7641 } else if (pr && (savemp->b_datap->db_type == M_DATA) && 7642 msgnodata(savemp)) { 7643 /* 7644 * Avoid queuing a zero-length tail part of 7645 * a message. pr=1 indicates that we read some of 7646 * the message. 7647 */ 7648 freemsg(savemp); 7649 more &= ~MOREDATA; 7650 if (type >= QPCTL) { 7651 ASSERT(type == M_PCPROTO); 7652 stp->sd_flag &= ~STRPRI; 7653 } 7654 } else { 7655 savemp->b_band = pri; 7656 /* 7657 * If the first message was HIPRI and the one we're 7658 * putting back isn't, then clear STRPRI, otherwise 7659 * set STRPRI again. Note that we must set STRPRI 7660 * again since the flush logic in strrput_nondata() 7661 * may have cleared it while we had sd_lock dropped. 7662 */ 7663 7664 if (type >= QPCTL) { 7665 ASSERT(type == M_PCPROTO); 7666 if (queclass(savemp) < QPCTL) 7667 stp->sd_flag &= ~STRPRI; 7668 else 7669 stp->sd_flag |= STRPRI; 7670 } else if (queclass(savemp) >= QPCTL) { 7671 /* 7672 * The first message was not a HIPRI message, 7673 * but the one we are about to putback is. 7674 * For simplicitly, we do not allow for HIPRI 7675 * messages to be embedded in the message 7676 * body, so just force it to same type as 7677 * first message. 7678 */ 7679 ASSERT(type == M_DATA || type == M_PROTO); 7680 ASSERT(savemp->b_datap->db_type == M_PCPROTO); 7681 savemp->b_datap->db_type = type; 7682 } 7683 if (mark != 0) { 7684 if ((mark & _LASTMARK) && 7685 (stp->sd_mark == NULL)) { 7686 /* 7687 * If another marked message arrived 7688 * while sd_lock was not held sd_mark 7689 * would be non-NULL. 7690 */ 7691 stp->sd_mark = savemp; 7692 } 7693 savemp->b_flag |= mark & ~_LASTMARK; 7694 } 7695 putback(stp, q, savemp, pri); 7696 } 7697 } else if (!(flags & MSG_IPEEK)) { 7698 /* 7699 * The complete message was consumed. 7700 * 7701 * If another M_PCPROTO arrived while sd_lock was not held 7702 * it would have been discarded since STRPRI was still set. 7703 * 7704 * Move the MSG*MARKNEXT information 7705 * to the stream head just in case 7706 * the read queue becomes empty. 7707 * clear stream head hi pri flag based on 7708 * first message 7709 * 7710 * If the stream head was at the mark 7711 * (STRATMARK) before we dropped sd_lock above 7712 * and some data was consumed then we have 7713 * moved past the mark thus STRATMARK is 7714 * cleared. However, if a message arrived in 7715 * strrput during the copyout above causing 7716 * STRATMARK to be set we can not clear that 7717 * flag. 7718 * XXX A "perimeter" would help by single-threading strrput, 7719 * strread, strgetmsg and kstrgetmsg. 
7720 */ 7721 if (type >= QPCTL) { 7722 ASSERT(type == M_PCPROTO); 7723 stp->sd_flag &= ~STRPRI; 7724 } 7725 if (mark & (MSGMARKNEXT|MSGNOTMARKNEXT|MSGMARK)) { 7726 if (mark & MSGMARKNEXT) { 7727 stp->sd_flag &= ~STRNOTATMARK; 7728 stp->sd_flag |= STRATMARK; 7729 } else if (mark & MSGNOTMARKNEXT) { 7730 stp->sd_flag &= ~STRATMARK; 7731 stp->sd_flag |= STRNOTATMARK; 7732 } else { 7733 stp->sd_flag &= ~(STRATMARK|STRNOTATMARK); 7734 } 7735 } else if (pr && (old_sd_flag & STRATMARK)) { 7736 stp->sd_flag &= ~STRATMARK; 7737 } 7738 } 7739 7740 *flagsp = flg; 7741 *prip = pri; 7742 7743 /* 7744 * Getmsg cleanup processing - if the state of the queue has changed 7745 * some signals may need to be sent and/or poll awakened. 7746 */ 7747 getmout: 7748 qbackenable(q, pri); 7749 7750 /* 7751 * We dropped the stream head lock above. Send all M_SIG messages 7752 * before processing stream head for SIGPOLL messages. 7753 */ 7754 ASSERT(MUTEX_HELD(&stp->sd_lock)); 7755 while ((bp = q->q_first) != NULL && 7756 (bp->b_datap->db_type == M_SIG)) { 7757 /* 7758 * sd_lock is held so the content of the read queue can not 7759 * change. 7760 */ 7761 bp = getq(q); 7762 ASSERT(bp != NULL && bp->b_datap->db_type == M_SIG); 7763 7764 strsignal_nolock(stp, *bp->b_rptr, bp->b_band); 7765 mutex_exit(&stp->sd_lock); 7766 freemsg(bp); 7767 if (STREAM_NEEDSERVICE(stp)) 7768 stream_runservice(stp); 7769 mutex_enter(&stp->sd_lock); 7770 } 7771 7772 /* 7773 * stream head cannot change while we make the determination 7774 * whether or not to send a signal. Drop the flag to allow strrput 7775 * to send firstmsgsigs again. 7776 */ 7777 stp->sd_flag &= ~STRGETINPROG; 7778 7779 /* 7780 * If the type of message at the front of the queue changed 7781 * due to the receive the appropriate signals and pollwakeup events 7782 * are generated. The type of changes are: 7783 * Processed a hipri message, q_first is not hipri. 7784 * Processed a band X message, and q_first is band Y. 7785 * The generated signals and pollwakeups are identical to what 7786 * strrput() generates should the message that is now on q_first 7787 * arrive to an empty read queue. 7788 * 7789 * Note: only strrput will send a signal for a hipri message. 7790 */ 7791 if ((bp = q->q_first) != NULL && !(stp->sd_flag & STRPRI)) { 7792 strsigset_t signals = 0; 7793 strpollset_t pollwakeups = 0; 7794 7795 if (flg & MSG_HIPRI) { 7796 /* 7797 * Removed a hipri message. Regular data at 7798 * the front of the queue. 7799 */ 7800 if (bp->b_band == 0) { 7801 signals = S_INPUT | S_RDNORM; 7802 pollwakeups = POLLIN | POLLRDNORM; 7803 } else { 7804 signals = S_INPUT | S_RDBAND; 7805 pollwakeups = POLLIN | POLLRDBAND; 7806 } 7807 } else if (pri != bp->b_band) { 7808 /* 7809 * The band is different for the new q_first. 
7810 */ 7811 if (bp->b_band == 0) { 7812 signals = S_RDNORM; 7813 pollwakeups = POLLIN | POLLRDNORM; 7814 } else { 7815 signals = S_RDBAND; 7816 pollwakeups = POLLIN | POLLRDBAND; 7817 } 7818 } 7819 7820 if (pollwakeups != 0) { 7821 if (pollwakeups == (POLLIN | POLLRDNORM)) { 7822 if (!(stp->sd_rput_opt & SR_POLLIN)) 7823 goto no_pollwake; 7824 stp->sd_rput_opt &= ~SR_POLLIN; 7825 } 7826 mutex_exit(&stp->sd_lock); 7827 pollwakeup(&stp->sd_pollist, pollwakeups); 7828 mutex_enter(&stp->sd_lock); 7829 } 7830 no_pollwake: 7831 7832 if (stp->sd_sigflags & signals) 7833 strsendsig(stp->sd_siglist, signals, bp->b_band, 0); 7834 } 7835 mutex_exit(&stp->sd_lock); 7836 7837 rvp->r_val1 = more; 7838 return (error); 7839 #undef _LASTMARK 7840 } 7841 7842 /* 7843 * Put a message downstream. 7844 * 7845 * NOTE: strputmsg and kstrputmsg have much of the logic in common. 7846 */ 7847 int 7848 strputmsg( 7849 struct vnode *vp, 7850 struct strbuf *mctl, 7851 struct strbuf *mdata, 7852 unsigned char pri, 7853 int flag, 7854 int fmode) 7855 { 7856 struct stdata *stp; 7857 queue_t *wqp; 7858 mblk_t *mp; 7859 ssize_t msgsize; 7860 ssize_t rmin, rmax; 7861 int error; 7862 struct uio uios; 7863 struct uio *uiop = &uios; 7864 struct iovec iovs; 7865 int xpg4 = 0; 7866 7867 ASSERT(vp->v_stream); 7868 stp = vp->v_stream; 7869 wqp = stp->sd_wrq; 7870 7871 /* 7872 * If it is an XPG4 application, we need to send 7873 * SIGPIPE below 7874 */ 7875 7876 xpg4 = (flag & MSG_XPG4) ? 1 : 0; 7877 flag &= ~MSG_XPG4; 7878 7879 if (AU_AUDITING()) 7880 audit_strputmsg(vp, mctl, mdata, pri, flag, fmode); 7881 7882 mutex_enter(&stp->sd_lock); 7883 7884 if ((error = i_straccess(stp, JCWRITE)) != 0) { 7885 mutex_exit(&stp->sd_lock); 7886 return (error); 7887 } 7888 7889 if (stp->sd_flag & (STWRERR|STRHUP|STPLEX)) { 7890 error = strwriteable(stp, B_FALSE, xpg4); 7891 if (error != 0) { 7892 mutex_exit(&stp->sd_lock); 7893 return (error); 7894 } 7895 } 7896 7897 mutex_exit(&stp->sd_lock); 7898 7899 /* 7900 * Check for legal flag value. 7901 */ 7902 switch (flag) { 7903 case MSG_HIPRI: 7904 if ((mctl->len < 0) || (pri != 0)) 7905 return (EINVAL); 7906 break; 7907 case MSG_BAND: 7908 break; 7909 7910 default: 7911 return (EINVAL); 7912 } 7913 7914 TRACE_1(TR_FAC_STREAMS_FR, TR_STRPUTMSG_IN, 7915 "strputmsg in:stp %p", stp); 7916 7917 /* get these values from those cached in the stream head */ 7918 rmin = stp->sd_qn_minpsz; 7919 rmax = stp->sd_qn_maxpsz; 7920 7921 /* 7922 * Make sure ctl and data sizes together fall within the 7923 * limits of the max and min receive packet sizes and do 7924 * not exceed system limit. 7925 */ 7926 ASSERT((rmax >= 0) || (rmax == INFPSZ)); 7927 if (rmax == 0) { 7928 return (ERANGE); 7929 } 7930 /* 7931 * Use the MAXIMUM of sd_maxblk and q_maxpsz. 7932 * Needed to prevent partial failures in the strmakedata loop. 
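 *
 * From the caller's point of view these limits surface as ERANGE from
 * putmsg(2).  A minimal sketch of the user-level call being serviced
 * here (illustrative only; "prim", "buf" and "len" are hypothetical):
 *
 *        struct strbuf ctl, dat;
 *
 *        ctl.len = sizeof (prim);        ctl.buf = (char *)&prim;
 *        dat.len = len;                  dat.buf = buf;
 *        if (putmsg(fd, &ctl, &dat, 0) < 0)
 *                ... ERANGE if dat.len falls outside the packet-size
 *                    range checked just below, or ctl.len exceeds
 *                    strctlsz ...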
7933 */ 7934 if (stp->sd_maxblk != INFPSZ && rmax != INFPSZ && rmax < stp->sd_maxblk) 7935 rmax = stp->sd_maxblk; 7936 7937 if ((msgsize = mdata->len) < 0) { 7938 msgsize = 0; 7939 rmin = 0; /* no range check for NULL data part */ 7940 } 7941 if ((msgsize < rmin) || 7942 ((msgsize > rmax) && (rmax != INFPSZ)) || 7943 (mctl->len > strctlsz)) { 7944 return (ERANGE); 7945 } 7946 7947 /* 7948 * Setup uio and iov for data part 7949 */ 7950 iovs.iov_base = mdata->buf; 7951 iovs.iov_len = msgsize; 7952 uios.uio_iov = &iovs; 7953 uios.uio_iovcnt = 1; 7954 uios.uio_loffset = 0; 7955 uios.uio_segflg = UIO_USERSPACE; 7956 uios.uio_fmode = fmode; 7957 uios.uio_extflg = UIO_COPY_DEFAULT; 7958 uios.uio_resid = msgsize; 7959 uios.uio_offset = 0; 7960 7961 /* Ignore flow control in strput for HIPRI */ 7962 if (flag & MSG_HIPRI) 7963 flag |= MSG_IGNFLOW; 7964 7965 for (;;) { 7966 int done = 0; 7967 7968 /* 7969 * strput will always free the ctl mblk - even when strput 7970 * fails. 7971 */ 7972 if ((error = strmakectl(mctl, flag, fmode, &mp)) != 0) { 7973 TRACE_3(TR_FAC_STREAMS_FR, TR_STRPUTMSG_OUT, 7974 "strputmsg out:stp %p out %d error %d", 7975 stp, 1, error); 7976 return (error); 7977 } 7978 /* 7979 * Verify that the whole message can be transferred by 7980 * strput. 7981 */ 7982 ASSERT(stp->sd_maxblk == INFPSZ || 7983 stp->sd_maxblk >= mdata->len); 7984 7985 msgsize = mdata->len; 7986 error = strput(stp, mp, uiop, &msgsize, 0, pri, flag); 7987 mdata->len = msgsize; 7988 7989 if (error == 0) 7990 break; 7991 7992 if (error != EWOULDBLOCK) 7993 goto out; 7994 7995 mutex_enter(&stp->sd_lock); 7996 /* 7997 * Check for a missed wakeup. 7998 * Needed since strput did not hold sd_lock across 7999 * the canputnext. 8000 */ 8001 if (bcanputnext(wqp, pri)) { 8002 /* Try again */ 8003 mutex_exit(&stp->sd_lock); 8004 continue; 8005 } 8006 TRACE_2(TR_FAC_STREAMS_FR, TR_STRPUTMSG_WAIT, 8007 "strputmsg wait:stp %p waits pri %d", stp, pri); 8008 if (((error = strwaitq(stp, WRITEWAIT, (ssize_t)0, fmode, -1, 8009 &done)) != 0) || done) { 8010 mutex_exit(&stp->sd_lock); 8011 TRACE_3(TR_FAC_STREAMS_FR, TR_STRPUTMSG_OUT, 8012 "strputmsg out:q %p out %d error %d", 8013 stp, 0, error); 8014 return (error); 8015 } 8016 TRACE_1(TR_FAC_STREAMS_FR, TR_STRPUTMSG_WAKE, 8017 "strputmsg wake:stp %p wakes", stp); 8018 if ((error = i_straccess(stp, JCWRITE)) != 0) { 8019 mutex_exit(&stp->sd_lock); 8020 return (error); 8021 } 8022 mutex_exit(&stp->sd_lock); 8023 } 8024 out: 8025 /* 8026 * For historic reasons, applications expect EAGAIN 8027 * when data mblk could not be allocated. so change 8028 * ENOMEM back to EAGAIN 8029 */ 8030 if (error == ENOMEM) 8031 error = EAGAIN; 8032 TRACE_3(TR_FAC_STREAMS_FR, TR_STRPUTMSG_OUT, 8033 "strputmsg out:stp %p out %d error %d", stp, 2, error); 8034 return (error); 8035 } 8036 8037 /* 8038 * Put a message downstream. 8039 * Can send only an M_PROTO/M_PCPROTO by passing in a NULL uiop. 8040 * The fmode flag (NDELAY, NONBLOCK) is the or of the flags in the uio 8041 * and the fmode parameter. 8042 * 8043 * This routine handles the consolidation private flags: 8044 * MSG_IGNERROR Ignore any stream head error except STPLEX. 8045 * MSG_HOLDSIG Hold signals while waiting for data. 8046 * MSG_IGNFLOW Don't check streams flow control. 8047 * 8048 * NOTE: strputmsg and kstrputmsg have much of the logic in common. 
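 *
 * A typical in-kernel caller (sockfs is the main consumer) passes a
 * pre-built control mblk and no uio; a minimal sketch, assuming "vp"
 * is the stream vnode and "mp" is an M_PROTO message already filled
 * in by the caller:
 *
 *        error = kstrputmsg(vp, mp, NULL, -1, 0, MSG_BAND | MSG_IGNFLOW, 0);
 *
 * The control mblk is always consumed, even on failure, so the caller
 * must not free it afterwards.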
8049 */ 8050 int 8051 kstrputmsg( 8052 struct vnode *vp, 8053 mblk_t *mctl, 8054 struct uio *uiop, 8055 ssize_t msgsize, 8056 unsigned char pri, 8057 int flag, 8058 int fmode) 8059 { 8060 struct stdata *stp; 8061 queue_t *wqp; 8062 ssize_t rmin, rmax; 8063 int error; 8064 8065 ASSERT(vp->v_stream); 8066 stp = vp->v_stream; 8067 wqp = stp->sd_wrq; 8068 if (AU_AUDITING()) 8069 audit_strputmsg(vp, NULL, NULL, pri, flag, fmode); 8070 if (mctl == NULL) 8071 return (EINVAL); 8072 8073 mutex_enter(&stp->sd_lock); 8074 8075 if ((error = i_straccess(stp, JCWRITE)) != 0) { 8076 mutex_exit(&stp->sd_lock); 8077 freemsg(mctl); 8078 return (error); 8079 } 8080 8081 if ((stp->sd_flag & STPLEX) || !(flag & MSG_IGNERROR)) { 8082 if (stp->sd_flag & (STWRERR|STRHUP|STPLEX)) { 8083 error = strwriteable(stp, B_FALSE, B_TRUE); 8084 if (error != 0) { 8085 mutex_exit(&stp->sd_lock); 8086 freemsg(mctl); 8087 return (error); 8088 } 8089 } 8090 } 8091 8092 mutex_exit(&stp->sd_lock); 8093 8094 /* 8095 * Check for legal flag value. 8096 */ 8097 switch (flag & (MSG_HIPRI|MSG_BAND|MSG_ANY)) { 8098 case MSG_HIPRI: 8099 if (pri != 0) { 8100 freemsg(mctl); 8101 return (EINVAL); 8102 } 8103 break; 8104 case MSG_BAND: 8105 break; 8106 default: 8107 freemsg(mctl); 8108 return (EINVAL); 8109 } 8110 8111 TRACE_1(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_IN, 8112 "kstrputmsg in:stp %p", stp); 8113 8114 /* get these values from those cached in the stream head */ 8115 rmin = stp->sd_qn_minpsz; 8116 rmax = stp->sd_qn_maxpsz; 8117 8118 /* 8119 * Make sure ctl and data sizes together fall within the 8120 * limits of the max and min receive packet sizes and do 8121 * not exceed system limit. 8122 */ 8123 ASSERT((rmax >= 0) || (rmax == INFPSZ)); 8124 if (rmax == 0) { 8125 freemsg(mctl); 8126 return (ERANGE); 8127 } 8128 /* 8129 * Use the MAXIMUM of sd_maxblk and q_maxpsz. 8130 * Needed to prevent partial failures in the strmakedata loop. 8131 */ 8132 if (stp->sd_maxblk != INFPSZ && rmax != INFPSZ && rmax < stp->sd_maxblk) 8133 rmax = stp->sd_maxblk; 8134 8135 if (uiop == NULL) { 8136 msgsize = -1; 8137 rmin = -1; /* no range check for NULL data part */ 8138 } else { 8139 /* Use uio flags as well as the fmode parameter flags */ 8140 fmode |= uiop->uio_fmode; 8141 8142 if ((msgsize < rmin) || 8143 ((msgsize > rmax) && (rmax != INFPSZ))) { 8144 freemsg(mctl); 8145 return (ERANGE); 8146 } 8147 } 8148 8149 /* Ignore flow control in strput for HIPRI */ 8150 if (flag & MSG_HIPRI) 8151 flag |= MSG_IGNFLOW; 8152 8153 for (;;) { 8154 int done = 0; 8155 int waitflag; 8156 mblk_t *mp; 8157 8158 /* 8159 * strput will always free the ctl mblk - even when strput 8160 * fails. If MSG_IGNFLOW is set then any error returned 8161 * will cause us to break the loop, so we don't need a copy 8162 * of the message. If MSG_IGNFLOW is not set, then we can 8163 * get hit by flow control and be forced to try again. In 8164 * this case we need to have a copy of the message. We 8165 * do this using copymsg since the message may get modified 8166 * by something below us. 8167 * 8168 * We've observed that many TPI providers do not check db_ref 8169 * on the control messages but blindly reuse them for the 8170 * T_OK_ACK/T_ERROR_ACK. Thus using copymsg is more 8171 * friendly to such providers than using dupmsg. Also, note 8172 * that sockfs uses MSG_IGNFLOW for all TPI control messages. 8173 * Only data messages are subject to flow control, hence 8174 * subject to this copymsg. 
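 *
 * A provider that does want to reuse an inbound control message is
 * expected to check the reference count first; the usual defensive
 * pattern looks roughly like (illustrative sketch only):
 *
 *        if (DB_REF(mp) > 1) {
 *                mblk_t *nmp = copymsg(mp);
 *
 *                if (nmp == NULL) {
 *                        ... handle allocation failure and return ...
 *                }
 *                freemsg(mp);
 *                mp = nmp;
 *        }
 *        ... now safe to overwrite the dblk, e.g. to build a T_OK_ACK ...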
8175 */ 8176 if (flag & MSG_IGNFLOW) { 8177 mp = mctl; 8178 mctl = NULL; 8179 } else { 8180 do { 8181 /* 8182 * If a message has a free pointer, the message 8183 * must be dupmsg to maintain this pointer. 8184 * Code using this facility must be sure 8185 * that modules below will not change the 8186 * contents of the dblk without checking db_ref 8187 * first. If db_ref is > 1, then the module 8188 * needs to do a copymsg first. Otherwise, 8189 * the contents of the dblk may become 8190 * inconsistent because the freesmg/freeb below 8191 * may end up calling atomic_add_32_nv. 8192 * The atomic_add_32_nv in freeb (accessing 8193 * all of db_ref, db_type, db_flags, and 8194 * db_struioflag) does not prevent other threads 8195 * from concurrently trying to modify e.g. 8196 * db_type. 8197 */ 8198 if (mctl->b_datap->db_frtnp != NULL) 8199 mp = dupmsg(mctl); 8200 else 8201 mp = copymsg(mctl); 8202 8203 if (mp != NULL) 8204 break; 8205 8206 error = strwaitbuf(msgdsize(mctl), BPRI_MED); 8207 if (error) { 8208 freemsg(mctl); 8209 return (error); 8210 } 8211 } while (mp == NULL); 8212 } 8213 /* 8214 * Verify that all of msgsize can be transferred by 8215 * strput. 8216 */ 8217 ASSERT(stp->sd_maxblk == INFPSZ || stp->sd_maxblk >= msgsize); 8218 error = strput(stp, mp, uiop, &msgsize, 0, pri, flag); 8219 if (error == 0) 8220 break; 8221 8222 if (error != EWOULDBLOCK) 8223 goto out; 8224 8225 /* 8226 * IF MSG_IGNFLOW is set we should have broken out of loop 8227 * above. 8228 */ 8229 ASSERT(!(flag & MSG_IGNFLOW)); 8230 mutex_enter(&stp->sd_lock); 8231 /* 8232 * Check for a missed wakeup. 8233 * Needed since strput did not hold sd_lock across 8234 * the canputnext. 8235 */ 8236 if (bcanputnext(wqp, pri)) { 8237 /* Try again */ 8238 mutex_exit(&stp->sd_lock); 8239 continue; 8240 } 8241 TRACE_2(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_WAIT, 8242 "kstrputmsg wait:stp %p waits pri %d", stp, pri); 8243 8244 waitflag = WRITEWAIT; 8245 if (flag & (MSG_HOLDSIG|MSG_IGNERROR)) { 8246 if (flag & MSG_HOLDSIG) 8247 waitflag |= STR_NOSIG; 8248 if (flag & MSG_IGNERROR) 8249 waitflag |= STR_NOERROR; 8250 } 8251 if (((error = strwaitq(stp, waitflag, 8252 (ssize_t)0, fmode, -1, &done)) != 0) || done) { 8253 mutex_exit(&stp->sd_lock); 8254 TRACE_3(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_OUT, 8255 "kstrputmsg out:stp %p out %d error %d", 8256 stp, 0, error); 8257 freemsg(mctl); 8258 return (error); 8259 } 8260 TRACE_1(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_WAKE, 8261 "kstrputmsg wake:stp %p wakes", stp); 8262 if ((error = i_straccess(stp, JCWRITE)) != 0) { 8263 mutex_exit(&stp->sd_lock); 8264 freemsg(mctl); 8265 return (error); 8266 } 8267 mutex_exit(&stp->sd_lock); 8268 } 8269 out: 8270 freemsg(mctl); 8271 /* 8272 * For historic reasons, applications expect EAGAIN 8273 * when data mblk could not be allocated. so change 8274 * ENOMEM back to EAGAIN 8275 */ 8276 if (error == ENOMEM) 8277 error = EAGAIN; 8278 TRACE_3(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_OUT, 8279 "kstrputmsg out:stp %p out %d error %d", stp, 2, error); 8280 return (error); 8281 } 8282 8283 /* 8284 * Determines whether the necessary conditions are set on a stream 8285 * for it to be readable, writeable, or have exceptions. 8286 * 8287 * strpoll handles the consolidation private events: 8288 * POLLNOERR Do not return POLLERR even if there are stream 8289 * head errors. 8290 * Used by sockfs. 8291 * POLLRDDATA Do not return POLLIN unless at least one message on 8292 * the queue contains one or more M_DATA mblks. 
Thus 8293 * when this flag is set a queue with only 8294 * M_PROTO/M_PCPROTO mblks does not return POLLIN. 8295 * Used by sockfs to ignore T_EXDATA_IND messages. 8296 * 8297 * Note: POLLRDDATA assumes that synch streams only return messages with 8298 * an M_DATA attached (i.e. not messages consisting of only 8299 * an M_PROTO/M_PCPROTO part). 8300 */ 8301 int 8302 strpoll(struct stdata *stp, short events_arg, int anyyet, short *reventsp, 8303 struct pollhead **phpp) 8304 { 8305 int events = (ushort_t)events_arg; 8306 int retevents = 0; 8307 mblk_t *mp; 8308 qband_t *qbp; 8309 long sd_flags = stp->sd_flag; 8310 int headlocked = 0; 8311 8312 /* 8313 * For performance, a single 'if' tests for most possible edge 8314 * conditions in one shot 8315 */ 8316 if (sd_flags & (STPLEX | STRDERR | STWRERR)) { 8317 if (sd_flags & STPLEX) { 8318 *reventsp = POLLNVAL; 8319 return (EINVAL); 8320 } 8321 if (((events & (POLLIN | POLLRDNORM | POLLRDBAND | POLLPRI)) && 8322 (sd_flags & STRDERR)) || 8323 ((events & (POLLOUT | POLLWRNORM | POLLWRBAND)) && 8324 (sd_flags & STWRERR))) { 8325 if (!(events & POLLNOERR)) { 8326 *reventsp = POLLERR; 8327 return (0); 8328 } 8329 } 8330 } 8331 if (sd_flags & STRHUP) { 8332 retevents |= POLLHUP; 8333 } else if (events & (POLLWRNORM | POLLWRBAND)) { 8334 queue_t *tq; 8335 queue_t *qp = stp->sd_wrq; 8336 8337 claimstr(qp); 8338 /* Find next module forward that has a service procedure */ 8339 tq = qp->q_next->q_nfsrv; 8340 ASSERT(tq != NULL); 8341 8342 if (polllock(&stp->sd_pollist, QLOCK(tq)) != 0) { 8343 releasestr(qp); 8344 *reventsp = POLLNVAL; 8345 return (0); 8346 } 8347 if (events & POLLWRNORM) { 8348 queue_t *sqp; 8349 8350 if (tq->q_flag & QFULL) 8351 /* ensure backq svc procedure runs */ 8352 tq->q_flag |= QWANTW; 8353 else if ((sqp = stp->sd_struiowrq) != NULL) { 8354 /* Check sync stream barrier write q */ 8355 mutex_exit(QLOCK(tq)); 8356 if (polllock(&stp->sd_pollist, 8357 QLOCK(sqp)) != 0) { 8358 releasestr(qp); 8359 *reventsp = POLLNVAL; 8360 return (0); 8361 } 8362 if (sqp->q_flag & QFULL) 8363 /* ensure pollwakeup() is done */ 8364 sqp->q_flag |= QWANTWSYNC; 8365 else 8366 retevents |= POLLOUT; 8367 /* More write events to process ??? */ 8368 if (! (events & POLLWRBAND)) { 8369 mutex_exit(QLOCK(sqp)); 8370 releasestr(qp); 8371 goto chkrd; 8372 } 8373 mutex_exit(QLOCK(sqp)); 8374 if (polllock(&stp->sd_pollist, 8375 QLOCK(tq)) != 0) { 8376 releasestr(qp); 8377 *reventsp = POLLNVAL; 8378 return (0); 8379 } 8380 } else 8381 retevents |= POLLOUT; 8382 } 8383 if (events & POLLWRBAND) { 8384 qbp = tq->q_bandp; 8385 if (qbp) { 8386 while (qbp) { 8387 if (qbp->qb_flag & QB_FULL) 8388 qbp->qb_flag |= QB_WANTW; 8389 else 8390 retevents |= POLLWRBAND; 8391 qbp = qbp->qb_next; 8392 } 8393 } else { 8394 retevents |= POLLWRBAND; 8395 } 8396 } 8397 mutex_exit(QLOCK(tq)); 8398 releasestr(qp); 8399 } 8400 chkrd: 8401 if (sd_flags & STRPRI) { 8402 retevents |= (events & POLLPRI); 8403 } else if (events & (POLLRDNORM | POLLRDBAND | POLLIN)) { 8404 queue_t *qp = _RD(stp->sd_wrq); 8405 int normevents = (events & (POLLIN | POLLRDNORM)); 8406 8407 /* 8408 * Note: Need to do polllock() here since ps_lock may be 8409 * held. See bug 4191544. 8410 */ 8411 if (polllock(&stp->sd_pollist, &stp->sd_lock) != 0) { 8412 *reventsp = POLLNVAL; 8413 return (0); 8414 } 8415 headlocked = 1; 8416 mp = qp->q_first; 8417 while (mp) { 8418 /* 8419 * For POLLRDDATA we scan b_cont and b_next until we 8420 * find an M_DATA. 
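 *
 * (POLLRDDATA itself is a private flag set by sockfs, not something
 * poll(2) callers pass in.)  The events computed here are what an
 * application ultimately sees from poll(2); e.g. (illustrative sketch
 * only, "fd" being an open stream):
 *
 *        struct pollfd pfd;
 *
 *        pfd.fd = fd;
 *        pfd.events = POLLIN | POLLRDBAND | POLLPRI;
 *        if (poll(&pfd, 1, -1) > 0) {
 *                ... POLLPRI: high-priority (M_PCPROTO) message queued,
 *                    POLLRDBAND: banded data, POLLIN/POLLRDNORM: normal
 *                    data or EOF, POLLHUP: the stream is hung up ...
 *        }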
8421 */ 8422 if ((events & POLLRDDATA) && 8423 mp->b_datap->db_type != M_DATA) { 8424 mblk_t *nmp = mp->b_cont; 8425 8426 while (nmp != NULL && 8427 nmp->b_datap->db_type != M_DATA) 8428 nmp = nmp->b_cont; 8429 if (nmp == NULL) { 8430 mp = mp->b_next; 8431 continue; 8432 } 8433 } 8434 if (mp->b_band == 0) 8435 retevents |= normevents; 8436 else 8437 retevents |= (events & (POLLIN | POLLRDBAND)); 8438 break; 8439 } 8440 if (!(retevents & normevents) && (stp->sd_wakeq & RSLEEP)) { 8441 /* 8442 * Sync stream barrier read queue has data. 8443 */ 8444 retevents |= normevents; 8445 } 8446 /* Treat eof as normal data */ 8447 if (sd_flags & STREOF) 8448 retevents |= normevents; 8449 } 8450 8451 /* 8452 * Pass back a pollhead if no events are pending or if edge-triggering 8453 * has been configured on this resource. 8454 */ 8455 if ((retevents == 0 && !anyyet) || (events & POLLET)) { 8456 *phpp = &stp->sd_pollist; 8457 if (headlocked == 0) { 8458 if (polllock(&stp->sd_pollist, &stp->sd_lock) != 0) { 8459 *reventsp = POLLNVAL; 8460 return (0); 8461 } 8462 headlocked = 1; 8463 } 8464 stp->sd_rput_opt |= SR_POLLIN; 8465 } 8466 8467 *reventsp = (short)retevents; 8468 if (headlocked) 8469 mutex_exit(&stp->sd_lock); 8470 return (0); 8471 } 8472 8473 /* 8474 * The purpose of putback() is to assure sleeping polls/reads 8475 * are awakened when there are no new messages arriving at the, 8476 * stream head, and a message is placed back on the read queue. 8477 * 8478 * sd_lock must be held when messages are placed back on stream 8479 * head. (getq() holds sd_lock when it removes messages from 8480 * the queue) 8481 */ 8482 8483 static void 8484 putback(struct stdata *stp, queue_t *q, mblk_t *bp, int band) 8485 { 8486 mblk_t *qfirst; 8487 ASSERT(MUTEX_HELD(&stp->sd_lock)); 8488 8489 /* 8490 * As a result of lock-step ordering around q_lock and sd_lock, 8491 * it's possible for function calls like putnext() and 8492 * canputnext() to get an inaccurate picture of how much 8493 * data is really being processed at the stream head. 8494 * We only consolidate with existing messages on the queue 8495 * if the length of the message we want to put back is smaller 8496 * than the queue hiwater mark. 8497 */ 8498 if ((stp->sd_rput_opt & SR_CONSOL_DATA) && 8499 (DB_TYPE(bp) == M_DATA) && ((qfirst = q->q_first) != NULL) && 8500 (DB_TYPE(qfirst) == M_DATA) && 8501 ((qfirst->b_flag & (MSGMARK|MSGDELIM)) == 0) && 8502 ((bp->b_flag & (MSGMARK|MSGDELIM|MSGMARKNEXT)) == 0) && 8503 (mp_cont_len(bp, NULL) < q->q_hiwat)) { 8504 /* 8505 * We use the same logic as defined in strrput() 8506 * but in reverse as we are putting back onto the 8507 * queue and want to retain byte ordering. 8508 * Consolidate M_DATA messages with M_DATA ONLY. 8509 * strrput() allows the consolidation of M_DATA onto 8510 * M_PROTO | M_PCPROTO but not the other way round. 8511 * 8512 * The consolidation does not take place if the message 8513 * we are returning to the queue is marked with either 8514 * of the marks or the delim flag or if q_first 8515 * is marked with MSGMARK. The MSGMARK check is needed to 8516 * handle the odd semantics of MSGMARK where essentially 8517 * the whole message is to be treated as marked. 8518 * Carry any MSGMARKNEXT and MSGNOTMARKNEXT from q_first 8519 * to the front of the b_cont chain. 8520 */ 8521 rmvq_noenab(q, qfirst); 8522 8523 /* 8524 * The first message in the b_cont list 8525 * tracks MSGMARKNEXT and MSGNOTMARKNEXT. 
/*
 * Return the held vnode attached to the stream head of a
 * given queue.
 * It is the responsibility of the calling routine to ensure
 * that the queue does not go away (e.g. pop).
 */
vnode_t *
strq2vp(queue_t *qp)
{
        vnode_t *vp;
        vp = STREAM(qp)->sd_vnode;
        ASSERT(vp != NULL);
        VN_HOLD(vp);
        return (vp);
}

/*
 * Return the stream head write queue for the given vp.
 * It is the responsibility of the calling routine to ensure
 * that the stream or vnode does not close.
 */
queue_t *
strvp2wq(vnode_t *vp)
{
        ASSERT(vp->v_stream != NULL);
        return (vp->v_stream->sd_wrq);
}

/*
 * pollwakeup the stream head.
 * It is the responsibility of the calling routine to ensure
 * that the stream or vnode does not close.
 */
void
strpollwakeup(vnode_t *vp, short event)
{
        ASSERT(vp->v_stream);
        pollwakeup(&vp->v_stream->sd_pollist, event);
}

/*
 * Mate the stream heads of two vnodes together. If the two vnodes are the
 * same, we just make the write-side point at the read-side -- otherwise,
 * we do a full mate.  Only works on vnodes associated with streams that are
 * still being built and thus have only a stream head.
 */
void
strmate(vnode_t *vp1, vnode_t *vp2)
{
        queue_t *wrq1 = strvp2wq(vp1);
        queue_t *wrq2 = strvp2wq(vp2);

        /*
         * Verify that there are no modules on the stream yet.  We also
         * rely on the stream head always having a service procedure to
         * avoid tweaking q_nfsrv.
         */
        ASSERT(wrq1->q_next == NULL && wrq2->q_next == NULL);
        ASSERT(wrq1->q_qinfo->qi_srvp != NULL);
        ASSERT(wrq2->q_qinfo->qi_srvp != NULL);

        /*
         * If the queues are the same, just twist; otherwise do a full mate.
         */
        if (wrq1 == wrq2) {
                wrq1->q_next = _RD(wrq1);
        } else {
                wrq1->q_next = _RD(wrq2);
                wrq2->q_next = _RD(wrq1);
                STREAM(wrq1)->sd_mate = STREAM(wrq2);
                STREAM(wrq1)->sd_flag |= STRMATE;
                STREAM(wrq2)->sd_mate = STREAM(wrq1);
                STREAM(wrq2)->sd_flag |= STRMATE;
        }
}
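/*
 * Illustrative sketch (not part of the original source): because strq2vp()
 * returns the vnode held, any user of it must drop the hold with VN_RELE()
 * when finished.  The module put procedure my_put() below is hypothetical;
 * only strq2vp()/VN_RELE() and putnext() come from the surrounding code.
 *
 *	static int
 *	my_put(queue_t *q, mblk_t *mp)
 *	{
 *		vnode_t *vp = strq2vp(q);	// vnode comes back held
 *
 *		...inspect vp (e.g. vp->v_type) while the hold is in place...
 *		VN_RELE(vp);			// release the hold
 *		putnext(q, mp);
 *		return (0);
 *	}
 */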
/*
 * XXX will go away when console is correctly fixed.
 * Clean up the console PIDs from a previous I_SETSIG;
 * called only for cnopen, which never calls strclean().
 */
void
str_cn_clean(struct vnode *vp)
{
        strsig_t *ssp, *pssp, *tssp;
        struct stdata *stp;
        struct pid *pidp;
        int update = 0;

        ASSERT(vp->v_stream);
        stp = vp->v_stream;
        pssp = NULL;
        mutex_enter(&stp->sd_lock);
        ssp = stp->sd_siglist;
        while (ssp) {
                mutex_enter(&pidlock);
                pidp = ssp->ss_pidp;
                /*
                 * Get rid of the PID if the proc is gone.
                 */
                if (pidp->pid_prinactive) {
                        tssp = ssp->ss_next;
                        if (pssp)
                                pssp->ss_next = tssp;
                        else
                                stp->sd_siglist = tssp;
                        ASSERT(pidp->pid_ref <= 1);
                        PID_RELE(ssp->ss_pidp);
                        mutex_exit(&pidlock);
                        kmem_free(ssp, sizeof (strsig_t));
                        update = 1;
                        ssp = tssp;
                        continue;
                } else
                        mutex_exit(&pidlock);
                pssp = ssp;
                ssp = ssp->ss_next;
        }
        if (update) {
                stp->sd_sigflags = 0;
                for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next)
                        stp->sd_sigflags |= ssp->ss_events;
        }
        mutex_exit(&stp->sd_lock);
}

/*
 * Return B_TRUE if there is data in the message, B_FALSE otherwise.
 */
static boolean_t
msghasdata(mblk_t *bp)
{
        for (; bp; bp = bp->b_cont)
                if (bp->b_datap->db_type == M_DATA) {
                        ASSERT(bp->b_wptr >= bp->b_rptr);
                        if (bp->b_wptr > bp->b_rptr)
                                return (B_TRUE);
                }
        return (B_FALSE);
}
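/*
 * Illustrative sketch (not part of the original source): msghasdata() only
 * reports B_TRUE when some M_DATA block in the chain has unread bytes, so a
 * message whose M_DATA part is empty counts as having no data.  A minimal
 * kernel sketch, assuming the allocb() calls succeed; the 16-byte size and
 * the use of T_data_ind are arbitrary choices for the example:
 *
 *	mblk_t *proto = allocb(sizeof (struct T_data_ind), BPRI_MED);
 *	mblk_t *data = allocb(16, BPRI_MED);
 *
 *	DB_TYPE(proto) = M_PROTO;
 *	proto->b_wptr += sizeof (struct T_data_ind);
 *	proto->b_cont = data;		// M_DATA with b_rptr == b_wptr
 *
 *	// msghasdata(proto) is B_FALSE (and msgnodata(proto) is nonzero)
 *	// until data->b_wptr is advanced past data->b_rptr.
 */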