1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */ 22 /* All Rights Reserved */ 23 24 25 /* 26 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 27 * Use is subject to license terms. 28 */ 29 30 #include <sys/types.h> 31 #include <sys/sysmacros.h> 32 #include <sys/param.h> 33 #include <sys/errno.h> 34 #include <sys/signal.h> 35 #include <sys/stat.h> 36 #include <sys/proc.h> 37 #include <sys/cred.h> 38 #include <sys/user.h> 39 #include <sys/vnode.h> 40 #include <sys/file.h> 41 #include <sys/stream.h> 42 #include <sys/strsubr.h> 43 #include <sys/stropts.h> 44 #include <sys/tihdr.h> 45 #include <sys/var.h> 46 #include <sys/poll.h> 47 #include <sys/termio.h> 48 #include <sys/ttold.h> 49 #include <sys/systm.h> 50 #include <sys/uio.h> 51 #include <sys/cmn_err.h> 52 #include <sys/sad.h> 53 #include <sys/netstack.h> 54 #include <sys/priocntl.h> 55 #include <sys/jioctl.h> 56 #include <sys/procset.h> 57 #include <sys/session.h> 58 #include <sys/kmem.h> 59 #include <sys/filio.h> 60 #include <sys/vtrace.h> 61 #include <sys/debug.h> 62 #include <sys/strredir.h> 63 #include <sys/fs/fifonode.h> 64 #include <sys/fs/snode.h> 65 #include <sys/strlog.h> 66 #include <sys/strsun.h> 67 #include <sys/project.h> 68 #include <sys/kbio.h> 69 #include <sys/msio.h> 70 #include <sys/tty.h> 71 #include <sys/ptyvar.h> 72 #include <sys/vuid_event.h> 73 #include <sys/modctl.h> 74 #include <sys/sunddi.h> 75 #include <sys/sunldi_impl.h> 76 #include <sys/autoconf.h> 77 #include <sys/policy.h> 78 #include <sys/dld.h> 79 #include <sys/zone.h> 80 81 /* 82 * This define helps improve the readability of streams code while 83 * still maintaining a very old streams performance enhancement. The 84 * performance enhancement basically involved having all callers 85 * of straccess() perform the first check that straccess() will do 86 * locally before actually calling straccess(). (There by reducing 87 * the number of unnecessary calls to straccess().) 88 */ 89 #define i_straccess(x, y) ((stp->sd_sidp == NULL) ? 0 : \ 90 (stp->sd_vnode->v_type == VFIFO) ? 0 : \ 91 straccess((x), (y))) 92 93 /* 94 * what is mblk_pull_len? 95 * 96 * If a streams message consists of many short messages, 97 * a performance degradation occurs from copyout overhead. 98 * To decrease the per mblk overhead, messages that are 99 * likely to consist of many small mblks are pulled up into 100 * one continuous chunk of memory. 101 * 102 * To avoid the processing overhead of examining every 103 * mblk, a quick heuristic is used. If the first mblk in 104 * the message is shorter than mblk_pull_len, it is likely 105 * that the rest of the mblk will be short. 
106 * 107 * This heuristic was decided upon after performance tests 108 * indicated that anything more complex slowed down the main 109 * code path. 110 */ 111 #define MBLK_PULL_LEN 64 112 uint32_t mblk_pull_len = MBLK_PULL_LEN; 113 114 /* 115 * The sgttyb_handling flag controls the handling of the old BSD 116 * TIOCGETP, TIOCSETP, and TIOCSETN ioctls as follows: 117 * 118 * 0 - Emit no warnings at all and retain old, broken behavior. 119 * 1 - Emit no warnings and silently handle new semantics. 120 * 2 - Send cmn_err(CE_NOTE) when either TIOCSETP or TIOCSETN is used 121 * (once per system invocation). Handle with new semantics. 122 * 3 - Send SIGSYS when any TIOCGETP, TIOCSETP, or TIOCSETN call is 123 * made (so that offenders drop core and are easy to debug). 124 * 125 * The "new semantics" are that TIOCGETP returns B38400 for 126 * sg_[io]speed if the corresponding value is over B38400, and that 127 * TIOCSET[PN] accept B38400 in these cases to mean "retain current 128 * bit rate." 129 */ 130 int sgttyb_handling = 1; 131 static boolean_t sgttyb_complaint; 132 133 /* don't push drcompat module by default on Style-2 streams */ 134 static int push_drcompat = 0; 135 136 /* 137 * id value used to distinguish between different ioctl messages 138 */ 139 static uint32_t ioc_id; 140 141 static void putback(struct stdata *, queue_t *, mblk_t *, int); 142 static void strcleanall(struct vnode *); 143 static int strwsrv(queue_t *); 144 static int strdocmd(struct stdata *, struct strcmd *, cred_t *); 145 static void struioainit(queue_t *, sodirect_t *, uio_t *); 146 147 /* 148 * qinit and module_info structures for stream head read and write queues 149 */ 150 struct module_info strm_info = { 0, "strrhead", 0, INFPSZ, STRHIGH, STRLOW }; 151 struct module_info stwm_info = { 0, "strwhead", 0, 0, 0, 0 }; 152 struct qinit strdata = { strrput, NULL, NULL, NULL, NULL, &strm_info }; 153 struct qinit stwdata = { NULL, strwsrv, NULL, NULL, NULL, &stwm_info }; 154 struct module_info fiform_info = { 0, "fifostrrhead", 0, PIPE_BUF, FIFOHIWAT, 155 FIFOLOWAT }; 156 struct module_info fifowm_info = { 0, "fifostrwhead", 0, 0, 0, 0 }; 157 struct qinit fifo_strdata = { strrput, NULL, NULL, NULL, NULL, &fiform_info }; 158 struct qinit fifo_stwdata = { NULL, strwsrv, NULL, NULL, NULL, &fifowm_info }; 159 160 extern kmutex_t strresources; /* protects global resources */ 161 extern kmutex_t muxifier; /* single-threads multiplexor creation */ 162 163 static boolean_t msghasdata(mblk_t *bp); 164 #define msgnodata(bp) (!msghasdata(bp)) 165 166 /* 167 * Stream head locking notes: 168 * There are four monitors associated with the stream head: 169 * 1. v_stream monitor: in stropen() and strclose() v_lock 170 * is held while the association of vnode and stream 171 * head is established or tested for. 172 * 2. open/close/push/pop monitor: sd_lock is held while each 173 * thread bids for exclusive access to this monitor 174 * for opening or closing a stream. In addition, this 175 * monitor is entered during pushes and pops. This 176 * guarantees that during plumbing operations there 177 * is only one thread trying to change the plumbing. 178 * Any other threads present in the stream are only 179 * using the plumbing. 180 * 3. read/write monitor: in the case of read, a thread holds 181 * sd_lock while trying to get data from the stream 182 * head queue. if there is none to fulfill a read 183 * request, it sets RSLEEP and calls cv_wait_sig() down 184 * in strwaitq() to await the arrival of new data. 
185 * when new data arrives in strrput(), sd_lock is acquired 186 * before testing for RSLEEP and calling cv_broadcast(). 187 * the behavior of strwrite(), strwsrv(), and WSLEEP 188 * mirror this. 189 * 4. ioctl monitor: sd_lock is gotten to ensure that only one 190 * thread is doing an ioctl at a time. 191 * 192 * Note, for sodirect case 3. is extended to (*sodirect_t.sod_enqueue)() 193 * call-back from below, further the sodirect support is for code paths 194 * called via kstgetmsg(), all other code paths ASSERT() that sodirect 195 * uioa generated mblk_t's (i.e. DBLK_UIOA) aren't processed. 196 */ 197 198 static int 199 push_mod(queue_t *qp, dev_t *devp, struct stdata *stp, const char *name, 200 int anchor, cred_t *crp, uint_t anchor_zoneid) 201 { 202 int error; 203 fmodsw_impl_t *fp; 204 205 if (stp->sd_flag & (STRHUP|STRDERR|STWRERR)) { 206 error = (stp->sd_flag & STRHUP) ? ENXIO : EIO; 207 return (error); 208 } 209 if (stp->sd_pushcnt >= nstrpush) { 210 return (EINVAL); 211 } 212 213 if ((fp = fmodsw_find(name, FMODSW_HOLD | FMODSW_LOAD)) == NULL) { 214 stp->sd_flag |= STREOPENFAIL; 215 return (EINVAL); 216 } 217 218 /* 219 * push new module and call its open routine via qattach 220 */ 221 if ((error = qattach(qp, devp, 0, crp, fp, B_FALSE)) != 0) 222 return (error); 223 224 /* 225 * Check to see if caller wants a STREAMS anchor 226 * put at this place in the stream, and add if so. 227 */ 228 mutex_enter(&stp->sd_lock); 229 if (anchor == stp->sd_pushcnt) { 230 stp->sd_anchor = stp->sd_pushcnt; 231 stp->sd_anchorzone = anchor_zoneid; 232 } 233 mutex_exit(&stp->sd_lock); 234 235 return (0); 236 } 237 238 /* 239 * Open a stream device. 240 */ 241 int 242 stropen(vnode_t *vp, dev_t *devp, int flag, cred_t *crp) 243 { 244 struct stdata *stp; 245 queue_t *qp; 246 int s; 247 dev_t dummydev, savedev; 248 struct autopush *ap; 249 struct dlautopush dlap; 250 int error = 0; 251 ssize_t rmin, rmax; 252 int cloneopen; 253 queue_t *brq; 254 major_t major; 255 str_stack_t *ss; 256 zoneid_t zoneid; 257 uint_t anchor; 258 259 if (audit_active) 260 audit_stropen(vp, devp, flag, crp); 261 262 /* 263 * If the stream already exists, wait for any open in progress 264 * to complete, then call the open function of each module and 265 * driver in the stream. Otherwise create the stream. 266 */ 267 TRACE_1(TR_FAC_STREAMS_FR, TR_STROPEN, "stropen:%p", vp); 268 retry: 269 mutex_enter(&vp->v_lock); 270 if ((stp = vp->v_stream) != NULL) { 271 272 /* 273 * Waiting for stream to be created to device 274 * due to another open. 
275 */ 276 mutex_exit(&vp->v_lock); 277 278 if (STRMATED(stp)) { 279 struct stdata *strmatep = stp->sd_mate; 280 281 STRLOCKMATES(stp); 282 if (strmatep->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 283 if (flag & (FNDELAY|FNONBLOCK)) { 284 error = EAGAIN; 285 mutex_exit(&strmatep->sd_lock); 286 goto ckreturn; 287 } 288 mutex_exit(&stp->sd_lock); 289 if (!cv_wait_sig(&strmatep->sd_monitor, 290 &strmatep->sd_lock)) { 291 error = EINTR; 292 mutex_exit(&strmatep->sd_lock); 293 mutex_enter(&stp->sd_lock); 294 goto ckreturn; 295 } 296 mutex_exit(&strmatep->sd_lock); 297 goto retry; 298 } 299 if (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 300 if (flag & (FNDELAY|FNONBLOCK)) { 301 error = EAGAIN; 302 mutex_exit(&strmatep->sd_lock); 303 goto ckreturn; 304 } 305 mutex_exit(&strmatep->sd_lock); 306 if (!cv_wait_sig(&stp->sd_monitor, 307 &stp->sd_lock)) { 308 error = EINTR; 309 goto ckreturn; 310 } 311 mutex_exit(&stp->sd_lock); 312 goto retry; 313 } 314 315 if (stp->sd_flag & (STRDERR|STWRERR)) { 316 error = EIO; 317 mutex_exit(&strmatep->sd_lock); 318 goto ckreturn; 319 } 320 321 stp->sd_flag |= STWOPEN; 322 STRUNLOCKMATES(stp); 323 } else { 324 mutex_enter(&stp->sd_lock); 325 if (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 326 if (flag & (FNDELAY|FNONBLOCK)) { 327 error = EAGAIN; 328 goto ckreturn; 329 } 330 if (!cv_wait_sig(&stp->sd_monitor, 331 &stp->sd_lock)) { 332 error = EINTR; 333 goto ckreturn; 334 } 335 mutex_exit(&stp->sd_lock); 336 goto retry; /* could be clone! */ 337 } 338 339 if (stp->sd_flag & (STRDERR|STWRERR)) { 340 error = EIO; 341 goto ckreturn; 342 } 343 344 stp->sd_flag |= STWOPEN; 345 mutex_exit(&stp->sd_lock); 346 } 347 348 /* 349 * Open all modules and devices down stream to notify 350 * that another user is streaming. For modules, set the 351 * last argument to MODOPEN and do not pass any open flags. 352 * Ignore dummydev since this is not the first open. 353 */ 354 claimstr(stp->sd_wrq); 355 qp = stp->sd_wrq; 356 while (_SAMESTR(qp)) { 357 qp = qp->q_next; 358 if ((error = qreopen(_RD(qp), devp, flag, crp)) != 0) 359 break; 360 } 361 releasestr(stp->sd_wrq); 362 mutex_enter(&stp->sd_lock); 363 stp->sd_flag &= ~(STRHUP|STWOPEN|STRDERR|STWRERR); 364 stp->sd_rerror = 0; 365 stp->sd_werror = 0; 366 ckreturn: 367 cv_broadcast(&stp->sd_monitor); 368 mutex_exit(&stp->sd_lock); 369 return (error); 370 } 371 372 /* 373 * This vnode isn't streaming. SPECFS already 374 * checked for multiple vnodes pointing to the 375 * same stream, so create a stream to the driver. 376 */ 377 qp = allocq(); 378 stp = shalloc(qp); 379 380 /* 381 * Initialize stream head. shalloc() has given us 382 * exclusive access, and we have the vnode locked; 383 * we can do whatever we want with stp. 
384 */ 385 stp->sd_flag = STWOPEN; 386 stp->sd_siglist = NULL; 387 stp->sd_pollist.ph_list = NULL; 388 stp->sd_sigflags = 0; 389 stp->sd_mark = NULL; 390 stp->sd_closetime = STRTIMOUT; 391 stp->sd_sidp = NULL; 392 stp->sd_pgidp = NULL; 393 stp->sd_vnode = vp; 394 stp->sd_rerror = 0; 395 stp->sd_werror = 0; 396 stp->sd_wroff = 0; 397 stp->sd_tail = 0; 398 stp->sd_iocblk = NULL; 399 stp->sd_cmdblk = NULL; 400 stp->sd_pushcnt = 0; 401 stp->sd_qn_minpsz = 0; 402 stp->sd_qn_maxpsz = INFPSZ - 1; /* used to check for initialization */ 403 stp->sd_maxblk = INFPSZ; 404 stp->sd_sodirect = NULL; 405 qp->q_ptr = _WR(qp)->q_ptr = stp; 406 STREAM(qp) = STREAM(_WR(qp)) = stp; 407 vp->v_stream = stp; 408 mutex_exit(&vp->v_lock); 409 if (vp->v_type == VFIFO) { 410 stp->sd_flag |= OLDNDELAY; 411 /* 412 * This means, both for pipes and fifos 413 * strwrite will send SIGPIPE if the other 414 * end is closed. For putmsg it depends 415 * on whether it is a XPG4_2 application 416 * or not 417 */ 418 stp->sd_wput_opt = SW_SIGPIPE; 419 420 /* setq might sleep in kmem_alloc - avoid holding locks. */ 421 setq(qp, &fifo_strdata, &fifo_stwdata, NULL, QMTSAFE, 422 SQ_CI|SQ_CO, B_FALSE); 423 424 set_qend(qp); 425 stp->sd_strtab = fifo_getinfo(); 426 _WR(qp)->q_nfsrv = _WR(qp); 427 qp->q_nfsrv = qp; 428 /* 429 * Wake up others that are waiting for stream to be created. 430 */ 431 mutex_enter(&stp->sd_lock); 432 /* 433 * nothing is be pushed on stream yet, so 434 * optimized stream head packetsizes are just that 435 * of the read queue 436 */ 437 stp->sd_qn_minpsz = qp->q_minpsz; 438 stp->sd_qn_maxpsz = qp->q_maxpsz; 439 stp->sd_flag &= ~STWOPEN; 440 goto fifo_opendone; 441 } 442 /* setq might sleep in kmem_alloc - avoid holding locks. */ 443 setq(qp, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO, B_FALSE); 444 445 set_qend(qp); 446 447 /* 448 * Open driver and create stream to it (via qattach). 449 */ 450 savedev = *devp; 451 cloneopen = (getmajor(*devp) == clone_major); 452 if ((error = qattach(qp, devp, flag, crp, NULL, B_FALSE)) != 0) { 453 mutex_enter(&vp->v_lock); 454 vp->v_stream = NULL; 455 mutex_exit(&vp->v_lock); 456 mutex_enter(&stp->sd_lock); 457 cv_broadcast(&stp->sd_monitor); 458 mutex_exit(&stp->sd_lock); 459 freeq(_RD(qp)); 460 shfree(stp); 461 return (error); 462 } 463 /* 464 * Set sd_strtab after open in order to handle clonable drivers 465 */ 466 stp->sd_strtab = STREAMSTAB(getmajor(*devp)); 467 468 /* 469 * Historical note: dummydev used to be be prior to the initial 470 * open (via qattach above), which made the value seen 471 * inconsistent between an I_PUSH and an autopush of a module. 472 */ 473 dummydev = *devp; 474 475 /* 476 * For clone open of old style (Q not associated) network driver, 477 * push DRMODNAME module to handle DL_ATTACH/DL_DETACH 478 */ 479 brq = _RD(_WR(qp)->q_next); 480 major = getmajor(*devp); 481 if (push_drcompat && cloneopen && NETWORK_DRV(major) && 482 ((brq->q_flag & _QASSOCIATED) == 0)) { 483 if (push_mod(qp, &dummydev, stp, DRMODNAME, 0, crp, 0) != 0) 484 cmn_err(CE_WARN, "cannot push " DRMODNAME 485 " streams module"); 486 } 487 488 if (!NETWORK_DRV(major)) { 489 savedev = *devp; 490 } else { 491 /* 492 * For network devices, process differently based on the 493 * return value from dld_autopush(): 494 * 495 * 0: the passed-in device points to a GLDv3 datalink with 496 * per-link autopush configuration; use that configuration 497 * and ignore any per-driver autopush configuration. 
498 * 499 * 1: the passed-in device points to a physical GLDv3 500 * datalink without per-link autopush configuration. The 501 * passed in device was changed to refer to the actual 502 * physical device (if it's not already); we use that new 503 * device to look up any per-driver autopush configuration. 504 * 505 * -1: neither of the above cases applied; use the initial 506 * device to look up any per-driver autopush configuration. 507 */ 508 switch (dld_autopush(&savedev, &dlap)) { 509 case 0: 510 zoneid = crgetzoneid(crp); 511 for (s = 0; s < dlap.dap_npush; s++) { 512 error = push_mod(qp, &dummydev, stp, 513 dlap.dap_aplist[s], dlap.dap_anchor, crp, 514 zoneid); 515 if (error != 0) 516 break; 517 } 518 goto opendone; 519 case 1: 520 break; 521 case -1: 522 savedev = *devp; 523 break; 524 } 525 } 526 /* 527 * Find the autopush configuration based on "savedev". Start with the 528 * global zone. If not found check in the local zone. 529 */ 530 zoneid = GLOBAL_ZONEID; 531 retryap: 532 ss = netstack_find_by_stackid(zoneid_to_netstackid(zoneid))-> 533 netstack_str; 534 if ((ap = sad_ap_find_by_dev(savedev, ss)) == NULL) { 535 netstack_rele(ss->ss_netstack); 536 if (zoneid == GLOBAL_ZONEID) { 537 /* 538 * None found. Also look in the zone's autopush table. 539 */ 540 zoneid = crgetzoneid(crp); 541 if (zoneid != GLOBAL_ZONEID) 542 goto retryap; 543 } 544 goto opendone; 545 } 546 anchor = ap->ap_anchor; 547 zoneid = crgetzoneid(crp); 548 for (s = 0; s < ap->ap_npush; s++) { 549 error = push_mod(qp, &dummydev, stp, ap->ap_list[s], 550 anchor, crp, zoneid); 551 if (error != 0) 552 break; 553 } 554 sad_ap_rele(ap, ss); 555 netstack_rele(ss->ss_netstack); 556 557 opendone: 558 559 /* 560 * let specfs know that open failed part way through 561 */ 562 if (error) { 563 mutex_enter(&stp->sd_lock); 564 stp->sd_flag |= STREOPENFAIL; 565 mutex_exit(&stp->sd_lock); 566 } 567 568 /* 569 * Wake up others that are waiting for stream to be created. 570 */ 571 mutex_enter(&stp->sd_lock); 572 stp->sd_flag &= ~STWOPEN; 573 574 /* 575 * As a performance concern we are caching the values of 576 * q_minpsz and q_maxpsz of the module below the stream 577 * head in the stream head. 578 */ 579 mutex_enter(QLOCK(stp->sd_wrq->q_next)); 580 rmin = stp->sd_wrq->q_next->q_minpsz; 581 rmax = stp->sd_wrq->q_next->q_maxpsz; 582 mutex_exit(QLOCK(stp->sd_wrq->q_next)); 583 584 /* do this processing here as a performance concern */ 585 if (strmsgsz != 0) { 586 if (rmax == INFPSZ) 587 rmax = strmsgsz; 588 else 589 rmax = MIN(strmsgsz, rmax); 590 } 591 592 mutex_enter(QLOCK(stp->sd_wrq)); 593 stp->sd_qn_minpsz = rmin; 594 stp->sd_qn_maxpsz = rmax; 595 mutex_exit(QLOCK(stp->sd_wrq)); 596 597 fifo_opendone: 598 cv_broadcast(&stp->sd_monitor); 599 mutex_exit(&stp->sd_lock); 600 return (error); 601 } 602 603 static int strsink(queue_t *, mblk_t *); 604 static struct qinit deadrend = { 605 strsink, NULL, NULL, NULL, NULL, &strm_info, NULL 606 }; 607 static struct qinit deadwend = { 608 NULL, NULL, NULL, NULL, NULL, &stwm_info, NULL 609 }; 610 611 /* 612 * Close a stream. 613 * This is called from closef() on the last close of an open stream. 614 * Strclean() will already have removed the siglist and pollist 615 * information, so all that remains is to remove all multiplexor links 616 * for the stream, pop all the modules (and the driver), and free the 617 * stream structure. 
618 */ 619 620 int 621 strclose(struct vnode *vp, int flag, cred_t *crp) 622 { 623 struct stdata *stp; 624 queue_t *qp; 625 int rval; 626 int freestp = 1; 627 queue_t *rmq; 628 629 if (audit_active) 630 audit_strclose(vp, flag, crp); 631 632 TRACE_1(TR_FAC_STREAMS_FR, 633 TR_STRCLOSE, "strclose:%p", vp); 634 ASSERT(vp->v_stream); 635 636 stp = vp->v_stream; 637 ASSERT(!(stp->sd_flag & STPLEX)); 638 qp = stp->sd_wrq; 639 640 /* 641 * Needed so that strpoll will return non-zero for this fd. 642 * Note that with POLLNOERR STRHUP does still cause POLLHUP. 643 */ 644 mutex_enter(&stp->sd_lock); 645 stp->sd_flag |= STRHUP; 646 mutex_exit(&stp->sd_lock); 647 648 /* 649 * If the registered process or process group did not have an 650 * open instance of this stream then strclean would not be 651 * called. Thus at the time of closing all remaining siglist entries 652 * are removed. 653 */ 654 if (stp->sd_siglist != NULL) 655 strcleanall(vp); 656 657 ASSERT(stp->sd_siglist == NULL); 658 ASSERT(stp->sd_sigflags == 0); 659 660 if (STRMATED(stp)) { 661 struct stdata *strmatep = stp->sd_mate; 662 int waited = 1; 663 664 STRLOCKMATES(stp); 665 while (waited) { 666 waited = 0; 667 while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 668 mutex_exit(&strmatep->sd_lock); 669 cv_wait(&stp->sd_monitor, &stp->sd_lock); 670 mutex_exit(&stp->sd_lock); 671 STRLOCKMATES(stp); 672 waited = 1; 673 } 674 while (strmatep->sd_flag & 675 (STWOPEN|STRCLOSE|STRPLUMB)) { 676 mutex_exit(&stp->sd_lock); 677 cv_wait(&strmatep->sd_monitor, 678 &strmatep->sd_lock); 679 mutex_exit(&strmatep->sd_lock); 680 STRLOCKMATES(stp); 681 waited = 1; 682 } 683 } 684 stp->sd_flag |= STRCLOSE; 685 STRUNLOCKMATES(stp); 686 } else { 687 mutex_enter(&stp->sd_lock); 688 stp->sd_flag |= STRCLOSE; 689 mutex_exit(&stp->sd_lock); 690 } 691 692 ASSERT(qp->q_first == NULL); /* No more delayed write */ 693 694 /* Check if an I_LINK was ever done on this stream */ 695 if (stp->sd_flag & STRHASLINKS) { 696 netstack_t *ns; 697 str_stack_t *ss; 698 699 ns = netstack_find_by_cred(crp); 700 ASSERT(ns != NULL); 701 ss = ns->netstack_str; 702 ASSERT(ss != NULL); 703 704 (void) munlinkall(stp, LINKCLOSE|LINKNORMAL, crp, &rval, ss); 705 netstack_rele(ss->ss_netstack); 706 } 707 708 while (_SAMESTR(qp)) { 709 /* 710 * Holding sd_lock prevents q_next from changing in 711 * this stream. 712 */ 713 mutex_enter(&stp->sd_lock); 714 if (!(flag & (FNDELAY|FNONBLOCK)) && (stp->sd_closetime > 0)) { 715 716 /* 717 * sleep until awakened by strwsrv() or timeout 718 */ 719 for (;;) { 720 mutex_enter(QLOCK(qp->q_next)); 721 if (!(qp->q_next->q_mblkcnt)) { 722 mutex_exit(QLOCK(qp->q_next)); 723 break; 724 } 725 stp->sd_flag |= WSLEEP; 726 727 /* ensure strwsrv gets enabled */ 728 qp->q_next->q_flag |= QWANTW; 729 mutex_exit(QLOCK(qp->q_next)); 730 /* get out if we timed out or recv'd a signal */ 731 if (str_cv_wait(&qp->q_wait, &stp->sd_lock, 732 stp->sd_closetime, 0) <= 0) { 733 break; 734 } 735 } 736 stp->sd_flag &= ~WSLEEP; 737 } 738 mutex_exit(&stp->sd_lock); 739 740 rmq = qp->q_next; 741 if (rmq->q_flag & QISDRV) { 742 ASSERT(!_SAMESTR(rmq)); 743 wait_sq_svc(_RD(qp)->q_syncq); 744 } 745 746 qdetach(_RD(rmq), 1, flag, crp, B_FALSE); 747 } 748 749 /* 750 * Since we call pollwakeup in close() now, the poll list should 751 * be empty in most cases. The only exception is the layered devices 752 * (e.g. the console drivers with redirection modules pushed on top 753 * of it). 
We have to do this after calling qdetach() because 754 * the redirection module won't have torn down the console 755 * redirection until after qdetach() has been invoked. 756 */ 757 if (stp->sd_pollist.ph_list != NULL) { 758 pollwakeup(&stp->sd_pollist, POLLERR); 759 pollhead_clean(&stp->sd_pollist); 760 } 761 ASSERT(stp->sd_pollist.ph_list == NULL); 762 ASSERT(stp->sd_sidp == NULL); 763 ASSERT(stp->sd_pgidp == NULL); 764 765 /* Prevent qenable from re-enabling the stream head queue */ 766 disable_svc(_RD(qp)); 767 768 /* 769 * Wait until service procedure of each queue is 770 * run, if QINSERVICE is set. 771 */ 772 wait_svc(_RD(qp)); 773 774 /* 775 * Now, flush both queues. 776 */ 777 flushq(_RD(qp), FLUSHALL); 778 flushq(qp, FLUSHALL); 779 780 /* 781 * If the write queue of the stream head is pointing to a 782 * read queue, we have a twisted stream. If the read queue 783 * is alive, convert the stream head queues into a dead end. 784 * If the read queue is dead, free the dead pair. 785 */ 786 if (qp->q_next && !_SAMESTR(qp)) { 787 if (qp->q_next->q_qinfo == &deadrend) { /* half-closed pipe */ 788 flushq(qp->q_next, FLUSHALL); /* ensure no message */ 789 shfree(qp->q_next->q_stream); 790 freeq(qp->q_next); 791 freeq(_RD(qp)); 792 } else if (qp->q_next == _RD(qp)) { /* fifo */ 793 freeq(_RD(qp)); 794 } else { /* pipe */ 795 freestp = 0; 796 /* 797 * The q_info pointers are never accessed when 798 * SQLOCK is held. 799 */ 800 ASSERT(qp->q_syncq == _RD(qp)->q_syncq); 801 mutex_enter(SQLOCK(qp->q_syncq)); 802 qp->q_qinfo = &deadwend; 803 _RD(qp)->q_qinfo = &deadrend; 804 mutex_exit(SQLOCK(qp->q_syncq)); 805 } 806 } else { 807 freeq(_RD(qp)); /* free stream head queue pair */ 808 } 809 810 mutex_enter(&vp->v_lock); 811 if (stp->sd_iocblk) { 812 if (stp->sd_iocblk != (mblk_t *)-1) { 813 freemsg(stp->sd_iocblk); 814 } 815 stp->sd_iocblk = NULL; 816 } 817 stp->sd_vnode = NULL; 818 vp->v_stream = NULL; 819 mutex_exit(&vp->v_lock); 820 mutex_enter(&stp->sd_lock); 821 freemsg(stp->sd_cmdblk); 822 stp->sd_cmdblk = NULL; 823 stp->sd_flag &= ~STRCLOSE; 824 cv_broadcast(&stp->sd_monitor); 825 mutex_exit(&stp->sd_lock); 826 827 if (freestp) 828 shfree(stp); 829 return (0); 830 } 831 832 static int 833 strsink(queue_t *q, mblk_t *bp) 834 { 835 struct copyresp *resp; 836 837 switch (bp->b_datap->db_type) { 838 case M_FLUSH: 839 if ((*bp->b_rptr & FLUSHW) && !(bp->b_flag & MSGNOLOOP)) { 840 *bp->b_rptr &= ~FLUSHR; 841 bp->b_flag |= MSGNOLOOP; 842 /* 843 * Protect against the driver passing up 844 * messages after it has done a qprocsoff. 845 */ 846 if (_OTHERQ(q)->q_next == NULL) 847 freemsg(bp); 848 else 849 qreply(q, bp); 850 } else { 851 freemsg(bp); 852 } 853 break; 854 855 case M_COPYIN: 856 case M_COPYOUT: 857 if (bp->b_cont) { 858 freemsg(bp->b_cont); 859 bp->b_cont = NULL; 860 } 861 bp->b_datap->db_type = M_IOCDATA; 862 bp->b_wptr = bp->b_rptr + sizeof (struct copyresp); 863 resp = (struct copyresp *)bp->b_rptr; 864 resp->cp_rval = (caddr_t)1; /* failure */ 865 /* 866 * Protect against the driver passing up 867 * messages after it has done a qprocsoff. 868 */ 869 if (_OTHERQ(q)->q_next == NULL) 870 freemsg(bp); 871 else 872 qreply(q, bp); 873 break; 874 875 case M_IOCTL: 876 if (bp->b_cont) { 877 freemsg(bp->b_cont); 878 bp->b_cont = NULL; 879 } 880 bp->b_datap->db_type = M_IOCNAK; 881 /* 882 * Protect against the driver passing up 883 * messages after it has done a qprocsoff. 
884 */ 885 if (_OTHERQ(q)->q_next == NULL) 886 freemsg(bp); 887 else 888 qreply(q, bp); 889 break; 890 891 default: 892 freemsg(bp); 893 break; 894 } 895 896 return (0); 897 } 898 899 /* 900 * Clean up after a process when it closes a stream. This is called 901 * from closef for all closes, whereas strclose is called only for the 902 * last close on a stream. The siglist is scanned for entries for the 903 * current process, and these are removed. 904 */ 905 void 906 strclean(struct vnode *vp) 907 { 908 strsig_t *ssp, *pssp, *tssp; 909 stdata_t *stp; 910 int update = 0; 911 912 TRACE_1(TR_FAC_STREAMS_FR, 913 TR_STRCLEAN, "strclean:%p", vp); 914 stp = vp->v_stream; 915 pssp = NULL; 916 mutex_enter(&stp->sd_lock); 917 ssp = stp->sd_siglist; 918 while (ssp) { 919 if (ssp->ss_pidp == curproc->p_pidp) { 920 tssp = ssp->ss_next; 921 if (pssp) 922 pssp->ss_next = tssp; 923 else 924 stp->sd_siglist = tssp; 925 mutex_enter(&pidlock); 926 PID_RELE(ssp->ss_pidp); 927 mutex_exit(&pidlock); 928 kmem_free(ssp, sizeof (strsig_t)); 929 update = 1; 930 ssp = tssp; 931 } else { 932 pssp = ssp; 933 ssp = ssp->ss_next; 934 } 935 } 936 if (update) { 937 stp->sd_sigflags = 0; 938 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next) 939 stp->sd_sigflags |= ssp->ss_events; 940 } 941 mutex_exit(&stp->sd_lock); 942 } 943 944 /* 945 * Used on the last close to remove any remaining items on the siglist. 946 * These could be present on the siglist due to I_ESETSIG calls that 947 * use process groups or processed that do not have an open file descriptor 948 * for this stream (Such entries would not be removed by strclean). 949 */ 950 static void 951 strcleanall(struct vnode *vp) 952 { 953 strsig_t *ssp, *nssp; 954 stdata_t *stp; 955 956 stp = vp->v_stream; 957 mutex_enter(&stp->sd_lock); 958 ssp = stp->sd_siglist; 959 stp->sd_siglist = NULL; 960 while (ssp) { 961 nssp = ssp->ss_next; 962 mutex_enter(&pidlock); 963 PID_RELE(ssp->ss_pidp); 964 mutex_exit(&pidlock); 965 kmem_free(ssp, sizeof (strsig_t)); 966 ssp = nssp; 967 } 968 stp->sd_sigflags = 0; 969 mutex_exit(&stp->sd_lock); 970 } 971 972 /* 973 * Retrieve the next message from the logical stream head read queue 974 * using either rwnext (if sync stream) or getq_noenab. 975 * It is the callers responsibility to call qbackenable after 976 * it is finished with the message. The caller should not call 977 * qbackenable until after any putback calls to avoid spurious backenabling. 978 * 979 * Also, handle uioa initialization and process any DBLK_UIOA flaged messages. 980 */ 981 mblk_t * 982 strget(struct stdata *stp, queue_t *q, struct uio *uiop, int first, 983 int *errorp) 984 { 985 sodirect_t *sodp = stp->sd_sodirect; 986 mblk_t *bp; 987 int error; 988 ssize_t rbytes = 0; 989 990 /* Holding sd_lock prevents the read queue from changing */ 991 ASSERT(MUTEX_HELD(&stp->sd_lock)); 992 993 if (uiop != NULL && stp->sd_struiordq != NULL && 994 q->q_first == NULL && 995 (!first || (stp->sd_wakeq & RSLEEP))) { 996 /* 997 * Stream supports rwnext() for the read side. 998 * If this is the first time we're called by e.g. strread 999 * only do the downcall if there is a deferred wakeup 1000 * (registered in sd_wakeq). 1001 */ 1002 struiod_t uiod; 1003 1004 if (first) 1005 stp->sd_wakeq &= ~RSLEEP; 1006 1007 (void) uiodup(uiop, &uiod.d_uio, uiod.d_iov, 1008 sizeof (uiod.d_iov) / sizeof (*uiod.d_iov)); 1009 uiod.d_mp = 0; 1010 /* 1011 * Mark that a thread is in rwnext on the read side 1012 * to prevent strrput from nacking ioctls immediately. 
1013 * When the last concurrent rwnext returns 1014 * the ioctls are nack'ed. 1015 */ 1016 ASSERT(MUTEX_HELD(&stp->sd_lock)); 1017 stp->sd_struiodnak++; 1018 /* 1019 * Note: rwnext will drop sd_lock. 1020 */ 1021 error = rwnext(q, &uiod); 1022 ASSERT(MUTEX_NOT_HELD(&stp->sd_lock)); 1023 mutex_enter(&stp->sd_lock); 1024 stp->sd_struiodnak--; 1025 while (stp->sd_struiodnak == 0 && 1026 ((bp = stp->sd_struionak) != NULL)) { 1027 stp->sd_struionak = bp->b_next; 1028 bp->b_next = NULL; 1029 bp->b_datap->db_type = M_IOCNAK; 1030 /* 1031 * Protect against the driver passing up 1032 * messages after it has done a qprocsoff. 1033 */ 1034 if (_OTHERQ(q)->q_next == NULL) 1035 freemsg(bp); 1036 else { 1037 mutex_exit(&stp->sd_lock); 1038 qreply(q, bp); 1039 mutex_enter(&stp->sd_lock); 1040 } 1041 } 1042 ASSERT(MUTEX_HELD(&stp->sd_lock)); 1043 if (error == 0 || error == EWOULDBLOCK) { 1044 if ((bp = uiod.d_mp) != NULL) { 1045 *errorp = 0; 1046 ASSERT(MUTEX_HELD(&stp->sd_lock)); 1047 return (bp); 1048 } 1049 error = 0; 1050 } else if (error == EINVAL) { 1051 /* 1052 * The stream plumbing must have 1053 * changed while we were away, so 1054 * just turn off rwnext()s. 1055 */ 1056 error = 0; 1057 } else if (error == EBUSY) { 1058 /* 1059 * The module might have data in transit using putnext 1060 * Fall back on waiting + getq. 1061 */ 1062 error = 0; 1063 } else { 1064 *errorp = error; 1065 ASSERT(MUTEX_HELD(&stp->sd_lock)); 1066 return (NULL); 1067 } 1068 /* 1069 * Try a getq in case a rwnext() generated mblk 1070 * has bubbled up via strrput(). 1071 */ 1072 } 1073 *errorp = 0; 1074 ASSERT(MUTEX_HELD(&stp->sd_lock)); 1075 1076 if (sodp != NULL && sodp->sod_state & SOD_ENABLED) { 1077 if (sodp->sod_uioa.uioa_state & UIOA_INIT) { 1078 /* 1079 * First kstrgetmsg() call for an uioa_t so if any 1080 * queued mblk_t's need to consume them before uioa 1081 * from below can occur. 1082 */ 1083 sodp->sod_uioa.uioa_state &= UIOA_CLR; 1084 sodp->sod_uioa.uioa_state |= UIOA_ENABLED; 1085 if (q->q_first != NULL) { 1086 struioainit(q, sodp, uiop); 1087 } 1088 } else if (sodp->sod_uioa.uioa_state & 1089 (UIOA_ENABLED|UIOA_FINI)) { 1090 ASSERT(uiop == (uio_t *)&sodp->sod_uioa); 1091 rbytes = 0; 1092 } else { 1093 rbytes = uiop->uio_resid; 1094 } 1095 } else { 1096 /* 1097 * If we have a valid uio, try and use this as a guide for how 1098 * many bytes to retrieve from the queue via getq_noenab(). 1099 * Doing this can avoid unneccesary counting of overlong 1100 * messages in putback(). We currently only do this for sockets 1101 * and only if there is no sd_rputdatafunc hook. 1102 * 1103 * The sd_rputdatafunc hook transforms the entire message 1104 * before any bytes in it can be given to a client. So, rbytes 1105 * must be 0 if there is a hook. 1106 */ 1107 if ((uiop != NULL) && (stp->sd_vnode->v_type == VSOCK) && 1108 (stp->sd_rputdatafunc == NULL)) 1109 rbytes = uiop->uio_resid; 1110 } 1111 1112 bp = getq_noenab(q, rbytes); 1113 if (bp != NULL && (bp->b_datap->db_flags & DBLK_UIOA)) { 1114 /* 1115 * A uioa flaged mblk_t chain, already uio processed, 1116 * add it to the sodirect uioa pending free list. 1117 * 1118 * Note, a b_cont chain headed by a DBLK_UIOA enable 1119 * mblk_t must have all mblk_t(s) DBLK_UIOA enabled. 1120 */ 1121 mblk_t *bpt = sodp->sod_uioaft; 1122 1123 ASSERT(sodp != NULL); 1124 ASSERT(msgdsize(bp) == sodp->sod_uioa.uioa_mbytes); 1125 1126 /* 1127 * Add first mblk_t of "bp" chain to current sodirect uioa 1128 * free list tail mblk_t, if any, else empty list so new head. 
1129 */ 1130 if (bpt == NULL) 1131 sodp->sod_uioafh = bp; 1132 else 1133 bpt->b_cont = bp; 1134 1135 /* 1136 * Walk mblk_t "bp" chain to find tail and adjust rptr of 1137 * each to reflect that uioamove() has consumed all data. 1138 */ 1139 bpt = bp; 1140 for (;;) { 1141 bpt->b_rptr = bpt->b_wptr; 1142 if (bpt->b_cont == NULL) 1143 break; 1144 bpt = bpt->b_cont; 1145 1146 ASSERT(bpt->b_datap->db_flags & DBLK_UIOA); 1147 } 1148 /* New sodirect uioa free list tail */ 1149 sodp->sod_uioaft = bpt; 1150 1151 /* Only 1 strget() with data returned per uioa_t */ 1152 if (sodp->sod_uioa.uioa_state & UIOA_ENABLED) { 1153 sodp->sod_uioa.uioa_state &= UIOA_CLR; 1154 sodp->sod_uioa.uioa_state |= UIOA_FINI; 1155 } 1156 } 1157 1158 return (bp); 1159 } 1160 1161 /* 1162 * Copy out the message pointed to by `bp' into the uio pointed to by `uiop'. 1163 * If the message does not fit in the uio the remainder of it is returned; 1164 * otherwise NULL is returned. Any embedded zero-length mblk_t's are 1165 * consumed, even if uio_resid reaches zero. On error, `*errorp' is set to 1166 * the error code, the message is consumed, and NULL is returned. 1167 */ 1168 static mblk_t * 1169 struiocopyout(mblk_t *bp, struct uio *uiop, int *errorp) 1170 { 1171 int error; 1172 ptrdiff_t n; 1173 mblk_t *nbp; 1174 1175 ASSERT(bp->b_wptr >= bp->b_rptr); 1176 1177 do { 1178 ASSERT(!(bp->b_datap->db_flags & DBLK_UIOA)); 1179 1180 if ((n = MIN(uiop->uio_resid, MBLKL(bp))) != 0) { 1181 ASSERT(n > 0); 1182 1183 error = uiomove(bp->b_rptr, n, UIO_READ, uiop); 1184 if (error != 0) { 1185 freemsg(bp); 1186 *errorp = error; 1187 return (NULL); 1188 } 1189 } 1190 1191 bp->b_rptr += n; 1192 while (bp != NULL && (bp->b_rptr >= bp->b_wptr)) { 1193 nbp = bp; 1194 bp = bp->b_cont; 1195 freeb(nbp); 1196 } 1197 } while (bp != NULL && uiop->uio_resid > 0); 1198 1199 *errorp = 0; 1200 return (bp); 1201 } 1202 1203 /* 1204 * Read a stream according to the mode flags in sd_flag: 1205 * 1206 * (default mode) - Byte stream, msg boundaries are ignored 1207 * RD_MSGDIS (msg discard) - Read on msg boundaries and throw away 1208 * any data remaining in msg 1209 * RD_MSGNODIS (msg non-discard) - Read on msg boundaries and put back 1210 * any remaining data on head of read queue 1211 * 1212 * Consume readable messages on the front of the queue until 1213 * ttolwp(curthread)->lwp_count 1214 * is satisfied, the readable messages are exhausted, or a message 1215 * boundary is reached in a message mode. If no data was read and 1216 * the stream was not opened with the NDELAY flag, block until data arrives. 1217 * Otherwise return the data read and update the count. 1218 * 1219 * In default mode a 0 length message signifies end-of-file and terminates 1220 * a read in progress. The 0 length message is removed from the queue 1221 * only if it is the only message read (no data is read). 1222 * 1223 * An attempt to read an M_PROTO or M_PCPROTO message results in an 1224 * EBADMSG error return, unless either RD_PROTDAT or RD_PROTDIS are set. 1225 * If RD_PROTDAT is set, M_PROTO and M_PCPROTO messages are read as data. 1226 * If RD_PROTDIS is set, the M_PROTO and M_PCPROTO parts of the message 1227 * are unlinked from and M_DATA blocks in the message, the protos are 1228 * thrown away, and the data is read. 
1229 */ 1230 /* ARGSUSED */ 1231 int 1232 strread(struct vnode *vp, struct uio *uiop, cred_t *crp) 1233 { 1234 struct stdata *stp; 1235 mblk_t *bp, *nbp; 1236 queue_t *q; 1237 int error = 0; 1238 uint_t old_sd_flag; 1239 int first; 1240 char rflg; 1241 uint_t mark; /* Contains MSG*MARK and _LASTMARK */ 1242 #define _LASTMARK 0x8000 /* Distinct from MSG*MARK */ 1243 short delim; 1244 unsigned char pri = 0; 1245 char waitflag; 1246 unsigned char type; 1247 1248 TRACE_1(TR_FAC_STREAMS_FR, 1249 TR_STRREAD_ENTER, "strread:%p", vp); 1250 ASSERT(vp->v_stream); 1251 stp = vp->v_stream; 1252 1253 mutex_enter(&stp->sd_lock); 1254 1255 if ((error = i_straccess(stp, JCREAD)) != 0) { 1256 mutex_exit(&stp->sd_lock); 1257 return (error); 1258 } 1259 1260 if (stp->sd_flag & (STRDERR|STPLEX)) { 1261 error = strgeterr(stp, STRDERR|STPLEX, 0); 1262 if (error != 0) { 1263 mutex_exit(&stp->sd_lock); 1264 return (error); 1265 } 1266 } 1267 1268 /* 1269 * Loop terminates when uiop->uio_resid == 0. 1270 */ 1271 rflg = 0; 1272 waitflag = READWAIT; 1273 q = _RD(stp->sd_wrq); 1274 for (;;) { 1275 ASSERT(MUTEX_HELD(&stp->sd_lock)); 1276 old_sd_flag = stp->sd_flag; 1277 mark = 0; 1278 delim = 0; 1279 first = 1; 1280 while ((bp = strget(stp, q, uiop, first, &error)) == NULL) { 1281 int done = 0; 1282 1283 ASSERT(MUTEX_HELD(&stp->sd_lock)); 1284 1285 if (error != 0) 1286 goto oops; 1287 1288 if (stp->sd_flag & (STRHUP|STREOF)) { 1289 goto oops; 1290 } 1291 if (rflg && !(stp->sd_flag & STRDELIM)) { 1292 goto oops; 1293 } 1294 /* 1295 * If a read(fd,buf,0) has been done, there is no 1296 * need to sleep. We always have zero bytes to 1297 * return. 1298 */ 1299 if (uiop->uio_resid == 0) { 1300 goto oops; 1301 } 1302 1303 qbackenable(q, 0); 1304 1305 TRACE_3(TR_FAC_STREAMS_FR, TR_STRREAD_WAIT, 1306 "strread calls strwaitq:%p, %p, %p", 1307 vp, uiop, crp); 1308 if ((error = strwaitq(stp, waitflag, uiop->uio_resid, 1309 uiop->uio_fmode, -1, &done)) != 0 || done) { 1310 TRACE_3(TR_FAC_STREAMS_FR, TR_STRREAD_DONE, 1311 "strread error or done:%p, %p, %p", 1312 vp, uiop, crp); 1313 if ((uiop->uio_fmode & FNDELAY) && 1314 (stp->sd_flag & OLDNDELAY) && 1315 (error == EAGAIN)) 1316 error = 0; 1317 goto oops; 1318 } 1319 TRACE_3(TR_FAC_STREAMS_FR, TR_STRREAD_AWAKE, 1320 "strread awakes:%p, %p, %p", vp, uiop, crp); 1321 if ((error = i_straccess(stp, JCREAD)) != 0) { 1322 goto oops; 1323 } 1324 first = 0; 1325 } 1326 1327 ASSERT(MUTEX_HELD(&stp->sd_lock)); 1328 ASSERT(bp); 1329 ASSERT(!(bp->b_datap->db_flags & DBLK_UIOA)); 1330 pri = bp->b_band; 1331 /* 1332 * Extract any mark information. If the message is not 1333 * completely consumed this information will be put in the mblk 1334 * that is putback. 1335 * If MSGMARKNEXT is set and the message is completely consumed 1336 * the STRATMARK flag will be set below. Likewise, if 1337 * MSGNOTMARKNEXT is set and the message is 1338 * completely consumed STRNOTATMARK will be set. 1339 * 1340 * For some unknown reason strread only breaks the read at the 1341 * last mark. 
1342 */ 1343 mark = bp->b_flag & (MSGMARK | MSGMARKNEXT | MSGNOTMARKNEXT); 1344 ASSERT((mark & (MSGMARKNEXT|MSGNOTMARKNEXT)) != 1345 (MSGMARKNEXT|MSGNOTMARKNEXT)); 1346 if (mark != 0 && bp == stp->sd_mark) { 1347 if (rflg) { 1348 putback(stp, q, bp, pri); 1349 goto oops; 1350 } 1351 mark |= _LASTMARK; 1352 stp->sd_mark = NULL; 1353 } 1354 if ((stp->sd_flag & STRDELIM) && (bp->b_flag & MSGDELIM)) 1355 delim = 1; 1356 mutex_exit(&stp->sd_lock); 1357 1358 if (STREAM_NEEDSERVICE(stp)) 1359 stream_runservice(stp); 1360 1361 type = bp->b_datap->db_type; 1362 1363 switch (type) { 1364 1365 case M_DATA: 1366 ismdata: 1367 if (msgnodata(bp)) { 1368 if (mark || delim) { 1369 freemsg(bp); 1370 } else if (rflg) { 1371 1372 /* 1373 * If already read data put zero 1374 * length message back on queue else 1375 * free msg and return 0. 1376 */ 1377 bp->b_band = pri; 1378 mutex_enter(&stp->sd_lock); 1379 putback(stp, q, bp, pri); 1380 mutex_exit(&stp->sd_lock); 1381 } else { 1382 freemsg(bp); 1383 } 1384 error = 0; 1385 goto oops1; 1386 } 1387 1388 rflg = 1; 1389 waitflag |= NOINTR; 1390 bp = struiocopyout(bp, uiop, &error); 1391 if (error != 0) 1392 goto oops1; 1393 1394 mutex_enter(&stp->sd_lock); 1395 if (bp) { 1396 /* 1397 * Have remaining data in message. 1398 * Free msg if in discard mode. 1399 */ 1400 if (stp->sd_read_opt & RD_MSGDIS) { 1401 freemsg(bp); 1402 } else { 1403 bp->b_band = pri; 1404 if ((mark & _LASTMARK) && 1405 (stp->sd_mark == NULL)) 1406 stp->sd_mark = bp; 1407 bp->b_flag |= mark & ~_LASTMARK; 1408 if (delim) 1409 bp->b_flag |= MSGDELIM; 1410 if (msgnodata(bp)) 1411 freemsg(bp); 1412 else 1413 putback(stp, q, bp, pri); 1414 } 1415 } else { 1416 /* 1417 * Consumed the complete message. 1418 * Move the MSG*MARKNEXT information 1419 * to the stream head just in case 1420 * the read queue becomes empty. 1421 * 1422 * If the stream head was at the mark 1423 * (STRATMARK) before we dropped sd_lock above 1424 * and some data was consumed then we have 1425 * moved past the mark thus STRATMARK is 1426 * cleared. However, if a message arrived in 1427 * strrput during the copyout above causing 1428 * STRATMARK to be set we can not clear that 1429 * flag. 1430 */ 1431 if (mark & 1432 (MSGMARKNEXT|MSGNOTMARKNEXT|MSGMARK)) { 1433 if (mark & MSGMARKNEXT) { 1434 stp->sd_flag &= ~STRNOTATMARK; 1435 stp->sd_flag |= STRATMARK; 1436 } else if (mark & MSGNOTMARKNEXT) { 1437 stp->sd_flag &= ~STRATMARK; 1438 stp->sd_flag |= STRNOTATMARK; 1439 } else { 1440 stp->sd_flag &= 1441 ~(STRATMARK|STRNOTATMARK); 1442 } 1443 } else if (rflg && (old_sd_flag & STRATMARK)) { 1444 stp->sd_flag &= ~STRATMARK; 1445 } 1446 } 1447 1448 /* 1449 * Check for signal messages at the front of the read 1450 * queue and generate the signal(s) if appropriate. 1451 * The only signal that can be on queue is M_SIG at 1452 * this point. 1453 */ 1454 while ((((bp = q->q_first)) != NULL) && 1455 (bp->b_datap->db_type == M_SIG)) { 1456 bp = getq_noenab(q, 0); 1457 /* 1458 * sd_lock is held so the content of the 1459 * read queue can not change. 
1460 */ 1461 ASSERT(bp != NULL && 1462 bp->b_datap->db_type == M_SIG); 1463 strsignal_nolock(stp, *bp->b_rptr, 1464 (int32_t)bp->b_band); 1465 mutex_exit(&stp->sd_lock); 1466 freemsg(bp); 1467 if (STREAM_NEEDSERVICE(stp)) 1468 stream_runservice(stp); 1469 mutex_enter(&stp->sd_lock); 1470 } 1471 1472 if ((uiop->uio_resid == 0) || (mark & _LASTMARK) || 1473 delim || 1474 (stp->sd_read_opt & (RD_MSGDIS|RD_MSGNODIS))) { 1475 goto oops; 1476 } 1477 continue; 1478 1479 case M_SIG: 1480 strsignal(stp, *bp->b_rptr, (int32_t)bp->b_band); 1481 freemsg(bp); 1482 mutex_enter(&stp->sd_lock); 1483 continue; 1484 1485 case M_PROTO: 1486 case M_PCPROTO: 1487 /* 1488 * Only data messages are readable. 1489 * Any others generate an error, unless 1490 * RD_PROTDIS or RD_PROTDAT is set. 1491 */ 1492 if (stp->sd_read_opt & RD_PROTDAT) { 1493 for (nbp = bp; nbp; nbp = nbp->b_next) { 1494 if ((nbp->b_datap->db_type == 1495 M_PROTO) || 1496 (nbp->b_datap->db_type == 1497 M_PCPROTO)) { 1498 nbp->b_datap->db_type = M_DATA; 1499 } else { 1500 break; 1501 } 1502 } 1503 /* 1504 * clear stream head hi pri flag based on 1505 * first message 1506 */ 1507 if (type == M_PCPROTO) { 1508 mutex_enter(&stp->sd_lock); 1509 stp->sd_flag &= ~STRPRI; 1510 mutex_exit(&stp->sd_lock); 1511 } 1512 goto ismdata; 1513 } else if (stp->sd_read_opt & RD_PROTDIS) { 1514 /* 1515 * discard non-data messages 1516 */ 1517 while (bp && 1518 ((bp->b_datap->db_type == M_PROTO) || 1519 (bp->b_datap->db_type == M_PCPROTO))) { 1520 nbp = unlinkb(bp); 1521 freeb(bp); 1522 bp = nbp; 1523 } 1524 /* 1525 * clear stream head hi pri flag based on 1526 * first message 1527 */ 1528 if (type == M_PCPROTO) { 1529 mutex_enter(&stp->sd_lock); 1530 stp->sd_flag &= ~STRPRI; 1531 mutex_exit(&stp->sd_lock); 1532 } 1533 if (bp) { 1534 bp->b_band = pri; 1535 goto ismdata; 1536 } else { 1537 break; 1538 } 1539 } 1540 /* FALLTHRU */ 1541 case M_PASSFP: 1542 if ((bp->b_datap->db_type == M_PASSFP) && 1543 (stp->sd_read_opt & RD_PROTDIS)) { 1544 freemsg(bp); 1545 break; 1546 } 1547 mutex_enter(&stp->sd_lock); 1548 putback(stp, q, bp, pri); 1549 mutex_exit(&stp->sd_lock); 1550 if (rflg == 0) 1551 error = EBADMSG; 1552 goto oops1; 1553 1554 default: 1555 /* 1556 * Garbage on stream head read queue. 1557 */ 1558 cmn_err(CE_WARN, "bad %x found at stream head\n", 1559 bp->b_datap->db_type); 1560 freemsg(bp); 1561 goto oops1; 1562 } 1563 mutex_enter(&stp->sd_lock); 1564 } 1565 oops: 1566 mutex_exit(&stp->sd_lock); 1567 oops1: 1568 qbackenable(q, pri); 1569 return (error); 1570 #undef _LASTMARK 1571 } 1572 1573 /* 1574 * Default processing of M_PROTO/M_PCPROTO messages. 1575 * Determine which wakeups and signals are needed. 1576 * This can be replaced by a user-specified procedure for kernel users 1577 * of STREAMS. 1578 */ 1579 /* ARGSUSED */ 1580 mblk_t * 1581 strrput_proto(vnode_t *vp, mblk_t *mp, 1582 strwakeup_t *wakeups, strsigset_t *firstmsgsigs, 1583 strsigset_t *allmsgsigs, strpollset_t *pollwakeups) 1584 { 1585 *wakeups = RSLEEP; 1586 *allmsgsigs = 0; 1587 1588 switch (mp->b_datap->db_type) { 1589 case M_PROTO: 1590 if (mp->b_band == 0) { 1591 *firstmsgsigs = S_INPUT | S_RDNORM; 1592 *pollwakeups = POLLIN | POLLRDNORM; 1593 } else { 1594 *firstmsgsigs = S_INPUT | S_RDBAND; 1595 *pollwakeups = POLLIN | POLLRDBAND; 1596 } 1597 break; 1598 case M_PCPROTO: 1599 *firstmsgsigs = S_HIPRI; 1600 *pollwakeups = POLLPRI; 1601 break; 1602 } 1603 return (mp); 1604 } 1605 1606 /* 1607 * Default processing of everything but M_DATA, M_PROTO, M_PCPROTO and 1608 * M_PASSFP messages. 
1609 * Determine which wakeups and signals are needed. 1610 * This can be replaced by a user-specified procedure for kernel users 1611 * of STREAMS. 1612 */ 1613 /* ARGSUSED */ 1614 mblk_t * 1615 strrput_misc(vnode_t *vp, mblk_t *mp, 1616 strwakeup_t *wakeups, strsigset_t *firstmsgsigs, 1617 strsigset_t *allmsgsigs, strpollset_t *pollwakeups) 1618 { 1619 *wakeups = 0; 1620 *firstmsgsigs = 0; 1621 *allmsgsigs = 0; 1622 *pollwakeups = 0; 1623 return (mp); 1624 } 1625 1626 /* 1627 * Stream read put procedure. Called from downstream driver/module 1628 * with messages for the stream head. Data, protocol, and in-stream 1629 * signal messages are placed on the queue, others are handled directly. 1630 */ 1631 int 1632 strrput(queue_t *q, mblk_t *bp) 1633 { 1634 struct stdata *stp; 1635 ulong_t rput_opt; 1636 strwakeup_t wakeups; 1637 strsigset_t firstmsgsigs; /* Signals if first message on queue */ 1638 strsigset_t allmsgsigs; /* Signals for all messages */ 1639 strsigset_t signals; /* Signals events to generate */ 1640 strpollset_t pollwakeups; 1641 mblk_t *nextbp; 1642 uchar_t band = 0; 1643 int hipri_sig; 1644 1645 stp = (struct stdata *)q->q_ptr; 1646 /* 1647 * Use rput_opt for optimized access to the SR_ flags except 1648 * SR_POLLIN. That flag has to be checked under sd_lock since it 1649 * is modified by strpoll(). 1650 */ 1651 rput_opt = stp->sd_rput_opt; 1652 1653 ASSERT(qclaimed(q)); 1654 TRACE_2(TR_FAC_STREAMS_FR, TR_STRRPUT_ENTER, 1655 "strrput called with message type:q %p bp %p", q, bp); 1656 1657 /* 1658 * Perform initial processing and pass to the parameterized functions. 1659 */ 1660 ASSERT(bp->b_next == NULL); 1661 1662 switch (bp->b_datap->db_type) { 1663 case M_DATA: 1664 /* 1665 * sockfs is the only consumer of STREOF and when it is set, 1666 * it implies that the receiver is not interested in receiving 1667 * any more data, hence the mblk is freed to prevent unnecessary 1668 * message queueing at the stream head. 1669 */ 1670 if (stp->sd_flag == STREOF) { 1671 freemsg(bp); 1672 return (0); 1673 } 1674 if ((rput_opt & SR_IGN_ZEROLEN) && 1675 bp->b_rptr == bp->b_wptr && msgnodata(bp)) { 1676 /* 1677 * Ignore zero-length M_DATA messages. These might be 1678 * generated by some transports. 1679 * The zero-length M_DATA messages, even if they 1680 * are ignored, should effect the atmark tracking and 1681 * should wake up a thread sleeping in strwaitmark. 1682 */ 1683 mutex_enter(&stp->sd_lock); 1684 if (bp->b_flag & MSGMARKNEXT) { 1685 /* 1686 * Record the position of the mark either 1687 * in q_last or in STRATMARK. 1688 */ 1689 if (q->q_last != NULL) { 1690 q->q_last->b_flag &= ~MSGNOTMARKNEXT; 1691 q->q_last->b_flag |= MSGMARKNEXT; 1692 } else { 1693 stp->sd_flag &= ~STRNOTATMARK; 1694 stp->sd_flag |= STRATMARK; 1695 } 1696 } else if (bp->b_flag & MSGNOTMARKNEXT) { 1697 /* 1698 * Record that this is not the position of 1699 * the mark either in q_last or in 1700 * STRNOTATMARK. 
1701 */ 1702 if (q->q_last != NULL) { 1703 q->q_last->b_flag &= ~MSGMARKNEXT; 1704 q->q_last->b_flag |= MSGNOTMARKNEXT; 1705 } else { 1706 stp->sd_flag &= ~STRATMARK; 1707 stp->sd_flag |= STRNOTATMARK; 1708 } 1709 } 1710 if (stp->sd_flag & RSLEEP) { 1711 stp->sd_flag &= ~RSLEEP; 1712 cv_broadcast(&q->q_wait); 1713 } 1714 mutex_exit(&stp->sd_lock); 1715 freemsg(bp); 1716 return (0); 1717 } 1718 wakeups = RSLEEP; 1719 if (bp->b_band == 0) { 1720 firstmsgsigs = S_INPUT | S_RDNORM; 1721 pollwakeups = POLLIN | POLLRDNORM; 1722 } else { 1723 firstmsgsigs = S_INPUT | S_RDBAND; 1724 pollwakeups = POLLIN | POLLRDBAND; 1725 } 1726 if (rput_opt & SR_SIGALLDATA) 1727 allmsgsigs = firstmsgsigs; 1728 else 1729 allmsgsigs = 0; 1730 1731 mutex_enter(&stp->sd_lock); 1732 if ((rput_opt & SR_CONSOL_DATA) && 1733 (q->q_last != NULL) && 1734 (bp->b_flag & (MSGMARK|MSGDELIM)) == 0) { 1735 /* 1736 * Consolidate an M_DATA message onto an M_DATA, 1737 * M_PROTO, or M_PCPROTO by merging it with q_last. 1738 * The consolidation does not take place if 1739 * the old message is marked with either of the 1740 * marks or the delim flag or if the new 1741 * message is marked with MSGMARK. The MSGMARK 1742 * check is needed to handle the odd semantics of 1743 * MSGMARK where essentially the whole message 1744 * is to be treated as marked. 1745 * Carry any MSGMARKNEXT and MSGNOTMARKNEXT from the 1746 * new message to the front of the b_cont chain. 1747 */ 1748 mblk_t *lbp = q->q_last; 1749 unsigned char db_type = lbp->b_datap->db_type; 1750 1751 if ((db_type == M_DATA || db_type == M_PROTO || 1752 db_type == M_PCPROTO) && 1753 !(lbp->b_flag & (MSGDELIM|MSGMARK|MSGMARKNEXT))) { 1754 rmvq_noenab(q, lbp); 1755 /* 1756 * The first message in the b_cont list 1757 * tracks MSGMARKNEXT and MSGNOTMARKNEXT. 1758 * We need to handle the case where we 1759 * are appending: 1760 * 1761 * 1) a MSGMARKNEXT to a MSGNOTMARKNEXT. 1762 * 2) a MSGMARKNEXT to a plain message. 1763 * 3) a MSGNOTMARKNEXT to a plain message 1764 * 4) a MSGNOTMARKNEXT to a MSGNOTMARKNEXT 1765 * message. 1766 * 1767 * Thus we never append a MSGMARKNEXT or 1768 * MSGNOTMARKNEXT to a MSGMARKNEXT message. 1769 */ 1770 if (bp->b_flag & MSGMARKNEXT) { 1771 lbp->b_flag |= MSGMARKNEXT; 1772 lbp->b_flag &= ~MSGNOTMARKNEXT; 1773 bp->b_flag &= ~MSGMARKNEXT; 1774 } else if (bp->b_flag & MSGNOTMARKNEXT) { 1775 lbp->b_flag |= MSGNOTMARKNEXT; 1776 bp->b_flag &= ~MSGNOTMARKNEXT; 1777 } 1778 1779 linkb(lbp, bp); 1780 bp = lbp; 1781 /* 1782 * The new message logically isn't the first 1783 * even though the q_first check below thinks 1784 * it is. Clear the firstmsgsigs to make it 1785 * not appear to be first. 
1786 */ 1787 firstmsgsigs = 0; 1788 } 1789 } 1790 break; 1791 1792 case M_PASSFP: 1793 wakeups = RSLEEP; 1794 allmsgsigs = 0; 1795 if (bp->b_band == 0) { 1796 firstmsgsigs = S_INPUT | S_RDNORM; 1797 pollwakeups = POLLIN | POLLRDNORM; 1798 } else { 1799 firstmsgsigs = S_INPUT | S_RDBAND; 1800 pollwakeups = POLLIN | POLLRDBAND; 1801 } 1802 mutex_enter(&stp->sd_lock); 1803 break; 1804 1805 case M_PROTO: 1806 case M_PCPROTO: 1807 ASSERT(stp->sd_rprotofunc != NULL); 1808 bp = (stp->sd_rprotofunc)(stp->sd_vnode, bp, 1809 &wakeups, &firstmsgsigs, &allmsgsigs, &pollwakeups); 1810 #define ALLSIG (S_INPUT|S_HIPRI|S_OUTPUT|S_MSG|S_ERROR|S_HANGUP|S_RDNORM|\ 1811 S_WRNORM|S_RDBAND|S_WRBAND|S_BANDURG) 1812 #define ALLPOLL (POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLWRNORM|POLLRDBAND|\ 1813 POLLWRBAND) 1814 1815 ASSERT((wakeups & ~(RSLEEP|WSLEEP)) == 0); 1816 ASSERT((firstmsgsigs & ~ALLSIG) == 0); 1817 ASSERT((allmsgsigs & ~ALLSIG) == 0); 1818 ASSERT((pollwakeups & ~ALLPOLL) == 0); 1819 1820 mutex_enter(&stp->sd_lock); 1821 break; 1822 1823 default: 1824 ASSERT(stp->sd_rmiscfunc != NULL); 1825 bp = (stp->sd_rmiscfunc)(stp->sd_vnode, bp, 1826 &wakeups, &firstmsgsigs, &allmsgsigs, &pollwakeups); 1827 ASSERT((wakeups & ~(RSLEEP|WSLEEP)) == 0); 1828 ASSERT((firstmsgsigs & ~ALLSIG) == 0); 1829 ASSERT((allmsgsigs & ~ALLSIG) == 0); 1830 ASSERT((pollwakeups & ~ALLPOLL) == 0); 1831 #undef ALLSIG 1832 #undef ALLPOLL 1833 mutex_enter(&stp->sd_lock); 1834 break; 1835 } 1836 ASSERT(MUTEX_HELD(&stp->sd_lock)); 1837 1838 /* By default generate superset of signals */ 1839 signals = (firstmsgsigs | allmsgsigs); 1840 1841 /* 1842 * The proto and misc functions can return multiple messages 1843 * as a b_next chain. Such messages are processed separately. 1844 */ 1845 one_more: 1846 hipri_sig = 0; 1847 if (bp == NULL) { 1848 nextbp = NULL; 1849 } else { 1850 nextbp = bp->b_next; 1851 bp->b_next = NULL; 1852 1853 switch (bp->b_datap->db_type) { 1854 case M_PCPROTO: 1855 /* 1856 * Only one priority protocol message is allowed at the 1857 * stream head at a time. 1858 */ 1859 if (stp->sd_flag & STRPRI) { 1860 TRACE_0(TR_FAC_STREAMS_FR, TR_STRRPUT_PROTERR, 1861 "M_PCPROTO already at head"); 1862 freemsg(bp); 1863 mutex_exit(&stp->sd_lock); 1864 goto done; 1865 } 1866 stp->sd_flag |= STRPRI; 1867 hipri_sig = 1; 1868 /* FALLTHRU */ 1869 case M_DATA: 1870 case M_PROTO: 1871 case M_PASSFP: 1872 band = bp->b_band; 1873 /* 1874 * Marking doesn't work well when messages 1875 * are marked in more than one band. We only 1876 * remember the last message received, even if 1877 * it is placed on the queue ahead of other 1878 * marked messages. 1879 */ 1880 if (bp->b_flag & MSGMARK) 1881 stp->sd_mark = bp; 1882 (void) putq(q, bp); 1883 1884 /* 1885 * If message is a PCPROTO message, always use 1886 * firstmsgsigs to determine if a signal should be 1887 * sent as strrput is the only place to send 1888 * signals for PCPROTO. Other messages are based on 1889 * the STRGETINPROG flag. The flag determines if 1890 * strrput or (k)strgetmsg will be responsible for 1891 * sending the signals, in the firstmsgsigs case. 
1892 */ 1893 if ((hipri_sig == 1) || 1894 (((stp->sd_flag & STRGETINPROG) == 0) && 1895 (q->q_first == bp))) 1896 signals = (firstmsgsigs | allmsgsigs); 1897 else 1898 signals = allmsgsigs; 1899 break; 1900 1901 default: 1902 mutex_exit(&stp->sd_lock); 1903 (void) strrput_nondata(q, bp); 1904 mutex_enter(&stp->sd_lock); 1905 break; 1906 } 1907 } 1908 ASSERT(MUTEX_HELD(&stp->sd_lock)); 1909 /* 1910 * Wake sleeping read/getmsg and cancel deferred wakeup 1911 */ 1912 if (wakeups & RSLEEP) 1913 stp->sd_wakeq &= ~RSLEEP; 1914 1915 wakeups &= stp->sd_flag; 1916 if (wakeups & RSLEEP) { 1917 stp->sd_flag &= ~RSLEEP; 1918 cv_broadcast(&q->q_wait); 1919 } 1920 if (wakeups & WSLEEP) { 1921 stp->sd_flag &= ~WSLEEP; 1922 cv_broadcast(&_WR(q)->q_wait); 1923 } 1924 1925 if (pollwakeups != 0) { 1926 if (pollwakeups == (POLLIN | POLLRDNORM)) { 1927 /* 1928 * Can't use rput_opt since it was not 1929 * read when sd_lock was held and SR_POLLIN is changed 1930 * by strpoll() under sd_lock. 1931 */ 1932 if (!(stp->sd_rput_opt & SR_POLLIN)) 1933 goto no_pollwake; 1934 stp->sd_rput_opt &= ~SR_POLLIN; 1935 } 1936 mutex_exit(&stp->sd_lock); 1937 pollwakeup(&stp->sd_pollist, pollwakeups); 1938 mutex_enter(&stp->sd_lock); 1939 } 1940 no_pollwake: 1941 1942 /* 1943 * strsendsig can handle multiple signals with a 1944 * single call. 1945 */ 1946 if (stp->sd_sigflags & signals) 1947 strsendsig(stp->sd_siglist, signals, band, 0); 1948 mutex_exit(&stp->sd_lock); 1949 1950 1951 done: 1952 if (nextbp == NULL) 1953 return (0); 1954 1955 /* 1956 * Any signals were handled the first time. 1957 * Wakeups and pollwakeups are redone to avoid any race 1958 * conditions - all the messages are not queued until the 1959 * last message has been processed by strrput. 1960 */ 1961 bp = nextbp; 1962 signals = firstmsgsigs = allmsgsigs = 0; 1963 mutex_enter(&stp->sd_lock); 1964 goto one_more; 1965 } 1966 1967 static void 1968 log_dupioc(queue_t *rq, mblk_t *bp) 1969 { 1970 queue_t *wq, *qp; 1971 char *modnames, *mnp, *dname; 1972 size_t maxmodstr; 1973 boolean_t islast; 1974 1975 /* 1976 * Allocate a buffer large enough to hold the names of nstrpush modules 1977 * and one driver, with spaces between and NUL terminator. If we can't 1978 * get memory, then we'll just log the driver name. 1979 */ 1980 maxmodstr = nstrpush * (FMNAMESZ + 1); 1981 mnp = modnames = kmem_alloc(maxmodstr, KM_NOSLEEP); 1982 1983 /* march down write side to print log message down to the driver */ 1984 wq = WR(rq); 1985 1986 /* make sure q_next doesn't shift around while we're grabbing data */ 1987 claimstr(wq); 1988 qp = wq->q_next; 1989 do { 1990 if ((dname = qp->q_qinfo->qi_minfo->mi_idname) == NULL) 1991 dname = "?"; 1992 islast = !SAMESTR(qp) || qp->q_next == NULL; 1993 if (modnames == NULL) { 1994 /* 1995 * If we don't have memory, then get the driver name in 1996 * the log where we can see it. Note that memory 1997 * pressure is a possible cause of these sorts of bugs. 1998 */ 1999 if (islast) { 2000 modnames = dname; 2001 maxmodstr = 0; 2002 } 2003 } else { 2004 mnp += snprintf(mnp, FMNAMESZ + 1, "%s", dname); 2005 if (!islast) 2006 *mnp++ = ' '; 2007 } 2008 qp = qp->q_next; 2009 } while (!islast); 2010 releasestr(wq); 2011 /* Cannot happen unless stream head is corrupt. */ 2012 ASSERT(modnames != NULL); 2013 (void) strlog(rq->q_qinfo->qi_minfo->mi_idnum, 0, 1, 2014 SL_CONSOLE|SL_TRACE|SL_ERROR, 2015 "Warning: stream %p received duplicate %X M_IOC%s; module list: %s", 2016 rq->q_ptr, ((struct iocblk *)bp->b_rptr)->ioc_cmd, 2017 (DB_TYPE(bp) == M_IOCACK ? 
"ACK" : "NAK"), modnames); 2018 if (maxmodstr != 0) 2019 kmem_free(modnames, maxmodstr); 2020 } 2021 2022 int 2023 strrput_nondata(queue_t *q, mblk_t *bp) 2024 { 2025 struct stdata *stp; 2026 struct iocblk *iocbp; 2027 struct stroptions *sop; 2028 struct copyreq *reqp; 2029 struct copyresp *resp; 2030 unsigned char bpri; 2031 unsigned char flushed_already = 0; 2032 2033 stp = (struct stdata *)q->q_ptr; 2034 2035 ASSERT(!(stp->sd_flag & STPLEX)); 2036 ASSERT(qclaimed(q)); 2037 2038 switch (bp->b_datap->db_type) { 2039 case M_ERROR: 2040 /* 2041 * An error has occurred downstream, the errno is in the first 2042 * bytes of the message. 2043 */ 2044 if ((bp->b_wptr - bp->b_rptr) == 2) { /* New flavor */ 2045 unsigned char rw = 0; 2046 2047 mutex_enter(&stp->sd_lock); 2048 if (*bp->b_rptr != NOERROR) { /* read error */ 2049 if (*bp->b_rptr != 0) { 2050 if (stp->sd_flag & STRDERR) 2051 flushed_already |= FLUSHR; 2052 stp->sd_flag |= STRDERR; 2053 rw |= FLUSHR; 2054 } else { 2055 stp->sd_flag &= ~STRDERR; 2056 } 2057 stp->sd_rerror = *bp->b_rptr; 2058 } 2059 bp->b_rptr++; 2060 if (*bp->b_rptr != NOERROR) { /* write error */ 2061 if (*bp->b_rptr != 0) { 2062 if (stp->sd_flag & STWRERR) 2063 flushed_already |= FLUSHW; 2064 stp->sd_flag |= STWRERR; 2065 rw |= FLUSHW; 2066 } else { 2067 stp->sd_flag &= ~STWRERR; 2068 } 2069 stp->sd_werror = *bp->b_rptr; 2070 } 2071 if (rw) { 2072 TRACE_2(TR_FAC_STREAMS_FR, TR_STRRPUT_WAKE, 2073 "strrput cv_broadcast:q %p, bp %p", 2074 q, bp); 2075 cv_broadcast(&q->q_wait); /* readers */ 2076 cv_broadcast(&_WR(q)->q_wait); /* writers */ 2077 cv_broadcast(&stp->sd_monitor); /* ioctllers */ 2078 2079 mutex_exit(&stp->sd_lock); 2080 pollwakeup(&stp->sd_pollist, POLLERR); 2081 mutex_enter(&stp->sd_lock); 2082 2083 if (stp->sd_sigflags & S_ERROR) 2084 strsendsig(stp->sd_siglist, S_ERROR, 0, 2085 ((rw & FLUSHR) ? stp->sd_rerror : 2086 stp->sd_werror)); 2087 mutex_exit(&stp->sd_lock); 2088 /* 2089 * Send the M_FLUSH only 2090 * for the first M_ERROR 2091 * message on the stream 2092 */ 2093 if (flushed_already == rw) { 2094 freemsg(bp); 2095 return (0); 2096 } 2097 2098 bp->b_datap->db_type = M_FLUSH; 2099 *bp->b_rptr = rw; 2100 bp->b_wptr = bp->b_rptr + 1; 2101 /* 2102 * Protect against the driver 2103 * passing up messages after 2104 * it has done a qprocsoff 2105 */ 2106 if (_OTHERQ(q)->q_next == NULL) 2107 freemsg(bp); 2108 else 2109 qreply(q, bp); 2110 return (0); 2111 } else 2112 mutex_exit(&stp->sd_lock); 2113 } else if (*bp->b_rptr != 0) { /* Old flavor */ 2114 if (stp->sd_flag & (STRDERR|STWRERR)) 2115 flushed_already = FLUSHRW; 2116 mutex_enter(&stp->sd_lock); 2117 stp->sd_flag |= (STRDERR|STWRERR); 2118 stp->sd_rerror = *bp->b_rptr; 2119 stp->sd_werror = *bp->b_rptr; 2120 TRACE_2(TR_FAC_STREAMS_FR, 2121 TR_STRRPUT_WAKE2, 2122 "strrput wakeup #2:q %p, bp %p", q, bp); 2123 cv_broadcast(&q->q_wait); /* the readers */ 2124 cv_broadcast(&_WR(q)->q_wait); /* the writers */ 2125 cv_broadcast(&stp->sd_monitor); /* ioctllers */ 2126 2127 mutex_exit(&stp->sd_lock); 2128 pollwakeup(&stp->sd_pollist, POLLERR); 2129 mutex_enter(&stp->sd_lock); 2130 2131 if (stp->sd_sigflags & S_ERROR) 2132 strsendsig(stp->sd_siglist, S_ERROR, 0, 2133 (stp->sd_werror ? 
stp->sd_werror : 2134 stp->sd_rerror)); 2135 mutex_exit(&stp->sd_lock); 2136 2137 /* 2138 * Send the M_FLUSH only 2139 * for the first M_ERROR 2140 * message on the stream 2141 */ 2142 if (flushed_already != FLUSHRW) { 2143 bp->b_datap->db_type = M_FLUSH; 2144 *bp->b_rptr = FLUSHRW; 2145 /* 2146 * Protect against the driver passing up 2147 * messages after it has done a 2148 * qprocsoff. 2149 */ 2150 if (_OTHERQ(q)->q_next == NULL) 2151 freemsg(bp); 2152 else 2153 qreply(q, bp); 2154 return (0); 2155 } 2156 } 2157 freemsg(bp); 2158 return (0); 2159 2160 case M_HANGUP: 2161 2162 freemsg(bp); 2163 mutex_enter(&stp->sd_lock); 2164 stp->sd_werror = ENXIO; 2165 stp->sd_flag |= STRHUP; 2166 stp->sd_flag &= ~(WSLEEP|RSLEEP); 2167 2168 /* 2169 * send signal if controlling tty 2170 */ 2171 2172 if (stp->sd_sidp) { 2173 prsignal(stp->sd_sidp, SIGHUP); 2174 if (stp->sd_sidp != stp->sd_pgidp) 2175 pgsignal(stp->sd_pgidp, SIGTSTP); 2176 } 2177 2178 /* 2179 * wake up read, write, and exception pollers and 2180 * reset wakeup mechanism. 2181 */ 2182 cv_broadcast(&q->q_wait); /* the readers */ 2183 cv_broadcast(&_WR(q)->q_wait); /* the writers */ 2184 cv_broadcast(&stp->sd_monitor); /* the ioctllers */ 2185 strhup(stp); 2186 mutex_exit(&stp->sd_lock); 2187 return (0); 2188 2189 case M_UNHANGUP: 2190 freemsg(bp); 2191 mutex_enter(&stp->sd_lock); 2192 stp->sd_werror = 0; 2193 stp->sd_flag &= ~STRHUP; 2194 mutex_exit(&stp->sd_lock); 2195 return (0); 2196 2197 case M_SIG: 2198 /* 2199 * Someone downstream wants to post a signal. The 2200 * signal to post is contained in the first byte of the 2201 * message. If the message would go on the front of 2202 * the queue, send a signal to the process group 2203 * (if not SIGPOLL) or to the siglist processes 2204 * (SIGPOLL). If something is already on the queue, 2205 * OR if we are delivering a delayed suspend (*sigh* 2206 * another "tty" hack) and there's no one sleeping already, 2207 * just enqueue the message. 2208 */ 2209 mutex_enter(&stp->sd_lock); 2210 if (q->q_first || (*bp->b_rptr == SIGTSTP && 2211 !(stp->sd_flag & RSLEEP))) { 2212 (void) putq(q, bp); 2213 mutex_exit(&stp->sd_lock); 2214 return (0); 2215 } 2216 mutex_exit(&stp->sd_lock); 2217 /* FALLTHRU */ 2218 2219 case M_PCSIG: 2220 /* 2221 * Don't enqueue, just post the signal. 2222 */ 2223 strsignal(stp, *bp->b_rptr, 0L); 2224 freemsg(bp); 2225 return (0); 2226 2227 case M_CMD: 2228 if (MBLKL(bp) != sizeof (cmdblk_t)) { 2229 freemsg(bp); 2230 return (0); 2231 } 2232 2233 mutex_enter(&stp->sd_lock); 2234 if (stp->sd_flag & STRCMDWAIT) { 2235 ASSERT(stp->sd_cmdblk == NULL); 2236 stp->sd_cmdblk = bp; 2237 cv_broadcast(&stp->sd_monitor); 2238 mutex_exit(&stp->sd_lock); 2239 } else { 2240 mutex_exit(&stp->sd_lock); 2241 freemsg(bp); 2242 } 2243 return (0); 2244 2245 case M_FLUSH: 2246 /* 2247 * Flush queues. The indication of which queues to flush 2248 * is in the first byte of the message. If the read queue 2249 * is specified, then flush it. If FLUSHBAND is set, just 2250 * flush the band specified by the second byte of the message. 2251 * 2252 * If a module has issued a M_SETOPT to not flush hi 2253 * priority messages off of the stream head, then pass this 2254 * flag into the flushq code to preserve such messages. 
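 *
 * [Editorial aside, not part of the original source: from user level
 * these M_FLUSH messages are typically generated with the I_FLUSH and
 * I_FLUSHBAND ioctls; the descriptor fd and the band are hypothetical.]
 *
 *	#include <stropts.h>
 *
 *	(void) ioctl(fd, I_FLUSH, FLUSHRW);	-- flush both directions
 *
 *	struct bandinfo bi;
 *	bi.bi_pri = 1;				-- band to flush
 *	bi.bi_flag = FLUSHR;
 *	(void) ioctl(fd, I_FLUSHBAND, &bi);	-- arrives here with FLUSHBAND set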
2255 */ 2256 2257 if (*bp->b_rptr & FLUSHR) { 2258 mutex_enter(&stp->sd_lock); 2259 if (*bp->b_rptr & FLUSHBAND) { 2260 ASSERT((bp->b_wptr - bp->b_rptr) >= 2); 2261 flushband(q, *(bp->b_rptr + 1), FLUSHALL); 2262 } else 2263 flushq_common(q, FLUSHALL, 2264 stp->sd_read_opt & RFLUSHPCPROT); 2265 if ((q->q_first == NULL) || 2266 (q->q_first->b_datap->db_type < QPCTL)) 2267 stp->sd_flag &= ~STRPRI; 2268 else { 2269 ASSERT(stp->sd_flag & STRPRI); 2270 } 2271 mutex_exit(&stp->sd_lock); 2272 } 2273 if ((*bp->b_rptr & FLUSHW) && !(bp->b_flag & MSGNOLOOP)) { 2274 *bp->b_rptr &= ~FLUSHR; 2275 bp->b_flag |= MSGNOLOOP; 2276 /* 2277 * Protect against the driver passing up 2278 * messages after it has done a qprocsoff. 2279 */ 2280 if (_OTHERQ(q)->q_next == NULL) 2281 freemsg(bp); 2282 else 2283 qreply(q, bp); 2284 return (0); 2285 } 2286 freemsg(bp); 2287 return (0); 2288 2289 case M_IOCACK: 2290 case M_IOCNAK: 2291 iocbp = (struct iocblk *)bp->b_rptr; 2292 /* 2293 * If not waiting for ACK or NAK then just free msg. 2294 * If incorrect id sequence number then just free msg. 2295 * If already have ACK or NAK for user then this is a 2296 * duplicate, display a warning and free the msg. 2297 */ 2298 mutex_enter(&stp->sd_lock); 2299 if ((stp->sd_flag & IOCWAIT) == 0 || stp->sd_iocblk || 2300 (stp->sd_iocid != iocbp->ioc_id)) { 2301 /* 2302 * If the ACK/NAK is a dup, display a message. 2303 * Dup is when sd_iocid == ioc_id, and 2304 * sd_iocblk == <valid ptr> or -1 (the former 2305 * is when an ioctl has been put on the stream 2306 * head, but has not yet been consumed, the 2307 * latter is when it has been consumed). 2308 */ 2309 if ((stp->sd_iocid == iocbp->ioc_id) && 2310 (stp->sd_iocblk != NULL)) { 2311 log_dupioc(q, bp); 2312 } 2313 freemsg(bp); 2314 mutex_exit(&stp->sd_lock); 2315 return (0); 2316 } 2317 2318 /* 2319 * Assign ACK or NAK to user and wake up. 2320 */ 2321 stp->sd_iocblk = bp; 2322 cv_broadcast(&stp->sd_monitor); 2323 mutex_exit(&stp->sd_lock); 2324 return (0); 2325 2326 case M_COPYIN: 2327 case M_COPYOUT: 2328 reqp = (struct copyreq *)bp->b_rptr; 2329 2330 /* 2331 * If not waiting for ACK or NAK then just fail request. 2332 * If already have ACK, NAK, or copy request, then just 2333 * fail request. 2334 * If incorrect id sequence number then just fail request. 2335 */ 2336 mutex_enter(&stp->sd_lock); 2337 if ((stp->sd_flag & IOCWAIT) == 0 || stp->sd_iocblk || 2338 (stp->sd_iocid != reqp->cq_id)) { 2339 if (bp->b_cont) { 2340 freemsg(bp->b_cont); 2341 bp->b_cont = NULL; 2342 } 2343 bp->b_datap->db_type = M_IOCDATA; 2344 bp->b_wptr = bp->b_rptr + sizeof (struct copyresp); 2345 resp = (struct copyresp *)bp->b_rptr; 2346 resp->cp_rval = (caddr_t)1; /* failure */ 2347 mutex_exit(&stp->sd_lock); 2348 putnext(stp->sd_wrq, bp); 2349 return (0); 2350 } 2351 2352 /* 2353 * Assign copy request to user and wake up. 2354 */ 2355 stp->sd_iocblk = bp; 2356 cv_broadcast(&stp->sd_monitor); 2357 mutex_exit(&stp->sd_lock); 2358 return (0); 2359 2360 case M_SETOPTS: 2361 /* 2362 * Set stream head options (read option, write offset, 2363 * min/max packet size, and/or high/low water marks for 2364 * the read side only).
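 *
 * [Editorial aside, not part of the original source: a module or driver
 * typically generates this message from its open or service routine; a
 * minimal sketch, with arbitrary water marks and rq assumed to be the
 * sender's read queue.]
 *
 *	struct stroptions *sop;
 *	mblk_t *mp;
 *
 *	if ((mp = allocb(sizeof (struct stroptions), BPRI_HI)) != NULL) {
 *		mp->b_datap->db_type = M_SETOPTS;
 *		mp->b_wptr += sizeof (struct stroptions);
 *		sop = (struct stroptions *)mp->b_rptr;
 *		sop->so_flags = SO_HIWAT | SO_LOWAT | SO_MREADON;
 *		sop->so_hiwat = 65536;
 *		sop->so_lowat = 1024;
 *		putnext(rq, mp);	-- travels upstream to this code
 *	}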
2365 */ 2366 2367 bpri = 0; 2368 sop = (struct stroptions *)bp->b_rptr; 2369 mutex_enter(&stp->sd_lock); 2370 if (sop->so_flags & SO_READOPT) { 2371 switch (sop->so_readopt & RMODEMASK) { 2372 case RNORM: 2373 stp->sd_read_opt &= ~(RD_MSGDIS | RD_MSGNODIS); 2374 break; 2375 2376 case RMSGD: 2377 stp->sd_read_opt = 2378 ((stp->sd_read_opt & ~RD_MSGNODIS) | 2379 RD_MSGDIS); 2380 break; 2381 2382 case RMSGN: 2383 stp->sd_read_opt = 2384 ((stp->sd_read_opt & ~RD_MSGDIS) | 2385 RD_MSGNODIS); 2386 break; 2387 } 2388 switch (sop->so_readopt & RPROTMASK) { 2389 case RPROTNORM: 2390 stp->sd_read_opt &= ~(RD_PROTDAT | RD_PROTDIS); 2391 break; 2392 2393 case RPROTDAT: 2394 stp->sd_read_opt = 2395 ((stp->sd_read_opt & ~RD_PROTDIS) | 2396 RD_PROTDAT); 2397 break; 2398 2399 case RPROTDIS: 2400 stp->sd_read_opt = 2401 ((stp->sd_read_opt & ~RD_PROTDAT) | 2402 RD_PROTDIS); 2403 break; 2404 } 2405 switch (sop->so_readopt & RFLUSHMASK) { 2406 case RFLUSHPCPROT: 2407 /* 2408 * This sets the stream head to NOT flush 2409 * M_PCPROTO messages. 2410 */ 2411 stp->sd_read_opt |= RFLUSHPCPROT; 2412 break; 2413 } 2414 } 2415 if (sop->so_flags & SO_ERROPT) { 2416 switch (sop->so_erropt & RERRMASK) { 2417 case RERRNORM: 2418 stp->sd_flag &= ~STRDERRNONPERSIST; 2419 break; 2420 case RERRNONPERSIST: 2421 stp->sd_flag |= STRDERRNONPERSIST; 2422 break; 2423 } 2424 switch (sop->so_erropt & WERRMASK) { 2425 case WERRNORM: 2426 stp->sd_flag &= ~STWRERRNONPERSIST; 2427 break; 2428 case WERRNONPERSIST: 2429 stp->sd_flag |= STWRERRNONPERSIST; 2430 break; 2431 } 2432 } 2433 if (sop->so_flags & SO_COPYOPT) { 2434 if (sop->so_copyopt & ZCVMSAFE) { 2435 stp->sd_copyflag |= STZCVMSAFE; 2436 stp->sd_copyflag &= ~STZCVMUNSAFE; 2437 } else if (sop->so_copyopt & ZCVMUNSAFE) { 2438 stp->sd_copyflag |= STZCVMUNSAFE; 2439 stp->sd_copyflag &= ~STZCVMSAFE; 2440 } 2441 2442 if (sop->so_copyopt & COPYCACHED) { 2443 stp->sd_copyflag |= STRCOPYCACHED; 2444 } 2445 } 2446 if (sop->so_flags & SO_WROFF) 2447 stp->sd_wroff = sop->so_wroff; 2448 if (sop->so_flags & SO_TAIL) 2449 stp->sd_tail = sop->so_tail; 2450 if (sop->so_flags & SO_MINPSZ) 2451 q->q_minpsz = sop->so_minpsz; 2452 if (sop->so_flags & SO_MAXPSZ) 2453 q->q_maxpsz = sop->so_maxpsz; 2454 if (sop->so_flags & SO_MAXBLK) 2455 stp->sd_maxblk = sop->so_maxblk; 2456 if (sop->so_flags & SO_HIWAT) { 2457 if (sop->so_flags & SO_BAND) { 2458 if (strqset(q, QHIWAT, 2459 sop->so_band, sop->so_hiwat)) { 2460 cmn_err(CE_WARN, "strrput: could not " 2461 "allocate qband\n"); 2462 } else { 2463 bpri = sop->so_band; 2464 } 2465 } else { 2466 q->q_hiwat = sop->so_hiwat; 2467 } 2468 } 2469 if (sop->so_flags & SO_LOWAT) { 2470 if (sop->so_flags & SO_BAND) { 2471 if (strqset(q, QLOWAT, 2472 sop->so_band, sop->so_lowat)) { 2473 cmn_err(CE_WARN, "strrput: could not " 2474 "allocate qband\n"); 2475 } else { 2476 bpri = sop->so_band; 2477 } 2478 } else { 2479 q->q_lowat = sop->so_lowat; 2480 } 2481 } 2482 if (sop->so_flags & SO_MREADON) 2483 stp->sd_flag |= SNDMREAD; 2484 if (sop->so_flags & SO_MREADOFF) 2485 stp->sd_flag &= ~SNDMREAD; 2486 if (sop->so_flags & SO_NDELON) 2487 stp->sd_flag |= OLDNDELAY; 2488 if (sop->so_flags & SO_NDELOFF) 2489 stp->sd_flag &= ~OLDNDELAY; 2490 if (sop->so_flags & SO_ISTTY) 2491 stp->sd_flag |= STRISTTY; 2492 if (sop->so_flags & SO_ISNTTY) 2493 stp->sd_flag &= ~STRISTTY; 2494 if (sop->so_flags & SO_TOSTOP) 2495 stp->sd_flag |= STRTOSTOP; 2496 if (sop->so_flags & SO_TONSTOP) 2497 stp->sd_flag &= ~STRTOSTOP; 2498 if (sop->so_flags & SO_DELIM) 2499 stp->sd_flag |= STRDELIM; 2500 if 
(sop->so_flags & SO_NODELIM) 2501 stp->sd_flag &= ~STRDELIM; 2502 2503 mutex_exit(&stp->sd_lock); 2504 freemsg(bp); 2505 2506 /* Check backenable in case the water marks changed */ 2507 qbackenable(q, bpri); 2508 return (0); 2509 2510 /* 2511 * The following set of cases deal with situations where two stream 2512 * heads are connected to each other (twisted streams). These messages 2513 * have no meaning at the stream head. 2514 */ 2515 case M_BREAK: 2516 case M_CTL: 2517 case M_DELAY: 2518 case M_START: 2519 case M_STOP: 2520 case M_IOCDATA: 2521 case M_STARTI: 2522 case M_STOPI: 2523 freemsg(bp); 2524 return (0); 2525 2526 case M_IOCTL: 2527 /* 2528 * Always NAK this condition 2529 * (it makes no sense here). 2530 * If there are one or more threads in the read side 2531 * rwnext, we have to defer the nacking until they 2532 * return (in strget). 2533 */ 2534 mutex_enter(&stp->sd_lock); 2535 if (stp->sd_struiodnak != 0) { 2536 /* 2537 * Defer the NAK at the stream head. Queue it at the end 2538 * of the list. 2539 */ 2540 mblk_t *mp = stp->sd_struionak; 2541 2542 while (mp && mp->b_next) 2543 mp = mp->b_next; 2544 if (mp) 2545 mp->b_next = bp; 2546 else 2547 stp->sd_struionak = bp; 2548 bp->b_next = NULL; 2549 mutex_exit(&stp->sd_lock); 2550 return (0); 2551 } 2552 mutex_exit(&stp->sd_lock); 2553 2554 bp->b_datap->db_type = M_IOCNAK; 2555 /* 2556 * Protect against the driver passing up 2557 * messages after it has done a qprocsoff. 2558 */ 2559 if (_OTHERQ(q)->q_next == NULL) 2560 freemsg(bp); 2561 else 2562 qreply(q, bp); 2563 return (0); 2564 2565 default: 2566 #ifdef DEBUG 2567 cmn_err(CE_WARN, 2568 "bad message type %x received at stream head\n", 2569 bp->b_datap->db_type); 2570 #endif 2571 freemsg(bp); 2572 return (0); 2573 } 2574 2575 /* NOTREACHED */ 2576 } 2577 2578 /* 2579 * Check if the stream pointed to by `stp' can be written to, and return an 2580 * error code if not. If `eiohup' is set, then return EIO if STRHUP is set. 2581 * If `sigpipeok' is set and the SW_SIGPIPE option is enabled on the stream, 2582 * then always return EPIPE and send a SIGPIPE to the invoking thread. 2583 */ 2584 static int 2585 strwriteable(struct stdata *stp, boolean_t eiohup, boolean_t sigpipeok) 2586 { 2587 int error; 2588 2589 ASSERT(MUTEX_HELD(&stp->sd_lock)); 2590 2591 /* 2592 * For modem support, POSIX states that on writes, EIO should 2593 * be returned if the stream has been hung up. 2594 */ 2595 if (eiohup && (stp->sd_flag & (STPLEX|STRHUP)) == STRHUP) 2596 error = EIO; 2597 else 2598 error = strgeterr(stp, STRHUP|STPLEX|STWRERR, 0); 2599 2600 if (error != 0) { 2601 if (!(stp->sd_flag & STPLEX) && 2602 (stp->sd_wput_opt & SW_SIGPIPE) && sigpipeok) { 2603 tsignal(curthread, SIGPIPE); 2604 error = EPIPE; 2605 } 2606 } 2607 2608 return (error); 2609 } 2610 2611 /* 2612 * Copyin and send data down a stream. 2613 * The caller will allocate and copyin any control part that precedes the 2614 * message and pass that in as mctl. 2615 * 2616 * Caller should *not* hold sd_lock. 2617 * When EWOULDBLOCK is returned the caller has to redo the canputnext 2618 * under sd_lock in order to avoid missing a backenabling wakeup. 2619 * 2620 * Use iosize = -1 to not send any M_DATA. iosize = 0 sends zero-length M_DATA. 2621 * 2622 * Set MSG_IGNFLOW in flags to ignore flow control for hipri messages. 2623 * For sync streams we can only ignore flow control by reverting to using 2624 * putnext. 2625 * 2626 * If sd_maxblk is less than *iosize this routine might return without 2627 * transferring all of *iosize.
In all cases, on return *iosize will contain 2628 * the amount of data that was transferred. 2629 */ 2630 static int 2631 strput(struct stdata *stp, mblk_t *mctl, struct uio *uiop, ssize_t *iosize, 2632 int b_flag, int pri, int flags) 2633 { 2634 struiod_t uiod; 2635 mblk_t *mp; 2636 queue_t *wqp = stp->sd_wrq; 2637 int error = 0; 2638 ssize_t count = *iosize; 2639 cred_t *cr; 2640 2641 ASSERT(MUTEX_NOT_HELD(&stp->sd_lock)); 2642 2643 if (uiop != NULL && count >= 0) 2644 flags |= stp->sd_struiowrq ? STRUIO_POSTPONE : 0; 2645 2646 if (!(flags & STRUIO_POSTPONE)) { 2647 /* 2648 * Use regular canputnext, strmakedata, putnext sequence. 2649 */ 2650 if (pri == 0) { 2651 if (!canputnext(wqp) && !(flags & MSG_IGNFLOW)) { 2652 freemsg(mctl); 2653 return (EWOULDBLOCK); 2654 } 2655 } else { 2656 if (!(flags & MSG_IGNFLOW) && !bcanputnext(wqp, pri)) { 2657 freemsg(mctl); 2658 return (EWOULDBLOCK); 2659 } 2660 } 2661 2662 if ((error = strmakedata(iosize, uiop, stp, flags, 2663 &mp)) != 0) { 2664 freemsg(mctl); 2665 /* 2666 * need to change return code to ENOMEM 2667 * so that this is not confused with 2668 * flow control, EAGAIN. 2669 */ 2670 2671 if (error == EAGAIN) 2672 return (ENOMEM); 2673 else 2674 return (error); 2675 } 2676 if (mctl != NULL) { 2677 if (mctl->b_cont == NULL) 2678 mctl->b_cont = mp; 2679 else if (mp != NULL) 2680 linkb(mctl, mp); 2681 mp = mctl; 2682 /* 2683 * Note that for interrupt thread, the CRED() is 2684 * NULL. Don't bother with the pid either. 2685 */ 2686 if ((cr = CRED()) != NULL) { 2687 mblk_setcred(mp, cr); 2688 DB_CPID(mp) = curproc->p_pid; 2689 } 2690 } else if (mp == NULL) 2691 return (0); 2692 2693 mp->b_flag |= b_flag; 2694 mp->b_band = (uchar_t)pri; 2695 2696 if (flags & MSG_IGNFLOW) { 2697 /* 2698 * XXX Hack: Don't get stuck running service 2699 * procedures. This is needed for sockfs when 2700 * sending the unbind message out of the rput 2701 * procedure - we don't want a put procedure 2702 * to run service procedures. 2703 */ 2704 putnext(wqp, mp); 2705 } else { 2706 stream_willservice(stp); 2707 putnext(wqp, mp); 2708 stream_runservice(stp); 2709 } 2710 return (0); 2711 } 2712 /* 2713 * Stream supports rwnext() for the write side. 2714 */ 2715 if ((error = strmakedata(iosize, uiop, stp, flags, &mp)) != 0) { 2716 freemsg(mctl); 2717 /* 2718 * map EAGAIN to ENOMEM since EAGAIN means "flow controlled". 2719 */ 2720 return (error == EAGAIN ? ENOMEM : error); 2721 } 2722 if (mctl != NULL) { 2723 if (mctl->b_cont == NULL) 2724 mctl->b_cont = mp; 2725 else if (mp != NULL) 2726 linkb(mctl, mp); 2727 mp = mctl; 2728 /* 2729 * Note that for interrupt thread, the CRED() is 2730 * NULL. Don't bother with the pid either. 2731 */ 2732 if ((cr = CRED()) != NULL) { 2733 mblk_setcred(mp, cr); 2734 DB_CPID(mp) = curproc->p_pid; 2735 } 2736 } else if (mp == NULL) { 2737 return (0); 2738 } 2739 2740 mp->b_flag |= b_flag; 2741 mp->b_band = (uchar_t)pri; 2742 2743 (void) uiodup(uiop, &uiod.d_uio, uiod.d_iov, 2744 sizeof (uiod.d_iov) / sizeof (*uiod.d_iov)); 2745 uiod.d_uio.uio_offset = 0; 2746 uiod.d_mp = mp; 2747 error = rwnext(wqp, &uiod); 2748 if (! uiod.d_mp) { 2749 uioskip(uiop, *iosize); 2750 return (error); 2751 } 2752 ASSERT(mp == uiod.d_mp); 2753 if (error == EINVAL) { 2754 /* 2755 * The stream plumbing must have changed while 2756 * we were away, so just turn off rwnext()s. 2757 */ 2758 error = 0; 2759 } else if (error == EBUSY || error == EWOULDBLOCK) { 2760 /* 2761 * Couldn't enter a perimeter or took a page fault, 2762 * so fall-back to putnext(). 
2763 */ 2764 error = 0; 2765 } else { 2766 freemsg(mp); 2767 return (error); 2768 } 2769 /* Have to check canput before consuming data from the uio */ 2770 if (pri == 0) { 2771 if (!canputnext(wqp) && !(flags & MSG_IGNFLOW)) { 2772 freemsg(mp); 2773 return (EWOULDBLOCK); 2774 } 2775 } else { 2776 if (!bcanputnext(wqp, pri) && !(flags & MSG_IGNFLOW)) { 2777 freemsg(mp); 2778 return (EWOULDBLOCK); 2779 } 2780 } 2781 ASSERT(mp == uiod.d_mp); 2782 /* Copyin data from the uio */ 2783 if ((error = struioget(wqp, mp, &uiod, 0)) != 0) { 2784 freemsg(mp); 2785 return (error); 2786 } 2787 uioskip(uiop, *iosize); 2788 if (flags & MSG_IGNFLOW) { 2789 /* 2790 * XXX Hack: Don't get stuck running service procedures. 2791 * This is needed for sockfs when sending the unbind message 2792 * out of the rput procedure - we don't want a put procedure 2793 * to run service procedures. 2794 */ 2795 putnext(wqp, mp); 2796 } else { 2797 stream_willservice(stp); 2798 putnext(wqp, mp); 2799 stream_runservice(stp); 2800 } 2801 return (0); 2802 } 2803 2804 /* 2805 * Write attempts to break the write request into messages conforming 2806 * with the minimum and maximum packet sizes set downstream. 2807 * 2808 * Write will not block if downstream queue is full and 2809 * O_NDELAY is set, otherwise it will block waiting for the queue to get room. 2810 * 2811 * A write of zero bytes gets packaged into a zero length message and sent 2812 * downstream like any other message. 2813 * 2814 * If buffers of the requested sizes are not available, the write will 2815 * sleep until the buffers become available. 2816 * 2817 * Write (if specified) will supply a write offset in a message if it 2818 * makes sense. This can be specified by downstream modules as part of 2819 * a M_SETOPTS message. Write will not supply the write offset if it 2820 * cannot supply any data in a buffer. In other words, write will never 2821 * send down an empty packet due to a write offset. 2822 */ 2823 /* ARGSUSED2 */ 2824 int 2825 strwrite(struct vnode *vp, struct uio *uiop, cred_t *crp) 2826 { 2827 return (strwrite_common(vp, uiop, crp, 0)); 2828 } 2829 2830 /* ARGSUSED2 */ 2831 int 2832 strwrite_common(struct vnode *vp, struct uio *uiop, cred_t *crp, int wflag) 2833 { 2834 struct stdata *stp; 2835 struct queue *wqp; 2836 ssize_t rmin, rmax; 2837 ssize_t iosize; 2838 int waitflag; 2839 int tempmode; 2840 int error = 0; 2841 int b_flag; 2842 2843 ASSERT(vp->v_stream); 2844 stp = vp->v_stream; 2845 2846 mutex_enter(&stp->sd_lock); 2847 2848 if ((error = i_straccess(stp, JCWRITE)) != 0) { 2849 mutex_exit(&stp->sd_lock); 2850 return (error); 2851 } 2852 2853 if (stp->sd_flag & (STWRERR|STRHUP|STPLEX)) { 2854 error = strwriteable(stp, B_TRUE, B_TRUE); 2855 if (error != 0) { 2856 mutex_exit(&stp->sd_lock); 2857 return (error); 2858 } 2859 } 2860 2861 mutex_exit(&stp->sd_lock); 2862 2863 wqp = stp->sd_wrq; 2864 2865 /* get these values from them cached in the stream head */ 2866 rmin = stp->sd_qn_minpsz; 2867 rmax = stp->sd_qn_maxpsz; 2868 2869 /* 2870 * Check the min/max packet size constraints. If min packet size 2871 * is non-zero, the write cannot be split into multiple messages 2872 * and still guarantee the size constraints. 
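 *
 * [Editorial aside, not part of the original source: a concrete example
 * of the check below, assuming the module under the stream head
 * advertises a minimum packet size of 8 and a maximum of 1024.]
 *
 *	write(fd, buf, 4);	-- fails with ERANGE (below the minimum)
 *	write(fd, buf, 4096);	-- fails with ERANGE (above the finite maximum)
 *	write(fd, buf, 512);	-- sent downstream as a single message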
2873 */ 2874 TRACE_1(TR_FAC_STREAMS_FR, TR_STRWRITE_IN, "strwrite in:q %p", wqp); 2875 2876 ASSERT((rmax >= 0) || (rmax == INFPSZ)); 2877 if (rmax == 0) { 2878 return (0); 2879 } 2880 if (rmin > 0) { 2881 if (uiop->uio_resid < rmin) { 2882 TRACE_3(TR_FAC_STREAMS_FR, TR_STRWRITE_OUT, 2883 "strwrite out:q %p out %d error %d", 2884 wqp, 0, ERANGE); 2885 return (ERANGE); 2886 } 2887 if ((rmax != INFPSZ) && (uiop->uio_resid > rmax)) { 2888 TRACE_3(TR_FAC_STREAMS_FR, TR_STRWRITE_OUT, 2889 "strwrite out:q %p out %d error %d", 2890 wqp, 1, ERANGE); 2891 return (ERANGE); 2892 } 2893 } 2894 2895 /* 2896 * Do until count satisfied or error. 2897 */ 2898 waitflag = WRITEWAIT | wflag; 2899 if (stp->sd_flag & OLDNDELAY) 2900 tempmode = uiop->uio_fmode & ~FNDELAY; 2901 else 2902 tempmode = uiop->uio_fmode; 2903 2904 if (rmax == INFPSZ) 2905 rmax = uiop->uio_resid; 2906 2907 /* 2908 * Note that tempmode does not get used in strput/strmakedata 2909 * but only in strwaitq. The other routines use uio_fmode 2910 * unmodified. 2911 */ 2912 2913 /* LINTED: constant in conditional context */ 2914 while (1) { /* breaks when uio_resid reaches zero */ 2915 /* 2916 * Determine the size of the next message to be 2917 * packaged. May have to break write into several 2918 * messages based on max packet size. 2919 */ 2920 iosize = MIN(uiop->uio_resid, rmax); 2921 2922 /* 2923 * Put block downstream when flow control allows it. 2924 */ 2925 if ((stp->sd_flag & STRDELIM) && (uiop->uio_resid == iosize)) 2926 b_flag = MSGDELIM; 2927 else 2928 b_flag = 0; 2929 2930 for (;;) { 2931 int done = 0; 2932 2933 error = strput(stp, NULL, uiop, &iosize, b_flag, 0, 0); 2934 if (error == 0) 2935 break; 2936 if (error != EWOULDBLOCK) 2937 goto out; 2938 2939 mutex_enter(&stp->sd_lock); 2940 /* 2941 * Check for a missed wakeup. 2942 * Needed since strput did not hold sd_lock across 2943 * the canputnext. 2944 */ 2945 if (canputnext(wqp)) { 2946 /* Try again */ 2947 mutex_exit(&stp->sd_lock); 2948 continue; 2949 } 2950 TRACE_1(TR_FAC_STREAMS_FR, TR_STRWRITE_WAIT, 2951 "strwrite wait:q %p wait", wqp); 2952 if ((error = strwaitq(stp, waitflag, (ssize_t)0, 2953 tempmode, -1, &done)) != 0 || done) { 2954 mutex_exit(&stp->sd_lock); 2955 if ((vp->v_type == VFIFO) && 2956 (uiop->uio_fmode & FNDELAY) && 2957 (error == EAGAIN)) 2958 error = 0; 2959 goto out; 2960 } 2961 TRACE_1(TR_FAC_STREAMS_FR, TR_STRWRITE_WAKE, 2962 "strwrite wake:q %p awakes", wqp); 2963 if ((error = i_straccess(stp, JCWRITE)) != 0) { 2964 mutex_exit(&stp->sd_lock); 2965 goto out; 2966 } 2967 mutex_exit(&stp->sd_lock); 2968 } 2969 waitflag |= NOINTR; 2970 TRACE_2(TR_FAC_STREAMS_FR, TR_STRWRITE_RESID, 2971 "strwrite resid:q %p uiop %p", wqp, uiop); 2972 if (uiop->uio_resid) { 2973 /* Recheck for errors - needed for sockets */ 2974 if ((stp->sd_wput_opt & SW_RECHECK_ERR) && 2975 (stp->sd_flag & (STWRERR|STRHUP|STPLEX))) { 2976 mutex_enter(&stp->sd_lock); 2977 error = strwriteable(stp, B_FALSE, B_TRUE); 2978 mutex_exit(&stp->sd_lock); 2979 if (error != 0) 2980 return (error); 2981 } 2982 continue; 2983 } 2984 break; 2985 } 2986 out: 2987 /* 2988 * For historical reasons, applications expect EAGAIN when a data 2989 * mblk_t cannot be allocated, so change ENOMEM back to EAGAIN. 2990 */ 2991 if (error == ENOMEM) 2992 error = EAGAIN; 2993 TRACE_3(TR_FAC_STREAMS_FR, TR_STRWRITE_OUT, 2994 "strwrite out:q %p out %d error %d", wqp, 2, error); 2995 return (error); 2996 } 2997 2998 /* 2999 * Stream head write service routine. 
3000 * Its job is to wake up any sleeping writers when a queue 3001 * downstream needs data (part of the flow control in putq and getq). 3002 * It also must wake anyone sleeping on a poll(). 3003 * For stream head right below mux module, it must also invoke put procedure 3004 * of next downstream module. 3005 */ 3006 int 3007 strwsrv(queue_t *q) 3008 { 3009 struct stdata *stp; 3010 queue_t *tq; 3011 qband_t *qbp; 3012 int i; 3013 qband_t *myqbp; 3014 int isevent; 3015 unsigned char qbf[NBAND]; /* band flushing backenable flags */ 3016 3017 TRACE_1(TR_FAC_STREAMS_FR, 3018 TR_STRWSRV, "strwsrv:q %p", q); 3019 stp = (struct stdata *)q->q_ptr; 3020 ASSERT(qclaimed(q)); 3021 mutex_enter(&stp->sd_lock); 3022 ASSERT(!(stp->sd_flag & STPLEX)); 3023 3024 if (stp->sd_flag & WSLEEP) { 3025 stp->sd_flag &= ~WSLEEP; 3026 cv_broadcast(&q->q_wait); 3027 } 3028 mutex_exit(&stp->sd_lock); 3029 3030 /* The other end of a stream pipe went away. */ 3031 if ((tq = q->q_next) == NULL) { 3032 return (0); 3033 } 3034 3035 /* Find the next module forward that has a service procedure */ 3036 claimstr(q); 3037 tq = q->q_nfsrv; 3038 ASSERT(tq != NULL); 3039 3040 if ((q->q_flag & QBACK)) { 3041 if ((tq->q_flag & QFULL)) { 3042 mutex_enter(QLOCK(tq)); 3043 if (!(tq->q_flag & QFULL)) { 3044 mutex_exit(QLOCK(tq)); 3045 goto wakeup; 3046 } 3047 /* 3048 * The queue must have become full again. Set QWANTW 3049 * again so strwsrv will be back enabled when 3050 * the queue becomes non-full next time. 3051 */ 3052 tq->q_flag |= QWANTW; 3053 mutex_exit(QLOCK(tq)); 3054 } else { 3055 wakeup: 3056 pollwakeup(&stp->sd_pollist, POLLWRNORM); 3057 mutex_enter(&stp->sd_lock); 3058 if (stp->sd_sigflags & S_WRNORM) 3059 strsendsig(stp->sd_siglist, S_WRNORM, 0, 0); 3060 mutex_exit(&stp->sd_lock); 3061 } 3062 } 3063 3064 isevent = 0; 3065 i = 1; 3066 bzero((caddr_t)qbf, NBAND); 3067 mutex_enter(QLOCK(tq)); 3068 if ((myqbp = q->q_bandp) != NULL) 3069 for (qbp = tq->q_bandp; qbp && myqbp; qbp = qbp->qb_next) { 3070 ASSERT(myqbp); 3071 if ((myqbp->qb_flag & QB_BACK)) { 3072 if (qbp->qb_flag & QB_FULL) { 3073 /* 3074 * The band must have become full again. 3075 * Set QB_WANTW again so strwsrv will 3076 * be back enabled when the band becomes 3077 * non-full next time. 3078 */ 3079 qbp->qb_flag |= QB_WANTW; 3080 } else { 3081 isevent = 1; 3082 qbf[i] = 1; 3083 } 3084 } 3085 myqbp = myqbp->qb_next; 3086 i++; 3087 } 3088 mutex_exit(QLOCK(tq)); 3089 3090 if (isevent) { 3091 for (i = tq->q_nband; i; i--) { 3092 if (qbf[i]) { 3093 pollwakeup(&stp->sd_pollist, POLLWRBAND); 3094 mutex_enter(&stp->sd_lock); 3095 if (stp->sd_sigflags & S_WRBAND) 3096 strsendsig(stp->sd_siglist, S_WRBAND, 3097 (uchar_t)i, 0); 3098 mutex_exit(&stp->sd_lock); 3099 } 3100 } 3101 } 3102 3103 releasestr(q); 3104 return (0); 3105 } 3106 3107 /* 3108 * Special case of strcopyin/strcopyout for copying 3109 * struct strioctl that can deal with both data 3110 * models. 
3111 */ 3112 3113 #ifdef _LP64 3114 3115 static int 3116 strcopyin_strioctl(void *from, void *to, int flag, int copyflag) 3117 { 3118 struct strioctl32 strioc32; 3119 struct strioctl *striocp; 3120 3121 if (copyflag & U_TO_K) { 3122 ASSERT((copyflag & K_TO_K) == 0); 3123 3124 if ((flag & FMODELS) == DATAMODEL_ILP32) { 3125 if (copyin(from, &strioc32, sizeof (strioc32))) 3126 return (EFAULT); 3127 3128 striocp = (struct strioctl *)to; 3129 striocp->ic_cmd = strioc32.ic_cmd; 3130 striocp->ic_timout = strioc32.ic_timout; 3131 striocp->ic_len = strioc32.ic_len; 3132 striocp->ic_dp = (char *)(uintptr_t)strioc32.ic_dp; 3133 3134 } else { /* NATIVE data model */ 3135 if (copyin(from, to, sizeof (struct strioctl))) { 3136 return (EFAULT); 3137 } else { 3138 return (0); 3139 } 3140 } 3141 } else { 3142 ASSERT(copyflag & K_TO_K); 3143 bcopy(from, to, sizeof (struct strioctl)); 3144 } 3145 return (0); 3146 } 3147 3148 static int 3149 strcopyout_strioctl(void *from, void *to, int flag, int copyflag) 3150 { 3151 struct strioctl32 strioc32; 3152 struct strioctl *striocp; 3153 3154 if (copyflag & U_TO_K) { 3155 ASSERT((copyflag & K_TO_K) == 0); 3156 3157 if ((flag & FMODELS) == DATAMODEL_ILP32) { 3158 striocp = (struct strioctl *)from; 3159 strioc32.ic_cmd = striocp->ic_cmd; 3160 strioc32.ic_timout = striocp->ic_timout; 3161 strioc32.ic_len = striocp->ic_len; 3162 strioc32.ic_dp = (caddr32_t)(uintptr_t)striocp->ic_dp; 3163 ASSERT((char *)(uintptr_t)strioc32.ic_dp == 3164 striocp->ic_dp); 3165 3166 if (copyout(&strioc32, to, sizeof (strioc32))) 3167 return (EFAULT); 3168 3169 } else { /* NATIVE data model */ 3170 if (copyout(from, to, sizeof (struct strioctl))) { 3171 return (EFAULT); 3172 } else { 3173 return (0); 3174 } 3175 } 3176 } else { 3177 ASSERT(copyflag & K_TO_K); 3178 bcopy(from, to, sizeof (struct strioctl)); 3179 } 3180 return (0); 3181 } 3182 3183 #else /* ! _LP64 */ 3184 3185 /* ARGSUSED2 */ 3186 static int 3187 strcopyin_strioctl(void *from, void *to, int flag, int copyflag) 3188 { 3189 return (strcopyin(from, to, sizeof (struct strioctl), copyflag)); 3190 } 3191 3192 /* ARGSUSED2 */ 3193 static int 3194 strcopyout_strioctl(void *from, void *to, int flag, int copyflag) 3195 { 3196 return (strcopyout(from, to, sizeof (struct strioctl), copyflag)); 3197 } 3198 3199 #endif /* _LP64 */ 3200 3201 /* 3202 * Determine type of job control semantics expected by user. The 3203 * possibilities are: 3204 * JCREAD - Behaves like read() on fd; send SIGTTIN 3205 * JCWRITE - Behaves like write() on fd; send SIGTTOU if TOSTOP set 3206 * JCSETP - Sets a value in the stream; send SIGTTOU, ignore TOSTOP 3207 * JCGETP - Gets a value in the stream; no signals. 3208 * See straccess in strsubr.c for usage of these values. 3209 * 3210 * This routine also returns -1 for I_STR as a special case; the 3211 * caller must call again with the real ioctl number for 3212 * classification. 
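 *
 * [Editorial aside, not part of the original source: example
 * classifications as produced by the switch below.]
 *
 *	job_control_type(I_RECVFD)  == JCREAD	-- like read(); SIGTTIN
 *	job_control_type(I_SENDFD)  == JCWRITE	-- like write(); SIGTTOU if TOSTOP
 *	job_control_type(TIOCSPGRP) == JCSETP	-- sets a value; SIGTTOU
 *	job_control_type(TCGETA)    == JCGETP	-- gets a value; no signal
 *	job_control_type(I_STR)     == -1	-- classify again using ic_cmd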
3213 */ 3214 static int 3215 job_control_type(int cmd) 3216 { 3217 switch (cmd) { 3218 case I_STR: 3219 return (-1); 3220 3221 case I_RECVFD: 3222 case I_E_RECVFD: 3223 return (JCREAD); 3224 3225 case I_FDINSERT: 3226 case I_SENDFD: 3227 return (JCWRITE); 3228 3229 case TCSETA: 3230 case TCSETAW: 3231 case TCSETAF: 3232 case TCSBRK: 3233 case TCXONC: 3234 case TCFLSH: 3235 case TCDSET: /* Obsolete */ 3236 case TIOCSWINSZ: 3237 case TCSETS: 3238 case TCSETSW: 3239 case TCSETSF: 3240 case TIOCSETD: 3241 case TIOCHPCL: 3242 case TIOCSETP: 3243 case TIOCSETN: 3244 case TIOCEXCL: 3245 case TIOCNXCL: 3246 case TIOCFLUSH: 3247 case TIOCSETC: 3248 case TIOCLBIS: 3249 case TIOCLBIC: 3250 case TIOCLSET: 3251 case TIOCSBRK: 3252 case TIOCCBRK: 3253 case TIOCSDTR: 3254 case TIOCCDTR: 3255 case TIOCSLTC: 3256 case TIOCSTOP: 3257 case TIOCSTART: 3258 case TIOCSTI: 3259 case TIOCSPGRP: 3260 case TIOCMSET: 3261 case TIOCMBIS: 3262 case TIOCMBIC: 3263 case TIOCREMOTE: 3264 case TIOCSIGNAL: 3265 case LDSETT: 3266 case LDSMAP: /* Obsolete */ 3267 case DIOCSETP: 3268 case I_FLUSH: 3269 case I_SRDOPT: 3270 case I_SETSIG: 3271 case I_SWROPT: 3272 case I_FLUSHBAND: 3273 case I_SETCLTIME: 3274 case I_SERROPT: 3275 case I_ESETSIG: 3276 case FIONBIO: 3277 case FIOASYNC: 3278 case FIOSETOWN: 3279 case JBOOT: /* Obsolete */ 3280 case JTERM: /* Obsolete */ 3281 case JTIMOM: /* Obsolete */ 3282 case JZOMBOOT: /* Obsolete */ 3283 case JAGENT: /* Obsolete */ 3284 case JTRUN: /* Obsolete */ 3285 case JXTPROTO: /* Obsolete */ 3286 case TIOCSETLD: 3287 return (JCSETP); 3288 } 3289 3290 return (JCGETP); 3291 } 3292 3293 /* 3294 * ioctl for streams 3295 */ 3296 int 3297 strioctl(struct vnode *vp, int cmd, intptr_t arg, int flag, int copyflag, 3298 cred_t *crp, int *rvalp) 3299 { 3300 struct stdata *stp; 3301 struct strcmd *scp; 3302 struct strioctl strioc; 3303 struct uio uio; 3304 struct iovec iov; 3305 int access; 3306 mblk_t *mp; 3307 int error = 0; 3308 int done = 0; 3309 ssize_t rmin, rmax; 3310 queue_t *wrq; 3311 queue_t *rdq; 3312 boolean_t kioctl = B_FALSE; 3313 3314 if (flag & FKIOCTL) { 3315 copyflag = K_TO_K; 3316 kioctl = B_TRUE; 3317 } 3318 ASSERT(vp->v_stream); 3319 ASSERT(copyflag == U_TO_K || copyflag == K_TO_K); 3320 stp = vp->v_stream; 3321 3322 TRACE_3(TR_FAC_STREAMS_FR, TR_IOCTL_ENTER, 3323 "strioctl:stp %p cmd %X arg %lX", stp, cmd, arg); 3324 3325 if (audit_active) 3326 audit_strioctl(vp, cmd, arg, flag, copyflag, crp, rvalp); 3327 3328 /* 3329 * If the copy is kernel to kernel, make sure that the FNATIVE 3330 * flag is set. After this it would be a serious error to have 3331 * no model flag. 3332 */ 3333 if (copyflag == K_TO_K) 3334 flag = (flag & ~FMODELS) | FNATIVE; 3335 3336 ASSERT((flag & FMODELS) != 0); 3337 3338 wrq = stp->sd_wrq; 3339 rdq = _RD(wrq); 3340 3341 access = job_control_type(cmd); 3342 3343 /* We should never see these here, should be handled by iwscn */ 3344 if (cmd == SRIOCSREDIR || cmd == SRIOCISREDIR) 3345 return (EINVAL); 3346 3347 mutex_enter(&stp->sd_lock); 3348 if ((access != -1) && ((error = i_straccess(stp, access)) != 0)) { 3349 mutex_exit(&stp->sd_lock); 3350 return (error); 3351 } 3352 mutex_exit(&stp->sd_lock); 3353 3354 /* 3355 * Check for sgttyb-related ioctls first, and complain as 3356 * necessary. 
3357 */ 3358 switch (cmd) { 3359 case TIOCGETP: 3360 case TIOCSETP: 3361 case TIOCSETN: 3362 if (sgttyb_handling >= 2 && !sgttyb_complaint) { 3363 sgttyb_complaint = B_TRUE; 3364 cmn_err(CE_NOTE, 3365 "application used obsolete TIOC[GS]ET"); 3366 } 3367 if (sgttyb_handling >= 3) { 3368 tsignal(curthread, SIGSYS); 3369 return (EIO); 3370 } 3371 break; 3372 } 3373 3374 mutex_enter(&stp->sd_lock); 3375 3376 switch (cmd) { 3377 case I_RECVFD: 3378 case I_E_RECVFD: 3379 case I_PEEK: 3380 case I_NREAD: 3381 case FIONREAD: 3382 case FIORDCHK: 3383 case I_ATMARK: 3384 case FIONBIO: 3385 case FIOASYNC: 3386 if (stp->sd_flag & (STRDERR|STPLEX)) { 3387 error = strgeterr(stp, STRDERR|STPLEX, 0); 3388 if (error != 0) { 3389 mutex_exit(&stp->sd_lock); 3390 return (error); 3391 } 3392 } 3393 break; 3394 3395 default: 3396 if (stp->sd_flag & (STRDERR|STWRERR|STPLEX)) { 3397 error = strgeterr(stp, STRDERR|STWRERR|STPLEX, 0); 3398 if (error != 0) { 3399 mutex_exit(&stp->sd_lock); 3400 return (error); 3401 } 3402 } 3403 } 3404 3405 mutex_exit(&stp->sd_lock); 3406 3407 switch (cmd) { 3408 default: 3409 /* 3410 * The stream head has hardcoded knowledge of a 3411 * miscellaneous collection of terminal-, keyboard- and 3412 * mouse-related ioctls, enumerated below. This hardcoded 3413 * knowledge allows the stream head to automatically 3414 * convert transparent ioctl requests made by userland 3415 * programs into I_STR ioctls which many old STREAMS 3416 * modules and drivers require. 3417 * 3418 * No new ioctls should ever be added to this list. 3419 * Instead, the STREAMS module or driver should be written 3420 * to either handle transparent ioctls or require any 3421 * userland programs to use I_STR ioctls (by returning 3422 * EINVAL to any transparent ioctl requests). 3423 * 3424 * More importantly, removing ioctls from this list should 3425 * be done with the utmost care, since our STREAMS modules 3426 * and drivers *count* on the stream head performing this 3427 * conversion, and thus may panic while processing 3428 * transparent ioctl request for one of these ioctls (keep 3429 * in mind that third party modules and drivers may have 3430 * similar problems). 3431 */ 3432 if (((cmd & IOCTYPE) == LDIOC) || 3433 ((cmd & IOCTYPE) == tIOC) || 3434 ((cmd & IOCTYPE) == TIOC) || 3435 ((cmd & IOCTYPE) == KIOC) || 3436 ((cmd & IOCTYPE) == MSIOC) || 3437 ((cmd & IOCTYPE) == VUIOC)) { 3438 /* 3439 * The ioctl is a tty ioctl - set up strioc buffer 3440 * and call strdoioctl() to do the work. 
3441 */ 3442 if (stp->sd_flag & STRHUP) 3443 return (ENXIO); 3444 strioc.ic_cmd = cmd; 3445 strioc.ic_timout = INFTIM; 3446 3447 switch (cmd) { 3448 3449 case TCXONC: 3450 case TCSBRK: 3451 case TCFLSH: 3452 case TCDSET: 3453 { 3454 int native_arg = (int)arg; 3455 strioc.ic_len = sizeof (int); 3456 strioc.ic_dp = (char *)&native_arg; 3457 return (strdoioctl(stp, &strioc, flag, 3458 K_TO_K, crp, rvalp)); 3459 } 3460 3461 case TCSETA: 3462 case TCSETAW: 3463 case TCSETAF: 3464 strioc.ic_len = sizeof (struct termio); 3465 strioc.ic_dp = (char *)arg; 3466 return (strdoioctl(stp, &strioc, flag, 3467 copyflag, crp, rvalp)); 3468 3469 case TCSETS: 3470 case TCSETSW: 3471 case TCSETSF: 3472 strioc.ic_len = sizeof (struct termios); 3473 strioc.ic_dp = (char *)arg; 3474 return (strdoioctl(stp, &strioc, flag, 3475 copyflag, crp, rvalp)); 3476 3477 case LDSETT: 3478 strioc.ic_len = sizeof (struct termcb); 3479 strioc.ic_dp = (char *)arg; 3480 return (strdoioctl(stp, &strioc, flag, 3481 copyflag, crp, rvalp)); 3482 3483 case TIOCSETP: 3484 strioc.ic_len = sizeof (struct sgttyb); 3485 strioc.ic_dp = (char *)arg; 3486 return (strdoioctl(stp, &strioc, flag, 3487 copyflag, crp, rvalp)); 3488 3489 case TIOCSTI: 3490 if ((flag & FREAD) == 0 && 3491 secpolicy_sti(crp) != 0) { 3492 return (EPERM); 3493 } 3494 mutex_enter(&stp->sd_lock); 3495 mutex_enter(&curproc->p_splock); 3496 if (stp->sd_sidp != curproc->p_sessp->s_sidp && 3497 secpolicy_sti(crp) != 0) { 3498 mutex_exit(&curproc->p_splock); 3499 mutex_exit(&stp->sd_lock); 3500 return (EACCES); 3501 } 3502 mutex_exit(&curproc->p_splock); 3503 mutex_exit(&stp->sd_lock); 3504 3505 strioc.ic_len = sizeof (char); 3506 strioc.ic_dp = (char *)arg; 3507 return (strdoioctl(stp, &strioc, flag, 3508 copyflag, crp, rvalp)); 3509 3510 case TIOCSWINSZ: 3511 strioc.ic_len = sizeof (struct winsize); 3512 strioc.ic_dp = (char *)arg; 3513 return (strdoioctl(stp, &strioc, flag, 3514 copyflag, crp, rvalp)); 3515 3516 case TIOCSSIZE: 3517 strioc.ic_len = sizeof (struct ttysize); 3518 strioc.ic_dp = (char *)arg; 3519 return (strdoioctl(stp, &strioc, flag, 3520 copyflag, crp, rvalp)); 3521 3522 case TIOCSSOFTCAR: 3523 case KIOCTRANS: 3524 case KIOCTRANSABLE: 3525 case KIOCCMD: 3526 case KIOCSDIRECT: 3527 case KIOCSCOMPAT: 3528 case KIOCSKABORTEN: 3529 case KIOCSRPTDELAY: 3530 case KIOCSRPTRATE: 3531 case VUIDSFORMAT: 3532 case TIOCSPPS: 3533 strioc.ic_len = sizeof (int); 3534 strioc.ic_dp = (char *)arg; 3535 return (strdoioctl(stp, &strioc, flag, 3536 copyflag, crp, rvalp)); 3537 3538 case KIOCSETKEY: 3539 case KIOCGETKEY: 3540 strioc.ic_len = sizeof (struct kiockey); 3541 strioc.ic_dp = (char *)arg; 3542 return (strdoioctl(stp, &strioc, flag, 3543 copyflag, crp, rvalp)); 3544 3545 case KIOCSKEY: 3546 case KIOCGKEY: 3547 strioc.ic_len = sizeof (struct kiockeymap); 3548 strioc.ic_dp = (char *)arg; 3549 return (strdoioctl(stp, &strioc, flag, 3550 copyflag, crp, rvalp)); 3551 3552 case KIOCSLED: 3553 /* arg is a pointer to char */ 3554 strioc.ic_len = sizeof (char); 3555 strioc.ic_dp = (char *)arg; 3556 return (strdoioctl(stp, &strioc, flag, 3557 copyflag, crp, rvalp)); 3558 3559 case MSIOSETPARMS: 3560 strioc.ic_len = sizeof (Ms_parms); 3561 strioc.ic_dp = (char *)arg; 3562 return (strdoioctl(stp, &strioc, flag, 3563 copyflag, crp, rvalp)); 3564 3565 case VUIDSADDR: 3566 case VUIDGADDR: 3567 strioc.ic_len = sizeof (struct vuid_addr_probe); 3568 strioc.ic_dp = (char *)arg; 3569 return (strdoioctl(stp, &strioc, flag, 3570 copyflag, crp, rvalp)); 3571 3572 /* 3573 * These M_IOCTL's 
don't require any data to be sent 3574 * downstream, and the driver will allocate and link 3575 * on its own mblk_t upon M_IOCACK -- thus we set 3576 * ic_len to zero and set ic_dp to arg so we know 3577 * where to copyout to later. 3578 */ 3579 case TIOCGSOFTCAR: 3580 case TIOCGWINSZ: 3581 case TIOCGSIZE: 3582 case KIOCGTRANS: 3583 case KIOCGTRANSABLE: 3584 case KIOCTYPE: 3585 case KIOCGDIRECT: 3586 case KIOCGCOMPAT: 3587 case KIOCLAYOUT: 3588 case KIOCGLED: 3589 case MSIOGETPARMS: 3590 case MSIOBUTTONS: 3591 case VUIDGFORMAT: 3592 case TIOCGPPS: 3593 case TIOCGPPSEV: 3594 case TCGETA: 3595 case TCGETS: 3596 case LDGETT: 3597 case TIOCGETP: 3598 case KIOCGRPTDELAY: 3599 case KIOCGRPTRATE: 3600 strioc.ic_len = 0; 3601 strioc.ic_dp = (char *)arg; 3602 return (strdoioctl(stp, &strioc, flag, 3603 copyflag, crp, rvalp)); 3604 } 3605 } 3606 3607 /* 3608 * Unknown cmd - send it down as a transparent ioctl. 3609 */ 3610 strioc.ic_cmd = cmd; 3611 strioc.ic_timout = INFTIM; 3612 strioc.ic_len = TRANSPARENT; 3613 strioc.ic_dp = (char *)&arg; 3614 3615 return (strdoioctl(stp, &strioc, flag, copyflag, crp, rvalp)); 3616 3617 case I_STR: 3618 /* 3619 * Stream ioctl. Read in an strioctl buffer from the user 3620 * along with any data specified and send it downstream. 3621 * strdoioctl() allows only one ioctl message at 3622 * a time, and waits for the acknowledgement. 3623 */ 3624 3625 if (stp->sd_flag & STRHUP) 3626 return (ENXIO); 3627 3628 error = strcopyin_strioctl((void *)arg, &strioc, flag, 3629 copyflag); 3630 if (error != 0) 3631 return (error); 3632 3633 if ((strioc.ic_len < 0) || (strioc.ic_timout < -1)) 3634 return (EINVAL); 3635 3636 access = job_control_type(strioc.ic_cmd); 3637 mutex_enter(&stp->sd_lock); 3638 if ((access != -1) && 3639 ((error = i_straccess(stp, access)) != 0)) { 3640 mutex_exit(&stp->sd_lock); 3641 return (error); 3642 } 3643 mutex_exit(&stp->sd_lock); 3644 3645 /* 3646 * The I_STR facility provides a trap door for malicious 3647 * code to send down bogus streamio(7I) ioctl commands to 3648 * unsuspecting STREAMS modules and drivers which expect to 3649 * only get these messages from the stream head. 3650 * Explicitly prohibit any streamio ioctls which can be 3651 * passed downstream by the stream head. Note that we do 3652 * not block all streamio ioctls because the ioctl 3653 * numberspace is not well managed and thus it's possible 3654 * that a module or driver's ioctl numbers may accidentally 3655 * collide with them. 3656 */ 3657 switch (strioc.ic_cmd) { 3658 case I_LINK: 3659 case I_PLINK: 3660 case I_UNLINK: 3661 case I_PUNLINK: 3662 case _I_GETPEERCRED: 3663 case _I_PLINK_LH: 3664 return (EINVAL); 3665 } 3666 3667 error = strdoioctl(stp, &strioc, flag, copyflag, crp, rvalp); 3668 if (error == 0) { 3669 error = strcopyout_strioctl(&strioc, (void *)arg, 3670 flag, copyflag); 3671 } 3672 return (error); 3673 3674 case _I_CMD: 3675 /* 3676 * Like I_STR, but without using M_IOC* messages and without 3677 * copyins/copyouts beyond the passed-in argument.
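 *
 * [Editorial aside, not part of the original source: for comparison, a
 * typical I_STR invocation from user level; fd, MYDRV_GETSTATE and the
 * state buffer are hypothetical.]
 *
 *	#include <stropts.h>
 *
 *	struct strioctl ic;
 *
 *	ic.ic_cmd = MYDRV_GETSTATE;	-- driver-private command
 *	ic.ic_timout = -1;		-- INFTIM
 *	ic.ic_len = sizeof (state);
 *	ic.ic_dp = (char *)&state;
 *	if (ioctl(fd, I_STR, &ic) < 0)
 *		perror("I_STR");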
3678 */ 3679 if (stp->sd_flag & STRHUP) 3680 return (ENXIO); 3681 3682 if ((scp = kmem_alloc(sizeof (strcmd_t), KM_NOSLEEP)) == NULL) 3683 return (ENOMEM); 3684 3685 if (copyin((void *)arg, scp, sizeof (strcmd_t))) { 3686 kmem_free(scp, sizeof (strcmd_t)); 3687 return (EFAULT); 3688 } 3689 3690 access = job_control_type(scp->sc_cmd); 3691 mutex_enter(&stp->sd_lock); 3692 if (access != -1 && (error = i_straccess(stp, access)) != 0) { 3693 mutex_exit(&stp->sd_lock); 3694 kmem_free(scp, sizeof (strcmd_t)); 3695 return (error); 3696 } 3697 mutex_exit(&stp->sd_lock); 3698 3699 *rvalp = 0; 3700 if ((error = strdocmd(stp, scp, crp)) == 0) { 3701 if (copyout(scp, (void *)arg, sizeof (strcmd_t))) 3702 error = EFAULT; 3703 } 3704 kmem_free(scp, sizeof (strcmd_t)); 3705 return (error); 3706 3707 case I_NREAD: 3708 /* 3709 * Return number of bytes of data in first message 3710 * in queue in "arg" and return the number of messages 3711 * in queue in return value. 3712 */ 3713 { 3714 size_t size; 3715 int retval; 3716 int count = 0; 3717 3718 mutex_enter(QLOCK(rdq)); 3719 3720 size = msgdsize(rdq->q_first); 3721 for (mp = rdq->q_first; mp != NULL; mp = mp->b_next) 3722 count++; 3723 3724 mutex_exit(QLOCK(rdq)); 3725 if (stp->sd_struiordq) { 3726 infod_t infod; 3727 3728 infod.d_cmd = INFOD_COUNT; 3729 infod.d_count = 0; 3730 if (count == 0) { 3731 infod.d_cmd |= INFOD_FIRSTBYTES; 3732 infod.d_bytes = 0; 3733 } 3734 infod.d_res = 0; 3735 (void) infonext(rdq, &infod); 3736 count += infod.d_count; 3737 if (infod.d_res & INFOD_FIRSTBYTES) 3738 size = infod.d_bytes; 3739 } 3740 3741 /* 3742 * Drop down from size_t to the "int" required by the 3743 * interface. Cap at INT_MAX. 3744 */ 3745 retval = MIN(size, INT_MAX); 3746 error = strcopyout(&retval, (void *)arg, sizeof (retval), 3747 copyflag); 3748 if (!error) 3749 *rvalp = count; 3750 return (error); 3751 } 3752 3753 case FIONREAD: 3754 /* 3755 * Return number of bytes of data in all data messages 3756 * in queue in "arg". 3757 */ 3758 { 3759 size_t size = 0; 3760 int retval; 3761 3762 mutex_enter(QLOCK(rdq)); 3763 for (mp = rdq->q_first; mp != NULL; mp = mp->b_next) 3764 size += msgdsize(mp); 3765 mutex_exit(QLOCK(rdq)); 3766 3767 if (stp->sd_struiordq) { 3768 infod_t infod; 3769 3770 infod.d_cmd = INFOD_BYTES; 3771 infod.d_res = 0; 3772 infod.d_bytes = 0; 3773 (void) infonext(rdq, &infod); 3774 size += infod.d_bytes; 3775 } 3776 3777 /* 3778 * Drop down from size_t to the "int" required by the 3779 * interface. Cap at INT_MAX. 3780 */ 3781 retval = MIN(size, INT_MAX); 3782 error = strcopyout(&retval, (void *)arg, sizeof (retval), 3783 copyflag); 3784 3785 *rvalp = 0; 3786 return (error); 3787 } 3788 case FIORDCHK: 3789 /* 3790 * FIORDCHK does not use arg value (like FIONREAD), 3791 * instead a count is returned. I_NREAD value may 3792 * not be accurate but safe. The real thing to do is 3793 * to add the msgdsizes of all data messages until 3794 * a non-data message. 3795 */ 3796 { 3797 size_t size = 0; 3798 3799 mutex_enter(QLOCK(rdq)); 3800 for (mp = rdq->q_first; mp != NULL; mp = mp->b_next) 3801 size += msgdsize(mp); 3802 mutex_exit(QLOCK(rdq)); 3803 3804 if (stp->sd_struiordq) { 3805 infod_t infod; 3806 3807 infod.d_cmd = INFOD_BYTES; 3808 infod.d_res = 0; 3809 infod.d_bytes = 0; 3810 (void) infonext(rdq, &infod); 3811 size += infod.d_bytes; 3812 } 3813 3814 /* 3815 * Since ioctl returns an int, and memory sizes under 3816 * LP64 may not fit, we return INT_MAX if the count was 3817 * actually greater. 
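 *
 * [Editorial aside, not part of the original source: how the three
 * queries handled above differ from the caller's point of view; the
 * descriptor fd is hypothetical.]
 *
 *	int first, total, msgs;
 *
 *	msgs = ioctl(fd, I_NREAD, &first);	-- msgs queued; bytes in 1st msg
 *	(void) ioctl(fd, FIONREAD, &total);	-- bytes in all queued data msgs
 *	total = ioctl(fd, FIORDCHK, 0);		-- same count, returned directly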
3818 */ 3819 *rvalp = MIN(size, INT_MAX); 3820 return (0); 3821 } 3822 3823 case I_FIND: 3824 /* 3825 * Get module name. 3826 */ 3827 { 3828 char mname[FMNAMESZ + 1]; 3829 queue_t *q; 3830 3831 error = (copyflag & U_TO_K ? copyinstr : copystr)((void *)arg, 3832 mname, FMNAMESZ + 1, NULL); 3833 if (error) 3834 return ((error == ENAMETOOLONG) ? EINVAL : EFAULT); 3835 3836 /* 3837 * Return EINVAL if we're handed a bogus module name. 3838 */ 3839 if (fmodsw_find(mname, FMODSW_LOAD) == NULL) { 3840 TRACE_0(TR_FAC_STREAMS_FR, 3841 TR_I_CANT_FIND, "couldn't I_FIND"); 3842 return (EINVAL); 3843 } 3844 3845 *rvalp = 0; 3846 3847 /* Look downstream to see if module is there. */ 3848 claimstr(stp->sd_wrq); 3849 for (q = stp->sd_wrq->q_next; q; q = q->q_next) { 3850 if (q->q_flag&QREADR) { 3851 q = NULL; 3852 break; 3853 } 3854 if (strcmp(mname, q->q_qinfo->qi_minfo->mi_idname) == 0) 3855 break; 3856 } 3857 releasestr(stp->sd_wrq); 3858 3859 *rvalp = (q ? 1 : 0); 3860 return (error); 3861 } 3862 3863 case I_PUSH: 3864 case __I_PUSH_NOCTTY: 3865 /* 3866 * Push a module. 3867 * For the case __I_PUSH_NOCTTY push a module but 3868 * do not allocate controlling tty. See bugid 4025044 3869 */ 3870 3871 { 3872 char mname[FMNAMESZ + 1]; 3873 fmodsw_impl_t *fp; 3874 dev_t dummydev; 3875 3876 if (stp->sd_flag & STRHUP) 3877 return (ENXIO); 3878 3879 /* 3880 * Get module name and look up in fmodsw. 3881 */ 3882 error = (copyflag & U_TO_K ? copyinstr : copystr)((void *)arg, 3883 mname, FMNAMESZ + 1, NULL); 3884 if (error) 3885 return ((error == ENAMETOOLONG) ? EINVAL : EFAULT); 3886 3887 if ((fp = fmodsw_find(mname, FMODSW_HOLD | FMODSW_LOAD)) == 3888 NULL) 3889 return (EINVAL); 3890 3891 TRACE_2(TR_FAC_STREAMS_FR, TR_I_PUSH, 3892 "I_PUSH:fp %p stp %p", fp, stp); 3893 3894 if (error = strstartplumb(stp, flag, cmd)) { 3895 fmodsw_rele(fp); 3896 return (error); 3897 } 3898 3899 /* 3900 * See if any more modules can be pushed on this stream. 3901 * Note that this check must be done after strstartplumb() 3902 * since otherwise multiple threads issuing I_PUSHes on 3903 * the same stream will be able to exceed nstrpush. 3904 */ 3905 mutex_enter(&stp->sd_lock); 3906 if (stp->sd_pushcnt >= nstrpush) { 3907 fmodsw_rele(fp); 3908 strendplumb(stp); 3909 mutex_exit(&stp->sd_lock); 3910 return (EINVAL); 3911 } 3912 mutex_exit(&stp->sd_lock); 3913 3914 /* 3915 * Push new module and call its open routine 3916 * via qattach(). Modules don't change device 3917 * numbers, so just ignore dummydev here. 3918 */ 3919 dummydev = vp->v_rdev; 3920 if ((error = qattach(rdq, &dummydev, 0, crp, fp, 3921 B_FALSE)) == 0) { 3922 if (vp->v_type == VCHR && /* sorry, no pipes allowed */ 3923 (cmd == I_PUSH) && (stp->sd_flag & STRISTTY)) { 3924 /* 3925 * try to allocate it as a controlling terminal 3926 */ 3927 (void) strctty(stp); 3928 } 3929 } 3930 3931 mutex_enter(&stp->sd_lock); 3932 3933 /* 3934 * As a performance concern we are caching the values of 3935 * q_minpsz and q_maxpsz of the module below the stream 3936 * head in the stream head. 
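 *
 * [Editorial aside, not part of the original source, illustrating the
 * I_FIND and I_PUSH cases handled above: a user process conditionally
 * pushing a module; fd and the module name are only examples.]
 *
 *	#include <stropts.h>
 *
 *	if (ioctl(fd, I_FIND, "ttcompat") == 0)
 *		(void) ioctl(fd, I_PUSH, "ttcompat");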
3937 */ 3938 mutex_enter(QLOCK(stp->sd_wrq->q_next)); 3939 rmin = stp->sd_wrq->q_next->q_minpsz; 3940 rmax = stp->sd_wrq->q_next->q_maxpsz; 3941 mutex_exit(QLOCK(stp->sd_wrq->q_next)); 3942 3943 /* Do this processing here as a performance concern */ 3944 if (strmsgsz != 0) { 3945 if (rmax == INFPSZ) 3946 rmax = strmsgsz; 3947 else { 3948 if (vp->v_type == VFIFO) 3949 rmax = MIN(PIPE_BUF, rmax); 3950 else rmax = MIN(strmsgsz, rmax); 3951 } 3952 } 3953 3954 mutex_enter(QLOCK(wrq)); 3955 stp->sd_qn_minpsz = rmin; 3956 stp->sd_qn_maxpsz = rmax; 3957 mutex_exit(QLOCK(wrq)); 3958 3959 strendplumb(stp); 3960 mutex_exit(&stp->sd_lock); 3961 return (error); 3962 } 3963 3964 case I_POP: 3965 { 3966 queue_t *q; 3967 3968 if (stp->sd_flag & STRHUP) 3969 return (ENXIO); 3970 if (!wrq->q_next) /* for broken pipes */ 3971 return (EINVAL); 3972 3973 if (error = strstartplumb(stp, flag, cmd)) 3974 return (error); 3975 3976 /* 3977 * If there is an anchor on this stream and popping 3978 * the current module would attempt to pop through the 3979 * anchor, then disallow the pop unless we have sufficient 3980 * privileges; take the cheapest (non-locking) check 3981 * first. 3982 */ 3983 if (secpolicy_ip_config(crp, B_TRUE) != 0 || 3984 (stp->sd_anchorzone != crgetzoneid(crp))) { 3985 mutex_enter(&stp->sd_lock); 3986 /* 3987 * Anchors only apply if there's at least one 3988 * module on the stream (sd_pushcnt > 0). 3989 */ 3990 if (stp->sd_pushcnt > 0 && 3991 stp->sd_pushcnt == stp->sd_anchor && 3992 stp->sd_vnode->v_type != VFIFO) { 3993 strendplumb(stp); 3994 mutex_exit(&stp->sd_lock); 3995 if (stp->sd_anchorzone != crgetzoneid(crp)) 3996 return (EINVAL); 3997 /* Audit and report error */ 3998 return (secpolicy_ip_config(crp, B_FALSE)); 3999 } 4000 mutex_exit(&stp->sd_lock); 4001 } 4002 4003 q = wrq->q_next; 4004 TRACE_2(TR_FAC_STREAMS_FR, TR_I_POP, 4005 "I_POP:%p from %p", q, stp); 4006 if (q->q_next == NULL || (q->q_flag & (QREADR|QISDRV))) { 4007 error = EINVAL; 4008 } else { 4009 qdetach(_RD(q), 1, flag, crp, B_FALSE); 4010 error = 0; 4011 } 4012 mutex_enter(&stp->sd_lock); 4013 4014 /* 4015 * As a performance concern we are caching the values of 4016 * q_minpsz and q_maxpsz of the module below the stream 4017 * head in the stream head. 4018 */ 4019 mutex_enter(QLOCK(wrq->q_next)); 4020 rmin = wrq->q_next->q_minpsz; 4021 rmax = wrq->q_next->q_maxpsz; 4022 mutex_exit(QLOCK(wrq->q_next)); 4023 4024 /* Do this processing here as a performance concern */ 4025 if (strmsgsz != 0) { 4026 if (rmax == INFPSZ) 4027 rmax = strmsgsz; 4028 else { 4029 if (vp->v_type == VFIFO) 4030 rmax = MIN(PIPE_BUF, rmax); 4031 else rmax = MIN(strmsgsz, rmax); 4032 } 4033 } 4034 4035 mutex_enter(QLOCK(wrq)); 4036 stp->sd_qn_minpsz = rmin; 4037 stp->sd_qn_maxpsz = rmax; 4038 mutex_exit(QLOCK(wrq)); 4039 4040 /* If we popped through the anchor, then reset the anchor. */ 4041 if (stp->sd_pushcnt < stp->sd_anchor) { 4042 stp->sd_anchor = 0; 4043 stp->sd_anchorzone = 0; 4044 } 4045 strendplumb(stp); 4046 mutex_exit(&stp->sd_lock); 4047 return (error); 4048 } 4049 4050 case _I_MUXID2FD: 4051 { 4052 /* 4053 * Create a fd for a I_PLINK'ed lower stream with a given 4054 * muxid. With the fd, application can send down ioctls, 4055 * like I_LIST, to the previously I_PLINK'ed stream. Note 4056 * that after getting the fd, the application has to do an 4057 * I_PUNLINK on the muxid before it can do any operation 4058 * on the lower stream. This is required by spec1170. 
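 *
 * [Editorial aside, not part of the original source: the persistent
 * link flow this interface serves, sketched from user level; ipfd and
 * devfd are hypothetical descriptors.]
 *
 *	int muxid;
 *
 *	muxid = ioctl(ipfd, I_PLINK, devfd);	-- link lower stream; keep muxid
 *	...					-- ipfd may then be closed
 *	(void) ioctl(ipfd2, I_PUNLINK, muxid);	-- later, via a new fd to the
 *						   same controlling device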
4059 * 4060 * The fd used to do this ioctl should point to the same 4061 * controlling device used to do the I_PLINK. If it uses 4062 * a different stream or an invalid muxid, I_MUXID2FD will 4063 * fail. The error code is set to EINVAL. 4064 * 4065 * The intended use of this interface is the following. 4066 * An application I_PLINK'ed a stream and exits. The fd 4067 * to the lower stream is gone. Another application 4068 * wants to get a fd to the lower stream, it uses I_MUXID2FD. 4069 */ 4070 int muxid = (int)arg; 4071 int fd; 4072 linkinfo_t *linkp; 4073 struct file *fp; 4074 netstack_t *ns; 4075 str_stack_t *ss; 4076 4077 /* 4078 * Do not allow the wildcard muxid. This ioctl is not 4079 * intended to find arbitrary link. 4080 */ 4081 if (muxid == 0) { 4082 return (EINVAL); 4083 } 4084 4085 ns = netstack_find_by_cred(crp); 4086 ASSERT(ns != NULL); 4087 ss = ns->netstack_str; 4088 ASSERT(ss != NULL); 4089 4090 mutex_enter(&muxifier); 4091 linkp = findlinks(vp->v_stream, muxid, LINKPERSIST, ss); 4092 if (linkp == NULL) { 4093 mutex_exit(&muxifier); 4094 netstack_rele(ss->ss_netstack); 4095 return (EINVAL); 4096 } 4097 4098 if ((fd = ufalloc(0)) == -1) { 4099 mutex_exit(&muxifier); 4100 netstack_rele(ss->ss_netstack); 4101 return (EMFILE); 4102 } 4103 fp = linkp->li_fpdown; 4104 mutex_enter(&fp->f_tlock); 4105 fp->f_count++; 4106 mutex_exit(&fp->f_tlock); 4107 mutex_exit(&muxifier); 4108 setf(fd, fp); 4109 *rvalp = fd; 4110 netstack_rele(ss->ss_netstack); 4111 return (0); 4112 } 4113 4114 case _I_INSERT: 4115 { 4116 /* 4117 * To insert a module to a given position in a stream. 4118 * In the first release, only allow privileged user 4119 * to use this ioctl. Furthermore, the insert is only allowed 4120 * below an anchor if the zoneid is the same as the zoneid 4121 * which created the anchor. 4122 * 4123 * Note that we do not plan to support this ioctl 4124 * on pipes in the first release. We want to learn more 4125 * about the implications of these ioctls before extending 4126 * their support. And we do not think these features are 4127 * valuable for pipes. 4128 * 4129 * Neither do we support O/C hot stream. Note that only 4130 * the upper streams of TCP/IP stack are O/C hot streams. 4131 * The lower IP stream is not. 4132 * When there is a O/C cold barrier, we only allow inserts 4133 * above the barrier. 4134 */ 4135 STRUCT_DECL(strmodconf, strmodinsert); 4136 char mod_name[FMNAMESZ + 1]; 4137 fmodsw_impl_t *fp; 4138 dev_t dummydev; 4139 queue_t *tmp_wrq; 4140 int pos; 4141 boolean_t is_insert; 4142 4143 STRUCT_INIT(strmodinsert, flag); 4144 if (stp->sd_flag & STRHUP) 4145 return (ENXIO); 4146 if (STRMATED(stp)) 4147 return (EINVAL); 4148 if ((error = secpolicy_net_config(crp, B_FALSE)) != 0) 4149 return (error); 4150 if (stp->sd_anchor != 0 && 4151 stp->sd_anchorzone != crgetzoneid(crp)) 4152 return (EINVAL); 4153 4154 error = strcopyin((void *)arg, STRUCT_BUF(strmodinsert), 4155 STRUCT_SIZE(strmodinsert), copyflag); 4156 if (error) 4157 return (error); 4158 4159 /* 4160 * Get module name and look up in fmodsw. 4161 */ 4162 error = (copyflag & U_TO_K ? copyinstr : 4163 copystr)(STRUCT_FGETP(strmodinsert, mod_name), 4164 mod_name, FMNAMESZ + 1, NULL); 4165 if (error) 4166 return ((error == ENAMETOOLONG) ? EINVAL : EFAULT); 4167 4168 if ((fp = fmodsw_find(mod_name, FMODSW_HOLD | FMODSW_LOAD)) == 4169 NULL) 4170 return (EINVAL); 4171 4172 if (error = strstartplumb(stp, flag, cmd)) { 4173 fmodsw_rele(fp); 4174 return (error); 4175 } 4176 4177 /* 4178 * Is this _I_INSERT just like an I_PUSH? 
We need to know 4179 * this because we do some optimizations if this is a 4180 * module being pushed. 4181 */ 4182 pos = STRUCT_FGET(strmodinsert, pos); 4183 is_insert = (pos != 0); 4184 4185 /* 4186 * Make sure pos is valid. Even though it is not an I_PUSH, 4187 * we impose the same limit on the number of modules in a 4188 * stream. 4189 */ 4190 mutex_enter(&stp->sd_lock); 4191 if (stp->sd_pushcnt >= nstrpush || pos < 0 || 4192 pos > stp->sd_pushcnt) { 4193 fmodsw_rele(fp); 4194 strendplumb(stp); 4195 mutex_exit(&stp->sd_lock); 4196 return (EINVAL); 4197 } 4198 if (stp->sd_anchor != 0) { 4199 /* 4200 * Is this insert below the anchor? 4201 * Pushcnt hasn't been increased yet hence 4202 * we test for greater than here, and greater or 4203 * equal after qattach. 4204 */ 4205 if (pos > (stp->sd_pushcnt - stp->sd_anchor) && 4206 stp->sd_anchorzone != crgetzoneid(crp)) { 4207 fmodsw_rele(fp); 4208 strendplumb(stp); 4209 mutex_exit(&stp->sd_lock); 4210 return (EPERM); 4211 } 4212 } 4213 4214 mutex_exit(&stp->sd_lock); 4215 4216 /* 4217 * First find the correct position this module to 4218 * be inserted. We don't need to call claimstr() 4219 * as the stream should not be changing at this point. 4220 * 4221 * Insert new module and call its open routine 4222 * via qattach(). Modules don't change device 4223 * numbers, so just ignore dummydev here. 4224 */ 4225 for (tmp_wrq = stp->sd_wrq; pos > 0; 4226 tmp_wrq = tmp_wrq->q_next, pos--) { 4227 ASSERT(SAMESTR(tmp_wrq)); 4228 } 4229 dummydev = vp->v_rdev; 4230 if ((error = qattach(_RD(tmp_wrq), &dummydev, 0, crp, 4231 fp, is_insert)) != 0) { 4232 mutex_enter(&stp->sd_lock); 4233 strendplumb(stp); 4234 mutex_exit(&stp->sd_lock); 4235 return (error); 4236 } 4237 4238 mutex_enter(&stp->sd_lock); 4239 4240 /* 4241 * As a performance concern we are caching the values of 4242 * q_minpsz and q_maxpsz of the module below the stream 4243 * head in the stream head. 4244 */ 4245 if (!is_insert) { 4246 mutex_enter(QLOCK(stp->sd_wrq->q_next)); 4247 rmin = stp->sd_wrq->q_next->q_minpsz; 4248 rmax = stp->sd_wrq->q_next->q_maxpsz; 4249 mutex_exit(QLOCK(stp->sd_wrq->q_next)); 4250 4251 /* Do this processing here as a performance concern */ 4252 if (strmsgsz != 0) { 4253 if (rmax == INFPSZ) { 4254 rmax = strmsgsz; 4255 } else { 4256 rmax = MIN(strmsgsz, rmax); 4257 } 4258 } 4259 4260 mutex_enter(QLOCK(wrq)); 4261 stp->sd_qn_minpsz = rmin; 4262 stp->sd_qn_maxpsz = rmax; 4263 mutex_exit(QLOCK(wrq)); 4264 } 4265 4266 /* 4267 * Need to update the anchor value if this module is 4268 * inserted below the anchor point. 4269 */ 4270 if (stp->sd_anchor != 0) { 4271 pos = STRUCT_FGET(strmodinsert, pos); 4272 if (pos >= (stp->sd_pushcnt - stp->sd_anchor)) 4273 stp->sd_anchor++; 4274 } 4275 4276 strendplumb(stp); 4277 mutex_exit(&stp->sd_lock); 4278 return (0); 4279 } 4280 4281 case _I_REMOVE: 4282 { 4283 /* 4284 * To remove a module with a given name in a stream. The 4285 * caller of this ioctl needs to provide both the name and 4286 * the position of the module to be removed. This eliminates 4287 * the ambiguity of removal if a module is inserted/pushed 4288 * multiple times in a stream. In the first release, only 4289 * allow privileged user to use this ioctl. 4290 * Furthermore, the remove is only allowed 4291 * below an anchor if the zoneid is the same as the zoneid 4292 * which created the anchor. 4293 * 4294 * Note that we do not plan to support this ioctl 4295 * on pipes in the first release. 
We want to learn more 4296 * about the implications of these ioctls before extending 4297 * their support. And we do not think these features are 4298 * valuable for pipes. 4299 * 4300 * Neither do we support O/C hot stream. Note that only 4301 * the upper streams of TCP/IP stack are O/C hot streams. 4302 * The lower IP stream is not. 4303 * When there is a O/C cold barrier we do not allow removal 4304 * below the barrier. 4305 * 4306 * Also note that _I_REMOVE cannot be used to remove a 4307 * driver or the stream head. 4308 */ 4309 STRUCT_DECL(strmodconf, strmodremove); 4310 queue_t *q; 4311 int pos; 4312 char mod_name[FMNAMESZ + 1]; 4313 boolean_t is_remove; 4314 4315 STRUCT_INIT(strmodremove, flag); 4316 if (stp->sd_flag & STRHUP) 4317 return (ENXIO); 4318 if (STRMATED(stp)) 4319 return (EINVAL); 4320 if ((error = secpolicy_net_config(crp, B_FALSE)) != 0) 4321 return (error); 4322 if (stp->sd_anchor != 0 && 4323 stp->sd_anchorzone != crgetzoneid(crp)) 4324 return (EINVAL); 4325 4326 error = strcopyin((void *)arg, STRUCT_BUF(strmodremove), 4327 STRUCT_SIZE(strmodremove), copyflag); 4328 if (error) 4329 return (error); 4330 4331 error = (copyflag & U_TO_K ? copyinstr : 4332 copystr)(STRUCT_FGETP(strmodremove, mod_name), 4333 mod_name, FMNAMESZ + 1, NULL); 4334 if (error) 4335 return ((error == ENAMETOOLONG) ? EINVAL : EFAULT); 4336 4337 if ((error = strstartplumb(stp, flag, cmd)) != 0) 4338 return (error); 4339 4340 /* 4341 * Match the name of given module to the name of module at 4342 * the given position. 4343 */ 4344 pos = STRUCT_FGET(strmodremove, pos); 4345 4346 is_remove = (pos != 0); 4347 for (q = stp->sd_wrq->q_next; SAMESTR(q) && pos > 0; 4348 q = q->q_next, pos--) 4349 ; 4350 if (pos > 0 || ! SAMESTR(q) || 4351 strncmp(q->q_qinfo->qi_minfo->mi_idname, mod_name, 4352 strlen(q->q_qinfo->qi_minfo->mi_idname)) != 0) { 4353 mutex_enter(&stp->sd_lock); 4354 strendplumb(stp); 4355 mutex_exit(&stp->sd_lock); 4356 return (EINVAL); 4357 } 4358 4359 /* 4360 * If the position is at or below an anchor, then the zoneid 4361 * must match the zoneid that created the anchor. 4362 */ 4363 if (stp->sd_anchor != 0) { 4364 pos = STRUCT_FGET(strmodremove, pos); 4365 if (pos >= (stp->sd_pushcnt - stp->sd_anchor) && 4366 stp->sd_anchorzone != crgetzoneid(crp)) { 4367 mutex_enter(&stp->sd_lock); 4368 strendplumb(stp); 4369 mutex_exit(&stp->sd_lock); 4370 return (EPERM); 4371 } 4372 } 4373 4374 4375 ASSERT(!(q->q_flag & QREADR)); 4376 qdetach(_RD(q), 1, flag, crp, is_remove); 4377 4378 mutex_enter(&stp->sd_lock); 4379 4380 /* 4381 * As a performance concern we are caching the values of 4382 * q_minpsz and q_maxpsz of the module below the stream 4383 * head in the stream head. 4384 */ 4385 if (!is_remove) { 4386 mutex_enter(QLOCK(wrq->q_next)); 4387 rmin = wrq->q_next->q_minpsz; 4388 rmax = wrq->q_next->q_maxpsz; 4389 mutex_exit(QLOCK(wrq->q_next)); 4390 4391 /* Do this processing here as a performance concern */ 4392 if (strmsgsz != 0) { 4393 if (rmax == INFPSZ) 4394 rmax = strmsgsz; 4395 else { 4396 if (vp->v_type == VFIFO) 4397 rmax = MIN(PIPE_BUF, rmax); 4398 else rmax = MIN(strmsgsz, rmax); 4399 } 4400 } 4401 4402 mutex_enter(QLOCK(wrq)); 4403 stp->sd_qn_minpsz = rmin; 4404 stp->sd_qn_maxpsz = rmax; 4405 mutex_exit(QLOCK(wrq)); 4406 } 4407 4408 /* 4409 * Need to update the anchor value if this module is removed 4410 * at or below the anchor point. If the removed module is at 4411 * the anchor point, remove the anchor for this stream if 4412 * there is no module above the anchor point. 
Otherwise, if 4413 * the removed module is below the anchor point, decrement the 4414 * anchor point by 1. 4415 */ 4416 if (stp->sd_anchor != 0) { 4417 pos = STRUCT_FGET(strmodremove, pos); 4418 if (pos == stp->sd_pushcnt - stp->sd_anchor + 1) 4419 stp->sd_anchor = 0; 4420 else if (pos > (stp->sd_pushcnt - stp->sd_anchor + 1)) 4421 stp->sd_anchor--; 4422 } 4423 4424 strendplumb(stp); 4425 mutex_exit(&stp->sd_lock); 4426 return (0); 4427 } 4428 4429 case I_ANCHOR: 4430 /* 4431 * Set the anchor position on the stream to reside at 4432 * the top module (in other words, the top module 4433 * cannot be popped). Anchors with a FIFO make no 4434 * obvious sense, so they're not allowed. 4435 */ 4436 mutex_enter(&stp->sd_lock); 4437 4438 if (stp->sd_vnode->v_type == VFIFO) { 4439 mutex_exit(&stp->sd_lock); 4440 return (EINVAL); 4441 } 4442 /* Only allow the same zoneid to update the anchor */ 4443 if (stp->sd_anchor != 0 && 4444 stp->sd_anchorzone != crgetzoneid(crp)) { 4445 mutex_exit(&stp->sd_lock); 4446 return (EINVAL); 4447 } 4448 stp->sd_anchor = stp->sd_pushcnt; 4449 stp->sd_anchorzone = crgetzoneid(crp); 4450 mutex_exit(&stp->sd_lock); 4451 return (0); 4452 4453 case I_LOOK: 4454 /* 4455 * Get name of first module downstream. 4456 * If no module, return an error. 4457 */ 4458 { 4459 claimstr(wrq); 4460 if (_SAMESTR(wrq) && wrq->q_next->q_next) { 4461 char *name = wrq->q_next->q_qinfo->qi_minfo->mi_idname; 4462 error = strcopyout(name, (void *)arg, strlen(name) + 1, 4463 copyflag); 4464 releasestr(wrq); 4465 return (error); 4466 } 4467 releasestr(wrq); 4468 return (EINVAL); 4469 } 4470 4471 case I_LINK: 4472 case I_PLINK: 4473 /* 4474 * Link a multiplexor. 4475 */ 4476 error = mlink(vp, cmd, (int)arg, crp, rvalp, 0); 4477 return (error); 4478 4479 case _I_PLINK_LH: 4480 /* 4481 * Link a multiplexor: Call must originate from kernel. 4482 */ 4483 if (kioctl) 4484 return (ldi_mlink_lh(vp, cmd, arg, crp, rvalp)); 4485 4486 return (EINVAL); 4487 case I_UNLINK: 4488 case I_PUNLINK: 4489 /* 4490 * Unlink a multiplexor. 4491 * If arg is -1, unlink all links for which this is the 4492 * controlling stream. Otherwise, arg is an index number 4493 * for a link to be removed. 
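 *
 * A minimal userland sketch of the dismantling side (ctl_fd and muxid
 * are hypothetical names; muxid is assumed to be the value returned by
 * the earlier I_LINK or I_PLINK):
 *
 *	if (ioctl(ctl_fd, I_PUNLINK, muxid) == -1)
 *		perror("I_PUNLINK");
 *
 * or, to dismantle every persistent link under this controlling
 * stream, pass MUXID_ALL (-1) as the argument:
 *
 *	(void) ioctl(ctl_fd, I_PUNLINK, MUXID_ALL);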
4494 */ 4495 { 4496 struct linkinfo *linkp; 4497 int native_arg = (int)arg; 4498 int type; 4499 netstack_t *ns; 4500 str_stack_t *ss; 4501 4502 TRACE_1(TR_FAC_STREAMS_FR, 4503 TR_I_UNLINK, "I_UNLINK/I_PUNLINK:%p", stp); 4504 if (vp->v_type == VFIFO) { 4505 return (EINVAL); 4506 } 4507 if (cmd == I_UNLINK) 4508 type = LINKNORMAL; 4509 else /* I_PUNLINK */ 4510 type = LINKPERSIST; 4511 if (native_arg == 0) { 4512 return (EINVAL); 4513 } 4514 ns = netstack_find_by_cred(crp); 4515 ASSERT(ns != NULL); 4516 ss = ns->netstack_str; 4517 ASSERT(ss != NULL); 4518 4519 if (native_arg == MUXID_ALL) 4520 error = munlinkall(stp, type, crp, rvalp, ss); 4521 else { 4522 mutex_enter(&muxifier); 4523 if (!(linkp = findlinks(stp, (int)arg, type, ss))) { 4524 /* invalid user supplied index number */ 4525 mutex_exit(&muxifier); 4526 netstack_rele(ss->ss_netstack); 4527 return (EINVAL); 4528 } 4529 /* munlink drops the muxifier lock */ 4530 error = munlink(stp, linkp, type, crp, rvalp, ss); 4531 } 4532 netstack_rele(ss->ss_netstack); 4533 return (error); 4534 } 4535 4536 case I_FLUSH: 4537 /* 4538 * send a flush message downstream 4539 * flush message can indicate 4540 * FLUSHR - flush read queue 4541 * FLUSHW - flush write queue 4542 * FLUSHRW - flush read/write queue 4543 */ 4544 if (stp->sd_flag & STRHUP) 4545 return (ENXIO); 4546 if (arg & ~FLUSHRW) 4547 return (EINVAL); 4548 4549 for (;;) { 4550 if (putnextctl1(stp->sd_wrq, M_FLUSH, (int)arg)) { 4551 break; 4552 } 4553 if (error = strwaitbuf(1, BPRI_HI)) { 4554 return (error); 4555 } 4556 } 4557 4558 /* 4559 * Send down an unsupported ioctl and wait for the nack 4560 * in order to allow the M_FLUSH to propagate back 4561 * up to the stream head. 4562 * Replaces if (qready()) runqueues(); 4563 */ 4564 strioc.ic_cmd = -1; /* The unsupported ioctl */ 4565 strioc.ic_timout = 0; 4566 strioc.ic_len = 0; 4567 strioc.ic_dp = NULL; 4568 (void) strdoioctl(stp, &strioc, flag, K_TO_K, crp, rvalp); 4569 *rvalp = 0; 4570 return (0); 4571 4572 case I_FLUSHBAND: 4573 { 4574 struct bandinfo binfo; 4575 4576 error = strcopyin((void *)arg, &binfo, sizeof (binfo), 4577 copyflag); 4578 if (error) 4579 return (error); 4580 if (stp->sd_flag & STRHUP) 4581 return (ENXIO); 4582 if (binfo.bi_flag & ~FLUSHRW) 4583 return (EINVAL); 4584 while (!(mp = allocb(2, BPRI_HI))) { 4585 if (error = strwaitbuf(2, BPRI_HI)) 4586 return (error); 4587 } 4588 mp->b_datap->db_type = M_FLUSH; 4589 *mp->b_wptr++ = binfo.bi_flag | FLUSHBAND; 4590 *mp->b_wptr++ = binfo.bi_pri; 4591 putnext(stp->sd_wrq, mp); 4592 /* 4593 * Send down an unsupported ioctl and wait for the nack 4594 * in order to allow the M_FLUSH to propagate back 4595 * up to the stream head. 
4596 * Replaces if (qready()) runqueues(); 4597 */ 4598 strioc.ic_cmd = -1; /* The unsupported ioctl */ 4599 strioc.ic_timout = 0; 4600 strioc.ic_len = 0; 4601 strioc.ic_dp = NULL; 4602 (void) strdoioctl(stp, &strioc, flag, K_TO_K, crp, rvalp); 4603 *rvalp = 0; 4604 return (0); 4605 } 4606 4607 case I_SRDOPT: 4608 /* 4609 * Set read options 4610 * 4611 * RNORM - default stream mode 4612 * RMSGN - message no discard 4613 * RMSGD - message discard 4614 * RPROTNORM - fail read with EBADMSG for M_[PC]PROTOs 4615 * RPROTDAT - convert M_[PC]PROTOs to M_DATAs 4616 * RPROTDIS - discard M_[PC]PROTOs and retain M_DATAs 4617 */ 4618 if (arg & ~(RMODEMASK | RPROTMASK)) 4619 return (EINVAL); 4620 4621 if ((arg & (RMSGD|RMSGN)) == (RMSGD|RMSGN)) 4622 return (EINVAL); 4623 4624 mutex_enter(&stp->sd_lock); 4625 switch (arg & RMODEMASK) { 4626 case RNORM: 4627 stp->sd_read_opt &= ~(RD_MSGDIS | RD_MSGNODIS); 4628 break; 4629 case RMSGD: 4630 stp->sd_read_opt = (stp->sd_read_opt & ~RD_MSGNODIS) | 4631 RD_MSGDIS; 4632 break; 4633 case RMSGN: 4634 stp->sd_read_opt = (stp->sd_read_opt & ~RD_MSGDIS) | 4635 RD_MSGNODIS; 4636 break; 4637 } 4638 4639 switch (arg & RPROTMASK) { 4640 case RPROTNORM: 4641 stp->sd_read_opt &= ~(RD_PROTDAT | RD_PROTDIS); 4642 break; 4643 4644 case RPROTDAT: 4645 stp->sd_read_opt = ((stp->sd_read_opt & ~RD_PROTDIS) | 4646 RD_PROTDAT); 4647 break; 4648 4649 case RPROTDIS: 4650 stp->sd_read_opt = ((stp->sd_read_opt & ~RD_PROTDAT) | 4651 RD_PROTDIS); 4652 break; 4653 } 4654 mutex_exit(&stp->sd_lock); 4655 return (0); 4656 4657 case I_GRDOPT: 4658 /* 4659 * Get read option and return the value 4660 * to spot pointed to by arg 4661 */ 4662 { 4663 int rdopt; 4664 4665 rdopt = ((stp->sd_read_opt & RD_MSGDIS) ? RMSGD : 4666 ((stp->sd_read_opt & RD_MSGNODIS) ? RMSGN : RNORM)); 4667 rdopt |= ((stp->sd_read_opt & RD_PROTDAT) ? RPROTDAT : 4668 ((stp->sd_read_opt & RD_PROTDIS) ? RPROTDIS : RPROTNORM)); 4669 4670 return (strcopyout(&rdopt, (void *)arg, sizeof (int), 4671 copyflag)); 4672 } 4673 4674 case I_SERROPT: 4675 /* 4676 * Set error options 4677 * 4678 * RERRNORM - persistent read errors 4679 * RERRNONPERSIST - non-persistent read errors 4680 * WERRNORM - persistent write errors 4681 * WERRNONPERSIST - non-persistent write errors 4682 */ 4683 if (arg & ~(RERRMASK | WERRMASK)) 4684 return (EINVAL); 4685 4686 mutex_enter(&stp->sd_lock); 4687 switch (arg & RERRMASK) { 4688 case RERRNORM: 4689 stp->sd_flag &= ~STRDERRNONPERSIST; 4690 break; 4691 case RERRNONPERSIST: 4692 stp->sd_flag |= STRDERRNONPERSIST; 4693 break; 4694 } 4695 switch (arg & WERRMASK) { 4696 case WERRNORM: 4697 stp->sd_flag &= ~STWRERRNONPERSIST; 4698 break; 4699 case WERRNONPERSIST: 4700 stp->sd_flag |= STWRERRNONPERSIST; 4701 break; 4702 } 4703 mutex_exit(&stp->sd_lock); 4704 return (0); 4705 4706 case I_GERROPT: 4707 /* 4708 * Get error option and return the value 4709 * to spot pointed to by arg 4710 */ 4711 { 4712 int erropt = 0; 4713 4714 erropt |= (stp->sd_flag & STRDERRNONPERSIST) ? RERRNONPERSIST : 4715 RERRNORM; 4716 erropt |= (stp->sd_flag & STWRERRNONPERSIST) ? WERRNONPERSIST : 4717 WERRNORM; 4718 return (strcopyout(&erropt, (void *)arg, sizeof (int), 4719 copyflag)); 4720 } 4721 4722 case I_SETSIG: 4723 /* 4724 * Register the calling proc to receive the SIGPOLL 4725 * signal based on the events given in arg. If 4726 * arg is zero, remove the proc from register list. 
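 *
 * A minimal userland sketch of the intended use (fd is a hypothetical
 * stream descriptor; a SIGPOLL handler is assumed to have been
 * installed with sigaction() beforehand):
 *
 *	if (ioctl(fd, I_SETSIG, S_INPUT | S_RDBAND) == -1)
 *		perror("I_SETSIG");
 *
 * and later, to deregister the calling process:
 *
 *	(void) ioctl(fd, I_SETSIG, 0);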
4727 */ 4728 { 4729 strsig_t *ssp, *pssp; 4730 struct pid *pidp; 4731 4732 pssp = NULL; 4733 pidp = curproc->p_pidp; 4734 /* 4735 * Hold sd_lock to prevent traversal of sd_siglist while 4736 * it is modified. 4737 */ 4738 mutex_enter(&stp->sd_lock); 4739 for (ssp = stp->sd_siglist; ssp && (ssp->ss_pidp != pidp); 4740 pssp = ssp, ssp = ssp->ss_next) 4741 ; 4742 4743 if (arg) { 4744 if (arg & ~(S_INPUT|S_HIPRI|S_MSG|S_HANGUP|S_ERROR| 4745 S_RDNORM|S_WRNORM|S_RDBAND|S_WRBAND|S_BANDURG)) { 4746 mutex_exit(&stp->sd_lock); 4747 return (EINVAL); 4748 } 4749 if ((arg & S_BANDURG) && !(arg & S_RDBAND)) { 4750 mutex_exit(&stp->sd_lock); 4751 return (EINVAL); 4752 } 4753 4754 /* 4755 * If proc not already registered, add it 4756 * to list. 4757 */ 4758 if (!ssp) { 4759 ssp = kmem_alloc(sizeof (strsig_t), KM_SLEEP); 4760 ssp->ss_pidp = pidp; 4761 ssp->ss_pid = pidp->pid_id; 4762 ssp->ss_next = NULL; 4763 if (pssp) 4764 pssp->ss_next = ssp; 4765 else 4766 stp->sd_siglist = ssp; 4767 mutex_enter(&pidlock); 4768 PID_HOLD(pidp); 4769 mutex_exit(&pidlock); 4770 } 4771 4772 /* 4773 * Set events. 4774 */ 4775 ssp->ss_events = (int)arg; 4776 } else { 4777 /* 4778 * Remove proc from register list. 4779 */ 4780 if (ssp) { 4781 mutex_enter(&pidlock); 4782 PID_RELE(pidp); 4783 mutex_exit(&pidlock); 4784 if (pssp) 4785 pssp->ss_next = ssp->ss_next; 4786 else 4787 stp->sd_siglist = ssp->ss_next; 4788 kmem_free(ssp, sizeof (strsig_t)); 4789 } else { 4790 mutex_exit(&stp->sd_lock); 4791 return (EINVAL); 4792 } 4793 } 4794 4795 /* 4796 * Recalculate OR of sig events. 4797 */ 4798 stp->sd_sigflags = 0; 4799 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next) 4800 stp->sd_sigflags |= ssp->ss_events; 4801 mutex_exit(&stp->sd_lock); 4802 return (0); 4803 } 4804 4805 case I_GETSIG: 4806 /* 4807 * Return (in arg) the current registration of events 4808 * for which the calling proc is to be signaled. 4809 */ 4810 { 4811 struct strsig *ssp; 4812 struct pid *pidp; 4813 4814 pidp = curproc->p_pidp; 4815 mutex_enter(&stp->sd_lock); 4816 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next) 4817 if (ssp->ss_pidp == pidp) { 4818 error = strcopyout(&ssp->ss_events, (void *)arg, 4819 sizeof (int), copyflag); 4820 mutex_exit(&stp->sd_lock); 4821 return (error); 4822 } 4823 mutex_exit(&stp->sd_lock); 4824 return (EINVAL); 4825 } 4826 4827 case I_ESETSIG: 4828 /* 4829 * Register the ss_pid to receive the SIGPOLL 4830 * signal based on the events is ss_events arg. If 4831 * ss_events is zero, remove the proc from register list. 4832 */ 4833 { 4834 struct strsig *ssp, *pssp; 4835 struct proc *proc; 4836 struct pid *pidp; 4837 pid_t pid; 4838 struct strsigset ss; 4839 4840 error = strcopyin((void *)arg, &ss, sizeof (ss), copyflag); 4841 if (error) 4842 return (error); 4843 4844 pid = ss.ss_pid; 4845 4846 if (ss.ss_events != 0) { 4847 /* 4848 * Permissions check by sending signal 0. 4849 * Note that when kill fails it does a set_errno 4850 * causing the system call to fail. 4851 */ 4852 error = kill(pid, 0); 4853 if (error) { 4854 return (error); 4855 } 4856 } 4857 mutex_enter(&pidlock); 4858 if (pid == 0) 4859 proc = curproc; 4860 else if (pid < 0) 4861 proc = pgfind(-pid); 4862 else 4863 proc = prfind(pid); 4864 if (proc == NULL) { 4865 mutex_exit(&pidlock); 4866 return (ESRCH); 4867 } 4868 if (pid < 0) 4869 pidp = proc->p_pgidp; 4870 else 4871 pidp = proc->p_pidp; 4872 ASSERT(pidp); 4873 /* 4874 * Get a hold on the pid structure while referencing it. 4875 * There is a separate PID_HOLD should it be inserted 4876 * in the list below. 
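 * In other words, this first hold only pins pidp for the duration of
 * this ioctl and is paired with a PID_RELE on every path out of this
 * case; a registration that remains on sd_siglist takes the second,
 * longer-lived hold below.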
4877 */ 4878 PID_HOLD(pidp); 4879 mutex_exit(&pidlock); 4880 4881 pssp = NULL; 4882 /* 4883 * Hold sd_lock to prevent traversal of sd_siglist while 4884 * it is modified. 4885 */ 4886 mutex_enter(&stp->sd_lock); 4887 for (ssp = stp->sd_siglist; ssp && (ssp->ss_pid != pid); 4888 pssp = ssp, ssp = ssp->ss_next) 4889 ; 4890 4891 if (ss.ss_events) { 4892 if (ss.ss_events & 4893 ~(S_INPUT|S_HIPRI|S_MSG|S_HANGUP|S_ERROR| 4894 S_RDNORM|S_WRNORM|S_RDBAND|S_WRBAND|S_BANDURG)) { 4895 mutex_exit(&stp->sd_lock); 4896 mutex_enter(&pidlock); 4897 PID_RELE(pidp); 4898 mutex_exit(&pidlock); 4899 return (EINVAL); 4900 } 4901 if ((ss.ss_events & S_BANDURG) && 4902 !(ss.ss_events & S_RDBAND)) { 4903 mutex_exit(&stp->sd_lock); 4904 mutex_enter(&pidlock); 4905 PID_RELE(pidp); 4906 mutex_exit(&pidlock); 4907 return (EINVAL); 4908 } 4909 4910 /* 4911 * If proc not already registered, add it 4912 * to list. 4913 */ 4914 if (!ssp) { 4915 ssp = kmem_alloc(sizeof (strsig_t), KM_SLEEP); 4916 ssp->ss_pidp = pidp; 4917 ssp->ss_pid = pid; 4918 ssp->ss_next = NULL; 4919 if (pssp) 4920 pssp->ss_next = ssp; 4921 else 4922 stp->sd_siglist = ssp; 4923 mutex_enter(&pidlock); 4924 PID_HOLD(pidp); 4925 mutex_exit(&pidlock); 4926 } 4927 4928 /* 4929 * Set events. 4930 */ 4931 ssp->ss_events = ss.ss_events; 4932 } else { 4933 /* 4934 * Remove proc from register list. 4935 */ 4936 if (ssp) { 4937 mutex_enter(&pidlock); 4938 PID_RELE(pidp); 4939 mutex_exit(&pidlock); 4940 if (pssp) 4941 pssp->ss_next = ssp->ss_next; 4942 else 4943 stp->sd_siglist = ssp->ss_next; 4944 kmem_free(ssp, sizeof (strsig_t)); 4945 } else { 4946 mutex_exit(&stp->sd_lock); 4947 mutex_enter(&pidlock); 4948 PID_RELE(pidp); 4949 mutex_exit(&pidlock); 4950 return (EINVAL); 4951 } 4952 } 4953 4954 /* 4955 * Recalculate OR of sig events. 4956 */ 4957 stp->sd_sigflags = 0; 4958 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next) 4959 stp->sd_sigflags |= ssp->ss_events; 4960 mutex_exit(&stp->sd_lock); 4961 mutex_enter(&pidlock); 4962 PID_RELE(pidp); 4963 mutex_exit(&pidlock); 4964 return (0); 4965 } 4966 4967 case I_EGETSIG: 4968 /* 4969 * Return (in arg) the current registration of events 4970 * for which the calling proc is to be signaled. 
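 * (More precisely, the registration returned is the one recorded for
 * the process or process group named by ss_pid in the strsigset passed
 * in, which need not be the caller itself.)
 *
 * A minimal userland sketch (fd and target_pid are hypothetical):
 *
 *	struct strsigset ss;
 *
 *	ss.ss_pid = target_pid;
 *	if (ioctl(fd, I_EGETSIG, &ss) == 0)
 *		(void) printf("events 0x%x\n", ss.ss_events);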
4971 */ 4972 { 4973 struct strsig *ssp; 4974 struct proc *proc; 4975 pid_t pid; 4976 struct pid *pidp; 4977 struct strsigset ss; 4978 4979 error = strcopyin((void *)arg, &ss, sizeof (ss), copyflag); 4980 if (error) 4981 return (error); 4982 4983 pid = ss.ss_pid; 4984 mutex_enter(&pidlock); 4985 if (pid == 0) 4986 proc = curproc; 4987 else if (pid < 0) 4988 proc = pgfind(-pid); 4989 else 4990 proc = prfind(pid); 4991 if (proc == NULL) { 4992 mutex_exit(&pidlock); 4993 return (ESRCH); 4994 } 4995 if (pid < 0) 4996 pidp = proc->p_pgidp; 4997 else 4998 pidp = proc->p_pidp; 4999 5000 /* Prevent the pidp from being reassigned */ 5001 PID_HOLD(pidp); 5002 mutex_exit(&pidlock); 5003 5004 mutex_enter(&stp->sd_lock); 5005 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next) 5006 if (ssp->ss_pid == pid) { 5007 ss.ss_pid = ssp->ss_pid; 5008 ss.ss_events = ssp->ss_events; 5009 error = strcopyout(&ss, (void *)arg, 5010 sizeof (struct strsigset), copyflag); 5011 mutex_exit(&stp->sd_lock); 5012 mutex_enter(&pidlock); 5013 PID_RELE(pidp); 5014 mutex_exit(&pidlock); 5015 return (error); 5016 } 5017 mutex_exit(&stp->sd_lock); 5018 mutex_enter(&pidlock); 5019 PID_RELE(pidp); 5020 mutex_exit(&pidlock); 5021 return (EINVAL); 5022 } 5023 5024 case I_PEEK: 5025 { 5026 STRUCT_DECL(strpeek, strpeek); 5027 size_t n; 5028 mblk_t *fmp, *tmp_mp = NULL; 5029 5030 STRUCT_INIT(strpeek, flag); 5031 5032 error = strcopyin((void *)arg, STRUCT_BUF(strpeek), 5033 STRUCT_SIZE(strpeek), copyflag); 5034 if (error) 5035 return (error); 5036 5037 mutex_enter(QLOCK(rdq)); 5038 /* 5039 * Skip the invalid messages 5040 */ 5041 for (mp = rdq->q_first; mp != NULL; mp = mp->b_next) 5042 if (mp->b_datap->db_type != M_SIG) 5043 break; 5044 5045 /* 5046 * If user has requested to peek at a high priority message 5047 * and first message is not, return 0 5048 */ 5049 if (mp != NULL) { 5050 if ((STRUCT_FGET(strpeek, flags) & RS_HIPRI) && 5051 queclass(mp) == QNORM) { 5052 *rvalp = 0; 5053 mutex_exit(QLOCK(rdq)); 5054 return (0); 5055 } 5056 } else if (stp->sd_struiordq == NULL || 5057 (STRUCT_FGET(strpeek, flags) & RS_HIPRI)) { 5058 /* 5059 * No mblks to look at at the streamhead and 5060 * 1). This isn't a synch stream or 5061 * 2). This is a synch stream but caller wants high 5062 * priority messages which is not supported by 5063 * the synch stream. (it only supports QNORM) 5064 */ 5065 *rvalp = 0; 5066 mutex_exit(QLOCK(rdq)); 5067 return (0); 5068 } 5069 5070 fmp = mp; 5071 5072 if (mp && mp->b_datap->db_type == M_PASSFP) { 5073 mutex_exit(QLOCK(rdq)); 5074 return (EBADMSG); 5075 } 5076 5077 ASSERT(mp == NULL || mp->b_datap->db_type == M_PCPROTO || 5078 mp->b_datap->db_type == M_PROTO || 5079 mp->b_datap->db_type == M_DATA); 5080 5081 if (mp && mp->b_datap->db_type == M_PCPROTO) { 5082 STRUCT_FSET(strpeek, flags, RS_HIPRI); 5083 } else { 5084 STRUCT_FSET(strpeek, flags, 0); 5085 } 5086 5087 5088 if (mp && ((tmp_mp = dupmsg(mp)) == NULL)) { 5089 mutex_exit(QLOCK(rdq)); 5090 return (ENOSR); 5091 } 5092 mutex_exit(QLOCK(rdq)); 5093 5094 /* 5095 * set mp = tmp_mp, so that I_PEEK processing can continue. 5096 * tmp_mp is used to free the dup'd message. 5097 */ 5098 mp = tmp_mp; 5099 5100 uio.uio_fmode = 0; 5101 uio.uio_extflg = UIO_COPY_CACHED; 5102 uio.uio_segflg = (copyflag == U_TO_K) ? UIO_USERSPACE : 5103 UIO_SYSSPACE; 5104 uio.uio_limit = 0; 5105 /* 5106 * First process PROTO blocks, if any. 5107 * If user doesn't want to get ctl info by setting maxlen <= 0, 5108 * then set len to -1/0 and skip control blocks part. 
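 * (A negative maxlen yields ctlbuf.len == -1, meaning the control part
 * was not examined at all; a maxlen of zero yields ctlbuf.len == 0.)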
5109 */ 5110 if (STRUCT_FGET(strpeek, ctlbuf.maxlen) < 0) 5111 STRUCT_FSET(strpeek, ctlbuf.len, -1); 5112 else if (STRUCT_FGET(strpeek, ctlbuf.maxlen) == 0) 5113 STRUCT_FSET(strpeek, ctlbuf.len, 0); 5114 else { 5115 int ctl_part = 0; 5116 5117 iov.iov_base = STRUCT_FGETP(strpeek, ctlbuf.buf); 5118 iov.iov_len = STRUCT_FGET(strpeek, ctlbuf.maxlen); 5119 uio.uio_iov = &iov; 5120 uio.uio_resid = iov.iov_len; 5121 uio.uio_loffset = 0; 5122 uio.uio_iovcnt = 1; 5123 while (mp && mp->b_datap->db_type != M_DATA && 5124 uio.uio_resid >= 0) { 5125 ASSERT(STRUCT_FGET(strpeek, flags) == 0 ? 5126 mp->b_datap->db_type == M_PROTO : 5127 mp->b_datap->db_type == M_PCPROTO); 5128 5129 if ((n = MIN(uio.uio_resid, 5130 mp->b_wptr - mp->b_rptr)) != 0 && 5131 (error = uiomove((char *)mp->b_rptr, n, 5132 UIO_READ, &uio)) != 0) { 5133 freemsg(tmp_mp); 5134 return (error); 5135 } 5136 ctl_part = 1; 5137 mp = mp->b_cont; 5138 } 5139 /* No ctl message */ 5140 if (ctl_part == 0) 5141 STRUCT_FSET(strpeek, ctlbuf.len, -1); 5142 else 5143 STRUCT_FSET(strpeek, ctlbuf.len, 5144 STRUCT_FGET(strpeek, ctlbuf.maxlen) - 5145 uio.uio_resid); 5146 } 5147 5148 /* 5149 * Now process DATA blocks, if any. 5150 * If user doesn't want to get data info by setting maxlen <= 0, 5151 * then set len to -1/0 and skip data blocks part. 5152 */ 5153 if (STRUCT_FGET(strpeek, databuf.maxlen) < 0) 5154 STRUCT_FSET(strpeek, databuf.len, -1); 5155 else if (STRUCT_FGET(strpeek, databuf.maxlen) == 0) 5156 STRUCT_FSET(strpeek, databuf.len, 0); 5157 else { 5158 int data_part = 0; 5159 5160 iov.iov_base = STRUCT_FGETP(strpeek, databuf.buf); 5161 iov.iov_len = STRUCT_FGET(strpeek, databuf.maxlen); 5162 uio.uio_iov = &iov; 5163 uio.uio_resid = iov.iov_len; 5164 uio.uio_loffset = 0; 5165 uio.uio_iovcnt = 1; 5166 while (mp && uio.uio_resid) { 5167 if (mp->b_datap->db_type == M_DATA) { 5168 if ((n = MIN(uio.uio_resid, 5169 mp->b_wptr - mp->b_rptr)) != 0 && 5170 (error = uiomove((char *)mp->b_rptr, 5171 n, UIO_READ, &uio)) != 0) { 5172 freemsg(tmp_mp); 5173 return (error); 5174 } 5175 data_part = 1; 5176 } 5177 ASSERT(data_part == 0 || 5178 mp->b_datap->db_type == M_DATA); 5179 mp = mp->b_cont; 5180 } 5181 /* No data message */ 5182 if (data_part == 0) 5183 STRUCT_FSET(strpeek, databuf.len, -1); 5184 else 5185 STRUCT_FSET(strpeek, databuf.len, 5186 STRUCT_FGET(strpeek, databuf.maxlen) - 5187 uio.uio_resid); 5188 } 5189 freemsg(tmp_mp); 5190 5191 /* 5192 * It is a synch stream and user wants to get 5193 * data (maxlen > 0). 5194 * uio setup is done by the codes that process DATA 5195 * blocks above. 5196 */ 5197 if ((fmp == NULL) && STRUCT_FGET(strpeek, databuf.maxlen) > 0) { 5198 infod_t infod; 5199 5200 infod.d_cmd = INFOD_COPYOUT; 5201 infod.d_res = 0; 5202 infod.d_uiop = &uio; 5203 error = infonext(rdq, &infod); 5204 if (error == EINVAL || error == EBUSY) 5205 error = 0; 5206 if (error) 5207 return (error); 5208 STRUCT_FSET(strpeek, databuf.len, STRUCT_FGET(strpeek, 5209 databuf.maxlen) - uio.uio_resid); 5210 if (STRUCT_FGET(strpeek, databuf.len) == 0) { 5211 /* 5212 * No data found by the infonext(). 5213 */ 5214 STRUCT_FSET(strpeek, databuf.len, -1); 5215 } 5216 } 5217 error = strcopyout(STRUCT_BUF(strpeek), (void *)arg, 5218 STRUCT_SIZE(strpeek), copyflag); 5219 if (error) { 5220 return (error); 5221 } 5222 /* 5223 * If there is no message retrieved, set return code to 0 5224 * otherwise, set it to 1. 
5225 */ 5226 if (STRUCT_FGET(strpeek, ctlbuf.len) == -1 && 5227 STRUCT_FGET(strpeek, databuf.len) == -1) 5228 *rvalp = 0; 5229 else 5230 *rvalp = 1; 5231 return (0); 5232 } 5233 5234 case I_FDINSERT: 5235 { 5236 STRUCT_DECL(strfdinsert, strfdinsert); 5237 struct file *resftp; 5238 struct stdata *resstp; 5239 t_uscalar_t ival; 5240 ssize_t msgsize; 5241 struct strbuf mctl; 5242 5243 STRUCT_INIT(strfdinsert, flag); 5244 if (stp->sd_flag & STRHUP) 5245 return (ENXIO); 5246 /* 5247 * STRDERR, STWRERR and STPLEX tested above. 5248 */ 5249 error = strcopyin((void *)arg, STRUCT_BUF(strfdinsert), 5250 STRUCT_SIZE(strfdinsert), copyflag); 5251 if (error) 5252 return (error); 5253 5254 if (STRUCT_FGET(strfdinsert, offset) < 0 || 5255 (STRUCT_FGET(strfdinsert, offset) % 5256 sizeof (t_uscalar_t)) != 0) 5257 return (EINVAL); 5258 if ((resftp = getf(STRUCT_FGET(strfdinsert, fildes))) != NULL) { 5259 if ((resstp = resftp->f_vnode->v_stream) == NULL) { 5260 releasef(STRUCT_FGET(strfdinsert, fildes)); 5261 return (EINVAL); 5262 } 5263 } else 5264 return (EINVAL); 5265 5266 mutex_enter(&resstp->sd_lock); 5267 if (resstp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) { 5268 error = strgeterr(resstp, 5269 STRDERR|STWRERR|STRHUP|STPLEX, 0); 5270 if (error != 0) { 5271 mutex_exit(&resstp->sd_lock); 5272 releasef(STRUCT_FGET(strfdinsert, fildes)); 5273 return (error); 5274 } 5275 } 5276 mutex_exit(&resstp->sd_lock); 5277 5278 #ifdef _ILP32 5279 { 5280 queue_t *q; 5281 queue_t *mate = NULL; 5282 5283 /* get read queue of stream terminus */ 5284 claimstr(resstp->sd_wrq); 5285 for (q = resstp->sd_wrq->q_next; q->q_next != NULL; 5286 q = q->q_next) 5287 if (!STRMATED(resstp) && STREAM(q) != resstp && 5288 mate == NULL) { 5289 ASSERT(q->q_qinfo->qi_srvp); 5290 ASSERT(_OTHERQ(q)->q_qinfo->qi_srvp); 5291 claimstr(q); 5292 mate = q; 5293 } 5294 q = _RD(q); 5295 if (mate) 5296 releasestr(mate); 5297 releasestr(resstp->sd_wrq); 5298 ival = (t_uscalar_t)q; 5299 } 5300 #else 5301 ival = (t_uscalar_t)getminor(resftp->f_vnode->v_rdev); 5302 #endif /* _ILP32 */ 5303 5304 if (STRUCT_FGET(strfdinsert, ctlbuf.len) < 5305 STRUCT_FGET(strfdinsert, offset) + sizeof (t_uscalar_t)) { 5306 releasef(STRUCT_FGET(strfdinsert, fildes)); 5307 return (EINVAL); 5308 } 5309 5310 /* 5311 * Check for legal flag value. 5312 */ 5313 if (STRUCT_FGET(strfdinsert, flags) & ~RS_HIPRI) { 5314 releasef(STRUCT_FGET(strfdinsert, fildes)); 5315 return (EINVAL); 5316 } 5317 5318 /* get these values from those cached in the stream head */ 5319 mutex_enter(QLOCK(stp->sd_wrq)); 5320 rmin = stp->sd_qn_minpsz; 5321 rmax = stp->sd_qn_maxpsz; 5322 mutex_exit(QLOCK(stp->sd_wrq)); 5323 5324 /* 5325 * Make sure ctl and data sizes together fall within 5326 * the limits of the max and min receive packet sizes 5327 * and do not exceed system limit. A negative data 5328 * length means that no data part is to be sent. 
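 * Concretely: a zero rmax fails immediately with ERANGE, a negative
 * databuf.len is treated as zero (no data part), and otherwise the
 * data length must satisfy rmin <= len <= rmax (INFPSZ lifts the upper
 * bound) while ctlbuf.len may not exceed strctlsz.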
5329 */ 5330 ASSERT((rmax >= 0) || (rmax == INFPSZ)); 5331 if (rmax == 0) { 5332 releasef(STRUCT_FGET(strfdinsert, fildes)); 5333 return (ERANGE); 5334 } 5335 if ((msgsize = STRUCT_FGET(strfdinsert, databuf.len)) < 0) 5336 msgsize = 0; 5337 if ((msgsize < rmin) || 5338 ((msgsize > rmax) && (rmax != INFPSZ)) || 5339 (STRUCT_FGET(strfdinsert, ctlbuf.len) > strctlsz)) { 5340 releasef(STRUCT_FGET(strfdinsert, fildes)); 5341 return (ERANGE); 5342 } 5343 5344 mutex_enter(&stp->sd_lock); 5345 while (!(STRUCT_FGET(strfdinsert, flags) & RS_HIPRI) && 5346 !canputnext(stp->sd_wrq)) { 5347 if ((error = strwaitq(stp, WRITEWAIT, (ssize_t)0, 5348 flag, -1, &done)) != 0 || done) { 5349 mutex_exit(&stp->sd_lock); 5350 releasef(STRUCT_FGET(strfdinsert, fildes)); 5351 return (error); 5352 } 5353 if ((error = i_straccess(stp, access)) != 0) { 5354 mutex_exit(&stp->sd_lock); 5355 releasef( 5356 STRUCT_FGET(strfdinsert, fildes)); 5357 return (error); 5358 } 5359 } 5360 mutex_exit(&stp->sd_lock); 5361 5362 /* 5363 * Copy strfdinsert.ctlbuf into native form of 5364 * ctlbuf to pass down into strmakemsg(). 5365 */ 5366 mctl.maxlen = STRUCT_FGET(strfdinsert, ctlbuf.maxlen); 5367 mctl.len = STRUCT_FGET(strfdinsert, ctlbuf.len); 5368 mctl.buf = STRUCT_FGETP(strfdinsert, ctlbuf.buf); 5369 5370 iov.iov_base = STRUCT_FGETP(strfdinsert, databuf.buf); 5371 iov.iov_len = STRUCT_FGET(strfdinsert, databuf.len); 5372 uio.uio_iov = &iov; 5373 uio.uio_iovcnt = 1; 5374 uio.uio_loffset = 0; 5375 uio.uio_segflg = (copyflag == U_TO_K) ? UIO_USERSPACE : 5376 UIO_SYSSPACE; 5377 uio.uio_fmode = 0; 5378 uio.uio_extflg = UIO_COPY_CACHED; 5379 uio.uio_resid = iov.iov_len; 5380 if ((error = strmakemsg(&mctl, 5381 &msgsize, &uio, stp, 5382 STRUCT_FGET(strfdinsert, flags), &mp)) != 0 || !mp) { 5383 STRUCT_FSET(strfdinsert, databuf.len, msgsize); 5384 releasef(STRUCT_FGET(strfdinsert, fildes)); 5385 return (error); 5386 } 5387 5388 STRUCT_FSET(strfdinsert, databuf.len, msgsize); 5389 5390 /* 5391 * Place the possibly reencoded queue pointer 'offset' bytes 5392 * from the start of the control portion of the message. 5393 */ 5394 *((t_uscalar_t *)(mp->b_rptr + 5395 STRUCT_FGET(strfdinsert, offset))) = ival; 5396 5397 /* 5398 * Put message downstream. 
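 * The stream_willservice()/stream_runservice() pair around the
 * putnext() is intended to let any queues scheduled for service as a
 * side effect of this message be drained by this thread before we
 * return, rather than being left to the background service threads.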
5399 */ 5400 stream_willservice(stp); 5401 putnext(stp->sd_wrq, mp); 5402 stream_runservice(stp); 5403 releasef(STRUCT_FGET(strfdinsert, fildes)); 5404 return (error); 5405 } 5406 5407 case I_SENDFD: 5408 { 5409 struct file *fp; 5410 5411 if ((fp = getf((int)arg)) == NULL) 5412 return (EBADF); 5413 error = do_sendfp(stp, fp, crp); 5414 if (audit_active) { 5415 audit_fdsend((int)arg, fp, error); 5416 } 5417 releasef((int)arg); 5418 return (error); 5419 } 5420 5421 case I_RECVFD: 5422 case I_E_RECVFD: 5423 { 5424 struct k_strrecvfd *srf; 5425 int i, fd; 5426 5427 mutex_enter(&stp->sd_lock); 5428 while (!(mp = getq(rdq))) { 5429 if (stp->sd_flag & (STRHUP|STREOF)) { 5430 mutex_exit(&stp->sd_lock); 5431 return (ENXIO); 5432 } 5433 if ((error = strwaitq(stp, GETWAIT, (ssize_t)0, 5434 flag, -1, &done)) != 0 || done) { 5435 mutex_exit(&stp->sd_lock); 5436 return (error); 5437 } 5438 if ((error = i_straccess(stp, access)) != 0) { 5439 mutex_exit(&stp->sd_lock); 5440 return (error); 5441 } 5442 } 5443 if (mp->b_datap->db_type != M_PASSFP) { 5444 putback(stp, rdq, mp, mp->b_band); 5445 mutex_exit(&stp->sd_lock); 5446 return (EBADMSG); 5447 } 5448 mutex_exit(&stp->sd_lock); 5449 5450 srf = (struct k_strrecvfd *)mp->b_rptr; 5451 if ((fd = ufalloc(0)) == -1) { 5452 mutex_enter(&stp->sd_lock); 5453 putback(stp, rdq, mp, mp->b_band); 5454 mutex_exit(&stp->sd_lock); 5455 return (EMFILE); 5456 } 5457 if (cmd == I_RECVFD) { 5458 struct o_strrecvfd ostrfd; 5459 5460 /* check to see if uid/gid values are too large. */ 5461 5462 if (srf->uid > (o_uid_t)USHRT_MAX || 5463 srf->gid > (o_gid_t)USHRT_MAX) { 5464 mutex_enter(&stp->sd_lock); 5465 putback(stp, rdq, mp, mp->b_band); 5466 mutex_exit(&stp->sd_lock); 5467 setf(fd, NULL); /* release fd entry */ 5468 return (EOVERFLOW); 5469 } 5470 5471 ostrfd.fd = fd; 5472 ostrfd.uid = (o_uid_t)srf->uid; 5473 ostrfd.gid = (o_gid_t)srf->gid; 5474 5475 /* Null the filler bits */ 5476 for (i = 0; i < 8; i++) 5477 ostrfd.fill[i] = 0; 5478 5479 error = strcopyout(&ostrfd, (void *)arg, 5480 sizeof (struct o_strrecvfd), copyflag); 5481 } else { /* I_E_RECVFD */ 5482 struct strrecvfd strfd; 5483 5484 strfd.fd = fd; 5485 strfd.uid = srf->uid; 5486 strfd.gid = srf->gid; 5487 5488 /* null the filler bits */ 5489 for (i = 0; i < 8; i++) 5490 strfd.fill[i] = 0; 5491 5492 error = strcopyout(&strfd, (void *)arg, 5493 sizeof (struct strrecvfd), copyflag); 5494 } 5495 5496 if (error) { 5497 setf(fd, NULL); /* release fd entry */ 5498 mutex_enter(&stp->sd_lock); 5499 putback(stp, rdq, mp, mp->b_band); 5500 mutex_exit(&stp->sd_lock); 5501 return (error); 5502 } 5503 if (audit_active) { 5504 audit_fdrecv(fd, srf->fp); 5505 } 5506 5507 /* 5508 * Always increment f_count since the freemsg() below will 5509 * always call free_passfp() which performs a closef(). 5510 */ 5511 mutex_enter(&srf->fp->f_tlock); 5512 srf->fp->f_count++; 5513 mutex_exit(&srf->fp->f_tlock); 5514 setf(fd, srf->fp); 5515 freemsg(mp); 5516 return (0); 5517 } 5518 5519 case I_SWROPT: 5520 /* 5521 * Set/clear the write options. arg is a bit 5522 * mask with any of the following bits set... 5523 * SNDZERO - send zero length message 5524 * SNDPIPE - send sigpipe to process if 5525 * sd_werror is set and process is 5526 * doing a write or putmsg. 5527 * The new stream head write options should reflect 5528 * what is in arg. 
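 *
 * A minimal userland sketch (fd is a hypothetical descriptor for a
 * pipe or other stream); passing 0 clears both options:
 *
 *	if (ioctl(fd, I_SWROPT, SNDPIPE) == -1)
 *		perror("I_SWROPT");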
5529 */ 5530 if (arg & ~(SNDZERO|SNDPIPE)) 5531 return (EINVAL); 5532 5533 mutex_enter(&stp->sd_lock); 5534 stp->sd_wput_opt &= ~(SW_SIGPIPE|SW_SNDZERO); 5535 if (arg & SNDZERO) 5536 stp->sd_wput_opt |= SW_SNDZERO; 5537 if (arg & SNDPIPE) 5538 stp->sd_wput_opt |= SW_SIGPIPE; 5539 mutex_exit(&stp->sd_lock); 5540 return (0); 5541 5542 case I_GWROPT: 5543 { 5544 int wropt = 0; 5545 5546 if (stp->sd_wput_opt & SW_SNDZERO) 5547 wropt |= SNDZERO; 5548 if (stp->sd_wput_opt & SW_SIGPIPE) 5549 wropt |= SNDPIPE; 5550 return (strcopyout(&wropt, (void *)arg, sizeof (wropt), 5551 copyflag)); 5552 } 5553 5554 case I_LIST: 5555 /* 5556 * Returns all the modules found on this stream, 5557 * upto the driver. If argument is NULL, return the 5558 * number of modules (including driver). If argument 5559 * is not NULL, copy the names into the structure 5560 * provided. 5561 */ 5562 5563 { 5564 queue_t *q; 5565 int num_modules, space_allocated; 5566 STRUCT_DECL(str_list, strlist); 5567 struct str_mlist *mlist_ptr; 5568 5569 if (arg == NULL) { /* Return number of modules plus driver */ 5570 q = stp->sd_wrq; 5571 if (stp->sd_vnode->v_type == VFIFO) { 5572 *rvalp = stp->sd_pushcnt; 5573 } else { 5574 *rvalp = stp->sd_pushcnt + 1; 5575 } 5576 } else { 5577 STRUCT_INIT(strlist, flag); 5578 5579 error = strcopyin((void *)arg, STRUCT_BUF(strlist), 5580 STRUCT_SIZE(strlist), copyflag); 5581 if (error) 5582 return (error); 5583 5584 space_allocated = STRUCT_FGET(strlist, sl_nmods); 5585 if ((space_allocated) <= 0) 5586 return (EINVAL); 5587 claimstr(stp->sd_wrq); 5588 q = stp->sd_wrq; 5589 num_modules = 0; 5590 while (_SAMESTR(q) && (space_allocated != 0)) { 5591 char *name = 5592 q->q_next->q_qinfo->qi_minfo->mi_idname; 5593 5594 mlist_ptr = STRUCT_FGETP(strlist, sl_modlist); 5595 5596 error = strcopyout(name, mlist_ptr, 5597 strlen(name) + 1, copyflag); 5598 5599 if (error) { 5600 releasestr(stp->sd_wrq); 5601 return (error); 5602 } 5603 q = q->q_next; 5604 space_allocated--; 5605 num_modules++; 5606 mlist_ptr = 5607 (struct str_mlist *)((uintptr_t)mlist_ptr + 5608 sizeof (struct str_mlist)); 5609 STRUCT_FSETP(strlist, sl_modlist, mlist_ptr); 5610 } 5611 releasestr(stp->sd_wrq); 5612 error = strcopyout(&num_modules, (void *)arg, 5613 sizeof (int), copyflag); 5614 } 5615 return (error); 5616 } 5617 5618 case I_CKBAND: 5619 { 5620 queue_t *q; 5621 qband_t *qbp; 5622 5623 if ((arg < 0) || (arg >= NBAND)) 5624 return (EINVAL); 5625 q = _RD(stp->sd_wrq); 5626 mutex_enter(QLOCK(q)); 5627 if (arg > (int)q->q_nband) { 5628 *rvalp = 0; 5629 } else { 5630 if (arg == 0) { 5631 if (q->q_first) 5632 *rvalp = 1; 5633 else 5634 *rvalp = 0; 5635 } else { 5636 qbp = q->q_bandp; 5637 while (--arg > 0) 5638 qbp = qbp->qb_next; 5639 if (qbp->qb_first) 5640 *rvalp = 1; 5641 else 5642 *rvalp = 0; 5643 } 5644 } 5645 mutex_exit(QLOCK(q)); 5646 return (0); 5647 } 5648 5649 case I_GETBAND: 5650 { 5651 int intpri; 5652 queue_t *q; 5653 5654 q = _RD(stp->sd_wrq); 5655 mutex_enter(QLOCK(q)); 5656 mp = q->q_first; 5657 if (!mp) { 5658 mutex_exit(QLOCK(q)); 5659 return (ENODATA); 5660 } 5661 intpri = (int)mp->b_band; 5662 error = strcopyout(&intpri, (void *)arg, sizeof (int), 5663 copyflag); 5664 mutex_exit(QLOCK(q)); 5665 return (error); 5666 } 5667 5668 case I_ATMARK: 5669 { 5670 queue_t *q; 5671 5672 if (arg & ~(ANYMARK|LASTMARK)) 5673 return (EINVAL); 5674 q = _RD(stp->sd_wrq); 5675 mutex_enter(&stp->sd_lock); 5676 if ((stp->sd_flag & STRATMARK) && (arg == ANYMARK)) { 5677 *rvalp = 1; 5678 } else { 5679 mutex_enter(QLOCK(q)); 5680 mp = 
q->q_first; 5681 5682 if (mp == NULL) 5683 *rvalp = 0; 5684 else if ((arg == ANYMARK) && (mp->b_flag & MSGMARK)) 5685 *rvalp = 1; 5686 else if ((arg == LASTMARK) && (mp == stp->sd_mark)) 5687 *rvalp = 1; 5688 else 5689 *rvalp = 0; 5690 mutex_exit(QLOCK(q)); 5691 } 5692 mutex_exit(&stp->sd_lock); 5693 return (0); 5694 } 5695 5696 case I_CANPUT: 5697 { 5698 char band; 5699 5700 if ((arg < 0) || (arg >= NBAND)) 5701 return (EINVAL); 5702 band = (char)arg; 5703 *rvalp = bcanputnext(stp->sd_wrq, band); 5704 return (0); 5705 } 5706 5707 case I_SETCLTIME: 5708 { 5709 int closetime; 5710 5711 error = strcopyin((void *)arg, &closetime, sizeof (int), 5712 copyflag); 5713 if (error) 5714 return (error); 5715 if (closetime < 0) 5716 return (EINVAL); 5717 5718 stp->sd_closetime = closetime; 5719 return (0); 5720 } 5721 5722 case I_GETCLTIME: 5723 { 5724 int closetime; 5725 5726 closetime = stp->sd_closetime; 5727 return (strcopyout(&closetime, (void *)arg, sizeof (int), 5728 copyflag)); 5729 } 5730 5731 case TIOCGSID: 5732 { 5733 pid_t sid; 5734 5735 mutex_enter(&stp->sd_lock); 5736 if (stp->sd_sidp == NULL) { 5737 mutex_exit(&stp->sd_lock); 5738 return (ENOTTY); 5739 } 5740 sid = stp->sd_sidp->pid_id; 5741 mutex_exit(&stp->sd_lock); 5742 return (strcopyout(&sid, (void *)arg, sizeof (pid_t), 5743 copyflag)); 5744 } 5745 5746 case TIOCSPGRP: 5747 { 5748 pid_t pgrp; 5749 proc_t *q; 5750 pid_t sid, fg_pgid, bg_pgid; 5751 5752 if (error = strcopyin((void *)arg, &pgrp, sizeof (pid_t), 5753 copyflag)) 5754 return (error); 5755 mutex_enter(&stp->sd_lock); 5756 mutex_enter(&pidlock); 5757 if (stp->sd_sidp != ttoproc(curthread)->p_sessp->s_sidp) { 5758 mutex_exit(&pidlock); 5759 mutex_exit(&stp->sd_lock); 5760 return (ENOTTY); 5761 } 5762 if (pgrp == stp->sd_pgidp->pid_id) { 5763 mutex_exit(&pidlock); 5764 mutex_exit(&stp->sd_lock); 5765 return (0); 5766 } 5767 if (pgrp <= 0 || pgrp >= maxpid) { 5768 mutex_exit(&pidlock); 5769 mutex_exit(&stp->sd_lock); 5770 return (EINVAL); 5771 } 5772 if ((q = pgfind(pgrp)) == NULL || 5773 q->p_sessp != ttoproc(curthread)->p_sessp) { 5774 mutex_exit(&pidlock); 5775 mutex_exit(&stp->sd_lock); 5776 return (EPERM); 5777 } 5778 sid = stp->sd_sidp->pid_id; 5779 fg_pgid = q->p_pgrp; 5780 bg_pgid = stp->sd_pgidp->pid_id; 5781 CL_SET_PROCESS_GROUP(curthread, sid, bg_pgid, fg_pgid); 5782 PID_RELE(stp->sd_pgidp); 5783 ctty_clear_sighuped(); 5784 stp->sd_pgidp = q->p_pgidp; 5785 PID_HOLD(stp->sd_pgidp); 5786 mutex_exit(&pidlock); 5787 mutex_exit(&stp->sd_lock); 5788 return (0); 5789 } 5790 5791 case TIOCGPGRP: 5792 { 5793 pid_t pgrp; 5794 5795 mutex_enter(&stp->sd_lock); 5796 if (stp->sd_sidp == NULL) { 5797 mutex_exit(&stp->sd_lock); 5798 return (ENOTTY); 5799 } 5800 pgrp = stp->sd_pgidp->pid_id; 5801 mutex_exit(&stp->sd_lock); 5802 return (strcopyout(&pgrp, (void *)arg, sizeof (pid_t), 5803 copyflag)); 5804 } 5805 5806 case TIOCSCTTY: 5807 { 5808 return (strctty(stp)); 5809 } 5810 5811 case TIOCNOTTY: 5812 { 5813 /* freectty() always assumes curproc. */ 5814 if (freectty(B_FALSE) != 0) 5815 return (0); 5816 return (ENOTTY); 5817 } 5818 5819 case FIONBIO: 5820 case FIOASYNC: 5821 return (0); /* handled by the upper layer */ 5822 } 5823 } 5824 5825 /* 5826 * Custom free routine used for M_PASSFP messages. 
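 * This is the esballoc() free function installed by do_sendfp() below:
 * closef() drops the file hold taken when the descriptor was sent, and
 * the combined k_strrecvfd/frtn_t buffer allocated there is released
 * in one piece.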
5827 */ 5828 static void 5829 free_passfp(struct k_strrecvfd *srf) 5830 { 5831 (void) closef(srf->fp); 5832 kmem_free(srf, sizeof (struct k_strrecvfd) + sizeof (frtn_t)); 5833 } 5834 5835 /* ARGSUSED */ 5836 int 5837 do_sendfp(struct stdata *stp, struct file *fp, struct cred *cr) 5838 { 5839 queue_t *qp, *nextqp; 5840 struct k_strrecvfd *srf; 5841 mblk_t *mp; 5842 frtn_t *frtnp; 5843 size_t bufsize; 5844 queue_t *mate = NULL; 5845 syncq_t *sq = NULL; 5846 int retval = 0; 5847 5848 if (stp->sd_flag & STRHUP) 5849 return (ENXIO); 5850 5851 claimstr(stp->sd_wrq); 5852 5853 /* Fastpath, we have a pipe, and we are already mated, use it. */ 5854 if (STRMATED(stp)) { 5855 qp = _RD(stp->sd_mate->sd_wrq); 5856 claimstr(qp); 5857 mate = qp; 5858 } else { /* Not already mated. */ 5859 5860 /* 5861 * Walk the stream to the end of this one. 5862 * assumes that the claimstr() will prevent 5863 * plumbing between the stream head and the 5864 * driver from changing 5865 */ 5866 qp = stp->sd_wrq; 5867 5868 /* 5869 * Loop until we reach the end of this stream. 5870 * On completion, qp points to the write queue 5871 * at the end of the stream, or the read queue 5872 * at the stream head if this is a fifo. 5873 */ 5874 while (((qp = qp->q_next) != NULL) && _SAMESTR(qp)) 5875 ; 5876 5877 /* 5878 * Just in case we get a q_next which is NULL, but 5879 * not at the end of the stream. This is actually 5880 * broken, so we set an assert to catch it in 5881 * debug, and set an error and return if not debug. 5882 */ 5883 ASSERT(qp); 5884 if (qp == NULL) { 5885 releasestr(stp->sd_wrq); 5886 return (EINVAL); 5887 } 5888 5889 /* 5890 * Enter the syncq for the driver, so (hopefully) 5891 * the queue values will not change on us. 5892 * XXXX - This will only prevent the race IFF only 5893 * the write side modifies the q_next member, and 5894 * the put procedure is protected by at least 5895 * MT_PERQ. 5896 */ 5897 if ((sq = qp->q_syncq) != NULL) 5898 entersq(sq, SQ_PUT); 5899 5900 /* Now get the q_next value from this qp. */ 5901 nextqp = qp->q_next; 5902 5903 /* 5904 * If nextqp exists and the other stream is different 5905 * from this one claim the stream, set the mate, and 5906 * get the read queue at the stream head of the other 5907 * stream. Assumes that nextqp was at least valid when 5908 * we got it. Hopefully the entersq of the driver 5909 * will prevent it from changing on us. 5910 */ 5911 if ((nextqp != NULL) && (STREAM(nextqp) != stp)) { 5912 ASSERT(qp->q_qinfo->qi_srvp); 5913 ASSERT(_OTHERQ(qp)->q_qinfo->qi_srvp); 5914 ASSERT(_OTHERQ(qp->q_next)->q_qinfo->qi_srvp); 5915 claimstr(nextqp); 5916 5917 /* Make sure we still have a q_next */ 5918 if (nextqp != qp->q_next) { 5919 releasestr(stp->sd_wrq); 5920 releasestr(nextqp); 5921 return (EINVAL); 5922 } 5923 5924 qp = _RD(STREAM(nextqp)->sd_wrq); 5925 mate = qp; 5926 } 5927 /* If we entered the synq above, leave it. */ 5928 if (sq != NULL) 5929 leavesq(sq, SQ_PUT); 5930 } /* STRMATED(STP) */ 5931 5932 /* XXX prevents substitution of the ops vector */ 5933 if (qp->q_qinfo != &strdata && qp->q_qinfo != &fifo_strdata) { 5934 retval = EINVAL; 5935 goto out; 5936 } 5937 5938 if (qp->q_flag & QFULL) { 5939 retval = EAGAIN; 5940 goto out; 5941 } 5942 5943 /* 5944 * Since M_PASSFP messages include a file descriptor, we use 5945 * esballoc() and specify a custom free routine (free_passfp()) that 5946 * will close the descriptor as part of freeing the message. For 5947 * convenience, we stash the frtn_t right after the data block. 
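 *
 * The single allocation is laid out as follows (srf points at the
 * start, frtnp at (frtn_t *)(srf + 1)):
 *
 *	+---------------------+---------+
 *	| struct k_strrecvfd  | frtn_t  |
 *	+---------------------+---------+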
5948 */ 5949 bufsize = sizeof (struct k_strrecvfd) + sizeof (frtn_t); 5950 srf = kmem_alloc(bufsize, KM_NOSLEEP); 5951 if (srf == NULL) { 5952 retval = EAGAIN; 5953 goto out; 5954 } 5955 5956 frtnp = (frtn_t *)(srf + 1); 5957 frtnp->free_arg = (caddr_t)srf; 5958 frtnp->free_func = free_passfp; 5959 5960 mp = esballoc((uchar_t *)srf, bufsize, BPRI_MED, frtnp); 5961 if (mp == NULL) { 5962 kmem_free(srf, bufsize); 5963 retval = EAGAIN; 5964 goto out; 5965 } 5966 mp->b_wptr += sizeof (struct k_strrecvfd); 5967 mp->b_datap->db_type = M_PASSFP; 5968 5969 srf->fp = fp; 5970 srf->uid = crgetuid(curthread->t_cred); 5971 srf->gid = crgetgid(curthread->t_cred); 5972 mutex_enter(&fp->f_tlock); 5973 fp->f_count++; 5974 mutex_exit(&fp->f_tlock); 5975 5976 put(qp, mp); 5977 out: 5978 releasestr(stp->sd_wrq); 5979 if (mate) 5980 releasestr(mate); 5981 return (retval); 5982 } 5983 5984 /* 5985 * Send an ioctl message downstream and wait for acknowledgement. 5986 * flags may be set to either U_TO_K or K_TO_K and a combination 5987 * of STR_NOERROR or STR_NOSIG 5988 * STR_NOSIG: Signals are essentially ignored or held and have 5989 * no effect for the duration of the call. 5990 * STR_NOERROR: Ignores stream head read, write and hup errors. 5991 * Additionally, if an existing ioctl times out, it is assumed 5992 * lost and and this ioctl will continue as if the previous ioctl had 5993 * finished. ETIME may be returned if this ioctl times out (i.e. 5994 * ic_timout is not INFTIM). Non-stream head errors may be returned if 5995 * the ioc_error indicates that the driver/module had problems, 5996 * an EFAULT was found when accessing user data, a lack of 5997 * resources, etc. 5998 */ 5999 int 6000 strdoioctl( 6001 struct stdata *stp, 6002 struct strioctl *strioc, 6003 int fflags, /* file flags with model info */ 6004 int flag, 6005 cred_t *crp, 6006 int *rvalp) 6007 { 6008 mblk_t *bp; 6009 struct iocblk *iocbp; 6010 struct copyreq *reqp; 6011 struct copyresp *resp; 6012 int id; 6013 int transparent = 0; 6014 int error = 0; 6015 int len = 0; 6016 caddr_t taddr; 6017 int copyflag = (flag & (U_TO_K | K_TO_K)); 6018 int sigflag = (flag & STR_NOSIG); 6019 int errs; 6020 uint_t waitflags; 6021 6022 ASSERT(copyflag == U_TO_K || copyflag == K_TO_K); 6023 ASSERT((fflags & FMODELS) != 0); 6024 6025 TRACE_2(TR_FAC_STREAMS_FR, 6026 TR_STRDOIOCTL, 6027 "strdoioctl:stp %p strioc %p", stp, strioc); 6028 if (strioc->ic_len == TRANSPARENT) { /* send arg in M_DATA block */ 6029 transparent = 1; 6030 strioc->ic_len = sizeof (intptr_t); 6031 } 6032 6033 if (strioc->ic_len < 0 || (strmsgsz > 0 && strioc->ic_len > strmsgsz)) 6034 return (EINVAL); 6035 6036 if ((bp = allocb_cred_wait(sizeof (union ioctypes), sigflag, &error, 6037 crp)) == NULL) 6038 return (error); 6039 6040 bzero(bp->b_wptr, sizeof (union ioctypes)); 6041 6042 iocbp = (struct iocblk *)bp->b_wptr; 6043 iocbp->ioc_count = strioc->ic_len; 6044 iocbp->ioc_cmd = strioc->ic_cmd; 6045 iocbp->ioc_flag = (fflags & FMODELS); 6046 6047 crhold(crp); 6048 iocbp->ioc_cr = crp; 6049 DB_TYPE(bp) = M_IOCTL; 6050 DB_CPID(bp) = curproc->p_pid; 6051 bp->b_wptr += sizeof (struct iocblk); 6052 6053 if (flag & STR_NOERROR) 6054 errs = STPLEX; 6055 else 6056 errs = STRHUP|STRDERR|STWRERR|STPLEX; 6057 6058 /* 6059 * If there is data to copy into ioctl block, do so. 
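 * For a transparent ioctl, ic_dp is expected to point at a kernel copy
 * of the single intptr_t argument (set up by our caller), which is why
 * the copy is always done K_TO_K in that case.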
6060 */ 6061 if (iocbp->ioc_count > 0) { 6062 if (transparent) 6063 /* 6064 * Note: STR_NOERROR does not have an effect 6065 * in putiocd() 6066 */ 6067 id = K_TO_K | sigflag; 6068 else 6069 id = flag; 6070 if ((error = putiocd(bp, strioc->ic_dp, id, crp)) != 0) { 6071 freemsg(bp); 6072 crfree(crp); 6073 return (error); 6074 } 6075 6076 /* 6077 * We could have slept copying in user pages. 6078 * Recheck the stream head state (the other end 6079 * of a pipe could have gone away). 6080 */ 6081 if (stp->sd_flag & errs) { 6082 mutex_enter(&stp->sd_lock); 6083 error = strgeterr(stp, errs, 0); 6084 mutex_exit(&stp->sd_lock); 6085 if (error != 0) { 6086 freemsg(bp); 6087 crfree(crp); 6088 return (error); 6089 } 6090 } 6091 } 6092 if (transparent) 6093 iocbp->ioc_count = TRANSPARENT; 6094 6095 /* 6096 * Block for up to STRTIMOUT milliseconds if there is an outstanding 6097 * ioctl for this stream already running. All processes 6098 * sleeping here will be awakened as a result of an ACK 6099 * or NAK being received for the outstanding ioctl, or 6100 * as a result of the timer expiring on the outstanding 6101 * ioctl (a failure), or as a result of any waiting 6102 * process's timer expiring (also a failure). 6103 */ 6104 6105 error = 0; 6106 mutex_enter(&stp->sd_lock); 6107 while (stp->sd_flag & (IOCWAIT | IOCWAITNE)) { 6108 clock_t cv_rval; 6109 6110 TRACE_0(TR_FAC_STREAMS_FR, 6111 TR_STRDOIOCTL_WAIT, 6112 "strdoioctl sleeps - IOCWAIT"); 6113 cv_rval = str_cv_wait(&stp->sd_iocmonitor, &stp->sd_lock, 6114 STRTIMOUT, sigflag); 6115 if (cv_rval <= 0) { 6116 if (cv_rval == 0) { 6117 error = EINTR; 6118 } else { 6119 if (flag & STR_NOERROR) { 6120 /* 6121 * Terminating current ioctl in 6122 * progress -- assume it got lost and 6123 * wake up the other thread so that the 6124 * operation completes. 6125 */ 6126 if (!(stp->sd_flag & IOCWAITNE)) { 6127 stp->sd_flag |= IOCWAITNE; 6128 cv_broadcast(&stp->sd_monitor); 6129 } 6130 /* 6131 * Otherwise, there's a running 6132 * STR_NOERROR -- we have no choice 6133 * here but to wait forever (or until 6134 * interrupted). 6135 */ 6136 } else { 6137 /* 6138 * pending ioctl has caused 6139 * us to time out 6140 */ 6141 error = ETIME; 6142 } 6143 } 6144 } else if ((stp->sd_flag & errs)) { 6145 error = strgeterr(stp, errs, 0); 6146 } 6147 if (error) { 6148 mutex_exit(&stp->sd_lock); 6149 freemsg(bp); 6150 crfree(crp); 6151 return (error); 6152 } 6153 } 6154 6155 /* 6156 * Have control of ioctl mechanism. 6157 * Send down ioctl packet and wait for response. 6158 */ 6159 if (stp->sd_iocblk != (mblk_t *)-1) { 6160 freemsg(stp->sd_iocblk); 6161 } 6162 stp->sd_iocblk = NULL; 6163 6164 /* 6165 * If this is marked with 'noerror' (internal; mostly 6166 * I_{P,}{UN,}LINK), then make sure nobody else is able to get 6167 * in here by setting IOCWAITNE. 6168 */ 6169 waitflags = IOCWAIT; 6170 if (flag & STR_NOERROR) 6171 waitflags |= IOCWAITNE; 6172 6173 stp->sd_flag |= waitflags; 6174 6175 /* 6176 * Assign sequence number. 6177 */ 6178 iocbp->ioc_id = stp->sd_iocid = getiocseqno(); 6179 6180 mutex_exit(&stp->sd_lock); 6181 6182 TRACE_1(TR_FAC_STREAMS_FR, 6183 TR_STRDOIOCTL_PUT, "strdoioctl put: stp %p", stp); 6184 stream_willservice(stp); 6185 putnext(stp->sd_wrq, bp); 6186 stream_runservice(stp); 6187 6188 /* 6189 * Timed wait for acknowledgment. The wait time is limited by the 6190 * timeout value, which must be a positive integer (number of 6191 * milliseconds) to wait, or 0 (use default value of STRTIMOUT 6192 * milliseconds), or -1 (wait forever). 
This will be awakened 6193 * either by an ACK/NAK message arriving, the timer expiring, or 6194 * the timer expiring on another ioctl waiting for control of the 6195 * mechanism. 6196 */ 6197 waitioc: 6198 mutex_enter(&stp->sd_lock); 6199 6200 6201 /* 6202 * If the reply has already arrived, don't sleep. If awakened from 6203 * the sleep, fail only if the reply has not arrived by then. 6204 * Otherwise, process the reply. 6205 */ 6206 while (!stp->sd_iocblk) { 6207 clock_t cv_rval; 6208 6209 if (stp->sd_flag & errs) { 6210 error = strgeterr(stp, errs, 0); 6211 if (error != 0) { 6212 stp->sd_flag &= ~waitflags; 6213 cv_broadcast(&stp->sd_iocmonitor); 6214 mutex_exit(&stp->sd_lock); 6215 crfree(crp); 6216 return (error); 6217 } 6218 } 6219 6220 TRACE_0(TR_FAC_STREAMS_FR, 6221 TR_STRDOIOCTL_WAIT2, 6222 "strdoioctl sleeps awaiting reply"); 6223 ASSERT(error == 0); 6224 6225 cv_rval = str_cv_wait(&stp->sd_monitor, &stp->sd_lock, 6226 (strioc->ic_timout ? 6227 strioc->ic_timout * 1000 : STRTIMOUT), sigflag); 6228 6229 /* 6230 * There are four possible cases here: interrupt, timeout, 6231 * wakeup by IOCWAITNE (above), or wakeup by strrput_nondata (a 6232 * valid M_IOCTL reply). 6233 * 6234 * If we've been awakened by a STR_NOERROR ioctl on some other 6235 * thread, then sd_iocblk will still be NULL, and IOCWAITNE 6236 * will be set. Pretend as if we just timed out. Note that 6237 * this other thread waited at least STRTIMOUT before trying to 6238 * awaken our thread, so this is indistinguishable (even for 6239 * INFTIM) from the case where we failed with ETIME waiting on 6240 * IOCWAIT in the prior loop. 6241 */ 6242 if (cv_rval > 0 && !(flag & STR_NOERROR) && 6243 stp->sd_iocblk == NULL && (stp->sd_flag & IOCWAITNE)) { 6244 cv_rval = -1; 6245 } 6246 6247 /* 6248 * note: STR_NOERROR does not protect 6249 * us here.. use ic_timout < 0 6250 */ 6251 if (cv_rval <= 0) { 6252 if (cv_rval == 0) { 6253 error = EINTR; 6254 } else { 6255 error = ETIME; 6256 } 6257 /* 6258 * A message could have come in after we were scheduled 6259 * but before we were actually run. 6260 */ 6261 bp = stp->sd_iocblk; 6262 stp->sd_iocblk = NULL; 6263 if (bp != NULL) { 6264 if ((bp->b_datap->db_type == M_COPYIN) || 6265 (bp->b_datap->db_type == M_COPYOUT)) { 6266 mutex_exit(&stp->sd_lock); 6267 if (bp->b_cont) { 6268 freemsg(bp->b_cont); 6269 bp->b_cont = NULL; 6270 } 6271 bp->b_datap->db_type = M_IOCDATA; 6272 bp->b_wptr = bp->b_rptr + 6273 sizeof (struct copyresp); 6274 resp = (struct copyresp *)bp->b_rptr; 6275 resp->cp_rval = 6276 (caddr_t)1; /* failure */ 6277 stream_willservice(stp); 6278 putnext(stp->sd_wrq, bp); 6279 stream_runservice(stp); 6280 mutex_enter(&stp->sd_lock); 6281 } else { 6282 freemsg(bp); 6283 } 6284 } 6285 stp->sd_flag &= ~waitflags; 6286 cv_broadcast(&stp->sd_iocmonitor); 6287 mutex_exit(&stp->sd_lock); 6288 crfree(crp); 6289 return (error); 6290 } 6291 } 6292 bp = stp->sd_iocblk; 6293 /* 6294 * Note: it is strictly impossible to get here with sd_iocblk set to 6295 * -1. This is because the initial loop above doesn't allow any new 6296 * ioctls into the fray until all others have passed this point. 
6297 */ 6298 ASSERT(bp != NULL && bp != (mblk_t *)-1); 6299 TRACE_1(TR_FAC_STREAMS_FR, 6300 TR_STRDOIOCTL_ACK, "strdoioctl got reply: bp %p", bp); 6301 if ((bp->b_datap->db_type == M_IOCACK) || 6302 (bp->b_datap->db_type == M_IOCNAK)) { 6303 /* for detection of duplicate ioctl replies */ 6304 stp->sd_iocblk = (mblk_t *)-1; 6305 stp->sd_flag &= ~waitflags; 6306 cv_broadcast(&stp->sd_iocmonitor); 6307 mutex_exit(&stp->sd_lock); 6308 } else { 6309 /* 6310 * flags not cleared here because we're still doing 6311 * copy in/out for ioctl. 6312 */ 6313 stp->sd_iocblk = NULL; 6314 mutex_exit(&stp->sd_lock); 6315 } 6316 6317 6318 /* 6319 * Have received acknowledgment. 6320 */ 6321 6322 switch (bp->b_datap->db_type) { 6323 case M_IOCACK: 6324 /* 6325 * Positive ack. 6326 */ 6327 iocbp = (struct iocblk *)bp->b_rptr; 6328 6329 /* 6330 * Set error if indicated. 6331 */ 6332 if (iocbp->ioc_error) { 6333 error = iocbp->ioc_error; 6334 break; 6335 } 6336 6337 /* 6338 * Set return value. 6339 */ 6340 *rvalp = iocbp->ioc_rval; 6341 6342 /* 6343 * Data may have been returned in ACK message (ioc_count > 0). 6344 * If so, copy it out to the user's buffer. 6345 */ 6346 if (iocbp->ioc_count && !transparent) { 6347 if (error = getiocd(bp, strioc->ic_dp, copyflag)) 6348 break; 6349 } 6350 if (!transparent) { 6351 if (len) /* an M_COPYOUT was used with I_STR */ 6352 strioc->ic_len = len; 6353 else 6354 strioc->ic_len = (int)iocbp->ioc_count; 6355 } 6356 break; 6357 6358 case M_IOCNAK: 6359 /* 6360 * Negative ack. 6361 * 6362 * The only thing to do is set error as specified 6363 * in neg ack packet. 6364 */ 6365 iocbp = (struct iocblk *)bp->b_rptr; 6366 6367 error = (iocbp->ioc_error ? iocbp->ioc_error : EINVAL); 6368 break; 6369 6370 case M_COPYIN: 6371 /* 6372 * Driver or module has requested user ioctl data. 6373 */ 6374 reqp = (struct copyreq *)bp->b_rptr; 6375 6376 /* 6377 * M_COPYIN should *never* have a message attached, though 6378 * it's harmless if it does -- thus, panic on a DEBUG 6379 * kernel and just free it on a non-DEBUG build. 6380 */ 6381 ASSERT(bp->b_cont == NULL); 6382 if (bp->b_cont != NULL) { 6383 freemsg(bp->b_cont); 6384 bp->b_cont = NULL; 6385 } 6386 6387 error = putiocd(bp, reqp->cq_addr, flag, crp); 6388 if (error && bp->b_cont) { 6389 freemsg(bp->b_cont); 6390 bp->b_cont = NULL; 6391 } 6392 6393 bp->b_wptr = bp->b_rptr + sizeof (struct copyresp); 6394 bp->b_datap->db_type = M_IOCDATA; 6395 6396 mblk_setcred(bp, crp); 6397 DB_CPID(bp) = curproc->p_pid; 6398 resp = (struct copyresp *)bp->b_rptr; 6399 resp->cp_rval = (caddr_t)(uintptr_t)error; 6400 resp->cp_flag = (fflags & FMODELS); 6401 6402 stream_willservice(stp); 6403 putnext(stp->sd_wrq, bp); 6404 stream_runservice(stp); 6405 6406 if (error) { 6407 mutex_enter(&stp->sd_lock); 6408 stp->sd_flag &= ~waitflags; 6409 cv_broadcast(&stp->sd_iocmonitor); 6410 mutex_exit(&stp->sd_lock); 6411 crfree(crp); 6412 return (error); 6413 } 6414 6415 goto waitioc; 6416 6417 case M_COPYOUT: 6418 /* 6419 * Driver or module has ioctl data for a user. 
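 * The data to hand back sits in b_cont: getiocd() copies it to the
 * address named in the copyreq, and the message is then recycled as an
 * M_IOCDATA reply whose cp_rval carries the copy status so that the
 * driver or module can continue its copy in/out conversation.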
6420 */ 6421 reqp = (struct copyreq *)bp->b_rptr; 6422 ASSERT(bp->b_cont != NULL); 6423 6424 /* 6425 * Always (transparent or non-transparent ) 6426 * use the address specified in the request 6427 */ 6428 taddr = reqp->cq_addr; 6429 if (!transparent) 6430 len = (int)reqp->cq_size; 6431 6432 /* copyout data to the provided address */ 6433 error = getiocd(bp, taddr, copyflag); 6434 6435 freemsg(bp->b_cont); 6436 bp->b_cont = NULL; 6437 6438 bp->b_wptr = bp->b_rptr + sizeof (struct copyresp); 6439 bp->b_datap->db_type = M_IOCDATA; 6440 6441 mblk_setcred(bp, crp); 6442 DB_CPID(bp) = curproc->p_pid; 6443 resp = (struct copyresp *)bp->b_rptr; 6444 resp->cp_rval = (caddr_t)(uintptr_t)error; 6445 resp->cp_flag = (fflags & FMODELS); 6446 6447 stream_willservice(stp); 6448 putnext(stp->sd_wrq, bp); 6449 stream_runservice(stp); 6450 6451 if (error) { 6452 mutex_enter(&stp->sd_lock); 6453 stp->sd_flag &= ~waitflags; 6454 cv_broadcast(&stp->sd_iocmonitor); 6455 mutex_exit(&stp->sd_lock); 6456 crfree(crp); 6457 return (error); 6458 } 6459 goto waitioc; 6460 6461 default: 6462 ASSERT(0); 6463 mutex_enter(&stp->sd_lock); 6464 stp->sd_flag &= ~waitflags; 6465 cv_broadcast(&stp->sd_iocmonitor); 6466 mutex_exit(&stp->sd_lock); 6467 break; 6468 } 6469 6470 freemsg(bp); 6471 crfree(crp); 6472 return (error); 6473 } 6474 6475 /* 6476 * Send an M_CMD message downstream and wait for a reply. This is a ptools 6477 * special used to retrieve information from modules/drivers a stream without 6478 * being subjected to flow control or interfering with pending messages on the 6479 * stream (e.g. an ioctl in flight). 6480 */ 6481 int 6482 strdocmd(struct stdata *stp, struct strcmd *scp, cred_t *crp) 6483 { 6484 mblk_t *mp; 6485 struct cmdblk *cmdp; 6486 int error = 0; 6487 int errs = STRHUP|STRDERR|STWRERR|STPLEX; 6488 clock_t rval, timeout = STRTIMOUT; 6489 6490 if (scp->sc_len < 0 || scp->sc_len > sizeof (scp->sc_buf) || 6491 scp->sc_timeout < -1) 6492 return (EINVAL); 6493 6494 if (scp->sc_timeout > 0) 6495 timeout = scp->sc_timeout * MILLISEC; 6496 6497 if ((mp = allocb_cred(sizeof (struct cmdblk), crp)) == NULL) 6498 return (ENOMEM); 6499 6500 crhold(crp); 6501 6502 cmdp = (struct cmdblk *)mp->b_wptr; 6503 cmdp->cb_cr = crp; 6504 cmdp->cb_cmd = scp->sc_cmd; 6505 cmdp->cb_len = scp->sc_len; 6506 cmdp->cb_error = 0; 6507 mp->b_wptr += sizeof (struct cmdblk); 6508 6509 DB_TYPE(mp) = M_CMD; 6510 DB_CPID(mp) = curproc->p_pid; 6511 6512 /* 6513 * Copy in the payload. 6514 */ 6515 if (cmdp->cb_len > 0) { 6516 mp->b_cont = allocb_cred(sizeof (scp->sc_buf), crp); 6517 if (mp->b_cont == NULL) { 6518 error = ENOMEM; 6519 goto out; 6520 } 6521 6522 /* cb_len comes from sc_len, which has already been checked */ 6523 ASSERT(cmdp->cb_len <= sizeof (scp->sc_buf)); 6524 (void) bcopy(scp->sc_buf, mp->b_cont->b_wptr, cmdp->cb_len); 6525 mp->b_cont->b_wptr += cmdp->cb_len; 6526 DB_CPID(mp->b_cont) = curproc->p_pid; 6527 } 6528 6529 /* 6530 * Since this mechanism is strictly for ptools, and since only one 6531 * process can be grabbed at a time, we simply fail if there's 6532 * currently an operation pending. 6533 */ 6534 mutex_enter(&stp->sd_lock); 6535 if (stp->sd_flag & STRCMDWAIT) { 6536 mutex_exit(&stp->sd_lock); 6537 error = EBUSY; 6538 goto out; 6539 } 6540 stp->sd_flag |= STRCMDWAIT; 6541 ASSERT(stp->sd_cmdblk == NULL); 6542 mutex_exit(&stp->sd_lock); 6543 6544 putnext(stp->sd_wrq, mp); 6545 mp = NULL; 6546 6547 /* 6548 * Timed wait for acknowledgment. If the reply has already arrived, 6549 * don't sleep. 
If awakened from the sleep, fail only if the reply 6550 * has not arrived by then. Otherwise, process the reply. 6551 */ 6552 mutex_enter(&stp->sd_lock); 6553 while (stp->sd_cmdblk == NULL) { 6554 if (stp->sd_flag & errs) { 6555 if ((error = strgeterr(stp, errs, 0)) != 0) 6556 goto waitout; 6557 } 6558 6559 rval = str_cv_wait(&stp->sd_monitor, &stp->sd_lock, timeout, 0); 6560 if (stp->sd_cmdblk != NULL) 6561 break; 6562 6563 if (rval <= 0) { 6564 error = (rval == 0) ? EINTR : ETIME; 6565 goto waitout; 6566 } 6567 } 6568 6569 /* 6570 * We received a reply. 6571 */ 6572 mp = stp->sd_cmdblk; 6573 stp->sd_cmdblk = NULL; 6574 ASSERT(mp != NULL && DB_TYPE(mp) == M_CMD); 6575 ASSERT(stp->sd_flag & STRCMDWAIT); 6576 stp->sd_flag &= ~STRCMDWAIT; 6577 mutex_exit(&stp->sd_lock); 6578 6579 cmdp = (struct cmdblk *)mp->b_rptr; 6580 if ((error = cmdp->cb_error) != 0) 6581 goto out; 6582 6583 /* 6584 * Data may have been returned in the reply (cb_len > 0). 6585 * If so, copy it out to the user's buffer. 6586 */ 6587 if (cmdp->cb_len > 0) { 6588 if (mp->b_cont == NULL || MBLKL(mp->b_cont) < cmdp->cb_len) { 6589 error = EPROTO; 6590 goto out; 6591 } 6592 6593 cmdp->cb_len = MIN(cmdp->cb_len, sizeof (scp->sc_buf)); 6594 (void) bcopy(mp->b_cont->b_rptr, scp->sc_buf, cmdp->cb_len); 6595 } 6596 scp->sc_len = cmdp->cb_len; 6597 out: 6598 freemsg(mp); 6599 crfree(crp); 6600 return (error); 6601 waitout: 6602 ASSERT(stp->sd_cmdblk == NULL); 6603 stp->sd_flag &= ~STRCMDWAIT; 6604 mutex_exit(&stp->sd_lock); 6605 crfree(crp); 6606 return (error); 6607 } 6608 6609 /* 6610 * For the SunOS keyboard driver. 6611 * Return the next available "ioctl" sequence number. 6612 * Exported, so that streams modules can send "ioctl" messages 6613 * downstream from their open routine. 6614 */ 6615 int 6616 getiocseqno(void) 6617 { 6618 int i; 6619 6620 mutex_enter(&strresources); 6621 i = ++ioc_id; 6622 mutex_exit(&strresources); 6623 return (i); 6624 } 6625 6626 /* 6627 * Get the next message from the read queue. If the message is 6628 * priority, STRPRI will have been set by strrput(). This flag 6629 * should be reset only when the entire message at the front of the 6630 * queue has been consumed. 6631 * 6632 * NOTE: strgetmsg and kstrgetmsg have much of the logic in common. 
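 *
 * The user-level interface to this routine is getmsg(3C).  A minimal,
 * illustrative sketch (buffer names and sizes are arbitrary):
 *
 *	struct strbuf ctl, dat;
 *	char cbuf[64], dbuf[1024];
 *	int flags = 0;			or RS_HIPRI
 *	int ret;
 *
 *	ctl.maxlen = sizeof (cbuf);	ctl.buf = cbuf;
 *	dat.maxlen = sizeof (dbuf);	dat.buf = dbuf;
 *	ret = getmsg(fd, &ctl, &dat, &flags);
 *
 * On return ctl.len and dat.len are -1 if no such part was present,
 * and ret carries MORECTL and/or MOREDATA if a part did not fit.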
6633 */ 6634 int 6635 strgetmsg( 6636 struct vnode *vp, 6637 struct strbuf *mctl, 6638 struct strbuf *mdata, 6639 unsigned char *prip, 6640 int *flagsp, 6641 int fmode, 6642 rval_t *rvp) 6643 { 6644 struct stdata *stp; 6645 mblk_t *bp, *nbp; 6646 mblk_t *savemp = NULL; 6647 mblk_t *savemptail = NULL; 6648 uint_t old_sd_flag; 6649 int flg; 6650 int more = 0; 6651 int error = 0; 6652 char first = 1; 6653 uint_t mark; /* Contains MSG*MARK and _LASTMARK */ 6654 #define _LASTMARK 0x8000 /* Distinct from MSG*MARK */ 6655 unsigned char pri = 0; 6656 queue_t *q; 6657 int pr = 0; /* Partial read successful */ 6658 struct uio uios; 6659 struct uio *uiop = &uios; 6660 struct iovec iovs; 6661 unsigned char type; 6662 6663 TRACE_1(TR_FAC_STREAMS_FR, TR_STRGETMSG_ENTER, 6664 "strgetmsg:%p", vp); 6665 6666 ASSERT(vp->v_stream); 6667 stp = vp->v_stream; 6668 rvp->r_val1 = 0; 6669 6670 mutex_enter(&stp->sd_lock); 6671 6672 if ((error = i_straccess(stp, JCREAD)) != 0) { 6673 mutex_exit(&stp->sd_lock); 6674 return (error); 6675 } 6676 6677 if (stp->sd_flag & (STRDERR|STPLEX)) { 6678 error = strgeterr(stp, STRDERR|STPLEX, 0); 6679 if (error != 0) { 6680 mutex_exit(&stp->sd_lock); 6681 return (error); 6682 } 6683 } 6684 mutex_exit(&stp->sd_lock); 6685 6686 switch (*flagsp) { 6687 case MSG_HIPRI: 6688 if (*prip != 0) 6689 return (EINVAL); 6690 break; 6691 6692 case MSG_ANY: 6693 case MSG_BAND: 6694 break; 6695 6696 default: 6697 return (EINVAL); 6698 } 6699 /* 6700 * Setup uio and iov for data part 6701 */ 6702 iovs.iov_base = mdata->buf; 6703 iovs.iov_len = mdata->maxlen; 6704 uios.uio_iov = &iovs; 6705 uios.uio_iovcnt = 1; 6706 uios.uio_loffset = 0; 6707 uios.uio_segflg = UIO_USERSPACE; 6708 uios.uio_fmode = 0; 6709 uios.uio_extflg = UIO_COPY_CACHED; 6710 uios.uio_resid = mdata->maxlen; 6711 uios.uio_offset = 0; 6712 6713 q = _RD(stp->sd_wrq); 6714 mutex_enter(&stp->sd_lock); 6715 old_sd_flag = stp->sd_flag; 6716 mark = 0; 6717 for (;;) { 6718 int done = 0; 6719 mblk_t *q_first = q->q_first; 6720 6721 /* 6722 * Get the next message of appropriate priority 6723 * from the stream head. If the caller is interested 6724 * in band or hipri messages, then they should already 6725 * be enqueued at the stream head. On the other hand 6726 * if the caller wants normal (band 0) messages, they 6727 * might be deferred in a synchronous stream and they 6728 * will need to be pulled up. 6729 * 6730 * After we have dequeued a message, we might find that 6731 * it was a deferred M_SIG that was enqueued at the 6732 * stream head. It must now be posted as part of the 6733 * read by calling strsignal_nolock(). 6734 * 6735 * Also note that strrput does not enqueue an M_PCSIG, 6736 * and there cannot be more than one hipri message, 6737 * so there was no need to have the M_PCSIG case. 6738 * 6739 * At some time it might be nice to try and wrap the 6740 * functionality of kstrgetmsg() and strgetmsg() into 6741 * a common routine so to reduce the amount of replicated 6742 * code (since they are extremely similar). 
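 *
 * The MSG_HIPRI, MSG_BAND and MSG_ANY cases below correspond directly
 * to the flags accepted by getpmsg(3C).  For example, to read only
 * messages of at least priority band 2 (illustrative only):
 *
 *	int band = 2;
 *	int flags = MSG_BAND;
 *
 *	if (getpmsg(fd, &ctl, &dat, &band, &flags) < 0)
 *		handle the error
 *
 * which reaches this routine with *prip == 2 and *flagsp == MSG_BAND.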
6743 */ 6744 if (!(*flagsp & (MSG_HIPRI|MSG_BAND))) { 6745 /* Asking for normal, band0 data */ 6746 bp = strget(stp, q, uiop, first, &error); 6747 ASSERT(MUTEX_HELD(&stp->sd_lock)); 6748 if (bp != NULL) { 6749 ASSERT(!(bp->b_datap->db_flags & DBLK_UIOA)); 6750 if (bp->b_datap->db_type == M_SIG) { 6751 strsignal_nolock(stp, *bp->b_rptr, 6752 (int32_t)bp->b_band); 6753 continue; 6754 } else { 6755 break; 6756 } 6757 } 6758 if (error != 0) { 6759 goto getmout; 6760 } 6761 6762 /* 6763 * We can't depend on the value of STRPRI here because 6764 * the stream head may be in transit. Therefore, we 6765 * must look at the type of the first message to 6766 * determine if a high priority messages is waiting 6767 */ 6768 } else if ((*flagsp & MSG_HIPRI) && q_first != NULL && 6769 q_first->b_datap->db_type >= QPCTL && 6770 (bp = getq_noenab(q, 0)) != NULL) { 6771 /* Asked for HIPRI and got one */ 6772 ASSERT(bp->b_datap->db_type >= QPCTL); 6773 break; 6774 } else if ((*flagsp & MSG_BAND) && q_first != NULL && 6775 ((q_first->b_band >= *prip) || 6776 q_first->b_datap->db_type >= QPCTL) && 6777 (bp = getq_noenab(q, 0)) != NULL) { 6778 /* 6779 * Asked for at least band "prip" and got either at 6780 * least that band or a hipri message. 6781 */ 6782 ASSERT(bp->b_band >= *prip || 6783 bp->b_datap->db_type >= QPCTL); 6784 if (bp->b_datap->db_type == M_SIG) { 6785 strsignal_nolock(stp, *bp->b_rptr, 6786 (int32_t)bp->b_band); 6787 continue; 6788 } else { 6789 break; 6790 } 6791 } 6792 6793 /* No data. Time to sleep? */ 6794 qbackenable(q, 0); 6795 6796 /* 6797 * If STRHUP or STREOF, return 0 length control and data. 6798 * If resid is 0, then a read(fd,buf,0) was done. Do not 6799 * sleep to satisfy this request because by default we have 6800 * zero bytes to return. 6801 */ 6802 if ((stp->sd_flag & (STRHUP|STREOF)) || (mctl->maxlen == 0 && 6803 mdata->maxlen == 0)) { 6804 mctl->len = mdata->len = 0; 6805 *flagsp = 0; 6806 mutex_exit(&stp->sd_lock); 6807 return (0); 6808 } 6809 TRACE_2(TR_FAC_STREAMS_FR, TR_STRGETMSG_WAIT, 6810 "strgetmsg calls strwaitq:%p, %p", 6811 vp, uiop); 6812 if (((error = strwaitq(stp, GETWAIT, (ssize_t)0, fmode, -1, 6813 &done)) != 0) || done) { 6814 TRACE_2(TR_FAC_STREAMS_FR, TR_STRGETMSG_DONE, 6815 "strgetmsg error or done:%p, %p", 6816 vp, uiop); 6817 mutex_exit(&stp->sd_lock); 6818 return (error); 6819 } 6820 TRACE_2(TR_FAC_STREAMS_FR, TR_STRGETMSG_AWAKE, 6821 "strgetmsg awakes:%p, %p", vp, uiop); 6822 if ((error = i_straccess(stp, JCREAD)) != 0) { 6823 mutex_exit(&stp->sd_lock); 6824 return (error); 6825 } 6826 first = 0; 6827 } 6828 ASSERT(bp != NULL); 6829 /* 6830 * Extract any mark information. If the message is not completely 6831 * consumed this information will be put in the mblk 6832 * that is putback. 6833 * If MSGMARKNEXT is set and the message is completely consumed 6834 * the STRATMARK flag will be set below. Likewise, if 6835 * MSGNOTMARKNEXT is set and the message is 6836 * completely consumed STRNOTATMARK will be set. 
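 *
 * The STRATMARK/STRNOTATMARK state maintained from this information is
 * what a process ultimately observes through SIOCATMARK, e.g.
 * (illustrative only):
 *
 *	int atmark = 0;
 *
 *	if (ioctl(fd, SIOCATMARK, &atmark) == 0 && atmark)
 *		the read pointer is at the mark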
6837 */ 6838 mark = bp->b_flag & (MSGMARK | MSGMARKNEXT | MSGNOTMARKNEXT); 6839 ASSERT((mark & (MSGMARKNEXT|MSGNOTMARKNEXT)) != 6840 (MSGMARKNEXT|MSGNOTMARKNEXT)); 6841 if (mark != 0 && bp == stp->sd_mark) { 6842 mark |= _LASTMARK; 6843 stp->sd_mark = NULL; 6844 } 6845 /* 6846 * keep track of the original message type and priority 6847 */ 6848 pri = bp->b_band; 6849 type = bp->b_datap->db_type; 6850 if (type == M_PASSFP) { 6851 if ((mark & _LASTMARK) && (stp->sd_mark == NULL)) 6852 stp->sd_mark = bp; 6853 bp->b_flag |= mark & ~_LASTMARK; 6854 putback(stp, q, bp, pri); 6855 qbackenable(q, pri); 6856 mutex_exit(&stp->sd_lock); 6857 return (EBADMSG); 6858 } 6859 ASSERT(type != M_SIG); 6860 6861 /* 6862 * Set this flag so strrput will not generate signals. Need to 6863 * make sure this flag is cleared before leaving this routine 6864 * else signals will stop being sent. 6865 */ 6866 stp->sd_flag |= STRGETINPROG; 6867 mutex_exit(&stp->sd_lock); 6868 6869 if (STREAM_NEEDSERVICE(stp)) 6870 stream_runservice(stp); 6871 6872 /* 6873 * Set HIPRI flag if message is priority. 6874 */ 6875 if (type >= QPCTL) 6876 flg = MSG_HIPRI; 6877 else 6878 flg = MSG_BAND; 6879 6880 /* 6881 * First process PROTO or PCPROTO blocks, if any. 6882 */ 6883 if (mctl->maxlen >= 0 && type != M_DATA) { 6884 size_t n, bcnt; 6885 char *ubuf; 6886 6887 bcnt = mctl->maxlen; 6888 ubuf = mctl->buf; 6889 while (bp != NULL && bp->b_datap->db_type != M_DATA) { 6890 if ((n = MIN(bcnt, bp->b_wptr - bp->b_rptr)) != 0 && 6891 copyout(bp->b_rptr, ubuf, n)) { 6892 error = EFAULT; 6893 mutex_enter(&stp->sd_lock); 6894 /* 6895 * clear stream head pri flag based on 6896 * first message type 6897 */ 6898 if (type >= QPCTL) { 6899 ASSERT(type == M_PCPROTO); 6900 stp->sd_flag &= ~STRPRI; 6901 } 6902 more = 0; 6903 freemsg(bp); 6904 goto getmout; 6905 } 6906 ubuf += n; 6907 bp->b_rptr += n; 6908 if (bp->b_rptr >= bp->b_wptr) { 6909 nbp = bp; 6910 bp = bp->b_cont; 6911 freeb(nbp); 6912 } 6913 ASSERT(n <= bcnt); 6914 bcnt -= n; 6915 if (bcnt == 0) 6916 break; 6917 } 6918 mctl->len = mctl->maxlen - bcnt; 6919 } else 6920 mctl->len = -1; 6921 6922 if (bp && bp->b_datap->db_type != M_DATA) { 6923 /* 6924 * More PROTO blocks in msg. 6925 */ 6926 more |= MORECTL; 6927 savemp = bp; 6928 while (bp && bp->b_datap->db_type != M_DATA) { 6929 savemptail = bp; 6930 bp = bp->b_cont; 6931 } 6932 savemptail->b_cont = NULL; 6933 } 6934 6935 /* 6936 * Now process DATA blocks, if any. 6937 */ 6938 if (mdata->maxlen >= 0 && bp) { 6939 /* 6940 * struiocopyout will consume a potential zero-length 6941 * M_DATA even if uio_resid is zero. 6942 */ 6943 size_t oldresid = uiop->uio_resid; 6944 6945 bp = struiocopyout(bp, uiop, &error); 6946 if (error != 0) { 6947 mutex_enter(&stp->sd_lock); 6948 /* 6949 * clear stream head hi pri flag based on 6950 * first message 6951 */ 6952 if (type >= QPCTL) { 6953 ASSERT(type == M_PCPROTO); 6954 stp->sd_flag &= ~STRPRI; 6955 } 6956 more = 0; 6957 freemsg(savemp); 6958 goto getmout; 6959 } 6960 /* 6961 * (pr == 1) indicates a partial read. 
6962 */ 6963 if (oldresid > uiop->uio_resid) 6964 pr = 1; 6965 mdata->len = mdata->maxlen - uiop->uio_resid; 6966 } else 6967 mdata->len = -1; 6968 6969 if (bp) { /* more data blocks in msg */ 6970 more |= MOREDATA; 6971 if (savemp) 6972 savemptail->b_cont = bp; 6973 else 6974 savemp = bp; 6975 } 6976 6977 mutex_enter(&stp->sd_lock); 6978 if (savemp) { 6979 if (pr && (savemp->b_datap->db_type == M_DATA) && 6980 msgnodata(savemp)) { 6981 /* 6982 * Avoid queuing a zero-length tail part of 6983 * a message. pr=1 indicates that we read some of 6984 * the message. 6985 */ 6986 freemsg(savemp); 6987 more &= ~MOREDATA; 6988 /* 6989 * clear stream head hi pri flag based on 6990 * first message 6991 */ 6992 if (type >= QPCTL) { 6993 ASSERT(type == M_PCPROTO); 6994 stp->sd_flag &= ~STRPRI; 6995 } 6996 } else { 6997 savemp->b_band = pri; 6998 /* 6999 * If the first message was HIPRI and the one we're 7000 * putting back isn't, then clear STRPRI, otherwise 7001 * set STRPRI again. Note that we must set STRPRI 7002 * again since the flush logic in strrput_nondata() 7003 * may have cleared it while we had sd_lock dropped. 7004 */ 7005 if (type >= QPCTL) { 7006 ASSERT(type == M_PCPROTO); 7007 if (queclass(savemp) < QPCTL) 7008 stp->sd_flag &= ~STRPRI; 7009 else 7010 stp->sd_flag |= STRPRI; 7011 } else if (queclass(savemp) >= QPCTL) { 7012 /* 7013 * The first message was not a HIPRI message, 7014 * but the one we are about to putback is. 7015 * For simplicitly, we do not allow for HIPRI 7016 * messages to be embedded in the message 7017 * body, so just force it to same type as 7018 * first message. 7019 */ 7020 ASSERT(type == M_DATA || type == M_PROTO); 7021 ASSERT(savemp->b_datap->db_type == M_PCPROTO); 7022 savemp->b_datap->db_type = type; 7023 } 7024 if (mark != 0) { 7025 savemp->b_flag |= mark & ~_LASTMARK; 7026 if ((mark & _LASTMARK) && 7027 (stp->sd_mark == NULL)) { 7028 /* 7029 * If another marked message arrived 7030 * while sd_lock was not held sd_mark 7031 * would be non-NULL. 7032 */ 7033 stp->sd_mark = savemp; 7034 } 7035 } 7036 putback(stp, q, savemp, pri); 7037 } 7038 } else { 7039 /* 7040 * The complete message was consumed. 7041 * 7042 * If another M_PCPROTO arrived while sd_lock was not held 7043 * it would have been discarded since STRPRI was still set. 7044 * 7045 * Move the MSG*MARKNEXT information 7046 * to the stream head just in case 7047 * the read queue becomes empty. 7048 * clear stream head hi pri flag based on 7049 * first message 7050 * 7051 * If the stream head was at the mark 7052 * (STRATMARK) before we dropped sd_lock above 7053 * and some data was consumed then we have 7054 * moved past the mark thus STRATMARK is 7055 * cleared. However, if a message arrived in 7056 * strrput during the copyout above causing 7057 * STRATMARK to be set we can not clear that 7058 * flag. 
7059 */ 7060 if (type >= QPCTL) { 7061 ASSERT(type == M_PCPROTO); 7062 stp->sd_flag &= ~STRPRI; 7063 } 7064 if (mark & (MSGMARKNEXT|MSGNOTMARKNEXT|MSGMARK)) { 7065 if (mark & MSGMARKNEXT) { 7066 stp->sd_flag &= ~STRNOTATMARK; 7067 stp->sd_flag |= STRATMARK; 7068 } else if (mark & MSGNOTMARKNEXT) { 7069 stp->sd_flag &= ~STRATMARK; 7070 stp->sd_flag |= STRNOTATMARK; 7071 } else { 7072 stp->sd_flag &= ~(STRATMARK|STRNOTATMARK); 7073 } 7074 } else if (pr && (old_sd_flag & STRATMARK)) { 7075 stp->sd_flag &= ~STRATMARK; 7076 } 7077 } 7078 7079 *flagsp = flg; 7080 *prip = pri; 7081 7082 /* 7083 * Getmsg cleanup processing - if the state of the queue has changed 7084 * some signals may need to be sent and/or poll awakened. 7085 */ 7086 getmout: 7087 qbackenable(q, pri); 7088 7089 /* 7090 * We dropped the stream head lock above. Send all M_SIG messages 7091 * before processing stream head for SIGPOLL messages. 7092 */ 7093 ASSERT(MUTEX_HELD(&stp->sd_lock)); 7094 while ((bp = q->q_first) != NULL && 7095 (bp->b_datap->db_type == M_SIG)) { 7096 /* 7097 * sd_lock is held so the content of the read queue can not 7098 * change. 7099 */ 7100 bp = getq(q); 7101 ASSERT(bp != NULL && bp->b_datap->db_type == M_SIG); 7102 7103 strsignal_nolock(stp, *bp->b_rptr, (int32_t)bp->b_band); 7104 mutex_exit(&stp->sd_lock); 7105 freemsg(bp); 7106 if (STREAM_NEEDSERVICE(stp)) 7107 stream_runservice(stp); 7108 mutex_enter(&stp->sd_lock); 7109 } 7110 7111 /* 7112 * stream head cannot change while we make the determination 7113 * whether or not to send a signal. Drop the flag to allow strrput 7114 * to send firstmsgsigs again. 7115 */ 7116 stp->sd_flag &= ~STRGETINPROG; 7117 7118 /* 7119 * If the type of message at the front of the queue changed 7120 * due to the receive the appropriate signals and pollwakeup events 7121 * are generated. The type of changes are: 7122 * Processed a hipri message, q_first is not hipri. 7123 * Processed a band X message, and q_first is band Y. 7124 * The generated signals and pollwakeups are identical to what 7125 * strrput() generates should the message that is now on q_first 7126 * arrive to an empty read queue. 7127 * 7128 * Note: only strrput will send a signal for a hipri message. 7129 */ 7130 if ((bp = q->q_first) != NULL && !(stp->sd_flag & STRPRI)) { 7131 strsigset_t signals = 0; 7132 strpollset_t pollwakeups = 0; 7133 7134 if (flg & MSG_HIPRI) { 7135 /* 7136 * Removed a hipri message. Regular data at 7137 * the front of the queue. 7138 */ 7139 if (bp->b_band == 0) { 7140 signals = S_INPUT | S_RDNORM; 7141 pollwakeups = POLLIN | POLLRDNORM; 7142 } else { 7143 signals = S_INPUT | S_RDBAND; 7144 pollwakeups = POLLIN | POLLRDBAND; 7145 } 7146 } else if (pri != bp->b_band) { 7147 /* 7148 * The band is different for the new q_first. 
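 *
 * Note that these S_* events only result in a SIGPOLL for processes
 * that registered interest with I_SETSIG, e.g. (illustrative only):
 *
 *	if (ioctl(fd, I_SETSIG, S_INPUT | S_RDBAND) < 0)
 *		handle the error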
7149 */ 7150 if (bp->b_band == 0) { 7151 signals = S_RDNORM; 7152 pollwakeups = POLLIN | POLLRDNORM; 7153 } else { 7154 signals = S_RDBAND; 7155 pollwakeups = POLLIN | POLLRDBAND; 7156 } 7157 } 7158 7159 if (pollwakeups != 0) { 7160 if (pollwakeups == (POLLIN | POLLRDNORM)) { 7161 if (!(stp->sd_rput_opt & SR_POLLIN)) 7162 goto no_pollwake; 7163 stp->sd_rput_opt &= ~SR_POLLIN; 7164 } 7165 mutex_exit(&stp->sd_lock); 7166 pollwakeup(&stp->sd_pollist, pollwakeups); 7167 mutex_enter(&stp->sd_lock); 7168 } 7169 no_pollwake: 7170 7171 if (stp->sd_sigflags & signals) 7172 strsendsig(stp->sd_siglist, signals, bp->b_band, 0); 7173 } 7174 mutex_exit(&stp->sd_lock); 7175 7176 rvp->r_val1 = more; 7177 return (error); 7178 #undef _LASTMARK 7179 } 7180 7181 /* 7182 * Get the next message from the read queue. If the message is 7183 * priority, STRPRI will have been set by strrput(). This flag 7184 * should be reset only when the entire message at the front of the 7185 * queue as been consumed. 7186 * 7187 * If uiop is NULL all data is returned in mctlp. 7188 * Note that a NULL uiop implies that FNDELAY and FNONBLOCK are assumed 7189 * not enabled. 7190 * The timeout parameter is in milliseconds; -1 for infinity. 7191 * This routine handles the consolidation private flags: 7192 * MSG_IGNERROR Ignore any stream head error except STPLEX. 7193 * MSG_DELAYERROR Defer the error check until the queue is empty. 7194 * MSG_HOLDSIG Hold signals while waiting for data. 7195 * MSG_IPEEK Only peek at messages. 7196 * MSG_DISCARDTAIL Discard the tail M_DATA part of the message 7197 * that doesn't fit. 7198 * MSG_NOMARK If the message is marked leave it on the queue. 7199 * 7200 * NOTE: strgetmsg and kstrgetmsg have much of the logic in common. 7201 */ 7202 int 7203 kstrgetmsg( 7204 struct vnode *vp, 7205 mblk_t **mctlp, 7206 struct uio *uiop, 7207 unsigned char *prip, 7208 int *flagsp, 7209 clock_t timout, 7210 rval_t *rvp) 7211 { 7212 struct stdata *stp; 7213 mblk_t *bp, *nbp; 7214 mblk_t *savemp = NULL; 7215 mblk_t *savemptail = NULL; 7216 int flags; 7217 uint_t old_sd_flag; 7218 int flg; 7219 int more = 0; 7220 int error = 0; 7221 char first = 1; 7222 uint_t mark; /* Contains MSG*MARK and _LASTMARK */ 7223 #define _LASTMARK 0x8000 /* Distinct from MSG*MARK */ 7224 unsigned char pri = 0; 7225 queue_t *q; 7226 int pr = 0; /* Partial read successful */ 7227 unsigned char type; 7228 7229 TRACE_1(TR_FAC_STREAMS_FR, TR_KSTRGETMSG_ENTER, 7230 "kstrgetmsg:%p", vp); 7231 7232 ASSERT(vp->v_stream); 7233 stp = vp->v_stream; 7234 rvp->r_val1 = 0; 7235 7236 mutex_enter(&stp->sd_lock); 7237 7238 if ((error = i_straccess(stp, JCREAD)) != 0) { 7239 mutex_exit(&stp->sd_lock); 7240 return (error); 7241 } 7242 7243 flags = *flagsp; 7244 if (stp->sd_flag & (STRDERR|STPLEX)) { 7245 if ((stp->sd_flag & STPLEX) || 7246 (flags & (MSG_IGNERROR|MSG_DELAYERROR)) == 0) { 7247 error = strgeterr(stp, STRDERR|STPLEX, 7248 (flags & MSG_IPEEK)); 7249 if (error != 0) { 7250 mutex_exit(&stp->sd_lock); 7251 return (error); 7252 } 7253 } 7254 } 7255 mutex_exit(&stp->sd_lock); 7256 7257 switch (flags & (MSG_HIPRI|MSG_ANY|MSG_BAND)) { 7258 case MSG_HIPRI: 7259 if (*prip != 0) 7260 return (EINVAL); 7261 break; 7262 7263 case MSG_ANY: 7264 case MSG_BAND: 7265 break; 7266 7267 default: 7268 return (EINVAL); 7269 } 7270 7271 retry: 7272 q = _RD(stp->sd_wrq); 7273 mutex_enter(&stp->sd_lock); 7274 old_sd_flag = stp->sd_flag; 7275 mark = 0; 7276 for (;;) { 7277 int done = 0; 7278 int waitflag; 7279 int fmode; 7280 mblk_t *q_first = q->q_first; 7281 7282 /* 7283 
* This section of the code operates just like the code 7284 * in strgetmsg(). There is a comment there about what 7285 * is going on here. 7286 */ 7287 if (!(flags & (MSG_HIPRI|MSG_BAND))) { 7288 /* Asking for normal, band0 data */ 7289 bp = strget(stp, q, uiop, first, &error); 7290 ASSERT(MUTEX_HELD(&stp->sd_lock)); 7291 if (bp != NULL) { 7292 if (bp->b_datap->db_type == M_SIG) { 7293 strsignal_nolock(stp, *bp->b_rptr, 7294 (int32_t)bp->b_band); 7295 continue; 7296 } else { 7297 break; 7298 } 7299 } 7300 if (error != 0) { 7301 goto getmout; 7302 } 7303 /* 7304 * We can't depend on the value of STRPRI here because 7305 * the stream head may be in transit. Therefore, we 7306 * must look at the type of the first message to 7307 * determine if a high priority message is waiting 7308 */ 7309 } else if ((flags & MSG_HIPRI) && q_first != NULL && 7310 q_first->b_datap->db_type >= QPCTL && 7311 (bp = getq_noenab(q, 0)) != NULL) { 7312 ASSERT(bp->b_datap->db_type >= QPCTL); 7313 break; 7314 } else if ((flags & MSG_BAND) && q_first != NULL && 7315 ((q_first->b_band >= *prip) || 7316 q_first->b_datap->db_type >= QPCTL) && 7317 (bp = getq_noenab(q, 0)) != NULL) { 7318 /* 7319 * Asked for at least band "prip" and got either at 7320 * least that band or a hipri message. 7321 */ 7322 ASSERT(bp->b_band >= *prip || 7323 bp->b_datap->db_type >= QPCTL); 7324 if (bp->b_datap->db_type == M_SIG) { 7325 strsignal_nolock(stp, *bp->b_rptr, 7326 (int32_t)bp->b_band); 7327 continue; 7328 } else { 7329 break; 7330 } 7331 } 7332 7333 /* No data. Time to sleep? */ 7334 qbackenable(q, 0); 7335 7336 /* 7337 * Delayed error notification? 7338 */ 7339 if ((stp->sd_flag & (STRDERR|STPLEX)) && 7340 (flags & (MSG_IGNERROR|MSG_DELAYERROR)) == MSG_DELAYERROR) { 7341 error = strgeterr(stp, STRDERR|STPLEX, 7342 (flags & MSG_IPEEK)); 7343 if (error != 0) { 7344 mutex_exit(&stp->sd_lock); 7345 return (error); 7346 } 7347 } 7348 7349 /* 7350 * If STRHUP or STREOF, return 0 length control and data. 7351 * If a read(fd,buf,0) has been done, do not sleep, just 7352 * return. 7353 * 7354 * If mctlp == NULL and uiop == NULL, then the code will 7355 * do the strwaitq. This is an understood way of saying 7356 * sleep "polling" until a message is received. 
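 *
 * A minimal sketch of an in-kernel caller that blocks for the next
 * message with no uio, so everything comes back as an mblk chain
 * (an assumption about a typical caller, not code taken from one;
 * error handling omitted):
 *
 *	mblk_t *mctl = NULL;
 *	unsigned char pri = 0;
 *	int flags = MSG_ANY;
 *	rval_t rval;
 *
 *	error = kstrgetmsg(vp, &mctl, NULL, &pri, &flags, -1, &rval);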
7357 */ 7358 if ((stp->sd_flag & (STRHUP|STREOF)) || 7359 (uiop != NULL && uiop->uio_resid == 0)) { 7360 if (mctlp != NULL) 7361 *mctlp = NULL; 7362 *flagsp = 0; 7363 mutex_exit(&stp->sd_lock); 7364 return (0); 7365 } 7366 7367 waitflag = GETWAIT; 7368 if (flags & 7369 (MSG_HOLDSIG|MSG_IGNERROR|MSG_IPEEK|MSG_DELAYERROR)) { 7370 if (flags & MSG_HOLDSIG) 7371 waitflag |= STR_NOSIG; 7372 if (flags & MSG_IGNERROR) 7373 waitflag |= STR_NOERROR; 7374 if (flags & MSG_IPEEK) 7375 waitflag |= STR_PEEK; 7376 if (flags & MSG_DELAYERROR) 7377 waitflag |= STR_DELAYERR; 7378 } 7379 if (uiop != NULL) 7380 fmode = uiop->uio_fmode; 7381 else 7382 fmode = 0; 7383 7384 TRACE_2(TR_FAC_STREAMS_FR, TR_KSTRGETMSG_WAIT, 7385 "kstrgetmsg calls strwaitq:%p, %p", 7386 vp, uiop); 7387 if (((error = strwaitq(stp, waitflag, (ssize_t)0, 7388 fmode, timout, &done))) != 0 || done) { 7389 TRACE_2(TR_FAC_STREAMS_FR, TR_KSTRGETMSG_DONE, 7390 "kstrgetmsg error or done:%p, %p", 7391 vp, uiop); 7392 mutex_exit(&stp->sd_lock); 7393 return (error); 7394 } 7395 TRACE_2(TR_FAC_STREAMS_FR, TR_KSTRGETMSG_AWAKE, 7396 "kstrgetmsg awakes:%p, %p", vp, uiop); 7397 if ((error = i_straccess(stp, JCREAD)) != 0) { 7398 mutex_exit(&stp->sd_lock); 7399 return (error); 7400 } 7401 first = 0; 7402 } 7403 ASSERT(bp != NULL); 7404 /* 7405 * Extract any mark information. If the message is not completely 7406 * consumed this information will be put in the mblk 7407 * that is putback. 7408 * If MSGMARKNEXT is set and the message is completely consumed 7409 * the STRATMARK flag will be set below. Likewise, if 7410 * MSGNOTMARKNEXT is set and the message is 7411 * completely consumed STRNOTATMARK will be set. 7412 */ 7413 mark = bp->b_flag & (MSGMARK | MSGMARKNEXT | MSGNOTMARKNEXT); 7414 ASSERT((mark & (MSGMARKNEXT|MSGNOTMARKNEXT)) != 7415 (MSGMARKNEXT|MSGNOTMARKNEXT)); 7416 pri = bp->b_band; 7417 if (mark != 0) { 7418 /* 7419 * If the caller doesn't want the mark return. 7420 * Used to implement MSG_WAITALL in sockets. 7421 */ 7422 if (flags & MSG_NOMARK) { 7423 putback(stp, q, bp, pri); 7424 qbackenable(q, pri); 7425 mutex_exit(&stp->sd_lock); 7426 return (EWOULDBLOCK); 7427 } 7428 if (bp == stp->sd_mark) { 7429 mark |= _LASTMARK; 7430 stp->sd_mark = NULL; 7431 } 7432 } 7433 7434 /* 7435 * keep track of the first message type 7436 */ 7437 type = bp->b_datap->db_type; 7438 7439 if (bp->b_datap->db_type == M_PASSFP) { 7440 if ((mark & _LASTMARK) && (stp->sd_mark == NULL)) 7441 stp->sd_mark = bp; 7442 bp->b_flag |= mark & ~_LASTMARK; 7443 putback(stp, q, bp, pri); 7444 qbackenable(q, pri); 7445 mutex_exit(&stp->sd_lock); 7446 return (EBADMSG); 7447 } 7448 ASSERT(type != M_SIG); 7449 7450 if (flags & MSG_IPEEK) { 7451 /* 7452 * Clear any struioflag - we do the uiomove over again 7453 * when peeking since it simplifies the code. 7454 * 7455 * Dup the message and put the original back on the queue. 7456 * If dupmsg() fails, try again with copymsg() to see if 7457 * there is indeed a shortage of memory. dupmsg() may fail 7458 * if db_ref in any of the messages reaches its limit. 7459 */ 7460 7461 ASSERT(!(bp->b_datap->db_flags & DBLK_UIOA)); 7462 if ((nbp = dupmsg(bp)) == NULL && (nbp = copymsg(bp)) == NULL) { 7463 /* 7464 * Restore the state of the stream head since we 7465 * need to drop sd_lock (strwaitbuf is sleeping). 
7466 */ 7467 size_t size = msgdsize(bp); 7468 7469 if ((mark & _LASTMARK) && (stp->sd_mark == NULL)) 7470 stp->sd_mark = bp; 7471 bp->b_flag |= mark & ~_LASTMARK; 7472 putback(stp, q, bp, pri); 7473 mutex_exit(&stp->sd_lock); 7474 error = strwaitbuf(size, BPRI_HI); 7475 if (error) { 7476 /* 7477 * There is no net change to the queue thus 7478 * no need to qbackenable. 7479 */ 7480 return (error); 7481 } 7482 goto retry; 7483 } 7484 7485 if ((mark & _LASTMARK) && (stp->sd_mark == NULL)) 7486 stp->sd_mark = bp; 7487 bp->b_flag |= mark & ~_LASTMARK; 7488 putback(stp, q, bp, pri); 7489 bp = nbp; 7490 } 7491 7492 /* 7493 * Set this flag so strrput will not generate signals. Need to 7494 * make sure this flag is cleared before leaving this routine 7495 * else signals will stop being sent. 7496 */ 7497 stp->sd_flag |= STRGETINPROG; 7498 mutex_exit(&stp->sd_lock); 7499 7500 if ((stp->sd_rputdatafunc != NULL) && (DB_TYPE(bp) == M_DATA)) { 7501 mblk_t *tmp, *prevmp; 7502 7503 /* 7504 * Put first non-data mblk back to stream head and 7505 * cut the mblk chain so sd_rputdatafunc only sees 7506 * M_DATA mblks. We can skip the first mblk since it 7507 * is M_DATA according to the condition above. 7508 */ 7509 for (prevmp = bp, tmp = bp->b_cont; tmp != NULL; 7510 prevmp = tmp, tmp = tmp->b_cont) { 7511 if (DB_TYPE(tmp) != M_DATA) { 7512 prevmp->b_cont = NULL; 7513 mutex_enter(&stp->sd_lock); 7514 putback(stp, q, tmp, tmp->b_band); 7515 mutex_exit(&stp->sd_lock); 7516 break; 7517 } 7518 } 7519 7520 ASSERT(!(bp->b_datap->db_flags & DBLK_UIOA)); 7521 bp = (stp->sd_rputdatafunc)(stp->sd_vnode, bp, 7522 NULL, NULL, NULL, NULL); 7523 7524 if (bp == NULL) 7525 goto retry; 7526 } 7527 7528 if (STREAM_NEEDSERVICE(stp)) 7529 stream_runservice(stp); 7530 7531 /* 7532 * Set HIPRI flag if message is priority. 7533 */ 7534 if (type >= QPCTL) 7535 flg = MSG_HIPRI; 7536 else 7537 flg = MSG_BAND; 7538 7539 /* 7540 * First process PROTO or PCPROTO blocks, if any. 7541 */ 7542 if (mctlp != NULL && type != M_DATA) { 7543 mblk_t *nbp; 7544 7545 *mctlp = bp; 7546 while (bp->b_cont && bp->b_cont->b_datap->db_type != M_DATA) 7547 bp = bp->b_cont; 7548 nbp = bp->b_cont; 7549 bp->b_cont = NULL; 7550 bp = nbp; 7551 } 7552 7553 if (bp && bp->b_datap->db_type != M_DATA) { 7554 /* 7555 * More PROTO blocks in msg. Will only happen if mctlp is NULL. 7556 */ 7557 more |= MORECTL; 7558 savemp = bp; 7559 while (bp && bp->b_datap->db_type != M_DATA) { 7560 savemptail = bp; 7561 bp = bp->b_cont; 7562 } 7563 savemptail->b_cont = NULL; 7564 } 7565 7566 /* 7567 * Now process DATA blocks, if any. 7568 */ 7569 if (uiop == NULL) { 7570 /* Append data to tail of mctlp */ 7571 7572 ASSERT(bp == NULL || !(bp->b_datap->db_flags & DBLK_UIOA)); 7573 if (mctlp != NULL) { 7574 mblk_t **mpp = mctlp; 7575 7576 while (*mpp != NULL) 7577 mpp = &((*mpp)->b_cont); 7578 *mpp = bp; 7579 bp = NULL; 7580 } 7581 } else if (bp && (bp->b_datap->db_flags & DBLK_UIOA)) { 7582 /* 7583 * A uioa mblk_t chain, as uio processing has already 7584 * been done we simple skip over processing. 7585 */ 7586 bp = NULL; 7587 pr = 0; 7588 7589 } else if (uiop->uio_resid >= 0 && bp) { 7590 size_t oldresid = uiop->uio_resid; 7591 7592 /* 7593 * If a streams message is likely to consist 7594 * of many small mblks, it is pulled up into 7595 * one continuous chunk of memory. 7596 * see longer comment at top of page 7597 * by mblk_pull_len declaration. 
7598 */ 7599 7600 if (MBLKL(bp) < mblk_pull_len) { 7601 (void) pullupmsg(bp, -1); 7602 } 7603 7604 bp = struiocopyout(bp, uiop, &error); 7605 if (error != 0) { 7606 if (mctlp != NULL) { 7607 freemsg(*mctlp); 7608 *mctlp = NULL; 7609 } else 7610 freemsg(savemp); 7611 mutex_enter(&stp->sd_lock); 7612 /* 7613 * clear stream head hi pri flag based on 7614 * first message 7615 */ 7616 if (!(flags & MSG_IPEEK) && (type >= QPCTL)) { 7617 ASSERT(type == M_PCPROTO); 7618 stp->sd_flag &= ~STRPRI; 7619 } 7620 more = 0; 7621 goto getmout; 7622 } 7623 /* 7624 * (pr == 1) indicates a partial read. 7625 */ 7626 if (oldresid > uiop->uio_resid) 7627 pr = 1; 7628 } 7629 7630 if (bp) { /* more data blocks in msg */ 7631 more |= MOREDATA; 7632 if (savemp) 7633 savemptail->b_cont = bp; 7634 else 7635 savemp = bp; 7636 } 7637 7638 mutex_enter(&stp->sd_lock); 7639 if (savemp) { 7640 if (flags & (MSG_IPEEK|MSG_DISCARDTAIL)) { 7641 /* 7642 * When MSG_DISCARDTAIL is set or 7643 * when peeking discard any tail. When peeking this 7644 * is the tail of the dup that was copied out - the 7645 * message has already been putback on the queue. 7646 * Return MOREDATA to the caller even though the data 7647 * is discarded. This is used by sockets (to 7648 * set MSG_TRUNC). 7649 */ 7650 freemsg(savemp); 7651 if (!(flags & MSG_IPEEK) && (type >= QPCTL)) { 7652 ASSERT(type == M_PCPROTO); 7653 stp->sd_flag &= ~STRPRI; 7654 } 7655 } else if (pr && (savemp->b_datap->db_type == M_DATA) && 7656 msgnodata(savemp)) { 7657 /* 7658 * Avoid queuing a zero-length tail part of 7659 * a message. pr=1 indicates that we read some of 7660 * the message. 7661 */ 7662 freemsg(savemp); 7663 more &= ~MOREDATA; 7664 if (type >= QPCTL) { 7665 ASSERT(type == M_PCPROTO); 7666 stp->sd_flag &= ~STRPRI; 7667 } 7668 } else { 7669 savemp->b_band = pri; 7670 /* 7671 * If the first message was HIPRI and the one we're 7672 * putting back isn't, then clear STRPRI, otherwise 7673 * set STRPRI again. Note that we must set STRPRI 7674 * again since the flush logic in strrput_nondata() 7675 * may have cleared it while we had sd_lock dropped. 7676 */ 7677 7678 ASSERT(!(savemp->b_datap->db_flags & DBLK_UIOA)); 7679 if (type >= QPCTL) { 7680 ASSERT(type == M_PCPROTO); 7681 if (queclass(savemp) < QPCTL) 7682 stp->sd_flag &= ~STRPRI; 7683 else 7684 stp->sd_flag |= STRPRI; 7685 } else if (queclass(savemp) >= QPCTL) { 7686 /* 7687 * The first message was not a HIPRI message, 7688 * but the one we are about to putback is. 7689 * For simplicitly, we do not allow for HIPRI 7690 * messages to be embedded in the message 7691 * body, so just force it to same type as 7692 * first message. 7693 */ 7694 ASSERT(type == M_DATA || type == M_PROTO); 7695 ASSERT(savemp->b_datap->db_type == M_PCPROTO); 7696 savemp->b_datap->db_type = type; 7697 } 7698 if (mark != 0) { 7699 if ((mark & _LASTMARK) && 7700 (stp->sd_mark == NULL)) { 7701 /* 7702 * If another marked message arrived 7703 * while sd_lock was not held sd_mark 7704 * would be non-NULL. 7705 */ 7706 stp->sd_mark = savemp; 7707 } 7708 savemp->b_flag |= mark & ~_LASTMARK; 7709 } 7710 putback(stp, q, savemp, pri); 7711 } 7712 } else if (!(flags & MSG_IPEEK)) { 7713 /* 7714 * The complete message was consumed. 7715 * 7716 * If another M_PCPROTO arrived while sd_lock was not held 7717 * it would have been discarded since STRPRI was still set. 7718 * 7719 * Move the MSG*MARKNEXT information 7720 * to the stream head just in case 7721 * the read queue becomes empty. 
7722 * clear stream head hi pri flag based on 7723 * first message 7724 * 7725 * If the stream head was at the mark 7726 * (STRATMARK) before we dropped sd_lock above 7727 * and some data was consumed then we have 7728 * moved past the mark thus STRATMARK is 7729 * cleared. However, if a message arrived in 7730 * strrput during the copyout above causing 7731 * STRATMARK to be set we can not clear that 7732 * flag. 7733 * XXX A "perimeter" would help by single-threading strrput, 7734 * strread, strgetmsg and kstrgetmsg. 7735 */ 7736 if (type >= QPCTL) { 7737 ASSERT(type == M_PCPROTO); 7738 stp->sd_flag &= ~STRPRI; 7739 } 7740 if (mark & (MSGMARKNEXT|MSGNOTMARKNEXT|MSGMARK)) { 7741 if (mark & MSGMARKNEXT) { 7742 stp->sd_flag &= ~STRNOTATMARK; 7743 stp->sd_flag |= STRATMARK; 7744 } else if (mark & MSGNOTMARKNEXT) { 7745 stp->sd_flag &= ~STRATMARK; 7746 stp->sd_flag |= STRNOTATMARK; 7747 } else { 7748 stp->sd_flag &= ~(STRATMARK|STRNOTATMARK); 7749 } 7750 } else if (pr && (old_sd_flag & STRATMARK)) { 7751 stp->sd_flag &= ~STRATMARK; 7752 } 7753 } 7754 7755 *flagsp = flg; 7756 *prip = pri; 7757 7758 /* 7759 * Getmsg cleanup processing - if the state of the queue has changed 7760 * some signals may need to be sent and/or poll awakened. 7761 */ 7762 getmout: 7763 qbackenable(q, pri); 7764 7765 /* 7766 * We dropped the stream head lock above. Send all M_SIG messages 7767 * before processing stream head for SIGPOLL messages. 7768 */ 7769 ASSERT(MUTEX_HELD(&stp->sd_lock)); 7770 while ((bp = q->q_first) != NULL && 7771 (bp->b_datap->db_type == M_SIG)) { 7772 /* 7773 * sd_lock is held so the content of the read queue can not 7774 * change. 7775 */ 7776 bp = getq(q); 7777 ASSERT(bp != NULL && bp->b_datap->db_type == M_SIG); 7778 7779 strsignal_nolock(stp, *bp->b_rptr, (int32_t)bp->b_band); 7780 mutex_exit(&stp->sd_lock); 7781 freemsg(bp); 7782 if (STREAM_NEEDSERVICE(stp)) 7783 stream_runservice(stp); 7784 mutex_enter(&stp->sd_lock); 7785 } 7786 7787 /* 7788 * stream head cannot change while we make the determination 7789 * whether or not to send a signal. Drop the flag to allow strrput 7790 * to send firstmsgsigs again. 7791 */ 7792 stp->sd_flag &= ~STRGETINPROG; 7793 7794 /* 7795 * If the type of message at the front of the queue changed 7796 * due to the receive the appropriate signals and pollwakeup events 7797 * are generated. The type of changes are: 7798 * Processed a hipri message, q_first is not hipri. 7799 * Processed a band X message, and q_first is band Y. 7800 * The generated signals and pollwakeups are identical to what 7801 * strrput() generates should the message that is now on q_first 7802 * arrive to an empty read queue. 7803 * 7804 * Note: only strrput will send a signal for a hipri message. 7805 */ 7806 if ((bp = q->q_first) != NULL && !(stp->sd_flag & STRPRI)) { 7807 strsigset_t signals = 0; 7808 strpollset_t pollwakeups = 0; 7809 7810 if (flg & MSG_HIPRI) { 7811 /* 7812 * Removed a hipri message. Regular data at 7813 * the front of the queue. 7814 */ 7815 if (bp->b_band == 0) { 7816 signals = S_INPUT | S_RDNORM; 7817 pollwakeups = POLLIN | POLLRDNORM; 7818 } else { 7819 signals = S_INPUT | S_RDBAND; 7820 pollwakeups = POLLIN | POLLRDBAND; 7821 } 7822 } else if (pri != bp->b_band) { 7823 /* 7824 * The band is different for the new q_first. 
7825 */ 7826 if (bp->b_band == 0) { 7827 signals = S_RDNORM; 7828 pollwakeups = POLLIN | POLLRDNORM; 7829 } else { 7830 signals = S_RDBAND; 7831 pollwakeups = POLLIN | POLLRDBAND; 7832 } 7833 } 7834 7835 if (pollwakeups != 0) { 7836 if (pollwakeups == (POLLIN | POLLRDNORM)) { 7837 if (!(stp->sd_rput_opt & SR_POLLIN)) 7838 goto no_pollwake; 7839 stp->sd_rput_opt &= ~SR_POLLIN; 7840 } 7841 mutex_exit(&stp->sd_lock); 7842 pollwakeup(&stp->sd_pollist, pollwakeups); 7843 mutex_enter(&stp->sd_lock); 7844 } 7845 no_pollwake: 7846 7847 if (stp->sd_sigflags & signals) 7848 strsendsig(stp->sd_siglist, signals, bp->b_band, 0); 7849 } 7850 mutex_exit(&stp->sd_lock); 7851 7852 rvp->r_val1 = more; 7853 return (error); 7854 #undef _LASTMARK 7855 } 7856 7857 /* 7858 * Put a message downstream. 7859 * 7860 * NOTE: strputmsg and kstrputmsg have much of the logic in common. 7861 */ 7862 int 7863 strputmsg( 7864 struct vnode *vp, 7865 struct strbuf *mctl, 7866 struct strbuf *mdata, 7867 unsigned char pri, 7868 int flag, 7869 int fmode) 7870 { 7871 struct stdata *stp; 7872 queue_t *wqp; 7873 mblk_t *mp; 7874 ssize_t msgsize; 7875 ssize_t rmin, rmax; 7876 int error; 7877 struct uio uios; 7878 struct uio *uiop = &uios; 7879 struct iovec iovs; 7880 int xpg4 = 0; 7881 7882 ASSERT(vp->v_stream); 7883 stp = vp->v_stream; 7884 wqp = stp->sd_wrq; 7885 7886 /* 7887 * If it is an XPG4 application, we need to send 7888 * SIGPIPE below 7889 */ 7890 7891 xpg4 = (flag & MSG_XPG4) ? 1 : 0; 7892 flag &= ~MSG_XPG4; 7893 7894 if (audit_active) 7895 audit_strputmsg(vp, mctl, mdata, pri, flag, fmode); 7896 7897 mutex_enter(&stp->sd_lock); 7898 7899 if ((error = i_straccess(stp, JCWRITE)) != 0) { 7900 mutex_exit(&stp->sd_lock); 7901 return (error); 7902 } 7903 7904 if (stp->sd_flag & (STWRERR|STRHUP|STPLEX)) { 7905 error = strwriteable(stp, B_FALSE, xpg4); 7906 if (error != 0) { 7907 mutex_exit(&stp->sd_lock); 7908 return (error); 7909 } 7910 } 7911 7912 mutex_exit(&stp->sd_lock); 7913 7914 /* 7915 * Check for legal flag value. 7916 */ 7917 switch (flag) { 7918 case MSG_HIPRI: 7919 if ((mctl->len < 0) || (pri != 0)) 7920 return (EINVAL); 7921 break; 7922 case MSG_BAND: 7923 break; 7924 7925 default: 7926 return (EINVAL); 7927 } 7928 7929 TRACE_1(TR_FAC_STREAMS_FR, TR_STRPUTMSG_IN, 7930 "strputmsg in:stp %p", stp); 7931 7932 /* get these values from those cached in the stream head */ 7933 rmin = stp->sd_qn_minpsz; 7934 rmax = stp->sd_qn_maxpsz; 7935 7936 /* 7937 * Make sure ctl and data sizes together fall within the 7938 * limits of the max and min receive packet sizes and do 7939 * not exceed system limit. 7940 */ 7941 ASSERT((rmax >= 0) || (rmax == INFPSZ)); 7942 if (rmax == 0) { 7943 return (ERANGE); 7944 } 7945 /* 7946 * Use the MAXIMUM of sd_maxblk and q_maxpsz. 7947 * Needed to prevent partial failures in the strmakedata loop. 
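 *
 * For reference, this path is normally entered via putmsg(3C) or
 * putpmsg(3C); a control-plus-data write might look like the sketch
 * below (req and payload are illustrative names):
 *
 *	struct strbuf ctl = { 0, sizeof (req), (char *)&req };
 *	struct strbuf dat = { 0, sizeof (payload), payload };
 *
 *	if (putmsg(fd, &ctl, &dat, 0) != 0)
 *		errno may be ERANGE (the size checks here) or EAGAIN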
7948 */ 7949 if (stp->sd_maxblk != INFPSZ && rmax != INFPSZ && rmax < stp->sd_maxblk) 7950 rmax = stp->sd_maxblk; 7951 7952 if ((msgsize = mdata->len) < 0) { 7953 msgsize = 0; 7954 rmin = 0; /* no range check for NULL data part */ 7955 } 7956 if ((msgsize < rmin) || 7957 ((msgsize > rmax) && (rmax != INFPSZ)) || 7958 (mctl->len > strctlsz)) { 7959 return (ERANGE); 7960 } 7961 7962 /* 7963 * Setup uio and iov for data part 7964 */ 7965 iovs.iov_base = mdata->buf; 7966 iovs.iov_len = msgsize; 7967 uios.uio_iov = &iovs; 7968 uios.uio_iovcnt = 1; 7969 uios.uio_loffset = 0; 7970 uios.uio_segflg = UIO_USERSPACE; 7971 uios.uio_fmode = fmode; 7972 uios.uio_extflg = UIO_COPY_DEFAULT; 7973 uios.uio_resid = msgsize; 7974 uios.uio_offset = 0; 7975 7976 /* Ignore flow control in strput for HIPRI */ 7977 if (flag & MSG_HIPRI) 7978 flag |= MSG_IGNFLOW; 7979 7980 for (;;) { 7981 int done = 0; 7982 7983 /* 7984 * strput will always free the ctl mblk - even when strput 7985 * fails. 7986 */ 7987 if ((error = strmakectl(mctl, flag, fmode, &mp)) != 0) { 7988 TRACE_3(TR_FAC_STREAMS_FR, TR_STRPUTMSG_OUT, 7989 "strputmsg out:stp %p out %d error %d", 7990 stp, 1, error); 7991 return (error); 7992 } 7993 /* 7994 * Verify that the whole message can be transferred by 7995 * strput. 7996 */ 7997 ASSERT(stp->sd_maxblk == INFPSZ || 7998 stp->sd_maxblk >= mdata->len); 7999 8000 msgsize = mdata->len; 8001 error = strput(stp, mp, uiop, &msgsize, 0, pri, flag); 8002 mdata->len = msgsize; 8003 8004 if (error == 0) 8005 break; 8006 8007 if (error != EWOULDBLOCK) 8008 goto out; 8009 8010 mutex_enter(&stp->sd_lock); 8011 /* 8012 * Check for a missed wakeup. 8013 * Needed since strput did not hold sd_lock across 8014 * the canputnext. 8015 */ 8016 if (bcanputnext(wqp, pri)) { 8017 /* Try again */ 8018 mutex_exit(&stp->sd_lock); 8019 continue; 8020 } 8021 TRACE_2(TR_FAC_STREAMS_FR, TR_STRPUTMSG_WAIT, 8022 "strputmsg wait:stp %p waits pri %d", stp, pri); 8023 if (((error = strwaitq(stp, WRITEWAIT, (ssize_t)0, fmode, -1, 8024 &done)) != 0) || done) { 8025 mutex_exit(&stp->sd_lock); 8026 TRACE_3(TR_FAC_STREAMS_FR, TR_STRPUTMSG_OUT, 8027 "strputmsg out:q %p out %d error %d", 8028 stp, 0, error); 8029 return (error); 8030 } 8031 TRACE_1(TR_FAC_STREAMS_FR, TR_STRPUTMSG_WAKE, 8032 "strputmsg wake:stp %p wakes", stp); 8033 if ((error = i_straccess(stp, JCWRITE)) != 0) { 8034 mutex_exit(&stp->sd_lock); 8035 return (error); 8036 } 8037 mutex_exit(&stp->sd_lock); 8038 } 8039 out: 8040 /* 8041 * For historic reasons, applications expect EAGAIN 8042 * when data mblk could not be allocated. so change 8043 * ENOMEM back to EAGAIN 8044 */ 8045 if (error == ENOMEM) 8046 error = EAGAIN; 8047 TRACE_3(TR_FAC_STREAMS_FR, TR_STRPUTMSG_OUT, 8048 "strputmsg out:stp %p out %d error %d", stp, 2, error); 8049 return (error); 8050 } 8051 8052 /* 8053 * Put a message downstream. 8054 * Can send only an M_PROTO/M_PCPROTO by passing in a NULL uiop. 8055 * The fmode flag (NDELAY, NONBLOCK) is the or of the flags in the uio 8056 * and the fmode parameter. 8057 * 8058 * This routine handles the consolidation private flags: 8059 * MSG_IGNERROR Ignore any stream head error except STPLEX. 8060 * MSG_HOLDSIG Hold signals while waiting for data. 8061 * MSG_IGNFLOW Don't check streams flow control. 8062 * 8063 * NOTE: strputmsg and kstrputmsg have much of the logic in common. 
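 *
 * A minimal sketch of an in-kernel caller sending a TPI control
 * message the way the comment further below describes sockfs doing it
 * (the choice of T_unbind_req and its construction are assumptions
 * used only for illustration; allocation failure handling omitted):
 *
 *	mblk_t *mp = allocb(sizeof (struct T_unbind_req), BPRI_MED);
 *
 *	DB_TYPE(mp) = M_PROTO;
 *	fill in the T_unbind_req and advance mp->b_wptr, then:
 *	error = kstrputmsg(vp, mp, NULL, 0, 0,
 *	    MSG_BAND | MSG_HOLDSIG | MSG_IGNERROR | MSG_IGNFLOW, 0);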
8064 */ 8065 int 8066 kstrputmsg( 8067 struct vnode *vp, 8068 mblk_t *mctl, 8069 struct uio *uiop, 8070 ssize_t msgsize, 8071 unsigned char pri, 8072 int flag, 8073 int fmode) 8074 { 8075 struct stdata *stp; 8076 queue_t *wqp; 8077 ssize_t rmin, rmax; 8078 int error; 8079 8080 ASSERT(vp->v_stream); 8081 stp = vp->v_stream; 8082 wqp = stp->sd_wrq; 8083 if (audit_active) 8084 audit_strputmsg(vp, NULL, NULL, pri, flag, fmode); 8085 if (mctl == NULL) 8086 return (EINVAL); 8087 8088 mutex_enter(&stp->sd_lock); 8089 8090 if ((error = i_straccess(stp, JCWRITE)) != 0) { 8091 mutex_exit(&stp->sd_lock); 8092 freemsg(mctl); 8093 return (error); 8094 } 8095 8096 if ((stp->sd_flag & STPLEX) || !(flag & MSG_IGNERROR)) { 8097 if (stp->sd_flag & (STWRERR|STRHUP|STPLEX)) { 8098 error = strwriteable(stp, B_FALSE, B_TRUE); 8099 if (error != 0) { 8100 mutex_exit(&stp->sd_lock); 8101 freemsg(mctl); 8102 return (error); 8103 } 8104 } 8105 } 8106 8107 mutex_exit(&stp->sd_lock); 8108 8109 /* 8110 * Check for legal flag value. 8111 */ 8112 switch (flag & (MSG_HIPRI|MSG_BAND|MSG_ANY)) { 8113 case MSG_HIPRI: 8114 if (pri != 0) { 8115 freemsg(mctl); 8116 return (EINVAL); 8117 } 8118 break; 8119 case MSG_BAND: 8120 break; 8121 default: 8122 freemsg(mctl); 8123 return (EINVAL); 8124 } 8125 8126 TRACE_1(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_IN, 8127 "kstrputmsg in:stp %p", stp); 8128 8129 /* get these values from those cached in the stream head */ 8130 rmin = stp->sd_qn_minpsz; 8131 rmax = stp->sd_qn_maxpsz; 8132 8133 /* 8134 * Make sure ctl and data sizes together fall within the 8135 * limits of the max and min receive packet sizes and do 8136 * not exceed system limit. 8137 */ 8138 ASSERT((rmax >= 0) || (rmax == INFPSZ)); 8139 if (rmax == 0) { 8140 freemsg(mctl); 8141 return (ERANGE); 8142 } 8143 /* 8144 * Use the MAXIMUM of sd_maxblk and q_maxpsz. 8145 * Needed to prevent partial failures in the strmakedata loop. 8146 */ 8147 if (stp->sd_maxblk != INFPSZ && rmax != INFPSZ && rmax < stp->sd_maxblk) 8148 rmax = stp->sd_maxblk; 8149 8150 if (uiop == NULL) { 8151 msgsize = -1; 8152 rmin = -1; /* no range check for NULL data part */ 8153 } else { 8154 /* Use uio flags as well as the fmode parameter flags */ 8155 fmode |= uiop->uio_fmode; 8156 8157 if ((msgsize < rmin) || 8158 ((msgsize > rmax) && (rmax != INFPSZ))) { 8159 freemsg(mctl); 8160 return (ERANGE); 8161 } 8162 } 8163 8164 /* Ignore flow control in strput for HIPRI */ 8165 if (flag & MSG_HIPRI) 8166 flag |= MSG_IGNFLOW; 8167 8168 for (;;) { 8169 int done = 0; 8170 int waitflag; 8171 mblk_t *mp; 8172 8173 /* 8174 * strput will always free the ctl mblk - even when strput 8175 * fails. If MSG_IGNFLOW is set then any error returned 8176 * will cause us to break the loop, so we don't need a copy 8177 * of the message. If MSG_IGNFLOW is not set, then we can 8178 * get hit by flow control and be forced to try again. In 8179 * this case we need to have a copy of the message. We 8180 * do this using copymsg since the message may get modified 8181 * by something below us. 8182 * 8183 * We've observed that many TPI providers do not check db_ref 8184 * on the control messages but blindly reuse them for the 8185 * T_OK_ACK/T_ERROR_ACK. Thus using copymsg is more 8186 * friendly to such providers than using dupmsg. Also, note 8187 * that sockfs uses MSG_IGNFLOW for all TPI control messages. 8188 * Only data messages are subject to flow control, hence 8189 * subject to this copymsg. 
8190 */ 8191 if (flag & MSG_IGNFLOW) { 8192 mp = mctl; 8193 mctl = NULL; 8194 } else { 8195 do { 8196 /* 8197 * If a message has a free pointer, the message 8198 * must be dupmsg to maintain this pointer. 8199 * Code using this facility must be sure 8200 * that modules below will not change the 8201 * contents of the dblk without checking db_ref 8202 * first. If db_ref is > 1, then the module 8203 * needs to do a copymsg first. Otherwise, 8204 * the contents of the dblk may become 8205 * inconsistent because the freesmg/freeb below 8206 * may end up calling atomic_add_32_nv. 8207 * The atomic_add_32_nv in freeb (accessing 8208 * all of db_ref, db_type, db_flags, and 8209 * db_struioflag) does not prevent other threads 8210 * from concurrently trying to modify e.g. 8211 * db_type. 8212 */ 8213 if (mctl->b_datap->db_frtnp != NULL) 8214 mp = dupmsg(mctl); 8215 else 8216 mp = copymsg(mctl); 8217 8218 if (mp != NULL) 8219 break; 8220 8221 error = strwaitbuf(msgdsize(mctl), BPRI_MED); 8222 if (error) { 8223 freemsg(mctl); 8224 return (error); 8225 } 8226 } while (mp == NULL); 8227 } 8228 /* 8229 * Verify that all of msgsize can be transferred by 8230 * strput. 8231 */ 8232 ASSERT(stp->sd_maxblk == INFPSZ || stp->sd_maxblk >= msgsize); 8233 error = strput(stp, mp, uiop, &msgsize, 0, pri, flag); 8234 if (error == 0) 8235 break; 8236 8237 if (error != EWOULDBLOCK) 8238 goto out; 8239 8240 /* 8241 * IF MSG_IGNFLOW is set we should have broken out of loop 8242 * above. 8243 */ 8244 ASSERT(!(flag & MSG_IGNFLOW)); 8245 mutex_enter(&stp->sd_lock); 8246 /* 8247 * Check for a missed wakeup. 8248 * Needed since strput did not hold sd_lock across 8249 * the canputnext. 8250 */ 8251 if (bcanputnext(wqp, pri)) { 8252 /* Try again */ 8253 mutex_exit(&stp->sd_lock); 8254 continue; 8255 } 8256 TRACE_2(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_WAIT, 8257 "kstrputmsg wait:stp %p waits pri %d", stp, pri); 8258 8259 waitflag = WRITEWAIT; 8260 if (flag & (MSG_HOLDSIG|MSG_IGNERROR)) { 8261 if (flag & MSG_HOLDSIG) 8262 waitflag |= STR_NOSIG; 8263 if (flag & MSG_IGNERROR) 8264 waitflag |= STR_NOERROR; 8265 } 8266 if (((error = strwaitq(stp, waitflag, 8267 (ssize_t)0, fmode, -1, &done)) != 0) || done) { 8268 mutex_exit(&stp->sd_lock); 8269 TRACE_3(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_OUT, 8270 "kstrputmsg out:stp %p out %d error %d", 8271 stp, 0, error); 8272 freemsg(mctl); 8273 return (error); 8274 } 8275 TRACE_1(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_WAKE, 8276 "kstrputmsg wake:stp %p wakes", stp); 8277 if ((error = i_straccess(stp, JCWRITE)) != 0) { 8278 mutex_exit(&stp->sd_lock); 8279 freemsg(mctl); 8280 return (error); 8281 } 8282 mutex_exit(&stp->sd_lock); 8283 } 8284 out: 8285 freemsg(mctl); 8286 /* 8287 * For historic reasons, applications expect EAGAIN 8288 * when data mblk could not be allocated. so change 8289 * ENOMEM back to EAGAIN 8290 */ 8291 if (error == ENOMEM) 8292 error = EAGAIN; 8293 TRACE_3(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_OUT, 8294 "kstrputmsg out:stp %p out %d error %d", stp, 2, error); 8295 return (error); 8296 } 8297 8298 /* 8299 * Determines whether the necessary conditions are set on a stream 8300 * for it to be readable, writeable, or have exceptions. 8301 * 8302 * strpoll handles the consolidation private events: 8303 * POLLNOERR Do not return POLLERR even if there are stream 8304 * head errors. 8305 * Used by sockfs. 8306 * POLLRDDATA Do not return POLLIN unless at least one message on 8307 * the queue contains one or more M_DATA mblks. 
Thus 8308 * when this flag is set a queue with only 8309 * M_PROTO/M_PCPROTO mblks does not return POLLIN. 8310 * Used by sockfs to ignore T_EXDATA_IND messages. 8311 * 8312 * Note: POLLRDDATA assumes that synch streams only return messages with 8313 * an M_DATA attached (i.e. not messages consisting of only 8314 * an M_PROTO/M_PCPROTO part). 8315 */ 8316 int 8317 strpoll( 8318 struct stdata *stp, 8319 short events_arg, 8320 int anyyet, 8321 short *reventsp, 8322 struct pollhead **phpp) 8323 { 8324 int events = (ushort_t)events_arg; 8325 int retevents = 0; 8326 mblk_t *mp; 8327 qband_t *qbp; 8328 long sd_flags = stp->sd_flag; 8329 int headlocked = 0; 8330 8331 /* 8332 * For performance, a single 'if' tests for most possible edge 8333 * conditions in one shot 8334 */ 8335 if (sd_flags & (STPLEX | STRDERR | STWRERR)) { 8336 if (sd_flags & STPLEX) { 8337 *reventsp = POLLNVAL; 8338 return (EINVAL); 8339 } 8340 if (((events & (POLLIN | POLLRDNORM | POLLRDBAND | POLLPRI)) && 8341 (sd_flags & STRDERR)) || 8342 ((events & (POLLOUT | POLLWRNORM | POLLWRBAND)) && 8343 (sd_flags & STWRERR))) { 8344 if (!(events & POLLNOERR)) { 8345 *reventsp = POLLERR; 8346 return (0); 8347 } 8348 } 8349 } 8350 if (sd_flags & STRHUP) { 8351 retevents |= POLLHUP; 8352 } else if (events & (POLLWRNORM | POLLWRBAND)) { 8353 queue_t *tq; 8354 queue_t *qp = stp->sd_wrq; 8355 8356 claimstr(qp); 8357 /* Find next module forward that has a service procedure */ 8358 tq = qp->q_next->q_nfsrv; 8359 ASSERT(tq != NULL); 8360 8361 polllock(&stp->sd_pollist, QLOCK(tq)); 8362 if (events & POLLWRNORM) { 8363 queue_t *sqp; 8364 8365 if (tq->q_flag & QFULL) 8366 /* ensure backq svc procedure runs */ 8367 tq->q_flag |= QWANTW; 8368 else if ((sqp = stp->sd_struiowrq) != NULL) { 8369 /* Check sync stream barrier write q */ 8370 mutex_exit(QLOCK(tq)); 8371 polllock(&stp->sd_pollist, QLOCK(sqp)); 8372 if (sqp->q_flag & QFULL) 8373 /* ensure pollwakeup() is done */ 8374 sqp->q_flag |= QWANTWSYNC; 8375 else 8376 retevents |= POLLOUT; 8377 /* More write events to process ??? */ 8378 if (! (events & POLLWRBAND)) { 8379 mutex_exit(QLOCK(sqp)); 8380 releasestr(qp); 8381 goto chkrd; 8382 } 8383 mutex_exit(QLOCK(sqp)); 8384 polllock(&stp->sd_pollist, QLOCK(tq)); 8385 } else 8386 retevents |= POLLOUT; 8387 } 8388 if (events & POLLWRBAND) { 8389 qbp = tq->q_bandp; 8390 if (qbp) { 8391 while (qbp) { 8392 if (qbp->qb_flag & QB_FULL) 8393 qbp->qb_flag |= QB_WANTW; 8394 else 8395 retevents |= POLLWRBAND; 8396 qbp = qbp->qb_next; 8397 } 8398 } else { 8399 retevents |= POLLWRBAND; 8400 } 8401 } 8402 mutex_exit(QLOCK(tq)); 8403 releasestr(qp); 8404 } 8405 chkrd: 8406 if (sd_flags & STRPRI) { 8407 retevents |= (events & POLLPRI); 8408 } else if (events & (POLLRDNORM | POLLRDBAND | POLLIN)) { 8409 queue_t *qp = _RD(stp->sd_wrq); 8410 int normevents = (events & (POLLIN | POLLRDNORM)); 8411 8412 /* 8413 * Note: Need to do polllock() here since ps_lock may be 8414 * held. See bug 4191544. 8415 */ 8416 polllock(&stp->sd_pollist, &stp->sd_lock); 8417 headlocked = 1; 8418 mp = qp->q_first; 8419 while (mp) { 8420 /* 8421 * For POLLRDDATA we scan b_cont and b_next until we 8422 * find an M_DATA. 
8423 */ 8424 if ((events & POLLRDDATA) && 8425 mp->b_datap->db_type != M_DATA) { 8426 mblk_t *nmp = mp->b_cont; 8427 8428 while (nmp != NULL && 8429 nmp->b_datap->db_type != M_DATA) 8430 nmp = nmp->b_cont; 8431 if (nmp == NULL) { 8432 mp = mp->b_next; 8433 continue; 8434 } 8435 } 8436 if (mp->b_band == 0) 8437 retevents |= normevents; 8438 else 8439 retevents |= (events & (POLLIN | POLLRDBAND)); 8440 break; 8441 } 8442 if (! (retevents & normevents) && 8443 (stp->sd_wakeq & RSLEEP)) { 8444 /* 8445 * Sync stream barrier read queue has data. 8446 */ 8447 retevents |= normevents; 8448 } 8449 /* Treat eof as normal data */ 8450 if (sd_flags & STREOF) 8451 retevents |= normevents; 8452 } 8453 8454 *reventsp = (short)retevents; 8455 if (retevents) { 8456 if (headlocked) 8457 mutex_exit(&stp->sd_lock); 8458 return (0); 8459 } 8460 8461 /* 8462 * If poll() has not found any events yet, set up event cell 8463 * to wake up the poll if a requested event occurs on this 8464 * stream. Check for collisions with outstanding poll requests. 8465 */ 8466 if (!anyyet) { 8467 *phpp = &stp->sd_pollist; 8468 if (headlocked == 0) { 8469 polllock(&stp->sd_pollist, &stp->sd_lock); 8470 headlocked = 1; 8471 } 8472 stp->sd_rput_opt |= SR_POLLIN; 8473 } 8474 if (headlocked) 8475 mutex_exit(&stp->sd_lock); 8476 return (0); 8477 } 8478 8479 /* 8480 * The purpose of putback() is to assure sleeping polls/reads 8481 * are awakened when there are no new messages arriving at the, 8482 * stream head, and a message is placed back on the read queue. 8483 * 8484 * sd_lock must be held when messages are placed back on stream 8485 * head. (getq() holds sd_lock when it removes messages from 8486 * the queue) 8487 */ 8488 8489 static void 8490 putback(struct stdata *stp, queue_t *q, mblk_t *bp, int band) 8491 { 8492 mblk_t *qfirst; 8493 ASSERT(MUTEX_HELD(&stp->sd_lock)); 8494 8495 /* 8496 * As a result of lock-step ordering around q_lock and sd_lock, 8497 * it's possible for function calls like putnext() and 8498 * canputnext() to get an inaccurate picture of how much 8499 * data is really being processed at the stream head. 8500 * We only consolidate with existing messages on the queue 8501 * if the length of the message we want to put back is smaller 8502 * than the queue hiwater mark. 8503 */ 8504 if ((stp->sd_rput_opt & SR_CONSOL_DATA) && 8505 (DB_TYPE(bp) == M_DATA) && ((qfirst = q->q_first) != NULL) && 8506 (DB_TYPE(qfirst) == M_DATA) && 8507 ((qfirst->b_flag & (MSGMARK|MSGDELIM)) == 0) && 8508 ((bp->b_flag & (MSGMARK|MSGDELIM|MSGMARKNEXT)) == 0) && 8509 (mp_cont_len(bp, NULL) < q->q_hiwat)) { 8510 /* 8511 * We use the same logic as defined in strrput() 8512 * but in reverse as we are putting back onto the 8513 * queue and want to retain byte ordering. 8514 * Consolidate M_DATA messages with M_DATA ONLY. 8515 * strrput() allows the consolidation of M_DATA onto 8516 * M_PROTO | M_PCPROTO but not the other way round. 8517 * 8518 * The consolidation does not take place if the message 8519 * we are returning to the queue is marked with either 8520 * of the marks or the delim flag or if q_first 8521 * is marked with MSGMARK. The MSGMARK check is needed to 8522 * handle the odd semantics of MSGMARK where essentially 8523 * the whole message is to be treated as marked. 8524 * Carry any MSGMARKNEXT and MSGNOTMARKNEXT from q_first 8525 * to the front of the b_cont chain. 8526 */ 8527 rmvq_noenab(q, qfirst); 8528 8529 /* 8530 * The first message in the b_cont list 8531 * tracks MSGMARKNEXT and MSGNOTMARKNEXT. 
8532 		 * We need to handle the case where we
8533 		 * are appending:
8534 		 *
8535 		 * 1) a MSGMARKNEXT to a MSGNOTMARKNEXT.
8536 		 * 2) a MSGMARKNEXT to a plain message.
8537 		 * 3) a MSGNOTMARKNEXT to a plain message.
8538 		 * 4) a MSGNOTMARKNEXT to a MSGNOTMARKNEXT
8539 		 * message.
8540 		 *
8541 		 * Thus we never append a MSGMARKNEXT or
8542 		 * MSGNOTMARKNEXT to a MSGMARKNEXT message.
8543 		 */
8544 		if (qfirst->b_flag & MSGMARKNEXT) {
8545 			bp->b_flag |= MSGMARKNEXT;
8546 			bp->b_flag &= ~MSGNOTMARKNEXT;
8547 			qfirst->b_flag &= ~MSGMARKNEXT;
8548 		} else if (qfirst->b_flag & MSGNOTMARKNEXT) {
8549 			bp->b_flag |= MSGNOTMARKNEXT;
8550 			qfirst->b_flag &= ~MSGNOTMARKNEXT;
8551 		}
8552 
8553 		linkb(bp, qfirst);
8554 	}
8555 	(void) putbq(q, bp);
8556 
8557 	/*
8558 	 * A message may have come in when the sd_lock was dropped in the
8559 	 * calling routine. If this is the case and STR*ATMARK info was
8560 	 * received, we need to move that from the stream head to the q_last
8561 	 * so that SIOCATMARK can return the proper value.
8562 	 */
8563 	if (stp->sd_flag & (STRATMARK | STRNOTATMARK)) {
8564 		unsigned short *flagp = &q->q_last->b_flag;
8565 		uint_t b_flag = (uint_t)*flagp;
8566 
8567 		if (stp->sd_flag & STRATMARK) {
8568 			b_flag &= ~MSGNOTMARKNEXT;
8569 			b_flag |= MSGMARKNEXT;
8570 			stp->sd_flag &= ~STRATMARK;
8571 		} else {
8572 			b_flag &= ~MSGMARKNEXT;
8573 			b_flag |= MSGNOTMARKNEXT;
8574 			stp->sd_flag &= ~STRNOTATMARK;
8575 		}
8576 		*flagp = (unsigned short) b_flag;
8577 	}
8578 
8579 #ifdef DEBUG
8580 	/*
8581 	 * Make sure that the flags are not messed up.
8582 	 */
8583 	{
8584 		mblk_t *mp;
8585 		mp = q->q_last;
8586 		while (mp != NULL) {
8587 			ASSERT((mp->b_flag & (MSGMARKNEXT|MSGNOTMARKNEXT)) !=
8588 			    (MSGMARKNEXT|MSGNOTMARKNEXT));
8589 			mp = mp->b_cont;
8590 		}
8591 	}
8592 #endif
8593 	if (q->q_first == bp) {
8594 		short pollevents;
8595 
8596 		if (stp->sd_flag & RSLEEP) {
8597 			stp->sd_flag &= ~RSLEEP;
8598 			cv_broadcast(&q->q_wait);
8599 		}
8600 		if (stp->sd_flag & STRPRI) {
8601 			pollevents = POLLPRI;
8602 		} else {
8603 			if (band == 0) {
8604 				if (!(stp->sd_rput_opt & SR_POLLIN))
8605 					return;
8606 				stp->sd_rput_opt &= ~SR_POLLIN;
8607 				pollevents = POLLIN | POLLRDNORM;
8608 			} else {
8609 				pollevents = POLLIN | POLLRDBAND;
8610 			}
8611 		}
8612 		mutex_exit(&stp->sd_lock);
8613 		pollwakeup(&stp->sd_pollist, pollevents);
8614 		mutex_enter(&stp->sd_lock);
8615 	}
8616 }
8617 
8618 /*
8619  * Return the held vnode attached to the stream head of a
8620  * given queue.
8621  * It is the responsibility of the calling routine to ensure
8622  * that the queue does not go away (e.g. pop).
8623  */
8624 vnode_t *
8625 strq2vp(queue_t *qp)
8626 {
8627 	vnode_t *vp;
8628 	vp = STREAM(qp)->sd_vnode;
8629 	ASSERT(vp != NULL);
8630 	VN_HOLD(vp);
8631 	return (vp);
8632 }
8633 
8634 /*
8635  * Return the stream head write queue for the given vp.
8636  * It is the responsibility of the calling routine to ensure
8637  * that the stream or vnode does not close.
8638  */
8639 queue_t *
8640 strvp2wq(vnode_t *vp)
8641 {
8642 	ASSERT(vp->v_stream != NULL);
8643 	return (vp->v_stream->sd_wrq);
8644 }
8645 
8646 /*
8647  * Run pollwakeup() on the stream head.
8648  * It is the responsibility of the calling routine to ensure
8649  * that the stream or vnode does not close.
8650  */
8651 void
8652 strpollwakeup(vnode_t *vp, short event)
8653 {
8654 	ASSERT(vp->v_stream);
8655 	pollwakeup(&vp->v_stream->sd_pollist, event);
8656 }
8657 
8658 /*
8659  * Mate the stream heads of two vnodes together. If the two vnodes are the
8660  * same, we just make the write-side point at the read-side -- otherwise,
8661  * we do a full mate.  Only works on vnodes associated with streams that are
8662  * still being built and thus have only a stream head.
8663  */
8664 void
8665 strmate(vnode_t *vp1, vnode_t *vp2)
8666 {
8667 	queue_t *wrq1 = strvp2wq(vp1);
8668 	queue_t *wrq2 = strvp2wq(vp2);
8669 
8670 	/*
8671 	 * Verify that there are no modules on the stream yet. We also
8672 	 * rely on the stream head always having a service procedure to
8673 	 * avoid tweaking q_nfsrv.
8674 	 */
8675 	ASSERT(wrq1->q_next == NULL && wrq2->q_next == NULL);
8676 	ASSERT(wrq1->q_qinfo->qi_srvp != NULL);
8677 	ASSERT(wrq2->q_qinfo->qi_srvp != NULL);
8678 
8679 	/*
8680 	 * If the queues are the same, just twist; otherwise do a full mate.
8681 	 */
8682 	if (wrq1 == wrq2) {
8683 		wrq1->q_next = _RD(wrq1);
8684 	} else {
8685 		wrq1->q_next = _RD(wrq2);
8686 		wrq2->q_next = _RD(wrq1);
8687 		STREAM(wrq1)->sd_mate = STREAM(wrq2);
8688 		STREAM(wrq1)->sd_flag |= STRMATE;
8689 		STREAM(wrq2)->sd_mate = STREAM(wrq1);
8690 		STREAM(wrq2)->sd_flag |= STRMATE;
8691 	}
8692 }
8693 
8694 /*
8695  * XXX will go away when console is correctly fixed.
8696  * Clean up the console PIDs from previous I_SETSIG calls;
8697  * called only for cnopen, which never calls strclean().
8698  */
8699 void
8700 str_cn_clean(struct vnode *vp)
8701 {
8702 	strsig_t *ssp, *pssp, *tssp;
8703 	struct stdata *stp;
8704 	struct pid *pidp;
8705 	int update = 0;
8706 
8707 	ASSERT(vp->v_stream);
8708 	stp = vp->v_stream;
8709 	pssp = NULL;
8710 	mutex_enter(&stp->sd_lock);
8711 	ssp = stp->sd_siglist;
8712 	while (ssp) {
8713 		mutex_enter(&pidlock);
8714 		pidp = ssp->ss_pidp;
8715 		/*
8716 		 * Get rid of the PID if the proc is gone.
8717 		 */
8718 		if (pidp->pid_prinactive) {
8719 			tssp = ssp->ss_next;
8720 			if (pssp)
8721 				pssp->ss_next = tssp;
8722 			else
8723 				stp->sd_siglist = tssp;
8724 			ASSERT(pidp->pid_ref <= 1);
8725 			PID_RELE(ssp->ss_pidp);
8726 			mutex_exit(&pidlock);
8727 			kmem_free(ssp, sizeof (strsig_t));
8728 			update = 1;
8729 			ssp = tssp;
8730 			continue;
8731 		} else
8732 			mutex_exit(&pidlock);
8733 		pssp = ssp;
8734 		ssp = ssp->ss_next;
8735 	}
8736 	if (update) {
8737 		stp->sd_sigflags = 0;
8738 		for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next)
8739 			stp->sd_sigflags |= ssp->ss_events;
8740 	}
8741 	mutex_exit(&stp->sd_lock);
8742 }
8743 
8744 /*
8745  * Return B_TRUE if there is data in the message, B_FALSE otherwise.
8746  */
8747 static boolean_t
8748 msghasdata(mblk_t *bp)
8749 {
8750 	for (; bp; bp = bp->b_cont)
8751 		if (bp->b_datap->db_type == M_DATA) {
8752 			ASSERT(bp->b_wptr >= bp->b_rptr);
8753 			if (bp->b_wptr > bp->b_rptr)
8754 				return (B_TRUE);
8755 		}
8756 	return (B_FALSE);
8757 }
8758 
8759 /*
8760  * Called on the first strget() of a sodirect/uioa enabled stream head;
8761  * if any mblk_t(s) are enqueued they must first be uioamove()d before
8762  * uioa can be enabled for the underlying transport's use.
8763  */
8764 void
8765 struioainit(queue_t *q, sodirect_t *sodp, uio_t *uiop)
8766 {
8767 	uioa_t *uioap = (uioa_t *)uiop;
8768 	mblk_t *bp;
8769 	mblk_t *lbp = NULL;
8770 	mblk_t *wbp;
8771 	int len;
8772 	int error;
8773 
8774 	ASSERT(MUTEX_HELD(sodp->sod_lockp));
8775 	ASSERT(&sodp->sod_uioa == uioap);
8776 
8777 	/*
8778 	 * Walk the first b_cont chain in sod_q
8779 	 * and schedule any M_DATA mblk_t's for uio asynchronous move.
8780 	 */
8781 	mutex_enter(QLOCK(q));
8782 	if ((bp = q->q_first) == NULL) {
8783 		mutex_exit(QLOCK(q));
8784 		return;
8785 	}
8786 	/* Walk the chain */
8787 	wbp = bp;
8788 	do {
8789 		if (wbp->b_datap->db_type != M_DATA) {
8790 			/* Not M_DATA, no more uioa */
8791 			goto nouioa;
8792 		}
8793 		if ((len = wbp->b_wptr - wbp->b_rptr) > 0) {
8794 			/* Have an M_DATA mblk_t with data */
8795 			if (len > uioap->uio_resid) {
8796 				/* Not enough uio space */
8797 				goto nouioa;
8798 			}
8799 			ASSERT(!(wbp->b_datap->db_flags & DBLK_UIOA));
8800 			error = uioamove(wbp->b_rptr, len,
8801 			    UIO_READ, uioap);
8802 			if (!error) {
8803 				/* Scheduled, mark dblk_t as such */
8804 				wbp->b_datap->db_flags |= DBLK_UIOA;
8805 			} else {
8806 				/* Break the mblk chain */
8807 				goto nouioa;
8808 			}
8809 		}
8810 		/* Save last wbp processed */
8811 		lbp = wbp;
8812 	} while ((wbp = wbp->b_cont) != NULL);
8813 
8814 	mutex_exit(QLOCK(q));
8815 	return;
8816 
8817 nouioa:
8818 	/* No more uioa */
8819 	uioap->uioa_state &= UIOA_CLR;
8820 	uioap->uioa_state |= UIOA_FINI;
8821 
8822 	/*
8823 	 * If we processed one or more mblk_t(s), then we need to split the
8824 	 * current mblk_t chain in two so that all the uioamove()ed mblk_t(s)
8825 	 * are in the current chain and the rest are in the following new
8826 	 * chain.
8827 	 */
8828 	if (lbp != NULL) {
8829 		/* New end of current chain */
8830 		lbp->b_cont = NULL;
8831 
8832 		/* Insert new chain wbp after bp */
8833 		if ((wbp->b_next = bp->b_next) != NULL)
8834 			bp->b_next->b_prev = wbp;
8835 		else
8836 			q->q_last = wbp;
8837 		wbp->b_prev = bp;
8838 		bp->b_next = wbp;
8839 	}
8840 	mutex_exit(QLOCK(q));
8841 }
8842 
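
/*
 * Editorial illustration (not part of the original source): the revents
 * assembled by strpoll() above are what a user process ultimately sees
 * from poll(2) on a stream.  A minimal userland sketch follows; the
 * device path is hypothetical, the inner comments use C99 // syntax so
 * the program can be lifted out verbatim, and none of this is built as
 * part of the kernel.
 *
 *	#include <poll.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct pollfd pfd;
 *
 *		// Hypothetical STREAMS device node.
 *		pfd.fd = open("/dev/some_stream", O_RDWR);
 *		if (pfd.fd == -1)
 *			return (1);
 *		pfd.events = POLLIN | POLLRDBAND | POLLPRI;
 *
 *		// Blocks until strpoll() reports an event, or until a
 *		// later pollwakeup() fires via the SR_POLLIN path that
 *		// strpoll() set up when no events were pending.
 *		if (poll(&pfd, 1, -1) > 0) {
 *			if (pfd.revents & POLLPRI)
 *				(void) printf("high-priority message (STRPRI)\n");
 *			if (pfd.revents & (POLLIN | POLLRDNORM))
 *				(void) printf("band-0 data on the read queue\n");
 *			if (pfd.revents & POLLRDBAND)
 *				(void) printf("priority-band data\n");
 *			if (pfd.revents & POLLHUP)
 *				(void) printf("stream hung up (STRHUP)\n");
 *		}
 *		(void) close(pfd.fd);
 *		return (0);
 *	}
 */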
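
/*
 * Editorial illustration (not part of the original source): the
 * consolidation branch in putback() relies on linkb(9F) appending the
 * already-queued message *after* the message being put back, which is
 * what preserves byte ordering.  A small, self-contained sketch of that
 * property, using only standard DDI routines (allocb, bcopy, linkb,
 * msgdsize, freemsg), is shown below; it is illustrative only and the
 * literal byte values are arbitrary.
 *
 *	mblk_t *bp = allocb(3, BPRI_MED);	// message being put back
 *	mblk_t *qfirst = allocb(3, BPRI_MED);	// message already at q_first
 *
 *	if (bp != NULL && qfirst != NULL) {
 *		bcopy("abc", bp->b_wptr, 3);
 *		bp->b_wptr += 3;
 *		bcopy("def", qfirst->b_wptr, 3);
 *		qfirst->b_wptr += 3;
 *
 *		// linkb() appends qfirst to the tail of bp's b_cont chain,
 *		// so a reader sees "abc" before "def" -- the arrival order.
 *		linkb(bp, qfirst);
 *		ASSERT(msgdsize(bp) == 6);
 *		freemsg(bp);			// frees the linked chain too
 *	} else {
 *		freemsg(bp);			// freemsg(NULL) is a no-op
 *		freemsg(qfirst);
 *	}
 */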