1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */ 22 /* All Rights Reserved */ 23 24 25 /* 26 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 27 * Use is subject to license terms. 28 */ 29 30 #pragma ident "%Z%%M% %I% %E% SMI" 31 32 #include <sys/types.h> 33 #include <sys/sysmacros.h> 34 #include <sys/param.h> 35 #include <sys/errno.h> 36 #include <sys/signal.h> 37 #include <sys/stat.h> 38 #include <sys/proc.h> 39 #include <sys/cred.h> 40 #include <sys/user.h> 41 #include <sys/vnode.h> 42 #include <sys/file.h> 43 #include <sys/stream.h> 44 #include <sys/strsubr.h> 45 #include <sys/stropts.h> 46 #include <sys/tihdr.h> 47 #include <sys/var.h> 48 #include <sys/poll.h> 49 #include <sys/termio.h> 50 #include <sys/ttold.h> 51 #include <sys/systm.h> 52 #include <sys/uio.h> 53 #include <sys/cmn_err.h> 54 #include <sys/sad.h> 55 #include <sys/netstack.h> 56 #include <sys/priocntl.h> 57 #include <sys/jioctl.h> 58 #include <sys/procset.h> 59 #include <sys/session.h> 60 #include <sys/kmem.h> 61 #include <sys/filio.h> 62 #include <sys/vtrace.h> 63 #include <sys/debug.h> 64 #include <sys/strredir.h> 65 #include <sys/fs/fifonode.h> 66 #include <sys/fs/snode.h> 67 #include <sys/strlog.h> 68 #include <sys/strsun.h> 69 #include <sys/project.h> 70 #include <sys/kbio.h> 71 #include <sys/msio.h> 72 #include <sys/tty.h> 73 #include <sys/ptyvar.h> 74 #include <sys/vuid_event.h> 75 #include <sys/modctl.h> 76 #include <sys/sunddi.h> 77 #include <sys/sunldi_impl.h> 78 #include <sys/autoconf.h> 79 #include <sys/policy.h> 80 #include <sys/dld.h> 81 #include <sys/zone.h> 82 83 /* 84 * This define helps improve the readability of streams code while 85 * still maintaining a very old streams performance enhancement. The 86 * performance enhancement basically involved having all callers 87 * of straccess() perform the first check that straccess() will do 88 * locally before actually calling straccess(). (There by reducing 89 * the number of unnecessary calls to straccess().) 90 */ 91 #define i_straccess(x, y) ((stp->sd_sidp == NULL) ? 0 : \ 92 (stp->sd_vnode->v_type == VFIFO) ? 0 : \ 93 straccess((x), (y))) 94 95 /* 96 * what is mblk_pull_len? 97 * 98 * If a streams message consists of many short messages, 99 * a performance degradation occurs from copyout overhead. 100 * To decrease the per mblk overhead, messages that are 101 * likely to consist of many small mblks are pulled up into 102 * one continuous chunk of memory. 103 * 104 * To avoid the processing overhead of examining every 105 * mblk, a quick heuristic is used. If the first mblk in 106 * the message is shorter than mblk_pull_len, it is likely 107 * that the rest of the mblk will be short. 
108 * 109 * This heuristic was decided upon after performance tests 110 * indicated that anything more complex slowed down the main 111 * code path. 112 */ 113 #define MBLK_PULL_LEN 64 114 uint32_t mblk_pull_len = MBLK_PULL_LEN; 115 116 /* 117 * The sgttyb_handling flag controls the handling of the old BSD 118 * TIOCGETP, TIOCSETP, and TIOCSETN ioctls as follows: 119 * 120 * 0 - Emit no warnings at all and retain old, broken behavior. 121 * 1 - Emit no warnings and silently handle new semantics. 122 * 2 - Send cmn_err(CE_NOTE) when either TIOCSETP or TIOCSETN is used 123 * (once per system invocation). Handle with new semantics. 124 * 3 - Send SIGSYS when any TIOCGETP, TIOCSETP, or TIOCSETN call is 125 * made (so that offenders drop core and are easy to debug). 126 * 127 * The "new semantics" are that TIOCGETP returns B38400 for 128 * sg_[io]speed if the corresponding value is over B38400, and that 129 * TIOCSET[PN] accept B38400 in these cases to mean "retain current 130 * bit rate." 131 */ 132 int sgttyb_handling = 1; 133 static boolean_t sgttyb_complaint; 134 135 /* don't push drcompat module by default on Style-2 streams */ 136 static int push_drcompat = 0; 137 138 /* 139 * id value used to distinguish between different ioctl messages 140 */ 141 static uint32_t ioc_id; 142 143 static void putback(struct stdata *, queue_t *, mblk_t *, int); 144 static void strcleanall(struct vnode *); 145 static int strwsrv(queue_t *); 146 static int strdocmd(struct stdata *, struct strcmd *, cred_t *); 147 static void struioainit(queue_t *, sodirect_t *, uio_t *); 148 149 /* 150 * qinit and module_info structures for stream head read and write queues 151 */ 152 struct module_info strm_info = { 0, "strrhead", 0, INFPSZ, STRHIGH, STRLOW }; 153 struct module_info stwm_info = { 0, "strwhead", 0, 0, 0, 0 }; 154 struct qinit strdata = { strrput, NULL, NULL, NULL, NULL, &strm_info }; 155 struct qinit stwdata = { NULL, strwsrv, NULL, NULL, NULL, &stwm_info }; 156 struct module_info fiform_info = { 0, "fifostrrhead", 0, PIPE_BUF, FIFOHIWAT, 157 FIFOLOWAT }; 158 struct module_info fifowm_info = { 0, "fifostrwhead", 0, 0, 0, 0 }; 159 struct qinit fifo_strdata = { strrput, NULL, NULL, NULL, NULL, &fiform_info }; 160 struct qinit fifo_stwdata = { NULL, strwsrv, NULL, NULL, NULL, &fifowm_info }; 161 162 extern kmutex_t strresources; /* protects global resources */ 163 extern kmutex_t muxifier; /* single-threads multiplexor creation */ 164 165 static boolean_t msghasdata(mblk_t *bp); 166 #define msgnodata(bp) (!msghasdata(bp)) 167 168 /* 169 * Stream head locking notes: 170 * There are four monitors associated with the stream head: 171 * 1. v_stream monitor: in stropen() and strclose() v_lock 172 * is held while the association of vnode and stream 173 * head is established or tested for. 174 * 2. open/close/push/pop monitor: sd_lock is held while each 175 * thread bids for exclusive access to this monitor 176 * for opening or closing a stream. In addition, this 177 * monitor is entered during pushes and pops. This 178 * guarantees that during plumbing operations there 179 * is only one thread trying to change the plumbing. 180 * Any other threads present in the stream are only 181 * using the plumbing. 182 * 3. read/write monitor: in the case of read, a thread holds 183 * sd_lock while trying to get data from the stream 184 * head queue. if there is none to fulfill a read 185 * request, it sets RSLEEP and calls cv_wait_sig() down 186 * in strwaitq() to await the arrival of new data. 
187 * when new data arrives in strrput(), sd_lock is acquired 188 * before testing for RSLEEP and calling cv_broadcast(). 189 * the behavior of strwrite(), strwsrv(), and WSLEEP 190 * mirror this. 191 * 4. ioctl monitor: sd_lock is gotten to ensure that only one 192 * thread is doing an ioctl at a time. 193 * 194 * Note, for sodirect case 3. is extended to (*sodirect_t.sod_enqueue)() 195 * call-back from below, further the sodirect support is for code paths 196 * called via kstgetmsg(), all other code paths ASSERT() that sodirect 197 * uioa generated mblk_t's (i.e. DBLK_UIOA) aren't processed. 198 */ 199 200 static int 201 push_mod(queue_t *qp, dev_t *devp, struct stdata *stp, const char *name, 202 int anchor, cred_t *crp, uint_t anchor_zoneid) 203 { 204 int error; 205 fmodsw_impl_t *fp; 206 207 if (stp->sd_flag & (STRHUP|STRDERR|STWRERR)) { 208 error = (stp->sd_flag & STRHUP) ? ENXIO : EIO; 209 return (error); 210 } 211 if (stp->sd_pushcnt >= nstrpush) { 212 return (EINVAL); 213 } 214 215 if ((fp = fmodsw_find(name, FMODSW_HOLD | FMODSW_LOAD)) == NULL) { 216 stp->sd_flag |= STREOPENFAIL; 217 return (EINVAL); 218 } 219 220 /* 221 * push new module and call its open routine via qattach 222 */ 223 if ((error = qattach(qp, devp, 0, crp, fp, B_FALSE)) != 0) 224 return (error); 225 226 /* 227 * Check to see if caller wants a STREAMS anchor 228 * put at this place in the stream, and add if so. 229 */ 230 mutex_enter(&stp->sd_lock); 231 if (anchor == stp->sd_pushcnt) { 232 stp->sd_anchor = stp->sd_pushcnt; 233 stp->sd_anchorzone = anchor_zoneid; 234 } 235 mutex_exit(&stp->sd_lock); 236 237 return (0); 238 } 239 240 /* 241 * Open a stream device. 242 */ 243 int 244 stropen(vnode_t *vp, dev_t *devp, int flag, cred_t *crp) 245 { 246 struct stdata *stp; 247 queue_t *qp; 248 int s; 249 dev_t dummydev, savedev; 250 struct autopush *ap; 251 struct dlautopush dlap; 252 int error = 0; 253 ssize_t rmin, rmax; 254 int cloneopen; 255 queue_t *brq; 256 major_t major; 257 str_stack_t *ss; 258 zoneid_t zoneid; 259 uint_t anchor; 260 261 if (audit_active) 262 audit_stropen(vp, devp, flag, crp); 263 264 /* 265 * If the stream already exists, wait for any open in progress 266 * to complete, then call the open function of each module and 267 * driver in the stream. Otherwise create the stream. 268 */ 269 TRACE_1(TR_FAC_STREAMS_FR, TR_STROPEN, "stropen:%p", vp); 270 retry: 271 mutex_enter(&vp->v_lock); 272 if ((stp = vp->v_stream) != NULL) { 273 274 /* 275 * Waiting for stream to be created to device 276 * due to another open. 
277 */ 278 mutex_exit(&vp->v_lock); 279 280 if (STRMATED(stp)) { 281 struct stdata *strmatep = stp->sd_mate; 282 283 STRLOCKMATES(stp); 284 if (strmatep->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 285 if (flag & (FNDELAY|FNONBLOCK)) { 286 error = EAGAIN; 287 mutex_exit(&strmatep->sd_lock); 288 goto ckreturn; 289 } 290 mutex_exit(&stp->sd_lock); 291 if (!cv_wait_sig(&strmatep->sd_monitor, 292 &strmatep->sd_lock)) { 293 error = EINTR; 294 mutex_exit(&strmatep->sd_lock); 295 mutex_enter(&stp->sd_lock); 296 goto ckreturn; 297 } 298 mutex_exit(&strmatep->sd_lock); 299 goto retry; 300 } 301 if (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 302 if (flag & (FNDELAY|FNONBLOCK)) { 303 error = EAGAIN; 304 mutex_exit(&strmatep->sd_lock); 305 goto ckreturn; 306 } 307 mutex_exit(&strmatep->sd_lock); 308 if (!cv_wait_sig(&stp->sd_monitor, 309 &stp->sd_lock)) { 310 error = EINTR; 311 goto ckreturn; 312 } 313 mutex_exit(&stp->sd_lock); 314 goto retry; 315 } 316 317 if (stp->sd_flag & (STRDERR|STWRERR)) { 318 error = EIO; 319 mutex_exit(&strmatep->sd_lock); 320 goto ckreturn; 321 } 322 323 stp->sd_flag |= STWOPEN; 324 STRUNLOCKMATES(stp); 325 } else { 326 mutex_enter(&stp->sd_lock); 327 if (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 328 if (flag & (FNDELAY|FNONBLOCK)) { 329 error = EAGAIN; 330 goto ckreturn; 331 } 332 if (!cv_wait_sig(&stp->sd_monitor, 333 &stp->sd_lock)) { 334 error = EINTR; 335 goto ckreturn; 336 } 337 mutex_exit(&stp->sd_lock); 338 goto retry; /* could be clone! */ 339 } 340 341 if (stp->sd_flag & (STRDERR|STWRERR)) { 342 error = EIO; 343 goto ckreturn; 344 } 345 346 stp->sd_flag |= STWOPEN; 347 mutex_exit(&stp->sd_lock); 348 } 349 350 /* 351 * Open all modules and devices down stream to notify 352 * that another user is streaming. For modules, set the 353 * last argument to MODOPEN and do not pass any open flags. 354 * Ignore dummydev since this is not the first open. 355 */ 356 claimstr(stp->sd_wrq); 357 qp = stp->sd_wrq; 358 while (_SAMESTR(qp)) { 359 qp = qp->q_next; 360 if ((error = qreopen(_RD(qp), devp, flag, crp)) != 0) 361 break; 362 } 363 releasestr(stp->sd_wrq); 364 mutex_enter(&stp->sd_lock); 365 stp->sd_flag &= ~(STRHUP|STWOPEN|STRDERR|STWRERR); 366 stp->sd_rerror = 0; 367 stp->sd_werror = 0; 368 ckreturn: 369 cv_broadcast(&stp->sd_monitor); 370 mutex_exit(&stp->sd_lock); 371 return (error); 372 } 373 374 /* 375 * This vnode isn't streaming. SPECFS already 376 * checked for multiple vnodes pointing to the 377 * same stream, so create a stream to the driver. 378 */ 379 qp = allocq(); 380 stp = shalloc(qp); 381 382 /* 383 * Initialize stream head. shalloc() has given us 384 * exclusive access, and we have the vnode locked; 385 * we can do whatever we want with stp. 
386 */ 387 stp->sd_flag = STWOPEN; 388 stp->sd_siglist = NULL; 389 stp->sd_pollist.ph_list = NULL; 390 stp->sd_sigflags = 0; 391 stp->sd_mark = NULL; 392 stp->sd_closetime = STRTIMOUT; 393 stp->sd_sidp = NULL; 394 stp->sd_pgidp = NULL; 395 stp->sd_vnode = vp; 396 stp->sd_rerror = 0; 397 stp->sd_werror = 0; 398 stp->sd_wroff = 0; 399 stp->sd_tail = 0; 400 stp->sd_iocblk = NULL; 401 stp->sd_cmdblk = NULL; 402 stp->sd_pushcnt = 0; 403 stp->sd_qn_minpsz = 0; 404 stp->sd_qn_maxpsz = INFPSZ - 1; /* used to check for initialization */ 405 stp->sd_maxblk = INFPSZ; 406 stp->sd_sodirect = NULL; 407 qp->q_ptr = _WR(qp)->q_ptr = stp; 408 STREAM(qp) = STREAM(_WR(qp)) = stp; 409 vp->v_stream = stp; 410 mutex_exit(&vp->v_lock); 411 if (vp->v_type == VFIFO) { 412 stp->sd_flag |= OLDNDELAY; 413 /* 414 * This means, both for pipes and fifos 415 * strwrite will send SIGPIPE if the other 416 * end is closed. For putmsg it depends 417 * on whether it is a XPG4_2 application 418 * or not 419 */ 420 stp->sd_wput_opt = SW_SIGPIPE; 421 422 /* setq might sleep in kmem_alloc - avoid holding locks. */ 423 setq(qp, &fifo_strdata, &fifo_stwdata, NULL, QMTSAFE, 424 SQ_CI|SQ_CO, B_FALSE); 425 426 set_qend(qp); 427 stp->sd_strtab = fifo_getinfo(); 428 _WR(qp)->q_nfsrv = _WR(qp); 429 qp->q_nfsrv = qp; 430 /* 431 * Wake up others that are waiting for stream to be created. 432 */ 433 mutex_enter(&stp->sd_lock); 434 /* 435 * nothing is be pushed on stream yet, so 436 * optimized stream head packetsizes are just that 437 * of the read queue 438 */ 439 stp->sd_qn_minpsz = qp->q_minpsz; 440 stp->sd_qn_maxpsz = qp->q_maxpsz; 441 stp->sd_flag &= ~STWOPEN; 442 goto fifo_opendone; 443 } 444 /* setq might sleep in kmem_alloc - avoid holding locks. */ 445 setq(qp, &strdata, &stwdata, NULL, QMTSAFE, SQ_CI|SQ_CO, B_FALSE); 446 447 set_qend(qp); 448 449 /* 450 * Open driver and create stream to it (via qattach). 451 */ 452 savedev = *devp; 453 cloneopen = (getmajor(*devp) == clone_major); 454 if ((error = qattach(qp, devp, flag, crp, NULL, B_FALSE)) != 0) { 455 mutex_enter(&vp->v_lock); 456 vp->v_stream = NULL; 457 mutex_exit(&vp->v_lock); 458 mutex_enter(&stp->sd_lock); 459 cv_broadcast(&stp->sd_monitor); 460 mutex_exit(&stp->sd_lock); 461 freeq(_RD(qp)); 462 shfree(stp); 463 return (error); 464 } 465 /* 466 * Set sd_strtab after open in order to handle clonable drivers 467 */ 468 stp->sd_strtab = STREAMSTAB(getmajor(*devp)); 469 470 /* 471 * Historical note: dummydev used to be be prior to the initial 472 * open (via qattach above), which made the value seen 473 * inconsistent between an I_PUSH and an autopush of a module. 474 */ 475 dummydev = *devp; 476 477 /* 478 * For clone open of old style (Q not associated) network driver, 479 * push DRMODNAME module to handle DL_ATTACH/DL_DETACH 480 */ 481 brq = _RD(_WR(qp)->q_next); 482 major = getmajor(*devp); 483 if (push_drcompat && cloneopen && NETWORK_DRV(major) && 484 ((brq->q_flag & _QASSOCIATED) == 0)) { 485 if (push_mod(qp, &dummydev, stp, DRMODNAME, 0, crp, 0) != 0) 486 cmn_err(CE_WARN, "cannot push " DRMODNAME 487 " streams module"); 488 } 489 490 if (!NETWORK_DRV(major)) { 491 savedev = *devp; 492 } else { 493 /* 494 * For network devices, process differently based on the 495 * return value from dld_autopush(): 496 * 497 * 0: the passed-in device points to a GLDv3 datalink with 498 * per-link autopush configuration; use that configuration 499 * and ignore any per-driver autopush configuration. 
500 * 501 * 1: the passed-in device points to a physical GLDv3 502 * datalink without per-link autopush configuration. The 503 * passed in device was changed to refer to the actual 504 * physical device (if it's not already); we use that new 505 * device to look up any per-driver autopush configuration. 506 * 507 * -1: neither of the above cases applied; use the initial 508 * device to look up any per-driver autopush configuration. 509 */ 510 switch (dld_autopush(&savedev, &dlap)) { 511 case 0: 512 zoneid = crgetzoneid(crp); 513 for (s = 0; s < dlap.dap_npush; s++) { 514 error = push_mod(qp, &dummydev, stp, 515 dlap.dap_aplist[s], dlap.dap_anchor, crp, 516 zoneid); 517 if (error != 0) 518 break; 519 } 520 goto opendone; 521 case 1: 522 break; 523 case -1: 524 savedev = *devp; 525 break; 526 } 527 } 528 /* 529 * Find the autopush configuration based on "savedev". Start with the 530 * global zone. If not found check in the local zone. 531 */ 532 zoneid = GLOBAL_ZONEID; 533 retryap: 534 ss = netstack_find_by_stackid(zoneid_to_netstackid(zoneid))-> 535 netstack_str; 536 if ((ap = sad_ap_find_by_dev(savedev, ss)) == NULL) { 537 netstack_rele(ss->ss_netstack); 538 if (zoneid == GLOBAL_ZONEID) { 539 /* 540 * None found. Also look in the zone's autopush table. 541 */ 542 zoneid = crgetzoneid(crp); 543 if (zoneid != GLOBAL_ZONEID) 544 goto retryap; 545 } 546 goto opendone; 547 } 548 anchor = ap->ap_anchor; 549 zoneid = crgetzoneid(crp); 550 for (s = 0; s < ap->ap_npush; s++) { 551 error = push_mod(qp, &dummydev, stp, ap->ap_list[s], 552 anchor, crp, zoneid); 553 if (error != 0) 554 break; 555 } 556 sad_ap_rele(ap, ss); 557 netstack_rele(ss->ss_netstack); 558 559 opendone: 560 561 /* 562 * let specfs know that open failed part way through 563 */ 564 if (error) { 565 mutex_enter(&stp->sd_lock); 566 stp->sd_flag |= STREOPENFAIL; 567 mutex_exit(&stp->sd_lock); 568 } 569 570 /* 571 * Wake up others that are waiting for stream to be created. 572 */ 573 mutex_enter(&stp->sd_lock); 574 stp->sd_flag &= ~STWOPEN; 575 576 /* 577 * As a performance concern we are caching the values of 578 * q_minpsz and q_maxpsz of the module below the stream 579 * head in the stream head. 580 */ 581 mutex_enter(QLOCK(stp->sd_wrq->q_next)); 582 rmin = stp->sd_wrq->q_next->q_minpsz; 583 rmax = stp->sd_wrq->q_next->q_maxpsz; 584 mutex_exit(QLOCK(stp->sd_wrq->q_next)); 585 586 /* do this processing here as a performance concern */ 587 if (strmsgsz != 0) { 588 if (rmax == INFPSZ) 589 rmax = strmsgsz; 590 else 591 rmax = MIN(strmsgsz, rmax); 592 } 593 594 mutex_enter(QLOCK(stp->sd_wrq)); 595 stp->sd_qn_minpsz = rmin; 596 stp->sd_qn_maxpsz = rmax; 597 mutex_exit(QLOCK(stp->sd_wrq)); 598 599 fifo_opendone: 600 cv_broadcast(&stp->sd_monitor); 601 mutex_exit(&stp->sd_lock); 602 return (error); 603 } 604 605 static int strsink(queue_t *, mblk_t *); 606 static struct qinit deadrend = { 607 strsink, NULL, NULL, NULL, NULL, &strm_info, NULL 608 }; 609 static struct qinit deadwend = { 610 NULL, NULL, NULL, NULL, NULL, &stwm_info, NULL 611 }; 612 613 /* 614 * Close a stream. 615 * This is called from closef() on the last close of an open stream. 616 * Strclean() will already have removed the siglist and pollist 617 * information, so all that remains is to remove all multiplexor links 618 * for the stream, pop all the modules (and the driver), and free the 619 * stream structure. 
620 */ 621 622 int 623 strclose(struct vnode *vp, int flag, cred_t *crp) 624 { 625 struct stdata *stp; 626 queue_t *qp; 627 int rval; 628 int freestp = 1; 629 queue_t *rmq; 630 631 if (audit_active) 632 audit_strclose(vp, flag, crp); 633 634 TRACE_1(TR_FAC_STREAMS_FR, 635 TR_STRCLOSE, "strclose:%p", vp); 636 ASSERT(vp->v_stream); 637 638 stp = vp->v_stream; 639 ASSERT(!(stp->sd_flag & STPLEX)); 640 qp = stp->sd_wrq; 641 642 /* 643 * Needed so that strpoll will return non-zero for this fd. 644 * Note that with POLLNOERR STRHUP does still cause POLLHUP. 645 */ 646 mutex_enter(&stp->sd_lock); 647 stp->sd_flag |= STRHUP; 648 mutex_exit(&stp->sd_lock); 649 650 /* 651 * If the registered process or process group did not have an 652 * open instance of this stream then strclean would not be 653 * called. Thus at the time of closing all remaining siglist entries 654 * are removed. 655 */ 656 if (stp->sd_siglist != NULL) 657 strcleanall(vp); 658 659 ASSERT(stp->sd_siglist == NULL); 660 ASSERT(stp->sd_sigflags == 0); 661 662 if (STRMATED(stp)) { 663 struct stdata *strmatep = stp->sd_mate; 664 int waited = 1; 665 666 STRLOCKMATES(stp); 667 while (waited) { 668 waited = 0; 669 while (stp->sd_flag & (STWOPEN|STRCLOSE|STRPLUMB)) { 670 mutex_exit(&strmatep->sd_lock); 671 cv_wait(&stp->sd_monitor, &stp->sd_lock); 672 mutex_exit(&stp->sd_lock); 673 STRLOCKMATES(stp); 674 waited = 1; 675 } 676 while (strmatep->sd_flag & 677 (STWOPEN|STRCLOSE|STRPLUMB)) { 678 mutex_exit(&stp->sd_lock); 679 cv_wait(&strmatep->sd_monitor, 680 &strmatep->sd_lock); 681 mutex_exit(&strmatep->sd_lock); 682 STRLOCKMATES(stp); 683 waited = 1; 684 } 685 } 686 stp->sd_flag |= STRCLOSE; 687 STRUNLOCKMATES(stp); 688 } else { 689 mutex_enter(&stp->sd_lock); 690 stp->sd_flag |= STRCLOSE; 691 mutex_exit(&stp->sd_lock); 692 } 693 694 ASSERT(qp->q_first == NULL); /* No more delayed write */ 695 696 /* Check if an I_LINK was ever done on this stream */ 697 if (stp->sd_flag & STRHASLINKS) { 698 netstack_t *ns; 699 str_stack_t *ss; 700 701 ns = netstack_find_by_cred(crp); 702 ASSERT(ns != NULL); 703 ss = ns->netstack_str; 704 ASSERT(ss != NULL); 705 706 (void) munlinkall(stp, LINKCLOSE|LINKNORMAL, crp, &rval, ss); 707 netstack_rele(ss->ss_netstack); 708 } 709 710 while (_SAMESTR(qp)) { 711 /* 712 * Holding sd_lock prevents q_next from changing in 713 * this stream. 714 */ 715 mutex_enter(&stp->sd_lock); 716 if (!(flag & (FNDELAY|FNONBLOCK)) && (stp->sd_closetime > 0)) { 717 718 /* 719 * sleep until awakened by strwsrv() or timeout 720 */ 721 for (;;) { 722 mutex_enter(QLOCK(qp->q_next)); 723 if (!(qp->q_next->q_mblkcnt)) { 724 mutex_exit(QLOCK(qp->q_next)); 725 break; 726 } 727 stp->sd_flag |= WSLEEP; 728 729 /* ensure strwsrv gets enabled */ 730 qp->q_next->q_flag |= QWANTW; 731 mutex_exit(QLOCK(qp->q_next)); 732 /* get out if we timed out or recv'd a signal */ 733 if (str_cv_wait(&qp->q_wait, &stp->sd_lock, 734 stp->sd_closetime, 0) <= 0) { 735 break; 736 } 737 } 738 stp->sd_flag &= ~WSLEEP; 739 } 740 mutex_exit(&stp->sd_lock); 741 742 rmq = qp->q_next; 743 if (rmq->q_flag & QISDRV) { 744 ASSERT(!_SAMESTR(rmq)); 745 wait_sq_svc(_RD(qp)->q_syncq); 746 } 747 748 qdetach(_RD(rmq), 1, flag, crp, B_FALSE); 749 } 750 751 /* 752 * Since we call pollwakeup in close() now, the poll list should 753 * be empty in most cases. The only exception is the layered devices 754 * (e.g. the console drivers with redirection modules pushed on top 755 * of it). 
We have to do this after calling qdetach() because 756 * the redirection module won't have torn down the console 757 * redirection until after qdetach() has been invoked. 758 */ 759 if (stp->sd_pollist.ph_list != NULL) { 760 pollwakeup(&stp->sd_pollist, POLLERR); 761 pollhead_clean(&stp->sd_pollist); 762 } 763 ASSERT(stp->sd_pollist.ph_list == NULL); 764 ASSERT(stp->sd_sidp == NULL); 765 ASSERT(stp->sd_pgidp == NULL); 766 767 /* Prevent qenable from re-enabling the stream head queue */ 768 disable_svc(_RD(qp)); 769 770 /* 771 * Wait until service procedure of each queue is 772 * run, if QINSERVICE is set. 773 */ 774 wait_svc(_RD(qp)); 775 776 /* 777 * Now, flush both queues. 778 */ 779 flushq(_RD(qp), FLUSHALL); 780 flushq(qp, FLUSHALL); 781 782 /* 783 * If the write queue of the stream head is pointing to a 784 * read queue, we have a twisted stream. If the read queue 785 * is alive, convert the stream head queues into a dead end. 786 * If the read queue is dead, free the dead pair. 787 */ 788 if (qp->q_next && !_SAMESTR(qp)) { 789 if (qp->q_next->q_qinfo == &deadrend) { /* half-closed pipe */ 790 flushq(qp->q_next, FLUSHALL); /* ensure no message */ 791 shfree(qp->q_next->q_stream); 792 freeq(qp->q_next); 793 freeq(_RD(qp)); 794 } else if (qp->q_next == _RD(qp)) { /* fifo */ 795 freeq(_RD(qp)); 796 } else { /* pipe */ 797 freestp = 0; 798 /* 799 * The q_info pointers are never accessed when 800 * SQLOCK is held. 801 */ 802 ASSERT(qp->q_syncq == _RD(qp)->q_syncq); 803 mutex_enter(SQLOCK(qp->q_syncq)); 804 qp->q_qinfo = &deadwend; 805 _RD(qp)->q_qinfo = &deadrend; 806 mutex_exit(SQLOCK(qp->q_syncq)); 807 } 808 } else { 809 freeq(_RD(qp)); /* free stream head queue pair */ 810 } 811 812 mutex_enter(&vp->v_lock); 813 if (stp->sd_iocblk) { 814 if (stp->sd_iocblk != (mblk_t *)-1) { 815 freemsg(stp->sd_iocblk); 816 } 817 stp->sd_iocblk = NULL; 818 } 819 stp->sd_vnode = NULL; 820 vp->v_stream = NULL; 821 mutex_exit(&vp->v_lock); 822 mutex_enter(&stp->sd_lock); 823 freemsg(stp->sd_cmdblk); 824 stp->sd_cmdblk = NULL; 825 stp->sd_flag &= ~STRCLOSE; 826 cv_broadcast(&stp->sd_monitor); 827 mutex_exit(&stp->sd_lock); 828 829 if (freestp) 830 shfree(stp); 831 return (0); 832 } 833 834 static int 835 strsink(queue_t *q, mblk_t *bp) 836 { 837 struct copyresp *resp; 838 839 switch (bp->b_datap->db_type) { 840 case M_FLUSH: 841 if ((*bp->b_rptr & FLUSHW) && !(bp->b_flag & MSGNOLOOP)) { 842 *bp->b_rptr &= ~FLUSHR; 843 bp->b_flag |= MSGNOLOOP; 844 /* 845 * Protect against the driver passing up 846 * messages after it has done a qprocsoff. 847 */ 848 if (_OTHERQ(q)->q_next == NULL) 849 freemsg(bp); 850 else 851 qreply(q, bp); 852 } else { 853 freemsg(bp); 854 } 855 break; 856 857 case M_COPYIN: 858 case M_COPYOUT: 859 if (bp->b_cont) { 860 freemsg(bp->b_cont); 861 bp->b_cont = NULL; 862 } 863 bp->b_datap->db_type = M_IOCDATA; 864 bp->b_wptr = bp->b_rptr + sizeof (struct copyresp); 865 resp = (struct copyresp *)bp->b_rptr; 866 resp->cp_rval = (caddr_t)1; /* failure */ 867 /* 868 * Protect against the driver passing up 869 * messages after it has done a qprocsoff. 870 */ 871 if (_OTHERQ(q)->q_next == NULL) 872 freemsg(bp); 873 else 874 qreply(q, bp); 875 break; 876 877 case M_IOCTL: 878 if (bp->b_cont) { 879 freemsg(bp->b_cont); 880 bp->b_cont = NULL; 881 } 882 bp->b_datap->db_type = M_IOCNAK; 883 /* 884 * Protect against the driver passing up 885 * messages after it has done a qprocsoff. 
886 */ 887 if (_OTHERQ(q)->q_next == NULL) 888 freemsg(bp); 889 else 890 qreply(q, bp); 891 break; 892 893 default: 894 freemsg(bp); 895 break; 896 } 897 898 return (0); 899 } 900 901 /* 902 * Clean up after a process when it closes a stream. This is called 903 * from closef for all closes, whereas strclose is called only for the 904 * last close on a stream. The siglist is scanned for entries for the 905 * current process, and these are removed. 906 */ 907 void 908 strclean(struct vnode *vp) 909 { 910 strsig_t *ssp, *pssp, *tssp; 911 stdata_t *stp; 912 int update = 0; 913 914 TRACE_1(TR_FAC_STREAMS_FR, 915 TR_STRCLEAN, "strclean:%p", vp); 916 stp = vp->v_stream; 917 pssp = NULL; 918 mutex_enter(&stp->sd_lock); 919 ssp = stp->sd_siglist; 920 while (ssp) { 921 if (ssp->ss_pidp == curproc->p_pidp) { 922 tssp = ssp->ss_next; 923 if (pssp) 924 pssp->ss_next = tssp; 925 else 926 stp->sd_siglist = tssp; 927 mutex_enter(&pidlock); 928 PID_RELE(ssp->ss_pidp); 929 mutex_exit(&pidlock); 930 kmem_free(ssp, sizeof (strsig_t)); 931 update = 1; 932 ssp = tssp; 933 } else { 934 pssp = ssp; 935 ssp = ssp->ss_next; 936 } 937 } 938 if (update) { 939 stp->sd_sigflags = 0; 940 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next) 941 stp->sd_sigflags |= ssp->ss_events; 942 } 943 mutex_exit(&stp->sd_lock); 944 } 945 946 /* 947 * Used on the last close to remove any remaining items on the siglist. 948 * These could be present on the siglist due to I_ESETSIG calls that 949 * use process groups or processed that do not have an open file descriptor 950 * for this stream (Such entries would not be removed by strclean). 951 */ 952 static void 953 strcleanall(struct vnode *vp) 954 { 955 strsig_t *ssp, *nssp; 956 stdata_t *stp; 957 958 stp = vp->v_stream; 959 mutex_enter(&stp->sd_lock); 960 ssp = stp->sd_siglist; 961 stp->sd_siglist = NULL; 962 while (ssp) { 963 nssp = ssp->ss_next; 964 mutex_enter(&pidlock); 965 PID_RELE(ssp->ss_pidp); 966 mutex_exit(&pidlock); 967 kmem_free(ssp, sizeof (strsig_t)); 968 ssp = nssp; 969 } 970 stp->sd_sigflags = 0; 971 mutex_exit(&stp->sd_lock); 972 } 973 974 /* 975 * Retrieve the next message from the logical stream head read queue 976 * using either rwnext (if sync stream) or getq_noenab. 977 * It is the callers responsibility to call qbackenable after 978 * it is finished with the message. The caller should not call 979 * qbackenable until after any putback calls to avoid spurious backenabling. 980 * 981 * Also, handle uioa initialization and process any DBLK_UIOA flaged messages. 982 */ 983 mblk_t * 984 strget(struct stdata *stp, queue_t *q, struct uio *uiop, int first, 985 int *errorp) 986 { 987 sodirect_t *sodp = stp->sd_sodirect; 988 mblk_t *bp; 989 int error; 990 ssize_t rbytes = 0; 991 992 /* Holding sd_lock prevents the read queue from changing */ 993 ASSERT(MUTEX_HELD(&stp->sd_lock)); 994 995 if (uiop != NULL && stp->sd_struiordq != NULL && 996 q->q_first == NULL && 997 (!first || (stp->sd_wakeq & RSLEEP))) { 998 /* 999 * Stream supports rwnext() for the read side. 1000 * If this is the first time we're called by e.g. strread 1001 * only do the downcall if there is a deferred wakeup 1002 * (registered in sd_wakeq). 1003 */ 1004 struiod_t uiod; 1005 1006 if (first) 1007 stp->sd_wakeq &= ~RSLEEP; 1008 1009 (void) uiodup(uiop, &uiod.d_uio, uiod.d_iov, 1010 sizeof (uiod.d_iov) / sizeof (*uiod.d_iov)); 1011 uiod.d_mp = 0; 1012 /* 1013 * Mark that a thread is in rwnext on the read side 1014 * to prevent strrput from nacking ioctls immediately. 
1015 * When the last concurrent rwnext returns 1016 * the ioctls are nack'ed. 1017 */ 1018 ASSERT(MUTEX_HELD(&stp->sd_lock)); 1019 stp->sd_struiodnak++; 1020 /* 1021 * Note: rwnext will drop sd_lock. 1022 */ 1023 error = rwnext(q, &uiod); 1024 ASSERT(MUTEX_NOT_HELD(&stp->sd_lock)); 1025 mutex_enter(&stp->sd_lock); 1026 stp->sd_struiodnak--; 1027 while (stp->sd_struiodnak == 0 && 1028 ((bp = stp->sd_struionak) != NULL)) { 1029 stp->sd_struionak = bp->b_next; 1030 bp->b_next = NULL; 1031 bp->b_datap->db_type = M_IOCNAK; 1032 /* 1033 * Protect against the driver passing up 1034 * messages after it has done a qprocsoff. 1035 */ 1036 if (_OTHERQ(q)->q_next == NULL) 1037 freemsg(bp); 1038 else { 1039 mutex_exit(&stp->sd_lock); 1040 qreply(q, bp); 1041 mutex_enter(&stp->sd_lock); 1042 } 1043 } 1044 ASSERT(MUTEX_HELD(&stp->sd_lock)); 1045 if (error == 0 || error == EWOULDBLOCK) { 1046 if ((bp = uiod.d_mp) != NULL) { 1047 *errorp = 0; 1048 ASSERT(MUTEX_HELD(&stp->sd_lock)); 1049 return (bp); 1050 } 1051 error = 0; 1052 } else if (error == EINVAL) { 1053 /* 1054 * The stream plumbing must have 1055 * changed while we were away, so 1056 * just turn off rwnext()s. 1057 */ 1058 error = 0; 1059 } else if (error == EBUSY) { 1060 /* 1061 * The module might have data in transit using putnext 1062 * Fall back on waiting + getq. 1063 */ 1064 error = 0; 1065 } else { 1066 *errorp = error; 1067 ASSERT(MUTEX_HELD(&stp->sd_lock)); 1068 return (NULL); 1069 } 1070 /* 1071 * Try a getq in case a rwnext() generated mblk 1072 * has bubbled up via strrput(). 1073 */ 1074 } 1075 *errorp = 0; 1076 ASSERT(MUTEX_HELD(&stp->sd_lock)); 1077 1078 if (sodp != NULL && (sodp->sod_state & SOD_ENABLED) && 1079 (sodp->sod_uioa.uioa_state & UIOA_INIT)) { 1080 /* 1081 * First kstrgetmsg() call for an uioa_t so if any 1082 * queued mblk_t's need to consume them before uioa 1083 * from below can occur. 1084 */ 1085 sodp->sod_uioa.uioa_state &= UIOA_CLR; 1086 sodp->sod_uioa.uioa_state |= UIOA_ENABLED; 1087 if (q->q_first != NULL) { 1088 struioainit(q, sodp, uiop); 1089 } 1090 } else { 1091 /* 1092 * If we have a valid uio, try and use this as a guide for how 1093 * many bytes to retrieve from the queue via getq_noenab(). 1094 * Doing this can avoid unneccesary counting of overlong 1095 * messages in putback(). We currently only do this for sockets. 1096 */ 1097 if ((uiop != NULL) && (stp->sd_vnode->v_type == VSOCK)) 1098 rbytes = uiop->uio_resid; 1099 } 1100 1101 bp = getq_noenab(q, rbytes); 1102 if (bp != NULL && (bp->b_datap->db_flags & DBLK_UIOA)) { 1103 /* 1104 * A uioa flaged mblk_t chain, already uio processed, 1105 * add it to the sodirect uioa pending free list. 1106 * 1107 * Note, a b_cont chain headed by a DBLK_UIOA enable 1108 * mblk_t must have all mblk_t(s) DBLK_UIOA enabled. 1109 */ 1110 mblk_t *bpt = sodp->sod_uioaft; 1111 1112 ASSERT(sodp != NULL); 1113 1114 /* 1115 * Add first mblk_t of "bp" chain to current sodirect uioa 1116 * free list tail mblk_t, if any, else empty list so new head. 1117 */ 1118 if (bpt == NULL) 1119 sodp->sod_uioafh = bp; 1120 else 1121 bpt->b_cont = bp; 1122 1123 /* 1124 * Walk mblk_t "bp" chain to find tail and adjust rptr of 1125 * each to reflect that uioamove() has consumed all data. 
1126 */ 1127 bpt = bp; 1128 for (;;) { 1129 bpt->b_rptr = bpt->b_wptr; 1130 if (bpt->b_cont == NULL) 1131 break; 1132 bpt = bpt->b_cont; 1133 1134 ASSERT(bpt->b_datap->db_flags & DBLK_UIOA); 1135 } 1136 /* New sodirect uioa free list tail */ 1137 sodp->sod_uioaft = bpt; 1138 1139 /* Only 1 strget() with data returned per uioa_t */ 1140 if (sodp->sod_uioa.uioa_state & UIOA_ENABLED) { 1141 sodp->sod_uioa.uioa_state &= UIOA_CLR; 1142 sodp->sod_uioa.uioa_state |= UIOA_FINI; 1143 } 1144 } 1145 1146 return (bp); 1147 } 1148 1149 /* 1150 * Copy out the message pointed to by `bp' into the uio pointed to by `uiop'. 1151 * If the message does not fit in the uio the remainder of it is returned; 1152 * otherwise NULL is returned. Any embedded zero-length mblk_t's are 1153 * consumed, even if uio_resid reaches zero. On error, `*errorp' is set to 1154 * the error code, the message is consumed, and NULL is returned. 1155 */ 1156 static mblk_t * 1157 struiocopyout(mblk_t *bp, struct uio *uiop, int *errorp) 1158 { 1159 int error; 1160 ptrdiff_t n; 1161 mblk_t *nbp; 1162 1163 ASSERT(bp->b_wptr >= bp->b_rptr); 1164 1165 do { 1166 ASSERT(!(bp->b_datap->db_flags & DBLK_UIOA)); 1167 1168 if ((n = MIN(uiop->uio_resid, MBLKL(bp))) != 0) { 1169 ASSERT(n > 0); 1170 1171 error = uiomove(bp->b_rptr, n, UIO_READ, uiop); 1172 if (error != 0) { 1173 freemsg(bp); 1174 *errorp = error; 1175 return (NULL); 1176 } 1177 } 1178 1179 bp->b_rptr += n; 1180 while (bp != NULL && (bp->b_rptr >= bp->b_wptr)) { 1181 nbp = bp; 1182 bp = bp->b_cont; 1183 freeb(nbp); 1184 } 1185 } while (bp != NULL && uiop->uio_resid > 0); 1186 1187 *errorp = 0; 1188 return (bp); 1189 } 1190 1191 /* 1192 * Read a stream according to the mode flags in sd_flag: 1193 * 1194 * (default mode) - Byte stream, msg boundaries are ignored 1195 * RD_MSGDIS (msg discard) - Read on msg boundaries and throw away 1196 * any data remaining in msg 1197 * RD_MSGNODIS (msg non-discard) - Read on msg boundaries and put back 1198 * any remaining data on head of read queue 1199 * 1200 * Consume readable messages on the front of the queue until 1201 * ttolwp(curthread)->lwp_count 1202 * is satisfied, the readable messages are exhausted, or a message 1203 * boundary is reached in a message mode. If no data was read and 1204 * the stream was not opened with the NDELAY flag, block until data arrives. 1205 * Otherwise return the data read and update the count. 1206 * 1207 * In default mode a 0 length message signifies end-of-file and terminates 1208 * a read in progress. The 0 length message is removed from the queue 1209 * only if it is the only message read (no data is read). 1210 * 1211 * An attempt to read an M_PROTO or M_PCPROTO message results in an 1212 * EBADMSG error return, unless either RD_PROTDAT or RD_PROTDIS are set. 1213 * If RD_PROTDAT is set, M_PROTO and M_PCPROTO messages are read as data. 1214 * If RD_PROTDIS is set, the M_PROTO and M_PCPROTO parts of the message 1215 * are unlinked from and M_DATA blocks in the message, the protos are 1216 * thrown away, and the data is read. 
1217 */ 1218 /* ARGSUSED */ 1219 int 1220 strread(struct vnode *vp, struct uio *uiop, cred_t *crp) 1221 { 1222 struct stdata *stp; 1223 mblk_t *bp, *nbp; 1224 queue_t *q; 1225 int error = 0; 1226 uint_t old_sd_flag; 1227 int first; 1228 char rflg; 1229 uint_t mark; /* Contains MSG*MARK and _LASTMARK */ 1230 #define _LASTMARK 0x8000 /* Distinct from MSG*MARK */ 1231 short delim; 1232 unsigned char pri = 0; 1233 char waitflag; 1234 unsigned char type; 1235 1236 TRACE_1(TR_FAC_STREAMS_FR, 1237 TR_STRREAD_ENTER, "strread:%p", vp); 1238 ASSERT(vp->v_stream); 1239 stp = vp->v_stream; 1240 1241 mutex_enter(&stp->sd_lock); 1242 1243 if ((error = i_straccess(stp, JCREAD)) != 0) { 1244 mutex_exit(&stp->sd_lock); 1245 return (error); 1246 } 1247 1248 if (stp->sd_flag & (STRDERR|STPLEX)) { 1249 error = strgeterr(stp, STRDERR|STPLEX, 0); 1250 if (error != 0) { 1251 mutex_exit(&stp->sd_lock); 1252 return (error); 1253 } 1254 } 1255 1256 /* 1257 * Loop terminates when uiop->uio_resid == 0. 1258 */ 1259 rflg = 0; 1260 waitflag = READWAIT; 1261 q = _RD(stp->sd_wrq); 1262 for (;;) { 1263 ASSERT(MUTEX_HELD(&stp->sd_lock)); 1264 old_sd_flag = stp->sd_flag; 1265 mark = 0; 1266 delim = 0; 1267 first = 1; 1268 while ((bp = strget(stp, q, uiop, first, &error)) == NULL) { 1269 int done = 0; 1270 1271 ASSERT(MUTEX_HELD(&stp->sd_lock)); 1272 1273 if (error != 0) 1274 goto oops; 1275 1276 if (stp->sd_flag & (STRHUP|STREOF)) { 1277 goto oops; 1278 } 1279 if (rflg && !(stp->sd_flag & STRDELIM)) { 1280 goto oops; 1281 } 1282 /* 1283 * If a read(fd,buf,0) has been done, there is no 1284 * need to sleep. We always have zero bytes to 1285 * return. 1286 */ 1287 if (uiop->uio_resid == 0) { 1288 goto oops; 1289 } 1290 1291 qbackenable(q, 0); 1292 1293 TRACE_3(TR_FAC_STREAMS_FR, TR_STRREAD_WAIT, 1294 "strread calls strwaitq:%p, %p, %p", 1295 vp, uiop, crp); 1296 if ((error = strwaitq(stp, waitflag, uiop->uio_resid, 1297 uiop->uio_fmode, -1, &done)) != 0 || done) { 1298 TRACE_3(TR_FAC_STREAMS_FR, TR_STRREAD_DONE, 1299 "strread error or done:%p, %p, %p", 1300 vp, uiop, crp); 1301 if ((uiop->uio_fmode & FNDELAY) && 1302 (stp->sd_flag & OLDNDELAY) && 1303 (error == EAGAIN)) 1304 error = 0; 1305 goto oops; 1306 } 1307 TRACE_3(TR_FAC_STREAMS_FR, TR_STRREAD_AWAKE, 1308 "strread awakes:%p, %p, %p", vp, uiop, crp); 1309 if ((error = i_straccess(stp, JCREAD)) != 0) { 1310 goto oops; 1311 } 1312 first = 0; 1313 } 1314 1315 ASSERT(MUTEX_HELD(&stp->sd_lock)); 1316 ASSERT(bp); 1317 ASSERT(!(bp->b_datap->db_flags & DBLK_UIOA)); 1318 pri = bp->b_band; 1319 /* 1320 * Extract any mark information. If the message is not 1321 * completely consumed this information will be put in the mblk 1322 * that is putback. 1323 * If MSGMARKNEXT is set and the message is completely consumed 1324 * the STRATMARK flag will be set below. Likewise, if 1325 * MSGNOTMARKNEXT is set and the message is 1326 * completely consumed STRNOTATMARK will be set. 1327 * 1328 * For some unknown reason strread only breaks the read at the 1329 * last mark. 
1330 */ 1331 mark = bp->b_flag & (MSGMARK | MSGMARKNEXT | MSGNOTMARKNEXT); 1332 ASSERT((mark & (MSGMARKNEXT|MSGNOTMARKNEXT)) != 1333 (MSGMARKNEXT|MSGNOTMARKNEXT)); 1334 if (mark != 0 && bp == stp->sd_mark) { 1335 if (rflg) { 1336 putback(stp, q, bp, pri); 1337 goto oops; 1338 } 1339 mark |= _LASTMARK; 1340 stp->sd_mark = NULL; 1341 } 1342 if ((stp->sd_flag & STRDELIM) && (bp->b_flag & MSGDELIM)) 1343 delim = 1; 1344 mutex_exit(&stp->sd_lock); 1345 1346 if (STREAM_NEEDSERVICE(stp)) 1347 stream_runservice(stp); 1348 1349 type = bp->b_datap->db_type; 1350 1351 switch (type) { 1352 1353 case M_DATA: 1354 ismdata: 1355 if (msgnodata(bp)) { 1356 if (mark || delim) { 1357 freemsg(bp); 1358 } else if (rflg) { 1359 1360 /* 1361 * If already read data put zero 1362 * length message back on queue else 1363 * free msg and return 0. 1364 */ 1365 bp->b_band = pri; 1366 mutex_enter(&stp->sd_lock); 1367 putback(stp, q, bp, pri); 1368 mutex_exit(&stp->sd_lock); 1369 } else { 1370 freemsg(bp); 1371 } 1372 error = 0; 1373 goto oops1; 1374 } 1375 1376 rflg = 1; 1377 waitflag |= NOINTR; 1378 bp = struiocopyout(bp, uiop, &error); 1379 if (error != 0) 1380 goto oops1; 1381 1382 mutex_enter(&stp->sd_lock); 1383 if (bp) { 1384 /* 1385 * Have remaining data in message. 1386 * Free msg if in discard mode. 1387 */ 1388 if (stp->sd_read_opt & RD_MSGDIS) { 1389 freemsg(bp); 1390 } else { 1391 bp->b_band = pri; 1392 if ((mark & _LASTMARK) && 1393 (stp->sd_mark == NULL)) 1394 stp->sd_mark = bp; 1395 bp->b_flag |= mark & ~_LASTMARK; 1396 if (delim) 1397 bp->b_flag |= MSGDELIM; 1398 if (msgnodata(bp)) 1399 freemsg(bp); 1400 else 1401 putback(stp, q, bp, pri); 1402 } 1403 } else { 1404 /* 1405 * Consumed the complete message. 1406 * Move the MSG*MARKNEXT information 1407 * to the stream head just in case 1408 * the read queue becomes empty. 1409 * 1410 * If the stream head was at the mark 1411 * (STRATMARK) before we dropped sd_lock above 1412 * and some data was consumed then we have 1413 * moved past the mark thus STRATMARK is 1414 * cleared. However, if a message arrived in 1415 * strrput during the copyout above causing 1416 * STRATMARK to be set we can not clear that 1417 * flag. 1418 */ 1419 if (mark & 1420 (MSGMARKNEXT|MSGNOTMARKNEXT|MSGMARK)) { 1421 if (mark & MSGMARKNEXT) { 1422 stp->sd_flag &= ~STRNOTATMARK; 1423 stp->sd_flag |= STRATMARK; 1424 } else if (mark & MSGNOTMARKNEXT) { 1425 stp->sd_flag &= ~STRATMARK; 1426 stp->sd_flag |= STRNOTATMARK; 1427 } else { 1428 stp->sd_flag &= 1429 ~(STRATMARK|STRNOTATMARK); 1430 } 1431 } else if (rflg && (old_sd_flag & STRATMARK)) { 1432 stp->sd_flag &= ~STRATMARK; 1433 } 1434 } 1435 1436 /* 1437 * Check for signal messages at the front of the read 1438 * queue and generate the signal(s) if appropriate. 1439 * The only signal that can be on queue is M_SIG at 1440 * this point. 1441 */ 1442 while ((((bp = q->q_first)) != NULL) && 1443 (bp->b_datap->db_type == M_SIG)) { 1444 bp = getq_noenab(q, 0); 1445 /* 1446 * sd_lock is held so the content of the 1447 * read queue can not change. 
1448 */ 1449 ASSERT(bp != NULL && 1450 bp->b_datap->db_type == M_SIG); 1451 strsignal_nolock(stp, *bp->b_rptr, 1452 (int32_t)bp->b_band); 1453 mutex_exit(&stp->sd_lock); 1454 freemsg(bp); 1455 if (STREAM_NEEDSERVICE(stp)) 1456 stream_runservice(stp); 1457 mutex_enter(&stp->sd_lock); 1458 } 1459 1460 if ((uiop->uio_resid == 0) || (mark & _LASTMARK) || 1461 delim || 1462 (stp->sd_read_opt & (RD_MSGDIS|RD_MSGNODIS))) { 1463 goto oops; 1464 } 1465 continue; 1466 1467 case M_SIG: 1468 strsignal(stp, *bp->b_rptr, (int32_t)bp->b_band); 1469 freemsg(bp); 1470 mutex_enter(&stp->sd_lock); 1471 continue; 1472 1473 case M_PROTO: 1474 case M_PCPROTO: 1475 /* 1476 * Only data messages are readable. 1477 * Any others generate an error, unless 1478 * RD_PROTDIS or RD_PROTDAT is set. 1479 */ 1480 if (stp->sd_read_opt & RD_PROTDAT) { 1481 for (nbp = bp; nbp; nbp = nbp->b_next) { 1482 if ((nbp->b_datap->db_type == 1483 M_PROTO) || 1484 (nbp->b_datap->db_type == 1485 M_PCPROTO)) { 1486 nbp->b_datap->db_type = M_DATA; 1487 } else { 1488 break; 1489 } 1490 } 1491 /* 1492 * clear stream head hi pri flag based on 1493 * first message 1494 */ 1495 if (type == M_PCPROTO) { 1496 mutex_enter(&stp->sd_lock); 1497 stp->sd_flag &= ~STRPRI; 1498 mutex_exit(&stp->sd_lock); 1499 } 1500 goto ismdata; 1501 } else if (stp->sd_read_opt & RD_PROTDIS) { 1502 /* 1503 * discard non-data messages 1504 */ 1505 while (bp && 1506 ((bp->b_datap->db_type == M_PROTO) || 1507 (bp->b_datap->db_type == M_PCPROTO))) { 1508 nbp = unlinkb(bp); 1509 freeb(bp); 1510 bp = nbp; 1511 } 1512 /* 1513 * clear stream head hi pri flag based on 1514 * first message 1515 */ 1516 if (type == M_PCPROTO) { 1517 mutex_enter(&stp->sd_lock); 1518 stp->sd_flag &= ~STRPRI; 1519 mutex_exit(&stp->sd_lock); 1520 } 1521 if (bp) { 1522 bp->b_band = pri; 1523 goto ismdata; 1524 } else { 1525 break; 1526 } 1527 } 1528 /* FALLTHRU */ 1529 case M_PASSFP: 1530 if ((bp->b_datap->db_type == M_PASSFP) && 1531 (stp->sd_read_opt & RD_PROTDIS)) { 1532 freemsg(bp); 1533 break; 1534 } 1535 mutex_enter(&stp->sd_lock); 1536 putback(stp, q, bp, pri); 1537 mutex_exit(&stp->sd_lock); 1538 if (rflg == 0) 1539 error = EBADMSG; 1540 goto oops1; 1541 1542 default: 1543 /* 1544 * Garbage on stream head read queue. 1545 */ 1546 cmn_err(CE_WARN, "bad %x found at stream head\n", 1547 bp->b_datap->db_type); 1548 freemsg(bp); 1549 goto oops1; 1550 } 1551 mutex_enter(&stp->sd_lock); 1552 } 1553 oops: 1554 mutex_exit(&stp->sd_lock); 1555 oops1: 1556 qbackenable(q, pri); 1557 return (error); 1558 #undef _LASTMARK 1559 } 1560 1561 /* 1562 * Default processing of M_PROTO/M_PCPROTO messages. 1563 * Determine which wakeups and signals are needed. 1564 * This can be replaced by a user-specified procedure for kernel users 1565 * of STREAMS. 1566 */ 1567 /* ARGSUSED */ 1568 mblk_t * 1569 strrput_proto(vnode_t *vp, mblk_t *mp, 1570 strwakeup_t *wakeups, strsigset_t *firstmsgsigs, 1571 strsigset_t *allmsgsigs, strpollset_t *pollwakeups) 1572 { 1573 *wakeups = RSLEEP; 1574 *allmsgsigs = 0; 1575 1576 switch (mp->b_datap->db_type) { 1577 case M_PROTO: 1578 if (mp->b_band == 0) { 1579 *firstmsgsigs = S_INPUT | S_RDNORM; 1580 *pollwakeups = POLLIN | POLLRDNORM; 1581 } else { 1582 *firstmsgsigs = S_INPUT | S_RDBAND; 1583 *pollwakeups = POLLIN | POLLRDBAND; 1584 } 1585 break; 1586 case M_PCPROTO: 1587 *firstmsgsigs = S_HIPRI; 1588 *pollwakeups = POLLPRI; 1589 break; 1590 } 1591 return (mp); 1592 } 1593 1594 /* 1595 * Default processing of everything but M_DATA, M_PROTO, M_PCPROTO and 1596 * M_PASSFP messages. 
1597 * Determine which wakeups and signals are needed. 1598 * This can be replaced by a user-specified procedure for kernel users 1599 * of STREAMS. 1600 */ 1601 /* ARGSUSED */ 1602 mblk_t * 1603 strrput_misc(vnode_t *vp, mblk_t *mp, 1604 strwakeup_t *wakeups, strsigset_t *firstmsgsigs, 1605 strsigset_t *allmsgsigs, strpollset_t *pollwakeups) 1606 { 1607 *wakeups = 0; 1608 *firstmsgsigs = 0; 1609 *allmsgsigs = 0; 1610 *pollwakeups = 0; 1611 return (mp); 1612 } 1613 1614 /* 1615 * Stream read put procedure. Called from downstream driver/module 1616 * with messages for the stream head. Data, protocol, and in-stream 1617 * signal messages are placed on the queue, others are handled directly. 1618 */ 1619 int 1620 strrput(queue_t *q, mblk_t *bp) 1621 { 1622 struct stdata *stp; 1623 ulong_t rput_opt; 1624 strwakeup_t wakeups; 1625 strsigset_t firstmsgsigs; /* Signals if first message on queue */ 1626 strsigset_t allmsgsigs; /* Signals for all messages */ 1627 strsigset_t signals; /* Signals events to generate */ 1628 strpollset_t pollwakeups; 1629 mblk_t *nextbp; 1630 uchar_t band = 0; 1631 int hipri_sig; 1632 1633 stp = (struct stdata *)q->q_ptr; 1634 /* 1635 * Use rput_opt for optimized access to the SR_ flags except 1636 * SR_POLLIN. That flag has to be checked under sd_lock since it 1637 * is modified by strpoll(). 1638 */ 1639 rput_opt = stp->sd_rput_opt; 1640 1641 ASSERT(qclaimed(q)); 1642 TRACE_2(TR_FAC_STREAMS_FR, TR_STRRPUT_ENTER, 1643 "strrput called with message type:q %p bp %p", q, bp); 1644 1645 /* 1646 * Perform initial processing and pass to the parameterized functions. 1647 */ 1648 ASSERT(bp->b_next == NULL); 1649 1650 switch (bp->b_datap->db_type) { 1651 case M_DATA: 1652 /* 1653 * sockfs is the only consumer of STREOF and when it is set, 1654 * it implies that the receiver is not interested in receiving 1655 * any more data, hence the mblk is freed to prevent unnecessary 1656 * message queueing at the stream head. 1657 */ 1658 if (stp->sd_flag == STREOF) { 1659 freemsg(bp); 1660 return (0); 1661 } 1662 if ((rput_opt & SR_IGN_ZEROLEN) && 1663 bp->b_rptr == bp->b_wptr && msgnodata(bp)) { 1664 /* 1665 * Ignore zero-length M_DATA messages. These might be 1666 * generated by some transports. 1667 * The zero-length M_DATA messages, even if they 1668 * are ignored, should effect the atmark tracking and 1669 * should wake up a thread sleeping in strwaitmark. 1670 */ 1671 mutex_enter(&stp->sd_lock); 1672 if (bp->b_flag & MSGMARKNEXT) { 1673 /* 1674 * Record the position of the mark either 1675 * in q_last or in STRATMARK. 1676 */ 1677 if (q->q_last != NULL) { 1678 q->q_last->b_flag &= ~MSGNOTMARKNEXT; 1679 q->q_last->b_flag |= MSGMARKNEXT; 1680 } else { 1681 stp->sd_flag &= ~STRNOTATMARK; 1682 stp->sd_flag |= STRATMARK; 1683 } 1684 } else if (bp->b_flag & MSGNOTMARKNEXT) { 1685 /* 1686 * Record that this is not the position of 1687 * the mark either in q_last or in 1688 * STRNOTATMARK. 
1689 */ 1690 if (q->q_last != NULL) { 1691 q->q_last->b_flag &= ~MSGMARKNEXT; 1692 q->q_last->b_flag |= MSGNOTMARKNEXT; 1693 } else { 1694 stp->sd_flag &= ~STRATMARK; 1695 stp->sd_flag |= STRNOTATMARK; 1696 } 1697 } 1698 if (stp->sd_flag & RSLEEP) { 1699 stp->sd_flag &= ~RSLEEP; 1700 cv_broadcast(&q->q_wait); 1701 } 1702 mutex_exit(&stp->sd_lock); 1703 freemsg(bp); 1704 return (0); 1705 } 1706 wakeups = RSLEEP; 1707 if (bp->b_band == 0) { 1708 firstmsgsigs = S_INPUT | S_RDNORM; 1709 pollwakeups = POLLIN | POLLRDNORM; 1710 } else { 1711 firstmsgsigs = S_INPUT | S_RDBAND; 1712 pollwakeups = POLLIN | POLLRDBAND; 1713 } 1714 if (rput_opt & SR_SIGALLDATA) 1715 allmsgsigs = firstmsgsigs; 1716 else 1717 allmsgsigs = 0; 1718 1719 mutex_enter(&stp->sd_lock); 1720 if ((rput_opt & SR_CONSOL_DATA) && 1721 (q->q_last != NULL) && 1722 (bp->b_flag & (MSGMARK|MSGDELIM)) == 0) { 1723 /* 1724 * Consolidate an M_DATA message onto an M_DATA, 1725 * M_PROTO, or M_PCPROTO by merging it with q_last. 1726 * The consolidation does not take place if 1727 * the old message is marked with either of the 1728 * marks or the delim flag or if the new 1729 * message is marked with MSGMARK. The MSGMARK 1730 * check is needed to handle the odd semantics of 1731 * MSGMARK where essentially the whole message 1732 * is to be treated as marked. 1733 * Carry any MSGMARKNEXT and MSGNOTMARKNEXT from the 1734 * new message to the front of the b_cont chain. 1735 */ 1736 mblk_t *lbp = q->q_last; 1737 unsigned char db_type = lbp->b_datap->db_type; 1738 1739 if ((db_type == M_DATA || db_type == M_PROTO || 1740 db_type == M_PCPROTO) && 1741 !(lbp->b_flag & (MSGDELIM|MSGMARK|MSGMARKNEXT))) { 1742 rmvq_noenab(q, lbp); 1743 /* 1744 * The first message in the b_cont list 1745 * tracks MSGMARKNEXT and MSGNOTMARKNEXT. 1746 * We need to handle the case where we 1747 * are appending: 1748 * 1749 * 1) a MSGMARKNEXT to a MSGNOTMARKNEXT. 1750 * 2) a MSGMARKNEXT to a plain message. 1751 * 3) a MSGNOTMARKNEXT to a plain message 1752 * 4) a MSGNOTMARKNEXT to a MSGNOTMARKNEXT 1753 * message. 1754 * 1755 * Thus we never append a MSGMARKNEXT or 1756 * MSGNOTMARKNEXT to a MSGMARKNEXT message. 1757 */ 1758 if (bp->b_flag & MSGMARKNEXT) { 1759 lbp->b_flag |= MSGMARKNEXT; 1760 lbp->b_flag &= ~MSGNOTMARKNEXT; 1761 bp->b_flag &= ~MSGMARKNEXT; 1762 } else if (bp->b_flag & MSGNOTMARKNEXT) { 1763 lbp->b_flag |= MSGNOTMARKNEXT; 1764 bp->b_flag &= ~MSGNOTMARKNEXT; 1765 } 1766 1767 linkb(lbp, bp); 1768 bp = lbp; 1769 /* 1770 * The new message logically isn't the first 1771 * even though the q_first check below thinks 1772 * it is. Clear the firstmsgsigs to make it 1773 * not appear to be first. 
1774 */ 1775 firstmsgsigs = 0; 1776 } 1777 } 1778 break; 1779 1780 case M_PASSFP: 1781 wakeups = RSLEEP; 1782 allmsgsigs = 0; 1783 if (bp->b_band == 0) { 1784 firstmsgsigs = S_INPUT | S_RDNORM; 1785 pollwakeups = POLLIN | POLLRDNORM; 1786 } else { 1787 firstmsgsigs = S_INPUT | S_RDBAND; 1788 pollwakeups = POLLIN | POLLRDBAND; 1789 } 1790 mutex_enter(&stp->sd_lock); 1791 break; 1792 1793 case M_PROTO: 1794 case M_PCPROTO: 1795 ASSERT(stp->sd_rprotofunc != NULL); 1796 bp = (stp->sd_rprotofunc)(stp->sd_vnode, bp, 1797 &wakeups, &firstmsgsigs, &allmsgsigs, &pollwakeups); 1798 #define ALLSIG (S_INPUT|S_HIPRI|S_OUTPUT|S_MSG|S_ERROR|S_HANGUP|S_RDNORM|\ 1799 S_WRNORM|S_RDBAND|S_WRBAND|S_BANDURG) 1800 #define ALLPOLL (POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLWRNORM|POLLRDBAND|\ 1801 POLLWRBAND) 1802 1803 ASSERT((wakeups & ~(RSLEEP|WSLEEP)) == 0); 1804 ASSERT((firstmsgsigs & ~ALLSIG) == 0); 1805 ASSERT((allmsgsigs & ~ALLSIG) == 0); 1806 ASSERT((pollwakeups & ~ALLPOLL) == 0); 1807 1808 mutex_enter(&stp->sd_lock); 1809 break; 1810 1811 default: 1812 ASSERT(stp->sd_rmiscfunc != NULL); 1813 bp = (stp->sd_rmiscfunc)(stp->sd_vnode, bp, 1814 &wakeups, &firstmsgsigs, &allmsgsigs, &pollwakeups); 1815 ASSERT((wakeups & ~(RSLEEP|WSLEEP)) == 0); 1816 ASSERT((firstmsgsigs & ~ALLSIG) == 0); 1817 ASSERT((allmsgsigs & ~ALLSIG) == 0); 1818 ASSERT((pollwakeups & ~ALLPOLL) == 0); 1819 #undef ALLSIG 1820 #undef ALLPOLL 1821 mutex_enter(&stp->sd_lock); 1822 break; 1823 } 1824 ASSERT(MUTEX_HELD(&stp->sd_lock)); 1825 1826 /* By default generate superset of signals */ 1827 signals = (firstmsgsigs | allmsgsigs); 1828 1829 /* 1830 * The proto and misc functions can return multiple messages 1831 * as a b_next chain. Such messages are processed separately. 1832 */ 1833 one_more: 1834 hipri_sig = 0; 1835 if (bp == NULL) { 1836 nextbp = NULL; 1837 } else { 1838 nextbp = bp->b_next; 1839 bp->b_next = NULL; 1840 1841 switch (bp->b_datap->db_type) { 1842 case M_PCPROTO: 1843 /* 1844 * Only one priority protocol message is allowed at the 1845 * stream head at a time. 1846 */ 1847 if (stp->sd_flag & STRPRI) { 1848 TRACE_0(TR_FAC_STREAMS_FR, TR_STRRPUT_PROTERR, 1849 "M_PCPROTO already at head"); 1850 freemsg(bp); 1851 mutex_exit(&stp->sd_lock); 1852 goto done; 1853 } 1854 stp->sd_flag |= STRPRI; 1855 hipri_sig = 1; 1856 /* FALLTHRU */ 1857 case M_DATA: 1858 case M_PROTO: 1859 case M_PASSFP: 1860 band = bp->b_band; 1861 /* 1862 * Marking doesn't work well when messages 1863 * are marked in more than one band. We only 1864 * remember the last message received, even if 1865 * it is placed on the queue ahead of other 1866 * marked messages. 1867 */ 1868 if (bp->b_flag & MSGMARK) 1869 stp->sd_mark = bp; 1870 (void) putq(q, bp); 1871 1872 /* 1873 * If message is a PCPROTO message, always use 1874 * firstmsgsigs to determine if a signal should be 1875 * sent as strrput is the only place to send 1876 * signals for PCPROTO. Other messages are based on 1877 * the STRGETINPROG flag. The flag determines if 1878 * strrput or (k)strgetmsg will be responsible for 1879 * sending the signals, in the firstmsgsigs case. 
1880 */ 1881 if ((hipri_sig == 1) || 1882 (((stp->sd_flag & STRGETINPROG) == 0) && 1883 (q->q_first == bp))) 1884 signals = (firstmsgsigs | allmsgsigs); 1885 else 1886 signals = allmsgsigs; 1887 break; 1888 1889 default: 1890 mutex_exit(&stp->sd_lock); 1891 (void) strrput_nondata(q, bp); 1892 mutex_enter(&stp->sd_lock); 1893 break; 1894 } 1895 } 1896 ASSERT(MUTEX_HELD(&stp->sd_lock)); 1897 /* 1898 * Wake sleeping read/getmsg and cancel deferred wakeup 1899 */ 1900 if (wakeups & RSLEEP) 1901 stp->sd_wakeq &= ~RSLEEP; 1902 1903 wakeups &= stp->sd_flag; 1904 if (wakeups & RSLEEP) { 1905 stp->sd_flag &= ~RSLEEP; 1906 cv_broadcast(&q->q_wait); 1907 } 1908 if (wakeups & WSLEEP) { 1909 stp->sd_flag &= ~WSLEEP; 1910 cv_broadcast(&_WR(q)->q_wait); 1911 } 1912 1913 if (pollwakeups != 0) { 1914 if (pollwakeups == (POLLIN | POLLRDNORM)) { 1915 /* 1916 * Can't use rput_opt since it was not 1917 * read when sd_lock was held and SR_POLLIN is changed 1918 * by strpoll() under sd_lock. 1919 */ 1920 if (!(stp->sd_rput_opt & SR_POLLIN)) 1921 goto no_pollwake; 1922 stp->sd_rput_opt &= ~SR_POLLIN; 1923 } 1924 mutex_exit(&stp->sd_lock); 1925 pollwakeup(&stp->sd_pollist, pollwakeups); 1926 mutex_enter(&stp->sd_lock); 1927 } 1928 no_pollwake: 1929 1930 /* 1931 * strsendsig can handle multiple signals with a 1932 * single call. 1933 */ 1934 if (stp->sd_sigflags & signals) 1935 strsendsig(stp->sd_siglist, signals, band, 0); 1936 mutex_exit(&stp->sd_lock); 1937 1938 1939 done: 1940 if (nextbp == NULL) 1941 return (0); 1942 1943 /* 1944 * Any signals were handled the first time. 1945 * Wakeups and pollwakeups are redone to avoid any race 1946 * conditions - all the messages are not queued until the 1947 * last message has been processed by strrput. 1948 */ 1949 bp = nextbp; 1950 signals = firstmsgsigs = allmsgsigs = 0; 1951 mutex_enter(&stp->sd_lock); 1952 goto one_more; 1953 } 1954 1955 static void 1956 log_dupioc(queue_t *rq, mblk_t *bp) 1957 { 1958 queue_t *wq, *qp; 1959 char *modnames, *mnp, *dname; 1960 size_t maxmodstr; 1961 boolean_t islast; 1962 1963 /* 1964 * Allocate a buffer large enough to hold the names of nstrpush modules 1965 * and one driver, with spaces between and NUL terminator. If we can't 1966 * get memory, then we'll just log the driver name. 1967 */ 1968 maxmodstr = nstrpush * (FMNAMESZ + 1); 1969 mnp = modnames = kmem_alloc(maxmodstr, KM_NOSLEEP); 1970 1971 /* march down write side to print log message down to the driver */ 1972 wq = WR(rq); 1973 1974 /* make sure q_next doesn't shift around while we're grabbing data */ 1975 claimstr(wq); 1976 qp = wq->q_next; 1977 do { 1978 if ((dname = qp->q_qinfo->qi_minfo->mi_idname) == NULL) 1979 dname = "?"; 1980 islast = !SAMESTR(qp) || qp->q_next == NULL; 1981 if (modnames == NULL) { 1982 /* 1983 * If we don't have memory, then get the driver name in 1984 * the log where we can see it. Note that memory 1985 * pressure is a possible cause of these sorts of bugs. 1986 */ 1987 if (islast) { 1988 modnames = dname; 1989 maxmodstr = 0; 1990 } 1991 } else { 1992 mnp += snprintf(mnp, FMNAMESZ + 1, "%s", dname); 1993 if (!islast) 1994 *mnp++ = ' '; 1995 } 1996 qp = qp->q_next; 1997 } while (!islast); 1998 releasestr(wq); 1999 /* Cannot happen unless stream head is corrupt. */ 2000 ASSERT(modnames != NULL); 2001 (void) strlog(rq->q_qinfo->qi_minfo->mi_idnum, 0, 1, 2002 SL_CONSOLE|SL_TRACE|SL_ERROR, 2003 "Warning: stream %p received duplicate %X M_IOC%s; module list: %s", 2004 rq->q_ptr, ((struct iocblk *)bp->b_rptr)->ioc_cmd, 2005 (DB_TYPE(bp) == M_IOCACK ? 
"ACK" : "NAK"), modnames); 2006 if (maxmodstr != 0) 2007 kmem_free(modnames, maxmodstr); 2008 } 2009 2010 int 2011 strrput_nondata(queue_t *q, mblk_t *bp) 2012 { 2013 struct stdata *stp; 2014 struct iocblk *iocbp; 2015 struct stroptions *sop; 2016 struct copyreq *reqp; 2017 struct copyresp *resp; 2018 unsigned char bpri; 2019 unsigned char flushed_already = 0; 2020 2021 stp = (struct stdata *)q->q_ptr; 2022 2023 ASSERT(!(stp->sd_flag & STPLEX)); 2024 ASSERT(qclaimed(q)); 2025 2026 switch (bp->b_datap->db_type) { 2027 case M_ERROR: 2028 /* 2029 * An error has occurred downstream, the errno is in the first 2030 * bytes of the message. 2031 */ 2032 if ((bp->b_wptr - bp->b_rptr) == 2) { /* New flavor */ 2033 unsigned char rw = 0; 2034 2035 mutex_enter(&stp->sd_lock); 2036 if (*bp->b_rptr != NOERROR) { /* read error */ 2037 if (*bp->b_rptr != 0) { 2038 if (stp->sd_flag & STRDERR) 2039 flushed_already |= FLUSHR; 2040 stp->sd_flag |= STRDERR; 2041 rw |= FLUSHR; 2042 } else { 2043 stp->sd_flag &= ~STRDERR; 2044 } 2045 stp->sd_rerror = *bp->b_rptr; 2046 } 2047 bp->b_rptr++; 2048 if (*bp->b_rptr != NOERROR) { /* write error */ 2049 if (*bp->b_rptr != 0) { 2050 if (stp->sd_flag & STWRERR) 2051 flushed_already |= FLUSHW; 2052 stp->sd_flag |= STWRERR; 2053 rw |= FLUSHW; 2054 } else { 2055 stp->sd_flag &= ~STWRERR; 2056 } 2057 stp->sd_werror = *bp->b_rptr; 2058 } 2059 if (rw) { 2060 TRACE_2(TR_FAC_STREAMS_FR, TR_STRRPUT_WAKE, 2061 "strrput cv_broadcast:q %p, bp %p", 2062 q, bp); 2063 cv_broadcast(&q->q_wait); /* readers */ 2064 cv_broadcast(&_WR(q)->q_wait); /* writers */ 2065 cv_broadcast(&stp->sd_monitor); /* ioctllers */ 2066 2067 mutex_exit(&stp->sd_lock); 2068 pollwakeup(&stp->sd_pollist, POLLERR); 2069 mutex_enter(&stp->sd_lock); 2070 2071 if (stp->sd_sigflags & S_ERROR) 2072 strsendsig(stp->sd_siglist, S_ERROR, 0, 2073 ((rw & FLUSHR) ? stp->sd_rerror : 2074 stp->sd_werror)); 2075 mutex_exit(&stp->sd_lock); 2076 /* 2077 * Send the M_FLUSH only 2078 * for the first M_ERROR 2079 * message on the stream 2080 */ 2081 if (flushed_already == rw) { 2082 freemsg(bp); 2083 return (0); 2084 } 2085 2086 bp->b_datap->db_type = M_FLUSH; 2087 *bp->b_rptr = rw; 2088 bp->b_wptr = bp->b_rptr + 1; 2089 /* 2090 * Protect against the driver 2091 * passing up messages after 2092 * it has done a qprocsoff 2093 */ 2094 if (_OTHERQ(q)->q_next == NULL) 2095 freemsg(bp); 2096 else 2097 qreply(q, bp); 2098 return (0); 2099 } else 2100 mutex_exit(&stp->sd_lock); 2101 } else if (*bp->b_rptr != 0) { /* Old flavor */ 2102 if (stp->sd_flag & (STRDERR|STWRERR)) 2103 flushed_already = FLUSHRW; 2104 mutex_enter(&stp->sd_lock); 2105 stp->sd_flag |= (STRDERR|STWRERR); 2106 stp->sd_rerror = *bp->b_rptr; 2107 stp->sd_werror = *bp->b_rptr; 2108 TRACE_2(TR_FAC_STREAMS_FR, 2109 TR_STRRPUT_WAKE2, 2110 "strrput wakeup #2:q %p, bp %p", q, bp); 2111 cv_broadcast(&q->q_wait); /* the readers */ 2112 cv_broadcast(&_WR(q)->q_wait); /* the writers */ 2113 cv_broadcast(&stp->sd_monitor); /* ioctllers */ 2114 2115 mutex_exit(&stp->sd_lock); 2116 pollwakeup(&stp->sd_pollist, POLLERR); 2117 mutex_enter(&stp->sd_lock); 2118 2119 if (stp->sd_sigflags & S_ERROR) 2120 strsendsig(stp->sd_siglist, S_ERROR, 0, 2121 (stp->sd_werror ? 
stp->sd_werror : 2122 stp->sd_rerror)); 2123 mutex_exit(&stp->sd_lock); 2124 2125 /* 2126 * Send the M_FLUSH only 2127 * for the first M_ERROR 2128 * message on the stream 2129 */ 2130 if (flushed_already != FLUSHRW) { 2131 bp->b_datap->db_type = M_FLUSH; 2132 *bp->b_rptr = FLUSHRW; 2133 /* 2134 * Protect against the driver passing up 2135 * messages after it has done a 2136 * qprocsoff. 2137 */ 2138 if (_OTHERQ(q)->q_next == NULL) 2139 freemsg(bp); 2140 else 2141 qreply(q, bp); 2142 return (0); 2143 } 2144 } 2145 freemsg(bp); 2146 return (0); 2147 2148 case M_HANGUP: 2149 2150 freemsg(bp); 2151 mutex_enter(&stp->sd_lock); 2152 stp->sd_werror = ENXIO; 2153 stp->sd_flag |= STRHUP; 2154 stp->sd_flag &= ~(WSLEEP|RSLEEP); 2155 2156 /* 2157 * send signal if controlling tty 2158 */ 2159 2160 if (stp->sd_sidp) { 2161 prsignal(stp->sd_sidp, SIGHUP); 2162 if (stp->sd_sidp != stp->sd_pgidp) 2163 pgsignal(stp->sd_pgidp, SIGTSTP); 2164 } 2165 2166 /* 2167 * wake up read, write, and exception pollers and 2168 * reset wakeup mechanism. 2169 */ 2170 cv_broadcast(&q->q_wait); /* the readers */ 2171 cv_broadcast(&_WR(q)->q_wait); /* the writers */ 2172 cv_broadcast(&stp->sd_monitor); /* the ioctllers */ 2173 strhup(stp); 2174 mutex_exit(&stp->sd_lock); 2175 return (0); 2176 2177 case M_UNHANGUP: 2178 freemsg(bp); 2179 mutex_enter(&stp->sd_lock); 2180 stp->sd_werror = 0; 2181 stp->sd_flag &= ~STRHUP; 2182 mutex_exit(&stp->sd_lock); 2183 return (0); 2184 2185 case M_SIG: 2186 /* 2187 * Someone downstream wants to post a signal. The 2188 * signal to post is contained in the first byte of the 2189 * message. If the message would go on the front of 2190 * the queue, send a signal to the process group 2191 * (if not SIGPOLL) or to the siglist processes 2192 * (SIGPOLL). If something is already on the queue, 2193 * OR if we are delivering a delayed suspend (*sigh* 2194 * another "tty" hack) and there's no one sleeping already, 2195 * just enqueue the message. 2196 */ 2197 mutex_enter(&stp->sd_lock); 2198 if (q->q_first || (*bp->b_rptr == SIGTSTP && 2199 !(stp->sd_flag & RSLEEP))) { 2200 (void) putq(q, bp); 2201 mutex_exit(&stp->sd_lock); 2202 return (0); 2203 } 2204 mutex_exit(&stp->sd_lock); 2205 /* FALLTHRU */ 2206 2207 case M_PCSIG: 2208 /* 2209 * Don't enqueue, just post the signal. 2210 */ 2211 strsignal(stp, *bp->b_rptr, 0L); 2212 freemsg(bp); 2213 return (0); 2214 2215 case M_CMD: 2216 if (MBLKL(bp) != sizeof (cmdblk_t)) { 2217 freemsg(bp); 2218 return (0); 2219 } 2220 2221 mutex_enter(&stp->sd_lock); 2222 if (stp->sd_flag & STRCMDWAIT) { 2223 ASSERT(stp->sd_cmdblk == NULL); 2224 stp->sd_cmdblk = bp; 2225 cv_broadcast(&stp->sd_monitor); 2226 mutex_exit(&stp->sd_lock); 2227 } else { 2228 mutex_exit(&stp->sd_lock); 2229 freemsg(bp); 2230 } 2231 return (0); 2232 2233 case M_FLUSH: 2234 /* 2235 * Flush queues. The indication of which queues to flush 2236 * is in the first byte of the message. If the read queue 2237 * is specified, then flush it. If FLUSHBAND is set, just 2238 * flush the band specified by the second byte of the message. 2239 * 2240 * If a module has issued a M_SETOPT to not flush hi 2241 * priority messages off of the stream head, then pass this 2242 * flag into the flushq code to preserve such messages. 
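 *
 * For reference, a minimal sketch of how a driver typically originates
 * the M_FLUSH that arrives here (rq names the driver's read queue; the
 * pattern follows the standard STREAMS DDI rather than code in this
 * file):
 *
 *	mblk_t *fmp;
 *
 *	if ((fmp = allocb(1, BPRI_HI)) != NULL) {
 *		fmp->b_datap->db_type = M_FLUSH;
 *		*fmp->b_wptr++ = FLUSHRW;
 *		putnext(rq, fmp);
 *	}
 *
 * When such a message reaches this routine with FLUSHW still set, the
 * code below clears FLUSHR, sets MSGNOLOOP and reflects the message
 * back downstream with qreply() so the write side is flushed as well.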
2243 */ 2244 2245 if (*bp->b_rptr & FLUSHR) { 2246 mutex_enter(&stp->sd_lock); 2247 if (*bp->b_rptr & FLUSHBAND) { 2248 ASSERT((bp->b_wptr - bp->b_rptr) >= 2); 2249 flushband(q, *(bp->b_rptr + 1), FLUSHALL); 2250 } else 2251 flushq_common(q, FLUSHALL, 2252 stp->sd_read_opt & RFLUSHPCPROT); 2253 if ((q->q_first == NULL) || 2254 (q->q_first->b_datap->db_type < QPCTL)) 2255 stp->sd_flag &= ~STRPRI; 2256 else { 2257 ASSERT(stp->sd_flag & STRPRI); 2258 } 2259 mutex_exit(&stp->sd_lock); 2260 } 2261 if ((*bp->b_rptr & FLUSHW) && !(bp->b_flag & MSGNOLOOP)) { 2262 *bp->b_rptr &= ~FLUSHR; 2263 bp->b_flag |= MSGNOLOOP; 2264 /* 2265 * Protect against the driver passing up 2266 * messages after it has done a qprocsoff. 2267 */ 2268 if (_OTHERQ(q)->q_next == NULL) 2269 freemsg(bp); 2270 else 2271 qreply(q, bp); 2272 return (0); 2273 } 2274 freemsg(bp); 2275 return (0); 2276 2277 case M_IOCACK: 2278 case M_IOCNAK: 2279 iocbp = (struct iocblk *)bp->b_rptr; 2280 /* 2281 * If not waiting for ACK or NAK then just free msg. 2282 * If incorrect id sequence number then just free msg. 2283 * If already have ACK or NAK for user then this is a 2284 * duplicate, display a warning and free the msg. 2285 */ 2286 mutex_enter(&stp->sd_lock); 2287 if ((stp->sd_flag & IOCWAIT) == 0 || stp->sd_iocblk || 2288 (stp->sd_iocid != iocbp->ioc_id)) { 2289 /* 2290 * If the ACK/NAK is a dup, display a message 2291 * Dup is when sd_iocid == ioc_id, and 2292 * sd_iocblk == <valid ptr> or -1 (the former 2293 * is when an ioctl has been put on the stream 2294 * head, but has not yet been consumed, the 2295 * later is when it has been consumed). 2296 */ 2297 if ((stp->sd_iocid == iocbp->ioc_id) && 2298 (stp->sd_iocblk != NULL)) { 2299 log_dupioc(q, bp); 2300 } 2301 freemsg(bp); 2302 mutex_exit(&stp->sd_lock); 2303 return (0); 2304 } 2305 2306 /* 2307 * Assign ACK or NAK to user and wake up. 2308 */ 2309 stp->sd_iocblk = bp; 2310 cv_broadcast(&stp->sd_monitor); 2311 mutex_exit(&stp->sd_lock); 2312 return (0); 2313 2314 case M_COPYIN: 2315 case M_COPYOUT: 2316 reqp = (struct copyreq *)bp->b_rptr; 2317 2318 /* 2319 * If not waiting for ACK or NAK then just fail request. 2320 * If already have ACK, NAK, or copy request, then just 2321 * fail request. 2322 * If incorrect id sequence number then just fail request. 2323 */ 2324 mutex_enter(&stp->sd_lock); 2325 if ((stp->sd_flag & IOCWAIT) == 0 || stp->sd_iocblk || 2326 (stp->sd_iocid != reqp->cq_id)) { 2327 if (bp->b_cont) { 2328 freemsg(bp->b_cont); 2329 bp->b_cont = NULL; 2330 } 2331 bp->b_datap->db_type = M_IOCDATA; 2332 bp->b_wptr = bp->b_rptr + sizeof (struct copyresp); 2333 resp = (struct copyresp *)bp->b_rptr; 2334 resp->cp_rval = (caddr_t)1; /* failure */ 2335 mutex_exit(&stp->sd_lock); 2336 putnext(stp->sd_wrq, bp); 2337 return (0); 2338 } 2339 2340 /* 2341 * Assign copy request to user and wake up. 2342 */ 2343 stp->sd_iocblk = bp; 2344 cv_broadcast(&stp->sd_monitor); 2345 mutex_exit(&stp->sd_lock); 2346 return (0); 2347 2348 case M_SETOPTS: 2349 /* 2350 * Set stream head options (read option, write offset, 2351 * min/max packet size, and/or high/low water marks for 2352 * the read side only). 
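 *
 * For reference, a minimal sketch of how a downstream module typically
 * asks for these changes (sent upstream from the module's read queue q;
 * the water mark and offset values are purely illustrative):
 *
 *	struct stroptions *sop;
 *	mblk_t *mp;
 *
 *	if ((mp = allocb(sizeof (struct stroptions), BPRI_HI)) != NULL) {
 *		mp->b_datap->db_type = M_SETOPTS;
 *		mp->b_wptr += sizeof (struct stroptions);
 *		sop = (struct stroptions *)mp->b_rptr;
 *		bzero(sop, sizeof (*sop));
 *		sop->so_flags = SO_HIWAT | SO_LOWAT | SO_WROFF;
 *		sop->so_hiwat = 65536;
 *		sop->so_lowat = 1024;
 *		sop->so_wroff = 64;
 *		putnext(q, mp);
 *	}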
2353 */ 2354 2355 bpri = 0; 2356 sop = (struct stroptions *)bp->b_rptr; 2357 mutex_enter(&stp->sd_lock); 2358 if (sop->so_flags & SO_READOPT) { 2359 switch (sop->so_readopt & RMODEMASK) { 2360 case RNORM: 2361 stp->sd_read_opt &= ~(RD_MSGDIS | RD_MSGNODIS); 2362 break; 2363 2364 case RMSGD: 2365 stp->sd_read_opt = 2366 ((stp->sd_read_opt & ~RD_MSGNODIS) | 2367 RD_MSGDIS); 2368 break; 2369 2370 case RMSGN: 2371 stp->sd_read_opt = 2372 ((stp->sd_read_opt & ~RD_MSGDIS) | 2373 RD_MSGNODIS); 2374 break; 2375 } 2376 switch (sop->so_readopt & RPROTMASK) { 2377 case RPROTNORM: 2378 stp->sd_read_opt &= ~(RD_PROTDAT | RD_PROTDIS); 2379 break; 2380 2381 case RPROTDAT: 2382 stp->sd_read_opt = 2383 ((stp->sd_read_opt & ~RD_PROTDIS) | 2384 RD_PROTDAT); 2385 break; 2386 2387 case RPROTDIS: 2388 stp->sd_read_opt = 2389 ((stp->sd_read_opt & ~RD_PROTDAT) | 2390 RD_PROTDIS); 2391 break; 2392 } 2393 switch (sop->so_readopt & RFLUSHMASK) { 2394 case RFLUSHPCPROT: 2395 /* 2396 * This sets the stream head to NOT flush 2397 * M_PCPROTO messages. 2398 */ 2399 stp->sd_read_opt |= RFLUSHPCPROT; 2400 break; 2401 } 2402 } 2403 if (sop->so_flags & SO_ERROPT) { 2404 switch (sop->so_erropt & RERRMASK) { 2405 case RERRNORM: 2406 stp->sd_flag &= ~STRDERRNONPERSIST; 2407 break; 2408 case RERRNONPERSIST: 2409 stp->sd_flag |= STRDERRNONPERSIST; 2410 break; 2411 } 2412 switch (sop->so_erropt & WERRMASK) { 2413 case WERRNORM: 2414 stp->sd_flag &= ~STWRERRNONPERSIST; 2415 break; 2416 case WERRNONPERSIST: 2417 stp->sd_flag |= STWRERRNONPERSIST; 2418 break; 2419 } 2420 } 2421 if (sop->so_flags & SO_COPYOPT) { 2422 if (sop->so_copyopt & ZCVMSAFE) { 2423 stp->sd_copyflag |= STZCVMSAFE; 2424 stp->sd_copyflag &= ~STZCVMUNSAFE; 2425 } else if (sop->so_copyopt & ZCVMUNSAFE) { 2426 stp->sd_copyflag |= STZCVMUNSAFE; 2427 stp->sd_copyflag &= ~STZCVMSAFE; 2428 } 2429 2430 if (sop->so_copyopt & COPYCACHED) { 2431 stp->sd_copyflag |= STRCOPYCACHED; 2432 } 2433 } 2434 if (sop->so_flags & SO_WROFF) 2435 stp->sd_wroff = sop->so_wroff; 2436 if (sop->so_flags & SO_TAIL) 2437 stp->sd_tail = sop->so_tail; 2438 if (sop->so_flags & SO_MINPSZ) 2439 q->q_minpsz = sop->so_minpsz; 2440 if (sop->so_flags & SO_MAXPSZ) 2441 q->q_maxpsz = sop->so_maxpsz; 2442 if (sop->so_flags & SO_MAXBLK) 2443 stp->sd_maxblk = sop->so_maxblk; 2444 if (sop->so_flags & SO_HIWAT) { 2445 if (sop->so_flags & SO_BAND) { 2446 if (strqset(q, QHIWAT, 2447 sop->so_band, sop->so_hiwat)) { 2448 cmn_err(CE_WARN, "strrput: could not " 2449 "allocate qband\n"); 2450 } else { 2451 bpri = sop->so_band; 2452 } 2453 } else { 2454 q->q_hiwat = sop->so_hiwat; 2455 } 2456 } 2457 if (sop->so_flags & SO_LOWAT) { 2458 if (sop->so_flags & SO_BAND) { 2459 if (strqset(q, QLOWAT, 2460 sop->so_band, sop->so_lowat)) { 2461 cmn_err(CE_WARN, "strrput: could not " 2462 "allocate qband\n"); 2463 } else { 2464 bpri = sop->so_band; 2465 } 2466 } else { 2467 q->q_lowat = sop->so_lowat; 2468 } 2469 } 2470 if (sop->so_flags & SO_MREADON) 2471 stp->sd_flag |= SNDMREAD; 2472 if (sop->so_flags & SO_MREADOFF) 2473 stp->sd_flag &= ~SNDMREAD; 2474 if (sop->so_flags & SO_NDELON) 2475 stp->sd_flag |= OLDNDELAY; 2476 if (sop->so_flags & SO_NDELOFF) 2477 stp->sd_flag &= ~OLDNDELAY; 2478 if (sop->so_flags & SO_ISTTY) 2479 stp->sd_flag |= STRISTTY; 2480 if (sop->so_flags & SO_ISNTTY) 2481 stp->sd_flag &= ~STRISTTY; 2482 if (sop->so_flags & SO_TOSTOP) 2483 stp->sd_flag |= STRTOSTOP; 2484 if (sop->so_flags & SO_TONSTOP) 2485 stp->sd_flag &= ~STRTOSTOP; 2486 if (sop->so_flags & SO_DELIM) 2487 stp->sd_flag |= STRDELIM; 2488 if 
(sop->so_flags & SO_NODELIM) 2489 stp->sd_flag &= ~STRDELIM; 2490 2491 mutex_exit(&stp->sd_lock); 2492 freemsg(bp); 2493 2494 /* Check backenable in case the water marks changed */ 2495 qbackenable(q, bpri); 2496 return (0); 2497 2498 /* 2499 * The following set of cases deal with situations where two stream 2500 * heads are connected to each other (twisted streams). These messages 2501 * have no meaning at the stream head. 2502 */ 2503 case M_BREAK: 2504 case M_CTL: 2505 case M_DELAY: 2506 case M_START: 2507 case M_STOP: 2508 case M_IOCDATA: 2509 case M_STARTI: 2510 case M_STOPI: 2511 freemsg(bp); 2512 return (0); 2513 2514 case M_IOCTL: 2515 /* 2516 * Always NAK this condition 2517 * (makes no sense) 2518 * If there is one or more threads in the read side 2519 * rwnext we have to defer the nacking until that thread 2520 * returns (in strget). 2521 */ 2522 mutex_enter(&stp->sd_lock); 2523 if (stp->sd_struiodnak != 0) { 2524 /* 2525 * Defer NAK to the streamhead. Queue at the end 2526 * the list. 2527 */ 2528 mblk_t *mp = stp->sd_struionak; 2529 2530 while (mp && mp->b_next) 2531 mp = mp->b_next; 2532 if (mp) 2533 mp->b_next = bp; 2534 else 2535 stp->sd_struionak = bp; 2536 bp->b_next = NULL; 2537 mutex_exit(&stp->sd_lock); 2538 return (0); 2539 } 2540 mutex_exit(&stp->sd_lock); 2541 2542 bp->b_datap->db_type = M_IOCNAK; 2543 /* 2544 * Protect against the driver passing up 2545 * messages after it has done a qprocsoff. 2546 */ 2547 if (_OTHERQ(q)->q_next == NULL) 2548 freemsg(bp); 2549 else 2550 qreply(q, bp); 2551 return (0); 2552 2553 default: 2554 #ifdef DEBUG 2555 cmn_err(CE_WARN, 2556 "bad message type %x received at stream head\n", 2557 bp->b_datap->db_type); 2558 #endif 2559 freemsg(bp); 2560 return (0); 2561 } 2562 2563 /* NOTREACHED */ 2564 } 2565 2566 /* 2567 * Check if the stream pointed to by `stp' can be written to, and return an 2568 * error code if not. If `eiohup' is set, then return EIO if STRHUP is set. 2569 * If `sigpipeok' is set and the SW_SIGPIPE option is enabled on the stream, 2570 * then always return EPIPE and send a SIGPIPE to the invoking thread. 2571 */ 2572 static int 2573 strwriteable(struct stdata *stp, boolean_t eiohup, boolean_t sigpipeok) 2574 { 2575 int error; 2576 2577 ASSERT(MUTEX_HELD(&stp->sd_lock)); 2578 2579 /* 2580 * For modem support, POSIX states that on writes, EIO should 2581 * be returned if the stream has been hung up. 2582 */ 2583 if (eiohup && (stp->sd_flag & (STPLEX|STRHUP)) == STRHUP) 2584 error = EIO; 2585 else 2586 error = strgeterr(stp, STRHUP|STPLEX|STWRERR, 0); 2587 2588 if (error != 0) { 2589 if (!(stp->sd_flag & STPLEX) && 2590 (stp->sd_wput_opt & SW_SIGPIPE) && sigpipeok) { 2591 tsignal(curthread, SIGPIPE); 2592 error = EPIPE; 2593 } 2594 } 2595 2596 return (error); 2597 } 2598 2599 /* 2600 * Copyin and send data down a stream. 2601 * The caller will allocate and copyin any control part that precedes the 2602 * message and pass than in as mctl. 2603 * 2604 * Caller should *not* hold sd_lock. 2605 * When EWOULDBLOCK is returned the caller has to redo the canputnext 2606 * under sd_lock in order to avoid missing a backenabling wakeup. 2607 * 2608 * Use iosize = -1 to not send any M_DATA. iosize = 0 sends zero-length M_DATA. 2609 * 2610 * Set MSG_IGNFLOW in flags to ignore flow control for hipri messages. 2611 * For sync streams we can only ignore flow control by reverting to using 2612 * putnext. 2613 * 2614 * If sd_maxblk is less than *iosize this routine might return without 2615 * transferring all of *iosize. 
In all cases, on return *iosize will contain 2616 * the amount of data that was transferred. 2617 */ 2618 static int 2619 strput(struct stdata *stp, mblk_t *mctl, struct uio *uiop, ssize_t *iosize, 2620 int b_flag, int pri, int flags) 2621 { 2622 struiod_t uiod; 2623 mblk_t *mp; 2624 queue_t *wqp = stp->sd_wrq; 2625 int error = 0; 2626 ssize_t count = *iosize; 2627 cred_t *cr; 2628 2629 ASSERT(MUTEX_NOT_HELD(&stp->sd_lock)); 2630 2631 if (uiop != NULL && count >= 0) 2632 flags |= stp->sd_struiowrq ? STRUIO_POSTPONE : 0; 2633 2634 if (!(flags & STRUIO_POSTPONE)) { 2635 /* 2636 * Use regular canputnext, strmakedata, putnext sequence. 2637 */ 2638 if (pri == 0) { 2639 if (!canputnext(wqp) && !(flags & MSG_IGNFLOW)) { 2640 freemsg(mctl); 2641 return (EWOULDBLOCK); 2642 } 2643 } else { 2644 if (!(flags & MSG_IGNFLOW) && !bcanputnext(wqp, pri)) { 2645 freemsg(mctl); 2646 return (EWOULDBLOCK); 2647 } 2648 } 2649 2650 if ((error = strmakedata(iosize, uiop, stp, flags, 2651 &mp)) != 0) { 2652 freemsg(mctl); 2653 /* 2654 * need to change return code to ENOMEM 2655 * so that this is not confused with 2656 * flow control, EAGAIN. 2657 */ 2658 2659 if (error == EAGAIN) 2660 return (ENOMEM); 2661 else 2662 return (error); 2663 } 2664 if (mctl != NULL) { 2665 if (mctl->b_cont == NULL) 2666 mctl->b_cont = mp; 2667 else if (mp != NULL) 2668 linkb(mctl, mp); 2669 mp = mctl; 2670 /* 2671 * Note that for interrupt thread, the CRED() is 2672 * NULL. Don't bother with the pid either. 2673 */ 2674 if ((cr = CRED()) != NULL) { 2675 mblk_setcred(mp, cr); 2676 DB_CPID(mp) = curproc->p_pid; 2677 } 2678 } else if (mp == NULL) 2679 return (0); 2680 2681 mp->b_flag |= b_flag; 2682 mp->b_band = (uchar_t)pri; 2683 2684 if (flags & MSG_IGNFLOW) { 2685 /* 2686 * XXX Hack: Don't get stuck running service 2687 * procedures. This is needed for sockfs when 2688 * sending the unbind message out of the rput 2689 * procedure - we don't want a put procedure 2690 * to run service procedures. 2691 */ 2692 putnext(wqp, mp); 2693 } else { 2694 stream_willservice(stp); 2695 putnext(wqp, mp); 2696 stream_runservice(stp); 2697 } 2698 return (0); 2699 } 2700 /* 2701 * Stream supports rwnext() for the write side. 2702 */ 2703 if ((error = strmakedata(iosize, uiop, stp, flags, &mp)) != 0) { 2704 freemsg(mctl); 2705 /* 2706 * map EAGAIN to ENOMEM since EAGAIN means "flow controlled". 2707 */ 2708 return (error == EAGAIN ? ENOMEM : error); 2709 } 2710 if (mctl != NULL) { 2711 if (mctl->b_cont == NULL) 2712 mctl->b_cont = mp; 2713 else if (mp != NULL) 2714 linkb(mctl, mp); 2715 mp = mctl; 2716 /* 2717 * Note that for interrupt thread, the CRED() is 2718 * NULL. Don't bother with the pid either. 2719 */ 2720 if ((cr = CRED()) != NULL) { 2721 mblk_setcred(mp, cr); 2722 DB_CPID(mp) = curproc->p_pid; 2723 } 2724 } else if (mp == NULL) { 2725 return (0); 2726 } 2727 2728 mp->b_flag |= b_flag; 2729 mp->b_band = (uchar_t)pri; 2730 2731 (void) uiodup(uiop, &uiod.d_uio, uiod.d_iov, 2732 sizeof (uiod.d_iov) / sizeof (*uiod.d_iov)); 2733 uiod.d_uio.uio_offset = 0; 2734 uiod.d_mp = mp; 2735 error = rwnext(wqp, &uiod); 2736 if (! uiod.d_mp) { 2737 uioskip(uiop, *iosize); 2738 return (error); 2739 } 2740 ASSERT(mp == uiod.d_mp); 2741 if (error == EINVAL) { 2742 /* 2743 * The stream plumbing must have changed while 2744 * we were away, so just turn off rwnext()s. 2745 */ 2746 error = 0; 2747 } else if (error == EBUSY || error == EWOULDBLOCK) { 2748 /* 2749 * Couldn't enter a perimeter or took a page fault, 2750 * so fall-back to putnext(). 
2751 */ 2752 error = 0; 2753 } else { 2754 freemsg(mp); 2755 return (error); 2756 } 2757 /* Have to check canput before consuming data from the uio */ 2758 if (pri == 0) { 2759 if (!canputnext(wqp) && !(flags & MSG_IGNFLOW)) { 2760 freemsg(mp); 2761 return (EWOULDBLOCK); 2762 } 2763 } else { 2764 if (!bcanputnext(wqp, pri) && !(flags & MSG_IGNFLOW)) { 2765 freemsg(mp); 2766 return (EWOULDBLOCK); 2767 } 2768 } 2769 ASSERT(mp == uiod.d_mp); 2770 /* Copyin data from the uio */ 2771 if ((error = struioget(wqp, mp, &uiod, 0)) != 0) { 2772 freemsg(mp); 2773 return (error); 2774 } 2775 uioskip(uiop, *iosize); 2776 if (flags & MSG_IGNFLOW) { 2777 /* 2778 * XXX Hack: Don't get stuck running service procedures. 2779 * This is needed for sockfs when sending the unbind message 2780 * out of the rput procedure - we don't want a put procedure 2781 * to run service procedures. 2782 */ 2783 putnext(wqp, mp); 2784 } else { 2785 stream_willservice(stp); 2786 putnext(wqp, mp); 2787 stream_runservice(stp); 2788 } 2789 return (0); 2790 } 2791 2792 /* 2793 * Write attempts to break the write request into messages conforming 2794 * with the minimum and maximum packet sizes set downstream. 2795 * 2796 * Write will not block if downstream queue is full and 2797 * O_NDELAY is set, otherwise it will block waiting for the queue to get room. 2798 * 2799 * A write of zero bytes gets packaged into a zero length message and sent 2800 * downstream like any other message. 2801 * 2802 * If buffers of the requested sizes are not available, the write will 2803 * sleep until the buffers become available. 2804 * 2805 * Write (if specified) will supply a write offset in a message if it 2806 * makes sense. This can be specified by downstream modules as part of 2807 * a M_SETOPTS message. Write will not supply the write offset if it 2808 * cannot supply any data in a buffer. In other words, write will never 2809 * send down an empty packet due to a write offset. 2810 */ 2811 /* ARGSUSED2 */ 2812 int 2813 strwrite(struct vnode *vp, struct uio *uiop, cred_t *crp) 2814 { 2815 return (strwrite_common(vp, uiop, crp, 0)); 2816 } 2817 2818 /* ARGSUSED2 */ 2819 int 2820 strwrite_common(struct vnode *vp, struct uio *uiop, cred_t *crp, int wflag) 2821 { 2822 struct stdata *stp; 2823 struct queue *wqp; 2824 ssize_t rmin, rmax; 2825 ssize_t iosize; 2826 int waitflag; 2827 int tempmode; 2828 int error = 0; 2829 int b_flag; 2830 2831 ASSERT(vp->v_stream); 2832 stp = vp->v_stream; 2833 2834 mutex_enter(&stp->sd_lock); 2835 2836 if ((error = i_straccess(stp, JCWRITE)) != 0) { 2837 mutex_exit(&stp->sd_lock); 2838 return (error); 2839 } 2840 2841 if (stp->sd_flag & (STWRERR|STRHUP|STPLEX)) { 2842 error = strwriteable(stp, B_TRUE, B_TRUE); 2843 if (error != 0) { 2844 mutex_exit(&stp->sd_lock); 2845 return (error); 2846 } 2847 } 2848 2849 mutex_exit(&stp->sd_lock); 2850 2851 wqp = stp->sd_wrq; 2852 2853 /* get these values from them cached in the stream head */ 2854 rmin = stp->sd_qn_minpsz; 2855 rmax = stp->sd_qn_maxpsz; 2856 2857 /* 2858 * Check the min/max packet size constraints. If min packet size 2859 * is non-zero, the write cannot be split into multiple messages 2860 * and still guarantee the size constraints. 
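 *
 * A concrete illustration (the values are hypothetical): with rmin == 8
 * and rmax == 64, both a 4-byte write and a 100-byte write fail with
 * ERANGE, since neither can be sent as a single message within the
 * limits.  With rmin == 0 and rmax == 64, the same 100-byte write is
 * simply split by the loop below into a 64-byte message followed by a
 * 36-byte message.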
2861 */ 2862 TRACE_1(TR_FAC_STREAMS_FR, TR_STRWRITE_IN, "strwrite in:q %p", wqp); 2863 2864 ASSERT((rmax >= 0) || (rmax == INFPSZ)); 2865 if (rmax == 0) { 2866 return (0); 2867 } 2868 if (rmin > 0) { 2869 if (uiop->uio_resid < rmin) { 2870 TRACE_3(TR_FAC_STREAMS_FR, TR_STRWRITE_OUT, 2871 "strwrite out:q %p out %d error %d", 2872 wqp, 0, ERANGE); 2873 return (ERANGE); 2874 } 2875 if ((rmax != INFPSZ) && (uiop->uio_resid > rmax)) { 2876 TRACE_3(TR_FAC_STREAMS_FR, TR_STRWRITE_OUT, 2877 "strwrite out:q %p out %d error %d", 2878 wqp, 1, ERANGE); 2879 return (ERANGE); 2880 } 2881 } 2882 2883 /* 2884 * Do until count satisfied or error. 2885 */ 2886 waitflag = WRITEWAIT | wflag; 2887 if (stp->sd_flag & OLDNDELAY) 2888 tempmode = uiop->uio_fmode & ~FNDELAY; 2889 else 2890 tempmode = uiop->uio_fmode; 2891 2892 if (rmax == INFPSZ) 2893 rmax = uiop->uio_resid; 2894 2895 /* 2896 * Note that tempmode does not get used in strput/strmakedata 2897 * but only in strwaitq. The other routines use uio_fmode 2898 * unmodified. 2899 */ 2900 2901 /* LINTED: constant in conditional context */ 2902 while (1) { /* breaks when uio_resid reaches zero */ 2903 /* 2904 * Determine the size of the next message to be 2905 * packaged. May have to break write into several 2906 * messages based on max packet size. 2907 */ 2908 iosize = MIN(uiop->uio_resid, rmax); 2909 2910 /* 2911 * Put block downstream when flow control allows it. 2912 */ 2913 if ((stp->sd_flag & STRDELIM) && (uiop->uio_resid == iosize)) 2914 b_flag = MSGDELIM; 2915 else 2916 b_flag = 0; 2917 2918 for (;;) { 2919 int done = 0; 2920 2921 error = strput(stp, NULL, uiop, &iosize, b_flag, 0, 0); 2922 if (error == 0) 2923 break; 2924 if (error != EWOULDBLOCK) 2925 goto out; 2926 2927 mutex_enter(&stp->sd_lock); 2928 /* 2929 * Check for a missed wakeup. 2930 * Needed since strput did not hold sd_lock across 2931 * the canputnext. 2932 */ 2933 if (canputnext(wqp)) { 2934 /* Try again */ 2935 mutex_exit(&stp->sd_lock); 2936 continue; 2937 } 2938 TRACE_1(TR_FAC_STREAMS_FR, TR_STRWRITE_WAIT, 2939 "strwrite wait:q %p wait", wqp); 2940 if ((error = strwaitq(stp, waitflag, (ssize_t)0, 2941 tempmode, -1, &done)) != 0 || done) { 2942 mutex_exit(&stp->sd_lock); 2943 if ((vp->v_type == VFIFO) && 2944 (uiop->uio_fmode & FNDELAY) && 2945 (error == EAGAIN)) 2946 error = 0; 2947 goto out; 2948 } 2949 TRACE_1(TR_FAC_STREAMS_FR, TR_STRWRITE_WAKE, 2950 "strwrite wake:q %p awakes", wqp); 2951 if ((error = i_straccess(stp, JCWRITE)) != 0) { 2952 mutex_exit(&stp->sd_lock); 2953 goto out; 2954 } 2955 mutex_exit(&stp->sd_lock); 2956 } 2957 waitflag |= NOINTR; 2958 TRACE_2(TR_FAC_STREAMS_FR, TR_STRWRITE_RESID, 2959 "strwrite resid:q %p uiop %p", wqp, uiop); 2960 if (uiop->uio_resid) { 2961 /* Recheck for errors - needed for sockets */ 2962 if ((stp->sd_wput_opt & SW_RECHECK_ERR) && 2963 (stp->sd_flag & (STWRERR|STRHUP|STPLEX))) { 2964 mutex_enter(&stp->sd_lock); 2965 error = strwriteable(stp, B_FALSE, B_TRUE); 2966 mutex_exit(&stp->sd_lock); 2967 if (error != 0) 2968 return (error); 2969 } 2970 continue; 2971 } 2972 break; 2973 } 2974 out: 2975 /* 2976 * For historical reasons, applications expect EAGAIN when a data 2977 * mblk_t cannot be allocated, so change ENOMEM back to EAGAIN. 2978 */ 2979 if (error == ENOMEM) 2980 error = EAGAIN; 2981 TRACE_3(TR_FAC_STREAMS_FR, TR_STRWRITE_OUT, 2982 "strwrite out:q %p out %d error %d", wqp, 2, error); 2983 return (error); 2984 } 2985 2986 /* 2987 * Stream head write service routine. 
2988 * Its job is to wake up any sleeping writers when a queue 2989 * downstream needs data (part of the flow control in putq and getq). 2990 * It also must wake anyone sleeping on a poll(). 2991 * For stream head right below mux module, it must also invoke put procedure 2992 * of next downstream module. 2993 */ 2994 int 2995 strwsrv(queue_t *q) 2996 { 2997 struct stdata *stp; 2998 queue_t *tq; 2999 qband_t *qbp; 3000 int i; 3001 qband_t *myqbp; 3002 int isevent; 3003 unsigned char qbf[NBAND]; /* band flushing backenable flags */ 3004 3005 TRACE_1(TR_FAC_STREAMS_FR, 3006 TR_STRWSRV, "strwsrv:q %p", q); 3007 stp = (struct stdata *)q->q_ptr; 3008 ASSERT(qclaimed(q)); 3009 mutex_enter(&stp->sd_lock); 3010 ASSERT(!(stp->sd_flag & STPLEX)); 3011 3012 if (stp->sd_flag & WSLEEP) { 3013 stp->sd_flag &= ~WSLEEP; 3014 cv_broadcast(&q->q_wait); 3015 } 3016 mutex_exit(&stp->sd_lock); 3017 3018 /* The other end of a stream pipe went away. */ 3019 if ((tq = q->q_next) == NULL) { 3020 return (0); 3021 } 3022 3023 /* Find the next module forward that has a service procedure */ 3024 claimstr(q); 3025 tq = q->q_nfsrv; 3026 ASSERT(tq != NULL); 3027 3028 if ((q->q_flag & QBACK)) { 3029 if ((tq->q_flag & QFULL)) { 3030 mutex_enter(QLOCK(tq)); 3031 if (!(tq->q_flag & QFULL)) { 3032 mutex_exit(QLOCK(tq)); 3033 goto wakeup; 3034 } 3035 /* 3036 * The queue must have become full again. Set QWANTW 3037 * again so strwsrv will be back enabled when 3038 * the queue becomes non-full next time. 3039 */ 3040 tq->q_flag |= QWANTW; 3041 mutex_exit(QLOCK(tq)); 3042 } else { 3043 wakeup: 3044 pollwakeup(&stp->sd_pollist, POLLWRNORM); 3045 mutex_enter(&stp->sd_lock); 3046 if (stp->sd_sigflags & S_WRNORM) 3047 strsendsig(stp->sd_siglist, S_WRNORM, 0, 0); 3048 mutex_exit(&stp->sd_lock); 3049 } 3050 } 3051 3052 isevent = 0; 3053 i = 1; 3054 bzero((caddr_t)qbf, NBAND); 3055 mutex_enter(QLOCK(tq)); 3056 if ((myqbp = q->q_bandp) != NULL) 3057 for (qbp = tq->q_bandp; qbp && myqbp; qbp = qbp->qb_next) { 3058 ASSERT(myqbp); 3059 if ((myqbp->qb_flag & QB_BACK)) { 3060 if (qbp->qb_flag & QB_FULL) { 3061 /* 3062 * The band must have become full again. 3063 * Set QB_WANTW again so strwsrv will 3064 * be back enabled when the band becomes 3065 * non-full next time. 3066 */ 3067 qbp->qb_flag |= QB_WANTW; 3068 } else { 3069 isevent = 1; 3070 qbf[i] = 1; 3071 } 3072 } 3073 myqbp = myqbp->qb_next; 3074 i++; 3075 } 3076 mutex_exit(QLOCK(tq)); 3077 3078 if (isevent) { 3079 for (i = tq->q_nband; i; i--) { 3080 if (qbf[i]) { 3081 pollwakeup(&stp->sd_pollist, POLLWRBAND); 3082 mutex_enter(&stp->sd_lock); 3083 if (stp->sd_sigflags & S_WRBAND) 3084 strsendsig(stp->sd_siglist, S_WRBAND, 3085 (uchar_t)i, 0); 3086 mutex_exit(&stp->sd_lock); 3087 } 3088 } 3089 } 3090 3091 releasestr(q); 3092 return (0); 3093 } 3094 3095 /* 3096 * Special case of strcopyin/strcopyout for copying 3097 * struct strioctl that can deal with both data 3098 * models. 
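 *
 * The special casing is needed because the ILP32 and LP64 layouts of
 * the structure differ only in the width of ic_dp.  Roughly (a sketch
 * of the declarations in <sys/stropts.h>, shown here for orientation
 * only):
 *
 *	struct strioctl32 {
 *		int32_t		ic_cmd;
 *		int32_t		ic_timout;
 *		int32_t		ic_len;
 *		caddr32_t	ic_dp;
 *	};
 *
 * so a 32-bit caller's buffer is copied through this shape and ic_dp
 * is explicitly widened or narrowed, as the code below does.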
3099 */ 3100 3101 #ifdef _LP64 3102 3103 static int 3104 strcopyin_strioctl(void *from, void *to, int flag, int copyflag) 3105 { 3106 struct strioctl32 strioc32; 3107 struct strioctl *striocp; 3108 3109 if (copyflag & U_TO_K) { 3110 ASSERT((copyflag & K_TO_K) == 0); 3111 3112 if ((flag & FMODELS) == DATAMODEL_ILP32) { 3113 if (copyin(from, &strioc32, sizeof (strioc32))) 3114 return (EFAULT); 3115 3116 striocp = (struct strioctl *)to; 3117 striocp->ic_cmd = strioc32.ic_cmd; 3118 striocp->ic_timout = strioc32.ic_timout; 3119 striocp->ic_len = strioc32.ic_len; 3120 striocp->ic_dp = (char *)(uintptr_t)strioc32.ic_dp; 3121 3122 } else { /* NATIVE data model */ 3123 if (copyin(from, to, sizeof (struct strioctl))) { 3124 return (EFAULT); 3125 } else { 3126 return (0); 3127 } 3128 } 3129 } else { 3130 ASSERT(copyflag & K_TO_K); 3131 bcopy(from, to, sizeof (struct strioctl)); 3132 } 3133 return (0); 3134 } 3135 3136 static int 3137 strcopyout_strioctl(void *from, void *to, int flag, int copyflag) 3138 { 3139 struct strioctl32 strioc32; 3140 struct strioctl *striocp; 3141 3142 if (copyflag & U_TO_K) { 3143 ASSERT((copyflag & K_TO_K) == 0); 3144 3145 if ((flag & FMODELS) == DATAMODEL_ILP32) { 3146 striocp = (struct strioctl *)from; 3147 strioc32.ic_cmd = striocp->ic_cmd; 3148 strioc32.ic_timout = striocp->ic_timout; 3149 strioc32.ic_len = striocp->ic_len; 3150 strioc32.ic_dp = (caddr32_t)(uintptr_t)striocp->ic_dp; 3151 ASSERT((char *)(uintptr_t)strioc32.ic_dp == 3152 striocp->ic_dp); 3153 3154 if (copyout(&strioc32, to, sizeof (strioc32))) 3155 return (EFAULT); 3156 3157 } else { /* NATIVE data model */ 3158 if (copyout(from, to, sizeof (struct strioctl))) { 3159 return (EFAULT); 3160 } else { 3161 return (0); 3162 } 3163 } 3164 } else { 3165 ASSERT(copyflag & K_TO_K); 3166 bcopy(from, to, sizeof (struct strioctl)); 3167 } 3168 return (0); 3169 } 3170 3171 #else /* ! _LP64 */ 3172 3173 /* ARGSUSED2 */ 3174 static int 3175 strcopyin_strioctl(void *from, void *to, int flag, int copyflag) 3176 { 3177 return (strcopyin(from, to, sizeof (struct strioctl), copyflag)); 3178 } 3179 3180 /* ARGSUSED2 */ 3181 static int 3182 strcopyout_strioctl(void *from, void *to, int flag, int copyflag) 3183 { 3184 return (strcopyout(from, to, sizeof (struct strioctl), copyflag)); 3185 } 3186 3187 #endif /* _LP64 */ 3188 3189 /* 3190 * Determine type of job control semantics expected by user. The 3191 * possibilities are: 3192 * JCREAD - Behaves like read() on fd; send SIGTTIN 3193 * JCWRITE - Behaves like write() on fd; send SIGTTOU if TOSTOP set 3194 * JCSETP - Sets a value in the stream; send SIGTTOU, ignore TOSTOP 3195 * JCGETP - Gets a value in the stream; no signals. 3196 * See straccess in strsubr.c for usage of these values. 3197 * 3198 * This routine also returns -1 for I_STR as a special case; the 3199 * caller must call again with the real ioctl number for 3200 * classification. 
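 *
 * For example (based solely on the switch below):
 *
 *	job_control_type(I_RECVFD) == JCREAD	read-like; may SIGTTIN
 *	job_control_type(TCSETA)   == JCSETP	set; SIGTTOU, ignore TOSTOP
 *	job_control_type(TCGETA)   == JCGETP	not listed, so treated as a get
 *	job_control_type(I_STR)    == -1	classify the wrapped command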
3201 */ 3202 static int 3203 job_control_type(int cmd) 3204 { 3205 switch (cmd) { 3206 case I_STR: 3207 return (-1); 3208 3209 case I_RECVFD: 3210 case I_E_RECVFD: 3211 return (JCREAD); 3212 3213 case I_FDINSERT: 3214 case I_SENDFD: 3215 return (JCWRITE); 3216 3217 case TCSETA: 3218 case TCSETAW: 3219 case TCSETAF: 3220 case TCSBRK: 3221 case TCXONC: 3222 case TCFLSH: 3223 case TCDSET: /* Obsolete */ 3224 case TIOCSWINSZ: 3225 case TCSETS: 3226 case TCSETSW: 3227 case TCSETSF: 3228 case TIOCSETD: 3229 case TIOCHPCL: 3230 case TIOCSETP: 3231 case TIOCSETN: 3232 case TIOCEXCL: 3233 case TIOCNXCL: 3234 case TIOCFLUSH: 3235 case TIOCSETC: 3236 case TIOCLBIS: 3237 case TIOCLBIC: 3238 case TIOCLSET: 3239 case TIOCSBRK: 3240 case TIOCCBRK: 3241 case TIOCSDTR: 3242 case TIOCCDTR: 3243 case TIOCSLTC: 3244 case TIOCSTOP: 3245 case TIOCSTART: 3246 case TIOCSTI: 3247 case TIOCSPGRP: 3248 case TIOCMSET: 3249 case TIOCMBIS: 3250 case TIOCMBIC: 3251 case TIOCREMOTE: 3252 case TIOCSIGNAL: 3253 case LDSETT: 3254 case LDSMAP: /* Obsolete */ 3255 case DIOCSETP: 3256 case I_FLUSH: 3257 case I_SRDOPT: 3258 case I_SETSIG: 3259 case I_SWROPT: 3260 case I_FLUSHBAND: 3261 case I_SETCLTIME: 3262 case I_SERROPT: 3263 case I_ESETSIG: 3264 case FIONBIO: 3265 case FIOASYNC: 3266 case FIOSETOWN: 3267 case JBOOT: /* Obsolete */ 3268 case JTERM: /* Obsolete */ 3269 case JTIMOM: /* Obsolete */ 3270 case JZOMBOOT: /* Obsolete */ 3271 case JAGENT: /* Obsolete */ 3272 case JTRUN: /* Obsolete */ 3273 case JXTPROTO: /* Obsolete */ 3274 case TIOCSETLD: 3275 return (JCSETP); 3276 } 3277 3278 return (JCGETP); 3279 } 3280 3281 /* 3282 * ioctl for streams 3283 */ 3284 int 3285 strioctl(struct vnode *vp, int cmd, intptr_t arg, int flag, int copyflag, 3286 cred_t *crp, int *rvalp) 3287 { 3288 struct stdata *stp; 3289 struct strcmd *scp; 3290 struct strioctl strioc; 3291 struct uio uio; 3292 struct iovec iov; 3293 int access; 3294 mblk_t *mp; 3295 int error = 0; 3296 int done = 0; 3297 ssize_t rmin, rmax; 3298 queue_t *wrq; 3299 queue_t *rdq; 3300 boolean_t kioctl = B_FALSE; 3301 3302 if (flag & FKIOCTL) { 3303 copyflag = K_TO_K; 3304 kioctl = B_TRUE; 3305 } 3306 ASSERT(vp->v_stream); 3307 ASSERT(copyflag == U_TO_K || copyflag == K_TO_K); 3308 stp = vp->v_stream; 3309 3310 TRACE_3(TR_FAC_STREAMS_FR, TR_IOCTL_ENTER, 3311 "strioctl:stp %p cmd %X arg %lX", stp, cmd, arg); 3312 3313 if (audit_active) 3314 audit_strioctl(vp, cmd, arg, flag, copyflag, crp, rvalp); 3315 3316 /* 3317 * If the copy is kernel to kernel, make sure that the FNATIVE 3318 * flag is set. After this it would be a serious error to have 3319 * no model flag. 3320 */ 3321 if (copyflag == K_TO_K) 3322 flag = (flag & ~FMODELS) | FNATIVE; 3323 3324 ASSERT((flag & FMODELS) != 0); 3325 3326 wrq = stp->sd_wrq; 3327 rdq = _RD(wrq); 3328 3329 access = job_control_type(cmd); 3330 3331 /* We should never see these here, should be handled by iwscn */ 3332 if (cmd == SRIOCSREDIR || cmd == SRIOCISREDIR) 3333 return (EINVAL); 3334 3335 mutex_enter(&stp->sd_lock); 3336 if ((access != -1) && ((error = i_straccess(stp, access)) != 0)) { 3337 mutex_exit(&stp->sd_lock); 3338 return (error); 3339 } 3340 mutex_exit(&stp->sd_lock); 3341 3342 /* 3343 * Check for sgttyb-related ioctls first, and complain as 3344 * necessary. 
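 *
 * The interface being policed here is the old BSD terminal API; a
 * userland sketch of the sort of legacy code that still issues it
 * (purely illustrative):
 *
 *	struct sgttyb sg;
 *
 *	if (ioctl(fd, TIOCGETP, &sg) == 0) {
 *		sg.sg_flags |= RAW;
 *		(void) ioctl(fd, TIOCSETP, &sg);
 *	}
 *
 * Depending on sgttyb_handling, such calls are honored silently, noted
 * on the console, or answered with SIGSYS, as the code below shows.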
3345 */ 3346 switch (cmd) { 3347 case TIOCGETP: 3348 case TIOCSETP: 3349 case TIOCSETN: 3350 if (sgttyb_handling >= 2 && !sgttyb_complaint) { 3351 sgttyb_complaint = B_TRUE; 3352 cmn_err(CE_NOTE, 3353 "application used obsolete TIOC[GS]ET"); 3354 } 3355 if (sgttyb_handling >= 3) { 3356 tsignal(curthread, SIGSYS); 3357 return (EIO); 3358 } 3359 break; 3360 } 3361 3362 mutex_enter(&stp->sd_lock); 3363 3364 switch (cmd) { 3365 case I_RECVFD: 3366 case I_E_RECVFD: 3367 case I_PEEK: 3368 case I_NREAD: 3369 case FIONREAD: 3370 case FIORDCHK: 3371 case I_ATMARK: 3372 case FIONBIO: 3373 case FIOASYNC: 3374 if (stp->sd_flag & (STRDERR|STPLEX)) { 3375 error = strgeterr(stp, STRDERR|STPLEX, 0); 3376 if (error != 0) { 3377 mutex_exit(&stp->sd_lock); 3378 return (error); 3379 } 3380 } 3381 break; 3382 3383 default: 3384 if (stp->sd_flag & (STRDERR|STWRERR|STPLEX)) { 3385 error = strgeterr(stp, STRDERR|STWRERR|STPLEX, 0); 3386 if (error != 0) { 3387 mutex_exit(&stp->sd_lock); 3388 return (error); 3389 } 3390 } 3391 } 3392 3393 mutex_exit(&stp->sd_lock); 3394 3395 switch (cmd) { 3396 default: 3397 /* 3398 * The stream head has hardcoded knowledge of a 3399 * miscellaneous collection of terminal-, keyboard- and 3400 * mouse-related ioctls, enumerated below. This hardcoded 3401 * knowledge allows the stream head to automatically 3402 * convert transparent ioctl requests made by userland 3403 * programs into I_STR ioctls which many old STREAMS 3404 * modules and drivers require. 3405 * 3406 * No new ioctls should ever be added to this list. 3407 * Instead, the STREAMS module or driver should be written 3408 * to either handle transparent ioctls or require any 3409 * userland programs to use I_STR ioctls (by returning 3410 * EINVAL to any transparent ioctl requests). 3411 * 3412 * More importantly, removing ioctls from this list should 3413 * be done with the utmost care, since our STREAMS modules 3414 * and drivers *count* on the stream head performing this 3415 * conversion, and thus may panic while processing 3416 * transparent ioctl request for one of these ioctls (keep 3417 * in mind that third party modules and drivers may have 3418 * similar problems). 3419 */ 3420 if (((cmd & IOCTYPE) == LDIOC) || 3421 ((cmd & IOCTYPE) == tIOC) || 3422 ((cmd & IOCTYPE) == TIOC) || 3423 ((cmd & IOCTYPE) == KIOC) || 3424 ((cmd & IOCTYPE) == MSIOC) || 3425 ((cmd & IOCTYPE) == VUIOC)) { 3426 /* 3427 * The ioctl is a tty ioctl - set up strioc buffer 3428 * and call strdoioctl() to do the work. 
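 *
 * In effect, a transparent request such as the following userland
 * sketch (fd and tio are illustrative names)
 *
 *	struct termio tio;
 *	ioctl(fd, TCSETA, &tio);
 *
 * is handled as if the caller had issued the I_STR form
 *
 *	struct strioctl sc;
 *	sc.ic_cmd = TCSETA;
 *	sc.ic_timout = INFTIM;
 *	sc.ic_len = sizeof (struct termio);
 *	sc.ic_dp = (char *)&tio;
 *	ioctl(fd, I_STR, &sc);
 *
 * except that the kernel builds the equivalent strioc below itself and
 * never round-trips through userland.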
3429 */ 3430 if (stp->sd_flag & STRHUP) 3431 return (ENXIO); 3432 strioc.ic_cmd = cmd; 3433 strioc.ic_timout = INFTIM; 3434 3435 switch (cmd) { 3436 3437 case TCXONC: 3438 case TCSBRK: 3439 case TCFLSH: 3440 case TCDSET: 3441 { 3442 int native_arg = (int)arg; 3443 strioc.ic_len = sizeof (int); 3444 strioc.ic_dp = (char *)&native_arg; 3445 return (strdoioctl(stp, &strioc, flag, 3446 K_TO_K, crp, rvalp)); 3447 } 3448 3449 case TCSETA: 3450 case TCSETAW: 3451 case TCSETAF: 3452 strioc.ic_len = sizeof (struct termio); 3453 strioc.ic_dp = (char *)arg; 3454 return (strdoioctl(stp, &strioc, flag, 3455 copyflag, crp, rvalp)); 3456 3457 case TCSETS: 3458 case TCSETSW: 3459 case TCSETSF: 3460 strioc.ic_len = sizeof (struct termios); 3461 strioc.ic_dp = (char *)arg; 3462 return (strdoioctl(stp, &strioc, flag, 3463 copyflag, crp, rvalp)); 3464 3465 case LDSETT: 3466 strioc.ic_len = sizeof (struct termcb); 3467 strioc.ic_dp = (char *)arg; 3468 return (strdoioctl(stp, &strioc, flag, 3469 copyflag, crp, rvalp)); 3470 3471 case TIOCSETP: 3472 strioc.ic_len = sizeof (struct sgttyb); 3473 strioc.ic_dp = (char *)arg; 3474 return (strdoioctl(stp, &strioc, flag, 3475 copyflag, crp, rvalp)); 3476 3477 case TIOCSTI: 3478 if ((flag & FREAD) == 0 && 3479 secpolicy_sti(crp) != 0) { 3480 return (EPERM); 3481 } 3482 mutex_enter(&stp->sd_lock); 3483 mutex_enter(&curproc->p_splock); 3484 if (stp->sd_sidp != curproc->p_sessp->s_sidp && 3485 secpolicy_sti(crp) != 0) { 3486 mutex_exit(&curproc->p_splock); 3487 mutex_exit(&stp->sd_lock); 3488 return (EACCES); 3489 } 3490 mutex_exit(&curproc->p_splock); 3491 mutex_exit(&stp->sd_lock); 3492 3493 strioc.ic_len = sizeof (char); 3494 strioc.ic_dp = (char *)arg; 3495 return (strdoioctl(stp, &strioc, flag, 3496 copyflag, crp, rvalp)); 3497 3498 case TIOCSWINSZ: 3499 strioc.ic_len = sizeof (struct winsize); 3500 strioc.ic_dp = (char *)arg; 3501 return (strdoioctl(stp, &strioc, flag, 3502 copyflag, crp, rvalp)); 3503 3504 case TIOCSSIZE: 3505 strioc.ic_len = sizeof (struct ttysize); 3506 strioc.ic_dp = (char *)arg; 3507 return (strdoioctl(stp, &strioc, flag, 3508 copyflag, crp, rvalp)); 3509 3510 case TIOCSSOFTCAR: 3511 case KIOCTRANS: 3512 case KIOCTRANSABLE: 3513 case KIOCCMD: 3514 case KIOCSDIRECT: 3515 case KIOCSCOMPAT: 3516 case KIOCSKABORTEN: 3517 case KIOCSRPTDELAY: 3518 case KIOCSRPTRATE: 3519 case VUIDSFORMAT: 3520 case TIOCSPPS: 3521 strioc.ic_len = sizeof (int); 3522 strioc.ic_dp = (char *)arg; 3523 return (strdoioctl(stp, &strioc, flag, 3524 copyflag, crp, rvalp)); 3525 3526 case KIOCSETKEY: 3527 case KIOCGETKEY: 3528 strioc.ic_len = sizeof (struct kiockey); 3529 strioc.ic_dp = (char *)arg; 3530 return (strdoioctl(stp, &strioc, flag, 3531 copyflag, crp, rvalp)); 3532 3533 case KIOCSKEY: 3534 case KIOCGKEY: 3535 strioc.ic_len = sizeof (struct kiockeymap); 3536 strioc.ic_dp = (char *)arg; 3537 return (strdoioctl(stp, &strioc, flag, 3538 copyflag, crp, rvalp)); 3539 3540 case KIOCSLED: 3541 /* arg is a pointer to char */ 3542 strioc.ic_len = sizeof (char); 3543 strioc.ic_dp = (char *)arg; 3544 return (strdoioctl(stp, &strioc, flag, 3545 copyflag, crp, rvalp)); 3546 3547 case MSIOSETPARMS: 3548 strioc.ic_len = sizeof (Ms_parms); 3549 strioc.ic_dp = (char *)arg; 3550 return (strdoioctl(stp, &strioc, flag, 3551 copyflag, crp, rvalp)); 3552 3553 case VUIDSADDR: 3554 case VUIDGADDR: 3555 strioc.ic_len = sizeof (struct vuid_addr_probe); 3556 strioc.ic_dp = (char *)arg; 3557 return (strdoioctl(stp, &strioc, flag, 3558 copyflag, crp, rvalp)); 3559 3560 /* 3561 * These M_IOCTL's 
don't require any data to be sent 3562 * downstream, and the driver will allocate and link 3563 * on its own mblk_t upon M_IOCACK -- thus we set 3564 * ic_len to zero and set ic_dp to arg so we know 3565 * where to copyout to later. 3566 */ 3567 case TIOCGSOFTCAR: 3568 case TIOCGWINSZ: 3569 case TIOCGSIZE: 3570 case KIOCGTRANS: 3571 case KIOCGTRANSABLE: 3572 case KIOCTYPE: 3573 case KIOCGDIRECT: 3574 case KIOCGCOMPAT: 3575 case KIOCLAYOUT: 3576 case KIOCGLED: 3577 case MSIOGETPARMS: 3578 case MSIOBUTTONS: 3579 case VUIDGFORMAT: 3580 case TIOCGPPS: 3581 case TIOCGPPSEV: 3582 case TCGETA: 3583 case TCGETS: 3584 case LDGETT: 3585 case TIOCGETP: 3586 case KIOCGRPTDELAY: 3587 case KIOCGRPTRATE: 3588 strioc.ic_len = 0; 3589 strioc.ic_dp = (char *)arg; 3590 return (strdoioctl(stp, &strioc, flag, 3591 copyflag, crp, rvalp)); 3592 } 3593 } 3594 3595 /* 3596 * Unknown cmd - send it down as a transparent ioctl. 3597 */ 3598 strioc.ic_cmd = cmd; 3599 strioc.ic_timout = INFTIM; 3600 strioc.ic_len = TRANSPARENT; 3601 strioc.ic_dp = (char *)&arg; 3602 3603 return (strdoioctl(stp, &strioc, flag, copyflag, crp, rvalp)); 3604 3605 case I_STR: 3606 /* 3607 * Stream ioctl. Read in an strioctl buffer from the user 3608 * along with any data specified and send it downstream. 3609 * Strdoioctl will wait allow only one ioctl message at 3610 * a time, and waits for the acknowledgement. 3611 */ 3612 3613 if (stp->sd_flag & STRHUP) 3614 return (ENXIO); 3615 3616 error = strcopyin_strioctl((void *)arg, &strioc, flag, 3617 copyflag); 3618 if (error != 0) 3619 return (error); 3620 3621 if ((strioc.ic_len < 0) || (strioc.ic_timout < -1)) 3622 return (EINVAL); 3623 3624 access = job_control_type(strioc.ic_cmd); 3625 mutex_enter(&stp->sd_lock); 3626 if ((access != -1) && 3627 ((error = i_straccess(stp, access)) != 0)) { 3628 mutex_exit(&stp->sd_lock); 3629 return (error); 3630 } 3631 mutex_exit(&stp->sd_lock); 3632 3633 /* 3634 * The I_STR facility provides a trap door for malicious 3635 * code to send down bogus streamio(7I) ioctl commands to 3636 * unsuspecting STREAMS modules and drivers which expect to 3637 * only get these messages from the stream head. 3638 * Explicitly prohibit any streamio ioctls which can be 3639 * passed downstream by the stream head. Note that we do 3640 * not block all streamio ioctls because the ioctl 3641 * numberspace is not well managed and thus it's possible 3642 * that a module or driver's ioctl numbers may accidentally 3643 * collide with them. 3644 */ 3645 switch (strioc.ic_cmd) { 3646 case I_LINK: 3647 case I_PLINK: 3648 case I_UNLINK: 3649 case I_PUNLINK: 3650 case _I_GETPEERCRED: 3651 case _I_PLINK_LH: 3652 return (EINVAL); 3653 } 3654 3655 error = strdoioctl(stp, &strioc, flag, copyflag, crp, rvalp); 3656 if (error == 0) { 3657 error = strcopyout_strioctl(&strioc, (void *)arg, 3658 flag, copyflag); 3659 } 3660 return (error); 3661 3662 case _I_CMD: 3663 /* 3664 * Like I_STR, but without using M_IOC* messages and without 3665 * copyins/copyouts beyond the passed-in argument. 
3666 */ 3667 if (stp->sd_flag & STRHUP) 3668 return (ENXIO); 3669 3670 if ((scp = kmem_alloc(sizeof (strcmd_t), KM_NOSLEEP)) == NULL) 3671 return (ENOMEM); 3672 3673 if (copyin((void *)arg, scp, sizeof (strcmd_t))) { 3674 kmem_free(scp, sizeof (strcmd_t)); 3675 return (EFAULT); 3676 } 3677 3678 access = job_control_type(scp->sc_cmd); 3679 mutex_enter(&stp->sd_lock); 3680 if (access != -1 && (error = i_straccess(stp, access)) != 0) { 3681 mutex_exit(&stp->sd_lock); 3682 kmem_free(scp, sizeof (strcmd_t)); 3683 return (error); 3684 } 3685 mutex_exit(&stp->sd_lock); 3686 3687 *rvalp = 0; 3688 if ((error = strdocmd(stp, scp, crp)) == 0) { 3689 if (copyout(scp, (void *)arg, sizeof (strcmd_t))) 3690 error = EFAULT; 3691 } 3692 kmem_free(scp, sizeof (strcmd_t)); 3693 return (error); 3694 3695 case I_NREAD: 3696 /* 3697 * Return number of bytes of data in first message 3698 * in queue in "arg" and return the number of messages 3699 * in queue in return value. 3700 */ 3701 { 3702 size_t size; 3703 int retval; 3704 int count = 0; 3705 3706 mutex_enter(QLOCK(rdq)); 3707 3708 size = msgdsize(rdq->q_first); 3709 for (mp = rdq->q_first; mp != NULL; mp = mp->b_next) 3710 count++; 3711 3712 mutex_exit(QLOCK(rdq)); 3713 if (stp->sd_struiordq) { 3714 infod_t infod; 3715 3716 infod.d_cmd = INFOD_COUNT; 3717 infod.d_count = 0; 3718 if (count == 0) { 3719 infod.d_cmd |= INFOD_FIRSTBYTES; 3720 infod.d_bytes = 0; 3721 } 3722 infod.d_res = 0; 3723 (void) infonext(rdq, &infod); 3724 count += infod.d_count; 3725 if (infod.d_res & INFOD_FIRSTBYTES) 3726 size = infod.d_bytes; 3727 } 3728 3729 /* 3730 * Drop down from size_t to the "int" required by the 3731 * interface. Cap at INT_MAX. 3732 */ 3733 retval = MIN(size, INT_MAX); 3734 error = strcopyout(&retval, (void *)arg, sizeof (retval), 3735 copyflag); 3736 if (!error) 3737 *rvalp = count; 3738 return (error); 3739 } 3740 3741 case FIONREAD: 3742 /* 3743 * Return number of bytes of data in all data messages 3744 * in queue in "arg". 3745 */ 3746 { 3747 size_t size = 0; 3748 int retval; 3749 3750 mutex_enter(QLOCK(rdq)); 3751 for (mp = rdq->q_first; mp != NULL; mp = mp->b_next) 3752 size += msgdsize(mp); 3753 mutex_exit(QLOCK(rdq)); 3754 3755 if (stp->sd_struiordq) { 3756 infod_t infod; 3757 3758 infod.d_cmd = INFOD_BYTES; 3759 infod.d_res = 0; 3760 infod.d_bytes = 0; 3761 (void) infonext(rdq, &infod); 3762 size += infod.d_bytes; 3763 } 3764 3765 /* 3766 * Drop down from size_t to the "int" required by the 3767 * interface. Cap at INT_MAX. 3768 */ 3769 retval = MIN(size, INT_MAX); 3770 error = strcopyout(&retval, (void *)arg, sizeof (retval), 3771 copyflag); 3772 3773 *rvalp = 0; 3774 return (error); 3775 } 3776 case FIORDCHK: 3777 /* 3778 * FIORDCHK does not use arg value (like FIONREAD), 3779 * instead a count is returned. I_NREAD value may 3780 * not be accurate but safe. The real thing to do is 3781 * to add the msgdsizes of all data messages until 3782 * a non-data message. 3783 */ 3784 { 3785 size_t size = 0; 3786 3787 mutex_enter(QLOCK(rdq)); 3788 for (mp = rdq->q_first; mp != NULL; mp = mp->b_next) 3789 size += msgdsize(mp); 3790 mutex_exit(QLOCK(rdq)); 3791 3792 if (stp->sd_struiordq) { 3793 infod_t infod; 3794 3795 infod.d_cmd = INFOD_BYTES; 3796 infod.d_res = 0; 3797 infod.d_bytes = 0; 3798 (void) infonext(rdq, &infod); 3799 size += infod.d_bytes; 3800 } 3801 3802 /* 3803 * Since ioctl returns an int, and memory sizes under 3804 * LP64 may not fit, we return INT_MAX if the count was 3805 * actually greater. 
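 *
 * Note how differently the two ioctls report the count to the caller;
 * a userland sketch (illustrative only):
 *
 *	int n;
 *
 *	(void) ioctl(fd, FIONREAD, &n);		count stored through the arg
 *	n = ioctl(fd, FIORDCHK, 0);		count is the return value
 *
 * which is why FIORDCHK sets *rvalp here instead of doing a copyout.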
3806 */ 3807 *rvalp = MIN(size, INT_MAX); 3808 return (0); 3809 } 3810 3811 case I_FIND: 3812 /* 3813 * Get module name. 3814 */ 3815 { 3816 char mname[FMNAMESZ + 1]; 3817 queue_t *q; 3818 3819 error = (copyflag & U_TO_K ? copyinstr : copystr)((void *)arg, 3820 mname, FMNAMESZ + 1, NULL); 3821 if (error) 3822 return ((error == ENAMETOOLONG) ? EINVAL : EFAULT); 3823 3824 /* 3825 * Return EINVAL if we're handed a bogus module name. 3826 */ 3827 if (fmodsw_find(mname, FMODSW_LOAD) == NULL) { 3828 TRACE_0(TR_FAC_STREAMS_FR, 3829 TR_I_CANT_FIND, "couldn't I_FIND"); 3830 return (EINVAL); 3831 } 3832 3833 *rvalp = 0; 3834 3835 /* Look downstream to see if module is there. */ 3836 claimstr(stp->sd_wrq); 3837 for (q = stp->sd_wrq->q_next; q; q = q->q_next) { 3838 if (q->q_flag&QREADR) { 3839 q = NULL; 3840 break; 3841 } 3842 if (strcmp(mname, q->q_qinfo->qi_minfo->mi_idname) == 0) 3843 break; 3844 } 3845 releasestr(stp->sd_wrq); 3846 3847 *rvalp = (q ? 1 : 0); 3848 return (error); 3849 } 3850 3851 case I_PUSH: 3852 case __I_PUSH_NOCTTY: 3853 /* 3854 * Push a module. 3855 * For the case __I_PUSH_NOCTTY push a module but 3856 * do not allocate controlling tty. See bugid 4025044 3857 */ 3858 3859 { 3860 char mname[FMNAMESZ + 1]; 3861 fmodsw_impl_t *fp; 3862 dev_t dummydev; 3863 3864 if (stp->sd_flag & STRHUP) 3865 return (ENXIO); 3866 3867 /* 3868 * Get module name and look up in fmodsw. 3869 */ 3870 error = (copyflag & U_TO_K ? copyinstr : copystr)((void *)arg, 3871 mname, FMNAMESZ + 1, NULL); 3872 if (error) 3873 return ((error == ENAMETOOLONG) ? EINVAL : EFAULT); 3874 3875 if ((fp = fmodsw_find(mname, FMODSW_HOLD | FMODSW_LOAD)) == 3876 NULL) 3877 return (EINVAL); 3878 3879 TRACE_2(TR_FAC_STREAMS_FR, TR_I_PUSH, 3880 "I_PUSH:fp %p stp %p", fp, stp); 3881 3882 if (error = strstartplumb(stp, flag, cmd)) { 3883 fmodsw_rele(fp); 3884 return (error); 3885 } 3886 3887 /* 3888 * See if any more modules can be pushed on this stream. 3889 * Note that this check must be done after strstartplumb() 3890 * since otherwise multiple threads issuing I_PUSHes on 3891 * the same stream will be able to exceed nstrpush. 3892 */ 3893 mutex_enter(&stp->sd_lock); 3894 if (stp->sd_pushcnt >= nstrpush) { 3895 fmodsw_rele(fp); 3896 strendplumb(stp); 3897 mutex_exit(&stp->sd_lock); 3898 return (EINVAL); 3899 } 3900 mutex_exit(&stp->sd_lock); 3901 3902 /* 3903 * Push new module and call its open routine 3904 * via qattach(). Modules don't change device 3905 * numbers, so just ignore dummydev here. 3906 */ 3907 dummydev = vp->v_rdev; 3908 if ((error = qattach(rdq, &dummydev, 0, crp, fp, 3909 B_FALSE)) == 0) { 3910 if (vp->v_type == VCHR && /* sorry, no pipes allowed */ 3911 (cmd == I_PUSH) && (stp->sd_flag & STRISTTY)) { 3912 /* 3913 * try to allocate it as a controlling terminal 3914 */ 3915 (void) strctty(stp); 3916 } 3917 } 3918 3919 mutex_enter(&stp->sd_lock); 3920 3921 /* 3922 * As a performance concern we are caching the values of 3923 * q_minpsz and q_maxpsz of the module below the stream 3924 * head in the stream head. 
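 * The cached copies live in sd_qn_minpsz and sd_qn_maxpsz and are what
 * strwrite_common() consults on every write, so they must be refreshed
 * here (and in I_POP, _I_INSERT and _I_REMOVE) whenever the module
 * directly below the stream head changes; otherwise writes would keep
 * using the stale limits of the module that was just covered up.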
3925 */ 3926 mutex_enter(QLOCK(stp->sd_wrq->q_next)); 3927 rmin = stp->sd_wrq->q_next->q_minpsz; 3928 rmax = stp->sd_wrq->q_next->q_maxpsz; 3929 mutex_exit(QLOCK(stp->sd_wrq->q_next)); 3930 3931 /* Do this processing here as a performance concern */ 3932 if (strmsgsz != 0) { 3933 if (rmax == INFPSZ) 3934 rmax = strmsgsz; 3935 else { 3936 if (vp->v_type == VFIFO) 3937 rmax = MIN(PIPE_BUF, rmax); 3938 else rmax = MIN(strmsgsz, rmax); 3939 } 3940 } 3941 3942 mutex_enter(QLOCK(wrq)); 3943 stp->sd_qn_minpsz = rmin; 3944 stp->sd_qn_maxpsz = rmax; 3945 mutex_exit(QLOCK(wrq)); 3946 3947 strendplumb(stp); 3948 mutex_exit(&stp->sd_lock); 3949 return (error); 3950 } 3951 3952 case I_POP: 3953 { 3954 queue_t *q; 3955 3956 if (stp->sd_flag & STRHUP) 3957 return (ENXIO); 3958 if (!wrq->q_next) /* for broken pipes */ 3959 return (EINVAL); 3960 3961 if (error = strstartplumb(stp, flag, cmd)) 3962 return (error); 3963 3964 /* 3965 * If there is an anchor on this stream and popping 3966 * the current module would attempt to pop through the 3967 * anchor, then disallow the pop unless we have sufficient 3968 * privileges; take the cheapest (non-locking) check 3969 * first. 3970 */ 3971 if (secpolicy_ip_config(crp, B_TRUE) != 0 || 3972 (stp->sd_anchorzone != crgetzoneid(crp))) { 3973 mutex_enter(&stp->sd_lock); 3974 /* 3975 * Anchors only apply if there's at least one 3976 * module on the stream (sd_pushcnt > 0). 3977 */ 3978 if (stp->sd_pushcnt > 0 && 3979 stp->sd_pushcnt == stp->sd_anchor && 3980 stp->sd_vnode->v_type != VFIFO) { 3981 strendplumb(stp); 3982 mutex_exit(&stp->sd_lock); 3983 if (stp->sd_anchorzone != crgetzoneid(crp)) 3984 return (EINVAL); 3985 /* Audit and report error */ 3986 return (secpolicy_ip_config(crp, B_FALSE)); 3987 } 3988 mutex_exit(&stp->sd_lock); 3989 } 3990 3991 q = wrq->q_next; 3992 TRACE_2(TR_FAC_STREAMS_FR, TR_I_POP, 3993 "I_POP:%p from %p", q, stp); 3994 if (q->q_next == NULL || (q->q_flag & (QREADR|QISDRV))) { 3995 error = EINVAL; 3996 } else { 3997 qdetach(_RD(q), 1, flag, crp, B_FALSE); 3998 error = 0; 3999 } 4000 mutex_enter(&stp->sd_lock); 4001 4002 /* 4003 * As a performance concern we are caching the values of 4004 * q_minpsz and q_maxpsz of the module below the stream 4005 * head in the stream head. 4006 */ 4007 mutex_enter(QLOCK(wrq->q_next)); 4008 rmin = wrq->q_next->q_minpsz; 4009 rmax = wrq->q_next->q_maxpsz; 4010 mutex_exit(QLOCK(wrq->q_next)); 4011 4012 /* Do this processing here as a performance concern */ 4013 if (strmsgsz != 0) { 4014 if (rmax == INFPSZ) 4015 rmax = strmsgsz; 4016 else { 4017 if (vp->v_type == VFIFO) 4018 rmax = MIN(PIPE_BUF, rmax); 4019 else rmax = MIN(strmsgsz, rmax); 4020 } 4021 } 4022 4023 mutex_enter(QLOCK(wrq)); 4024 stp->sd_qn_minpsz = rmin; 4025 stp->sd_qn_maxpsz = rmax; 4026 mutex_exit(QLOCK(wrq)); 4027 4028 /* If we popped through the anchor, then reset the anchor. */ 4029 if (stp->sd_pushcnt < stp->sd_anchor) { 4030 stp->sd_anchor = 0; 4031 stp->sd_anchorzone = 0; 4032 } 4033 strendplumb(stp); 4034 mutex_exit(&stp->sd_lock); 4035 return (error); 4036 } 4037 4038 case _I_MUXID2FD: 4039 { 4040 /* 4041 * Create a fd for a I_PLINK'ed lower stream with a given 4042 * muxid. With the fd, application can send down ioctls, 4043 * like I_LIST, to the previously I_PLINK'ed stream. Note 4044 * that after getting the fd, the application has to do an 4045 * I_PUNLINK on the muxid before it can do any operation 4046 * on the lower stream. This is required by spec1170. 
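 *
 * A hedged sketch of the intended calling sequence (ctlfd, muxid and
 * muxfd are illustrative names):
 *
 *	int muxfd = ioctl(ctlfd, _I_MUXID2FD, muxid);
 *
 *	if (muxfd >= 0) {
 *		(void) ioctl(ctlfd, I_PUNLINK, muxid);
 *		... muxfd may now be used for I_LIST and similar ioctls ...
 *	}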
4047 * 4048 * The fd used to do this ioctl should point to the same 4049 * controlling device used to do the I_PLINK. If it uses 4050 * a different stream or an invalid muxid, I_MUXID2FD will 4051 * fail. The error code is set to EINVAL. 4052 * 4053 * The intended use of this interface is the following. 4054 * An application I_PLINK'ed a stream and exits. The fd 4055 * to the lower stream is gone. Another application 4056 * wants to get a fd to the lower stream, it uses I_MUXID2FD. 4057 */ 4058 int muxid = (int)arg; 4059 int fd; 4060 linkinfo_t *linkp; 4061 struct file *fp; 4062 netstack_t *ns; 4063 str_stack_t *ss; 4064 4065 /* 4066 * Do not allow the wildcard muxid. This ioctl is not 4067 * intended to find arbitrary link. 4068 */ 4069 if (muxid == 0) { 4070 return (EINVAL); 4071 } 4072 4073 ns = netstack_find_by_cred(crp); 4074 ASSERT(ns != NULL); 4075 ss = ns->netstack_str; 4076 ASSERT(ss != NULL); 4077 4078 mutex_enter(&muxifier); 4079 linkp = findlinks(vp->v_stream, muxid, LINKPERSIST, ss); 4080 if (linkp == NULL) { 4081 mutex_exit(&muxifier); 4082 netstack_rele(ss->ss_netstack); 4083 return (EINVAL); 4084 } 4085 4086 if ((fd = ufalloc(0)) == -1) { 4087 mutex_exit(&muxifier); 4088 netstack_rele(ss->ss_netstack); 4089 return (EMFILE); 4090 } 4091 fp = linkp->li_fpdown; 4092 mutex_enter(&fp->f_tlock); 4093 fp->f_count++; 4094 mutex_exit(&fp->f_tlock); 4095 mutex_exit(&muxifier); 4096 setf(fd, fp); 4097 *rvalp = fd; 4098 netstack_rele(ss->ss_netstack); 4099 return (0); 4100 } 4101 4102 case _I_INSERT: 4103 { 4104 /* 4105 * To insert a module to a given position in a stream. 4106 * In the first release, only allow privileged user 4107 * to use this ioctl. Furthermore, the insert is only allowed 4108 * below an anchor if the zoneid is the same as the zoneid 4109 * which created the anchor. 4110 * 4111 * Note that we do not plan to support this ioctl 4112 * on pipes in the first release. We want to learn more 4113 * about the implications of these ioctls before extending 4114 * their support. And we do not think these features are 4115 * valuable for pipes. 4116 * 4117 * Neither do we support O/C hot stream. Note that only 4118 * the upper streams of TCP/IP stack are O/C hot streams. 4119 * The lower IP stream is not. 4120 * When there is a O/C cold barrier, we only allow inserts 4121 * above the barrier. 4122 */ 4123 STRUCT_DECL(strmodconf, strmodinsert); 4124 char mod_name[FMNAMESZ + 1]; 4125 fmodsw_impl_t *fp; 4126 dev_t dummydev; 4127 queue_t *tmp_wrq; 4128 int pos; 4129 boolean_t is_insert; 4130 4131 STRUCT_INIT(strmodinsert, flag); 4132 if (stp->sd_flag & STRHUP) 4133 return (ENXIO); 4134 if (STRMATED(stp)) 4135 return (EINVAL); 4136 if ((error = secpolicy_net_config(crp, B_FALSE)) != 0) 4137 return (error); 4138 if (stp->sd_anchor != 0 && 4139 stp->sd_anchorzone != crgetzoneid(crp)) 4140 return (EINVAL); 4141 4142 error = strcopyin((void *)arg, STRUCT_BUF(strmodinsert), 4143 STRUCT_SIZE(strmodinsert), copyflag); 4144 if (error) 4145 return (error); 4146 4147 /* 4148 * Get module name and look up in fmodsw. 4149 */ 4150 error = (copyflag & U_TO_K ? copyinstr : 4151 copystr)(STRUCT_FGETP(strmodinsert, mod_name), 4152 mod_name, FMNAMESZ + 1, NULL); 4153 if (error) 4154 return ((error == ENAMETOOLONG) ? EINVAL : EFAULT); 4155 4156 if ((fp = fmodsw_find(mod_name, FMODSW_HOLD | FMODSW_LOAD)) == 4157 NULL) 4158 return (EINVAL); 4159 4160 if (error = strstartplumb(stp, flag, cmd)) { 4161 fmodsw_rele(fp); 4162 return (error); 4163 } 4164 4165 /* 4166 * Is this _I_INSERT just like an I_PUSH? 
We need to know 4167 * this because we do some optimizations if this is a 4168 * module being pushed. 4169 */ 4170 pos = STRUCT_FGET(strmodinsert, pos); 4171 is_insert = (pos != 0); 4172 4173 /* 4174 * Make sure pos is valid. Even though it is not an I_PUSH, 4175 * we impose the same limit on the number of modules in a 4176 * stream. 4177 */ 4178 mutex_enter(&stp->sd_lock); 4179 if (stp->sd_pushcnt >= nstrpush || pos < 0 || 4180 pos > stp->sd_pushcnt) { 4181 fmodsw_rele(fp); 4182 strendplumb(stp); 4183 mutex_exit(&stp->sd_lock); 4184 return (EINVAL); 4185 } 4186 if (stp->sd_anchor != 0) { 4187 /* 4188 * Is this insert below the anchor? 4189 * Pushcnt hasn't been increased yet hence 4190 * we test for greater than here, and greater or 4191 * equal after qattach. 4192 */ 4193 if (pos > (stp->sd_pushcnt - stp->sd_anchor) && 4194 stp->sd_anchorzone != crgetzoneid(crp)) { 4195 fmodsw_rele(fp); 4196 strendplumb(stp); 4197 mutex_exit(&stp->sd_lock); 4198 return (EPERM); 4199 } 4200 } 4201 4202 mutex_exit(&stp->sd_lock); 4203 4204 /* 4205 * First find the correct position this module to 4206 * be inserted. We don't need to call claimstr() 4207 * as the stream should not be changing at this point. 4208 * 4209 * Insert new module and call its open routine 4210 * via qattach(). Modules don't change device 4211 * numbers, so just ignore dummydev here. 4212 */ 4213 for (tmp_wrq = stp->sd_wrq; pos > 0; 4214 tmp_wrq = tmp_wrq->q_next, pos--) { 4215 ASSERT(SAMESTR(tmp_wrq)); 4216 } 4217 dummydev = vp->v_rdev; 4218 if ((error = qattach(_RD(tmp_wrq), &dummydev, 0, crp, 4219 fp, is_insert)) != 0) { 4220 mutex_enter(&stp->sd_lock); 4221 strendplumb(stp); 4222 mutex_exit(&stp->sd_lock); 4223 return (error); 4224 } 4225 4226 mutex_enter(&stp->sd_lock); 4227 4228 /* 4229 * As a performance concern we are caching the values of 4230 * q_minpsz and q_maxpsz of the module below the stream 4231 * head in the stream head. 4232 */ 4233 if (!is_insert) { 4234 mutex_enter(QLOCK(stp->sd_wrq->q_next)); 4235 rmin = stp->sd_wrq->q_next->q_minpsz; 4236 rmax = stp->sd_wrq->q_next->q_maxpsz; 4237 mutex_exit(QLOCK(stp->sd_wrq->q_next)); 4238 4239 /* Do this processing here as a performance concern */ 4240 if (strmsgsz != 0) { 4241 if (rmax == INFPSZ) { 4242 rmax = strmsgsz; 4243 } else { 4244 rmax = MIN(strmsgsz, rmax); 4245 } 4246 } 4247 4248 mutex_enter(QLOCK(wrq)); 4249 stp->sd_qn_minpsz = rmin; 4250 stp->sd_qn_maxpsz = rmax; 4251 mutex_exit(QLOCK(wrq)); 4252 } 4253 4254 /* 4255 * Need to update the anchor value if this module is 4256 * inserted below the anchor point. 4257 */ 4258 if (stp->sd_anchor != 0) { 4259 pos = STRUCT_FGET(strmodinsert, pos); 4260 if (pos >= (stp->sd_pushcnt - stp->sd_anchor)) 4261 stp->sd_anchor++; 4262 } 4263 4264 strendplumb(stp); 4265 mutex_exit(&stp->sd_lock); 4266 return (0); 4267 } 4268 4269 case _I_REMOVE: 4270 { 4271 /* 4272 * To remove a module with a given name in a stream. The 4273 * caller of this ioctl needs to provide both the name and 4274 * the position of the module to be removed. This eliminates 4275 * the ambiguity of removal if a module is inserted/pushed 4276 * multiple times in a stream. In the first release, only 4277 * allow privileged user to use this ioctl. 4278 * Furthermore, the remove is only allowed 4279 * below an anchor if the zoneid is the same as the zoneid 4280 * which created the anchor. 4281 * 4282 * Note that we do not plan to support this ioctl 4283 * on pipes in the first release. 
We want to learn more 4284 * about the implications of these ioctls before extending 4285 * their support. And we do not think these features are 4286 * valuable for pipes. 4287 * 4288 * Neither do we support O/C hot stream. Note that only 4289 * the upper streams of TCP/IP stack are O/C hot streams. 4290 * The lower IP stream is not. 4291 * When there is a O/C cold barrier we do not allow removal 4292 * below the barrier. 4293 * 4294 * Also note that _I_REMOVE cannot be used to remove a 4295 * driver or the stream head. 4296 */ 4297 STRUCT_DECL(strmodconf, strmodremove); 4298 queue_t *q; 4299 int pos; 4300 char mod_name[FMNAMESZ + 1]; 4301 boolean_t is_remove; 4302 4303 STRUCT_INIT(strmodremove, flag); 4304 if (stp->sd_flag & STRHUP) 4305 return (ENXIO); 4306 if (STRMATED(stp)) 4307 return (EINVAL); 4308 if ((error = secpolicy_net_config(crp, B_FALSE)) != 0) 4309 return (error); 4310 if (stp->sd_anchor != 0 && 4311 stp->sd_anchorzone != crgetzoneid(crp)) 4312 return (EINVAL); 4313 4314 error = strcopyin((void *)arg, STRUCT_BUF(strmodremove), 4315 STRUCT_SIZE(strmodremove), copyflag); 4316 if (error) 4317 return (error); 4318 4319 error = (copyflag & U_TO_K ? copyinstr : 4320 copystr)(STRUCT_FGETP(strmodremove, mod_name), 4321 mod_name, FMNAMESZ + 1, NULL); 4322 if (error) 4323 return ((error == ENAMETOOLONG) ? EINVAL : EFAULT); 4324 4325 if ((error = strstartplumb(stp, flag, cmd)) != 0) 4326 return (error); 4327 4328 /* 4329 * Match the name of given module to the name of module at 4330 * the given position. 4331 */ 4332 pos = STRUCT_FGET(strmodremove, pos); 4333 4334 is_remove = (pos != 0); 4335 for (q = stp->sd_wrq->q_next; SAMESTR(q) && pos > 0; 4336 q = q->q_next, pos--) 4337 ; 4338 if (pos > 0 || ! SAMESTR(q) || 4339 strncmp(q->q_qinfo->qi_minfo->mi_idname, mod_name, 4340 strlen(q->q_qinfo->qi_minfo->mi_idname)) != 0) { 4341 mutex_enter(&stp->sd_lock); 4342 strendplumb(stp); 4343 mutex_exit(&stp->sd_lock); 4344 return (EINVAL); 4345 } 4346 4347 /* 4348 * If the position is at or below an anchor, then the zoneid 4349 * must match the zoneid that created the anchor. 4350 */ 4351 if (stp->sd_anchor != 0) { 4352 pos = STRUCT_FGET(strmodremove, pos); 4353 if (pos >= (stp->sd_pushcnt - stp->sd_anchor) && 4354 stp->sd_anchorzone != crgetzoneid(crp)) { 4355 mutex_enter(&stp->sd_lock); 4356 strendplumb(stp); 4357 mutex_exit(&stp->sd_lock); 4358 return (EPERM); 4359 } 4360 } 4361 4362 4363 ASSERT(!(q->q_flag & QREADR)); 4364 qdetach(_RD(q), 1, flag, crp, is_remove); 4365 4366 mutex_enter(&stp->sd_lock); 4367 4368 /* 4369 * As a performance concern we are caching the values of 4370 * q_minpsz and q_maxpsz of the module below the stream 4371 * head in the stream head. 4372 */ 4373 if (!is_remove) { 4374 mutex_enter(QLOCK(wrq->q_next)); 4375 rmin = wrq->q_next->q_minpsz; 4376 rmax = wrq->q_next->q_maxpsz; 4377 mutex_exit(QLOCK(wrq->q_next)); 4378 4379 /* Do this processing here as a performance concern */ 4380 if (strmsgsz != 0) { 4381 if (rmax == INFPSZ) 4382 rmax = strmsgsz; 4383 else { 4384 if (vp->v_type == VFIFO) 4385 rmax = MIN(PIPE_BUF, rmax); 4386 else rmax = MIN(strmsgsz, rmax); 4387 } 4388 } 4389 4390 mutex_enter(QLOCK(wrq)); 4391 stp->sd_qn_minpsz = rmin; 4392 stp->sd_qn_maxpsz = rmax; 4393 mutex_exit(QLOCK(wrq)); 4394 } 4395 4396 /* 4397 * Need to update the anchor value if this module is removed 4398 * at or below the anchor point. If the removed module is at 4399 * the anchor point, remove the anchor for this stream if 4400 * there is no module above the anchor point. 
Otherwise, if 4401 * the removed module is below the anchor point, decrement the 4402 * anchor point by 1. 4403 */ 4404 if (stp->sd_anchor != 0) { 4405 pos = STRUCT_FGET(strmodremove, pos); 4406 if (pos == stp->sd_pushcnt - stp->sd_anchor + 1) 4407 stp->sd_anchor = 0; 4408 else if (pos > (stp->sd_pushcnt - stp->sd_anchor + 1)) 4409 stp->sd_anchor--; 4410 } 4411 4412 strendplumb(stp); 4413 mutex_exit(&stp->sd_lock); 4414 return (0); 4415 } 4416 4417 case I_ANCHOR: 4418 /* 4419 * Set the anchor position on the stream to reside at 4420 * the top module (in other words, the top module 4421 * cannot be popped). Anchors with a FIFO make no 4422 * obvious sense, so they're not allowed. 4423 */ 4424 mutex_enter(&stp->sd_lock); 4425 4426 if (stp->sd_vnode->v_type == VFIFO) { 4427 mutex_exit(&stp->sd_lock); 4428 return (EINVAL); 4429 } 4430 /* Only allow the same zoneid to update the anchor */ 4431 if (stp->sd_anchor != 0 && 4432 stp->sd_anchorzone != crgetzoneid(crp)) { 4433 mutex_exit(&stp->sd_lock); 4434 return (EINVAL); 4435 } 4436 stp->sd_anchor = stp->sd_pushcnt; 4437 stp->sd_anchorzone = crgetzoneid(crp); 4438 mutex_exit(&stp->sd_lock); 4439 return (0); 4440 4441 case I_LOOK: 4442 /* 4443 * Get name of first module downstream. 4444 * If no module, return an error. 4445 */ 4446 { 4447 claimstr(wrq); 4448 if (_SAMESTR(wrq) && wrq->q_next->q_next) { 4449 char *name = wrq->q_next->q_qinfo->qi_minfo->mi_idname; 4450 error = strcopyout(name, (void *)arg, strlen(name) + 1, 4451 copyflag); 4452 releasestr(wrq); 4453 return (error); 4454 } 4455 releasestr(wrq); 4456 return (EINVAL); 4457 } 4458 4459 case I_LINK: 4460 case I_PLINK: 4461 /* 4462 * Link a multiplexor. 4463 */ 4464 error = mlink(vp, cmd, (int)arg, crp, rvalp, 0); 4465 return (error); 4466 4467 case _I_PLINK_LH: 4468 /* 4469 * Link a multiplexor: Call must originate from kernel. 4470 */ 4471 if (kioctl) 4472 return (ldi_mlink_lh(vp, cmd, arg, crp, rvalp)); 4473 4474 return (EINVAL); 4475 case I_UNLINK: 4476 case I_PUNLINK: 4477 /* 4478 * Unlink a multiplexor. 4479 * If arg is -1, unlink all links for which this is the 4480 * controlling stream. Otherwise, arg is an index number 4481 * for a link to be removed. 
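 *
 * Illustrative user-level sequence for the persistent case
 * (sketch only; the descriptor names are hypothetical):
 *
 *	muxid = ioctl(ctlfd, I_PLINK, lowerfd);
 *	...
 *	(void) ioctl(ctlfd, I_PUNLINK, muxid);
 *
 * Passing MUXID_ALL as arg removes all such links at once.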
4482 */ 4483 { 4484 struct linkinfo *linkp; 4485 int native_arg = (int)arg; 4486 int type; 4487 netstack_t *ns; 4488 str_stack_t *ss; 4489 4490 TRACE_1(TR_FAC_STREAMS_FR, 4491 TR_I_UNLINK, "I_UNLINK/I_PUNLINK:%p", stp); 4492 if (vp->v_type == VFIFO) { 4493 return (EINVAL); 4494 } 4495 if (cmd == I_UNLINK) 4496 type = LINKNORMAL; 4497 else /* I_PUNLINK */ 4498 type = LINKPERSIST; 4499 if (native_arg == 0) { 4500 return (EINVAL); 4501 } 4502 ns = netstack_find_by_cred(crp); 4503 ASSERT(ns != NULL); 4504 ss = ns->netstack_str; 4505 ASSERT(ss != NULL); 4506 4507 if (native_arg == MUXID_ALL) 4508 error = munlinkall(stp, type, crp, rvalp, ss); 4509 else { 4510 mutex_enter(&muxifier); 4511 if (!(linkp = findlinks(stp, (int)arg, type, ss))) { 4512 /* invalid user supplied index number */ 4513 mutex_exit(&muxifier); 4514 netstack_rele(ss->ss_netstack); 4515 return (EINVAL); 4516 } 4517 /* munlink drops the muxifier lock */ 4518 error = munlink(stp, linkp, type, crp, rvalp, ss); 4519 } 4520 netstack_rele(ss->ss_netstack); 4521 return (error); 4522 } 4523 4524 case I_FLUSH: 4525 /* 4526 * send a flush message downstream 4527 * flush message can indicate 4528 * FLUSHR - flush read queue 4529 * FLUSHW - flush write queue 4530 * FLUSHRW - flush read/write queue 4531 */ 4532 if (stp->sd_flag & STRHUP) 4533 return (ENXIO); 4534 if (arg & ~FLUSHRW) 4535 return (EINVAL); 4536 4537 for (;;) { 4538 if (putnextctl1(stp->sd_wrq, M_FLUSH, (int)arg)) { 4539 break; 4540 } 4541 if (error = strwaitbuf(1, BPRI_HI)) { 4542 return (error); 4543 } 4544 } 4545 4546 /* 4547 * Send down an unsupported ioctl and wait for the nack 4548 * in order to allow the M_FLUSH to propagate back 4549 * up to the stream head. 4550 * Replaces if (qready()) runqueues(); 4551 */ 4552 strioc.ic_cmd = -1; /* The unsupported ioctl */ 4553 strioc.ic_timout = 0; 4554 strioc.ic_len = 0; 4555 strioc.ic_dp = NULL; 4556 (void) strdoioctl(stp, &strioc, flag, K_TO_K, crp, rvalp); 4557 *rvalp = 0; 4558 return (0); 4559 4560 case I_FLUSHBAND: 4561 { 4562 struct bandinfo binfo; 4563 4564 error = strcopyin((void *)arg, &binfo, sizeof (binfo), 4565 copyflag); 4566 if (error) 4567 return (error); 4568 if (stp->sd_flag & STRHUP) 4569 return (ENXIO); 4570 if (binfo.bi_flag & ~FLUSHRW) 4571 return (EINVAL); 4572 while (!(mp = allocb(2, BPRI_HI))) { 4573 if (error = strwaitbuf(2, BPRI_HI)) 4574 return (error); 4575 } 4576 mp->b_datap->db_type = M_FLUSH; 4577 *mp->b_wptr++ = binfo.bi_flag | FLUSHBAND; 4578 *mp->b_wptr++ = binfo.bi_pri; 4579 putnext(stp->sd_wrq, mp); 4580 /* 4581 * Send down an unsupported ioctl and wait for the nack 4582 * in order to allow the M_FLUSH to propagate back 4583 * up to the stream head. 
4584 * Replaces if (qready()) runqueues(); 4585 */ 4586 strioc.ic_cmd = -1; /* The unsupported ioctl */ 4587 strioc.ic_timout = 0; 4588 strioc.ic_len = 0; 4589 strioc.ic_dp = NULL; 4590 (void) strdoioctl(stp, &strioc, flag, K_TO_K, crp, rvalp); 4591 *rvalp = 0; 4592 return (0); 4593 } 4594 4595 case I_SRDOPT: 4596 /* 4597 * Set read options 4598 * 4599 * RNORM - default stream mode 4600 * RMSGN - message no discard 4601 * RMSGD - message discard 4602 * RPROTNORM - fail read with EBADMSG for M_[PC]PROTOs 4603 * RPROTDAT - convert M_[PC]PROTOs to M_DATAs 4604 * RPROTDIS - discard M_[PC]PROTOs and retain M_DATAs 4605 */ 4606 if (arg & ~(RMODEMASK | RPROTMASK)) 4607 return (EINVAL); 4608 4609 if ((arg & (RMSGD|RMSGN)) == (RMSGD|RMSGN)) 4610 return (EINVAL); 4611 4612 mutex_enter(&stp->sd_lock); 4613 switch (arg & RMODEMASK) { 4614 case RNORM: 4615 stp->sd_read_opt &= ~(RD_MSGDIS | RD_MSGNODIS); 4616 break; 4617 case RMSGD: 4618 stp->sd_read_opt = (stp->sd_read_opt & ~RD_MSGNODIS) | 4619 RD_MSGDIS; 4620 break; 4621 case RMSGN: 4622 stp->sd_read_opt = (stp->sd_read_opt & ~RD_MSGDIS) | 4623 RD_MSGNODIS; 4624 break; 4625 } 4626 4627 switch (arg & RPROTMASK) { 4628 case RPROTNORM: 4629 stp->sd_read_opt &= ~(RD_PROTDAT | RD_PROTDIS); 4630 break; 4631 4632 case RPROTDAT: 4633 stp->sd_read_opt = ((stp->sd_read_opt & ~RD_PROTDIS) | 4634 RD_PROTDAT); 4635 break; 4636 4637 case RPROTDIS: 4638 stp->sd_read_opt = ((stp->sd_read_opt & ~RD_PROTDAT) | 4639 RD_PROTDIS); 4640 break; 4641 } 4642 mutex_exit(&stp->sd_lock); 4643 return (0); 4644 4645 case I_GRDOPT: 4646 /* 4647 * Get read option and return the value 4648 * to spot pointed to by arg 4649 */ 4650 { 4651 int rdopt; 4652 4653 rdopt = ((stp->sd_read_opt & RD_MSGDIS) ? RMSGD : 4654 ((stp->sd_read_opt & RD_MSGNODIS) ? RMSGN : RNORM)); 4655 rdopt |= ((stp->sd_read_opt & RD_PROTDAT) ? RPROTDAT : 4656 ((stp->sd_read_opt & RD_PROTDIS) ? RPROTDIS : RPROTNORM)); 4657 4658 return (strcopyout(&rdopt, (void *)arg, sizeof (int), 4659 copyflag)); 4660 } 4661 4662 case I_SERROPT: 4663 /* 4664 * Set error options 4665 * 4666 * RERRNORM - persistent read errors 4667 * RERRNONPERSIST - non-persistent read errors 4668 * WERRNORM - persistent write errors 4669 * WERRNONPERSIST - non-persistent write errors 4670 */ 4671 if (arg & ~(RERRMASK | WERRMASK)) 4672 return (EINVAL); 4673 4674 mutex_enter(&stp->sd_lock); 4675 switch (arg & RERRMASK) { 4676 case RERRNORM: 4677 stp->sd_flag &= ~STRDERRNONPERSIST; 4678 break; 4679 case RERRNONPERSIST: 4680 stp->sd_flag |= STRDERRNONPERSIST; 4681 break; 4682 } 4683 switch (arg & WERRMASK) { 4684 case WERRNORM: 4685 stp->sd_flag &= ~STWRERRNONPERSIST; 4686 break; 4687 case WERRNONPERSIST: 4688 stp->sd_flag |= STWRERRNONPERSIST; 4689 break; 4690 } 4691 mutex_exit(&stp->sd_lock); 4692 return (0); 4693 4694 case I_GERROPT: 4695 /* 4696 * Get error option and return the value 4697 * to spot pointed to by arg 4698 */ 4699 { 4700 int erropt = 0; 4701 4702 erropt |= (stp->sd_flag & STRDERRNONPERSIST) ? RERRNONPERSIST : 4703 RERRNORM; 4704 erropt |= (stp->sd_flag & STWRERRNONPERSIST) ? WERRNONPERSIST : 4705 WERRNORM; 4706 return (strcopyout(&erropt, (void *)arg, sizeof (int), 4707 copyflag)); 4708 } 4709 4710 case I_SETSIG: 4711 /* 4712 * Register the calling proc to receive the SIGPOLL 4713 * signal based on the events given in arg. If 4714 * arg is zero, remove the proc from register list. 
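 *
 * Illustrative user-level usage (sketch only; fd is a
 * hypothetical stream descriptor):
 *
 *	(void) ioctl(fd, I_SETSIG, S_INPUT | S_HIPRI);	register
 *	(void) ioctl(fd, I_SETSIG, 0);			deregister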
4715 */ 4716 { 4717 strsig_t *ssp, *pssp; 4718 struct pid *pidp; 4719 4720 pssp = NULL; 4721 pidp = curproc->p_pidp; 4722 /* 4723 * Hold sd_lock to prevent traversal of sd_siglist while 4724 * it is modified. 4725 */ 4726 mutex_enter(&stp->sd_lock); 4727 for (ssp = stp->sd_siglist; ssp && (ssp->ss_pidp != pidp); 4728 pssp = ssp, ssp = ssp->ss_next) 4729 ; 4730 4731 if (arg) { 4732 if (arg & ~(S_INPUT|S_HIPRI|S_MSG|S_HANGUP|S_ERROR| 4733 S_RDNORM|S_WRNORM|S_RDBAND|S_WRBAND|S_BANDURG)) { 4734 mutex_exit(&stp->sd_lock); 4735 return (EINVAL); 4736 } 4737 if ((arg & S_BANDURG) && !(arg & S_RDBAND)) { 4738 mutex_exit(&stp->sd_lock); 4739 return (EINVAL); 4740 } 4741 4742 /* 4743 * If proc not already registered, add it 4744 * to list. 4745 */ 4746 if (!ssp) { 4747 ssp = kmem_alloc(sizeof (strsig_t), KM_SLEEP); 4748 ssp->ss_pidp = pidp; 4749 ssp->ss_pid = pidp->pid_id; 4750 ssp->ss_next = NULL; 4751 if (pssp) 4752 pssp->ss_next = ssp; 4753 else 4754 stp->sd_siglist = ssp; 4755 mutex_enter(&pidlock); 4756 PID_HOLD(pidp); 4757 mutex_exit(&pidlock); 4758 } 4759 4760 /* 4761 * Set events. 4762 */ 4763 ssp->ss_events = (int)arg; 4764 } else { 4765 /* 4766 * Remove proc from register list. 4767 */ 4768 if (ssp) { 4769 mutex_enter(&pidlock); 4770 PID_RELE(pidp); 4771 mutex_exit(&pidlock); 4772 if (pssp) 4773 pssp->ss_next = ssp->ss_next; 4774 else 4775 stp->sd_siglist = ssp->ss_next; 4776 kmem_free(ssp, sizeof (strsig_t)); 4777 } else { 4778 mutex_exit(&stp->sd_lock); 4779 return (EINVAL); 4780 } 4781 } 4782 4783 /* 4784 * Recalculate OR of sig events. 4785 */ 4786 stp->sd_sigflags = 0; 4787 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next) 4788 stp->sd_sigflags |= ssp->ss_events; 4789 mutex_exit(&stp->sd_lock); 4790 return (0); 4791 } 4792 4793 case I_GETSIG: 4794 /* 4795 * Return (in arg) the current registration of events 4796 * for which the calling proc is to be signaled. 4797 */ 4798 { 4799 struct strsig *ssp; 4800 struct pid *pidp; 4801 4802 pidp = curproc->p_pidp; 4803 mutex_enter(&stp->sd_lock); 4804 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next) 4805 if (ssp->ss_pidp == pidp) { 4806 error = strcopyout(&ssp->ss_events, (void *)arg, 4807 sizeof (int), copyflag); 4808 mutex_exit(&stp->sd_lock); 4809 return (error); 4810 } 4811 mutex_exit(&stp->sd_lock); 4812 return (EINVAL); 4813 } 4814 4815 case I_ESETSIG: 4816 /* 4817 * Register the ss_pid to receive the SIGPOLL 4818 * signal based on the events is ss_events arg. If 4819 * ss_events is zero, remove the proc from register list. 4820 */ 4821 { 4822 struct strsig *ssp, *pssp; 4823 struct proc *proc; 4824 struct pid *pidp; 4825 pid_t pid; 4826 struct strsigset ss; 4827 4828 error = strcopyin((void *)arg, &ss, sizeof (ss), copyflag); 4829 if (error) 4830 return (error); 4831 4832 pid = ss.ss_pid; 4833 4834 if (ss.ss_events != 0) { 4835 /* 4836 * Permissions check by sending signal 0. 4837 * Note that when kill fails it does a set_errno 4838 * causing the system call to fail. 4839 */ 4840 error = kill(pid, 0); 4841 if (error) { 4842 return (error); 4843 } 4844 } 4845 mutex_enter(&pidlock); 4846 if (pid == 0) 4847 proc = curproc; 4848 else if (pid < 0) 4849 proc = pgfind(-pid); 4850 else 4851 proc = prfind(pid); 4852 if (proc == NULL) { 4853 mutex_exit(&pidlock); 4854 return (ESRCH); 4855 } 4856 if (pid < 0) 4857 pidp = proc->p_pgidp; 4858 else 4859 pidp = proc->p_pidp; 4860 ASSERT(pidp); 4861 /* 4862 * Get a hold on the pid structure while referencing it. 4863 * There is a separate PID_HOLD should it be inserted 4864 * in the list below. 
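 * (The hold taken here is matched by the PID_RELE at the end of
 * this case; a longer-lived hold is taken below if a new strsig_t
 * is linked into sd_siglist and is dropped when that entry is
 * removed.)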
4865 */ 4866 PID_HOLD(pidp); 4867 mutex_exit(&pidlock); 4868 4869 pssp = NULL; 4870 /* 4871 * Hold sd_lock to prevent traversal of sd_siglist while 4872 * it is modified. 4873 */ 4874 mutex_enter(&stp->sd_lock); 4875 for (ssp = stp->sd_siglist; ssp && (ssp->ss_pid != pid); 4876 pssp = ssp, ssp = ssp->ss_next) 4877 ; 4878 4879 if (ss.ss_events) { 4880 if (ss.ss_events & 4881 ~(S_INPUT|S_HIPRI|S_MSG|S_HANGUP|S_ERROR| 4882 S_RDNORM|S_WRNORM|S_RDBAND|S_WRBAND|S_BANDURG)) { 4883 mutex_exit(&stp->sd_lock); 4884 mutex_enter(&pidlock); 4885 PID_RELE(pidp); 4886 mutex_exit(&pidlock); 4887 return (EINVAL); 4888 } 4889 if ((ss.ss_events & S_BANDURG) && 4890 !(ss.ss_events & S_RDBAND)) { 4891 mutex_exit(&stp->sd_lock); 4892 mutex_enter(&pidlock); 4893 PID_RELE(pidp); 4894 mutex_exit(&pidlock); 4895 return (EINVAL); 4896 } 4897 4898 /* 4899 * If proc not already registered, add it 4900 * to list. 4901 */ 4902 if (!ssp) { 4903 ssp = kmem_alloc(sizeof (strsig_t), KM_SLEEP); 4904 ssp->ss_pidp = pidp; 4905 ssp->ss_pid = pid; 4906 ssp->ss_next = NULL; 4907 if (pssp) 4908 pssp->ss_next = ssp; 4909 else 4910 stp->sd_siglist = ssp; 4911 mutex_enter(&pidlock); 4912 PID_HOLD(pidp); 4913 mutex_exit(&pidlock); 4914 } 4915 4916 /* 4917 * Set events. 4918 */ 4919 ssp->ss_events = ss.ss_events; 4920 } else { 4921 /* 4922 * Remove proc from register list. 4923 */ 4924 if (ssp) { 4925 mutex_enter(&pidlock); 4926 PID_RELE(pidp); 4927 mutex_exit(&pidlock); 4928 if (pssp) 4929 pssp->ss_next = ssp->ss_next; 4930 else 4931 stp->sd_siglist = ssp->ss_next; 4932 kmem_free(ssp, sizeof (strsig_t)); 4933 } else { 4934 mutex_exit(&stp->sd_lock); 4935 mutex_enter(&pidlock); 4936 PID_RELE(pidp); 4937 mutex_exit(&pidlock); 4938 return (EINVAL); 4939 } 4940 } 4941 4942 /* 4943 * Recalculate OR of sig events. 4944 */ 4945 stp->sd_sigflags = 0; 4946 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next) 4947 stp->sd_sigflags |= ssp->ss_events; 4948 mutex_exit(&stp->sd_lock); 4949 mutex_enter(&pidlock); 4950 PID_RELE(pidp); 4951 mutex_exit(&pidlock); 4952 return (0); 4953 } 4954 4955 case I_EGETSIG: 4956 /* 4957 * Return (in arg) the current registration of events 4958 * for which the calling proc is to be signaled. 
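 * More precisely, the registration returned is that of the
 * process identified by ss_pid in the strsigset passed via arg,
 * which need not be the calling process.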
4959 */ 4960 { 4961 struct strsig *ssp; 4962 struct proc *proc; 4963 pid_t pid; 4964 struct pid *pidp; 4965 struct strsigset ss; 4966 4967 error = strcopyin((void *)arg, &ss, sizeof (ss), copyflag); 4968 if (error) 4969 return (error); 4970 4971 pid = ss.ss_pid; 4972 mutex_enter(&pidlock); 4973 if (pid == 0) 4974 proc = curproc; 4975 else if (pid < 0) 4976 proc = pgfind(-pid); 4977 else 4978 proc = prfind(pid); 4979 if (proc == NULL) { 4980 mutex_exit(&pidlock); 4981 return (ESRCH); 4982 } 4983 if (pid < 0) 4984 pidp = proc->p_pgidp; 4985 else 4986 pidp = proc->p_pidp; 4987 4988 /* Prevent the pidp from being reassigned */ 4989 PID_HOLD(pidp); 4990 mutex_exit(&pidlock); 4991 4992 mutex_enter(&stp->sd_lock); 4993 for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next) 4994 if (ssp->ss_pid == pid) { 4995 ss.ss_pid = ssp->ss_pid; 4996 ss.ss_events = ssp->ss_events; 4997 error = strcopyout(&ss, (void *)arg, 4998 sizeof (struct strsigset), copyflag); 4999 mutex_exit(&stp->sd_lock); 5000 mutex_enter(&pidlock); 5001 PID_RELE(pidp); 5002 mutex_exit(&pidlock); 5003 return (error); 5004 } 5005 mutex_exit(&stp->sd_lock); 5006 mutex_enter(&pidlock); 5007 PID_RELE(pidp); 5008 mutex_exit(&pidlock); 5009 return (EINVAL); 5010 } 5011 5012 case I_PEEK: 5013 { 5014 STRUCT_DECL(strpeek, strpeek); 5015 size_t n; 5016 mblk_t *fmp, *tmp_mp = NULL; 5017 5018 STRUCT_INIT(strpeek, flag); 5019 5020 error = strcopyin((void *)arg, STRUCT_BUF(strpeek), 5021 STRUCT_SIZE(strpeek), copyflag); 5022 if (error) 5023 return (error); 5024 5025 mutex_enter(QLOCK(rdq)); 5026 /* 5027 * Skip the invalid messages 5028 */ 5029 for (mp = rdq->q_first; mp != NULL; mp = mp->b_next) 5030 if (mp->b_datap->db_type != M_SIG) 5031 break; 5032 5033 /* 5034 * If user has requested to peek at a high priority message 5035 * and first message is not, return 0 5036 */ 5037 if (mp != NULL) { 5038 if ((STRUCT_FGET(strpeek, flags) & RS_HIPRI) && 5039 queclass(mp) == QNORM) { 5040 *rvalp = 0; 5041 mutex_exit(QLOCK(rdq)); 5042 return (0); 5043 } 5044 } else if (stp->sd_struiordq == NULL || 5045 (STRUCT_FGET(strpeek, flags) & RS_HIPRI)) { 5046 /* 5047 * No mblks to look at at the streamhead and 5048 * 1). This isn't a synch stream or 5049 * 2). This is a synch stream but caller wants high 5050 * priority messages which is not supported by 5051 * the synch stream. (it only supports QNORM) 5052 */ 5053 *rvalp = 0; 5054 mutex_exit(QLOCK(rdq)); 5055 return (0); 5056 } 5057 5058 fmp = mp; 5059 5060 if (mp && mp->b_datap->db_type == M_PASSFP) { 5061 mutex_exit(QLOCK(rdq)); 5062 return (EBADMSG); 5063 } 5064 5065 ASSERT(mp == NULL || mp->b_datap->db_type == M_PCPROTO || 5066 mp->b_datap->db_type == M_PROTO || 5067 mp->b_datap->db_type == M_DATA); 5068 5069 if (mp && mp->b_datap->db_type == M_PCPROTO) { 5070 STRUCT_FSET(strpeek, flags, RS_HIPRI); 5071 } else { 5072 STRUCT_FSET(strpeek, flags, 0); 5073 } 5074 5075 5076 if (mp && ((tmp_mp = dupmsg(mp)) == NULL)) { 5077 mutex_exit(QLOCK(rdq)); 5078 return (ENOSR); 5079 } 5080 mutex_exit(QLOCK(rdq)); 5081 5082 /* 5083 * set mp = tmp_mp, so that I_PEEK processing can continue. 5084 * tmp_mp is used to free the dup'd message. 5085 */ 5086 mp = tmp_mp; 5087 5088 uio.uio_fmode = 0; 5089 uio.uio_extflg = UIO_COPY_CACHED; 5090 uio.uio_segflg = (copyflag == U_TO_K) ? UIO_USERSPACE : 5091 UIO_SYSSPACE; 5092 uio.uio_limit = 0; 5093 /* 5094 * First process PROTO blocks, if any. 5095 * If user doesn't want to get ctl info by setting maxlen <= 0, 5096 * then set len to -1/0 and skip control blocks part. 
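 * (That is: maxlen < 0 yields len = -1, maxlen == 0 yields
 * len = 0; only for maxlen > 0 is control data copied, with len
 * set to the byte count, or -1 if the message has no control
 * part.)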
5097 */ 5098 if (STRUCT_FGET(strpeek, ctlbuf.maxlen) < 0) 5099 STRUCT_FSET(strpeek, ctlbuf.len, -1); 5100 else if (STRUCT_FGET(strpeek, ctlbuf.maxlen) == 0) 5101 STRUCT_FSET(strpeek, ctlbuf.len, 0); 5102 else { 5103 int ctl_part = 0; 5104 5105 iov.iov_base = STRUCT_FGETP(strpeek, ctlbuf.buf); 5106 iov.iov_len = STRUCT_FGET(strpeek, ctlbuf.maxlen); 5107 uio.uio_iov = &iov; 5108 uio.uio_resid = iov.iov_len; 5109 uio.uio_loffset = 0; 5110 uio.uio_iovcnt = 1; 5111 while (mp && mp->b_datap->db_type != M_DATA && 5112 uio.uio_resid >= 0) { 5113 ASSERT(STRUCT_FGET(strpeek, flags) == 0 ? 5114 mp->b_datap->db_type == M_PROTO : 5115 mp->b_datap->db_type == M_PCPROTO); 5116 5117 if ((n = MIN(uio.uio_resid, 5118 mp->b_wptr - mp->b_rptr)) != 0 && 5119 (error = uiomove((char *)mp->b_rptr, n, 5120 UIO_READ, &uio)) != 0) { 5121 freemsg(tmp_mp); 5122 return (error); 5123 } 5124 ctl_part = 1; 5125 mp = mp->b_cont; 5126 } 5127 /* No ctl message */ 5128 if (ctl_part == 0) 5129 STRUCT_FSET(strpeek, ctlbuf.len, -1); 5130 else 5131 STRUCT_FSET(strpeek, ctlbuf.len, 5132 STRUCT_FGET(strpeek, ctlbuf.maxlen) - 5133 uio.uio_resid); 5134 } 5135 5136 /* 5137 * Now process DATA blocks, if any. 5138 * If user doesn't want to get data info by setting maxlen <= 0, 5139 * then set len to -1/0 and skip data blocks part. 5140 */ 5141 if (STRUCT_FGET(strpeek, databuf.maxlen) < 0) 5142 STRUCT_FSET(strpeek, databuf.len, -1); 5143 else if (STRUCT_FGET(strpeek, databuf.maxlen) == 0) 5144 STRUCT_FSET(strpeek, databuf.len, 0); 5145 else { 5146 int data_part = 0; 5147 5148 iov.iov_base = STRUCT_FGETP(strpeek, databuf.buf); 5149 iov.iov_len = STRUCT_FGET(strpeek, databuf.maxlen); 5150 uio.uio_iov = &iov; 5151 uio.uio_resid = iov.iov_len; 5152 uio.uio_loffset = 0; 5153 uio.uio_iovcnt = 1; 5154 while (mp && uio.uio_resid) { 5155 if (mp->b_datap->db_type == M_DATA) { 5156 if ((n = MIN(uio.uio_resid, 5157 mp->b_wptr - mp->b_rptr)) != 0 && 5158 (error = uiomove((char *)mp->b_rptr, 5159 n, UIO_READ, &uio)) != 0) { 5160 freemsg(tmp_mp); 5161 return (error); 5162 } 5163 data_part = 1; 5164 } 5165 ASSERT(data_part == 0 || 5166 mp->b_datap->db_type == M_DATA); 5167 mp = mp->b_cont; 5168 } 5169 /* No data message */ 5170 if (data_part == 0) 5171 STRUCT_FSET(strpeek, databuf.len, -1); 5172 else 5173 STRUCT_FSET(strpeek, databuf.len, 5174 STRUCT_FGET(strpeek, databuf.maxlen) - 5175 uio.uio_resid); 5176 } 5177 freemsg(tmp_mp); 5178 5179 /* 5180 * It is a synch stream and user wants to get 5181 * data (maxlen > 0). 5182 * uio setup is done by the codes that process DATA 5183 * blocks above. 5184 */ 5185 if ((fmp == NULL) && STRUCT_FGET(strpeek, databuf.maxlen) > 0) { 5186 infod_t infod; 5187 5188 infod.d_cmd = INFOD_COPYOUT; 5189 infod.d_res = 0; 5190 infod.d_uiop = &uio; 5191 error = infonext(rdq, &infod); 5192 if (error == EINVAL || error == EBUSY) 5193 error = 0; 5194 if (error) 5195 return (error); 5196 STRUCT_FSET(strpeek, databuf.len, STRUCT_FGET(strpeek, 5197 databuf.maxlen) - uio.uio_resid); 5198 if (STRUCT_FGET(strpeek, databuf.len) == 0) { 5199 /* 5200 * No data found by the infonext(). 5201 */ 5202 STRUCT_FSET(strpeek, databuf.len, -1); 5203 } 5204 } 5205 error = strcopyout(STRUCT_BUF(strpeek), (void *)arg, 5206 STRUCT_SIZE(strpeek), copyflag); 5207 if (error) { 5208 return (error); 5209 } 5210 /* 5211 * If there is no message retrieved, set return code to 0 5212 * otherwise, set it to 1. 
5213 */ 5214 if (STRUCT_FGET(strpeek, ctlbuf.len) == -1 && 5215 STRUCT_FGET(strpeek, databuf.len) == -1) 5216 *rvalp = 0; 5217 else 5218 *rvalp = 1; 5219 return (0); 5220 } 5221 5222 case I_FDINSERT: 5223 { 5224 STRUCT_DECL(strfdinsert, strfdinsert); 5225 struct file *resftp; 5226 struct stdata *resstp; 5227 t_uscalar_t ival; 5228 ssize_t msgsize; 5229 struct strbuf mctl; 5230 5231 STRUCT_INIT(strfdinsert, flag); 5232 if (stp->sd_flag & STRHUP) 5233 return (ENXIO); 5234 /* 5235 * STRDERR, STWRERR and STPLEX tested above. 5236 */ 5237 error = strcopyin((void *)arg, STRUCT_BUF(strfdinsert), 5238 STRUCT_SIZE(strfdinsert), copyflag); 5239 if (error) 5240 return (error); 5241 5242 if (STRUCT_FGET(strfdinsert, offset) < 0 || 5243 (STRUCT_FGET(strfdinsert, offset) % 5244 sizeof (t_uscalar_t)) != 0) 5245 return (EINVAL); 5246 if ((resftp = getf(STRUCT_FGET(strfdinsert, fildes))) != NULL) { 5247 if ((resstp = resftp->f_vnode->v_stream) == NULL) { 5248 releasef(STRUCT_FGET(strfdinsert, fildes)); 5249 return (EINVAL); 5250 } 5251 } else 5252 return (EINVAL); 5253 5254 mutex_enter(&resstp->sd_lock); 5255 if (resstp->sd_flag & (STRDERR|STWRERR|STRHUP|STPLEX)) { 5256 error = strgeterr(resstp, 5257 STRDERR|STWRERR|STRHUP|STPLEX, 0); 5258 if (error != 0) { 5259 mutex_exit(&resstp->sd_lock); 5260 releasef(STRUCT_FGET(strfdinsert, fildes)); 5261 return (error); 5262 } 5263 } 5264 mutex_exit(&resstp->sd_lock); 5265 5266 #ifdef _ILP32 5267 { 5268 queue_t *q; 5269 queue_t *mate = NULL; 5270 5271 /* get read queue of stream terminus */ 5272 claimstr(resstp->sd_wrq); 5273 for (q = resstp->sd_wrq->q_next; q->q_next != NULL; 5274 q = q->q_next) 5275 if (!STRMATED(resstp) && STREAM(q) != resstp && 5276 mate == NULL) { 5277 ASSERT(q->q_qinfo->qi_srvp); 5278 ASSERT(_OTHERQ(q)->q_qinfo->qi_srvp); 5279 claimstr(q); 5280 mate = q; 5281 } 5282 q = _RD(q); 5283 if (mate) 5284 releasestr(mate); 5285 releasestr(resstp->sd_wrq); 5286 ival = (t_uscalar_t)q; 5287 } 5288 #else 5289 ival = (t_uscalar_t)getminor(resftp->f_vnode->v_rdev); 5290 #endif /* _ILP32 */ 5291 5292 if (STRUCT_FGET(strfdinsert, ctlbuf.len) < 5293 STRUCT_FGET(strfdinsert, offset) + sizeof (t_uscalar_t)) { 5294 releasef(STRUCT_FGET(strfdinsert, fildes)); 5295 return (EINVAL); 5296 } 5297 5298 /* 5299 * Check for legal flag value. 5300 */ 5301 if (STRUCT_FGET(strfdinsert, flags) & ~RS_HIPRI) { 5302 releasef(STRUCT_FGET(strfdinsert, fildes)); 5303 return (EINVAL); 5304 } 5305 5306 /* get these values from those cached in the stream head */ 5307 mutex_enter(QLOCK(stp->sd_wrq)); 5308 rmin = stp->sd_qn_minpsz; 5309 rmax = stp->sd_qn_maxpsz; 5310 mutex_exit(QLOCK(stp->sd_wrq)); 5311 5312 /* 5313 * Make sure ctl and data sizes together fall within 5314 * the limits of the max and min receive packet sizes 5315 * and do not exceed system limit. A negative data 5316 * length means that no data part is to be sent. 
5317 */ 5318 ASSERT((rmax >= 0) || (rmax == INFPSZ)); 5319 if (rmax == 0) { 5320 releasef(STRUCT_FGET(strfdinsert, fildes)); 5321 return (ERANGE); 5322 } 5323 if ((msgsize = STRUCT_FGET(strfdinsert, databuf.len)) < 0) 5324 msgsize = 0; 5325 if ((msgsize < rmin) || 5326 ((msgsize > rmax) && (rmax != INFPSZ)) || 5327 (STRUCT_FGET(strfdinsert, ctlbuf.len) > strctlsz)) { 5328 releasef(STRUCT_FGET(strfdinsert, fildes)); 5329 return (ERANGE); 5330 } 5331 5332 mutex_enter(&stp->sd_lock); 5333 while (!(STRUCT_FGET(strfdinsert, flags) & RS_HIPRI) && 5334 !canputnext(stp->sd_wrq)) { 5335 if ((error = strwaitq(stp, WRITEWAIT, (ssize_t)0, 5336 flag, -1, &done)) != 0 || done) { 5337 mutex_exit(&stp->sd_lock); 5338 releasef(STRUCT_FGET(strfdinsert, fildes)); 5339 return (error); 5340 } 5341 if ((error = i_straccess(stp, access)) != 0) { 5342 mutex_exit(&stp->sd_lock); 5343 releasef( 5344 STRUCT_FGET(strfdinsert, fildes)); 5345 return (error); 5346 } 5347 } 5348 mutex_exit(&stp->sd_lock); 5349 5350 /* 5351 * Copy strfdinsert.ctlbuf into native form of 5352 * ctlbuf to pass down into strmakemsg(). 5353 */ 5354 mctl.maxlen = STRUCT_FGET(strfdinsert, ctlbuf.maxlen); 5355 mctl.len = STRUCT_FGET(strfdinsert, ctlbuf.len); 5356 mctl.buf = STRUCT_FGETP(strfdinsert, ctlbuf.buf); 5357 5358 iov.iov_base = STRUCT_FGETP(strfdinsert, databuf.buf); 5359 iov.iov_len = STRUCT_FGET(strfdinsert, databuf.len); 5360 uio.uio_iov = &iov; 5361 uio.uio_iovcnt = 1; 5362 uio.uio_loffset = 0; 5363 uio.uio_segflg = (copyflag == U_TO_K) ? UIO_USERSPACE : 5364 UIO_SYSSPACE; 5365 uio.uio_fmode = 0; 5366 uio.uio_extflg = UIO_COPY_CACHED; 5367 uio.uio_resid = iov.iov_len; 5368 if ((error = strmakemsg(&mctl, 5369 &msgsize, &uio, stp, 5370 STRUCT_FGET(strfdinsert, flags), &mp)) != 0 || !mp) { 5371 STRUCT_FSET(strfdinsert, databuf.len, msgsize); 5372 releasef(STRUCT_FGET(strfdinsert, fildes)); 5373 return (error); 5374 } 5375 5376 STRUCT_FSET(strfdinsert, databuf.len, msgsize); 5377 5378 /* 5379 * Place the possibly reencoded queue pointer 'offset' bytes 5380 * from the start of the control portion of the message. 5381 */ 5382 *((t_uscalar_t *)(mp->b_rptr + 5383 STRUCT_FGET(strfdinsert, offset))) = ival; 5384 5385 /* 5386 * Put message downstream. 
5387 */ 5388 stream_willservice(stp); 5389 putnext(stp->sd_wrq, mp); 5390 stream_runservice(stp); 5391 releasef(STRUCT_FGET(strfdinsert, fildes)); 5392 return (error); 5393 } 5394 5395 case I_SENDFD: 5396 { 5397 struct file *fp; 5398 5399 if ((fp = getf((int)arg)) == NULL) 5400 return (EBADF); 5401 error = do_sendfp(stp, fp, crp); 5402 if (audit_active) { 5403 audit_fdsend((int)arg, fp, error); 5404 } 5405 releasef((int)arg); 5406 return (error); 5407 } 5408 5409 case I_RECVFD: 5410 case I_E_RECVFD: 5411 { 5412 struct k_strrecvfd *srf; 5413 int i, fd; 5414 5415 mutex_enter(&stp->sd_lock); 5416 while (!(mp = getq(rdq))) { 5417 if (stp->sd_flag & (STRHUP|STREOF)) { 5418 mutex_exit(&stp->sd_lock); 5419 return (ENXIO); 5420 } 5421 if ((error = strwaitq(stp, GETWAIT, (ssize_t)0, 5422 flag, -1, &done)) != 0 || done) { 5423 mutex_exit(&stp->sd_lock); 5424 return (error); 5425 } 5426 if ((error = i_straccess(stp, access)) != 0) { 5427 mutex_exit(&stp->sd_lock); 5428 return (error); 5429 } 5430 } 5431 if (mp->b_datap->db_type != M_PASSFP) { 5432 putback(stp, rdq, mp, mp->b_band); 5433 mutex_exit(&stp->sd_lock); 5434 return (EBADMSG); 5435 } 5436 mutex_exit(&stp->sd_lock); 5437 5438 srf = (struct k_strrecvfd *)mp->b_rptr; 5439 if ((fd = ufalloc(0)) == -1) { 5440 mutex_enter(&stp->sd_lock); 5441 putback(stp, rdq, mp, mp->b_band); 5442 mutex_exit(&stp->sd_lock); 5443 return (EMFILE); 5444 } 5445 if (cmd == I_RECVFD) { 5446 struct o_strrecvfd ostrfd; 5447 5448 /* check to see if uid/gid values are too large. */ 5449 5450 if (srf->uid > (o_uid_t)USHRT_MAX || 5451 srf->gid > (o_gid_t)USHRT_MAX) { 5452 mutex_enter(&stp->sd_lock); 5453 putback(stp, rdq, mp, mp->b_band); 5454 mutex_exit(&stp->sd_lock); 5455 setf(fd, NULL); /* release fd entry */ 5456 return (EOVERFLOW); 5457 } 5458 5459 ostrfd.fd = fd; 5460 ostrfd.uid = (o_uid_t)srf->uid; 5461 ostrfd.gid = (o_gid_t)srf->gid; 5462 5463 /* Null the filler bits */ 5464 for (i = 0; i < 8; i++) 5465 ostrfd.fill[i] = 0; 5466 5467 error = strcopyout(&ostrfd, (void *)arg, 5468 sizeof (struct o_strrecvfd), copyflag); 5469 } else { /* I_E_RECVFD */ 5470 struct strrecvfd strfd; 5471 5472 strfd.fd = fd; 5473 strfd.uid = srf->uid; 5474 strfd.gid = srf->gid; 5475 5476 /* null the filler bits */ 5477 for (i = 0; i < 8; i++) 5478 strfd.fill[i] = 0; 5479 5480 error = strcopyout(&strfd, (void *)arg, 5481 sizeof (struct strrecvfd), copyflag); 5482 } 5483 5484 if (error) { 5485 setf(fd, NULL); /* release fd entry */ 5486 mutex_enter(&stp->sd_lock); 5487 putback(stp, rdq, mp, mp->b_band); 5488 mutex_exit(&stp->sd_lock); 5489 return (error); 5490 } 5491 if (audit_active) { 5492 audit_fdrecv(fd, srf->fp); 5493 } 5494 5495 /* 5496 * Always increment f_count since the freemsg() below will 5497 * always call free_passfp() which performs a closef(). 5498 */ 5499 mutex_enter(&srf->fp->f_tlock); 5500 srf->fp->f_count++; 5501 mutex_exit(&srf->fp->f_tlock); 5502 setf(fd, srf->fp); 5503 freemsg(mp); 5504 return (0); 5505 } 5506 5507 case I_SWROPT: 5508 /* 5509 * Set/clear the write options. arg is a bit 5510 * mask with any of the following bits set... 5511 * SNDZERO - send zero length message 5512 * SNDPIPE - send sigpipe to process if 5513 * sd_werror is set and process is 5514 * doing a write or putmsg. 5515 * The new stream head write options should reflect 5516 * what is in arg. 
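 *
 * Illustrative user-level usage (sketch only; fd is a
 * hypothetical stream descriptor):
 *
 *	(void) ioctl(fd, I_SWROPT, SNDZERO | SNDPIPE);	set both
 *	(void) ioctl(fd, I_SWROPT, 0);			clear both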
5517 */ 5518 if (arg & ~(SNDZERO|SNDPIPE)) 5519 return (EINVAL); 5520 5521 mutex_enter(&stp->sd_lock); 5522 stp->sd_wput_opt &= ~(SW_SIGPIPE|SW_SNDZERO); 5523 if (arg & SNDZERO) 5524 stp->sd_wput_opt |= SW_SNDZERO; 5525 if (arg & SNDPIPE) 5526 stp->sd_wput_opt |= SW_SIGPIPE; 5527 mutex_exit(&stp->sd_lock); 5528 return (0); 5529 5530 case I_GWROPT: 5531 { 5532 int wropt = 0; 5533 5534 if (stp->sd_wput_opt & SW_SNDZERO) 5535 wropt |= SNDZERO; 5536 if (stp->sd_wput_opt & SW_SIGPIPE) 5537 wropt |= SNDPIPE; 5538 return (strcopyout(&wropt, (void *)arg, sizeof (wropt), 5539 copyflag)); 5540 } 5541 5542 case I_LIST: 5543 /* 5544 * Returns all the modules found on this stream, 5545 * upto the driver. If argument is NULL, return the 5546 * number of modules (including driver). If argument 5547 * is not NULL, copy the names into the structure 5548 * provided. 5549 */ 5550 5551 { 5552 queue_t *q; 5553 int num_modules, space_allocated; 5554 STRUCT_DECL(str_list, strlist); 5555 struct str_mlist *mlist_ptr; 5556 5557 if (arg == NULL) { /* Return number of modules plus driver */ 5558 q = stp->sd_wrq; 5559 if (stp->sd_vnode->v_type == VFIFO) { 5560 *rvalp = stp->sd_pushcnt; 5561 } else { 5562 *rvalp = stp->sd_pushcnt + 1; 5563 } 5564 } else { 5565 STRUCT_INIT(strlist, flag); 5566 5567 error = strcopyin((void *)arg, STRUCT_BUF(strlist), 5568 STRUCT_SIZE(strlist), copyflag); 5569 if (error) 5570 return (error); 5571 5572 space_allocated = STRUCT_FGET(strlist, sl_nmods); 5573 if ((space_allocated) <= 0) 5574 return (EINVAL); 5575 claimstr(stp->sd_wrq); 5576 q = stp->sd_wrq; 5577 num_modules = 0; 5578 while (_SAMESTR(q) && (space_allocated != 0)) { 5579 char *name = 5580 q->q_next->q_qinfo->qi_minfo->mi_idname; 5581 5582 mlist_ptr = STRUCT_FGETP(strlist, sl_modlist); 5583 5584 error = strcopyout(name, mlist_ptr, 5585 strlen(name) + 1, copyflag); 5586 5587 if (error) { 5588 releasestr(stp->sd_wrq); 5589 return (error); 5590 } 5591 q = q->q_next; 5592 space_allocated--; 5593 num_modules++; 5594 mlist_ptr = 5595 (struct str_mlist *)((uintptr_t)mlist_ptr + 5596 sizeof (struct str_mlist)); 5597 STRUCT_FSETP(strlist, sl_modlist, mlist_ptr); 5598 } 5599 releasestr(stp->sd_wrq); 5600 error = strcopyout(&num_modules, (void *)arg, 5601 sizeof (int), copyflag); 5602 } 5603 return (error); 5604 } 5605 5606 case I_CKBAND: 5607 { 5608 queue_t *q; 5609 qband_t *qbp; 5610 5611 if ((arg < 0) || (arg >= NBAND)) 5612 return (EINVAL); 5613 q = _RD(stp->sd_wrq); 5614 mutex_enter(QLOCK(q)); 5615 if (arg > (int)q->q_nband) { 5616 *rvalp = 0; 5617 } else { 5618 if (arg == 0) { 5619 if (q->q_first) 5620 *rvalp = 1; 5621 else 5622 *rvalp = 0; 5623 } else { 5624 qbp = q->q_bandp; 5625 while (--arg > 0) 5626 qbp = qbp->qb_next; 5627 if (qbp->qb_first) 5628 *rvalp = 1; 5629 else 5630 *rvalp = 0; 5631 } 5632 } 5633 mutex_exit(QLOCK(q)); 5634 return (0); 5635 } 5636 5637 case I_GETBAND: 5638 { 5639 int intpri; 5640 queue_t *q; 5641 5642 q = _RD(stp->sd_wrq); 5643 mutex_enter(QLOCK(q)); 5644 mp = q->q_first; 5645 if (!mp) { 5646 mutex_exit(QLOCK(q)); 5647 return (ENODATA); 5648 } 5649 intpri = (int)mp->b_band; 5650 error = strcopyout(&intpri, (void *)arg, sizeof (int), 5651 copyflag); 5652 mutex_exit(QLOCK(q)); 5653 return (error); 5654 } 5655 5656 case I_ATMARK: 5657 { 5658 queue_t *q; 5659 5660 if (arg & ~(ANYMARK|LASTMARK)) 5661 return (EINVAL); 5662 q = _RD(stp->sd_wrq); 5663 mutex_enter(&stp->sd_lock); 5664 if ((stp->sd_flag & STRATMARK) && (arg == ANYMARK)) { 5665 *rvalp = 1; 5666 } else { 5667 mutex_enter(QLOCK(q)); 5668 mp = 
q->q_first; 5669 5670 if (mp == NULL) 5671 *rvalp = 0; 5672 else if ((arg == ANYMARK) && (mp->b_flag & MSGMARK)) 5673 *rvalp = 1; 5674 else if ((arg == LASTMARK) && (mp == stp->sd_mark)) 5675 *rvalp = 1; 5676 else 5677 *rvalp = 0; 5678 mutex_exit(QLOCK(q)); 5679 } 5680 mutex_exit(&stp->sd_lock); 5681 return (0); 5682 } 5683 5684 case I_CANPUT: 5685 { 5686 char band; 5687 5688 if ((arg < 0) || (arg >= NBAND)) 5689 return (EINVAL); 5690 band = (char)arg; 5691 *rvalp = bcanputnext(stp->sd_wrq, band); 5692 return (0); 5693 } 5694 5695 case I_SETCLTIME: 5696 { 5697 int closetime; 5698 5699 error = strcopyin((void *)arg, &closetime, sizeof (int), 5700 copyflag); 5701 if (error) 5702 return (error); 5703 if (closetime < 0) 5704 return (EINVAL); 5705 5706 stp->sd_closetime = closetime; 5707 return (0); 5708 } 5709 5710 case I_GETCLTIME: 5711 { 5712 int closetime; 5713 5714 closetime = stp->sd_closetime; 5715 return (strcopyout(&closetime, (void *)arg, sizeof (int), 5716 copyflag)); 5717 } 5718 5719 case TIOCGSID: 5720 { 5721 pid_t sid; 5722 5723 mutex_enter(&stp->sd_lock); 5724 if (stp->sd_sidp == NULL) { 5725 mutex_exit(&stp->sd_lock); 5726 return (ENOTTY); 5727 } 5728 sid = stp->sd_sidp->pid_id; 5729 mutex_exit(&stp->sd_lock); 5730 return (strcopyout(&sid, (void *)arg, sizeof (pid_t), 5731 copyflag)); 5732 } 5733 5734 case TIOCSPGRP: 5735 { 5736 pid_t pgrp; 5737 proc_t *q; 5738 pid_t sid, fg_pgid, bg_pgid; 5739 5740 if (error = strcopyin((void *)arg, &pgrp, sizeof (pid_t), 5741 copyflag)) 5742 return (error); 5743 mutex_enter(&stp->sd_lock); 5744 mutex_enter(&pidlock); 5745 if (stp->sd_sidp != ttoproc(curthread)->p_sessp->s_sidp) { 5746 mutex_exit(&pidlock); 5747 mutex_exit(&stp->sd_lock); 5748 return (ENOTTY); 5749 } 5750 if (pgrp == stp->sd_pgidp->pid_id) { 5751 mutex_exit(&pidlock); 5752 mutex_exit(&stp->sd_lock); 5753 return (0); 5754 } 5755 if (pgrp <= 0 || pgrp >= maxpid) { 5756 mutex_exit(&pidlock); 5757 mutex_exit(&stp->sd_lock); 5758 return (EINVAL); 5759 } 5760 if ((q = pgfind(pgrp)) == NULL || 5761 q->p_sessp != ttoproc(curthread)->p_sessp) { 5762 mutex_exit(&pidlock); 5763 mutex_exit(&stp->sd_lock); 5764 return (EPERM); 5765 } 5766 sid = stp->sd_sidp->pid_id; 5767 fg_pgid = q->p_pgrp; 5768 bg_pgid = stp->sd_pgidp->pid_id; 5769 CL_SET_PROCESS_GROUP(curthread, sid, bg_pgid, fg_pgid); 5770 PID_RELE(stp->sd_pgidp); 5771 ctty_clear_sighuped(); 5772 stp->sd_pgidp = q->p_pgidp; 5773 PID_HOLD(stp->sd_pgidp); 5774 mutex_exit(&pidlock); 5775 mutex_exit(&stp->sd_lock); 5776 return (0); 5777 } 5778 5779 case TIOCGPGRP: 5780 { 5781 pid_t pgrp; 5782 5783 mutex_enter(&stp->sd_lock); 5784 if (stp->sd_sidp == NULL) { 5785 mutex_exit(&stp->sd_lock); 5786 return (ENOTTY); 5787 } 5788 pgrp = stp->sd_pgidp->pid_id; 5789 mutex_exit(&stp->sd_lock); 5790 return (strcopyout(&pgrp, (void *)arg, sizeof (pid_t), 5791 copyflag)); 5792 } 5793 5794 case TIOCSCTTY: 5795 { 5796 return (strctty(stp)); 5797 } 5798 5799 case TIOCNOTTY: 5800 { 5801 /* freectty() always assumes curproc. */ 5802 if (freectty(B_FALSE) != 0) 5803 return (0); 5804 return (ENOTTY); 5805 } 5806 5807 case FIONBIO: 5808 case FIOASYNC: 5809 return (0); /* handled by the upper layer */ 5810 } 5811 } 5812 5813 /* 5814 * Custom free routine used for M_PASSFP messages. 
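 * Invoked through the frtn_t set up in do_sendfp() when the
 * M_PASSFP message is freed; the closef() here drops the file
 * pointer reference that do_sendfp() took.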
5815 */ 5816 static void 5817 free_passfp(struct k_strrecvfd *srf) 5818 { 5819 (void) closef(srf->fp); 5820 kmem_free(srf, sizeof (struct k_strrecvfd) + sizeof (frtn_t)); 5821 } 5822 5823 /* ARGSUSED */ 5824 int 5825 do_sendfp(struct stdata *stp, struct file *fp, struct cred *cr) 5826 { 5827 queue_t *qp, *nextqp; 5828 struct k_strrecvfd *srf; 5829 mblk_t *mp; 5830 frtn_t *frtnp; 5831 size_t bufsize; 5832 queue_t *mate = NULL; 5833 syncq_t *sq = NULL; 5834 int retval = 0; 5835 5836 if (stp->sd_flag & STRHUP) 5837 return (ENXIO); 5838 5839 claimstr(stp->sd_wrq); 5840 5841 /* Fastpath, we have a pipe, and we are already mated, use it. */ 5842 if (STRMATED(stp)) { 5843 qp = _RD(stp->sd_mate->sd_wrq); 5844 claimstr(qp); 5845 mate = qp; 5846 } else { /* Not already mated. */ 5847 5848 /* 5849 * Walk the stream to the end of this one. 5850 * assumes that the claimstr() will prevent 5851 * plumbing between the stream head and the 5852 * driver from changing 5853 */ 5854 qp = stp->sd_wrq; 5855 5856 /* 5857 * Loop until we reach the end of this stream. 5858 * On completion, qp points to the write queue 5859 * at the end of the stream, or the read queue 5860 * at the stream head if this is a fifo. 5861 */ 5862 while (((qp = qp->q_next) != NULL) && _SAMESTR(qp)) 5863 ; 5864 5865 /* 5866 * Just in case we get a q_next which is NULL, but 5867 * not at the end of the stream. This is actually 5868 * broken, so we set an assert to catch it in 5869 * debug, and set an error and return if not debug. 5870 */ 5871 ASSERT(qp); 5872 if (qp == NULL) { 5873 releasestr(stp->sd_wrq); 5874 return (EINVAL); 5875 } 5876 5877 /* 5878 * Enter the syncq for the driver, so (hopefully) 5879 * the queue values will not change on us. 5880 * XXXX - This will only prevent the race IFF only 5881 * the write side modifies the q_next member, and 5882 * the put procedure is protected by at least 5883 * MT_PERQ. 5884 */ 5885 if ((sq = qp->q_syncq) != NULL) 5886 entersq(sq, SQ_PUT); 5887 5888 /* Now get the q_next value from this qp. */ 5889 nextqp = qp->q_next; 5890 5891 /* 5892 * If nextqp exists and the other stream is different 5893 * from this one claim the stream, set the mate, and 5894 * get the read queue at the stream head of the other 5895 * stream. Assumes that nextqp was at least valid when 5896 * we got it. Hopefully the entersq of the driver 5897 * will prevent it from changing on us. 5898 */ 5899 if ((nextqp != NULL) && (STREAM(nextqp) != stp)) { 5900 ASSERT(qp->q_qinfo->qi_srvp); 5901 ASSERT(_OTHERQ(qp)->q_qinfo->qi_srvp); 5902 ASSERT(_OTHERQ(qp->q_next)->q_qinfo->qi_srvp); 5903 claimstr(nextqp); 5904 5905 /* Make sure we still have a q_next */ 5906 if (nextqp != qp->q_next) { 5907 releasestr(stp->sd_wrq); 5908 releasestr(nextqp); 5909 return (EINVAL); 5910 } 5911 5912 qp = _RD(STREAM(nextqp)->sd_wrq); 5913 mate = qp; 5914 } 5915 /* If we entered the synq above, leave it. */ 5916 if (sq != NULL) 5917 leavesq(sq, SQ_PUT); 5918 } /* STRMATED(STP) */ 5919 5920 /* XXX prevents substitution of the ops vector */ 5921 if (qp->q_qinfo != &strdata && qp->q_qinfo != &fifo_strdata) { 5922 retval = EINVAL; 5923 goto out; 5924 } 5925 5926 if (qp->q_flag & QFULL) { 5927 retval = EAGAIN; 5928 goto out; 5929 } 5930 5931 /* 5932 * Since M_PASSFP messages include a file descriptor, we use 5933 * esballoc() and specify a custom free routine (free_passfp()) that 5934 * will close the descriptor as part of freeing the message. For 5935 * convenience, we stash the frtn_t right after the data block. 
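 * The single allocation below is laid out as:
 *
 *	+------------------------+---------+
 *	| struct k_strrecvfd     | frtn_t  |
 *	+------------------------+---------+
 *	^ srf (esballoc data)    ^ frtnp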
5936 */ 5937 bufsize = sizeof (struct k_strrecvfd) + sizeof (frtn_t); 5938 srf = kmem_alloc(bufsize, KM_NOSLEEP); 5939 if (srf == NULL) { 5940 retval = EAGAIN; 5941 goto out; 5942 } 5943 5944 frtnp = (frtn_t *)(srf + 1); 5945 frtnp->free_arg = (caddr_t)srf; 5946 frtnp->free_func = free_passfp; 5947 5948 mp = esballoc((uchar_t *)srf, bufsize, BPRI_MED, frtnp); 5949 if (mp == NULL) { 5950 kmem_free(srf, bufsize); 5951 retval = EAGAIN; 5952 goto out; 5953 } 5954 mp->b_wptr += sizeof (struct k_strrecvfd); 5955 mp->b_datap->db_type = M_PASSFP; 5956 5957 srf->fp = fp; 5958 srf->uid = crgetuid(curthread->t_cred); 5959 srf->gid = crgetgid(curthread->t_cred); 5960 mutex_enter(&fp->f_tlock); 5961 fp->f_count++; 5962 mutex_exit(&fp->f_tlock); 5963 5964 put(qp, mp); 5965 out: 5966 releasestr(stp->sd_wrq); 5967 if (mate) 5968 releasestr(mate); 5969 return (retval); 5970 } 5971 5972 /* 5973 * Send an ioctl message downstream and wait for acknowledgement. 5974 * flags may be set to either U_TO_K or K_TO_K and a combination 5975 * of STR_NOERROR or STR_NOSIG 5976 * STR_NOSIG: Signals are essentially ignored or held and have 5977 * no effect for the duration of the call. 5978 * STR_NOERROR: Ignores stream head read, write and hup errors. 5979 * Additionally, if an existing ioctl times out, it is assumed 5980 * lost and and this ioctl will continue as if the previous ioctl had 5981 * finished. ETIME may be returned if this ioctl times out (i.e. 5982 * ic_timout is not INFTIM). Non-stream head errors may be returned if 5983 * the ioc_error indicates that the driver/module had problems, 5984 * an EFAULT was found when accessing user data, a lack of 5985 * resources, etc. 5986 */ 5987 int 5988 strdoioctl( 5989 struct stdata *stp, 5990 struct strioctl *strioc, 5991 int fflags, /* file flags with model info */ 5992 int flag, 5993 cred_t *crp, 5994 int *rvalp) 5995 { 5996 mblk_t *bp; 5997 struct iocblk *iocbp; 5998 struct copyreq *reqp; 5999 struct copyresp *resp; 6000 int id; 6001 int transparent = 0; 6002 int error = 0; 6003 int len = 0; 6004 caddr_t taddr; 6005 int copyflag = (flag & (U_TO_K | K_TO_K)); 6006 int sigflag = (flag & STR_NOSIG); 6007 int errs; 6008 uint_t waitflags; 6009 6010 ASSERT(copyflag == U_TO_K || copyflag == K_TO_K); 6011 ASSERT((fflags & FMODELS) != 0); 6012 6013 TRACE_2(TR_FAC_STREAMS_FR, 6014 TR_STRDOIOCTL, 6015 "strdoioctl:stp %p strioc %p", stp, strioc); 6016 if (strioc->ic_len == TRANSPARENT) { /* send arg in M_DATA block */ 6017 transparent = 1; 6018 strioc->ic_len = sizeof (intptr_t); 6019 } 6020 6021 if (strioc->ic_len < 0 || (strmsgsz > 0 && strioc->ic_len > strmsgsz)) 6022 return (EINVAL); 6023 6024 if ((bp = allocb_cred_wait(sizeof (union ioctypes), sigflag, &error, 6025 crp)) == NULL) 6026 return (error); 6027 6028 bzero(bp->b_wptr, sizeof (union ioctypes)); 6029 6030 iocbp = (struct iocblk *)bp->b_wptr; 6031 iocbp->ioc_count = strioc->ic_len; 6032 iocbp->ioc_cmd = strioc->ic_cmd; 6033 iocbp->ioc_flag = (fflags & FMODELS); 6034 6035 crhold(crp); 6036 iocbp->ioc_cr = crp; 6037 DB_TYPE(bp) = M_IOCTL; 6038 DB_CPID(bp) = curproc->p_pid; 6039 bp->b_wptr += sizeof (struct iocblk); 6040 6041 if (flag & STR_NOERROR) 6042 errs = STPLEX; 6043 else 6044 errs = STRHUP|STRDERR|STWRERR|STPLEX; 6045 6046 /* 6047 * If there is data to copy into ioctl block, do so. 
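 * (When 'transparent' is set, the data is the single intptr_t
 * argument captured by the TRANSPARENT handling above, so it is
 * copied K_TO_K below rather than with the caller's copy flag.)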
6048 */ 6049 if (iocbp->ioc_count > 0) { 6050 if (transparent) 6051 /* 6052 * Note: STR_NOERROR does not have an effect 6053 * in putiocd() 6054 */ 6055 id = K_TO_K | sigflag; 6056 else 6057 id = flag; 6058 if ((error = putiocd(bp, strioc->ic_dp, id, crp)) != 0) { 6059 freemsg(bp); 6060 crfree(crp); 6061 return (error); 6062 } 6063 6064 /* 6065 * We could have slept copying in user pages. 6066 * Recheck the stream head state (the other end 6067 * of a pipe could have gone away). 6068 */ 6069 if (stp->sd_flag & errs) { 6070 mutex_enter(&stp->sd_lock); 6071 error = strgeterr(stp, errs, 0); 6072 mutex_exit(&stp->sd_lock); 6073 if (error != 0) { 6074 freemsg(bp); 6075 crfree(crp); 6076 return (error); 6077 } 6078 } 6079 } 6080 if (transparent) 6081 iocbp->ioc_count = TRANSPARENT; 6082 6083 /* 6084 * Block for up to STRTIMOUT milliseconds if there is an outstanding 6085 * ioctl for this stream already running. All processes 6086 * sleeping here will be awakened as a result of an ACK 6087 * or NAK being received for the outstanding ioctl, or 6088 * as a result of the timer expiring on the outstanding 6089 * ioctl (a failure), or as a result of any waiting 6090 * process's timer expiring (also a failure). 6091 */ 6092 6093 error = 0; 6094 mutex_enter(&stp->sd_lock); 6095 while (stp->sd_flag & (IOCWAIT | IOCWAITNE)) { 6096 clock_t cv_rval; 6097 6098 TRACE_0(TR_FAC_STREAMS_FR, 6099 TR_STRDOIOCTL_WAIT, 6100 "strdoioctl sleeps - IOCWAIT"); 6101 cv_rval = str_cv_wait(&stp->sd_iocmonitor, &stp->sd_lock, 6102 STRTIMOUT, sigflag); 6103 if (cv_rval <= 0) { 6104 if (cv_rval == 0) { 6105 error = EINTR; 6106 } else { 6107 if (flag & STR_NOERROR) { 6108 /* 6109 * Terminating current ioctl in 6110 * progress -- assume it got lost and 6111 * wake up the other thread so that the 6112 * operation completes. 6113 */ 6114 if (!(stp->sd_flag & IOCWAITNE)) { 6115 stp->sd_flag |= IOCWAITNE; 6116 cv_broadcast(&stp->sd_monitor); 6117 } 6118 /* 6119 * Otherwise, there's a running 6120 * STR_NOERROR -- we have no choice 6121 * here but to wait forever (or until 6122 * interrupted). 6123 */ 6124 } else { 6125 /* 6126 * pending ioctl has caused 6127 * us to time out 6128 */ 6129 error = ETIME; 6130 } 6131 } 6132 } else if ((stp->sd_flag & errs)) { 6133 error = strgeterr(stp, errs, 0); 6134 } 6135 if (error) { 6136 mutex_exit(&stp->sd_lock); 6137 freemsg(bp); 6138 crfree(crp); 6139 return (error); 6140 } 6141 } 6142 6143 /* 6144 * Have control of ioctl mechanism. 6145 * Send down ioctl packet and wait for response. 6146 */ 6147 if (stp->sd_iocblk != (mblk_t *)-1) { 6148 freemsg(stp->sd_iocblk); 6149 } 6150 stp->sd_iocblk = NULL; 6151 6152 /* 6153 * If this is marked with 'noerror' (internal; mostly 6154 * I_{P,}{UN,}LINK), then make sure nobody else is able to get 6155 * in here by setting IOCWAITNE. 6156 */ 6157 waitflags = IOCWAIT; 6158 if (flag & STR_NOERROR) 6159 waitflags |= IOCWAITNE; 6160 6161 stp->sd_flag |= waitflags; 6162 6163 /* 6164 * Assign sequence number. 6165 */ 6166 iocbp->ioc_id = stp->sd_iocid = getiocseqno(); 6167 6168 mutex_exit(&stp->sd_lock); 6169 6170 TRACE_1(TR_FAC_STREAMS_FR, 6171 TR_STRDOIOCTL_PUT, "strdoioctl put: stp %p", stp); 6172 stream_willservice(stp); 6173 putnext(stp->sd_wrq, bp); 6174 stream_runservice(stp); 6175 6176 /* 6177 * Timed wait for acknowledgment. The wait time is limited by the 6178 * timeout value, which must be a positive integer (number of 6179 * milliseconds) to wait, or 0 (use default value of STRTIMOUT 6180 * milliseconds), or -1 (wait forever). 
This will be awakened 6181 * either by an ACK/NAK message arriving, the timer expiring, or 6182 * the timer expiring on another ioctl waiting for control of the 6183 * mechanism. 6184 */ 6185 waitioc: 6186 mutex_enter(&stp->sd_lock); 6187 6188 6189 /* 6190 * If the reply has already arrived, don't sleep. If awakened from 6191 * the sleep, fail only if the reply has not arrived by then. 6192 * Otherwise, process the reply. 6193 */ 6194 while (!stp->sd_iocblk) { 6195 clock_t cv_rval; 6196 6197 if (stp->sd_flag & errs) { 6198 error = strgeterr(stp, errs, 0); 6199 if (error != 0) { 6200 stp->sd_flag &= ~waitflags; 6201 cv_broadcast(&stp->sd_iocmonitor); 6202 mutex_exit(&stp->sd_lock); 6203 crfree(crp); 6204 return (error); 6205 } 6206 } 6207 6208 TRACE_0(TR_FAC_STREAMS_FR, 6209 TR_STRDOIOCTL_WAIT2, 6210 "strdoioctl sleeps awaiting reply"); 6211 ASSERT(error == 0); 6212 6213 cv_rval = str_cv_wait(&stp->sd_monitor, &stp->sd_lock, 6214 (strioc->ic_timout ? 6215 strioc->ic_timout * 1000 : STRTIMOUT), sigflag); 6216 6217 /* 6218 * There are four possible cases here: interrupt, timeout, 6219 * wakeup by IOCWAITNE (above), or wakeup by strrput_nondata (a 6220 * valid M_IOCTL reply). 6221 * 6222 * If we've been awakened by a STR_NOERROR ioctl on some other 6223 * thread, then sd_iocblk will still be NULL, and IOCWAITNE 6224 * will be set. Pretend as if we just timed out. Note that 6225 * this other thread waited at least STRTIMOUT before trying to 6226 * awaken our thread, so this is indistinguishable (even for 6227 * INFTIM) from the case where we failed with ETIME waiting on 6228 * IOCWAIT in the prior loop. 6229 */ 6230 if (cv_rval > 0 && !(flag & STR_NOERROR) && 6231 stp->sd_iocblk == NULL && (stp->sd_flag & IOCWAITNE)) { 6232 cv_rval = -1; 6233 } 6234 6235 /* 6236 * note: STR_NOERROR does not protect 6237 * us here.. use ic_timout < 0 6238 */ 6239 if (cv_rval <= 0) { 6240 if (cv_rval == 0) { 6241 error = EINTR; 6242 } else { 6243 error = ETIME; 6244 } 6245 /* 6246 * A message could have come in after we were scheduled 6247 * but before we were actually run. 6248 */ 6249 bp = stp->sd_iocblk; 6250 stp->sd_iocblk = NULL; 6251 if (bp != NULL) { 6252 if ((bp->b_datap->db_type == M_COPYIN) || 6253 (bp->b_datap->db_type == M_COPYOUT)) { 6254 mutex_exit(&stp->sd_lock); 6255 if (bp->b_cont) { 6256 freemsg(bp->b_cont); 6257 bp->b_cont = NULL; 6258 } 6259 bp->b_datap->db_type = M_IOCDATA; 6260 bp->b_wptr = bp->b_rptr + 6261 sizeof (struct copyresp); 6262 resp = (struct copyresp *)bp->b_rptr; 6263 resp->cp_rval = 6264 (caddr_t)1; /* failure */ 6265 stream_willservice(stp); 6266 putnext(stp->sd_wrq, bp); 6267 stream_runservice(stp); 6268 mutex_enter(&stp->sd_lock); 6269 } else { 6270 freemsg(bp); 6271 } 6272 } 6273 stp->sd_flag &= ~waitflags; 6274 cv_broadcast(&stp->sd_iocmonitor); 6275 mutex_exit(&stp->sd_lock); 6276 crfree(crp); 6277 return (error); 6278 } 6279 } 6280 bp = stp->sd_iocblk; 6281 /* 6282 * Note: it is strictly impossible to get here with sd_iocblk set to 6283 * -1. This is because the initial loop above doesn't allow any new 6284 * ioctls into the fray until all others have passed this point. 
6285 */ 6286 ASSERT(bp != NULL && bp != (mblk_t *)-1); 6287 TRACE_1(TR_FAC_STREAMS_FR, 6288 TR_STRDOIOCTL_ACK, "strdoioctl got reply: bp %p", bp); 6289 if ((bp->b_datap->db_type == M_IOCACK) || 6290 (bp->b_datap->db_type == M_IOCNAK)) { 6291 /* for detection of duplicate ioctl replies */ 6292 stp->sd_iocblk = (mblk_t *)-1; 6293 stp->sd_flag &= ~waitflags; 6294 cv_broadcast(&stp->sd_iocmonitor); 6295 mutex_exit(&stp->sd_lock); 6296 } else { 6297 /* 6298 * flags not cleared here because we're still doing 6299 * copy in/out for ioctl. 6300 */ 6301 stp->sd_iocblk = NULL; 6302 mutex_exit(&stp->sd_lock); 6303 } 6304 6305 6306 /* 6307 * Have received acknowledgment. 6308 */ 6309 6310 switch (bp->b_datap->db_type) { 6311 case M_IOCACK: 6312 /* 6313 * Positive ack. 6314 */ 6315 iocbp = (struct iocblk *)bp->b_rptr; 6316 6317 /* 6318 * Set error if indicated. 6319 */ 6320 if (iocbp->ioc_error) { 6321 error = iocbp->ioc_error; 6322 break; 6323 } 6324 6325 /* 6326 * Set return value. 6327 */ 6328 *rvalp = iocbp->ioc_rval; 6329 6330 /* 6331 * Data may have been returned in ACK message (ioc_count > 0). 6332 * If so, copy it out to the user's buffer. 6333 */ 6334 if (iocbp->ioc_count && !transparent) { 6335 if (error = getiocd(bp, strioc->ic_dp, copyflag)) 6336 break; 6337 } 6338 if (!transparent) { 6339 if (len) /* an M_COPYOUT was used with I_STR */ 6340 strioc->ic_len = len; 6341 else 6342 strioc->ic_len = (int)iocbp->ioc_count; 6343 } 6344 break; 6345 6346 case M_IOCNAK: 6347 /* 6348 * Negative ack. 6349 * 6350 * The only thing to do is set error as specified 6351 * in neg ack packet. 6352 */ 6353 iocbp = (struct iocblk *)bp->b_rptr; 6354 6355 error = (iocbp->ioc_error ? iocbp->ioc_error : EINVAL); 6356 break; 6357 6358 case M_COPYIN: 6359 /* 6360 * Driver or module has requested user ioctl data. 6361 */ 6362 reqp = (struct copyreq *)bp->b_rptr; 6363 6364 /* 6365 * M_COPYIN should *never* have a message attached, though 6366 * it's harmless if it does -- thus, panic on a DEBUG 6367 * kernel and just free it on a non-DEBUG build. 6368 */ 6369 ASSERT(bp->b_cont == NULL); 6370 if (bp->b_cont != NULL) { 6371 freemsg(bp->b_cont); 6372 bp->b_cont = NULL; 6373 } 6374 6375 error = putiocd(bp, reqp->cq_addr, flag, crp); 6376 if (error && bp->b_cont) { 6377 freemsg(bp->b_cont); 6378 bp->b_cont = NULL; 6379 } 6380 6381 bp->b_wptr = bp->b_rptr + sizeof (struct copyresp); 6382 bp->b_datap->db_type = M_IOCDATA; 6383 6384 mblk_setcred(bp, crp); 6385 DB_CPID(bp) = curproc->p_pid; 6386 resp = (struct copyresp *)bp->b_rptr; 6387 resp->cp_rval = (caddr_t)(uintptr_t)error; 6388 resp->cp_flag = (fflags & FMODELS); 6389 6390 stream_willservice(stp); 6391 putnext(stp->sd_wrq, bp); 6392 stream_runservice(stp); 6393 6394 if (error) { 6395 mutex_enter(&stp->sd_lock); 6396 stp->sd_flag &= ~waitflags; 6397 cv_broadcast(&stp->sd_iocmonitor); 6398 mutex_exit(&stp->sd_lock); 6399 crfree(crp); 6400 return (error); 6401 } 6402 6403 goto waitioc; 6404 6405 case M_COPYOUT: 6406 /* 6407 * Driver or module has ioctl data for a user. 
6408 */ 6409 reqp = (struct copyreq *)bp->b_rptr; 6410 ASSERT(bp->b_cont != NULL); 6411 6412 /* 6413 * Always (transparent or non-transparent ) 6414 * use the address specified in the request 6415 */ 6416 taddr = reqp->cq_addr; 6417 if (!transparent) 6418 len = (int)reqp->cq_size; 6419 6420 /* copyout data to the provided address */ 6421 error = getiocd(bp, taddr, copyflag); 6422 6423 freemsg(bp->b_cont); 6424 bp->b_cont = NULL; 6425 6426 bp->b_wptr = bp->b_rptr + sizeof (struct copyresp); 6427 bp->b_datap->db_type = M_IOCDATA; 6428 6429 mblk_setcred(bp, crp); 6430 DB_CPID(bp) = curproc->p_pid; 6431 resp = (struct copyresp *)bp->b_rptr; 6432 resp->cp_rval = (caddr_t)(uintptr_t)error; 6433 resp->cp_flag = (fflags & FMODELS); 6434 6435 stream_willservice(stp); 6436 putnext(stp->sd_wrq, bp); 6437 stream_runservice(stp); 6438 6439 if (error) { 6440 mutex_enter(&stp->sd_lock); 6441 stp->sd_flag &= ~waitflags; 6442 cv_broadcast(&stp->sd_iocmonitor); 6443 mutex_exit(&stp->sd_lock); 6444 crfree(crp); 6445 return (error); 6446 } 6447 goto waitioc; 6448 6449 default: 6450 ASSERT(0); 6451 mutex_enter(&stp->sd_lock); 6452 stp->sd_flag &= ~waitflags; 6453 cv_broadcast(&stp->sd_iocmonitor); 6454 mutex_exit(&stp->sd_lock); 6455 break; 6456 } 6457 6458 freemsg(bp); 6459 crfree(crp); 6460 return (error); 6461 } 6462 6463 /* 6464 * Send an M_CMD message downstream and wait for a reply. This is a ptools 6465 * special used to retrieve information from modules/drivers a stream without 6466 * being subjected to flow control or interfering with pending messages on the 6467 * stream (e.g. an ioctl in flight). 6468 */ 6469 int 6470 strdocmd(struct stdata *stp, struct strcmd *scp, cred_t *crp) 6471 { 6472 mblk_t *mp; 6473 struct cmdblk *cmdp; 6474 int error = 0; 6475 int errs = STRHUP|STRDERR|STWRERR|STPLEX; 6476 clock_t rval, timeout = STRTIMOUT; 6477 6478 if (scp->sc_len < 0 || scp->sc_len > sizeof (scp->sc_buf) || 6479 scp->sc_timeout < -1) 6480 return (EINVAL); 6481 6482 if (scp->sc_timeout > 0) 6483 timeout = scp->sc_timeout * MILLISEC; 6484 6485 if ((mp = allocb_cred(sizeof (struct cmdblk), crp)) == NULL) 6486 return (ENOMEM); 6487 6488 crhold(crp); 6489 6490 cmdp = (struct cmdblk *)mp->b_wptr; 6491 cmdp->cb_cr = crp; 6492 cmdp->cb_cmd = scp->sc_cmd; 6493 cmdp->cb_len = scp->sc_len; 6494 cmdp->cb_error = 0; 6495 mp->b_wptr += sizeof (struct cmdblk); 6496 6497 DB_TYPE(mp) = M_CMD; 6498 DB_CPID(mp) = curproc->p_pid; 6499 6500 /* 6501 * Copy in the payload. 6502 */ 6503 if (cmdp->cb_len > 0) { 6504 mp->b_cont = allocb_cred(sizeof (scp->sc_buf), crp); 6505 if (mp->b_cont == NULL) { 6506 error = ENOMEM; 6507 goto out; 6508 } 6509 6510 /* cb_len comes from sc_len, which has already been checked */ 6511 ASSERT(cmdp->cb_len <= sizeof (scp->sc_buf)); 6512 (void) bcopy(scp->sc_buf, mp->b_cont->b_wptr, cmdp->cb_len); 6513 mp->b_cont->b_wptr += cmdp->cb_len; 6514 DB_CPID(mp->b_cont) = curproc->p_pid; 6515 } 6516 6517 /* 6518 * Since this mechanism is strictly for ptools, and since only one 6519 * process can be grabbed at a time, we simply fail if there's 6520 * currently an operation pending. 6521 */ 6522 mutex_enter(&stp->sd_lock); 6523 if (stp->sd_flag & STRCMDWAIT) { 6524 mutex_exit(&stp->sd_lock); 6525 error = EBUSY; 6526 goto out; 6527 } 6528 stp->sd_flag |= STRCMDWAIT; 6529 ASSERT(stp->sd_cmdblk == NULL); 6530 mutex_exit(&stp->sd_lock); 6531 6532 putnext(stp->sd_wrq, mp); 6533 mp = NULL; 6534 6535 /* 6536 * Timed wait for acknowledgment. If the reply has already arrived, 6537 * don't sleep. 
If awakened from the sleep, fail only if the reply 6538 * has not arrived by then. Otherwise, process the reply. 6539 */ 6540 mutex_enter(&stp->sd_lock); 6541 while (stp->sd_cmdblk == NULL) { 6542 if (stp->sd_flag & errs) { 6543 if ((error = strgeterr(stp, errs, 0)) != 0) 6544 goto waitout; 6545 } 6546 6547 rval = str_cv_wait(&stp->sd_monitor, &stp->sd_lock, timeout, 0); 6548 if (stp->sd_cmdblk != NULL) 6549 break; 6550 6551 if (rval <= 0) { 6552 error = (rval == 0) ? EINTR : ETIME; 6553 goto waitout; 6554 } 6555 } 6556 6557 /* 6558 * We received a reply. 6559 */ 6560 mp = stp->sd_cmdblk; 6561 stp->sd_cmdblk = NULL; 6562 ASSERT(mp != NULL && DB_TYPE(mp) == M_CMD); 6563 ASSERT(stp->sd_flag & STRCMDWAIT); 6564 stp->sd_flag &= ~STRCMDWAIT; 6565 mutex_exit(&stp->sd_lock); 6566 6567 cmdp = (struct cmdblk *)mp->b_rptr; 6568 if ((error = cmdp->cb_error) != 0) 6569 goto out; 6570 6571 /* 6572 * Data may have been returned in the reply (cb_len > 0). 6573 * If so, copy it out to the user's buffer. 6574 */ 6575 if (cmdp->cb_len > 0) { 6576 if (mp->b_cont == NULL || MBLKL(mp->b_cont) < cmdp->cb_len) { 6577 error = EPROTO; 6578 goto out; 6579 } 6580 6581 cmdp->cb_len = MIN(cmdp->cb_len, sizeof (scp->sc_buf)); 6582 (void) bcopy(mp->b_cont->b_rptr, scp->sc_buf, cmdp->cb_len); 6583 } 6584 scp->sc_len = cmdp->cb_len; 6585 out: 6586 freemsg(mp); 6587 crfree(crp); 6588 return (error); 6589 waitout: 6590 ASSERT(stp->sd_cmdblk == NULL); 6591 stp->sd_flag &= ~STRCMDWAIT; 6592 mutex_exit(&stp->sd_lock); 6593 crfree(crp); 6594 return (error); 6595 } 6596 6597 /* 6598 * For the SunOS keyboard driver. 6599 * Return the next available "ioctl" sequence number. 6600 * Exported, so that streams modules can send "ioctl" messages 6601 * downstream from their open routine. 6602 */ 6603 int 6604 getiocseqno(void) 6605 { 6606 int i; 6607 6608 mutex_enter(&strresources); 6609 i = ++ioc_id; 6610 mutex_exit(&strresources); 6611 return (i); 6612 } 6613 6614 /* 6615 * Get the next message from the read queue. If the message is 6616 * priority, STRPRI will have been set by strrput(). This flag 6617 * should be reset only when the entire message at the front of the 6618 * queue has been consumed. 6619 * 6620 * NOTE: strgetmsg and kstrgetmsg have much of the logic in common.
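 *
 * Rough, illustrative user-level view (a sketch, not kernel code):
 * this routine is reached through the getmsg(2)/getpmsg(2) system
 * calls, roughly
 *
 *	struct strbuf ctl, dat;
 *	int flags = 0;			(or RS_HIPRI)
 *
 *	ret = getmsg(fd, &ctl, &dat, &flags);
 *
 * with the user flag values translated into the MSG_ANY/MSG_BAND/
 * MSG_HIPRI values checked below before strgetmsg() is called.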
6621 */ 6622 int 6623 strgetmsg( 6624 struct vnode *vp, 6625 struct strbuf *mctl, 6626 struct strbuf *mdata, 6627 unsigned char *prip, 6628 int *flagsp, 6629 int fmode, 6630 rval_t *rvp) 6631 { 6632 struct stdata *stp; 6633 mblk_t *bp, *nbp; 6634 mblk_t *savemp = NULL; 6635 mblk_t *savemptail = NULL; 6636 uint_t old_sd_flag; 6637 int flg; 6638 int more = 0; 6639 int error = 0; 6640 char first = 1; 6641 uint_t mark; /* Contains MSG*MARK and _LASTMARK */ 6642 #define _LASTMARK 0x8000 /* Distinct from MSG*MARK */ 6643 unsigned char pri = 0; 6644 queue_t *q; 6645 int pr = 0; /* Partial read successful */ 6646 struct uio uios; 6647 struct uio *uiop = &uios; 6648 struct iovec iovs; 6649 unsigned char type; 6650 6651 TRACE_1(TR_FAC_STREAMS_FR, TR_STRGETMSG_ENTER, 6652 "strgetmsg:%p", vp); 6653 6654 ASSERT(vp->v_stream); 6655 stp = vp->v_stream; 6656 rvp->r_val1 = 0; 6657 6658 mutex_enter(&stp->sd_lock); 6659 6660 if ((error = i_straccess(stp, JCREAD)) != 0) { 6661 mutex_exit(&stp->sd_lock); 6662 return (error); 6663 } 6664 6665 if (stp->sd_flag & (STRDERR|STPLEX)) { 6666 error = strgeterr(stp, STRDERR|STPLEX, 0); 6667 if (error != 0) { 6668 mutex_exit(&stp->sd_lock); 6669 return (error); 6670 } 6671 } 6672 mutex_exit(&stp->sd_lock); 6673 6674 switch (*flagsp) { 6675 case MSG_HIPRI: 6676 if (*prip != 0) 6677 return (EINVAL); 6678 break; 6679 6680 case MSG_ANY: 6681 case MSG_BAND: 6682 break; 6683 6684 default: 6685 return (EINVAL); 6686 } 6687 /* 6688 * Setup uio and iov for data part 6689 */ 6690 iovs.iov_base = mdata->buf; 6691 iovs.iov_len = mdata->maxlen; 6692 uios.uio_iov = &iovs; 6693 uios.uio_iovcnt = 1; 6694 uios.uio_loffset = 0; 6695 uios.uio_segflg = UIO_USERSPACE; 6696 uios.uio_fmode = 0; 6697 uios.uio_extflg = UIO_COPY_CACHED; 6698 uios.uio_resid = mdata->maxlen; 6699 uios.uio_offset = 0; 6700 6701 q = _RD(stp->sd_wrq); 6702 mutex_enter(&stp->sd_lock); 6703 old_sd_flag = stp->sd_flag; 6704 mark = 0; 6705 for (;;) { 6706 int done = 0; 6707 mblk_t *q_first = q->q_first; 6708 6709 /* 6710 * Get the next message of appropriate priority 6711 * from the stream head. If the caller is interested 6712 * in band or hipri messages, then they should already 6713 * be enqueued at the stream head. On the other hand 6714 * if the caller wants normal (band 0) messages, they 6715 * might be deferred in a synchronous stream and they 6716 * will need to be pulled up. 6717 * 6718 * After we have dequeued a message, we might find that 6719 * it was a deferred M_SIG that was enqueued at the 6720 * stream head. It must now be posted as part of the 6721 * read by calling strsignal_nolock(). 6722 * 6723 * Also note that strrput does not enqueue an M_PCSIG, 6724 * and there cannot be more than one hipri message, 6725 * so there was no need to have the M_PCSIG case. 6726 * 6727 * At some time it might be nice to try and wrap the 6728 * functionality of kstrgetmsg() and strgetmsg() into 6729 * a common routine so to reduce the amount of replicated 6730 * code (since they are extremely similar). 
6731 */ 6732 if (!(*flagsp & (MSG_HIPRI|MSG_BAND))) { 6733 /* Asking for normal, band0 data */ 6734 bp = strget(stp, q, uiop, first, &error); 6735 ASSERT(MUTEX_HELD(&stp->sd_lock)); 6736 if (bp != NULL) { 6737 ASSERT(!(bp->b_datap->db_flags & DBLK_UIOA)); 6738 if (bp->b_datap->db_type == M_SIG) { 6739 strsignal_nolock(stp, *bp->b_rptr, 6740 (int32_t)bp->b_band); 6741 continue; 6742 } else { 6743 break; 6744 } 6745 } 6746 if (error != 0) { 6747 goto getmout; 6748 } 6749 6750 /* 6751 * We can't depend on the value of STRPRI here because 6752 * the stream head may be in transit. Therefore, we 6753 * must look at the type of the first message to 6754 * determine if a high priority messages is waiting 6755 */ 6756 } else if ((*flagsp & MSG_HIPRI) && q_first != NULL && 6757 q_first->b_datap->db_type >= QPCTL && 6758 (bp = getq_noenab(q, 0)) != NULL) { 6759 /* Asked for HIPRI and got one */ 6760 ASSERT(bp->b_datap->db_type >= QPCTL); 6761 break; 6762 } else if ((*flagsp & MSG_BAND) && q_first != NULL && 6763 ((q_first->b_band >= *prip) || 6764 q_first->b_datap->db_type >= QPCTL) && 6765 (bp = getq_noenab(q, 0)) != NULL) { 6766 /* 6767 * Asked for at least band "prip" and got either at 6768 * least that band or a hipri message. 6769 */ 6770 ASSERT(bp->b_band >= *prip || 6771 bp->b_datap->db_type >= QPCTL); 6772 if (bp->b_datap->db_type == M_SIG) { 6773 strsignal_nolock(stp, *bp->b_rptr, 6774 (int32_t)bp->b_band); 6775 continue; 6776 } else { 6777 break; 6778 } 6779 } 6780 6781 /* No data. Time to sleep? */ 6782 qbackenable(q, 0); 6783 6784 /* 6785 * If STRHUP or STREOF, return 0 length control and data. 6786 * If resid is 0, then a read(fd,buf,0) was done. Do not 6787 * sleep to satisfy this request because by default we have 6788 * zero bytes to return. 6789 */ 6790 if ((stp->sd_flag & (STRHUP|STREOF)) || (mctl->maxlen == 0 && 6791 mdata->maxlen == 0)) { 6792 mctl->len = mdata->len = 0; 6793 *flagsp = 0; 6794 mutex_exit(&stp->sd_lock); 6795 return (0); 6796 } 6797 TRACE_2(TR_FAC_STREAMS_FR, TR_STRGETMSG_WAIT, 6798 "strgetmsg calls strwaitq:%p, %p", 6799 vp, uiop); 6800 if (((error = strwaitq(stp, GETWAIT, (ssize_t)0, fmode, -1, 6801 &done)) != 0) || done) { 6802 TRACE_2(TR_FAC_STREAMS_FR, TR_STRGETMSG_DONE, 6803 "strgetmsg error or done:%p, %p", 6804 vp, uiop); 6805 mutex_exit(&stp->sd_lock); 6806 return (error); 6807 } 6808 TRACE_2(TR_FAC_STREAMS_FR, TR_STRGETMSG_AWAKE, 6809 "strgetmsg awakes:%p, %p", vp, uiop); 6810 if ((error = i_straccess(stp, JCREAD)) != 0) { 6811 mutex_exit(&stp->sd_lock); 6812 return (error); 6813 } 6814 first = 0; 6815 } 6816 ASSERT(bp != NULL); 6817 /* 6818 * Extract any mark information. If the message is not completely 6819 * consumed this information will be put in the mblk 6820 * that is putback. 6821 * If MSGMARKNEXT is set and the message is completely consumed 6822 * the STRATMARK flag will be set below. Likewise, if 6823 * MSGNOTMARKNEXT is set and the message is 6824 * completely consumed STRNOTATMARK will be set. 
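 *
 * For example (e.g. the out-of-band mark maintained by sockets): if
 * the consumed message carried MSGMARKNEXT and none of it is put
 * back, STRATMARK is turned on at the stream head so that a later
 * SIOCATMARK ioctl still reports that the read pointer sits at the
 * mark.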
6825 */ 6826 mark = bp->b_flag & (MSGMARK | MSGMARKNEXT | MSGNOTMARKNEXT); 6827 ASSERT((mark & (MSGMARKNEXT|MSGNOTMARKNEXT)) != 6828 (MSGMARKNEXT|MSGNOTMARKNEXT)); 6829 if (mark != 0 && bp == stp->sd_mark) { 6830 mark |= _LASTMARK; 6831 stp->sd_mark = NULL; 6832 } 6833 /* 6834 * keep track of the original message type and priority 6835 */ 6836 pri = bp->b_band; 6837 type = bp->b_datap->db_type; 6838 if (type == M_PASSFP) { 6839 if ((mark & _LASTMARK) && (stp->sd_mark == NULL)) 6840 stp->sd_mark = bp; 6841 bp->b_flag |= mark & ~_LASTMARK; 6842 putback(stp, q, bp, pri); 6843 qbackenable(q, pri); 6844 mutex_exit(&stp->sd_lock); 6845 return (EBADMSG); 6846 } 6847 ASSERT(type != M_SIG); 6848 6849 /* 6850 * Set this flag so strrput will not generate signals. Need to 6851 * make sure this flag is cleared before leaving this routine 6852 * else signals will stop being sent. 6853 */ 6854 stp->sd_flag |= STRGETINPROG; 6855 mutex_exit(&stp->sd_lock); 6856 6857 if (STREAM_NEEDSERVICE(stp)) 6858 stream_runservice(stp); 6859 6860 /* 6861 * Set HIPRI flag if message is priority. 6862 */ 6863 if (type >= QPCTL) 6864 flg = MSG_HIPRI; 6865 else 6866 flg = MSG_BAND; 6867 6868 /* 6869 * First process PROTO or PCPROTO blocks, if any. 6870 */ 6871 if (mctl->maxlen >= 0 && type != M_DATA) { 6872 size_t n, bcnt; 6873 char *ubuf; 6874 6875 bcnt = mctl->maxlen; 6876 ubuf = mctl->buf; 6877 while (bp != NULL && bp->b_datap->db_type != M_DATA) { 6878 if ((n = MIN(bcnt, bp->b_wptr - bp->b_rptr)) != 0 && 6879 copyout(bp->b_rptr, ubuf, n)) { 6880 error = EFAULT; 6881 mutex_enter(&stp->sd_lock); 6882 /* 6883 * clear stream head pri flag based on 6884 * first message type 6885 */ 6886 if (type >= QPCTL) { 6887 ASSERT(type == M_PCPROTO); 6888 stp->sd_flag &= ~STRPRI; 6889 } 6890 more = 0; 6891 freemsg(bp); 6892 goto getmout; 6893 } 6894 ubuf += n; 6895 bp->b_rptr += n; 6896 if (bp->b_rptr >= bp->b_wptr) { 6897 nbp = bp; 6898 bp = bp->b_cont; 6899 freeb(nbp); 6900 } 6901 ASSERT(n <= bcnt); 6902 bcnt -= n; 6903 if (bcnt == 0) 6904 break; 6905 } 6906 mctl->len = mctl->maxlen - bcnt; 6907 } else 6908 mctl->len = -1; 6909 6910 if (bp && bp->b_datap->db_type != M_DATA) { 6911 /* 6912 * More PROTO blocks in msg. 6913 */ 6914 more |= MORECTL; 6915 savemp = bp; 6916 while (bp && bp->b_datap->db_type != M_DATA) { 6917 savemptail = bp; 6918 bp = bp->b_cont; 6919 } 6920 savemptail->b_cont = NULL; 6921 } 6922 6923 /* 6924 * Now process DATA blocks, if any. 6925 */ 6926 if (mdata->maxlen >= 0 && bp) { 6927 /* 6928 * struiocopyout will consume a potential zero-length 6929 * M_DATA even if uio_resid is zero. 6930 */ 6931 size_t oldresid = uiop->uio_resid; 6932 6933 bp = struiocopyout(bp, uiop, &error); 6934 if (error != 0) { 6935 mutex_enter(&stp->sd_lock); 6936 /* 6937 * clear stream head hi pri flag based on 6938 * first message 6939 */ 6940 if (type >= QPCTL) { 6941 ASSERT(type == M_PCPROTO); 6942 stp->sd_flag &= ~STRPRI; 6943 } 6944 more = 0; 6945 freemsg(savemp); 6946 goto getmout; 6947 } 6948 /* 6949 * (pr == 1) indicates a partial read. 
6950 */ 6951 if (oldresid > uiop->uio_resid) 6952 pr = 1; 6953 mdata->len = mdata->maxlen - uiop->uio_resid; 6954 } else 6955 mdata->len = -1; 6956 6957 if (bp) { /* more data blocks in msg */ 6958 more |= MOREDATA; 6959 if (savemp) 6960 savemptail->b_cont = bp; 6961 else 6962 savemp = bp; 6963 } 6964 6965 mutex_enter(&stp->sd_lock); 6966 if (savemp) { 6967 if (pr && (savemp->b_datap->db_type == M_DATA) && 6968 msgnodata(savemp)) { 6969 /* 6970 * Avoid queuing a zero-length tail part of 6971 * a message. pr=1 indicates that we read some of 6972 * the message. 6973 */ 6974 freemsg(savemp); 6975 more &= ~MOREDATA; 6976 /* 6977 * clear stream head hi pri flag based on 6978 * first message 6979 */ 6980 if (type >= QPCTL) { 6981 ASSERT(type == M_PCPROTO); 6982 stp->sd_flag &= ~STRPRI; 6983 } 6984 } else { 6985 savemp->b_band = pri; 6986 /* 6987 * If the first message was HIPRI and the one we're 6988 * putting back isn't, then clear STRPRI, otherwise 6989 * set STRPRI again. Note that we must set STRPRI 6990 * again since the flush logic in strrput_nondata() 6991 * may have cleared it while we had sd_lock dropped. 6992 */ 6993 if (type >= QPCTL) { 6994 ASSERT(type == M_PCPROTO); 6995 if (queclass(savemp) < QPCTL) 6996 stp->sd_flag &= ~STRPRI; 6997 else 6998 stp->sd_flag |= STRPRI; 6999 } else if (queclass(savemp) >= QPCTL) { 7000 /* 7001 * The first message was not a HIPRI message, 7002 * but the one we are about to putback is. 7003 * For simplicitly, we do not allow for HIPRI 7004 * messages to be embedded in the message 7005 * body, so just force it to same type as 7006 * first message. 7007 */ 7008 ASSERT(type == M_DATA || type == M_PROTO); 7009 ASSERT(savemp->b_datap->db_type == M_PCPROTO); 7010 savemp->b_datap->db_type = type; 7011 } 7012 if (mark != 0) { 7013 savemp->b_flag |= mark & ~_LASTMARK; 7014 if ((mark & _LASTMARK) && 7015 (stp->sd_mark == NULL)) { 7016 /* 7017 * If another marked message arrived 7018 * while sd_lock was not held sd_mark 7019 * would be non-NULL. 7020 */ 7021 stp->sd_mark = savemp; 7022 } 7023 } 7024 putback(stp, q, savemp, pri); 7025 } 7026 } else { 7027 /* 7028 * The complete message was consumed. 7029 * 7030 * If another M_PCPROTO arrived while sd_lock was not held 7031 * it would have been discarded since STRPRI was still set. 7032 * 7033 * Move the MSG*MARKNEXT information 7034 * to the stream head just in case 7035 * the read queue becomes empty. 7036 * clear stream head hi pri flag based on 7037 * first message 7038 * 7039 * If the stream head was at the mark 7040 * (STRATMARK) before we dropped sd_lock above 7041 * and some data was consumed then we have 7042 * moved past the mark thus STRATMARK is 7043 * cleared. However, if a message arrived in 7044 * strrput during the copyout above causing 7045 * STRATMARK to be set we can not clear that 7046 * flag. 
7047 */ 7048 if (type >= QPCTL) { 7049 ASSERT(type == M_PCPROTO); 7050 stp->sd_flag &= ~STRPRI; 7051 } 7052 if (mark & (MSGMARKNEXT|MSGNOTMARKNEXT|MSGMARK)) { 7053 if (mark & MSGMARKNEXT) { 7054 stp->sd_flag &= ~STRNOTATMARK; 7055 stp->sd_flag |= STRATMARK; 7056 } else if (mark & MSGNOTMARKNEXT) { 7057 stp->sd_flag &= ~STRATMARK; 7058 stp->sd_flag |= STRNOTATMARK; 7059 } else { 7060 stp->sd_flag &= ~(STRATMARK|STRNOTATMARK); 7061 } 7062 } else if (pr && (old_sd_flag & STRATMARK)) { 7063 stp->sd_flag &= ~STRATMARK; 7064 } 7065 } 7066 7067 *flagsp = flg; 7068 *prip = pri; 7069 7070 /* 7071 * Getmsg cleanup processing - if the state of the queue has changed 7072 * some signals may need to be sent and/or poll awakened. 7073 */ 7074 getmout: 7075 qbackenable(q, pri); 7076 7077 /* 7078 * We dropped the stream head lock above. Send all M_SIG messages 7079 * before processing stream head for SIGPOLL messages. 7080 */ 7081 ASSERT(MUTEX_HELD(&stp->sd_lock)); 7082 while ((bp = q->q_first) != NULL && 7083 (bp->b_datap->db_type == M_SIG)) { 7084 /* 7085 * sd_lock is held so the content of the read queue can not 7086 * change. 7087 */ 7088 bp = getq(q); 7089 ASSERT(bp != NULL && bp->b_datap->db_type == M_SIG); 7090 7091 strsignal_nolock(stp, *bp->b_rptr, (int32_t)bp->b_band); 7092 mutex_exit(&stp->sd_lock); 7093 freemsg(bp); 7094 if (STREAM_NEEDSERVICE(stp)) 7095 stream_runservice(stp); 7096 mutex_enter(&stp->sd_lock); 7097 } 7098 7099 /* 7100 * stream head cannot change while we make the determination 7101 * whether or not to send a signal. Drop the flag to allow strrput 7102 * to send firstmsgsigs again. 7103 */ 7104 stp->sd_flag &= ~STRGETINPROG; 7105 7106 /* 7107 * If the type of message at the front of the queue changed 7108 * due to the receive the appropriate signals and pollwakeup events 7109 * are generated. The type of changes are: 7110 * Processed a hipri message, q_first is not hipri. 7111 * Processed a band X message, and q_first is band Y. 7112 * The generated signals and pollwakeups are identical to what 7113 * strrput() generates should the message that is now on q_first 7114 * arrive to an empty read queue. 7115 * 7116 * Note: only strrput will send a signal for a hipri message. 7117 */ 7118 if ((bp = q->q_first) != NULL && !(stp->sd_flag & STRPRI)) { 7119 strsigset_t signals = 0; 7120 strpollset_t pollwakeups = 0; 7121 7122 if (flg & MSG_HIPRI) { 7123 /* 7124 * Removed a hipri message. Regular data at 7125 * the front of the queue. 7126 */ 7127 if (bp->b_band == 0) { 7128 signals = S_INPUT | S_RDNORM; 7129 pollwakeups = POLLIN | POLLRDNORM; 7130 } else { 7131 signals = S_INPUT | S_RDBAND; 7132 pollwakeups = POLLIN | POLLRDBAND; 7133 } 7134 } else if (pri != bp->b_band) { 7135 /* 7136 * The band is different for the new q_first. 
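 *
 * For instance, if the message just consumed was in band 2 and the
 * new q_first is an ordinary band-0 message, S_RDNORM is signalled
 * and POLLIN|POLLRDNORM is delivered below, exactly as strrput()
 * would have done had that band-0 message arrived on an empty read
 * queue.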
7137 */ 7138 if (bp->b_band == 0) { 7139 signals = S_RDNORM; 7140 pollwakeups = POLLIN | POLLRDNORM; 7141 } else { 7142 signals = S_RDBAND; 7143 pollwakeups = POLLIN | POLLRDBAND; 7144 } 7145 } 7146 7147 if (pollwakeups != 0) { 7148 if (pollwakeups == (POLLIN | POLLRDNORM)) { 7149 if (!(stp->sd_rput_opt & SR_POLLIN)) 7150 goto no_pollwake; 7151 stp->sd_rput_opt &= ~SR_POLLIN; 7152 } 7153 mutex_exit(&stp->sd_lock); 7154 pollwakeup(&stp->sd_pollist, pollwakeups); 7155 mutex_enter(&stp->sd_lock); 7156 } 7157 no_pollwake: 7158 7159 if (stp->sd_sigflags & signals) 7160 strsendsig(stp->sd_siglist, signals, bp->b_band, 0); 7161 } 7162 mutex_exit(&stp->sd_lock); 7163 7164 rvp->r_val1 = more; 7165 return (error); 7166 #undef _LASTMARK 7167 } 7168 7169 /* 7170 * Get the next message from the read queue. If the message is 7171 * priority, STRPRI will have been set by strrput(). This flag 7172 * should be reset only when the entire message at the front of the 7173 * queue as been consumed. 7174 * 7175 * If uiop is NULL all data is returned in mctlp. 7176 * Note that a NULL uiop implies that FNDELAY and FNONBLOCK are assumed 7177 * not enabled. 7178 * The timeout parameter is in milliseconds; -1 for infinity. 7179 * This routine handles the consolidation private flags: 7180 * MSG_IGNERROR Ignore any stream head error except STPLEX. 7181 * MSG_DELAYERROR Defer the error check until the queue is empty. 7182 * MSG_HOLDSIG Hold signals while waiting for data. 7183 * MSG_IPEEK Only peek at messages. 7184 * MSG_DISCARDTAIL Discard the tail M_DATA part of the message 7185 * that doesn't fit. 7186 * MSG_NOMARK If the message is marked leave it on the queue. 7187 * 7188 * NOTE: strgetmsg and kstrgetmsg have much of the logic in common. 7189 */ 7190 int 7191 kstrgetmsg( 7192 struct vnode *vp, 7193 mblk_t **mctlp, 7194 struct uio *uiop, 7195 unsigned char *prip, 7196 int *flagsp, 7197 clock_t timout, 7198 rval_t *rvp) 7199 { 7200 struct stdata *stp; 7201 mblk_t *bp, *nbp; 7202 mblk_t *savemp = NULL; 7203 mblk_t *savemptail = NULL; 7204 int flags; 7205 uint_t old_sd_flag; 7206 int flg; 7207 int more = 0; 7208 int error = 0; 7209 char first = 1; 7210 uint_t mark; /* Contains MSG*MARK and _LASTMARK */ 7211 #define _LASTMARK 0x8000 /* Distinct from MSG*MARK */ 7212 unsigned char pri = 0; 7213 queue_t *q; 7214 int pr = 0; /* Partial read successful */ 7215 unsigned char type; 7216 7217 TRACE_1(TR_FAC_STREAMS_FR, TR_KSTRGETMSG_ENTER, 7218 "kstrgetmsg:%p", vp); 7219 7220 ASSERT(vp->v_stream); 7221 stp = vp->v_stream; 7222 rvp->r_val1 = 0; 7223 7224 mutex_enter(&stp->sd_lock); 7225 7226 if ((error = i_straccess(stp, JCREAD)) != 0) { 7227 mutex_exit(&stp->sd_lock); 7228 return (error); 7229 } 7230 7231 flags = *flagsp; 7232 if (stp->sd_flag & (STRDERR|STPLEX)) { 7233 if ((stp->sd_flag & STPLEX) || 7234 (flags & (MSG_IGNERROR|MSG_DELAYERROR)) == 0) { 7235 error = strgeterr(stp, STRDERR|STPLEX, 7236 (flags & MSG_IPEEK)); 7237 if (error != 0) { 7238 mutex_exit(&stp->sd_lock); 7239 return (error); 7240 } 7241 } 7242 } 7243 mutex_exit(&stp->sd_lock); 7244 7245 switch (flags & (MSG_HIPRI|MSG_ANY|MSG_BAND)) { 7246 case MSG_HIPRI: 7247 if (*prip != 0) 7248 return (EINVAL); 7249 break; 7250 7251 case MSG_ANY: 7252 case MSG_BAND: 7253 break; 7254 7255 default: 7256 return (EINVAL); 7257 } 7258 7259 retry: 7260 q = _RD(stp->sd_wrq); 7261 mutex_enter(&stp->sd_lock); 7262 old_sd_flag = stp->sd_flag; 7263 mark = 0; 7264 for (;;) { 7265 int done = 0; 7266 int waitflag; 7267 int fmode; 7268 mblk_t *q_first = q->q_first; 7269 7270 /* 7271 
* This section of the code operates just like the code 7272 * in strgetmsg(). There is a comment there about what 7273 * is going on here. 7274 */ 7275 if (!(flags & (MSG_HIPRI|MSG_BAND))) { 7276 /* Asking for normal, band0 data */ 7277 bp = strget(stp, q, uiop, first, &error); 7278 ASSERT(MUTEX_HELD(&stp->sd_lock)); 7279 if (bp != NULL) { 7280 if (bp->b_datap->db_type == M_SIG) { 7281 strsignal_nolock(stp, *bp->b_rptr, 7282 (int32_t)bp->b_band); 7283 continue; 7284 } else { 7285 break; 7286 } 7287 } 7288 if (error != 0) { 7289 goto getmout; 7290 } 7291 /* 7292 * We can't depend on the value of STRPRI here because 7293 * the stream head may be in transit. Therefore, we 7294 * must look at the type of the first message to 7295 * determine if a high priority message is waiting 7296 */ 7297 } else if ((flags & MSG_HIPRI) && q_first != NULL && 7298 q_first->b_datap->db_type >= QPCTL && 7299 (bp = getq_noenab(q, 0)) != NULL) { 7300 ASSERT(bp->b_datap->db_type >= QPCTL); 7301 break; 7302 } else if ((flags & MSG_BAND) && q_first != NULL && 7303 ((q_first->b_band >= *prip) || 7304 q_first->b_datap->db_type >= QPCTL) && 7305 (bp = getq_noenab(q, 0)) != NULL) { 7306 /* 7307 * Asked for at least band "prip" and got either at 7308 * least that band or a hipri message. 7309 */ 7310 ASSERT(bp->b_band >= *prip || 7311 bp->b_datap->db_type >= QPCTL); 7312 if (bp->b_datap->db_type == M_SIG) { 7313 strsignal_nolock(stp, *bp->b_rptr, 7314 (int32_t)bp->b_band); 7315 continue; 7316 } else { 7317 break; 7318 } 7319 } 7320 7321 /* No data. Time to sleep? */ 7322 qbackenable(q, 0); 7323 7324 /* 7325 * Delayed error notification? 7326 */ 7327 if ((stp->sd_flag & (STRDERR|STPLEX)) && 7328 (flags & (MSG_IGNERROR|MSG_DELAYERROR)) == MSG_DELAYERROR) { 7329 error = strgeterr(stp, STRDERR|STPLEX, 7330 (flags & MSG_IPEEK)); 7331 if (error != 0) { 7332 mutex_exit(&stp->sd_lock); 7333 return (error); 7334 } 7335 } 7336 7337 /* 7338 * If STRHUP or STREOF, return 0 length control and data. 7339 * If a read(fd,buf,0) has been done, do not sleep, just 7340 * return. 7341 * 7342 * If mctlp == NULL and uiop == NULL, then the code will 7343 * do the strwaitq. This is an understood way of saying 7344 * sleep "polling" until a message is received.
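 *
 * By contrast, a caller that does want the message back typically
 * passes a NULL uiop and a non-NULL mctlp (see the function header:
 * a NULL uiop returns everything through mctlp).  A minimal,
 * illustrative sketch (names are not from this file):
 *
 *	mblk_t *mp = NULL;
 *	unsigned char pri = 0;
 *	int flags = MSG_ANY;
 *	rval_t rval;
 *
 *	error = kstrgetmsg(vp, &mp, NULL, &pri, &flags, -1, &rval);
 *
 * A timout of -1 waits indefinitely, and FNDELAY/FNONBLOCK are
 * assumed off when uiop is NULL.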
7345 */ 7346 if ((stp->sd_flag & (STRHUP|STREOF)) || 7347 (uiop != NULL && uiop->uio_resid == 0)) { 7348 if (mctlp != NULL) 7349 *mctlp = NULL; 7350 *flagsp = 0; 7351 mutex_exit(&stp->sd_lock); 7352 return (0); 7353 } 7354 7355 waitflag = GETWAIT; 7356 if (flags & 7357 (MSG_HOLDSIG|MSG_IGNERROR|MSG_IPEEK|MSG_DELAYERROR)) { 7358 if (flags & MSG_HOLDSIG) 7359 waitflag |= STR_NOSIG; 7360 if (flags & MSG_IGNERROR) 7361 waitflag |= STR_NOERROR; 7362 if (flags & MSG_IPEEK) 7363 waitflag |= STR_PEEK; 7364 if (flags & MSG_DELAYERROR) 7365 waitflag |= STR_DELAYERR; 7366 } 7367 if (uiop != NULL) 7368 fmode = uiop->uio_fmode; 7369 else 7370 fmode = 0; 7371 7372 TRACE_2(TR_FAC_STREAMS_FR, TR_KSTRGETMSG_WAIT, 7373 "kstrgetmsg calls strwaitq:%p, %p", 7374 vp, uiop); 7375 if (((error = strwaitq(stp, waitflag, (ssize_t)0, 7376 fmode, timout, &done))) != 0 || done) { 7377 TRACE_2(TR_FAC_STREAMS_FR, TR_KSTRGETMSG_DONE, 7378 "kstrgetmsg error or done:%p, %p", 7379 vp, uiop); 7380 mutex_exit(&stp->sd_lock); 7381 return (error); 7382 } 7383 TRACE_2(TR_FAC_STREAMS_FR, TR_KSTRGETMSG_AWAKE, 7384 "kstrgetmsg awakes:%p, %p", vp, uiop); 7385 if ((error = i_straccess(stp, JCREAD)) != 0) { 7386 mutex_exit(&stp->sd_lock); 7387 return (error); 7388 } 7389 first = 0; 7390 } 7391 ASSERT(bp != NULL); 7392 /* 7393 * Extract any mark information. If the message is not completely 7394 * consumed this information will be put in the mblk 7395 * that is putback. 7396 * If MSGMARKNEXT is set and the message is completely consumed 7397 * the STRATMARK flag will be set below. Likewise, if 7398 * MSGNOTMARKNEXT is set and the message is 7399 * completely consumed STRNOTATMARK will be set. 7400 */ 7401 mark = bp->b_flag & (MSGMARK | MSGMARKNEXT | MSGNOTMARKNEXT); 7402 ASSERT((mark & (MSGMARKNEXT|MSGNOTMARKNEXT)) != 7403 (MSGMARKNEXT|MSGNOTMARKNEXT)); 7404 pri = bp->b_band; 7405 if (mark != 0) { 7406 /* 7407 * If the caller doesn't want the mark return. 7408 * Used to implement MSG_WAITALL in sockets. 7409 */ 7410 if (flags & MSG_NOMARK) { 7411 putback(stp, q, bp, pri); 7412 qbackenable(q, pri); 7413 mutex_exit(&stp->sd_lock); 7414 return (EWOULDBLOCK); 7415 } 7416 if (bp == stp->sd_mark) { 7417 mark |= _LASTMARK; 7418 stp->sd_mark = NULL; 7419 } 7420 } 7421 7422 /* 7423 * keep track of the first message type 7424 */ 7425 type = bp->b_datap->db_type; 7426 7427 if (bp->b_datap->db_type == M_PASSFP) { 7428 if ((mark & _LASTMARK) && (stp->sd_mark == NULL)) 7429 stp->sd_mark = bp; 7430 bp->b_flag |= mark & ~_LASTMARK; 7431 putback(stp, q, bp, pri); 7432 qbackenable(q, pri); 7433 mutex_exit(&stp->sd_lock); 7434 return (EBADMSG); 7435 } 7436 ASSERT(type != M_SIG); 7437 7438 if (flags & MSG_IPEEK) { 7439 /* 7440 * Clear any struioflag - we do the uiomove over again 7441 * when peeking since it simplifies the code. 7442 * 7443 * Dup the message and put the original back on the queue. 7444 * If dupmsg() fails, try again with copymsg() to see if 7445 * there is indeed a shortage of memory. dupmsg() may fail 7446 * if db_ref in any of the messages reaches its limit. 7447 */ 7448 7449 ASSERT(!(bp->b_datap->db_flags & DBLK_UIOA)); 7450 if ((nbp = dupmsg(bp)) == NULL && (nbp = copymsg(bp)) == NULL) { 7451 /* 7452 * Restore the state of the stream head since we 7453 * need to drop sd_lock (strwaitbuf is sleeping). 
7454 */ 7455 size_t size = msgdsize(bp); 7456 7457 if ((mark & _LASTMARK) && (stp->sd_mark == NULL)) 7458 stp->sd_mark = bp; 7459 bp->b_flag |= mark & ~_LASTMARK; 7460 putback(stp, q, bp, pri); 7461 mutex_exit(&stp->sd_lock); 7462 error = strwaitbuf(size, BPRI_HI); 7463 if (error) { 7464 /* 7465 * There is no net change to the queue thus 7466 * no need to qbackenable. 7467 */ 7468 return (error); 7469 } 7470 goto retry; 7471 } 7472 7473 if ((mark & _LASTMARK) && (stp->sd_mark == NULL)) 7474 stp->sd_mark = bp; 7475 bp->b_flag |= mark & ~_LASTMARK; 7476 putback(stp, q, bp, pri); 7477 bp = nbp; 7478 } 7479 7480 /* 7481 * Set this flag so strrput will not generate signals. Need to 7482 * make sure this flag is cleared before leaving this routine 7483 * else signals will stop being sent. 7484 */ 7485 stp->sd_flag |= STRGETINPROG; 7486 mutex_exit(&stp->sd_lock); 7487 7488 if ((stp->sd_rputdatafunc != NULL) && (DB_TYPE(bp) == M_DATA)) { 7489 mblk_t *tmp, *prevmp; 7490 7491 /* 7492 * Put first non-data mblk back to stream head and 7493 * cut the mblk chain so sd_rputdatafunc only sees 7494 * M_DATA mblks. We can skip the first mblk since it 7495 * is M_DATA according to the condition above. 7496 */ 7497 for (prevmp = bp, tmp = bp->b_cont; tmp != NULL; 7498 prevmp = tmp, tmp = tmp->b_cont) { 7499 if (DB_TYPE(tmp) != M_DATA) { 7500 prevmp->b_cont = NULL; 7501 mutex_enter(&stp->sd_lock); 7502 putback(stp, q, tmp, tmp->b_band); 7503 mutex_exit(&stp->sd_lock); 7504 break; 7505 } 7506 } 7507 7508 ASSERT(!(bp->b_datap->db_flags & DBLK_UIOA)); 7509 bp = (stp->sd_rputdatafunc)(stp->sd_vnode, bp, 7510 NULL, NULL, NULL, NULL); 7511 7512 if (bp == NULL) 7513 goto retry; 7514 } 7515 7516 if (STREAM_NEEDSERVICE(stp)) 7517 stream_runservice(stp); 7518 7519 /* 7520 * Set HIPRI flag if message is priority. 7521 */ 7522 if (type >= QPCTL) 7523 flg = MSG_HIPRI; 7524 else 7525 flg = MSG_BAND; 7526 7527 /* 7528 * First process PROTO or PCPROTO blocks, if any. 7529 */ 7530 if (mctlp != NULL && type != M_DATA) { 7531 mblk_t *nbp; 7532 7533 *mctlp = bp; 7534 while (bp->b_cont && bp->b_cont->b_datap->db_type != M_DATA) 7535 bp = bp->b_cont; 7536 nbp = bp->b_cont; 7537 bp->b_cont = NULL; 7538 bp = nbp; 7539 } 7540 7541 if (bp && bp->b_datap->db_type != M_DATA) { 7542 /* 7543 * More PROTO blocks in msg. Will only happen if mctlp is NULL. 7544 */ 7545 more |= MORECTL; 7546 savemp = bp; 7547 while (bp && bp->b_datap->db_type != M_DATA) { 7548 savemptail = bp; 7549 bp = bp->b_cont; 7550 } 7551 savemptail->b_cont = NULL; 7552 } 7553 7554 /* 7555 * Now process DATA blocks, if any. 7556 */ 7557 if (uiop == NULL) { 7558 /* Append data to tail of mctlp */ 7559 7560 ASSERT(bp == NULL || !(bp->b_datap->db_flags & DBLK_UIOA)); 7561 if (mctlp != NULL) { 7562 mblk_t **mpp = mctlp; 7563 7564 while (*mpp != NULL) 7565 mpp = &((*mpp)->b_cont); 7566 *mpp = bp; 7567 bp = NULL; 7568 } 7569 } else if (bp && (bp->b_datap->db_flags & DBLK_UIOA)) { 7570 /* 7571 * A uioa mblk_t chain, as uio processing has already 7572 * been done we simple skip over processing. 7573 */ 7574 bp = NULL; 7575 pr = 0; 7576 7577 } else if (uiop->uio_resid >= 0 && bp) { 7578 size_t oldresid = uiop->uio_resid; 7579 7580 /* 7581 * If a streams message is likely to consist 7582 * of many small mblks, it is pulled up into 7583 * one continuous chunk of memory. 7584 * see longer comment at top of page 7585 * by mblk_pull_len declaration. 
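 *
 * For example, a message chained out of many small (say 40-byte)
 * mblks has a first block shorter than mblk_pull_len, so the
 * pullupmsg() below coalesces the whole chain and the subsequent
 * struiocopyout() does one large copyout rather than one per mblk.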
7586 */ 7587 7588 if (MBLKL(bp) < mblk_pull_len) { 7589 (void) pullupmsg(bp, -1); 7590 } 7591 7592 bp = struiocopyout(bp, uiop, &error); 7593 if (error != 0) { 7594 if (mctlp != NULL) { 7595 freemsg(*mctlp); 7596 *mctlp = NULL; 7597 } else 7598 freemsg(savemp); 7599 mutex_enter(&stp->sd_lock); 7600 /* 7601 * clear stream head hi pri flag based on 7602 * first message 7603 */ 7604 if (!(flags & MSG_IPEEK) && (type >= QPCTL)) { 7605 ASSERT(type == M_PCPROTO); 7606 stp->sd_flag &= ~STRPRI; 7607 } 7608 more = 0; 7609 goto getmout; 7610 } 7611 /* 7612 * (pr == 1) indicates a partial read. 7613 */ 7614 if (oldresid > uiop->uio_resid) 7615 pr = 1; 7616 } 7617 7618 if (bp) { /* more data blocks in msg */ 7619 more |= MOREDATA; 7620 if (savemp) 7621 savemptail->b_cont = bp; 7622 else 7623 savemp = bp; 7624 } 7625 7626 mutex_enter(&stp->sd_lock); 7627 if (savemp) { 7628 if (flags & (MSG_IPEEK|MSG_DISCARDTAIL)) { 7629 /* 7630 * When MSG_DISCARDTAIL is set or 7631 * when peeking discard any tail. When peeking this 7632 * is the tail of the dup that was copied out - the 7633 * message has already been putback on the queue. 7634 * Return MOREDATA to the caller even though the data 7635 * is discarded. This is used by sockets (to 7636 * set MSG_TRUNC). 7637 */ 7638 freemsg(savemp); 7639 if (!(flags & MSG_IPEEK) && (type >= QPCTL)) { 7640 ASSERT(type == M_PCPROTO); 7641 stp->sd_flag &= ~STRPRI; 7642 } 7643 } else if (pr && (savemp->b_datap->db_type == M_DATA) && 7644 msgnodata(savemp)) { 7645 /* 7646 * Avoid queuing a zero-length tail part of 7647 * a message. pr=1 indicates that we read some of 7648 * the message. 7649 */ 7650 freemsg(savemp); 7651 more &= ~MOREDATA; 7652 if (type >= QPCTL) { 7653 ASSERT(type == M_PCPROTO); 7654 stp->sd_flag &= ~STRPRI; 7655 } 7656 } else { 7657 savemp->b_band = pri; 7658 /* 7659 * If the first message was HIPRI and the one we're 7660 * putting back isn't, then clear STRPRI, otherwise 7661 * set STRPRI again. Note that we must set STRPRI 7662 * again since the flush logic in strrput_nondata() 7663 * may have cleared it while we had sd_lock dropped. 7664 */ 7665 7666 ASSERT(!(savemp->b_datap->db_flags & DBLK_UIOA)); 7667 if (type >= QPCTL) { 7668 ASSERT(type == M_PCPROTO); 7669 if (queclass(savemp) < QPCTL) 7670 stp->sd_flag &= ~STRPRI; 7671 else 7672 stp->sd_flag |= STRPRI; 7673 } else if (queclass(savemp) >= QPCTL) { 7674 /* 7675 * The first message was not a HIPRI message, 7676 * but the one we are about to putback is. 7677 * For simplicitly, we do not allow for HIPRI 7678 * messages to be embedded in the message 7679 * body, so just force it to same type as 7680 * first message. 7681 */ 7682 ASSERT(type == M_DATA || type == M_PROTO); 7683 ASSERT(savemp->b_datap->db_type == M_PCPROTO); 7684 savemp->b_datap->db_type = type; 7685 } 7686 if (mark != 0) { 7687 if ((mark & _LASTMARK) && 7688 (stp->sd_mark == NULL)) { 7689 /* 7690 * If another marked message arrived 7691 * while sd_lock was not held sd_mark 7692 * would be non-NULL. 7693 */ 7694 stp->sd_mark = savemp; 7695 } 7696 savemp->b_flag |= mark & ~_LASTMARK; 7697 } 7698 putback(stp, q, savemp, pri); 7699 } 7700 } else if (!(flags & MSG_IPEEK)) { 7701 /* 7702 * The complete message was consumed. 7703 * 7704 * If another M_PCPROTO arrived while sd_lock was not held 7705 * it would have been discarded since STRPRI was still set. 7706 * 7707 * Move the MSG*MARKNEXT information 7708 * to the stream head just in case 7709 * the read queue becomes empty. 
7710 * clear stream head hi pri flag based on 7711 * first message 7712 * 7713 * If the stream head was at the mark 7714 * (STRATMARK) before we dropped sd_lock above 7715 * and some data was consumed then we have 7716 * moved past the mark thus STRATMARK is 7717 * cleared. However, if a message arrived in 7718 * strrput during the copyout above causing 7719 * STRATMARK to be set we can not clear that 7720 * flag. 7721 * XXX A "perimeter" would help by single-threading strrput, 7722 * strread, strgetmsg and kstrgetmsg. 7723 */ 7724 if (type >= QPCTL) { 7725 ASSERT(type == M_PCPROTO); 7726 stp->sd_flag &= ~STRPRI; 7727 } 7728 if (mark & (MSGMARKNEXT|MSGNOTMARKNEXT|MSGMARK)) { 7729 if (mark & MSGMARKNEXT) { 7730 stp->sd_flag &= ~STRNOTATMARK; 7731 stp->sd_flag |= STRATMARK; 7732 } else if (mark & MSGNOTMARKNEXT) { 7733 stp->sd_flag &= ~STRATMARK; 7734 stp->sd_flag |= STRNOTATMARK; 7735 } else { 7736 stp->sd_flag &= ~(STRATMARK|STRNOTATMARK); 7737 } 7738 } else if (pr && (old_sd_flag & STRATMARK)) { 7739 stp->sd_flag &= ~STRATMARK; 7740 } 7741 } 7742 7743 *flagsp = flg; 7744 *prip = pri; 7745 7746 /* 7747 * Getmsg cleanup processing - if the state of the queue has changed 7748 * some signals may need to be sent and/or poll awakened. 7749 */ 7750 getmout: 7751 qbackenable(q, pri); 7752 7753 /* 7754 * We dropped the stream head lock above. Send all M_SIG messages 7755 * before processing stream head for SIGPOLL messages. 7756 */ 7757 ASSERT(MUTEX_HELD(&stp->sd_lock)); 7758 while ((bp = q->q_first) != NULL && 7759 (bp->b_datap->db_type == M_SIG)) { 7760 /* 7761 * sd_lock is held so the content of the read queue can not 7762 * change. 7763 */ 7764 bp = getq(q); 7765 ASSERT(bp != NULL && bp->b_datap->db_type == M_SIG); 7766 7767 strsignal_nolock(stp, *bp->b_rptr, (int32_t)bp->b_band); 7768 mutex_exit(&stp->sd_lock); 7769 freemsg(bp); 7770 if (STREAM_NEEDSERVICE(stp)) 7771 stream_runservice(stp); 7772 mutex_enter(&stp->sd_lock); 7773 } 7774 7775 /* 7776 * stream head cannot change while we make the determination 7777 * whether or not to send a signal. Drop the flag to allow strrput 7778 * to send firstmsgsigs again. 7779 */ 7780 stp->sd_flag &= ~STRGETINPROG; 7781 7782 /* 7783 * If the type of message at the front of the queue changed 7784 * due to the receive the appropriate signals and pollwakeup events 7785 * are generated. The type of changes are: 7786 * Processed a hipri message, q_first is not hipri. 7787 * Processed a band X message, and q_first is band Y. 7788 * The generated signals and pollwakeups are identical to what 7789 * strrput() generates should the message that is now on q_first 7790 * arrive to an empty read queue. 7791 * 7792 * Note: only strrput will send a signal for a hipri message. 7793 */ 7794 if ((bp = q->q_first) != NULL && !(stp->sd_flag & STRPRI)) { 7795 strsigset_t signals = 0; 7796 strpollset_t pollwakeups = 0; 7797 7798 if (flg & MSG_HIPRI) { 7799 /* 7800 * Removed a hipri message. Regular data at 7801 * the front of the queue. 7802 */ 7803 if (bp->b_band == 0) { 7804 signals = S_INPUT | S_RDNORM; 7805 pollwakeups = POLLIN | POLLRDNORM; 7806 } else { 7807 signals = S_INPUT | S_RDBAND; 7808 pollwakeups = POLLIN | POLLRDBAND; 7809 } 7810 } else if (pri != bp->b_band) { 7811 /* 7812 * The band is different for the new q_first. 
7813 */ 7814 if (bp->b_band == 0) { 7815 signals = S_RDNORM; 7816 pollwakeups = POLLIN | POLLRDNORM; 7817 } else { 7818 signals = S_RDBAND; 7819 pollwakeups = POLLIN | POLLRDBAND; 7820 } 7821 } 7822 7823 if (pollwakeups != 0) { 7824 if (pollwakeups == (POLLIN | POLLRDNORM)) { 7825 if (!(stp->sd_rput_opt & SR_POLLIN)) 7826 goto no_pollwake; 7827 stp->sd_rput_opt &= ~SR_POLLIN; 7828 } 7829 mutex_exit(&stp->sd_lock); 7830 pollwakeup(&stp->sd_pollist, pollwakeups); 7831 mutex_enter(&stp->sd_lock); 7832 } 7833 no_pollwake: 7834 7835 if (stp->sd_sigflags & signals) 7836 strsendsig(stp->sd_siglist, signals, bp->b_band, 0); 7837 } 7838 mutex_exit(&stp->sd_lock); 7839 7840 rvp->r_val1 = more; 7841 return (error); 7842 #undef _LASTMARK 7843 } 7844 7845 /* 7846 * Put a message downstream. 7847 * 7848 * NOTE: strputmsg and kstrputmsg have much of the logic in common. 7849 */ 7850 int 7851 strputmsg( 7852 struct vnode *vp, 7853 struct strbuf *mctl, 7854 struct strbuf *mdata, 7855 unsigned char pri, 7856 int flag, 7857 int fmode) 7858 { 7859 struct stdata *stp; 7860 queue_t *wqp; 7861 mblk_t *mp; 7862 ssize_t msgsize; 7863 ssize_t rmin, rmax; 7864 int error; 7865 struct uio uios; 7866 struct uio *uiop = &uios; 7867 struct iovec iovs; 7868 int xpg4 = 0; 7869 7870 ASSERT(vp->v_stream); 7871 stp = vp->v_stream; 7872 wqp = stp->sd_wrq; 7873 7874 /* 7875 * If it is an XPG4 application, we need to send 7876 * SIGPIPE below 7877 */ 7878 7879 xpg4 = (flag & MSG_XPG4) ? 1 : 0; 7880 flag &= ~MSG_XPG4; 7881 7882 if (audit_active) 7883 audit_strputmsg(vp, mctl, mdata, pri, flag, fmode); 7884 7885 mutex_enter(&stp->sd_lock); 7886 7887 if ((error = i_straccess(stp, JCWRITE)) != 0) { 7888 mutex_exit(&stp->sd_lock); 7889 return (error); 7890 } 7891 7892 if (stp->sd_flag & (STWRERR|STRHUP|STPLEX)) { 7893 error = strwriteable(stp, B_FALSE, xpg4); 7894 if (error != 0) { 7895 mutex_exit(&stp->sd_lock); 7896 return (error); 7897 } 7898 } 7899 7900 mutex_exit(&stp->sd_lock); 7901 7902 /* 7903 * Check for legal flag value. 7904 */ 7905 switch (flag) { 7906 case MSG_HIPRI: 7907 if ((mctl->len < 0) || (pri != 0)) 7908 return (EINVAL); 7909 break; 7910 case MSG_BAND: 7911 break; 7912 7913 default: 7914 return (EINVAL); 7915 } 7916 7917 TRACE_1(TR_FAC_STREAMS_FR, TR_STRPUTMSG_IN, 7918 "strputmsg in:stp %p", stp); 7919 7920 /* get these values from those cached in the stream head */ 7921 rmin = stp->sd_qn_minpsz; 7922 rmax = stp->sd_qn_maxpsz; 7923 7924 /* 7925 * Make sure ctl and data sizes together fall within the 7926 * limits of the max and min receive packet sizes and do 7927 * not exceed system limit. 7928 */ 7929 ASSERT((rmax >= 0) || (rmax == INFPSZ)); 7930 if (rmax == 0) { 7931 return (ERANGE); 7932 } 7933 /* 7934 * Use the MAXIMUM of sd_maxblk and q_maxpsz. 7935 * Needed to prevent partial failures in the strmakedata loop. 
7936 */ 7937 if (stp->sd_maxblk != INFPSZ && rmax != INFPSZ && rmax < stp->sd_maxblk) 7938 rmax = stp->sd_maxblk; 7939 7940 if ((msgsize = mdata->len) < 0) { 7941 msgsize = 0; 7942 rmin = 0; /* no range check for NULL data part */ 7943 } 7944 if ((msgsize < rmin) || 7945 ((msgsize > rmax) && (rmax != INFPSZ)) || 7946 (mctl->len > strctlsz)) { 7947 return (ERANGE); 7948 } 7949 7950 /* 7951 * Setup uio and iov for data part 7952 */ 7953 iovs.iov_base = mdata->buf; 7954 iovs.iov_len = msgsize; 7955 uios.uio_iov = &iovs; 7956 uios.uio_iovcnt = 1; 7957 uios.uio_loffset = 0; 7958 uios.uio_segflg = UIO_USERSPACE; 7959 uios.uio_fmode = fmode; 7960 uios.uio_extflg = UIO_COPY_DEFAULT; 7961 uios.uio_resid = msgsize; 7962 uios.uio_offset = 0; 7963 7964 /* Ignore flow control in strput for HIPRI */ 7965 if (flag & MSG_HIPRI) 7966 flag |= MSG_IGNFLOW; 7967 7968 for (;;) { 7969 int done = 0; 7970 7971 /* 7972 * strput will always free the ctl mblk - even when strput 7973 * fails. 7974 */ 7975 if ((error = strmakectl(mctl, flag, fmode, &mp)) != 0) { 7976 TRACE_3(TR_FAC_STREAMS_FR, TR_STRPUTMSG_OUT, 7977 "strputmsg out:stp %p out %d error %d", 7978 stp, 1, error); 7979 return (error); 7980 } 7981 /* 7982 * Verify that the whole message can be transferred by 7983 * strput. 7984 */ 7985 ASSERT(stp->sd_maxblk == INFPSZ || 7986 stp->sd_maxblk >= mdata->len); 7987 7988 msgsize = mdata->len; 7989 error = strput(stp, mp, uiop, &msgsize, 0, pri, flag); 7990 mdata->len = msgsize; 7991 7992 if (error == 0) 7993 break; 7994 7995 if (error != EWOULDBLOCK) 7996 goto out; 7997 7998 mutex_enter(&stp->sd_lock); 7999 /* 8000 * Check for a missed wakeup. 8001 * Needed since strput did not hold sd_lock across 8002 * the canputnext. 8003 */ 8004 if (bcanputnext(wqp, pri)) { 8005 /* Try again */ 8006 mutex_exit(&stp->sd_lock); 8007 continue; 8008 } 8009 TRACE_2(TR_FAC_STREAMS_FR, TR_STRPUTMSG_WAIT, 8010 "strputmsg wait:stp %p waits pri %d", stp, pri); 8011 if (((error = strwaitq(stp, WRITEWAIT, (ssize_t)0, fmode, -1, 8012 &done)) != 0) || done) { 8013 mutex_exit(&stp->sd_lock); 8014 TRACE_3(TR_FAC_STREAMS_FR, TR_STRPUTMSG_OUT, 8015 "strputmsg out:q %p out %d error %d", 8016 stp, 0, error); 8017 return (error); 8018 } 8019 TRACE_1(TR_FAC_STREAMS_FR, TR_STRPUTMSG_WAKE, 8020 "strputmsg wake:stp %p wakes", stp); 8021 if ((error = i_straccess(stp, JCWRITE)) != 0) { 8022 mutex_exit(&stp->sd_lock); 8023 return (error); 8024 } 8025 mutex_exit(&stp->sd_lock); 8026 } 8027 out: 8028 /* 8029 * For historic reasons, applications expect EAGAIN 8030 * when data mblk could not be allocated. so change 8031 * ENOMEM back to EAGAIN 8032 */ 8033 if (error == ENOMEM) 8034 error = EAGAIN; 8035 TRACE_3(TR_FAC_STREAMS_FR, TR_STRPUTMSG_OUT, 8036 "strputmsg out:stp %p out %d error %d", stp, 2, error); 8037 return (error); 8038 } 8039 8040 /* 8041 * Put a message downstream. 8042 * Can send only an M_PROTO/M_PCPROTO by passing in a NULL uiop. 8043 * The fmode flag (NDELAY, NONBLOCK) is the or of the flags in the uio 8044 * and the fmode parameter. 8045 * 8046 * This routine handles the consolidation private flags: 8047 * MSG_IGNERROR Ignore any stream head error except STPLEX. 8048 * MSG_HOLDSIG Hold signals while waiting for data. 8049 * MSG_IGNFLOW Don't check streams flow control. 8050 * 8051 * NOTE: strputmsg and kstrputmsg have much of the logic in common. 
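 *
 * Minimal, illustrative call (a sketch, not taken from this file): a
 * sockfs-style sender of a TPI control message typically bypasses
 * flow control and holds signals, e.g.
 *
 *	error = kstrputmsg(vp, mp, NULL, 0, 0,
 *	    MSG_BAND | MSG_IGNFLOW | MSG_HOLDSIG, 0);
 *
 * With a NULL uiop only the mctl mblk is sent; msgsize is ignored and
 * reset to -1 below.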
8052 */ 8053 int 8054 kstrputmsg( 8055 struct vnode *vp, 8056 mblk_t *mctl, 8057 struct uio *uiop, 8058 ssize_t msgsize, 8059 unsigned char pri, 8060 int flag, 8061 int fmode) 8062 { 8063 struct stdata *stp; 8064 queue_t *wqp; 8065 ssize_t rmin, rmax; 8066 int error; 8067 8068 ASSERT(vp->v_stream); 8069 stp = vp->v_stream; 8070 wqp = stp->sd_wrq; 8071 if (audit_active) 8072 audit_strputmsg(vp, NULL, NULL, pri, flag, fmode); 8073 if (mctl == NULL) 8074 return (EINVAL); 8075 8076 mutex_enter(&stp->sd_lock); 8077 8078 if ((error = i_straccess(stp, JCWRITE)) != 0) { 8079 mutex_exit(&stp->sd_lock); 8080 freemsg(mctl); 8081 return (error); 8082 } 8083 8084 if ((stp->sd_flag & STPLEX) || !(flag & MSG_IGNERROR)) { 8085 if (stp->sd_flag & (STWRERR|STRHUP|STPLEX)) { 8086 error = strwriteable(stp, B_FALSE, B_TRUE); 8087 if (error != 0) { 8088 mutex_exit(&stp->sd_lock); 8089 freemsg(mctl); 8090 return (error); 8091 } 8092 } 8093 } 8094 8095 mutex_exit(&stp->sd_lock); 8096 8097 /* 8098 * Check for legal flag value. 8099 */ 8100 switch (flag & (MSG_HIPRI|MSG_BAND|MSG_ANY)) { 8101 case MSG_HIPRI: 8102 if (pri != 0) { 8103 freemsg(mctl); 8104 return (EINVAL); 8105 } 8106 break; 8107 case MSG_BAND: 8108 break; 8109 default: 8110 freemsg(mctl); 8111 return (EINVAL); 8112 } 8113 8114 TRACE_1(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_IN, 8115 "kstrputmsg in:stp %p", stp); 8116 8117 /* get these values from those cached in the stream head */ 8118 rmin = stp->sd_qn_minpsz; 8119 rmax = stp->sd_qn_maxpsz; 8120 8121 /* 8122 * Make sure ctl and data sizes together fall within the 8123 * limits of the max and min receive packet sizes and do 8124 * not exceed system limit. 8125 */ 8126 ASSERT((rmax >= 0) || (rmax == INFPSZ)); 8127 if (rmax == 0) { 8128 freemsg(mctl); 8129 return (ERANGE); 8130 } 8131 /* 8132 * Use the MAXIMUM of sd_maxblk and q_maxpsz. 8133 * Needed to prevent partial failures in the strmakedata loop. 8134 */ 8135 if (stp->sd_maxblk != INFPSZ && rmax != INFPSZ && rmax < stp->sd_maxblk) 8136 rmax = stp->sd_maxblk; 8137 8138 if (uiop == NULL) { 8139 msgsize = -1; 8140 rmin = -1; /* no range check for NULL data part */ 8141 } else { 8142 /* Use uio flags as well as the fmode parameter flags */ 8143 fmode |= uiop->uio_fmode; 8144 8145 if ((msgsize < rmin) || 8146 ((msgsize > rmax) && (rmax != INFPSZ))) { 8147 freemsg(mctl); 8148 return (ERANGE); 8149 } 8150 } 8151 8152 /* Ignore flow control in strput for HIPRI */ 8153 if (flag & MSG_HIPRI) 8154 flag |= MSG_IGNFLOW; 8155 8156 for (;;) { 8157 int done = 0; 8158 int waitflag; 8159 mblk_t *mp; 8160 8161 /* 8162 * strput will always free the ctl mblk - even when strput 8163 * fails. If MSG_IGNFLOW is set then any error returned 8164 * will cause us to break the loop, so we don't need a copy 8165 * of the message. If MSG_IGNFLOW is not set, then we can 8166 * get hit by flow control and be forced to try again. In 8167 * this case we need to have a copy of the message. We 8168 * do this using copymsg since the message may get modified 8169 * by something below us. 8170 * 8171 * We've observed that many TPI providers do not check db_ref 8172 * on the control messages but blindly reuse them for the 8173 * T_OK_ACK/T_ERROR_ACK. Thus using copymsg is more 8174 * friendly to such providers than using dupmsg. Also, note 8175 * that sockfs uses MSG_IGNFLOW for all TPI control messages. 8176 * Only data messages are subject to flow control, hence 8177 * subject to this copymsg. 
8178 */ 8179 if (flag & MSG_IGNFLOW) { 8180 mp = mctl; 8181 mctl = NULL; 8182 } else { 8183 do { 8184 /* 8185 * If a message has a free pointer, the message 8186 * must be dupmsg to maintain this pointer. 8187 * Code using this facility must be sure 8188 * that modules below will not change the 8189 * contents of the dblk without checking db_ref 8190 * first. If db_ref is > 1, then the module 8191 * needs to do a copymsg first. Otherwise, 8192 * the contents of the dblk may become 8193 * inconsistent because the freesmg/freeb below 8194 * may end up calling atomic_add_32_nv. 8195 * The atomic_add_32_nv in freeb (accessing 8196 * all of db_ref, db_type, db_flags, and 8197 * db_struioflag) does not prevent other threads 8198 * from concurrently trying to modify e.g. 8199 * db_type. 8200 */ 8201 if (mctl->b_datap->db_frtnp != NULL) 8202 mp = dupmsg(mctl); 8203 else 8204 mp = copymsg(mctl); 8205 8206 if (mp != NULL) 8207 break; 8208 8209 error = strwaitbuf(msgdsize(mctl), BPRI_MED); 8210 if (error) { 8211 freemsg(mctl); 8212 return (error); 8213 } 8214 } while (mp == NULL); 8215 } 8216 /* 8217 * Verify that all of msgsize can be transferred by 8218 * strput. 8219 */ 8220 ASSERT(stp->sd_maxblk == INFPSZ || stp->sd_maxblk >= msgsize); 8221 error = strput(stp, mp, uiop, &msgsize, 0, pri, flag); 8222 if (error == 0) 8223 break; 8224 8225 if (error != EWOULDBLOCK) 8226 goto out; 8227 8228 /* 8229 * IF MSG_IGNFLOW is set we should have broken out of loop 8230 * above. 8231 */ 8232 ASSERT(!(flag & MSG_IGNFLOW)); 8233 mutex_enter(&stp->sd_lock); 8234 /* 8235 * Check for a missed wakeup. 8236 * Needed since strput did not hold sd_lock across 8237 * the canputnext. 8238 */ 8239 if (bcanputnext(wqp, pri)) { 8240 /* Try again */ 8241 mutex_exit(&stp->sd_lock); 8242 continue; 8243 } 8244 TRACE_2(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_WAIT, 8245 "kstrputmsg wait:stp %p waits pri %d", stp, pri); 8246 8247 waitflag = WRITEWAIT; 8248 if (flag & (MSG_HOLDSIG|MSG_IGNERROR)) { 8249 if (flag & MSG_HOLDSIG) 8250 waitflag |= STR_NOSIG; 8251 if (flag & MSG_IGNERROR) 8252 waitflag |= STR_NOERROR; 8253 } 8254 if (((error = strwaitq(stp, waitflag, 8255 (ssize_t)0, fmode, -1, &done)) != 0) || done) { 8256 mutex_exit(&stp->sd_lock); 8257 TRACE_3(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_OUT, 8258 "kstrputmsg out:stp %p out %d error %d", 8259 stp, 0, error); 8260 freemsg(mctl); 8261 return (error); 8262 } 8263 TRACE_1(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_WAKE, 8264 "kstrputmsg wake:stp %p wakes", stp); 8265 if ((error = i_straccess(stp, JCWRITE)) != 0) { 8266 mutex_exit(&stp->sd_lock); 8267 freemsg(mctl); 8268 return (error); 8269 } 8270 mutex_exit(&stp->sd_lock); 8271 } 8272 out: 8273 freemsg(mctl); 8274 /* 8275 * For historic reasons, applications expect EAGAIN 8276 * when data mblk could not be allocated. so change 8277 * ENOMEM back to EAGAIN 8278 */ 8279 if (error == ENOMEM) 8280 error = EAGAIN; 8281 TRACE_3(TR_FAC_STREAMS_FR, TR_KSTRPUTMSG_OUT, 8282 "kstrputmsg out:stp %p out %d error %d", stp, 2, error); 8283 return (error); 8284 } 8285 8286 /* 8287 * Determines whether the necessary conditions are set on a stream 8288 * for it to be readable, writeable, or have exceptions. 8289 * 8290 * strpoll handles the consolidation private events: 8291 * POLLNOERR Do not return POLLERR even if there are stream 8292 * head errors. 8293 * Used by sockfs. 8294 * POLLRDDATA Do not return POLLIN unless at least one message on 8295 * the queue contains one or more M_DATA mblks. 
Thus 8296 * when this flag is set a queue with only 8297 * M_PROTO/M_PCPROTO mblks does not return POLLIN. 8298 * Used by sockfs to ignore T_EXDATA_IND messages. 8299 * 8300 * Note: POLLRDDATA assumes that synch streams only return messages with 8301 * an M_DATA attached (i.e. not messages consisting of only 8302 * an M_PROTO/M_PCPROTO part). 8303 */ 8304 int 8305 strpoll( 8306 struct stdata *stp, 8307 short events_arg, 8308 int anyyet, 8309 short *reventsp, 8310 struct pollhead **phpp) 8311 { 8312 int events = (ushort_t)events_arg; 8313 int retevents = 0; 8314 mblk_t *mp; 8315 qband_t *qbp; 8316 long sd_flags = stp->sd_flag; 8317 int headlocked = 0; 8318 8319 /* 8320 * For performance, a single 'if' tests for most possible edge 8321 * conditions in one shot 8322 */ 8323 if (sd_flags & (STPLEX | STRDERR | STWRERR)) { 8324 if (sd_flags & STPLEX) { 8325 *reventsp = POLLNVAL; 8326 return (EINVAL); 8327 } 8328 if (((events & (POLLIN | POLLRDNORM | POLLRDBAND | POLLPRI)) && 8329 (sd_flags & STRDERR)) || 8330 ((events & (POLLOUT | POLLWRNORM | POLLWRBAND)) && 8331 (sd_flags & STWRERR))) { 8332 if (!(events & POLLNOERR)) { 8333 *reventsp = POLLERR; 8334 return (0); 8335 } 8336 } 8337 } 8338 if (sd_flags & STRHUP) { 8339 retevents |= POLLHUP; 8340 } else if (events & (POLLWRNORM | POLLWRBAND)) { 8341 queue_t *tq; 8342 queue_t *qp = stp->sd_wrq; 8343 8344 claimstr(qp); 8345 /* Find next module forward that has a service procedure */ 8346 tq = qp->q_next->q_nfsrv; 8347 ASSERT(tq != NULL); 8348 8349 polllock(&stp->sd_pollist, QLOCK(tq)); 8350 if (events & POLLWRNORM) { 8351 queue_t *sqp; 8352 8353 if (tq->q_flag & QFULL) 8354 /* ensure backq svc procedure runs */ 8355 tq->q_flag |= QWANTW; 8356 else if ((sqp = stp->sd_struiowrq) != NULL) { 8357 /* Check sync stream barrier write q */ 8358 mutex_exit(QLOCK(tq)); 8359 polllock(&stp->sd_pollist, QLOCK(sqp)); 8360 if (sqp->q_flag & QFULL) 8361 /* ensure pollwakeup() is done */ 8362 sqp->q_flag |= QWANTWSYNC; 8363 else 8364 retevents |= POLLOUT; 8365 /* More write events to process ??? */ 8366 if (! (events & POLLWRBAND)) { 8367 mutex_exit(QLOCK(sqp)); 8368 releasestr(qp); 8369 goto chkrd; 8370 } 8371 mutex_exit(QLOCK(sqp)); 8372 polllock(&stp->sd_pollist, QLOCK(tq)); 8373 } else 8374 retevents |= POLLOUT; 8375 } 8376 if (events & POLLWRBAND) { 8377 qbp = tq->q_bandp; 8378 if (qbp) { 8379 while (qbp) { 8380 if (qbp->qb_flag & QB_FULL) 8381 qbp->qb_flag |= QB_WANTW; 8382 else 8383 retevents |= POLLWRBAND; 8384 qbp = qbp->qb_next; 8385 } 8386 } else { 8387 retevents |= POLLWRBAND; 8388 } 8389 } 8390 mutex_exit(QLOCK(tq)); 8391 releasestr(qp); 8392 } 8393 chkrd: 8394 if (sd_flags & STRPRI) { 8395 retevents |= (events & POLLPRI); 8396 } else if (events & (POLLRDNORM | POLLRDBAND | POLLIN)) { 8397 queue_t *qp = _RD(stp->sd_wrq); 8398 int normevents = (events & (POLLIN | POLLRDNORM)); 8399 8400 /* 8401 * Note: Need to do polllock() here since ps_lock may be 8402 * held. See bug 4191544. 8403 */ 8404 polllock(&stp->sd_pollist, &stp->sd_lock); 8405 headlocked = 1; 8406 mp = qp->q_first; 8407 while (mp) { 8408 /* 8409 * For POLLRDDATA we scan b_cont and b_next until we 8410 * find an M_DATA. 
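 *
 * For example, with POLLRDDATA set a queue whose only message is a
 * T_EXDATA_IND consisting purely of M_PROTO blocks is skipped by
 * this scan and does not by itself cause POLLIN to be reported.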
8411 */ 8412 if ((events & POLLRDDATA) && 8413 mp->b_datap->db_type != M_DATA) { 8414 mblk_t *nmp = mp->b_cont; 8415 8416 while (nmp != NULL && 8417 nmp->b_datap->db_type != M_DATA) 8418 nmp = nmp->b_cont; 8419 if (nmp == NULL) { 8420 mp = mp->b_next; 8421 continue; 8422 } 8423 } 8424 if (mp->b_band == 0) 8425 retevents |= normevents; 8426 else 8427 retevents |= (events & (POLLIN | POLLRDBAND)); 8428 break; 8429 } 8430 if (! (retevents & normevents) && 8431 (stp->sd_wakeq & RSLEEP)) { 8432 /* 8433 * Sync stream barrier read queue has data. 8434 */ 8435 retevents |= normevents; 8436 } 8437 /* Treat eof as normal data */ 8438 if (sd_flags & STREOF) 8439 retevents |= normevents; 8440 } 8441 8442 *reventsp = (short)retevents; 8443 if (retevents) { 8444 if (headlocked) 8445 mutex_exit(&stp->sd_lock); 8446 return (0); 8447 } 8448 8449 /* 8450 * If poll() has not found any events yet, set up event cell 8451 * to wake up the poll if a requested event occurs on this 8452 * stream. Check for collisions with outstanding poll requests. 8453 */ 8454 if (!anyyet) { 8455 *phpp = &stp->sd_pollist; 8456 if (headlocked == 0) { 8457 polllock(&stp->sd_pollist, &stp->sd_lock); 8458 headlocked = 1; 8459 } 8460 stp->sd_rput_opt |= SR_POLLIN; 8461 } 8462 if (headlocked) 8463 mutex_exit(&stp->sd_lock); 8464 return (0); 8465 } 8466 8467 /* 8468 * The purpose of putback() is to assure sleeping polls/reads 8469 * are awakened when there are no new messages arriving at the, 8470 * stream head, and a message is placed back on the read queue. 8471 * 8472 * sd_lock must be held when messages are placed back on stream 8473 * head. (getq() holds sd_lock when it removes messages from 8474 * the queue) 8475 */ 8476 8477 static void 8478 putback(struct stdata *stp, queue_t *q, mblk_t *bp, int band) 8479 { 8480 mblk_t *qfirst; 8481 ASSERT(MUTEX_HELD(&stp->sd_lock)); 8482 8483 /* 8484 * As a result of lock-step ordering around q_lock and sd_lock, 8485 * it's possible for function calls like putnext() and 8486 * canputnext() to get an inaccurate picture of how much 8487 * data is really being processed at the stream head. 8488 * We only consolidate with existing messages on the queue 8489 * if the length of the message we want to put back is smaller 8490 * than the queue hiwater mark. 8491 */ 8492 if ((stp->sd_rput_opt & SR_CONSOL_DATA) && 8493 (DB_TYPE(bp) == M_DATA) && ((qfirst = q->q_first) != NULL) && 8494 (DB_TYPE(qfirst) == M_DATA) && 8495 ((qfirst->b_flag & (MSGMARK|MSGDELIM)) == 0) && 8496 ((bp->b_flag & (MSGMARK|MSGDELIM|MSGMARKNEXT)) == 0) && 8497 (mp_cont_len(bp, NULL) < q->q_hiwat)) { 8498 /* 8499 * We use the same logic as defined in strrput() 8500 * but in reverse as we are putting back onto the 8501 * queue and want to retain byte ordering. 8502 * Consolidate M_DATA messages with M_DATA ONLY. 8503 * strrput() allows the consolidation of M_DATA onto 8504 * M_PROTO | M_PCPROTO but not the other way round. 8505 * 8506 * The consolidation does not take place if the message 8507 * we are returning to the queue is marked with either 8508 * of the marks or the delim flag or if q_first 8509 * is marked with MSGMARK. The MSGMARK check is needed to 8510 * handle the odd semantics of MSGMARK where essentially 8511 * the whole message is to be treated as marked. 8512 * Carry any MSGMARKNEXT and MSGNOTMARKNEXT from q_first 8513 * to the front of the b_cont chain. 8514 */ 8515 rmvq_noenab(q, qfirst); 8516 8517 /* 8518 * The first message in the b_cont list 8519 * tracks MSGMARKNEXT and MSGNOTMARKNEXT. 
		 * We need to handle the case where we
		 * are appending:
		 *
		 * 1) a MSGMARKNEXT to a MSGNOTMARKNEXT.
		 * 2) a MSGMARKNEXT to a plain message.
		 * 3) a MSGNOTMARKNEXT to a plain message.
		 * 4) a MSGNOTMARKNEXT to a MSGNOTMARKNEXT
		 *    message.
		 *
		 * Thus we never append a MSGMARKNEXT or
		 * MSGNOTMARKNEXT to a MSGMARKNEXT message.
		 */
		if (qfirst->b_flag & MSGMARKNEXT) {
			bp->b_flag |= MSGMARKNEXT;
			bp->b_flag &= ~MSGNOTMARKNEXT;
			qfirst->b_flag &= ~MSGMARKNEXT;
		} else if (qfirst->b_flag & MSGNOTMARKNEXT) {
			bp->b_flag |= MSGNOTMARKNEXT;
			qfirst->b_flag &= ~MSGNOTMARKNEXT;
		}

		linkb(bp, qfirst);
	}
	(void) putbq(q, bp);

	/*
	 * A message may have come in when the sd_lock was dropped in the
	 * calling routine. If this is the case and STR*ATMARK info was
	 * received, need to move that from the stream head to the q_last
	 * so that SIOCATMARK can return the proper value.
	 */
	if (stp->sd_flag & (STRATMARK | STRNOTATMARK)) {
		unsigned short *flagp = &q->q_last->b_flag;
		uint_t b_flag = (uint_t)*flagp;

		if (stp->sd_flag & STRATMARK) {
			b_flag &= ~MSGNOTMARKNEXT;
			b_flag |= MSGMARKNEXT;
			stp->sd_flag &= ~STRATMARK;
		} else {
			b_flag &= ~MSGMARKNEXT;
			b_flag |= MSGNOTMARKNEXT;
			stp->sd_flag &= ~STRNOTATMARK;
		}
		*flagp = (unsigned short)b_flag;
	}

#ifdef DEBUG
	/*
	 * Make sure that the flags are not messed up.
	 */
	{
		mblk_t *mp;
		mp = q->q_last;
		while (mp != NULL) {
			ASSERT((mp->b_flag & (MSGMARKNEXT|MSGNOTMARKNEXT)) !=
			    (MSGMARKNEXT|MSGNOTMARKNEXT));
			mp = mp->b_cont;
		}
	}
#endif
	if (q->q_first == bp) {
		short pollevents;

		if (stp->sd_flag & RSLEEP) {
			stp->sd_flag &= ~RSLEEP;
			cv_broadcast(&q->q_wait);
		}
		if (stp->sd_flag & STRPRI) {
			pollevents = POLLPRI;
		} else {
			if (band == 0) {
				if (!(stp->sd_rput_opt & SR_POLLIN))
					return;
				stp->sd_rput_opt &= ~SR_POLLIN;
				pollevents = POLLIN | POLLRDNORM;
			} else {
				pollevents = POLLIN | POLLRDBAND;
			}
		}
		mutex_exit(&stp->sd_lock);
		pollwakeup(&stp->sd_pollist, pollevents);
		mutex_enter(&stp->sd_lock);
	}
}

/*
 * Return the held vnode attached to the stream head of a
 * given queue. It is the responsibility of the calling routine
 * to ensure that the queue does not go away (e.g. pop).
 */
vnode_t *
strq2vp(queue_t *qp)
{
	vnode_t *vp;
	vp = STREAM(qp)->sd_vnode;
	ASSERT(vp != NULL);
	VN_HOLD(vp);
	return (vp);
}

/*
 * Return the stream head write queue for the given vp. It is the
 * responsibility of the calling routine to ensure that the stream
 * or vnode does not close.
 */
queue_t *
strvp2wq(vnode_t *vp)
{
	ASSERT(vp->v_stream != NULL);
	return (vp->v_stream->sd_wrq);
}

/*
 * Do a pollwakeup on the stream head. It is the responsibility of the
 * calling routine to ensure that the stream or vnode does not close.
 */
void
strpollwakeup(vnode_t *vp, short event)
{
	ASSERT(vp->v_stream);
	pollwakeup(&vp->v_stream->sd_pollist, event);
}

/*
 * Mate the stream heads of two vnodes together. If the two vnodes are the
 * same, we just make the write-side point at the read-side -- otherwise,
 * we do a full mate.
 * Only works on vnodes associated with streams that are
 * still being built and thus have only a stream head.
 */
void
strmate(vnode_t *vp1, vnode_t *vp2)
{
	queue_t *wrq1 = strvp2wq(vp1);
	queue_t *wrq2 = strvp2wq(vp2);

	/*
	 * Verify that there are no modules on the stream yet. We also
	 * rely on the stream head always having a service procedure to
	 * avoid tweaking q_nfsrv.
	 */
	ASSERT(wrq1->q_next == NULL && wrq2->q_next == NULL);
	ASSERT(wrq1->q_qinfo->qi_srvp != NULL);
	ASSERT(wrq2->q_qinfo->qi_srvp != NULL);

	/*
	 * If the queues are the same, just twist; otherwise do a full mate.
	 */
	if (wrq1 == wrq2) {
		wrq1->q_next = _RD(wrq1);
	} else {
		wrq1->q_next = _RD(wrq2);
		wrq2->q_next = _RD(wrq1);
		STREAM(wrq1)->sd_mate = STREAM(wrq2);
		STREAM(wrq1)->sd_flag |= STRMATE;
		STREAM(wrq2)->sd_mate = STREAM(wrq1);
		STREAM(wrq2)->sd_flag |= STRMATE;
	}
}

/*
 * XXX will go away when console is correctly fixed.
 * Clean up the console PIDs from a previous I_SETSIG; called only for
 * cnopen, which never calls strclean().
 */
void
str_cn_clean(struct vnode *vp)
{
	strsig_t *ssp, *pssp, *tssp;
	struct stdata *stp;
	struct pid *pidp;
	int update = 0;

	ASSERT(vp->v_stream);
	stp = vp->v_stream;
	pssp = NULL;
	mutex_enter(&stp->sd_lock);
	ssp = stp->sd_siglist;
	while (ssp) {
		mutex_enter(&pidlock);
		pidp = ssp->ss_pidp;
		/*
		 * Get rid of PID if the proc is gone.
		 */
		if (pidp->pid_prinactive) {
			tssp = ssp->ss_next;
			if (pssp)
				pssp->ss_next = tssp;
			else
				stp->sd_siglist = tssp;
			ASSERT(pidp->pid_ref <= 1);
			PID_RELE(ssp->ss_pidp);
			mutex_exit(&pidlock);
			kmem_free(ssp, sizeof (strsig_t));
			update = 1;
			ssp = tssp;
			continue;
		} else
			mutex_exit(&pidlock);
		pssp = ssp;
		ssp = ssp->ss_next;
	}
	if (update) {
		stp->sd_sigflags = 0;
		for (ssp = stp->sd_siglist; ssp; ssp = ssp->ss_next)
			stp->sd_sigflags |= ssp->ss_events;
	}
	mutex_exit(&stp->sd_lock);
}

/*
 * Return B_TRUE if there is data in the message, B_FALSE otherwise.
 */
static boolean_t
msghasdata(mblk_t *bp)
{
	for (; bp; bp = bp->b_cont)
		if (bp->b_datap->db_type == M_DATA) {
			ASSERT(bp->b_wptr >= bp->b_rptr);
			if (bp->b_wptr > bp->b_rptr)
				return (B_TRUE);
		}
	return (B_FALSE);
}

/*
 * Called on the first strget() of a sodirect/uioa enabled stream head.
 * If any mblk_t(s) are already enqueued they must first be uioamove()d
 * before uioa can be enabled for the underlying transport's use.
 */
void
struioainit(queue_t *q, sodirect_t *sodp, uio_t *uiop)
{
	uioa_t *uioap = (uioa_t *)uiop;
	mblk_t *bp = q->q_first;
	mblk_t *lbp = NULL;
	mblk_t *nbp, *wbp;
	int len;
	int error;

	ASSERT(MUTEX_HELD(sodp->sod_lock));
	ASSERT(&sodp->sod_uioa == uioap);

	/*
	 * Walk the b_next/b_prev doubly linked list of b_cont chain(s)
	 * and schedule any M_DATA mblk_t's for uio asynchronous move.
	 */
	do {
		/* Next mblk_t chain */
		nbp = bp->b_next;
		/* Walk the chain */
		wbp = bp;
		do {
			if (wbp->b_datap->db_type != M_DATA) {
				/* Not M_DATA, no more uioa */
				goto nouioa;
			}
			if ((len = wbp->b_wptr - wbp->b_rptr) > 0) {
				/* Have an M_DATA mblk_t with data */
				if (len > uioap->uio_resid) {
					/* Not enough uio space */
					goto nouioa;
				}
				error = uioamove(wbp->b_rptr, len,
				    UIO_READ, uioap);
				if (!error) {
					/* Scheduled, mark dblk_t as such */
					wbp->b_datap->db_flags |= DBLK_UIOA;
				} else {
					/* Error of some sort, no more uioa */
					uioap->uioa_state &= UIOA_CLR;
					uioap->uioa_state |= UIOA_FINI;
					return;
				}
			}
			/* Save last wbp processed */
			lbp = wbp;
		} while ((wbp = wbp->b_cont) != NULL);
	} while ((bp = nbp) != NULL);

	return;

nouioa:
	/* No more uioa */
	uioap->uioa_state &= UIOA_CLR;
	uioap->uioa_state |= UIOA_FINI;

	/*
	 * If we processed 1 or more mblk_t(s) then we need to split the
	 * current mblk_t chain in 2 so that all the uioamove()ed mblk_t(s)
	 * are in the current chain and the rest are in the following new
	 * chain.
	 */
	if (lbp != NULL) {
		/* New end of current chain */
		lbp->b_cont = NULL;

		/* Insert new chain wbp after bp */
		if ((wbp->b_next = nbp) != NULL)
			nbp->b_prev = wbp;
		else
			q->q_last = wbp;
		wbp->b_prev = bp;
		bp->b_next = wbp;
	}
}
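
/*
 * Illustrative sketch only -- not part of the original file. It shows how
 * the strpoll() behavior above is observed from user level through poll(2):
 * POLLIN/POLLRDNORM report band-0 data at the stream head, POLLRDBAND
 * reports priority-band data, POLLPRI is returned once an M_PCPROTO has
 * set STRPRI, and POLLHUP is reported after a hangup. The device path is
 * an assumption; any STREAMS-based device could be substituted. The code
 * is guarded by #if 0 so it is never compiled as part of this file;
 * extracted on its own it builds as an ordinary user program.
 */
#if 0
#include <poll.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct pollfd pfd;
	int fd;

	/* Hypothetical STREAMS device; substitute the one of interest. */
	if ((fd = open("/dev/ptmx", O_RDWR)) < 0)
		return (1);

	pfd.fd = fd;
	pfd.events = POLLIN | POLLRDNORM | POLLRDBAND | POLLPRI;
	pfd.revents = 0;

	/* Wait up to five seconds for the stream head to report an event. */
	if (poll(&pfd, 1, 5000) > 0) {
		if (pfd.revents & POLLPRI)
			(void) printf("high-priority message pending\n");
		if (pfd.revents & (POLLIN | POLLRDNORM))
			(void) printf("normal (band 0) data readable\n");
		if (pfd.revents & POLLRDBAND)
			(void) printf("priority-band data readable\n");
		if (pfd.revents & POLLHUP)
			(void) printf("stream hung up\n");
	}
	(void) close(fd);
	return (0);
}
#endif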