1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #include <sys/types.h> 28 #include <sys/inttypes.h> 29 #include <sys/t_lock.h> 30 #include <sys/param.h> 31 #include <sys/systm.h> 32 #include <sys/buf.h> 33 #include <sys/conf.h> 34 #include <sys/cred.h> 35 #include <sys/kmem.h> 36 #include <sys/sysmacros.h> 37 #include <sys/vfs.h> 38 #include <sys/vnode.h> 39 #include <sys/debug.h> 40 #include <sys/errno.h> 41 #include <sys/time.h> 42 #include <sys/file.h> 43 #include <sys/user.h> 44 #include <sys/stream.h> 45 #include <sys/strsubr.h> 46 #include <sys/esunddi.h> 47 #include <sys/flock.h> 48 #include <sys/modctl.h> 49 #include <sys/vtrace.h> 50 #include <sys/strsun.h> 51 #include <sys/cmn_err.h> 52 #include <sys/proc.h> 53 #include <sys/ddi.h> 54 55 #include <sys/suntpi.h> 56 #include <sys/socket.h> 57 #include <sys/sockio.h> 58 #include <sys/socketvar.h> 59 #include <netinet/in.h> 60 #include <inet/common.h> 61 #include <inet/proto_set.h> 62 63 #include <sys/tiuser.h> 64 #define _SUN_TPI_VERSION 2 65 #include <sys/tihdr.h> 66 67 #include <inet/kssl/ksslapi.h> 68 69 #include <c2/audit.h> 70 71 #include <fs/sockfs/socktpi.h> 72 #include <fs/sockfs/socktpi_impl.h> 73 74 int so_default_version = SOV_SOCKSTREAM; 75 76 #ifdef DEBUG 77 /* Set sockdebug to print debug messages when SO_DEBUG is set */ 78 int sockdebug = 0; 79 80 /* Set sockprinterr to print error messages when SO_DEBUG is set */ 81 int sockprinterr = 0; 82 83 /* 84 * Set so_default_options to SO_DEBUG is all sockets should be created 85 * with SO_DEBUG set. This is needed to get debug printouts from the 86 * socket() call itself. 87 */ 88 int so_default_options = 0; 89 #endif /* DEBUG */ 90 91 #ifdef SOCK_TEST 92 /* 93 * Set to number of ticks to limit cv_waits for code coverage testing. 94 * Set to 1000 when SO_DEBUG is set to 2. 95 */ 96 clock_t sock_test_timelimit = 0; 97 #endif /* SOCK_TEST */ 98 99 /* 100 * For concurrency testing of e.g. opening /dev/ip which does not 101 * handle T_INFO_REQ messages. 102 */ 103 int so_no_tinfo = 0; 104 105 /* 106 * Timeout for getting a T_CAPABILITY_ACK - it is possible for a provider 107 * to simply ignore the T_CAPABILITY_REQ. 
108 */ 109 clock_t sock_capability_timeout = 2; /* seconds */ 110 111 static int do_tcapability(struct sonode *so, t_uscalar_t cap_bits1); 112 static void so_removehooks(struct sonode *so); 113 114 static mblk_t *strsock_proto(vnode_t *vp, mblk_t *mp, 115 strwakeup_t *wakeups, strsigset_t *firstmsgsigs, 116 strsigset_t *allmsgsigs, strpollset_t *pollwakeups); 117 static mblk_t *strsock_misc(vnode_t *vp, mblk_t *mp, 118 strwakeup_t *wakeups, strsigset_t *firstmsgsigs, 119 strsigset_t *allmsgsigs, strpollset_t *pollwakeups); 120 121 /* 122 * Convert a socket to a stream. Invoked when the illusory sockmod 123 * is popped from the stream. 124 * Change the stream head back to default operation without losing 125 * any messages (T_conn_ind's are moved to the stream head queue). 126 */ 127 int 128 so_sock2stream(struct sonode *so) 129 { 130 struct vnode *vp = SOTOV(so); 131 queue_t *rq; 132 mblk_t *mp; 133 int error = 0; 134 sotpi_info_t *sti = SOTOTPI(so); 135 136 ASSERT(MUTEX_HELD(&sti->sti_plumb_lock)); 137 138 mutex_enter(&so->so_lock); 139 so_lock_single(so); 140 141 ASSERT(so->so_version != SOV_STREAM); 142 143 if (sti->sti_direct) { 144 mblk_t **mpp; 145 int rval; 146 147 /* 148 * Tell the transport below that sockmod is being popped 149 */ 150 mutex_exit(&so->so_lock); 151 error = strioctl(vp, _SIOCSOCKFALLBACK, 0, 0, K_TO_K, CRED(), 152 &rval); 153 mutex_enter(&so->so_lock); 154 if (error != 0) { 155 dprintso(so, 0, ("so_sock2stream(%p): " 156 "_SIOCSOCKFALLBACK failed\n", (void *)so)); 157 goto exit; 158 } 159 sti->sti_direct = 0; 160 161 for (mpp = &sti->sti_conn_ind_head; (mp = *mpp) != NULL; 162 mpp = &mp->b_next) { 163 struct T_conn_ind *conn_ind; 164 165 /* 166 * strsock_proto() has already verified the length of 167 * this message block. 168 */ 169 ASSERT(MBLKL(mp) >= sizeof (struct T_conn_ind)); 170 171 conn_ind = (struct T_conn_ind *)mp->b_rptr; 172 if (conn_ind->OPT_length == 0 && 173 conn_ind->OPT_offset == 0) 174 continue; 175 176 if (DB_REF(mp) > 1) { 177 mblk_t *newmp; 178 size_t length; 179 cred_t *cr; 180 pid_t cpid; 181 int error; /* Dummy - error not returned */ 182 183 /* 184 * Copy the message block because it is used 185 * elsewhere, too. 186 * Can't use copyb since we want to wait 187 * yet allow for EINTR. 188 */ 189 /* Round up size for reuse */ 190 length = MAX(MBLKL(mp), 64); 191 cr = msg_getcred(mp, &cpid); 192 if (cr != NULL) { 193 newmp = allocb_cred_wait(length, 0, 194 &error, cr, cpid); 195 } else { 196 newmp = allocb_wait(length, 0, 0, 197 &error); 198 } 199 if (newmp == NULL) { 200 error = EINTR; 201 goto exit; 202 } 203 bcopy(mp->b_rptr, newmp->b_wptr, length); 204 newmp->b_wptr += length; 205 newmp->b_next = mp->b_next; 206 207 /* 208 * Link the new message block into the queue 209 * and free the old one. 210 */ 211 *mpp = newmp; 212 mp->b_next = NULL; 213 freemsg(mp); 214 215 mp = newmp; 216 conn_ind = (struct T_conn_ind *)mp->b_rptr; 217 } 218 219 /* 220 * Remove options added by TCP for accept fast-path. 221 */ 222 conn_ind->OPT_length = 0; 223 conn_ind->OPT_offset = 0; 224 } 225 } 226 227 so->so_version = SOV_STREAM; 228 so->so_proto_handle = NULL; 229 230 /* 231 * Remove the hooks in the stream head to avoid queuing more 232 * packets in sockfs. 233 */ 234 mutex_exit(&so->so_lock); 235 so_removehooks(so); 236 mutex_enter(&so->so_lock); 237 238 /* 239 * Clear any state related to urgent data. Leave any T_EXDATA_IND 240 * on the queue - the behavior of urgent data after a switch is 241 * left undefined. 
242 */ 243 so->so_error = sti->sti_delayed_error = 0; 244 freemsg(so->so_oobmsg); 245 so->so_oobmsg = NULL; 246 sti->sti_oobsigcnt = sti->sti_oobcnt = 0; 247 248 so->so_state &= ~(SS_RCVATMARK|SS_OOBPEND|SS_HAVEOOBDATA|SS_HADOOBDATA| 249 SS_SAVEDEOR); 250 ASSERT(so_verify_oobstate(so)); 251 252 freemsg(sti->sti_ack_mp); 253 sti->sti_ack_mp = NULL; 254 255 /* 256 * Flush the T_DISCON_IND on sti_discon_ind_mp. 257 */ 258 so_flush_discon_ind(so); 259 260 /* 261 * Move any queued T_CONN_IND messages to stream head queue. 262 */ 263 rq = RD(strvp2wq(vp)); 264 while ((mp = sti->sti_conn_ind_head) != NULL) { 265 sti->sti_conn_ind_head = mp->b_next; 266 mp->b_next = NULL; 267 if (sti->sti_conn_ind_head == NULL) { 268 ASSERT(sti->sti_conn_ind_tail == mp); 269 sti->sti_conn_ind_tail = NULL; 270 } 271 dprintso(so, 0, 272 ("so_sock2stream(%p): moving T_CONN_IND\n", (void *)so)); 273 274 /* Drop lock across put() */ 275 mutex_exit(&so->so_lock); 276 put(rq, mp); 277 mutex_enter(&so->so_lock); 278 } 279 280 exit: 281 ASSERT(MUTEX_HELD(&so->so_lock)); 282 so_unlock_single(so, SOLOCKED); 283 mutex_exit(&so->so_lock); 284 return (error); 285 } 286 287 /* 288 * Covert a stream back to a socket. This is invoked when the illusory 289 * sockmod is pushed on a stream (where the stream was "created" by 290 * popping the illusory sockmod). 291 * This routine can not recreate the socket state (certain aspects of 292 * it like urgent data state and the bound/connected addresses for AF_UNIX 293 * sockets can not be recreated by asking the transport for information). 294 * Thus this routine implicitly assumes that the socket is in an initial 295 * state (as if it was just created). It flushes any messages queued on the 296 * read queue to avoid dealing with e.g. TPI acks or T_exdata_ind messages. 297 */ 298 void 299 so_stream2sock(struct sonode *so) 300 { 301 struct vnode *vp = SOTOV(so); 302 sotpi_info_t *sti = SOTOTPI(so); 303 304 ASSERT(MUTEX_HELD(&sti->sti_plumb_lock)); 305 306 mutex_enter(&so->so_lock); 307 so_lock_single(so); 308 ASSERT(so->so_version == SOV_STREAM); 309 so->so_version = SOV_SOCKSTREAM; 310 sti->sti_pushcnt = 0; 311 mutex_exit(&so->so_lock); 312 313 /* 314 * Set a permenent error to force any thread in sorecvmsg to 315 * return (and drop SOREADLOCKED). Clear the error once 316 * we have SOREADLOCKED. 317 * This makes a read sleeping during the I_PUSH of sockmod return 318 * EIO. 319 */ 320 strsetrerror(SOTOV(so), EIO, 1, NULL); 321 322 /* 323 * Get the read lock before flushing data to avoid 324 * problems with the T_EXDATA_IND MSG_PEEK code in sorecvmsg. 325 */ 326 mutex_enter(&so->so_lock); 327 (void) so_lock_read(so, 0); /* Set SOREADLOCKED */ 328 mutex_exit(&so->so_lock); 329 330 strsetrerror(SOTOV(so), 0, 0, NULL); 331 so_installhooks(so); 332 333 /* 334 * Flush everything on the read queue. 335 * This ensures that no T_CONN_IND remain and that no T_EXDATA_IND 336 * remain; those types of messages would confuse sockfs. 337 */ 338 strflushrq(vp, FLUSHALL); 339 mutex_enter(&so->so_lock); 340 341 /* 342 * Flush the T_DISCON_IND on sti_discon_ind_mp. 343 */ 344 so_flush_discon_ind(so); 345 so_unlock_read(so); /* Clear SOREADLOCKED */ 346 347 so_unlock_single(so, SOLOCKED); 348 mutex_exit(&so->so_lock); 349 } 350 351 /* 352 * Install the hooks in the stream head. 
353 */ 354 void 355 so_installhooks(struct sonode *so) 356 { 357 struct vnode *vp = SOTOV(so); 358 359 strsetrputhooks(vp, SH_SIGALLDATA | SH_IGN_ZEROLEN | SH_CONSOL_DATA, 360 strsock_proto, strsock_misc); 361 strsetwputhooks(vp, SH_SIGPIPE | SH_RECHECK_ERR, 0); 362 } 363 364 /* 365 * Remove the hooks in the stream head. 366 */ 367 static void 368 so_removehooks(struct sonode *so) 369 { 370 struct vnode *vp = SOTOV(so); 371 372 strsetrputhooks(vp, 0, NULL, NULL); 373 strsetwputhooks(vp, 0, STRTIMOUT); 374 /* 375 * Leave read behavior as it would have been for a normal 376 * stream i.e. a read of an M_PROTO will fail. 377 */ 378 } 379 380 void 381 so_basic_strinit(struct sonode *so) 382 { 383 struct vnode *vp = SOTOV(so); 384 struct stdata *stp; 385 mblk_t *mp; 386 sotpi_info_t *sti = SOTOTPI(so); 387 388 /* Preallocate an unbind_req message */ 389 mp = soallocproto(sizeof (struct T_unbind_req), _ALLOC_SLEEP, CRED()); 390 mutex_enter(&so->so_lock); 391 sti->sti_unbind_mp = mp; 392 #ifdef DEBUG 393 so->so_options = so_default_options; 394 #endif /* DEBUG */ 395 mutex_exit(&so->so_lock); 396 397 so_installhooks(so); 398 399 stp = vp->v_stream; 400 /* 401 * Have to keep minpsz at zero in order to allow write/send of zero 402 * bytes. 403 */ 404 mutex_enter(&stp->sd_lock); 405 if (stp->sd_qn_minpsz == 1) 406 stp->sd_qn_minpsz = 0; 407 mutex_exit(&stp->sd_lock); 408 } 409 410 /* 411 * Initialize the streams side of a socket including 412 * T_info_req/ack processing. If tso is not NULL its values are used thereby 413 * avoiding the T_INFO_REQ. 414 */ 415 int 416 so_strinit(struct sonode *so, struct sonode *tso) 417 { 418 sotpi_info_t *sti = SOTOTPI(so); 419 sotpi_info_t *tsti; 420 int error; 421 422 so_basic_strinit(so); 423 424 /* 425 * The T_CAPABILITY_REQ should be the first message sent down because 426 * at least TCP has a fast-path for this which avoids timeouts while 427 * waiting for the T_CAPABILITY_ACK under high system load. 428 */ 429 if (tso == NULL) { 430 error = do_tcapability(so, TC1_ACCEPTOR_ID | TC1_INFO); 431 if (error) 432 return (error); 433 } else { 434 tsti = SOTOTPI(tso); 435 436 mutex_enter(&so->so_lock); 437 sti->sti_tsdu_size = tsti->sti_tsdu_size; 438 sti->sti_etsdu_size = tsti->sti_etsdu_size; 439 sti->sti_addr_size = tsti->sti_addr_size; 440 sti->sti_opt_size = tsti->sti_opt_size; 441 sti->sti_tidu_size = tsti->sti_tidu_size; 442 sti->sti_serv_type = tsti->sti_serv_type; 443 so->so_mode = tso->so_mode & ~SM_ACCEPTOR_ID; 444 mutex_exit(&so->so_lock); 445 446 /* the following do_tcapability may update so->so_mode */ 447 if ((tsti->sti_serv_type != T_CLTS) && 448 (sti->sti_direct == 0)) { 449 error = do_tcapability(so, TC1_ACCEPTOR_ID); 450 if (error) 451 return (error); 452 } 453 } 454 /* 455 * If the addr_size is 0 we treat it as already bound 456 * and connected. This is used by the routing socket. 457 * We set the addr_size to something to allocate a the address 458 * structures. 459 */ 460 if (sti->sti_addr_size == 0) { 461 so->so_state |= SS_ISBOUND | SS_ISCONNECTED; 462 /* Address size can vary with address families. 
*/ 463 if (so->so_family == AF_INET6) 464 sti->sti_addr_size = 465 (t_scalar_t)sizeof (struct sockaddr_in6); 466 else 467 sti->sti_addr_size = 468 (t_scalar_t)sizeof (struct sockaddr_in); 469 ASSERT(sti->sti_unbind_mp); 470 } 471 472 so_alloc_addr(so, sti->sti_addr_size); 473 474 return (0); 475 } 476 477 static void 478 copy_tinfo(struct sonode *so, struct T_info_ack *tia) 479 { 480 sotpi_info_t *sti = SOTOTPI(so); 481 482 sti->sti_tsdu_size = tia->TSDU_size; 483 sti->sti_etsdu_size = tia->ETSDU_size; 484 sti->sti_addr_size = tia->ADDR_size; 485 sti->sti_opt_size = tia->OPT_size; 486 sti->sti_tidu_size = tia->TIDU_size; 487 sti->sti_serv_type = tia->SERV_type; 488 switch (tia->CURRENT_state) { 489 case TS_UNBND: 490 break; 491 case TS_IDLE: 492 so->so_state |= SS_ISBOUND; 493 sti->sti_laddr_len = 0; 494 sti->sti_laddr_valid = 0; 495 break; 496 case TS_DATA_XFER: 497 so->so_state |= SS_ISBOUND|SS_ISCONNECTED; 498 sti->sti_laddr_len = 0; 499 sti->sti_faddr_len = 0; 500 sti->sti_laddr_valid = 0; 501 sti->sti_faddr_valid = 0; 502 break; 503 } 504 505 /* 506 * Heuristics for determining the socket mode flags 507 * (SM_ATOMIC, SM_CONNREQUIRED, SM_ADDR, SM_FDPASSING, 508 * and SM_EXDATA, SM_OPTDATA, and SM_BYTESTREAM) 509 * from the info ack. 510 */ 511 if (sti->sti_serv_type == T_CLTS) { 512 so->so_mode |= SM_ATOMIC | SM_ADDR; 513 } else { 514 so->so_mode |= SM_CONNREQUIRED; 515 if (sti->sti_etsdu_size != 0 && sti->sti_etsdu_size != -2) 516 so->so_mode |= SM_EXDATA; 517 } 518 if (so->so_type == SOCK_SEQPACKET || so->so_type == SOCK_RAW) { 519 /* Semantics are to discard tail end of messages */ 520 so->so_mode |= SM_ATOMIC; 521 } 522 if (so->so_family == AF_UNIX) { 523 so->so_mode |= SM_FDPASSING | SM_OPTDATA; 524 if (sti->sti_addr_size == -1) { 525 /* MAXPATHLEN + soun_family + nul termination */ 526 sti->sti_addr_size = (t_scalar_t)(MAXPATHLEN + 527 sizeof (short) + 1); 528 } 529 if (so->so_type == SOCK_STREAM) { 530 /* 531 * Make it into a byte-stream transport. 532 * SOCK_SEQPACKET sockets are unchanged. 533 */ 534 sti->sti_tsdu_size = 0; 535 } 536 } else if (sti->sti_addr_size == -1) { 537 /* 538 * Logic extracted from sockmod - have to pick some max address 539 * length in order to preallocate the addresses. 540 */ 541 sti->sti_addr_size = SOA_DEFSIZE; 542 } 543 if (sti->sti_tsdu_size == 0) 544 so->so_mode |= SM_BYTESTREAM; 545 } 546 547 static int 548 check_tinfo(struct sonode *so) 549 { 550 sotpi_info_t *sti = SOTOTPI(so); 551 552 /* Consistency checks */ 553 if (so->so_type == SOCK_DGRAM && sti->sti_serv_type != T_CLTS) { 554 eprintso(so, ("service type and socket type mismatch\n")); 555 eprintsoline(so, EPROTO); 556 return (EPROTO); 557 } 558 if (so->so_type == SOCK_STREAM && sti->sti_serv_type == T_CLTS) { 559 eprintso(so, ("service type and socket type mismatch\n")); 560 eprintsoline(so, EPROTO); 561 return (EPROTO); 562 } 563 if (so->so_type == SOCK_SEQPACKET && sti->sti_serv_type == T_CLTS) { 564 eprintso(so, ("service type and socket type mismatch\n")); 565 eprintsoline(so, EPROTO); 566 return (EPROTO); 567 } 568 if (so->so_family == AF_INET && 569 sti->sti_addr_size != (t_scalar_t)sizeof (struct sockaddr_in)) { 570 eprintso(so, 571 ("AF_INET must have sockaddr_in address length. Got %d\n", 572 sti->sti_addr_size)); 573 eprintsoline(so, EMSGSIZE); 574 return (EMSGSIZE); 575 } 576 if (so->so_family == AF_INET6 && 577 sti->sti_addr_size != (t_scalar_t)sizeof (struct sockaddr_in6)) { 578 eprintso(so, 579 ("AF_INET6 must have sockaddr_in6 address length. 
Got %d\n", 580 sti->sti_addr_size)); 581 eprintsoline(so, EMSGSIZE); 582 return (EMSGSIZE); 583 } 584 585 dprintso(so, 1, ( 586 "tinfo: serv %d tsdu %d, etsdu %d, addr %d, opt %d, tidu %d\n", 587 sti->sti_serv_type, sti->sti_tsdu_size, sti->sti_etsdu_size, 588 sti->sti_addr_size, sti->sti_opt_size, 589 sti->sti_tidu_size)); 590 dprintso(so, 1, ("tinfo: so_state %s\n", 591 pr_state(so->so_state, so->so_mode))); 592 return (0); 593 } 594 595 /* 596 * Send down T_info_req and wait for the ack. 597 * Record interesting T_info_ack values in the sonode. 598 */ 599 static int 600 do_tinfo(struct sonode *so) 601 { 602 struct T_info_req tir; 603 mblk_t *mp; 604 int error; 605 606 ASSERT(MUTEX_NOT_HELD(&so->so_lock)); 607 608 if (so_no_tinfo) { 609 SOTOTPI(so)->sti_addr_size = 0; 610 return (0); 611 } 612 613 dprintso(so, 1, ("do_tinfo(%p)\n", (void *)so)); 614 615 /* Send T_INFO_REQ */ 616 tir.PRIM_type = T_INFO_REQ; 617 mp = soallocproto1(&tir, sizeof (tir), 618 sizeof (struct T_info_req) + sizeof (struct T_info_ack), 619 _ALLOC_INTR, CRED()); 620 if (mp == NULL) { 621 eprintsoline(so, ENOBUFS); 622 return (ENOBUFS); 623 } 624 /* T_INFO_REQ has to be M_PCPROTO */ 625 DB_TYPE(mp) = M_PCPROTO; 626 627 error = kstrputmsg(SOTOV(so), mp, NULL, 0, 0, 628 MSG_BAND|MSG_HOLDSIG|MSG_IGNERROR, 0); 629 if (error) { 630 eprintsoline(so, error); 631 return (error); 632 } 633 mutex_enter(&so->so_lock); 634 /* Wait for T_INFO_ACK */ 635 if ((error = sowaitprim(so, T_INFO_REQ, T_INFO_ACK, 636 (t_uscalar_t)sizeof (struct T_info_ack), &mp, 0))) { 637 mutex_exit(&so->so_lock); 638 eprintsoline(so, error); 639 return (error); 640 } 641 642 ASSERT(mp); 643 copy_tinfo(so, (struct T_info_ack *)mp->b_rptr); 644 mutex_exit(&so->so_lock); 645 freemsg(mp); 646 return (check_tinfo(so)); 647 } 648 649 /* 650 * Send down T_capability_req and wait for the ack. 651 * Record interesting T_capability_ack values in the sonode. 
652 */ 653 static int 654 do_tcapability(struct sonode *so, t_uscalar_t cap_bits1) 655 { 656 struct T_capability_req tcr; 657 struct T_capability_ack *tca; 658 mblk_t *mp; 659 int error; 660 sotpi_info_t *sti = SOTOTPI(so); 661 662 ASSERT(cap_bits1 != 0); 663 ASSERT((cap_bits1 & ~(TC1_ACCEPTOR_ID | TC1_INFO)) == 0); 664 ASSERT(MUTEX_NOT_HELD(&so->so_lock)); 665 666 if (sti->sti_provinfo->tpi_capability == PI_NO) 667 return (do_tinfo(so)); 668 669 if (so_no_tinfo) { 670 sti->sti_addr_size = 0; 671 if ((cap_bits1 &= ~TC1_INFO) == 0) 672 return (0); 673 } 674 675 dprintso(so, 1, ("do_tcapability(%p)\n", (void *)so)); 676 677 /* Send T_CAPABILITY_REQ */ 678 tcr.PRIM_type = T_CAPABILITY_REQ; 679 tcr.CAP_bits1 = cap_bits1; 680 mp = soallocproto1(&tcr, sizeof (tcr), 681 sizeof (struct T_capability_req) + sizeof (struct T_capability_ack), 682 _ALLOC_INTR, CRED()); 683 if (mp == NULL) { 684 eprintsoline(so, ENOBUFS); 685 return (ENOBUFS); 686 } 687 /* T_CAPABILITY_REQ should be M_PCPROTO here */ 688 DB_TYPE(mp) = M_PCPROTO; 689 690 error = kstrputmsg(SOTOV(so), mp, NULL, 0, 0, 691 MSG_BAND|MSG_HOLDSIG|MSG_IGNERROR, 0); 692 if (error) { 693 eprintsoline(so, error); 694 return (error); 695 } 696 mutex_enter(&so->so_lock); 697 /* Wait for T_CAPABILITY_ACK */ 698 if ((error = sowaitprim(so, T_CAPABILITY_REQ, T_CAPABILITY_ACK, 699 (t_uscalar_t)sizeof (*tca), &mp, sock_capability_timeout * hz))) { 700 mutex_exit(&so->so_lock); 701 PI_PROVLOCK(sti->sti_provinfo); 702 if (sti->sti_provinfo->tpi_capability == PI_DONTKNOW) 703 sti->sti_provinfo->tpi_capability = PI_NO; 704 PI_PROVUNLOCK(sti->sti_provinfo); 705 ASSERT((so->so_mode & SM_ACCEPTOR_ID) == 0); 706 if (cap_bits1 & TC1_INFO) { 707 /* 708 * If the T_CAPABILITY_REQ timed out and then a 709 * T_INFO_REQ gets a protocol error, most likely 710 * the capability was slow (vs. unsupported). Return 711 * ENOSR for this case as a best guess. 712 */ 713 if (error == ETIME) { 714 return ((error = do_tinfo(so)) == EPROTO ? 715 ENOSR : error); 716 } 717 return (do_tinfo(so)); 718 } 719 return (0); 720 } 721 722 ASSERT(mp); 723 tca = (struct T_capability_ack *)mp->b_rptr; 724 725 ASSERT((cap_bits1 & TC1_INFO) == (tca->CAP_bits1 & TC1_INFO)); 726 so_proc_tcapability_ack(so, tca); 727 728 cap_bits1 = tca->CAP_bits1; 729 730 mutex_exit(&so->so_lock); 731 freemsg(mp); 732 733 if (cap_bits1 & TC1_INFO) 734 return (check_tinfo(so)); 735 736 return (0); 737 } 738 739 /* 740 * Process a T_CAPABILITY_ACK 741 */ 742 void 743 so_proc_tcapability_ack(struct sonode *so, struct T_capability_ack *tca) 744 { 745 sotpi_info_t *sti = SOTOTPI(so); 746 747 if (sti->sti_provinfo->tpi_capability == PI_DONTKNOW) { 748 PI_PROVLOCK(sti->sti_provinfo); 749 sti->sti_provinfo->tpi_capability = PI_YES; 750 PI_PROVUNLOCK(sti->sti_provinfo); 751 } 752 753 if (tca->CAP_bits1 & TC1_ACCEPTOR_ID) { 754 sti->sti_acceptor_id = tca->ACCEPTOR_id; 755 so->so_mode |= SM_ACCEPTOR_ID; 756 } 757 758 if (tca->CAP_bits1 & TC1_INFO) 759 copy_tinfo(so, &tca->INFO_ack); 760 } 761 762 /* 763 * Retrieve socket error, clear error if not peek. 764 */ 765 int 766 sogeterr(struct sonode *so, boolean_t clear_err) 767 { 768 int error; 769 770 ASSERT(MUTEX_HELD(&so->so_lock)); 771 772 error = so->so_error; 773 if (clear_err) 774 so->so_error = 0; 775 776 return (error); 777 } 778 779 /* 780 * This routine is registered with the stream head to retrieve read 781 * side errors. 782 * It does not clear the socket error for a peeking read side operation. 783 * It the error is to be cleared it sets *clearerr. 
784 */ 785 int 786 sogetrderr(vnode_t *vp, int ispeek, int *clearerr) 787 { 788 struct sonode *so = VTOSO(vp); 789 int error; 790 791 mutex_enter(&so->so_lock); 792 if (ispeek) { 793 error = so->so_error; 794 *clearerr = 0; 795 } else { 796 error = so->so_error; 797 so->so_error = 0; 798 *clearerr = 1; 799 } 800 mutex_exit(&so->so_lock); 801 return (error); 802 } 803 804 /* 805 * This routine is registered with the stream head to retrieve write 806 * side errors. 807 * It does not clear the socket error for a peeking read side operation. 808 * It the error is to be cleared it sets *clearerr. 809 */ 810 int 811 sogetwrerr(vnode_t *vp, int ispeek, int *clearerr) 812 { 813 struct sonode *so = VTOSO(vp); 814 int error; 815 816 mutex_enter(&so->so_lock); 817 if (so->so_state & SS_CANTSENDMORE) { 818 error = EPIPE; 819 *clearerr = 0; 820 } else { 821 error = so->so_error; 822 if (ispeek) { 823 *clearerr = 0; 824 } else { 825 so->so_error = 0; 826 *clearerr = 1; 827 } 828 } 829 mutex_exit(&so->so_lock); 830 return (error); 831 } 832 833 /* 834 * Set a nonpersistent read and write error on the socket. 835 * Used when there is a T_uderror_ind for a connected socket. 836 * The caller also needs to call strsetrerror and strsetwerror 837 * after dropping the lock. 838 */ 839 void 840 soseterror(struct sonode *so, int error) 841 { 842 ASSERT(error != 0); 843 844 ASSERT(MUTEX_HELD(&so->so_lock)); 845 so->so_error = (ushort_t)error; 846 } 847 848 void 849 soisconnecting(struct sonode *so) 850 { 851 ASSERT(MUTEX_HELD(&so->so_lock)); 852 so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING); 853 so->so_state |= SS_ISCONNECTING; 854 cv_broadcast(&so->so_state_cv); 855 } 856 857 void 858 soisconnected(struct sonode *so) 859 { 860 ASSERT(MUTEX_HELD(&so->so_lock)); 861 so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING); 862 so->so_state |= SS_ISCONNECTED; 863 cv_broadcast(&so->so_state_cv); 864 } 865 866 /* 867 * The caller also needs to call strsetrerror, strsetwerror and strseteof. 868 */ 869 void 870 soisdisconnected(struct sonode *so, int error) 871 { 872 ASSERT(MUTEX_HELD(&so->so_lock)); 873 so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING); 874 so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE); 875 so->so_error = (ushort_t)error; 876 if (so->so_peercred != NULL) { 877 crfree(so->so_peercred); 878 so->so_peercred = NULL; 879 } 880 cv_broadcast(&so->so_state_cv); 881 } 882 883 /* 884 * For connected AF_UNIX SOCK_DGRAM sockets when the peer closes. 885 * Does not affect write side. 886 * The caller also has to call strsetrerror. 887 */ 888 static void 889 sobreakconn(struct sonode *so, int error) 890 { 891 ASSERT(MUTEX_HELD(&so->so_lock)); 892 so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING); 893 so->so_error = (ushort_t)error; 894 cv_broadcast(&so->so_state_cv); 895 } 896 897 /* 898 * Can no longer send. 899 * Caller must also call strsetwerror. 900 * 901 * We mark the peer address as no longer valid for getpeername, but 902 * leave it around for so_unix_close to notify the peer (that 903 * transport has no addressing held at that layer). 904 */ 905 void 906 socantsendmore(struct sonode *so) 907 { 908 ASSERT(MUTEX_HELD(&so->so_lock)); 909 so->so_state |= SS_CANTSENDMORE; 910 cv_broadcast(&so->so_state_cv); 911 } 912 913 /* 914 * The caller must call strseteof(,1) as well as this routine 915 * to change the socket state. 
916 */ 917 void 918 socantrcvmore(struct sonode *so) 919 { 920 ASSERT(MUTEX_HELD(&so->so_lock)); 921 so->so_state |= SS_CANTRCVMORE; 922 cv_broadcast(&so->so_state_cv); 923 } 924 925 /* 926 * The caller has sent down a "request_prim" primitive and wants to wait for 927 * an ack ("ack_prim") or an T_ERROR_ACK for it. 928 * The specified "ack_prim" can be a T_OK_ACK. 929 * 930 * Assumes that all the TPI acks are M_PCPROTO messages. 931 * 932 * Note that the socket is single-threaded (using so_lock_single) 933 * for all operations that generate TPI ack messages. Since 934 * only TPI ack messages are M_PCPROTO we should never receive 935 * anything except either the ack we are expecting or a T_ERROR_ACK 936 * for the same primitive. 937 */ 938 int 939 sowaitprim(struct sonode *so, t_scalar_t request_prim, t_scalar_t ack_prim, 940 t_uscalar_t min_size, mblk_t **mpp, clock_t wait) 941 { 942 mblk_t *mp; 943 union T_primitives *tpr; 944 int error; 945 946 dprintso(so, 1, ("sowaitprim(%p, %d, %d, %d, %p, %lu)\n", 947 (void *)so, request_prim, ack_prim, min_size, (void *)mpp, wait)); 948 949 ASSERT(MUTEX_HELD(&so->so_lock)); 950 951 error = sowaitack(so, &mp, wait); 952 if (error) 953 return (error); 954 955 dprintso(so, 1, ("got msg %p\n", (void *)mp)); 956 if (DB_TYPE(mp) != M_PCPROTO || 957 MBLKL(mp) < sizeof (tpr->type)) { 958 freemsg(mp); 959 eprintsoline(so, EPROTO); 960 return (EPROTO); 961 } 962 tpr = (union T_primitives *)mp->b_rptr; 963 /* 964 * Did we get the primitive that we were asking for? 965 * For T_OK_ACK we also check that it matches the request primitive. 966 */ 967 if (tpr->type == ack_prim && 968 (ack_prim != T_OK_ACK || 969 tpr->ok_ack.CORRECT_prim == request_prim)) { 970 if (MBLKL(mp) >= (ssize_t)min_size) { 971 /* Found what we are looking for */ 972 *mpp = mp; 973 return (0); 974 } 975 /* Too short */ 976 freemsg(mp); 977 eprintsoline(so, EPROTO); 978 return (EPROTO); 979 } 980 981 if (tpr->type == T_ERROR_ACK && 982 tpr->error_ack.ERROR_prim == request_prim) { 983 /* Error to the primitive we were looking for */ 984 if (tpr->error_ack.TLI_error == TSYSERR) { 985 error = tpr->error_ack.UNIX_error; 986 } else { 987 error = proto_tlitosyserr(tpr->error_ack.TLI_error); 988 } 989 dprintso(so, 0, ("error_ack for %d: %d/%d ->%d\n", 990 tpr->error_ack.ERROR_prim, tpr->error_ack.TLI_error, 991 tpr->error_ack.UNIX_error, error)); 992 freemsg(mp); 993 return (error); 994 } 995 /* 996 * Wrong primitive or T_ERROR_ACK for the wrong primitive 997 */ 998 #ifdef DEBUG 999 if (tpr->type == T_ERROR_ACK) { 1000 dprintso(so, 0, ("error_ack for %d: %d/%d\n", 1001 tpr->error_ack.ERROR_prim, tpr->error_ack.TLI_error, 1002 tpr->error_ack.UNIX_error)); 1003 } else if (tpr->type == T_OK_ACK) { 1004 dprintso(so, 0, ("ok_ack for %d, expected %d for %d\n", 1005 tpr->ok_ack.CORRECT_prim, ack_prim, request_prim)); 1006 } else { 1007 dprintso(so, 0, 1008 ("unexpected primitive %d, expected %d for %d\n", 1009 tpr->type, ack_prim, request_prim)); 1010 } 1011 #endif /* DEBUG */ 1012 1013 freemsg(mp); 1014 eprintsoline(so, EPROTO); 1015 return (EPROTO); 1016 } 1017 1018 /* 1019 * Wait for a T_OK_ACK for the specified primitive. 1020 */ 1021 int 1022 sowaitokack(struct sonode *so, t_scalar_t request_prim) 1023 { 1024 mblk_t *mp; 1025 int error; 1026 1027 error = sowaitprim(so, request_prim, T_OK_ACK, 1028 (t_uscalar_t)sizeof (struct T_ok_ack), &mp, 0); 1029 if (error) 1030 return (error); 1031 freemsg(mp); 1032 return (0); 1033 } 1034 1035 /* 1036 * Queue a received TPI ack message on sti_ack_mp. 
1037 */ 1038 void 1039 soqueueack(struct sonode *so, mblk_t *mp) 1040 { 1041 sotpi_info_t *sti = SOTOTPI(so); 1042 1043 if (DB_TYPE(mp) != M_PCPROTO) { 1044 zcmn_err(getzoneid(), CE_WARN, 1045 "sockfs: received unexpected M_PROTO TPI ack. Prim %d\n", 1046 *(t_scalar_t *)mp->b_rptr); 1047 freemsg(mp); 1048 return; 1049 } 1050 1051 mutex_enter(&so->so_lock); 1052 if (sti->sti_ack_mp != NULL) { 1053 dprintso(so, 1, ("sti_ack_mp already set\n")); 1054 freemsg(sti->sti_ack_mp); 1055 sti->sti_ack_mp = NULL; 1056 } 1057 sti->sti_ack_mp = mp; 1058 cv_broadcast(&sti->sti_ack_cv); 1059 mutex_exit(&so->so_lock); 1060 } 1061 1062 /* 1063 * Wait for a TPI ack ignoring signals and errors. 1064 */ 1065 int 1066 sowaitack(struct sonode *so, mblk_t **mpp, clock_t wait) 1067 { 1068 sotpi_info_t *sti = SOTOTPI(so); 1069 1070 ASSERT(MUTEX_HELD(&so->so_lock)); 1071 1072 while (sti->sti_ack_mp == NULL) { 1073 #ifdef SOCK_TEST 1074 if (wait == 0 && sock_test_timelimit != 0) 1075 wait = sock_test_timelimit; 1076 #endif 1077 if (wait != 0) { 1078 /* 1079 * Only wait for the time limit. 1080 */ 1081 if (cv_reltimedwait(&sti->sti_ack_cv, &so->so_lock, 1082 wait, TR_CLOCK_TICK) == -1) { 1083 eprintsoline(so, ETIME); 1084 return (ETIME); 1085 } 1086 } 1087 else 1088 cv_wait(&sti->sti_ack_cv, &so->so_lock); 1089 } 1090 *mpp = sti->sti_ack_mp; 1091 #ifdef DEBUG 1092 { 1093 union T_primitives *tpr; 1094 mblk_t *mp = *mpp; 1095 1096 tpr = (union T_primitives *)mp->b_rptr; 1097 ASSERT(DB_TYPE(mp) == M_PCPROTO); 1098 ASSERT(tpr->type == T_OK_ACK || 1099 tpr->type == T_ERROR_ACK || 1100 tpr->type == T_BIND_ACK || 1101 tpr->type == T_CAPABILITY_ACK || 1102 tpr->type == T_INFO_ACK || 1103 tpr->type == T_OPTMGMT_ACK); 1104 } 1105 #endif /* DEBUG */ 1106 sti->sti_ack_mp = NULL; 1107 return (0); 1108 } 1109 1110 /* 1111 * Queue a received T_CONN_IND message on sti_conn_ind_head/tail. 1112 */ 1113 void 1114 soqueueconnind(struct sonode *so, mblk_t *mp) 1115 { 1116 sotpi_info_t *sti = SOTOTPI(so); 1117 1118 if (DB_TYPE(mp) != M_PROTO) { 1119 zcmn_err(getzoneid(), CE_WARN, 1120 "sockfs: received unexpected M_PCPROTO T_CONN_IND\n"); 1121 freemsg(mp); 1122 return; 1123 } 1124 1125 mutex_enter(&so->so_lock); 1126 ASSERT(mp->b_next == NULL); 1127 if (sti->sti_conn_ind_head == NULL) { 1128 sti->sti_conn_ind_head = mp; 1129 } else { 1130 ASSERT(sti->sti_conn_ind_tail->b_next == NULL); 1131 sti->sti_conn_ind_tail->b_next = mp; 1132 } 1133 sti->sti_conn_ind_tail = mp; 1134 /* Wakeup a single consumer of the T_CONN_IND */ 1135 cv_signal(&so->so_acceptq_cv); 1136 mutex_exit(&so->so_lock); 1137 } 1138 1139 /* 1140 * Wait for a T_CONN_IND. 1141 * Don't wait if nonblocking. 1142 * Accept signals and socket errors. 
1143 */ 1144 int 1145 sowaitconnind(struct sonode *so, int fmode, mblk_t **mpp) 1146 { 1147 mblk_t *mp; 1148 sotpi_info_t *sti = SOTOTPI(so); 1149 int error = 0; 1150 1151 ASSERT(MUTEX_NOT_HELD(&so->so_lock)); 1152 mutex_enter(&so->so_lock); 1153 check_error: 1154 if (so->so_error) { 1155 error = sogeterr(so, B_TRUE); 1156 if (error) { 1157 mutex_exit(&so->so_lock); 1158 return (error); 1159 } 1160 } 1161 1162 if (sti->sti_conn_ind_head == NULL) { 1163 if (fmode & (FNDELAY|FNONBLOCK)) { 1164 error = EWOULDBLOCK; 1165 goto done; 1166 } 1167 1168 if (so->so_state & SS_CLOSING) { 1169 error = EINTR; 1170 goto done; 1171 } 1172 1173 if (!cv_wait_sig_swap(&so->so_acceptq_cv, &so->so_lock)) { 1174 error = EINTR; 1175 goto done; 1176 } 1177 goto check_error; 1178 } 1179 mp = sti->sti_conn_ind_head; 1180 sti->sti_conn_ind_head = mp->b_next; 1181 mp->b_next = NULL; 1182 if (sti->sti_conn_ind_head == NULL) { 1183 ASSERT(sti->sti_conn_ind_tail == mp); 1184 sti->sti_conn_ind_tail = NULL; 1185 } 1186 *mpp = mp; 1187 done: 1188 mutex_exit(&so->so_lock); 1189 return (error); 1190 } 1191 1192 /* 1193 * Flush a T_CONN_IND matching the sequence number from the list. 1194 * Return zero if found; non-zero otherwise. 1195 * This is called very infrequently thus it is ok to do a linear search. 1196 */ 1197 int 1198 soflushconnind(struct sonode *so, t_scalar_t seqno) 1199 { 1200 mblk_t *prevmp, *mp; 1201 struct T_conn_ind *tci; 1202 sotpi_info_t *sti = SOTOTPI(so); 1203 1204 mutex_enter(&so->so_lock); 1205 for (prevmp = NULL, mp = sti->sti_conn_ind_head; mp != NULL; 1206 prevmp = mp, mp = mp->b_next) { 1207 tci = (struct T_conn_ind *)mp->b_rptr; 1208 if (tci->SEQ_number == seqno) { 1209 dprintso(so, 1, 1210 ("t_discon_ind: found T_CONN_IND %d\n", seqno)); 1211 /* Deleting last? */ 1212 if (sti->sti_conn_ind_tail == mp) { 1213 sti->sti_conn_ind_tail = prevmp; 1214 } 1215 if (prevmp == NULL) { 1216 /* Deleting first */ 1217 sti->sti_conn_ind_head = mp->b_next; 1218 } else { 1219 prevmp->b_next = mp->b_next; 1220 } 1221 mp->b_next = NULL; 1222 1223 ASSERT((sti->sti_conn_ind_head == NULL && 1224 sti->sti_conn_ind_tail == NULL) || 1225 (sti->sti_conn_ind_head != NULL && 1226 sti->sti_conn_ind_tail != NULL)); 1227 1228 so->so_error = ECONNABORTED; 1229 mutex_exit(&so->so_lock); 1230 1231 /* 1232 * T_KSSL_PROXY_CONN_IND may carry a handle for 1233 * an SSL context, and needs to be released. 1234 */ 1235 if ((tci->PRIM_type == T_SSL_PROXY_CONN_IND) && 1236 (mp->b_cont != NULL)) { 1237 kssl_ctx_t kssl_ctx; 1238 1239 ASSERT(MBLKL(mp->b_cont) == 1240 sizeof (kssl_ctx_t)); 1241 kssl_ctx = *((kssl_ctx_t *)mp->b_cont->b_rptr); 1242 kssl_release_ctx(kssl_ctx); 1243 } 1244 freemsg(mp); 1245 return (0); 1246 } 1247 } 1248 mutex_exit(&so->so_lock); 1249 dprintso(so, 1, ("t_discon_ind: NOT found T_CONN_IND %d\n", seqno)); 1250 return (-1); 1251 } 1252 1253 /* 1254 * Wait until the socket is connected or there is an error. 1255 * fmode should contain any nonblocking flags. nosig should be 1256 * set if the caller does not want the wait to be interrupted by a signal. 
1257 */ 1258 int 1259 sowaitconnected(struct sonode *so, int fmode, int nosig) 1260 { 1261 int error; 1262 1263 ASSERT(MUTEX_HELD(&so->so_lock)); 1264 1265 while ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 1266 SS_ISCONNECTING && so->so_error == 0) { 1267 1268 dprintso(so, 1, ("waiting for SS_ISCONNECTED on %p\n", 1269 (void *)so)); 1270 if (fmode & (FNDELAY|FNONBLOCK)) 1271 return (EINPROGRESS); 1272 1273 if (so->so_state & SS_CLOSING) 1274 return (EINTR); 1275 1276 if (nosig) 1277 cv_wait(&so->so_state_cv, &so->so_lock); 1278 else if (!cv_wait_sig_swap(&so->so_state_cv, &so->so_lock)) { 1279 /* 1280 * Return EINTR and let the application use 1281 * nonblocking techniques for detecting when 1282 * the connection has been established. 1283 */ 1284 return (EINTR); 1285 } 1286 dprintso(so, 1, ("awoken on %p\n", (void *)so)); 1287 } 1288 1289 if (so->so_error != 0) { 1290 error = sogeterr(so, B_TRUE); 1291 ASSERT(error != 0); 1292 dprintso(so, 1, ("sowaitconnected: error %d\n", error)); 1293 return (error); 1294 } 1295 if (!(so->so_state & SS_ISCONNECTED)) { 1296 /* 1297 * Could have received a T_ORDREL_IND or a T_DISCON_IND with 1298 * zero errno. Or another thread could have consumed so_error 1299 * e.g. by calling read. 1300 */ 1301 error = ECONNREFUSED; 1302 dprintso(so, 1, ("sowaitconnected: error %d\n", error)); 1303 return (error); 1304 } 1305 return (0); 1306 } 1307 1308 1309 /* 1310 * Handle the signal generation aspect of urgent data. 1311 */ 1312 static void 1313 so_oob_sig(struct sonode *so, int extrasig, 1314 strsigset_t *signals, strpollset_t *pollwakeups) 1315 { 1316 sotpi_info_t *sti = SOTOTPI(so); 1317 1318 ASSERT(MUTEX_HELD(&so->so_lock)); 1319 1320 ASSERT(so_verify_oobstate(so)); 1321 ASSERT(sti->sti_oobsigcnt >= sti->sti_oobcnt); 1322 if (sti->sti_oobsigcnt > sti->sti_oobcnt) { 1323 /* 1324 * Signal has already been generated once for this 1325 * urgent "event". However, since TCP can receive updated 1326 * urgent pointers we still generate a signal. 1327 */ 1328 ASSERT(so->so_state & SS_OOBPEND); 1329 if (extrasig) { 1330 *signals |= S_RDBAND; 1331 *pollwakeups |= POLLRDBAND; 1332 } 1333 return; 1334 } 1335 1336 sti->sti_oobsigcnt++; 1337 ASSERT(sti->sti_oobsigcnt > 0); /* Wraparound */ 1338 ASSERT(sti->sti_oobsigcnt > sti->sti_oobcnt); 1339 1340 /* 1341 * Record (for select/poll) that urgent data is pending. 1342 */ 1343 so->so_state |= SS_OOBPEND; 1344 /* 1345 * New urgent data on the way so forget about any old 1346 * urgent data. 1347 */ 1348 so->so_state &= ~(SS_HAVEOOBDATA|SS_HADOOBDATA); 1349 if (so->so_oobmsg != NULL) { 1350 dprintso(so, 1, ("sock: discarding old oob\n")); 1351 freemsg(so->so_oobmsg); 1352 so->so_oobmsg = NULL; 1353 } 1354 *signals |= S_RDBAND; 1355 *pollwakeups |= POLLRDBAND; 1356 ASSERT(so_verify_oobstate(so)); 1357 } 1358 1359 /* 1360 * Handle the processing of the T_EXDATA_IND with urgent data. 1361 * Returns the T_EXDATA_IND if it should be queued on the read queue. 1362 */ 1363 /* ARGSUSED2 */ 1364 static mblk_t * 1365 so_oob_exdata(struct sonode *so, mblk_t *mp, 1366 strsigset_t *signals, strpollset_t *pollwakeups) 1367 { 1368 sotpi_info_t *sti = SOTOTPI(so); 1369 1370 ASSERT(MUTEX_HELD(&so->so_lock)); 1371 1372 ASSERT(so_verify_oobstate(so)); 1373 1374 ASSERT(sti->sti_oobsigcnt > sti->sti_oobcnt); 1375 1376 sti->sti_oobcnt++; 1377 ASSERT(sti->sti_oobcnt > 0); /* wraparound? */ 1378 ASSERT(sti->sti_oobsigcnt >= sti->sti_oobcnt); 1379 1380 /* 1381 * Set MSGMARK for SIOCATMARK. 
1382 */ 1383 mp->b_flag |= MSGMARK; 1384 1385 ASSERT(so_verify_oobstate(so)); 1386 return (mp); 1387 } 1388 1389 /* 1390 * Handle the processing of the actual urgent data. 1391 * Returns the data mblk if it should be queued on the read queue. 1392 */ 1393 static mblk_t * 1394 so_oob_data(struct sonode *so, mblk_t *mp, 1395 strsigset_t *signals, strpollset_t *pollwakeups) 1396 { 1397 sotpi_info_t *sti = SOTOTPI(so); 1398 1399 ASSERT(MUTEX_HELD(&so->so_lock)); 1400 1401 ASSERT(so_verify_oobstate(so)); 1402 1403 ASSERT(sti->sti_oobsigcnt >= sti->sti_oobcnt); 1404 ASSERT(mp != NULL); 1405 /* 1406 * For OOBINLINE we keep the data in the T_EXDATA_IND. 1407 * Otherwise we store it in so_oobmsg. 1408 */ 1409 ASSERT(so->so_oobmsg == NULL); 1410 if (so->so_options & SO_OOBINLINE) { 1411 *pollwakeups |= POLLIN | POLLRDNORM | POLLRDBAND; 1412 *signals |= S_INPUT | S_RDNORM; 1413 } else { 1414 *pollwakeups |= POLLRDBAND; 1415 so->so_state |= SS_HAVEOOBDATA; 1416 so->so_oobmsg = mp; 1417 mp = NULL; 1418 } 1419 ASSERT(so_verify_oobstate(so)); 1420 return (mp); 1421 } 1422 1423 /* 1424 * Caller must hold the mutex. 1425 * For delayed processing, save the T_DISCON_IND received 1426 * from below on sti_discon_ind_mp. 1427 * When the message is processed the framework will call: 1428 * (*func)(so, mp); 1429 */ 1430 static void 1431 so_save_discon_ind(struct sonode *so, 1432 mblk_t *mp, 1433 void (*func)(struct sonode *so, mblk_t *)) 1434 { 1435 sotpi_info_t *sti = SOTOTPI(so); 1436 1437 ASSERT(MUTEX_HELD(&so->so_lock)); 1438 1439 /* 1440 * Discard new T_DISCON_IND if we have already received another. 1441 * Currently the earlier message can either be on sti_discon_ind_mp 1442 * or being processed. 1443 */ 1444 if (sti->sti_discon_ind_mp != NULL || (so->so_flag & SOASYNC_UNBIND)) { 1445 zcmn_err(getzoneid(), CE_WARN, 1446 "sockfs: received unexpected additional T_DISCON_IND\n"); 1447 freemsg(mp); 1448 return; 1449 } 1450 mp->b_prev = (mblk_t *)func; 1451 mp->b_next = NULL; 1452 sti->sti_discon_ind_mp = mp; 1453 } 1454 1455 /* 1456 * Caller must hold the mutex and make sure that either SOLOCKED 1457 * or SOASYNC_UNBIND is set. Called from so_unlock_single(). 1458 * Perform delayed processing of T_DISCON_IND message on sti_discon_ind_mp. 1459 * Need to ensure that strsock_proto() will not end up sleeping for 1460 * SOASYNC_UNBIND, while executing this function. 1461 */ 1462 void 1463 so_drain_discon_ind(struct sonode *so) 1464 { 1465 mblk_t *bp; 1466 void (*func)(struct sonode *so, mblk_t *); 1467 sotpi_info_t *sti = SOTOTPI(so); 1468 1469 ASSERT(MUTEX_HELD(&so->so_lock)); 1470 ASSERT(so->so_flag & (SOLOCKED|SOASYNC_UNBIND)); 1471 1472 /* Process T_DISCON_IND on sti_discon_ind_mp */ 1473 if ((bp = sti->sti_discon_ind_mp) != NULL) { 1474 sti->sti_discon_ind_mp = NULL; 1475 func = (void (*)())bp->b_prev; 1476 bp->b_prev = NULL; 1477 1478 /* 1479 * This (*func) is supposed to generate a message downstream 1480 * and we need to have a flag set until the corresponding 1481 * upstream message reaches stream head. 1482 * When processing T_DISCON_IND in strsock_discon_ind 1483 * we hold SOASYN_UNBIND when sending T_UNBIND_REQ down and 1484 * drop the flag after we get the ACK in strsock_proto. 1485 */ 1486 (void) (*func)(so, bp); 1487 } 1488 } 1489 1490 /* 1491 * Caller must hold the mutex. 1492 * Remove the T_DISCON_IND on sti_discon_ind_mp. 
1493 */ 1494 void 1495 so_flush_discon_ind(struct sonode *so) 1496 { 1497 mblk_t *bp; 1498 sotpi_info_t *sti = SOTOTPI(so); 1499 1500 ASSERT(MUTEX_HELD(&so->so_lock)); 1501 1502 /* 1503 * Remove T_DISCON_IND mblk at sti_discon_ind_mp. 1504 */ 1505 if ((bp = sti->sti_discon_ind_mp) != NULL) { 1506 sti->sti_discon_ind_mp = NULL; 1507 bp->b_prev = NULL; 1508 freemsg(bp); 1509 } 1510 } 1511 1512 /* 1513 * Caller must hold the mutex. 1514 * 1515 * This function is used to process the T_DISCON_IND message. It does 1516 * immediate processing when called from strsock_proto and delayed 1517 * processing of discon_ind saved on sti_discon_ind_mp when called from 1518 * so_drain_discon_ind. When a T_DISCON_IND message is saved in 1519 * sti_discon_ind_mp for delayed processing, this function is registered 1520 * as the callback function to process the message. 1521 * 1522 * SOASYNC_UNBIND should be held in this function, during the non-blocking 1523 * unbind operation, and should be released only after we receive the ACK 1524 * in strsock_proto, for the T_UNBIND_REQ sent here. Since SOLOCKED is not set, 1525 * no TPI messages would be sent down at this time. This is to prevent M_FLUSH 1526 * sent from either this function or tcp_unbind(), flushing away any TPI 1527 * message that is being sent down and stays in a lower module's queue. 1528 * 1529 * This function drops so_lock and grabs it again. 1530 */ 1531 static void 1532 strsock_discon_ind(struct sonode *so, mblk_t *discon_mp) 1533 { 1534 struct vnode *vp; 1535 struct stdata *stp; 1536 union T_primitives *tpr; 1537 struct T_unbind_req *ubr; 1538 mblk_t *mp; 1539 int error; 1540 sotpi_info_t *sti = SOTOTPI(so); 1541 1542 ASSERT(MUTEX_HELD(&so->so_lock)); 1543 ASSERT(discon_mp); 1544 ASSERT(discon_mp->b_rptr); 1545 1546 tpr = (union T_primitives *)discon_mp->b_rptr; 1547 ASSERT(tpr->type == T_DISCON_IND); 1548 1549 vp = SOTOV(so); 1550 stp = vp->v_stream; 1551 ASSERT(stp); 1552 1553 /* 1554 * Not a listener 1555 */ 1556 ASSERT((so->so_state & SS_ACCEPTCONN) == 0); 1557 1558 /* 1559 * This assumes that the name space for DISCON_reason 1560 * is the errno name space. 1561 */ 1562 soisdisconnected(so, tpr->discon_ind.DISCON_reason); 1563 sti->sti_laddr_valid = 0; 1564 sti->sti_faddr_valid = 0; 1565 1566 /* 1567 * Unbind with the transport without blocking. 1568 * If we've already received a T_DISCON_IND do not unbind. 1569 * 1570 * If there is no preallocated unbind message, we have already 1571 * unbound with the transport 1572 * 1573 * If the socket is not bound, no need to unbind. 1574 */ 1575 mp = sti->sti_unbind_mp; 1576 if (mp == NULL) { 1577 ASSERT(!(so->so_state & SS_ISBOUND)); 1578 mutex_exit(&so->so_lock); 1579 } else if (!(so->so_state & SS_ISBOUND)) { 1580 mutex_exit(&so->so_lock); 1581 } else { 1582 sti->sti_unbind_mp = NULL; 1583 1584 /* 1585 * Is another T_DISCON_IND being processed. 1586 */ 1587 ASSERT((so->so_flag & SOASYNC_UNBIND) == 0); 1588 1589 /* 1590 * Make strsock_proto ignore T_OK_ACK and T_ERROR_ACK for 1591 * this unbind. Set SOASYNC_UNBIND. This should be cleared 1592 * only after we receive the ACK in strsock_proto. 1593 */ 1594 so->so_flag |= SOASYNC_UNBIND; 1595 ASSERT(!(so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING))); 1596 so->so_state &= ~(SS_ISBOUND|SS_ACCEPTCONN); 1597 sti->sti_laddr_valid = 0; 1598 mutex_exit(&so->so_lock); 1599 1600 /* 1601 * Send down T_UNBIND_REQ ignoring flow control. 1602 * XXX Assumes that MSG_IGNFLOW implies that this thread 1603 * does not run service procedures. 
1604 */ 1605 ASSERT(DB_TYPE(mp) == M_PROTO); 1606 ubr = (struct T_unbind_req *)mp->b_rptr; 1607 mp->b_wptr += sizeof (*ubr); 1608 ubr->PRIM_type = T_UNBIND_REQ; 1609 1610 /* 1611 * Flush the read and write side (except stream head read queue) 1612 * and send down T_UNBIND_REQ. 1613 */ 1614 (void) putnextctl1(strvp2wq(SOTOV(so)), M_FLUSH, FLUSHRW); 1615 error = kstrputmsg(SOTOV(so), mp, NULL, 0, 0, 1616 MSG_BAND|MSG_HOLDSIG|MSG_IGNERROR|MSG_IGNFLOW, 0); 1617 /* LINTED - warning: statement has no consequent: if */ 1618 if (error) { 1619 eprintsoline(so, error); 1620 } 1621 } 1622 1623 if (tpr->discon_ind.DISCON_reason != 0) 1624 strsetrerror(SOTOV(so), 0, 0, sogetrderr); 1625 strsetwerror(SOTOV(so), 0, 0, sogetwrerr); 1626 strseteof(SOTOV(so), 1); 1627 /* 1628 * strseteof takes care of read side wakeups, 1629 * pollwakeups, and signals. 1630 */ 1631 dprintso(so, 1, ("T_DISCON_IND: error %d\n", so->so_error)); 1632 freemsg(discon_mp); 1633 1634 1635 pollwakeup(&stp->sd_pollist, POLLOUT); 1636 mutex_enter(&stp->sd_lock); 1637 1638 /* 1639 * Wake sleeping write 1640 */ 1641 if (stp->sd_flag & WSLEEP) { 1642 stp->sd_flag &= ~WSLEEP; 1643 cv_broadcast(&stp->sd_wrq->q_wait); 1644 } 1645 1646 /* 1647 * strsendsig can handle multiple signals with a 1648 * single call. Send SIGPOLL for S_OUTPUT event. 1649 */ 1650 if (stp->sd_sigflags & S_OUTPUT) 1651 strsendsig(stp->sd_siglist, S_OUTPUT, 0, 0); 1652 1653 mutex_exit(&stp->sd_lock); 1654 mutex_enter(&so->so_lock); 1655 } 1656 1657 /* 1658 * This routine is registered with the stream head to receive M_PROTO 1659 * and M_PCPROTO messages. 1660 * 1661 * Returns NULL if the message was consumed. 1662 * Returns an mblk to make that mblk be processed (and queued) by the stream 1663 * head. 1664 * 1665 * Sets the return parameters (*wakeups, *firstmsgsigs, *allmsgsigs, and 1666 * *pollwakeups) for the stream head to take action on. Note that since 1667 * sockets always deliver SIGIO for every new piece of data this routine 1668 * never sets *firstmsgsigs; any signals are returned in *allmsgsigs. 1669 * 1670 * This routine handles all data related TPI messages independent of 1671 * the type of the socket i.e. it doesn't care if T_UNITDATA_IND message 1672 * arrive on a SOCK_STREAM. 1673 */ 1674 static mblk_t * 1675 strsock_proto(vnode_t *vp, mblk_t *mp, 1676 strwakeup_t *wakeups, strsigset_t *firstmsgsigs, 1677 strsigset_t *allmsgsigs, strpollset_t *pollwakeups) 1678 { 1679 union T_primitives *tpr; 1680 struct sonode *so; 1681 sotpi_info_t *sti; 1682 uint32_t auditing = AU_AUDITING(); 1683 1684 so = VTOSO(vp); 1685 sti = SOTOTPI(so); 1686 1687 dprintso(so, 1, ("strsock_proto(%p, %p)\n", (void *)vp, (void *)mp)); 1688 1689 /* Set default return values */ 1690 *firstmsgsigs = *wakeups = *allmsgsigs = *pollwakeups = 0; 1691 1692 ASSERT(DB_TYPE(mp) == M_PROTO || 1693 DB_TYPE(mp) == M_PCPROTO); 1694 1695 if (MBLKL(mp) < sizeof (tpr->type)) { 1696 /* The message is too short to even contain the primitive */ 1697 zcmn_err(getzoneid(), CE_WARN, 1698 "sockfs: Too short TPI message received. Len = %ld\n", 1699 (ptrdiff_t)(MBLKL(mp))); 1700 freemsg(mp); 1701 return (NULL); 1702 } 1703 if (!__TPI_PRIM_ISALIGNED(mp->b_rptr)) { 1704 /* The read pointer is not aligned correctly for TPI */ 1705 zcmn_err(getzoneid(), CE_WARN, 1706 "sockfs: Unaligned TPI message received. 
rptr = %p\n", 1707 (void *)mp->b_rptr); 1708 freemsg(mp); 1709 return (NULL); 1710 } 1711 tpr = (union T_primitives *)mp->b_rptr; 1712 dprintso(so, 1, ("strsock_proto: primitive %d\n", tpr->type)); 1713 1714 switch (tpr->type) { 1715 1716 case T_DATA_IND: 1717 if (MBLKL(mp) < sizeof (struct T_data_ind)) { 1718 zcmn_err(getzoneid(), CE_WARN, 1719 "sockfs: Too short T_DATA_IND. Len = %ld\n", 1720 (ptrdiff_t)(MBLKL(mp))); 1721 freemsg(mp); 1722 return (NULL); 1723 } 1724 /* 1725 * Ignore zero-length T_DATA_IND messages. These might be 1726 * generated by some transports. 1727 * This is needed to prevent read (which skips the M_PROTO 1728 * part) to unexpectedly return 0 (or return EWOULDBLOCK 1729 * on a non-blocking socket after select/poll has indicated 1730 * that data is available). 1731 */ 1732 if (msgdsize(mp->b_cont) == 0) { 1733 dprintso(so, 0, 1734 ("strsock_proto: zero length T_DATA_IND\n")); 1735 freemsg(mp); 1736 return (NULL); 1737 } 1738 *allmsgsigs = S_INPUT | S_RDNORM; 1739 *pollwakeups = POLLIN | POLLRDNORM; 1740 *wakeups = RSLEEP; 1741 return (mp); 1742 1743 case T_UNITDATA_IND: { 1744 struct T_unitdata_ind *tudi = &tpr->unitdata_ind; 1745 void *addr; 1746 t_uscalar_t addrlen; 1747 1748 if (MBLKL(mp) < sizeof (struct T_unitdata_ind)) { 1749 zcmn_err(getzoneid(), CE_WARN, 1750 "sockfs: Too short T_UNITDATA_IND. Len = %ld\n", 1751 (ptrdiff_t)(MBLKL(mp))); 1752 freemsg(mp); 1753 return (NULL); 1754 } 1755 1756 /* Is this is not a connected datagram socket? */ 1757 if ((so->so_mode & SM_CONNREQUIRED) || 1758 !(so->so_state & SS_ISCONNECTED)) { 1759 /* 1760 * Not a connected datagram socket. Look for 1761 * the SO_UNIX_CLOSE option. If such an option is found 1762 * discard the message (since it has no meaning 1763 * unless connected). 1764 */ 1765 if (so->so_family == AF_UNIX && msgdsize(mp) == 0 && 1766 tudi->OPT_length != 0) { 1767 void *opt; 1768 t_uscalar_t optlen = tudi->OPT_length; 1769 1770 opt = sogetoff(mp, tudi->OPT_offset, 1771 optlen, __TPI_ALIGN_SIZE); 1772 if (opt == NULL) { 1773 /* The len/off falls outside mp */ 1774 freemsg(mp); 1775 mutex_enter(&so->so_lock); 1776 soseterror(so, EPROTO); 1777 mutex_exit(&so->so_lock); 1778 zcmn_err(getzoneid(), CE_WARN, 1779 "sockfs: T_unidata_ind with " 1780 "invalid optlen/offset %u/%d\n", 1781 optlen, tudi->OPT_offset); 1782 return (NULL); 1783 } 1784 if (so_getopt_unix_close(opt, optlen)) { 1785 freemsg(mp); 1786 return (NULL); 1787 } 1788 } 1789 *allmsgsigs = S_INPUT | S_RDNORM; 1790 *pollwakeups = POLLIN | POLLRDNORM; 1791 *wakeups = RSLEEP; 1792 if (auditing) 1793 audit_sock(T_UNITDATA_IND, strvp2wq(vp), 1794 mp, 0); 1795 return (mp); 1796 } 1797 1798 /* 1799 * A connect datagram socket. For AF_INET{,6} we verify that 1800 * the source address matches the "connected to" address. 1801 * The semantics of AF_UNIX sockets is to not verify 1802 * the source address. 1803 * Note that this source address verification is transport 1804 * specific. Thus the real fix would be to extent TPI 1805 * to allow T_CONN_REQ messages to be send to connectionless 1806 * transport providers and always let the transport provider 1807 * do whatever filtering is needed. 1808 * 1809 * The verification/filtering semantics for transports 1810 * other than AF_INET and AF_UNIX are unknown. The choice 1811 * would be to either filter using bcmp or let all messages 1812 * get through. This code does not filter other address 1813 * families since this at least allows the application to 1814 * work around any missing filtering. 
1815 * 1816 * XXX Should we move filtering to UDP/ICMP??? 1817 * That would require passing e.g. a T_DISCON_REQ to UDP 1818 * when the socket becomes unconnected. 1819 */ 1820 addrlen = tudi->SRC_length; 1821 /* 1822 * The alignment restriction is really to strict but 1823 * we want enough alignment to inspect the fields of 1824 * a sockaddr_in. 1825 */ 1826 addr = sogetoff(mp, tudi->SRC_offset, addrlen, 1827 __TPI_ALIGN_SIZE); 1828 if (addr == NULL) { 1829 freemsg(mp); 1830 mutex_enter(&so->so_lock); 1831 soseterror(so, EPROTO); 1832 mutex_exit(&so->so_lock); 1833 zcmn_err(getzoneid(), CE_WARN, 1834 "sockfs: T_unidata_ind with invalid " 1835 "addrlen/offset %u/%d\n", 1836 addrlen, tudi->SRC_offset); 1837 return (NULL); 1838 } 1839 1840 if (so->so_family == AF_INET) { 1841 /* 1842 * For AF_INET we allow wildcarding both sin_addr 1843 * and sin_port. 1844 */ 1845 struct sockaddr_in *faddr, *sin; 1846 1847 /* Prevent sti_faddr_sa from changing while accessed */ 1848 mutex_enter(&so->so_lock); 1849 ASSERT(sti->sti_faddr_len == 1850 (socklen_t)sizeof (struct sockaddr_in)); 1851 faddr = (struct sockaddr_in *)sti->sti_faddr_sa; 1852 sin = (struct sockaddr_in *)addr; 1853 if (addrlen != 1854 (t_uscalar_t)sizeof (struct sockaddr_in) || 1855 (sin->sin_addr.s_addr != faddr->sin_addr.s_addr && 1856 faddr->sin_addr.s_addr != INADDR_ANY) || 1857 (so->so_type != SOCK_RAW && 1858 sin->sin_port != faddr->sin_port && 1859 faddr->sin_port != 0)) { 1860 #ifdef DEBUG 1861 dprintso(so, 0, 1862 ("sockfs: T_UNITDATA_IND mismatch: %s", 1863 pr_addr(so->so_family, 1864 (struct sockaddr *)addr, addrlen))); 1865 dprintso(so, 0, (" - %s\n", 1866 pr_addr(so->so_family, sti->sti_faddr_sa, 1867 (t_uscalar_t)sti->sti_faddr_len))); 1868 #endif /* DEBUG */ 1869 mutex_exit(&so->so_lock); 1870 freemsg(mp); 1871 return (NULL); 1872 } 1873 mutex_exit(&so->so_lock); 1874 } else if (so->so_family == AF_INET6) { 1875 /* 1876 * For AF_INET6 we allow wildcarding both sin6_addr 1877 * and sin6_port. 1878 */ 1879 struct sockaddr_in6 *faddr6, *sin6; 1880 static struct in6_addr zeroes; /* inits to all zeros */ 1881 1882 /* Prevent sti_faddr_sa from changing while accessed */ 1883 mutex_enter(&so->so_lock); 1884 ASSERT(sti->sti_faddr_len == 1885 (socklen_t)sizeof (struct sockaddr_in6)); 1886 faddr6 = (struct sockaddr_in6 *)sti->sti_faddr_sa; 1887 sin6 = (struct sockaddr_in6 *)addr; 1888 /* XXX could we get a mapped address ::ffff:0.0.0.0 ? */ 1889 if (addrlen != 1890 (t_uscalar_t)sizeof (struct sockaddr_in6) || 1891 (!IN6_ARE_ADDR_EQUAL(&sin6->sin6_addr, 1892 &faddr6->sin6_addr) && 1893 !IN6_ARE_ADDR_EQUAL(&faddr6->sin6_addr, &zeroes)) || 1894 (so->so_type != SOCK_RAW && 1895 sin6->sin6_port != faddr6->sin6_port && 1896 faddr6->sin6_port != 0)) { 1897 #ifdef DEBUG 1898 dprintso(so, 0, 1899 ("sockfs: T_UNITDATA_IND mismatch: %s", 1900 pr_addr(so->so_family, 1901 (struct sockaddr *)addr, addrlen))); 1902 dprintso(so, 0, (" - %s\n", 1903 pr_addr(so->so_family, sti->sti_faddr_sa, 1904 (t_uscalar_t)sti->sti_faddr_len))); 1905 #endif /* DEBUG */ 1906 mutex_exit(&so->so_lock); 1907 freemsg(mp); 1908 return (NULL); 1909 } 1910 mutex_exit(&so->so_lock); 1911 } else if (so->so_family == AF_UNIX && 1912 msgdsize(mp->b_cont) == 0 && 1913 tudi->OPT_length != 0) { 1914 /* 1915 * Attempt to extract AF_UNIX 1916 * SO_UNIX_CLOSE indication from options. 
1917 */ 1918 void *opt; 1919 t_uscalar_t optlen = tudi->OPT_length; 1920 1921 opt = sogetoff(mp, tudi->OPT_offset, 1922 optlen, __TPI_ALIGN_SIZE); 1923 if (opt == NULL) { 1924 /* The len/off falls outside mp */ 1925 freemsg(mp); 1926 mutex_enter(&so->so_lock); 1927 soseterror(so, EPROTO); 1928 mutex_exit(&so->so_lock); 1929 zcmn_err(getzoneid(), CE_WARN, 1930 "sockfs: T_unidata_ind with invalid " 1931 "optlen/offset %u/%d\n", 1932 optlen, tudi->OPT_offset); 1933 return (NULL); 1934 } 1935 /* 1936 * If we received a unix close indication mark the 1937 * socket and discard this message. 1938 */ 1939 if (so_getopt_unix_close(opt, optlen)) { 1940 mutex_enter(&so->so_lock); 1941 sobreakconn(so, ECONNRESET); 1942 mutex_exit(&so->so_lock); 1943 strsetrerror(SOTOV(so), 0, 0, sogetrderr); 1944 freemsg(mp); 1945 *pollwakeups = POLLIN | POLLRDNORM; 1946 *allmsgsigs = S_INPUT | S_RDNORM; 1947 *wakeups = RSLEEP; 1948 return (NULL); 1949 } 1950 } 1951 *allmsgsigs = S_INPUT | S_RDNORM; 1952 *pollwakeups = POLLIN | POLLRDNORM; 1953 *wakeups = RSLEEP; 1954 return (mp); 1955 } 1956 1957 case T_OPTDATA_IND: { 1958 struct T_optdata_ind *tdi = &tpr->optdata_ind; 1959 1960 if (MBLKL(mp) < sizeof (struct T_optdata_ind)) { 1961 zcmn_err(getzoneid(), CE_WARN, 1962 "sockfs: Too short T_OPTDATA_IND. Len = %ld\n", 1963 (ptrdiff_t)(MBLKL(mp))); 1964 freemsg(mp); 1965 return (NULL); 1966 } 1967 /* 1968 * Allow zero-length messages carrying options. 1969 * This is used when carrying the SO_UNIX_CLOSE option. 1970 */ 1971 if (so->so_family == AF_UNIX && msgdsize(mp->b_cont) == 0 && 1972 tdi->OPT_length != 0) { 1973 /* 1974 * Attempt to extract AF_UNIX close indication 1975 * from the options. Ignore any other options - 1976 * those are handled once the message is removed 1977 * from the queue. 1978 * The close indication message should not carry data. 1979 */ 1980 void *opt; 1981 t_uscalar_t optlen = tdi->OPT_length; 1982 1983 opt = sogetoff(mp, tdi->OPT_offset, 1984 optlen, __TPI_ALIGN_SIZE); 1985 if (opt == NULL) { 1986 /* The len/off falls outside mp */ 1987 freemsg(mp); 1988 mutex_enter(&so->so_lock); 1989 soseterror(so, EPROTO); 1990 mutex_exit(&so->so_lock); 1991 zcmn_err(getzoneid(), CE_WARN, 1992 "sockfs: T_optdata_ind with invalid " 1993 "optlen/offset %u/%d\n", 1994 optlen, tdi->OPT_offset); 1995 return (NULL); 1996 } 1997 /* 1998 * If we received a close indication mark the 1999 * socket and discard this message. 2000 */ 2001 if (so_getopt_unix_close(opt, optlen)) { 2002 mutex_enter(&so->so_lock); 2003 socantsendmore(so); 2004 sti->sti_faddr_valid = 0; 2005 mutex_exit(&so->so_lock); 2006 strsetwerror(SOTOV(so), 0, 0, sogetwrerr); 2007 freemsg(mp); 2008 return (NULL); 2009 } 2010 } 2011 *allmsgsigs = S_INPUT | S_RDNORM; 2012 *pollwakeups = POLLIN | POLLRDNORM; 2013 *wakeups = RSLEEP; 2014 return (mp); 2015 } 2016 2017 case T_EXDATA_IND: { 2018 mblk_t *mctl, *mdata; 2019 mblk_t *lbp; 2020 union T_primitives *tprp; 2021 struct stdata *stp; 2022 queue_t *qp; 2023 2024 if (MBLKL(mp) < sizeof (struct T_exdata_ind)) { 2025 zcmn_err(getzoneid(), CE_WARN, 2026 "sockfs: Too short T_EXDATA_IND. Len = %ld\n", 2027 (ptrdiff_t)(MBLKL(mp))); 2028 freemsg(mp); 2029 return (NULL); 2030 } 2031 /* 2032 * Ignore zero-length T_EXDATA_IND messages. These might be 2033 * generated by some transports. 2034 * 2035 * This is needed to prevent read (which skips the M_PROTO 2036 * part) to unexpectedly return 0 (or return EWOULDBLOCK 2037 * on a non-blocking socket after select/poll has indicated 2038 * that data is available). 
2039 */ 2040 dprintso(so, 1, 2041 ("T_EXDATA_IND(%p): counts %d/%d state %s\n", 2042 (void *)vp, sti->sti_oobsigcnt, sti->sti_oobcnt, 2043 pr_state(so->so_state, so->so_mode))); 2044 2045 if (msgdsize(mp->b_cont) == 0) { 2046 dprintso(so, 0, 2047 ("strsock_proto: zero length T_EXDATA_IND\n")); 2048 freemsg(mp); 2049 return (NULL); 2050 } 2051 2052 /* 2053 * Split into the T_EXDATA_IND and the M_DATA part. 2054 * We process these three pieces separately: 2055 * signal generation 2056 * handling T_EXDATA_IND 2057 * handling M_DATA component 2058 */ 2059 mctl = mp; 2060 mdata = mctl->b_cont; 2061 mctl->b_cont = NULL; 2062 mutex_enter(&so->so_lock); 2063 so_oob_sig(so, 0, allmsgsigs, pollwakeups); 2064 mctl = so_oob_exdata(so, mctl, allmsgsigs, pollwakeups); 2065 mdata = so_oob_data(so, mdata, allmsgsigs, pollwakeups); 2066 2067 stp = vp->v_stream; 2068 ASSERT(stp != NULL); 2069 qp = _RD(stp->sd_wrq); 2070 2071 mutex_enter(QLOCK(qp)); 2072 lbp = qp->q_last; 2073 2074 /* 2075 * We want to avoid queueing up a string of T_EXDATA_IND 2076 * messages with no intervening data messages at the stream 2077 * head. These messages contribute to the total message 2078 * count. Eventually this can lead to STREAMS flow control 2079 * and also cause TCP to advertise a zero window condition 2080 * to the peer. This can happen in the degenerate case where 2081 * the sender and receiver exchange only OOB data. The sender 2082 * only sends messages with MSG_OOB flag and the receiver 2083 * receives only MSG_OOB messages and does not use SO_OOBINLINE. 2084 * An example of this scenario has been reported in applications 2085 * that use OOB data to exchange heart beats. Flow control 2086 * relief will never happen if the application only reads OOB 2087 * data which is done directly by sorecvoob() and the 2088 * T_EXDATA_IND messages at the streamhead won't be consumed. 2089 * Note that there is no correctness issue in compressing the 2090 * string of T_EXDATA_IND messages into a single T_EXDATA_IND 2091 * message. A single read that does not specify MSG_OOB will 2092 * read across all the marks in a loop in sotpi_recvmsg(). 2093 * Each mark is individually distinguishable only if the 2094 * T_EXDATA_IND messages are separated by data messages. 2095 */ 2096 if ((qp->q_first != NULL) && (DB_TYPE(lbp) == M_PROTO)) { 2097 tprp = (union T_primitives *)lbp->b_rptr; 2098 if ((tprp->type == T_EXDATA_IND) && 2099 !(so->so_options & SO_OOBINLINE)) { 2100 2101 /* 2102 * free the new M_PROTO message 2103 */ 2104 freemsg(mctl); 2105 2106 /* 2107 * adjust the OOB count and OOB signal count 2108 * just incremented for the new OOB data. 2109 */ 2110 sti->sti_oobcnt--; 2111 sti->sti_oobsigcnt--; 2112 mutex_exit(QLOCK(qp)); 2113 mutex_exit(&so->so_lock); 2114 return (NULL); 2115 } 2116 } 2117 mutex_exit(QLOCK(qp)); 2118 2119 /* 2120 * Pass the T_EXDATA_IND and the M_DATA back separately 2121 * by using b_next linkage. (The stream head will queue any 2122 * b_next linked messages separately.) This is needed 2123 * since MSGMARK applies to the last byte of the message 2124 * hence we can not have any M_DATA component attached 2125 * to the marked T_EXDATA_IND. Note that the stream head 2126 * will not consolidate M_DATA messages onto an MSGMARK'ed 2127 * message in order to preserve the constraint that 2128 * the T_EXDATA_IND always is a separate message.
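 *
 * (Hedged illustration, not part of this file: the degenerate
 * heartbeat exchange described above amounts to one side looping on
 *	(void) send(fd, "x", 1, MSG_OOB);
 * while the other, with SO_OOBINLINE off, only ever issues
 *	(void) recv(fd, &c, 1, MSG_OOB);
 * so ordinary reads never drain the T_EXDATA_IND messages queued at
 * the stream head.)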
2129 */ 2130 ASSERT(mctl != NULL); 2131 mctl->b_next = mdata; 2132 mp = mctl; 2133 #ifdef DEBUG 2134 if (mdata == NULL) { 2135 dprintso(so, 1, 2136 ("after outofline T_EXDATA_IND(%p): " 2137 "counts %d/%d poll 0x%x sig 0x%x state %s\n", 2138 (void *)vp, sti->sti_oobsigcnt, 2139 sti->sti_oobcnt, *pollwakeups, *allmsgsigs, 2140 pr_state(so->so_state, so->so_mode))); 2141 } else { 2142 dprintso(so, 1, 2143 ("after inline T_EXDATA_IND(%p): " 2144 "counts %d/%d poll 0x%x sig 0x%x state %s\n", 2145 (void *)vp, sti->sti_oobsigcnt, 2146 sti->sti_oobcnt, *pollwakeups, *allmsgsigs, 2147 pr_state(so->so_state, so->so_mode))); 2148 } 2149 #endif /* DEBUG */ 2150 mutex_exit(&so->so_lock); 2151 *wakeups = RSLEEP; 2152 return (mp); 2153 } 2154 2155 case T_CONN_CON: { 2156 struct T_conn_con *conn_con; 2157 void *addr; 2158 t_uscalar_t addrlen; 2159 2160 /* 2161 * Verify the state, update the state to ISCONNECTED, 2162 * record the potentially new address in the message, 2163 * and drop the message. 2164 */ 2165 if (MBLKL(mp) < sizeof (struct T_conn_con)) { 2166 zcmn_err(getzoneid(), CE_WARN, 2167 "sockfs: Too short T_CONN_CON. Len = %ld\n", 2168 (ptrdiff_t)(MBLKL(mp))); 2169 freemsg(mp); 2170 return (NULL); 2171 } 2172 2173 mutex_enter(&so->so_lock); 2174 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) != 2175 SS_ISCONNECTING) { 2176 mutex_exit(&so->so_lock); 2177 dprintso(so, 1, 2178 ("T_CONN_CON: state %x\n", so->so_state)); 2179 freemsg(mp); 2180 return (NULL); 2181 } 2182 2183 conn_con = &tpr->conn_con; 2184 addrlen = conn_con->RES_length; 2185 /* 2186 * Allow the address to be of different size than sent down 2187 * in the T_CONN_REQ as long as it doesn't exceed the maxlen. 2188 * For AF_UNIX require the identical length. 2189 */ 2190 if (so->so_family == AF_UNIX ? 2191 addrlen != (t_uscalar_t)sizeof (sti->sti_ux_laddr) : 2192 addrlen > (t_uscalar_t)sti->sti_faddr_maxlen) { 2193 zcmn_err(getzoneid(), CE_WARN, 2194 "sockfs: T_conn_con with different " 2195 "length %u/%d\n", 2196 addrlen, conn_con->RES_length); 2197 soisdisconnected(so, EPROTO); 2198 sti->sti_laddr_valid = 0; 2199 sti->sti_faddr_valid = 0; 2200 mutex_exit(&so->so_lock); 2201 strsetrerror(SOTOV(so), 0, 0, sogetrderr); 2202 strsetwerror(SOTOV(so), 0, 0, sogetwrerr); 2203 strseteof(SOTOV(so), 1); 2204 freemsg(mp); 2205 /* 2206 * strseteof takes care of read side wakeups, 2207 * pollwakeups, and signals. 2208 */ 2209 *wakeups = WSLEEP; 2210 *allmsgsigs = S_OUTPUT; 2211 *pollwakeups = POLLOUT; 2212 return (NULL); 2213 } 2214 addr = sogetoff(mp, conn_con->RES_offset, addrlen, 1); 2215 if (addr == NULL) { 2216 zcmn_err(getzoneid(), CE_WARN, 2217 "sockfs: T_conn_con with invalid " 2218 "addrlen/offset %u/%d\n", 2219 addrlen, conn_con->RES_offset); 2220 mutex_exit(&so->so_lock); 2221 strsetrerror(SOTOV(so), 0, 0, sogetrderr); 2222 strsetwerror(SOTOV(so), 0, 0, sogetwrerr); 2223 strseteof(SOTOV(so), 1); 2224 freemsg(mp); 2225 /* 2226 * strseteof takes care of read side wakeups, 2227 * pollwakeups, and signals. 2228 */ 2229 *wakeups = WSLEEP; 2230 *allmsgsigs = S_OUTPUT; 2231 *pollwakeups = POLLOUT; 2232 return (NULL); 2233 } 2234 2235 /* 2236 * Save for getpeername. 
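 *
 * (Illustrative, not part of this file: a hypothetical caller doing
 *	struct sockaddr_storage ss;
 *	socklen_t len = (socklen_t)sizeof (ss);
 *	(void) getpeername(fd, (struct sockaddr *)&ss, &len);
 * after connect() completes would typically be answered from the
 * sti_faddr_sa copy made here rather than by querying the transport.)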
2237 */ 2238 if (so->so_family != AF_UNIX) { 2239 sti->sti_faddr_len = (socklen_t)addrlen; 2240 ASSERT(sti->sti_faddr_len <= sti->sti_faddr_maxlen); 2241 bcopy(addr, sti->sti_faddr_sa, addrlen); 2242 sti->sti_faddr_valid = 1; 2243 } 2244 2245 if (so->so_peercred != NULL) 2246 crfree(so->so_peercred); 2247 so->so_peercred = msg_getcred(mp, &so->so_cpid); 2248 if (so->so_peercred != NULL) 2249 crhold(so->so_peercred); 2250 2251 /* Wakeup anybody sleeping in sowaitconnected */ 2252 soisconnected(so); 2253 mutex_exit(&so->so_lock); 2254 2255 /* 2256 * The socket is now available for sending data. 2257 */ 2258 *wakeups = WSLEEP; 2259 *allmsgsigs = S_OUTPUT; 2260 *pollwakeups = POLLOUT; 2261 freemsg(mp); 2262 return (NULL); 2263 } 2264 2265 /* 2266 * Extra processing in case of an SSL proxy, before queuing or 2267 * forwarding to the fallback endpoint 2268 */ 2269 case T_SSL_PROXY_CONN_IND: 2270 case T_CONN_IND: 2271 /* 2272 * Verify the min size and queue the message on 2273 * the sti_conn_ind_head/tail list. 2274 */ 2275 if (MBLKL(mp) < sizeof (struct T_conn_ind)) { 2276 zcmn_err(getzoneid(), CE_WARN, 2277 "sockfs: Too short T_CONN_IND. Len = %ld\n", 2278 (ptrdiff_t)(MBLKL(mp))); 2279 freemsg(mp); 2280 return (NULL); 2281 } 2282 2283 if (auditing) 2284 audit_sock(T_CONN_IND, strvp2wq(vp), mp, 0); 2285 if (!(so->so_state & SS_ACCEPTCONN)) { 2286 zcmn_err(getzoneid(), CE_WARN, 2287 "sockfs: T_conn_ind on non-listening socket\n"); 2288 freemsg(mp); 2289 return (NULL); 2290 } 2291 2292 if (tpr->type == T_SSL_PROXY_CONN_IND && mp->b_cont == NULL) { 2293 /* No context: need to fall back */ 2294 struct sonode *fbso; 2295 stdata_t *fbstp; 2296 2297 tpr->type = T_CONN_IND; 2298 2299 fbso = kssl_find_fallback(sti->sti_kssl_ent); 2300 2301 /* 2302 * No fallback: the remote will timeout and 2303 * disconnect. 2304 */ 2305 if (fbso == NULL) { 2306 freemsg(mp); 2307 return (NULL); 2308 } 2309 fbstp = SOTOV(fbso)->v_stream; 2310 qreply(fbstp->sd_wrq->q_next, mp); 2311 return (NULL); 2312 } 2313 soqueueconnind(so, mp); 2314 *allmsgsigs = S_INPUT | S_RDNORM; 2315 *pollwakeups = POLLIN | POLLRDNORM; 2316 *wakeups = RSLEEP; 2317 return (NULL); 2318 2319 case T_ORDREL_IND: 2320 if (MBLKL(mp) < sizeof (struct T_ordrel_ind)) { 2321 zcmn_err(getzoneid(), CE_WARN, 2322 "sockfs: Too short T_ORDREL_IND. Len = %ld\n", 2323 (ptrdiff_t)(MBLKL(mp))); 2324 freemsg(mp); 2325 return (NULL); 2326 } 2327 2328 /* 2329 * Some providers send this when not fully connected. 2330 * SunLink X.25 needs to retrieve disconnect reason after 2331 * disconnect for compatibility. It uses T_ORDREL_IND 2332 * instead of T_DISCON_IND so that it may use the 2333 * endpoint after a connect failure to retrieve the 2334 * reason using an ioctl. Thus we explicitly clear 2335 * SS_ISCONNECTING here for SunLink X.25. 2336 * This is a needed TPI violation. 2337 */ 2338 mutex_enter(&so->so_lock); 2339 so->so_state &= ~SS_ISCONNECTING; 2340 socantrcvmore(so); 2341 mutex_exit(&so->so_lock); 2342 strseteof(SOTOV(so), 1); 2343 /* 2344 * strseteof takes care of read side wakeups, 2345 * pollwakeups, and signals. 2346 */ 2347 freemsg(mp); 2348 return (NULL); 2349 2350 case T_DISCON_IND: 2351 if (MBLKL(mp) < sizeof (struct T_discon_ind)) { 2352 zcmn_err(getzoneid(), CE_WARN, 2353 "sockfs: Too short T_DISCON_IND. Len = %ld\n", 2354 (ptrdiff_t)(MBLKL(mp))); 2355 freemsg(mp); 2356 return (NULL); 2357 } 2358 if (so->so_state & SS_ACCEPTCONN) { 2359 /* 2360 * This is a listener. 
Look for a queued T_CONN_IND 2361 * with a matching sequence number and remove it 2362 * from the list. 2363 * It is normal to not find the sequence number since 2364 * the soaccept might have already dequeued it 2365 * (in which case the T_CONN_RES will fail with 2366 * TBADSEQ). 2367 */ 2368 (void) soflushconnind(so, tpr->discon_ind.SEQ_number); 2369 freemsg(mp); 2370 return (0); 2371 } 2372 2373 /* 2374 * Not a listener 2375 * 2376 * If SS_CANTRCVMORE for AF_UNIX ignore the discon_reason. 2377 * Such a discon_ind appears when the peer has first done 2378 * a shutdown() followed by a close() in which case we just 2379 * want to record socantsendmore. 2380 * In this case sockfs first receives a T_ORDREL_IND followed 2381 * by a T_DISCON_IND. 2382 * Note that for other transports (e.g. TCP) we need to handle 2383 * the discon_ind in this case since it signals an error. 2384 */ 2385 mutex_enter(&so->so_lock); 2386 if ((so->so_state & SS_CANTRCVMORE) && 2387 (so->so_family == AF_UNIX)) { 2388 socantsendmore(so); 2389 sti->sti_faddr_valid = 0; 2390 mutex_exit(&so->so_lock); 2391 strsetwerror(SOTOV(so), 0, 0, sogetwrerr); 2392 dprintso(so, 1, 2393 ("T_DISCON_IND: error %d\n", so->so_error)); 2394 freemsg(mp); 2395 /* 2396 * Set these variables for caller to process them. 2397 * For the else part where T_DISCON_IND is processed, 2398 * this will be done in the function being called 2399 * (strsock_discon_ind()) 2400 */ 2401 *wakeups = WSLEEP; 2402 *allmsgsigs = S_OUTPUT; 2403 *pollwakeups = POLLOUT; 2404 } else if (so->so_flag & (SOASYNC_UNBIND | SOLOCKED)) { 2405 /* 2406 * Deferred processing of T_DISCON_IND 2407 */ 2408 so_save_discon_ind(so, mp, strsock_discon_ind); 2409 mutex_exit(&so->so_lock); 2410 } else { 2411 /* 2412 * Process T_DISCON_IND now 2413 */ 2414 (void) strsock_discon_ind(so, mp); 2415 mutex_exit(&so->so_lock); 2416 } 2417 return (NULL); 2418 2419 case T_UDERROR_IND: { 2420 struct T_uderror_ind *tudi = &tpr->uderror_ind; 2421 void *addr; 2422 t_uscalar_t addrlen; 2423 int error; 2424 2425 dprintso(so, 0, 2426 ("T_UDERROR_IND: error %d\n", tudi->ERROR_type)); 2427 2428 if (MBLKL(mp) < sizeof (struct T_uderror_ind)) { 2429 zcmn_err(getzoneid(), CE_WARN, 2430 "sockfs: Too short T_UDERROR_IND. Len = %ld\n", 2431 (ptrdiff_t)(MBLKL(mp))); 2432 freemsg(mp); 2433 return (NULL); 2434 } 2435 /* Ignore on connection-oriented transports */ 2436 if (so->so_mode & SM_CONNREQUIRED) { 2437 freemsg(mp); 2438 eprintsoline(so, 0); 2439 zcmn_err(getzoneid(), CE_WARN, 2440 "sockfs: T_uderror_ind on connection-oriented " 2441 "transport\n"); 2442 return (NULL); 2443 } 2444 addrlen = tudi->DEST_length; 2445 addr = sogetoff(mp, tudi->DEST_offset, addrlen, 1); 2446 if (addr == NULL) { 2447 zcmn_err(getzoneid(), CE_WARN, 2448 "sockfs: T_uderror_ind with invalid " 2449 "addrlen/offset %u/%d\n", 2450 addrlen, tudi->DEST_offset); 2451 freemsg(mp); 2452 return (NULL); 2453 } 2454 2455 /* Verify source address for connected socket. 
*/ 2456 mutex_enter(&so->so_lock); 2457 if (so->so_state & SS_ISCONNECTED) { 2458 void *faddr; 2459 t_uscalar_t faddr_len; 2460 boolean_t match = B_FALSE; 2461 2462 switch (so->so_family) { 2463 case AF_INET: { 2464 /* Compare just IP address and port */ 2465 struct sockaddr_in *sin1, *sin2; 2466 2467 sin1 = (struct sockaddr_in *)sti->sti_faddr_sa; 2468 sin2 = (struct sockaddr_in *)addr; 2469 if (addrlen == sizeof (struct sockaddr_in) && 2470 sin1->sin_port == sin2->sin_port && 2471 sin1->sin_addr.s_addr == 2472 sin2->sin_addr.s_addr) 2473 match = B_TRUE; 2474 break; 2475 } 2476 case AF_INET6: { 2477 /* Compare just IP address and port. Not flow */ 2478 struct sockaddr_in6 *sin1, *sin2; 2479 2480 sin1 = (struct sockaddr_in6 *)sti->sti_faddr_sa; 2481 sin2 = (struct sockaddr_in6 *)addr; 2482 if (addrlen == sizeof (struct sockaddr_in6) && 2483 sin1->sin6_port == sin2->sin6_port && 2484 IN6_ARE_ADDR_EQUAL(&sin1->sin6_addr, 2485 &sin2->sin6_addr)) 2486 match = B_TRUE; 2487 break; 2488 } 2489 case AF_UNIX: 2490 faddr = &sti->sti_ux_faddr; 2491 faddr_len = 2492 (t_uscalar_t)sizeof (sti->sti_ux_faddr); 2493 if (faddr_len == addrlen && 2494 bcmp(addr, faddr, addrlen) == 0) 2495 match = B_TRUE; 2496 break; 2497 default: 2498 faddr = sti->sti_faddr_sa; 2499 faddr_len = (t_uscalar_t)sti->sti_faddr_len; 2500 if (faddr_len == addrlen && 2501 bcmp(addr, faddr, addrlen) == 0) 2502 match = B_TRUE; 2503 break; 2504 } 2505 2506 if (!match) { 2507 #ifdef DEBUG 2508 dprintso(so, 0, 2509 ("sockfs: T_UDERR_IND mismatch: %s - ", 2510 pr_addr(so->so_family, 2511 (struct sockaddr *)addr, addrlen))); 2512 dprintso(so, 0, ("%s\n", 2513 pr_addr(so->so_family, sti->sti_faddr_sa, 2514 sti->sti_faddr_len))); 2515 #endif /* DEBUG */ 2516 mutex_exit(&so->so_lock); 2517 freemsg(mp); 2518 return (NULL); 2519 } 2520 /* 2521 * Make the write error nonpersistent. If the error 2522 * is zero we use ECONNRESET. 2523 * This assumes that the name space for ERROR_type 2524 * is the errno name space. 2525 */ 2526 if (tudi->ERROR_type != 0) 2527 error = tudi->ERROR_type; 2528 else 2529 error = ECONNRESET; 2530 2531 soseterror(so, error); 2532 mutex_exit(&so->so_lock); 2533 strsetrerror(SOTOV(so), 0, 0, sogetrderr); 2534 strsetwerror(SOTOV(so), 0, 0, sogetwrerr); 2535 *wakeups = RSLEEP | WSLEEP; 2536 *allmsgsigs = S_INPUT | S_RDNORM | S_OUTPUT; 2537 *pollwakeups = POLLIN | POLLRDNORM | POLLOUT; 2538 freemsg(mp); 2539 return (NULL); 2540 } 2541 /* 2542 * If the application asked for delayed errors 2543 * record the T_UDERROR_IND sti_eaddr_mp and the reason in 2544 * sti_delayed_error for delayed error posting. If the reason 2545 * is zero use ECONNRESET. 2546 * Note that delayed error indications do not make sense for 2547 * AF_UNIX sockets since sendto checks that the destination 2548 * address is valid at the time of the sendto. 
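 *
 * (Hedged sketch, not part of this file: an application opts in to
 * delayed error posting with something like
 *	int on = 1;
 *	(void) setsockopt(fd, SOL_SOCKET, SO_DGRAM_ERRIND, &on,
 *	    sizeof (on));
 * after which a later sendto() aimed at the address saved in
 * sti_eaddr_mp can report sti_delayed_error instead of the error
 * being dropped here.)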
2549 */ 2550 if (!(so->so_options & SO_DGRAM_ERRIND)) { 2551 mutex_exit(&so->so_lock); 2552 freemsg(mp); 2553 return (NULL); 2554 } 2555 if (sti->sti_eaddr_mp != NULL) 2556 freemsg(sti->sti_eaddr_mp); 2557 2558 sti->sti_eaddr_mp = mp; 2559 if (tudi->ERROR_type != 0) 2560 error = tudi->ERROR_type; 2561 else 2562 error = ECONNRESET; 2563 sti->sti_delayed_error = (ushort_t)error; 2564 mutex_exit(&so->so_lock); 2565 return (NULL); 2566 } 2567 2568 case T_ERROR_ACK: 2569 dprintso(so, 0, 2570 ("strsock_proto: T_ERROR_ACK for %d, error %d/%d\n", 2571 tpr->error_ack.ERROR_prim, 2572 tpr->error_ack.TLI_error, 2573 tpr->error_ack.UNIX_error)); 2574 2575 if (MBLKL(mp) < sizeof (struct T_error_ack)) { 2576 zcmn_err(getzoneid(), CE_WARN, 2577 "sockfs: Too short T_ERROR_ACK. Len = %ld\n", 2578 (ptrdiff_t)(MBLKL(mp))); 2579 freemsg(mp); 2580 return (NULL); 2581 } 2582 /* 2583 * Check if we were waiting for the async message 2584 */ 2585 mutex_enter(&so->so_lock); 2586 if ((so->so_flag & SOASYNC_UNBIND) && 2587 tpr->error_ack.ERROR_prim == T_UNBIND_REQ) { 2588 so_unlock_single(so, SOASYNC_UNBIND); 2589 mutex_exit(&so->so_lock); 2590 freemsg(mp); 2591 return (NULL); 2592 } 2593 mutex_exit(&so->so_lock); 2594 soqueueack(so, mp); 2595 return (NULL); 2596 2597 case T_OK_ACK: 2598 if (MBLKL(mp) < sizeof (struct T_ok_ack)) { 2599 zcmn_err(getzoneid(), CE_WARN, 2600 "sockfs: Too short T_OK_ACK. Len = %ld\n", 2601 (ptrdiff_t)(MBLKL(mp))); 2602 freemsg(mp); 2603 return (NULL); 2604 } 2605 /* 2606 * Check if we were waiting for the async message 2607 */ 2608 mutex_enter(&so->so_lock); 2609 if ((so->so_flag & SOASYNC_UNBIND) && 2610 tpr->ok_ack.CORRECT_prim == T_UNBIND_REQ) { 2611 dprintso(so, 1, 2612 ("strsock_proto: T_OK_ACK async unbind\n")); 2613 so_unlock_single(so, SOASYNC_UNBIND); 2614 mutex_exit(&so->so_lock); 2615 freemsg(mp); 2616 return (NULL); 2617 } 2618 mutex_exit(&so->so_lock); 2619 soqueueack(so, mp); 2620 return (NULL); 2621 2622 case T_INFO_ACK: 2623 if (MBLKL(mp) < sizeof (struct T_info_ack)) { 2624 zcmn_err(getzoneid(), CE_WARN, 2625 "sockfs: Too short T_INFO_ACK. Len = %ld\n", 2626 (ptrdiff_t)(MBLKL(mp))); 2627 freemsg(mp); 2628 return (NULL); 2629 } 2630 soqueueack(so, mp); 2631 return (NULL); 2632 2633 case T_CAPABILITY_ACK: 2634 /* 2635 * A T_capability_ack need only be large enough to hold 2636 * the PRIM_type and CAP_bits1 fields; checking for anything 2637 * larger might reject a correct response from an older 2638 * provider. 2639 */ 2640 if (MBLKL(mp) < 2 * sizeof (t_uscalar_t)) { 2641 zcmn_err(getzoneid(), CE_WARN, 2642 "sockfs: Too short T_CAPABILITY_ACK. Len = %ld\n", 2643 (ptrdiff_t)(MBLKL(mp))); 2644 freemsg(mp); 2645 return (NULL); 2646 } 2647 soqueueack(so, mp); 2648 return (NULL); 2649 2650 case T_BIND_ACK: 2651 if (MBLKL(mp) < sizeof (struct T_bind_ack)) { 2652 zcmn_err(getzoneid(), CE_WARN, 2653 "sockfs: Too short T_BIND_ACK. Len = %ld\n", 2654 (ptrdiff_t)(MBLKL(mp))); 2655 freemsg(mp); 2656 return (NULL); 2657 } 2658 soqueueack(so, mp); 2659 return (NULL); 2660 2661 case T_OPTMGMT_ACK: 2662 if (MBLKL(mp) < sizeof (struct T_optmgmt_ack)) { 2663 zcmn_err(getzoneid(), CE_WARN, 2664 "sockfs: Too short T_OPTMGMT_ACK. 
Len = %ld\n", 2665 (ptrdiff_t)(MBLKL(mp))); 2666 freemsg(mp); 2667 return (NULL); 2668 } 2669 soqueueack(so, mp); 2670 return (NULL); 2671 default: 2672 #ifdef DEBUG 2673 zcmn_err(getzoneid(), CE_WARN, 2674 "sockfs: unknown TPI primitive %d received\n", 2675 tpr->type); 2676 #endif /* DEBUG */ 2677 freemsg(mp); 2678 return (NULL); 2679 } 2680 } 2681 2682 /* 2683 * This routine is registered with the stream head to receive other 2684 * (non-data, and non-proto) messages. 2685 * 2686 * Returns NULL if the message was consumed. 2687 * Returns an mblk to make that mblk be processed by the stream head. 2688 * 2689 * Sets the return parameters (*wakeups, *firstmsgsigs, *allmsgsigs, and 2690 * *pollwakeups) for the stream head to take action on. 2691 */ 2692 static mblk_t * 2693 strsock_misc(vnode_t *vp, mblk_t *mp, 2694 strwakeup_t *wakeups, strsigset_t *firstmsgsigs, 2695 strsigset_t *allmsgsigs, strpollset_t *pollwakeups) 2696 { 2697 struct sonode *so; 2698 sotpi_info_t *sti; 2699 2700 so = VTOSO(vp); 2701 sti = SOTOTPI(so); 2702 2703 dprintso(so, 1, ("strsock_misc(%p, %p, 0x%x)\n", 2704 (void *)vp, (void *)mp, DB_TYPE(mp))); 2705 2706 /* Set default return values */ 2707 *wakeups = *allmsgsigs = *firstmsgsigs = *pollwakeups = 0; 2708 2709 switch (DB_TYPE(mp)) { 2710 case M_PCSIG: 2711 /* 2712 * This assumes that an M_PCSIG for the urgent data arrives 2713 * before the corresponding T_EXDATA_IND. 2714 * 2715 * Note: Just like in SunOS 4.X and 4.4BSD a poll will be 2716 * awoken before the urgent data shows up. 2717 * For OOBINLINE this can result in select returning 2718 * only exceptions as opposed to except|read. 2719 */ 2720 if (*mp->b_rptr == SIGURG) { 2721 mutex_enter(&so->so_lock); 2722 dprintso(so, 1, 2723 ("SIGURG(%p): counts %d/%d state %s\n", 2724 (void *)vp, sti->sti_oobsigcnt, sti->sti_oobcnt, 2725 pr_state(so->so_state, so->so_mode))); 2726 so_oob_sig(so, 1, allmsgsigs, pollwakeups); 2727 dprintso(so, 1, 2728 ("after SIGURG(%p): counts %d/%d " 2729 " poll 0x%x sig 0x%x state %s\n", 2730 (void *)vp, sti->sti_oobsigcnt, sti->sti_oobcnt, 2731 *pollwakeups, *allmsgsigs, 2732 pr_state(so->so_state, so->so_mode))); 2733 mutex_exit(&so->so_lock); 2734 } 2735 freemsg(mp); 2736 return (NULL); 2737 2738 case M_SIG: 2739 case M_HANGUP: 2740 case M_UNHANGUP: 2741 case M_ERROR: 2742 /* M_ERRORs etc are ignored */ 2743 freemsg(mp); 2744 return (NULL); 2745 2746 case M_FLUSH: 2747 /* 2748 * Do not flush read queue. If the M_FLUSH 2749 * arrives because of an impending T_discon_ind 2750 * we still have to keep any queued data - this is part of 2751 * socket semantics. 2752 */ 2753 if (*mp->b_rptr & FLUSHW) { 2754 *mp->b_rptr &= ~FLUSHR; 2755 return (mp); 2756 } 2757 freemsg(mp); 2758 return (NULL); 2759 2760 default: 2761 return (mp); 2762 } 2763 } 2764 2765 2766 /* Register to receive signals for certain events */ 2767 int 2768 so_set_asyncsigs(vnode_t *vp, pid_t pgrp, int events, int mode, cred_t *cr) 2769 { 2770 struct strsigset ss; 2771 int32_t rval; 2772 2773 /* 2774 * Note that SOLOCKED will be set except for the call from soaccept(). 2775 */ 2776 ASSERT(!mutex_owned(&VTOSO(vp)->so_lock)); 2777 ss.ss_pid = pgrp; 2778 ss.ss_events = events; 2779 return (strioctl(vp, I_ESETSIG, (intptr_t)&ss, mode, K_TO_K, cr, 2780 &rval)); 2781 } 2782 2783 2784 /* Register for events matching the SS_ASYNC flag */ 2785 int 2786 so_set_events(struct sonode *so, vnode_t *vp, cred_t *cr) 2787 { 2788 int events = so->so_state & SS_ASYNC ? 
2789 S_RDBAND | S_BANDURG | S_RDNORM | S_OUTPUT : 2790 S_RDBAND | S_BANDURG; 2791 2792 return (so_set_asyncsigs(vp, so->so_pgrp, events, 0, cr)); 2793 } 2794 2795 2796 /* Change the SS_ASYNC flag, and update signal delivery if needed */ 2797 int 2798 so_flip_async(struct sonode *so, vnode_t *vp, int mode, cred_t *cr) 2799 { 2800 ASSERT(mutex_owned(&so->so_lock)); 2801 if (so->so_pgrp != 0) { 2802 int error; 2803 int events = so->so_state & SS_ASYNC ? /* Old flag */ 2804 S_RDBAND | S_BANDURG : /* New sigs */ 2805 S_RDBAND | S_BANDURG | S_RDNORM | S_OUTPUT; 2806 2807 so_lock_single(so); 2808 mutex_exit(&so->so_lock); 2809 2810 error = so_set_asyncsigs(vp, so->so_pgrp, events, mode, cr); 2811 2812 mutex_enter(&so->so_lock); 2813 so_unlock_single(so, SOLOCKED); 2814 if (error) 2815 return (error); 2816 } 2817 so->so_state ^= SS_ASYNC; 2818 return (0); 2819 } 2820 2821 /* 2822 * Set new pid/pgrp for SIGPOLL (or SIGIO for FIOASYNC mode), replacing 2823 * any existing one. If passed zero, just clear the existing one. 2824 */ 2825 int 2826 so_set_siggrp(struct sonode *so, vnode_t *vp, pid_t pgrp, int mode, cred_t *cr) 2827 { 2828 int events = so->so_state & SS_ASYNC ? 2829 S_RDBAND | S_BANDURG | S_RDNORM | S_OUTPUT : 2830 S_RDBAND | S_BANDURG; 2831 int error; 2832 2833 ASSERT(mutex_owned(&so->so_lock)); 2834 2835 /* 2836 * Change socket process (group). 2837 * 2838 * strioctl (via so_set_asyncsigs) will perform permission check and 2839 * also keep a PID_HOLD to prevent the pid from being reused. 2840 */ 2841 so_lock_single(so); 2842 mutex_exit(&so->so_lock); 2843 2844 if (pgrp != 0) { 2845 dprintso(so, 1, ("setown: adding pgrp %d ev 0x%x\n", 2846 pgrp, events)); 2847 error = so_set_asyncsigs(vp, pgrp, events, mode, cr); 2848 if (error != 0) { 2849 eprintsoline(so, error); 2850 goto bad; 2851 } 2852 } 2853 /* Remove the previously registered process/group */ 2854 if (so->so_pgrp != 0) { 2855 dprintso(so, 1, ("setown: removing pgrp %d\n", so->so_pgrp)); 2856 error = so_set_asyncsigs(vp, so->so_pgrp, 0, mode, cr); 2857 if (error != 0) { 2858 eprintsoline(so, error); 2859 error = 0; 2860 } 2861 } 2862 mutex_enter(&so->so_lock); 2863 so_unlock_single(so, SOLOCKED); 2864 so->so_pgrp = pgrp; 2865 return (0); 2866 bad: 2867 mutex_enter(&so->so_lock); 2868 so_unlock_single(so, SOLOCKED); 2869 return (error); 2870 } 2871 2872 /* 2873 * Wrapper for getmsg. If the socket has been converted to a stream 2874 * pass the request to the stream head. 2875 */ 2876 int 2877 sock_getmsg( 2878 struct vnode *vp, 2879 struct strbuf *mctl, 2880 struct strbuf *mdata, 2881 uchar_t *prip, 2882 int *flagsp, 2883 int fmode, 2884 rval_t *rvp 2885 ) 2886 { 2887 struct sonode *so; 2888 2889 ASSERT(vp->v_type == VSOCK); 2890 /* 2891 * Use the stream head to find the real socket vnode. 2892 * This is needed when namefs sits above sockfs. Some 2893 * sockets (like SCTP) are not streams. 2894 */ 2895 if (!vp->v_stream) { 2896 return (ENOSTR); 2897 } 2898 ASSERT(vp->v_stream->sd_vnode); 2899 vp = vp->v_stream->sd_vnode; 2900 ASSERT(vn_matchops(vp, socket_vnodeops)); 2901 so = VTOSO(vp); 2902 2903 dprintso(so, 1, ("sock_getmsg(%p) %s\n", 2904 (void *)so, pr_state(so->so_state, so->so_mode))); 2905 2906 if (so->so_version == SOV_STREAM) { 2907 /* The imaginary "sockmod" has been popped - act as a stream */ 2908 return (strgetmsg(vp, mctl, mdata, prip, flagsp, fmode, rvp)); 2909 } 2910 eprintsoline(so, ENOSTR); 2911 return (ENOSTR); 2912 } 2913 2914 /* 2915 * Wrapper for putmsg. 
If the socket has been converted to a stream 2916 * pass the request to the stream head. 2917 * 2918 * Note that while a regular socket (SOV_SOCKSTREAM) does support the 2919 * streams ioctl set, it does not support putmsg and getmsg. 2920 * Allowing putmsg would prevent sockfs from tracking the state of 2921 * the socket/transport and would also invalidate the locking in sockfs. 2922 */ 2923 int 2924 sock_putmsg( 2925 struct vnode *vp, 2926 struct strbuf *mctl, 2927 struct strbuf *mdata, 2928 uchar_t pri, 2929 int flag, 2930 int fmode 2931 ) 2932 { 2933 struct sonode *so; 2934 2935 ASSERT(vp->v_type == VSOCK); 2936 /* 2937 * Use the stream head to find the real socket vnode. 2938 * This is needed when namefs sits above sockfs. 2939 */ 2940 if (!vp->v_stream) { 2941 return (ENOSTR); 2942 } 2943 ASSERT(vp->v_stream->sd_vnode); 2944 vp = vp->v_stream->sd_vnode; 2945 ASSERT(vn_matchops(vp, socket_vnodeops)); 2946 so = VTOSO(vp); 2947 2948 dprintso(so, 1, ("sock_putmsg(%p) %s\n", 2949 (void *)so, pr_state(so->so_state, so->so_mode))); 2950 2951 if (so->so_version == SOV_STREAM) { 2952 /* The imaginary "sockmod" has been popped - act as a stream */ 2953 return (strputmsg(vp, mctl, mdata, pri, flag, fmode)); 2954 } 2955 eprintsoline(so, ENOSTR); 2956 return (ENOSTR); 2957 } 2958 2959 /* 2960 * Special function called only from f_getfl(). 2961 * Returns FASYNC if the SS_ASYNC flag is set on a socket, else 0. 2962 * No locks are acquired here, so it is safe to use while uf_lock is held. 2963 * This exists solely for BSD fcntl() FASYNC compatibility. 2964 */ 2965 int 2966 sock_getfasync(vnode_t *vp) 2967 { 2968 struct sonode *so; 2969 2970 ASSERT(vp->v_type == VSOCK); 2971 /* 2972 * For stream model, v_stream is used; for non-stream, v_stream always 2973 * equals NULL 2974 */ 2975 if (vp->v_stream != NULL) 2976 so = VTOSO(vp->v_stream->sd_vnode); 2977 else 2978 so = VTOSO(vp); 2979 2980 if (so->so_version == SOV_STREAM || !(so->so_state & SS_ASYNC)) 2981 return (0); 2982 2983 return (FASYNC); 2984 } 2985
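/*
 * Illustrative sketch, not part of this file: the FASYNC bit that
 * sock_getfasync() reports is what a hypothetical BSD-style caller
 * observes through fcntl(), e.g.
 *
 *	(void) fcntl(fd, F_SETOWN, getpid());
 *	flags = fcntl(fd, F_GETFL, 0);
 *	(void) fcntl(fd, F_SETFL, flags | FASYNC);
 *	...
 *	if (fcntl(fd, F_GETFL, 0) & FASYNC)
 *		...
 *
 * Turning FASYNC on or off ends up toggling SS_ASYNC (see
 * so_flip_async() above), and f_getfl() folds the value returned by
 * sock_getfasync() into the flags it hands back.
 */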