/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/cred.h>
#include <sys/kmem.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/debug.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/file.h>
#include <sys/open.h>
#include <sys/user.h>
#include <sys/termios.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/strsun.h>
#include <sys/ddi.h>
#include <sys/esunddi.h>
#include <sys/flock.h>
#include <sys/modctl.h>
#include <sys/vtrace.h>
#include <sys/cmn_err.h>
#include <sys/pathname.h>

#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <netinet/in.h>
#include <sys/un.h>
#include <sys/strsun.h>

#include <sys/tiuser.h>
#define	_SUN_TPI_VERSION	2
#include <sys/tihdr.h>
#include <sys/timod.h>		/* TI_GETMYNAME, TI_GETPEERNAME */

#include <c2/audit.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/ip6.h>
#include <inet/tcp.h>
#include <inet/udp_impl.h>

#include <sys/zone.h>

#include <fs/sockfs/nl7c.h>
#include <fs/sockfs/nl7curi.h>

#include <inet/kssl/ksslapi.h>

/*
 * Possible failures when memory can't be allocated. The documented behavior:
 *
 *		5.5:			4.X:		XNET:
 * accept:	ENOMEM/ENOSR/EINTR	- (EINTR)	ENOMEM/ENOBUFS/ENOSR/
 *							EINTR
 * (4.X does not document EINTR but returns it)
 * bind:	ENOSR			-		ENOBUFS/ENOSR
 * connect:	EINTR			EINTR		ENOBUFS/ENOSR/EINTR
 * getpeername:	ENOMEM/ENOSR		ENOBUFS (-)	ENOBUFS/ENOSR
 * getsockname:	ENOMEM/ENOSR		ENOBUFS (-)	ENOBUFS/ENOSR
 * (4.X getpeername and getsockname do not fail in practice)
 * getsockopt:	ENOMEM/ENOSR		-		ENOBUFS/ENOSR
 * listen:	-			-		ENOBUFS
 * recv:	ENOMEM/ENOSR/EINTR	EINTR		ENOBUFS/ENOMEM/ENOSR/
 *							EINTR
 * send:	ENOMEM/ENOSR/EINTR	ENOBUFS/EINTR	ENOBUFS/ENOMEM/ENOSR/
 *							EINTR
 * setsockopt:	ENOMEM/ENOSR		-		ENOBUFS/ENOMEM/ENOSR
 * shutdown:	ENOMEM/ENOSR		-		ENOBUFS/ENOSR
 * socket:	ENOMEM/ENOSR		ENOBUFS		ENOBUFS/ENOMEM/ENOSR
 * socketpair:	ENOMEM/ENOSR		-		ENOBUFS/ENOMEM/ENOSR
 *
 * Resolution. When allocation fails:
 *	recv: return EINTR
 *	send: return EINTR
 *	connect, accept: EINTR
 *	bind, listen, shutdown (unbind, unix_close, disconnect): sleep
 *	socket, socketpair: ENOBUFS
 *	getpeername, getsockname: sleep
 *	getsockopt, setsockopt: sleep
 */

#ifdef SOCK_TEST
/*
 * Variables that make sockfs do something other than the standard TPI
 * for the AF_INET transports.
 *
 * solisten_tpi_tcp:
 *	TCP can handle a O_T_BIND_REQ with an increased backlog even though
 *	the transport is already bound. This is needed to avoid losing the
 *	port number should listen() do a T_UNBIND_REQ followed by a
 *	O_T_BIND_REQ.
 *
 * soconnect_tpi_udp:
 *	UDP and ICMP can handle a T_CONN_REQ.
 *	This is needed to make the sequence of connect(), getsockname()
 *	return the local IP address used to send packets to the connected-to
 *	destination.
 *
 * soconnect_tpi_tcp:
 *	TCP can handle a T_CONN_REQ without seeing a O_T_BIND_REQ.
 *	Set this to non-zero to send TPI conformant messages to TCP in this
 *	respect. This is a performance optimization.
 *
 * soaccept_tpi_tcp:
 *	TCP can handle a T_CONN_RES without the acceptor being bound.
 *	This is a performance optimization that has been picked up in XTI.
 *
 * soaccept_tpi_multioptions:
 *	When inheriting SOL_SOCKET options from the listener to the accepting
 *	socket send them as a single message for AF_INET{,6}.
 */
int solisten_tpi_tcp = 0;
int soconnect_tpi_udp = 0;
int soconnect_tpi_tcp = 0;
int soaccept_tpi_tcp = 0;
int soaccept_tpi_multioptions = 1;
#else /* SOCK_TEST */
#define	soconnect_tpi_tcp	0
#define	soconnect_tpi_udp	0
#define	solisten_tpi_tcp	0
#define	soaccept_tpi_tcp	0
#define	soaccept_tpi_multioptions	1
#endif /* SOCK_TEST */

#ifdef SOCK_TEST
extern int do_useracc;
extern clock_t sock_test_timelimit;
#endif /* SOCK_TEST */

/*
 * Some X/Open added checks might have to be backed out to keep SunOS 4.X
 * applications working. Turn on this flag to disable these checks.
 */
int xnet_skip_checks = 0;
int xnet_check_print = 0;
int xnet_truncate_print = 0;

extern void sigintr(k_sigset_t *, int);
extern void sigunintr(k_sigset_t *);

extern void *nl7c_lookup_addr(void *, t_uscalar_t);
extern void *nl7c_add_addr(void *, t_uscalar_t);
extern void nl7c_listener_addr(void *, struct sonode *);

/* Sockets acting as an in-kernel SSL proxy */
extern mblk_t	*strsock_kssl_input(vnode_t *, mblk_t *, strwakeup_t *,
		strsigset_t *, strsigset_t *, strpollset_t *);
extern mblk_t	*strsock_kssl_output(vnode_t *, mblk_t *, strwakeup_t *,
		strsigset_t *, strsigset_t *, strpollset_t *);

static int sotpi_unbind(struct sonode *, int);

/* TPI sockfs sonode operations */
static int	sotpi_accept(struct sonode *, int, struct sonode **);
static int	sotpi_bind(struct sonode *, struct sockaddr *, socklen_t,
		    int);
static int	sotpi_connect(struct sonode *, const struct sockaddr *,
		    socklen_t, int, int);
static int	sotpi_listen(struct sonode *, int);
static int	sotpi_sendmsg(struct sonode *, struct nmsghdr *,
		    struct uio *);
static int	sotpi_shutdown(struct sonode *, int);
static int	sotpi_getsockname(struct sonode *);
static int	sosend_dgramcmsg(struct sonode *, struct sockaddr *, socklen_t,
		    struct uio *, void *, t_uscalar_t, int);
static int	sodgram_direct(struct sonode *, struct sockaddr *,
		    socklen_t, struct uio *, int);

sonodeops_t sotpi_sonodeops = {
	sotpi_accept,		/* sop_accept */
	sotpi_bind,		/* sop_bind */
	sotpi_listen,		/* sop_listen */
	sotpi_connect,		/* sop_connect */
	sotpi_recvmsg,		/* sop_recvmsg */
	sotpi_sendmsg,		/* sop_sendmsg */
	sotpi_getpeername,	/* sop_getpeername */
	sotpi_getsockname,	/* sop_getsockname */
	sotpi_shutdown,		/* sop_shutdown */
	sotpi_getsockopt,	/* sop_getsockopt */
	sotpi_setsockopt	/* sop_setsockopt */
};

/*
 * Common create code for socket and accept. If tso is set the values
 * from that node are used instead of issuing a T_INFO_REQ.
 *
 * Assumes that the caller has a VN_HOLD on accessvp.
 * The VN_RELE will occur either when sotpi_create() fails or when
 * the returned sonode is freed.
 */
struct sonode *
sotpi_create(vnode_t *accessvp, int domain, int type, int protocol, int version,
    struct sonode *tso, int *errorp)
{
	struct sonode	*so;
	vnode_t		*vp;
	int		flags, error;

	ASSERT(accessvp != NULL);
	vp = makesockvp(accessvp, domain, type, protocol);
	ASSERT(vp != NULL);
	so = VTOSO(vp);

	flags = FREAD|FWRITE;

	if ((type == SOCK_STREAM || type == SOCK_DGRAM) &&
	    (domain == AF_INET || domain == AF_INET6) &&
	    (protocol == IPPROTO_TCP || protocol == IPPROTO_UDP ||
	    protocol == IPPROTO_IP)) {
		/* Tell tcp or udp that it's talking to sockets */
		flags |= SO_SOCKSTR;

		/*
		 * Here we indicate to socktpi_open() our attempt to
		 * make direct calls between sockfs and transport.
		 * The final decision is left to socktpi_open().
		 */
		so->so_state |= SS_DIRECT;

		ASSERT(so->so_type != SOCK_DGRAM || tso == NULL);
		if (so->so_type == SOCK_STREAM && tso != NULL) {
			if (tso->so_state & SS_DIRECT) {
				/*
				 * Inherit SS_DIRECT from listener and pass
				 * SO_ACCEPTOR open flag to tcp, indicating
				 * that this is an accept fast-path instance.
				 */
				flags |= SO_ACCEPTOR;
			} else {
				/*
				 * SS_DIRECT is not set on listener, meaning
				 * that the listener has been converted from
				 * a socket to a stream. Ensure that the
				 * acceptor inherits these settings.
				 */
				so->so_state &= ~SS_DIRECT;
				flags &= ~SO_SOCKSTR;
			}
		}
	}

	/*
	 * Tell local transport that it is talking to sockets.
	 */
	if (so->so_family == AF_UNIX) {
		flags |= SO_SOCKSTR;
	}

	/* Initialize the kernel SSL proxy fields */
	so->so_kssl_type = KSSL_NO_PROXY;
	so->so_kssl_ent = NULL;
	so->so_kssl_ctx = NULL;

	if (error = socktpi_open(&vp, flags, CRED())) {
		VN_RELE(vp);
		*errorp = error;
		return (NULL);
	}

	if (error = so_strinit(so, tso)) {
		(void) VOP_CLOSE(vp, 0, 1, 0, CRED());
		VN_RELE(vp);
		*errorp = error;
		return (NULL);
	}

	if (version == SOV_DEFAULT)
		version = so_default_version;

	so->so_version = (short)version;

	return (so);
}

/*
 * Bind the socket to an unspecified address in sockfs only.
 * Used for TCP/UDP transports where we know that the O_T_BIND_REQ isn't
 * required in all cases.
 */
static void
so_automatic_bind(struct sonode *so)
{
	ASSERT(so->so_family == AF_INET || so->so_family == AF_INET6);

	ASSERT(MUTEX_HELD(&so->so_lock));
	ASSERT(!(so->so_state & SS_ISBOUND));
	ASSERT(so->so_unbind_mp);

	ASSERT(so->so_laddr_len <= so->so_laddr_maxlen);
	bzero(so->so_laddr_sa, so->so_laddr_len);
	so->so_laddr_sa->sa_family = so->so_family;
	so->so_state |= SS_ISBOUND;
}


/*
 * bind the socket.
 *
 * If the socket is already bound and none of _SOBIND_SOCKBSD or _SOBIND_XPG4_2
 * are passed in we allow rebinding. Note that for backwards compatibility
 * even "svr4" sockets pass in _SOBIND_SOCKBSD/SOV_SOCKBSD to sobind/bind.
 * Thus the rebinding code is currently not executed.
 *
 * The constraints for rebinding are:
 * - it is a SOCK_DGRAM, or
 * - it is a SOCK_STREAM/SOCK_SEQPACKET that has not been connected
 *   and no listen() has been done.
 * This rebinding code was added based on some language in the XNET book
 * about not returning EINVAL if the protocol allows rebinding. However,
 * this language is not present in the POSIX socket draft. Thus maybe the
 * rebinding logic should be deleted from the source.
 *
 * A null "name" can be used to unbind the socket if:
 * - it is a SOCK_DGRAM, or
 * - it is a SOCK_STREAM/SOCK_SEQPACKET that has not been connected
 *   and no listen() has been done.
 */
static int
sotpi_bindlisten(struct sonode *so, struct sockaddr *name,
    socklen_t namelen, int backlog, int flags)
{
	struct T_bind_req	bind_req;
	struct T_bind_ack	*bind_ack;
	int			error = 0;
	mblk_t			*mp;
	void			*addr;
	t_uscalar_t		addrlen;
	int			unbind_on_err = 1;
	boolean_t		clear_acceptconn_on_err = B_FALSE;
	boolean_t		restore_backlog_on_err = B_FALSE;
	int			save_so_backlog;
	t_scalar_t		PRIM_type = O_T_BIND_REQ;
	boolean_t		tcp_udp_xport;
	void			*nl7c = NULL;

	dprintso(so, 1, ("sotpi_bindlisten(%p, %p, %d, %d, 0x%x) %s\n",
	    so, name, namelen, backlog, flags,
	    pr_state(so->so_state, so->so_mode)));

	tcp_udp_xport = so->so_type == SOCK_STREAM || so->so_type == SOCK_DGRAM;

	if (!(flags & _SOBIND_LOCK_HELD)) {
		mutex_enter(&so->so_lock);
		so_lock_single(so);	/* Set SOLOCKED */
	} else {
		ASSERT(MUTEX_HELD(&so->so_lock));
		ASSERT(so->so_flag & SOLOCKED);
	}

	/*
	 * Make sure that there is a preallocated unbind_req message
	 * before binding. This message is allocated when the socket is
	 * created but it might have been consumed.
	 */
	if (so->so_unbind_mp == NULL) {
		dprintso(so, 1, ("sobind: allocating unbind_req\n"));
		/* NOTE: holding so_lock while sleeping */
		so->so_unbind_mp =
		    soallocproto(sizeof (struct T_unbind_req), _ALLOC_SLEEP);
	}

	if (flags & _SOBIND_REBIND) {
		/*
		 * Called from solisten after doing an sotpi_unbind() or
		 * potentially without the unbind (latter for AF_INET{,6}).
		 */
		ASSERT(name == NULL && namelen == 0);

		if (so->so_family == AF_UNIX) {
			ASSERT(so->so_ux_bound_vp);
			addr = &so->so_ux_laddr;
			addrlen = (t_uscalar_t)sizeof (so->so_ux_laddr);
			dprintso(so, 1,
			    ("sobind rebind UNIX: addrlen %d, addr 0x%p, vp %p\n",
			    addrlen,
			    ((struct so_ux_addr *)addr)->soua_vp,
			    so->so_ux_bound_vp));
		} else {
			addr = so->so_laddr_sa;
			addrlen = (t_uscalar_t)so->so_laddr_len;
		}
	} else if (flags & _SOBIND_UNSPEC) {
		ASSERT(name == NULL && namelen == 0);

		/*
		 * The caller checked SS_ISBOUND but not necessarily
		 * under so_lock
		 */
		if (so->so_state & SS_ISBOUND) {
			/* No error */
			goto done;
		}

		/* Set an initial local address */
		switch (so->so_family) {
		case AF_UNIX:
			/*
			 * Use an address with same size as struct sockaddr
			 * just like BSD.
			 */
			so->so_laddr_len =
			    (socklen_t)sizeof (struct sockaddr);
			ASSERT(so->so_laddr_len <= so->so_laddr_maxlen);
			bzero(so->so_laddr_sa, so->so_laddr_len);
			so->so_laddr_sa->sa_family = so->so_family;

			/*
			 * Pass down an address with the implicit bind
			 * magic number and the rest all zeros.
			 * The transport will return a unique address.
			 */
			so->so_ux_laddr.soua_vp = NULL;
			so->so_ux_laddr.soua_magic = SOU_MAGIC_IMPLICIT;
			addr = &so->so_ux_laddr;
			addrlen = (t_uscalar_t)sizeof (so->so_ux_laddr);
			break;

		case AF_INET:
		case AF_INET6:
			/*
			 * An unspecified bind in TPI has a NULL address.
			 * Set the address in sockfs to have the sa_family.
			 */
			so->so_laddr_len = (so->so_family == AF_INET) ?
			    (socklen_t)sizeof (sin_t) :
			    (socklen_t)sizeof (sin6_t);
			ASSERT(so->so_laddr_len <= so->so_laddr_maxlen);
			bzero(so->so_laddr_sa, so->so_laddr_len);
			so->so_laddr_sa->sa_family = so->so_family;
			addr = NULL;
			addrlen = 0;
			break;

		default:
			/*
			 * An unspecified bind in TPI has a NULL address.
			 * Set the address in sockfs to be zero length.
			 *
			 * Can not assume there is a sa_family for all
			 * protocol families. For example, AF_X25 does not
			 * have a family field.
			 */
			bzero(so->so_laddr_sa, so->so_laddr_len);
			so->so_laddr_len = 0;	/* XXX correct? */
			addr = NULL;
			addrlen = 0;
			break;
		}

	} else {
		if (so->so_state & SS_ISBOUND) {
			/*
			 * If it is ok to rebind the socket, first unbind
			 * with the transport. A rebind to the NULL address
			 * is interpreted as an unbind.
			 * Note that a bind to NULL in BSD does unbind the
			 * socket but it fails with EINVAL.
			 * Note that regular sockets set SOV_SOCKBSD i.e.
			 * _SOBIND_SOCKBSD gets set here hence no type of
			 * socket does currently allow rebinding.
			 *
			 * If the name is NULL just do an unbind.
			 */
			if (flags & (_SOBIND_SOCKBSD|_SOBIND_XPG4_2) &&
			    name != NULL) {
				error = EINVAL;
				unbind_on_err = 0;
				eprintsoline(so, error);
				goto done;
			}
			if ((so->so_mode & SM_CONNREQUIRED) &&
			    (so->so_state & SS_CANTREBIND)) {
				error = EINVAL;
				unbind_on_err = 0;
				eprintsoline(so, error);
				goto done;
			}
			error = sotpi_unbind(so, 0);
			if (error) {
				eprintsoline(so, error);
				goto done;
			}
			ASSERT(!(so->so_state & SS_ISBOUND));
			if (name == NULL) {
				so->so_state &=
				    ~(SS_ISCONNECTED|SS_ISCONNECTING);
				goto done;
			}
		}
		/* X/Open requires this check */
		if ((so->so_state & SS_CANTSENDMORE) && !xnet_skip_checks) {
			if (xnet_check_print) {
				printf("sockfs: X/Open bind state check "
				    "caused EINVAL\n");
			}
			error = EINVAL;
			goto done;
		}

		switch (so->so_family) {
		case AF_UNIX:
			/*
			 * All AF_UNIX addresses are nul terminated
			 * when copied in (copyin_name) so the minimum
			 * length is 3 bytes.
			 */
			if (name == NULL ||
			    (ssize_t)namelen <= sizeof (short) + 1) {
				error = EISDIR;
				eprintsoline(so, error);
				goto done;
			}
			/*
			 * Verify so_family matches the bound family.
			 * BSD does not check this for AF_UNIX resulting
			 * in funny mknods.
			 */
			if (name->sa_family != so->so_family) {
				error = EAFNOSUPPORT;
				goto done;
			}
			break;
		case AF_INET:
			if (name == NULL) {
				error = EINVAL;
				eprintsoline(so, error);
				goto done;
			}
			if ((size_t)namelen != sizeof (sin_t)) {
				error = name->sa_family != so->so_family ?
				    EAFNOSUPPORT : EINVAL;
				eprintsoline(so, error);
				goto done;
			}
			if ((flags & _SOBIND_XPG4_2) &&
			    (name->sa_family != so->so_family)) {
				/*
				 * This check has to be made for X/Open
				 * sockets; however, application failures have
				 * been observed when it is applied to
				 * all sockets.
				 */
				error = EAFNOSUPPORT;
				eprintsoline(so, error);
				goto done;
			}
			/*
			 * Force a zero sa_family to match so_family.
			 *
			 * Some programs like inetd(1M) don't set the
			 * family field. Other programs leave
			 * sin_family set to garbage - SunOS 4.X does
			 * not check the family field on a bind.
			 * We use the family field that
			 * was passed in to the socket() call.
			 */
			name->sa_family = so->so_family;
			break;

		case AF_INET6: {
#ifdef DEBUG
			sin6_t *sin6 = (sin6_t *)name;
#endif /* DEBUG */

			if (name == NULL) {
				error = EINVAL;
				eprintsoline(so, error);
				goto done;
			}
			if ((size_t)namelen != sizeof (sin6_t)) {
				error = name->sa_family != so->so_family ?
				    EAFNOSUPPORT : EINVAL;
				eprintsoline(so, error);
				goto done;
			}
			if (name->sa_family != so->so_family) {
				/*
				 * With IPv6 we require the family to match
				 * unlike in IPv4.
				 */
				error = EAFNOSUPPORT;
				eprintsoline(so, error);
				goto done;
			}
#ifdef DEBUG
			/*
			 * Verify that apps don't forget to clear
			 * sin6_scope_id etc
			 */
			if (sin6->sin6_scope_id != 0 &&
			    !IN6_IS_ADDR_LINKSCOPE(&sin6->sin6_addr)) {
				zcmn_err(getzoneid(), CE_WARN,
				    "bind with uninitialized sin6_scope_id "
				    "(%d) on socket. Pid = %d\n",
				    (int)sin6->sin6_scope_id,
				    (int)curproc->p_pid);
			}
			if (sin6->__sin6_src_id != 0) {
				zcmn_err(getzoneid(), CE_WARN,
				    "bind with uninitialized __sin6_src_id "
				    "(%d) on socket. Pid = %d\n",
				    (int)sin6->__sin6_src_id,
				    (int)curproc->p_pid);
			}
#endif /* DEBUG */
			break;
		}
		default:
			/*
			 * Don't do any length or sa_family check to allow
			 * non-sockaddr style addresses.
			 */
			if (name == NULL) {
				error = EINVAL;
				eprintsoline(so, error);
				goto done;
			}
			break;
		}

		if (namelen > (t_uscalar_t)so->so_laddr_maxlen) {
			error = ENAMETOOLONG;
			eprintsoline(so, error);
			goto done;
		}
		/*
		 * Save local address.
		 */
		so->so_laddr_len = (socklen_t)namelen;
		ASSERT(so->so_laddr_len <= so->so_laddr_maxlen);
		bcopy(name, so->so_laddr_sa, namelen);

		addr = so->so_laddr_sa;
		addrlen = (t_uscalar_t)so->so_laddr_len;
		switch (so->so_family) {
		case AF_INET6:
		case AF_INET:
			break;
		case AF_UNIX: {
			struct sockaddr_un *soun =
			    (struct sockaddr_un *)so->so_laddr_sa;
			struct vnode *vp;
			struct vattr vattr;

			ASSERT(so->so_ux_bound_vp == NULL);
			/*
			 * Create vnode for the specified path name.
			 * Keep vnode held with a reference in so_ux_bound_vp.
			 * Use the vnode pointer as the address used in the
			 * bind with the transport.
			 *
			 * Use the same mode as in BSD. In particular this does
			 * not observe the umask.
			 */
			/* MAXPATHLEN + soun_family + nul termination */
			if (so->so_laddr_len >
			    (socklen_t)(MAXPATHLEN + sizeof (short) + 1)) {
				error = ENAMETOOLONG;
				eprintsoline(so, error);
				goto done;
			}
			vattr.va_type = VSOCK;
			vattr.va_mode = 0777 & ~u.u_cmask;
			vattr.va_mask = AT_TYPE|AT_MODE;
			/* NOTE: holding so_lock */
			error = vn_create(soun->sun_path, UIO_SYSSPACE, &vattr,
			    EXCL, 0, &vp, CRMKNOD, 0, 0);
			if (error) {
				if (error == EEXIST)
					error = EADDRINUSE;
				eprintsoline(so, error);
				goto done;
			}
			/*
			 * Establish pointer from the underlying filesystem
			 * vnode to the socket node.
			 * so_ux_bound_vp and v_stream->sd_vnode form the
			 * cross-linkage between the underlying filesystem
			 * node and the socket node.
			 */
			ASSERT(SOTOV(so)->v_stream);
			mutex_enter(&vp->v_lock);
			vp->v_stream = SOTOV(so)->v_stream;
			so->so_ux_bound_vp = vp;
			mutex_exit(&vp->v_lock);

			/*
			 * Use the vnode pointer value as a unique address
			 * (together with the magic number to avoid conflicts
			 * with implicit binds) in the transport provider.
			 */
			so->so_ux_laddr.soua_vp = (void *)so->so_ux_bound_vp;
			so->so_ux_laddr.soua_magic = SOU_MAGIC_EXPLICIT;
			addr = &so->so_ux_laddr;
			addrlen = (t_uscalar_t)sizeof (so->so_ux_laddr);
			dprintso(so, 1, ("sobind UNIX: addrlen %d, addr %p\n",
			    addrlen,
			    ((struct so_ux_addr *)addr)->soua_vp));
			break;
		}
		} /* end switch (so->so_family) */
	}

	/*
	 * set SS_ACCEPTCONN before sending down O_T_BIND_REQ since
	 * the transport can start passing up T_CONN_IND messages
	 * as soon as it receives the bind req and strsock_proto()
	 * insists that SS_ACCEPTCONN is set when processing T_CONN_INDs.
	 */
	if (flags & _SOBIND_LISTEN) {
		if ((so->so_state & SS_ACCEPTCONN) == 0)
			clear_acceptconn_on_err = B_TRUE;
		save_so_backlog = so->so_backlog;
		restore_backlog_on_err = B_TRUE;
		so->so_state |= SS_ACCEPTCONN;
		so->so_backlog = backlog;
	}

	/*
	 * If NL7C addr(s) have been configured check for addr/port match,
	 * or if an implicit NL7C socket via AF_NCA mark socket as NL7C.
	 *
	 * NL7C supports the TCP transport only so check AF_INET and AF_INET6
	 * family sockets only. If match mark as such.
	 */
	if (nl7c_enabled && ((addr != NULL &&
	    (so->so_family == AF_INET || so->so_family == AF_INET6) &&
	    (nl7c = nl7c_lookup_addr(addr, addrlen))) ||
	    so->so_nl7c_flags == NL7C_AF_NCA)) {
		/*
		 * NL7C is not supported in non-global zones,
		 * we enforce this restriction here.
		 */
		if (so->so_zoneid == GLOBAL_ZONEID) {
			/* An NL7C socket, mark it */
			so->so_nl7c_flags |= NL7C_ENABLED;
			if (nl7c == NULL) {
				/*
				 * Was an AF_NCA bind() so add it to the
				 * addr list for reporting purposes.
				 */
				nl7c = nl7c_add_addr(addr, addrlen);
			}
		} else
			nl7c = NULL;
	}
	/*
	 * We send a T_BIND_REQ for TCP/UDP since we know it supports it,
	 * for other transports we will send an O_T_BIND_REQ.
	 */
	if (tcp_udp_xport &&
	    (so->so_family == AF_INET || so->so_family == AF_INET6))
		PRIM_type = T_BIND_REQ;

	bind_req.PRIM_type = PRIM_type;
	bind_req.ADDR_length = addrlen;
	bind_req.ADDR_offset = (t_scalar_t)sizeof (bind_req);
	bind_req.CONIND_number = backlog;
	/* NOTE: holding so_lock while sleeping */
	mp = soallocproto2(&bind_req, sizeof (bind_req),
	    addr, addrlen, 0, _ALLOC_SLEEP);
	so->so_state &= ~SS_LADDR_VALID;

	/* Done using so_laddr_sa - can drop the lock */
	mutex_exit(&so->so_lock);

	/*
	 * Intercept the bind_req message here to check if this <address/port>
	 * was configured as an SSL proxy server, or if another endpoint was
	 * already configured to act as a proxy for us.
	 *
	 * Note, only if NL7C not enabled for this socket.
	 */
	if (nl7c == NULL &&
	    (so->so_family == AF_INET || so->so_family == AF_INET6) &&
	    so->so_type == SOCK_STREAM) {

		if (so->so_kssl_ent != NULL) {
			kssl_release_ent(so->so_kssl_ent, so, so->so_kssl_type);
			so->so_kssl_ent = NULL;
		}

		so->so_kssl_type = kssl_check_proxy(mp, so, &so->so_kssl_ent);
		switch (so->so_kssl_type) {
		case KSSL_NO_PROXY:
			break;

		case KSSL_HAS_PROXY:
			mutex_enter(&so->so_lock);
			goto skip_transport;

		case KSSL_IS_PROXY:
			break;
		}
	}

	error = kstrputmsg(SOTOV(so), mp, NULL, 0, 0,
	    MSG_BAND|MSG_HOLDSIG|MSG_IGNERROR, 0);
	if (error) {
		eprintsoline(so, error);
		mutex_enter(&so->so_lock);
		goto done;
	}

	mutex_enter(&so->so_lock);
	error = sowaitprim(so, PRIM_type, T_BIND_ACK,
	    (t_uscalar_t)sizeof (*bind_ack), &mp, 0);
	if (error) {
		eprintsoline(so, error);
		goto done;
	}
skip_transport:
	ASSERT(mp);
	/*
	 * Even if some TPI message (e.g. T_DISCON_IND) was received in
	 * strsock_proto while the lock was dropped above, the bind
	 * is allowed to complete.
	 */

	/* Mark as bound. This will be undone if we detect errors below. */
	if (flags & _SOBIND_NOXLATE) {
		ASSERT(so->so_family == AF_UNIX);
		so->so_state |= SS_FADDR_NOXLATE;
	}
	ASSERT(!(so->so_state & SS_ISBOUND) || (flags & _SOBIND_REBIND));
	so->so_state |= SS_ISBOUND;
	ASSERT(so->so_unbind_mp);

	/* note that we've already set SS_ACCEPTCONN above */

	/*
	 * Recompute addrlen - an unspecified bind sent down an
	 * address of length zero but we expect the appropriate length
	 * in return.
	 */
	addrlen = (t_uscalar_t)(so->so_family == AF_UNIX ?
	    sizeof (so->so_ux_laddr) : so->so_laddr_len);

	bind_ack = (struct T_bind_ack *)mp->b_rptr;
	/*
	 * The alignment restriction is really too strict but
	 * we want enough alignment to inspect the fields of
	 * a sockaddr_in.
	 */
	addr = sogetoff(mp, bind_ack->ADDR_offset,
	    bind_ack->ADDR_length,
	    __TPI_ALIGN_SIZE);
	if (addr == NULL) {
		freemsg(mp);
		error = EPROTO;
		eprintsoline(so, error);
		goto done;
	}
	if (!(flags & _SOBIND_UNSPEC)) {
		/*
		 * Verify that the transport didn't return something we
		 * did not want e.g. an address other than what we asked for.
		 *
		 * NOTE: These checks would go away if/when we switch to
		 * using the new TPI (in which the transport would fail
		 * the request instead of assigning a different address).
		 *
		 * NOTE2: For protocols that we don't know (i.e. any
		 * other than AF_INET6, AF_INET and AF_UNIX), we
		 * cannot know if the transport should be expected to
		 * return the same address as that requested.
		 *
		 * NOTE3: For AF_INET and AF_INET6, TCP/UDP, we send
		 * down a T_BIND_REQ. We use O_T_BIND_REQ for others.
		 *
		 * For example, in the case of netatalk it may be
		 * inappropriate for the transport to return the
		 * requested address (as it may have allocated a local
		 * port number in behaviour similar to that of an
		 * AF_INET bind request with a port number of zero).
		 *
		 * Given the definition of O_T_BIND_REQ, where the
		 * transport may bind to an address other than the
		 * requested address, it's not possible to determine
		 * whether a returned address that differs from the
		 * requested address is a reason to fail (because the
		 * requested address was not available) or succeed
		 * (because the transport allocated an appropriate
		 * address and/or port).
		 *
		 * sockfs currently requires that the transport return
		 * the requested address in the T_BIND_ACK, unless
		 * there is code here to allow for any discrepancy.
		 * Such code exists for AF_INET and AF_INET6.
		 *
		 * Netatalk chooses to return the requested address
		 * rather than the (correct) allocated address. This
		 * means that netatalk violates the TPI specification
		 * (and would not function correctly if used from a
		 * TLI application), but it does mean that it works
		 * with sockfs.
		 *
		 * As noted above, using the newer XTI bind primitive
		 * (T_BIND_REQ) in preference to O_T_BIND_REQ would
		 * allow sockfs to be more sure about whether or not
		 * the bind request had succeeded (as transports are
		 * not permitted to bind to a different address than
		 * that requested - they must return failure).
		 * Unfortunately, support for T_BIND_REQ may not be
		 * present in all transport implementations (netatalk,
		 * for example, doesn't have it), making the
		 * transition difficult.
		 */
		if (bind_ack->ADDR_length != addrlen) {
			/* Assumes that the requested address was in use */
			freemsg(mp);
			error = EADDRINUSE;
			eprintsoline(so, error);
			goto done;
		}

		switch (so->so_family) {
		case AF_INET6:
		case AF_INET: {
			sin_t *rname, *aname;

			rname = (sin_t *)addr;
			aname = (sin_t *)so->so_laddr_sa;

			/*
			 * Take advantage of the alignment
			 * of sin_port and sin6_port which fall
			 * in the same place in their data structures.
			 * Just use sin_port for either address family.
			 *
			 * This may become a problem if (heaven forbid)
			 * there's a separate ipv6port_reserved... :-P
			 *
			 * Binding to port 0 has the semantics of letting
			 * the transport bind to any port.
			 *
			 * If the transport is TCP or UDP since we had sent
			 * a T_BIND_REQ we would not get a port other than
			 * what we asked for.
			 */
			if (tcp_udp_xport) {
				/*
				 * Pick up the new port number if we bound to
				 * port 0.
				 */
				if (aname->sin_port == 0)
					aname->sin_port = rname->sin_port;
				so->so_state |= SS_LADDR_VALID;
				break;
			}
			if (aname->sin_port != 0 &&
			    aname->sin_port != rname->sin_port) {
				freemsg(mp);
				error = EADDRINUSE;
				eprintsoline(so, error);
				goto done;
			}
			/*
			 * Pick up the new port number if we bound to port 0.
			 */
			aname->sin_port = rname->sin_port;

			/*
			 * Unfortunately, addresses aren't _quite_ the same.
			 */
			if (so->so_family == AF_INET) {
				if (aname->sin_addr.s_addr !=
				    rname->sin_addr.s_addr) {
					freemsg(mp);
					error = EADDRNOTAVAIL;
					eprintsoline(so, error);
					goto done;
				}
			} else {
				sin6_t *rname6 = (sin6_t *)rname;
				sin6_t *aname6 = (sin6_t *)aname;

				if (!IN6_ARE_ADDR_EQUAL(&aname6->sin6_addr,
				    &rname6->sin6_addr)) {
					freemsg(mp);
					error = EADDRNOTAVAIL;
					eprintsoline(so, error);
					goto done;
				}
			}
			break;
		}
		case AF_UNIX:
			if (bcmp(addr, &so->so_ux_laddr, addrlen) != 0) {
				freemsg(mp);
				error = EADDRINUSE;
				eprintsoline(so, error);
				eprintso(so,
				    ("addrlen %d, addr 0x%x, vp %p\n",
				    addrlen, *((int *)addr),
				    so->so_ux_bound_vp));
				goto done;
			}
			so->so_state |= SS_LADDR_VALID;
			break;
		default:
			/*
			 * NOTE: This assumes that addresses can be
			 * byte-compared for equivalence.
			 */
			if (bcmp(addr, so->so_laddr_sa, addrlen) != 0) {
				freemsg(mp);
				error = EADDRINUSE;
				eprintsoline(so, error);
				goto done;
			}
			/*
			 * Don't mark SS_LADDR_VALID, as we cannot be
			 * sure that the returned address is the real
			 * bound address when talking to an unknown
			 * transport.
			 */
			break;
		}
	} else {
		/*
		 * Save the returned address for getsockname.
		 * Needed for unspecific bind unless transport supports
		 * the TI_GETMYNAME ioctl.
		 * Do this for AF_INET{,6} even though they do, as
		 * caching info here is much better performance than
		 * a TPI/STREAMS trip to the transport for getsockname.
		 * Any which can't for some reason _must_ _not_ set
		 * LADDR_VALID here for the caching version of getsockname
		 * to not break;
		 */
		switch (so->so_family) {
		case AF_UNIX:
			/*
			 * Record the address bound with the transport
			 * for use by socketpair.
			 */
			bcopy(addr, &so->so_ux_laddr, addrlen);
			so->so_state |= SS_LADDR_VALID;
			break;
		case AF_INET:
		case AF_INET6:
			ASSERT(so->so_laddr_len <= so->so_laddr_maxlen);
			bcopy(addr, so->so_laddr_sa, so->so_laddr_len);
			so->so_state |= SS_LADDR_VALID;
			break;
		default:
			/*
			 * Don't mark SS_LADDR_VALID, as we cannot be
			 * sure that the returned address is the real
			 * bound address when talking to an unknown
			 * transport.
			 */
			break;
		}
	}

	if (nl7c != NULL) {
		/* Register listen()er sonode pointer with NL7C */
		nl7c_listener_addr(nl7c, so);
	}

	freemsg(mp);

done:
	if (error) {
		/* reset state & backlog to values held on entry */
		if (clear_acceptconn_on_err == B_TRUE)
			so->so_state &= ~SS_ACCEPTCONN;
		if (restore_backlog_on_err == B_TRUE)
			so->so_backlog = save_so_backlog;

		if (unbind_on_err && so->so_state & SS_ISBOUND) {
			int err;

			err = sotpi_unbind(so, 0);
			/* LINTED - statement has no consequent: if */
			if (err) {
				eprintsoline(so, error);
			} else {
				ASSERT(!(so->so_state & SS_ISBOUND));
			}
		}
	}
	if (!(flags & _SOBIND_LOCK_HELD)) {
		so_unlock_single(so, SOLOCKED);
		mutex_exit(&so->so_lock);
	} else {
		/* If the caller held the lock don't release it here */
		ASSERT(MUTEX_HELD(&so->so_lock));
		ASSERT(so->so_flag & SOLOCKED);
	}
	return (error);
}

/* bind the socket */
static int
sotpi_bind(struct sonode *so, struct sockaddr *name, socklen_t namelen,
    int flags)
{
	if ((flags & _SOBIND_SOCKETPAIR) == 0)
		return (sotpi_bindlisten(so, name, namelen, 0, flags));

	flags &= ~_SOBIND_SOCKETPAIR;
	return (sotpi_bindlisten(so, name, namelen, 1, flags));
}

/*
 * Unbind a socket - used when bind() fails, when bind() specifies a NULL
 * address, or when listen needs to unbind and bind.
 * If the _SOUNBIND_REBIND flag is specified the addresses are retained
 * so that a sobind can pick them up.
 */
static int
sotpi_unbind(struct sonode *so, int flags)
{
	struct T_unbind_req	unbind_req;
	int			error = 0;
	mblk_t			*mp;

	dprintso(so, 1, ("sotpi_unbind(%p, 0x%x) %s\n",
	    so, flags, pr_state(so->so_state, so->so_mode)));

	ASSERT(MUTEX_HELD(&so->so_lock));
	ASSERT(so->so_flag & SOLOCKED);

	if (!(so->so_state & SS_ISBOUND)) {
		error = EINVAL;
		eprintsoline(so, error);
		goto done;
	}

	mutex_exit(&so->so_lock);

	/*
	 * Flush the read and write side (except stream head read queue)
	 * and send down T_UNBIND_REQ.
	 */
	(void) putnextctl1(strvp2wq(SOTOV(so)), M_FLUSH, FLUSHRW);

	unbind_req.PRIM_type = T_UNBIND_REQ;
	mp = soallocproto1(&unbind_req, sizeof (unbind_req),
	    0, _ALLOC_SLEEP);
	error = kstrputmsg(SOTOV(so), mp, NULL, 0, 0,
	    MSG_BAND|MSG_HOLDSIG|MSG_IGNERROR, 0);
	mutex_enter(&so->so_lock);
	if (error) {
		eprintsoline(so, error);
		goto done;
	}

	error = sowaitokack(so, T_UNBIND_REQ);
	if (error) {
		eprintsoline(so, error);
		goto done;
	}

	/*
	 * Even if some TPI message (e.g. T_DISCON_IND) was received in
	 * strsock_proto while the lock was dropped above, the unbind
	 * is allowed to complete.
	 */
	if (!(flags & _SOUNBIND_REBIND)) {
		/*
		 * Clear out bound address.
		 */
		vnode_t *vp;

		if ((vp = so->so_ux_bound_vp) != NULL) {

			/* Undo any SSL proxy setup */
			if ((so->so_family == AF_INET ||
			    so->so_family == AF_INET6) &&
			    (so->so_type == SOCK_STREAM) &&
			    (so->so_kssl_ent != NULL)) {
				kssl_release_ent(so->so_kssl_ent, so,
				    so->so_kssl_type);
				so->so_kssl_ent = NULL;
				so->so_kssl_type = KSSL_NO_PROXY;
			}

			so->so_ux_bound_vp = NULL;
			vn_rele_stream(vp);
		}
		/* Clear out address */
		so->so_laddr_len = 0;
	}
	so->so_state &= ~(SS_ISBOUND|SS_ACCEPTCONN|SS_LADDR_VALID);

done:

	/* If the caller held the lock don't release it here */
	ASSERT(MUTEX_HELD(&so->so_lock));
	ASSERT(so->so_flag & SOLOCKED);

	return (error);
}

/*
 * listen on the socket.
 * For TPI conforming transports this has to first unbind with the transport
 * and then bind again using the new backlog.
 */
int
sotpi_listen(struct sonode *so, int backlog)
{
	int		error = 0;

	dprintso(so, 1, ("sotpi_listen(%p, %d) %s\n",
	    so, backlog, pr_state(so->so_state, so->so_mode)));

	if (so->so_serv_type == T_CLTS)
		return (EOPNOTSUPP);

	/*
	 * If the socket is ready to accept connections already, then
	 * return without doing anything. This avoids a problem where
	 * a second listen() call fails if a connection is pending and
	 * leaves the socket unbound. Only when we are not unbinding
	 * with the transport can we safely increase the backlog.
	 */
	if (so->so_state & SS_ACCEPTCONN &&
	    !((so->so_family == AF_INET || so->so_family == AF_INET6) &&
	    /*CONSTCOND*/
	    !solisten_tpi_tcp))
		return (0);

	if (so->so_state & SS_ISCONNECTED)
		return (EINVAL);

	mutex_enter(&so->so_lock);
	so_lock_single(so);	/* Set SOLOCKED */

	if (backlog < 0)
		backlog = 0;
	/*
	 * Use the same qlimit as in BSD. BSD checks the qlimit
	 * before queuing the next connection implying that a
	 * listen(sock, 0) allows one connection to be queued.
	 * BSD also uses 1.5 times the requested backlog.
	 *
	 * XNS Issue 4 required a strict interpretation of the backlog.
	 * This has been waived subsequently for Issue 4 and the change
	 * incorporated in XNS Issue 5. So we aren't required to do
	 * anything special for XPG apps.
	 */
	if (backlog >= (INT_MAX - 1) / 3)
		backlog = INT_MAX;
	else
		backlog = backlog * 3 / 2 + 1;

	/*
	 * If the listen doesn't change the backlog we do nothing.
	 * This avoids an EPROTO error from the transport.
	 */
	if ((so->so_state & SS_ACCEPTCONN) &&
	    so->so_backlog == backlog)
		goto done;

	if (!(so->so_state & SS_ISBOUND)) {
		/*
		 * Must have been explicitly bound in the UNIX domain.
		 */
		if (so->so_family == AF_UNIX) {
			error = EINVAL;
			goto done;
		}
		error = sotpi_bindlisten(so, NULL, 0, backlog,
		    _SOBIND_UNSPEC|_SOBIND_LOCK_HELD|_SOBIND_LISTEN);
	} else if (backlog > 0) {
		/*
		 * AF_INET{,6} hack to avoid losing the port.
		 * Assumes that all AF_INET{,6} transports can handle a
		 * O_T_BIND_REQ with a non-zero CONIND_number when the TPI
		 * has already bound thus it is possible to avoid the unbind.
		 */
		if (!((so->so_family == AF_INET || so->so_family == AF_INET6) &&
		    /*CONSTCOND*/
		    !solisten_tpi_tcp)) {
			error = sotpi_unbind(so, _SOUNBIND_REBIND);
			if (error)
				goto done;
		}
		error = sotpi_bindlisten(so, NULL, 0, backlog,
		    _SOBIND_REBIND|_SOBIND_LOCK_HELD|_SOBIND_LISTEN);
	} else {
		so->so_state |= SS_ACCEPTCONN;
		so->so_backlog = backlog;
	}
	if (error)
		goto done;
	ASSERT(so->so_state & SS_ACCEPTCONN);
done:
	so_unlock_single(so, SOLOCKED);
	mutex_exit(&so->so_lock);
	return (error);
}

/*
 * Disconnect either a specified seqno or all (-1).
 * The former is used on listening sockets only.
 *
 * When seqno == -1 sodisconnect could call sotpi_unbind. However,
 * the current use of sodisconnect(seqno == -1) is only for shutdown
 * so there is no point (and it is potentially incorrect) to unbind.
 */
int
sodisconnect(struct sonode *so, t_scalar_t seqno, int flags)
{
	struct T_discon_req	discon_req;
	int			error = 0;
	mblk_t			*mp;

	dprintso(so, 1, ("sodisconnect(%p, %d, 0x%x) %s\n",
	    so, seqno, flags, pr_state(so->so_state, so->so_mode)));

	if (!(flags & _SODISCONNECT_LOCK_HELD)) {
		mutex_enter(&so->so_lock);
		so_lock_single(so);	/* Set SOLOCKED */
	} else {
		ASSERT(MUTEX_HELD(&so->so_lock));
		ASSERT(so->so_flag & SOLOCKED);
	}

	if (!(so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING|SS_ACCEPTCONN))) {
		error = EINVAL;
		eprintsoline(so, error);
		goto done;
	}

	mutex_exit(&so->so_lock);
	/*
	 * Flush the write side (unless this is a listener)
	 * and then send down a T_DISCON_REQ.
	 * (Don't flush on listener since it could flush {O_}T_CONN_RES
	 * and other messages.)
	 */
	if (!(so->so_state & SS_ACCEPTCONN))
		(void) putnextctl1(strvp2wq(SOTOV(so)), M_FLUSH, FLUSHW);

	discon_req.PRIM_type = T_DISCON_REQ;
	discon_req.SEQ_number = seqno;
	mp = soallocproto1(&discon_req, sizeof (discon_req),
	    0, _ALLOC_SLEEP);
	error = kstrputmsg(SOTOV(so), mp, NULL, 0, 0,
	    MSG_BAND|MSG_HOLDSIG|MSG_IGNERROR, 0);
	mutex_enter(&so->so_lock);
	if (error) {
		eprintsoline(so, error);
		goto done;
	}

	error = sowaitokack(so, T_DISCON_REQ);
	if (error) {
		eprintsoline(so, error);
		goto done;
	}
	/*
	 * Even if some TPI message (e.g. T_DISCON_IND) was received in
	 * strsock_proto while the lock was dropped above, the disconnect
	 * is allowed to complete. However, it is not possible to
	 * assert that SS_ISCONNECTED|SS_ISCONNECTING are set.
	 */
	so->so_state &=
	    ~(SS_ISCONNECTED|SS_ISCONNECTING|SS_LADDR_VALID|SS_FADDR_VALID);
done:
	if (!(flags & _SODISCONNECT_LOCK_HELD)) {
		so_unlock_single(so, SOLOCKED);
		mutex_exit(&so->so_lock);
	} else {
		/* If the caller held the lock don't release it here */
		ASSERT(MUTEX_HELD(&so->so_lock));
		ASSERT(so->so_flag & SOLOCKED);
	}
	return (error);
}

int
sotpi_accept(struct sonode *so, int fflag, struct sonode **nsop)
{
	struct T_conn_ind	*conn_ind;
	struct T_conn_res	*conn_res;
	int			error = 0;
	mblk_t			*mp, *ctxmp;
	struct sonode		*nso;
	vnode_t			*nvp;
	void			*src;
	t_uscalar_t		srclen;
	void			*opt;
	t_uscalar_t		optlen;
	t_scalar_t		PRIM_type;
	t_scalar_t		SEQ_number;

	dprintso(so, 1, ("sotpi_accept(%p, 0x%x, %p) %s\n",
	    so, fflag, nsop, pr_state(so->so_state, so->so_mode)));

	/*
	 * Defer single-threading the accepting socket until
	 * the T_CONN_IND has been received and parsed and the
	 * new sonode has been opened.
	 */

	/* Check that we are not already connected */
	if ((so->so_state & SS_ACCEPTCONN) == 0)
		goto conn_bad;
again:
	if ((error = sowaitconnind(so, fflag, &mp)) != 0)
		goto e_bad;

	ASSERT(mp);
	conn_ind = (struct T_conn_ind *)mp->b_rptr;
	ctxmp = mp->b_cont;

	/*
	 * Save SEQ_number for error paths.
	 */
	SEQ_number = conn_ind->SEQ_number;

	srclen = conn_ind->SRC_length;
	src = sogetoff(mp, conn_ind->SRC_offset, srclen, 1);
	if (src == NULL) {
		error = EPROTO;
		freemsg(mp);
		eprintsoline(so, error);
		goto disconnect_unlocked;
	}
	optlen = conn_ind->OPT_length;
	switch (so->so_family) {
	case AF_INET:
	case AF_INET6:
		if ((optlen == sizeof (intptr_t)) &&
		    ((so->so_state & SS_DIRECT) != 0)) {
			bcopy(mp->b_rptr + conn_ind->OPT_offset,
			    &opt, conn_ind->OPT_length);
		} else {
			/*
			 * The transport (in this case TCP) hasn't sent up
			 * a pointer to an instance for the accept fast-path.
			 * Disable fast-path completely because the call to
			 * sotpi_create() below would otherwise create an
			 * incomplete TCP instance, which would lead to
			 * problems when sockfs sends a normal T_CONN_RES
			 * message down the new stream.
			 */
			if (so->so_state & SS_DIRECT) {
				int rval;
				/*
				 * For consistency we inform tcp to disable
				 * direct interface on the listener, though
				 * we can certainly live without doing this
				 * because no data will ever travel upstream
				 * on the listening socket.
				 */
				so->so_state &= ~SS_DIRECT;
				(void) strioctl(SOTOV(so), _SIOCSOCKFALLBACK,
				    0, 0, K_TO_K, CRED(), &rval);
			}
			opt = NULL;
			optlen = 0;
		}
		break;
	case AF_UNIX:
	default:
		if (optlen != 0) {
			opt = sogetoff(mp, conn_ind->OPT_offset, optlen,
			    __TPI_ALIGN_SIZE);
			if (opt == NULL) {
				error = EPROTO;
				freemsg(mp);
				eprintsoline(so, error);
				goto disconnect_unlocked;
			}
		}
		if (so->so_family == AF_UNIX) {
			if (!(so->so_state & SS_FADDR_NOXLATE)) {
				src = NULL;
				srclen = 0;
			}
			/* Extract src address from options */
			if (optlen != 0)
				so_getopt_srcaddr(opt, optlen, &src, &srclen);
		}
		break;
	}

	/*
	 * Create the new socket.
	 */
	VN_HOLD(so->so_accessvp);
	nso = sotpi_create(so->so_accessvp, so->so_family, so->so_type,
	    so->so_protocol, so->so_version, so, &error);
	if (nso == NULL) {
		ASSERT(error != 0);
		/*
		 * Accept can not fail with ENOBUFS. sotpi_create
		 * sleeps waiting for memory until a signal is caught
		 * so return EINTR.
		 */
		freemsg(mp);
		if (error == ENOBUFS)
			error = EINTR;
		goto e_disc_unl;
	}
	nvp = SOTOV(nso);

	/*
	 * If the transport sent up an SSL connection context, then attach
	 * it to the new socket, and set the (sd_wputdatafunc)() and
	 * (sd_rputdatafunc)() stream head hooks to intercept and process
	 * SSL records.
	 */
	if (ctxmp != NULL) {
		/*
		 * This kssl_ctx_t is already held for us by the transport.
		 * So, we don't need to do a kssl_hold_ctx() here.
		 */
		nso->so_kssl_ctx = *((kssl_ctx_t *)ctxmp->b_rptr);
		freemsg(ctxmp);
		mp->b_cont = NULL;
		strsetrwputdatahooks(nvp, strsock_kssl_input,
		    strsock_kssl_output);
	}
#ifdef DEBUG
	/*
	 * SO_DEBUG is used to trigger the dprint* and eprint* macros thus
	 * it's inherited early to allow debugging of the accept code itself.
	 */
	nso->so_options |= so->so_options & SO_DEBUG;
#endif /* DEBUG */

	/*
	 * Save the SRC address from the T_CONN_IND
	 * for getpeername to work on AF_UNIX and on transports that do not
	 * support TI_GETPEERNAME.
	 *
	 * NOTE: AF_UNIX NUL termination is ensured by the sender's
	 * copyin_name().
	 */
	if (srclen > (t_uscalar_t)nso->so_faddr_maxlen) {
		error = EINVAL;
		freemsg(mp);
		eprintsoline(so, error);
		goto disconnect_vp_unlocked;
	}
	nso->so_faddr_len = (socklen_t)srclen;
	ASSERT(so->so_faddr_len <= so->so_faddr_maxlen);
	bcopy(src, nso->so_faddr_sa, srclen);
	nso->so_state |= SS_FADDR_VALID;

	if ((DB_REF(mp) > 1) || MBLKSIZE(mp) <
	    (sizeof (struct T_conn_res) + sizeof (intptr_t))) {
		cred_t	*cr;

		if ((cr = DB_CRED(mp)) != NULL) {
			crhold(cr);
			nso->so_peercred = cr;
			nso->so_cpid = DB_CPID(mp);
		}
		freemsg(mp);

		mp = soallocproto1(NULL, sizeof (struct T_conn_res) +
		    sizeof (intptr_t), 0, _ALLOC_INTR);
		if (mp == NULL) {
			/*
			 * Accept can not fail with ENOBUFS.
			 * A signal was caught so return EINTR.
			 */
			error = EINTR;
			eprintsoline(so, error);
			goto disconnect_vp_unlocked;
		}
		conn_res = (struct T_conn_res *)mp->b_rptr;
	} else {
		nso->so_peercred = DB_CRED(mp);
		nso->so_cpid = DB_CPID(mp);
		DB_CRED(mp) = NULL;

		mp->b_rptr = DB_BASE(mp);
		conn_res = (struct T_conn_res *)mp->b_rptr;
		mp->b_wptr = mp->b_rptr + sizeof (struct T_conn_res);
	}

	/*
	 * The new socket must be bound at least in sockfs and, except for
	 * AF_INET (or AF_INET6), it also has to be bound in the transport
	 * provider. After accepting the connection on nso, so_laddr_sa will
	 * be set to contain the same address as the listener's local address,
	 * so the address we bind to isn't important.
	 */
	if ((nso->so_family == AF_INET || nso->so_family == AF_INET6) &&
	    /*CONSTCOND*/
	    nso->so_type == SOCK_STREAM && !soaccept_tpi_tcp) {
		/*
		 * Optimization for AF_INET{,6} transports
		 * that can handle a T_CONN_RES without being bound.
		 */
		mutex_enter(&nso->so_lock);
		so_automatic_bind(nso);
		mutex_exit(&nso->so_lock);
	} else {
		/* Perform NULL bind with the transport provider. */
		if ((error = sotpi_bind(nso, NULL, 0, _SOBIND_UNSPEC)) != 0) {
			ASSERT(error != ENOBUFS);
			freemsg(mp);
			eprintsoline(nso, error);
			goto disconnect_vp_unlocked;
		}
	}

	/*
	 * Inherit SIOCSPGRP, SS_ASYNC before we send the {O_}T_CONN_RES
	 * so that any data arriving on the new socket will cause the
	 * appropriate signals to be delivered for the new socket.
	 *
	 * No other thread (except strsock_proto and strsock_misc)
	 * can access the new socket thus we relax the locking.
	 */
	nso->so_pgrp = so->so_pgrp;
	nso->so_state |= so->so_state & (SS_ASYNC|SS_FADDR_NOXLATE);

	if (nso->so_pgrp != 0) {
		if ((error = so_set_events(nso, nvp, CRED())) != 0) {
			eprintsoline(nso, error);
			error = 0;
			nso->so_pgrp = 0;
		}
	}

	/*
	 * Make note of the socket level options. TCP and IP level options
	 * are already inherited. We could do all this after accept is
	 * successful but doing it here simplifies code and no harm done
	 * for error case.
	 */
	nso->so_options = so->so_options & (SO_DEBUG|SO_REUSEADDR|SO_KEEPALIVE|
	    SO_DONTROUTE|SO_BROADCAST|SO_USELOOPBACK|
	    SO_OOBINLINE|SO_DGRAM_ERRIND|SO_LINGER);
	nso->so_sndbuf = so->so_sndbuf;
	nso->so_rcvbuf = so->so_rcvbuf;
	if (nso->so_options & SO_LINGER)
		nso->so_linger = so->so_linger;

	if ((so->so_state & SS_DIRECT) != 0) {
		mblk_t *ack_mp;

		ASSERT(opt != NULL);

		conn_res->OPT_length = optlen;
		conn_res->OPT_offset = MBLKL(mp);
		bcopy(&opt, mp->b_wptr, optlen);
		mp->b_wptr += optlen;
		conn_res->PRIM_type = T_CONN_RES;
		conn_res->ACCEPTOR_id = 0;
		PRIM_type = T_CONN_RES;

		/* Send down the T_CONN_RES on acceptor STREAM */
		error = kstrputmsg(SOTOV(nso), mp, NULL,
		    0, 0, MSG_BAND|MSG_HOLDSIG|MSG_IGNERROR, 0);
		if (error) {
			mutex_enter(&so->so_lock);
			so_lock_single(so);
			eprintsoline(so, error);
			goto disconnect_vp;
		}
		mutex_enter(&nso->so_lock);
		error = sowaitprim(nso, T_CONN_RES, T_OK_ACK,
		    (t_uscalar_t)sizeof (struct T_ok_ack), &ack_mp, 0);
		if (error) {
			mutex_exit(&nso->so_lock);
			mutex_enter(&so->so_lock);
			so_lock_single(so);
			eprintsoline(so, error);
			goto disconnect_vp;
		}
		if (nso->so_family == AF_INET) {
			sin_t *sin;

			sin = (sin_t *)(ack_mp->b_rptr +
			    sizeof (struct T_ok_ack));
			bcopy(sin, nso->so_laddr_sa, sizeof (sin_t));
			nso->so_laddr_len = sizeof (sin_t);
		} else {
			sin6_t *sin6;

			sin6 = (sin6_t *)(ack_mp->b_rptr +
			    sizeof (struct T_ok_ack));
			bcopy(sin6, nso->so_laddr_sa, sizeof (sin6_t));
			nso->so_laddr_len = sizeof (sin6_t);
		}
		freemsg(ack_mp);

		nso->so_state |= SS_ISCONNECTED | SS_LADDR_VALID;
		nso->so_priv = opt;

		if (so->so_nl7c_flags & NL7C_ENABLED) {
			/*
			 * A NL7C marked listen()er so the new socket
			 * inherits the listen()er's NL7C state, except
			 * for NL7C_POLLIN.
			 *
			 * Only call NL7C to process the new socket if
			 * the listen socket allows blocking i/o.
			 */
			nso->so_nl7c_flags = so->so_nl7c_flags & (~NL7C_POLLIN);
			if (so->so_state & (SS_NONBLOCK|SS_NDELAY)) {
				/*
				 * Nonblocking accept(): just make it
				 * persist to defer processing to the
				 * read-side syscall (e.g. read).
				 */
				nso->so_nl7c_flags |= NL7C_SOPERSIST;
			} else if (nl7c_process(nso, B_FALSE)) {
				/*
				 * NL7C has completed processing on the
				 * socket, close the socket and back to
				 * the top to await the next T_CONN_IND.
				 */
				mutex_exit(&nso->so_lock);
				(void) VOP_CLOSE(nvp, 0, 1, (offset_t)0,
				    CRED());
				VN_RELE(nvp);
				goto again;
			}
			/* Pass the new socket out */
		}

		mutex_exit(&nso->so_lock);

		/*
		 * It's possible, through the use of autopush for example,
		 * that the acceptor stream may not support SS_DIRECT
		 * semantics. If the new socket does not support SS_DIRECT
		 * we issue a _SIOCSOCKFALLBACK to inform the transport
		 * as we would in the I_PUSH case.
		 */
		if (!(nso->so_state & SS_DIRECT)) {
			int rval;

			if ((error = strioctl(SOTOV(nso), _SIOCSOCKFALLBACK,
			    0, 0, K_TO_K, CRED(), &rval)) != 0) {
				mutex_enter(&so->so_lock);
				so_lock_single(so);
				eprintsoline(so, error);
				goto disconnect_vp;
			}
		}

		/*
		 * Pass out new socket.
		 */
		if (nsop != NULL)
			*nsop = nso;

		return (0);
	}

	/*
	 * Copy local address from listener.
	 */
	nso->so_laddr_len = so->so_laddr_len;
	ASSERT(nso->so_laddr_len <= nso->so_laddr_maxlen);
	bcopy(so->so_laddr_sa, nso->so_laddr_sa, nso->so_laddr_len);
	nso->so_state |= SS_LADDR_VALID;

	/*
	 * This is the non-performance case for sockets (e.g. AF_UNIX sockets)
	 * which don't support the FireEngine accept fast-path. It is also
	 * used when the virtual "sockmod" has been I_POP'd and I_PUSH'd
	 * again. Neither sockfs nor TCP attempt to find out if some other
	 * random module has been inserted in between (in which case we
	 * should follow TLI accept behaviour). We blindly assume the worst
	 * case and revert back to old behaviour i.e. TCP will not send us
	 * any option (eager) and the accept should happen on the listener
	 * queue. Any queued T_conn_ind have already got their options removed
	 * by so_sock2_stream() when "sockmod" was I_POP'd.
	 */
	/*
	 * Fill in the {O_}T_CONN_RES before getting SOLOCKED.
	 */
	if ((nso->so_mode & SM_ACCEPTOR_ID) == 0) {
#ifdef _ILP32
		queue_t	*q;

		/*
		 * Find read queue in driver
		 * Can safely do this since we "own" nso/nvp.
		 */
		q = strvp2wq(nvp)->q_next;
		while (SAMESTR(q))
			q = q->q_next;
		q = RD(q);
		conn_res->ACCEPTOR_id = (t_uscalar_t)q;
#else
		conn_res->ACCEPTOR_id = (t_uscalar_t)getminor(nvp->v_rdev);
#endif /* _ILP32 */
		conn_res->PRIM_type = O_T_CONN_RES;
		PRIM_type = O_T_CONN_RES;
	} else {
		conn_res->ACCEPTOR_id = nso->so_acceptor_id;
		conn_res->PRIM_type = T_CONN_RES;
		PRIM_type = T_CONN_RES;
	}
	conn_res->SEQ_number = SEQ_number;
	conn_res->OPT_length = 0;
	conn_res->OPT_offset = 0;

	mutex_enter(&so->so_lock);
	so_lock_single(so);	/* Set SOLOCKED */
	mutex_exit(&so->so_lock);

	error = kstrputmsg(SOTOV(so), mp, NULL,
	    0, 0, MSG_BAND|MSG_HOLDSIG|MSG_IGNERROR, 0);
	mutex_enter(&so->so_lock);
	if (error) {
		eprintsoline(so, error);
		goto disconnect_vp;
	}
	error = sowaitokack(so, PRIM_type);
	if (error) {
		eprintsoline(so, error);
		goto disconnect_vp;
	}
	so_unlock_single(so, SOLOCKED);
	mutex_exit(&so->so_lock);

	nso->so_state |= SS_ISCONNECTED;

	/*
	 * Pass out new socket.
	 */
	if (nsop != NULL)
		*nsop = nso;

	return (0);


eproto_disc_unl:
	error = EPROTO;
e_disc_unl:
	eprintsoline(so, error);
	goto disconnect_unlocked;

pr_disc_vp_unl:
	eprintsoline(so, error);
disconnect_vp_unlocked:
	(void) VOP_CLOSE(nvp, 0, 1, 0, CRED());
	VN_RELE(nvp);
disconnect_unlocked:
	(void) sodisconnect(so, SEQ_number, 0);
	return (error);

pr_disc_vp:
	eprintsoline(so, error);
disconnect_vp:
	(void) sodisconnect(so, SEQ_number, _SODISCONNECT_LOCK_HELD);
	so_unlock_single(so, SOLOCKED);
	mutex_exit(&so->so_lock);
	(void) VOP_CLOSE(nvp, 0, 1, 0, CRED());
	VN_RELE(nvp);
	return (error);

conn_bad:	/* Note: SunOS 4/BSD unconditionally returns EINVAL here */
	error = (so->so_type == SOCK_DGRAM || so->so_type == SOCK_RAW)
	    ? EOPNOTSUPP : EINVAL;
e_bad:
	eprintsoline(so, error);
	return (error);
}

/*
 * connect a socket.
 *
 * Allow SOCK_DGRAM sockets to reconnect (by specifying a new address) and to
 * unconnect (by specifying a null address).
 */
int
sotpi_connect(struct sonode *so,
	const struct sockaddr *name,
	socklen_t namelen,
	int fflag,
	int flags)
{
	struct T_conn_req	conn_req;
	int			error = 0;
	mblk_t			*mp;
	void			*src;
	socklen_t		srclen;
	void			*addr;
	socklen_t		addrlen;
	boolean_t		need_unlock;

	dprintso(so, 1, ("sotpi_connect(%p, %p, %d, 0x%x, 0x%x) %s\n",
	    so, name, namelen, fflag, flags,
	    pr_state(so->so_state, so->so_mode)));

	/*
	 * Preallocate the T_CONN_REQ mblk before grabbing SOLOCKED to
	 * avoid sleeping for memory with SOLOCKED held.
	 * We know that the T_CONN_REQ can't be larger than 2 * so_faddr_maxlen
	 * + sizeof (struct T_opthdr).
	 * (the AF_UNIX so_ux_addr_xlate() does not make the address
	 * exceed so_faddr_maxlen).
	 */
	mp = soallocproto(sizeof (struct T_conn_req) +
	    2 * so->so_faddr_maxlen + sizeof (struct T_opthdr), _ALLOC_INTR);
	if (mp == NULL) {
		/*
		 * Connect can not fail with ENOBUFS. A signal was
		 * caught so return EINTR.
1946 */ 1947 error = EINTR; 1948 eprintsoline(so, error); 1949 return (error); 1950 } 1951 1952 mutex_enter(&so->so_lock); 1953 /* 1954 * Make sure that there is a preallocated unbind_req 1955 * message before any binding. This message allocated when 1956 * the socket is created but it might be have been 1957 * consumed. 1958 */ 1959 if (so->so_unbind_mp == NULL) { 1960 dprintso(so, 1, ("sotpi_connect: allocating unbind_req\n")); 1961 /* NOTE: holding so_lock while sleeping */ 1962 so->so_unbind_mp = 1963 soallocproto(sizeof (struct T_unbind_req), _ALLOC_INTR); 1964 if (so->so_unbind_mp == NULL) { 1965 error = EINTR; 1966 need_unlock = B_FALSE; 1967 goto done; 1968 } 1969 } 1970 1971 so_lock_single(so); /* Set SOLOCKED */ 1972 need_unlock = B_TRUE; 1973 1974 /* 1975 * Can't have done a listen before connecting. 1976 */ 1977 if (so->so_state & SS_ACCEPTCONN) { 1978 error = EOPNOTSUPP; 1979 goto done; 1980 } 1981 1982 /* 1983 * Must be bound with the transport 1984 */ 1985 if (!(so->so_state & SS_ISBOUND)) { 1986 if ((so->so_family == AF_INET || so->so_family == AF_INET6) && 1987 /*CONSTCOND*/ 1988 so->so_type == SOCK_STREAM && !soconnect_tpi_tcp) { 1989 /* 1990 * Optimization for AF_INET{,6} transports 1991 * that can handle a T_CONN_REQ without being bound. 1992 */ 1993 so_automatic_bind(so); 1994 } else { 1995 error = sotpi_bind(so, NULL, 0, 1996 _SOBIND_UNSPEC|_SOBIND_LOCK_HELD); 1997 if (error) 1998 goto done; 1999 } 2000 ASSERT(so->so_state & SS_ISBOUND); 2001 flags |= _SOCONNECT_DID_BIND; 2002 } 2003 2004 /* 2005 * Handle a connect to a name parameter of type AF_UNSPEC like a 2006 * connect to a null address. This is the portable method to 2007 * unconnect a socket. 2008 */ 2009 if ((namelen >= sizeof (sa_family_t)) && 2010 (name->sa_family == AF_UNSPEC)) { 2011 name = NULL; 2012 namelen = 0; 2013 } 2014 2015 /* 2016 * Check that we are not already connected. 2017 * A connection-oriented socket cannot be reconnected. 2018 * A connected connection-less socket can be 2019 * - connected to a different address by a subsequent connect 2020 * - "unconnected" by a connect to the NULL address 2021 */ 2022 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) { 2023 ASSERT(!(flags & _SOCONNECT_DID_BIND)); 2024 if (so->so_mode & SM_CONNREQUIRED) { 2025 /* Connection-oriented socket */ 2026 error = so->so_state & SS_ISCONNECTED ? 2027 EISCONN : EALREADY; 2028 goto done; 2029 } 2030 /* Connection-less socket */ 2031 if (name == NULL) { 2032 /* 2033 * Remove the connected state and clear SO_DGRAM_ERRIND 2034 * since it was set when the socket was connected. 2035 * If this is UDP also send down a T_DISCON_REQ. 2036 */ 2037 int val; 2038 2039 if ((so->so_family == AF_INET || 2040 so->so_family == AF_INET6) && 2041 (so->so_type == SOCK_DGRAM || 2042 so->so_type == SOCK_RAW) && 2043 /*CONSTCOND*/ 2044 !soconnect_tpi_udp) { 2045 /* XXX What about implicitly unbinding here? 
*/ 2046 error = sodisconnect(so, -1, 2047 _SODISCONNECT_LOCK_HELD); 2048 } else { 2049 so->so_state &= 2050 ~(SS_ISCONNECTED | SS_ISCONNECTING | 2051 SS_FADDR_VALID); 2052 so->so_faddr_len = 0; 2053 } 2054 2055 so_unlock_single(so, SOLOCKED); 2056 mutex_exit(&so->so_lock); 2057 2058 val = 0; 2059 (void) sotpi_setsockopt(so, SOL_SOCKET, SO_DGRAM_ERRIND, 2060 &val, (t_uscalar_t)sizeof (val)); 2061 2062 mutex_enter(&so->so_lock); 2063 so_lock_single(so); /* Set SOLOCKED */ 2064 goto done; 2065 } 2066 } 2067 ASSERT(so->so_state & SS_ISBOUND); 2068 2069 if (name == NULL || namelen == 0) { 2070 error = EINVAL; 2071 goto done; 2072 } 2073 /* 2074 * Mark the socket if so_faddr_sa represents the transport level 2075 * address. 2076 */ 2077 if (flags & _SOCONNECT_NOXLATE) { 2078 struct sockaddr_ux *soaddr_ux; 2079 2080 ASSERT(so->so_family == AF_UNIX); 2081 if (namelen != sizeof (struct sockaddr_ux)) { 2082 error = EINVAL; 2083 goto done; 2084 } 2085 soaddr_ux = (struct sockaddr_ux *)name; 2086 name = (struct sockaddr *)&soaddr_ux->sou_addr; 2087 namelen = sizeof (soaddr_ux->sou_addr); 2088 so->so_state |= SS_FADDR_NOXLATE; 2089 } 2090 2091 /* 2092 * Length and family checks. 2093 */ 2094 error = so_addr_verify(so, name, namelen); 2095 if (error) 2096 goto bad; 2097 2098 /* 2099 * Save foreign address. Needed for AF_UNIX as well as 2100 * transport providers that do not support TI_GETPEERNAME. 2101 * Also used for cached foreign address for TCP and UDP. 2102 */ 2103 if (namelen > (t_uscalar_t)so->so_faddr_maxlen) { 2104 error = EINVAL; 2105 goto done; 2106 } 2107 so->so_faddr_len = (socklen_t)namelen; 2108 ASSERT(so->so_faddr_len <= so->so_faddr_maxlen); 2109 bcopy(name, so->so_faddr_sa, namelen); 2110 so->so_state |= SS_FADDR_VALID; 2111 2112 if (so->so_family == AF_UNIX) { 2113 if (so->so_state & SS_FADDR_NOXLATE) { 2114 /* 2115 * Already have a transport internal address. Do not 2116 * pass any (transport internal) source address. 2117 */ 2118 addr = so->so_faddr_sa; 2119 addrlen = (t_uscalar_t)so->so_faddr_len; 2120 src = NULL; 2121 srclen = 0; 2122 } else { 2123 /* 2124 * Pass the sockaddr_un source address as an option 2125 * and translate the remote address. 2126 * Holding so_lock thus so_laddr_sa can not change. 2127 */ 2128 src = so->so_laddr_sa; 2129 srclen = (t_uscalar_t)so->so_laddr_len; 2130 dprintso(so, 1, 2131 ("sotpi_connect UNIX: srclen %d, src %p\n", 2132 srclen, src)); 2133 error = so_ux_addr_xlate(so, 2134 so->so_faddr_sa, (socklen_t)so->so_faddr_len, 2135 (flags & _SOCONNECT_XPG4_2), 2136 &addr, &addrlen); 2137 if (error) 2138 goto bad; 2139 } 2140 } else { 2141 addr = so->so_faddr_sa; 2142 addrlen = (t_uscalar_t)so->so_faddr_len; 2143 src = NULL; 2144 srclen = 0; 2145 } 2146 /* 2147 * When connecting a datagram socket we issue the SO_DGRAM_ERRIND 2148 * option which asks the transport provider to send T_UDERR_IND 2149 * messages. These T_UDERR_IND messages are used to return connected 2150 * style errors (e.g. ECONNRESET) for connected datagram sockets. 2151 * 2152 * In addition, for UDP (and SOCK_RAW AF_INET{,6} sockets) 2153 * we send down a T_CONN_REQ. This is needed to let the 2154 * transport assign a local address that is consistent with 2155 * the remote address. Applications depend on a getsockname() 2156 * after a connect() to retrieve the "source" IP address for 2157 * the connected socket. Invalidate the cached local address 2158 * to force getsockname() to enquire of the transport. 2159 */ 2160 if (!(so->so_mode & SM_CONNREQUIRED)) { 2161 /* 2162 * Datagram socket. 
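		 *
		 * Illustrative userland sketch (assumed example, not part of
		 * this file): the getsockname()-after-connect() pattern the
		 * comment above refers to, for a UDP socket, is roughly
		 *
		 *	struct sockaddr_in src;
		 *	socklen_t srclen = sizeof (src);
		 *
		 *	(void) connect(fd, (struct sockaddr *)&dst, sizeof (dst));
		 *	(void) getsockname(fd, (struct sockaddr *)&src, &srclen);
		 *
		 * after which src holds the local address and port the
		 * transport chose for this destination (fd and dst are
		 * assumed to exist in the caller).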
2163 */ 2164 int32_t val; 2165 2166 so_unlock_single(so, SOLOCKED); 2167 mutex_exit(&so->so_lock); 2168 2169 val = 1; 2170 (void) sotpi_setsockopt(so, SOL_SOCKET, SO_DGRAM_ERRIND, 2171 &val, (t_uscalar_t)sizeof (val)); 2172 2173 mutex_enter(&so->so_lock); 2174 so_lock_single(so); /* Set SOLOCKED */ 2175 if ((so->so_family != AF_INET && so->so_family != AF_INET6) || 2176 (so->so_type != SOCK_DGRAM && so->so_type != SOCK_RAW) || 2177 soconnect_tpi_udp) { 2178 soisconnected(so); 2179 goto done; 2180 } 2181 /* 2182 * Send down T_CONN_REQ etc. 2183 * Clear fflag to avoid returning EWOULDBLOCK. 2184 */ 2185 fflag = 0; 2186 ASSERT(so->so_family != AF_UNIX); 2187 so->so_state &= ~SS_LADDR_VALID; 2188 } else if (so->so_laddr_len != 0) { 2189 /* 2190 * If the local address or port was "any" then it may be 2191 * changed by the transport as a result of the 2192 * connect. Invalidate the cached version if we have one. 2193 */ 2194 switch (so->so_family) { 2195 case AF_INET: 2196 ASSERT(so->so_laddr_len == (socklen_t)sizeof (sin_t)); 2197 if (((sin_t *)so->so_laddr_sa)->sin_addr.s_addr == 2198 INADDR_ANY || 2199 ((sin_t *)so->so_laddr_sa)->sin_port == 0) 2200 so->so_state &= ~SS_LADDR_VALID; 2201 break; 2202 2203 case AF_INET6: 2204 ASSERT(so->so_laddr_len == (socklen_t)sizeof (sin6_t)); 2205 if (IN6_IS_ADDR_UNSPECIFIED( 2206 &((sin6_t *)so->so_laddr_sa) ->sin6_addr) || 2207 IN6_IS_ADDR_V4MAPPED_ANY( 2208 &((sin6_t *)so->so_laddr_sa)->sin6_addr) || 2209 ((sin6_t *)so->so_laddr_sa)->sin6_port == 0) 2210 so->so_state &= ~SS_LADDR_VALID; 2211 break; 2212 2213 default: 2214 break; 2215 } 2216 } 2217 2218 /* 2219 * Check for failure of an earlier call 2220 */ 2221 if (so->so_error != 0) 2222 goto so_bad; 2223 2224 /* 2225 * Send down T_CONN_REQ. Message was allocated above. 2226 */ 2227 conn_req.PRIM_type = T_CONN_REQ; 2228 conn_req.DEST_length = addrlen; 2229 conn_req.DEST_offset = (t_scalar_t)sizeof (conn_req); 2230 if (srclen == 0) { 2231 conn_req.OPT_length = 0; 2232 conn_req.OPT_offset = 0; 2233 soappendmsg(mp, &conn_req, sizeof (conn_req)); 2234 soappendmsg(mp, addr, addrlen); 2235 } else { 2236 /* 2237 * There is a AF_UNIX sockaddr_un to include as a source 2238 * address option. 2239 */ 2240 struct T_opthdr toh; 2241 2242 toh.level = SOL_SOCKET; 2243 toh.name = SO_SRCADDR; 2244 toh.len = (t_uscalar_t)(srclen + sizeof (struct T_opthdr)); 2245 toh.status = 0; 2246 conn_req.OPT_length = 2247 (t_scalar_t)(sizeof (toh) + _TPI_ALIGN_TOPT(srclen)); 2248 conn_req.OPT_offset = (t_scalar_t)(sizeof (conn_req) + 2249 _TPI_ALIGN_TOPT(addrlen)); 2250 2251 soappendmsg(mp, &conn_req, sizeof (conn_req)); 2252 soappendmsg(mp, addr, addrlen); 2253 mp->b_wptr += _TPI_ALIGN_TOPT(addrlen) - addrlen; 2254 soappendmsg(mp, &toh, sizeof (toh)); 2255 soappendmsg(mp, src, srclen); 2256 mp->b_wptr += _TPI_ALIGN_TOPT(srclen) - srclen; 2257 ASSERT(mp->b_wptr <= mp->b_datap->db_lim); 2258 } 2259 /* 2260 * Set SS_ISCONNECTING before sending down the T_CONN_REQ 2261 * in order to have the right state when the T_CONN_CON shows up. 
2262 */ 2263 soisconnecting(so); 2264 mutex_exit(&so->so_lock); 2265 2266 #ifdef C2_AUDIT 2267 if (audit_active) 2268 audit_sock(T_CONN_REQ, strvp2wq(SOTOV(so)), mp, 0); 2269 #endif /* C2_AUDIT */ 2270 2271 error = kstrputmsg(SOTOV(so), mp, NULL, 0, 0, 2272 MSG_BAND|MSG_HOLDSIG|MSG_IGNERROR, 0); 2273 mp = NULL; 2274 mutex_enter(&so->so_lock); 2275 if (error != 0) 2276 goto bad; 2277 2278 if ((error = sowaitokack(so, T_CONN_REQ)) != 0) 2279 goto bad; 2280 2281 /* Allow other threads to access the socket */ 2282 so_unlock_single(so, SOLOCKED); 2283 need_unlock = B_FALSE; 2284 2285 /* 2286 * Wait until we get a T_CONN_CON or an error 2287 */ 2288 if ((error = sowaitconnected(so, fflag, 0)) != 0) { 2289 so_lock_single(so); /* Set SOLOCKED */ 2290 need_unlock = B_TRUE; 2291 } 2292 2293 done: 2294 freemsg(mp); 2295 switch (error) { 2296 case EINPROGRESS: 2297 case EALREADY: 2298 case EISCONN: 2299 case EINTR: 2300 /* Non-fatal errors */ 2301 so->so_state &= ~SS_LADDR_VALID; 2302 /* FALLTHRU */ 2303 case 0: 2304 break; 2305 2306 case EHOSTUNREACH: 2307 if (flags & _SOCONNECT_XPG4_2) { 2308 /* 2309 * X/Open specification contains a requirement that 2310 * ENETUNREACH be returned but does not require 2311 * EHOSTUNREACH. In order to keep the test suite 2312 * happy we mess with the errno here. 2313 */ 2314 error = ENETUNREACH; 2315 } 2316 /* FALLTHRU */ 2317 2318 default: 2319 ASSERT(need_unlock); 2320 /* 2321 * Fatal errors: clear SS_ISCONNECTING in case it was set, 2322 * and invalidate local-address cache 2323 */ 2324 so->so_state &= ~(SS_ISCONNECTING | SS_LADDR_VALID); 2325 /* A discon_ind might have already unbound us */ 2326 if ((flags & _SOCONNECT_DID_BIND) && 2327 (so->so_state & SS_ISBOUND)) { 2328 int err; 2329 2330 err = sotpi_unbind(so, 0); 2331 /* LINTED - statement has no conseq */ 2332 if (err) { 2333 eprintsoline(so, err); 2334 } 2335 } 2336 break; 2337 } 2338 if (need_unlock) 2339 so_unlock_single(so, SOLOCKED); 2340 mutex_exit(&so->so_lock); 2341 return (error); 2342 2343 so_bad: error = sogeterr(so); 2344 bad: eprintsoline(so, error); 2345 goto done; 2346 } 2347 2348 int 2349 sotpi_shutdown(struct sonode *so, int how) 2350 { 2351 struct T_ordrel_req ordrel_req; 2352 mblk_t *mp; 2353 uint_t old_state, state_change; 2354 int error = 0; 2355 2356 dprintso(so, 1, ("sotpi_shutdown(%p, %d) %s\n", 2357 so, how, pr_state(so->so_state, so->so_mode))); 2358 2359 mutex_enter(&so->so_lock); 2360 so_lock_single(so); /* Set SOLOCKED */ 2361 2362 /* 2363 * SunOS 4.X has no check for datagram sockets. 2364 * 5.X checks that it is connected (ENOTCONN) 2365 * X/Open requires that we check the connected state. 2366 */ 2367 if (!(so->so_state & SS_ISCONNECTED)) { 2368 if (!xnet_skip_checks) { 2369 error = ENOTCONN; 2370 if (xnet_check_print) { 2371 printf("sockfs: X/Open shutdown check " 2372 "caused ENOTCONN\n"); 2373 } 2374 } 2375 goto done; 2376 } 2377 /* 2378 * Record the current state and then perform any state changes. 2379 * Then use the difference between the old and new states to 2380 * determine which messages need to be sent. 2381 * This prevents e.g. duplicate T_ORDREL_REQ when there are 2382 * duplicate calls to shutdown(). 
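	 *
	 * For example (illustrative userland sketch, not part of this file):
	 *
	 *	(void) shutdown(fd, SHUT_WR);
	 *	(void) shutdown(fd, SHUT_WR);
	 *
	 * The first call yields state_change == SS_CANTSENDMORE and, for a
	 * T_COTS_ORD transport, sends an orderly release; the second call
	 * yields state_change == 0 and sends nothing further.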
2383 */ 2384 old_state = so->so_state; 2385 2386 switch (how) { 2387 case 0: 2388 socantrcvmore(so); 2389 break; 2390 case 1: 2391 socantsendmore(so); 2392 break; 2393 case 2: 2394 socantsendmore(so); 2395 socantrcvmore(so); 2396 break; 2397 default: 2398 error = EINVAL; 2399 goto done; 2400 } 2401 2402 /* 2403 * Assumes that the SS_CANT* flags are never cleared in the above code. 2404 */ 2405 state_change = (so->so_state & (SS_CANTRCVMORE|SS_CANTSENDMORE)) - 2406 (old_state & (SS_CANTRCVMORE|SS_CANTSENDMORE)); 2407 ASSERT((state_change & ~(SS_CANTRCVMORE|SS_CANTSENDMORE)) == 0); 2408 2409 switch (state_change) { 2410 case 0: 2411 dprintso(so, 1, 2412 ("sotpi_shutdown: nothing to send in state 0x%x\n", 2413 so->so_state)); 2414 goto done; 2415 2416 case SS_CANTRCVMORE: 2417 mutex_exit(&so->so_lock); 2418 strseteof(SOTOV(so), 1); 2419 /* 2420 * strseteof takes care of read side wakeups, 2421 * pollwakeups, and signals. 2422 */ 2423 /* 2424 * Get the read lock before flushing data to avoid problems 2425 * with the T_EXDATA_IND MSG_PEEK code in sotpi_recvmsg. 2426 */ 2427 mutex_enter(&so->so_lock); 2428 (void) so_lock_read(so, 0); /* Set SOREADLOCKED */ 2429 mutex_exit(&so->so_lock); 2430 2431 /* Flush read side queue */ 2432 strflushrq(SOTOV(so), FLUSHALL); 2433 2434 mutex_enter(&so->so_lock); 2435 so_unlock_read(so); /* Clear SOREADLOCKED */ 2436 break; 2437 2438 case SS_CANTSENDMORE: 2439 mutex_exit(&so->so_lock); 2440 strsetwerror(SOTOV(so), 0, 0, sogetwrerr); 2441 mutex_enter(&so->so_lock); 2442 break; 2443 2444 case SS_CANTSENDMORE|SS_CANTRCVMORE: 2445 mutex_exit(&so->so_lock); 2446 strsetwerror(SOTOV(so), 0, 0, sogetwrerr); 2447 strseteof(SOTOV(so), 1); 2448 /* 2449 * strseteof takes care of read side wakeups, 2450 * pollwakeups, and signals. 2451 */ 2452 /* 2453 * Get the read lock before flushing data to avoid problems 2454 * with the T_EXDATA_IND MSG_PEEK code in sotpi_recvmsg. 2455 */ 2456 mutex_enter(&so->so_lock); 2457 (void) so_lock_read(so, 0); /* Set SOREADLOCKED */ 2458 mutex_exit(&so->so_lock); 2459 2460 /* Flush read side queue */ 2461 strflushrq(SOTOV(so), FLUSHALL); 2462 2463 mutex_enter(&so->so_lock); 2464 so_unlock_read(so); /* Clear SOREADLOCKED */ 2465 break; 2466 } 2467 2468 ASSERT(MUTEX_HELD(&so->so_lock)); 2469 2470 /* 2471 * If either SS_CANTSENDMORE or SS_CANTRCVMORE or both of them 2472 * was set due to this call and the new state has both of them set: 2473 * Send the AF_UNIX close indication 2474 * For T_COTS send a discon_ind 2475 * 2476 * If cantsend was set due to this call: 2477 * For T_COTSORD send an ordrel_ind 2478 * 2479 * Note that for T_CLTS there is no message sent here. 2480 */ 2481 if ((so->so_state & (SS_CANTRCVMORE|SS_CANTSENDMORE)) == 2482 (SS_CANTRCVMORE|SS_CANTSENDMORE)) { 2483 /* 2484 * For SunOS 4.X compatibility we tell the other end 2485 * that we are unable to receive at this point. 2486 */ 2487 if (so->so_family == AF_UNIX && so->so_serv_type != T_CLTS) 2488 so_unix_close(so); 2489 2490 if (so->so_serv_type == T_COTS) 2491 error = sodisconnect(so, -1, _SODISCONNECT_LOCK_HELD); 2492 } 2493 if ((state_change & SS_CANTSENDMORE) && 2494 (so->so_serv_type == T_COTS_ORD)) { 2495 /* Send an orderly release */ 2496 ordrel_req.PRIM_type = T_ORDREL_REQ; 2497 2498 mutex_exit(&so->so_lock); 2499 mp = soallocproto1(&ordrel_req, sizeof (ordrel_req), 2500 0, _ALLOC_SLEEP); 2501 /* 2502 * Send down the T_ORDREL_REQ even if there is flow control. 2503 * This prevents shutdown from blocking. 2504 * Note that there is no T_OK_ACK for ordrel_req. 
2505 */ 2506 error = kstrputmsg(SOTOV(so), mp, NULL, 0, 0, 2507 MSG_BAND|MSG_HOLDSIG|MSG_IGNERROR|MSG_IGNFLOW, 0); 2508 mutex_enter(&so->so_lock); 2509 if (error) { 2510 eprintsoline(so, error); 2511 goto done; 2512 } 2513 } 2514 2515 done: 2516 so_unlock_single(so, SOLOCKED); 2517 mutex_exit(&so->so_lock); 2518 return (error); 2519 } 2520 2521 /* 2522 * For any connected SOCK_STREAM/SOCK_SEQPACKET AF_UNIX socket we send 2523 * a zero-length T_OPTDATA_REQ with the SO_UNIX_CLOSE option to inform the peer 2524 * that we have closed. 2525 * Also, for connected AF_UNIX SOCK_DGRAM sockets we send a zero-length 2526 * T_UNITDATA_REQ containing the same option. 2527 * 2528 * For SOCK_DGRAM half-connections (somebody connected to this end 2529 * but this end is not connect) we don't know where to send any 2530 * SO_UNIX_CLOSE. 2531 * 2532 * We have to ignore stream head errors just in case there has been 2533 * a shutdown(output). 2534 * Ignore any flow control to try to get the message more quickly to the peer. 2535 * While locally ignoring flow control solves the problem when there 2536 * is only the loopback transport on the stream it would not provide 2537 * the correct AF_UNIX socket semantics when one or more modules have 2538 * been pushed. 2539 */ 2540 void 2541 so_unix_close(struct sonode *so) 2542 { 2543 int error; 2544 struct T_opthdr toh; 2545 mblk_t *mp; 2546 2547 ASSERT(MUTEX_HELD(&so->so_lock)); 2548 2549 ASSERT(so->so_family == AF_UNIX); 2550 2551 if ((so->so_state & (SS_ISCONNECTED|SS_ISBOUND)) != 2552 (SS_ISCONNECTED|SS_ISBOUND)) 2553 return; 2554 2555 dprintso(so, 1, ("so_unix_close(%p) %s\n", 2556 so, pr_state(so->so_state, so->so_mode))); 2557 2558 toh.level = SOL_SOCKET; 2559 toh.name = SO_UNIX_CLOSE; 2560 2561 /* zero length + header */ 2562 toh.len = (t_uscalar_t)sizeof (struct T_opthdr); 2563 toh.status = 0; 2564 2565 if (so->so_type == SOCK_STREAM || so->so_type == SOCK_SEQPACKET) { 2566 struct T_optdata_req tdr; 2567 2568 tdr.PRIM_type = T_OPTDATA_REQ; 2569 tdr.DATA_flag = 0; 2570 2571 tdr.OPT_length = (t_scalar_t)sizeof (toh); 2572 tdr.OPT_offset = (t_scalar_t)sizeof (tdr); 2573 2574 /* NOTE: holding so_lock while sleeping */ 2575 mp = soallocproto2(&tdr, sizeof (tdr), 2576 &toh, sizeof (toh), 0, _ALLOC_SLEEP); 2577 } else { 2578 struct T_unitdata_req tudr; 2579 void *addr; 2580 socklen_t addrlen; 2581 void *src; 2582 socklen_t srclen; 2583 struct T_opthdr toh2; 2584 t_scalar_t size; 2585 2586 /* Connecteded DGRAM socket */ 2587 2588 /* 2589 * For AF_UNIX the destination address is translated to 2590 * an internal name and the source address is passed as 2591 * an option. 2592 */ 2593 /* 2594 * Length and family checks. 2595 */ 2596 error = so_addr_verify(so, so->so_faddr_sa, 2597 (t_uscalar_t)so->so_faddr_len); 2598 if (error) { 2599 eprintsoline(so, error); 2600 return; 2601 } 2602 if (so->so_state & SS_FADDR_NOXLATE) { 2603 /* 2604 * Already have a transport internal address. Do not 2605 * pass any (transport internal) source address. 2606 */ 2607 addr = so->so_faddr_sa; 2608 addrlen = (t_uscalar_t)so->so_faddr_len; 2609 src = NULL; 2610 srclen = 0; 2611 } else { 2612 /* 2613 * Pass the sockaddr_un source address as an option 2614 * and translate the remote address. 2615 * Holding so_lock thus so_laddr_sa can not change. 
2616 */ 2617 src = so->so_laddr_sa; 2618 srclen = (socklen_t)so->so_laddr_len; 2619 dprintso(so, 1, 2620 ("so_ux_close: srclen %d, src %p\n", 2621 srclen, src)); 2622 error = so_ux_addr_xlate(so, 2623 so->so_faddr_sa, 2624 (socklen_t)so->so_faddr_len, 0, 2625 &addr, &addrlen); 2626 if (error) { 2627 eprintsoline(so, error); 2628 return; 2629 } 2630 } 2631 tudr.PRIM_type = T_UNITDATA_REQ; 2632 tudr.DEST_length = addrlen; 2633 tudr.DEST_offset = (t_scalar_t)sizeof (tudr); 2634 if (srclen == 0) { 2635 tudr.OPT_length = (t_scalar_t)sizeof (toh); 2636 tudr.OPT_offset = (t_scalar_t)(sizeof (tudr) + 2637 _TPI_ALIGN_TOPT(addrlen)); 2638 2639 size = tudr.OPT_offset + tudr.OPT_length; 2640 /* NOTE: holding so_lock while sleeping */ 2641 mp = soallocproto2(&tudr, sizeof (tudr), 2642 addr, addrlen, size, _ALLOC_SLEEP); 2643 mp->b_wptr += (_TPI_ALIGN_TOPT(addrlen) - addrlen); 2644 soappendmsg(mp, &toh, sizeof (toh)); 2645 } else { 2646 /* 2647 * There is a AF_UNIX sockaddr_un to include as a 2648 * source address option. 2649 */ 2650 tudr.OPT_length = (t_scalar_t)(2 * sizeof (toh) + 2651 _TPI_ALIGN_TOPT(srclen)); 2652 tudr.OPT_offset = (t_scalar_t)(sizeof (tudr) + 2653 _TPI_ALIGN_TOPT(addrlen)); 2654 2655 toh2.level = SOL_SOCKET; 2656 toh2.name = SO_SRCADDR; 2657 toh2.len = (t_uscalar_t)(srclen + 2658 sizeof (struct T_opthdr)); 2659 toh2.status = 0; 2660 2661 size = tudr.OPT_offset + tudr.OPT_length; 2662 2663 /* NOTE: holding so_lock while sleeping */ 2664 mp = soallocproto2(&tudr, sizeof (tudr), 2665 addr, addrlen, size, _ALLOC_SLEEP); 2666 mp->b_wptr += _TPI_ALIGN_TOPT(addrlen) - addrlen; 2667 soappendmsg(mp, &toh, sizeof (toh)); 2668 soappendmsg(mp, &toh2, sizeof (toh2)); 2669 soappendmsg(mp, src, srclen); 2670 mp->b_wptr += _TPI_ALIGN_TOPT(srclen) - srclen; 2671 } 2672 ASSERT(mp->b_wptr <= mp->b_datap->db_lim); 2673 } 2674 mutex_exit(&so->so_lock); 2675 error = kstrputmsg(SOTOV(so), mp, NULL, 0, 0, 2676 MSG_BAND|MSG_HOLDSIG|MSG_IGNERROR|MSG_IGNFLOW, 0); 2677 mutex_enter(&so->so_lock); 2678 } 2679 2680 /* 2681 * Handle recv* calls that set MSG_OOB or MSG_OOB together with MSG_PEEK. 2682 */ 2683 int 2684 sorecvoob(struct sonode *so, struct nmsghdr *msg, struct uio *uiop, int flags) 2685 { 2686 mblk_t *mp, *nmp; 2687 int error; 2688 2689 dprintso(so, 1, ("sorecvoob(%p, %p, 0x%x)\n", so, msg, flags)); 2690 2691 /* 2692 * There is never any oob data with addresses or control since 2693 * the T_EXDATA_IND does not carry any options. 2694 */ 2695 msg->msg_controllen = 0; 2696 msg->msg_namelen = 0; 2697 2698 mutex_enter(&so->so_lock); 2699 ASSERT(so_verify_oobstate(so)); 2700 if ((so->so_options & SO_OOBINLINE) || 2701 (so->so_state & (SS_OOBPEND|SS_HADOOBDATA)) != SS_OOBPEND) { 2702 dprintso(so, 1, ("sorecvoob: inline or data consumed\n")); 2703 mutex_exit(&so->so_lock); 2704 return (EINVAL); 2705 } 2706 if (!(so->so_state & SS_HAVEOOBDATA)) { 2707 dprintso(so, 1, ("sorecvoob: no data yet\n")); 2708 mutex_exit(&so->so_lock); 2709 return (EWOULDBLOCK); 2710 } 2711 ASSERT(so->so_oobmsg != NULL); 2712 mp = so->so_oobmsg; 2713 if (flags & MSG_PEEK) { 2714 /* 2715 * Since recv* can not return ENOBUFS we can not use dupmsg. 2716 * Instead we revert to the consolidation private 2717 * allocb_wait plus bcopy. 
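		 *
		 * Illustrative caller's view (assumed example, not part of
		 * this file): a process peeking at pending out-of-band data
		 * without consuming it typically issues
		 *
		 *	char oob;
		 *	ssize_t n = recv(fd, &oob, 1, MSG_OOB | MSG_PEEK);
		 *
		 * and this branch builds the copy such a peek returns.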
2718 */ 2719 mblk_t *mp1; 2720 2721 mp1 = allocb_wait(msgdsize(mp), BPRI_MED, STR_NOSIG, NULL); 2722 ASSERT(mp1); 2723 2724 while (mp != NULL) { 2725 ssize_t size; 2726 2727 size = MBLKL(mp); 2728 bcopy(mp->b_rptr, mp1->b_wptr, size); 2729 mp1->b_wptr += size; 2730 ASSERT(mp1->b_wptr <= mp1->b_datap->db_lim); 2731 mp = mp->b_cont; 2732 } 2733 mp = mp1; 2734 } else { 2735 /* 2736 * Update the state indicating that the data has been consumed. 2737 * Keep SS_OOBPEND set until data is consumed past the mark. 2738 */ 2739 so->so_oobmsg = NULL; 2740 so->so_state ^= SS_HAVEOOBDATA|SS_HADOOBDATA; 2741 } 2742 dprintso(so, 1, 2743 ("after recvoob(%p): counts %d/%d state %s\n", 2744 so, so->so_oobsigcnt, 2745 so->so_oobcnt, pr_state(so->so_state, so->so_mode))); 2746 ASSERT(so_verify_oobstate(so)); 2747 mutex_exit(&so->so_lock); 2748 2749 error = 0; 2750 nmp = mp; 2751 while (nmp != NULL && uiop->uio_resid > 0) { 2752 ssize_t n = MBLKL(nmp); 2753 2754 n = MIN(n, uiop->uio_resid); 2755 if (n > 0) 2756 error = uiomove(nmp->b_rptr, n, 2757 UIO_READ, uiop); 2758 if (error) 2759 break; 2760 nmp = nmp->b_cont; 2761 } 2762 freemsg(mp); 2763 return (error); 2764 } 2765 2766 /* 2767 * Called by sotpi_recvmsg when reading a non-zero amount of data. 2768 * In addition, the caller typically verifies that there is some 2769 * potential state to clear by checking 2770 * if (so->so_state & (SS_OOBPEND|SS_HAVEOOBDATA|SS_RCVATMARK)) 2771 * before calling this routine. 2772 * Note that such a check can be made without holding so_lock since 2773 * sotpi_recvmsg is single-threaded (using SOREADLOCKED) and only sotpi_recvmsg 2774 * decrements so_oobsigcnt. 2775 * 2776 * When data is read *after* the point that all pending 2777 * oob data has been consumed the oob indication is cleared. 2778 * 2779 * This logic keeps select/poll returning POLLRDBAND and 2780 * SIOCATMARK returning true until we have read past 2781 * the mark. 2782 */ 2783 static void 2784 sorecv_update_oobstate(struct sonode *so) 2785 { 2786 mutex_enter(&so->so_lock); 2787 ASSERT(so_verify_oobstate(so)); 2788 dprintso(so, 1, 2789 ("sorecv_update_oobstate: counts %d/%d state %s\n", 2790 so->so_oobsigcnt, 2791 so->so_oobcnt, pr_state(so->so_state, so->so_mode))); 2792 if (so->so_oobsigcnt == 0) { 2793 /* No more pending oob indications */ 2794 so->so_state &= ~(SS_OOBPEND|SS_HAVEOOBDATA|SS_RCVATMARK); 2795 freemsg(so->so_oobmsg); 2796 so->so_oobmsg = NULL; 2797 } 2798 ASSERT(so_verify_oobstate(so)); 2799 mutex_exit(&so->so_lock); 2800 } 2801 2802 /* 2803 * Handle recv* calls for an so which has NL7C saved recv mblk_t(s). 2804 */ 2805 static int 2806 nl7c_sorecv(struct sonode *so, mblk_t **rmp, uio_t *uiop, rval_t *rp) 2807 { 2808 int error = 0; 2809 mblk_t *tmp = NULL; 2810 mblk_t *pmp = NULL; 2811 mblk_t *nmp = so->so_nl7c_rcv_mp; 2812 2813 ASSERT(nmp != NULL); 2814 2815 while (nmp != NULL && uiop->uio_resid > 0) { 2816 ssize_t n; 2817 2818 if (DB_TYPE(nmp) == M_DATA) { 2819 /* 2820 * We have some data, uiomove up to resid bytes. 2821 */ 2822 n = MIN(MBLKL(nmp), uiop->uio_resid); 2823 if (n > 0) 2824 error = uiomove(nmp->b_rptr, n, UIO_READ, uiop); 2825 nmp->b_rptr += n; 2826 if (nmp->b_rptr == nmp->b_wptr) { 2827 pmp = nmp; 2828 nmp = nmp->b_cont; 2829 } 2830 if (error) 2831 break; 2832 } else { 2833 /* 2834 * We only handle data, save for caller to handle. 
2835 */ 2836 if (pmp != NULL) { 2837 pmp->b_cont = nmp->b_cont; 2838 } 2839 nmp->b_cont = NULL; 2840 if (*rmp == NULL) { 2841 *rmp = nmp; 2842 } else { 2843 tmp->b_cont = nmp; 2844 } 2845 nmp = nmp->b_cont; 2846 tmp = nmp; 2847 } 2848 } 2849 if (pmp != NULL) { 2850 /* Free any mblk_t(s) which we have consumed */ 2851 pmp->b_cont = NULL; 2852 freemsg(so->so_nl7c_rcv_mp); 2853 } 2854 if ((so->so_nl7c_rcv_mp = nmp) == NULL) { 2855 /* Last mblk_t so return the saved kstrgetmsg() rval/error */ 2856 if (error == 0) { 2857 rval_t *p = (rval_t *)&so->so_nl7c_rcv_rval; 2858 2859 error = p->r_v.r_v2; 2860 p->r_v.r_v2 = 0; 2861 } 2862 rp->r_vals = so->so_nl7c_rcv_rval; 2863 so->so_nl7c_rcv_rval = 0; 2864 } else { 2865 /* More mblk_t(s) to process so no rval to return */ 2866 rp->r_vals = 0; 2867 } 2868 return (error); 2869 } 2870 2871 /* 2872 * Receive the next message on the queue. 2873 * If msg_controllen is non-zero when called the caller is interested in 2874 * any received control info (options). 2875 * If msg_namelen is non-zero when called the caller is interested in 2876 * any received source address. 2877 * The routine returns with msg_control and msg_name pointing to 2878 * kmem_alloc'ed memory which the caller has to free. 2879 */ 2880 int 2881 sotpi_recvmsg(struct sonode *so, struct nmsghdr *msg, struct uio *uiop) 2882 { 2883 union T_primitives *tpr; 2884 mblk_t *mp; 2885 uchar_t pri; 2886 int pflag, opflag; 2887 void *control; 2888 t_uscalar_t controllen; 2889 t_uscalar_t namelen; 2890 int so_state = so->so_state; /* Snapshot */ 2891 ssize_t saved_resid; 2892 int error; 2893 rval_t rval; 2894 int flags; 2895 clock_t timout; 2896 int first; 2897 2898 flags = msg->msg_flags; 2899 msg->msg_flags = 0; 2900 2901 dprintso(so, 1, ("sotpi_recvmsg(%p, %p, 0x%x) state %s err %d\n", 2902 so, msg, flags, 2903 pr_state(so->so_state, so->so_mode), so->so_error)); 2904 2905 /* 2906 * If we are not connected because we have never been connected 2907 * we return ENOTCONN. If we have been connected (but are no longer 2908 * connected) then SS_CANTRCVMORE is set and we let kstrgetmsg return 2909 * the EOF. 2910 * 2911 * An alternative would be to post an ENOTCONN error in stream head 2912 * (read+write) and clear it when we're connected. However, that error 2913 * would cause incorrect poll/select behavior! 2914 */ 2915 if ((so_state & (SS_ISCONNECTED|SS_CANTRCVMORE)) == 0 && 2916 (so->so_mode & SM_CONNREQUIRED)) { 2917 return (ENOTCONN); 2918 } 2919 2920 /* 2921 * Note: SunOS 4.X checks uio_resid == 0 before going to sleep (but 2922 * after checking that the read queue is empty) and returns zero. 2923 * This implementation will sleep (in kstrgetmsg) even if uio_resid 2924 * is zero. 2925 */ 2926 2927 if (flags & MSG_OOB) { 2928 /* Check that the transport supports OOB */ 2929 if (!(so->so_mode & SM_EXDATA)) 2930 return (EOPNOTSUPP); 2931 return (sorecvoob(so, msg, uiop, flags)); 2932 } 2933 2934 /* 2935 * Set msg_controllen and msg_namelen to zero here to make it 2936 * simpler in the cases that no control or name is returned. 2937 */ 2938 controllen = msg->msg_controllen; 2939 namelen = msg->msg_namelen; 2940 msg->msg_controllen = 0; 2941 msg->msg_namelen = 0; 2942 2943 dprintso(so, 1, ("sotpi_recvmsg: namelen %d controllen %d\n", 2944 namelen, controllen)); 2945 2946 mutex_enter(&so->so_lock); 2947 /* 2948 * If an NL7C enabled socket and not waiting for write data. 
2949 	 */
2950 	if ((so->so_nl7c_flags & (NL7C_ENABLED | NL7C_WAITWRITE)) ==
2951 	    NL7C_ENABLED) {
2952 		if (so->so_nl7c_uri) {
2953 			/* Close uri processing for a previous request */
2954 			nl7c_close(so);
2955 		}
2956 		if ((so_state & SS_CANTRCVMORE) && so->so_nl7c_rcv_mp == NULL) {
2957 			/* Nothing to process, EOF */
2958 			mutex_exit(&so->so_lock);
2959 			return (0);
2960 		} else if (so->so_nl7c_flags & NL7C_SOPERSIST) {
2961 			/* Persistent NL7C socket, try to process request */
2962 			boolean_t ret;
2963 
2964 			ret = nl7c_process(so,
2965 			    (so->so_state & (SS_NONBLOCK|SS_NDELAY)));
2966 			rval.r_vals = so->so_nl7c_rcv_rval;
2967 			error = rval.r_v.r_v2;
2968 			if (error) {
2969 				/* Error of some sort, return it */
2970 				mutex_exit(&so->so_lock);
2971 				return (error);
2972 			}
2973 			if (so->so_nl7c_flags &&
2974 			    ! (so->so_nl7c_flags & NL7C_WAITWRITE)) {
2975 				/*
2976 				 * Still an NL7C socket and no data
2977 				 * to pass up to the caller.
2978 				 */
2979 				mutex_exit(&so->so_lock);
2980 				if (ret) {
2981 					/* EOF */
2982 					return (0);
2983 				} else {
2984 					/* Need more data */
2985 					return (EAGAIN);
2986 				}
2987 			}
2988 		} else {
2989 			/*
2990 			 * Not persistent so no further NL7C processing.
2991 			 */
2992 			so->so_nl7c_flags = 0;
2993 		}
2994 	}
2995 	/*
2996 	 * Only one reader is allowed at any given time. This is needed
2997 	 * for T_EXDATA handling and, in the future, MSG_WAITALL.
2998 	 *
2999 	 * This is slightly different than BSD behavior in that it fails with
3000 	 * EWOULDBLOCK when using nonblocking io. In BSD the read queue access
3001 	 * is single-threaded using sblock(), which is dropped while waiting
3002 	 * for data to appear. The difference shows up e.g. if one
3003 	 * file descriptor does not have O_NONBLOCK but a dup'ed file descriptor
3004 	 * does use nonblocking io and different threads are reading each
3005 	 * file descriptor. In BSD there would never be an EWOULDBLOCK error
3006 	 * in this case as long as the read queue doesn't get empty.
3007 	 * In this implementation the thread using nonblocking io can
3008 	 * get an EWOULDBLOCK error due to the blocking thread executing
3009 	 * e.g. in the uiomove in kstrgetmsg.
3010 	 * This difference is not believed to be significant.
3011 	 */
3012 	error = so_lock_read_intr(so, uiop->uio_fmode); /* Set SOREADLOCKED */
3013 	mutex_exit(&so->so_lock);
3014 	if (error)
3015 		return (error);
3016 
3017 	/*
3018 	 * Tell kstrgetmsg to not inspect the stream head errors until all
3019 	 * queued data has been consumed.
3020 	 * Use a timeout=-1 to wait forever unless MSG_DONTWAIT is set.
3021 	 * Also, if uio_fmode indicates nonblocking, kstrgetmsg will not block.
3022 	 *
3023 	 * MSG_WAITALL only applies to M_DATA and T_DATA_IND messages and
3024 	 * to T_OPTDATA_IND that do not contain any user-visible control msg.
3025 	 * Note that MSG_WAITALL set with MSG_PEEK is a noop.
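	 *
	 * Illustrative mapping to userland calls (assumed examples, not
	 * part of this file):
	 *
	 *	recv(fd, buf, len, MSG_WAITALL)	 timout = -1; the retry loop
	 *					 below continues until len
	 *					 bytes, EOF, or an error
	 *	recv(fd, buf, len, MSG_DONTWAIT) timout = 0; a timed-out
	 *					 kstrgetmsg is reported as
	 *					 EWOULDBLOCK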
3026 */ 3027 pflag = MSG_ANY | MSG_DELAYERROR; 3028 if (flags & MSG_PEEK) { 3029 pflag |= MSG_IPEEK; 3030 flags &= ~MSG_WAITALL; 3031 } 3032 if (so->so_mode & SM_ATOMIC) 3033 pflag |= MSG_DISCARDTAIL; 3034 3035 if (flags & MSG_DONTWAIT) 3036 timout = 0; 3037 else 3038 timout = -1; 3039 opflag = pflag; 3040 first = 1; 3041 3042 retry: 3043 saved_resid = uiop->uio_resid; 3044 pri = 0; 3045 mp = NULL; 3046 if (so->so_nl7c_rcv_mp != NULL) { 3047 /* Already kstrgetmsg()ed saved mblk(s) from NL7C */ 3048 error = nl7c_sorecv(so, &mp, uiop, &rval); 3049 } else { 3050 error = kstrgetmsg(SOTOV(so), &mp, uiop, &pri, &pflag, 3051 timout, &rval); 3052 } 3053 if (error) { 3054 switch (error) { 3055 case EINTR: 3056 case EWOULDBLOCK: 3057 if (!first) 3058 error = 0; 3059 break; 3060 case ETIME: 3061 /* Returned from kstrgetmsg when timeout expires */ 3062 if (!first) 3063 error = 0; 3064 else 3065 error = EWOULDBLOCK; 3066 break; 3067 default: 3068 eprintsoline(so, error); 3069 break; 3070 } 3071 mutex_enter(&so->so_lock); 3072 so_unlock_read(so); /* Clear SOREADLOCKED */ 3073 mutex_exit(&so->so_lock); 3074 return (error); 3075 } 3076 /* 3077 * For datagrams the MOREDATA flag is used to set MSG_TRUNC. 3078 * For non-datagrams MOREDATA is used to set MSG_EOR. 3079 */ 3080 ASSERT(!(rval.r_val1 & MORECTL)); 3081 if ((rval.r_val1 & MOREDATA) && (so->so_mode & SM_ATOMIC)) 3082 msg->msg_flags |= MSG_TRUNC; 3083 3084 if (mp == NULL) { 3085 dprintso(so, 1, ("sotpi_recvmsg: got M_DATA\n")); 3086 /* 3087 * 4.3BSD and 4.4BSD clears the mark when peeking across it. 3088 * The draft Posix socket spec states that the mark should 3089 * not be cleared when peeking. We follow the latter. 3090 */ 3091 if ((so->so_state & 3092 (SS_OOBPEND|SS_HAVEOOBDATA|SS_RCVATMARK)) && 3093 (uiop->uio_resid != saved_resid) && 3094 !(flags & MSG_PEEK)) { 3095 sorecv_update_oobstate(so); 3096 } 3097 3098 mutex_enter(&so->so_lock); 3099 /* Set MSG_EOR based on MOREDATA */ 3100 if (!(rval.r_val1 & MOREDATA)) { 3101 if (so->so_state & SS_SAVEDEOR) { 3102 msg->msg_flags |= MSG_EOR; 3103 so->so_state &= ~SS_SAVEDEOR; 3104 } 3105 } 3106 /* 3107 * If some data was received (i.e. not EOF) and the 3108 * read/recv* has not been satisfied wait for some more. 3109 */ 3110 if ((flags & MSG_WAITALL) && !(msg->msg_flags & MSG_EOR) && 3111 uiop->uio_resid != saved_resid && uiop->uio_resid > 0) { 3112 mutex_exit(&so->so_lock); 3113 first = 0; 3114 pflag = opflag | MSG_NOMARK; 3115 goto retry; 3116 } 3117 so_unlock_read(so); /* Clear SOREADLOCKED */ 3118 mutex_exit(&so->so_lock); 3119 return (0); 3120 } 3121 3122 /* strsock_proto has already verified length and alignment */ 3123 tpr = (union T_primitives *)mp->b_rptr; 3124 dprintso(so, 1, ("sotpi_recvmsg: type %d\n", tpr->type)); 3125 3126 switch (tpr->type) { 3127 case T_DATA_IND: { 3128 if ((so->so_state & 3129 (SS_OOBPEND|SS_HAVEOOBDATA|SS_RCVATMARK)) && 3130 (uiop->uio_resid != saved_resid) && 3131 !(flags & MSG_PEEK)) { 3132 sorecv_update_oobstate(so); 3133 } 3134 3135 /* 3136 * Set msg_flags to MSG_EOR based on 3137 * MORE_flag and MOREDATA. 3138 */ 3139 mutex_enter(&so->so_lock); 3140 so->so_state &= ~SS_SAVEDEOR; 3141 if (!(tpr->data_ind.MORE_flag & 1)) { 3142 if (!(rval.r_val1 & MOREDATA)) 3143 msg->msg_flags |= MSG_EOR; 3144 else 3145 so->so_state |= SS_SAVEDEOR; 3146 } 3147 freemsg(mp); 3148 /* 3149 * If some data was received (i.e. not EOF) and the 3150 * read/recv* has not been satisfied wait for some more. 
3151 */ 3152 if ((flags & MSG_WAITALL) && !(msg->msg_flags & MSG_EOR) && 3153 uiop->uio_resid != saved_resid && uiop->uio_resid > 0) { 3154 mutex_exit(&so->so_lock); 3155 first = 0; 3156 pflag = opflag | MSG_NOMARK; 3157 goto retry; 3158 } 3159 so_unlock_read(so); /* Clear SOREADLOCKED */ 3160 mutex_exit(&so->so_lock); 3161 return (0); 3162 } 3163 case T_UNITDATA_IND: { 3164 void *addr; 3165 t_uscalar_t addrlen; 3166 void *abuf; 3167 t_uscalar_t optlen; 3168 void *opt; 3169 3170 if ((so->so_state & 3171 (SS_OOBPEND|SS_HAVEOOBDATA|SS_RCVATMARK)) && 3172 (uiop->uio_resid != saved_resid) && 3173 !(flags & MSG_PEEK)) { 3174 sorecv_update_oobstate(so); 3175 } 3176 3177 if (namelen != 0) { 3178 /* Caller wants source address */ 3179 addrlen = tpr->unitdata_ind.SRC_length; 3180 addr = sogetoff(mp, 3181 tpr->unitdata_ind.SRC_offset, 3182 addrlen, 1); 3183 if (addr == NULL) { 3184 freemsg(mp); 3185 error = EPROTO; 3186 eprintsoline(so, error); 3187 goto err; 3188 } 3189 if (so->so_family == AF_UNIX) { 3190 /* 3191 * Can not use the transport level address. 3192 * If there is a SO_SRCADDR option carrying 3193 * the socket level address it will be 3194 * extracted below. 3195 */ 3196 addr = NULL; 3197 addrlen = 0; 3198 } 3199 } 3200 optlen = tpr->unitdata_ind.OPT_length; 3201 if (optlen != 0) { 3202 t_uscalar_t ncontrollen; 3203 3204 /* 3205 * Extract any source address option. 3206 * Determine how large cmsg buffer is needed. 3207 */ 3208 opt = sogetoff(mp, 3209 tpr->unitdata_ind.OPT_offset, 3210 optlen, __TPI_ALIGN_SIZE); 3211 3212 if (opt == NULL) { 3213 freemsg(mp); 3214 error = EPROTO; 3215 eprintsoline(so, error); 3216 goto err; 3217 } 3218 if (so->so_family == AF_UNIX) 3219 so_getopt_srcaddr(opt, optlen, &addr, &addrlen); 3220 ncontrollen = so_cmsglen(mp, opt, optlen, 3221 !(flags & MSG_XPG4_2)); 3222 if (controllen != 0) 3223 controllen = ncontrollen; 3224 else if (ncontrollen != 0) 3225 msg->msg_flags |= MSG_CTRUNC; 3226 } else { 3227 controllen = 0; 3228 } 3229 3230 if (namelen != 0) { 3231 /* 3232 * Return address to caller. 3233 * Caller handles truncation if length 3234 * exceeds msg_namelen. 3235 * NOTE: AF_UNIX NUL termination is ensured by 3236 * the sender's copyin_name(). 3237 */ 3238 abuf = kmem_alloc(addrlen, KM_SLEEP); 3239 3240 bcopy(addr, abuf, addrlen); 3241 msg->msg_name = abuf; 3242 msg->msg_namelen = addrlen; 3243 } 3244 3245 if (controllen != 0) { 3246 /* 3247 * Return control msg to caller. 3248 * Caller handles truncation if length 3249 * exceeds msg_controllen. 
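			 *
			 * Illustrative consumer (assumed userland code, not
			 * part of this file): after recvmsg() the returned
			 * buffer is walked with the CMSG_*() macros, e.g.
			 *
			 *	struct cmsghdr *cmsg;
			 *
			 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
			 *	    cmsg = CMSG_NXTHDR(&msg, cmsg)) {
			 *		if (cmsg->cmsg_level == SOL_SOCKET &&
			 *		    cmsg->cmsg_type == SCM_RIGHTS)
			 *			break;
			 *	}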
3250 */ 3251 control = kmem_alloc(controllen, KM_SLEEP); 3252 3253 error = so_opt2cmsg(mp, opt, optlen, 3254 !(flags & MSG_XPG4_2), 3255 control, controllen); 3256 if (error) { 3257 freemsg(mp); 3258 if (msg->msg_namelen != 0) 3259 kmem_free(msg->msg_name, 3260 msg->msg_namelen); 3261 kmem_free(control, controllen); 3262 eprintsoline(so, error); 3263 goto err; 3264 } 3265 msg->msg_control = control; 3266 msg->msg_controllen = controllen; 3267 } 3268 3269 freemsg(mp); 3270 mutex_enter(&so->so_lock); 3271 so_unlock_read(so); /* Clear SOREADLOCKED */ 3272 mutex_exit(&so->so_lock); 3273 return (0); 3274 } 3275 case T_OPTDATA_IND: { 3276 struct T_optdata_req *tdr; 3277 void *opt; 3278 t_uscalar_t optlen; 3279 3280 if ((so->so_state & 3281 (SS_OOBPEND|SS_HAVEOOBDATA|SS_RCVATMARK)) && 3282 (uiop->uio_resid != saved_resid) && 3283 !(flags & MSG_PEEK)) { 3284 sorecv_update_oobstate(so); 3285 } 3286 3287 tdr = (struct T_optdata_req *)mp->b_rptr; 3288 optlen = tdr->OPT_length; 3289 if (optlen != 0) { 3290 t_uscalar_t ncontrollen; 3291 /* 3292 * Determine how large cmsg buffer is needed. 3293 */ 3294 opt = sogetoff(mp, 3295 tpr->optdata_ind.OPT_offset, 3296 optlen, __TPI_ALIGN_SIZE); 3297 3298 if (opt == NULL) { 3299 freemsg(mp); 3300 error = EPROTO; 3301 eprintsoline(so, error); 3302 goto err; 3303 } 3304 3305 ncontrollen = so_cmsglen(mp, opt, optlen, 3306 !(flags & MSG_XPG4_2)); 3307 if (controllen != 0) 3308 controllen = ncontrollen; 3309 else if (ncontrollen != 0) 3310 msg->msg_flags |= MSG_CTRUNC; 3311 } else { 3312 controllen = 0; 3313 } 3314 3315 if (controllen != 0) { 3316 /* 3317 * Return control msg to caller. 3318 * Caller handles truncation if length 3319 * exceeds msg_controllen. 3320 */ 3321 control = kmem_alloc(controllen, KM_SLEEP); 3322 3323 error = so_opt2cmsg(mp, opt, optlen, 3324 !(flags & MSG_XPG4_2), 3325 control, controllen); 3326 if (error) { 3327 freemsg(mp); 3328 kmem_free(control, controllen); 3329 eprintsoline(so, error); 3330 goto err; 3331 } 3332 msg->msg_control = control; 3333 msg->msg_controllen = controllen; 3334 } 3335 3336 /* 3337 * Set msg_flags to MSG_EOR based on 3338 * DATA_flag and MOREDATA. 3339 */ 3340 mutex_enter(&so->so_lock); 3341 so->so_state &= ~SS_SAVEDEOR; 3342 if (!(tpr->data_ind.MORE_flag & 1)) { 3343 if (!(rval.r_val1 & MOREDATA)) 3344 msg->msg_flags |= MSG_EOR; 3345 else 3346 so->so_state |= SS_SAVEDEOR; 3347 } 3348 freemsg(mp); 3349 /* 3350 * If some data was received (i.e. not EOF) and the 3351 * read/recv* has not been satisfied wait for some more. 3352 * Not possible to wait if control info was received. 3353 */ 3354 if ((flags & MSG_WAITALL) && !(msg->msg_flags & MSG_EOR) && 3355 controllen == 0 && 3356 uiop->uio_resid != saved_resid && uiop->uio_resid > 0) { 3357 mutex_exit(&so->so_lock); 3358 first = 0; 3359 pflag = opflag | MSG_NOMARK; 3360 goto retry; 3361 } 3362 so_unlock_read(so); /* Clear SOREADLOCKED */ 3363 mutex_exit(&so->so_lock); 3364 return (0); 3365 } 3366 case T_EXDATA_IND: { 3367 dprintso(so, 1, 3368 ("sotpi_recvmsg: EXDATA_IND counts %d/%d consumed %ld " 3369 "state %s\n", 3370 so->so_oobsigcnt, so->so_oobcnt, 3371 saved_resid - uiop->uio_resid, 3372 pr_state(so->so_state, so->so_mode))); 3373 /* 3374 * kstrgetmsg handles MSGMARK so there is nothing to 3375 * inspect in the T_EXDATA_IND. 3376 * strsock_proto makes the stream head queue the T_EXDATA_IND 3377 * as a separate message with no M_DATA component. 
Furthermore, 3378 * the stream head does not consolidate M_DATA messages onto 3379 * an MSGMARK'ed message ensuring that the T_EXDATA_IND 3380 * remains a message by itself. This is needed since MSGMARK 3381 * marks both the whole message as well as the last byte 3382 * of the message. 3383 */ 3384 freemsg(mp); 3385 ASSERT(uiop->uio_resid == saved_resid); /* No data */ 3386 if (flags & MSG_PEEK) { 3387 /* 3388 * Even though we are peeking we consume the 3389 * T_EXDATA_IND thereby moving the mark information 3390 * to SS_RCVATMARK. Then the oob code below will 3391 * retry the peeking kstrgetmsg. 3392 * Note that the stream head read queue is 3393 * never flushed without holding SOREADLOCKED 3394 * thus the T_EXDATA_IND can not disappear 3395 * underneath us. 3396 */ 3397 dprintso(so, 1, 3398 ("sotpi_recvmsg: consume EXDATA_IND " 3399 "counts %d/%d state %s\n", 3400 so->so_oobsigcnt, 3401 so->so_oobcnt, 3402 pr_state(so->so_state, so->so_mode))); 3403 3404 pflag = MSG_ANY | MSG_DELAYERROR; 3405 if (so->so_mode & SM_ATOMIC) 3406 pflag |= MSG_DISCARDTAIL; 3407 3408 pri = 0; 3409 mp = NULL; 3410 3411 error = kstrgetmsg(SOTOV(so), &mp, uiop, 3412 &pri, &pflag, (clock_t)-1, &rval); 3413 ASSERT(uiop->uio_resid == saved_resid); 3414 3415 if (error) { 3416 #ifdef SOCK_DEBUG 3417 if (error != EWOULDBLOCK && error != EINTR) { 3418 eprintsoline(so, error); 3419 } 3420 #endif /* SOCK_DEBUG */ 3421 mutex_enter(&so->so_lock); 3422 so_unlock_read(so); /* Clear SOREADLOCKED */ 3423 mutex_exit(&so->so_lock); 3424 return (error); 3425 } 3426 ASSERT(mp); 3427 tpr = (union T_primitives *)mp->b_rptr; 3428 ASSERT(tpr->type == T_EXDATA_IND); 3429 freemsg(mp); 3430 } /* end "if (flags & MSG_PEEK)" */ 3431 3432 /* 3433 * Decrement the number of queued and pending oob. 3434 * 3435 * SS_RCVATMARK is cleared when we read past a mark. 3436 * SS_HAVEOOBDATA is cleared when we've read past the 3437 * last mark. 3438 * SS_OOBPEND is cleared if we've read past the last 3439 * mark and no (new) SIGURG has been posted. 3440 */ 3441 mutex_enter(&so->so_lock); 3442 ASSERT(so_verify_oobstate(so)); 3443 ASSERT(so->so_oobsigcnt >= so->so_oobcnt); 3444 ASSERT(so->so_oobsigcnt > 0); 3445 so->so_oobsigcnt--; 3446 ASSERT(so->so_oobcnt > 0); 3447 so->so_oobcnt--; 3448 /* 3449 * Since the T_EXDATA_IND has been removed from the stream 3450 * head, but we have not read data past the mark, 3451 * sockfs needs to track that the socket is still at the mark. 3452 * 3453 * Since no data was received call kstrgetmsg again to wait 3454 * for data. 3455 */ 3456 so->so_state |= SS_RCVATMARK; 3457 mutex_exit(&so->so_lock); 3458 dprintso(so, 1, 3459 ("sotpi_recvmsg: retry EXDATA_IND counts %d/%d state %s\n", 3460 so->so_oobsigcnt, so->so_oobcnt, 3461 pr_state(so->so_state, so->so_mode))); 3462 pflag = opflag; 3463 goto retry; 3464 } 3465 default: 3466 ASSERT(0); 3467 freemsg(mp); 3468 error = EPROTO; 3469 eprintsoline(so, error); 3470 goto err; 3471 } 3472 /* NOTREACHED */ 3473 err: 3474 mutex_enter(&so->so_lock); 3475 so_unlock_read(so); /* Clear SOREADLOCKED */ 3476 mutex_exit(&so->so_lock); 3477 return (error); 3478 } 3479 3480 /* 3481 * Sending data with options on a datagram socket. 3482 * Assumes caller has verified that SS_ISBOUND etc. are set. 
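 *
 * Illustrative origin of such a call (assumed userland sketch, not part of
 * this file): a sendmsg() that passes one descriptor over an AF_UNIX
 * datagram socket, which reaches this function with non-NULL control:
 *
 *	union {
 *		struct cmsghdr cm;
 *		char space[CMSG_SPACE(sizeof (int))];
 *	} cmu;
 *	struct iovec iov = { buf, buflen };
 *	struct msghdr msg;
 *
 *	(void) memset(&cmu, 0, sizeof (cmu));
 *	(void) memset(&msg, 0, sizeof (msg));
 *	msg.msg_name = (void *)&dst;
 *	msg.msg_namelen = sizeof (dst);
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	msg.msg_control = cmu.space;
 *	msg.msg_controllen = CMSG_SPACE(sizeof (int));
 *	cmu.cm.cmsg_level = SOL_SOCKET;
 *	cmu.cm.cmsg_type = SCM_RIGHTS;
 *	cmu.cm.cmsg_len = CMSG_LEN(sizeof (int));
 *	(void) memcpy(CMSG_DATA(&cmu.cm), &fd_to_pass, sizeof (int));
 *	(void) sendmsg(sock, &msg, 0);
 *
 * where buf, buflen, dst, sock and fd_to_pass are assumed to exist in the
 * caller.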
3483 */ 3484 static int 3485 sosend_dgramcmsg(struct sonode *so, struct sockaddr *name, socklen_t namelen, 3486 struct uio *uiop, void *control, t_uscalar_t controllen, int flags) 3487 { 3488 struct T_unitdata_req tudr; 3489 mblk_t *mp; 3490 int error; 3491 void *addr; 3492 socklen_t addrlen; 3493 void *src; 3494 socklen_t srclen; 3495 ssize_t len; 3496 int size; 3497 struct T_opthdr toh; 3498 struct fdbuf *fdbuf; 3499 t_uscalar_t optlen; 3500 void *fds; 3501 int fdlen; 3502 3503 ASSERT(name && namelen); 3504 ASSERT(control && controllen); 3505 3506 len = uiop->uio_resid; 3507 if (len > (ssize_t)so->so_tidu_size) { 3508 return (EMSGSIZE); 3509 } 3510 3511 /* 3512 * For AF_UNIX the destination address is translated to an internal 3513 * name and the source address is passed as an option. 3514 * Also, file descriptors are passed as file pointers in an 3515 * option. 3516 */ 3517 3518 /* 3519 * Length and family checks. 3520 */ 3521 error = so_addr_verify(so, name, namelen); 3522 if (error) { 3523 eprintsoline(so, error); 3524 return (error); 3525 } 3526 if (so->so_family == AF_UNIX) { 3527 if (so->so_state & SS_FADDR_NOXLATE) { 3528 /* 3529 * Already have a transport internal address. Do not 3530 * pass any (transport internal) source address. 3531 */ 3532 addr = name; 3533 addrlen = namelen; 3534 src = NULL; 3535 srclen = 0; 3536 } else { 3537 /* 3538 * Pass the sockaddr_un source address as an option 3539 * and translate the remote address. 3540 * 3541 * Note that this code does not prevent so_laddr_sa 3542 * from changing while it is being used. Thus 3543 * if an unbind+bind occurs concurrently with this 3544 * send the peer might see a partially new and a 3545 * partially old "from" address. 3546 */ 3547 src = so->so_laddr_sa; 3548 srclen = (t_uscalar_t)so->so_laddr_len; 3549 dprintso(so, 1, 3550 ("sosend_dgramcmsg UNIX: srclen %d, src %p\n", 3551 srclen, src)); 3552 error = so_ux_addr_xlate(so, name, namelen, 3553 (flags & MSG_XPG4_2), 3554 &addr, &addrlen); 3555 if (error) { 3556 eprintsoline(so, error); 3557 return (error); 3558 } 3559 } 3560 } else { 3561 addr = name; 3562 addrlen = namelen; 3563 src = NULL; 3564 srclen = 0; 3565 } 3566 optlen = so_optlen(control, controllen, 3567 !(flags & MSG_XPG4_2)); 3568 tudr.PRIM_type = T_UNITDATA_REQ; 3569 tudr.DEST_length = addrlen; 3570 tudr.DEST_offset = (t_scalar_t)sizeof (tudr); 3571 if (srclen != 0) 3572 tudr.OPT_length = (t_scalar_t)(optlen + sizeof (toh) + 3573 _TPI_ALIGN_TOPT(srclen)); 3574 else 3575 tudr.OPT_length = optlen; 3576 tudr.OPT_offset = (t_scalar_t)(sizeof (tudr) + 3577 _TPI_ALIGN_TOPT(addrlen)); 3578 3579 size = tudr.OPT_offset + tudr.OPT_length; 3580 3581 /* 3582 * File descriptors only when SM_FDPASSING set. 3583 */ 3584 error = so_getfdopt(control, controllen, 3585 !(flags & MSG_XPG4_2), &fds, &fdlen); 3586 if (error) 3587 return (error); 3588 if (fdlen != -1) { 3589 if (!(so->so_mode & SM_FDPASSING)) 3590 return (EOPNOTSUPP); 3591 3592 error = fdbuf_create(fds, fdlen, &fdbuf); 3593 if (error) 3594 return (error); 3595 mp = fdbuf_allocmsg(size, fdbuf); 3596 } else { 3597 mp = soallocproto(size, _ALLOC_INTR); 3598 if (mp == NULL) { 3599 /* 3600 * Caught a signal waiting for memory. 3601 * Let send* return EINTR. 
3602 */ 3603 return (EINTR); 3604 } 3605 } 3606 soappendmsg(mp, &tudr, sizeof (tudr)); 3607 soappendmsg(mp, addr, addrlen); 3608 mp->b_wptr += _TPI_ALIGN_TOPT(addrlen) - addrlen; 3609 3610 if (fdlen != -1) { 3611 ASSERT(fdbuf != NULL); 3612 toh.level = SOL_SOCKET; 3613 toh.name = SO_FILEP; 3614 toh.len = fdbuf->fd_size + 3615 (t_uscalar_t)sizeof (struct T_opthdr); 3616 toh.status = 0; 3617 soappendmsg(mp, &toh, sizeof (toh)); 3618 soappendmsg(mp, fdbuf, fdbuf->fd_size); 3619 ASSERT(__TPI_TOPT_ISALIGNED(mp->b_wptr)); 3620 } 3621 if (srclen != 0) { 3622 /* 3623 * There is a AF_UNIX sockaddr_un to include as a source 3624 * address option. 3625 */ 3626 toh.level = SOL_SOCKET; 3627 toh.name = SO_SRCADDR; 3628 toh.len = (t_uscalar_t)(srclen + sizeof (struct T_opthdr)); 3629 toh.status = 0; 3630 soappendmsg(mp, &toh, sizeof (toh)); 3631 soappendmsg(mp, src, srclen); 3632 mp->b_wptr += _TPI_ALIGN_TOPT(srclen) - srclen; 3633 ASSERT(__TPI_TOPT_ISALIGNED(mp->b_wptr)); 3634 } 3635 ASSERT(mp->b_wptr <= mp->b_datap->db_lim); 3636 so_cmsg2opt(control, controllen, !(flags & MSG_XPG4_2), mp); 3637 /* At most 3 bytes left in the message */ 3638 ASSERT(MBLKL(mp) > (ssize_t)(size - __TPI_ALIGN_SIZE)); 3639 ASSERT(MBLKL(mp) <= (ssize_t)size); 3640 3641 ASSERT(mp->b_wptr <= mp->b_datap->db_lim); 3642 #ifdef C2_AUDIT 3643 if (audit_active) 3644 audit_sock(T_UNITDATA_REQ, strvp2wq(SOTOV(so)), mp, 0); 3645 #endif /* C2_AUDIT */ 3646 3647 error = kstrputmsg(SOTOV(so), mp, uiop, len, 0, MSG_BAND, 0); 3648 #ifdef SOCK_DEBUG 3649 if (error) { 3650 eprintsoline(so, error); 3651 } 3652 #endif /* SOCK_DEBUG */ 3653 return (error); 3654 } 3655 3656 /* 3657 * Sending data with options on a connected stream socket. 3658 * Assumes caller has verified that SS_ISCONNECTED is set. 3659 */ 3660 static int 3661 sosend_svccmsg(struct sonode *so, 3662 struct uio *uiop, 3663 int more, 3664 void *control, 3665 t_uscalar_t controllen, 3666 int flags) 3667 { 3668 struct T_optdata_req tdr; 3669 mblk_t *mp; 3670 int error; 3671 ssize_t iosize; 3672 int first = 1; 3673 int size; 3674 struct fdbuf *fdbuf; 3675 t_uscalar_t optlen; 3676 void *fds; 3677 int fdlen; 3678 struct T_opthdr toh; 3679 3680 dprintso(so, 1, 3681 ("sosend_svccmsg: resid %ld bytes\n", uiop->uio_resid)); 3682 3683 /* 3684 * Has to be bound and connected. However, since no locks are 3685 * held the state could have changed after sotpi_sendmsg checked it 3686 * thus it is not possible to ASSERT on the state. 3687 */ 3688 3689 /* Options on connection-oriented only when SM_OPTDATA set. */ 3690 if (!(so->so_mode & SM_OPTDATA)) 3691 return (EOPNOTSUPP); 3692 3693 do { 3694 /* 3695 * Set the MORE flag if uio_resid does not fit in this 3696 * message or if the caller passed in "more". 3697 * Error for transports with zero tidu_size. 3698 */ 3699 tdr.PRIM_type = T_OPTDATA_REQ; 3700 iosize = so->so_tidu_size; 3701 if (iosize <= 0) 3702 return (EMSGSIZE); 3703 if (uiop->uio_resid > iosize) { 3704 tdr.DATA_flag = 1; 3705 } else { 3706 if (more) 3707 tdr.DATA_flag = 1; 3708 else 3709 tdr.DATA_flag = 0; 3710 iosize = uiop->uio_resid; 3711 } 3712 dprintso(so, 1, ("sosend_svccmsg: sending %d, %ld bytes\n", 3713 tdr.DATA_flag, iosize)); 3714 3715 optlen = so_optlen(control, controllen, !(flags & MSG_XPG4_2)); 3716 tdr.OPT_length = optlen; 3717 tdr.OPT_offset = (t_scalar_t)sizeof (tdr); 3718 3719 size = (int)sizeof (tdr) + optlen; 3720 /* 3721 * File descriptors only when SM_FDPASSING set. 
3722 */ 3723 error = so_getfdopt(control, controllen, 3724 !(flags & MSG_XPG4_2), &fds, &fdlen); 3725 if (error) 3726 return (error); 3727 if (fdlen != -1) { 3728 if (!(so->so_mode & SM_FDPASSING)) 3729 return (EOPNOTSUPP); 3730 3731 error = fdbuf_create(fds, fdlen, &fdbuf); 3732 if (error) 3733 return (error); 3734 mp = fdbuf_allocmsg(size, fdbuf); 3735 } else { 3736 mp = soallocproto(size, _ALLOC_INTR); 3737 if (mp == NULL) { 3738 /* 3739 * Caught a signal waiting for memory. 3740 * Let send* return EINTR. 3741 */ 3742 return (first ? EINTR : 0); 3743 } 3744 } 3745 soappendmsg(mp, &tdr, sizeof (tdr)); 3746 3747 if (fdlen != -1) { 3748 ASSERT(fdbuf != NULL); 3749 toh.level = SOL_SOCKET; 3750 toh.name = SO_FILEP; 3751 toh.len = fdbuf->fd_size + 3752 (t_uscalar_t)sizeof (struct T_opthdr); 3753 toh.status = 0; 3754 soappendmsg(mp, &toh, sizeof (toh)); 3755 soappendmsg(mp, fdbuf, fdbuf->fd_size); 3756 ASSERT(__TPI_TOPT_ISALIGNED(mp->b_wptr)); 3757 } 3758 so_cmsg2opt(control, controllen, !(flags & MSG_XPG4_2), mp); 3759 /* At most 3 bytes left in the message */ 3760 ASSERT(MBLKL(mp) > (ssize_t)(size - __TPI_ALIGN_SIZE)); 3761 ASSERT(MBLKL(mp) <= (ssize_t)size); 3762 3763 ASSERT(mp->b_wptr <= mp->b_datap->db_lim); 3764 3765 error = kstrputmsg(SOTOV(so), mp, uiop, iosize, 3766 0, MSG_BAND, 0); 3767 if (error) { 3768 if (!first && error == EWOULDBLOCK) 3769 return (0); 3770 eprintsoline(so, error); 3771 return (error); 3772 } 3773 control = NULL; 3774 first = 0; 3775 if (uiop->uio_resid > 0) { 3776 /* 3777 * Recheck for fatal errors. Fail write even though 3778 * some data have been written. This is consistent 3779 * with strwrite semantics and BSD sockets semantics. 3780 */ 3781 if (so->so_state & SS_CANTSENDMORE) { 3782 tsignal(curthread, SIGPIPE); 3783 eprintsoline(so, error); 3784 return (EPIPE); 3785 } 3786 if (so->so_error != 0) { 3787 mutex_enter(&so->so_lock); 3788 error = sogeterr(so); 3789 mutex_exit(&so->so_lock); 3790 if (error != 0) { 3791 eprintsoline(so, error); 3792 return (error); 3793 } 3794 } 3795 } 3796 } while (uiop->uio_resid > 0); 3797 return (0); 3798 } 3799 3800 /* 3801 * Sending data on a datagram socket. 3802 * Assumes caller has verified that SS_ISBOUND etc. are set. 3803 * 3804 * For AF_UNIX the destination address is translated to an internal 3805 * name and the source address is passed as an option. 3806 */ 3807 int 3808 sosend_dgram(struct sonode *so, struct sockaddr *name, socklen_t namelen, 3809 struct uio *uiop, int flags) 3810 { 3811 struct T_unitdata_req tudr; 3812 mblk_t *mp; 3813 int error; 3814 void *addr; 3815 socklen_t addrlen; 3816 void *src; 3817 socklen_t srclen; 3818 ssize_t len; 3819 3820 ASSERT(name != NULL && namelen != 0); 3821 3822 len = uiop->uio_resid; 3823 if (len > so->so_tidu_size) { 3824 error = EMSGSIZE; 3825 goto done; 3826 } 3827 3828 /* Length and family checks */ 3829 error = so_addr_verify(so, name, namelen); 3830 if (error != 0) 3831 goto done; 3832 3833 if (so->so_state & SS_DIRECT) 3834 return (sodgram_direct(so, name, namelen, uiop, flags)); 3835 3836 if (so->so_family == AF_UNIX) { 3837 if (so->so_state & SS_FADDR_NOXLATE) { 3838 /* 3839 * Already have a transport internal address. Do not 3840 * pass any (transport internal) source address. 3841 */ 3842 addr = name; 3843 addrlen = namelen; 3844 src = NULL; 3845 srclen = 0; 3846 } else { 3847 /* 3848 * Pass the sockaddr_un source address as an option 3849 * and translate the remote address. 
3850 * 3851 * Note that this code does not prevent so_laddr_sa 3852 * from changing while it is being used. Thus 3853 * if an unbind+bind occurs concurrently with this 3854 * send the peer might see a partially new and a 3855 * partially old "from" address. 3856 */ 3857 src = so->so_laddr_sa; 3858 srclen = (socklen_t)so->so_laddr_len; 3859 dprintso(so, 1, 3860 ("sosend_dgram UNIX: srclen %d, src %p\n", 3861 srclen, src)); 3862 error = so_ux_addr_xlate(so, name, namelen, 3863 (flags & MSG_XPG4_2), 3864 &addr, &addrlen); 3865 if (error) { 3866 eprintsoline(so, error); 3867 goto done; 3868 } 3869 } 3870 } else { 3871 addr = name; 3872 addrlen = namelen; 3873 src = NULL; 3874 srclen = 0; 3875 } 3876 tudr.PRIM_type = T_UNITDATA_REQ; 3877 tudr.DEST_length = addrlen; 3878 tudr.DEST_offset = (t_scalar_t)sizeof (tudr); 3879 if (srclen == 0) { 3880 tudr.OPT_length = 0; 3881 tudr.OPT_offset = 0; 3882 3883 mp = soallocproto2(&tudr, sizeof (tudr), 3884 addr, addrlen, 0, _ALLOC_INTR); 3885 if (mp == NULL) { 3886 /* 3887 * Caught a signal waiting for memory. 3888 * Let send* return EINTR. 3889 */ 3890 error = EINTR; 3891 goto done; 3892 } 3893 } else { 3894 /* 3895 * There is a AF_UNIX sockaddr_un to include as a source 3896 * address option. 3897 */ 3898 struct T_opthdr toh; 3899 ssize_t size; 3900 3901 tudr.OPT_length = (t_scalar_t)(sizeof (toh) + 3902 _TPI_ALIGN_TOPT(srclen)); 3903 tudr.OPT_offset = (t_scalar_t)(sizeof (tudr) + 3904 _TPI_ALIGN_TOPT(addrlen)); 3905 3906 toh.level = SOL_SOCKET; 3907 toh.name = SO_SRCADDR; 3908 toh.len = (t_uscalar_t)(srclen + sizeof (struct T_opthdr)); 3909 toh.status = 0; 3910 3911 size = tudr.OPT_offset + tudr.OPT_length; 3912 mp = soallocproto2(&tudr, sizeof (tudr), 3913 addr, addrlen, size, _ALLOC_INTR); 3914 if (mp == NULL) { 3915 /* 3916 * Caught a signal waiting for memory. 3917 * Let send* return EINTR. 3918 */ 3919 error = EINTR; 3920 goto done; 3921 } 3922 mp->b_wptr += _TPI_ALIGN_TOPT(addrlen) - addrlen; 3923 soappendmsg(mp, &toh, sizeof (toh)); 3924 soappendmsg(mp, src, srclen); 3925 mp->b_wptr += _TPI_ALIGN_TOPT(srclen) - srclen; 3926 ASSERT(mp->b_wptr <= mp->b_datap->db_lim); 3927 } 3928 3929 #ifdef C2_AUDIT 3930 if (audit_active) 3931 audit_sock(T_UNITDATA_REQ, strvp2wq(SOTOV(so)), mp, 0); 3932 #endif /* C2_AUDIT */ 3933 3934 error = kstrputmsg(SOTOV(so), mp, uiop, len, 0, MSG_BAND, 0); 3935 done: 3936 #ifdef SOCK_DEBUG 3937 if (error) { 3938 eprintsoline(so, error); 3939 } 3940 #endif /* SOCK_DEBUG */ 3941 return (error); 3942 } 3943 3944 /* 3945 * Sending data on a connected stream socket. 3946 * Assumes caller has verified that SS_ISCONNECTED is set. 3947 */ 3948 int 3949 sosend_svc(struct sonode *so, 3950 struct uio *uiop, 3951 t_scalar_t prim, 3952 int more, 3953 int sflag) 3954 { 3955 struct T_data_req tdr; 3956 mblk_t *mp; 3957 int error; 3958 ssize_t iosize; 3959 int first = 1; 3960 3961 dprintso(so, 1, 3962 ("sosend_svc: %p, resid %ld bytes, prim %d, sflag 0x%x\n", 3963 so, uiop->uio_resid, prim, sflag)); 3964 3965 /* 3966 * Has to be bound and connected. However, since no locks are 3967 * held the state could have changed after sotpi_sendmsg checked it 3968 * thus it is not possible to ASSERT on the state. 3969 */ 3970 3971 do { 3972 /* 3973 * Set the MORE flag if uio_resid does not fit in this 3974 * message or if the caller passed in "more". 3975 * Error for transports with zero tidu_size. 
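		 *
		 * Worked example (illustrative): with so_tidu_size of 8192, a
		 * 20000 byte write and more == 0 produces three T_DATA_REQ
		 * messages of 8192, 8192 and 3616 bytes carrying MORE_flag
		 * values of 1, 1 and 0 respectively.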
3976 */ 3977 tdr.PRIM_type = prim; 3978 iosize = so->so_tidu_size; 3979 if (iosize <= 0) 3980 return (EMSGSIZE); 3981 if (uiop->uio_resid > iosize) { 3982 tdr.MORE_flag = 1; 3983 } else { 3984 if (more) 3985 tdr.MORE_flag = 1; 3986 else 3987 tdr.MORE_flag = 0; 3988 iosize = uiop->uio_resid; 3989 } 3990 dprintso(so, 1, ("sosend_svc: sending 0x%x %d, %ld bytes\n", 3991 prim, tdr.MORE_flag, iosize)); 3992 mp = soallocproto1(&tdr, sizeof (tdr), 0, _ALLOC_INTR); 3993 if (mp == NULL) { 3994 /* 3995 * Caught a signal waiting for memory. 3996 * Let send* return EINTR. 3997 */ 3998 if (first) 3999 return (EINTR); 4000 else 4001 return (0); 4002 } 4003 4004 error = kstrputmsg(SOTOV(so), mp, uiop, iosize, 4005 0, sflag | MSG_BAND, 0); 4006 if (error) { 4007 if (!first && error == EWOULDBLOCK) 4008 return (0); 4009 eprintsoline(so, error); 4010 return (error); 4011 } 4012 first = 0; 4013 if (uiop->uio_resid > 0) { 4014 /* 4015 * Recheck for fatal errors. Fail write even though 4016 * some data have been written. This is consistent 4017 * with strwrite semantics and BSD sockets semantics. 4018 */ 4019 if (so->so_state & SS_CANTSENDMORE) { 4020 tsignal(curthread, SIGPIPE); 4021 eprintsoline(so, error); 4022 return (EPIPE); 4023 } 4024 if (so->so_error != 0) { 4025 mutex_enter(&so->so_lock); 4026 error = sogeterr(so); 4027 mutex_exit(&so->so_lock); 4028 if (error != 0) { 4029 eprintsoline(so, error); 4030 return (error); 4031 } 4032 } 4033 } 4034 } while (uiop->uio_resid > 0); 4035 return (0); 4036 } 4037 4038 /* 4039 * Check the state for errors and call the appropriate send function. 4040 * 4041 * If MSG_DONTROUTE is set (and SO_DONTROUTE isn't already set) 4042 * this function issues a setsockopt to toggle SO_DONTROUTE before and 4043 * after sending the message. 4044 */ 4045 static int 4046 sotpi_sendmsg(struct sonode *so, struct nmsghdr *msg, struct uio *uiop) 4047 { 4048 int so_state; 4049 int so_mode; 4050 int error; 4051 struct sockaddr *name; 4052 t_uscalar_t namelen; 4053 int dontroute; 4054 int flags; 4055 4056 dprintso(so, 1, ("sotpi_sendmsg(%p, %p, 0x%x) state %s, error %d\n", 4057 so, msg, msg->msg_flags, 4058 pr_state(so->so_state, so->so_mode), so->so_error)); 4059 4060 mutex_enter(&so->so_lock); 4061 so_state = so->so_state; 4062 4063 if (so_state & SS_CANTSENDMORE) { 4064 mutex_exit(&so->so_lock); 4065 tsignal(curthread, SIGPIPE); 4066 return (EPIPE); 4067 } 4068 4069 if (so->so_error != 0) { 4070 error = sogeterr(so); 4071 if (error != 0) { 4072 mutex_exit(&so->so_lock); 4073 return (error); 4074 } 4075 } 4076 4077 name = (struct sockaddr *)msg->msg_name; 4078 namelen = msg->msg_namelen; 4079 4080 so_mode = so->so_mode; 4081 4082 if (name == NULL) { 4083 if (!(so_state & SS_ISCONNECTED)) { 4084 mutex_exit(&so->so_lock); 4085 if (so_mode & SM_CONNREQUIRED) 4086 return (ENOTCONN); 4087 else 4088 return (EDESTADDRREQ); 4089 } 4090 if (so_mode & SM_CONNREQUIRED) { 4091 name = NULL; 4092 namelen = 0; 4093 } else { 4094 /* 4095 * Note that this code does not prevent so_faddr_sa 4096 * from changing while it is being used. Thus 4097 * if an "unconnect"+connect occurs concurrently with 4098 * this send, the datagram might be delivered to a 4099 * garbled address. 
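 *
 * Illustration (not part of the original file): how the msg_name ==
 * NULL cases above look from user level; the address is a made-up
 * example, error handling is omitted, and <sys/socket.h>,
 * <netinet/in.h> and <string.h> are assumed.
 *
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct sockaddr_in peer;
 *
 *	(void) send(s, "x", 1, 0);
 *
 * The send() above fails with EDESTADDRREQ (unconnected, and an
 * address is allowed but missing).  After
 *
 *	memset(&peer, 0, sizeof (peer));
 *	peer.sin_family = AF_INET;
 *	peer.sin_port = htons(7);
 *	peer.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
 *	(void) connect(s, (struct sockaddr *)&peer, sizeof (peer));
 *	(void) send(s, "x", 1, 0);
 *
 * the second send() uses the cached so_faddr_sa.  A SOCK_STREAM
 * socket in the same unconnected state gets ENOTCONN instead, per the
 * SM_CONNREQUIRED test above.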
4100 */ 4101 ASSERT(so->so_faddr_sa); 4102 name = so->so_faddr_sa; 4103 namelen = (t_uscalar_t)so->so_faddr_len; 4104 } 4105 } else { 4106 if (!(so_state & SS_ISCONNECTED) && 4107 (so_mode & SM_CONNREQUIRED)) { 4108 /* Required but not connected */ 4109 mutex_exit(&so->so_lock); 4110 return (ENOTCONN); 4111 } 4112 /* 4113 * Ignore the address on connection-oriented sockets. 4114 * Just like BSD this code does not generate an error for 4115 * TCP (a CONNREQUIRED socket) when sending to an address 4116 * passed in with sendto/sendmsg. Instead the data is 4117 * delivered on the connection as if no address had been 4118 * supplied. 4119 */ 4120 if ((so_state & SS_ISCONNECTED) && 4121 !(so_mode & SM_CONNREQUIRED)) { 4122 mutex_exit(&so->so_lock); 4123 return (EISCONN); 4124 } 4125 if (!(so_state & SS_ISBOUND)) { 4126 so_lock_single(so); /* Set SOLOCKED */ 4127 error = sotpi_bind(so, NULL, 0, 4128 _SOBIND_UNSPEC|_SOBIND_LOCK_HELD); 4129 so_unlock_single(so, SOLOCKED); 4130 if (error) { 4131 mutex_exit(&so->so_lock); 4132 eprintsoline(so, error); 4133 return (error); 4134 } 4135 } 4136 /* 4137 * Handle delayed datagram errors. These are only queued 4138 * when the application sets SO_DGRAM_ERRIND. 4139 * Return the error if we are sending to the address 4140 * that was returned in the last T_UDERROR_IND. 4141 * If sending to some other address discard the delayed 4142 * error indication. 4143 */ 4144 if (so->so_delayed_error) { 4145 struct T_uderror_ind *tudi; 4146 void *addr; 4147 t_uscalar_t addrlen; 4148 boolean_t match = B_FALSE; 4149 4150 ASSERT(so->so_eaddr_mp); 4151 error = so->so_delayed_error; 4152 so->so_delayed_error = 0; 4153 tudi = (struct T_uderror_ind *)so->so_eaddr_mp->b_rptr; 4154 addrlen = tudi->DEST_length; 4155 addr = sogetoff(so->so_eaddr_mp, 4156 tudi->DEST_offset, 4157 addrlen, 1); 4158 ASSERT(addr); /* Checked by strsock_proto */ 4159 switch (so->so_family) { 4160 case AF_INET: { 4161 /* Compare just IP address and port */ 4162 sin_t *sin1 = (sin_t *)name; 4163 sin_t *sin2 = (sin_t *)addr; 4164 4165 if (addrlen == sizeof (sin_t) && 4166 namelen == addrlen && 4167 sin1->sin_port == sin2->sin_port && 4168 sin1->sin_addr.s_addr == 4169 sin2->sin_addr.s_addr) 4170 match = B_TRUE; 4171 break; 4172 } 4173 case AF_INET6: { 4174 /* Compare just IP address and port. 
Not flow */ 4175 sin6_t *sin1 = (sin6_t *)name; 4176 sin6_t *sin2 = (sin6_t *)addr; 4177 4178 if (addrlen == sizeof (sin6_t) && 4179 namelen == addrlen && 4180 sin1->sin6_port == sin2->sin6_port && 4181 IN6_ARE_ADDR_EQUAL(&sin1->sin6_addr, 4182 &sin2->sin6_addr)) 4183 match = B_TRUE; 4184 break; 4185 } 4186 case AF_UNIX: 4187 default: 4188 if (namelen == addrlen && 4189 bcmp(name, addr, namelen) == 0) 4190 match = B_TRUE; 4191 } 4192 if (match) { 4193 freemsg(so->so_eaddr_mp); 4194 so->so_eaddr_mp = NULL; 4195 mutex_exit(&so->so_lock); 4196 #ifdef DEBUG 4197 dprintso(so, 0, 4198 ("sockfs delayed error %d for %s\n", 4199 error, 4200 pr_addr(so->so_family, name, namelen))); 4201 #endif /* DEBUG */ 4202 return (error); 4203 } 4204 freemsg(so->so_eaddr_mp); 4205 so->so_eaddr_mp = NULL; 4206 } 4207 } 4208 mutex_exit(&so->so_lock); 4209 4210 flags = msg->msg_flags; 4211 dontroute = 0; 4212 if ((flags & MSG_DONTROUTE) && !(so->so_options & SO_DONTROUTE)) { 4213 uint32_t val; 4214 4215 val = 1; 4216 error = sotpi_setsockopt(so, SOL_SOCKET, SO_DONTROUTE, 4217 &val, (t_uscalar_t)sizeof (val)); 4218 if (error) 4219 return (error); 4220 dontroute = 1; 4221 } 4222 4223 if ((flags & MSG_OOB) && !(so_mode & SM_EXDATA)) { 4224 error = EOPNOTSUPP; 4225 goto done; 4226 } 4227 if (msg->msg_controllen != 0) { 4228 if (!(so_mode & SM_CONNREQUIRED)) { 4229 error = sosend_dgramcmsg(so, name, namelen, uiop, 4230 msg->msg_control, msg->msg_controllen, flags); 4231 } else { 4232 if (flags & MSG_OOB) { 4233 /* Can't generate T_EXDATA_REQ with options */ 4234 error = EOPNOTSUPP; 4235 goto done; 4236 } 4237 error = sosend_svccmsg(so, uiop, 4238 !(flags & MSG_EOR), 4239 msg->msg_control, msg->msg_controllen, 4240 flags); 4241 } 4242 goto done; 4243 } 4244 4245 if (!(so_mode & SM_CONNREQUIRED)) { 4246 /* 4247 * If there is no SO_DONTROUTE to turn off return immediately 4248 * from send_dgram. This can allow tail-call optimizations. 4249 */ 4250 if (!dontroute) { 4251 return (sosend_dgram(so, name, namelen, uiop, flags)); 4252 } 4253 error = sosend_dgram(so, name, namelen, uiop, flags); 4254 } else { 4255 t_scalar_t prim; 4256 int sflag; 4257 4258 /* Ignore msg_name in the connected state */ 4259 if (flags & MSG_OOB) { 4260 prim = T_EXDATA_REQ; 4261 /* 4262 * Send down T_EXDATA_REQ even if there is flow 4263 * control for data. 4264 */ 4265 sflag = MSG_IGNFLOW; 4266 } else { 4267 if (so_mode & SM_BYTESTREAM) { 4268 /* Byte stream transport - use write */ 4269 4270 dprintso(so, 1, ("sotpi_sendmsg: write\n")); 4271 /* 4272 * If there is no SO_DONTROUTE to turn off, 4273 * SS_DIRECT is on, and there is no flow 4274 * control, we can take the fast path. 4275 */ 4276 if (!dontroute && 4277 (so_state & SS_DIRECT) && 4278 canputnext(SOTOV(so)->v_stream->sd_wrq)) { 4279 return (sostream_direct(so, uiop, 4280 NULL, CRED())); 4281 } 4282 error = strwrite(SOTOV(so), uiop, CRED()); 4283 goto done; 4284 } 4285 prim = T_DATA_REQ; 4286 sflag = 0; 4287 } 4288 /* 4289 * If there is no SO_DONTROUTE to turn off return immediately 4290 * from sosend_svc. This can allow tail-call optimizations. 
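 *
 * Illustration (not part of the original file): a user-level sketch
 * of the control-message path dispatched above (msg_controllen != 0),
 * here passing one file descriptor over a connected AF_UNIX socket s.
 * The names are examples, error handling is omitted, and
 * <sys/socket.h> and <string.h> are assumed.
 *
 *	int fd_to_pass = 0;
 *	struct msghdr msg;
 *	struct iovec iov;
 *	struct cmsghdr *cm;
 *	char cbuf[CMSG_SPACE(sizeof (int))];
 *	char c = 'x';
 *
 *	memset(&msg, 0, sizeof (msg));
 *	iov.iov_base = &c;
 *	iov.iov_len = 1;
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof (cbuf);
 *	cm = CMSG_FIRSTHDR(&msg);
 *	cm->cmsg_level = SOL_SOCKET;
 *	cm->cmsg_type = SCM_RIGHTS;
 *	cm->cmsg_len = CMSG_LEN(sizeof (int));
 *	memcpy(CMSG_DATA(cm), &fd_to_pass, sizeof (int));
 *	(void) sendmsg(s, &msg, 0);
 *
 * sosend_svccmsg()/sosend_dgramcmsg() turn the cmsgs into TPI options;
 * SCM_RIGHTS in particular becomes the SO_FILEP fdbuf option built in
 * sosend_dgramcmsg() above.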
4291 */ 4292 if (!dontroute) 4293 return (sosend_svc(so, uiop, prim, 4294 !(flags & MSG_EOR), sflag)); 4295 error = sosend_svc(so, uiop, prim, 4296 !(flags & MSG_EOR), sflag); 4297 } 4298 ASSERT(dontroute); 4299 done: 4300 if (dontroute) { 4301 uint32_t val; 4302 4303 val = 0; 4304 (void) sotpi_setsockopt(so, SOL_SOCKET, SO_DONTROUTE, 4305 &val, (t_uscalar_t)sizeof (val)); 4306 } 4307 return (error); 4308 } 4309 4310 /* 4311 * Sending data on a datagram socket. 4312 * Assumes caller has verified that SS_ISBOUND etc. are set. 4313 */ 4314 /* ARGSUSED */ 4315 static int 4316 sodgram_direct(struct sonode *so, struct sockaddr *name, 4317 socklen_t namelen, struct uio *uiop, int flags) 4318 { 4319 struct T_unitdata_req tudr; 4320 mblk_t *mp; 4321 int error = 0; 4322 void *addr; 4323 socklen_t addrlen; 4324 ssize_t len; 4325 struct stdata *stp = SOTOV(so)->v_stream; 4326 int so_state; 4327 queue_t *udp_wq; 4328 4329 ASSERT(name != NULL && namelen != 0); 4330 ASSERT(!(so->so_mode & SM_CONNREQUIRED)); 4331 ASSERT(!(so->so_mode & SM_EXDATA)); 4332 ASSERT(so->so_family == AF_INET || so->so_family == AF_INET6); 4333 ASSERT(SOTOV(so)->v_type == VSOCK); 4334 4335 /* Caller checked for proper length */ 4336 len = uiop->uio_resid; 4337 ASSERT(len <= so->so_tidu_size); 4338 4339 /* Length and family checks have been done by caller */ 4340 ASSERT(name->sa_family == so->so_family); 4341 ASSERT(so->so_family == AF_INET || 4342 (namelen == (socklen_t)sizeof (struct sockaddr_in6))); 4343 ASSERT(so->so_family == AF_INET6 || 4344 (namelen == (socklen_t)sizeof (struct sockaddr_in))); 4345 4346 addr = name; 4347 addrlen = namelen; 4348 4349 if (stp->sd_sidp != NULL && 4350 (error = straccess(stp, JCWRITE)) != 0) 4351 goto done; 4352 4353 so_state = so->so_state; 4354 4355 /* 4356 * For UDP we don't break up the copyin into smaller pieces 4357 * as in the TCP case. That means if ENOMEM is returned by 4358 * mcopyinuio() then the uio vector has not been modified at 4359 * all and we fallback to either strwrite() or kstrputmsg() 4360 * below. Note also that we never generate priority messages 4361 * from here. 4362 */ 4363 udp_wq = stp->sd_wrq->q_next; 4364 if (canput(udp_wq) && 4365 (mp = mcopyinuio(stp, uiop, -1, -1, &error)) != NULL) { 4366 ASSERT(DB_TYPE(mp) == M_DATA); 4367 ASSERT(uiop->uio_resid == 0); 4368 #ifdef C2_AUDIT 4369 if (audit_active) 4370 audit_sock(T_UNITDATA_REQ, strvp2wq(SOTOV(so)), mp, 0); 4371 #endif /* C2_AUDIT */ 4372 udp_wput_data(udp_wq, mp, addr, addrlen); 4373 return (0); 4374 } 4375 if (error != 0 && error != ENOMEM) 4376 return (error); 4377 4378 /* 4379 * For connected, let strwrite() handle the blocking case. 4380 * Otherwise we fall thru and use kstrputmsg(). 4381 */ 4382 if (so_state & SS_ISCONNECTED) 4383 return (strwrite(SOTOV(so), uiop, CRED())); 4384 4385 tudr.PRIM_type = T_UNITDATA_REQ; 4386 tudr.DEST_length = addrlen; 4387 tudr.DEST_offset = (t_scalar_t)sizeof (tudr); 4388 tudr.OPT_length = 0; 4389 tudr.OPT_offset = 0; 4390 4391 mp = soallocproto2(&tudr, sizeof (tudr), addr, addrlen, 0, _ALLOC_INTR); 4392 if (mp == NULL) { 4393 /* 4394 * Caught a signal waiting for memory. 4395 * Let send* return EINTR. 
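 *
 * Illustration (not part of the original file): since allocation
 * failures in the send path surface as EINTR (see the table at the
 * top of this file), user code typically just retries.  A minimal
 * sketch, where s, buf, len and to are assumed to be set up already:
 *
 *	ssize_t n;
 *
 *	do {
 *		n = sendto(s, buf, len, 0,
 *		    (struct sockaddr *)&to, sizeof (to));
 *	} while (n == -1 && errno == EINTR);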
4396 */ 4397 error = EINTR; 4398 goto done; 4399 } 4400 4401 #ifdef C2_AUDIT 4402 if (audit_active) 4403 audit_sock(T_UNITDATA_REQ, strvp2wq(SOTOV(so)), mp, 0); 4404 #endif /* C2_AUDIT */ 4405 4406 error = kstrputmsg(SOTOV(so), mp, uiop, len, 0, MSG_BAND, 0); 4407 done: 4408 #ifdef SOCK_DEBUG 4409 if (error != 0) { 4410 eprintsoline(so, error); 4411 } 4412 #endif /* SOCK_DEBUG */ 4413 return (error); 4414 } 4415 4416 int 4417 sostream_direct(struct sonode *so, struct uio *uiop, mblk_t *mp, cred_t *cr) 4418 { 4419 struct stdata *stp = SOTOV(so)->v_stream; 4420 ssize_t iosize, rmax, maxblk; 4421 queue_t *tcp_wq = stp->sd_wrq->q_next; 4422 mblk_t *newmp; 4423 int error = 0, wflag = 0; 4424 4425 ASSERT(so->so_mode & SM_BYTESTREAM); 4426 ASSERT(SOTOV(so)->v_type == VSOCK); 4427 4428 if (stp->sd_sidp != NULL && 4429 (error = straccess(stp, JCWRITE)) != 0) 4430 return (error); 4431 4432 if (uiop == NULL) { 4433 /* 4434 * kstrwritemp() should have checked sd_flag and 4435 * flow-control before coming here. If we end up 4436 * here it means that we can simply pass down the 4437 * data to tcp. 4438 */ 4439 ASSERT(mp != NULL); 4440 if (stp->sd_wputdatafunc != NULL) { 4441 newmp = (stp->sd_wputdatafunc)(SOTOV(so), mp, NULL, 4442 NULL, NULL, NULL); 4443 if (newmp == NULL) { 4444 /* The caller will free mp */ 4445 return (ECOMM); 4446 } 4447 mp = newmp; 4448 } 4449 tcp_wput(tcp_wq, mp); 4450 return (0); 4451 } 4452 4453 /* Fallback to strwrite() to do proper error handling */ 4454 if (stp->sd_flag & (STWRERR|STRHUP|STPLEX|STRDELIM|OLDNDELAY)) 4455 return (strwrite(SOTOV(so), uiop, cr)); 4456 4457 rmax = stp->sd_qn_maxpsz; 4458 ASSERT(rmax >= 0 || rmax == INFPSZ); 4459 if (rmax == 0 || uiop->uio_resid <= 0) 4460 return (0); 4461 4462 if (rmax == INFPSZ) 4463 rmax = uiop->uio_resid; 4464 4465 maxblk = stp->sd_maxblk; 4466 4467 for (;;) { 4468 iosize = MIN(uiop->uio_resid, rmax); 4469 4470 mp = mcopyinuio(stp, uiop, iosize, maxblk, &error); 4471 if (mp == NULL) { 4472 /* 4473 * Fallback to strwrite() for ENOMEM; if this 4474 * is our first time in this routine and the uio 4475 * vector has not been modified, we will end up 4476 * calling strwrite() without any flag set. 4477 */ 4478 if (error == ENOMEM) 4479 goto slow_send; 4480 else 4481 return (error); 4482 } 4483 ASSERT(uiop->uio_resid >= 0); 4484 /* 4485 * If mp is non-NULL and ENOMEM is set, it means that 4486 * mcopyinuio() was able to break down some of the user 4487 * data into one or more mblks. Send the partial data 4488 * to tcp and let the rest be handled in strwrite(). 4489 */ 4490 ASSERT(error == 0 || error == ENOMEM); 4491 if (stp->sd_wputdatafunc != NULL) { 4492 newmp = (stp->sd_wputdatafunc)(SOTOV(so), mp, NULL, 4493 NULL, NULL, NULL); 4494 if (newmp == NULL) { 4495 /* The caller will free mp */ 4496 return (ECOMM); 4497 } 4498 mp = newmp; 4499 } 4500 tcp_wput(tcp_wq, mp); 4501 4502 wflag |= NOINTR; 4503 4504 if (uiop->uio_resid == 0) { /* No more data; we're done */ 4505 ASSERT(error == 0); 4506 break; 4507 } else if (error == ENOMEM || !canput(tcp_wq) || (stp->sd_flag & 4508 (STWRERR|STRHUP|STPLEX|STRDELIM|OLDNDELAY))) { 4509 slow_send: 4510 /* 4511 * We were able to send down partial data using 4512 * the direct call interface, but are now relying 4513 * on strwrite() to handle the non-fastpath cases. 4514 * If the socket is blocking we will sleep in 4515 * strwaitq() until write is permitted, otherwise, 4516 * we will need to return the amount of bytes 4517 * written so far back to the app. 
This is the 4518 * reason why we pass the NOINTR flag to strwrite() 4519 * for a non-blocking socket, because we don't want 4520 * to return EAGAIN when a portion of the user data 4521 * has actually been sent down. 4522 */ 4523 return (strwrite_common(SOTOV(so), uiop, cr, wflag)); 4524 } 4525 } 4526 return (0); 4527 } 4528 4529 /* 4530 * Update so_faddr by asking the transport (unless AF_UNIX). 4531 */ 4532 int 4533 sotpi_getpeername(struct sonode *so) 4534 { 4535 struct strbuf strbuf; 4536 int error = 0, res; 4537 void *addr; 4538 t_uscalar_t addrlen; 4539 k_sigset_t smask; 4540 4541 dprintso(so, 1, ("sotpi_getpeername(%p) %s\n", 4542 so, pr_state(so->so_state, so->so_mode))); 4543 4544 mutex_enter(&so->so_lock); 4545 so_lock_single(so); /* Set SOLOCKED */ 4546 if (!(so->so_state & SS_ISCONNECTED)) { 4547 error = ENOTCONN; 4548 goto done; 4549 } 4550 /* Added this check for X/Open */ 4551 if ((so->so_state & SS_CANTSENDMORE) && !xnet_skip_checks) { 4552 error = EINVAL; 4553 if (xnet_check_print) { 4554 printf("sockfs: X/Open getpeername check => EINVAL\n"); 4555 } 4556 goto done; 4557 } 4558 #ifdef DEBUG 4559 dprintso(so, 1, ("sotpi_getpeername (local): %s\n", 4560 pr_addr(so->so_family, so->so_faddr_sa, 4561 (t_uscalar_t)so->so_faddr_len))); 4562 #endif /* DEBUG */ 4563 4564 if (so->so_family == AF_UNIX) { 4565 /* Transport has different name space - return local info */ 4566 error = 0; 4567 goto done; 4568 } 4569 4570 ASSERT(so->so_faddr_sa); 4571 /* Allocate local buffer to use with ioctl */ 4572 addrlen = (t_uscalar_t)so->so_faddr_maxlen; 4573 mutex_exit(&so->so_lock); 4574 addr = kmem_alloc(addrlen, KM_SLEEP); 4575 4576 /* 4577 * Issue TI_GETPEERNAME with signals masked. 4578 * Put the result in so_faddr_sa so that getpeername works after 4579 * a shutdown(output). 4580 * If the ioctl fails (e.g. due to an ECONNRESET) the error is reposted 4581 * back to the socket. 4582 */ 4583 strbuf.buf = addr; 4584 strbuf.maxlen = addrlen; 4585 strbuf.len = 0; 4586 4587 sigintr(&smask, 0); 4588 res = 0; 4589 ASSERT(CRED()); 4590 error = strioctl(SOTOV(so), TI_GETPEERNAME, (intptr_t)&strbuf, 4591 0, K_TO_K, CRED(), &res); 4592 sigunintr(&smask); 4593 4594 mutex_enter(&so->so_lock); 4595 /* 4596 * If the ioctl fails don't fail the getpeername and don't post the 4597 * error to so_error. Instead fall back on the previously recorded 4598 * so->so_faddr_sa. 4599 */ 4600 if (error) { 4601 /* 4602 * Various stream head errors can be returned to the ioctl. 4603 * However, it is impossible to determine which of 4604 * these are really socket level errors that were incorrectly 4605 * consumed by the ioctl. Thus this code silently ignores the 4606 * error - the code explicitly does not reinstate the error 4607 * using soseterror(). 4608 * Experiments have shown that at least this set of 4609 * errors is reported and should not be reinstated on the 4610 * socket: 4611 * EINVAL E.g. if an I_LINK was in effect when 4612 * getpeername was called. 4613 * EPIPE The ioctl error semantics prefer the write 4614 * side error over the read side error. 4615 * ENOTCONN The transport just got disconnected but 4616 * sockfs had not yet seen the T_DISCON_IND 4617 * when issuing the ioctl. 
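 *
 * Illustration (not part of the original file): the user-visible
 * effect of ignoring these ioctl errors is that getpeername() keeps
 * answering from the cached so_faddr_sa.  A minimal sketch, assuming
 * s is a connected TCP socket:
 *
 *	struct sockaddr_storage ss;
 *	socklen_t sslen = sizeof (ss);
 *	int res = getpeername(s, (struct sockaddr *)&ss, &sslen);
 *
 * res is 0 and ss holds the cached peer address even when the
 * transport disconnected just before the ioctl (the ENOTCONN case
 * above); only a socket that is no longer SS_ISCONNECTED fails, with
 * ENOTCONN, from the check at the top of this routine.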
4618 */ 4619 error = 0; 4620 } else if (res == 0 && strbuf.len > 0 && 4621 (so->so_state & SS_ISCONNECTED)) { 4622 ASSERT(strbuf.len <= (int)so->so_faddr_maxlen); 4623 so->so_faddr_len = (socklen_t)strbuf.len; 4624 bcopy(addr, so->so_faddr_sa, so->so_faddr_len); 4625 so->so_state |= SS_FADDR_VALID; 4626 } 4627 kmem_free(addr, addrlen); 4628 #ifdef DEBUG 4629 dprintso(so, 1, ("sotpi_getpeername (tp): %s\n", 4630 pr_addr(so->so_family, so->so_faddr_sa, 4631 (t_uscalar_t)so->so_faddr_len))); 4632 #endif /* DEBUG */ 4633 done: 4634 so_unlock_single(so, SOLOCKED); 4635 mutex_exit(&so->so_lock); 4636 return (error); 4637 } 4638 4639 /* 4640 * Update so_laddr by asking the transport (unless AF_UNIX). 4641 */ 4642 int 4643 sotpi_getsockname(struct sonode *so) 4644 { 4645 struct strbuf strbuf; 4646 int error = 0, res; 4647 void *addr; 4648 t_uscalar_t addrlen; 4649 k_sigset_t smask; 4650 4651 dprintso(so, 1, ("sotpi_getsockname(%p) %s\n", 4652 so, pr_state(so->so_state, so->so_mode))); 4653 4654 mutex_enter(&so->so_lock); 4655 so_lock_single(so); /* Set SOLOCKED */ 4656 if (!(so->so_state & SS_ISBOUND) && so->so_family != AF_UNIX) { 4657 /* Return an all zero address except for the family */ 4658 if (so->so_family == AF_INET) 4659 so->so_laddr_len = (socklen_t)sizeof (sin_t); 4660 else if (so->so_family == AF_INET6) 4661 so->so_laddr_len = (socklen_t)sizeof (sin6_t); 4662 ASSERT(so->so_laddr_len <= so->so_laddr_maxlen); 4663 bzero(so->so_laddr_sa, so->so_laddr_len); 4664 /* 4665 * Cannot assume there is a sa_family for all 4666 * protocol families. 4667 */ 4668 if (so->so_family == AF_INET || so->so_family == AF_INET6) 4669 so->so_laddr_sa->sa_family = so->so_family; 4670 } 4671 #ifdef DEBUG 4672 dprintso(so, 1, ("sotpi_getsockname (local): %s\n", 4673 pr_addr(so->so_family, so->so_laddr_sa, 4674 (t_uscalar_t)so->so_laddr_len))); 4675 #endif /* DEBUG */ 4676 if (so->so_family == AF_UNIX) { 4677 /* Transport has different name space - return local info */ 4678 error = 0; 4679 goto done; 4680 } 4681 if (!(so->so_state & SS_ISBOUND)) { 4682 /* If not bound, then nothing to return. */ 4683 error = 0; 4684 goto done; 4685 } 4686 /* Allocate local buffer to use with ioctl */ 4687 addrlen = (t_uscalar_t)so->so_laddr_maxlen; 4688 mutex_exit(&so->so_lock); 4689 addr = kmem_alloc(addrlen, KM_SLEEP); 4690 4691 /* 4692 * Issue TI_GETMYNAME with signals masked. 4693 * Put the result in so_laddr_sa so that getsockname works after 4694 * a shutdown(output). 4695 * If the ioctl fails (e.g. due to an ECONNRESET) the error is reposted 4696 * back to the socket. 4697 */ 4698 strbuf.buf = addr; 4699 strbuf.maxlen = addrlen; 4700 strbuf.len = 0; 4701 4702 sigintr(&smask, 0); 4703 res = 0; 4704 ASSERT(CRED()); 4705 error = strioctl(SOTOV(so), TI_GETMYNAME, (intptr_t)&strbuf, 4706 0, K_TO_K, CRED(), &res); 4707 sigunintr(&smask); 4708 4709 mutex_enter(&so->so_lock); 4710 /* 4711 * If the ioctl fails don't fail the getsockname and don't post the 4712 * error to so_error. Instead fall back on the previously recorded 4713 * so->so_laddr_sa. 4714 */ 4715 if (error) { 4716 /* 4717 * Various stream head errors can be returned to the ioctl. 4718 * However, it is impossible to determine which of 4719 * these are really socket level errors that were incorrectly 4720 * consumed by the ioctl. Thus this code silently ignores the 4721 * error - the code explicitly does not reinstate the error 4722 * using soseterror(). 
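 *
 * Illustration (not part of the original file): the unbound case
 * handled at the top of this routine, seen from user level on a
 * freshly created socket:
 *
 *	int s = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in sin;
 *	socklen_t len = sizeof (sin);
 *
 *	(void) getsockname(s, (struct sockaddr *)&sin, &len);
 *
 * sin.sin_family is AF_INET while the address and port are all zero;
 * after a bind() (or the implicit bind done on connect()) the same
 * call returns the transport-assigned local address and port via the
 * TI_GETMYNAME ioctl above.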
4723 * Experiments have shown that at least this set of 4724 * errors is reported and should not be reinstated on the 4725 * socket: 4726 * EINVAL E.g. if an I_LINK was in effect when 4727 * getsockname was called. 4728 * EPIPE The ioctl error semantics prefer the write 4729 * side error over the read side error. 4730 */ 4731 error = 0; 4732 } else if (res == 0 && strbuf.len > 0 && 4733 (so->so_state & SS_ISBOUND)) { 4734 ASSERT(strbuf.len <= (int)so->so_laddr_maxlen); 4735 so->so_laddr_len = (socklen_t)strbuf.len; 4736 bcopy(addr, so->so_laddr_sa, so->so_laddr_len); 4737 so->so_state |= SS_LADDR_VALID; 4738 } 4739 kmem_free(addr, addrlen); 4740 #ifdef DEBUG 4741 dprintso(so, 1, ("sotpi_getsockname (tp): %s\n", 4742 pr_addr(so->so_family, so->so_laddr_sa, 4743 (t_uscalar_t)so->so_laddr_len))); 4744 #endif /* DEBUG */ 4745 done: 4746 so_unlock_single(so, SOLOCKED); 4747 mutex_exit(&so->so_lock); 4748 return (error); 4749 } 4750 4751 /* 4752 * Get socket options. Some SOL_SOCKET options are handled directly 4753 * by sockfs while others use the value recorded in the sonode as a 4754 * fallback should the T_SVR4_OPTMGMT_REQ fail. 4755 * 4756 * On return, at most *optlenp bytes are copied to optval. 4757 */ 4758 int 4759 sotpi_getsockopt(struct sonode *so, int level, int option_name, 4760 void *optval, socklen_t *optlenp, int flags) 4761 { 4762 struct T_optmgmt_req optmgmt_req; 4763 struct T_optmgmt_ack *optmgmt_ack; 4764 struct opthdr oh; 4765 struct opthdr *opt_res; 4766 mblk_t *mp = NULL; 4767 int error = 0; 4768 void *option = NULL; /* Set if fallback value */ 4769 t_uscalar_t maxlen = *optlenp; 4770 t_uscalar_t len; 4771 uint32_t value; 4772 4773 dprintso(so, 1, ("sotpi_getsockopt(%p, 0x%x, 0x%x, %p, %p) %s\n", 4774 so, level, option_name, optval, optlenp, 4775 pr_state(so->so_state, so->so_mode))); 4776 4777 mutex_enter(&so->so_lock); 4778 so_lock_single(so); /* Set SOLOCKED */ 4779 4780 /* 4781 * Check for SOL_SOCKET options. 4782 * Certain SOL_SOCKET options are returned directly whereas 4783 * others only provide a default (fallback) value should 4784 * the T_SVR4_OPTMGMT_REQ fail. 
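 *
 * Illustration (not part of the original file): SO_TYPE, SO_ERROR and
 * SO_ACCEPTCONN below never generate a T_SVR4_OPTMGMT_REQ; they are
 * answered from the sonode.  The classic use is collecting the result
 * of a non-blocking connect(), sketched here with the poll()/select()
 * step omitted:
 *
 *	int err = 0;
 *	socklen_t len = sizeof (err);
 *
 *	(void) getsockopt(s, SOL_SOCKET, SO_ERROR, &err, &len);
 *
 * err now holds (and, per the usual SO_ERROR semantics, clears) any
 * deferred error, which maps onto the sogeterr() call in the SO_ERROR
 * case below.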
4785 */ 4786 if (level == SOL_SOCKET) { 4787 /* Check parameters */ 4788 switch (option_name) { 4789 case SO_TYPE: 4790 case SO_ERROR: 4791 case SO_DEBUG: 4792 case SO_ACCEPTCONN: 4793 case SO_REUSEADDR: 4794 case SO_KEEPALIVE: 4795 case SO_DONTROUTE: 4796 case SO_BROADCAST: 4797 case SO_USELOOPBACK: 4798 case SO_OOBINLINE: 4799 case SO_SNDBUF: 4800 case SO_RCVBUF: 4801 #ifdef notyet 4802 case SO_SNDLOWAT: 4803 case SO_RCVLOWAT: 4804 case SO_SNDTIMEO: 4805 case SO_RCVTIMEO: 4806 #endif /* notyet */ 4807 case SO_DGRAM_ERRIND: 4808 if (maxlen < (t_uscalar_t)sizeof (int32_t)) { 4809 error = EINVAL; 4810 eprintsoline(so, error); 4811 goto done2; 4812 } 4813 break; 4814 case SO_LINGER: 4815 if (maxlen < (t_uscalar_t)sizeof (struct linger)) { 4816 error = EINVAL; 4817 eprintsoline(so, error); 4818 goto done2; 4819 } 4820 break; 4821 } 4822 4823 len = (t_uscalar_t)sizeof (uint32_t); /* Default */ 4824 4825 switch (option_name) { 4826 case SO_TYPE: 4827 value = so->so_type; 4828 option = &value; 4829 goto copyout; /* No need to issue T_SVR4_OPTMGMT_REQ */ 4830 4831 case SO_ERROR: 4832 value = sogeterr(so); 4833 option = &value; 4834 goto copyout; /* No need to issue T_SVR4_OPTMGMT_REQ */ 4835 4836 case SO_ACCEPTCONN: 4837 if (so->so_state & SS_ACCEPTCONN) 4838 value = SO_ACCEPTCONN; 4839 else 4840 value = 0; 4841 #ifdef DEBUG 4842 if (value) { 4843 dprintso(so, 1, 4844 ("sotpi_getsockopt: 0x%x is set\n", 4845 option_name)); 4846 } else { 4847 dprintso(so, 1, 4848 ("sotpi_getsockopt: 0x%x not set\n", 4849 option_name)); 4850 } 4851 #endif /* DEBUG */ 4852 option = &value; 4853 goto copyout; /* No need to issue T_SVR4_OPTMGMT_REQ */ 4854 4855 case SO_DEBUG: 4856 case SO_REUSEADDR: 4857 case SO_KEEPALIVE: 4858 case SO_DONTROUTE: 4859 case SO_BROADCAST: 4860 case SO_USELOOPBACK: 4861 case SO_OOBINLINE: 4862 case SO_DGRAM_ERRIND: 4863 value = (so->so_options & option_name); 4864 #ifdef DEBUG 4865 if (value) { 4866 dprintso(so, 1, 4867 ("sotpi_getsockopt: 0x%x is set\n", 4868 option_name)); 4869 } else { 4870 dprintso(so, 1, 4871 ("sotpi_getsockopt: 0x%x not set\n", 4872 option_name)); 4873 } 4874 #endif /* DEBUG */ 4875 option = &value; 4876 goto copyout; /* No need to issue T_SVR4_OPTMGMT_REQ */ 4877 4878 /* 4879 * The following options are only returned by sockfs when the 4880 * T_SVR4_OPTMGMT_REQ fails. 4881 */ 4882 case SO_LINGER: 4883 option = &so->so_linger; 4884 len = (t_uscalar_t)sizeof (struct linger); 4885 break; 4886 case SO_SNDBUF: { 4887 ssize_t lvalue; 4888 4889 /* 4890 * If the option has not been set then get a default 4891 * value from the read queue. This value is 4892 * returned if the transport fails 4893 * the T_SVR4_OPTMGMT_REQ. 4894 */ 4895 lvalue = so->so_sndbuf; 4896 if (lvalue == 0) { 4897 mutex_exit(&so->so_lock); 4898 (void) strqget(strvp2wq(SOTOV(so))->q_next, 4899 QHIWAT, 0, &lvalue); 4900 mutex_enter(&so->so_lock); 4901 dprintso(so, 1, 4902 ("got SO_SNDBUF %ld from q\n", lvalue)); 4903 } 4904 value = (int)lvalue; 4905 option = &value; 4906 len = (t_uscalar_t)sizeof (so->so_sndbuf); 4907 break; 4908 } 4909 case SO_RCVBUF: { 4910 ssize_t lvalue; 4911 4912 /* 4913 * If the option has not been set then get a default 4914 * value from the read queue. This value is 4915 * returned if the transport fails 4916 * the T_SVR4_OPTMGMT_REQ. 4917 * 4918 * XXX If SO_RCVBUF has been set and this is an 4919 * XPG 4.2 application then do not ask the transport 4920 * since the transport might adjust the value and not 4921 * return exactly what was set by the application. 
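 *
 * Illustration (not part of the original file): why the XPG 4.2
 * distinction above matters, sketched from user level with an
 * arbitrary example size:
 *
 *	int rcv = 48 * 1024;
 *	socklen_t len = sizeof (rcv);
 *
 *	(void) setsockopt(s, SOL_SOCKET, SO_RCVBUF, &rcv, sizeof (rcv));
 *	(void) getsockopt(s, SOL_SOCKET, SO_RCVBUF, &rcv, &len);
 *
 * For an XPG 4.2 application rcv reads back exactly as set (sockfs
 * answers from so_rcvbuf); otherwise the transport is asked and may
 * report a rounded or clamped value.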
4922 * For non-XPG 4.2 application we return the value 4923 * that the transport is actually using. 4924 */ 4925 lvalue = so->so_rcvbuf; 4926 if (lvalue == 0) { 4927 mutex_exit(&so->so_lock); 4928 (void) strqget(RD(strvp2wq(SOTOV(so))), 4929 QHIWAT, 0, &lvalue); 4930 mutex_enter(&so->so_lock); 4931 dprintso(so, 1, 4932 ("got SO_RCVBUF %ld from q\n", lvalue)); 4933 } else if (flags & _SOGETSOCKOPT_XPG4_2) { 4934 value = (int)lvalue; 4935 option = &value; 4936 goto copyout; /* skip asking transport */ 4937 } 4938 value = (int)lvalue; 4939 option = &value; 4940 len = (t_uscalar_t)sizeof (so->so_rcvbuf); 4941 break; 4942 } 4943 #ifdef notyet 4944 /* 4945 * We do not implement the semantics of these options 4946 * thus we shouldn't implement the options either. 4947 */ 4948 case SO_SNDLOWAT: 4949 value = so->so_sndlowat; 4950 option = &value; 4951 break; 4952 case SO_RCVLOWAT: 4953 value = so->so_rcvlowat; 4954 option = &value; 4955 break; 4956 case SO_SNDTIMEO: 4957 value = so->so_sndtimeo; 4958 option = &value; 4959 break; 4960 case SO_RCVTIMEO: 4961 value = so->so_rcvtimeo; 4962 option = &value; 4963 break; 4964 #endif /* notyet */ 4965 } 4966 } 4967 4968 mutex_exit(&so->so_lock); 4969 4970 /* Send request */ 4971 optmgmt_req.PRIM_type = T_SVR4_OPTMGMT_REQ; 4972 optmgmt_req.MGMT_flags = T_CHECK; 4973 optmgmt_req.OPT_length = (t_scalar_t)(sizeof (oh) + maxlen); 4974 optmgmt_req.OPT_offset = (t_scalar_t)sizeof (optmgmt_req); 4975 4976 oh.level = level; 4977 oh.name = option_name; 4978 oh.len = maxlen; 4979 4980 mp = soallocproto3(&optmgmt_req, sizeof (optmgmt_req), 4981 &oh, sizeof (oh), NULL, maxlen, 0, _ALLOC_SLEEP); 4982 /* Let option management work in the presence of data flow control */ 4983 error = kstrputmsg(SOTOV(so), mp, NULL, 0, 0, 4984 MSG_BAND|MSG_HOLDSIG|MSG_IGNERROR|MSG_IGNFLOW, 0); 4985 mp = NULL; 4986 mutex_enter(&so->so_lock); 4987 if (error) { 4988 eprintsoline(so, error); 4989 goto done2; 4990 } 4991 error = sowaitprim(so, T_SVR4_OPTMGMT_REQ, T_OPTMGMT_ACK, 4992 (t_uscalar_t)(sizeof (*optmgmt_ack) + sizeof (*opt_res)), &mp, 0); 4993 if (error) { 4994 if (option != NULL) { 4995 /* We have a fallback value */ 4996 error = 0; 4997 goto copyout; 4998 } 4999 eprintsoline(so, error); 5000 goto done2; 5001 } 5002 ASSERT(mp); 5003 optmgmt_ack = (struct T_optmgmt_ack *)mp->b_rptr; 5004 opt_res = (struct opthdr *)sogetoff(mp, optmgmt_ack->OPT_offset, 5005 optmgmt_ack->OPT_length, __TPI_ALIGN_SIZE); 5006 if (opt_res == NULL) { 5007 if (option != NULL) { 5008 /* We have a fallback value */ 5009 error = 0; 5010 goto copyout; 5011 } 5012 error = EPROTO; 5013 eprintsoline(so, error); 5014 goto done; 5015 } 5016 option = &opt_res[1]; 5017 5018 /* check to ensure that the option is within bounds */ 5019 if (((uintptr_t)option + opt_res->len < (uintptr_t)option) || 5020 (uintptr_t)option + opt_res->len > (uintptr_t)mp->b_wptr) { 5021 if (option != NULL) { 5022 /* We have a fallback value */ 5023 error = 0; 5024 goto copyout; 5025 } 5026 error = EPROTO; 5027 eprintsoline(so, error); 5028 goto done; 5029 } 5030 5031 len = opt_res->len; 5032 5033 copyout: { 5034 t_uscalar_t size = MIN(len, maxlen); 5035 bcopy(option, optval, size); 5036 bcopy(&size, optlenp, sizeof (size)); 5037 } 5038 done: 5039 freemsg(mp); 5040 done2: 5041 so_unlock_single(so, SOLOCKED); 5042 mutex_exit(&so->so_lock); 5043 return (error); 5044 } 5045 5046 /* 5047 * Set socket options. All options are passed down in a T_SVR4_OPTMGMT_REQ. 5048 * SOL_SOCKET options are also recorded in the sonode. 
A setsockopt for 5049 * SOL_SOCKET options will not fail just because the T_SVR4_OPTMGMT_REQ fails - 5050 * setsockopt has to work even if the transport does not support the option. 5051 */ 5052 int 5053 sotpi_setsockopt(struct sonode *so, int level, int option_name, 5054 const void *optval, t_uscalar_t optlen) 5055 { 5056 struct T_optmgmt_req optmgmt_req; 5057 struct opthdr oh; 5058 mblk_t *mp; 5059 int error = 0; 5060 boolean_t handled = B_FALSE; 5061 5062 dprintso(so, 1, ("sotpi_setsockopt(%p, 0x%x, 0x%x, %p, %d) %s\n", 5063 so, level, option_name, optval, optlen, 5064 pr_state(so->so_state, so->so_mode))); 5065 5066 5067 /* X/Open requires this check */ 5068 if ((so->so_state & SS_CANTSENDMORE) && !xnet_skip_checks) { 5069 if (xnet_check_print) 5070 printf("sockfs: X/Open setsockopt check => EINVAL\n"); 5071 return (EINVAL); 5072 } 5073 5074 /* Caller allocates aligned optval, or passes null */ 5075 ASSERT(((uintptr_t)optval & (sizeof (t_scalar_t) - 1)) == 0); 5076 /* If optval is null optlen is 0, and vice-versa */ 5077 ASSERT(optval != NULL || optlen == 0); 5078 ASSERT(optlen != 0 || optval == NULL); 5079 5080 mutex_enter(&so->so_lock); 5081 so_lock_single(so); /* Set SOLOCKED */ 5082 mutex_exit(&so->so_lock); 5083 5084 /* 5085 * For SOCKET or TCP level options, try to set it here itself 5086 * provided socket has not been popped and we know the tcp 5087 * structure (stored in so_priv). 5088 */ 5089 if ((level == SOL_SOCKET || level == IPPROTO_TCP) && 5090 (so->so_family == AF_INET || so->so_family == AF_INET6) && 5091 (so->so_version == SOV_SOCKSTREAM) && (so->so_priv != NULL)) { 5092 tcp_t *tcp = so->so_priv; 5093 boolean_t onoff; 5094 5095 #define intvalue (*(int32_t *)optval) 5096 5097 switch (level) { 5098 case SOL_SOCKET: 5099 switch (option_name) { /* Check length param */ 5100 case SO_DEBUG: 5101 case SO_REUSEADDR: 5102 case SO_DONTROUTE: 5103 case SO_BROADCAST: 5104 case SO_USELOOPBACK: 5105 case SO_OOBINLINE: 5106 case SO_DGRAM_ERRIND: 5107 if (optlen != (t_uscalar_t)sizeof (int32_t)) { 5108 error = EINVAL; 5109 eprintsoline(so, error); 5110 mutex_enter(&so->so_lock); 5111 goto done2; 5112 } 5113 ASSERT(optval); 5114 onoff = intvalue != 0; 5115 handled = B_TRUE; 5116 break; 5117 case SO_LINGER: 5118 if (optlen != 5119 (t_uscalar_t)sizeof (struct linger)) { 5120 error = EINVAL; 5121 eprintsoline(so, error); 5122 mutex_enter(&so->so_lock); 5123 goto done2; 5124 } 5125 ASSERT(optval); 5126 handled = B_TRUE; 5127 break; 5128 } 5129 5130 switch (option_name) { /* Do actions */ 5131 case SO_LINGER: { 5132 struct linger *lgr = (struct linger *)optval; 5133 5134 if (lgr->l_onoff) { 5135 tcp->tcp_linger = 1; 5136 tcp->tcp_lingertime = lgr->l_linger; 5137 so->so_linger.l_onoff = SO_LINGER; 5138 so->so_options |= SO_LINGER; 5139 } else { 5140 tcp->tcp_linger = 0; 5141 tcp->tcp_lingertime = 0; 5142 so->so_linger.l_onoff = 0; 5143 so->so_options &= ~SO_LINGER; 5144 } 5145 so->so_linger.l_linger = lgr->l_linger; 5146 handled = B_TRUE; 5147 break; 5148 } 5149 case SO_DEBUG: 5150 tcp->tcp_debug = onoff; 5151 #ifdef SOCK_TEST 5152 if (intvalue & 2) 5153 sock_test_timelimit = 10 * hz; 5154 else 5155 sock_test_timelimit = 0; 5156 5157 if (intvalue & 4) 5158 do_useracc = 0; 5159 else 5160 do_useracc = 1; 5161 #endif /* SOCK_TEST */ 5162 break; 5163 case SO_DONTROUTE: 5164 /* 5165 * SO_DONTROUTE, SO_USELOOPBACK and 5166 * SO_BROADCAST are only of interest to IP. 5167 * We track them here only so 5168 * that we can report their current value. 
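 *
 * Illustration (not part of the original file): "report their current
 * value" simply means a later getsockopt() sees what was set, e.g.
 *
 *	int on = 1, val = 0;
 *	socklen_t len = sizeof (val);
 *
 *	(void) setsockopt(s, SOL_SOCKET, SO_DONTROUTE, &on, sizeof (on));
 *	(void) getsockopt(s, SOL_SOCKET, SO_DONTROUTE, &val, &len);
 *
 * leaves val non-zero, answered from so_options (and tracked in
 * tcp_dontroute on this fast path), while the actual routing behavior
 * is implemented by IP.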
5169 */ 5170 tcp->tcp_dontroute = onoff; 5171 if (onoff) 5172 so->so_options |= option_name; 5173 else 5174 so->so_options &= ~option_name; 5175 break; 5176 case SO_USELOOPBACK: 5177 tcp->tcp_useloopback = onoff; 5178 if (onoff) 5179 so->so_options |= option_name; 5180 else 5181 so->so_options &= ~option_name; 5182 break; 5183 case SO_BROADCAST: 5184 tcp->tcp_broadcast = onoff; 5185 if (onoff) 5186 so->so_options |= option_name; 5187 else 5188 so->so_options &= ~option_name; 5189 break; 5190 case SO_REUSEADDR: 5191 tcp->tcp_reuseaddr = onoff; 5192 if (onoff) 5193 so->so_options |= option_name; 5194 else 5195 so->so_options &= ~option_name; 5196 break; 5197 case SO_OOBINLINE: 5198 tcp->tcp_oobinline = onoff; 5199 if (onoff) 5200 so->so_options |= option_name; 5201 else 5202 so->so_options &= ~option_name; 5203 break; 5204 case SO_DGRAM_ERRIND: 5205 tcp->tcp_dgram_errind = onoff; 5206 if (onoff) 5207 so->so_options |= option_name; 5208 else 5209 so->so_options &= ~option_name; 5210 break; 5211 } 5212 break; 5213 case IPPROTO_TCP: 5214 switch (option_name) { 5215 case TCP_NODELAY: 5216 if (optlen != (t_uscalar_t)sizeof (int32_t)) { 5217 error = EINVAL; 5218 eprintsoline(so, error); 5219 mutex_enter(&so->so_lock); 5220 goto done2; 5221 } 5222 ASSERT(optval); 5223 tcp->tcp_naglim = intvalue ? 1 : tcp->tcp_mss; 5224 handled = B_TRUE; 5225 break; 5226 } 5227 break; 5228 default: 5229 handled = B_FALSE; 5230 break; 5231 } 5232 } 5233 5234 if (handled) { 5235 mutex_enter(&so->so_lock); 5236 goto done2; 5237 } 5238 5239 optmgmt_req.PRIM_type = T_SVR4_OPTMGMT_REQ; 5240 optmgmt_req.MGMT_flags = T_NEGOTIATE; 5241 optmgmt_req.OPT_length = (t_scalar_t)sizeof (oh) + optlen; 5242 optmgmt_req.OPT_offset = (t_scalar_t)sizeof (optmgmt_req); 5243 5244 oh.level = level; 5245 oh.name = option_name; 5246 oh.len = optlen; 5247 5248 mp = soallocproto3(&optmgmt_req, sizeof (optmgmt_req), 5249 &oh, sizeof (oh), optval, optlen, 0, _ALLOC_SLEEP); 5250 /* Let option management work in the presence of data flow control */ 5251 error = kstrputmsg(SOTOV(so), mp, NULL, 0, 0, 5252 MSG_BAND|MSG_HOLDSIG|MSG_IGNERROR|MSG_IGNFLOW, 0); 5253 mp = NULL; 5254 mutex_enter(&so->so_lock); 5255 if (error) { 5256 eprintsoline(so, error); 5257 goto done; 5258 } 5259 error = sowaitprim(so, T_SVR4_OPTMGMT_REQ, T_OPTMGMT_ACK, 5260 (t_uscalar_t)sizeof (struct T_optmgmt_ack), &mp, 0); 5261 if (error) { 5262 eprintsoline(so, error); 5263 goto done; 5264 } 5265 ASSERT(mp); 5266 /* No need to verify T_optmgmt_ack */ 5267 freemsg(mp); 5268 done: 5269 /* 5270 * Check for SOL_SOCKET options and record their values. 5271 * If we know about a SOL_SOCKET parameter and the transport 5272 * failed it with TBADOPT or TOUTSTATE (i.e. ENOPROTOOPT or 5273 * EPROTO) we let the setsockopt succeed. 
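 *
 * Illustration (not part of the original file): the IPPROTO_TCP fast
 * path above means that a common call such as
 *
 *	int on = 1;
 *
 *	(void) setsockopt(s, IPPROTO_TCP, TCP_NODELAY, &on, sizeof (on));
 *
 * is satisfied by setting tcp_naglim directly (1 to disable Nagle,
 * tcp_mss to restore it) without building a T_SVR4_OPTMGMT_REQ,
 * provided the socket is still SOV_SOCKSTREAM and so_priv is set.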
5274 */ 5275 if (level == SOL_SOCKET) { 5276 /* Check parameters */ 5277 switch (option_name) { 5278 case SO_DEBUG: 5279 case SO_REUSEADDR: 5280 case SO_KEEPALIVE: 5281 case SO_DONTROUTE: 5282 case SO_BROADCAST: 5283 case SO_USELOOPBACK: 5284 case SO_OOBINLINE: 5285 case SO_SNDBUF: 5286 case SO_RCVBUF: 5287 #ifdef notyet 5288 case SO_SNDLOWAT: 5289 case SO_RCVLOWAT: 5290 case SO_SNDTIMEO: 5291 case SO_RCVTIMEO: 5292 #endif /* notyet */ 5293 case SO_DGRAM_ERRIND: 5294 if (optlen != (t_uscalar_t)sizeof (int32_t)) { 5295 error = EINVAL; 5296 eprintsoline(so, error); 5297 goto done2; 5298 } 5299 ASSERT(optval); 5300 handled = B_TRUE; 5301 break; 5302 case SO_LINGER: 5303 if (optlen != (t_uscalar_t)sizeof (struct linger)) { 5304 error = EINVAL; 5305 eprintsoline(so, error); 5306 goto done2; 5307 } 5308 ASSERT(optval); 5309 handled = B_TRUE; 5310 break; 5311 } 5312 5313 #define intvalue (*(int32_t *)optval) 5314 5315 switch (option_name) { 5316 case SO_TYPE: 5317 case SO_ERROR: 5318 case SO_ACCEPTCONN: 5319 /* Can't be set */ 5320 error = ENOPROTOOPT; 5321 goto done2; 5322 case SO_LINGER: { 5323 struct linger *l = (struct linger *)optval; 5324 5325 so->so_linger.l_linger = l->l_linger; 5326 if (l->l_onoff) { 5327 so->so_linger.l_onoff = SO_LINGER; 5328 so->so_options |= SO_LINGER; 5329 } else { 5330 so->so_linger.l_onoff = 0; 5331 so->so_options &= ~SO_LINGER; 5332 } 5333 break; 5334 } 5335 5336 case SO_DEBUG: 5337 #ifdef SOCK_TEST 5338 if (intvalue & 2) 5339 sock_test_timelimit = 10 * hz; 5340 else 5341 sock_test_timelimit = 0; 5342 5343 if (intvalue & 4) 5344 do_useracc = 0; 5345 else 5346 do_useracc = 1; 5347 #endif /* SOCK_TEST */ 5348 /* FALLTHRU */ 5349 case SO_REUSEADDR: 5350 case SO_KEEPALIVE: 5351 case SO_DONTROUTE: 5352 case SO_BROADCAST: 5353 case SO_USELOOPBACK: 5354 case SO_OOBINLINE: 5355 case SO_DGRAM_ERRIND: 5356 if (intvalue != 0) { 5357 dprintso(so, 1, 5358 ("sotpi_setsockopt: setting 0x%x\n", 5359 option_name)); 5360 so->so_options |= option_name; 5361 } else { 5362 dprintso(so, 1, 5363 ("sotpi_setsockopt: clearing 0x%x\n", 5364 option_name)); 5365 so->so_options &= ~option_name; 5366 } 5367 break; 5368 /* 5369 * The following options are only returned by us when the 5370 * T_SVR4_OPTMGMT_REQ fails. 5371 * XXX XPG 4.2 applications retrieve SO_RCVBUF from sockfs 5372 * since the transport might adjust the value and not 5373 * return exactly what was set by the application. 5374 */ 5375 case SO_SNDBUF: 5376 so->so_sndbuf = intvalue; 5377 break; 5378 case SO_RCVBUF: 5379 so->so_rcvbuf = intvalue; 5380 break; 5381 #ifdef notyet 5382 /* 5383 * We do not implement the semantics of these options 5384 * thus we shouldn't implement the options either. 5385 */ 5386 case SO_SNDLOWAT: 5387 so->so_sndlowat = intvalue; 5388 break; 5389 case SO_RCVLOWAT: 5390 so->so_rcvlowat = intvalue; 5391 break; 5392 case SO_SNDTIMEO: 5393 so->so_sndtimeo = intvalue; 5394 break; 5395 case SO_RCVTIMEO: 5396 so->so_rcvtimeo = intvalue; 5397 break; 5398 #endif /* notyet */ 5399 } 5400 #undef intvalue 5401 5402 if (error) { 5403 if ((error == ENOPROTOOPT || error == EPROTO || 5404 error == EINVAL) && handled) { 5405 dprintso(so, 1, 5406 ("setsockopt: ignoring error %d for 0x%x\n", 5407 error, option_name)); 5408 error = 0; 5409 } 5410 } 5411 } 5412 done2: 5413 ret: 5414 so_unlock_single(so, SOLOCKED); 5415 mutex_exit(&so->so_lock); 5416 return (error); 5417 } 5418
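/*
 * Illustration (not part of the original file): a small user-level
 * program exercising the sotpi_getsockopt()/sotpi_setsockopt() corner
 * cases above.  It is a sketch only; the option choice is an
 * arbitrary example.
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <errno.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		int s = socket(AF_INET, SOCK_DGRAM, 0);
 *		int type = 0, val = SOCK_STREAM;
 *		socklen_t len = sizeof (type);
 *
 *		if (setsockopt(s, SOL_SOCKET, SO_TYPE, &val,
 *		    sizeof (val)) == -1 && errno == ENOPROTOOPT)
 *			(void) printf("SO_TYPE cannot be set\n");
 *
 *		if (getsockopt(s, SOL_SOCKET, SO_TYPE, &type, &len) == 0)
 *			(void) printf("SO_TYPE is %d\n", type);
 *
 *		return (0);
 *	}
 *
 * The setsockopt() fails with ENOPROTOOPT ("Can't be set" above),
 * while the getsockopt() is answered directly from so_type without a
 * T_SVR4_OPTMGMT_REQ ever reaching the transport.
 */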