/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1995, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/cred.h>
#include <sys/kmem.h>
#include <sys/kmem_impl.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/debug.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/file.h>
#include <sys/open.h>
#include <sys/user.h>
#include <sys/termios.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/strsun.h>
#include <sys/suntpi.h>
#include <sys/ddi.h>
#include <sys/esunddi.h>
#include <sys/flock.h>
#include <sys/modctl.h>
#include <sys/vtrace.h>
#include <sys/cmn_err.h>
#include <sys/pathname.h>

#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <netinet/in.h>
#include <sys/un.h>
#include <sys/strsun.h>

#include <sys/tiuser.h>
#define _SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/timod.h>          /* TI_GETMYNAME, TI_GETPEERNAME */

#include <c2/audit.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/ip6.h>
#include <inet/tcp.h>
#include <inet/udp_impl.h>

#include <sys/zone.h>

#include <fs/sockfs/nl7c.h>
#include <fs/sockfs/nl7curi.h>

#include <fs/sockfs/sockcommon.h>
#include <fs/sockfs/socktpi.h>
#include <fs/sockfs/socktpi_impl.h>

/*
 * Possible failures when memory can't be allocated. The documented behavior:
 *
 *              5.5:                    4.X:            XNET:
 * accept:      ENOMEM/ENOSR/EINTR      - (EINTR)       ENOMEM/ENOBUFS/ENOSR/
 *                                                      EINTR
 *      (4.X does not document EINTR but returns it)
 * bind:        ENOSR                   -               ENOBUFS/ENOSR
 * connect:     EINTR                   EINTR           ENOBUFS/ENOSR/EINTR
 * getpeername: ENOMEM/ENOSR            ENOBUFS (-)     ENOBUFS/ENOSR
 * getsockname: ENOMEM/ENOSR            ENOBUFS (-)     ENOBUFS/ENOSR
 *      (4.X getpeername and getsockname do not fail in practice)
 * getsockopt:  ENOMEM/ENOSR            -               ENOBUFS/ENOSR
 * listen:      -                       -               ENOBUFS
 * recv:        ENOMEM/ENOSR/EINTR      EINTR           ENOBUFS/ENOMEM/ENOSR/
 *                                                      EINTR
 * send:        ENOMEM/ENOSR/EINTR      ENOBUFS/EINTR   ENOBUFS/ENOMEM/ENOSR/
 *                                                      EINTR
 * setsockopt:  ENOMEM/ENOSR            -               ENOBUFS/ENOMEM/ENOSR
 * shutdown:    ENOMEM/ENOSR            -               ENOBUFS/ENOSR
 * socket:      ENOMEM/ENOSR            ENOBUFS         ENOBUFS/ENOMEM/ENOSR
 * socketpair:  ENOMEM/ENOSR            -               ENOBUFS/ENOMEM/ENOSR
 *
 * Resolution.
 * When allocation fails:
 *      recv: return EINTR
 *      send: return EINTR
 *      connect, accept: EINTR
 *      bind, listen, shutdown (unbind, unix_close, disconnect): sleep
 *      socket, socketpair: ENOBUFS
 *      getpeername, getsockname: sleep
 *      getsockopt, setsockopt: sleep
 */

#ifdef SOCK_TEST
/*
 * Variables that make sockfs do something other than the standard TPI
 * for the AF_INET transports.
 *
 * solisten_tpi_tcp:
 *      TCP can handle a O_T_BIND_REQ with an increased backlog even though
 *      the transport is already bound. This is needed to avoid losing the
 *      port number should listen() do a T_UNBIND_REQ followed by a
 *      O_T_BIND_REQ.
 *
 * soconnect_tpi_udp:
 *      UDP and ICMP can handle a T_CONN_REQ.
 *      This is needed to make the sequence of connect(), getsockname()
 *      return the local IP address used to send packets to the connected-to
 *      destination.
 *
 * soconnect_tpi_tcp:
 *      TCP can handle a T_CONN_REQ without seeing a O_T_BIND_REQ.
 *      Set this to non-zero to send TPI conformant messages to TCP in this
 *      respect. This is a performance optimization.
 *
 * soaccept_tpi_tcp:
 *      TCP can handle a T_CONN_REQ without the acceptor being bound.
 *      This is a performance optimization that has been picked up in XTI.
 *
 * soaccept_tpi_multioptions:
 *      When inheriting SOL_SOCKET options from the listener to the accepting
 *      socket send them as a single message for AF_INET{,6}.
 */
int solisten_tpi_tcp = 0;
int soconnect_tpi_udp = 0;
int soconnect_tpi_tcp = 0;
int soaccept_tpi_tcp = 0;
int soaccept_tpi_multioptions = 1;
#else /* SOCK_TEST */
#define soconnect_tpi_tcp       0
#define soconnect_tpi_udp       0
#define solisten_tpi_tcp        0
#define soaccept_tpi_tcp        0
#define soaccept_tpi_multioptions       1
#endif /* SOCK_TEST */

#ifdef SOCK_TEST
extern int do_useracc;
extern clock_t sock_test_timelimit;
#endif /* SOCK_TEST */

/*
 * Some X/Open added checks might have to be backed out to keep SunOS 4.X
 * applications working. Turn on this flag to disable these checks.
 */
int xnet_skip_checks = 0;
int xnet_check_print = 0;
int xnet_truncate_print = 0;

static void sotpi_destroy(struct sonode *);
static struct sonode *sotpi_create(struct sockparams *, int, int, int, int,
    int, int *, cred_t *cr);

static boolean_t sotpi_info_create(struct sonode *, int);
static void sotpi_info_init(struct sonode *);
static void sotpi_info_fini(struct sonode *);
static void sotpi_info_destroy(struct sonode *);

/*
 * Do direct function call to the transport layer below; this would
 * also allow the transport to utilize read-side synchronous stream
 * interface if necessary. This is a /etc/system tunable that must
 * not be modified on a running system. By default this is enabled
 * for performance reasons and may be disabled for debugging purposes.
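 *
 * As an illustrative sketch only (not part of the original comment), the
 * direct interface could be disabled at boot time with an /etc/system
 * entry along the lines of:
 *
 *      set sockfs:socktpi_direct = 0
 *
 * so that the setting is in place before the first TPI socket is created.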
 */
boolean_t socktpi_direct = B_TRUE;

static struct kmem_cache *socktpi_cache, *socktpi_unix_cache;

extern void sigintr(k_sigset_t *, int);
extern void sigunintr(k_sigset_t *);

static int sotpi_unbind(struct sonode *, int);

/* TPI sockfs sonode operations */
int sotpi_init(struct sonode *, struct sonode *, struct cred *,
    int);
static int sotpi_accept(struct sonode *, int, struct cred *,
    struct sonode **);
static int sotpi_bind(struct sonode *, struct sockaddr *, socklen_t,
    int, struct cred *);
static int sotpi_listen(struct sonode *, int, struct cred *);
static int sotpi_connect(struct sonode *, struct sockaddr *,
    socklen_t, int, int, struct cred *);
extern int sotpi_recvmsg(struct sonode *, struct nmsghdr *,
    struct uio *, struct cred *);
static int sotpi_sendmsg(struct sonode *, struct nmsghdr *,
    struct uio *, struct cred *);
static int sotpi_sendmblk(struct sonode *, struct nmsghdr *, int,
    struct cred *, mblk_t **);
static int sosend_dgramcmsg(struct sonode *, struct sockaddr *, socklen_t,
    struct uio *, void *, t_uscalar_t, int);
static int sodgram_direct(struct sonode *, struct sockaddr *,
    socklen_t, struct uio *, int);
extern int sotpi_getpeername(struct sonode *, struct sockaddr *,
    socklen_t *, boolean_t, struct cred *);
static int sotpi_getsockname(struct sonode *, struct sockaddr *,
    socklen_t *, struct cred *);
static int sotpi_shutdown(struct sonode *, int, struct cred *);
extern int sotpi_getsockopt(struct sonode *, int, int, void *,
    socklen_t *, int, struct cred *);
extern int sotpi_setsockopt(struct sonode *, int, int, const void *,
    socklen_t, struct cred *);
static int sotpi_ioctl(struct sonode *, int, intptr_t, int, struct cred *,
    int32_t *);
static int socktpi_plumbioctl(struct vnode *, int, intptr_t, int,
    struct cred *, int32_t *);
static int sotpi_poll(struct sonode *, short, int, short *,
    struct pollhead **);
static int sotpi_close(struct sonode *, int, struct cred *);

static int i_sotpi_info_constructor(sotpi_info_t *);
static void i_sotpi_info_destructor(sotpi_info_t *);

sonodeops_t sotpi_sonodeops = {
    sotpi_init,             /* sop_init */
    sotpi_accept,           /* sop_accept */
    sotpi_bind,             /* sop_bind */
    sotpi_listen,           /* sop_listen */
    sotpi_connect,          /* sop_connect */
    sotpi_recvmsg,          /* sop_recvmsg */
    sotpi_sendmsg,          /* sop_sendmsg */
    sotpi_sendmblk,         /* sop_sendmblk */
    sotpi_getpeername,      /* sop_getpeername */
    sotpi_getsockname,      /* sop_getsockname */
    sotpi_shutdown,         /* sop_shutdown */
    sotpi_getsockopt,       /* sop_getsockopt */
    sotpi_setsockopt,       /* sop_setsockopt */
    sotpi_ioctl,            /* sop_ioctl */
    sotpi_poll,             /* sop_poll */
    sotpi_close,            /* sop_close */
};

/*
 * Return a TPI socket vnode.
 *
 * Note that sockets assume that the driver will clone (either itself
 * or by using the clone driver) i.e. a socket() call will always
 * result in a new vnode being created.
 */

/*
 * Common create code for socket and accept. If tso is set the values
 * from that node are used instead of issuing a T_INFO_REQ.
 */

/* ARGSUSED */
static struct sonode *
sotpi_create(struct sockparams *sp, int family, int type, int protocol,
    int version, int sflags, int *errorp, cred_t *cr)
{
    struct sonode *so;
    kmem_cache_t *cp;
    int sfamily = family;

    ASSERT(sp->sp_sdev_info.sd_vnode != NULL);

    if (family == AF_NCA) {
        /*
         * The request is for an NCA socket so for NL7C use the
         * INET domain instead and mark NL7C_AF_NCA below.
         */
        family = AF_INET;
        /*
         * NL7C is not supported in the non-global zone,
         * we enforce this restriction here.
         */
        if (getzoneid() != GLOBAL_ZONEID) {
            *errorp = ENOTSUP;
            return (NULL);
        }
    }

    /*
     * To be compatible with the old TPI socket implementation, ignore
     * the sleep flag (sflags) passed in.
     */
    cp = (family == AF_UNIX) ? socktpi_unix_cache : socktpi_cache;
    so = kmem_cache_alloc(cp, KM_SLEEP);
    if (so == NULL) {
        *errorp = ENOMEM;
        return (NULL);
    }

    sonode_init(so, sp, family, type, protocol, &sotpi_sonodeops);
    sotpi_info_init(so);

    if (sfamily == AF_NCA) {
        SOTOTPI(so)->sti_nl7c_flags = NL7C_AF_NCA;
    }

    if (version == SOV_DEFAULT)
        version = so_default_version;

    so->so_version = (short)version;
    *errorp = 0;

    return (so);
}

static void
sotpi_destroy(struct sonode *so)
{
    kmem_cache_t *cp;
    struct sockparams *origsp;

    /*
     * If there is a new dealloc function (ie. smod_destroy_func),
     * then it should check the correctness of the ops.
     */

    ASSERT(so->so_ops == &sotpi_sonodeops);

    origsp = SOTOTPI(so)->sti_orig_sp;

    sotpi_info_fini(so);

    if (so->so_state & SS_FALLBACK_COMP) {
        /*
         * A fallback happened, which means that a sotpi_info_t struct
         * was allocated (as opposed to being allocated from the TPI
         * sonode cache). Therefore we explicitly free the struct
         * here.
         */
        sotpi_info_destroy(so);
        ASSERT(origsp != NULL);

        origsp->sp_smod_info->smod_sock_destroy_func(so);
        SOCKPARAMS_DEC_REF(origsp);
    } else {
        sonode_fini(so);
        cp = (so->so_family == AF_UNIX) ? socktpi_unix_cache :
            socktpi_cache;
        kmem_cache_free(cp, so);
    }
}

/* ARGSUSED1 */
int
sotpi_init(struct sonode *so, struct sonode *tso, struct cred *cr, int flags)
{
    major_t maj;
    dev_t newdev;
    struct vnode *vp;
    int error = 0;
    struct stdata *stp;

    sotpi_info_t *sti = SOTOTPI(so);

    dprint(1, ("sotpi_init()\n"));

    /*
     * Overwrite the sleep flag passed in; that is OK since the
     * TPI socket does not honor the sleep flag.
     */
    flags |= FREAD|FWRITE;

    /*
     * Record in so_flag that it is a clone.
     */
    if (getmajor(sti->sti_dev) == clone_major)
        so->so_flag |= SOCLONE;

    if ((so->so_type == SOCK_STREAM || so->so_type == SOCK_DGRAM) &&
        (so->so_family == AF_INET || so->so_family == AF_INET6) &&
        (so->so_protocol == IPPROTO_TCP || so->so_protocol == IPPROTO_UDP ||
        so->so_protocol == IPPROTO_IP)) {
        /* Tell tcp or udp that it's talking to sockets */
        flags |= SO_SOCKSTR;

        /*
         * Here we indicate to socktpi_open() our attempt to
         * make direct calls between sockfs and transport.
         * The final decision is left to socktpi_open().
         */
        sti->sti_direct = 1;

        ASSERT(so->so_type != SOCK_DGRAM || tso == NULL);
        if (so->so_type == SOCK_STREAM && tso != NULL) {
            if (SOTOTPI(tso)->sti_direct) {
                /*
                 * Inherit sti_direct from listener and pass
                 * SO_ACCEPTOR open flag to tcp, indicating
                 * that this is an accept fast-path instance.
                 */
                flags |= SO_ACCEPTOR;
            } else {
                /*
                 * sti_direct is not set on listener, meaning
                 * that the listener has been converted from
                 * a socket to a stream. Ensure that the
                 * acceptor inherits these settings.
                 */
                sti->sti_direct = 0;
                flags &= ~SO_SOCKSTR;
            }
        }
    }

    /*
     * Tell local transport that it is talking to sockets.
     */
    if (so->so_family == AF_UNIX) {
        flags |= SO_SOCKSTR;
    }

    vp = SOTOV(so);
    newdev = vp->v_rdev;
    maj = getmajor(newdev);
    ASSERT(STREAMSTAB(maj));

    error = stropen(vp, &newdev, flags, cr);

    stp = vp->v_stream;
    if (error == 0) {
        if (so->so_flag & SOCLONE)
            ASSERT(newdev != vp->v_rdev);
        mutex_enter(&so->so_lock);
        sti->sti_dev = newdev;
        vp->v_rdev = newdev;
        mutex_exit(&so->so_lock);

        if (stp->sd_flag & STRISTTY) {
            /*
             * this is a post SVR4 tty driver - a socket can not
             * be a controlling terminal. Fail the open.
             */
            (void) sotpi_close(so, flags, cr);
            return (ENOTTY);        /* XXX */
        }

        ASSERT(stp->sd_wrq != NULL);
        sti->sti_provinfo = tpi_findprov(stp->sd_wrq);

        /*
         * If caller is interested in doing direct function call
         * interface to/from transport module, probe the module
         * directly beneath the streamhead to see if it qualifies.
         *
         * We turn off the direct interface when qualifications fail.
         * In the acceptor case, we simply turn off the sti_direct
         * flag on the socket. We do the fallback after the accept
         * has completed, before the new socket is returned to the
         * application.
         */
        if (sti->sti_direct) {
            queue_t *tq = stp->sd_wrq->q_next;

            /*
             * sti_direct is currently supported and tested
             * only for tcp/udp; this is the main reason to
             * have the following assertions.
             */
            ASSERT(so->so_family == AF_INET ||
                so->so_family == AF_INET6);
            ASSERT(so->so_protocol == IPPROTO_UDP ||
                so->so_protocol == IPPROTO_TCP ||
                so->so_protocol == IPPROTO_IP);
            ASSERT(so->so_type == SOCK_DGRAM ||
                so->so_type == SOCK_STREAM);

            /*
             * Abort direct call interface if the module directly
             * underneath the stream head is not defined with the
             * _D_DIRECT flag. This could happen in the tcp or
             * udp case, when some other module is autopushed
             * above it, or for some reasons the expected module
             * isn't purely D_MP (which is the main requirement).
             */
            if (!socktpi_direct || !(tq->q_flag & _QDIRECT) ||
                !(_OTHERQ(tq)->q_flag & _QDIRECT)) {
                int rval;

                /* Continue on without direct calls */
                sti->sti_direct = 0;

                /*
                 * Cannot issue ioctl on fallback socket since
                 * there is no conn associated with the queue.
                 * The fallback downcall will notify the proto
                 * of the change.
                 */
                if (!(flags & SO_ACCEPTOR) &&
                    !(flags & SO_FALLBACK)) {
                    if ((error = strioctl(vp,
                        _SIOCSOCKFALLBACK, 0, 0, K_TO_K,
                        cr, &rval)) != 0) {
                        (void) sotpi_close(so, flags,
                            cr);
                        return (error);
                    }
                }
            }
        }

        if (flags & SO_FALLBACK) {
            /*
             * The stream created does not have a conn.
             * do stream set up after conn has been assigned
             */
            return (error);
        }
        if (error = so_strinit(so, tso)) {
            (void) sotpi_close(so, flags, cr);
            return (error);
        }

        /* Wildcard */
        if (so->so_protocol != so->so_sockparams->sp_protocol) {
            int protocol = so->so_protocol;
            /*
             * Issue SO_PROTOTYPE setsockopt.
             */
            error = sotpi_setsockopt(so, SOL_SOCKET, SO_PROTOTYPE,
                &protocol, (t_uscalar_t)sizeof (protocol), cr);
            if (error != 0) {
                (void) sotpi_close(so, flags, cr);
                /*
                 * Setsockopt often fails with ENOPROTOOPT but
                 * socket() should fail with
                 * EPROTONOSUPPORT/EPROTOTYPE.
                 */
                return (EPROTONOSUPPORT);
            }
        }

    } else {
        /*
         * While the same socket can not be reopened (unlike specfs)
         * the stream head sets STREOPENFAIL when the autopush fails.
         */
        if ((stp != NULL) &&
            (stp->sd_flag & STREOPENFAIL)) {
            /*
             * Open failed part way through.
             */
            mutex_enter(&stp->sd_lock);
            stp->sd_flag &= ~STREOPENFAIL;
            mutex_exit(&stp->sd_lock);
            (void) sotpi_close(so, flags, cr);
            return (error);
            /*NOTREACHED*/
        }
        ASSERT(stp == NULL);
    }
    TRACE_4(TR_FAC_SOCKFS, TR_SOCKFS_OPEN,
        "sockfs open:maj %d vp %p so %p error %d",
        maj, vp, so, error);
    return (error);
}

/*
 * Bind the socket to an unspecified address in sockfs only.
 * Used for TCP/UDP transports where we know that the O_T_BIND_REQ isn't
 * required in all cases.
 */
static void
so_automatic_bind(struct sonode *so)
{
    sotpi_info_t *sti = SOTOTPI(so);
    ASSERT(so->so_family == AF_INET || so->so_family == AF_INET6);

    ASSERT(MUTEX_HELD(&so->so_lock));
    ASSERT(!(so->so_state & SS_ISBOUND));
    ASSERT(sti->sti_unbind_mp);

    ASSERT(sti->sti_laddr_len <= sti->sti_laddr_maxlen);
    bzero(sti->sti_laddr_sa, sti->sti_laddr_len);
    sti->sti_laddr_sa->sa_family = so->so_family;
    so->so_state |= SS_ISBOUND;
}


/*
 * bind the socket.
 *
 * If the socket is already bound and none of _SOBIND_SOCKBSD or _SOBIND_XPG4_2
 * are passed in we allow rebinding. Note that for backwards compatibility
 * even "svr4" sockets pass in _SOBIND_SOCKBSD/SOV_SOCKBSD to sobind/bind.
 * Thus the rebinding code is currently not executed.
 *
 * The constraints for rebinding are:
 * - it is a SOCK_DGRAM, or
 * - it is a SOCK_STREAM/SOCK_SEQPACKET that has not been connected
 *   and no listen() has been done.
 * This rebinding code was added based on some language in the XNET book
 * about not returning EINVAL if the protocol allows rebinding. However,
 * this language is not present in the POSIX socket draft. Thus maybe the
 * rebinding logic should be deleted from the source.
 *
 * A null "name" can be used to unbind the socket if:
 * - it is a SOCK_DGRAM, or
 * - it is a SOCK_STREAM/SOCK_SEQPACKET that has not been connected
 *   and no listen() has been done.
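 *
 * As an illustrative sketch of the null-name case (not part of the original
 * comment), an already bound SOCK_DGRAM socket can be unbound by a call
 * along the lines of:
 *
 *      (void) sotpi_bind(so, NULL, 0, flags, cr);
 *
 * which takes the name == NULL path in sotpi_bindlisten() below and leaves
 * the socket unbound.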
 */
/* ARGSUSED */
static int
sotpi_bindlisten(struct sonode *so, struct sockaddr *name,
    socklen_t namelen, int backlog, int flags, struct cred *cr)
{
    struct T_bind_req bind_req;
    struct T_bind_ack *bind_ack;
    int error = 0;
    mblk_t *mp;
    void *addr;
    t_uscalar_t addrlen;
    int unbind_on_err = 1;
    boolean_t clear_acceptconn_on_err = B_FALSE;
    boolean_t restore_backlog_on_err = B_FALSE;
    int save_so_backlog;
    t_scalar_t PRIM_type = O_T_BIND_REQ;
    boolean_t tcp_udp_xport;
    void *nl7c = NULL;
    sotpi_info_t *sti = SOTOTPI(so);

    dprintso(so, 1, ("sotpi_bindlisten(%p, %p, %d, %d, 0x%x) %s\n",
        (void *)so, (void *)name, namelen, backlog, flags,
        pr_state(so->so_state, so->so_mode)));

    tcp_udp_xport = so->so_type == SOCK_STREAM || so->so_type == SOCK_DGRAM;

    if (!(flags & _SOBIND_LOCK_HELD)) {
        mutex_enter(&so->so_lock);
        so_lock_single(so);     /* Set SOLOCKED */
    } else {
        ASSERT(MUTEX_HELD(&so->so_lock));
        ASSERT(so->so_flag & SOLOCKED);
    }

    /*
     * Make sure that there is a preallocated unbind_req message
     * before binding. This message is allocated when the socket is
     * created, but it might have been consumed.
     */
    if (sti->sti_unbind_mp == NULL) {
        dprintso(so, 1, ("sobind: allocating unbind_req\n"));
        /* NOTE: holding so_lock while sleeping */
        sti->sti_unbind_mp =
            soallocproto(sizeof (struct T_unbind_req), _ALLOC_SLEEP,
            cr);
    }

    if (flags & _SOBIND_REBIND) {
        /*
         * Called from solisten after doing an sotpi_unbind() or
         * potentially without the unbind (latter for AF_INET{,6}).
         */
        ASSERT(name == NULL && namelen == 0);

        if (so->so_family == AF_UNIX) {
            ASSERT(sti->sti_ux_bound_vp);
            addr = &sti->sti_ux_laddr;
            addrlen = (t_uscalar_t)sizeof (sti->sti_ux_laddr);
            dprintso(so, 1, ("sobind rebind UNIX: addrlen %d, "
                "addr 0x%p, vp %p\n",
                addrlen,
                (void *)((struct so_ux_addr *)addr)->soua_vp,
                (void *)sti->sti_ux_bound_vp));
        } else {
            addr = sti->sti_laddr_sa;
            addrlen = (t_uscalar_t)sti->sti_laddr_len;
        }
    } else if (flags & _SOBIND_UNSPEC) {
        ASSERT(name == NULL && namelen == 0);

        /*
         * The caller checked SS_ISBOUND but not necessarily
         * under so_lock
         */
        if (so->so_state & SS_ISBOUND) {
            /* No error */
            goto done;
        }

        /* Set an initial local address */
        switch (so->so_family) {
        case AF_UNIX:
            /*
             * Use an address with same size as struct sockaddr
             * just like BSD.
             */
            sti->sti_laddr_len =
                (socklen_t)sizeof (struct sockaddr);
            ASSERT(sti->sti_laddr_len <= sti->sti_laddr_maxlen);
            bzero(sti->sti_laddr_sa, sti->sti_laddr_len);
            sti->sti_laddr_sa->sa_family = so->so_family;

            /*
             * Pass down an address with the implicit bind
             * magic number and the rest all zeros.
             * The transport will return a unique address.
             */
            sti->sti_ux_laddr.soua_vp = NULL;
            sti->sti_ux_laddr.soua_magic = SOU_MAGIC_IMPLICIT;
            addr = &sti->sti_ux_laddr;
            addrlen = (t_uscalar_t)sizeof (sti->sti_ux_laddr);
            break;

        case AF_INET:
        case AF_INET6:
            /*
             * An unspecified bind in TPI has a NULL address.
             * Set the address in sockfs to have the sa_family.
             */
            sti->sti_laddr_len = (so->so_family == AF_INET) ?
                (socklen_t)sizeof (sin_t) :
                (socklen_t)sizeof (sin6_t);
            ASSERT(sti->sti_laddr_len <= sti->sti_laddr_maxlen);
            bzero(sti->sti_laddr_sa, sti->sti_laddr_len);
            sti->sti_laddr_sa->sa_family = so->so_family;
            addr = NULL;
            addrlen = 0;
            break;

        default:
            /*
             * An unspecified bind in TPI has a NULL address.
             * Set the address in sockfs to be zero length.
             *
             * Can not assume there is a sa_family for all
             * protocol families. For example, AF_X25 does not
             * have a family field.
             */
            bzero(sti->sti_laddr_sa, sti->sti_laddr_len);
            sti->sti_laddr_len = 0; /* XXX correct? */
            addr = NULL;
            addrlen = 0;
            break;
        }

    } else {
        if (so->so_state & SS_ISBOUND) {
            /*
             * If it is ok to rebind the socket, first unbind
             * with the transport. A rebind to the NULL address
             * is interpreted as an unbind.
             * Note that a bind to NULL in BSD does unbind the
             * socket but it fails with EINVAL.
             * Note that regular sockets set SOV_SOCKBSD i.e.
             * _SOBIND_SOCKBSD gets set here hence no type of
             * socket does currently allow rebinding.
             *
             * If the name is NULL just do an unbind.
             */
            if (flags & (_SOBIND_SOCKBSD|_SOBIND_XPG4_2) &&
                name != NULL) {
                error = EINVAL;
                unbind_on_err = 0;
                eprintsoline(so, error);
                goto done;
            }
            if ((so->so_mode & SM_CONNREQUIRED) &&
                (so->so_state & SS_CANTREBIND)) {
                error = EINVAL;
                unbind_on_err = 0;
                eprintsoline(so, error);
                goto done;
            }
            error = sotpi_unbind(so, 0);
            if (error) {
                eprintsoline(so, error);
                goto done;
            }
            ASSERT(!(so->so_state & SS_ISBOUND));
            if (name == NULL) {
                so->so_state &=
                    ~(SS_ISCONNECTED|SS_ISCONNECTING);
                goto done;
            }
        }

        /* X/Open requires this check */
        if ((so->so_state & SS_CANTSENDMORE) && !xnet_skip_checks) {
            if (xnet_check_print) {
                printf("sockfs: X/Open bind state check "
                    "caused EINVAL\n");
            }
            error = EINVAL;
            goto done;
        }

        switch (so->so_family) {
        case AF_UNIX:
            /*
             * All AF_UNIX addresses are nul terminated
             * when copied (copyin_name) in so the minimum
             * length is 3 bytes.
             */
            if (name == NULL ||
                (ssize_t)namelen <= sizeof (short) + 1) {
                error = EISDIR;
                eprintsoline(so, error);
                goto done;
            }
            /*
             * Verify so_family matches the bound family.
             * BSD does not check this for AF_UNIX resulting
             * in funny mknods.
             */
            if (name->sa_family != so->so_family) {
                error = EAFNOSUPPORT;
                goto done;
            }
            break;
        case AF_INET:
            if (name == NULL) {
                error = EINVAL;
                eprintsoline(so, error);
                goto done;
            }
            if ((size_t)namelen != sizeof (sin_t)) {
                error = name->sa_family != so->so_family ?
                    EAFNOSUPPORT : EINVAL;
                eprintsoline(so, error);
                goto done;
            }
            if ((flags & _SOBIND_XPG4_2) &&
                (name->sa_family != so->so_family)) {
                /*
                 * This check has to be made for X/Open
                 * sockets however application failures have
                 * been observed when it is applied to
                 * all sockets.
                 */
                error = EAFNOSUPPORT;
                eprintsoline(so, error);
                goto done;
            }
            /*
             * Force a zero sa_family to match so_family.
             *
             * Some programs like inetd(1M) don't set the
             * family field. Other programs leave
             * sin_family set to garbage - SunOS 4.X does
             * not check the family field on a bind.
             * We use the family field that
             * was passed in to the socket() call.
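             *
             * As an illustrative sketch (not part of the original
             * comment), such a legacy caller might do:
             *
             *      struct sockaddr_in sin;
             *      bzero(&sin, sizeof (sin));
             *      sin.sin_port = htons(port);
             *      (void) bind(fd, (struct sockaddr *)&sin,
             *          sizeof (sin));
             *
             * leaving sin_family zero, yet still expects this to
             * behave as an AF_INET bind.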
             */
            name->sa_family = so->so_family;
            break;

        case AF_INET6: {
#ifdef DEBUG
            sin6_t *sin6 = (sin6_t *)name;
#endif /* DEBUG */

            if (name == NULL) {
                error = EINVAL;
                eprintsoline(so, error);
                goto done;
            }
            if ((size_t)namelen != sizeof (sin6_t)) {
                error = name->sa_family != so->so_family ?
                    EAFNOSUPPORT : EINVAL;
                eprintsoline(so, error);
                goto done;
            }
            if (name->sa_family != so->so_family) {
                /*
                 * With IPv6 we require the family to match
                 * unlike in IPv4.
                 */
                error = EAFNOSUPPORT;
                eprintsoline(so, error);
                goto done;
            }
#ifdef DEBUG
            /*
             * Verify that apps don't forget to clear
             * sin6_scope_id etc
             */
            if (sin6->sin6_scope_id != 0 &&
                !IN6_IS_ADDR_LINKSCOPE(&sin6->sin6_addr)) {
                zcmn_err(getzoneid(), CE_WARN,
                    "bind with uninitialized sin6_scope_id "
                    "(%d) on socket. Pid = %d\n",
                    (int)sin6->sin6_scope_id,
                    (int)curproc->p_pid);
            }
            if (sin6->__sin6_src_id != 0) {
                zcmn_err(getzoneid(), CE_WARN,
                    "bind with uninitialized __sin6_src_id "
                    "(%d) on socket. Pid = %d\n",
                    (int)sin6->__sin6_src_id,
                    (int)curproc->p_pid);
            }
#endif /* DEBUG */
            break;
        }
        default:
            /*
             * Don't do any length or sa_family check to allow
             * non-sockaddr style addresses.
             */
            if (name == NULL) {
                error = EINVAL;
                eprintsoline(so, error);
                goto done;
            }
            break;
        }

        if (namelen > (t_uscalar_t)sti->sti_laddr_maxlen) {
            error = ENAMETOOLONG;
            eprintsoline(so, error);
            goto done;
        }
        /*
         * Save local address.
         */
        sti->sti_laddr_len = (socklen_t)namelen;
        ASSERT(sti->sti_laddr_len <= sti->sti_laddr_maxlen);
        bcopy(name, sti->sti_laddr_sa, namelen);

        addr = sti->sti_laddr_sa;
        addrlen = (t_uscalar_t)sti->sti_laddr_len;
        switch (so->so_family) {
        case AF_INET6:
        case AF_INET:
            break;
        case AF_UNIX: {
            struct sockaddr_un *soun =
                (struct sockaddr_un *)sti->sti_laddr_sa;
            struct vnode *vp, *rvp;
            struct vattr vattr;

            ASSERT(sti->sti_ux_bound_vp == NULL);
            /*
             * Create vnode for the specified path name.
             * Keep vnode held with a reference in sti_ux_bound_vp.
             * Use the vnode pointer as the address used in the
             * bind with the transport.
             *
             * Use the same mode as in BSD. In particular this does
             * not observe the umask.
             */
            /* MAXPATHLEN + soun_family + nul termination */
            if (sti->sti_laddr_len >
                (socklen_t)(MAXPATHLEN + sizeof (short) + 1)) {
                error = ENAMETOOLONG;
                eprintsoline(so, error);
                goto done;
            }
            vattr.va_type = VSOCK;
            vattr.va_mode = 0777 & ~PTOU(curproc)->u_cmask;
            vattr.va_mask = AT_TYPE|AT_MODE;
            /* NOTE: holding so_lock */
            error = vn_create(soun->sun_path, UIO_SYSSPACE, &vattr,
                EXCL, 0, &vp, CRMKNOD, 0, 0);
            if (error) {
                if (error == EEXIST)
                    error = EADDRINUSE;
                eprintsoline(so, error);
                goto done;
            }
            /*
             * Establish pointer from the underlying filesystem
             * vnode to the socket node.
             * sti_ux_bound_vp and v_stream->sd_vnode form the
             * cross-linkage between the underlying filesystem
             * node and the socket node.
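             *
             * Rough picture of the linkage set up below (an
             * illustrative sketch, not part of the original comment):
             *
             *  fs vnode (vp)                   socket vnode (SOTOV(so))
             *      v_stream  ----------------->    v_stream (same stdata)
             *  sti_ux_bound_vp ------> vp (held reference)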
             */

            if ((VOP_REALVP(vp, &rvp, NULL) == 0) && (vp != rvp)) {
                VN_HOLD(rvp);
                VN_RELE(vp);
                vp = rvp;
            }

            ASSERT(SOTOV(so)->v_stream);
            mutex_enter(&vp->v_lock);
            vp->v_stream = SOTOV(so)->v_stream;
            sti->sti_ux_bound_vp = vp;
            mutex_exit(&vp->v_lock);

            /*
             * Use the vnode pointer value as a unique address
             * (together with the magic number to avoid conflicts
             * with implicit binds) in the transport provider.
             */
            sti->sti_ux_laddr.soua_vp =
                (void *)sti->sti_ux_bound_vp;
            sti->sti_ux_laddr.soua_magic = SOU_MAGIC_EXPLICIT;
            addr = &sti->sti_ux_laddr;
            addrlen = (t_uscalar_t)sizeof (sti->sti_ux_laddr);
            dprintso(so, 1, ("sobind UNIX: addrlen %d, addr %p\n",
                addrlen,
                (void *)((struct so_ux_addr *)addr)->soua_vp));
            break;
        }
        } /* end switch (so->so_family) */
    }

    /*
     * set SS_ACCEPTCONN before sending down O_T_BIND_REQ since
     * the transport can start passing up T_CONN_IND messages
     * as soon as it receives the bind req and strsock_proto()
     * insists that SS_ACCEPTCONN is set when processing T_CONN_INDs.
     */
    if (flags & _SOBIND_LISTEN) {
        if ((so->so_state & SS_ACCEPTCONN) == 0)
            clear_acceptconn_on_err = B_TRUE;
        save_so_backlog = so->so_backlog;
        restore_backlog_on_err = B_TRUE;
        so->so_state |= SS_ACCEPTCONN;
        so->so_backlog = backlog;
    }

    /*
     * If NL7C addr(s) have been configured check for addr/port match,
     * or if an implicit NL7C socket via AF_NCA mark socket as NL7C.
     *
     * NL7C supports the TCP transport only so check AF_INET and AF_INET6
     * family sockets only. If match mark as such.
     */
    if (nl7c_enabled && ((addr != NULL &&
        (so->so_family == AF_INET || so->so_family == AF_INET6) &&
        (nl7c = nl7c_lookup_addr(addr, addrlen))) ||
        sti->sti_nl7c_flags == NL7C_AF_NCA)) {
        /*
         * NL7C is not supported in non-global zones,
         * we enforce this restriction here.
         */
        if (so->so_zoneid == GLOBAL_ZONEID) {
            /* An NL7C socket, mark it */
            sti->sti_nl7c_flags |= NL7C_ENABLED;
            if (nl7c == NULL) {
                /*
                 * Was an AF_NCA bind() so add it to the
                 * addr list for reporting purposes.
                 */
                nl7c = nl7c_add_addr(addr, addrlen);
            }
        } else
            nl7c = NULL;
    }

    /*
     * We send a T_BIND_REQ for TCP/UDP since we know it supports it,
     * for other transports we will send in a O_T_BIND_REQ.
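     *
     * The downstream M_PROTO message built below is laid out roughly as
     * follows (an illustrative sketch of the code that follows, not a
     * wire-format definition):
     *
     *  struct T_bind_req      PRIM_type, ADDR_length = addrlen,
     *                         ADDR_offset = sizeof (bind_req),
     *                         CONIND_number = backlog
     *  addrlen bytes          the address being bound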
     */
    if (tcp_udp_xport &&
        (so->so_family == AF_INET || so->so_family == AF_INET6))
        PRIM_type = T_BIND_REQ;

    bind_req.PRIM_type = PRIM_type;
    bind_req.ADDR_length = addrlen;
    bind_req.ADDR_offset = (t_scalar_t)sizeof (bind_req);
    bind_req.CONIND_number = backlog;
    /* NOTE: holding so_lock while sleeping */
    mp = soallocproto2(&bind_req, sizeof (bind_req),
        addr, addrlen, 0, _ALLOC_SLEEP, cr);
    sti->sti_laddr_valid = 0;

    /* Done using sti_laddr_sa - can drop the lock */
    mutex_exit(&so->so_lock);

    error = kstrputmsg(SOTOV(so), mp, NULL, 0, 0,
        MSG_BAND|MSG_HOLDSIG|MSG_IGNERROR, 0);
    if (error) {
        eprintsoline(so, error);
        mutex_enter(&so->so_lock);
        goto done;
    }

    mutex_enter(&so->so_lock);
    error = sowaitprim(so, PRIM_type, T_BIND_ACK,
        (t_uscalar_t)sizeof (*bind_ack), &mp, 0);
    if (error) {
        eprintsoline(so, error);
        goto done;
    }
    ASSERT(mp);
    /*
     * Even if some TPI message (e.g. T_DISCON_IND) was received in
     * strsock_proto while the lock was dropped above, the bind
     * is allowed to complete.
     */

    /* Mark as bound. This will be undone if we detect errors below. */
    if (flags & _SOBIND_NOXLATE) {
        ASSERT(so->so_family == AF_UNIX);
        sti->sti_faddr_noxlate = 1;
    }
    ASSERT(!(so->so_state & SS_ISBOUND) || (flags & _SOBIND_REBIND));
    so->so_state |= SS_ISBOUND;
    ASSERT(sti->sti_unbind_mp);

    /* note that we've already set SS_ACCEPTCONN above */

    /*
     * Recompute addrlen - an unspecified bind sent down an
     * address of length zero but we expect the appropriate length
     * in return.
     */
    addrlen = (t_uscalar_t)(so->so_family == AF_UNIX ?
        sizeof (sti->sti_ux_laddr) : sti->sti_laddr_len);

    bind_ack = (struct T_bind_ack *)mp->b_rptr;
    /*
     * The alignment restriction is really too strict but
     * we want enough alignment to inspect the fields of
     * a sockaddr_in.
     */
    addr = sogetoff(mp, bind_ack->ADDR_offset,
        bind_ack->ADDR_length,
        __TPI_ALIGN_SIZE);
    if (addr == NULL) {
        freemsg(mp);
        error = EPROTO;
        eprintsoline(so, error);
        goto done;
    }
    if (!(flags & _SOBIND_UNSPEC)) {
        /*
         * Verify that the transport didn't return something we
         * did not want e.g. an address other than what we asked for.
         *
         * NOTE: These checks would go away if/when we switch to
         * using the new TPI (in which the transport would fail
         * the request instead of assigning a different address).
         *
         * NOTE2: For protocols that we don't know (i.e. any
         * other than AF_INET6, AF_INET and AF_UNIX), we
         * cannot know if the transport should be expected to
         * return the same address as that requested.
         *
         * NOTE3: For AF_INET and AF_INET6, TCP/UDP, we send
         * down a T_BIND_REQ. We use O_T_BIND_REQ for others.
         *
         * For example, in the case of netatalk it may be
         * inappropriate for the transport to return the
         * requested address (as it may have allocated a local
         * port number in behaviour similar to that of an
         * AF_INET bind request with a port number of zero).
         *
         * Given the definition of O_T_BIND_REQ, where the
         * transport may bind to an address other than the
         * requested address, it's not possible to determine
         * whether a returned address that differs from the
         * requested address is a reason to fail (because the
         * requested address was not available) or succeed
         * (because the transport allocated an appropriate
         * address and/or port).
         *
         * sockfs currently requires that the transport return
         * the requested address in the T_BIND_ACK, unless
         * there is code here to allow for any discrepancy.
         * Such code exists for AF_INET and AF_INET6.
         *
         * Netatalk chooses to return the requested address
         * rather than the (correct) allocated address. This
         * means that netatalk violates the TPI specification
         * (and would not function correctly if used from a
         * TLI application), but it does mean that it works
         * with sockfs.
         *
         * As noted above, using the newer XTI bind primitive
         * (T_BIND_REQ) in preference to O_T_BIND_REQ would
         * allow sockfs to be more sure about whether or not
         * the bind request had succeeded (as transports are
         * not permitted to bind to a different address than
         * that requested - they must return failure).
         * Unfortunately, support for T_BIND_REQ may not be
         * present in all transport implementations (netatalk,
         * for example, doesn't have it), making the
         * transition difficult.
         */
        if (bind_ack->ADDR_length != addrlen) {
            /* Assumes that the requested address was in use */
            freemsg(mp);
            error = EADDRINUSE;
            eprintsoline(so, error);
            goto done;
        }

        switch (so->so_family) {
        case AF_INET6:
        case AF_INET: {
            sin_t *rname, *aname;

            rname = (sin_t *)addr;
            aname = (sin_t *)sti->sti_laddr_sa;

            /*
             * Take advantage of the alignment
             * of sin_port and sin6_port which fall
             * in the same place in their data structures.
             * Just use sin_port for either address family.
             *
             * This may become a problem if (heaven forbid)
             * there's a separate ipv6port_reserved... :-P
             *
             * Binding to port 0 has the semantics of letting
             * the transport bind to any port.
             *
             * If the transport is TCP or UDP since we had sent
             * a T_BIND_REQ we would not get a port other than
             * what we asked for.
             */
            if (tcp_udp_xport) {
                /*
                 * Pick up the new port number if we bound to
                 * port 0.
                 */
                if (aname->sin_port == 0)
                    aname->sin_port = rname->sin_port;
                sti->sti_laddr_valid = 1;
                break;
            }
            if (aname->sin_port != 0 &&
                aname->sin_port != rname->sin_port) {
                freemsg(mp);
                error = EADDRINUSE;
                eprintsoline(so, error);
                goto done;
            }
            /*
             * Pick up the new port number if we bound to port 0.
             */
            aname->sin_port = rname->sin_port;

            /*
             * Unfortunately, addresses aren't _quite_ the same.
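             * (Sketch of the difference handled below: for AF_INET
             * the sin_addr.s_addr fields are compared directly,
             * while for AF_INET6 the sin6_addr fields are compared
             * with IN6_ARE_ADDR_EQUAL.)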
             */
            if (so->so_family == AF_INET) {
                if (aname->sin_addr.s_addr !=
                    rname->sin_addr.s_addr) {
                    freemsg(mp);
                    error = EADDRNOTAVAIL;
                    eprintsoline(so, error);
                    goto done;
                }
            } else {
                sin6_t *rname6 = (sin6_t *)rname;
                sin6_t *aname6 = (sin6_t *)aname;

                if (!IN6_ARE_ADDR_EQUAL(&aname6->sin6_addr,
                    &rname6->sin6_addr)) {
                    freemsg(mp);
                    error = EADDRNOTAVAIL;
                    eprintsoline(so, error);
                    goto done;
                }
            }
            break;
        }
        case AF_UNIX:
            if (bcmp(addr, &sti->sti_ux_laddr, addrlen) != 0) {
                freemsg(mp);
                error = EADDRINUSE;
                eprintsoline(so, error);
                eprintso(so,
                    ("addrlen %d, addr 0x%x, vp %p\n",
                    addrlen, *((int *)addr),
                    (void *)sti->sti_ux_bound_vp));
                goto done;
            }
            sti->sti_laddr_valid = 1;
            break;
        default:
            /*
             * NOTE: This assumes that addresses can be
             * byte-compared for equivalence.
             */
            if (bcmp(addr, sti->sti_laddr_sa, addrlen) != 0) {
                freemsg(mp);
                error = EADDRINUSE;
                eprintsoline(so, error);
                goto done;
            }
            /*
             * Don't mark sti_laddr_valid, as we cannot be
             * sure that the returned address is the real
             * bound address when talking to an unknown
             * transport.
             */
            break;
        }
    } else {
        /*
         * Save the returned address for getsockname.
         * Needed for unspecific bind unless transport supports
         * the TI_GETMYNAME ioctl.
         * Do this for AF_INET{,6} even though they do, as
         * caching info here is much better performance than
         * a TPI/STREAMS trip to the transport for getsockname.
         * Any which can't for some reason _must_ _not_ set
         * sti_laddr_valid here for the caching version of
         * getsockname to not break.
         */
        switch (so->so_family) {
        case AF_UNIX:
            /*
             * Record the address bound with the transport
             * for use by socketpair.
             */
            bcopy(addr, &sti->sti_ux_laddr, addrlen);
            sti->sti_laddr_valid = 1;
            break;
        case AF_INET:
        case AF_INET6:
            ASSERT(sti->sti_laddr_len <= sti->sti_laddr_maxlen);
            bcopy(addr, sti->sti_laddr_sa, sti->sti_laddr_len);
            sti->sti_laddr_valid = 1;
            break;
        default:
            /*
             * Don't mark sti_laddr_valid, as we cannot be
             * sure that the returned address is the real
             * bound address when talking to an unknown
             * transport.
             */
            break;
        }
    }

    if (nl7c != NULL) {
        /* Register listen()er sonode pointer with NL7C */
        nl7c_listener_addr(nl7c, so);
    }

    freemsg(mp);

done:
    if (error) {
        /* reset state & backlog to values held on entry */
        if (clear_acceptconn_on_err == B_TRUE)
            so->so_state &= ~SS_ACCEPTCONN;
        if (restore_backlog_on_err == B_TRUE)
            so->so_backlog = save_so_backlog;

        if (unbind_on_err && so->so_state & SS_ISBOUND) {
            int err;

            err = sotpi_unbind(so, 0);
            /* LINTED - statement has no consequent: if */
            if (err) {
                eprintsoline(so, error);
            } else {
                ASSERT(!(so->so_state & SS_ISBOUND));
            }
        }
    }
    if (!(flags & _SOBIND_LOCK_HELD)) {
        so_unlock_single(so, SOLOCKED);
        mutex_exit(&so->so_lock);
    } else {
        ASSERT(MUTEX_HELD(&so->so_lock));
        ASSERT(so->so_flag & SOLOCKED);
    }
    return (error);
}

/* bind the socket */
static int
sotpi_bind(struct sonode *so, struct sockaddr *name, socklen_t namelen,
    int flags, struct cred *cr)
{
    if ((flags & _SOBIND_SOCKETPAIR) == 0)
        return (sotpi_bindlisten(so, name, namelen, 0, flags, cr));

    flags &= ~_SOBIND_SOCKETPAIR;
    return (sotpi_bindlisten(so, name, namelen, 1, flags, cr));
}

/*
 * Unbind a socket - used when bind() fails, when bind() specifies a NULL
 * address, or when listen needs to unbind and bind.
 * If the _SOUNBIND_REBIND flag is specified the addresses are retained
 * so that a sobind can pick them up.
 */
static int
sotpi_unbind(struct sonode *so, int flags)
{
    struct T_unbind_req unbind_req;
    int error = 0;
    mblk_t *mp;
    sotpi_info_t *sti = SOTOTPI(so);

    dprintso(so, 1, ("sotpi_unbind(%p, 0x%x) %s\n",
        (void *)so, flags, pr_state(so->so_state, so->so_mode)));

    ASSERT(MUTEX_HELD(&so->so_lock));
    ASSERT(so->so_flag & SOLOCKED);

    if (!(so->so_state & SS_ISBOUND)) {
        error = EINVAL;
        eprintsoline(so, error);
        goto done;
    }

    mutex_exit(&so->so_lock);

    /*
     * Flush the read and write side (except stream head read queue)
     * and send down T_UNBIND_REQ.
     */
    (void) putnextctl1(strvp2wq(SOTOV(so)), M_FLUSH, FLUSHRW);

    unbind_req.PRIM_type = T_UNBIND_REQ;
    mp = soallocproto1(&unbind_req, sizeof (unbind_req),
        0, _ALLOC_SLEEP, CRED());
    error = kstrputmsg(SOTOV(so), mp, NULL, 0, 0,
        MSG_BAND|MSG_HOLDSIG|MSG_IGNERROR, 0);
    mutex_enter(&so->so_lock);
    if (error) {
        eprintsoline(so, error);
        goto done;
    }

    error = sowaitokack(so, T_UNBIND_REQ);
    if (error) {
        eprintsoline(so, error);
        goto done;
    }

    /*
     * Even if some TPI message (e.g. T_DISCON_IND) was received in
     * strsock_proto while the lock was dropped above, the unbind
     * is allowed to complete.
     */
    if (!(flags & _SOUNBIND_REBIND)) {
        /*
         * Clear out bound address.
         */
        vnode_t *vp;

        if ((vp = sti->sti_ux_bound_vp) != NULL) {
            sti->sti_ux_bound_vp = NULL;
            vn_rele_stream(vp);
        }
        /* Clear out address */
        sti->sti_laddr_len = 0;
    }
    so->so_state &= ~(SS_ISBOUND|SS_ACCEPTCONN);
    sti->sti_laddr_valid = 0;

done:

    /* If the caller held the lock don't release it here */
    ASSERT(MUTEX_HELD(&so->so_lock));
    ASSERT(so->so_flag & SOLOCKED);

    return (error);
}

/*
 * listen on the socket.
 * For TPI conforming transports this has to first unbind with the transport
 * and then bind again using the new backlog.
 */
/* ARGSUSED */
int
sotpi_listen(struct sonode *so, int backlog, struct cred *cr)
{
    int error = 0;
    sotpi_info_t *sti = SOTOTPI(so);

    dprintso(so, 1, ("sotpi_listen(%p, %d) %s\n",
        (void *)so, backlog, pr_state(so->so_state, so->so_mode)));

    if (sti->sti_serv_type == T_CLTS)
        return (EOPNOTSUPP);

    /*
     * If the socket is ready to accept connections already, then
     * return without doing anything. This avoids a problem where
     * a second listen() call fails if a connection is pending and
     * leaves the socket unbound. Only when we are not unbinding
     * with the transport can we safely increase the backlog.
     */
    if (so->so_state & SS_ACCEPTCONN &&
        !((so->so_family == AF_INET || so->so_family == AF_INET6) &&
        /*CONSTCOND*/
        !solisten_tpi_tcp))
        return (0);

    if (so->so_state & SS_ISCONNECTED)
        return (EINVAL);

    mutex_enter(&so->so_lock);
    so_lock_single(so);     /* Set SOLOCKED */

    /*
     * If the listen doesn't change the backlog we do nothing.
     * This avoids an EPROTO error from the transport.
     */
    if ((so->so_state & SS_ACCEPTCONN) &&
        so->so_backlog == backlog)
        goto done;

    if (!(so->so_state & SS_ISBOUND)) {
        /*
         * Must have been explicitly bound in the UNIX domain.
         */
        if (so->so_family == AF_UNIX) {
            error = EINVAL;
            goto done;
        }
        error = sotpi_bindlisten(so, NULL, 0, backlog,
            _SOBIND_UNSPEC|_SOBIND_LOCK_HELD|_SOBIND_LISTEN, cr);
    } else if (backlog > 0) {
        /*
         * AF_INET{,6} hack to avoid losing the port.
         * Assumes that all AF_INET{,6} transports can handle a
         * O_T_BIND_REQ with a non-zero CONIND_number when the TPI
         * has already bound thus it is possible to avoid the unbind.
         */
        if (!((so->so_family == AF_INET || so->so_family == AF_INET6) &&
            /*CONSTCOND*/
            !solisten_tpi_tcp)) {
            error = sotpi_unbind(so, _SOUNBIND_REBIND);
            if (error)
                goto done;
        }
        error = sotpi_bindlisten(so, NULL, 0, backlog,
            _SOBIND_REBIND|_SOBIND_LOCK_HELD|_SOBIND_LISTEN, cr);
    } else {
        so->so_state |= SS_ACCEPTCONN;
        so->so_backlog = backlog;
    }
    if (error)
        goto done;
    ASSERT(so->so_state & SS_ACCEPTCONN);
done:
    so_unlock_single(so, SOLOCKED);
    mutex_exit(&so->so_lock);
    return (error);
}

/*
 * Disconnect either a specified seqno or all (-1).
 * The former is used on listening sockets only.
 *
 * When seqno == -1 sodisconnect could call sotpi_unbind. However,
 * the current use of sodisconnect(seqno == -1) is only for shutdown
 * so there is no point (and potentially incorrect) to unbind.
 */
static int
sodisconnect(struct sonode *so, t_scalar_t seqno, int flags)
{
    struct T_discon_req discon_req;
    int error = 0;
    mblk_t *mp;

    dprintso(so, 1, ("sodisconnect(%p, %d, 0x%x) %s\n",
        (void *)so, seqno, flags, pr_state(so->so_state, so->so_mode)));

    if (!(flags & _SODISCONNECT_LOCK_HELD)) {
        mutex_enter(&so->so_lock);
        so_lock_single(so);     /* Set SOLOCKED */
    } else {
        ASSERT(MUTEX_HELD(&so->so_lock));
        ASSERT(so->so_flag & SOLOCKED);
    }

    if (!(so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING|SS_ACCEPTCONN))) {
        error = EINVAL;
        eprintsoline(so, error);
        goto done;
    }

    mutex_exit(&so->so_lock);
    /*
     * Flush the write side (unless this is a listener)
     * and then send down a T_DISCON_REQ.
     * (Don't flush on listener since it could flush {O_}T_CONN_RES
     * and other messages.)
     */
    if (!(so->so_state & SS_ACCEPTCONN))
        (void) putnextctl1(strvp2wq(SOTOV(so)), M_FLUSH, FLUSHW);

    discon_req.PRIM_type = T_DISCON_REQ;
    discon_req.SEQ_number = seqno;
    mp = soallocproto1(&discon_req, sizeof (discon_req),
        0, _ALLOC_SLEEP, CRED());
    error = kstrputmsg(SOTOV(so), mp, NULL, 0, 0,
        MSG_BAND|MSG_HOLDSIG|MSG_IGNERROR, 0);
    mutex_enter(&so->so_lock);
    if (error) {
        eprintsoline(so, error);
        goto done;
    }

    error = sowaitokack(so, T_DISCON_REQ);
    if (error) {
        eprintsoline(so, error);
        goto done;
    }
    /*
     * Even if some TPI message (e.g. T_DISCON_IND) was received in
     * strsock_proto while the lock was dropped above, the disconnect
     * is allowed to complete. However, it is not possible to
     * assert that SS_ISCONNECTED|SS_ISCONNECTING are set.
     */
    so->so_state &= ~(SS_ISCONNECTED|SS_ISCONNECTING);
    SOTOTPI(so)->sti_laddr_valid = 0;
    SOTOTPI(so)->sti_faddr_valid = 0;
done:
    if (!(flags & _SODISCONNECT_LOCK_HELD)) {
        so_unlock_single(so, SOLOCKED);
        mutex_exit(&so->so_lock);
    } else {
        /* If the caller held the lock don't release it here */
        ASSERT(MUTEX_HELD(&so->so_lock));
        ASSERT(so->so_flag & SOLOCKED);
    }
    return (error);
}

/* ARGSUSED */
int
sotpi_accept(struct sonode *so, int fflag, struct cred *cr,
    struct sonode **nsop)
{
    struct T_conn_ind *conn_ind;
    struct T_conn_res *conn_res;
    int error = 0;
    mblk_t *mp, *ack_mp;
    struct sonode *nso;
    vnode_t *nvp;
    void *src;
    t_uscalar_t srclen;
    void *opt;
    t_uscalar_t optlen;
    t_scalar_t PRIM_type;
    t_scalar_t SEQ_number;
    size_t sinlen;
    sotpi_info_t *sti = SOTOTPI(so);
    sotpi_info_t *nsti;

    dprintso(so, 1, ("sotpi_accept(%p, 0x%x, %p) %s\n",
        (void *)so, fflag, (void *)nsop,
        pr_state(so->so_state, so->so_mode)));

    /*
     * Defer single-threading the accepting socket until
     * the T_CONN_IND has been received and parsed and the
     * new sonode has been opened.
     */

    /* Check that we are not already connected */
    if ((so->so_state & SS_ACCEPTCONN) == 0)
        goto conn_bad;
again:
    if ((error = sowaitconnind(so, fflag, &mp)) != 0)
        goto e_bad;

    ASSERT(mp != NULL);
    conn_ind = (struct T_conn_ind *)mp->b_rptr;

    /*
     * Save SEQ_number for error paths.
     */
    SEQ_number = conn_ind->SEQ_number;

    srclen = conn_ind->SRC_length;
    src = sogetoff(mp, conn_ind->SRC_offset, srclen, 1);
    if (src == NULL) {
        error = EPROTO;
        freemsg(mp);
        eprintsoline(so, error);
        goto disconnect_unlocked;
    }
    optlen = conn_ind->OPT_length;
    switch (so->so_family) {
    case AF_INET:
    case AF_INET6:
        if ((optlen == sizeof (intptr_t)) && (sti->sti_direct != 0)) {
            bcopy(mp->b_rptr + conn_ind->OPT_offset,
                &opt, conn_ind->OPT_length);
        } else {
            /*
             * The transport (in this case TCP) hasn't sent up
             * a pointer to an instance for the accept fast-path.
             * Disable fast-path completely because the call to
             * sotpi_create() below would otherwise create an
             * incomplete TCP instance, which would lead to
             * problems when sockfs sends a normal T_CONN_RES
             * message down the new stream.
             */
            if (sti->sti_direct) {
                int rval;
                /*
                 * For consistency we inform tcp to disable
                 * direct interface on the listener, though
                 * we can certainly live without doing this
                 * because no data will ever travel upstream
                 * on the listening socket.
                 */
                sti->sti_direct = 0;
                (void) strioctl(SOTOV(so), _SIOCSOCKFALLBACK,
                    0, 0, K_TO_K, cr, &rval);
            }
            opt = NULL;
            optlen = 0;
        }
        break;
    case AF_UNIX:
    default:
        if (optlen != 0) {
            opt = sogetoff(mp, conn_ind->OPT_offset, optlen,
                __TPI_ALIGN_SIZE);
            if (opt == NULL) {
                error = EPROTO;
                freemsg(mp);
                eprintsoline(so, error);
                goto disconnect_unlocked;
            }
        }
        if (so->so_family == AF_UNIX) {
            if (!sti->sti_faddr_noxlate) {
                src = NULL;
                srclen = 0;
            }
            /* Extract src address from options */
            if (optlen != 0)
                so_getopt_srcaddr(opt, optlen, &src, &srclen);
        }
        break;
    }

    /*
     * Create the new socket.
     */
    nso = socket_newconn(so, NULL, NULL, SOCKET_SLEEP, &error);
    if (nso == NULL) {
        ASSERT(error != 0);
        /*
         * Accept can not fail with ENOBUFS. sotpi_create
         * sleeps waiting for memory until a signal is caught
         * so return EINTR.
         */
        freemsg(mp);
        if (error == ENOBUFS)
            error = EINTR;
        goto e_disc_unl;
    }
    nvp = SOTOV(nso);
    nsti = SOTOTPI(nso);

#ifdef DEBUG
    /*
     * SO_DEBUG is used to trigger the dprint* and eprint* macros thus
     * it's inherited early to allow debugging of the accept code itself.
     */
    nso->so_options |= so->so_options & SO_DEBUG;
#endif /* DEBUG */

    /*
     * Save the SRC address from the T_CONN_IND
     * for getpeername to work on AF_UNIX and on transports that do not
     * support TI_GETPEERNAME.
     *
     * NOTE: AF_UNIX NUL termination is ensured by the sender's
     * copyin_name().
     */
    if (srclen > (t_uscalar_t)nsti->sti_faddr_maxlen) {
        error = EINVAL;
        freemsg(mp);
        eprintsoline(so, error);
        goto disconnect_vp_unlocked;
    }
    nsti->sti_faddr_len = (socklen_t)srclen;
    ASSERT(sti->sti_faddr_len <= sti->sti_faddr_maxlen);
    bcopy(src, nsti->sti_faddr_sa, srclen);
    nsti->sti_faddr_valid = 1;

    /*
     * Record so_peercred and so_cpid from a cred in the T_CONN_IND.
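     *
     * Sketch of the two cases handled below (not part of the original
     * comment): if the T_CONN_IND mblk is shared or too small to be
     * reused, the cred is copied out (crhold) and a fresh T_CONN_RES
     * message is allocated; otherwise the cred is moved out of the
     * message with msg_extractcred() and the same mblk is rewritten in
     * place as the T_CONN_RES.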
     */
    if ((DB_REF(mp) > 1) || MBLKSIZE(mp) <
        (sizeof (struct T_conn_res) + sizeof (intptr_t))) {
        cred_t *cr;
        pid_t cpid;

        cr = msg_getcred(mp, &cpid);
        if (cr != NULL) {
            crhold(cr);
            nso->so_peercred = cr;
            nso->so_cpid = cpid;
        }
        freemsg(mp);

        mp = soallocproto1(NULL, sizeof (struct T_conn_res) +
            sizeof (intptr_t), 0, _ALLOC_INTR, cr);
        if (mp == NULL) {
            /*
             * Accept can not fail with ENOBUFS.
             * A signal was caught so return EINTR.
             */
            error = EINTR;
            eprintsoline(so, error);
            goto disconnect_vp_unlocked;
        }
        conn_res = (struct T_conn_res *)mp->b_rptr;
    } else {
        /*
         * For efficiency reasons we use msg_extractcred; no crhold
         * needed since db_credp is cleared (i.e., we move the cred
         * from the message to so_peercred).
         */
        nso->so_peercred = msg_extractcred(mp, &nso->so_cpid);

        mp->b_rptr = DB_BASE(mp);
        conn_res = (struct T_conn_res *)mp->b_rptr;
        mp->b_wptr = mp->b_rptr + sizeof (struct T_conn_res);

        mblk_setcred(mp, cr, curproc->p_pid);
    }

    /*
     * New socket must be bound at least in sockfs and, except for AF_INET,
     * (or AF_INET6) it also has to be bound in the transport provider.
     * We set the local address in the sonode from the T_OK_ACK of the
     * T_CONN_RES. For this reason the address we bind to here isn't
     * important.
     */
    if ((nso->so_family == AF_INET || nso->so_family == AF_INET6) &&
        /*CONSTCOND*/
        nso->so_type == SOCK_STREAM && !soaccept_tpi_tcp) {
        /*
         * Optimization for AF_INET{,6} transports
         * that can handle a T_CONN_RES without being bound.
         */
        mutex_enter(&nso->so_lock);
        so_automatic_bind(nso);
        mutex_exit(&nso->so_lock);
    } else {
        /* Perform NULL bind with the transport provider. */
        if ((error = sotpi_bind(nso, NULL, 0, _SOBIND_UNSPEC,
            cr)) != 0) {
            ASSERT(error != ENOBUFS);
            freemsg(mp);
            eprintsoline(nso, error);
            goto disconnect_vp_unlocked;
        }
    }

    /*
     * Inherit SIOCSPGRP, SS_ASYNC before we send the {O_}T_CONN_RES
     * so that any data arriving on the new socket will cause the
     * appropriate signals to be delivered for the new socket.
     *
     * No other thread (except strsock_proto and strsock_misc)
     * can access the new socket thus we relax the locking.
     */
    nso->so_pgrp = so->so_pgrp;
    nso->so_state |= so->so_state & SS_ASYNC;
    nsti->sti_faddr_noxlate = sti->sti_faddr_noxlate;

    if (nso->so_pgrp != 0) {
        if ((error = so_set_events(nso, nvp, cr)) != 0) {
            eprintsoline(nso, error);
            error = 0;
            nso->so_pgrp = 0;
        }
    }

    /*
     * Make note of the socket level options. TCP and IP level options
     * are already inherited. We could do all this after accept is
     * successful but doing it here simplifies code and no harm done
     * for error case.
     */
    nso->so_options = so->so_options & (SO_DEBUG|SO_REUSEADDR|SO_KEEPALIVE|
        SO_DONTROUTE|SO_BROADCAST|SO_USELOOPBACK|
        SO_OOBINLINE|SO_DGRAM_ERRIND|SO_LINGER);
    nso->so_sndbuf = so->so_sndbuf;
    nso->so_rcvbuf = so->so_rcvbuf;
    if (nso->so_options & SO_LINGER)
        nso->so_linger = so->so_linger;

    /*
     * Note that the following sti_direct code path should be
     * removed once we are confident that the direct sockets
     * do not result in any degradation.
1906 */ 1907 if (sti->sti_direct) { 1908 1909 ASSERT(opt != NULL); 1910 1911 conn_res->OPT_length = optlen; 1912 conn_res->OPT_offset = MBLKL(mp); 1913 bcopy(&opt, mp->b_wptr, optlen); 1914 mp->b_wptr += optlen; 1915 conn_res->PRIM_type = T_CONN_RES; 1916 conn_res->ACCEPTOR_id = 0; 1917 PRIM_type = T_CONN_RES; 1918 1919 /* Send down the T_CONN_RES on acceptor STREAM */ 1920 error = kstrputmsg(SOTOV(nso), mp, NULL, 1921 0, 0, MSG_BAND|MSG_HOLDSIG|MSG_IGNERROR, 0); 1922 if (error) { 1923 mutex_enter(&so->so_lock); 1924 so_lock_single(so); 1925 eprintsoline(so, error); 1926 goto disconnect_vp; 1927 } 1928 mutex_enter(&nso->so_lock); 1929 error = sowaitprim(nso, T_CONN_RES, T_OK_ACK, 1930 (t_uscalar_t)sizeof (struct T_ok_ack), &ack_mp, 0); 1931 if (error) { 1932 mutex_exit(&nso->so_lock); 1933 mutex_enter(&so->so_lock); 1934 so_lock_single(so); 1935 eprintsoline(so, error); 1936 goto disconnect_vp; 1937 } 1938 if (nso->so_family == AF_INET) { 1939 sin_t *sin; 1940 1941 sin = (sin_t *)(ack_mp->b_rptr + 1942 sizeof (struct T_ok_ack)); 1943 bcopy(sin, nsti->sti_laddr_sa, sizeof (sin_t)); 1944 nsti->sti_laddr_len = sizeof (sin_t); 1945 } else { 1946 sin6_t *sin6; 1947 1948 sin6 = (sin6_t *)(ack_mp->b_rptr + 1949 sizeof (struct T_ok_ack)); 1950 bcopy(sin6, nsti->sti_laddr_sa, sizeof (sin6_t)); 1951 nsti->sti_laddr_len = sizeof (sin6_t); 1952 } 1953 freemsg(ack_mp); 1954 1955 nso->so_state |= SS_ISCONNECTED; 1956 nso->so_proto_handle = (sock_lower_handle_t)opt; 1957 nsti->sti_laddr_valid = 1; 1958 1959 if (sti->sti_nl7c_flags & NL7C_ENABLED) { 1960 /* 1961 * A NL7C marked listen()er so the new socket 1962 * inherits the listen()er's NL7C state, except 1963 * for NL7C_POLLIN. 1964 * 1965 * Only call NL7C to process the new socket if 1966 * the listen socket allows blocking i/o. 1967 */ 1968 nsti->sti_nl7c_flags = 1969 sti->sti_nl7c_flags & (~NL7C_POLLIN); 1970 if (so->so_state & (SS_NONBLOCK|SS_NDELAY)) { 1971 /* 1972 * Nonblocking accept() just make it 1973 * persist to defer processing to the 1974 * read-side syscall (e.g. read). 1975 */ 1976 nsti->sti_nl7c_flags |= NL7C_SOPERSIST; 1977 } else if (nl7c_process(nso, B_FALSE)) { 1978 /* 1979 * NL7C has completed processing on the 1980 * socket, close the socket and back to 1981 * the top to await the next T_CONN_IND. 1982 */ 1983 mutex_exit(&nso->so_lock); 1984 (void) VOP_CLOSE(nvp, 0, 1, (offset_t)0, 1985 cr, NULL); 1986 VN_RELE(nvp); 1987 goto again; 1988 } 1989 /* Pass the new socket out */ 1990 } 1991 1992 mutex_exit(&nso->so_lock); 1993 1994 /* 1995 * It's possible, through the use of autopush for example, 1996 * that the acceptor stream may not support sti_direct 1997 * semantics. If the new socket does not support sti_direct 1998 * we issue a _SIOCSOCKFALLBACK to inform the transport 1999 * as we would in the I_PUSH case. 2000 */ 2001 if (nsti->sti_direct == 0) { 2002 int rval; 2003 2004 if ((error = strioctl(SOTOV(nso), _SIOCSOCKFALLBACK, 2005 0, 0, K_TO_K, cr, &rval)) != 0) { 2006 mutex_enter(&so->so_lock); 2007 so_lock_single(so); 2008 eprintsoline(so, error); 2009 goto disconnect_vp; 2010 } 2011 } 2012 2013 /* 2014 * Pass out new socket. 2015 */ 2016 if (nsop != NULL) 2017 *nsop = nso; 2018 2019 return (0); 2020 } 2021 2022 /* 2023 * This is the non-performance case for sockets (e.g. AF_UNIX sockets) 2024 * which don't support the FireEngine accept fast-path. It is also 2025 * used when the virtual "sockmod" has been I_POP'd and I_PUSH'd 2026 * again. 
Neither sockfs nor TCP attempt to find out if some other 2027 * random module has been inserted in between (in which case we 2028 * should follow TLI accept behaviour). We blindly assume the worst 2029 * case and revert back to old behaviour i.e. TCP will not send us 2030 * any option (eager) and the accept should happen on the listener 2031 * queue. Any queued T_conn_ind have already got their options removed 2032 * by so_sock2_stream() when "sockmod" was I_POP'd. 2033 */ 2034 /* 2035 * Fill in the {O_}T_CONN_RES before getting SOLOCKED. 2036 */ 2037 if ((nso->so_mode & SM_ACCEPTOR_ID) == 0) { 2038 #ifdef _ILP32 2039 queue_t *q; 2040 2041 /* 2042 * Find read queue in driver 2043 * Can safely do this since we "own" nso/nvp. 2044 */ 2045 q = strvp2wq(nvp)->q_next; 2046 while (SAMESTR(q)) 2047 q = q->q_next; 2048 q = RD(q); 2049 conn_res->ACCEPTOR_id = (t_uscalar_t)q; 2050 #else 2051 conn_res->ACCEPTOR_id = (t_uscalar_t)getminor(nvp->v_rdev); 2052 #endif /* _ILP32 */ 2053 conn_res->PRIM_type = O_T_CONN_RES; 2054 PRIM_type = O_T_CONN_RES; 2055 } else { 2056 conn_res->ACCEPTOR_id = nsti->sti_acceptor_id; 2057 conn_res->PRIM_type = T_CONN_RES; 2058 PRIM_type = T_CONN_RES; 2059 } 2060 conn_res->SEQ_number = SEQ_number; 2061 conn_res->OPT_length = 0; 2062 conn_res->OPT_offset = 0; 2063 2064 mutex_enter(&so->so_lock); 2065 so_lock_single(so); /* Set SOLOCKED */ 2066 mutex_exit(&so->so_lock); 2067 2068 error = kstrputmsg(SOTOV(so), mp, NULL, 2069 0, 0, MSG_BAND|MSG_HOLDSIG|MSG_IGNERROR, 0); 2070 mutex_enter(&so->so_lock); 2071 if (error) { 2072 eprintsoline(so, error); 2073 goto disconnect_vp; 2074 } 2075 error = sowaitprim(so, PRIM_type, T_OK_ACK, 2076 (t_uscalar_t)sizeof (struct T_ok_ack), &ack_mp, 0); 2077 if (error) { 2078 eprintsoline(so, error); 2079 goto disconnect_vp; 2080 } 2081 mutex_exit(&so->so_lock); 2082 /* 2083 * If there is a sin/sin6 appended onto the T_OK_ACK use 2084 * that to set the local address. If this is not present 2085 * then we zero out the address and don't set the 2086 * sti_laddr_valid bit. For AF_UNIX endpoints we copy over 2087 * the pathname from the listening socket. 2088 * In the case where this is TCP or an AF_UNIX socket the 2089 * client side may have queued data or a T_ORDREL in the 2090 * transport. Having now sent the T_CONN_RES we may receive 2091 * those queued messages at any time. Hold the acceptor 2092 * so_lock until its state and laddr are finalized. 2093 */ 2094 mutex_enter(&nso->so_lock); 2095 sinlen = (nso->so_family == AF_INET) ? 
sizeof (sin_t) : sizeof (sin6_t);
2096 if ((nso->so_family == AF_INET || nso->so_family == AF_INET6) &&
2097 MBLKL(ack_mp) == (sizeof (struct T_ok_ack) + sinlen)) {
2098 ack_mp->b_rptr += sizeof (struct T_ok_ack);
2099 bcopy(ack_mp->b_rptr, nsti->sti_laddr_sa, sinlen);
2100 nsti->sti_laddr_len = sinlen;
2101 nsti->sti_laddr_valid = 1;
2102 } else if (nso->so_family == AF_UNIX) {
2103 ASSERT(so->so_family == AF_UNIX);
2104 nsti->sti_laddr_len = sti->sti_laddr_len;
2105 ASSERT(nsti->sti_laddr_len <= nsti->sti_laddr_maxlen);
2106 bcopy(sti->sti_laddr_sa, nsti->sti_laddr_sa,
2107 nsti->sti_laddr_len);
2108 nsti->sti_laddr_valid = 1;
2109 } else {
2110 nsti->sti_laddr_len = sti->sti_laddr_len;
2111 ASSERT(nsti->sti_laddr_len <= nsti->sti_laddr_maxlen);
2112 bzero(nsti->sti_laddr_sa, nsti->sti_addr_size);
2113 nsti->sti_laddr_sa->sa_family = nso->so_family;
2114 }
2115 nso->so_state |= SS_ISCONNECTED;
2116 mutex_exit(&nso->so_lock);
2117
2118 freemsg(ack_mp);
2119
2120 mutex_enter(&so->so_lock);
2121 so_unlock_single(so, SOLOCKED);
2122 mutex_exit(&so->so_lock);
2123
2124 /*
2125 * Pass out new socket.
2126 */
2127 if (nsop != NULL)
2128 *nsop = nso;
2129
2130 return (0);
2131
2132
2133 eproto_disc_unl:
2134 error = EPROTO;
2135 e_disc_unl:
2136 eprintsoline(so, error);
2137 goto disconnect_unlocked;
2138
2139 pr_disc_vp_unl:
2140 eprintsoline(so, error);
2141 disconnect_vp_unlocked:
2142 (void) VOP_CLOSE(nvp, 0, 1, 0, cr, NULL);
2143 VN_RELE(nvp);
2144 disconnect_unlocked:
2145 (void) sodisconnect(so, SEQ_number, 0);
2146 return (error);
2147
2148 pr_disc_vp:
2149 eprintsoline(so, error);
2150 disconnect_vp:
2151 (void) sodisconnect(so, SEQ_number, _SODISCONNECT_LOCK_HELD);
2152 so_unlock_single(so, SOLOCKED);
2153 mutex_exit(&so->so_lock);
2154 (void) VOP_CLOSE(nvp, 0, 1, 0, cr, NULL);
2155 VN_RELE(nvp);
2156 return (error);
2157
2158 conn_bad: /* Note: SunOS 4/BSD unconditionally returns EINVAL here */
2159 error = (so->so_type == SOCK_DGRAM || so->so_type == SOCK_RAW)
2160 ? EOPNOTSUPP : EINVAL;
2161 e_bad:
2162 eprintsoline(so, error);
2163 return (error);
2164 }
2165
2166 /*
2167 * connect a socket.
2168 *
2169 * Allow SOCK_DGRAM sockets to reconnect (by specifying a new address) and to
2170 * unconnect (by specifying a null address).
2171 */
2172 int
2173 sotpi_connect(struct sonode *so,
2174 struct sockaddr *name,
2175 socklen_t namelen,
2176 int fflag,
2177 int flags,
2178 struct cred *cr)
2179 {
2180 struct T_conn_req conn_req;
2181 int error = 0;
2182 mblk_t *mp;
2183 void *src;
2184 socklen_t srclen;
2185 void *addr;
2186 socklen_t addrlen;
2187 boolean_t need_unlock;
2188 sotpi_info_t *sti = SOTOTPI(so);
2189
2190 dprintso(so, 1, ("sotpi_connect(%p, %p, %d, 0x%x, 0x%x) %s\n",
2191 (void *)so, (void *)name, namelen, fflag, flags,
2192 pr_state(so->so_state, so->so_mode)));
2193
2194 /*
2195 * Preallocate the T_CONN_REQ mblk before grabbing SOLOCKED to
2196 * avoid sleeping for memory with SOLOCKED held.
2197 * We know that the T_CONN_REQ can't be larger than 2 * sti_faddr_maxlen
2198 * + sizeof (struct T_opthdr).
2199 * (the AF_UNIX so_ux_addr_xlate() does not make the address
2200 * exceed sti_faddr_maxlen).
2201 */
2202 mp = soallocproto(sizeof (struct T_conn_req) +
2203 2 * sti->sti_faddr_maxlen + sizeof (struct T_opthdr), _ALLOC_INTR,
2204 cr);
2205 if (mp == NULL) {
2206 /*
2207 * Connect can not fail with ENOBUFS. A signal was
2208 * caught so return EINTR.
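 * (_ALLOC_INTR sleeps for memory but gives up when a signal is caught,
 * in which case soallocproto() returns NULL.)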
2209 */ 2210 error = EINTR; 2211 eprintsoline(so, error); 2212 return (error); 2213 } 2214 2215 mutex_enter(&so->so_lock); 2216 /* 2217 * Make sure there is a preallocated T_unbind_req message 2218 * before any binding. This message is allocated when the 2219 * socket is created. Since another thread can consume 2220 * so_unbind_mp by the time we return from so_lock_single(), 2221 * we should check the availability of so_unbind_mp after 2222 * we return from so_lock_single(). 2223 */ 2224 2225 so_lock_single(so); /* Set SOLOCKED */ 2226 need_unlock = B_TRUE; 2227 2228 if (sti->sti_unbind_mp == NULL) { 2229 dprintso(so, 1, ("sotpi_connect: allocating unbind_req\n")); 2230 /* NOTE: holding so_lock while sleeping */ 2231 sti->sti_unbind_mp = 2232 soallocproto(sizeof (struct T_unbind_req), _ALLOC_INTR, cr); 2233 if (sti->sti_unbind_mp == NULL) { 2234 error = EINTR; 2235 goto done; 2236 } 2237 } 2238 2239 /* 2240 * Can't have done a listen before connecting. 2241 */ 2242 if (so->so_state & SS_ACCEPTCONN) { 2243 error = EOPNOTSUPP; 2244 goto done; 2245 } 2246 2247 /* 2248 * Must be bound with the transport 2249 */ 2250 if (!(so->so_state & SS_ISBOUND)) { 2251 if ((so->so_family == AF_INET || so->so_family == AF_INET6) && 2252 /*CONSTCOND*/ 2253 so->so_type == SOCK_STREAM && !soconnect_tpi_tcp) { 2254 /* 2255 * Optimization for AF_INET{,6} transports 2256 * that can handle a T_CONN_REQ without being bound. 2257 */ 2258 so_automatic_bind(so); 2259 } else { 2260 error = sotpi_bind(so, NULL, 0, 2261 _SOBIND_UNSPEC|_SOBIND_LOCK_HELD, cr); 2262 if (error) 2263 goto done; 2264 } 2265 ASSERT(so->so_state & SS_ISBOUND); 2266 flags |= _SOCONNECT_DID_BIND; 2267 } 2268 2269 /* 2270 * Handle a connect to a name parameter of type AF_UNSPEC like a 2271 * connect to a null address. This is the portable method to 2272 * unconnect a socket. 2273 */ 2274 if ((namelen >= sizeof (sa_family_t)) && 2275 (name->sa_family == AF_UNSPEC)) { 2276 name = NULL; 2277 namelen = 0; 2278 } 2279 2280 /* 2281 * Check that we are not already connected. 2282 * A connection-oriented socket cannot be reconnected. 2283 * A connected connection-less socket can be 2284 * - connected to a different address by a subsequent connect 2285 * - "unconnected" by a connect to the NULL address 2286 */ 2287 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) { 2288 ASSERT(!(flags & _SOCONNECT_DID_BIND)); 2289 if (so->so_mode & SM_CONNREQUIRED) { 2290 /* Connection-oriented socket */ 2291 error = so->so_state & SS_ISCONNECTED ? 2292 EISCONN : EALREADY; 2293 goto done; 2294 } 2295 /* Connection-less socket */ 2296 if (name == NULL) { 2297 /* 2298 * Remove the connected state and clear SO_DGRAM_ERRIND 2299 * since it was set when the socket was connected. 2300 * If this is UDP also send down a T_DISCON_REQ. 2301 */ 2302 int val; 2303 2304 if ((so->so_family == AF_INET || 2305 so->so_family == AF_INET6) && 2306 (so->so_type == SOCK_DGRAM || 2307 so->so_type == SOCK_RAW) && 2308 /*CONSTCOND*/ 2309 !soconnect_tpi_udp) { 2310 /* XXX What about implicitly unbinding here? 
*/ 2311 error = sodisconnect(so, -1, 2312 _SODISCONNECT_LOCK_HELD); 2313 } else { 2314 so->so_state &= 2315 ~(SS_ISCONNECTED | SS_ISCONNECTING); 2316 sti->sti_faddr_valid = 0; 2317 sti->sti_faddr_len = 0; 2318 } 2319 2320 /* Remove SOLOCKED since setsockopt will grab it */ 2321 so_unlock_single(so, SOLOCKED); 2322 mutex_exit(&so->so_lock); 2323 2324 val = 0; 2325 (void) sotpi_setsockopt(so, SOL_SOCKET, 2326 SO_DGRAM_ERRIND, &val, (t_uscalar_t)sizeof (val), 2327 cr); 2328 2329 mutex_enter(&so->so_lock); 2330 so_lock_single(so); /* Set SOLOCKED */ 2331 goto done; 2332 } 2333 } 2334 ASSERT(so->so_state & SS_ISBOUND); 2335 2336 if (name == NULL || namelen == 0) { 2337 error = EINVAL; 2338 goto done; 2339 } 2340 /* 2341 * Mark the socket if sti_faddr_sa represents the transport level 2342 * address. 2343 */ 2344 if (flags & _SOCONNECT_NOXLATE) { 2345 struct sockaddr_ux *soaddr_ux; 2346 2347 ASSERT(so->so_family == AF_UNIX); 2348 if (namelen != sizeof (struct sockaddr_ux)) { 2349 error = EINVAL; 2350 goto done; 2351 } 2352 soaddr_ux = (struct sockaddr_ux *)name; 2353 name = (struct sockaddr *)&soaddr_ux->sou_addr; 2354 namelen = sizeof (soaddr_ux->sou_addr); 2355 sti->sti_faddr_noxlate = 1; 2356 } 2357 2358 /* 2359 * Length and family checks. 2360 */ 2361 error = so_addr_verify(so, name, namelen); 2362 if (error) 2363 goto bad; 2364 2365 /* 2366 * Save foreign address. Needed for AF_UNIX as well as 2367 * transport providers that do not support TI_GETPEERNAME. 2368 * Also used for cached foreign address for TCP and UDP. 2369 */ 2370 if (namelen > (t_uscalar_t)sti->sti_faddr_maxlen) { 2371 error = EINVAL; 2372 goto done; 2373 } 2374 sti->sti_faddr_len = (socklen_t)namelen; 2375 ASSERT(sti->sti_faddr_len <= sti->sti_faddr_maxlen); 2376 bcopy(name, sti->sti_faddr_sa, namelen); 2377 sti->sti_faddr_valid = 1; 2378 2379 if (so->so_family == AF_UNIX) { 2380 if (sti->sti_faddr_noxlate) { 2381 /* 2382 * Already have a transport internal address. Do not 2383 * pass any (transport internal) source address. 2384 */ 2385 addr = sti->sti_faddr_sa; 2386 addrlen = (t_uscalar_t)sti->sti_faddr_len; 2387 src = NULL; 2388 srclen = 0; 2389 } else { 2390 /* 2391 * Pass the sockaddr_un source address as an option 2392 * and translate the remote address. 2393 * Holding so_lock thus sti_laddr_sa can not change. 2394 */ 2395 src = sti->sti_laddr_sa; 2396 srclen = (t_uscalar_t)sti->sti_laddr_len; 2397 dprintso(so, 1, 2398 ("sotpi_connect UNIX: srclen %d, src %p\n", 2399 srclen, src)); 2400 error = so_ux_addr_xlate(so, 2401 sti->sti_faddr_sa, (socklen_t)sti->sti_faddr_len, 2402 (flags & _SOCONNECT_XPG4_2), 2403 &addr, &addrlen); 2404 if (error) 2405 goto bad; 2406 } 2407 } else { 2408 addr = sti->sti_faddr_sa; 2409 addrlen = (t_uscalar_t)sti->sti_faddr_len; 2410 src = NULL; 2411 srclen = 0; 2412 } 2413 /* 2414 * When connecting a datagram socket we issue the SO_DGRAM_ERRIND 2415 * option which asks the transport provider to send T_UDERR_IND 2416 * messages. These T_UDERR_IND messages are used to return connected 2417 * style errors (e.g. ECONNRESET) for connected datagram sockets. 2418 * 2419 * In addition, for UDP (and SOCK_RAW AF_INET{,6} sockets) 2420 * we send down a T_CONN_REQ. This is needed to let the 2421 * transport assign a local address that is consistent with 2422 * the remote address. Applications depend on a getsockname() 2423 * after a connect() to retrieve the "source" IP address for 2424 * the connected socket. Invalidate the cached local address 2425 * to force getsockname() to enquire of the transport. 
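 * (SO_DGRAM_ERRIND is cleared again if the socket is later unconnected
 * by a connect to a NULL/AF_UNSPEC address; see the code earlier in
 * this function.)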
2426 */ 2427 if (!(so->so_mode & SM_CONNREQUIRED)) { 2428 /* 2429 * Datagram socket. 2430 */ 2431 int32_t val; 2432 2433 so_unlock_single(so, SOLOCKED); 2434 mutex_exit(&so->so_lock); 2435 2436 val = 1; 2437 (void) sotpi_setsockopt(so, SOL_SOCKET, SO_DGRAM_ERRIND, 2438 &val, (t_uscalar_t)sizeof (val), cr); 2439 2440 mutex_enter(&so->so_lock); 2441 so_lock_single(so); /* Set SOLOCKED */ 2442 if ((so->so_family != AF_INET && so->so_family != AF_INET6) || 2443 (so->so_type != SOCK_DGRAM && so->so_type != SOCK_RAW) || 2444 soconnect_tpi_udp) { 2445 soisconnected(so); 2446 goto done; 2447 } 2448 /* 2449 * Send down T_CONN_REQ etc. 2450 * Clear fflag to avoid returning EWOULDBLOCK. 2451 */ 2452 fflag = 0; 2453 ASSERT(so->so_family != AF_UNIX); 2454 sti->sti_laddr_valid = 0; 2455 } else if (sti->sti_laddr_len != 0) { 2456 /* 2457 * If the local address or port was "any" then it may be 2458 * changed by the transport as a result of the 2459 * connect. Invalidate the cached version if we have one. 2460 */ 2461 switch (so->so_family) { 2462 case AF_INET: 2463 ASSERT(sti->sti_laddr_len == (socklen_t)sizeof (sin_t)); 2464 if (((sin_t *)sti->sti_laddr_sa)->sin_addr.s_addr == 2465 INADDR_ANY || 2466 ((sin_t *)sti->sti_laddr_sa)->sin_port == 0) 2467 sti->sti_laddr_valid = 0; 2468 break; 2469 2470 case AF_INET6: 2471 ASSERT(sti->sti_laddr_len == 2472 (socklen_t)sizeof (sin6_t)); 2473 if (IN6_IS_ADDR_UNSPECIFIED( 2474 &((sin6_t *)sti->sti_laddr_sa) ->sin6_addr) || 2475 IN6_IS_ADDR_V4MAPPED_ANY( 2476 &((sin6_t *)sti->sti_laddr_sa)->sin6_addr) || 2477 ((sin6_t *)sti->sti_laddr_sa)->sin6_port == 0) 2478 sti->sti_laddr_valid = 0; 2479 break; 2480 2481 default: 2482 break; 2483 } 2484 } 2485 2486 /* 2487 * Check for failure of an earlier call 2488 */ 2489 if (so->so_error != 0) 2490 goto so_bad; 2491 2492 /* 2493 * Send down T_CONN_REQ. Message was allocated above. 2494 */ 2495 conn_req.PRIM_type = T_CONN_REQ; 2496 conn_req.DEST_length = addrlen; 2497 conn_req.DEST_offset = (t_scalar_t)sizeof (conn_req); 2498 if (srclen == 0) { 2499 conn_req.OPT_length = 0; 2500 conn_req.OPT_offset = 0; 2501 soappendmsg(mp, &conn_req, sizeof (conn_req)); 2502 soappendmsg(mp, addr, addrlen); 2503 } else { 2504 /* 2505 * There is a AF_UNIX sockaddr_un to include as a source 2506 * address option. 2507 */ 2508 struct T_opthdr toh; 2509 2510 toh.level = SOL_SOCKET; 2511 toh.name = SO_SRCADDR; 2512 toh.len = (t_uscalar_t)(srclen + sizeof (struct T_opthdr)); 2513 toh.status = 0; 2514 conn_req.OPT_length = 2515 (t_scalar_t)(sizeof (toh) + _TPI_ALIGN_TOPT(srclen)); 2516 conn_req.OPT_offset = (t_scalar_t)(sizeof (conn_req) + 2517 _TPI_ALIGN_TOPT(addrlen)); 2518 2519 soappendmsg(mp, &conn_req, sizeof (conn_req)); 2520 soappendmsg(mp, addr, addrlen); 2521 mp->b_wptr += _TPI_ALIGN_TOPT(addrlen) - addrlen; 2522 soappendmsg(mp, &toh, sizeof (toh)); 2523 soappendmsg(mp, src, srclen); 2524 mp->b_wptr += _TPI_ALIGN_TOPT(srclen) - srclen; 2525 ASSERT(mp->b_wptr <= mp->b_datap->db_lim); 2526 } 2527 /* 2528 * Set SS_ISCONNECTING before sending down the T_CONN_REQ 2529 * in order to have the right state when the T_CONN_CON shows up. 
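 * The incoming T_CONN_CON (or T_DISCON_IND) is processed on the stream
 * head read side, which updates so_state and wakes up the
 * sowaitconnected() call below.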
2530 */ 2531 soisconnecting(so); 2532 mutex_exit(&so->so_lock); 2533 2534 if (AU_AUDITING()) 2535 audit_sock(T_CONN_REQ, strvp2wq(SOTOV(so)), mp, 0); 2536 2537 error = kstrputmsg(SOTOV(so), mp, NULL, 0, 0, 2538 MSG_BAND|MSG_HOLDSIG|MSG_IGNERROR, 0); 2539 mp = NULL; 2540 mutex_enter(&so->so_lock); 2541 if (error != 0) 2542 goto bad; 2543 2544 if ((error = sowaitokack(so, T_CONN_REQ)) != 0) 2545 goto bad; 2546 2547 /* Allow other threads to access the socket */ 2548 so_unlock_single(so, SOLOCKED); 2549 need_unlock = B_FALSE; 2550 2551 /* 2552 * Wait until we get a T_CONN_CON or an error 2553 */ 2554 if ((error = sowaitconnected(so, fflag, 0)) != 0) { 2555 so_lock_single(so); /* Set SOLOCKED */ 2556 need_unlock = B_TRUE; 2557 } 2558 2559 done: 2560 freemsg(mp); 2561 switch (error) { 2562 case EINPROGRESS: 2563 case EALREADY: 2564 case EISCONN: 2565 case EINTR: 2566 /* Non-fatal errors */ 2567 sti->sti_laddr_valid = 0; 2568 /* FALLTHRU */ 2569 case 0: 2570 break; 2571 default: 2572 ASSERT(need_unlock); 2573 /* 2574 * Fatal errors: clear SS_ISCONNECTING in case it was set, 2575 * and invalidate local-address cache 2576 */ 2577 so->so_state &= ~SS_ISCONNECTING; 2578 sti->sti_laddr_valid = 0; 2579 /* A discon_ind might have already unbound us */ 2580 if ((flags & _SOCONNECT_DID_BIND) && 2581 (so->so_state & SS_ISBOUND)) { 2582 int err; 2583 2584 err = sotpi_unbind(so, 0); 2585 /* LINTED - statement has no conseq */ 2586 if (err) { 2587 eprintsoline(so, err); 2588 } 2589 } 2590 break; 2591 } 2592 if (need_unlock) 2593 so_unlock_single(so, SOLOCKED); 2594 mutex_exit(&so->so_lock); 2595 return (error); 2596 2597 so_bad: error = sogeterr(so, B_TRUE); 2598 bad: eprintsoline(so, error); 2599 goto done; 2600 } 2601 2602 /* ARGSUSED */ 2603 int 2604 sotpi_shutdown(struct sonode *so, int how, struct cred *cr) 2605 { 2606 struct T_ordrel_req ordrel_req; 2607 mblk_t *mp; 2608 uint_t old_state, state_change; 2609 int error = 0; 2610 sotpi_info_t *sti = SOTOTPI(so); 2611 2612 dprintso(so, 1, ("sotpi_shutdown(%p, %d) %s\n", 2613 (void *)so, how, pr_state(so->so_state, so->so_mode))); 2614 2615 mutex_enter(&so->so_lock); 2616 so_lock_single(so); /* Set SOLOCKED */ 2617 2618 /* 2619 * SunOS 4.X has no check for datagram sockets. 2620 * 5.X checks that it is connected (ENOTCONN) 2621 * X/Open requires that we check the connected state. 2622 */ 2623 if (!(so->so_state & SS_ISCONNECTED)) { 2624 if (!xnet_skip_checks) { 2625 error = ENOTCONN; 2626 if (xnet_check_print) { 2627 printf("sockfs: X/Open shutdown check " 2628 "caused ENOTCONN\n"); 2629 } 2630 } 2631 goto done; 2632 } 2633 /* 2634 * Record the current state and then perform any state changes. 2635 * Then use the difference between the old and new states to 2636 * determine which messages need to be sent. 2637 * This prevents e.g. duplicate T_ORDREL_REQ when there are 2638 * duplicate calls to shutdown(). 2639 */ 2640 old_state = so->so_state; 2641 2642 switch (how) { 2643 case 0: 2644 socantrcvmore(so); 2645 break; 2646 case 1: 2647 socantsendmore(so); 2648 break; 2649 case 2: 2650 socantsendmore(so); 2651 socantrcvmore(so); 2652 break; 2653 default: 2654 error = EINVAL; 2655 goto done; 2656 } 2657 2658 /* 2659 * Assumes that the SS_CANT* flags are never cleared in the above code. 
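 * Since the flags are only ever added, the subtraction below yields
 * exactly the flags that were turned on by this call to shutdown().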
2660 */ 2661 state_change = (so->so_state & (SS_CANTRCVMORE|SS_CANTSENDMORE)) - 2662 (old_state & (SS_CANTRCVMORE|SS_CANTSENDMORE)); 2663 ASSERT((state_change & ~(SS_CANTRCVMORE|SS_CANTSENDMORE)) == 0); 2664 2665 switch (state_change) { 2666 case 0: 2667 dprintso(so, 1, 2668 ("sotpi_shutdown: nothing to send in state 0x%x\n", 2669 so->so_state)); 2670 goto done; 2671 2672 case SS_CANTRCVMORE: 2673 mutex_exit(&so->so_lock); 2674 strseteof(SOTOV(so), 1); 2675 /* 2676 * strseteof takes care of read side wakeups, 2677 * pollwakeups, and signals. 2678 */ 2679 /* 2680 * Get the read lock before flushing data to avoid problems 2681 * with the T_EXDATA_IND MSG_PEEK code in sotpi_recvmsg. 2682 */ 2683 mutex_enter(&so->so_lock); 2684 (void) so_lock_read(so, 0); /* Set SOREADLOCKED */ 2685 mutex_exit(&so->so_lock); 2686 2687 /* Flush read side queue */ 2688 strflushrq(SOTOV(so), FLUSHALL); 2689 2690 mutex_enter(&so->so_lock); 2691 so_unlock_read(so); /* Clear SOREADLOCKED */ 2692 break; 2693 2694 case SS_CANTSENDMORE: 2695 mutex_exit(&so->so_lock); 2696 strsetwerror(SOTOV(so), 0, 0, sogetwrerr); 2697 mutex_enter(&so->so_lock); 2698 break; 2699 2700 case SS_CANTSENDMORE|SS_CANTRCVMORE: 2701 mutex_exit(&so->so_lock); 2702 strsetwerror(SOTOV(so), 0, 0, sogetwrerr); 2703 strseteof(SOTOV(so), 1); 2704 /* 2705 * strseteof takes care of read side wakeups, 2706 * pollwakeups, and signals. 2707 */ 2708 /* 2709 * Get the read lock before flushing data to avoid problems 2710 * with the T_EXDATA_IND MSG_PEEK code in sotpi_recvmsg. 2711 */ 2712 mutex_enter(&so->so_lock); 2713 (void) so_lock_read(so, 0); /* Set SOREADLOCKED */ 2714 mutex_exit(&so->so_lock); 2715 2716 /* Flush read side queue */ 2717 strflushrq(SOTOV(so), FLUSHALL); 2718 2719 mutex_enter(&so->so_lock); 2720 so_unlock_read(so); /* Clear SOREADLOCKED */ 2721 break; 2722 } 2723 2724 ASSERT(MUTEX_HELD(&so->so_lock)); 2725 2726 /* 2727 * If either SS_CANTSENDMORE or SS_CANTRCVMORE or both of them 2728 * was set due to this call and the new state has both of them set: 2729 * Send the AF_UNIX close indication 2730 * For T_COTS send a discon_ind 2731 * 2732 * If cantsend was set due to this call: 2733 * For T_COTSORD send an ordrel_ind 2734 * 2735 * Note that for T_CLTS there is no message sent here. 2736 */ 2737 if ((so->so_state & (SS_CANTRCVMORE|SS_CANTSENDMORE)) == 2738 (SS_CANTRCVMORE|SS_CANTSENDMORE)) { 2739 /* 2740 * For SunOS 4.X compatibility we tell the other end 2741 * that we are unable to receive at this point. 2742 */ 2743 if (so->so_family == AF_UNIX && sti->sti_serv_type != T_CLTS) 2744 so_unix_close(so); 2745 2746 if (sti->sti_serv_type == T_COTS) 2747 error = sodisconnect(so, -1, _SODISCONNECT_LOCK_HELD); 2748 } 2749 if ((state_change & SS_CANTSENDMORE) && 2750 (sti->sti_serv_type == T_COTS_ORD)) { 2751 /* Send an orderly release */ 2752 ordrel_req.PRIM_type = T_ORDREL_REQ; 2753 2754 mutex_exit(&so->so_lock); 2755 mp = soallocproto1(&ordrel_req, sizeof (ordrel_req), 2756 0, _ALLOC_SLEEP, cr); 2757 /* 2758 * Send down the T_ORDREL_REQ even if there is flow control. 2759 * This prevents shutdown from blocking. 2760 * Note that there is no T_OK_ACK for ordrel_req. 
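 * MSG_IGNFLOW below makes kstrputmsg() bypass flow control for this
 * message.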
2761 */
2762 error = kstrputmsg(SOTOV(so), mp, NULL, 0, 0,
2763 MSG_BAND|MSG_HOLDSIG|MSG_IGNERROR|MSG_IGNFLOW, 0);
2764 mutex_enter(&so->so_lock);
2765 if (error) {
2766 eprintsoline(so, error);
2767 goto done;
2768 }
2769 }
2770
2771 done:
2772 so_unlock_single(so, SOLOCKED);
2773 mutex_exit(&so->so_lock);
2774 return (error);
2775 }
2776
2777 /*
2778 * For any connected SOCK_STREAM/SOCK_SEQPACKET AF_UNIX socket we send
2779 * a zero-length T_OPTDATA_REQ with the SO_UNIX_CLOSE option to inform the peer
2780 * that we have closed.
2781 * Also, for connected AF_UNIX SOCK_DGRAM sockets we send a zero-length
2782 * T_UNITDATA_REQ containing the same option.
2783 *
2784 * For SOCK_DGRAM half-connections (somebody connected to this end
2785 * but this end is not connected) we don't know where to send any
2786 * SO_UNIX_CLOSE.
2787 *
2788 * We have to ignore stream head errors just in case there has been
2789 * a shutdown(output).
2790 * Ignore any flow control to try to get the message more quickly to the peer.
2791 * While locally ignoring flow control solves the problem when there
2792 * is only the loopback transport on the stream, it would not provide
2793 * the correct AF_UNIX socket semantics when one or more modules have
2794 * been pushed.
2795 */
2796 void
2797 so_unix_close(struct sonode *so)
2798 {
2799 int error;
2800 struct T_opthdr toh;
2801 mblk_t *mp;
2802 sotpi_info_t *sti = SOTOTPI(so);
2803
2804 ASSERT(MUTEX_HELD(&so->so_lock));
2805
2806 ASSERT(so->so_family == AF_UNIX);
2807
2808 if ((so->so_state & (SS_ISCONNECTED|SS_ISBOUND)) !=
2809 (SS_ISCONNECTED|SS_ISBOUND))
2810 return;
2811
2812 dprintso(so, 1, ("so_unix_close(%p) %s\n",
2813 (void *)so, pr_state(so->so_state, so->so_mode)));
2814
2815 toh.level = SOL_SOCKET;
2816 toh.name = SO_UNIX_CLOSE;
2817
2818 /* zero length + header */
2819 toh.len = (t_uscalar_t)sizeof (struct T_opthdr);
2820 toh.status = 0;
2821
2822 if (so->so_type == SOCK_STREAM || so->so_type == SOCK_SEQPACKET) {
2823 struct T_optdata_req tdr;
2824
2825 tdr.PRIM_type = T_OPTDATA_REQ;
2826 tdr.DATA_flag = 0;
2827
2828 tdr.OPT_length = (t_scalar_t)sizeof (toh);
2829 tdr.OPT_offset = (t_scalar_t)sizeof (tdr);
2830
2831 /* NOTE: holding so_lock while sleeping */
2832 mp = soallocproto2(&tdr, sizeof (tdr),
2833 &toh, sizeof (toh), 0, _ALLOC_SLEEP, CRED());
2834 } else {
2835 struct T_unitdata_req tudr;
2836 void *addr;
2837 socklen_t addrlen;
2838 void *src;
2839 socklen_t srclen;
2840 struct T_opthdr toh2;
2841 t_scalar_t size;
2842
2843 /* Connected DGRAM socket */
2844
2845 /*
2846 * For AF_UNIX the destination address is translated to
2847 * an internal name and the source address is passed as
2848 * an option.
2849 */
2850 /*
2851 * Length and family checks.
2852 */
2853 error = so_addr_verify(so, sti->sti_faddr_sa,
2854 (t_uscalar_t)sti->sti_faddr_len);
2855 if (error) {
2856 eprintsoline(so, error);
2857 return;
2858 }
2859 if (sti->sti_faddr_noxlate) {
2860 /*
2861 * Already have a transport internal address. Do not
2862 * pass any (transport internal) source address.
2863 */
2864 addr = sti->sti_faddr_sa;
2865 addrlen = (t_uscalar_t)sti->sti_faddr_len;
2866 src = NULL;
2867 srclen = 0;
2868 } else {
2869 /*
2870 * Pass the sockaddr_un source address as an option
2871 * and translate the remote address.
2872 * Holding so_lock thus sti_laddr_sa can not change.
2873 */ 2874 src = sti->sti_laddr_sa; 2875 srclen = (socklen_t)sti->sti_laddr_len; 2876 dprintso(so, 1, 2877 ("so_ux_close: srclen %d, src %p\n", 2878 srclen, src)); 2879 error = so_ux_addr_xlate(so, 2880 sti->sti_faddr_sa, 2881 (socklen_t)sti->sti_faddr_len, 0, 2882 &addr, &addrlen); 2883 if (error) { 2884 eprintsoline(so, error); 2885 return; 2886 } 2887 } 2888 tudr.PRIM_type = T_UNITDATA_REQ; 2889 tudr.DEST_length = addrlen; 2890 tudr.DEST_offset = (t_scalar_t)sizeof (tudr); 2891 if (srclen == 0) { 2892 tudr.OPT_length = (t_scalar_t)sizeof (toh); 2893 tudr.OPT_offset = (t_scalar_t)(sizeof (tudr) + 2894 _TPI_ALIGN_TOPT(addrlen)); 2895 2896 size = tudr.OPT_offset + tudr.OPT_length; 2897 /* NOTE: holding so_lock while sleeping */ 2898 mp = soallocproto2(&tudr, sizeof (tudr), 2899 addr, addrlen, size, _ALLOC_SLEEP, CRED()); 2900 mp->b_wptr += (_TPI_ALIGN_TOPT(addrlen) - addrlen); 2901 soappendmsg(mp, &toh, sizeof (toh)); 2902 } else { 2903 /* 2904 * There is a AF_UNIX sockaddr_un to include as a 2905 * source address option. 2906 */ 2907 tudr.OPT_length = (t_scalar_t)(2 * sizeof (toh) + 2908 _TPI_ALIGN_TOPT(srclen)); 2909 tudr.OPT_offset = (t_scalar_t)(sizeof (tudr) + 2910 _TPI_ALIGN_TOPT(addrlen)); 2911 2912 toh2.level = SOL_SOCKET; 2913 toh2.name = SO_SRCADDR; 2914 toh2.len = (t_uscalar_t)(srclen + 2915 sizeof (struct T_opthdr)); 2916 toh2.status = 0; 2917 2918 size = tudr.OPT_offset + tudr.OPT_length; 2919 2920 /* NOTE: holding so_lock while sleeping */ 2921 mp = soallocproto2(&tudr, sizeof (tudr), 2922 addr, addrlen, size, _ALLOC_SLEEP, CRED()); 2923 mp->b_wptr += _TPI_ALIGN_TOPT(addrlen) - addrlen; 2924 soappendmsg(mp, &toh, sizeof (toh)); 2925 soappendmsg(mp, &toh2, sizeof (toh2)); 2926 soappendmsg(mp, src, srclen); 2927 mp->b_wptr += _TPI_ALIGN_TOPT(srclen) - srclen; 2928 } 2929 ASSERT(mp->b_wptr <= mp->b_datap->db_lim); 2930 } 2931 mutex_exit(&so->so_lock); 2932 error = kstrputmsg(SOTOV(so), mp, NULL, 0, 0, 2933 MSG_BAND|MSG_HOLDSIG|MSG_IGNERROR|MSG_IGNFLOW, 0); 2934 mutex_enter(&so->so_lock); 2935 } 2936 2937 /* 2938 * Called by sotpi_recvmsg when reading a non-zero amount of data. 2939 * In addition, the caller typically verifies that there is some 2940 * potential state to clear by checking 2941 * if (so->so_state & (SS_OOBPEND|SS_HAVEOOBDATA|SS_RCVATMARK)) 2942 * before calling this routine. 2943 * Note that such a check can be made without holding so_lock since 2944 * sotpi_recvmsg is single-threaded (using SOREADLOCKED) and only sotpi_recvmsg 2945 * decrements sti_oobsigcnt. 2946 * 2947 * When data is read *after* the point that all pending 2948 * oob data has been consumed the oob indication is cleared. 2949 * 2950 * This logic keeps select/poll returning POLLRDBAND and 2951 * SIOCATMARK returning true until we have read past 2952 * the mark. 2953 */ 2954 static void 2955 sorecv_update_oobstate(struct sonode *so) 2956 { 2957 sotpi_info_t *sti = SOTOTPI(so); 2958 2959 mutex_enter(&so->so_lock); 2960 ASSERT(so_verify_oobstate(so)); 2961 dprintso(so, 1, 2962 ("sorecv_update_oobstate: counts %d/%d state %s\n", 2963 sti->sti_oobsigcnt, 2964 sti->sti_oobcnt, pr_state(so->so_state, so->so_mode))); 2965 if (sti->sti_oobsigcnt == 0) { 2966 /* No more pending oob indications */ 2967 so->so_state &= ~(SS_OOBPEND|SS_HAVEOOBDATA|SS_RCVATMARK); 2968 freemsg(so->so_oobmsg); 2969 so->so_oobmsg = NULL; 2970 } 2971 ASSERT(so_verify_oobstate(so)); 2972 mutex_exit(&so->so_lock); 2973 } 2974 2975 /* 2976 * Handle recv* calls for an so which has NL7C saved recv mblk_t(s). 
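 * M_DATA mblks are uiomove()d directly into the caller's buffer; any
 * other mblk is unlinked and handed back through *rmp for the caller
 * to process.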
2977 */ 2978 static int 2979 nl7c_sorecv(struct sonode *so, mblk_t **rmp, uio_t *uiop, rval_t *rp) 2980 { 2981 sotpi_info_t *sti = SOTOTPI(so); 2982 int error = 0; 2983 mblk_t *tmp = NULL; 2984 mblk_t *pmp = NULL; 2985 mblk_t *nmp = sti->sti_nl7c_rcv_mp; 2986 2987 ASSERT(nmp != NULL); 2988 2989 while (nmp != NULL && uiop->uio_resid > 0) { 2990 ssize_t n; 2991 2992 if (DB_TYPE(nmp) == M_DATA) { 2993 /* 2994 * We have some data, uiomove up to resid bytes. 2995 */ 2996 n = MIN(MBLKL(nmp), uiop->uio_resid); 2997 if (n > 0) 2998 error = uiomove(nmp->b_rptr, n, UIO_READ, uiop); 2999 nmp->b_rptr += n; 3000 if (nmp->b_rptr == nmp->b_wptr) { 3001 pmp = nmp; 3002 nmp = nmp->b_cont; 3003 } 3004 if (error) 3005 break; 3006 } else { 3007 /* 3008 * We only handle data, save for caller to handle. 3009 */ 3010 if (pmp != NULL) { 3011 pmp->b_cont = nmp->b_cont; 3012 } 3013 nmp->b_cont = NULL; 3014 if (*rmp == NULL) { 3015 *rmp = nmp; 3016 } else { 3017 tmp->b_cont = nmp; 3018 } 3019 nmp = nmp->b_cont; 3020 tmp = nmp; 3021 } 3022 } 3023 if (pmp != NULL) { 3024 /* Free any mblk_t(s) which we have consumed */ 3025 pmp->b_cont = NULL; 3026 freemsg(sti->sti_nl7c_rcv_mp); 3027 } 3028 if ((sti->sti_nl7c_rcv_mp = nmp) == NULL) { 3029 /* Last mblk_t so return the saved kstrgetmsg() rval/error */ 3030 if (error == 0) { 3031 rval_t *p = (rval_t *)&sti->sti_nl7c_rcv_rval; 3032 3033 error = p->r_v.r_v2; 3034 p->r_v.r_v2 = 0; 3035 } 3036 rp->r_vals = sti->sti_nl7c_rcv_rval; 3037 sti->sti_nl7c_rcv_rval = 0; 3038 } else { 3039 /* More mblk_t(s) to process so no rval to return */ 3040 rp->r_vals = 0; 3041 } 3042 return (error); 3043 } 3044 /* 3045 * Receive the next message on the queue. 3046 * If msg_controllen is non-zero when called the caller is interested in 3047 * any received control info (options). 3048 * If msg_namelen is non-zero when called the caller is interested in 3049 * any received source address. 3050 * The routine returns with msg_control and msg_name pointing to 3051 * kmem_alloc'ed memory which the caller has to free. 3052 */ 3053 /* ARGSUSED */ 3054 int 3055 sotpi_recvmsg(struct sonode *so, struct nmsghdr *msg, struct uio *uiop, 3056 struct cred *cr) 3057 { 3058 union T_primitives *tpr; 3059 mblk_t *mp; 3060 uchar_t pri; 3061 int pflag, opflag; 3062 void *control; 3063 t_uscalar_t controllen; 3064 t_uscalar_t namelen; 3065 int so_state = so->so_state; /* Snapshot */ 3066 ssize_t saved_resid; 3067 rval_t rval; 3068 int flags; 3069 clock_t timout; 3070 int error = 0; 3071 sotpi_info_t *sti = SOTOTPI(so); 3072 3073 flags = msg->msg_flags; 3074 msg->msg_flags = 0; 3075 3076 dprintso(so, 1, ("sotpi_recvmsg(%p, %p, 0x%x) state %s err %d\n", 3077 (void *)so, (void *)msg, flags, 3078 pr_state(so->so_state, so->so_mode), so->so_error)); 3079 3080 if (so->so_version == SOV_STREAM) { 3081 so_update_attrs(so, SOACC); 3082 /* The imaginary "sockmod" has been popped - act as a stream */ 3083 return (strread(SOTOV(so), uiop, cr)); 3084 } 3085 3086 /* 3087 * If we are not connected because we have never been connected 3088 * we return ENOTCONN. If we have been connected (but are no longer 3089 * connected) then SS_CANTRCVMORE is set and we let kstrgetmsg return 3090 * the EOF. 3091 * 3092 * An alternative would be to post an ENOTCONN error in stream head 3093 * (read+write) and clear it when we're connected. However, that error 3094 * would cause incorrect poll/select behavior! 
3095 */ 3096 if ((so_state & (SS_ISCONNECTED|SS_CANTRCVMORE)) == 0 && 3097 (so->so_mode & SM_CONNREQUIRED)) { 3098 return (ENOTCONN); 3099 } 3100 3101 /* 3102 * Note: SunOS 4.X checks uio_resid == 0 before going to sleep (but 3103 * after checking that the read queue is empty) and returns zero. 3104 * This implementation will sleep (in kstrgetmsg) even if uio_resid 3105 * is zero. 3106 */ 3107 3108 if (flags & MSG_OOB) { 3109 /* Check that the transport supports OOB */ 3110 if (!(so->so_mode & SM_EXDATA)) 3111 return (EOPNOTSUPP); 3112 so_update_attrs(so, SOACC); 3113 return (sorecvoob(so, msg, uiop, flags, 3114 (so->so_options & SO_OOBINLINE))); 3115 } 3116 3117 so_update_attrs(so, SOACC); 3118 3119 /* 3120 * Set msg_controllen and msg_namelen to zero here to make it 3121 * simpler in the cases that no control or name is returned. 3122 */ 3123 controllen = msg->msg_controllen; 3124 namelen = msg->msg_namelen; 3125 msg->msg_controllen = 0; 3126 msg->msg_namelen = 0; 3127 3128 dprintso(so, 1, ("sotpi_recvmsg: namelen %d controllen %d\n", 3129 namelen, controllen)); 3130 3131 mutex_enter(&so->so_lock); 3132 /* 3133 * If an NL7C enabled socket and not waiting for write data. 3134 */ 3135 if ((sti->sti_nl7c_flags & (NL7C_ENABLED | NL7C_WAITWRITE)) == 3136 NL7C_ENABLED) { 3137 if (sti->sti_nl7c_uri) { 3138 /* Close uri processing for a previous request */ 3139 nl7c_close(so); 3140 } 3141 if ((so_state & SS_CANTRCVMORE) && 3142 sti->sti_nl7c_rcv_mp == NULL) { 3143 /* Nothing to process, EOF */ 3144 mutex_exit(&so->so_lock); 3145 return (0); 3146 } else if (sti->sti_nl7c_flags & NL7C_SOPERSIST) { 3147 /* Persistent NL7C socket, try to process request */ 3148 boolean_t ret; 3149 3150 ret = nl7c_process(so, 3151 (so->so_state & (SS_NONBLOCK|SS_NDELAY))); 3152 rval.r_vals = sti->sti_nl7c_rcv_rval; 3153 error = rval.r_v.r_v2; 3154 if (error) { 3155 /* Error of some sort, return it */ 3156 mutex_exit(&so->so_lock); 3157 return (error); 3158 } 3159 if (sti->sti_nl7c_flags && 3160 ! (sti->sti_nl7c_flags & NL7C_WAITWRITE)) { 3161 /* 3162 * Still an NL7C socket and no data 3163 * to pass up to the caller. 3164 */ 3165 mutex_exit(&so->so_lock); 3166 if (ret) { 3167 /* EOF */ 3168 return (0); 3169 } else { 3170 /* Need more data */ 3171 return (EAGAIN); 3172 } 3173 } 3174 } else { 3175 /* 3176 * Not persistent so no further NL7C processing. 3177 */ 3178 sti->sti_nl7c_flags = 0; 3179 } 3180 } 3181 /* 3182 * Only one reader is allowed at any given time. This is needed 3183 * for T_EXDATA handling and, in the future, MSG_WAITALL. 3184 * 3185 * This is slightly different that BSD behavior in that it fails with 3186 * EWOULDBLOCK when using nonblocking io. In BSD the read queue access 3187 * is single-threaded using sblock(), which is dropped while waiting 3188 * for data to appear. The difference shows up e.g. if one 3189 * file descriptor does not have O_NONBLOCK but a dup'ed file descriptor 3190 * does use nonblocking io and different threads are reading each 3191 * file descriptor. In BSD there would never be an EWOULDBLOCK error 3192 * in this case as long as the read queue doesn't get empty. 3193 * In this implementation the thread using nonblocking io can 3194 * get an EWOULDBLOCK error due to the blocking thread executing 3195 * e.g. in the uiomove in kstrgetmsg. 3196 * This difference is not believed to be significant. 3197 */ 3198 /* Set SOREADLOCKED */ 3199 error = so_lock_read_intr(so, 3200 uiop->uio_fmode | ((flags & MSG_DONTWAIT) ? 
FNONBLOCK : 0)); 3201 mutex_exit(&so->so_lock); 3202 if (error) 3203 return (error); 3204 3205 /* 3206 * Tell kstrgetmsg to not inspect the stream head errors until all 3207 * queued data has been consumed. 3208 * Use a timeout=-1 to wait forever unless MSG_DONTWAIT is set. 3209 * Also, If uio_fmode indicates nonblocking kstrgetmsg will not block. 3210 * 3211 * MSG_WAITALL only applies to M_DATA and T_DATA_IND messages and 3212 * to T_OPTDATA_IND that do not contain any user-visible control msg. 3213 * Note that MSG_WAITALL set with MSG_PEEK is a noop. 3214 */ 3215 pflag = MSG_ANY | MSG_DELAYERROR; 3216 if (flags & MSG_PEEK) { 3217 pflag |= MSG_IPEEK; 3218 flags &= ~MSG_WAITALL; 3219 } 3220 if (so->so_mode & SM_ATOMIC) 3221 pflag |= MSG_DISCARDTAIL; 3222 3223 if (flags & MSG_DONTWAIT) 3224 timout = 0; 3225 else if (so->so_rcvtimeo != 0) 3226 timout = TICK_TO_MSEC(so->so_rcvtimeo); 3227 else 3228 timout = -1; 3229 opflag = pflag; 3230 retry: 3231 saved_resid = uiop->uio_resid; 3232 pri = 0; 3233 mp = NULL; 3234 if (sti->sti_nl7c_rcv_mp != NULL) { 3235 /* Already kstrgetmsg()ed saved mblk(s) from NL7C */ 3236 error = nl7c_sorecv(so, &mp, uiop, &rval); 3237 } else { 3238 error = kstrgetmsg(SOTOV(so), &mp, uiop, &pri, &pflag, 3239 timout, &rval); 3240 } 3241 if (error != 0) { 3242 /* kstrgetmsg returns ETIME when timeout expires */ 3243 if (error == ETIME) 3244 error = EWOULDBLOCK; 3245 goto out; 3246 } 3247 /* 3248 * For datagrams the MOREDATA flag is used to set MSG_TRUNC. 3249 * For non-datagrams MOREDATA is used to set MSG_EOR. 3250 */ 3251 ASSERT(!(rval.r_val1 & MORECTL)); 3252 if ((rval.r_val1 & MOREDATA) && (so->so_mode & SM_ATOMIC)) 3253 msg->msg_flags |= MSG_TRUNC; 3254 3255 if (mp == NULL) { 3256 dprintso(so, 1, ("sotpi_recvmsg: got M_DATA\n")); 3257 /* 3258 * 4.3BSD and 4.4BSD clears the mark when peeking across it. 3259 * The draft Posix socket spec states that the mark should 3260 * not be cleared when peeking. We follow the latter. 3261 */ 3262 if ((so->so_state & 3263 (SS_OOBPEND|SS_HAVEOOBDATA|SS_RCVATMARK)) && 3264 (uiop->uio_resid != saved_resid) && 3265 !(flags & MSG_PEEK)) { 3266 sorecv_update_oobstate(so); 3267 } 3268 3269 mutex_enter(&so->so_lock); 3270 /* Set MSG_EOR based on MOREDATA */ 3271 if (!(rval.r_val1 & MOREDATA)) { 3272 if (so->so_state & SS_SAVEDEOR) { 3273 msg->msg_flags |= MSG_EOR; 3274 so->so_state &= ~SS_SAVEDEOR; 3275 } 3276 } 3277 /* 3278 * If some data was received (i.e. not EOF) and the 3279 * read/recv* has not been satisfied wait for some more. 3280 */ 3281 if ((flags & MSG_WAITALL) && !(msg->msg_flags & MSG_EOR) && 3282 uiop->uio_resid != saved_resid && uiop->uio_resid > 0) { 3283 mutex_exit(&so->so_lock); 3284 pflag = opflag | MSG_NOMARK; 3285 goto retry; 3286 } 3287 goto out_locked; 3288 } 3289 3290 /* strsock_proto has already verified length and alignment */ 3291 tpr = (union T_primitives *)mp->b_rptr; 3292 dprintso(so, 1, ("sotpi_recvmsg: type %d\n", tpr->type)); 3293 3294 switch (tpr->type) { 3295 case T_DATA_IND: { 3296 if ((so->so_state & 3297 (SS_OOBPEND|SS_HAVEOOBDATA|SS_RCVATMARK)) && 3298 (uiop->uio_resid != saved_resid) && 3299 !(flags & MSG_PEEK)) { 3300 sorecv_update_oobstate(so); 3301 } 3302 3303 /* 3304 * Set msg_flags to MSG_EOR based on 3305 * MORE_flag and MOREDATA. 
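 * MORE_flag set means the transport split the TSDU across multiple
 * T_DATA_INDs; MOREDATA set means this read did not consume the whole
 * message, so MSG_EOR is deferred via SS_SAVEDEOR.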
3306 */ 3307 mutex_enter(&so->so_lock); 3308 so->so_state &= ~SS_SAVEDEOR; 3309 if (!(tpr->data_ind.MORE_flag & 1)) { 3310 if (!(rval.r_val1 & MOREDATA)) 3311 msg->msg_flags |= MSG_EOR; 3312 else 3313 so->so_state |= SS_SAVEDEOR; 3314 } 3315 freemsg(mp); 3316 /* 3317 * If some data was received (i.e. not EOF) and the 3318 * read/recv* has not been satisfied wait for some more. 3319 */ 3320 if ((flags & MSG_WAITALL) && !(msg->msg_flags & MSG_EOR) && 3321 uiop->uio_resid != saved_resid && uiop->uio_resid > 0) { 3322 mutex_exit(&so->so_lock); 3323 pflag = opflag | MSG_NOMARK; 3324 goto retry; 3325 } 3326 goto out_locked; 3327 } 3328 case T_UNITDATA_IND: { 3329 void *addr; 3330 t_uscalar_t addrlen; 3331 void *abuf; 3332 t_uscalar_t optlen; 3333 void *opt; 3334 3335 if ((so->so_state & 3336 (SS_OOBPEND|SS_HAVEOOBDATA|SS_RCVATMARK)) && 3337 (uiop->uio_resid != saved_resid) && 3338 !(flags & MSG_PEEK)) { 3339 sorecv_update_oobstate(so); 3340 } 3341 3342 if (namelen != 0) { 3343 /* Caller wants source address */ 3344 addrlen = tpr->unitdata_ind.SRC_length; 3345 addr = sogetoff(mp, 3346 tpr->unitdata_ind.SRC_offset, 3347 addrlen, 1); 3348 if (addr == NULL) { 3349 freemsg(mp); 3350 error = EPROTO; 3351 eprintsoline(so, error); 3352 goto out; 3353 } 3354 if (so->so_family == AF_UNIX) { 3355 /* 3356 * Can not use the transport level address. 3357 * If there is a SO_SRCADDR option carrying 3358 * the socket level address it will be 3359 * extracted below. 3360 */ 3361 addr = NULL; 3362 addrlen = 0; 3363 } 3364 } 3365 optlen = tpr->unitdata_ind.OPT_length; 3366 if (optlen != 0) { 3367 t_uscalar_t ncontrollen; 3368 3369 /* 3370 * Extract any source address option. 3371 * Determine how large cmsg buffer is needed. 3372 */ 3373 opt = sogetoff(mp, 3374 tpr->unitdata_ind.OPT_offset, 3375 optlen, __TPI_ALIGN_SIZE); 3376 3377 if (opt == NULL) { 3378 freemsg(mp); 3379 error = EPROTO; 3380 eprintsoline(so, error); 3381 goto out; 3382 } 3383 if (so->so_family == AF_UNIX) 3384 so_getopt_srcaddr(opt, optlen, &addr, &addrlen); 3385 ncontrollen = so_cmsglen(mp, opt, optlen, 3386 !(flags & MSG_XPG4_2)); 3387 if (controllen != 0) 3388 controllen = ncontrollen; 3389 else if (ncontrollen != 0) 3390 msg->msg_flags |= MSG_CTRUNC; 3391 } else { 3392 controllen = 0; 3393 } 3394 3395 if (namelen != 0) { 3396 /* 3397 * Return address to caller. 3398 * Caller handles truncation if length 3399 * exceeds msg_namelen. 3400 * NOTE: AF_UNIX NUL termination is ensured by 3401 * the sender's copyin_name(). 3402 */ 3403 abuf = kmem_alloc(addrlen, KM_SLEEP); 3404 3405 bcopy(addr, abuf, addrlen); 3406 msg->msg_name = abuf; 3407 msg->msg_namelen = addrlen; 3408 } 3409 3410 if (controllen != 0) { 3411 /* 3412 * Return control msg to caller. 3413 * Caller handles truncation if length 3414 * exceeds msg_controllen. 
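 * The buffer allocated here is returned in msg_control and must be
 * freed by the caller (see the comment above sotpi_recvmsg).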
3415 */ 3416 control = kmem_zalloc(controllen, KM_SLEEP); 3417 3418 error = so_opt2cmsg(mp, opt, optlen, 3419 !(flags & MSG_XPG4_2), 3420 control, controllen); 3421 if (error) { 3422 freemsg(mp); 3423 if (msg->msg_namelen != 0) 3424 kmem_free(msg->msg_name, 3425 msg->msg_namelen); 3426 kmem_free(control, controllen); 3427 eprintsoline(so, error); 3428 goto out; 3429 } 3430 msg->msg_control = control; 3431 msg->msg_controllen = controllen; 3432 } 3433 3434 freemsg(mp); 3435 goto out; 3436 } 3437 case T_OPTDATA_IND: { 3438 struct T_optdata_req *tdr; 3439 void *opt; 3440 t_uscalar_t optlen; 3441 3442 if ((so->so_state & 3443 (SS_OOBPEND|SS_HAVEOOBDATA|SS_RCVATMARK)) && 3444 (uiop->uio_resid != saved_resid) && 3445 !(flags & MSG_PEEK)) { 3446 sorecv_update_oobstate(so); 3447 } 3448 3449 tdr = (struct T_optdata_req *)mp->b_rptr; 3450 optlen = tdr->OPT_length; 3451 if (optlen != 0) { 3452 t_uscalar_t ncontrollen; 3453 /* 3454 * Determine how large cmsg buffer is needed. 3455 */ 3456 opt = sogetoff(mp, 3457 tpr->optdata_ind.OPT_offset, 3458 optlen, __TPI_ALIGN_SIZE); 3459 3460 if (opt == NULL) { 3461 freemsg(mp); 3462 error = EPROTO; 3463 eprintsoline(so, error); 3464 goto out; 3465 } 3466 3467 ncontrollen = so_cmsglen(mp, opt, optlen, 3468 !(flags & MSG_XPG4_2)); 3469 if (controllen != 0) 3470 controllen = ncontrollen; 3471 else if (ncontrollen != 0) 3472 msg->msg_flags |= MSG_CTRUNC; 3473 } else { 3474 controllen = 0; 3475 } 3476 3477 if (controllen != 0) { 3478 /* 3479 * Return control msg to caller. 3480 * Caller handles truncation if length 3481 * exceeds msg_controllen. 3482 */ 3483 control = kmem_zalloc(controllen, KM_SLEEP); 3484 3485 error = so_opt2cmsg(mp, opt, optlen, 3486 !(flags & MSG_XPG4_2), 3487 control, controllen); 3488 if (error) { 3489 freemsg(mp); 3490 kmem_free(control, controllen); 3491 eprintsoline(so, error); 3492 goto out; 3493 } 3494 msg->msg_control = control; 3495 msg->msg_controllen = controllen; 3496 } 3497 3498 /* 3499 * Set msg_flags to MSG_EOR based on 3500 * DATA_flag and MOREDATA. 3501 */ 3502 mutex_enter(&so->so_lock); 3503 so->so_state &= ~SS_SAVEDEOR; 3504 if (!(tpr->data_ind.MORE_flag & 1)) { 3505 if (!(rval.r_val1 & MOREDATA)) 3506 msg->msg_flags |= MSG_EOR; 3507 else 3508 so->so_state |= SS_SAVEDEOR; 3509 } 3510 freemsg(mp); 3511 /* 3512 * If some data was received (i.e. not EOF) and the 3513 * read/recv* has not been satisfied wait for some more. 3514 * Not possible to wait if control info was received. 3515 */ 3516 if ((flags & MSG_WAITALL) && !(msg->msg_flags & MSG_EOR) && 3517 controllen == 0 && 3518 uiop->uio_resid != saved_resid && uiop->uio_resid > 0) { 3519 mutex_exit(&so->so_lock); 3520 pflag = opflag | MSG_NOMARK; 3521 goto retry; 3522 } 3523 goto out_locked; 3524 } 3525 case T_EXDATA_IND: { 3526 dprintso(so, 1, 3527 ("sotpi_recvmsg: EXDATA_IND counts %d/%d consumed %ld " 3528 "state %s\n", 3529 sti->sti_oobsigcnt, sti->sti_oobcnt, 3530 saved_resid - uiop->uio_resid, 3531 pr_state(so->so_state, so->so_mode))); 3532 /* 3533 * kstrgetmsg handles MSGMARK so there is nothing to 3534 * inspect in the T_EXDATA_IND. 3535 * strsock_proto makes the stream head queue the T_EXDATA_IND 3536 * as a separate message with no M_DATA component. Furthermore, 3537 * the stream head does not consolidate M_DATA messages onto 3538 * an MSGMARK'ed message ensuring that the T_EXDATA_IND 3539 * remains a message by itself. This is needed since MSGMARK 3540 * marks both the whole message as well as the last byte 3541 * of the message. 
3542 */ 3543 freemsg(mp); 3544 ASSERT(uiop->uio_resid == saved_resid); /* No data */ 3545 if (flags & MSG_PEEK) { 3546 /* 3547 * Even though we are peeking we consume the 3548 * T_EXDATA_IND thereby moving the mark information 3549 * to SS_RCVATMARK. Then the oob code below will 3550 * retry the peeking kstrgetmsg. 3551 * Note that the stream head read queue is 3552 * never flushed without holding SOREADLOCKED 3553 * thus the T_EXDATA_IND can not disappear 3554 * underneath us. 3555 */ 3556 dprintso(so, 1, 3557 ("sotpi_recvmsg: consume EXDATA_IND " 3558 "counts %d/%d state %s\n", 3559 sti->sti_oobsigcnt, 3560 sti->sti_oobcnt, 3561 pr_state(so->so_state, so->so_mode))); 3562 3563 pflag = MSG_ANY | MSG_DELAYERROR; 3564 if (so->so_mode & SM_ATOMIC) 3565 pflag |= MSG_DISCARDTAIL; 3566 3567 pri = 0; 3568 mp = NULL; 3569 3570 error = kstrgetmsg(SOTOV(so), &mp, uiop, 3571 &pri, &pflag, (clock_t)-1, &rval); 3572 ASSERT(uiop->uio_resid == saved_resid); 3573 3574 if (error) { 3575 #ifdef SOCK_DEBUG 3576 if (error != EWOULDBLOCK && error != EINTR) { 3577 eprintsoline(so, error); 3578 } 3579 #endif /* SOCK_DEBUG */ 3580 goto out; 3581 } 3582 ASSERT(mp); 3583 tpr = (union T_primitives *)mp->b_rptr; 3584 ASSERT(tpr->type == T_EXDATA_IND); 3585 freemsg(mp); 3586 } /* end "if (flags & MSG_PEEK)" */ 3587 3588 /* 3589 * Decrement the number of queued and pending oob. 3590 * 3591 * SS_RCVATMARK is cleared when we read past a mark. 3592 * SS_HAVEOOBDATA is cleared when we've read past the 3593 * last mark. 3594 * SS_OOBPEND is cleared if we've read past the last 3595 * mark and no (new) SIGURG has been posted. 3596 */ 3597 mutex_enter(&so->so_lock); 3598 ASSERT(so_verify_oobstate(so)); 3599 ASSERT(sti->sti_oobsigcnt >= sti->sti_oobcnt); 3600 ASSERT(sti->sti_oobsigcnt > 0); 3601 sti->sti_oobsigcnt--; 3602 ASSERT(sti->sti_oobcnt > 0); 3603 sti->sti_oobcnt--; 3604 /* 3605 * Since the T_EXDATA_IND has been removed from the stream 3606 * head, but we have not read data past the mark, 3607 * sockfs needs to track that the socket is still at the mark. 3608 * 3609 * Since no data was received call kstrgetmsg again to wait 3610 * for data. 3611 */ 3612 so->so_state |= SS_RCVATMARK; 3613 mutex_exit(&so->so_lock); 3614 dprintso(so, 1, 3615 ("sotpi_recvmsg: retry EXDATA_IND counts %d/%d state %s\n", 3616 sti->sti_oobsigcnt, sti->sti_oobcnt, 3617 pr_state(so->so_state, so->so_mode))); 3618 pflag = opflag; 3619 goto retry; 3620 } 3621 default: 3622 cmn_err(CE_CONT, "sotpi_recvmsg: so %p prim %d mp %p\n", 3623 (void *)so, tpr->type, (void *)mp); 3624 ASSERT(0); 3625 freemsg(mp); 3626 error = EPROTO; 3627 eprintsoline(so, error); 3628 goto out; 3629 } 3630 /* NOTREACHED */ 3631 out: 3632 mutex_enter(&so->so_lock); 3633 out_locked: 3634 so_unlock_read(so); /* Clear SOREADLOCKED */ 3635 mutex_exit(&so->so_lock); 3636 return (error); 3637 } 3638 3639 /* 3640 * Sending data with options on a datagram socket. 3641 * Assumes caller has verified that SS_ISBOUND etc. are set. 
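 * The T_UNITDATA_REQ built here carries the destination address, an
 * SO_FILEP option when file descriptors are being passed, an SO_SRCADDR
 * option for AF_UNIX, and the options converted from the caller's
 * control message.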
3642 */ 3643 static int 3644 sosend_dgramcmsg(struct sonode *so, struct sockaddr *name, socklen_t namelen, 3645 struct uio *uiop, void *control, t_uscalar_t controllen, int flags) 3646 { 3647 struct T_unitdata_req tudr; 3648 mblk_t *mp; 3649 int error; 3650 void *addr; 3651 socklen_t addrlen; 3652 void *src; 3653 socklen_t srclen; 3654 ssize_t len; 3655 int size; 3656 struct T_opthdr toh; 3657 struct fdbuf *fdbuf; 3658 t_uscalar_t optlen; 3659 void *fds; 3660 int fdlen; 3661 sotpi_info_t *sti = SOTOTPI(so); 3662 3663 ASSERT(name && namelen); 3664 ASSERT(control && controllen); 3665 3666 len = uiop->uio_resid; 3667 if (len > (ssize_t)sti->sti_tidu_size) { 3668 return (EMSGSIZE); 3669 } 3670 3671 /* 3672 * For AF_UNIX the destination address is translated to an internal 3673 * name and the source address is passed as an option. 3674 * Also, file descriptors are passed as file pointers in an 3675 * option. 3676 */ 3677 3678 /* 3679 * Length and family checks. 3680 */ 3681 error = so_addr_verify(so, name, namelen); 3682 if (error) { 3683 eprintsoline(so, error); 3684 return (error); 3685 } 3686 if (so->so_family == AF_UNIX) { 3687 if (sti->sti_faddr_noxlate) { 3688 /* 3689 * Already have a transport internal address. Do not 3690 * pass any (transport internal) source address. 3691 */ 3692 addr = name; 3693 addrlen = namelen; 3694 src = NULL; 3695 srclen = 0; 3696 } else { 3697 /* 3698 * Pass the sockaddr_un source address as an option 3699 * and translate the remote address. 3700 * 3701 * Note that this code does not prevent sti_laddr_sa 3702 * from changing while it is being used. Thus 3703 * if an unbind+bind occurs concurrently with this 3704 * send the peer might see a partially new and a 3705 * partially old "from" address. 3706 */ 3707 src = sti->sti_laddr_sa; 3708 srclen = (t_uscalar_t)sti->sti_laddr_len; 3709 dprintso(so, 1, 3710 ("sosend_dgramcmsg UNIX: srclen %d, src %p\n", 3711 srclen, src)); 3712 error = so_ux_addr_xlate(so, name, namelen, 3713 (flags & MSG_XPG4_2), 3714 &addr, &addrlen); 3715 if (error) { 3716 eprintsoline(so, error); 3717 return (error); 3718 } 3719 } 3720 } else { 3721 addr = name; 3722 addrlen = namelen; 3723 src = NULL; 3724 srclen = 0; 3725 } 3726 optlen = so_optlen(control, controllen, 3727 !(flags & MSG_XPG4_2)); 3728 tudr.PRIM_type = T_UNITDATA_REQ; 3729 tudr.DEST_length = addrlen; 3730 tudr.DEST_offset = (t_scalar_t)sizeof (tudr); 3731 if (srclen != 0) 3732 tudr.OPT_length = (t_scalar_t)(optlen + sizeof (toh) + 3733 _TPI_ALIGN_TOPT(srclen)); 3734 else 3735 tudr.OPT_length = optlen; 3736 tudr.OPT_offset = (t_scalar_t)(sizeof (tudr) + 3737 _TPI_ALIGN_TOPT(addrlen)); 3738 3739 size = tudr.OPT_offset + tudr.OPT_length; 3740 3741 /* 3742 * File descriptors only when SM_FDPASSING set. 3743 */ 3744 error = so_getfdopt(control, controllen, 3745 !(flags & MSG_XPG4_2), &fds, &fdlen); 3746 if (error) 3747 return (error); 3748 if (fdlen != -1) { 3749 if (!(so->so_mode & SM_FDPASSING)) 3750 return (EOPNOTSUPP); 3751 3752 error = fdbuf_create(fds, fdlen, &fdbuf); 3753 if (error) 3754 return (error); 3755 mp = fdbuf_allocmsg(size, fdbuf); 3756 } else { 3757 mp = soallocproto(size, _ALLOC_INTR, CRED()); 3758 if (mp == NULL) { 3759 /* 3760 * Caught a signal waiting for memory. 3761 * Let send* return EINTR. 
3762 */ 3763 return (EINTR); 3764 } 3765 } 3766 soappendmsg(mp, &tudr, sizeof (tudr)); 3767 soappendmsg(mp, addr, addrlen); 3768 mp->b_wptr += _TPI_ALIGN_TOPT(addrlen) - addrlen; 3769 3770 if (fdlen != -1) { 3771 ASSERT(fdbuf != NULL); 3772 toh.level = SOL_SOCKET; 3773 toh.name = SO_FILEP; 3774 toh.len = fdbuf->fd_size + 3775 (t_uscalar_t)sizeof (struct T_opthdr); 3776 toh.status = 0; 3777 soappendmsg(mp, &toh, sizeof (toh)); 3778 soappendmsg(mp, fdbuf, fdbuf->fd_size); 3779 ASSERT(__TPI_TOPT_ISALIGNED(mp->b_wptr)); 3780 } 3781 if (srclen != 0) { 3782 /* 3783 * There is a AF_UNIX sockaddr_un to include as a source 3784 * address option. 3785 */ 3786 toh.level = SOL_SOCKET; 3787 toh.name = SO_SRCADDR; 3788 toh.len = (t_uscalar_t)(srclen + sizeof (struct T_opthdr)); 3789 toh.status = 0; 3790 soappendmsg(mp, &toh, sizeof (toh)); 3791 soappendmsg(mp, src, srclen); 3792 mp->b_wptr += _TPI_ALIGN_TOPT(srclen) - srclen; 3793 ASSERT(__TPI_TOPT_ISALIGNED(mp->b_wptr)); 3794 } 3795 ASSERT(mp->b_wptr <= mp->b_datap->db_lim); 3796 so_cmsg2opt(control, controllen, !(flags & MSG_XPG4_2), mp); 3797 /* At most 3 bytes left in the message */ 3798 ASSERT(MBLKL(mp) > (ssize_t)(size - __TPI_ALIGN_SIZE)); 3799 ASSERT(MBLKL(mp) <= (ssize_t)size); 3800 3801 ASSERT(mp->b_wptr <= mp->b_datap->db_lim); 3802 if (AU_AUDITING()) 3803 audit_sock(T_UNITDATA_REQ, strvp2wq(SOTOV(so)), mp, 0); 3804 3805 error = kstrputmsg(SOTOV(so), mp, uiop, len, 0, MSG_BAND, 0); 3806 #ifdef SOCK_DEBUG 3807 if (error) { 3808 eprintsoline(so, error); 3809 } 3810 #endif /* SOCK_DEBUG */ 3811 return (error); 3812 } 3813 3814 /* 3815 * Sending data with options on a connected stream socket. 3816 * Assumes caller has verified that SS_ISCONNECTED is set. 3817 */ 3818 static int 3819 sosend_svccmsg(struct sonode *so, struct uio *uiop, int more, void *control, 3820 t_uscalar_t controllen, int flags) 3821 { 3822 struct T_optdata_req tdr; 3823 mblk_t *mp; 3824 int error; 3825 ssize_t iosize; 3826 int size; 3827 struct fdbuf *fdbuf; 3828 t_uscalar_t optlen; 3829 void *fds; 3830 int fdlen; 3831 struct T_opthdr toh; 3832 sotpi_info_t *sti = SOTOTPI(so); 3833 3834 dprintso(so, 1, 3835 ("sosend_svccmsg: resid %ld bytes\n", uiop->uio_resid)); 3836 3837 /* 3838 * Has to be bound and connected. However, since no locks are 3839 * held the state could have changed after sotpi_sendmsg checked it 3840 * thus it is not possible to ASSERT on the state. 3841 */ 3842 3843 /* Options on connection-oriented only when SM_OPTDATA set. */ 3844 if (!(so->so_mode & SM_OPTDATA)) 3845 return (EOPNOTSUPP); 3846 3847 do { 3848 /* 3849 * Set the MORE flag if uio_resid does not fit in this 3850 * message or if the caller passed in "more". 3851 * Error for transports with zero tidu_size. 3852 */ 3853 tdr.PRIM_type = T_OPTDATA_REQ; 3854 iosize = sti->sti_tidu_size; 3855 if (iosize <= 0) 3856 return (EMSGSIZE); 3857 if (uiop->uio_resid > iosize) { 3858 tdr.DATA_flag = 1; 3859 } else { 3860 if (more) 3861 tdr.DATA_flag = 1; 3862 else 3863 tdr.DATA_flag = 0; 3864 iosize = uiop->uio_resid; 3865 } 3866 dprintso(so, 1, ("sosend_svccmsg: sending %d, %ld bytes\n", 3867 tdr.DATA_flag, iosize)); 3868 3869 optlen = so_optlen(control, controllen, !(flags & MSG_XPG4_2)); 3870 tdr.OPT_length = optlen; 3871 tdr.OPT_offset = (t_scalar_t)sizeof (tdr); 3872 3873 size = (int)sizeof (tdr) + optlen; 3874 /* 3875 * File descriptors only when SM_FDPASSING set. 
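 * so_getfdopt() sets fdlen to -1 when the control buffer contains no
 * file descriptors to pass.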
3876 */ 3877 error = so_getfdopt(control, controllen, 3878 !(flags & MSG_XPG4_2), &fds, &fdlen); 3879 if (error) 3880 return (error); 3881 if (fdlen != -1) { 3882 if (!(so->so_mode & SM_FDPASSING)) 3883 return (EOPNOTSUPP); 3884 3885 error = fdbuf_create(fds, fdlen, &fdbuf); 3886 if (error) 3887 return (error); 3888 mp = fdbuf_allocmsg(size, fdbuf); 3889 } else { 3890 mp = soallocproto(size, _ALLOC_INTR, CRED()); 3891 if (mp == NULL) { 3892 /* 3893 * Caught a signal waiting for memory. 3894 * Let send* return EINTR. 3895 */ 3896 return (EINTR); 3897 } 3898 } 3899 soappendmsg(mp, &tdr, sizeof (tdr)); 3900 3901 if (fdlen != -1) { 3902 ASSERT(fdbuf != NULL); 3903 toh.level = SOL_SOCKET; 3904 toh.name = SO_FILEP; 3905 toh.len = fdbuf->fd_size + 3906 (t_uscalar_t)sizeof (struct T_opthdr); 3907 toh.status = 0; 3908 soappendmsg(mp, &toh, sizeof (toh)); 3909 soappendmsg(mp, fdbuf, fdbuf->fd_size); 3910 ASSERT(__TPI_TOPT_ISALIGNED(mp->b_wptr)); 3911 } 3912 so_cmsg2opt(control, controllen, !(flags & MSG_XPG4_2), mp); 3913 /* At most 3 bytes left in the message */ 3914 ASSERT(MBLKL(mp) > (ssize_t)(size - __TPI_ALIGN_SIZE)); 3915 ASSERT(MBLKL(mp) <= (ssize_t)size); 3916 3917 ASSERT(mp->b_wptr <= mp->b_datap->db_lim); 3918 3919 error = kstrputmsg(SOTOV(so), mp, uiop, iosize, 3920 0, MSG_BAND, 0); 3921 if (error) { 3922 eprintsoline(so, error); 3923 return (error); 3924 } 3925 control = NULL; 3926 if (uiop->uio_resid > 0) { 3927 /* 3928 * Recheck for fatal errors. Fail write even though 3929 * some data have been written. This is consistent 3930 * with strwrite semantics and BSD sockets semantics. 3931 */ 3932 if (so->so_state & SS_CANTSENDMORE) { 3933 eprintsoline(so, error); 3934 return (EPIPE); 3935 } 3936 if (so->so_error != 0) { 3937 mutex_enter(&so->so_lock); 3938 error = sogeterr(so, B_TRUE); 3939 mutex_exit(&so->so_lock); 3940 if (error != 0) { 3941 eprintsoline(so, error); 3942 return (error); 3943 } 3944 } 3945 } 3946 } while (uiop->uio_resid > 0); 3947 return (0); 3948 } 3949 3950 /* 3951 * Sending data on a datagram socket. 3952 * Assumes caller has verified that SS_ISBOUND etc. are set. 3953 * 3954 * For AF_UNIX the destination address is translated to an internal 3955 * name and the source address is passed as an option. 3956 */ 3957 int 3958 sosend_dgram(struct sonode *so, struct sockaddr *name, socklen_t namelen, 3959 struct uio *uiop, int flags) 3960 { 3961 struct T_unitdata_req tudr; 3962 mblk_t *mp; 3963 int error; 3964 void *addr; 3965 socklen_t addrlen; 3966 void *src; 3967 socklen_t srclen; 3968 ssize_t len; 3969 sotpi_info_t *sti = SOTOTPI(so); 3970 3971 ASSERT(name != NULL && namelen != 0); 3972 3973 len = uiop->uio_resid; 3974 if (len > sti->sti_tidu_size) { 3975 error = EMSGSIZE; 3976 goto done; 3977 } 3978 3979 /* Length and family checks */ 3980 error = so_addr_verify(so, name, namelen); 3981 if (error != 0) 3982 goto done; 3983 3984 if (sti->sti_direct) 3985 return (sodgram_direct(so, name, namelen, uiop, flags)); 3986 3987 if (so->so_family == AF_UNIX) { 3988 if (sti->sti_faddr_noxlate) { 3989 /* 3990 * Already have a transport internal address. Do not 3991 * pass any (transport internal) source address. 3992 */ 3993 addr = name; 3994 addrlen = namelen; 3995 src = NULL; 3996 srclen = 0; 3997 } else { 3998 /* 3999 * Pass the sockaddr_un source address as an option 4000 * and translate the remote address. 4001 * 4002 * Note that this code does not prevent sti_laddr_sa 4003 * from changing while it is being used. 
Thus 4004 * if an unbind+bind occurs concurrently with this 4005 * send the peer might see a partially new and a 4006 * partially old "from" address. 4007 */ 4008 src = sti->sti_laddr_sa; 4009 srclen = (socklen_t)sti->sti_laddr_len; 4010 dprintso(so, 1, 4011 ("sosend_dgram UNIX: srclen %d, src %p\n", 4012 srclen, src)); 4013 error = so_ux_addr_xlate(so, name, namelen, 4014 (flags & MSG_XPG4_2), 4015 &addr, &addrlen); 4016 if (error) { 4017 eprintsoline(so, error); 4018 goto done; 4019 } 4020 } 4021 } else { 4022 addr = name; 4023 addrlen = namelen; 4024 src = NULL; 4025 srclen = 0; 4026 } 4027 tudr.PRIM_type = T_UNITDATA_REQ; 4028 tudr.DEST_length = addrlen; 4029 tudr.DEST_offset = (t_scalar_t)sizeof (tudr); 4030 if (srclen == 0) { 4031 tudr.OPT_length = 0; 4032 tudr.OPT_offset = 0; 4033 4034 mp = soallocproto2(&tudr, sizeof (tudr), 4035 addr, addrlen, 0, _ALLOC_INTR, CRED()); 4036 if (mp == NULL) { 4037 /* 4038 * Caught a signal waiting for memory. 4039 * Let send* return EINTR. 4040 */ 4041 error = EINTR; 4042 goto done; 4043 } 4044 } else { 4045 /* 4046 * There is a AF_UNIX sockaddr_un to include as a source 4047 * address option. 4048 */ 4049 struct T_opthdr toh; 4050 ssize_t size; 4051 4052 tudr.OPT_length = (t_scalar_t)(sizeof (toh) + 4053 _TPI_ALIGN_TOPT(srclen)); 4054 tudr.OPT_offset = (t_scalar_t)(sizeof (tudr) + 4055 _TPI_ALIGN_TOPT(addrlen)); 4056 4057 toh.level = SOL_SOCKET; 4058 toh.name = SO_SRCADDR; 4059 toh.len = (t_uscalar_t)(srclen + sizeof (struct T_opthdr)); 4060 toh.status = 0; 4061 4062 size = tudr.OPT_offset + tudr.OPT_length; 4063 mp = soallocproto2(&tudr, sizeof (tudr), 4064 addr, addrlen, size, _ALLOC_INTR, CRED()); 4065 if (mp == NULL) { 4066 /* 4067 * Caught a signal waiting for memory. 4068 * Let send* return EINTR. 4069 */ 4070 error = EINTR; 4071 goto done; 4072 } 4073 mp->b_wptr += _TPI_ALIGN_TOPT(addrlen) - addrlen; 4074 soappendmsg(mp, &toh, sizeof (toh)); 4075 soappendmsg(mp, src, srclen); 4076 mp->b_wptr += _TPI_ALIGN_TOPT(srclen) - srclen; 4077 ASSERT(mp->b_wptr <= mp->b_datap->db_lim); 4078 } 4079 4080 if (AU_AUDITING()) 4081 audit_sock(T_UNITDATA_REQ, strvp2wq(SOTOV(so)), mp, 0); 4082 4083 error = kstrputmsg(SOTOV(so), mp, uiop, len, 0, MSG_BAND, 0); 4084 done: 4085 #ifdef SOCK_DEBUG 4086 if (error) { 4087 eprintsoline(so, error); 4088 } 4089 #endif /* SOCK_DEBUG */ 4090 return (error); 4091 } 4092 4093 /* 4094 * Sending data on a connected stream socket. 4095 * Assumes caller has verified that SS_ISCONNECTED is set. 4096 */ 4097 int 4098 sosend_svc(struct sonode *so, struct uio *uiop, t_scalar_t prim, int more, 4099 int sflag) 4100 { 4101 struct T_data_req tdr; 4102 mblk_t *mp; 4103 int error; 4104 ssize_t iosize; 4105 sotpi_info_t *sti = SOTOTPI(so); 4106 4107 dprintso(so, 1, 4108 ("sosend_svc: %p, resid %ld bytes, prim %d, sflag 0x%x\n", 4109 (void *)so, uiop->uio_resid, prim, sflag)); 4110 4111 /* 4112 * Has to be bound and connected. However, since no locks are 4113 * held the state could have changed after sotpi_sendmsg checked it 4114 * thus it is not possible to ASSERT on the state. 4115 */ 4116 4117 do { 4118 /* 4119 * Set the MORE flag if uio_resid does not fit in this 4120 * message or if the caller passed in "more". 4121 * Error for transports with zero tidu_size. 
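 * (sti_tidu_size caps how much data goes into each request; the loop below repeats until uio_resid is drained.)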
4122 */ 4123 tdr.PRIM_type = prim; 4124 iosize = sti->sti_tidu_size; 4125 if (iosize <= 0) 4126 return (EMSGSIZE); 4127 if (uiop->uio_resid > iosize) { 4128 tdr.MORE_flag = 1; 4129 } else { 4130 if (more) 4131 tdr.MORE_flag = 1; 4132 else 4133 tdr.MORE_flag = 0; 4134 iosize = uiop->uio_resid; 4135 } 4136 dprintso(so, 1, ("sosend_svc: sending 0x%x %d, %ld bytes\n", 4137 prim, tdr.MORE_flag, iosize)); 4138 mp = soallocproto1(&tdr, sizeof (tdr), 0, _ALLOC_INTR, CRED()); 4139 if (mp == NULL) { 4140 /* 4141 * Caught a signal waiting for memory. 4142 * Let send* return EINTR. 4143 */ 4144 return (EINTR); 4145 } 4146 4147 error = kstrputmsg(SOTOV(so), mp, uiop, iosize, 4148 0, sflag | MSG_BAND, 0); 4149 if (error) { 4150 eprintsoline(so, error); 4151 return (error); 4152 } 4153 if (uiop->uio_resid > 0) { 4154 /* 4155 * Recheck for fatal errors. Fail write even though 4156 * some data have been written. This is consistent 4157 * with strwrite semantics and BSD sockets semantics. 4158 */ 4159 if (so->so_state & SS_CANTSENDMORE) { 4160 eprintsoline(so, error); 4161 return (EPIPE); 4162 } 4163 if (so->so_error != 0) { 4164 mutex_enter(&so->so_lock); 4165 error = sogeterr(so, B_TRUE); 4166 mutex_exit(&so->so_lock); 4167 if (error != 0) { 4168 eprintsoline(so, error); 4169 return (error); 4170 } 4171 } 4172 } 4173 } while (uiop->uio_resid > 0); 4174 return (0); 4175 } 4176 4177 /* 4178 * Check the state for errors and call the appropriate send function. 4179 * 4180 * If MSG_DONTROUTE is set (and SO_DONTROUTE isn't already set) 4181 * this function issues a setsockopt to toggle SO_DONTROUTE before and 4182 * after sending the message. 4183 */ 4184 static int 4185 sotpi_sendmsg(struct sonode *so, struct nmsghdr *msg, struct uio *uiop, 4186 struct cred *cr) 4187 { 4188 int so_state; 4189 int so_mode; 4190 int error; 4191 struct sockaddr *name; 4192 t_uscalar_t namelen; 4193 int dontroute; 4194 int flags; 4195 sotpi_info_t *sti = SOTOTPI(so); 4196 4197 dprintso(so, 1, ("sotpi_sendmsg(%p, %p, 0x%x) state %s, error %d\n", 4198 (void *)so, (void *)msg, msg->msg_flags, 4199 pr_state(so->so_state, so->so_mode), so->so_error)); 4200 4201 if (so->so_version == SOV_STREAM) { 4202 /* The imaginary "sockmod" has been popped - act as a stream */ 4203 so_update_attrs(so, SOMOD); 4204 return (strwrite(SOTOV(so), uiop, cr)); 4205 } 4206 4207 mutex_enter(&so->so_lock); 4208 so_state = so->so_state; 4209 4210 if (so_state & SS_CANTSENDMORE) { 4211 mutex_exit(&so->so_lock); 4212 return (EPIPE); 4213 } 4214 4215 if (so->so_error != 0) { 4216 error = sogeterr(so, B_TRUE); 4217 if (error != 0) { 4218 mutex_exit(&so->so_lock); 4219 return (error); 4220 } 4221 } 4222 4223 name = (struct sockaddr *)msg->msg_name; 4224 namelen = msg->msg_namelen; 4225 4226 so_mode = so->so_mode; 4227 4228 if (name == NULL) { 4229 if (!(so_state & SS_ISCONNECTED)) { 4230 mutex_exit(&so->so_lock); 4231 if (so_mode & SM_CONNREQUIRED) 4232 return (ENOTCONN); 4233 else 4234 return (EDESTADDRREQ); 4235 } 4236 if (so_mode & SM_CONNREQUIRED) { 4237 name = NULL; 4238 namelen = 0; 4239 } else { 4240 /* 4241 * Note that this code does not prevent sti_faddr_sa 4242 * from changing while it is being used. Thus 4243 * if an "unconnect"+connect occurs concurrently with 4244 * this send the datagram might be delivered to a 4245 * garbaled address. 
4246 */ 4247 ASSERT(sti->sti_faddr_sa); 4248 name = sti->sti_faddr_sa; 4249 namelen = (t_uscalar_t)sti->sti_faddr_len; 4250 } 4251 } else { 4252 if (!(so_state & SS_ISCONNECTED) && 4253 (so_mode & SM_CONNREQUIRED)) { 4254 /* Required but not connected */ 4255 mutex_exit(&so->so_lock); 4256 return (ENOTCONN); 4257 } 4258 /* 4259 * Ignore the address on connection-oriented sockets. 4260 * Just like BSD this code does not generate an error for 4261 * TCP (a CONNREQUIRED socket) when sending to an address 4262 * passed in with sendto/sendmsg. Instead the data is 4263 * delivered on the connection as if no address had been 4264 * supplied. 4265 */ 4266 if ((so_state & SS_ISCONNECTED) && 4267 !(so_mode & SM_CONNREQUIRED)) { 4268 mutex_exit(&so->so_lock); 4269 return (EISCONN); 4270 } 4271 if (!(so_state & SS_ISBOUND)) { 4272 so_lock_single(so); /* Set SOLOCKED */ 4273 error = sotpi_bind(so, NULL, 0, 4274 _SOBIND_UNSPEC|_SOBIND_LOCK_HELD, cr); 4275 so_unlock_single(so, SOLOCKED); 4276 if (error) { 4277 mutex_exit(&so->so_lock); 4278 eprintsoline(so, error); 4279 return (error); 4280 } 4281 } 4282 /* 4283 * Handle delayed datagram errors. These are only queued 4284 * when the application sets SO_DGRAM_ERRIND. 4285 * Return the error if we are sending to the address 4286 * that was returned in the last T_UDERROR_IND. 4287 * If sending to some other address discard the delayed 4288 * error indication. 4289 */ 4290 if (sti->sti_delayed_error) { 4291 struct T_uderror_ind *tudi; 4292 void *addr; 4293 t_uscalar_t addrlen; 4294 boolean_t match = B_FALSE; 4295 4296 ASSERT(sti->sti_eaddr_mp); 4297 error = sti->sti_delayed_error; 4298 sti->sti_delayed_error = 0; 4299 tudi = 4300 (struct T_uderror_ind *)sti->sti_eaddr_mp->b_rptr; 4301 addrlen = tudi->DEST_length; 4302 addr = sogetoff(sti->sti_eaddr_mp, 4303 tudi->DEST_offset, addrlen, 1); 4304 ASSERT(addr); /* Checked by strsock_proto */ 4305 switch (so->so_family) { 4306 case AF_INET: { 4307 /* Compare just IP address and port */ 4308 sin_t *sin1 = (sin_t *)name; 4309 sin_t *sin2 = (sin_t *)addr; 4310 4311 if (addrlen == sizeof (sin_t) && 4312 namelen == addrlen && 4313 sin1->sin_port == sin2->sin_port && 4314 sin1->sin_addr.s_addr == 4315 sin2->sin_addr.s_addr) 4316 match = B_TRUE; 4317 break; 4318 } 4319 case AF_INET6: { 4320 /* Compare just IP address and port. 
Not flow */ 4321 sin6_t *sin1 = (sin6_t *)name; 4322 sin6_t *sin2 = (sin6_t *)addr; 4323 4324 if (addrlen == sizeof (sin6_t) && 4325 namelen == addrlen && 4326 sin1->sin6_port == sin2->sin6_port && 4327 IN6_ARE_ADDR_EQUAL(&sin1->sin6_addr, 4328 &sin2->sin6_addr)) 4329 match = B_TRUE; 4330 break; 4331 } 4332 case AF_UNIX: 4333 default: 4334 if (namelen == addrlen && 4335 bcmp(name, addr, namelen) == 0) 4336 match = B_TRUE; 4337 } 4338 if (match) { 4339 freemsg(sti->sti_eaddr_mp); 4340 sti->sti_eaddr_mp = NULL; 4341 mutex_exit(&so->so_lock); 4342 #ifdef DEBUG 4343 dprintso(so, 0, 4344 ("sockfs delayed error %d for %s\n", 4345 error, 4346 pr_addr(so->so_family, name, namelen))); 4347 #endif /* DEBUG */ 4348 return (error); 4349 } 4350 freemsg(sti->sti_eaddr_mp); 4351 sti->sti_eaddr_mp = NULL; 4352 } 4353 } 4354 mutex_exit(&so->so_lock); 4355 4356 flags = msg->msg_flags; 4357 dontroute = 0; 4358 if ((flags & MSG_DONTROUTE) && !(so->so_options & SO_DONTROUTE)) { 4359 uint32_t val; 4360 4361 val = 1; 4362 error = sotpi_setsockopt(so, SOL_SOCKET, SO_DONTROUTE, 4363 &val, (t_uscalar_t)sizeof (val), cr); 4364 if (error) 4365 return (error); 4366 dontroute = 1; 4367 } 4368 4369 if ((flags & MSG_OOB) && !(so_mode & SM_EXDATA)) { 4370 error = EOPNOTSUPP; 4371 goto done; 4372 } 4373 if (msg->msg_controllen != 0) { 4374 if (!(so_mode & SM_CONNREQUIRED)) { 4375 so_update_attrs(so, SOMOD); 4376 error = sosend_dgramcmsg(so, name, namelen, uiop, 4377 msg->msg_control, msg->msg_controllen, flags); 4378 } else { 4379 if (flags & MSG_OOB) { 4380 /* Can't generate T_EXDATA_REQ with options */ 4381 error = EOPNOTSUPP; 4382 goto done; 4383 } 4384 so_update_attrs(so, SOMOD); 4385 error = sosend_svccmsg(so, uiop, 4386 !(flags & MSG_EOR), 4387 msg->msg_control, msg->msg_controllen, 4388 flags); 4389 } 4390 goto done; 4391 } 4392 4393 so_update_attrs(so, SOMOD); 4394 if (!(so_mode & SM_CONNREQUIRED)) { 4395 /* 4396 * If there is no SO_DONTROUTE to turn off return immediately 4397 * from send_dgram. This can allow tail-call optimizations. 4398 */ 4399 if (!dontroute) { 4400 return (sosend_dgram(so, name, namelen, uiop, flags)); 4401 } 4402 error = sosend_dgram(so, name, namelen, uiop, flags); 4403 } else { 4404 t_scalar_t prim; 4405 int sflag; 4406 4407 /* Ignore msg_name in the connected state */ 4408 if (flags & MSG_OOB) { 4409 prim = T_EXDATA_REQ; 4410 /* 4411 * Send down T_EXDATA_REQ even if there is flow 4412 * control for data. 4413 */ 4414 sflag = MSG_IGNFLOW; 4415 } else { 4416 if (so_mode & SM_BYTESTREAM) { 4417 /* Byte stream transport - use write */ 4418 dprintso(so, 1, ("sotpi_sendmsg: write\n")); 4419 4420 /* Send M_DATA messages */ 4421 if ((sti->sti_nl7c_flags & NL7C_ENABLED) && 4422 (error = nl7c_data(so, uiop)) >= 0) { 4423 /* NL7C consumed the data */ 4424 return (error); 4425 } 4426 /* 4427 * If there is no SO_DONTROUTE to turn off, 4428 * sti_direct is on, and there is no flow 4429 * control, we can take the fast path. 4430 */ 4431 if (!dontroute && sti->sti_direct != 0 && 4432 canputnext(SOTOV(so)->v_stream->sd_wrq)) { 4433 return (sostream_direct(so, uiop, 4434 NULL, cr)); 4435 } 4436 error = strwrite(SOTOV(so), uiop, cr); 4437 goto done; 4438 } 4439 prim = T_DATA_REQ; 4440 sflag = 0; 4441 } 4442 /* 4443 * If there is no SO_DONTROUTE to turn off return immediately 4444 * from sosend_svc. This can allow tail-call optimizations. 
4445 */ 4446 if (!dontroute) 4447 return (sosend_svc(so, uiop, prim, 4448 !(flags & MSG_EOR), sflag)); 4449 error = sosend_svc(so, uiop, prim, 4450 !(flags & MSG_EOR), sflag); 4451 } 4452 ASSERT(dontroute); 4453 done: 4454 if (dontroute) { 4455 uint32_t val; 4456 4457 val = 0; 4458 (void) sotpi_setsockopt(so, SOL_SOCKET, SO_DONTROUTE, 4459 &val, (t_uscalar_t)sizeof (val), cr); 4460 } 4461 return (error); 4462 } 4463 4464 /* 4465 * kstrwritemp() has very similar semantics as that of strwrite(). 4466 * The main difference is it obtains mblks from the caller and also 4467 * does not do any copy as done in strwrite() from user buffers to 4468 * kernel buffers. 4469 * 4470 * Currently, this routine is used by sendfile to send data allocated 4471 * within the kernel without any copying. This interface does not use the 4472 * synchronous stream interface as synch. stream interface implies 4473 * copying. 4474 */ 4475 int 4476 kstrwritemp(struct vnode *vp, mblk_t *mp, ushort_t fmode) 4477 { 4478 struct stdata *stp; 4479 struct queue *wqp; 4480 mblk_t *newmp; 4481 char waitflag; 4482 int tempmode; 4483 int error = 0; 4484 int done = 0; 4485 struct sonode *so; 4486 boolean_t direct; 4487 4488 ASSERT(vp->v_stream); 4489 stp = vp->v_stream; 4490 4491 so = VTOSO(vp); 4492 direct = _SOTOTPI(so)->sti_direct; 4493 4494 /* 4495 * This is the sockfs direct fast path. canputnext() need 4496 * not be accurate so we don't grab the sd_lock here. If 4497 * we get flow-controlled, we grab sd_lock just before the 4498 * do..while loop below to emulate what strwrite() does. 4499 */ 4500 wqp = stp->sd_wrq; 4501 if (canputnext(wqp) && direct && 4502 !(stp->sd_flag & (STWRERR|STRHUP|STPLEX))) { 4503 return (sostream_direct(so, NULL, mp, CRED())); 4504 } else if (stp->sd_flag & (STWRERR|STRHUP|STPLEX)) { 4505 /* Fast check of flags before acquiring the lock */ 4506 mutex_enter(&stp->sd_lock); 4507 error = strgeterr(stp, STWRERR|STRHUP|STPLEX, 0); 4508 mutex_exit(&stp->sd_lock); 4509 if (error != 0) { 4510 if (!(stp->sd_flag & STPLEX) && 4511 (stp->sd_wput_opt & SW_SIGPIPE)) { 4512 error = EPIPE; 4513 } 4514 return (error); 4515 } 4516 } 4517 4518 waitflag = WRITEWAIT; 4519 if (stp->sd_flag & OLDNDELAY) 4520 tempmode = fmode & ~FNDELAY; 4521 else 4522 tempmode = fmode; 4523 4524 mutex_enter(&stp->sd_lock); 4525 do { 4526 if (canputnext(wqp)) { 4527 mutex_exit(&stp->sd_lock); 4528 if (stp->sd_wputdatafunc != NULL) { 4529 newmp = (stp->sd_wputdatafunc)(vp, mp, NULL, 4530 NULL, NULL, NULL); 4531 if (newmp == NULL) { 4532 /* The caller will free mp */ 4533 return (ECOMM); 4534 } 4535 mp = newmp; 4536 } 4537 putnext(wqp, mp); 4538 return (0); 4539 } 4540 error = strwaitq(stp, waitflag, (ssize_t)0, tempmode, -1, 4541 &done); 4542 } while (error == 0 && !done); 4543 4544 mutex_exit(&stp->sd_lock); 4545 /* 4546 * EAGAIN tells the application to try again. ENOMEM 4547 * is returned only if the memory allocation size 4548 * exceeds the physical limits of the system. ENOMEM 4549 * can't be true here. 
4550 */ 4551 if (error == ENOMEM) 4552 error = EAGAIN; 4553 return (error); 4554 } 4555 4556 /* ARGSUSED */ 4557 static int 4558 sotpi_sendmblk(struct sonode *so, struct nmsghdr *msg, int fflag, 4559 struct cred *cr, mblk_t **mpp) 4560 { 4561 int error; 4562 4563 if (so->so_family != AF_INET && so->so_family != AF_INET6) 4564 return (EAFNOSUPPORT); 4565 4566 if (so->so_state & SS_CANTSENDMORE) 4567 return (EPIPE); 4568 4569 if (so->so_type != SOCK_STREAM) 4570 return (EOPNOTSUPP); 4571 4572 if ((so->so_state & SS_ISCONNECTED) == 0) 4573 return (ENOTCONN); 4574 4575 error = kstrwritemp(so->so_vnode, *mpp, fflag); 4576 if (error == 0) 4577 *mpp = NULL; 4578 return (error); 4579 } 4580 4581 /* 4582 * Sending data on a datagram socket. 4583 * Assumes caller has verified that SS_ISBOUND etc. are set. 4584 */ 4585 /* ARGSUSED */ 4586 static int 4587 sodgram_direct(struct sonode *so, struct sockaddr *name, 4588 socklen_t namelen, struct uio *uiop, int flags) 4589 { 4590 struct T_unitdata_req tudr; 4591 mblk_t *mp = NULL; 4592 int error = 0; 4593 void *addr; 4594 socklen_t addrlen; 4595 ssize_t len; 4596 struct stdata *stp = SOTOV(so)->v_stream; 4597 int so_state; 4598 queue_t *udp_wq; 4599 boolean_t connected; 4600 mblk_t *mpdata = NULL; 4601 sotpi_info_t *sti = SOTOTPI(so); 4602 uint32_t auditing = AU_AUDITING(); 4603 4604 ASSERT(name != NULL && namelen != 0); 4605 ASSERT(!(so->so_mode & SM_CONNREQUIRED)); 4606 ASSERT(!(so->so_mode & SM_EXDATA)); 4607 ASSERT(so->so_family == AF_INET || so->so_family == AF_INET6); 4608 ASSERT(SOTOV(so)->v_type == VSOCK); 4609 4610 /* Caller checked for proper length */ 4611 len = uiop->uio_resid; 4612 ASSERT(len <= sti->sti_tidu_size); 4613 4614 /* Length and family checks have been done by caller */ 4615 ASSERT(name->sa_family == so->so_family); 4616 ASSERT(so->so_family == AF_INET || 4617 (namelen == (socklen_t)sizeof (struct sockaddr_in6))); 4618 ASSERT(so->so_family == AF_INET6 || 4619 (namelen == (socklen_t)sizeof (struct sockaddr_in))); 4620 4621 addr = name; 4622 addrlen = namelen; 4623 4624 if (stp->sd_sidp != NULL && 4625 (error = straccess(stp, JCWRITE)) != 0) 4626 goto done; 4627 4628 so_state = so->so_state; 4629 4630 connected = so_state & SS_ISCONNECTED; 4631 if (!connected) { 4632 tudr.PRIM_type = T_UNITDATA_REQ; 4633 tudr.DEST_length = addrlen; 4634 tudr.DEST_offset = (t_scalar_t)sizeof (tudr); 4635 tudr.OPT_length = 0; 4636 tudr.OPT_offset = 0; 4637 4638 mp = soallocproto2(&tudr, sizeof (tudr), addr, addrlen, 0, 4639 _ALLOC_INTR, CRED()); 4640 if (mp == NULL) { 4641 /* 4642 * Caught a signal waiting for memory. 4643 * Let send* return EINTR. 4644 */ 4645 error = EINTR; 4646 goto done; 4647 } 4648 } 4649 4650 /* 4651 * For UDP we don't break up the copyin into smaller pieces 4652 * as in the TCP case. That means if ENOMEM is returned by 4653 * mcopyinuio() then the uio vector has not been modified at 4654 * all and we fallback to either strwrite() or kstrputmsg() 4655 * below. Note also that we never generate priority messages 4656 * from here. 
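 * On the fast path below the copied-in M_DATA chain is linked behind the T_UNITDATA_REQ mblk in the unconnected case, while a connected socket sends the bare M_DATA.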
4657 */ 4658 udp_wq = stp->sd_wrq->q_next; 4659 if (canput(udp_wq) && 4660 (mpdata = mcopyinuio(stp, uiop, -1, -1, &error)) != NULL) { 4661 ASSERT(DB_TYPE(mpdata) == M_DATA); 4662 ASSERT(uiop->uio_resid == 0); 4663 if (!connected) 4664 linkb(mp, mpdata); 4665 else 4666 mp = mpdata; 4667 if (auditing) 4668 audit_sock(T_UNITDATA_REQ, strvp2wq(SOTOV(so)), mp, 0); 4669 4670 udp_wput(udp_wq, mp); 4671 return (0); 4672 } 4673 4674 ASSERT(mpdata == NULL); 4675 if (error != 0 && error != ENOMEM) { 4676 freemsg(mp); 4677 return (error); 4678 } 4679 4680 /* 4681 * For connected, let strwrite() handle the blocking case. 4682 * Otherwise we fall thru and use kstrputmsg(). 4683 */ 4684 if (connected) 4685 return (strwrite(SOTOV(so), uiop, CRED())); 4686 4687 if (auditing) 4688 audit_sock(T_UNITDATA_REQ, strvp2wq(SOTOV(so)), mp, 0); 4689 4690 error = kstrputmsg(SOTOV(so), mp, uiop, len, 0, MSG_BAND, 0); 4691 done: 4692 #ifdef SOCK_DEBUG 4693 if (error != 0) { 4694 eprintsoline(so, error); 4695 } 4696 #endif /* SOCK_DEBUG */ 4697 return (error); 4698 } 4699 4700 int 4701 sostream_direct(struct sonode *so, struct uio *uiop, mblk_t *mp, cred_t *cr) 4702 { 4703 struct stdata *stp = SOTOV(so)->v_stream; 4704 ssize_t iosize, rmax, maxblk; 4705 queue_t *tcp_wq = stp->sd_wrq->q_next; 4706 mblk_t *newmp; 4707 int error = 0, wflag = 0; 4708 4709 ASSERT(so->so_mode & SM_BYTESTREAM); 4710 ASSERT(SOTOV(so)->v_type == VSOCK); 4711 4712 if (stp->sd_sidp != NULL && 4713 (error = straccess(stp, JCWRITE)) != 0) 4714 return (error); 4715 4716 if (uiop == NULL) { 4717 /* 4718 * kstrwritemp() should have checked sd_flag and 4719 * flow-control before coming here. If we end up 4720 * here it means that we can simply pass down the 4721 * data to tcp. 4722 */ 4723 ASSERT(mp != NULL); 4724 if (stp->sd_wputdatafunc != NULL) { 4725 newmp = (stp->sd_wputdatafunc)(SOTOV(so), mp, NULL, 4726 NULL, NULL, NULL); 4727 if (newmp == NULL) { 4728 /* The caller will free mp */ 4729 return (ECOMM); 4730 } 4731 mp = newmp; 4732 } 4733 tcp_wput(tcp_wq, mp); 4734 return (0); 4735 } 4736 4737 /* Fallback to strwrite() to do proper error handling */ 4738 if (stp->sd_flag & (STWRERR|STRHUP|STPLEX|STRDELIM|OLDNDELAY)) 4739 return (strwrite(SOTOV(so), uiop, cr)); 4740 4741 rmax = stp->sd_qn_maxpsz; 4742 ASSERT(rmax >= 0 || rmax == INFPSZ); 4743 if (rmax == 0 || uiop->uio_resid <= 0) 4744 return (0); 4745 4746 if (rmax == INFPSZ) 4747 rmax = uiop->uio_resid; 4748 4749 maxblk = stp->sd_maxblk; 4750 4751 for (;;) { 4752 iosize = MIN(uiop->uio_resid, rmax); 4753 4754 mp = mcopyinuio(stp, uiop, iosize, maxblk, &error); 4755 if (mp == NULL) { 4756 /* 4757 * Fallback to strwrite() for ENOMEM; if this 4758 * is our first time in this routine and the uio 4759 * vector has not been modified, we will end up 4760 * calling strwrite() without any flag set. 4761 */ 4762 if (error == ENOMEM) 4763 goto slow_send; 4764 else 4765 return (error); 4766 } 4767 ASSERT(uiop->uio_resid >= 0); 4768 /* 4769 * If mp is non-NULL and ENOMEM is set, it means that 4770 * mcopyinuio() was able to break down some of the user 4771 * data into one or more mblks. Send the partial data 4772 * to tcp and let the rest be handled in strwrite(). 
4773 */ 4774 ASSERT(error == 0 || error == ENOMEM); 4775 if (stp->sd_wputdatafunc != NULL) { 4776 newmp = (stp->sd_wputdatafunc)(SOTOV(so), mp, NULL, 4777 NULL, NULL, NULL); 4778 if (newmp == NULL) { 4779 /* The caller will free mp */ 4780 return (ECOMM); 4781 } 4782 mp = newmp; 4783 } 4784 tcp_wput(tcp_wq, mp); 4785 4786 wflag |= NOINTR; 4787 4788 if (uiop->uio_resid == 0) { /* No more data; we're done */ 4789 ASSERT(error == 0); 4790 break; 4791 } else if (error == ENOMEM || !canput(tcp_wq) || (stp->sd_flag & 4792 (STWRERR|STRHUP|STPLEX|STRDELIM|OLDNDELAY))) { 4793 slow_send: 4794 /* 4795 * We were able to send down partial data using 4796 * the direct call interface, but are now relying 4797 * on strwrite() to handle the non-fastpath cases. 4798 * If the socket is blocking we will sleep in 4799 * strwaitq() until write is permitted, otherwise, 4800 * we will need to return the amount of bytes 4801 * written so far back to the app. This is the 4802 * reason why we pass NOINTR flag to strwrite() 4803 * for non-blocking socket, because we don't want 4804 * to return EAGAIN when portion of the user data 4805 * has actually been sent down. 4806 */ 4807 return (strwrite_common(SOTOV(so), uiop, cr, wflag)); 4808 } 4809 } 4810 return (0); 4811 } 4812 4813 /* 4814 * Update sti_faddr by asking the transport (unless AF_UNIX). 4815 */ 4816 /* ARGSUSED */ 4817 int 4818 sotpi_getpeername(struct sonode *so, struct sockaddr *name, socklen_t *namelen, 4819 boolean_t accept, struct cred *cr) 4820 { 4821 struct strbuf strbuf; 4822 int error = 0, res; 4823 void *addr; 4824 t_uscalar_t addrlen; 4825 k_sigset_t smask; 4826 sotpi_info_t *sti = SOTOTPI(so); 4827 4828 dprintso(so, 1, ("sotpi_getpeername(%p) %s\n", 4829 (void *)so, pr_state(so->so_state, so->so_mode))); 4830 4831 ASSERT(*namelen > 0); 4832 mutex_enter(&so->so_lock); 4833 so_lock_single(so); /* Set SOLOCKED */ 4834 4835 if (accept) { 4836 bcopy(sti->sti_faddr_sa, name, 4837 MIN(*namelen, sti->sti_faddr_len)); 4838 *namelen = sti->sti_faddr_noxlate ? 0: sti->sti_faddr_len; 4839 goto done; 4840 } 4841 4842 if (!(so->so_state & SS_ISCONNECTED)) { 4843 error = ENOTCONN; 4844 goto done; 4845 } 4846 /* Added this check for X/Open */ 4847 if ((so->so_state & SS_CANTSENDMORE) && !xnet_skip_checks) { 4848 error = EINVAL; 4849 if (xnet_check_print) { 4850 printf("sockfs: X/Open getpeername check => EINVAL\n"); 4851 } 4852 goto done; 4853 } 4854 4855 if (sti->sti_faddr_valid) { 4856 bcopy(sti->sti_faddr_sa, name, 4857 MIN(*namelen, sti->sti_faddr_len)); 4858 *namelen = sti->sti_faddr_noxlate ? 0: sti->sti_faddr_len; 4859 goto done; 4860 } 4861 4862 #ifdef DEBUG 4863 dprintso(so, 1, ("sotpi_getpeername (local): %s\n", 4864 pr_addr(so->so_family, sti->sti_faddr_sa, 4865 (t_uscalar_t)sti->sti_faddr_len))); 4866 #endif /* DEBUG */ 4867 4868 if (so->so_family == AF_UNIX) { 4869 /* Transport has different name space - return local info */ 4870 if (sti->sti_faddr_noxlate) 4871 *namelen = 0; 4872 error = 0; 4873 goto done; 4874 } 4875 4876 ASSERT(so->so_family != AF_UNIX && sti->sti_faddr_noxlate == 0); 4877 4878 ASSERT(sti->sti_faddr_sa); 4879 /* Allocate local buffer to use with ioctl */ 4880 addrlen = (t_uscalar_t)sti->sti_faddr_maxlen; 4881 mutex_exit(&so->so_lock); 4882 addr = kmem_alloc(addrlen, KM_SLEEP); 4883 4884 /* 4885 * Issue TI_GETPEERNAME with signals masked. 4886 * Put the result in sti_faddr_sa so that getpeername works after 4887 * a shutdown(output). 4888 * If the ioctl fails (e.g. 
due to an ECONNRESET) the error is reposted 4889 * back to the socket. 4890 */ 4891 strbuf.buf = addr; 4892 strbuf.maxlen = addrlen; 4893 strbuf.len = 0; 4894 4895 sigintr(&smask, 0); 4896 res = 0; 4897 ASSERT(cr); 4898 error = strioctl(SOTOV(so), TI_GETPEERNAME, (intptr_t)&strbuf, 4899 0, K_TO_K, cr, &res); 4900 sigunintr(&smask); 4901 4902 mutex_enter(&so->so_lock); 4903 /* 4904 * If there is an error record the error in so_error but don't fail 4905 * the getpeername. Instead fall back on the recorded 4906 * sti->sti_faddr_sa. 4907 */ 4908 if (error) { 4909 /* 4910 * Various stream head errors can be returned to the ioctl. 4911 * However, it is impossible to determine which of 4912 * these are really socket level errors that were incorrectly 4913 * consumed by the ioctl. Thus this code silently ignores the 4914 * error - the code explicitly does not reinstate the error 4915 * using soseterror(). 4916 * Experiments have shown that at least this set of 4917 * errors is reported and should not be reinstated on the 4918 * socket: 4919 * EINVAL E.g. if an I_LINK was in effect when 4920 * getpeername was called. 4921 * EPIPE The ioctl error semantics prefer the write 4922 * side error over the read side error. 4923 * ENOTCONN The transport just got disconnected but 4924 * sockfs had not yet seen the T_DISCON_IND 4925 * when issuing the ioctl. 4926 */ 4927 error = 0; 4928 } else if (res == 0 && strbuf.len > 0 && 4929 (so->so_state & SS_ISCONNECTED)) { 4930 ASSERT(strbuf.len <= (int)sti->sti_faddr_maxlen); 4931 sti->sti_faddr_len = (socklen_t)strbuf.len; 4932 bcopy(addr, sti->sti_faddr_sa, sti->sti_faddr_len); 4933 sti->sti_faddr_valid = 1; 4934 4935 bcopy(addr, name, MIN(*namelen, sti->sti_faddr_len)); 4936 *namelen = sti->sti_faddr_len; 4937 } 4938 kmem_free(addr, addrlen); 4939 #ifdef DEBUG 4940 dprintso(so, 1, ("sotpi_getpeername (tp): %s\n", 4941 pr_addr(so->so_family, sti->sti_faddr_sa, 4942 (t_uscalar_t)sti->sti_faddr_len))); 4943 #endif /* DEBUG */ 4944 done: 4945 so_unlock_single(so, SOLOCKED); 4946 mutex_exit(&so->so_lock); 4947 return (error); 4948 } 4949 4950 /* 4951 * Update sti_laddr by asking the transport (unless AF_UNIX). 4952 */ 4953 int 4954 sotpi_getsockname(struct sonode *so, struct sockaddr *name, socklen_t *namelen, 4955 struct cred *cr) 4956 { 4957 struct strbuf strbuf; 4958 int error = 0, res; 4959 void *addr; 4960 t_uscalar_t addrlen; 4961 k_sigset_t smask; 4962 sotpi_info_t *sti = SOTOTPI(so); 4963 4964 dprintso(so, 1, ("sotpi_getsockname(%p) %s\n", 4965 (void *)so, pr_state(so->so_state, so->so_mode))); 4966 4967 ASSERT(*namelen > 0); 4968 mutex_enter(&so->so_lock); 4969 so_lock_single(so); /* Set SOLOCKED */ 4970 4971 #ifdef DEBUG 4972 4973 dprintso(so, 1, ("sotpi_getsockname (local): %s\n", 4974 pr_addr(so->so_family, sti->sti_laddr_sa, 4975 (t_uscalar_t)sti->sti_laddr_len))); 4976 #endif /* DEBUG */ 4977 if (sti->sti_laddr_valid) { 4978 bcopy(sti->sti_laddr_sa, name, 4979 MIN(*namelen, sti->sti_laddr_len)); 4980 *namelen = sti->sti_laddr_len; 4981 goto done; 4982 } 4983 4984 if (so->so_family == AF_UNIX) { 4985 /* 4986 * Transport has different name space - return local info. If we 4987 * have enough space, let consumers know the family. 4988 */ 4989 if (*namelen >= sizeof (sa_family_t)) { 4990 name->sa_family = AF_UNIX; 4991 *namelen = sizeof (sa_family_t); 4992 } else { 4993 *namelen = 0; 4994 } 4995 error = 0; 4996 goto done; 4997 } 4998 if (!(so->so_state & SS_ISBOUND)) { 4999 /* If not bound, then nothing to return.
*/ 5000 error = 0; 5001 goto done; 5002 } 5003 5004 /* Allocate local buffer to use with ioctl */ 5005 addrlen = (t_uscalar_t)sti->sti_laddr_maxlen; 5006 mutex_exit(&so->so_lock); 5007 addr = kmem_alloc(addrlen, KM_SLEEP); 5008 5009 /* 5010 * Issue TI_GETMYNAME with signals masked. 5011 * Put the result in sti_laddr_sa so that getsockname works after 5012 * a shutdown(output). 5013 * If the ioctl fails (e.g. due to an ECONNRESET) the error is reposted 5014 * back to the socket. 5015 */ 5016 strbuf.buf = addr; 5017 strbuf.maxlen = addrlen; 5018 strbuf.len = 0; 5019 5020 sigintr(&smask, 0); 5021 res = 0; 5022 ASSERT(cr); 5023 error = strioctl(SOTOV(so), TI_GETMYNAME, (intptr_t)&strbuf, 5024 0, K_TO_K, cr, &res); 5025 sigunintr(&smask); 5026 5027 mutex_enter(&so->so_lock); 5028 /* 5029 * If there is an error record the error in so_error but don't fail 5030 * the getsockname. Instead fall back on the recorded 5031 * sti->sti_laddr_sa. 5032 */ 5033 if (error) { 5034 /* 5035 * Various stream head errors can be returned to the ioctl. 5036 * However, it is impossible to determine which of 5037 * these are really socket level errors that were incorrectly 5038 * consumed by the ioctl. Thus this code silently ignores the 5039 * error - the code explicitly does not reinstate the error 5040 * using soseterror(). 5041 * Experiments have shown that at least this set of 5042 * errors is reported and should not be reinstated on the 5043 * socket: 5044 * EINVAL E.g. if an I_LINK was in effect when 5045 * getsockname was called. 5046 * EPIPE The ioctl error semantics prefer the write 5047 * side error over the read side error. 5048 */ 5049 error = 0; 5050 } else if (res == 0 && strbuf.len > 0 && 5051 (so->so_state & SS_ISBOUND)) { 5052 ASSERT(strbuf.len <= (int)sti->sti_laddr_maxlen); 5053 sti->sti_laddr_len = (socklen_t)strbuf.len; 5054 bcopy(addr, sti->sti_laddr_sa, sti->sti_laddr_len); 5055 sti->sti_laddr_valid = 1; 5056 5057 bcopy(addr, name, MIN(sti->sti_laddr_len, *namelen)); 5058 *namelen = sti->sti_laddr_len; 5059 } 5060 kmem_free(addr, addrlen); 5061 #ifdef DEBUG 5062 dprintso(so, 1, ("sotpi_getsockname (tp): %s\n", 5063 pr_addr(so->so_family, sti->sti_laddr_sa, 5064 (t_uscalar_t)sti->sti_laddr_len))); 5065 #endif /* DEBUG */ 5066 done: 5067 so_unlock_single(so, SOLOCKED); 5068 mutex_exit(&so->so_lock); 5069 return (error); 5070 } 5071 5072 /* 5073 * Get socket options. For SOL_SOCKET options some options are handled 5074 * by the sockfs while others use the value recorded in the sonode as a 5075 * fallback should the T_SVR4_OPTMGMT_REQ fail. 5076 * 5077 * On return, at most *optlenp bytes are copied to optval.
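 * *optlenp is then updated to the number of bytes actually copied out (see the copyout: label below).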
5078 */ 5079 /* ARGSUSED */ 5080 int 5081 sotpi_getsockopt(struct sonode *so, int level, int option_name, 5082 void *optval, socklen_t *optlenp, int flags, struct cred *cr) 5083 { 5084 struct T_optmgmt_req optmgmt_req; 5085 struct T_optmgmt_ack *optmgmt_ack; 5086 struct opthdr oh; 5087 struct opthdr *opt_res; 5088 mblk_t *mp = NULL; 5089 int error = 0; 5090 void *option = NULL; /* Set if fallback value */ 5091 t_uscalar_t maxlen = *optlenp; 5092 t_uscalar_t len; 5093 uint32_t value; 5094 struct timeval tmo_val; /* used for SO_RCVTIMEO, SO_SNDTIMEO */ 5095 struct timeval32 tmo_val32; 5096 struct so_snd_bufinfo snd_bufinfo; /* used for zero copy */ 5097 5098 dprintso(so, 1, ("sotpi_getsockopt(%p, 0x%x, 0x%x, %p, %p) %s\n", 5099 (void *)so, level, option_name, optval, (void *)optlenp, 5100 pr_state(so->so_state, so->so_mode))); 5101 5102 mutex_enter(&so->so_lock); 5103 so_lock_single(so); /* Set SOLOCKED */ 5104 5105 /* 5106 * Check for SOL_SOCKET options. 5107 * Certain SOL_SOCKET options are returned directly whereas 5108 * others only provide a default (fallback) value should 5109 * the T_SVR4_OPTMGMT_REQ fail. 5110 */ 5111 if (level == SOL_SOCKET) { 5112 /* Check parameters */ 5113 switch (option_name) { 5114 case SO_TYPE: 5115 case SO_ERROR: 5116 case SO_DEBUG: 5117 case SO_ACCEPTCONN: 5118 case SO_REUSEADDR: 5119 case SO_REUSEPORT: 5120 case SO_KEEPALIVE: 5121 case SO_DONTROUTE: 5122 case SO_BROADCAST: 5123 case SO_USELOOPBACK: 5124 case SO_OOBINLINE: 5125 case SO_SNDBUF: 5126 case SO_RCVBUF: 5127 #ifdef notyet 5128 case SO_SNDLOWAT: 5129 case SO_RCVLOWAT: 5130 #endif /* notyet */ 5131 case SO_DOMAIN: 5132 case SO_DGRAM_ERRIND: 5133 if (maxlen < (t_uscalar_t)sizeof (int32_t)) { 5134 error = EINVAL; 5135 eprintsoline(so, error); 5136 goto done2; 5137 } 5138 break; 5139 case SO_RCVTIMEO: 5140 case SO_SNDTIMEO: 5141 if (get_udatamodel() == DATAMODEL_NONE || 5142 get_udatamodel() == DATAMODEL_NATIVE) { 5143 if (maxlen < sizeof (struct timeval)) { 5144 error = EINVAL; 5145 eprintsoline(so, error); 5146 goto done2; 5147 } 5148 } else { 5149 if (maxlen < sizeof (struct timeval32)) { 5150 error = EINVAL; 5151 eprintsoline(so, error); 5152 goto done2; 5153 } 5154 5155 } 5156 break; 5157 case SO_LINGER: 5158 if (maxlen < (t_uscalar_t)sizeof (struct linger)) { 5159 error = EINVAL; 5160 eprintsoline(so, error); 5161 goto done2; 5162 } 5163 break; 5164 case SO_SND_BUFINFO: 5165 if (maxlen < (t_uscalar_t) 5166 sizeof (struct so_snd_bufinfo)) { 5167 error = EINVAL; 5168 eprintsoline(so, error); 5169 goto done2; 5170 } 5171 break; 5172 } 5173 5174 len = (t_uscalar_t)sizeof (uint32_t); /* Default */ 5175 5176 switch (option_name) { 5177 case SO_TYPE: 5178 value = so->so_type; 5179 option = &value; 5180 goto copyout; /* No need to issue T_SVR4_OPTMGMT_REQ */ 5181 5182 case SO_ERROR: 5183 value = sogeterr(so, B_TRUE); 5184 option = &value; 5185 goto copyout; /* No need to issue T_SVR4_OPTMGMT_REQ */ 5186 5187 case SO_ACCEPTCONN: 5188 if (so->so_state & SS_ACCEPTCONN) 5189 value = SO_ACCEPTCONN; 5190 else 5191 value = 0; 5192 #ifdef DEBUG 5193 if (value) { 5194 dprintso(so, 1, 5195 ("sotpi_getsockopt: 0x%x is set\n", 5196 option_name)); 5197 } else { 5198 dprintso(so, 1, 5199 ("sotpi_getsockopt: 0x%x not set\n", 5200 option_name)); 5201 } 5202 #endif /* DEBUG */ 5203 option = &value; 5204 goto copyout; /* No need to issue T_SVR4_OPTMGMT_REQ */ 5205 5206 case SO_DEBUG: 5207 case SO_REUSEADDR: 5208 case SO_REUSEPORT: 5209 case SO_KEEPALIVE: 5210 case SO_DONTROUTE: 5211 case SO_BROADCAST: 5212 case 
SO_USELOOPBACK: 5213 case SO_OOBINLINE: 5214 case SO_DGRAM_ERRIND: 5215 value = (so->so_options & option_name); 5216 #ifdef DEBUG 5217 if (value) { 5218 dprintso(so, 1, 5219 ("sotpi_getsockopt: 0x%x is set\n", 5220 option_name)); 5221 } else { 5222 dprintso(so, 1, 5223 ("sotpi_getsockopt: 0x%x not set\n", 5224 option_name)); 5225 } 5226 #endif /* DEBUG */ 5227 option = &value; 5228 goto copyout; /* No need to issue T_SVR4_OPTMGMT_REQ */ 5229 5230 /* 5231 * The following options are only returned by sockfs when the 5232 * T_SVR4_OPTMGMT_REQ fails. 5233 */ 5234 case SO_LINGER: 5235 option = &so->so_linger; 5236 len = (t_uscalar_t)sizeof (struct linger); 5237 break; 5238 case SO_SNDBUF: { 5239 ssize_t lvalue; 5240 5241 /* 5242 * If the option has not been set then get a default 5243 * value from the read queue. This value is 5244 * returned if the transport fails 5245 * the T_SVR4_OPTMGMT_REQ. 5246 */ 5247 lvalue = so->so_sndbuf; 5248 if (lvalue == 0) { 5249 mutex_exit(&so->so_lock); 5250 (void) strqget(strvp2wq(SOTOV(so))->q_next, 5251 QHIWAT, 0, &lvalue); 5252 mutex_enter(&so->so_lock); 5253 dprintso(so, 1, 5254 ("got SO_SNDBUF %ld from q\n", lvalue)); 5255 } 5256 value = (int)lvalue; 5257 option = &value; 5258 len = (t_uscalar_t)sizeof (so->so_sndbuf); 5259 break; 5260 } 5261 case SO_RCVBUF: { 5262 ssize_t lvalue; 5263 5264 /* 5265 * If the option has not been set then get a default 5266 * value from the read queue. This value is 5267 * returned if the transport fails 5268 * the T_SVR4_OPTMGMT_REQ. 5269 * 5270 * XXX If SO_RCVBUF has been set and this is an 5271 * XPG 4.2 application then do not ask the transport 5272 * since the transport might adjust the value and not 5273 * return exactly what was set by the application. 5274 * For non-XPG 4.2 application we return the value 5275 * that the transport is actually using. 5276 */ 5277 lvalue = so->so_rcvbuf; 5278 if (lvalue == 0) { 5279 mutex_exit(&so->so_lock); 5280 (void) strqget(RD(strvp2wq(SOTOV(so))), 5281 QHIWAT, 0, &lvalue); 5282 mutex_enter(&so->so_lock); 5283 dprintso(so, 1, 5284 ("got SO_RCVBUF %ld from q\n", lvalue)); 5285 } else if (flags & _SOGETSOCKOPT_XPG4_2) { 5286 value = (int)lvalue; 5287 option = &value; 5288 goto copyout; /* skip asking transport */ 5289 } 5290 value = (int)lvalue; 5291 option = &value; 5292 len = (t_uscalar_t)sizeof (so->so_rcvbuf); 5293 break; 5294 } 5295 case SO_DOMAIN: 5296 value = so->so_family; 5297 option = &value; 5298 goto copyout; /* No need to issue T_SVR4_OPTMGMT_REQ */ 5299 5300 #ifdef notyet 5301 /* 5302 * We do not implement the semantics of these options 5303 * thus we shouldn't implement the options either. 
5304 */ 5305 case SO_SNDLOWAT: 5306 value = so->so_sndlowat; 5307 option = &value; 5308 break; 5309 case SO_RCVLOWAT: 5310 value = so->so_rcvlowat; 5311 option = &value; 5312 break; 5313 #endif /* notyet */ 5314 case SO_SNDTIMEO: 5315 case SO_RCVTIMEO: { 5316 clock_t val; 5317 5318 if (option_name == SO_RCVTIMEO) 5319 val = drv_hztousec(so->so_rcvtimeo); 5320 else 5321 val = drv_hztousec(so->so_sndtimeo); 5322 tmo_val.tv_sec = val / (1000 * 1000); 5323 tmo_val.tv_usec = val % (1000 * 1000); 5324 if (get_udatamodel() == DATAMODEL_NONE || 5325 get_udatamodel() == DATAMODEL_NATIVE) { 5326 option = &tmo_val; 5327 len = sizeof (struct timeval); 5328 } else { 5329 TIMEVAL_TO_TIMEVAL32(&tmo_val32, &tmo_val); 5330 option = &tmo_val32; 5331 len = sizeof (struct timeval32); 5332 } 5333 break; 5334 } 5335 case SO_SND_BUFINFO: { 5336 snd_bufinfo.sbi_wroff = 5337 (so->so_proto_props).sopp_wroff; 5338 snd_bufinfo.sbi_maxblk = 5339 (so->so_proto_props).sopp_maxblk; 5340 snd_bufinfo.sbi_maxpsz = 5341 (so->so_proto_props).sopp_maxpsz; 5342 snd_bufinfo.sbi_tail = 5343 (so->so_proto_props).sopp_tail; 5344 option = &snd_bufinfo; 5345 len = (t_uscalar_t)sizeof (struct so_snd_bufinfo); 5346 break; 5347 } 5348 } 5349 } 5350 5351 mutex_exit(&so->so_lock); 5352 5353 /* Send request */ 5354 optmgmt_req.PRIM_type = T_SVR4_OPTMGMT_REQ; 5355 optmgmt_req.MGMT_flags = T_CHECK; 5356 optmgmt_req.OPT_length = (t_scalar_t)(sizeof (oh) + maxlen); 5357 optmgmt_req.OPT_offset = (t_scalar_t)sizeof (optmgmt_req); 5358 5359 oh.level = level; 5360 oh.name = option_name; 5361 oh.len = maxlen; 5362 5363 mp = soallocproto3(&optmgmt_req, sizeof (optmgmt_req), 5364 &oh, sizeof (oh), NULL, maxlen, 0, _ALLOC_SLEEP, cr); 5365 /* Let option management work in the presence of data flow control */ 5366 error = kstrputmsg(SOTOV(so), mp, NULL, 0, 0, 5367 MSG_BAND|MSG_HOLDSIG|MSG_IGNERROR|MSG_IGNFLOW, 0); 5368 mp = NULL; 5369 mutex_enter(&so->so_lock); 5370 if (error) { 5371 eprintsoline(so, error); 5372 goto done2; 5373 } 5374 error = sowaitprim(so, T_SVR4_OPTMGMT_REQ, T_OPTMGMT_ACK, 5375 (t_uscalar_t)(sizeof (*optmgmt_ack) + sizeof (*opt_res)), &mp, 0); 5376 if (error) { 5377 if (option != NULL) { 5378 /* We have a fallback value */ 5379 error = 0; 5380 goto copyout; 5381 } 5382 eprintsoline(so, error); 5383 goto done2; 5384 } 5385 ASSERT(mp); 5386 optmgmt_ack = (struct T_optmgmt_ack *)mp->b_rptr; 5387 opt_res = (struct opthdr *)sogetoff(mp, optmgmt_ack->OPT_offset, 5388 optmgmt_ack->OPT_length, __TPI_ALIGN_SIZE); 5389 if (opt_res == NULL) { 5390 if (option != NULL) { 5391 /* We have a fallback value */ 5392 error = 0; 5393 goto copyout; 5394 } 5395 error = EPROTO; 5396 eprintsoline(so, error); 5397 goto done; 5398 } 5399 option = &opt_res[1]; 5400 5401 /* check to ensure that the option is within bounds */ 5402 if (((uintptr_t)option + opt_res->len < (uintptr_t)option) || 5403 (uintptr_t)option + opt_res->len > (uintptr_t)mp->b_wptr) { 5404 if (option != NULL) { 5405 /* We have a fallback value */ 5406 error = 0; 5407 goto copyout; 5408 } 5409 error = EPROTO; 5410 eprintsoline(so, error); 5411 goto done; 5412 } 5413 5414 len = opt_res->len; 5415 5416 copyout: { 5417 t_uscalar_t size = MIN(len, maxlen); 5418 bcopy(option, optval, size); 5419 bcopy(&size, optlenp, sizeof (size)); 5420 } 5421 done: 5422 freemsg(mp); 5423 done2: 5424 so_unlock_single(so, SOLOCKED); 5425 mutex_exit(&so->so_lock); 5426 5427 return (error); 5428 } 5429 5430 /* 5431 * Set socket options. All options are passed down in a T_SVR4_OPTMGMT_REQ. 
5432 * SOL_SOCKET options are also recorded in the sonode. A setsockopt for 5433 * SOL_SOCKET options will not fail just because the T_SVR4_OPTMGMT_REQ fails - 5434 * setsockopt has to work even if the transport does not support the option. 5435 */ 5436 /* ARGSUSED */ 5437 int 5438 sotpi_setsockopt(struct sonode *so, int level, int option_name, 5439 const void *optval, t_uscalar_t optlen, struct cred *cr) 5440 { 5441 struct T_optmgmt_req optmgmt_req; 5442 struct opthdr oh; 5443 mblk_t *mp; 5444 int error = 0; 5445 boolean_t handled = B_FALSE; 5446 5447 dprintso(so, 1, ("sotpi_setsockopt(%p, 0x%x, 0x%x, %p, %d) %s\n", 5448 (void *)so, level, option_name, optval, optlen, 5449 pr_state(so->so_state, so->so_mode))); 5450 5451 /* X/Open requires this check */ 5452 if ((so->so_state & SS_CANTSENDMORE) && !xnet_skip_checks) { 5453 if (xnet_check_print) 5454 printf("sockfs: X/Open setsockopt check => EINVAL\n"); 5455 return (EINVAL); 5456 } 5457 5458 mutex_enter(&so->so_lock); 5459 so_lock_single(so); /* Set SOLOCKED */ 5460 mutex_exit(&so->so_lock); 5461 5462 optmgmt_req.PRIM_type = T_SVR4_OPTMGMT_REQ; 5463 optmgmt_req.MGMT_flags = T_NEGOTIATE; 5464 optmgmt_req.OPT_length = (t_scalar_t)sizeof (oh) + optlen; 5465 optmgmt_req.OPT_offset = (t_scalar_t)sizeof (optmgmt_req); 5466 5467 oh.level = level; 5468 oh.name = option_name; 5469 oh.len = optlen; 5470 5471 mp = soallocproto3(&optmgmt_req, sizeof (optmgmt_req), 5472 &oh, sizeof (oh), optval, optlen, 0, _ALLOC_SLEEP, cr); 5473 /* Let option management work in the presence of data flow control */ 5474 error = kstrputmsg(SOTOV(so), mp, NULL, 0, 0, 5475 MSG_BAND|MSG_HOLDSIG|MSG_IGNERROR|MSG_IGNFLOW, 0); 5476 mp = NULL; 5477 mutex_enter(&so->so_lock); 5478 if (error) { 5479 eprintsoline(so, error); 5480 goto done2; 5481 } 5482 error = sowaitprim(so, T_SVR4_OPTMGMT_REQ, T_OPTMGMT_ACK, 5483 (t_uscalar_t)sizeof (struct T_optmgmt_ack), &mp, 0); 5484 if (error) { 5485 eprintsoline(so, error); 5486 goto done; 5487 } 5488 ASSERT(mp); 5489 /* No need to verify T_optmgmt_ack */ 5490 freemsg(mp); 5491 done: 5492 /* 5493 * Check for SOL_SOCKET options and record their values. 5494 * If we know about a SOL_SOCKET parameter and the transport 5495 * failed it with TBADOPT or TOUTSTATE (i.e. ENOPROTOOPT or 5496 * EPROTO) we let the setsockopt succeed. 
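 * (EINVAL from the transport is forgiven in the same way; see the error check at the end of the SOL_SOCKET handling below.)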
5497 */ 5498 if (level == SOL_SOCKET) { 5499 /* Check parameters */ 5500 switch (option_name) { 5501 case SO_DEBUG: 5502 case SO_REUSEADDR: 5503 case SO_REUSEPORT: 5504 case SO_KEEPALIVE: 5505 case SO_DONTROUTE: 5506 case SO_BROADCAST: 5507 case SO_USELOOPBACK: 5508 case SO_OOBINLINE: 5509 case SO_SNDBUF: 5510 case SO_RCVBUF: 5511 #ifdef notyet 5512 case SO_SNDLOWAT: 5513 case SO_RCVLOWAT: 5514 #endif /* notyet */ 5515 case SO_DGRAM_ERRIND: 5516 if (optlen != (t_uscalar_t)sizeof (int32_t)) { 5517 error = EINVAL; 5518 eprintsoline(so, error); 5519 goto done2; 5520 } 5521 ASSERT(optval); 5522 handled = B_TRUE; 5523 break; 5524 case SO_SNDTIMEO: 5525 case SO_RCVTIMEO: 5526 if (get_udatamodel() == DATAMODEL_NONE || 5527 get_udatamodel() == DATAMODEL_NATIVE) { 5528 if (optlen != sizeof (struct timeval)) { 5529 error = EINVAL; 5530 eprintsoline(so, error); 5531 goto done2; 5532 } 5533 } else { 5534 if (optlen != sizeof (struct timeval32)) { 5535 error = EINVAL; 5536 eprintsoline(so, error); 5537 goto done2; 5538 } 5539 } 5540 ASSERT(optval); 5541 handled = B_TRUE; 5542 break; 5543 case SO_LINGER: 5544 if (optlen != (t_uscalar_t)sizeof (struct linger)) { 5545 error = EINVAL; 5546 eprintsoline(so, error); 5547 goto done2; 5548 } 5549 ASSERT(optval); 5550 handled = B_TRUE; 5551 break; 5552 } 5553 5554 #define intvalue (*(int32_t *)optval) 5555 5556 switch (option_name) { 5557 case SO_TYPE: 5558 case SO_ERROR: 5559 case SO_ACCEPTCONN: 5560 /* Can't be set */ 5561 error = ENOPROTOOPT; 5562 goto done2; 5563 case SO_LINGER: { 5564 struct linger *l = (struct linger *)optval; 5565 5566 so->so_linger.l_linger = l->l_linger; 5567 if (l->l_onoff) { 5568 so->so_linger.l_onoff = SO_LINGER; 5569 so->so_options |= SO_LINGER; 5570 } else { 5571 so->so_linger.l_onoff = 0; 5572 so->so_options &= ~SO_LINGER; 5573 } 5574 break; 5575 } 5576 5577 case SO_DEBUG: 5578 #ifdef SOCK_TEST 5579 if (intvalue & 2) 5580 sock_test_timelimit = 10 * hz; 5581 else 5582 sock_test_timelimit = 0; 5583 5584 if (intvalue & 4) 5585 do_useracc = 0; 5586 else 5587 do_useracc = 1; 5588 #endif /* SOCK_TEST */ 5589 /* FALLTHRU */ 5590 case SO_REUSEADDR: 5591 case SO_REUSEPORT: 5592 case SO_KEEPALIVE: 5593 case SO_DONTROUTE: 5594 case SO_BROADCAST: 5595 case SO_USELOOPBACK: 5596 case SO_OOBINLINE: 5597 case SO_DGRAM_ERRIND: 5598 if (intvalue != 0) { 5599 dprintso(so, 1, 5600 ("socket_setsockopt: setting 0x%x\n", 5601 option_name)); 5602 so->so_options |= option_name; 5603 } else { 5604 dprintso(so, 1, 5605 ("socket_setsockopt: clearing 0x%x\n", 5606 option_name)); 5607 so->so_options &= ~option_name; 5608 } 5609 break; 5610 /* 5611 * The following options are only returned by us when the 5612 * transport layer fails. 5613 * XXX XPG 4.2 applications retrieve SO_RCVBUF from sockfs 5614 * since the transport might adjust the value and not 5615 * return exactly what was set by the application. 5616 */ 5617 case SO_SNDBUF: 5618 so->so_sndbuf = intvalue; 5619 break; 5620 case SO_RCVBUF: 5621 so->so_rcvbuf = intvalue; 5622 break; 5623 case SO_RCVPSH: 5624 so->so_rcv_timer_interval = intvalue; 5625 break; 5626 #ifdef notyet 5627 /* 5628 * We do not implement the semantics of these options 5629 * thus we shouldn't implement the options either. 
*/ 5631 case SO_SNDLOWAT: 5632 so->so_sndlowat = intvalue; 5633 break; 5634 case SO_RCVLOWAT: 5635 so->so_rcvlowat = intvalue; 5636 break; 5637 #endif /* notyet */ 5638 case SO_SNDTIMEO: 5639 case SO_RCVTIMEO: { 5640 struct timeval tl; 5641 clock_t val; 5642 5643 if (get_udatamodel() == DATAMODEL_NONE || 5644 get_udatamodel() == DATAMODEL_NATIVE) 5645 bcopy(optval, &tl, 5646 sizeof (struct timeval)); 5647 else 5648 TIMEVAL32_TO_TIMEVAL(&tl, 5649 (struct timeval32 *)optval); 5650 val = tl.tv_sec * 1000 * 1000 + tl.tv_usec; 5651 if (option_name == SO_RCVTIMEO) 5652 so->so_rcvtimeo = drv_usectohz(val); 5653 else 5654 so->so_sndtimeo = drv_usectohz(val); 5655 break; 5656 } 5657 } 5658 #undef intvalue 5659 5660 if (error) { 5661 if ((error == ENOPROTOOPT || error == EPROTO || 5662 error == EINVAL) && handled) { 5663 dprintso(so, 1, 5664 ("setsockopt: ignoring error %d for 0x%x\n", 5665 error, option_name)); 5666 error = 0; 5667 } 5668 } 5669 } 5670 done2: 5671 so_unlock_single(so, SOLOCKED); 5672 mutex_exit(&so->so_lock); 5673 return (error); 5674 } 5675 5676 /* 5677 * sotpi_close() is called when the last open reference goes away. 5678 */ 5679 /* ARGSUSED */ 5680 int 5681 sotpi_close(struct sonode *so, int flag, struct cred *cr) 5682 { 5683 struct vnode *vp = SOTOV(so); 5684 dev_t dev; 5685 int error = 0; 5686 sotpi_info_t *sti = SOTOTPI(so); 5687 5688 dprintso(so, 1, ("sotpi_close(%p, %x) %s\n", 5689 (void *)vp, flag, pr_state(so->so_state, so->so_mode))); 5690 5691 dev = sti->sti_dev; 5692 5693 ASSERT(STREAMSTAB(getmajor(dev))); 5694 5695 mutex_enter(&so->so_lock); 5696 so_lock_single(so); /* Set SOLOCKED */ 5697 5698 ASSERT(so_verify_oobstate(so)); 5699 5700 if (sti->sti_nl7c_flags & NL7C_ENABLED) { 5701 sti->sti_nl7c_flags = 0; 5702 nl7c_close(so); 5703 } 5704 5705 if (vp->v_stream != NULL) { 5706 vnode_t *ux_vp; 5707 5708 if (so->so_family == AF_UNIX) { 5709 /* Could avoid this when CANTSENDMORE for !dgram */ 5710 so_unix_close(so); 5711 } 5712 5713 mutex_exit(&so->so_lock); 5714 /* 5715 * Disassemble the linkage from the AF_UNIX underlying file 5716 * system vnode to this socket (by atomically clearing 5717 * v_stream in vn_rele_stream) before strclose clears sd_vnode 5718 * and frees the stream head. 5719 */ 5720 if ((ux_vp = sti->sti_ux_bound_vp) != NULL) { 5721 ASSERT(ux_vp->v_stream); 5722 sti->sti_ux_bound_vp = NULL; 5723 vn_rele_stream(ux_vp); 5724 } 5725 error = strclose(vp, flag, cr); 5726 vp->v_stream = NULL; 5727 mutex_enter(&so->so_lock); 5728 } 5729 5730 /* 5731 * Flush the T_DISCON_IND on sti_discon_ind_mp. 5732 */ 5733 so_flush_discon_ind(so); 5734 5735 so_unlock_single(so, SOLOCKED); 5736 mutex_exit(&so->so_lock); 5737 5738 /* 5739 * Needed for STREAMS. 5740 * Decrement the device driver's reference count for streams 5741 * opened via the clone dip. The driver was held in clone_open(). 5742 * The absence of clone_close() forces this asymmetry. 5743 */ 5744 if (so->so_flag & SOCLONE) 5745 ddi_rele_driver(getmajor(dev)); 5746 5747 return (error); 5748 } 5749 5750 static int 5751 sotpi_ioctl(struct sonode *so, int cmd, intptr_t arg, int mode, 5752 struct cred *cr, int32_t *rvalp) 5753 { 5754 struct vnode *vp = SOTOV(so); 5755 sotpi_info_t *sti = SOTOTPI(so); 5756 int error = 0; 5757 5758 dprintso(so, 0, ("sotpi_ioctl: cmd 0x%x, arg 0x%lx, state %s\n", 5759 cmd, arg, pr_state(so->so_state, so->so_mode))); 5760 5761 switch (cmd) { 5762 case SIOCSQPTR: 5763 /* 5764 * SIOCSQPTR is valid only when a helper stream is created 5765 * by the protocol.
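 * TPI sockets do not use such a helper stream, so the request simply falls through to the shared EOPNOTSUPP return below.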
5766 */ 5767 case _I_INSERT: 5768 case _I_REMOVE: 5769 /* 5770 * Since there's no compelling reason to support these ioctls 5771 * on sockets, and doing so would increase the complexity 5772 * markedly, prevent it. 5773 */ 5774 return (EOPNOTSUPP); 5775 5776 case I_FIND: 5777 case I_LIST: 5778 case I_LOOK: 5779 case I_POP: 5780 case I_PUSH: 5781 /* 5782 * To prevent races and inconsistencies between the actual 5783 * state of the stream and the state according to the sonode, 5784 * we serialize all operations which modify or operate on the 5785 * list of modules on the socket's stream. 5786 */ 5787 mutex_enter(&sti->sti_plumb_lock); 5788 error = socktpi_plumbioctl(vp, cmd, arg, mode, cr, rvalp); 5789 mutex_exit(&sti->sti_plumb_lock); 5790 return (error); 5791 5792 default: 5793 if (so->so_version != SOV_STREAM) 5794 break; 5795 5796 /* 5797 * The imaginary "sockmod" has been popped; act as a stream. 5798 */ 5799 return (strioctl(vp, cmd, arg, mode, U_TO_K, cr, rvalp)); 5800 } 5801 5802 ASSERT(so->so_version != SOV_STREAM); 5803 5804 /* 5805 * Process socket-specific ioctls. 5806 */ 5807 switch (cmd) { 5808 case FIONBIO: { 5809 int32_t value; 5810 5811 if (so_copyin((void *)arg, &value, sizeof (int32_t), 5812 (mode & (int)FKIOCTL))) 5813 return (EFAULT); 5814 5815 mutex_enter(&so->so_lock); 5816 if (value) { 5817 so->so_state |= SS_NDELAY; 5818 } else { 5819 so->so_state &= ~SS_NDELAY; 5820 } 5821 mutex_exit(&so->so_lock); 5822 return (0); 5823 } 5824 5825 case FIOASYNC: { 5826 int32_t value; 5827 5828 if (so_copyin((void *)arg, &value, sizeof (int32_t), 5829 (mode & (int)FKIOCTL))) 5830 return (EFAULT); 5831 5832 mutex_enter(&so->so_lock); 5833 /* 5834 * SS_ASYNC flag not already set correctly? 5835 * (!value != !(so->so_state & SS_ASYNC)) 5836 * but some engineers find that too hard to read. 5837 */ 5838 if (value == 0 && (so->so_state & SS_ASYNC) != 0 || 5839 value != 0 && (so->so_state & SS_ASYNC) == 0) 5840 error = so_flip_async(so, vp, mode, cr); 5841 mutex_exit(&so->so_lock); 5842 return (error); 5843 } 5844 5845 case SIOCSPGRP: 5846 case FIOSETOWN: { 5847 pid_t pgrp; 5848 5849 if (so_copyin((void *)arg, &pgrp, sizeof (pid_t), 5850 (mode & (int)FKIOCTL))) 5851 return (EFAULT); 5852 5853 mutex_enter(&so->so_lock); 5854 dprintso(so, 1, ("setown: new %d old %d\n", pgrp, so->so_pgrp)); 5855 /* Any change? */ 5856 if (pgrp != so->so_pgrp) 5857 error = so_set_siggrp(so, vp, pgrp, mode, cr); 5858 mutex_exit(&so->so_lock); 5859 return (error); 5860 } 5861 case SIOCGPGRP: 5862 case FIOGETOWN: 5863 if (so_copyout(&so->so_pgrp, (void *)arg, 5864 sizeof (pid_t), (mode & (int)FKIOCTL))) 5865 return (EFAULT); 5866 return (0); 5867 5868 case SIOCATMARK: { 5869 int retval; 5870 uint_t so_state; 5871 5872 /* 5873 * strwaitmark has a finite timeout after which it 5874 * returns -1 if the mark state is undetermined. 5875 * In order to avoid any race between the mark state 5876 * in sockfs and the mark state in the stream head this 5877 * routine loops until the mark state can be determined 5878 * (or the urgent data indication has been removed by some 5879 * other thread). 5880 */ 5881 do { 5882 mutex_enter(&so->so_lock); 5883 so_state = so->so_state; 5884 mutex_exit(&so->so_lock); 5885 if (so_state & SS_RCVATMARK) { 5886 retval = 1; 5887 } else if (!(so_state & SS_OOBPEND)) { 5888 /* 5889 * No SIGURG has been generated -- there is no 5890 * pending or present urgent data. Thus can't 5891 * possibly be at the mark. 
5892 */ 5893 retval = 0; 5894 } else { 5895 /* 5896 * Have the stream head wait until there is 5897 * either some messages on the read queue, or 5898 * STRATMARK or STRNOTATMARK gets set. The 5899 * STRNOTATMARK flag is used so that the 5900 * transport can send up a MSGNOTMARKNEXT 5901 * M_DATA to indicate that it is not 5902 * at the mark and additional data is not about 5903 * to be send upstream. 5904 * 5905 * If the mark state is undetermined this will 5906 * return -1 and we will loop rechecking the 5907 * socket state. 5908 */ 5909 retval = strwaitmark(vp); 5910 } 5911 } while (retval == -1); 5912 5913 if (so_copyout(&retval, (void *)arg, sizeof (int), 5914 (mode & (int)FKIOCTL))) 5915 return (EFAULT); 5916 return (0); 5917 } 5918 5919 case I_FDINSERT: 5920 case I_SENDFD: 5921 case I_RECVFD: 5922 case I_ATMARK: 5923 case _SIOCSOCKFALLBACK: 5924 /* 5925 * These ioctls do not apply to sockets. I_FDINSERT can be 5926 * used to send M_PROTO messages without modifying the socket 5927 * state. I_SENDFD/RECVFD should not be used for socket file 5928 * descriptor passing since they assume a twisted stream. 5929 * SIOCATMARK must be used instead of I_ATMARK. 5930 * 5931 * _SIOCSOCKFALLBACK from an application should never be 5932 * processed. It is only generated by socktpi_open() or 5933 * in response to I_POP or I_PUSH. 5934 */ 5935 #ifdef DEBUG 5936 zcmn_err(getzoneid(), CE_WARN, 5937 "Unsupported STREAMS ioctl 0x%x on socket. " 5938 "Pid = %d\n", cmd, curproc->p_pid); 5939 #endif /* DEBUG */ 5940 return (EOPNOTSUPP); 5941 5942 case _I_GETPEERCRED: 5943 if ((mode & FKIOCTL) == 0) 5944 return (EINVAL); 5945 5946 mutex_enter(&so->so_lock); 5947 if ((so->so_mode & SM_CONNREQUIRED) == 0) { 5948 error = ENOTSUP; 5949 } else if ((so->so_state & SS_ISCONNECTED) == 0) { 5950 error = ENOTCONN; 5951 } else if (so->so_peercred != NULL) { 5952 k_peercred_t *kp = (k_peercred_t *)arg; 5953 kp->pc_cr = so->so_peercred; 5954 kp->pc_cpid = so->so_cpid; 5955 crhold(so->so_peercred); 5956 } else { 5957 error = EINVAL; 5958 } 5959 mutex_exit(&so->so_lock); 5960 return (error); 5961 5962 default: 5963 /* 5964 * Do the higher-order bits of the ioctl cmd indicate 5965 * that it is an I_* streams ioctl? 5966 */ 5967 if ((cmd & 0xffffff00U) == STR && 5968 so->so_version == SOV_SOCKBSD) { 5969 #ifdef DEBUG 5970 zcmn_err(getzoneid(), CE_WARN, 5971 "Unsupported STREAMS ioctl 0x%x on socket. " 5972 "Pid = %d\n", cmd, curproc->p_pid); 5973 #endif /* DEBUG */ 5974 return (EOPNOTSUPP); 5975 } 5976 return (strioctl(vp, cmd, arg, mode, U_TO_K, cr, rvalp)); 5977 } 5978 } 5979 5980 /* 5981 * Handle plumbing-related ioctls. 5982 */ 5983 static int 5984 socktpi_plumbioctl(struct vnode *vp, int cmd, intptr_t arg, int mode, 5985 struct cred *cr, int32_t *rvalp) 5986 { 5987 static const char sockmod_name[] = "sockmod"; 5988 struct sonode *so = VTOSO(vp); 5989 char mname[FMNAMESZ + 1]; 5990 int error; 5991 sotpi_info_t *sti = SOTOTPI(so); 5992 5993 ASSERT(MUTEX_HELD(&sti->sti_plumb_lock)); 5994 5995 if (so->so_version == SOV_SOCKBSD) 5996 return (EOPNOTSUPP); 5997 5998 if (so->so_version == SOV_STREAM) { 5999 /* 6000 * The imaginary "sockmod" has been popped - act as a stream. 6001 * If this is a push of sockmod then change back to a socket. 6002 */ 6003 if (cmd == I_PUSH) { 6004 error = ((mode & FKIOCTL) ? 
copystr : copyinstr)( 6005 (void *)arg, mname, sizeof (mname), NULL); 6006 6007 if (error == 0 && strcmp(mname, sockmod_name) == 0) { 6008 dprintso(so, 0, ("socktpi_ioctl: going to " 6009 "socket version\n")); 6010 so_stream2sock(so); 6011 return (0); 6012 } 6013 } 6014 return (strioctl(vp, cmd, arg, mode, U_TO_K, cr, rvalp)); 6015 } 6016 6017 switch (cmd) { 6018 case I_PUSH: 6019 if (sti->sti_direct) { 6020 mutex_enter(&so->so_lock); 6021 so_lock_single(so); 6022 mutex_exit(&so->so_lock); 6023 6024 error = strioctl(vp, _SIOCSOCKFALLBACK, 0, 0, K_TO_K, 6025 cr, rvalp); 6026 6027 mutex_enter(&so->so_lock); 6028 if (error == 0) 6029 sti->sti_direct = 0; 6030 so_unlock_single(so, SOLOCKED); 6031 mutex_exit(&so->so_lock); 6032 6033 if (error != 0) 6034 return (error); 6035 } 6036 6037 error = strioctl(vp, cmd, arg, mode, U_TO_K, cr, rvalp); 6038 if (error == 0) 6039 sti->sti_pushcnt++; 6040 return (error); 6041 6042 case I_POP: 6043 if (sti->sti_pushcnt == 0) { 6044 /* Emulate sockmod being popped */ 6045 dprintso(so, 0, 6046 ("socktpi_ioctl: going to STREAMS version\n")); 6047 return (so_sock2stream(so)); 6048 } 6049 6050 error = strioctl(vp, cmd, arg, mode, U_TO_K, cr, rvalp); 6051 if (error == 0) 6052 sti->sti_pushcnt--; 6053 return (error); 6054 6055 case I_LIST: { 6056 struct str_mlist *kmlistp, *umlistp; 6057 struct str_list kstrlist; 6058 ssize_t kstrlistsize; 6059 int i, nmods; 6060 6061 STRUCT_DECL(str_list, ustrlist); 6062 STRUCT_INIT(ustrlist, mode); 6063 6064 if (arg == NULL) { 6065 error = strioctl(vp, cmd, arg, mode, U_TO_K, cr, rvalp); 6066 if (error == 0) 6067 (*rvalp)++; /* Add one for sockmod */ 6068 return (error); 6069 } 6070 6071 error = so_copyin((void *)arg, STRUCT_BUF(ustrlist), 6072 STRUCT_SIZE(ustrlist), mode & FKIOCTL); 6073 if (error != 0) 6074 return (error); 6075 6076 nmods = STRUCT_FGET(ustrlist, sl_nmods); 6077 if (nmods <= 0) 6078 return (EINVAL); 6079 /* 6080 * Ceiling nmods at nstrpush to prevent someone from 6081 * maliciously consuming lots of kernel memory. 6082 */ 6083 nmods = MIN(nmods, nstrpush); 6084 6085 kstrlistsize = (nmods + 1) * sizeof (struct str_mlist); 6086 kstrlist.sl_nmods = nmods; 6087 kstrlist.sl_modlist = kmem_zalloc(kstrlistsize, KM_SLEEP); 6088 6089 error = strioctl(vp, cmd, (intptr_t)&kstrlist, mode, K_TO_K, 6090 cr, rvalp); 6091 if (error != 0) 6092 goto done; 6093 6094 /* 6095 * Considering the module list as a 0-based array of sl_nmods 6096 * modules, sockmod should conceptually exist at slot 6097 * sti_pushcnt. Insert sockmod at this location by sliding all 6098 * of the module names after so_pushcnt over by one. We know 6099 * that there will be room to do this since we allocated 6100 * sl_modlist with an additional slot. 6101 */ 6102 for (i = kstrlist.sl_nmods; i > sti->sti_pushcnt; i--) 6103 kstrlist.sl_modlist[i] = kstrlist.sl_modlist[i - 1]; 6104 6105 (void) strcpy(kstrlist.sl_modlist[i].l_name, sockmod_name); 6106 kstrlist.sl_nmods++; 6107 6108 /* 6109 * Copy all of the entries out to ustrlist. 
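 *
 * For illustration (hypothetical stream contents): if the transport
 * stream reports sl_nmods == 2 with sl_modlist == { "bufmod", "tcp" }
 * and sti_pushcnt == 1, the slide above produces
 * { "bufmod", "sockmod", "tcp" } with sl_nmods == 3, which is what the
 * application then sees.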
6110 */ 6111 kmlistp = kstrlist.sl_modlist; 6112 umlistp = STRUCT_FGETP(ustrlist, sl_modlist); 6113 for (i = 0; i < nmods && i < kstrlist.sl_nmods; i++) { 6114 error = so_copyout(kmlistp++, umlistp++, 6115 sizeof (struct str_mlist), mode & FKIOCTL); 6116 if (error != 0) 6117 goto done; 6118 } 6119 6120 error = so_copyout(&i, (void *)arg, sizeof (int32_t), 6121 mode & FKIOCTL); 6122 if (error == 0) 6123 *rvalp = 0; 6124 done: 6125 kmem_free(kstrlist.sl_modlist, kstrlistsize); 6126 return (error); 6127 } 6128 case I_LOOK: 6129 if (sti->sti_pushcnt == 0) { 6130 return (so_copyout(sockmod_name, (void *)arg, 6131 sizeof (sockmod_name), mode & FKIOCTL)); 6132 } 6133 return (strioctl(vp, cmd, arg, mode, U_TO_K, cr, rvalp)); 6134 6135 case I_FIND: 6136 error = strioctl(vp, cmd, arg, mode, U_TO_K, cr, rvalp); 6137 if (error && error != EINVAL) 6138 return (error); 6139 6140 /* If not found and the string was sockmod, return 1. */ 6141 if (*rvalp == 0 || error == EINVAL) { 6142 error = ((mode & FKIOCTL) ? copystr : copyinstr)( 6143 (void *)arg, mname, sizeof (mname), NULL); 6144 if (error == ENAMETOOLONG) 6145 error = EINVAL; 6146 6147 if (error == 0 && strcmp(mname, sockmod_name) == 0) 6148 *rvalp = 1; 6149 } 6150 return (error); 6151 6152 default: 6153 panic("socktpi_plumbioctl: unknown ioctl %d", cmd); 6154 break; 6155 } 6156 6157 return (0); 6158 } 6159 6160 /* 6161 * Wrapper around the STREAMS poll routine that implements socket poll 6162 * semantics. 6163 * Sockfs never calls pollwakeup itself - the stream head takes 6164 * care of all pollwakeups. Since sockfs never holds so_lock when calling the 6165 * stream head, there can never be a deadlock due to holding so_lock across 6166 * pollwakeup and acquiring so_lock in this routine. 6167 * 6168 * However, since the performance of VOP_POLL is critical, we avoid 6169 * acquiring so_lock here. This is based on two assumptions: 6170 * - The poll implementation holds locks to serialize the VOP_POLL call 6171 * and a pollwakeup for the same pollhead. This ensures that should, 6172 * e.g., so_state change during a socktpi_poll call, the pollwakeup 6173 * (which strsock_* and strrput conspire to issue) is issued after 6174 * the state change. Thus the pollwakeup will block until VOP_POLL has 6175 * returned and then wake up poll and have it call VOP_POLL again. 6176 * - The reading of so_state without holding so_lock does not result in 6177 * stale data that is older than the latest state change that has dropped 6178 * so_lock. This is ensured by the mutex_exit issuing the appropriate 6179 * memory barrier to force the data into the coherency domain.
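 *
 * An illustrative interleaving for the second assumption: thread A sets
 * SS_CANTRCVMORE under so_lock and then calls mutex_exit(&so->so_lock);
 * a concurrent socktpi_poll() in thread B either observes the new
 * so_state, or, if it read the stale value, the pollwakeup() issued for
 * that state change is serialized after B's VOP_POLL by the pollhead
 * locks, so B is woken up and calls VOP_POLL again.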
6180 */ 6181 static int 6182 sotpi_poll( 6183 struct sonode *so, 6184 short events, 6185 int anyyet, 6186 short *reventsp, 6187 struct pollhead **phpp) 6188 { 6189 short origevents = events; 6190 struct vnode *vp = SOTOV(so); 6191 int error; 6192 int so_state = so->so_state; /* snapshot */ 6193 sotpi_info_t *sti = SOTOTPI(so); 6194 6195 dprintso(so, 0, ("socktpi_poll(%p): state %s err %d\n", 6196 (void *)vp, pr_state(so_state, so->so_mode), so->so_error)); 6197 6198 ASSERT(vp->v_type == VSOCK); 6199 ASSERT(vp->v_stream != NULL); 6200 6201 if (so->so_version == SOV_STREAM) { 6202 /* The imaginary "sockmod" has been popped - act as a stream */ 6203 return (strpoll(vp->v_stream, events, anyyet, 6204 reventsp, phpp)); 6205 } 6206 6207 if (!(so_state & SS_ISCONNECTED) && 6208 (so->so_mode & SM_CONNREQUIRED)) { 6209 /* Not connected yet - turn off write side events */ 6210 events &= ~(POLLOUT|POLLWRBAND); 6211 } 6212 /* 6213 * Check for errors without calling strpoll if the caller wants them. 6214 * In sockets the errors are represented as input/output events 6215 * and there is no need to ask the stream head for this information. 6216 */ 6217 if (so->so_error != 0 && 6218 ((POLLIN|POLLRDNORM|POLLOUT) & origevents) != 0) { 6219 *reventsp = (POLLIN|POLLRDNORM|POLLOUT) & origevents; 6220 return (0); 6221 } 6222 /* 6223 * Ignore M_PROTO only messages such as the T_EXDATA_IND messages. 6224 * Messages with only an M_PROTO/M_PCPROTO part and no M_DATA 6225 * will not trigger a POLLIN event once POLLRDDATA is set. 6226 * The handling of urgent data (causing POLLRDBAND) is done by 6227 * inspecting SS_OOBPEND below. 6228 */ 6229 events |= POLLRDDATA; 6230 6231 /* 6232 * After shutdown(output) a stream head write error is set. 6233 * However, we should not return output events. 6234 */ 6235 events |= POLLNOERR; 6236 error = strpoll(vp->v_stream, events, anyyet, 6237 reventsp, phpp); 6238 if (error) 6239 return (error); 6240 6241 ASSERT(!(*reventsp & POLLERR)); 6242 6243 /* 6244 * Notes on T_CONN_IND handling for sockets. 6245 * 6246 * If strpoll() returned without events, SR_POLLIN is guaranteed 6247 * to be set, ensuring any subsequent strrput() runs pollwakeup(). 6248 * 6249 * Since the so_lock is not held, soqueueconnind() may have run 6250 * and a T_CONN_IND may be waiting. We now check for any queued 6251 * T_CONN_IND msgs on sti_conn_ind_head and set appropriate events 6252 * to ensure poll returns. 6253 * 6254 * However: 6255 * If the T_CONN_IND hasn't arrived by the time strpoll() returns, 6256 * when strrput() does run for an arriving M_PROTO with T_CONN_IND 6257 * the following actions will occur; taken together they ensure the 6258 * syscall will return. 6259 * 6260 * 1. If a socket, soqueueconnind() will queue the T_CONN_IND but if 6261 * the accept() was run on a non-blocking socket sowaitconnind() 6262 * may have already returned EWOULDBLOCK, so it may not be waiting 6263 * to process the message. Additionally socktpi_poll() has probably 6264 * proceeded past the sti_conn_ind_head check below. 6265 * 2. strrput() runs pollwakeup()->pollnotify()->cv_signal() to wake 6266 * this thread, however that could occur before poll_common() 6267 * has entered cv_wait. 6268 * 3. pollnotify() sets T_POLLWAKE, while holding the pc_lock. 6269 * 6270 * Before proceeding to cv_wait() in poll_common() for an event, 6271 * poll_common() atomically checks for T_POLLWAKE under the pc_lock, 6272 * and if set, re-calls strpoll() to ensure the late arriving 6273 * T_CONN_IND is recognized, and pollsys() returns.
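 *
 * A sketch of the user-level pattern these guarantees serve
 * (hypothetical listening descriptor lfd, error handling omitted):
 *
 *	struct pollfd pfd;
 *
 *	pfd.fd = lfd;
 *	pfd.events = POLLIN;
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		cfd = accept(lfd, NULL, NULL);
 *
 * Without the T_POLLWAKE re-check, a T_CONN_IND arriving in the window
 * described above could leave such a poll() blocked indefinitely.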
6274 */ 6275 6276 if (sti->sti_conn_ind_head != NULL) 6277 *reventsp |= (POLLIN|POLLRDNORM) & events; 6278 6279 if (so->so_state & SS_CANTRCVMORE) { 6280 *reventsp |= POLLRDHUP & events; 6281 6282 if (so->so_state & SS_CANTSENDMORE) 6283 *reventsp |= POLLHUP; 6284 } 6285 6286 if (so->so_state & SS_OOBPEND) 6287 *reventsp |= POLLRDBAND & events; 6288 6289 if (sti->sti_nl7c_rcv_mp != NULL) { 6290 *reventsp |= (POLLIN|POLLRDNORM) & events; 6291 } 6292 if ((sti->sti_nl7c_flags & NL7C_ENABLED) && 6293 ((POLLIN|POLLRDNORM) & *reventsp)) { 6294 sti->sti_nl7c_flags |= NL7C_POLLIN; 6295 } 6296 6297 return (0); 6298 } 6299 6300 /*ARGSUSED*/ 6301 static int 6302 socktpi_constructor(void *buf, void *cdrarg, int kmflags) 6303 { 6304 sotpi_sonode_t *st = (sotpi_sonode_t *)buf; 6305 int error = 0; 6306 6307 error = sonode_constructor(buf, cdrarg, kmflags); 6308 if (error != 0) 6309 return (error); 6310 6311 error = i_sotpi_info_constructor(&st->st_info); 6312 if (error != 0) 6313 sonode_destructor(buf, cdrarg); 6314 6315 st->st_sonode.so_priv = &st->st_info; 6316 6317 return (error); 6318 } 6319 6320 /*ARGSUSED1*/ 6321 static void 6322 socktpi_destructor(void *buf, void *cdrarg) 6323 { 6324 sotpi_sonode_t *st = (sotpi_sonode_t *)buf; 6325 6326 ASSERT(st->st_sonode.so_priv == &st->st_info); 6327 st->st_sonode.so_priv = NULL; 6328 6329 i_sotpi_info_destructor(&st->st_info); 6330 sonode_destructor(buf, cdrarg); 6331 } 6332 6333 static int 6334 socktpi_unix_constructor(void *buf, void *cdrarg, int kmflags) 6335 { 6336 int retval; 6337 6338 if ((retval = socktpi_constructor(buf, cdrarg, kmflags)) == 0) { 6339 struct sonode *so = (struct sonode *)buf; 6340 sotpi_info_t *sti = SOTOTPI(so); 6341 6342 mutex_enter(&socklist.sl_lock); 6343 6344 sti->sti_next_so = socklist.sl_list; 6345 sti->sti_prev_so = NULL; 6346 if (sti->sti_next_so != NULL) 6347 SOTOTPI(sti->sti_next_so)->sti_prev_so = so; 6348 socklist.sl_list = so; 6349 6350 mutex_exit(&socklist.sl_lock); 6351 6352 } 6353 return (retval); 6354 } 6355 6356 static void 6357 socktpi_unix_destructor(void *buf, void *cdrarg) 6358 { 6359 struct sonode *so = (struct sonode *)buf; 6360 sotpi_info_t *sti = SOTOTPI(so); 6361 6362 mutex_enter(&socklist.sl_lock); 6363 6364 if (sti->sti_next_so != NULL) 6365 SOTOTPI(sti->sti_next_so)->sti_prev_so = sti->sti_prev_so; 6366 if (sti->sti_prev_so != NULL) 6367 SOTOTPI(sti->sti_prev_so)->sti_next_so = sti->sti_next_so; 6368 else 6369 socklist.sl_list = sti->sti_next_so; 6370 6371 mutex_exit(&socklist.sl_lock); 6372 6373 socktpi_destructor(buf, cdrarg); 6374 } 6375 6376 int 6377 socktpi_init(void) 6378 { 6379 /* 6380 * Create sonode caches. We create a special one for AF_UNIX so 6381 * that we can track them for netstat(1m). 6382 */ 6383 socktpi_cache = kmem_cache_create("socktpi_cache", 6384 sizeof (struct sotpi_sonode), 0, socktpi_constructor, 6385 socktpi_destructor, NULL, NULL, NULL, 0); 6386 6387 socktpi_unix_cache = kmem_cache_create("socktpi_unix_cache", 6388 sizeof (struct sotpi_sonode), 0, socktpi_unix_constructor, 6389 socktpi_unix_destructor, NULL, NULL, NULL, 0); 6390 6391 return (0); 6392 } 6393 6394 /* 6395 * Given a non-TPI sonode, allocate and prep it to be ready for TPI. 6396 * 6397 * Caller must still update state and mode using sotpi_update_state(). 
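 *
 * A rough sketch of the expected calling sequence (the quiescing step
 * is paraphrased and the surrounding fallback logic lives elsewhere in
 * sockfs; it is not shown here):
 *
 *	error = sotpi_convert_sonode(so, newsp, &direct, &q, cr);
 *	if (error != 0)
 *		return (error);
 *	... quiesce the protocol, collect addresses and capabilities ...
 *	sotpi_update_state(so, &tcap, laddr, laddrlen, faddr, faddrlen,
 *	    opts);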
6398 */ 6399 int 6400 sotpi_convert_sonode(struct sonode *so, struct sockparams *newsp, 6401 boolean_t *direct, queue_t **qp, struct cred *cr) 6402 { 6403 sotpi_info_t *sti; 6404 struct sockparams *origsp = so->so_sockparams; 6405 sock_lower_handle_t handle = so->so_proto_handle; 6406 struct stdata *stp; 6407 struct vnode *vp; 6408 queue_t *q; 6409 int error = 0; 6410 6411 ASSERT((so->so_state & (SS_FALLBACK_PENDING|SS_FALLBACK_COMP)) == 6412 SS_FALLBACK_PENDING); 6413 ASSERT(SOCK_IS_NONSTR(so)); 6414 6415 *qp = NULL; 6416 *direct = B_FALSE; 6417 so->so_sockparams = newsp; 6418 /* 6419 * Allocate and initialize fields required by TPI. 6420 */ 6421 (void) sotpi_info_create(so, KM_SLEEP); 6422 sotpi_info_init(so); 6423 6424 if ((error = sotpi_init(so, NULL, cr, SO_FALLBACK)) != 0) { 6425 sotpi_info_fini(so); 6426 sotpi_info_destroy(so); 6427 return (error); 6428 } 6429 ASSERT(handle == so->so_proto_handle); 6430 sti = SOTOTPI(so); 6431 if (sti->sti_direct != 0) 6432 *direct = B_TRUE; 6433 6434 /* 6435 * Keep the original sp around so we can properly dispose of the 6436 * sonode when the socket is being closed. 6437 */ 6438 sti->sti_orig_sp = origsp; 6439 6440 so_basic_strinit(so); /* skips the T_CAPABILITY_REQ */ 6441 so_alloc_addr(so, so->so_max_addr_len); 6442 6443 /* 6444 * If the application has done a SIOCSPGRP, make sure the 6445 * stream head is aware. This needs to take place before 6446 * the protocol starts sending up messages. Otherwise we 6447 * might fail to generate a SIGPOLL. 6448 * 6449 * It is possible that the application will receive duplicate 6450 * signals if some were already generated for either data or 6451 * connection indications. 6452 */ 6453 if (so->so_pgrp != 0) { 6454 if (so_set_events(so, so->so_vnode, cr) != 0) 6455 so->so_pgrp = 0; 6456 } 6457 6458 /* 6459 * Determine which queue to use. 6460 */ 6461 vp = SOTOV(so); 6462 stp = vp->v_stream; 6463 ASSERT(stp != NULL); 6464 q = stp->sd_wrq->q_next; 6465 6466 /* 6467 * Skip any modules that may have been auto pushed when the device 6468 * was opened. 6469 */ 6470 while (q->q_next != NULL) 6471 q = q->q_next; 6472 *qp = _RD(q); 6473 6474 /* This is now a STREAMS socket */ 6475 so->so_not_str = B_FALSE; 6476 6477 return (error); 6478 } 6479 6480 /* 6481 * Revert a TPI sonode. Reverting is only allowed during 6482 * the fallback process. 6483 */ 6484 void 6485 sotpi_revert_sonode(struct sonode *so, struct cred *cr) 6486 { 6487 vnode_t *vp = SOTOV(so); 6488 6489 ASSERT((so->so_state & (SS_FALLBACK_PENDING|SS_FALLBACK_COMP)) == 6490 SS_FALLBACK_PENDING); 6491 ASSERT(!SOCK_IS_NONSTR(so)); 6492 ASSERT(vp->v_stream != NULL); 6493 6494 strclean(vp); 6495 (void) strclose(vp, FREAD|FWRITE|SO_FALLBACK, cr); 6496 6497 /* 6498 * Restore the original sockparams. The caller is responsible for 6499 * dropping the ref to the new sp.
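 *
 * A caller that converted the sonode but cannot complete the remaining
 * fallback steps is expected to undo the conversion here; illustratively
 * (the failure condition shown is hypothetical):
 *
 *	if (sotpi_convert_sonode(so, newsp, &direct, &q, cr) == 0 &&
 *	    later_fallback_step_failed)
 *		sotpi_revert_sonode(so, cr);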
6500 */ 6501 so->so_sockparams = SOTOTPI(so)->sti_orig_sp; 6502 6503 sotpi_info_fini(so); 6504 sotpi_info_destroy(so); 6505 6506 /* This is no longer a STREAMS socket */ 6507 so->so_not_str = B_TRUE; 6508 } 6509 6510 void 6511 sotpi_update_state(struct sonode *so, struct T_capability_ack *tcap, 6512 struct sockaddr *laddr, socklen_t laddrlen, struct sockaddr *faddr, 6513 socklen_t faddrlen, short opts) 6514 { 6515 sotpi_info_t *sti = SOTOTPI(so); 6516 6517 so_proc_tcapability_ack(so, tcap); 6518 6519 so->so_options |= opts; 6520 6521 /* 6522 * Determine whether the foreign and local addresses are valid. 6523 */ 6524 if (laddrlen != 0) { 6525 ASSERT(laddrlen <= sti->sti_laddr_maxlen); 6526 sti->sti_laddr_len = laddrlen; 6527 bcopy(laddr, sti->sti_laddr_sa, laddrlen); 6528 sti->sti_laddr_valid = (so->so_state & SS_ISBOUND); 6529 } 6530 6531 if (faddrlen != 0) { 6532 ASSERT(faddrlen <= sti->sti_faddr_maxlen); 6533 sti->sti_faddr_len = faddrlen; 6534 bcopy(faddr, sti->sti_faddr_sa, faddrlen); 6535 sti->sti_faddr_valid = (so->so_state & SS_ISCONNECTED); 6536 } 6537 6538 } 6539 6540 /* 6541 * Allocate enough space to cache the local and foreign addresses. 6542 */ 6543 void 6544 so_alloc_addr(struct sonode *so, t_uscalar_t maxlen) 6545 { 6546 sotpi_info_t *sti = SOTOTPI(so); 6547 6548 ASSERT(sti->sti_laddr_sa == NULL && sti->sti_faddr_sa == NULL); 6549 ASSERT(sti->sti_laddr_len == 0 && sti->sti_faddr_len == 0); 6550 sti->sti_laddr_maxlen = sti->sti_faddr_maxlen = 6551 P2ROUNDUP(maxlen, KMEM_ALIGN); 6552 so->so_max_addr_len = sti->sti_laddr_maxlen; 6553 sti->sti_laddr_sa = kmem_alloc(sti->sti_laddr_maxlen * 2, KM_SLEEP); 6554 sti->sti_faddr_sa = (struct sockaddr *)((caddr_t)sti->sti_laddr_sa 6555 + sti->sti_laddr_maxlen); 6556 6557 if (so->so_family == AF_UNIX) { 6558 /* 6559 * Initialize AF_UNIX related fields.
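 *
 * A worked example of the shared allocation above (illustrative, and
 * assuming the usual 8-byte KMEM_ALIGN): with maxlen == 16, e.g.
 * sizeof (struct sockaddr_in), P2ROUNDUP(16, 8) == 16, so a single
 * 32-byte buffer is allocated with sti_laddr_sa at offset 0 and
 * sti_faddr_sa at offset 16.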
6560 */ 6561 bzero(&sti->sti_ux_laddr, sizeof (sti->sti_ux_laddr)); 6562 bzero(&sti->sti_ux_faddr, sizeof (sti->sti_ux_faddr)); 6563 } 6564 } 6565 6566 6567 sotpi_info_t * 6568 sotpi_sototpi(struct sonode *so) 6569 { 6570 sotpi_info_t *sti; 6571 6572 ASSERT(so != NULL); 6573 6574 sti = (sotpi_info_t *)so->so_priv; 6575 6576 ASSERT(sti != NULL); 6577 ASSERT(sti->sti_magic == SOTPI_INFO_MAGIC); 6578 6579 return (sti); 6580 } 6581 6582 static int 6583 i_sotpi_info_constructor(sotpi_info_t *sti) 6584 { 6585 sti->sti_magic = SOTPI_INFO_MAGIC; 6586 sti->sti_ack_mp = NULL; 6587 sti->sti_discon_ind_mp = NULL; 6588 sti->sti_ux_bound_vp = NULL; 6589 sti->sti_unbind_mp = NULL; 6590 6591 sti->sti_conn_ind_head = NULL; 6592 sti->sti_conn_ind_tail = NULL; 6593 6594 sti->sti_laddr_sa = NULL; 6595 sti->sti_faddr_sa = NULL; 6596 6597 sti->sti_nl7c_flags = 0; 6598 sti->sti_nl7c_uri = NULL; 6599 sti->sti_nl7c_rcv_mp = NULL; 6600 6601 mutex_init(&sti->sti_plumb_lock, NULL, MUTEX_DEFAULT, NULL); 6602 cv_init(&sti->sti_ack_cv, NULL, CV_DEFAULT, NULL); 6603 6604 return (0); 6605 } 6606 6607 static void 6608 i_sotpi_info_destructor(sotpi_info_t *sti) 6609 { 6610 ASSERT(sti->sti_magic == SOTPI_INFO_MAGIC); 6611 ASSERT(sti->sti_ack_mp == NULL); 6612 ASSERT(sti->sti_discon_ind_mp == NULL); 6613 ASSERT(sti->sti_ux_bound_vp == NULL); 6614 ASSERT(sti->sti_unbind_mp == NULL); 6615 6616 ASSERT(sti->sti_conn_ind_head == NULL); 6617 ASSERT(sti->sti_conn_ind_tail == NULL); 6618 6619 ASSERT(sti->sti_laddr_sa == NULL); 6620 ASSERT(sti->sti_faddr_sa == NULL); 6621 6622 ASSERT(sti->sti_nl7c_flags == 0); 6623 ASSERT(sti->sti_nl7c_uri == NULL); 6624 ASSERT(sti->sti_nl7c_rcv_mp == NULL); 6625 6626 mutex_destroy(&sti->sti_plumb_lock); 6627 cv_destroy(&sti->sti_ack_cv); 6628 } 6629 6630 /* 6631 * Creates and attaches TPI information to the given sonode 6632 */ 6633 static boolean_t 6634 sotpi_info_create(struct sonode *so, int kmflags) 6635 { 6636 sotpi_info_t *sti; 6637 6638 ASSERT(so->so_priv == NULL); 6639 6640 if ((sti = kmem_zalloc(sizeof (*sti), kmflags)) == NULL) 6641 return (B_FALSE); 6642 6643 if (i_sotpi_info_constructor(sti) != 0) { 6644 kmem_free(sti, sizeof (*sti)); 6645 return (B_FALSE); 6646 } 6647 6648 so->so_priv = (void *)sti; 6649 return (B_TRUE); 6650 } 6651 6652 /* 6653 * Initializes the TPI information. 
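 *
 * As used by sotpi_convert_sonode() and sotpi_revert_sonode(), the
 * create/init and fini/destroy calls are paired:
 *
 *	(void) sotpi_info_create(so, KM_SLEEP);
 *	sotpi_info_init(so);
 *	...
 *	sotpi_info_fini(so);
 *	sotpi_info_destroy(so);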
6654 */ 6655 static void 6656 sotpi_info_init(struct sonode *so) 6657 { 6658 struct vnode *vp = SOTOV(so); 6659 sotpi_info_t *sti = SOTOTPI(so); 6660 time_t now; 6661 6662 sti->sti_dev = so->so_sockparams->sp_sdev_info.sd_vnode->v_rdev; 6663 vp->v_rdev = sti->sti_dev; 6664 6665 sti->sti_orig_sp = NULL; 6666 6667 sti->sti_pushcnt = 0; 6668 6669 now = gethrestime_sec(); 6670 sti->sti_atime = now; 6671 sti->sti_mtime = now; 6672 sti->sti_ctime = now; 6673 6674 sti->sti_eaddr_mp = NULL; 6675 sti->sti_delayed_error = 0; 6676 6677 sti->sti_provinfo = NULL; 6678 6679 sti->sti_oobcnt = 0; 6680 sti->sti_oobsigcnt = 0; 6681 6682 ASSERT(sti->sti_laddr_sa == NULL && sti->sti_faddr_sa == NULL); 6683 6684 sti->sti_laddr_sa = 0; 6685 sti->sti_faddr_sa = 0; 6686 sti->sti_laddr_maxlen = sti->sti_faddr_maxlen = 0; 6687 sti->sti_laddr_len = sti->sti_faddr_len = 0; 6688 6689 sti->sti_laddr_valid = 0; 6690 sti->sti_faddr_valid = 0; 6691 sti->sti_faddr_noxlate = 0; 6692 6693 sti->sti_direct = 0; 6694 6695 ASSERT(sti->sti_ack_mp == NULL); 6696 ASSERT(sti->sti_ux_bound_vp == NULL); 6697 ASSERT(sti->sti_unbind_mp == NULL); 6698 6699 ASSERT(sti->sti_conn_ind_head == NULL); 6700 ASSERT(sti->sti_conn_ind_tail == NULL); 6701 } 6702 6703 /* 6704 * Given a sonode, grab the TPI info and free any data. 6705 */ 6706 static void 6707 sotpi_info_fini(struct sonode *so) 6708 { 6709 sotpi_info_t *sti = SOTOTPI(so); 6710 mblk_t *mp; 6711 6712 ASSERT(sti->sti_discon_ind_mp == NULL); 6713 6714 if ((mp = sti->sti_conn_ind_head) != NULL) { 6715 mblk_t *mp1; 6716 6717 while (mp) { 6718 mp1 = mp->b_next; 6719 mp->b_next = NULL; 6720 freemsg(mp); 6721 mp = mp1; 6722 } 6723 sti->sti_conn_ind_head = sti->sti_conn_ind_tail = NULL; 6724 } 6725 6726 /* 6727 * Protect so->so_[lf]addr_sa so that sockfs_snapshot() can safely 6728 * indirect them. It also uses so_count as a validity test. 6729 */ 6730 mutex_enter(&so->so_lock); 6731 6732 if (sti->sti_laddr_sa) { 6733 ASSERT((caddr_t)sti->sti_faddr_sa == 6734 (caddr_t)sti->sti_laddr_sa + sti->sti_laddr_maxlen); 6735 ASSERT(sti->sti_faddr_maxlen == sti->sti_laddr_maxlen); 6736 sti->sti_laddr_valid = 0; 6737 sti->sti_faddr_valid = 0; 6738 kmem_free(sti->sti_laddr_sa, sti->sti_laddr_maxlen * 2); 6739 sti->sti_laddr_sa = NULL; 6740 sti->sti_laddr_len = sti->sti_laddr_maxlen = 0; 6741 sti->sti_faddr_sa = NULL; 6742 sti->sti_faddr_len = sti->sti_faddr_maxlen = 0; 6743 } 6744 6745 mutex_exit(&so->so_lock); 6746 6747 if ((mp = sti->sti_eaddr_mp) != NULL) { 6748 freemsg(mp); 6749 sti->sti_eaddr_mp = NULL; 6750 sti->sti_delayed_error = 0; 6751 } 6752 6753 if ((mp = sti->sti_ack_mp) != NULL) { 6754 freemsg(mp); 6755 sti->sti_ack_mp = NULL; 6756 } 6757 6758 if ((mp = sti->sti_nl7c_rcv_mp) != NULL) { 6759 sti->sti_nl7c_rcv_mp = NULL; 6760 freemsg(mp); 6761 } 6762 sti->sti_nl7c_rcv_rval = 0; 6763 if (sti->sti_nl7c_uri != NULL) { 6764 nl7c_urifree(so); 6765 /* urifree() cleared nl7c_uri */ 6766 } 6767 if (sti->sti_nl7c_flags) { 6768 sti->sti_nl7c_flags = 0; 6769 } 6770 6771 ASSERT(sti->sti_ux_bound_vp == NULL); 6772 if ((mp = sti->sti_unbind_mp) != NULL) { 6773 freemsg(mp); 6774 sti->sti_unbind_mp = NULL; 6775 } 6776 } 6777 6778 /* 6779 * Destroys the TPI information attached to a sonode. 6780 */ 6781 static void 6782 sotpi_info_destroy(struct sonode *so) 6783 { 6784 sotpi_info_t *sti = SOTOTPI(so); 6785 6786 i_sotpi_info_destructor(sti); 6787 kmem_free(sti, sizeof (*sti)); 6788 6789 so->so_priv = NULL; 6790 } 6791 6792 /* 6793 * Create the global sotpi socket module entry. It will never be freed. 
6794 */ 6795 smod_info_t * 6796 sotpi_smod_create(void) 6797 { 6798 smod_info_t *smodp; 6799 6800 smodp = kmem_zalloc(sizeof (*smodp), KM_SLEEP); 6801 smodp->smod_name = kmem_alloc(sizeof (SOTPI_SMOD_NAME), KM_SLEEP); 6802 (void) strcpy(smodp->smod_name, SOTPI_SMOD_NAME); 6803 /* 6804 * Initialize the smod_refcnt to 1 so it will never be freed. 6805 */ 6806 smodp->smod_refcnt = 1; 6807 smodp->smod_uc_version = SOCK_UC_VERSION; 6808 smodp->smod_dc_version = SOCK_DC_VERSION; 6809 smodp->smod_sock_create_func = &sotpi_create; 6810 smodp->smod_sock_destroy_func = &sotpi_destroy; 6811 return (smodp); 6812 } 6813
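
/*
 * A user-level sketch of the conversion that socktpi_plumbioctl() above
 * emulates (illustrative only; error handling omitted and fd is a
 * hypothetical socket descriptor):
 *
 *	(void) ioctl(fd, I_POP, 0);		pop the imaginary "sockmod";
 *						the descriptor now behaves
 *						as a plain stream
 *	(void) ioctl(fd, I_PUSH, "sockmod");	convert back to a socket
 */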