/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1995, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2015, Joyent, Inc. All rights reserved.
 * Copyright (c) 2013, OmniTI Computer Consulting, Inc. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2020 OmniOS Community Edition (OmniOSce) Association.
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/cred.h>
#include <sys/kmem.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/debug.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/file.h>
#include <sys/user.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/strsun.h>
#include <sys/sunddi.h>
#include <sys/esunddi.h>
#include <sys/flock.h>
#include <sys/modctl.h>
#include <sys/cmn_err.h>
#include <sys/vmsystm.h>
#include <sys/policy.h>
#include <sys/limits.h>

#include <sys/socket.h>
#include <sys/socketvar.h>

#include <sys/isa_defs.h>
#include <sys/inttypes.h>
#include <sys/cpuvar.h>
#include <sys/filio.h>
#include <sys/sendfile.h>
#include <sys/ddi.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_kpm.h>

#include <fs/sockfs/nl7c.h>
#include <fs/sockfs/sockcommon.h>
#include <fs/sockfs/sockfilter_impl.h>
#include <fs/sockfs/socktpi.h>

#ifdef SOCK_TEST
int do_useracc = 1;		/* Controlled by setting SO_DEBUG to 4 */
#else
#define	do_useracc	1
#endif /* SOCK_TEST */

extern int	xnet_truncate_print;

extern void	nl7c_init(void);
extern int	sockfs_defer_nl7c_init;

/*
 * Kernel component of socket creation.
 *
 * The socket library determines which version number to use.
 * First the library calls this with a NULL devpath. If this fails
 * to find a transport (using solookup) the library will look in /etc/netconfig
 * for the appropriate transport. If one is found it will pass in the
 * devpath for the kernel to use.
 */
int
so_socket(int family, int type_w_flags, int protocol, char *devpath,
    int version)
{
	struct sonode *so;
	vnode_t *vp;
	struct file *fp;
	int fd;
	int error;
	int type;

	type = type_w_flags & SOCK_TYPE_MASK;
	type_w_flags &= ~SOCK_TYPE_MASK;
	if (type_w_flags & ~(SOCK_CLOEXEC|SOCK_NDELAY|SOCK_NONBLOCK))
		return (set_errno(EINVAL));

	if (devpath != NULL) {
		char *buf;
		size_t kdevpathlen = 0;

		buf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
		if ((error = copyinstr(devpath, buf,
		    MAXPATHLEN, &kdevpathlen)) != 0) {
			kmem_free(buf, MAXPATHLEN);
			return (set_errno(error));
		}
		so = socket_create(family, type, protocol, buf, NULL,
		    SOCKET_SLEEP, version, CRED(), &error);
		kmem_free(buf, MAXPATHLEN);
	} else {
		so = socket_create(family, type, protocol, NULL, NULL,
		    SOCKET_SLEEP, version, CRED(), &error);
	}
	if (so == NULL)
		return (set_errno(error));

	/* Allocate a file descriptor for the socket */
	vp = SOTOV(so);
	if ((error = falloc(vp, FWRITE|FREAD, &fp, &fd)) != 0) {
		(void) socket_close(so, 0, CRED());
		socket_destroy(so);
		return (set_errno(error));
	}

	/*
	 * Now fill in the entries that falloc reserved
	 */
	if (type_w_flags & SOCK_NDELAY) {
		so->so_state |= SS_NDELAY;
		fp->f_flag |= FNDELAY;
	}
	if (type_w_flags & SOCK_NONBLOCK) {
		so->so_state |= SS_NONBLOCK;
		fp->f_flag |= FNONBLOCK;
	}
	mutex_exit(&fp->f_tlock);
	setf(fd, fp);
	if ((type_w_flags & SOCK_CLOEXEC) != 0) {
		f_setfd(fd, FD_CLOEXEC);
	}

	return (fd);
}
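/*
 * Illustrative sketch (not part of this file): the userland side of the
 * two-pass protocol described above. The _so_socket() stub name and the
 * lookup_netconfig() helper are hypothetical, shown only to make the
 * kernel/library split concrete:
 *
 *	fd = _so_socket(family, type, protocol, NULL, version);
 *	if (fd == -1 && errno == EPROTONOSUPPORT) {
 *		devpath = lookup_netconfig(family, type, protocol);
 *		if (devpath != NULL)
 *			fd = _so_socket(family, type, protocol,
 *			    devpath, version);
 *	}
 */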
/*
 * Map from a file descriptor to a socket node.
 * Returns with the file descriptor held, i.e. the caller has to
 * use releasef when done with the file descriptor.
 */
struct sonode *
getsonode(int sock, int *errorp, file_t **fpp)
{
	file_t *fp;
	vnode_t *vp;
	struct sonode *so;

	if ((fp = getf(sock)) == NULL) {
		*errorp = EBADF;
		eprintline(*errorp);
		return (NULL);
	}
	vp = fp->f_vnode;
	/* Check if it is a socket */
	if (vp->v_type != VSOCK) {
		releasef(sock);
		*errorp = ENOTSOCK;
		eprintline(*errorp);
		return (NULL);
	}
	/*
	 * Use the stream head to find the real socket vnode.
	 * This is needed when namefs sits above sockfs.
	 */
	if (vp->v_stream) {
		ASSERT(vp->v_stream->sd_vnode);
		vp = vp->v_stream->sd_vnode;

		so = VTOSO(vp);
		if (so->so_version == SOV_STREAM) {
			releasef(sock);
			*errorp = ENOTSOCK;
			eprintsoline(so, *errorp);
			return (NULL);
		}
	} else {
		so = VTOSO(vp);
	}
	if (fpp)
		*fpp = fp;
	return (so);
}

/*
 * Allocate and copyin a sockaddr.
 * Ensures NULL termination for AF_UNIX addresses by extending them
 * with one NULL byte if need be. Verifies that the length is not
 * excessive to prevent an application from consuming all of kernel
 * memory. Returns NULL when an error occurs.
 */
static struct sockaddr *
copyin_name(struct sonode *so, struct sockaddr *name, socklen_t *namelenp,
    int *errorp)
{
	char *faddr;
	size_t namelen = (size_t)*namelenp;

	ASSERT(namelen != 0);
	if (namelen > SO_MAXARGSIZE) {
		*errorp = EINVAL;
		eprintsoline(so, *errorp);
		return (NULL);
	}

	faddr = (char *)kmem_alloc(namelen, KM_SLEEP);
	if (copyin(name, faddr, namelen)) {
		kmem_free(faddr, namelen);
		*errorp = EFAULT;
		eprintsoline(so, *errorp);
		return (NULL);
	}

	/*
	 * Add space for NULL termination if needed.
	 * Do a quick check if the last byte is NUL.
	 */
	if (so->so_family == AF_UNIX && faddr[namelen - 1] != '\0') {
		/* Check if there is any NULL termination */
		size_t i;
		int foundnull = 0;

		for (i = sizeof (name->sa_family); i < namelen; i++) {
			if (faddr[i] == '\0') {
				foundnull = 1;
				break;
			}
		}
		if (!foundnull) {
			/* Add extra byte for NUL padding */
			char *nfaddr;

			nfaddr = (char *)kmem_alloc(namelen + 1, KM_SLEEP);
			bcopy(faddr, nfaddr, namelen);
			kmem_free(faddr, namelen);

			/* NUL terminate */
			nfaddr[namelen] = '\0';
			namelen++;
			ASSERT((socklen_t)namelen == namelen);
			*namelenp = (socklen_t)namelen;
			faddr = nfaddr;
		}
	}
	return ((struct sockaddr *)faddr);
}
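/*
 * Example of the AF_UNIX NUL extension above (illustrative only): an
 * application that fills sun_path completely, e.g.
 *
 *	strncpy(sua.sun_path, path, sizeof (sua.sun_path));
 *	(void) bind(fd, (struct sockaddr *)&sua, sizeof (sua));
 *
 * may pass an address with no terminating NUL anywhere after
 * sa_family. copyin_name() then allocates namelen + 1 bytes, appends
 * the '\0' and bumps *namelenp, so downstream code can safely treat
 * sun_path as a C string.
 */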
/*
 * Copy from kaddr/klen to uaddr/ulen. Updates ulenp if non-NULL.
 */
static int
copyout_arg(void *uaddr, socklen_t ulen, void *ulenp, void *kaddr,
    socklen_t klen)
{
	if (uaddr != NULL) {
		if (ulen > klen)
			ulen = klen;

		if (ulen != 0) {
			if (copyout(kaddr, uaddr, ulen))
				return (EFAULT);
		}
	} else
		ulen = 0;

	if (ulenp != NULL) {
		if (copyout(&ulen, ulenp, sizeof (ulen)))
			return (EFAULT);
	}
	return (0);
}

/*
 * Copy from kaddr/klen to uaddr/ulen. Updates ulenp if non-NULL.
 * If klen is greater than ulen it still uses the non-truncated
 * klen to update ulenp.
 */
static int
copyout_name(void *uaddr, socklen_t ulen, void *ulenp, void *kaddr,
    socklen_t klen)
{
	if (uaddr != NULL) {
		if (ulen >= klen)
			ulen = klen;
		else if (ulen != 0 && xnet_truncate_print) {
			printf("sockfs: truncating copyout of address using "
			    "XNET semantics for pid = %d. Lengths %d, %d\n",
			    curproc->p_pid, klen, ulen);
		}

		if (ulen != 0) {
			if (copyout(kaddr, uaddr, ulen))
				return (EFAULT);
		} else
			klen = 0;
	} else
		klen = 0;

	if (ulenp != NULL) {
		if (copyout(&klen, ulenp, sizeof (klen)))
			return (EFAULT);
	}
	return (0);
}
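/*
 * Worked example of the difference (illustrative only): for a kernel
 * address of klen == 16 bytes and a user buffer of ulen == 8 bytes,
 * both routines copy out 8 bytes, but copyout_name() stores 16 in
 * *ulenp (XNET semantics, letting the application detect truncation)
 * while copyout_arg() stores the truncated length 8.
 */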
/*
 * The socketpair() code in libsocket creates two sockets (using
 * the /etc/netconfig fallback if needed) before calling this routine
 * to connect the two sockets together.
 *
 * For a SOCK_STREAM socketpair a listener is needed - in that case this
 * routine will create a new file descriptor as part of accepting the
 * connection. The library socketpair() will check if svs[2] has changed
 * in which case it will close the changed fd.
 *
 * Note that this code could use the TPI feature of accepting the connection
 * on the listening endpoint. However, that would require significant changes
 * to soaccept.
 */
int
so_socketpair(int sv[2])
{
	int svs[2];
	struct sonode *so1, *so2;
	int error;
	int orig_flags;
	struct sockaddr_ux *name;
	size_t namelen;
	sotpi_info_t *sti1;
	sotpi_info_t *sti2;

	dprint(1, ("so_socketpair(%p)\n", (void *)sv));

	error = useracc(sv, sizeof (svs), B_WRITE);
	if (error && do_useracc)
		return (set_errno(EFAULT));

	if (copyin(sv, svs, sizeof (svs)))
		return (set_errno(EFAULT));

	if ((so1 = getsonode(svs[0], &error, NULL)) == NULL)
		return (set_errno(error));

	if ((so2 = getsonode(svs[1], &error, NULL)) == NULL) {
		releasef(svs[0]);
		return (set_errno(error));
	}

	if (so1->so_family != AF_UNIX || so2->so_family != AF_UNIX) {
		error = EOPNOTSUPP;
		goto done;
	}

	sti1 = SOTOTPI(so1);
	sti2 = SOTOTPI(so2);

	/*
	 * The code below makes assumptions about the "sockfs" implementation.
	 * So make sure that the correct implementation is really used.
	 */
	ASSERT(so1->so_ops == &sotpi_sonodeops);
	ASSERT(so2->so_ops == &sotpi_sonodeops);

	if (so1->so_type == SOCK_DGRAM) {
		/*
		 * Bind both sockets and connect them with each other.
		 * Need to allocate name/namelen for soconnect.
		 */
		error = socket_bind(so1, NULL, 0, _SOBIND_UNSPEC, CRED());
		if (error) {
			eprintsoline(so1, error);
			goto done;
		}
		error = socket_bind(so2, NULL, 0, _SOBIND_UNSPEC, CRED());
		if (error) {
			eprintsoline(so2, error);
			goto done;
		}
		namelen = sizeof (struct sockaddr_ux);
		name = kmem_alloc(namelen, KM_SLEEP);
		name->sou_family = AF_UNIX;
		name->sou_addr = sti2->sti_ux_laddr;
		error = socket_connect(so1,
		    (struct sockaddr *)name,
		    (socklen_t)namelen,
		    0, _SOCONNECT_NOXLATE, CRED());
		if (error) {
			kmem_free(name, namelen);
			eprintsoline(so1, error);
			goto done;
		}
		name->sou_addr = sti1->sti_ux_laddr;
		error = socket_connect(so2,
		    (struct sockaddr *)name,
		    (socklen_t)namelen,
		    0, _SOCONNECT_NOXLATE, CRED());
		kmem_free(name, namelen);
		if (error) {
			eprintsoline(so2, error);
			goto done;
		}
		releasef(svs[0]);
		releasef(svs[1]);
	} else {
		/*
		 * Bind both sockets, with so1 being a listener.
		 * Connect so2 to so1 - nonblocking to avoid waiting for
		 * soaccept to complete.
		 * Accept a connection on so1. Pass out the new fd as sv[0].
		 * The library will detect the changed fd and close
		 * the original one.
		 */
		struct sonode *nso;
		struct vnode *nvp;
		struct file *nfp;
		int nfd;

		/*
		 * We could simply call socket_listen() here (which would do
		 * the binding automatically) if the code didn't rely on
		 * passing _SOBIND_NOXLATE to the TPI implementation of
		 * socket_bind().
		 */
		error = socket_bind(so1, NULL, 0, _SOBIND_UNSPEC|
		    _SOBIND_NOXLATE|_SOBIND_LISTEN|_SOBIND_SOCKETPAIR,
		    CRED());
		if (error) {
			eprintsoline(so1, error);
			goto done;
		}
		error = socket_bind(so2, NULL, 0, _SOBIND_UNSPEC, CRED());
		if (error) {
			eprintsoline(so2, error);
			goto done;
		}

		namelen = sizeof (struct sockaddr_ux);
		name = kmem_alloc(namelen, KM_SLEEP);
		name->sou_family = AF_UNIX;
		name->sou_addr = sti1->sti_ux_laddr;
		error = socket_connect(so2,
		    (struct sockaddr *)name,
		    (socklen_t)namelen,
		    FNONBLOCK, _SOCONNECT_NOXLATE, CRED());
		kmem_free(name, namelen);
		if (error) {
			if (error != EINPROGRESS) {
				eprintsoline(so2, error);
				goto done;
			}
		}

		error = socket_accept(so1, 0, CRED(), &nso);
		if (error) {
			eprintsoline(so1, error);
			goto done;
		}

		/* Wait for so2 to become SS_CONNECTED, ignoring signals. */
		mutex_enter(&so2->so_lock);
		error = sowaitconnected(so2, 0, 1);
		mutex_exit(&so2->so_lock);
		if (error != 0) {
			(void) socket_close(nso, 0, CRED());
			socket_destroy(nso);
			eprintsoline(so2, error);
			goto done;
		}

		nvp = SOTOV(nso);
		if ((error = falloc(nvp, FWRITE|FREAD, &nfp, &nfd)) != 0) {
			(void) socket_close(nso, 0, CRED());
			socket_destroy(nso);
			eprintsoline(nso, error);
			goto done;
		}
		/*
		 * Copy over FNONBLOCK and FNDELAY flags should they exist.
		 */
		if (so1->so_state & SS_NONBLOCK)
			nfp->f_flag |= FNONBLOCK;
		if (so1->so_state & SS_NDELAY)
			nfp->f_flag |= FNDELAY;

		/*
		 * Fill in the entries that falloc reserved.
		 */
		mutex_exit(&nfp->f_tlock);
		setf(nfd, nfp);

		/*
		 * Get the original flags before we release.
		 */
		VERIFY(f_getfd_error(svs[0], &orig_flags) == 0);

		releasef(svs[0]);
		releasef(svs[1]);

		/*
		 * If FD_CLOEXEC was set on the file descriptor we're
		 * swapping out, we should set it on the new one too.
		 */
		if (orig_flags & FD_CLOEXEC) {
			f_setfd(nfd, FD_CLOEXEC);
		}

		/*
		 * The socketpair library routine will close the original
		 * svs[0] when this code passes out a different file
		 * descriptor.
		 */
		svs[0] = nfd;

		if (copyout(svs, sv, sizeof (svs))) {
			(void) closeandsetf(nfd, NULL);
			eprintline(EFAULT);
			return (set_errno(EFAULT));
		}
	}
	return (0);

done:
	releasef(svs[0]);
	releasef(svs[1]);
	return (set_errno(error));
}
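/*
 * Illustrative summary (not part of this file) of the SOCK_STREAM
 * case above, in the order the operations are issued:
 *
 *	so1: bind as listener	(_SOBIND_LISTEN|_SOBIND_SOCKETPAIR)
 *	so2: bind
 *	so2: connect to so1	(FNONBLOCK, EINPROGRESS tolerated)
 *	so1: accept -> nso
 *	so2: wait until SS_CONNECTED
 *	sv[0] = fd of nso	(library closes the original sv[0])
 */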
int
bind(int sock, struct sockaddr *name, socklen_t namelen, int version)
{
	struct sonode *so;
	int error;

	dprint(1, ("bind(%d, %p, %d)\n",
	    sock, (void *)name, namelen));

	if ((so = getsonode(sock, &error, NULL)) == NULL)
		return (set_errno(error));

	/* Allocate and copyin name */
	/*
	 * X/Open test does not expect EFAULT with NULL name and non-zero
	 * namelen.
	 */
	if (name != NULL && namelen != 0) {
		ASSERT(MUTEX_NOT_HELD(&so->so_lock));
		name = copyin_name(so, name, &namelen, &error);
		if (name == NULL) {
			releasef(sock);
			return (set_errno(error));
		}
	} else {
		name = NULL;
		namelen = 0;
	}

	switch (version) {
	default:
		error = socket_bind(so, name, namelen, 0, CRED());
		break;
	case SOV_XPG4_2:
		error = socket_bind(so, name, namelen, _SOBIND_XPG4_2, CRED());
		break;
	case SOV_SOCKBSD:
		error = socket_bind(so, name, namelen, _SOBIND_SOCKBSD, CRED());
		break;
	}

	releasef(sock);
	if (name != NULL)
		kmem_free(name, (size_t)namelen);

	if (error)
		return (set_errno(error));
	return (0);
}

/* ARGSUSED2 */
int
listen(int sock, int backlog, int version)
{
	struct sonode *so;
	int error;

	dprint(1, ("listen(%d, %d)\n",
	    sock, backlog));

	if ((so = getsonode(sock, &error, NULL)) == NULL)
		return (set_errno(error));

	error = socket_listen(so, backlog, CRED());

	releasef(sock);
	if (error)
		return (set_errno(error));
	return (0);
}

/*ARGSUSED3*/
int
accept(int sock, struct sockaddr *name, socklen_t *namelenp, int version,
    int flags)
{
	struct sonode *so;
	file_t *fp;
	int error;
	socklen_t namelen;
	struct sonode *nso;
	struct vnode *nvp;
	struct file *nfp;
	int nfd;
	int ssflags;
	struct sockaddr *addrp;
	socklen_t addrlen;

	dprint(1, ("accept(%d, %p, %p)\n",
	    sock, (void *)name, (void *)namelenp));

	if (flags & ~(SOCK_CLOEXEC|SOCK_NONBLOCK|SOCK_NDELAY)) {
		return (set_errno(EINVAL));
	}

	/* Translate SOCK_ flags to their SS_ variant */
	ssflags = 0;
	if (flags & SOCK_NONBLOCK)
		ssflags |= SS_NONBLOCK;
	if (flags & SOCK_NDELAY)
		ssflags |= SS_NDELAY;

	if ((so = getsonode(sock, &error, &fp)) == NULL)
		return (set_errno(error));

	if (name != NULL) {
		ASSERT(MUTEX_NOT_HELD(&so->so_lock));
		if (copyin(namelenp, &namelen, sizeof (namelen))) {
			releasef(sock);
			return (set_errno(EFAULT));
		}
		if (namelen != 0) {
			error = useracc(name, (size_t)namelen, B_WRITE);
			if (error && do_useracc) {
				releasef(sock);
				return (set_errno(EFAULT));
			}
		} else
			name = NULL;
	} else {
		namelen = 0;
	}

	/*
	 * Allocate the user fd before socket_accept() in order to
	 * catch EMFILE errors before calling socket_accept().
	 */
	if ((nfd = ufalloc(0)) == -1) {
		eprintsoline(so, EMFILE);
		releasef(sock);
		return (set_errno(EMFILE));
	}
	error = socket_accept(so, fp->f_flag, CRED(), &nso);
	if (error) {
		setf(nfd, NULL);
		releasef(sock);
		return (set_errno(error));
	}

	nvp = SOTOV(nso);

	ASSERT(MUTEX_NOT_HELD(&nso->so_lock));
	if (namelen != 0) {
		addrlen = so->so_max_addr_len;
		addrp = (struct sockaddr *)kmem_alloc(addrlen, KM_SLEEP);

		if ((error = socket_getpeername(nso, (struct sockaddr *)addrp,
		    &addrlen, B_TRUE, CRED())) == 0) {
			error = copyout_name(name, namelen, namelenp,
			    addrp, addrlen);
		} else {
			ASSERT(error == EINVAL || error == ENOTCONN);
			error = ECONNABORTED;
		}
		kmem_free(addrp, so->so_max_addr_len);
	}

	if (error) {
		setf(nfd, NULL);
		(void) socket_close(nso, 0, CRED());
		socket_destroy(nso);
		releasef(sock);
		return (set_errno(error));
	}
	if ((error = falloc(NULL, FWRITE|FREAD, &nfp, NULL)) != 0) {
		setf(nfd, NULL);
		(void) socket_close(nso, 0, CRED());
		socket_destroy(nso);
		eprintsoline(so, error);
		releasef(sock);
		return (set_errno(error));
	}
	/*
	 * Fill in the entries that falloc reserved.
	 */
	nfp->f_vnode = nvp;
	mutex_exit(&nfp->f_tlock);
	setf(nfd, nfp);

	/*
	 * Act on SOCK_CLOEXEC from flags.
	 */
	if (flags & SOCK_CLOEXEC) {
		f_setfd(nfd, FD_CLOEXEC);
	}

	/*
	 * Copy FNDELAY and FNONBLOCK from the listener to the acceptor
	 * and from ssflags.
	 */
	if ((ssflags | so->so_state) & (SS_NDELAY|SS_NONBLOCK)) {
		uint_t oflag = nfp->f_flag;
		int arg = 0;

		if ((ssflags | so->so_state) & SS_NONBLOCK)
			arg |= FNONBLOCK;
		else if ((ssflags | so->so_state) & SS_NDELAY)
			arg |= FNDELAY;

		/*
		 * This code is a simplification of the F_SETFL code in
		 * fcntl(). Ignore any errors from VOP_SETFL.
		 */
		if ((error = VOP_SETFL(nvp, oflag, arg, nfp->f_cred, NULL))
		    != 0) {
			eprintsoline(so, error);
			error = 0;
		} else {
			mutex_enter(&nfp->f_tlock);
			nfp->f_flag &= ~FMASK | (FREAD|FWRITE);
			nfp->f_flag |= arg;
			mutex_exit(&nfp->f_tlock);
		}
	}
	releasef(sock);
	return (nfd);
}
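/*
 * Example of the flag propagation above (illustrative only): if the
 * listener was opened with SOCK_NONBLOCK, or accept() itself is called
 * with SOCK_NONBLOCK, the (ssflags | so->so_state) test sets FNONBLOCK
 * on the new file, so a subsequent fcntl(nfd, F_GETFL) in the
 * application would report the non-blocking bit on the accepted socket
 * without any further setup.
 */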
int
connect(int sock, struct sockaddr *name, socklen_t namelen, int version)
{
	struct sonode *so;
	file_t *fp;
	int error;

	dprint(1, ("connect(%d, %p, %d)\n",
	    sock, (void *)name, namelen));

	if ((so = getsonode(sock, &error, &fp)) == NULL)
		return (set_errno(error));

	/* Allocate and copyin name */
	if (namelen != 0) {
		ASSERT(MUTEX_NOT_HELD(&so->so_lock));
		name = copyin_name(so, name, &namelen, &error);
		if (name == NULL) {
			releasef(sock);
			return (set_errno(error));
		}
	} else
		name = NULL;

	error = socket_connect(so, name, namelen, fp->f_flag,
	    (version != SOV_XPG4_2) ? 0 : _SOCONNECT_XPG4_2, CRED());
	releasef(sock);
	if (name)
		kmem_free(name, (size_t)namelen);
	if (error)
		return (set_errno(error));
	return (0);
}

/*ARGSUSED2*/
int
shutdown(int sock, int how, int version)
{
	struct sonode *so;
	int error;

	dprint(1, ("shutdown(%d, %d)\n",
	    sock, how));

	if ((so = getsonode(sock, &error, NULL)) == NULL)
		return (set_errno(error));

	error = socket_shutdown(so, how, CRED());

	releasef(sock);
	if (error)
		return (set_errno(error));
	return (0);
}

/*
 * Common receive routine.
 */
static ssize_t
recvit(int sock, struct nmsghdr *msg, struct uio *uiop, int flags,
    socklen_t *namelenp, socklen_t *controllenp, int *flagsp)
{
	struct sonode *so;
	file_t *fp;
	void *name;
	socklen_t namelen;
	void *control;
	socklen_t controllen, free_controllen;
	ssize_t len;
	int error;

	if ((so = getsonode(sock, &error, &fp)) == NULL)
		return (set_errno(error));

	len = uiop->uio_resid;
	uiop->uio_fmode = fp->f_flag;
	uiop->uio_extflg = UIO_COPY_CACHED;

	name = msg->msg_name;
	namelen = msg->msg_namelen;
	control = msg->msg_control;
	controllen = msg->msg_controllen;

	msg->msg_flags = flags & (MSG_OOB | MSG_PEEK | MSG_WAITALL |
	    MSG_DONTWAIT | MSG_XPG4_2);

	error = socket_recvmsg(so, msg, uiop, CRED());
	if (error) {
		releasef(sock);
		return (set_errno(error));
	}
	lwp_stat_update(LWP_STAT_MSGRCV, 1);
	releasef(sock);

	free_controllen = msg->msg_controllen;

	error = copyout_name(name, namelen, namelenp,
	    msg->msg_name, msg->msg_namelen);
	if (error)
		goto err;

	if (flagsp != NULL) {
		/*
		 * Clear internal flag.
		 */
		msg->msg_flags &= ~MSG_XPG4_2;

		/*
		 * Determine MSG_CTRUNC. sorecvmsg sets MSG_CTRUNC only
		 * when controllen is zero and there is control data to
		 * copy out.
		 */
		if (controllen != 0 &&
		    (msg->msg_controllen > controllen || control == NULL)) {
			dprint(1, ("recvit: CTRUNC %d %d %p\n",
			    msg->msg_controllen, controllen, control));

			msg->msg_flags |= MSG_CTRUNC;
		}
		if (copyout(&msg->msg_flags, flagsp,
		    sizeof (msg->msg_flags))) {
			error = EFAULT;
			goto err;
		}
	}

	if (controllen != 0) {
		if (!(flags & MSG_XPG4_2)) {
			/*
			 * Good old msg_accrights can only return a multiple
			 * of 4 bytes.
			 */
			controllen &= ~((int)sizeof (uint32_t) - 1);
		}

		if (msg->msg_controllen > controllen || control == NULL) {
			/*
			 * If the truncated part contains file descriptors,
			 * then they must be closed in the kernel as they
			 * will not be included in the data returned to
			 * user space. Close them now so that the header size
			 * can be safely adjusted prior to copyout. In case of
			 * an error during copyout, the remaining file
			 * descriptors will be closed in the error handler
			 * below.
			 */
			so_closefds(msg->msg_control, msg->msg_controllen,
			    !(flags & MSG_XPG4_2),
			    control == NULL ? 0 : controllen);

			/*
			 * In the case of a truncated control message, the last
			 * cmsg header that fits into the available buffer
			 * space must be adjusted to reflect the actual amount
			 * of associated data that will be returned. This only
			 * needs to be done for XPG4 messages as non-XPG4
			 * messages are not structured (they are just a
			 * buffer and a length - msg_accrights(len)).
			 */
			if (control != NULL && (flags & MSG_XPG4_2)) {
				so_truncatecmsg(msg->msg_control,
				    msg->msg_controllen, controllen);
				msg->msg_controllen = controllen;
			}
		}

		error = copyout_arg(control, controllen, controllenp,
		    msg->msg_control, msg->msg_controllen);

		if (error)
			goto err;

	}
	if (msg->msg_namelen != 0)
		kmem_free(msg->msg_name, (size_t)msg->msg_namelen);
	if (free_controllen != 0)
		kmem_free(msg->msg_control, (size_t)free_controllen);
	return (len - uiop->uio_resid);

err:
	/*
	 * If we fail and the control part contains file descriptors
	 * we have to close them. For a truncated control message, the
	 * descriptors which were cut off have already been closed and the
	 * length adjusted so that they will not be closed again.
	 */
	if (msg->msg_controllen != 0)
		so_closefds(msg->msg_control, msg->msg_controllen,
		    !(flags & MSG_XPG4_2), 0);
	if (msg->msg_namelen != 0)
		kmem_free(msg->msg_name, (size_t)msg->msg_namelen);
	if (free_controllen != 0)
		kmem_free(msg->msg_control, (size_t)free_controllen);
	return (set_errno(error));
}
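/*
 * Worked example of the control-message truncation above (illustrative
 * only): a sender passes three descriptors via SCM_RIGHTS, but the
 * receiver's msg_controllen only has room for the header plus one
 * descriptor. so_closefds() closes the two descriptors that were cut
 * off, so_truncatecmsg() shrinks the last cmsg header to cover the one
 * descriptor actually returned, and the application sees MSG_CTRUNC
 * set in msg_flags.
 */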
/*
 * Native system call
 */
ssize_t
recv(int sock, void *buffer, size_t len, int flags)
{
	struct nmsghdr lmsg;
	struct uio auio;
	struct iovec aiov[1];

	dprint(1, ("recv(%d, %p, %ld, %d)\n",
	    sock, buffer, len, flags));

	if ((ssize_t)len < 0) {
		return (set_errno(EINVAL));
	}

	aiov[0].iov_base = buffer;
	aiov[0].iov_len = len;
	auio.uio_loffset = 0;
	auio.uio_iov = aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_limit = 0;

	lmsg.msg_namelen = 0;
	lmsg.msg_controllen = 0;
	lmsg.msg_flags = 0;
	return (recvit(sock, &lmsg, &auio, flags, NULL, NULL, NULL));
}

ssize_t
recvfrom(int sock, void *buffer, size_t len, int flags, struct sockaddr *name,
    socklen_t *namelenp)
{
	struct nmsghdr lmsg;
	struct uio auio;
	struct iovec aiov[1];

	dprint(1, ("recvfrom(%d, %p, %ld, %d, %p, %p)\n",
	    sock, buffer, len, flags, (void *)name, (void *)namelenp));

	if ((ssize_t)len < 0) {
		return (set_errno(EINVAL));
	}

	aiov[0].iov_base = buffer;
	aiov[0].iov_len = len;
	auio.uio_loffset = 0;
	auio.uio_iov = aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_limit = 0;

	lmsg.msg_name = (char *)name;
	if (namelenp != NULL) {
		if (copyin(namelenp, &lmsg.msg_namelen,
		    sizeof (lmsg.msg_namelen)))
			return (set_errno(EFAULT));
	} else {
		lmsg.msg_namelen = 0;
	}
	lmsg.msg_controllen = 0;
	lmsg.msg_flags = 0;

	return (recvit(sock, &lmsg, &auio, flags, namelenp, NULL, NULL));
}

/*
 * Uses the MSG_XPG4_2 flag to determine if the caller is using
 * struct omsghdr or struct nmsghdr.
 */
ssize_t
recvmsg(int sock, struct nmsghdr *msg, int flags)
{
	STRUCT_DECL(nmsghdr, u_lmsg);
	STRUCT_HANDLE(nmsghdr, umsgptr);
	struct nmsghdr lmsg;
	struct uio auio;
	struct iovec buf[IOV_MAX_STACK], *aiov = buf;
	ssize_t iovsize = 0;
	int iovcnt;
	ssize_t len, rval;
	int i;
	int *flagsp;
	model_t model;

	dprint(1, ("recvmsg(%d, %p, %d)\n",
	    sock, (void *)msg, flags));

	model = get_udatamodel();
	STRUCT_INIT(u_lmsg, model);
	STRUCT_SET_HANDLE(umsgptr, model, msg);

	if (flags & MSG_XPG4_2) {
		if (copyin(msg, STRUCT_BUF(u_lmsg), STRUCT_SIZE(u_lmsg)))
			return (set_errno(EFAULT));
		flagsp = STRUCT_FADDR(umsgptr, msg_flags);
	} else {
		/*
		 * Assumes that nmsghdr and omsghdr are identically shaped
		 * except for the added msg_flags field.
		 */
		if (copyin(msg, STRUCT_BUF(u_lmsg),
		    SIZEOF_STRUCT(omsghdr, model)))
			return (set_errno(EFAULT));
		STRUCT_FSET(u_lmsg, msg_flags, 0);
		flagsp = NULL;
	}

	/*
	 * Code below us will kmem_alloc memory and hang it
	 * off msg_control and msg_name fields. This forces
	 * us to copy the structure to its native form.
	 */
	lmsg.msg_name = STRUCT_FGETP(u_lmsg, msg_name);
	lmsg.msg_namelen = STRUCT_FGET(u_lmsg, msg_namelen);
	lmsg.msg_iov = STRUCT_FGETP(u_lmsg, msg_iov);
	lmsg.msg_iovlen = STRUCT_FGET(u_lmsg, msg_iovlen);
	lmsg.msg_control = STRUCT_FGETP(u_lmsg, msg_control);
	lmsg.msg_controllen = STRUCT_FGET(u_lmsg, msg_controllen);
	lmsg.msg_flags = STRUCT_FGET(u_lmsg, msg_flags);

	iovcnt = lmsg.msg_iovlen;

	if (iovcnt <= 0 || iovcnt > IOV_MAX) {
		return (set_errno(EMSGSIZE));
	}

	if (iovcnt > IOV_MAX_STACK) {
		iovsize = iovcnt * sizeof (struct iovec);
		aiov = kmem_alloc(iovsize, KM_SLEEP);
	}

#ifdef _SYSCALL32_IMPL
	/*
	 * 32-bit callers need to have their iovec expanded, while ensuring
	 * that they can't move more than 2Gbytes of data in a single call.
	 */
	if (model == DATAMODEL_ILP32) {
		struct iovec32 buf32[IOV_MAX_STACK], *aiov32 = buf32;
		ssize_t iov32size;
		ssize32_t count32;

		iov32size = iovcnt * sizeof (struct iovec32);
		if (iovsize != 0)
			aiov32 = kmem_alloc(iov32size, KM_SLEEP);

		if (copyin((struct iovec32 *)lmsg.msg_iov, aiov32, iov32size)) {
			if (iovsize != 0) {
				kmem_free(aiov32, iov32size);
				kmem_free(aiov, iovsize);
			}

			return (set_errno(EFAULT));
		}

		count32 = 0;
		for (i = 0; i < iovcnt; i++) {
			ssize32_t iovlen32;

			iovlen32 = aiov32[i].iov_len;
			count32 += iovlen32;
			if (iovlen32 < 0 || count32 < 0) {
				if (iovsize != 0) {
					kmem_free(aiov32, iov32size);
					kmem_free(aiov, iovsize);
				}

				return (set_errno(EINVAL));
			}

			aiov[i].iov_len = iovlen32;
			aiov[i].iov_base =
			    (caddr_t)(uintptr_t)aiov32[i].iov_base;
		}

		if (iovsize != 0)
			kmem_free(aiov32, iov32size);
	} else
#endif /* _SYSCALL32_IMPL */
	if (copyin(lmsg.msg_iov, aiov, iovcnt * sizeof (struct iovec))) {
		if (iovsize != 0)
			kmem_free(aiov, iovsize);

		return (set_errno(EFAULT));
	}
	len = 0;
	for (i = 0; i < iovcnt; i++) {
		ssize_t iovlen = aiov[i].iov_len;
		len += iovlen;
		if (iovlen < 0 || len < 0) {
			if (iovsize != 0)
				kmem_free(aiov, iovsize);

			return (set_errno(EINVAL));
		}
	}
	auio.uio_loffset = 0;
	auio.uio_iov = aiov;
	auio.uio_iovcnt = iovcnt;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_limit = 0;

	if (lmsg.msg_control != NULL &&
	    (do_useracc == 0 ||
	    useracc(lmsg.msg_control, lmsg.msg_controllen,
	    B_WRITE) != 0)) {
		if (iovsize != 0)
			kmem_free(aiov, iovsize);

		return (set_errno(EFAULT));
	}

	rval = recvit(sock, &lmsg, &auio, flags,
	    STRUCT_FADDR(umsgptr, msg_namelen),
	    STRUCT_FADDR(umsgptr, msg_controllen), flagsp);

	if (iovsize != 0)
		kmem_free(aiov, iovsize);

	return (rval);
}
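/*
 * Worked example of the ILP32 overflow check above (illustrative
 * only): two iovec32 entries of 0x40000000 bytes each sum to
 * 0x80000000, which wraps a signed 32-bit count32 to a negative
 * value, so the request is rejected with EINVAL and a 32-bit caller
 * can never move 2GB or more in a single recvmsg() call.
 */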
/*
 * Common send function.
 */
static ssize_t
sendit(int sock, struct nmsghdr *msg, struct uio *uiop, int flags)
{
	struct sonode *so;
	file_t *fp;
	void *name;
	socklen_t namelen;
	void *control;
	socklen_t controllen;
	ssize_t len;
	int error;

	if ((so = getsonode(sock, &error, &fp)) == NULL)
		return (set_errno(error));

	uiop->uio_fmode = fp->f_flag;

	if (so->so_family == AF_UNIX)
		uiop->uio_extflg = UIO_COPY_CACHED;
	else
		uiop->uio_extflg = UIO_COPY_DEFAULT;

	/* Allocate and copyin name and control */
	name = msg->msg_name;
	namelen = msg->msg_namelen;
	if (name != NULL && namelen != 0) {
		ASSERT(MUTEX_NOT_HELD(&so->so_lock));
		name = copyin_name(so,
		    (struct sockaddr *)name,
		    &namelen, &error);
		if (name == NULL)
			goto done3;
		/* copyin_name null terminates addresses for AF_UNIX */
		msg->msg_namelen = namelen;
		msg->msg_name = name;
	} else {
		msg->msg_name = name = NULL;
		msg->msg_namelen = namelen = 0;
	}

	control = msg->msg_control;
	controllen = msg->msg_controllen;
	if ((control != NULL) && (controllen != 0)) {
		/*
		 * Verify that the length is not excessive to prevent
		 * an application from consuming all of kernel memory.
		 */
		if (controllen > SO_MAXARGSIZE) {
			error = EINVAL;
			goto done2;
		}
		control = kmem_alloc(controllen, KM_SLEEP);

		ASSERT(MUTEX_NOT_HELD(&so->so_lock));
		if (copyin(msg->msg_control, control, controllen)) {
			error = EFAULT;
			goto done1;
		}
		msg->msg_control = control;
	} else {
		msg->msg_control = control = NULL;
		msg->msg_controllen = controllen = 0;
	}

	len = uiop->uio_resid;
	msg->msg_flags = flags;

	error = socket_sendmsg(so, msg, uiop, CRED());
done1:
	if (control != NULL)
		kmem_free(control, controllen);
done2:
	if (name != NULL)
		kmem_free(name, namelen);
done3:
	if (error != 0) {
		releasef(sock);
		return (set_errno(error));
	}
	lwp_stat_update(LWP_STAT_MSGSND, 1);
	releasef(sock);
	return (len - uiop->uio_resid);
}

/*
 * Native system call
 */
ssize_t
send(int sock, void *buffer, size_t len, int flags)
{
	struct nmsghdr lmsg;
	struct uio auio;
	struct iovec aiov[1];

	dprint(1, ("send(%d, %p, %ld, %d)\n",
	    sock, buffer, len, flags));

	if ((ssize_t)len < 0) {
		return (set_errno(EINVAL));
	}

	aiov[0].iov_base = buffer;
	aiov[0].iov_len = len;
	auio.uio_loffset = 0;
	auio.uio_iov = aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_limit = 0;

	lmsg.msg_name = NULL;
	lmsg.msg_control = NULL;
	if (!(flags & MSG_XPG4_2)) {
		/*
		 * In order to be compatible with the libsocket/sockmod
		 * implementation we set EOR for all send* calls.
		 */
		flags |= MSG_EOR;
	}
	return (sendit(sock, &lmsg, &auio, flags));
}

/*
 * Uses the MSG_XPG4_2 flag to determine if the caller is using
 * struct omsghdr or struct nmsghdr.
 */
ssize_t
sendmsg(int sock, struct nmsghdr *msg, int flags)
{
	struct nmsghdr lmsg;
	STRUCT_DECL(nmsghdr, u_lmsg);
	struct uio auio;
	struct iovec buf[IOV_MAX_STACK], *aiov = buf;
	ssize_t iovsize = 0;
	int iovcnt;
	ssize_t len, rval;
	int i;
	model_t model;

	dprint(1, ("sendmsg(%d, %p, %d)\n", sock, (void *)msg, flags));

	model = get_udatamodel();
	STRUCT_INIT(u_lmsg, model);

	if (flags & MSG_XPG4_2) {
		if (copyin(msg, (char *)STRUCT_BUF(u_lmsg),
		    STRUCT_SIZE(u_lmsg)))
			return (set_errno(EFAULT));
	} else {
		/*
		 * Assumes that nmsghdr and omsghdr are identically shaped
		 * except for the added msg_flags field.
		 */
		if (copyin(msg, (char *)STRUCT_BUF(u_lmsg),
		    SIZEOF_STRUCT(omsghdr, model)))
			return (set_errno(EFAULT));
		/*
		 * In order to be compatible with the libsocket/sockmod
		 * implementation we set EOR for all send* calls.
		 */
		flags |= MSG_EOR;
	}

	/*
	 * Code below us will kmem_alloc memory and hang it
	 * off msg_control and msg_name fields. This forces
	 * us to copy the structure to its native form.
	 */
	lmsg.msg_name = STRUCT_FGETP(u_lmsg, msg_name);
	lmsg.msg_namelen = STRUCT_FGET(u_lmsg, msg_namelen);
	lmsg.msg_iov = STRUCT_FGETP(u_lmsg, msg_iov);
	lmsg.msg_iovlen = STRUCT_FGET(u_lmsg, msg_iovlen);
	lmsg.msg_control = STRUCT_FGETP(u_lmsg, msg_control);
	lmsg.msg_controllen = STRUCT_FGET(u_lmsg, msg_controllen);
	lmsg.msg_flags = STRUCT_FGET(u_lmsg, msg_flags);

	iovcnt = lmsg.msg_iovlen;

	if (iovcnt <= 0 || iovcnt > IOV_MAX) {
		/*
		 * Unless this is XPG 4.2 we allow iovcnt == 0 to
		 * be compatible with SunOS 4.X and 4.4BSD.
		 */
		if (iovcnt != 0 || (flags & MSG_XPG4_2))
			return (set_errno(EMSGSIZE));
	}

	if (iovcnt > IOV_MAX_STACK) {
		iovsize = iovcnt * sizeof (struct iovec);
		aiov = kmem_alloc(iovsize, KM_SLEEP);
	}

#ifdef _SYSCALL32_IMPL
	/*
	 * 32-bit callers need to have their iovec expanded, while ensuring
	 * that they can't move more than 2Gbytes of data in a single call.
	 */
	if (model == DATAMODEL_ILP32) {
		struct iovec32 buf32[IOV_MAX_STACK], *aiov32 = buf32;
		ssize_t iov32size;
		ssize32_t count32;

		iov32size = iovcnt * sizeof (struct iovec32);
		if (iovsize != 0)
			aiov32 = kmem_alloc(iov32size, KM_SLEEP);

		if (iovcnt != 0 &&
		    copyin((struct iovec32 *)lmsg.msg_iov, aiov32, iov32size)) {
			if (iovsize != 0) {
				kmem_free(aiov32, iov32size);
				kmem_free(aiov, iovsize);
			}

			return (set_errno(EFAULT));
		}

		count32 = 0;
		for (i = 0; i < iovcnt; i++) {
			ssize32_t iovlen32;

			iovlen32 = aiov32[i].iov_len;
			count32 += iovlen32;
			if (iovlen32 < 0 || count32 < 0) {
				if (iovsize != 0) {
					kmem_free(aiov32, iov32size);
					kmem_free(aiov, iovsize);
				}

				return (set_errno(EINVAL));
			}

			aiov[i].iov_len = iovlen32;
			aiov[i].iov_base =
			    (caddr_t)(uintptr_t)aiov32[i].iov_base;
		}

		if (iovsize != 0)
			kmem_free(aiov32, iov32size);
	} else
#endif /* _SYSCALL32_IMPL */
	if (iovcnt != 0 &&
	    copyin(lmsg.msg_iov, aiov,
	    (unsigned)iovcnt * sizeof (struct iovec))) {
		if (iovsize != 0)
			kmem_free(aiov, iovsize);

		return (set_errno(EFAULT));
	}
	len = 0;
	for (i = 0; i < iovcnt; i++) {
		ssize_t iovlen = aiov[i].iov_len;
		len += iovlen;
		if (iovlen < 0 || len < 0) {
			if (iovsize != 0)
				kmem_free(aiov, iovsize);

			return (set_errno(EINVAL));
		}
	}
	auio.uio_loffset = 0;
	auio.uio_iov = aiov;
	auio.uio_iovcnt = iovcnt;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_limit = 0;

	rval = sendit(sock, &lmsg, &auio, flags);

	if (iovsize != 0)
		kmem_free(aiov, iovsize);

	return (rval);
}

ssize_t
sendto(int sock, void *buffer, size_t len, int flags,
    struct sockaddr *name, socklen_t namelen)
{
	struct nmsghdr lmsg;
	struct uio auio;
	struct iovec aiov[1];

	dprint(1, ("sendto(%d, %p, %ld, %d, %p, %d)\n",
	    sock, buffer, len, flags, (void *)name, namelen));

	if ((ssize_t)len < 0) {
		return (set_errno(EINVAL));
	}

	aiov[0].iov_base = buffer;
	aiov[0].iov_len = len;
	auio.uio_loffset = 0;
	auio.uio_iov = aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_limit = 0;

	lmsg.msg_name = (char *)name;
	lmsg.msg_namelen = namelen;
	lmsg.msg_control = NULL;
	if (!(flags & MSG_XPG4_2)) {
		/*
		 * In order to be compatible with the libsocket/sockmod
		 * implementation we set EOR for all send* calls.
		 */
		flags |= MSG_EOR;
	}
	return (sendit(sock, &lmsg, &auio, flags));
}

/*ARGSUSED3*/
int
getpeername(int sock, struct sockaddr *name, socklen_t *namelenp, int version)
{
	struct sonode *so;
	int error;
	socklen_t namelen;
	socklen_t sock_addrlen;
	struct sockaddr *sock_addrp;

	dprint(1, ("getpeername(%d, %p, %p)\n",
	    sock, (void *)name, (void *)namelenp));

	if ((so = getsonode(sock, &error, NULL)) == NULL)
		goto bad;

	ASSERT(MUTEX_NOT_HELD(&so->so_lock));
	if (copyin(namelenp, &namelen, sizeof (namelen)) ||
	    (name == NULL && namelen != 0)) {
		error = EFAULT;
		goto rel_out;
	}
	sock_addrlen = so->so_max_addr_len;
	sock_addrp = (struct sockaddr *)kmem_alloc(sock_addrlen, KM_SLEEP);

	if ((error = socket_getpeername(so, sock_addrp, &sock_addrlen,
	    B_FALSE, CRED())) == 0) {
		ASSERT(sock_addrlen <= so->so_max_addr_len);
		error = copyout_name(name, namelen, namelenp,
		    (void *)sock_addrp, sock_addrlen);
	}
	kmem_free(sock_addrp, so->so_max_addr_len);
rel_out:
	releasef(sock);
bad:	return (error != 0 ? set_errno(error) : 0);
}

/*ARGSUSED3*/
int
getsockname(int sock, struct sockaddr *name, socklen_t *namelenp, int version)
{
	struct sonode *so;
	int error;
	socklen_t namelen, sock_addrlen;
	struct sockaddr *sock_addrp;

	dprint(1, ("getsockname(%d, %p, %p)\n",
	    sock, (void *)name, (void *)namelenp));

	if ((so = getsonode(sock, &error, NULL)) == NULL)
		goto bad;

	ASSERT(MUTEX_NOT_HELD(&so->so_lock));
	if (copyin(namelenp, &namelen, sizeof (namelen)) ||
	    (name == NULL && namelen != 0)) {
		error = EFAULT;
		goto rel_out;
	}

	sock_addrlen = so->so_max_addr_len;
	sock_addrp = (struct sockaddr *)kmem_alloc(sock_addrlen, KM_SLEEP);
	if ((error = socket_getsockname(so, sock_addrp, &sock_addrlen,
	    CRED())) == 0) {
		ASSERT(MUTEX_NOT_HELD(&so->so_lock));
		ASSERT(sock_addrlen <= so->so_max_addr_len);
		error = copyout_name(name, namelen, namelenp,
		    (void *)sock_addrp, sock_addrlen);
	}
	kmem_free(sock_addrp, so->so_max_addr_len);
rel_out:
	releasef(sock);
bad:	return (error != 0 ? set_errno(error) : 0);
}

/*ARGSUSED5*/
int
getsockopt(int sock, int level, int option_name, void *option_value,
    socklen_t *option_lenp, int version)
{
	struct sonode *so;
	socklen_t optlen, optlen_res;
	void *optval;
	int error;

	dprint(1, ("getsockopt(%d, %d, %d, %p, %p)\n",
	    sock, level, option_name, option_value, (void *)option_lenp));

	if ((so = getsonode(sock, &error, NULL)) == NULL)
		return (set_errno(error));

	ASSERT(MUTEX_NOT_HELD(&so->so_lock));
	if (copyin(option_lenp, &optlen, sizeof (optlen))) {
		releasef(sock);
		return (set_errno(EFAULT));
	}
	/*
	 * Verify that the length is not excessive to prevent
	 * an application from consuming all of kernel memory.
	 */
	if (optlen > SO_MAXARGSIZE) {
		error = EINVAL;
		releasef(sock);
		return (set_errno(error));
	}
	optval = kmem_alloc(optlen, KM_SLEEP);
	optlen_res = optlen;
	error = socket_getsockopt(so, level, option_name, optval,
	    &optlen_res, (version != SOV_XPG4_2) ? 0 : _SOGETSOCKOPT_XPG4_2,
	    CRED());
	releasef(sock);
	if (error) {
		kmem_free(optval, optlen);
		return (set_errno(error));
	}
	error = copyout_arg(option_value, optlen, option_lenp,
	    optval, optlen_res);
	kmem_free(optval, optlen);
	if (error)
		return (set_errno(error));
	return (0);
}

/*ARGSUSED5*/
int
setsockopt(int sock, int level, int option_name, void *option_value,
    socklen_t option_len, int version)
{
	struct sonode *so;
	intptr_t buffer[2];
	void *optval = NULL;
	int error;

	dprint(1, ("setsockopt(%d, %d, %d, %p, %d)\n",
	    sock, level, option_name, option_value, option_len));

	if ((so = getsonode(sock, &error, NULL)) == NULL)
		return (set_errno(error));

	if (option_value != NULL) {
		if (option_len != 0) {
			/*
			 * Verify that the length is not excessive to prevent
			 * an application from consuming all of kernel memory.
			 */
			if (option_len > SO_MAXARGSIZE) {
				error = EINVAL;
				goto done2;
			}
			optval = option_len <= sizeof (buffer) ?
			    &buffer : kmem_alloc((size_t)option_len, KM_SLEEP);
			ASSERT(MUTEX_NOT_HELD(&so->so_lock));
			if (copyin(option_value, optval, (size_t)option_len)) {
				error = EFAULT;
				goto done1;
			}
		}
	} else
		option_len = 0;

	error = socket_setsockopt(so, level, option_name, optval,
	    (t_uscalar_t)option_len, CRED());
done1:
	if (optval != buffer)
		kmem_free(optval, (size_t)option_len);
done2:
	releasef(sock);
	if (error)
		return (set_errno(error));
	return (0);
}

static int
sockconf_add_sock(int family, int type, int protocol, char *name)
{
	int error = 0;
	char *kdevpath = NULL;
	char *kmodule = NULL;
	char *buf = NULL;
	size_t pathlen = 0;
	struct sockparams *sp;

	if (name == NULL)
		return (EINVAL);
	/*
	 * Copyin the name.
	 * This also makes it possible to check for too long pathnames.
	 * Compress the space needed for the name before passing it
	 * to soconfig - soconfig will store the string until
	 * the configuration is removed.
	 */
	buf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	if ((error = copyinstr(name, buf, MAXPATHLEN, &pathlen)) != 0) {
		kmem_free(buf, MAXPATHLEN);
		return (error);
	}
	if (strncmp(buf, "/dev", strlen("/dev")) == 0) {
		/* For device */

		/*
		 * Special handling for NCA:
		 *
		 * DEV_NCA is never opened even if an application
		 * requests for AF_NCA. The device opened is instead a
		 * predefined AF_INET transport (NCA_INET_DEV).
		 *
		 * Prior to Volo (PSARC/2007/587) NCA would determine
		 * the device using a lookup, which worked then because
		 * all protocols were based on TPI. Since TPI is no
		 * longer the default, we have to explicitly state
		 * which device to use.
		 */
		if (strcmp(buf, NCA_DEV) == 0) {
			/* only support entry <28, 2, 0> */
			if (family != AF_NCA || type != SOCK_STREAM ||
			    protocol != 0) {
				kmem_free(buf, MAXPATHLEN);
				return (EINVAL);
			}

			pathlen = strlen(NCA_INET_DEV) + 1;
			kdevpath = kmem_alloc(pathlen, KM_SLEEP);
			bcopy(NCA_INET_DEV, kdevpath, pathlen);
			kdevpath[pathlen - 1] = '\0';
		} else {
			kdevpath = kmem_alloc(pathlen, KM_SLEEP);
			bcopy(buf, kdevpath, pathlen);
			kdevpath[pathlen - 1] = '\0';
		}
	} else {
		/* For socket module */
		kmodule = kmem_alloc(pathlen, KM_SLEEP);
		bcopy(buf, kmodule, pathlen);
		kmodule[pathlen - 1] = '\0';
		pathlen = 0;
	}
	kmem_free(buf, MAXPATHLEN);

	/* sockparams_create frees mod name and devpath upon failure */
	sp = sockparams_create(family, type, protocol, kmodule,
	    kdevpath, pathlen, 0, KM_SLEEP, &error);
	if (sp != NULL) {
		error = sockparams_add(sp);
		if (error != 0)
			sockparams_destroy(sp);
	}

	return (error);
}
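/*
 * Example (illustrative only): the soconfig utility feeds entries in
 * the /etc/sock2path.d format to this routine. A hypothetical line
 *
 *	2  2  0  tcp
 *
 * maps <AF_INET, SOCK_STREAM, 0> to the "tcp" socket module (the
 * kmodule branch above), while a name starting with "/dev" would be
 * handed down as kdevpath for a TPI-style transport instead.
 */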
static int
sockconf_remove_sock(int family, int type, int protocol)
{
	return (sockparams_delete(family, type, protocol));
}

static int
sockconfig_remove_filter(const char *uname)
{
	char kname[SOF_MAXNAMELEN];
	size_t len;
	int error;
	sof_entry_t *ent;

	if ((error = copyinstr(uname, kname, SOF_MAXNAMELEN, &len)) != 0)
		return (error);

	ent = sof_entry_remove_by_name(kname);
	if (ent == NULL)
		return (ENXIO);

	mutex_enter(&ent->sofe_lock);
	ASSERT(!(ent->sofe_flags & SOFEF_CONDEMED));
	if (ent->sofe_refcnt == 0) {
		mutex_exit(&ent->sofe_lock);
		sof_entry_free(ent);
	} else {
		/* let the last socket free the filter */
		ent->sofe_flags |= SOFEF_CONDEMED;
		mutex_exit(&ent->sofe_lock);
	}

	return (0);
}

static int
sockconfig_add_filter(const char *uname, void *ufilpropp)
{
	struct sockconfig_filter_props filprop;
	sof_entry_t *ent;
	int error;
	size_t tuplesz, len;
	char hintbuf[SOF_MAXNAMELEN];

	ent = kmem_zalloc(sizeof (sof_entry_t), KM_SLEEP);
	mutex_init(&ent->sofe_lock, NULL, MUTEX_DEFAULT, NULL);

	if ((error = copyinstr(uname, ent->sofe_name, SOF_MAXNAMELEN,
	    &len)) != 0) {
		sof_entry_free(ent);
		return (error);
	}

	if (get_udatamodel() == DATAMODEL_NATIVE) {
		if (copyin(ufilpropp, &filprop, sizeof (filprop)) != 0) {
			sof_entry_free(ent);
			return (EFAULT);
		}
	}
#ifdef _SYSCALL32_IMPL
	else {
		struct sockconfig_filter_props32 filprop32;

		if (copyin(ufilpropp, &filprop32, sizeof (filprop32)) != 0) {
			sof_entry_free(ent);
			return (EFAULT);
		}
		filprop.sfp_modname = (char *)(uintptr_t)filprop32.sfp_modname;
		filprop.sfp_autoattach = filprop32.sfp_autoattach;
		filprop.sfp_hint = filprop32.sfp_hint;
		filprop.sfp_hintarg = (char *)(uintptr_t)filprop32.sfp_hintarg;
		filprop.sfp_socktuple_cnt = filprop32.sfp_socktuple_cnt;
		filprop.sfp_socktuple =
		    (sof_socktuple_t *)(uintptr_t)filprop32.sfp_socktuple;
	}
#endif /* _SYSCALL32_IMPL */

	if ((error = copyinstr(filprop.sfp_modname, ent->sofe_modname,
	    sizeof (ent->sofe_modname), &len)) != 0) {
		sof_entry_free(ent);
		return (error);
	}

	/*
	 * A filter must specify at least one socket tuple.
	 */
	if (filprop.sfp_socktuple_cnt == 0 ||
	    filprop.sfp_socktuple_cnt > SOF_MAXSOCKTUPLECNT) {
		sof_entry_free(ent);
		return (EINVAL);
	}
	ent->sofe_flags = filprop.sfp_autoattach ? SOFEF_AUTO : SOFEF_PROG;
	ent->sofe_hint = filprop.sfp_hint;

	/*
	 * Verify the hint, and copy in the hint argument, if necessary.
	 */
	switch (ent->sofe_hint) {
	case SOF_HINT_BEFORE:
	case SOF_HINT_AFTER:
		if ((error = copyinstr(filprop.sfp_hintarg, hintbuf,
		    sizeof (hintbuf), &len)) != 0) {
			sof_entry_free(ent);
			return (error);
		}
		ent->sofe_hintarg = kmem_alloc(len, KM_SLEEP);
		bcopy(hintbuf, ent->sofe_hintarg, len);
		/* FALLTHRU */
	case SOF_HINT_TOP:
	case SOF_HINT_BOTTOM:
		/* hints cannot be used with programmatic filters */
		if (ent->sofe_flags & SOFEF_PROG) {
			sof_entry_free(ent);
			return (EINVAL);
		}
		break;
	case SOF_HINT_NONE:
		break;
	default:
		/* bad hint value */
		sof_entry_free(ent);
		return (EINVAL);
	}

	ent->sofe_socktuple_cnt = filprop.sfp_socktuple_cnt;
	tuplesz = sizeof (sof_socktuple_t) * ent->sofe_socktuple_cnt;
	ent->sofe_socktuple = kmem_alloc(tuplesz, KM_SLEEP);

	if (get_udatamodel() == DATAMODEL_NATIVE) {
		if (copyin(filprop.sfp_socktuple, ent->sofe_socktuple,
		    tuplesz)) {
			sof_entry_free(ent);
			return (EFAULT);
		}
	}
#ifdef _SYSCALL32_IMPL
	else {
		int i;
		caddr_t data = (caddr_t)filprop.sfp_socktuple;
		sof_socktuple_t *tup = ent->sofe_socktuple;
		sof_socktuple32_t tup32;

		for (i = 0; i < ent->sofe_socktuple_cnt; i++, tup++) {
			ASSERT(tup < ent->sofe_socktuple + tuplesz);

			if (copyin(data, &tup32, sizeof (tup32)) != 0) {
				sof_entry_free(ent);
				return (EFAULT);
			}
			tup->sofst_family = tup32.sofst_family;
			tup->sofst_type = tup32.sofst_type;
			tup->sofst_protocol = tup32.sofst_protocol;

			data += sizeof (tup32);
		}
	}
#endif /* _SYSCALL32_IMPL */

	/* Sockets can start using the filter as soon as the filter is added */
	if ((error = sof_entry_add(ent)) != 0)
		sof_entry_free(ent);

	return (error);
}
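/*
 * Example (illustrative only): a filter that wants to see IPv4 and
 * IPv6 TCP sockets would pass sfp_socktuple_cnt == 2 with the tuples
 * <AF_INET, SOCK_STREAM, 0> and <AF_INET6, SOCK_STREAM, 0>. With
 * sfp_autoattach set it becomes an automatic (SOFEF_AUTO) filter,
 * and SOF_HINT_BEFORE/SOF_HINT_AFTER order it relative to the filter
 * named by sfp_hintarg.
 */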
/*
 * Socket configuration system call. It is used to add and remove
 * socket types.
 */
int
sockconfig(int cmd, void *arg1, void *arg2, void *arg3, void *arg4)
{
	int error = 0;

	if (secpolicy_net_config(CRED(), B_FALSE) != 0)
		return (set_errno(EPERM));

	if (sockfs_defer_nl7c_init) {
		nl7c_init();
		sockfs_defer_nl7c_init = 0;
	}

	switch (cmd) {
	case SOCKCONFIG_ADD_SOCK:
		error = sockconf_add_sock((int)(uintptr_t)arg1,
		    (int)(uintptr_t)arg2, (int)(uintptr_t)arg3, arg4);
		break;
	case SOCKCONFIG_REMOVE_SOCK:
		error = sockconf_remove_sock((int)(uintptr_t)arg1,
		    (int)(uintptr_t)arg2, (int)(uintptr_t)arg3);
		break;
	case SOCKCONFIG_ADD_FILTER:
		error = sockconfig_add_filter((const char *)arg1, arg2);
		break;
	case SOCKCONFIG_REMOVE_FILTER:
		error = sockconfig_remove_filter((const char *)arg1);
		break;
	case SOCKCONFIG_GET_SOCKTABLE:
		error = sockparams_copyout_socktable((int)(uintptr_t)arg1);
		break;
	default:
#ifdef DEBUG
		cmn_err(CE_NOTE, "sockconfig: unknown subcommand %d", cmd);
#endif
		error = EINVAL;
		break;
	}

	if (error != 0) {
		eprintline(error);
		return (set_errno(error));
	}
	return (0);
}


/*
 * Sendfile is implemented through two schemes, direct I/O or by
 * caching in the filesystem page cache. We cache the input file by
 * default and use direct I/O only if sendfile_max_size is set
 * appropriately as explained below. Note that this logic is consistent
 * with other filesystems where caching is turned on by default
 * unless explicitly turned off by using the DIRECTIO ioctl.
 *
 * We choose a slightly different scheme here. One can turn off
 * caching by setting sendfile_max_size to 0. One can also enable
 * caching of files <= sendfile_max_size by setting sendfile_max_size
 * to an appropriate value. By default sendfile_max_size is set to the
 * maximum value so that all files are cached. In future, we may provide
 * better interfaces for caching the file.
 *
 * Sendfile through Direct I/O (Zero copy)
 * --------------------------------------
 *
 * As disks are normally slower than the network, we can't have a
 * single thread that reads the disk and writes to the network. We
 * need to have parallelism. This is done by having the sendfile
 * thread create another thread that reads from the filesystem
 * and queues it for network processing. In this scheme, the data
 * is never copied anywhere, i.e. it is zero-copy, unlike the other
 * scheme.
 *
 * We have a sendfile queue (snfq) where each sendfile
 * request (snf_req_t) is queued for processing by a thread. The number
 * of threads is dynamically adjusted and threads exit if they are idling
 * beyond a specified amount of time. When each request (snf_req_t) is
 * processed by a thread, it produces a number of mblk_t structures to
 * be consumed by the sendfile thread. snf_deque and snf_enque are
 * used for consuming and producing mblks. The size of the filesystem
 * read is determined by the tunable (sendfile_read_size). A single
 * mblk holds sendfile_read_size worth of data (except the last
 * read of the file) which is sent down as a whole to the network.
 * sendfile_read_size is set to 1 MB as this seems to be the optimal
 * value for the UFS filesystem backed by a striped storage array.
 *
 * Synchronisation between read (producer) and write (consumer) threads.
 * --------------------------------------------------------------------
 *
 * sr_lock protects sr_ib_head and sr_ib_tail. The lock is held while
 * adding and deleting items in this list. Errors can happen anytime
 * during read or write. There could be unprocessed mblks in the
 * sr_ib_XXX list when a read or write error occurs. Whenever an error
 * is encountered, we need two things to happen:
 *
 * a) One of the threads needs to clean up the mblks.
 * b) When one thread encounters an error, the other should stop.
 *
 * For (a), we don't want to penalize the reader thread as it could do
 * some useful work processing other requests. For (b), the error can
 * be detected by examining sr_read_error or sr_write_error.
 * sr_lock protects sr_read_error and sr_write_error. If both the reader
 * and the writer encounter errors, we need to report the write error back
 * to the application as that's what would have happened if the operations
 * were done sequentially. With this in mind, the following should work:
 *
 * - Check for errors before read or write.
 * - If the reader encounters an error, set the error in sr_read_error.
 *   Check sr_write_error; if it is set, send cv_signal as it is
 *   waiting for the reader to complete. If it is not set, the writer
 *   is either running sinking data to the network or blocked
 *   because of flow control. For handling the latter case, we
 *   always send a signal. In any case, it will examine sr_read_error
 *   and return. sr_read_error is marked with SR_READ_DONE to tell
 *   the writer that the reader is done in all the cases.
 * - If the writer encounters an error, set the error in sr_write_error.
 *   The reader thread is either blocked because of flow control or
 *   running reading data from the disk. For the former, we need to
 *   wake up the thread. Again, to keep it simple, we always wake up
 *   the reader thread. Then, wait for the read thread to complete
 *   if it is not done yet. Clean up and return.
 *
 * High and low water marks for the read thread.
 * --------------------------------------------
 *
 * If sendfile() is used to send data over a slow network, we need to
 * make sure that the read thread does not produce data at a faster
 * rate than the network. This can happen if the disk is faster than
 * the network. In such a case, we don't want to build a very large queue.
 * But we would still like to get all of the network throughput possible.
 * This implies that the network should never block waiting for data.
 * As there are a lot of disk throughput/network throughput combinations
 * possible, it is difficult to come up with an accurate number.
 * A typical 10K RPM disk has a max seek latency of 17ms and rotational
 * latency of 3ms for reading a disk block. Thus, the total latency to
 * initiate a new read, transfer data from the disk and queue for
 * transmission would take about a max of 25ms. Today's max transfer rate
 * for a network is 100MB/sec. If the thread is blocked because of flow
 * control, it would take 25ms to get new data ready for transmission.
 * We have to make sure that the network is not idling while we are
 * initiating new transfers. So, at 100MB/sec, to keep the network busy
 * we would need 2.5MB of data. Rounding off, we keep the low water mark
 * at 3MB of data.
 * We need to pick a high water mark so that the woken up thread would
 * do considerable work before blocking again to prevent thrashing.
 * Currently, we pick this to be 10 times the low water mark.
 *
 * Sendfile with segmap caching (One copy from page cache to mblks).
 * ----------------------------------------------------------------
 *
 * We use the segmap cache for caching the file, if the size of the
 * file is <= sendfile_max_size. In this case we don't use threads as
 * VM is reasonably fast enough to keep up with the network. If the
 * underlying transport allows, we call segmap_getmapflt() to map
 * MAXBSIZE (8K) worth of data into segmap space, and use the virtual
 * address from segmap directly through desballoc() to avoid a copy.
 * Once the transport is done with the data, the mapping will be
 * released through segmap_release() called by the call-back routine.
 *
 * If zero-copy is not allowed by the transport, we simply call
 * VOP_READ() to copy the data from the filesystem into our temporary
 * network buffer.
 *
 * To disable caching, set sendfile_max_size to 0.
 */

uint_t sendfile_read_size = 1024 * 1024;
#define	SENDFILE_REQ_LOWAT	(3 * 1024 * 1024)
uint_t sendfile_req_lowat = SENDFILE_REQ_LOWAT;
uint_t sendfile_req_hiwat = 10 * SENDFILE_REQ_LOWAT;
struct sendfile_stats sf_stats;
struct sendfile_queue *snfq;
clock_t snfq_timeout;
off64_t sendfile_max_size;

static void snf_enque(snf_req_t *, mblk_t *);
static mblk_t *snf_deque(snf_req_t *);

void
sendfile_init(void)
{
	snfq = kmem_zalloc(sizeof (struct sendfile_queue), KM_SLEEP);

	mutex_init(&snfq->snfq_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&snfq->snfq_cv, NULL, CV_DEFAULT, NULL);
	snfq->snfq_max_threads = max_ncpus;
	snfq_timeout = SNFQ_TIMEOUT;
	/* Cache all files by default. */
	sendfile_max_size = MAXOFFSET_T;
}

/*
 * Queues an mblk_t for network processing.
 */
static void
snf_enque(snf_req_t *sr, mblk_t *mp)
{
	mp->b_next = NULL;
	mutex_enter(&sr->sr_lock);
	if (sr->sr_mp_head == NULL) {
		sr->sr_mp_head = sr->sr_mp_tail = mp;
		cv_signal(&sr->sr_cv);
	} else {
		sr->sr_mp_tail->b_next = mp;
		sr->sr_mp_tail = mp;
	}
	sr->sr_qlen += MBLKL(mp);
	while ((sr->sr_qlen > sr->sr_hiwat) &&
	    (sr->sr_write_error == 0)) {
		sf_stats.ss_full_waits++;
		cv_wait(&sr->sr_cv, &sr->sr_lock);
	}
	mutex_exit(&sr->sr_lock);
}

/*
 * Dequeues an mblk_t for network processing.
 */
static mblk_t *
snf_deque(snf_req_t *sr)
{
	mblk_t *mp;

	mutex_enter(&sr->sr_lock);
	/*
	 * If we have encountered an error on read, or the read is
	 * complete and there are no more mblks, return NULL.
	 * We need to check for a NULL sr_mp_head as well, since
	 * the reads could have completed and there may be
	 * nothing more to come.
	 */
	if (((sr->sr_read_error & ~SR_READ_DONE) != 0) ||
	    ((sr->sr_read_error & SR_READ_DONE) &&
	    sr->sr_mp_head == NULL)) {
		mutex_exit(&sr->sr_lock);
		return (NULL);
	}
	/*
	 * To start with, neither SR_READ_DONE is marked nor is
	 * the error set. When we wake up from cv_wait, the
	 * following are the possibilities:
	 *
	 * a) sr_read_error is zero and mblks are queued.
	 * b) sr_read_error is set to SR_READ_DONE
	 *    and mblks are queued.
	 * c) sr_read_error is set to SR_READ_DONE
	 *    and there are no mblks.
	 * d) sr_read_error is set to some error other
	 *    than SR_READ_DONE.
	 */

	while ((sr->sr_read_error == 0) && (sr->sr_mp_head == NULL)) {
		sf_stats.ss_empty_waits++;
		cv_wait(&sr->sr_cv, &sr->sr_lock);
	}
	/* Handle (a) and (b) first - the normal case. */
	if (((sr->sr_read_error & ~SR_READ_DONE) == 0) &&
	    (sr->sr_mp_head != NULL)) {
		mp = sr->sr_mp_head;
		sr->sr_mp_head = mp->b_next;
		sr->sr_qlen -= MBLKL(mp);
		if (sr->sr_qlen < sr->sr_lowat)
			cv_signal(&sr->sr_cv);
		mutex_exit(&sr->sr_lock);
		mp->b_next = NULL;
		return (mp);
	}
	/* Handle (c) and (d). */
	mutex_exit(&sr->sr_lock);
	return (NULL);
}

/*
 * Reads data from the filesystem and queues it for network processing.
 */
void
snf_async_read(snf_req_t *sr)
{
	size_t iosize;
	u_offset_t fileoff;
	u_offset_t size;
	int ret_size;
	int error = 0;
	file_t *fp;
	mblk_t *mp;
	struct vnode *vp;
	int extra = 0;
	int maxblk = 0;
	int wroff = 0;
	struct sonode *so;

	fp = sr->sr_fp;
	size = sr->sr_file_size;
	fileoff = sr->sr_file_off;

	/*
	 * Ignore the error for filesystems that don't support DIRECTIO.
	 */
	(void) VOP_IOCTL(fp->f_vnode, _FIODIRECTIO, DIRECTIO_ON, 0,
	    kcred, NULL, NULL);

	vp = sr->sr_vp;
	if (vp->v_type == VSOCK) {
		stdata_t *stp;

		/*
		 * Get the extra space to insert a header and a trailer.
		 */
		so = VTOSO(vp);
		stp = vp->v_stream;
		if (stp == NULL) {
			wroff = so->so_proto_props.sopp_wroff;
			maxblk = so->so_proto_props.sopp_maxblk;
			extra = wroff + so->so_proto_props.sopp_tail;
		} else {
			wroff = (int)(stp->sd_wroff);
			maxblk = (int)(stp->sd_maxblk);
			extra = wroff + (int)(stp->sd_tail);
		}
	}

	while ((size != 0) && (sr->sr_write_error == 0)) {

		iosize = (int)MIN(sr->sr_maxpsz, size);

		/*
		 * Socket filters can limit the mblk size,
		 * so limit reads to maxblk if there are
		 * filters present.
		 */
		if (vp->v_type == VSOCK &&
		    so->so_filter_active > 0 && maxblk != INFPSZ)
			iosize = (int)MIN(iosize, maxblk);

		if (is_system_labeled()) {
			mp = allocb_cred(iosize + extra, CRED(),
			    curproc->p_pid);
		} else {
			mp = allocb(iosize + extra, BPRI_MED);
		}
		if (mp == NULL) {
			error = EAGAIN;
			break;
		}

		mp->b_rptr += wroff;

		ret_size = soreadfile(fp, mp->b_rptr, fileoff, &error, iosize);

		/* Error or reached EOF? */
		if ((error != 0) || (ret_size == 0)) {
			freeb(mp);
			break;
		}
		mp->b_wptr = mp->b_rptr + ret_size;

		snf_enque(sr, mp);
		size -= ret_size;
		fileoff += ret_size;
	}
	(void) VOP_IOCTL(fp->f_vnode, _FIODIRECTIO, DIRECTIO_OFF, 0,
	    kcred, NULL, NULL);
	mutex_enter(&sr->sr_lock);
	sr->sr_read_error = error;
	sr->sr_read_error |= SR_READ_DONE;
	cv_signal(&sr->sr_cv);
	mutex_exit(&sr->sr_lock);
}

void
snf_async_thread(void)
{
	snf_req_t *sr;
	callb_cpr_t cprinfo;
	clock_t time_left = 1;

	CALLB_CPR_INIT(&cprinfo, &snfq->snfq_lock, callb_generic_cpr, "snfq");

	mutex_enter(&snfq->snfq_lock);
	for (;;) {
		/*
		 * If we didn't find an entry, then block until woken up
		 * again, and then look through the queues again.
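		 * cv_reltimedwait() below returns -1 once snfq_timeout
		 * expires without a wakeup, so finding time_left <= 0 at
		 * the top of the inner loop means this thread idled for
		 * the full timeout with no work queued; it then exits,
		 * which is how the service-thread pool shrinks.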
		 */
		while ((sr = snfq->snfq_req_head) == NULL) {
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			if (time_left <= 0) {
				snfq->snfq_svc_threads--;
				CALLB_CPR_EXIT(&cprinfo);
				thread_exit();
				/* NOTREACHED */
			}
			snfq->snfq_idle_cnt++;

			time_left = cv_reltimedwait(&snfq->snfq_cv,
			    &snfq->snfq_lock, snfq_timeout, TR_CLOCK_TICK);
			snfq->snfq_idle_cnt--;

			CALLB_CPR_SAFE_END(&cprinfo, &snfq->snfq_lock);
		}
		snfq->snfq_req_head = sr->sr_next;
		snfq->snfq_req_cnt--;
		mutex_exit(&snfq->snfq_lock);
		snf_async_read(sr);
		mutex_enter(&snfq->snfq_lock);
	}
}

snf_req_t *
create_thread(int operation, struct vnode *vp, file_t *fp,
    u_offset_t fileoff, u_offset_t size)
{
	snf_req_t *sr;
	stdata_t *stp;

	sr = (snf_req_t *)kmem_zalloc(sizeof (snf_req_t), KM_SLEEP);

	sr->sr_vp = vp;
	sr->sr_fp = fp;
	stp = vp->v_stream;

	/*
	 * Store sd_qn_maxpsz into sr_maxpsz while we have the stream head;
	 * the stream might be closed before the thread returns from
	 * snf_async_read.
	 */
	if (stp != NULL && stp->sd_qn_maxpsz > 0) {
		sr->sr_maxpsz = MIN(MAXBSIZE, stp->sd_qn_maxpsz);
	} else {
		sr->sr_maxpsz = MAXBSIZE;
	}

	sr->sr_operation = operation;
	sr->sr_file_off = fileoff;
	sr->sr_file_size = size;
	sr->sr_hiwat = sendfile_req_hiwat;
	sr->sr_lowat = sendfile_req_lowat;
	mutex_init(&sr->sr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&sr->sr_cv, NULL, CV_DEFAULT, NULL);
	/*
	 * See whether we need another thread for servicing this
	 * request. If there are already enough requests queued
	 * for the threads, create one if not exceeding
	 * snfq_max_threads.
	 */
	mutex_enter(&snfq->snfq_lock);
	if (snfq->snfq_req_cnt >= snfq->snfq_idle_cnt &&
	    snfq->snfq_svc_threads < snfq->snfq_max_threads) {
		(void) thread_create(NULL, 0, &snf_async_thread, 0, 0, &p0,
		    TS_RUN, minclsyspri);
		snfq->snfq_svc_threads++;
	}
	if (snfq->snfq_req_head == NULL) {
		snfq->snfq_req_head = snfq->snfq_req_tail = sr;
		cv_signal(&snfq->snfq_cv);
	} else {
		snfq->snfq_req_tail->sr_next = sr;
		snfq->snfq_req_tail = sr;
	}
	snfq->snfq_req_cnt++;
	mutex_exit(&snfq->snfq_lock);
	return (sr);
}

int
snf_direct_io(file_t *fp, file_t *rfp, u_offset_t fileoff, u_offset_t size,
    ssize_t *count)
{
	snf_req_t *sr;
	mblk_t *mp;
	int iosize;
	int error = 0;
	short fflag;
	struct vnode *vp;
	int ksize;
	struct nmsghdr msg;

	ksize = 0;
	*count = 0;
	bzero(&msg, sizeof (msg));

	vp = fp->f_vnode;
	fflag = fp->f_flag;
	if ((sr = create_thread(READ_OP, vp, rfp, fileoff, size)) == NULL)
		return (EAGAIN);

	/*
	 * We check for a read error in snf_deque. It has to check
	 * for a successful READ_DONE and return NULL, and we might
	 * as well make an additional check there.
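	 * The loop below is the consumer side of the scheme described
	 * in the big comment above: each mblk handed back by snf_deque()
	 * is passed to socket_sendmblk(), and ksize accumulates the
	 * bytes successfully queued to the transport.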
	 */
	while ((mp = snf_deque(sr)) != NULL) {

		if (ISSIG(curthread, JUSTLOOKING)) {
			freeb(mp);
			error = EINTR;
			break;
		}
		iosize = MBLKL(mp);

		error = socket_sendmblk(VTOSO(vp), &msg, fflag, CRED(), &mp);

		if (error != 0) {
			if (mp != NULL)
				freeb(mp);
			break;
		}
		ksize += iosize;
	}
	*count = ksize;

	mutex_enter(&sr->sr_lock);
	sr->sr_write_error = error;
	/* Look at the big comments on why we cv_signal here. */
	cv_signal(&sr->sr_cv);

	/* Always wait for the reader to complete. */
	while (!(sr->sr_read_error & SR_READ_DONE)) {
		cv_wait(&sr->sr_cv, &sr->sr_lock);
	}
	/* If there is no write error, check for a read error. */
	if (error == 0)
		error = (sr->sr_read_error & ~SR_READ_DONE);

	if (error != 0) {
		mblk_t *next_mp;

		mp = sr->sr_mp_head;
		while (mp != NULL) {
			next_mp = mp->b_next;
			mp->b_next = NULL;
			freeb(mp);
			mp = next_mp;
		}
	}
	mutex_exit(&sr->sr_lock);
	kmem_free(sr, sizeof (snf_req_t));
	return (error);
}

/* Maximum no. of pages allocated by vpm for sendfile at a time */
#define	SNF_VPMMAXPGS	(VPMMAXPGS/2)

/*
 * Maximum no. of elements in the list returned by vpm, including
 * NULL for the last entry.
 */
#define	SNF_MAXVMAPS	(SNF_VPMMAXPGS + 1)

typedef struct {
	unsigned int	snfv_ref;
	frtn_t		snfv_frtn;
	vnode_t		*snfv_vp;
	struct vmap	snfv_vml[SNF_MAXVMAPS];
} snf_vmap_desbinfo;

typedef struct {
	frtn_t		snfi_frtn;
	caddr_t		snfi_base;
	uint_t		snfi_mapoff;
	size_t		snfi_len;
	vnode_t		*snfi_vp;
} snf_smap_desbinfo;

/*
 * The callback function used for vpm mapped mblks, called when the last
 * ref of the mblk is dropped, which normally occurs when TCP receives
 * the ack. But it can be the driver too, due to lazy reclaim.
 */
void
snf_vmap_desbfree(snf_vmap_desbinfo *snfv)
{
	ASSERT(snfv->snfv_ref != 0);
	if (atomic_dec_32_nv(&snfv->snfv_ref) == 0) {
		vpm_unmap_pages(snfv->snfv_vml, S_READ);
		VN_RELE(snfv->snfv_vp);
		kmem_free(snfv, sizeof (snf_vmap_desbinfo));
	}
}

/*
 * The callback function used for segmap'ped mblks, called when the last
 * ref of the mblk is dropped, which normally occurs when TCP receives
 * the ack. But it can be the driver too, due to lazy reclaim.
 */
void
snf_smap_desbfree(snf_smap_desbinfo *snfi)
{
	if (!IS_KPM_ADDR(snfi->snfi_base)) {
		/*
		 * We don't need to call segmap_fault(F_SOFTUNLOCK) for
		 * segmap_kpm as long as the latter never falls back to
		 * "use_segmap_range". (See segmap_getmapflt().)
		 *
		 * Using S_OTHER saves a redundant hat_setref() in
		 * segmap_unlock().
		 */
		(void) segmap_fault(kas.a_hat, segkmap,
		    (caddr_t)(uintptr_t)(((uintptr_t)snfi->snfi_base +
		    snfi->snfi_mapoff) & PAGEMASK), snfi->snfi_len,
		    F_SOFTUNLOCK, S_OTHER);
	}
	(void) segmap_release(segkmap, snfi->snfi_base, SM_DONTNEED);
	VN_RELE(snfi->snfi_vp);
	kmem_free(snfi, sizeof (*snfi));
}

/*
 * Use segmap or vpm instead of bcopy to send down a desballoca'ed mblk.
 * When segmap is used, the mblk contains a segmap slot of no more
 * than MAXBSIZE.
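 *
 * Both variants follow the usual STREAMS esballoc pattern: wrap the
 * mapped file pages in an mblk whose free routine tears the mapping
 * down once the last reference is gone. Schematically (the names
 * mapped_addr, off, len and info are placeholders; see the code below
 * for the real details, and snf_vmap_desbfree for the vpm case):
 *
 *	info->frtn.free_func = snf_smap_desbfree;
 *	info->frtn.free_arg = (caddr_t)info;
 *	mp = esballoca((uchar_t *)mapped_addr + off, len, BPRI_HI,
 *	    &info->frtn);
 *	mp->b_datap->db_struioflag |= STRUIO_ZC;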
 *
 * With vpm, a maximum of SNF_MAXVMAPS page-sized mappings can be obtained
 * in each iteration and sent by socket_sendmblk until an error occurs or
 * the requested size has been transferred. An mblk is esballoca'ed from
 * each mapped page, and a chain of these mblks is sent to the transport
 * layer. vpm will be called to unmap the pages when all mblks have been
 * freed by free_func.
 *
 * At the end of the whole sendfile() operation, we wait till the data from
 * the last mblk is ack'ed by the transport before returning so that the
 * caller of sendfile() can safely modify the file content.
 *
 * The caller of this function should make sure that total_size does not
 * exceed the actual file size of fvp.
 */
int
snf_segmap(file_t *fp, vnode_t *fvp, u_offset_t fileoff, u_offset_t total_size,
    ssize_t *count, boolean_t nowait)
{
	caddr_t base;
	int mapoff;
	vnode_t *vp;
	mblk_t *mp = NULL;
	int chain_size;
	int error;
	clock_t deadlk_wait;
	short fflag;
	int ksize;
	struct vattr va;
	boolean_t dowait = B_FALSE;
	struct nmsghdr msg;

	vp = fp->f_vnode;
	fflag = fp->f_flag;
	ksize = 0;
	bzero(&msg, sizeof (msg));

	for (;;) {
		if (ISSIG(curthread, JUSTLOOKING)) {
			error = EINTR;
			break;
		}

		if (vpm_enable) {
			snf_vmap_desbinfo *snfv;
			mblk_t *nmp;
			int mblk_size;
			int maxsize;
			int i;

			mapoff = fileoff & PAGEOFFSET;
			maxsize = MIN((SNF_VPMMAXPGS * PAGESIZE), total_size);

			snfv = kmem_zalloc(sizeof (snf_vmap_desbinfo),
			    KM_SLEEP);

			/*
			 * Get vpm mappings for maxsize with read access.
			 * If the pages aren't available yet, we get
			 * EDEADLK, so wait and try again a little later
			 * using an increasing wait. We might be here a
			 * long time.
			 *
			 * If delay_sig returns EINTR, be sure to exit and
			 * pass it up to the caller.
			 */
			deadlk_wait = 0;
			while ((error = vpm_map_pages(fvp, fileoff,
			    (size_t)maxsize, (VPM_FETCHPAGE), snfv->snfv_vml,
			    SNF_MAXVMAPS, NULL, S_READ)) == EDEADLK) {
				deadlk_wait += (deadlk_wait < 5) ? 1 : 4;
				if ((error = delay_sig(deadlk_wait)) != 0) {
					break;
				}
			}
			if (error != 0) {
				kmem_free(snfv, sizeof (snf_vmap_desbinfo));
				error = (error == EINTR) ? EINTR : EIO;
				goto out;
			}
			snfv->snfv_frtn.free_func = snf_vmap_desbfree;
			snfv->snfv_frtn.free_arg = (caddr_t)snfv;

			/* Construct the mblk chain from the page mappings */
			chain_size = 0;
			for (i = 0; (snfv->snfv_vml[i].vs_addr != NULL) &&
			    total_size > 0; i++) {
				ASSERT(chain_size < maxsize);
				mblk_size = MIN(snfv->snfv_vml[i].vs_len -
				    mapoff, total_size);
				nmp = esballoca(
				    (uchar_t *)snfv->snfv_vml[i].vs_addr +
				    mapoff, mblk_size, BPRI_HI,
				    &snfv->snfv_frtn);

				/*
				 * We return EAGAIN after unmapping the pages
				 * if we cannot allocate the head of the
				 * chain. Otherwise, we continue sending the
				 * mblks constructed so far.
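				 * Either way, snfv_ref has already been
				 * bumped once per mblk built, so
				 * snf_vmap_desbfree() unmaps the whole
				 * set only after every mblk that was
				 * actually sent has been freed.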
				 */
				if (nmp == NULL) {
					if (i == 0) {
						vpm_unmap_pages(snfv->snfv_vml,
						    S_READ);
						kmem_free(snfv,
						    sizeof (snf_vmap_desbinfo));
						error = EAGAIN;
						goto out;
					}
					break;
				}
				/* Mark this dblk with the zero-copy flag */
				nmp->b_datap->db_struioflag |= STRUIO_ZC;
				nmp->b_wptr += mblk_size;
				chain_size += mblk_size;
				fileoff += mblk_size;
				total_size -= mblk_size;
				snfv->snfv_ref++;
				mapoff = 0;
				if (i > 0)
					linkb(mp, nmp);
				else
					mp = nmp;
			}
			VN_HOLD(fvp);
			snfv->snfv_vp = fvp;
		} else {
			/* vpm is not supported; fall back to segmap. */
			snf_smap_desbinfo *snfi;

			mapoff = fileoff & MAXBOFFSET;
			chain_size = MAXBSIZE - mapoff;
			if (chain_size > total_size)
				chain_size = total_size;
			/*
			 * We don't forcefault because we'll call
			 * segmap_fault(F_SOFTLOCK) next.
			 *
			 * S_READ will get the ref bit set (by either
			 * segmap_getmapflt() or segmap_fault()) and the
			 * page shared locked.
			 */
			base = segmap_getmapflt(segkmap, fvp, fileoff,
			    chain_size, segmap_kpm ? SM_FAULT : 0, S_READ);

			snfi = kmem_alloc(sizeof (*snfi), KM_SLEEP);
			snfi->snfi_len = (size_t)roundup(mapoff + chain_size,
			    PAGESIZE) - (mapoff & PAGEMASK);
			/*
			 * We must call segmap_fault() even for segmap_kpm
			 * because that's how errors get returned.
			 * (segmap_getmapflt() never fails but segmap_fault()
			 * does.)
			 *
			 * If the pages aren't available yet, we get
			 * EDEADLK, so wait and try again a little later
			 * using an increasing wait. We might be here a
			 * long time.
			 *
			 * If delay_sig returns EINTR, be sure to exit and
			 * pass it up to the caller.
			 */
			deadlk_wait = 0;
			while ((error = FC_ERRNO(segmap_fault(kas.a_hat,
			    segkmap, (caddr_t)(uintptr_t)(((uintptr_t)base +
			    mapoff) & PAGEMASK), snfi->snfi_len, F_SOFTLOCK,
			    S_READ))) == EDEADLK) {
				deadlk_wait += (deadlk_wait < 5) ? 1 : 4;
				if ((error = delay_sig(deadlk_wait)) != 0) {
					break;
				}
			}
			if (error != 0) {
				(void) segmap_release(segkmap, base, 0);
				kmem_free(snfi, sizeof (*snfi));
				error = (error == EINTR) ? EINTR : EIO;
				goto out;
			}
			snfi->snfi_frtn.free_func = snf_smap_desbfree;
			snfi->snfi_frtn.free_arg = (caddr_t)snfi;
			snfi->snfi_base = base;
			snfi->snfi_mapoff = mapoff;
			mp = esballoca((uchar_t *)base + mapoff, chain_size,
			    BPRI_HI, &snfi->snfi_frtn);

			if (mp == NULL) {
				(void) segmap_fault(kas.a_hat, segkmap,
				    (caddr_t)(uintptr_t)(((uintptr_t)base +
				    mapoff) & PAGEMASK), snfi->snfi_len,
				    F_SOFTUNLOCK, S_OTHER);
				(void) segmap_release(segkmap, base, 0);
				kmem_free(snfi, sizeof (*snfi));
				error = EAGAIN;
				goto out;
			}
			VN_HOLD(fvp);
			snfi->snfi_vp = fvp;
			mp->b_wptr += chain_size;

			/* Mark this dblk with the zero-copy flag */
			mp->b_datap->db_struioflag |= STRUIO_ZC;
			fileoff += chain_size;
			total_size -= chain_size;
		}

		if (total_size == 0 && !nowait) {
			ASSERT(!dowait);
			dowait = B_TRUE;
			mp->b_datap->db_struioflag |= STRUIO_ZCNOTIFY;
		}
		VOP_RWUNLOCK(fvp, V_WRITELOCK_FALSE, NULL);
		error = socket_sendmblk(VTOSO(vp), &msg, fflag, CRED(), &mp);
		if (error != 0) {
			/*
			 * mp contains the mblks that were not sent by
			 * socket_sendmblk.
			 * Use their size to update *count.
			 */
			*count = ksize + (chain_size - msgdsize(mp));
			if (mp != NULL)
				freemsg(mp);
			return (error);
		}
		ksize += chain_size;
		if (total_size == 0)
			goto done;

		(void) VOP_RWLOCK(fvp, V_WRITELOCK_FALSE, NULL);
		va.va_mask = AT_SIZE;
		error = VOP_GETATTR(fvp, &va, 0, kcred, NULL);
		if (error)
			break;
		/* Read as much as possible. */
		if (fileoff >= va.va_size)
			break;
		if (total_size + fileoff > va.va_size)
			total_size = va.va_size - fileoff;
	}
out:
	VOP_RWUNLOCK(fvp, V_WRITELOCK_FALSE, NULL);
done:
	*count = ksize;
	if (dowait) {
		stdata_t *stp;

		stp = vp->v_stream;
		if (stp == NULL) {
			struct sonode *so;

			so = VTOSO(vp);
			error = so_zcopy_wait(so);
		} else {
			mutex_enter(&stp->sd_lock);
			while (!(stp->sd_flag & STZCNOTIFY)) {
				if (cv_wait_sig(&stp->sd_zcopy_wait,
				    &stp->sd_lock) == 0) {
					error = EINTR;
					break;
				}
			}
			stp->sd_flag &= ~STZCNOTIFY;
			mutex_exit(&stp->sd_lock);
		}
	}
	return (error);
}

int
snf_cache(file_t *fp, vnode_t *fvp, u_offset_t fileoff, u_offset_t size,
    uint_t maxpsz, ssize_t *count)
{
	struct vnode *vp;
	mblk_t *mp;
	int iosize;
	int extra = 0;
	int error;
	short fflag;
	int ksize;
	int ioflag;
	struct uio auio;
	struct iovec aiov;
	struct vattr va;
	int maxblk = 0;
	int wroff = 0;
	struct sonode *so;
	struct nmsghdr msg;

	vp = fp->f_vnode;
	if (vp->v_type == VSOCK) {
		stdata_t *stp;

		/*
		 * Get the extra space to insert a header and a trailer.
		 */
		so = VTOSO(vp);
		stp = vp->v_stream;
		if (stp == NULL) {
			wroff = so->so_proto_props.sopp_wroff;
			maxblk = so->so_proto_props.sopp_maxblk;
			extra = wroff + so->so_proto_props.sopp_tail;
		} else {
			wroff = (int)(stp->sd_wroff);
			maxblk = (int)(stp->sd_maxblk);
			extra = wroff + (int)(stp->sd_tail);
		}
	}
	bzero(&msg, sizeof (msg));
	fflag = fp->f_flag;
	ksize = 0;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_llimit = MAXOFFSET_T;
	auio.uio_fmode = fflag;
	auio.uio_extflg = UIO_COPY_CACHED;
	ioflag = auio.uio_fmode & (FSYNC|FDSYNC|FRSYNC);
	/* If read sync is not asked for, filter sync flags */
	if ((ioflag & FRSYNC) == 0)
		ioflag &= ~(FSYNC|FDSYNC);
	for (;;) {
		if (ISSIG(curthread, JUSTLOOKING)) {
			error = EINTR;
			break;
		}
		iosize = (int)MIN(maxpsz, size);

		/*
		 * Socket filters can limit the mblk size,
		 * so limit reads to maxblk if there are
		 * filters present.
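		 * (This is the same clamp that snf_async_read() applies
		 * on the direct I/O path.)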
		 */
		if (vp->v_type == VSOCK &&
		    so->so_filter_active > 0 && maxblk != INFPSZ)
			iosize = (int)MIN(iosize, maxblk);

		if (is_system_labeled()) {
			mp = allocb_cred(iosize + extra, CRED(),
			    curproc->p_pid);
		} else {
			mp = allocb(iosize + extra, BPRI_MED);
		}
		if (mp == NULL) {
			error = EAGAIN;
			break;
		}

		mp->b_rptr += wroff;

		aiov.iov_base = (caddr_t)mp->b_rptr;
		aiov.iov_len = iosize;
		auio.uio_loffset = fileoff;
		auio.uio_resid = iosize;

		error = VOP_READ(fvp, &auio, ioflag, fp->f_cred, NULL);
		iosize -= auio.uio_resid;

		if (error == EINTR && iosize != 0)
			error = 0;

		if (error != 0 || iosize == 0) {
			freeb(mp);
			break;
		}
		mp->b_wptr = mp->b_rptr + iosize;

		VOP_RWUNLOCK(fvp, V_WRITELOCK_FALSE, NULL);

		error = socket_sendmblk(VTOSO(vp), &msg, fflag, CRED(), &mp);

		if (error != 0) {
			*count = ksize;
			if (mp != NULL)
				freeb(mp);
			return (error);
		}
		ksize += iosize;
		size -= iosize;
		if (size == 0)
			goto done;

		fileoff += iosize;
		(void) VOP_RWLOCK(fvp, V_WRITELOCK_FALSE, NULL);
		va.va_mask = AT_SIZE;
		error = VOP_GETATTR(fvp, &va, 0, kcred, NULL);
		if (error)
			break;
		/* Read as much as possible. */
		if (fileoff >= va.va_size)
			size = 0;
		else if (size + fileoff > va.va_size)
			size = va.va_size - fileoff;
	}
	VOP_RWUNLOCK(fvp, V_WRITELOCK_FALSE, NULL);
done:
	*count = ksize;
	return (error);
}

#if defined(_SYSCALL32_IMPL) || defined(_ILP32)
/*
 * Largefile support for 32 bit applications only.
 */
int
sosendfile64(file_t *fp, file_t *rfp, const struct ksendfilevec64 *sfv,
    ssize32_t *count32)
{
	ssize32_t sfv_len;
	u_offset_t sfv_off, va_size;
	struct vnode *vp, *fvp, *realvp;
	struct vattr va;
	stdata_t *stp;
	ssize_t count = 0;
	int error = 0;
	boolean_t dozcopy = B_FALSE;
	uint_t maxpsz;

	sfv_len = (ssize32_t)sfv->sfv_len;
	if (sfv_len < 0) {
		error = EINVAL;
		goto out;
	}

	if (sfv_len == 0)
		goto out;

	sfv_off = (u_offset_t)sfv->sfv_off;

	/* Same checks as in pread */
	if (sfv_off > MAXOFFSET_T) {
		error = EINVAL;
		goto out;
	}
	if (sfv_off + sfv_len > MAXOFFSET_T)
		sfv_len = (ssize32_t)(MAXOFFSET_T - sfv_off);

	/*
	 * There are no more checks on sfv_len. So, we cast it to
	 * u_offset_t and share the snf_direct_io/snf_cache code between
	 * 32 bit and 64 bit.
	 *
	 * TODO: should do nbl_need_check() like read()?
	 */
	if (sfv_len > sendfile_max_size) {
		sf_stats.ss_file_not_cached++;
		error = snf_direct_io(fp, rfp, sfv_off, (u_offset_t)sfv_len,
		    &count);
		goto out;
	}
	fvp = rfp->f_vnode;
	if (VOP_REALVP(fvp, &realvp, NULL) == 0)
		fvp = realvp;
	/*
	 * Grab the lock as a reader to prevent the file size
	 * from changing underneath.
	 */
	(void) VOP_RWLOCK(fvp, V_WRITELOCK_FALSE, NULL);
	va.va_mask = AT_SIZE;
	error = VOP_GETATTR(fvp, &va, 0, kcred, NULL);
	va_size = va.va_size;
	if ((error != 0) || (va_size == 0) || (sfv_off >= va_size)) {
		VOP_RWUNLOCK(fvp, V_WRITELOCK_FALSE, NULL);
		goto out;
	}
	/* Read as much as possible. */
	if (sfv_off + sfv_len > va_size)
		sfv_len = va_size - sfv_off;

	vp = fp->f_vnode;
	stp = vp->v_stream;
	/*
	 * When the NOWAIT flag is not set, we enable zero-copy only if the
	 * transfer size is large enough. This prevents performance loss
	 * when the caller sends the file piece by piece.
	 */
	if (sfv_len >= MAXBSIZE && (sfv_len >= (va_size >> 1) ||
	    (sfv->sfv_flag & SFV_NOWAIT) || sfv_len >= 0x1000000) &&
	    !vn_has_flocks(fvp) && !(fvp->v_flag & VNOMAP)) {
		uint_t copyflag;

		copyflag = stp != NULL ? stp->sd_copyflag :
		    VTOSO(vp)->so_proto_props.sopp_zcopyflag;
		if ((copyflag & (STZCVMSAFE|STZCVMUNSAFE)) == 0) {
			int on = 1;

			if (socket_setsockopt(VTOSO(vp), SOL_SOCKET,
			    SO_SND_COPYAVOID, &on, sizeof (on), CRED()) == 0)
				dozcopy = B_TRUE;
		} else {
			dozcopy = copyflag & STZCVMSAFE;
		}
	}
	if (dozcopy) {
		sf_stats.ss_file_segmap++;
		error = snf_segmap(fp, fvp, sfv_off, (u_offset_t)sfv_len,
		    &count, ((sfv->sfv_flag & SFV_NOWAIT) != 0));
	} else {
		if (vp->v_type == VSOCK && stp == NULL) {
			sonode_t *so = VTOSO(vp);

			maxpsz = so->so_proto_props.sopp_maxpsz;
		} else if (stp != NULL) {
			maxpsz = stp->sd_qn_maxpsz;
		} else {
			maxpsz = maxphys;
		}

		if (maxpsz == INFPSZ)
			maxpsz = maxphys;
		else
			maxpsz = roundup(maxpsz, MAXBSIZE);
		sf_stats.ss_file_cached++;
		error = snf_cache(fp, fvp, sfv_off, (u_offset_t)sfv_len,
		    maxpsz, &count);
	}
out:
	releasef(sfv->sfv_fd);
	*count32 = (ssize32_t)count;
	return (error);
}
#endif	/* _SYSCALL32_IMPL || _ILP32 */

#ifdef _SYSCALL32_IMPL
/*
 * recv32(), recvfrom32(), send32(), sendto32(): intentionally return a
 * ssize_t rather than ssize32_t; see the comments above read32 for details.
 */

ssize_t
recv32(int32_t sock, caddr32_t buffer, size32_t len, int32_t flags)
{
	return (recv(sock, (void *)(uintptr_t)buffer, (ssize32_t)len, flags));
}

ssize_t
recvfrom32(int32_t sock, caddr32_t buffer, size32_t len, int32_t flags,
    caddr32_t name, caddr32_t namelenp)
{
	return (recvfrom(sock, (void *)(uintptr_t)buffer, (ssize32_t)len,
	    flags, (void *)(uintptr_t)name, (void *)(uintptr_t)namelenp));
}

ssize_t
send32(int32_t sock, caddr32_t buffer, size32_t len, int32_t flags)
{
	return (send(sock, (void *)(uintptr_t)buffer, (ssize32_t)len, flags));
}

ssize_t
sendto32(int32_t sock, caddr32_t buffer, size32_t len, int32_t flags,
    caddr32_t name, socklen_t namelen)
{
	return (sendto(sock, (void *)(uintptr_t)buffer, (ssize32_t)len, flags,
	    (void *)(uintptr_t)name, namelen));
}
#endif	/* _SYSCALL32_IMPL */

/*
 * Function wrappers (mostly around the sonode switch) for
 * backward compatibility.
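 * Each wrapper simply delegates to the corresponding socket_*()
 * common-code entry point, supplying the current thread's credentials
 * via CRED().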
3087 */ 3088 3089 int 3090 soaccept(struct sonode *so, int fflag, struct sonode **nsop) 3091 { 3092 return (socket_accept(so, fflag, CRED(), nsop)); 3093 } 3094 3095 int 3096 sobind(struct sonode *so, struct sockaddr *name, socklen_t namelen, 3097 int backlog, int flags) 3098 { 3099 int error; 3100 3101 error = socket_bind(so, name, namelen, flags, CRED()); 3102 if (error == 0 && backlog != 0) 3103 return (socket_listen(so, backlog, CRED())); 3104 3105 return (error); 3106 } 3107 3108 int 3109 solisten(struct sonode *so, int backlog) 3110 { 3111 return (socket_listen(so, backlog, CRED())); 3112 } 3113 3114 int 3115 soconnect(struct sonode *so, struct sockaddr *name, socklen_t namelen, 3116 int fflag, int flags) 3117 { 3118 return (socket_connect(so, name, namelen, fflag, flags, CRED())); 3119 } 3120 3121 int 3122 sorecvmsg(struct sonode *so, struct nmsghdr *msg, struct uio *uiop) 3123 { 3124 return (socket_recvmsg(so, msg, uiop, CRED())); 3125 } 3126 3127 int 3128 sosendmsg(struct sonode *so, struct nmsghdr *msg, struct uio *uiop) 3129 { 3130 return (socket_sendmsg(so, msg, uiop, CRED())); 3131 } 3132 3133 int 3134 soshutdown(struct sonode *so, int how) 3135 { 3136 return (socket_shutdown(so, how, CRED())); 3137 } 3138 3139 int 3140 sogetsockopt(struct sonode *so, int level, int option_name, void *optval, 3141 socklen_t *optlenp, int flags) 3142 { 3143 return (socket_getsockopt(so, level, option_name, optval, optlenp, 3144 flags, CRED())); 3145 } 3146 3147 int 3148 sosetsockopt(struct sonode *so, int level, int option_name, const void *optval, 3149 t_uscalar_t optlen) 3150 { 3151 return (socket_setsockopt(so, level, option_name, optval, optlen, 3152 CRED())); 3153 } 3154 3155 /* 3156 * Because this is backward compatibility interface it only needs to be 3157 * able to handle the creation of TPI sockfs sockets. 3158 */ 3159 struct sonode * 3160 socreate(struct sockparams *sp, int family, int type, int protocol, int version, 3161 int *errorp) 3162 { 3163 struct sonode *so; 3164 3165 ASSERT(sp != NULL); 3166 3167 so = sp->sp_smod_info->smod_sock_create_func(sp, family, type, protocol, 3168 version, SOCKET_SLEEP, errorp, CRED()); 3169 if (so == NULL) { 3170 SOCKPARAMS_DEC_REF(sp); 3171 } else { 3172 if ((*errorp = SOP_INIT(so, NULL, CRED(), SOCKET_SLEEP)) == 0) { 3173 /* Cannot fail, only bumps so_count */ 3174 (void) VOP_OPEN(&SOTOV(so), FREAD|FWRITE, CRED(), NULL); 3175 } else { 3176 socket_destroy(so); 3177 so = NULL; 3178 } 3179 } 3180 return (so); 3181 } 3182