1 /*- 2 * Copyright (c) 1982, 1986, 1989, 1990, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * sendfile(2) and related extensions: 6 * Copyright (c) 1998, David Greenman. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 4. Neither the name of the University nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 * 32 * @(#)uipc_syscalls.c 8.4 (Berkeley) 2/21/94 33 */ 34 35 #include <sys/cdefs.h> 36 __FBSDID("$FreeBSD$"); 37 38 #include "opt_capsicum.h" 39 #include "opt_inet.h" 40 #include "opt_inet6.h" 41 #include "opt_sctp.h" 42 #include "opt_compat.h" 43 #include "opt_ktrace.h" 44 45 #include <sys/param.h> 46 #include <sys/systm.h> 47 #include <sys/capability.h> 48 #include <sys/condvar.h> 49 #include <sys/kernel.h> 50 #include <sys/lock.h> 51 #include <sys/mutex.h> 52 #include <sys/sysproto.h> 53 #include <sys/malloc.h> 54 #include <sys/filedesc.h> 55 #include <sys/event.h> 56 #include <sys/proc.h> 57 #include <sys/fcntl.h> 58 #include <sys/file.h> 59 #include <sys/filio.h> 60 #include <sys/jail.h> 61 #include <sys/mman.h> 62 #include <sys/mount.h> 63 #include <sys/mbuf.h> 64 #include <sys/protosw.h> 65 #include <sys/rwlock.h> 66 #include <sys/sf_buf.h> 67 #include <sys/sf_sync.h> 68 #include <sys/sf_base.h> 69 #include <sys/sysent.h> 70 #include <sys/socket.h> 71 #include <sys/socketvar.h> 72 #include <sys/signalvar.h> 73 #include <sys/syscallsubr.h> 74 #include <sys/sysctl.h> 75 #include <sys/uio.h> 76 #include <sys/vnode.h> 77 #ifdef KTRACE 78 #include <sys/ktrace.h> 79 #endif 80 #ifdef COMPAT_FREEBSD32 81 #include <compat/freebsd32/freebsd32_util.h> 82 #endif 83 84 #include <net/vnet.h> 85 86 #include <security/audit/audit.h> 87 #include <security/mac/mac_framework.h> 88 89 #include <vm/vm.h> 90 #include <vm/vm_param.h> 91 #include <vm/vm_object.h> 92 #include <vm/vm_page.h> 93 #include <vm/vm_pager.h> 94 #include <vm/vm_kern.h> 95 #include <vm/vm_extern.h> 96 #include <vm/uma.h> 97 98 #if defined(INET) || defined(INET6) 99 #ifdef SCTP 100 #include <netinet/sctp.h> 101 #include <netinet/sctp_peeloff.h> 102 #endif /* SCTP */ 103 #endif /* INET || INET6 */ 104 105 /* 106 * 
Flags for accept1() and kern_accept4(), in addition to SOCK_CLOEXEC 107 * and SOCK_NONBLOCK. 108 */ 109 #define ACCEPT4_INHERIT 0x1 110 #define ACCEPT4_COMPAT 0x2 111 112 static int sendit(struct thread *td, int s, struct msghdr *mp, int flags); 113 static int recvit(struct thread *td, int s, struct msghdr *mp, void *namelenp); 114 115 static int accept1(struct thread *td, int s, struct sockaddr *uname, 116 socklen_t *anamelen, int flags); 117 static int do_sendfile(struct thread *td, struct sendfile_args *uap, 118 int compat); 119 static int getsockname1(struct thread *td, struct getsockname_args *uap, 120 int compat); 121 static int getpeername1(struct thread *td, struct getpeername_args *uap, 122 int compat); 123 124 counter_u64_t sfstat[sizeof(struct sfstat) / sizeof(uint64_t)]; 125 126 /* 127 * sendfile(2)-related variables and associated sysctls 128 */ 129 static SYSCTL_NODE(_kern_ipc, OID_AUTO, sendfile, CTLFLAG_RW, 0, 130 "sendfile(2) tunables"); 131 static int sfreadahead = 1; 132 SYSCTL_INT(_kern_ipc_sendfile, OID_AUTO, readahead, CTLFLAG_RW, 133 &sfreadahead, 0, "Number of sendfile(2) read-ahead MAXBSIZE blocks"); 134 135 static uma_zone_t zone_sfsync; 136 137 static void 138 sfstat_init(const void *unused) 139 { 140 141 COUNTER_ARRAY_ALLOC(sfstat, sizeof(struct sfstat) / sizeof(uint64_t), 142 M_WAITOK); 143 } 144 SYSINIT(sfstat, SI_SUB_MBUF, SI_ORDER_FIRST, sfstat_init, NULL); 145 146 static void 147 sf_sync_init(const void *unused) 148 { 149 150 zone_sfsync = uma_zcreate("sendfile_sync", sizeof(struct sendfile_sync), 151 NULL, NULL, 152 NULL, NULL, 153 UMA_ALIGN_CACHE, 154 0); 155 } 156 SYSINIT(sf_sync, SI_SUB_MBUF, SI_ORDER_FIRST, sf_sync_init, NULL); 157 158 static int 159 sfstat_sysctl(SYSCTL_HANDLER_ARGS) 160 { 161 struct sfstat s; 162 163 COUNTER_ARRAY_COPY(sfstat, &s, sizeof(s) / sizeof(uint64_t)); 164 if (req->newptr) 165 COUNTER_ARRAY_ZERO(sfstat, sizeof(s) / sizeof(uint64_t)); 166 return (SYSCTL_OUT(req, &s, sizeof(s))); 167 } 168 SYSCTL_PROC(_kern_ipc, OID_AUTO, sfstat, CTLTYPE_OPAQUE | CTLFLAG_RW, 169 NULL, 0, sfstat_sysctl, "I", "sendfile statistics"); 170 171 /* 172 * Convert a user file descriptor to a kernel file entry and check if required 173 * capability rights are present. 174 * A reference on the file entry is held upon returning. 175 */ 176 static int 177 getsock_cap(struct filedesc *fdp, int fd, cap_rights_t *rightsp, 178 struct file **fpp, u_int *fflagp) 179 { 180 struct file *fp; 181 int error; 182 183 error = fget_unlocked(fdp, fd, rightsp, 0, &fp, NULL); 184 if (error != 0) 185 return (error); 186 if (fp->f_type != DTYPE_SOCKET) { 187 fdrop(fp, curthread); 188 return (ENOTSOCK); 189 } 190 if (fflagp != NULL) 191 *fflagp = fp->f_flag; 192 *fpp = fp; 193 return (0); 194 } 195 196 /* 197 * System call interface to the socket abstraction. 
198 */ 199 #if defined(COMPAT_43) 200 #define COMPAT_OLDSOCK 201 #endif 202 203 int 204 sys_socket(td, uap) 205 struct thread *td; 206 struct socket_args /* { 207 int domain; 208 int type; 209 int protocol; 210 } */ *uap; 211 { 212 struct socket *so; 213 struct file *fp; 214 int fd, error, type, oflag, fflag; 215 216 AUDIT_ARG_SOCKET(uap->domain, uap->type, uap->protocol); 217 218 type = uap->type; 219 oflag = 0; 220 fflag = 0; 221 if ((type & SOCK_CLOEXEC) != 0) { 222 type &= ~SOCK_CLOEXEC; 223 oflag |= O_CLOEXEC; 224 } 225 if ((type & SOCK_NONBLOCK) != 0) { 226 type &= ~SOCK_NONBLOCK; 227 fflag |= FNONBLOCK; 228 } 229 230 #ifdef MAC 231 error = mac_socket_check_create(td->td_ucred, uap->domain, type, 232 uap->protocol); 233 if (error != 0) 234 return (error); 235 #endif 236 error = falloc(td, &fp, &fd, oflag); 237 if (error != 0) 238 return (error); 239 /* An extra reference on `fp' has been held for us by falloc(). */ 240 error = socreate(uap->domain, &so, type, uap->protocol, 241 td->td_ucred, td); 242 if (error != 0) { 243 fdclose(td->td_proc->p_fd, fp, fd, td); 244 } else { 245 finit(fp, FREAD | FWRITE | fflag, DTYPE_SOCKET, so, &socketops); 246 if ((fflag & FNONBLOCK) != 0) 247 (void) fo_ioctl(fp, FIONBIO, &fflag, td->td_ucred, td); 248 td->td_retval[0] = fd; 249 } 250 fdrop(fp, td); 251 return (error); 252 } 253 254 /* ARGSUSED */ 255 int 256 sys_bind(td, uap) 257 struct thread *td; 258 struct bind_args /* { 259 int s; 260 caddr_t name; 261 int namelen; 262 } */ *uap; 263 { 264 struct sockaddr *sa; 265 int error; 266 267 error = getsockaddr(&sa, uap->name, uap->namelen); 268 if (error == 0) { 269 error = kern_bind(td, uap->s, sa); 270 free(sa, M_SONAME); 271 } 272 return (error); 273 } 274 275 static int 276 kern_bindat(struct thread *td, int dirfd, int fd, struct sockaddr *sa) 277 { 278 struct socket *so; 279 struct file *fp; 280 cap_rights_t rights; 281 int error; 282 283 AUDIT_ARG_FD(fd); 284 AUDIT_ARG_SOCKADDR(td, dirfd, sa); 285 error = getsock_cap(td->td_proc->p_fd, fd, 286 cap_rights_init(&rights, CAP_BIND), &fp, NULL); 287 if (error != 0) 288 return (error); 289 so = fp->f_data; 290 #ifdef KTRACE 291 if (KTRPOINT(td, KTR_STRUCT)) 292 ktrsockaddr(sa); 293 #endif 294 #ifdef MAC 295 error = mac_socket_check_bind(td->td_ucred, so, sa); 296 if (error == 0) { 297 #endif 298 if (dirfd == AT_FDCWD) 299 error = sobind(so, sa, td); 300 else 301 error = sobindat(dirfd, so, sa, td); 302 #ifdef MAC 303 } 304 #endif 305 fdrop(fp, td); 306 return (error); 307 } 308 309 int 310 kern_bind(struct thread *td, int fd, struct sockaddr *sa) 311 { 312 313 return (kern_bindat(td, AT_FDCWD, fd, sa)); 314 } 315 316 /* ARGSUSED */ 317 int 318 sys_bindat(td, uap) 319 struct thread *td; 320 struct bindat_args /* { 321 int fd; 322 int s; 323 caddr_t name; 324 int namelen; 325 } */ *uap; 326 { 327 struct sockaddr *sa; 328 int error; 329 330 error = getsockaddr(&sa, uap->name, uap->namelen); 331 if (error == 0) { 332 error = kern_bindat(td, uap->fd, uap->s, sa); 333 free(sa, M_SONAME); 334 } 335 return (error); 336 } 337 338 /* ARGSUSED */ 339 int 340 sys_listen(td, uap) 341 struct thread *td; 342 struct listen_args /* { 343 int s; 344 int backlog; 345 } */ *uap; 346 { 347 struct socket *so; 348 struct file *fp; 349 cap_rights_t rights; 350 int error; 351 352 AUDIT_ARG_FD(uap->s); 353 error = getsock_cap(td->td_proc->p_fd, uap->s, 354 cap_rights_init(&rights, CAP_LISTEN), &fp, NULL); 355 if (error == 0) { 356 so = fp->f_data; 357 #ifdef MAC 358 error = mac_socket_check_listen(td->td_ucred, so); 359 if 
(error == 0) 360 #endif 361 error = solisten(so, uap->backlog, td); 362 fdrop(fp, td); 363 } 364 return(error); 365 } 366 367 /* 368 * accept1() 369 */ 370 static int 371 accept1(td, s, uname, anamelen, flags) 372 struct thread *td; 373 int s; 374 struct sockaddr *uname; 375 socklen_t *anamelen; 376 int flags; 377 { 378 struct sockaddr *name; 379 socklen_t namelen; 380 struct file *fp; 381 int error; 382 383 if (uname == NULL) 384 return (kern_accept4(td, s, NULL, NULL, flags, NULL)); 385 386 error = copyin(anamelen, &namelen, sizeof (namelen)); 387 if (error != 0) 388 return (error); 389 390 error = kern_accept4(td, s, &name, &namelen, flags, &fp); 391 392 /* 393 * return a namelen of zero for older code which might 394 * ignore the return value from accept. 395 */ 396 if (error != 0) { 397 (void) copyout(&namelen, anamelen, sizeof(*anamelen)); 398 return (error); 399 } 400 401 if (error == 0 && uname != NULL) { 402 #ifdef COMPAT_OLDSOCK 403 if (flags & ACCEPT4_COMPAT) 404 ((struct osockaddr *)name)->sa_family = 405 name->sa_family; 406 #endif 407 error = copyout(name, uname, namelen); 408 } 409 if (error == 0) 410 error = copyout(&namelen, anamelen, 411 sizeof(namelen)); 412 if (error != 0) 413 fdclose(td->td_proc->p_fd, fp, td->td_retval[0], td); 414 fdrop(fp, td); 415 free(name, M_SONAME); 416 return (error); 417 } 418 419 int 420 kern_accept(struct thread *td, int s, struct sockaddr **name, 421 socklen_t *namelen, struct file **fp) 422 { 423 return (kern_accept4(td, s, name, namelen, ACCEPT4_INHERIT, fp)); 424 } 425 426 int 427 kern_accept4(struct thread *td, int s, struct sockaddr **name, 428 socklen_t *namelen, int flags, struct file **fp) 429 { 430 struct filedesc *fdp; 431 struct file *headfp, *nfp = NULL; 432 struct sockaddr *sa = NULL; 433 struct socket *head, *so; 434 cap_rights_t rights; 435 u_int fflag; 436 pid_t pgid; 437 int error, fd, tmp; 438 439 if (name != NULL) 440 *name = NULL; 441 442 AUDIT_ARG_FD(s); 443 fdp = td->td_proc->p_fd; 444 error = getsock_cap(fdp, s, cap_rights_init(&rights, CAP_ACCEPT), 445 &headfp, &fflag); 446 if (error != 0) 447 return (error); 448 head = headfp->f_data; 449 if ((head->so_options & SO_ACCEPTCONN) == 0) { 450 error = EINVAL; 451 goto done; 452 } 453 #ifdef MAC 454 error = mac_socket_check_accept(td->td_ucred, head); 455 if (error != 0) 456 goto done; 457 #endif 458 error = falloc(td, &nfp, &fd, (flags & SOCK_CLOEXEC) ? O_CLOEXEC : 0); 459 if (error != 0) 460 goto done; 461 ACCEPT_LOCK(); 462 if ((head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->so_comp)) { 463 ACCEPT_UNLOCK(); 464 error = EWOULDBLOCK; 465 goto noconnection; 466 } 467 while (TAILQ_EMPTY(&head->so_comp) && head->so_error == 0) { 468 if (head->so_rcv.sb_state & SBS_CANTRCVMORE) { 469 head->so_error = ECONNABORTED; 470 break; 471 } 472 error = msleep(&head->so_timeo, &accept_mtx, PSOCK | PCATCH, 473 "accept", 0); 474 if (error != 0) { 475 ACCEPT_UNLOCK(); 476 goto noconnection; 477 } 478 } 479 if (head->so_error) { 480 error = head->so_error; 481 head->so_error = 0; 482 ACCEPT_UNLOCK(); 483 goto noconnection; 484 } 485 so = TAILQ_FIRST(&head->so_comp); 486 KASSERT(!(so->so_qstate & SQ_INCOMP), ("accept1: so SQ_INCOMP")); 487 KASSERT(so->so_qstate & SQ_COMP, ("accept1: so not SQ_COMP")); 488 489 /* 490 * Before changing the flags on the socket, we have to bump the 491 * reference count. Otherwise, if the protocol calls sofree(), 492 * the socket will be released due to a zero refcount. 
493 */ 494 SOCK_LOCK(so); /* soref() and so_state update */ 495 soref(so); /* file descriptor reference */ 496 497 TAILQ_REMOVE(&head->so_comp, so, so_list); 498 head->so_qlen--; 499 if (flags & ACCEPT4_INHERIT) 500 so->so_state |= (head->so_state & SS_NBIO); 501 else 502 so->so_state |= (flags & SOCK_NONBLOCK) ? SS_NBIO : 0; 503 so->so_qstate &= ~SQ_COMP; 504 so->so_head = NULL; 505 506 SOCK_UNLOCK(so); 507 ACCEPT_UNLOCK(); 508 509 /* An extra reference on `nfp' has been held for us by falloc(). */ 510 td->td_retval[0] = fd; 511 512 /* connection has been removed from the listen queue */ 513 KNOTE_UNLOCKED(&head->so_rcv.sb_sel.si_note, 0); 514 515 if (flags & ACCEPT4_INHERIT) { 516 pgid = fgetown(&head->so_sigio); 517 if (pgid != 0) 518 fsetown(pgid, &so->so_sigio); 519 } else { 520 fflag &= ~(FNONBLOCK | FASYNC); 521 if (flags & SOCK_NONBLOCK) 522 fflag |= FNONBLOCK; 523 } 524 525 finit(nfp, fflag, DTYPE_SOCKET, so, &socketops); 526 /* Sync socket nonblocking/async state with file flags */ 527 tmp = fflag & FNONBLOCK; 528 (void) fo_ioctl(nfp, FIONBIO, &tmp, td->td_ucred, td); 529 tmp = fflag & FASYNC; 530 (void) fo_ioctl(nfp, FIOASYNC, &tmp, td->td_ucred, td); 531 sa = 0; 532 error = soaccept(so, &sa); 533 if (error != 0) { 534 /* 535 * return a namelen of zero for older code which might 536 * ignore the return value from accept. 537 */ 538 if (name) 539 *namelen = 0; 540 goto noconnection; 541 } 542 if (sa == NULL) { 543 if (name) 544 *namelen = 0; 545 goto done; 546 } 547 AUDIT_ARG_SOCKADDR(td, AT_FDCWD, sa); 548 if (name) { 549 /* check sa_len before it is destroyed */ 550 if (*namelen > sa->sa_len) 551 *namelen = sa->sa_len; 552 #ifdef KTRACE 553 if (KTRPOINT(td, KTR_STRUCT)) 554 ktrsockaddr(sa); 555 #endif 556 *name = sa; 557 sa = NULL; 558 } 559 noconnection: 560 free(sa, M_SONAME); 561 562 /* 563 * close the new descriptor, assuming someone hasn't ripped it 564 * out from under us. 565 */ 566 if (error != 0) 567 fdclose(fdp, nfp, fd, td); 568 569 /* 570 * Release explicitly held references before returning. We return 571 * a reference on nfp to the caller on success if they request it. 
572 */ 573 done: 574 if (fp != NULL) { 575 if (error == 0) { 576 *fp = nfp; 577 nfp = NULL; 578 } else 579 *fp = NULL; 580 } 581 if (nfp != NULL) 582 fdrop(nfp, td); 583 fdrop(headfp, td); 584 return (error); 585 } 586 587 int 588 sys_accept(td, uap) 589 struct thread *td; 590 struct accept_args *uap; 591 { 592 593 return (accept1(td, uap->s, uap->name, uap->anamelen, ACCEPT4_INHERIT)); 594 } 595 596 int 597 sys_accept4(td, uap) 598 struct thread *td; 599 struct accept4_args *uap; 600 { 601 602 if (uap->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) 603 return (EINVAL); 604 605 return (accept1(td, uap->s, uap->name, uap->anamelen, uap->flags)); 606 } 607 608 #ifdef COMPAT_OLDSOCK 609 int 610 oaccept(td, uap) 611 struct thread *td; 612 struct accept_args *uap; 613 { 614 615 return (accept1(td, uap->s, uap->name, uap->anamelen, 616 ACCEPT4_INHERIT | ACCEPT4_COMPAT)); 617 } 618 #endif /* COMPAT_OLDSOCK */ 619 620 /* ARGSUSED */ 621 int 622 sys_connect(td, uap) 623 struct thread *td; 624 struct connect_args /* { 625 int s; 626 caddr_t name; 627 int namelen; 628 } */ *uap; 629 { 630 struct sockaddr *sa; 631 int error; 632 633 error = getsockaddr(&sa, uap->name, uap->namelen); 634 if (error == 0) { 635 error = kern_connect(td, uap->s, sa); 636 free(sa, M_SONAME); 637 } 638 return (error); 639 } 640 641 static int 642 kern_connectat(struct thread *td, int dirfd, int fd, struct sockaddr *sa) 643 { 644 struct socket *so; 645 struct file *fp; 646 cap_rights_t rights; 647 int error, interrupted = 0; 648 649 AUDIT_ARG_FD(fd); 650 AUDIT_ARG_SOCKADDR(td, dirfd, sa); 651 error = getsock_cap(td->td_proc->p_fd, fd, 652 cap_rights_init(&rights, CAP_CONNECT), &fp, NULL); 653 if (error != 0) 654 return (error); 655 so = fp->f_data; 656 if (so->so_state & SS_ISCONNECTING) { 657 error = EALREADY; 658 goto done1; 659 } 660 #ifdef KTRACE 661 if (KTRPOINT(td, KTR_STRUCT)) 662 ktrsockaddr(sa); 663 #endif 664 #ifdef MAC 665 error = mac_socket_check_connect(td->td_ucred, so, sa); 666 if (error != 0) 667 goto bad; 668 #endif 669 if (dirfd == AT_FDCWD) 670 error = soconnect(so, sa, td); 671 else 672 error = soconnectat(dirfd, so, sa, td); 673 if (error != 0) 674 goto bad; 675 if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) { 676 error = EINPROGRESS; 677 goto done1; 678 } 679 SOCK_LOCK(so); 680 while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) { 681 error = msleep(&so->so_timeo, SOCK_MTX(so), PSOCK | PCATCH, 682 "connec", 0); 683 if (error != 0) { 684 if (error == EINTR || error == ERESTART) 685 interrupted = 1; 686 break; 687 } 688 } 689 if (error == 0) { 690 error = so->so_error; 691 so->so_error = 0; 692 } 693 SOCK_UNLOCK(so); 694 bad: 695 if (!interrupted) 696 so->so_state &= ~SS_ISCONNECTING; 697 if (error == ERESTART) 698 error = EINTR; 699 done1: 700 fdrop(fp, td); 701 return (error); 702 } 703 704 int 705 kern_connect(struct thread *td, int fd, struct sockaddr *sa) 706 { 707 708 return (kern_connectat(td, AT_FDCWD, fd, sa)); 709 } 710 711 /* ARGSUSED */ 712 int 713 sys_connectat(td, uap) 714 struct thread *td; 715 struct connectat_args /* { 716 int fd; 717 int s; 718 caddr_t name; 719 int namelen; 720 } */ *uap; 721 { 722 struct sockaddr *sa; 723 int error; 724 725 error = getsockaddr(&sa, uap->name, uap->namelen); 726 if (error == 0) { 727 error = kern_connectat(td, uap->fd, uap->s, sa); 728 free(sa, M_SONAME); 729 } 730 return (error); 731 } 732 733 int 734 kern_socketpair(struct thread *td, int domain, int type, int protocol, 735 int *rsv) 736 { 737 struct filedesc *fdp = 
td->td_proc->p_fd; 738 struct file *fp1, *fp2; 739 struct socket *so1, *so2; 740 int fd, error, oflag, fflag; 741 742 AUDIT_ARG_SOCKET(domain, type, protocol); 743 744 oflag = 0; 745 fflag = 0; 746 if ((type & SOCK_CLOEXEC) != 0) { 747 type &= ~SOCK_CLOEXEC; 748 oflag |= O_CLOEXEC; 749 } 750 if ((type & SOCK_NONBLOCK) != 0) { 751 type &= ~SOCK_NONBLOCK; 752 fflag |= FNONBLOCK; 753 } 754 #ifdef MAC 755 /* We might want to have a separate check for socket pairs. */ 756 error = mac_socket_check_create(td->td_ucred, domain, type, 757 protocol); 758 if (error != 0) 759 return (error); 760 #endif 761 error = socreate(domain, &so1, type, protocol, td->td_ucred, td); 762 if (error != 0) 763 return (error); 764 error = socreate(domain, &so2, type, protocol, td->td_ucred, td); 765 if (error != 0) 766 goto free1; 767 /* On success extra reference to `fp1' and 'fp2' is set by falloc. */ 768 error = falloc(td, &fp1, &fd, oflag); 769 if (error != 0) 770 goto free2; 771 rsv[0] = fd; 772 fp1->f_data = so1; /* so1 already has ref count */ 773 error = falloc(td, &fp2, &fd, oflag); 774 if (error != 0) 775 goto free3; 776 fp2->f_data = so2; /* so2 already has ref count */ 777 rsv[1] = fd; 778 error = soconnect2(so1, so2); 779 if (error != 0) 780 goto free4; 781 if (type == SOCK_DGRAM) { 782 /* 783 * Datagram socket connection is asymmetric. 784 */ 785 error = soconnect2(so2, so1); 786 if (error != 0) 787 goto free4; 788 } 789 finit(fp1, FREAD | FWRITE | fflag, DTYPE_SOCKET, fp1->f_data, 790 &socketops); 791 finit(fp2, FREAD | FWRITE | fflag, DTYPE_SOCKET, fp2->f_data, 792 &socketops); 793 if ((fflag & FNONBLOCK) != 0) { 794 (void) fo_ioctl(fp1, FIONBIO, &fflag, td->td_ucred, td); 795 (void) fo_ioctl(fp2, FIONBIO, &fflag, td->td_ucred, td); 796 } 797 fdrop(fp1, td); 798 fdrop(fp2, td); 799 return (0); 800 free4: 801 fdclose(fdp, fp2, rsv[1], td); 802 fdrop(fp2, td); 803 free3: 804 fdclose(fdp, fp1, rsv[0], td); 805 fdrop(fp1, td); 806 free2: 807 if (so2 != NULL) 808 (void)soclose(so2); 809 free1: 810 if (so1 != NULL) 811 (void)soclose(so1); 812 return (error); 813 } 814 815 int 816 sys_socketpair(struct thread *td, struct socketpair_args *uap) 817 { 818 int error, sv[2]; 819 820 error = kern_socketpair(td, uap->domain, uap->type, 821 uap->protocol, sv); 822 if (error != 0) 823 return (error); 824 error = copyout(sv, uap->rsv, 2 * sizeof(int)); 825 if (error != 0) { 826 (void)kern_close(td, sv[0]); 827 (void)kern_close(td, sv[1]); 828 } 829 return (error); 830 } 831 832 static int 833 sendit(td, s, mp, flags) 834 struct thread *td; 835 int s; 836 struct msghdr *mp; 837 int flags; 838 { 839 struct mbuf *control; 840 struct sockaddr *to; 841 int error; 842 843 #ifdef CAPABILITY_MODE 844 if (IN_CAPABILITY_MODE(td) && (mp->msg_name != NULL)) 845 return (ECAPMODE); 846 #endif 847 848 if (mp->msg_name != NULL) { 849 error = getsockaddr(&to, mp->msg_name, mp->msg_namelen); 850 if (error != 0) { 851 to = NULL; 852 goto bad; 853 } 854 mp->msg_name = to; 855 } else { 856 to = NULL; 857 } 858 859 if (mp->msg_control) { 860 if (mp->msg_controllen < sizeof(struct cmsghdr) 861 #ifdef COMPAT_OLDSOCK 862 && mp->msg_flags != MSG_COMPAT 863 #endif 864 ) { 865 error = EINVAL; 866 goto bad; 867 } 868 error = sockargs(&control, mp->msg_control, 869 mp->msg_controllen, MT_CONTROL); 870 if (error != 0) 871 goto bad; 872 #ifdef COMPAT_OLDSOCK 873 if (mp->msg_flags == MSG_COMPAT) { 874 struct cmsghdr *cm; 875 876 M_PREPEND(control, sizeof(*cm), M_WAITOK); 877 cm = mtod(control, struct cmsghdr *); 878 cm->cmsg_len = control->m_len; 879 
cm->cmsg_level = SOL_SOCKET; 880 cm->cmsg_type = SCM_RIGHTS; 881 } 882 #endif 883 } else { 884 control = NULL; 885 } 886 887 error = kern_sendit(td, s, mp, flags, control, UIO_USERSPACE); 888 889 bad: 890 free(to, M_SONAME); 891 return (error); 892 } 893 894 int 895 kern_sendit(td, s, mp, flags, control, segflg) 896 struct thread *td; 897 int s; 898 struct msghdr *mp; 899 int flags; 900 struct mbuf *control; 901 enum uio_seg segflg; 902 { 903 struct file *fp; 904 struct uio auio; 905 struct iovec *iov; 906 struct socket *so; 907 cap_rights_t rights; 908 #ifdef KTRACE 909 struct uio *ktruio = NULL; 910 #endif 911 ssize_t len; 912 int i, error; 913 914 AUDIT_ARG_FD(s); 915 cap_rights_init(&rights, CAP_SEND); 916 if (mp->msg_name != NULL) { 917 AUDIT_ARG_SOCKADDR(td, AT_FDCWD, mp->msg_name); 918 cap_rights_set(&rights, CAP_CONNECT); 919 } 920 error = getsock_cap(td->td_proc->p_fd, s, &rights, &fp, NULL); 921 if (error != 0) 922 return (error); 923 so = (struct socket *)fp->f_data; 924 925 #ifdef KTRACE 926 if (mp->msg_name != NULL && KTRPOINT(td, KTR_STRUCT)) 927 ktrsockaddr(mp->msg_name); 928 #endif 929 #ifdef MAC 930 if (mp->msg_name != NULL) { 931 error = mac_socket_check_connect(td->td_ucred, so, 932 mp->msg_name); 933 if (error != 0) 934 goto bad; 935 } 936 error = mac_socket_check_send(td->td_ucred, so); 937 if (error != 0) 938 goto bad; 939 #endif 940 941 auio.uio_iov = mp->msg_iov; 942 auio.uio_iovcnt = mp->msg_iovlen; 943 auio.uio_segflg = segflg; 944 auio.uio_rw = UIO_WRITE; 945 auio.uio_td = td; 946 auio.uio_offset = 0; /* XXX */ 947 auio.uio_resid = 0; 948 iov = mp->msg_iov; 949 for (i = 0; i < mp->msg_iovlen; i++, iov++) { 950 if ((auio.uio_resid += iov->iov_len) < 0) { 951 error = EINVAL; 952 goto bad; 953 } 954 } 955 #ifdef KTRACE 956 if (KTRPOINT(td, KTR_GENIO)) 957 ktruio = cloneuio(&auio); 958 #endif 959 len = auio.uio_resid; 960 error = sosend(so, mp->msg_name, &auio, 0, control, flags, td); 961 if (error != 0) { 962 if (auio.uio_resid != len && (error == ERESTART || 963 error == EINTR || error == EWOULDBLOCK)) 964 error = 0; 965 /* Generation of SIGPIPE can be controlled per socket */ 966 if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) && 967 !(flags & MSG_NOSIGNAL)) { 968 PROC_LOCK(td->td_proc); 969 tdsignal(td, SIGPIPE); 970 PROC_UNLOCK(td->td_proc); 971 } 972 } 973 if (error == 0) 974 td->td_retval[0] = len - auio.uio_resid; 975 #ifdef KTRACE 976 if (ktruio != NULL) { 977 ktruio->uio_resid = td->td_retval[0]; 978 ktrgenio(s, UIO_WRITE, ktruio, error); 979 } 980 #endif 981 bad: 982 fdrop(fp, td); 983 return (error); 984 } 985 986 int 987 sys_sendto(td, uap) 988 struct thread *td; 989 struct sendto_args /* { 990 int s; 991 caddr_t buf; 992 size_t len; 993 int flags; 994 caddr_t to; 995 int tolen; 996 } */ *uap; 997 { 998 struct msghdr msg; 999 struct iovec aiov; 1000 1001 msg.msg_name = uap->to; 1002 msg.msg_namelen = uap->tolen; 1003 msg.msg_iov = &aiov; 1004 msg.msg_iovlen = 1; 1005 msg.msg_control = 0; 1006 #ifdef COMPAT_OLDSOCK 1007 msg.msg_flags = 0; 1008 #endif 1009 aiov.iov_base = uap->buf; 1010 aiov.iov_len = uap->len; 1011 return (sendit(td, uap->s, &msg, uap->flags)); 1012 } 1013 1014 #ifdef COMPAT_OLDSOCK 1015 int 1016 osend(td, uap) 1017 struct thread *td; 1018 struct osend_args /* { 1019 int s; 1020 caddr_t buf; 1021 int len; 1022 int flags; 1023 } */ *uap; 1024 { 1025 struct msghdr msg; 1026 struct iovec aiov; 1027 1028 msg.msg_name = 0; 1029 msg.msg_namelen = 0; 1030 msg.msg_iov = &aiov; 1031 msg.msg_iovlen = 1; 1032 aiov.iov_base = uap->buf; 1033 
aiov.iov_len = uap->len; 1034 msg.msg_control = 0; 1035 msg.msg_flags = 0; 1036 return (sendit(td, uap->s, &msg, uap->flags)); 1037 } 1038 1039 int 1040 osendmsg(td, uap) 1041 struct thread *td; 1042 struct osendmsg_args /* { 1043 int s; 1044 caddr_t msg; 1045 int flags; 1046 } */ *uap; 1047 { 1048 struct msghdr msg; 1049 struct iovec *iov; 1050 int error; 1051 1052 error = copyin(uap->msg, &msg, sizeof (struct omsghdr)); 1053 if (error != 0) 1054 return (error); 1055 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE); 1056 if (error != 0) 1057 return (error); 1058 msg.msg_iov = iov; 1059 msg.msg_flags = MSG_COMPAT; 1060 error = sendit(td, uap->s, &msg, uap->flags); 1061 free(iov, M_IOV); 1062 return (error); 1063 } 1064 #endif 1065 1066 int 1067 sys_sendmsg(td, uap) 1068 struct thread *td; 1069 struct sendmsg_args /* { 1070 int s; 1071 caddr_t msg; 1072 int flags; 1073 } */ *uap; 1074 { 1075 struct msghdr msg; 1076 struct iovec *iov; 1077 int error; 1078 1079 error = copyin(uap->msg, &msg, sizeof (msg)); 1080 if (error != 0) 1081 return (error); 1082 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE); 1083 if (error != 0) 1084 return (error); 1085 msg.msg_iov = iov; 1086 #ifdef COMPAT_OLDSOCK 1087 msg.msg_flags = 0; 1088 #endif 1089 error = sendit(td, uap->s, &msg, uap->flags); 1090 free(iov, M_IOV); 1091 return (error); 1092 } 1093 1094 int 1095 kern_recvit(td, s, mp, fromseg, controlp) 1096 struct thread *td; 1097 int s; 1098 struct msghdr *mp; 1099 enum uio_seg fromseg; 1100 struct mbuf **controlp; 1101 { 1102 struct uio auio; 1103 struct iovec *iov; 1104 struct mbuf *m, *control = NULL; 1105 caddr_t ctlbuf; 1106 struct file *fp; 1107 struct socket *so; 1108 struct sockaddr *fromsa = NULL; 1109 cap_rights_t rights; 1110 #ifdef KTRACE 1111 struct uio *ktruio = NULL; 1112 #endif 1113 ssize_t len; 1114 int error, i; 1115 1116 if (controlp != NULL) 1117 *controlp = NULL; 1118 1119 AUDIT_ARG_FD(s); 1120 error = getsock_cap(td->td_proc->p_fd, s, 1121 cap_rights_init(&rights, CAP_RECV), &fp, NULL); 1122 if (error != 0) 1123 return (error); 1124 so = fp->f_data; 1125 1126 #ifdef MAC 1127 error = mac_socket_check_receive(td->td_ucred, so); 1128 if (error != 0) { 1129 fdrop(fp, td); 1130 return (error); 1131 } 1132 #endif 1133 1134 auio.uio_iov = mp->msg_iov; 1135 auio.uio_iovcnt = mp->msg_iovlen; 1136 auio.uio_segflg = UIO_USERSPACE; 1137 auio.uio_rw = UIO_READ; 1138 auio.uio_td = td; 1139 auio.uio_offset = 0; /* XXX */ 1140 auio.uio_resid = 0; 1141 iov = mp->msg_iov; 1142 for (i = 0; i < mp->msg_iovlen; i++, iov++) { 1143 if ((auio.uio_resid += iov->iov_len) < 0) { 1144 fdrop(fp, td); 1145 return (EINVAL); 1146 } 1147 } 1148 #ifdef KTRACE 1149 if (KTRPOINT(td, KTR_GENIO)) 1150 ktruio = cloneuio(&auio); 1151 #endif 1152 len = auio.uio_resid; 1153 error = soreceive(so, &fromsa, &auio, NULL, 1154 (mp->msg_control || controlp) ? 
&control : NULL, 1155 &mp->msg_flags); 1156 if (error != 0) { 1157 if (auio.uio_resid != len && (error == ERESTART || 1158 error == EINTR || error == EWOULDBLOCK)) 1159 error = 0; 1160 } 1161 if (fromsa != NULL) 1162 AUDIT_ARG_SOCKADDR(td, AT_FDCWD, fromsa); 1163 #ifdef KTRACE 1164 if (ktruio != NULL) { 1165 ktruio->uio_resid = len - auio.uio_resid; 1166 ktrgenio(s, UIO_READ, ktruio, error); 1167 } 1168 #endif 1169 if (error != 0) 1170 goto out; 1171 td->td_retval[0] = len - auio.uio_resid; 1172 if (mp->msg_name) { 1173 len = mp->msg_namelen; 1174 if (len <= 0 || fromsa == NULL) 1175 len = 0; 1176 else { 1177 /* save sa_len before it is destroyed by MSG_COMPAT */ 1178 len = MIN(len, fromsa->sa_len); 1179 #ifdef COMPAT_OLDSOCK 1180 if (mp->msg_flags & MSG_COMPAT) 1181 ((struct osockaddr *)fromsa)->sa_family = 1182 fromsa->sa_family; 1183 #endif 1184 if (fromseg == UIO_USERSPACE) { 1185 error = copyout(fromsa, mp->msg_name, 1186 (unsigned)len); 1187 if (error != 0) 1188 goto out; 1189 } else 1190 bcopy(fromsa, mp->msg_name, len); 1191 } 1192 mp->msg_namelen = len; 1193 } 1194 if (mp->msg_control && controlp == NULL) { 1195 #ifdef COMPAT_OLDSOCK 1196 /* 1197 * We assume that old recvmsg calls won't receive access 1198 * rights and other control info, esp. as control info 1199 * is always optional and those options didn't exist in 4.3. 1200 * If we receive rights, trim the cmsghdr; anything else 1201 * is tossed. 1202 */ 1203 if (control && mp->msg_flags & MSG_COMPAT) { 1204 if (mtod(control, struct cmsghdr *)->cmsg_level != 1205 SOL_SOCKET || 1206 mtod(control, struct cmsghdr *)->cmsg_type != 1207 SCM_RIGHTS) { 1208 mp->msg_controllen = 0; 1209 goto out; 1210 } 1211 control->m_len -= sizeof (struct cmsghdr); 1212 control->m_data += sizeof (struct cmsghdr); 1213 } 1214 #endif 1215 len = mp->msg_controllen; 1216 m = control; 1217 mp->msg_controllen = 0; 1218 ctlbuf = mp->msg_control; 1219 1220 while (m && len > 0) { 1221 unsigned int tocopy; 1222 1223 if (len >= m->m_len) 1224 tocopy = m->m_len; 1225 else { 1226 mp->msg_flags |= MSG_CTRUNC; 1227 tocopy = len; 1228 } 1229 1230 if ((error = copyout(mtod(m, caddr_t), 1231 ctlbuf, tocopy)) != 0) 1232 goto out; 1233 1234 ctlbuf += tocopy; 1235 len -= tocopy; 1236 m = m->m_next; 1237 } 1238 mp->msg_controllen = ctlbuf - (caddr_t)mp->msg_control; 1239 } 1240 out: 1241 fdrop(fp, td); 1242 #ifdef KTRACE 1243 if (fromsa && KTRPOINT(td, KTR_STRUCT)) 1244 ktrsockaddr(fromsa); 1245 #endif 1246 free(fromsa, M_SONAME); 1247 1248 if (error == 0 && controlp != NULL) 1249 *controlp = control; 1250 else if (control) 1251 m_freem(control); 1252 1253 return (error); 1254 } 1255 1256 static int 1257 recvit(td, s, mp, namelenp) 1258 struct thread *td; 1259 int s; 1260 struct msghdr *mp; 1261 void *namelenp; 1262 { 1263 int error; 1264 1265 error = kern_recvit(td, s, mp, UIO_USERSPACE, NULL); 1266 if (error != 0) 1267 return (error); 1268 if (namelenp != NULL) { 1269 error = copyout(&mp->msg_namelen, namelenp, sizeof (socklen_t)); 1270 #ifdef COMPAT_OLDSOCK 1271 if (mp->msg_flags & MSG_COMPAT) 1272 error = 0; /* old recvfrom didn't check */ 1273 #endif 1274 } 1275 return (error); 1276 } 1277 1278 int 1279 sys_recvfrom(td, uap) 1280 struct thread *td; 1281 struct recvfrom_args /* { 1282 int s; 1283 caddr_t buf; 1284 size_t len; 1285 int flags; 1286 struct sockaddr * __restrict from; 1287 socklen_t * __restrict fromlenaddr; 1288 } */ *uap; 1289 { 1290 struct msghdr msg; 1291 struct iovec aiov; 1292 int error; 1293 1294 if (uap->fromlenaddr) { 1295 error = 
copyin(uap->fromlenaddr, 1296 &msg.msg_namelen, sizeof (msg.msg_namelen)); 1297 if (error != 0) 1298 goto done2; 1299 } else { 1300 msg.msg_namelen = 0; 1301 } 1302 msg.msg_name = uap->from; 1303 msg.msg_iov = &aiov; 1304 msg.msg_iovlen = 1; 1305 aiov.iov_base = uap->buf; 1306 aiov.iov_len = uap->len; 1307 msg.msg_control = 0; 1308 msg.msg_flags = uap->flags; 1309 error = recvit(td, uap->s, &msg, uap->fromlenaddr); 1310 done2: 1311 return (error); 1312 } 1313 1314 #ifdef COMPAT_OLDSOCK 1315 int 1316 orecvfrom(td, uap) 1317 struct thread *td; 1318 struct recvfrom_args *uap; 1319 { 1320 1321 uap->flags |= MSG_COMPAT; 1322 return (sys_recvfrom(td, uap)); 1323 } 1324 #endif 1325 1326 #ifdef COMPAT_OLDSOCK 1327 int 1328 orecv(td, uap) 1329 struct thread *td; 1330 struct orecv_args /* { 1331 int s; 1332 caddr_t buf; 1333 int len; 1334 int flags; 1335 } */ *uap; 1336 { 1337 struct msghdr msg; 1338 struct iovec aiov; 1339 1340 msg.msg_name = 0; 1341 msg.msg_namelen = 0; 1342 msg.msg_iov = &aiov; 1343 msg.msg_iovlen = 1; 1344 aiov.iov_base = uap->buf; 1345 aiov.iov_len = uap->len; 1346 msg.msg_control = 0; 1347 msg.msg_flags = uap->flags; 1348 return (recvit(td, uap->s, &msg, NULL)); 1349 } 1350 1351 /* 1352 * Old recvmsg. This code takes advantage of the fact that the old msghdr 1353 * overlays the new one, missing only the flags, and with the (old) access 1354 * rights where the control fields are now. 1355 */ 1356 int 1357 orecvmsg(td, uap) 1358 struct thread *td; 1359 struct orecvmsg_args /* { 1360 int s; 1361 struct omsghdr *msg; 1362 int flags; 1363 } */ *uap; 1364 { 1365 struct msghdr msg; 1366 struct iovec *iov; 1367 int error; 1368 1369 error = copyin(uap->msg, &msg, sizeof (struct omsghdr)); 1370 if (error != 0) 1371 return (error); 1372 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE); 1373 if (error != 0) 1374 return (error); 1375 msg.msg_flags = uap->flags | MSG_COMPAT; 1376 msg.msg_iov = iov; 1377 error = recvit(td, uap->s, &msg, &uap->msg->msg_namelen); 1378 if (msg.msg_controllen && error == 0) 1379 error = copyout(&msg.msg_controllen, 1380 &uap->msg->msg_accrightslen, sizeof (int)); 1381 free(iov, M_IOV); 1382 return (error); 1383 } 1384 #endif 1385 1386 int 1387 sys_recvmsg(td, uap) 1388 struct thread *td; 1389 struct recvmsg_args /* { 1390 int s; 1391 struct msghdr *msg; 1392 int flags; 1393 } */ *uap; 1394 { 1395 struct msghdr msg; 1396 struct iovec *uiov, *iov; 1397 int error; 1398 1399 error = copyin(uap->msg, &msg, sizeof (msg)); 1400 if (error != 0) 1401 return (error); 1402 error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE); 1403 if (error != 0) 1404 return (error); 1405 msg.msg_flags = uap->flags; 1406 #ifdef COMPAT_OLDSOCK 1407 msg.msg_flags &= ~MSG_COMPAT; 1408 #endif 1409 uiov = msg.msg_iov; 1410 msg.msg_iov = iov; 1411 error = recvit(td, uap->s, &msg, NULL); 1412 if (error == 0) { 1413 msg.msg_iov = uiov; 1414 error = copyout(&msg, uap->msg, sizeof(msg)); 1415 } 1416 free(iov, M_IOV); 1417 return (error); 1418 } 1419 1420 /* ARGSUSED */ 1421 int 1422 sys_shutdown(td, uap) 1423 struct thread *td; 1424 struct shutdown_args /* { 1425 int s; 1426 int how; 1427 } */ *uap; 1428 { 1429 struct socket *so; 1430 struct file *fp; 1431 cap_rights_t rights; 1432 int error; 1433 1434 AUDIT_ARG_FD(uap->s); 1435 error = getsock_cap(td->td_proc->p_fd, uap->s, 1436 cap_rights_init(&rights, CAP_SHUTDOWN), &fp, NULL); 1437 if (error == 0) { 1438 so = fp->f_data; 1439 error = soshutdown(so, uap->how); 1440 fdrop(fp, td); 1441 } 1442 return (error); 1443 } 1444 
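/*
 * For reference, the MSG_CTRUNC / cmsghdr copyout behaviour implemented by
 * kern_recvit() above is what userland observes through the CMSG_* macros.
 * The sketch below is illustrative only and is not part of this file; the
 * helper name recv_fd() and the assumption that exactly one SCM_RIGHTS
 * descriptor arrives are simplifications.
 *
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *	#include <err.h>
 *	#include <string.h>
 *
 *	static int
 *	recv_fd(int s)
 *	{
 *		char buf[1];
 *		union {
 *			struct cmsghdr hdr;
 *			char buf[CMSG_SPACE(sizeof(int))];
 *		} cmsgbuf;
 *		struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *		struct msghdr msg;
 *		struct cmsghdr *cm;
 *		int fd = -1;
 *
 *		memset(&msg, 0, sizeof(msg));
 *		msg.msg_iov = &iov;
 *		msg.msg_iovlen = 1;
 *		msg.msg_control = cmsgbuf.buf;
 *		msg.msg_controllen = sizeof(cmsgbuf.buf);
 *		if (recvmsg(s, &msg, 0) == -1)
 *			err(1, "recvmsg");
 *		if (msg.msg_flags & MSG_CTRUNC)
 *			errx(1, "control data truncated");
 *		for (cm = CMSG_FIRSTHDR(&msg); cm != NULL;
 *		    cm = CMSG_NXTHDR(&msg, cm))
 *			if (cm->cmsg_level == SOL_SOCKET &&
 *			    cm->cmsg_type == SCM_RIGHTS)
 *				memcpy(&fd, CMSG_DATA(cm), sizeof(fd));
 *		return (fd);
 *	}
 *
 * If the supplied control buffer is smaller than the queued control data,
 * kern_recvit() sets MSG_CTRUNC, which is why the example checks it before
 * walking the cmsg chain.
 */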
/* ARGSUSED */
int
sys_setsockopt(td, uap)
	struct thread *td;
	struct setsockopt_args /* {
		int	s;
		int	level;
		int	name;
		caddr_t	val;
		int	valsize;
	} */ *uap;
{

	return (kern_setsockopt(td, uap->s, uap->level, uap->name,
	    uap->val, UIO_USERSPACE, uap->valsize));
}

int
kern_setsockopt(td, s, level, name, val, valseg, valsize)
	struct thread *td;
	int s;
	int level;
	int name;
	void *val;
	enum uio_seg valseg;
	socklen_t valsize;
{
	struct socket *so;
	struct file *fp;
	struct sockopt sopt;
	cap_rights_t rights;
	int error;

	if (val == NULL && valsize != 0)
		return (EFAULT);
	if ((int)valsize < 0)
		return (EINVAL);

	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = level;
	sopt.sopt_name = name;
	sopt.sopt_val = val;
	sopt.sopt_valsize = valsize;
	switch (valseg) {
	case UIO_USERSPACE:
		sopt.sopt_td = td;
		break;
	case UIO_SYSSPACE:
		sopt.sopt_td = NULL;
		break;
	default:
		panic("kern_setsockopt called with bad valseg");
	}

	AUDIT_ARG_FD(s);
	error = getsock_cap(td->td_proc->p_fd, s,
	    cap_rights_init(&rights, CAP_SETSOCKOPT), &fp, NULL);
	if (error == 0) {
		so = fp->f_data;
		error = sosetopt(so, &sopt);
		fdrop(fp, td);
	}
	return (error);
}

/* ARGSUSED */
int
sys_getsockopt(td, uap)
	struct thread *td;
	struct getsockopt_args /* {
		int	s;
		int	level;
		int	name;
		void * __restrict	val;
		socklen_t * __restrict	avalsize;
	} */ *uap;
{
	socklen_t valsize;
	int error;

	if (uap->val) {
		error = copyin(uap->avalsize, &valsize, sizeof (valsize));
		if (error != 0)
			return (error);
	}

	error = kern_getsockopt(td, uap->s, uap->level, uap->name,
	    uap->val, UIO_USERSPACE, &valsize);

	if (error == 0)
		error = copyout(&valsize, uap->avalsize, sizeof (valsize));
	return (error);
}

/*
 * Kernel version of getsockopt.
 * optval can be a userland or kernel pointer.  optlen is always a kernel
 * pointer.
 */
int
kern_getsockopt(td, s, level, name, val, valseg, valsize)
	struct thread *td;
	int s;
	int level;
	int name;
	void *val;
	enum uio_seg valseg;
	socklen_t *valsize;
{
	struct socket *so;
	struct file *fp;
	struct sockopt sopt;
	cap_rights_t rights;
	int error;

	if (val == NULL)
		*valsize = 0;
	if ((int)*valsize < 0)
		return (EINVAL);

	sopt.sopt_dir = SOPT_GET;
	sopt.sopt_level = level;
	sopt.sopt_name = name;
	sopt.sopt_val = val;
	sopt.sopt_valsize = (size_t)*valsize; /* checked non-negative above */
	switch (valseg) {
	case UIO_USERSPACE:
		sopt.sopt_td = td;
		break;
	case UIO_SYSSPACE:
		sopt.sopt_td = NULL;
		break;
	default:
		panic("kern_getsockopt called with bad valseg");
	}

	AUDIT_ARG_FD(s);
	error = getsock_cap(td->td_proc->p_fd, s,
	    cap_rights_init(&rights, CAP_GETSOCKOPT), &fp, NULL);
	if (error == 0) {
		so = fp->f_data;
		error = sogetopt(so, &sopt);
		*valsize = sopt.sopt_valsize;
		fdrop(fp, td);
	}
	return (error);
}

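/*
 * For reference, the userland view of the two entry points above is the
 * standard setsockopt(2)/getsockopt(2) interface.  The sketch below is
 * illustrative only and not part of this file; the helper name
 * socket_error() is an assumption.
 *
 *	#include <sys/socket.h>
 *	#include <err.h>
 *
 *	static int
 *	socket_error(int s)
 *	{
 *		int on = 1, soerr;
 *		socklen_t len = sizeof(soerr);
 *
 *		if (setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &on,
 *		    sizeof(on)) == -1)
 *			err(1, "setsockopt");
 *		if (getsockopt(s, SOL_SOCKET, SO_ERROR, &soerr, &len) == -1)
 *			err(1, "getsockopt");
 *		return (soerr);
 *	}
 *
 * Both calls reach kern_setsockopt()/kern_getsockopt() with
 * valseg == UIO_USERSPACE, so sopt_td is set and the option value is moved
 * with copyin()/copyout(); in-kernel callers passing UIO_SYSSPACE skip that
 * path.
 */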
/*
 * getsockname1() - Get socket name.
 */
/* ARGSUSED */
static int
getsockname1(td, uap, compat)
	struct thread *td;
	struct getsockname_args /* {
		int	fdes;
		struct sockaddr * __restrict asa;
		socklen_t * __restrict alen;
	} */ *uap;
	int compat;
{
	struct sockaddr *sa;
	socklen_t len;
	int error;

	error = copyin(uap->alen, &len, sizeof(len));
	if (error != 0)
		return (error);

	error = kern_getsockname(td, uap->fdes, &sa, &len);
	if (error != 0)
		return (error);

	if (len != 0) {
#ifdef COMPAT_OLDSOCK
		if (compat)
			((struct osockaddr *)sa)->sa_family = sa->sa_family;
#endif
		error = copyout(sa, uap->asa, (u_int)len);
	}
	free(sa, M_SONAME);
	if (error == 0)
		error = copyout(&len, uap->alen, sizeof(len));
	return (error);
}

int
kern_getsockname(struct thread *td, int fd, struct sockaddr **sa,
    socklen_t *alen)
{
	struct socket *so;
	struct file *fp;
	cap_rights_t rights;
	socklen_t len;
	int error;

	AUDIT_ARG_FD(fd);
	error = getsock_cap(td->td_proc->p_fd, fd,
	    cap_rights_init(&rights, CAP_GETSOCKNAME), &fp, NULL);
	if (error != 0)
		return (error);
	so = fp->f_data;
	*sa = NULL;
	CURVNET_SET(so->so_vnet);
	error = (*so->so_proto->pr_usrreqs->pru_sockaddr)(so, sa);
	CURVNET_RESTORE();
	if (error != 0)
		goto bad;
	if (*sa == NULL)
		len = 0;
	else
		len = MIN(*alen, (*sa)->sa_len);
	*alen = len;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_STRUCT))
		ktrsockaddr(*sa);
#endif
bad:
	fdrop(fp, td);
	if (error != 0 && *sa != NULL) {
		free(*sa, M_SONAME);
		*sa = NULL;
	}
	return (error);
}

int
sys_getsockname(td, uap)
	struct thread *td;
	struct getsockname_args *uap;
{

	return (getsockname1(td, uap, 0));
}

#ifdef COMPAT_OLDSOCK
int
ogetsockname(td, uap)
	struct thread *td;
	struct getsockname_args *uap;
{

	return (getsockname1(td, uap, 1));
}
#endif /* COMPAT_OLDSOCK */

/*
 * getpeername1() - Get name of peer for connected socket.
1693 */ 1694 /* ARGSUSED */ 1695 static int 1696 getpeername1(td, uap, compat) 1697 struct thread *td; 1698 struct getpeername_args /* { 1699 int fdes; 1700 struct sockaddr * __restrict asa; 1701 socklen_t * __restrict alen; 1702 } */ *uap; 1703 int compat; 1704 { 1705 struct sockaddr *sa; 1706 socklen_t len; 1707 int error; 1708 1709 error = copyin(uap->alen, &len, sizeof (len)); 1710 if (error != 0) 1711 return (error); 1712 1713 error = kern_getpeername(td, uap->fdes, &sa, &len); 1714 if (error != 0) 1715 return (error); 1716 1717 if (len != 0) { 1718 #ifdef COMPAT_OLDSOCK 1719 if (compat) 1720 ((struct osockaddr *)sa)->sa_family = sa->sa_family; 1721 #endif 1722 error = copyout(sa, uap->asa, (u_int)len); 1723 } 1724 free(sa, M_SONAME); 1725 if (error == 0) 1726 error = copyout(&len, uap->alen, sizeof(len)); 1727 return (error); 1728 } 1729 1730 int 1731 kern_getpeername(struct thread *td, int fd, struct sockaddr **sa, 1732 socklen_t *alen) 1733 { 1734 struct socket *so; 1735 struct file *fp; 1736 cap_rights_t rights; 1737 socklen_t len; 1738 int error; 1739 1740 AUDIT_ARG_FD(fd); 1741 error = getsock_cap(td->td_proc->p_fd, fd, 1742 cap_rights_init(&rights, CAP_GETPEERNAME), &fp, NULL); 1743 if (error != 0) 1744 return (error); 1745 so = fp->f_data; 1746 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) { 1747 error = ENOTCONN; 1748 goto done; 1749 } 1750 *sa = NULL; 1751 CURVNET_SET(so->so_vnet); 1752 error = (*so->so_proto->pr_usrreqs->pru_peeraddr)(so, sa); 1753 CURVNET_RESTORE(); 1754 if (error != 0) 1755 goto bad; 1756 if (*sa == NULL) 1757 len = 0; 1758 else 1759 len = MIN(*alen, (*sa)->sa_len); 1760 *alen = len; 1761 #ifdef KTRACE 1762 if (KTRPOINT(td, KTR_STRUCT)) 1763 ktrsockaddr(*sa); 1764 #endif 1765 bad: 1766 if (error != 0 && *sa != NULL) { 1767 free(*sa, M_SONAME); 1768 *sa = NULL; 1769 } 1770 done: 1771 fdrop(fp, td); 1772 return (error); 1773 } 1774 1775 int 1776 sys_getpeername(td, uap) 1777 struct thread *td; 1778 struct getpeername_args *uap; 1779 { 1780 1781 return (getpeername1(td, uap, 0)); 1782 } 1783 1784 #ifdef COMPAT_OLDSOCK 1785 int 1786 ogetpeername(td, uap) 1787 struct thread *td; 1788 struct ogetpeername_args *uap; 1789 { 1790 1791 /* XXX uap should have type `getpeername_args *' to begin with. */ 1792 return (getpeername1(td, (struct getpeername_args *)uap, 1)); 1793 } 1794 #endif /* COMPAT_OLDSOCK */ 1795 1796 int 1797 sockargs(mp, buf, buflen, type) 1798 struct mbuf **mp; 1799 caddr_t buf; 1800 int buflen, type; 1801 { 1802 struct sockaddr *sa; 1803 struct mbuf *m; 1804 int error; 1805 1806 if (buflen > MLEN) { 1807 #ifdef COMPAT_OLDSOCK 1808 if (type == MT_SONAME && buflen <= 112) 1809 buflen = MLEN; /* unix domain compat. 
hack */ 1810 else 1811 #endif 1812 if (buflen > MCLBYTES) 1813 return (EINVAL); 1814 } 1815 m = m_get2(buflen, M_WAITOK, type, 0); 1816 m->m_len = buflen; 1817 error = copyin(buf, mtod(m, caddr_t), (u_int)buflen); 1818 if (error != 0) 1819 (void) m_free(m); 1820 else { 1821 *mp = m; 1822 if (type == MT_SONAME) { 1823 sa = mtod(m, struct sockaddr *); 1824 1825 #if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN 1826 if (sa->sa_family == 0 && sa->sa_len < AF_MAX) 1827 sa->sa_family = sa->sa_len; 1828 #endif 1829 sa->sa_len = buflen; 1830 } 1831 } 1832 return (error); 1833 } 1834 1835 int 1836 getsockaddr(namp, uaddr, len) 1837 struct sockaddr **namp; 1838 caddr_t uaddr; 1839 size_t len; 1840 { 1841 struct sockaddr *sa; 1842 int error; 1843 1844 if (len > SOCK_MAXADDRLEN) 1845 return (ENAMETOOLONG); 1846 if (len < offsetof(struct sockaddr, sa_data[0])) 1847 return (EINVAL); 1848 sa = malloc(len, M_SONAME, M_WAITOK); 1849 error = copyin(uaddr, sa, len); 1850 if (error != 0) { 1851 free(sa, M_SONAME); 1852 } else { 1853 #if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN 1854 if (sa->sa_family == 0 && sa->sa_len < AF_MAX) 1855 sa->sa_family = sa->sa_len; 1856 #endif 1857 sa->sa_len = len; 1858 *namp = sa; 1859 } 1860 return (error); 1861 } 1862 1863 /* 1864 * Detach mapped page and release resources back to the system. 1865 */ 1866 int 1867 sf_buf_mext(struct mbuf *mb, void *addr, void *args) 1868 { 1869 vm_page_t m; 1870 struct sendfile_sync *sfs; 1871 1872 m = sf_buf_page(args); 1873 sf_buf_free(args); 1874 vm_page_lock(m); 1875 vm_page_unwire(m, 0); 1876 /* 1877 * Check for the object going away on us. This can 1878 * happen since we don't hold a reference to it. 1879 * If so, we're responsible for freeing the page. 1880 */ 1881 if (m->wire_count == 0 && m->object == NULL) 1882 vm_page_free(m); 1883 vm_page_unlock(m); 1884 if (addr != NULL) { 1885 sfs = addr; 1886 sf_sync_deref(sfs); 1887 } 1888 return (EXT_FREE_OK); 1889 } 1890 1891 void 1892 sf_sync_deref(struct sendfile_sync *sfs) 1893 { 1894 1895 if (sfs == NULL) 1896 return; 1897 1898 mtx_lock(&sfs->mtx); 1899 KASSERT(sfs->count> 0, ("Sendfile sync botchup count == 0")); 1900 if (--sfs->count == 0) 1901 cv_signal(&sfs->cv); 1902 mtx_unlock(&sfs->mtx); 1903 } 1904 1905 /* 1906 * Allocate a sendfile_sync state structure. 1907 * 1908 * For now this only knows about the "sleep" sync, but later it will 1909 * grow various other personalities. 1910 */ 1911 struct sendfile_sync * 1912 sf_sync_alloc(uint32_t flags) 1913 { 1914 struct sendfile_sync *sfs; 1915 1916 sfs = uma_zalloc(zone_sfsync, M_WAITOK | M_ZERO); 1917 mtx_init(&sfs->mtx, "sendfile", NULL, MTX_DEF); 1918 cv_init(&sfs->cv, "sendfile"); 1919 sfs->flags = flags; 1920 1921 return (sfs); 1922 } 1923 1924 /* 1925 * Take a reference to a sfsync instance. 1926 * 1927 * This has to map 1:1 to free calls coming in via sf_buf_mext(), 1928 * so typically this will be referenced once for each mbuf allocated. 
 */
void
sf_sync_ref(struct sendfile_sync *sfs)
{

	if (sfs == NULL)
		return;

	mtx_lock(&sfs->mtx);
	sfs->count++;
	mtx_unlock(&sfs->mtx);
}

void
sf_sync_syscall_wait(struct sendfile_sync *sfs)
{

	if (sfs == NULL)
		return;

	mtx_lock(&sfs->mtx);
	if (sfs->count != 0)
		cv_wait(&sfs->cv, &sfs->mtx);
	KASSERT(sfs->count == 0, ("sendfile sync still busy"));
	mtx_unlock(&sfs->mtx);
}

void
sf_sync_free(struct sendfile_sync *sfs)
{

	if (sfs == NULL)
		return;

	/*
	 * XXX we should ensure that nothing else has this
	 * locked before freeing.
	 */
	mtx_lock(&sfs->mtx);
	KASSERT(sfs->count == 0, ("sendfile sync still busy"));
	cv_destroy(&sfs->cv);
	mtx_destroy(&sfs->mtx);
	uma_zfree(zone_sfsync, sfs);
}

/*
 * sendfile(2)
 *
 * int sendfile(int fd, int s, off_t offset, size_t nbytes,
 *	 struct sf_hdtr *hdtr, off_t *sbytes, int flags)
 *
 * Send a file specified by 'fd' and starting at 'offset' to a socket
 * specified by 's'.  Send only 'nbytes' of the file or until EOF if
 * nbytes == 0.  Optionally add a header and/or trailer to the socket
 * output.  If specified, write the total number of bytes sent into *sbytes.
 */
int
sys_sendfile(struct thread *td, struct sendfile_args *uap)
{

	return (do_sendfile(td, uap, 0));
}
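/*
 * For reference, a minimal userland sketch of the interface documented
 * above.  This is illustrative only and not part of this file; the helper
 * name serve_file() and the omission of header/trailer and partial-send
 * handling are simplifications.
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *	#include <err.h>
 *
 *	static off_t
 *	serve_file(int filefd, int sockfd)
 *	{
 *		off_t sbytes = 0;
 *
 *		if (sendfile(filefd, sockfd, 0, 0, NULL, &sbytes, 0) == -1)
 *			err(1, "sendfile");
 *		return (sbytes);
 *	}
 *
 * Passing nbytes == 0 sends from the given offset to EOF.  On a
 * non-blocking socket the call may fail with EAGAIN after a partial
 * transfer, with *sbytes updated, in which case the caller is expected to
 * retry from offset + sbytes.
 */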
int
_do_sendfile(struct thread *td, int src_fd, int sock_fd, int flags,
    int compat, off_t offset, size_t nbytes, off_t *sbytes,
    struct uio *hdr_uio, struct uio *trl_uio)
{
	cap_rights_t rights;
	struct sendfile_sync *sfs = NULL;
	struct file *fp;
	int error;

	AUDIT_ARG_FD(src_fd);

	/*
	 * sendfile(2) can start at any offset within a file so we require
	 * CAP_READ+CAP_SEEK = CAP_PREAD.
	 */
	if ((error = fget_read(td, src_fd,
	    cap_rights_init(&rights, CAP_PREAD), &fp)) != 0) {
		goto out;
	}

	/*
	 * If we need to wait for completion, initialise the sfsync
	 * state here.
	 */
	if (flags & SF_SYNC)
		sfs = sf_sync_alloc(flags & SF_SYNC);

	error = fo_sendfile(fp, sock_fd, hdr_uio, trl_uio, offset,
	    nbytes, sbytes, flags, compat ? SFK_COMPAT : 0, sfs, td);

	/*
	 * If appropriate, do the wait and free here.
	 */
	if (sfs != NULL) {
		sf_sync_syscall_wait(sfs);
		sf_sync_free(sfs);
	}

	/*
	 * XXX Should we wait until the send has completed before freeing the
	 * source file handle?  It's the previous behaviour, sure, but is it
	 * required?  We've wired down the page references after all.
	 */
	fdrop(fp, td);

out:
	return (error);
}

static int
do_sendfile(struct thread *td, struct sendfile_args *uap, int compat)
{
	struct sf_hdtr hdtr;
	struct uio *hdr_uio, *trl_uio;
	int error;
	off_t sbytes;

	/*
	 * File offset must be positive.  If it goes beyond EOF
	 * we send only the header/trailer and no payload data.
	 */
	if (uap->offset < 0)
		return (EINVAL);

	hdr_uio = trl_uio = NULL;

	if (uap->hdtr != NULL) {
		error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
		if (error != 0)
			goto out;
		if (hdtr.headers != NULL) {
			error = copyinuio(hdtr.headers, hdtr.hdr_cnt,
			    &hdr_uio);
			if (error != 0)
				goto out;
		}
		if (hdtr.trailers != NULL) {
			error = copyinuio(hdtr.trailers, hdtr.trl_cnt,
			    &trl_uio);
			if (error != 0)
				goto out;
		}
	}

	error = _do_sendfile(td, uap->fd, uap->s, uap->flags, compat,
	    uap->offset, uap->nbytes, &sbytes, hdr_uio, trl_uio);

	if (uap->sbytes != NULL) {
		copyout(&sbytes, uap->sbytes, sizeof(off_t));
	}
out:
	free(hdr_uio, M_IOV);
	free(trl_uio, M_IOV);
	return (error);
}

#ifdef COMPAT_FREEBSD4
int
freebsd4_sendfile(struct thread *td, struct freebsd4_sendfile_args *uap)
{
	struct sendfile_args args;

	args.fd = uap->fd;
	args.s = uap->s;
	args.offset = uap->offset;
	args.nbytes = uap->nbytes;
	args.hdtr = uap->hdtr;
	args.sbytes = uap->sbytes;
	args.flags = uap->flags;

	return (do_sendfile(td, &args, 1));
}
#endif /* COMPAT_FREEBSD4 */

static int
sendfile_readpage(vm_object_t obj, struct vnode *vp, int nd,
    off_t off, int xfsize, int bsize, struct thread *td, vm_page_t *res)
{
	vm_page_t m;
	vm_pindex_t pindex;
	ssize_t resid;
	int error, readahead, rv;

	pindex = OFF_TO_IDX(off);
	VM_OBJECT_WLOCK(obj);
	m = vm_page_grab(obj, pindex, (vp != NULL ? VM_ALLOC_NOBUSY |
	    VM_ALLOC_IGN_SBUSY : 0) | VM_ALLOC_WIRED | VM_ALLOC_NORMAL);

	/*
	 * Check if page is valid for what we need, otherwise initiate I/O.
	 *
	 * The non-zero nd argument prevents disk I/O, instead we
	 * return the caller what he specified in nd.  In particular,
	 * if we already turned some pages into mbufs, nd == EAGAIN
	 * and the main function sends the pages before we come
	 * here again and block.
	 */
	if (m->valid != 0 && vm_page_is_valid(m, off & PAGE_MASK, xfsize)) {
		if (vp == NULL)
			vm_page_xunbusy(m);
		VM_OBJECT_WUNLOCK(obj);
		*res = m;
		return (0);
	} else if (nd != 0) {
		if (vp == NULL)
			vm_page_xunbusy(m);
		error = nd;
		goto free_page;
	}

	/*
	 * Get the page from backing store.
	 */
	error = 0;
	if (vp != NULL) {
		VM_OBJECT_WUNLOCK(obj);
		readahead = sfreadahead * MAXBSIZE;

		/*
		 * Use vn_rdwr() instead of the pager interface for
		 * the vnode, to allow the read-ahead.
		 *
		 * XXXMAC: Because we don't have fp->f_cred here, we
		 * pass in NOCRED.  This is probably wrong, but is
		 * consistent with our original implementation.
2156 */ 2157 error = vn_rdwr(UIO_READ, vp, NULL, readahead, trunc_page(off), 2158 UIO_NOCOPY, IO_NODELOCKED | IO_VMIO | ((readahead / 2159 bsize) << IO_SEQSHIFT), td->td_ucred, NOCRED, &resid, td); 2160 SFSTAT_INC(sf_iocnt); 2161 VM_OBJECT_WLOCK(obj); 2162 } else { 2163 if (vm_pager_has_page(obj, pindex, NULL, NULL)) { 2164 rv = vm_pager_get_pages(obj, &m, 1, 0); 2165 SFSTAT_INC(sf_iocnt); 2166 m = vm_page_lookup(obj, pindex); 2167 if (m == NULL) 2168 error = EIO; 2169 else if (rv != VM_PAGER_OK) { 2170 vm_page_lock(m); 2171 vm_page_free(m); 2172 vm_page_unlock(m); 2173 m = NULL; 2174 error = EIO; 2175 } 2176 } else { 2177 pmap_zero_page(m); 2178 m->valid = VM_PAGE_BITS_ALL; 2179 m->dirty = 0; 2180 } 2181 if (m != NULL) 2182 vm_page_xunbusy(m); 2183 } 2184 if (error == 0) { 2185 *res = m; 2186 } else if (m != NULL) { 2187 free_page: 2188 vm_page_lock(m); 2189 vm_page_unwire(m, 0); 2190 2191 /* 2192 * See if anyone else might know about this page. If 2193 * not and it is not valid, then free it. 2194 */ 2195 if (m->wire_count == 0 && m->valid == 0 && !vm_page_busied(m)) 2196 vm_page_free(m); 2197 vm_page_unlock(m); 2198 } 2199 KASSERT(error != 0 || (m->wire_count > 0 && 2200 vm_page_is_valid(m, off & PAGE_MASK, xfsize)), 2201 ("wrong page state m %p off %#jx xfsize %d", m, (uintmax_t)off, 2202 xfsize)); 2203 VM_OBJECT_WUNLOCK(obj); 2204 return (error); 2205 } 2206 2207 static int 2208 sendfile_getobj(struct thread *td, struct file *fp, vm_object_t *obj_res, 2209 struct vnode **vp_res, struct shmfd **shmfd_res, off_t *obj_size, 2210 int *bsize) 2211 { 2212 struct vattr va; 2213 vm_object_t obj; 2214 struct vnode *vp; 2215 struct shmfd *shmfd; 2216 int error; 2217 2218 vp = *vp_res = NULL; 2219 obj = NULL; 2220 shmfd = *shmfd_res = NULL; 2221 *bsize = 0; 2222 2223 /* 2224 * The file descriptor must be a regular file and have a 2225 * backing VM object. 2226 */ 2227 if (fp->f_type == DTYPE_VNODE) { 2228 vp = fp->f_vnode; 2229 vn_lock(vp, LK_SHARED | LK_RETRY); 2230 if (vp->v_type != VREG) { 2231 error = EINVAL; 2232 goto out; 2233 } 2234 *bsize = vp->v_mount->mnt_stat.f_iosize; 2235 error = VOP_GETATTR(vp, &va, td->td_ucred); 2236 if (error != 0) 2237 goto out; 2238 *obj_size = va.va_size; 2239 obj = vp->v_object; 2240 if (obj == NULL) { 2241 error = EINVAL; 2242 goto out; 2243 } 2244 } else if (fp->f_type == DTYPE_SHM) { 2245 shmfd = fp->f_data; 2246 obj = shmfd->shm_object; 2247 *obj_size = shmfd->shm_size; 2248 } else { 2249 error = EINVAL; 2250 goto out; 2251 } 2252 2253 VM_OBJECT_WLOCK(obj); 2254 if ((obj->flags & OBJ_DEAD) != 0) { 2255 VM_OBJECT_WUNLOCK(obj); 2256 error = EBADF; 2257 goto out; 2258 } 2259 2260 /* 2261 * Temporarily increase the backing VM object's reference 2262 * count so that a forced reclamation of its vnode does not 2263 * immediately destroy it. 2264 */ 2265 vm_object_reference_locked(obj); 2266 VM_OBJECT_WUNLOCK(obj); 2267 *obj_res = obj; 2268 *vp_res = vp; 2269 *shmfd_res = shmfd; 2270 2271 out: 2272 if (vp != NULL) 2273 VOP_UNLOCK(vp, 0); 2274 return (error); 2275 } 2276 2277 static int 2278 kern_sendfile_getsock(struct thread *td, int s, struct file **sock_fp, 2279 struct socket **so) 2280 { 2281 cap_rights_t rights; 2282 int error; 2283 2284 *sock_fp = NULL; 2285 *so = NULL; 2286 2287 /* 2288 * The socket must be a stream socket and connected. 
2289 */ 2290 error = getsock_cap(td->td_proc->p_fd, s, cap_rights_init(&rights, 2291 CAP_SEND), sock_fp, NULL); 2292 if (error != 0) 2293 return (error); 2294 *so = (*sock_fp)->f_data; 2295 if ((*so)->so_type != SOCK_STREAM) 2296 return (EINVAL); 2297 if (((*so)->so_state & SS_ISCONNECTED) == 0) 2298 return (ENOTCONN); 2299 return (0); 2300 } 2301 2302 int 2303 vn_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio, 2304 struct uio *trl_uio, off_t offset, size_t nbytes, off_t *sent, int flags, 2305 int kflags, struct sendfile_sync *sfs, struct thread *td) 2306 { 2307 struct file *sock_fp; 2308 struct vnode *vp; 2309 struct vm_object *obj; 2310 struct socket *so; 2311 struct mbuf *m; 2312 struct sf_buf *sf; 2313 struct vm_page *pg; 2314 struct shmfd *shmfd; 2315 struct vattr va; 2316 off_t off, xfsize, fsbytes, sbytes, rem, obj_size; 2317 int error, bsize, nd, hdrlen, mnw; 2318 2319 pg = NULL; 2320 obj = NULL; 2321 so = NULL; 2322 m = NULL; 2323 fsbytes = sbytes = 0; 2324 hdrlen = mnw = 0; 2325 rem = nbytes; 2326 obj_size = 0; 2327 2328 error = sendfile_getobj(td, fp, &obj, &vp, &shmfd, &obj_size, &bsize); 2329 if (error != 0) 2330 return (error); 2331 if (rem == 0) 2332 rem = obj_size; 2333 2334 error = kern_sendfile_getsock(td, sockfd, &sock_fp, &so); 2335 if (error != 0) 2336 goto out; 2337 2338 /* 2339 * Do not wait on memory allocations but return ENOMEM for 2340 * caller to retry later. 2341 * XXX: Experimental. 2342 */ 2343 if (flags & SF_MNOWAIT) 2344 mnw = 1; 2345 2346 #ifdef MAC 2347 error = mac_socket_check_send(td->td_ucred, so); 2348 if (error != 0) 2349 goto out; 2350 #endif 2351 2352 /* If headers are specified copy them into mbufs. */ 2353 if (hdr_uio != NULL) { 2354 hdr_uio->uio_td = td; 2355 hdr_uio->uio_rw = UIO_WRITE; 2356 if (hdr_uio->uio_resid > 0) { 2357 /* 2358 * In FBSD < 5.0 the nbytes to send also included 2359 * the header. If compat is specified subtract the 2360 * header size from nbytes. 2361 */ 2362 if (kflags & SFK_COMPAT) { 2363 if (nbytes > hdr_uio->uio_resid) 2364 nbytes -= hdr_uio->uio_resid; 2365 else 2366 nbytes = 0; 2367 } 2368 m = m_uiotombuf(hdr_uio, (mnw ? M_NOWAIT : M_WAITOK), 2369 0, 0, 0); 2370 if (m == NULL) { 2371 error = mnw ? EAGAIN : ENOBUFS; 2372 goto out; 2373 } 2374 hdrlen = m_length(m, NULL); 2375 } 2376 } 2377 2378 /* 2379 * Protect against multiple writers to the socket. 2380 * 2381 * XXXRW: Historically this has assumed non-interruptibility, so now 2382 * we implement that, but possibly shouldn't. 2383 */ 2384 (void)sblock(&so->so_snd, SBL_WAIT | SBL_NOINTR); 2385 2386 /* 2387 * Loop through the pages of the file, starting with the requested 2388 * offset. Get a file page (do I/O if necessary), map the file page 2389 * into an sf_buf, attach an mbuf header to the sf_buf, and queue 2390 * it on the socket. 2391 * This is done in two loops. The inner loop turns as many pages 2392 * as it can, up to available socket buffer space, without blocking 2393 * into mbufs to have it bulk delivered into the socket send buffer. 2394 * The outer loop checks the state and available space of the socket 2395 * and takes care of the overall progress. 
	for (off = offset; ; ) {
		struct mbuf *mtail;
		int loopbytes;
		int space;
		int done;

		if ((nbytes != 0 && nbytes == fsbytes) ||
		    (nbytes == 0 && obj_size == fsbytes))
			break;

		mtail = NULL;
		loopbytes = 0;
		space = 0;
		done = 0;

		/*
		 * Check the socket state for an ongoing connection,
		 * no errors, and space in the socket buffer.
		 * If space is low, allow for the remainder of the
		 * file to be processed if it fits the socket buffer.
		 * Otherwise block waiting for sufficient space
		 * to proceed, or, if the socket is nonblocking, return
		 * to userland with EAGAIN while reporting how far
		 * we've come.
		 * We wait until the socket buffer has significant free
		 * space to do bulk sends.  This makes good use of file
		 * system read ahead and allows packet segmentation
		 * offloading hardware to take over lots of work.  If
		 * we were not careful here we would send off only one
		 * sfbuf at a time.
		 */
		SOCKBUF_LOCK(&so->so_snd);
		if (so->so_snd.sb_lowat < so->so_snd.sb_hiwat / 2)
			so->so_snd.sb_lowat = so->so_snd.sb_hiwat / 2;
retry_space:
		if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
			error = EPIPE;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto done;
		} else if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto done;
		}
		space = sbspace(&so->so_snd);
		if (space < rem &&
		    (space <= 0 ||
		     space < so->so_snd.sb_lowat)) {
			if (so->so_state & SS_NBIO) {
				SOCKBUF_UNLOCK(&so->so_snd);
				error = EAGAIN;
				goto done;
			}
			/*
			 * sbwait drops the lock while sleeping.
			 * When we loop back to retry_space the
			 * state may have changed and we retest
			 * for it.
			 */
			error = sbwait(&so->so_snd);
			/*
			 * An error from sbwait usually indicates that we've
			 * been interrupted by a signal. If we've sent anything
			 * then return bytes sent, otherwise return the error.
			 */
			if (error != 0) {
				SOCKBUF_UNLOCK(&so->so_snd);
				goto done;
			}
			goto retry_space;
		}
		SOCKBUF_UNLOCK(&so->so_snd);

		/*
		 * Reduce space in the socket buffer by the size of
		 * the header mbuf chain.
		 * hdrlen is set to 0 after the first loop.
		 */
		space -= hdrlen;

		if (vp != NULL) {
			error = vn_lock(vp, LK_SHARED);
			if (error != 0)
				goto done;
			error = VOP_GETATTR(vp, &va, td->td_ucred);
			if (error != 0 || off >= va.va_size) {
				VOP_UNLOCK(vp, 0);
				goto done;
			}
			obj_size = va.va_size;
		}

		/*
		 * Loop and construct maximum sized mbuf chain to be bulk
		 * dumped into socket buffer.
		 */
		while (space > loopbytes) {
			vm_offset_t pgoff;
			struct mbuf *m0;

			/*
			 * Calculate the amount to transfer.
			 * Not to exceed a page, the EOF,
			 * or the passed in nbytes.
			 */
			pgoff = (vm_offset_t)(off & PAGE_MASK);
			rem = obj_size - offset;
			if (nbytes != 0)
				rem = omin(rem, nbytes);
			rem -= fsbytes + loopbytes;
			xfsize = omin(PAGE_SIZE - pgoff, rem);
			xfsize = omin(space - loopbytes, xfsize);
			if (xfsize <= 0) {
				done = 1;	/* all data sent */
				break;
			}

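			/*
			 * A worked example of the clamping above (numbers
			 * purely illustrative): with PAGE_SIZE 4096 and
			 * off = 10000, pgoff = 10000 & 4095 = 1808, so at
			 * most 4096 - 1808 = 2288 bytes can come from this
			 * page; xfsize is then further limited by the bytes
			 * still owed from the file/request (rem) and by the
			 * socket buffer space left in this pass
			 * (space - loopbytes).
			 */
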
			/*
			 * Attempt to look up the page.  Allocate
			 * if not found or wait and loop if busy.
			 */
			if (m != NULL)
				nd = EAGAIN;	/* send what we already got */
			else if ((flags & SF_NODISKIO) != 0)
				nd = EBUSY;
			else
				nd = 0;
			error = sendfile_readpage(obj, vp, nd, off,
			    xfsize, bsize, td, &pg);
			if (error != 0) {
				if (error == EAGAIN)
					error = 0;	/* not a real error */
				break;
			}

			/*
			 * Get a sendfile buf.  When allocating the
			 * first buffer for the mbuf chain, we usually
			 * wait as long as necessary, but this wait
			 * can be interrupted.  For subsequent
			 * buffers, do not sleep, since several
			 * threads might exhaust the buffers and then
			 * deadlock.
			 */
			sf = sf_buf_alloc(pg, (mnw || m != NULL) ? SFB_NOWAIT :
			    SFB_CATCH);
			if (sf == NULL) {
				SFSTAT_INC(sf_allocfail);
				vm_page_lock(pg);
				vm_page_unwire(pg, 0);
				KASSERT(pg->object != NULL,
				    ("%s: object disappeared", __func__));
				vm_page_unlock(pg);
				if (m == NULL)
					error = (mnw ? EAGAIN : EINTR);
				break;
			}

			/*
			 * Get an mbuf and set it up as having
			 * external storage.
			 */
			m0 = m_get((mnw ? M_NOWAIT : M_WAITOK), MT_DATA);
			if (m0 == NULL) {
				error = (mnw ? EAGAIN : ENOBUFS);
				(void)sf_buf_mext(NULL, NULL, sf);
				break;
			}
			if (m_extadd(m0, (caddr_t)sf_buf_kva(sf), PAGE_SIZE,
			    sf_buf_mext, sfs, sf, M_RDONLY, EXT_SFBUF,
			    (mnw ? M_NOWAIT : M_WAITOK)) != 0) {
				error = (mnw ? EAGAIN : ENOBUFS);
				(void)sf_buf_mext(NULL, NULL, sf);
				m_freem(m0);
				break;
			}
			m0->m_data = (char *)sf_buf_kva(sf) + pgoff;
			m0->m_len = xfsize;

			/* Append to mbuf chain. */
			if (mtail != NULL)
				mtail->m_next = m0;
			else if (m != NULL)
				m_last(m)->m_next = m0;
			else
				m = m0;
			mtail = m0;

			/* Keep track of bits processed. */
			loopbytes += xfsize;
			off += xfsize;

			/*
			 * XXX eventually this should be a sfsync
			 * method call!
			 */
			if (sfs != NULL)
				sf_sync_ref(sfs);
		}

		if (vp != NULL)
			VOP_UNLOCK(vp, 0);

		/* Add the buffer chain to the socket buffer. */
		if (m != NULL) {
			int mlen, err;

			mlen = m_length(m, NULL);
			SOCKBUF_LOCK(&so->so_snd);
			if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
				error = EPIPE;
				SOCKBUF_UNLOCK(&so->so_snd);
				goto done;
			}
			SOCKBUF_UNLOCK(&so->so_snd);
			CURVNET_SET(so->so_vnet);
			/* Avoid error aliasing. */
			err = (*so->so_proto->pr_usrreqs->pru_send)
			    (so, 0, m, NULL, NULL, td);
			CURVNET_RESTORE();
			if (err == 0) {
				/*
				 * We need two counters to get the
				 * file offset and nbytes to send
				 * right:
				 * - sbytes contains the total amount
				 *   of bytes sent, including headers.
				 * - fsbytes contains the total amount
				 *   of bytes sent from the file.
				 */
				sbytes += mlen;
				fsbytes += mlen;
				if (hdrlen) {
					fsbytes -= hdrlen;
					hdrlen = 0;
				}
			} else if (error == 0)
				error = err;
			m = NULL;	/* pru_send always consumes */
		}

		/* Quit outer loop on error or when we're done. */
		if (done)
			break;
		if (error != 0)
			goto done;
	}

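	/*
	 * Accounting example for the counters maintained in the loop above
	 * (numbers illustrative): if the first chain handed to pru_send()
	 * carried a 100-byte header plus 4096 bytes of file data, then
	 * mlen = 4196, sbytes advances to 4196, and fsbytes, after the
	 * one-time hdrlen correction, to 4096, so the EOF checks keep
	 * tracking file bytes only while sbytes (reported via *sent)
	 * reflects everything actually queued on the socket.
	 */
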
	/*
	 * Send trailers. Wimp out and use writev(2).
	 */
	if (trl_uio != NULL) {
		sbunlock(&so->so_snd);
		error = kern_writev(td, sockfd, trl_uio);
		if (error == 0)
			sbytes += td->td_retval[0];
		goto out;
	}

done:
	sbunlock(&so->so_snd);
out:
	/*
	 * If there was no error we have to clear td->td_retval[0]
	 * because it may have been set by writev.
	 */
	if (error == 0) {
		td->td_retval[0] = 0;
	}
	if (sent != NULL) {
		(*sent) = sbytes;
	}
	if (obj != NULL)
		vm_object_deallocate(obj);
	if (so)
		fdrop(sock_fp, td);
	if (m)
		m_freem(m);

	if (error == ERESTART)
		error = EINTR;

	return (error);
}
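
/*
 * The userland view of the interface implemented above, as an
 * illustrative sketch (variable names are hypothetical): header and
 * trailer iovecs arrive here as hdr_uio/trl_uio via the sf_hdtr argument
 * of sendfile(2), and the total byte count, headers included, is reported
 * through *sent.
 *
 *	struct iovec hdr = { .iov_base = resp_hdr, .iov_len = hdr_len };
 *	struct sf_hdtr hdtr = { .headers = &hdr, .hdr_cnt = 1,
 *	    .trailers = NULL, .trl_cnt = 0 };
 *	off_t sbytes;
 *	if (sendfile(file_fd, sock_fd, 0, 0, &hdtr, &sbytes, 0) == -1 &&
 *	    errno == EAGAIN)
 *		... sbytes bytes were already queued before blocking ...
 */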

/*
 * SCTP syscalls.
 * Functionality only compiled in if SCTP is defined in the kernel Makefile,
 * otherwise all return EOPNOTSUPP.
 * XXX: We should make this loadable one day.
 */
int
sys_sctp_peeloff(td, uap)
	struct thread *td;
	struct sctp_peeloff_args /* {
		int sd;
		caddr_t name;
	} */ *uap;
{
#if (defined(INET) || defined(INET6)) && defined(SCTP)
	struct file *nfp = NULL;
	struct socket *head, *so;
	cap_rights_t rights;
	u_int fflag;
	int error, fd;

	AUDIT_ARG_FD(uap->sd);
	error = fgetsock(td, uap->sd, cap_rights_init(&rights, CAP_PEELOFF),
	    &head, &fflag);
	if (error != 0)
		goto done2;
	if (head->so_proto->pr_protocol != IPPROTO_SCTP) {
		error = EOPNOTSUPP;
		goto done;
	}
	error = sctp_can_peel_off(head, (sctp_assoc_t)uap->name);
	if (error != 0)
		goto done;
	/*
	 * At this point we know we have an assoc to pull off;
	 * proceed to get the fd set up.  This may block but
	 * that is OK.
	 */

	error = falloc(td, &nfp, &fd, 0);
	if (error != 0)
		goto done;
	td->td_retval[0] = fd;

	CURVNET_SET(head->so_vnet);
	so = sonewconn(head, SS_ISCONNECTED);
	if (so == NULL) {
		error = ENOMEM;
		goto noconnection;
	}
	/*
	 * Before changing the flags on the socket, we have to bump the
	 * reference count.  Otherwise, if the protocol calls sofree(),
	 * the socket will be released due to a zero refcount.
	 */
	SOCK_LOCK(so);
	soref(so);	/* file descriptor reference */
	SOCK_UNLOCK(so);

	ACCEPT_LOCK();

	TAILQ_REMOVE(&head->so_comp, so, so_list);
	head->so_qlen--;
	so->so_state |= (head->so_state & SS_NBIO);
	so->so_state &= ~SS_NOFDREF;
	so->so_qstate &= ~SQ_COMP;
	so->so_head = NULL;
	ACCEPT_UNLOCK();
	finit(nfp, fflag, DTYPE_SOCKET, so, &socketops);
	error = sctp_do_peeloff(head, so, (sctp_assoc_t)uap->name);
	if (error != 0)
		goto noconnection;
	if (head->so_sigio != NULL)
		fsetown(fgetown(&head->so_sigio), &so->so_sigio);

noconnection:
	/*
	 * Close the new descriptor, assuming someone hasn't ripped it
	 * out from under us.
	 */
	if (error != 0)
		fdclose(td->td_proc->p_fd, nfp, fd, td);

	/*
	 * Release explicitly held references before returning.
	 */
	CURVNET_RESTORE();
done:
	if (nfp != NULL)
		fdrop(nfp, td);
	fputsock(head);
done2:
	return (error);
#else /* SCTP */
	return (EOPNOTSUPP);
#endif /* SCTP */
}

int
sys_sctp_generic_sendmsg(td, uap)
	struct thread *td;
	struct sctp_generic_sendmsg_args /* {
		int sd,
		caddr_t msg,
		int mlen,
		caddr_t to,
		__socklen_t tolen,
		struct sctp_sndrcvinfo *sinfo,
		int flags
	} */ *uap;
{
#if (defined(INET) || defined(INET6)) && defined(SCTP)
	struct sctp_sndrcvinfo sinfo, *u_sinfo = NULL;
	struct socket *so;
	struct file *fp = NULL;
	struct sockaddr *to = NULL;
#ifdef KTRACE
	struct uio *ktruio = NULL;
#endif
	struct uio auio;
	struct iovec iov[1];
	cap_rights_t rights;
	int error = 0, len;

	if (uap->sinfo != NULL) {
		error = copyin(uap->sinfo, &sinfo, sizeof (sinfo));
		if (error != 0)
			return (error);
		u_sinfo = &sinfo;
	}

	cap_rights_init(&rights, CAP_SEND);
	if (uap->tolen != 0) {
		error = getsockaddr(&to, uap->to, uap->tolen);
		if (error != 0) {
			to = NULL;
			goto sctp_bad2;
		}
		cap_rights_set(&rights, CAP_CONNECT);
	}

	AUDIT_ARG_FD(uap->sd);
	error = getsock_cap(td->td_proc->p_fd, uap->sd, &rights, &fp, NULL);
	if (error != 0)
		goto sctp_bad;
#ifdef KTRACE
	if (to && (KTRPOINT(td, KTR_STRUCT)))
		ktrsockaddr(to);
#endif

	iov[0].iov_base = uap->msg;
	iov[0].iov_len = uap->mlen;

	so = (struct socket *)fp->f_data;
	if (so->so_proto->pr_protocol != IPPROTO_SCTP) {
		error = EOPNOTSUPP;
		goto sctp_bad;
	}
#ifdef MAC
	error = mac_socket_check_send(td->td_ucred, so);
	if (error != 0)
		goto sctp_bad;
#endif /* MAC */

	auio.uio_iov = iov;
	auio.uio_iovcnt = 1;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_td = td;
	auio.uio_offset = 0;	/* XXX */
	auio.uio_resid = 0;
	len = auio.uio_resid = uap->mlen;
	CURVNET_SET(so->so_vnet);
	error = sctp_lower_sosend(so, to, &auio, (struct mbuf *)NULL,
	    (struct mbuf *)NULL, uap->flags, u_sinfo, td);
	CURVNET_RESTORE();
	if (error != 0) {
		if (auio.uio_resid != len && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		/* Generation of SIGPIPE can be controlled per socket. */
		if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
		    !(uap->flags & MSG_NOSIGNAL)) {
			PROC_LOCK(td->td_proc);
			tdsignal(td, SIGPIPE);
			PROC_UNLOCK(td->td_proc);
		}
	}
	if (error == 0)
		td->td_retval[0] = len - auio.uio_resid;
#ifdef KTRACE
	if (ktruio != NULL) {
		ktruio->uio_resid = td->td_retval[0];
		ktrgenio(uap->sd, UIO_WRITE, ktruio, error);
	}
#endif /* KTRACE */
sctp_bad:
	if (fp != NULL)
		fdrop(fp, td);
sctp_bad2:
	free(to, M_SONAME);
	return (error);
#else /* SCTP */
	return (EOPNOTSUPP);
#endif /* SCTP */
}

int
sys_sctp_generic_sendmsg_iov(td, uap)
	struct thread *td;
	struct sctp_generic_sendmsg_iov_args /* {
		int sd,
		struct iovec *iov,
		int iovlen,
		caddr_t to,
		__socklen_t tolen,
		struct sctp_sndrcvinfo *sinfo,
		int flags
	} */ *uap;
{
#if (defined(INET) || defined(INET6)) && defined(SCTP)
	struct sctp_sndrcvinfo sinfo, *u_sinfo = NULL;
	struct socket *so;
	struct file *fp = NULL;
	struct sockaddr *to = NULL;
#ifdef KTRACE
	struct uio *ktruio = NULL;
#endif
	struct uio auio;
	struct iovec *iov, *tiov;
	cap_rights_t rights;
	ssize_t len;
	int error, i;

	if (uap->sinfo != NULL) {
		error = copyin(uap->sinfo, &sinfo, sizeof (sinfo));
		if (error != 0)
			return (error);
		u_sinfo = &sinfo;
	}
	cap_rights_init(&rights, CAP_SEND);
	if (uap->tolen != 0) {
		error = getsockaddr(&to, uap->to, uap->tolen);
		if (error != 0) {
			to = NULL;
			goto sctp_bad2;
		}
		cap_rights_set(&rights, CAP_CONNECT);
	}

	AUDIT_ARG_FD(uap->sd);
	error = getsock_cap(td->td_proc->p_fd, uap->sd, &rights, &fp, NULL);
	if (error != 0)
		goto sctp_bad1;

#ifdef COMPAT_FREEBSD32
	if (SV_CURPROC_FLAG(SV_ILP32))
		error = freebsd32_copyiniov((struct iovec32 *)uap->iov,
		    uap->iovlen, &iov, EMSGSIZE);
	else
#endif
		error = copyiniov(uap->iov, uap->iovlen, &iov, EMSGSIZE);
	if (error != 0)
		goto sctp_bad1;
#ifdef KTRACE
	if (to && (KTRPOINT(td, KTR_STRUCT)))
		ktrsockaddr(to);
#endif

	so = (struct socket *)fp->f_data;
	if (so->so_proto->pr_protocol != IPPROTO_SCTP) {
		error = EOPNOTSUPP;
		goto sctp_bad;
	}
#ifdef MAC
	error = mac_socket_check_send(td->td_ucred, so);
	if (error != 0)
		goto sctp_bad;
#endif /* MAC */

	auio.uio_iov = iov;
	auio.uio_iovcnt = uap->iovlen;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_td = td;
	auio.uio_offset = 0;	/* XXX */
	auio.uio_resid = 0;
	tiov = iov;
	for (i = 0; i < uap->iovlen; i++, tiov++) {
		if ((auio.uio_resid += tiov->iov_len) < 0) {
			error = EINVAL;
			goto sctp_bad;
		}
	}
	len = auio.uio_resid;
	CURVNET_SET(so->so_vnet);
	error = sctp_lower_sosend(so, to, &auio,
	    (struct mbuf *)NULL, (struct mbuf *)NULL,
	    uap->flags, u_sinfo, td);
	CURVNET_RESTORE();
	if (error != 0) {
		if (auio.uio_resid != len && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		/* Generation of SIGPIPE can be controlled per socket */
		if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
		    !(uap->flags & MSG_NOSIGNAL)) {
			PROC_LOCK(td->td_proc);
			tdsignal(td, SIGPIPE);
			PROC_UNLOCK(td->td_proc);
		}
	}
	if (error == 0)
		td->td_retval[0] = len - auio.uio_resid;
#ifdef KTRACE
	if (ktruio != NULL) {
		ktruio->uio_resid = td->td_retval[0];
		ktrgenio(uap->sd, UIO_WRITE, ktruio, error);
	}
#endif /* KTRACE */
sctp_bad:
	free(iov, M_IOV);
sctp_bad1:
	if (fp != NULL)
		fdrop(fp, td);
sctp_bad2:
	free(to, M_SONAME);
	return (error);
#else /* SCTP */
	return (EOPNOTSUPP);
#endif /* SCTP */
}

int
sys_sctp_generic_recvmsg(td, uap)
	struct thread *td;
	struct sctp_generic_recvmsg_args /* {
		int sd,
		struct iovec *iov,
		int iovlen,
		struct sockaddr *from,
		__socklen_t *fromlenaddr,
		struct sctp_sndrcvinfo *sinfo,
		int *msg_flags
	} */ *uap;
{
#if (defined(INET) || defined(INET6)) && defined(SCTP)
	uint8_t sockbufstore[256];
	struct uio auio;
	struct iovec *iov, *tiov;
	struct sctp_sndrcvinfo sinfo;
	struct socket *so;
	struct file *fp = NULL;
	struct sockaddr *fromsa;
	cap_rights_t rights;
#ifdef KTRACE
	struct uio *ktruio = NULL;
#endif
	ssize_t len;
	int error, fromlen, i, msg_flags;

	AUDIT_ARG_FD(uap->sd);
	error = getsock_cap(td->td_proc->p_fd, uap->sd,
	    cap_rights_init(&rights, CAP_RECV), &fp, NULL);
	if (error != 0)
		return (error);
#ifdef COMPAT_FREEBSD32
	if (SV_CURPROC_FLAG(SV_ILP32))
		error = freebsd32_copyiniov((struct iovec32 *)uap->iov,
		    uap->iovlen, &iov, EMSGSIZE);
	else
#endif
		error = copyiniov(uap->iov, uap->iovlen, &iov, EMSGSIZE);
	if (error != 0)
		goto out1;

	so = fp->f_data;
	if (so->so_proto->pr_protocol != IPPROTO_SCTP) {
		error = EOPNOTSUPP;
		goto out;
	}
#ifdef MAC
	error = mac_socket_check_receive(td->td_ucred, so);
	if (error != 0)
		goto out;
#endif /* MAC */

	if (uap->fromlenaddr != NULL) {
		error = copyin(uap->fromlenaddr, &fromlen, sizeof (fromlen));
		if (error != 0)
			goto out;
	} else {
		fromlen = 0;
	}
	if (uap->msg_flags) {
		error = copyin(uap->msg_flags, &msg_flags, sizeof (int));
		if (error != 0)
			goto out;
	} else {
		msg_flags = 0;
	}
	auio.uio_iov = iov;
	auio.uio_iovcnt = uap->iovlen;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = td;
	auio.uio_offset = 0;	/* XXX */
	auio.uio_resid = 0;
	tiov = iov;
	for (i = 0; i < uap->iovlen; i++, tiov++) {
		if ((auio.uio_resid += tiov->iov_len) < 0) {
			error = EINVAL;
			goto out;
		}
	}
	len = auio.uio_resid;
	fromsa = (struct sockaddr *)sockbufstore;

#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO))
		ktruio = cloneuio(&auio);
#endif /* KTRACE */
	memset(&sinfo, 0, sizeof(struct sctp_sndrcvinfo));
	CURVNET_SET(so->so_vnet);
	error = sctp_sorecvmsg(so, &auio, (struct mbuf **)NULL,
	    fromsa, fromlen, &msg_flags,
	    (struct sctp_sndrcvinfo *)&sinfo, 1);
	CURVNET_RESTORE();
	if (error != 0) {
		if (auio.uio_resid != len && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	} else {
		if (uap->sinfo)
			error = copyout(&sinfo, uap->sinfo, sizeof (sinfo));
	}
#ifdef KTRACE
	if (ktruio != NULL) {
		ktruio->uio_resid = len - auio.uio_resid;
		ktrgenio(uap->sd, UIO_READ, ktruio, error);
	}
#endif /* KTRACE */
	if (error != 0)
		goto out;
	td->td_retval[0] = len - auio.uio_resid;

	if (fromlen && uap->from) {
		len = fromlen;
		if (len <= 0 || fromsa == 0)
			len = 0;
		else {
			len = MIN(len, fromsa->sa_len);
			error = copyout(fromsa, uap->from, (size_t)len);
			if (error != 0)
				goto out;
		}
		error = copyout(&len, uap->fromlenaddr, sizeof (socklen_t));
		if (error != 0)
			goto out;
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_STRUCT))
		ktrsockaddr(fromsa);
#endif
	if (uap->msg_flags) {
		error = copyout(&msg_flags, uap->msg_flags, sizeof (int));
		if (error != 0)
			goto out;
	}
out:
	free(iov, M_IOV);
out1:
	if (fp != NULL)
		fdrop(fp, td);

	return (error);
#else /* SCTP */
	return (EOPNOTSUPP);
#endif /* SCTP */
}
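
/*
 * The SCTP syscalls above are normally reached through libc wrappers such
 * as sctp_peeloff(2), sctp_sendmsg(3) and sctp_recvmsg(3).  A rough sketch
 * of the receive path served by sys_sctp_generic_recvmsg() (buffer sizes
 * and variable names are illustrative only):
 *
 *	char buf[2048];
 *	struct sockaddr_in from;
 *	socklen_t fromlen = sizeof(from);
 *	struct sctp_sndrcvinfo sinfo;
 *	int msg_flags = 0;
 *	ssize_t n = sctp_recvmsg(sd, buf, sizeof(buf),
 *	    (struct sockaddr *)&from, &fromlen, &sinfo, &msg_flags);
 *
 * On return, sinfo describes the stream and association the data arrived
 * on, and msg_flags carries flags such as MSG_EOR or MSG_NOTIFICATION.
 */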