1 /*- 2 * Copyright (c) 2013-2015 Gleb Smirnoff <glebius@FreeBSD.org> 3 * Copyright (c) 1998, David Greenman. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. Neither the name of the University nor the names of its contributors 14 * may be used to endorse or promote products derived from this software 15 * without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include "opt_kern_tls.h" 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/capsicum.h> 38 #include <sys/kernel.h> 39 #include <sys/lock.h> 40 #include <sys/ktls.h> 41 #include <sys/mutex.h> 42 #include <sys/malloc.h> 43 #include <sys/mman.h> 44 #include <sys/mount.h> 45 #include <sys/mbuf.h> 46 #include <sys/proc.h> 47 #include <sys/protosw.h> 48 #include <sys/rwlock.h> 49 #include <sys/sf_buf.h> 50 #include <sys/socket.h> 51 #include <sys/socketvar.h> 52 #include <sys/syscallsubr.h> 53 #include <sys/sysctl.h> 54 #include <sys/sysproto.h> 55 #include <sys/vnode.h> 56 57 #include <net/vnet.h> 58 #include <netinet/in.h> 59 #include <netinet/tcp.h> 60 61 #include <security/audit/audit.h> 62 #include <security/mac/mac_framework.h> 63 64 #include <vm/vm.h> 65 #include <vm/vm_object.h> 66 #include <vm/vm_pager.h> 67 68 static MALLOC_DEFINE(M_SENDFILE, "sendfile", "sendfile dynamic memory"); 69 70 #define EXT_FLAG_SYNC EXT_FLAG_VENDOR1 71 #define EXT_FLAG_NOCACHE EXT_FLAG_VENDOR2 72 #define EXT_FLAG_CACHE_LAST EXT_FLAG_VENDOR3 73 74 /* 75 * Structure describing a single sendfile(2) I/O, which may consist of 76 * several underlying pager I/Os. 77 * 78 * The syscall context allocates the structure and initializes 'nios' 79 * to 1. As sendfile_swapin() runs through pages and starts asynchronous 80 * paging operations, it increments 'nios'. 81 * 82 * Every I/O completion calls sendfile_iodone(), which decrements the 'nios', 83 * and the syscall also calls sendfile_iodone() after allocating all mbufs, 84 * linking them and sending to socket. Whoever reaches zero 'nios' is 85 * responsible to * call pru_ready on the socket, to notify it of readyness 86 * of the data. 87 */ 88 struct sf_io { 89 volatile u_int nios; 90 u_int error; 91 int npages; 92 struct socket *so; 93 struct mbuf *m; 94 vm_object_t obj; 95 vm_pindex_t pindex0; 96 #ifdef KERN_TLS 97 struct ktls_session *tls; 98 #endif 99 vm_page_t pa[]; 100 }; 101 102 /* 103 * Structure used to track requests with SF_SYNC flag. 104 */ 105 struct sendfile_sync { 106 struct mtx mtx; 107 struct cv cv; 108 unsigned count; 109 }; 110 111 counter_u64_t sfstat[sizeof(struct sfstat) / sizeof(uint64_t)]; 112 113 static void 114 sfstat_init(const void *unused) 115 { 116 117 COUNTER_ARRAY_ALLOC(sfstat, sizeof(struct sfstat) / sizeof(uint64_t), 118 M_WAITOK); 119 } 120 SYSINIT(sfstat, SI_SUB_MBUF, SI_ORDER_FIRST, sfstat_init, NULL); 121 122 static int 123 sfstat_sysctl(SYSCTL_HANDLER_ARGS) 124 { 125 struct sfstat s; 126 127 COUNTER_ARRAY_COPY(sfstat, &s, sizeof(s) / sizeof(uint64_t)); 128 if (req->newptr) 129 COUNTER_ARRAY_ZERO(sfstat, sizeof(s) / sizeof(uint64_t)); 130 return (SYSCTL_OUT(req, &s, sizeof(s))); 131 } 132 SYSCTL_PROC(_kern_ipc, OID_AUTO, sfstat, 133 CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0, 134 sfstat_sysctl, "I", 135 "sendfile statistics"); 136 137 static void 138 sendfile_free_mext(struct mbuf *m) 139 { 140 struct sf_buf *sf; 141 vm_page_t pg; 142 int flags; 143 144 KASSERT(m->m_flags & M_EXT && m->m_ext.ext_type == EXT_SFBUF, 145 ("%s: m %p !M_EXT or !EXT_SFBUF", __func__, m)); 146 147 sf = m->m_ext.ext_arg1; 148 pg = sf_buf_page(sf); 149 flags = (m->m_ext.ext_flags & EXT_FLAG_NOCACHE) != 0 ? VPR_TRYFREE : 0; 150 151 sf_buf_free(sf); 152 vm_page_release(pg, flags); 153 154 if (m->m_ext.ext_flags & EXT_FLAG_SYNC) { 155 struct sendfile_sync *sfs = m->m_ext.ext_arg2; 156 157 mtx_lock(&sfs->mtx); 158 KASSERT(sfs->count > 0, ("Sendfile sync botchup count == 0")); 159 if (--sfs->count == 0) 160 cv_signal(&sfs->cv); 161 mtx_unlock(&sfs->mtx); 162 } 163 } 164 165 static void 166 sendfile_free_mext_pg(struct mbuf *m) 167 { 168 struct mbuf_ext_pgs *ext_pgs; 169 vm_page_t pg; 170 int flags, i; 171 bool cache_last; 172 173 KASSERT(m->m_flags & M_EXT && m->m_ext.ext_type == EXT_PGS, 174 ("%s: m %p !M_EXT or !EXT_PGS", __func__, m)); 175 176 cache_last = m->m_ext.ext_flags & EXT_FLAG_CACHE_LAST; 177 ext_pgs = m->m_ext.ext_pgs; 178 flags = (m->m_ext.ext_flags & EXT_FLAG_NOCACHE) != 0 ? VPR_TRYFREE : 0; 179 180 for (i = 0; i < ext_pgs->npgs; i++) { 181 if (cache_last && i == ext_pgs->npgs - 1) 182 flags = 0; 183 pg = PHYS_TO_VM_PAGE(ext_pgs->pa[i]); 184 vm_page_release(pg, flags); 185 } 186 187 if (m->m_ext.ext_flags & EXT_FLAG_SYNC) { 188 struct sendfile_sync *sfs = m->m_ext.ext_arg2; 189 190 mtx_lock(&sfs->mtx); 191 KASSERT(sfs->count > 0, ("Sendfile sync botchup count == 0")); 192 if (--sfs->count == 0) 193 cv_signal(&sfs->cv); 194 mtx_unlock(&sfs->mtx); 195 } 196 } 197 198 /* 199 * Helper function to calculate how much data to put into page i of n. 200 * Only first and last pages are special. 201 */ 202 static inline off_t 203 xfsize(int i, int n, off_t off, off_t len) 204 { 205 206 if (i == 0) 207 return (omin(PAGE_SIZE - (off & PAGE_MASK), len)); 208 209 if (i == n - 1 && ((off + len) & PAGE_MASK) > 0) 210 return ((off + len) & PAGE_MASK); 211 212 return (PAGE_SIZE); 213 } 214 215 /* 216 * Helper function to get offset within object for i page. 217 */ 218 static inline vm_ooffset_t 219 vmoff(int i, off_t off) 220 { 221 222 if (i == 0) 223 return ((vm_ooffset_t)off); 224 225 return (trunc_page(off + i * PAGE_SIZE)); 226 } 227 228 /* 229 * Helper function used when allocation of a page or sf_buf failed. 230 * Pretend as if we don't have enough space, subtract xfsize() of 231 * all pages that failed. 232 */ 233 static inline void 234 fixspace(int old, int new, off_t off, int *space) 235 { 236 237 KASSERT(old > new, ("%s: old %d new %d", __func__, old, new)); 238 239 /* Subtract last one. */ 240 *space -= xfsize(old - 1, old, off, *space); 241 old--; 242 243 if (new == old) 244 /* There was only one page. */ 245 return; 246 247 /* Subtract first one. */ 248 if (new == 0) { 249 *space -= xfsize(0, old, off, *space); 250 new++; 251 } 252 253 /* Rest of pages are full sized. */ 254 *space -= (old - new) * PAGE_SIZE; 255 256 KASSERT(*space >= 0, ("%s: space went backwards", __func__)); 257 } 258 259 /* 260 * Wait for all in-flight ios to complete, we must not unwire pages 261 * under them. 262 */ 263 static void 264 sendfile_iowait(struct sf_io *sfio, const char *wmesg) 265 { 266 while (atomic_load_int(&sfio->nios) != 1) 267 pause(wmesg, 1); 268 } 269 270 /* 271 * I/O completion callback. 272 */ 273 static void 274 sendfile_iodone(void *arg, vm_page_t *pa, int count, int error) 275 { 276 struct sf_io *sfio = arg; 277 struct socket *so; 278 int i; 279 280 if (error != 0) { 281 sfio->error = error; 282 /* 283 * Restore of the pg[] elements is done by 284 * sendfile_swapin(). 285 */ 286 } else { 287 /* 288 * Restore the valid page pointers. They are already 289 * unbusied, but still wired. For error != 0 case, 290 * sendfile_swapin() handles unbusy. 291 * 292 * XXXKIB since pages are only wired, and we do not 293 * own the object lock, other users might have 294 * invalidated them in meantime. Similarly, after we 295 * unbusied the swapped-in pages, they can become 296 * invalid under us. 297 */ 298 for (i = 0; i < count; i++) { 299 if (pa[i] == bogus_page) { 300 pa[i] = vm_page_relookup(sfio->obj, 301 sfio->pindex0 + i + (sfio->pa - pa)); 302 KASSERT(pa[i] != NULL, 303 ("%s: page %p[%d] disappeared", 304 __func__, pa, i)); 305 } else { 306 vm_page_xunbusy_unchecked(pa[i]); 307 } 308 } 309 } 310 311 if (!refcount_release(&sfio->nios)) 312 return; 313 314 vm_object_pip_wakeup(sfio->obj); 315 316 if (sfio->m == NULL) { 317 /* 318 * Either I/O operation failed, or we failed to allocate 319 * buffers, or we bailed out on first busy page, or we 320 * succeeded filling the request without any I/Os. Anyway, 321 * pru_send hadn't been executed - nothing had been sent 322 * to the socket yet. 323 */ 324 MPASS((curthread->td_pflags & TDP_KTHREAD) == 0); 325 free(sfio, M_SENDFILE); 326 return; 327 } 328 329 #if defined(KERN_TLS) && defined(INVARIANTS) 330 if ((sfio->m->m_flags & M_EXT) != 0 && 331 sfio->m->m_ext.ext_type == EXT_PGS) 332 KASSERT(sfio->tls == sfio->m->m_ext.ext_pgs->tls, 333 ("TLS session mismatch")); 334 else 335 KASSERT(sfio->tls == NULL, 336 ("non-ext_pgs mbuf with TLS session")); 337 #endif 338 so = sfio->so; 339 CURVNET_SET(so->so_vnet); 340 if (__predict_false(sfio->error)) { 341 /* 342 * I/O operation failed. The state of data in the socket 343 * is now inconsistent, and all what we can do is to tear 344 * it down. Protocol abort method would tear down protocol 345 * state, free all ready mbufs and detach not ready ones. 346 * We will free the mbufs corresponding to this I/O manually. 347 * 348 * The socket would be marked with EIO and made available 349 * for read, so that application receives EIO on next 350 * syscall and eventually closes the socket. 351 */ 352 so->so_proto->pr_usrreqs->pru_abort(so); 353 so->so_error = EIO; 354 355 mb_free_notready(sfio->m, sfio->npages); 356 #ifdef KERN_TLS 357 } else if (sfio->tls != NULL && sfio->tls->mode == TCP_TLS_MODE_SW) { 358 /* 359 * I/O operation is complete, but we still need to 360 * encrypt. We cannot do this in the interrupt thread 361 * of the disk controller, so forward the mbufs to a 362 * different thread. 363 * 364 * Donate the socket reference from sfio to rather 365 * than explicitly invoking soref(). 366 */ 367 ktls_enqueue(sfio->m, so, sfio->npages); 368 goto out_with_ref; 369 #endif 370 } else 371 (void)(so->so_proto->pr_usrreqs->pru_ready)(so, sfio->m, 372 sfio->npages); 373 374 SOCK_LOCK(so); 375 sorele(so); 376 #ifdef KERN_TLS 377 out_with_ref: 378 #endif 379 CURVNET_RESTORE(); 380 free(sfio, M_SENDFILE); 381 } 382 383 /* 384 * Iterate through pages vector and request paging for non-valid pages. 385 */ 386 static int 387 sendfile_swapin(vm_object_t obj, struct sf_io *sfio, int *nios, off_t off, 388 off_t len, int npages, int rhpages, int flags) 389 { 390 vm_page_t *pa; 391 int a, count, count1, grabbed, i, j, rv; 392 393 pa = sfio->pa; 394 *nios = 0; 395 flags = (flags & SF_NODISKIO) ? VM_ALLOC_NOWAIT : 0; 396 sfio->pindex0 = OFF_TO_IDX(off); 397 398 /* 399 * First grab all the pages and wire them. Note that we grab 400 * only required pages. Readahead pages are dealt with later. 401 */ 402 grabbed = vm_page_grab_pages_unlocked(obj, OFF_TO_IDX(off), 403 VM_ALLOC_NORMAL | VM_ALLOC_WIRED | flags, pa, npages); 404 if (grabbed < npages) { 405 for (int i = grabbed; i < npages; i++) 406 pa[i] = NULL; 407 npages = grabbed; 408 rhpages = 0; 409 } 410 411 for (i = 0; i < npages;) { 412 /* Skip valid pages. */ 413 if (vm_page_is_valid(pa[i], vmoff(i, off) & PAGE_MASK, 414 xfsize(i, npages, off, len))) { 415 vm_page_xunbusy(pa[i]); 416 SFSTAT_INC(sf_pages_valid); 417 i++; 418 continue; 419 } 420 421 /* 422 * Next page is invalid. Check if it belongs to pager. It 423 * may not be there, which is a regular situation for shmem 424 * pager. For vnode pager this happens only in case of 425 * a sparse file. 426 * 427 * Important feature of vm_pager_has_page() is the hint 428 * stored in 'a', about how many pages we can pagein after 429 * this page in a single I/O. 430 */ 431 VM_OBJECT_RLOCK(obj); 432 if (!vm_pager_has_page(obj, OFF_TO_IDX(vmoff(i, off)), NULL, 433 &a)) { 434 VM_OBJECT_RUNLOCK(obj); 435 pmap_zero_page(pa[i]); 436 vm_page_valid(pa[i]); 437 MPASS(pa[i]->dirty == 0); 438 vm_page_xunbusy(pa[i]); 439 i++; 440 continue; 441 } 442 VM_OBJECT_RUNLOCK(obj); 443 444 /* 445 * We want to pagein as many pages as possible, limited only 446 * by the 'a' hint and actual request. 447 */ 448 count = min(a + 1, npages - i); 449 450 /* 451 * We should not pagein into a valid page because 452 * there might be still unfinished write tracked by 453 * e.g. a buffer, thus we substitute any valid pages 454 * with the bogus one. 455 * 456 * We must not leave around xbusy pages which are not 457 * part of the run passed to vm_pager_getpages(), 458 * otherwise pager might deadlock waiting for the busy 459 * status of the page, e.g. if it constitues the 460 * buffer needed to validate other page. 461 * 462 * First trim the end of the run consisting of the 463 * valid pages, then replace the rest of the valid 464 * with bogus. 465 */ 466 count1 = count; 467 for (j = i + count - 1; j > i; j--) { 468 if (vm_page_is_valid(pa[j], vmoff(j, off) & PAGE_MASK, 469 xfsize(j, npages, off, len))) { 470 vm_page_xunbusy(pa[j]); 471 SFSTAT_INC(sf_pages_valid); 472 count--; 473 } else { 474 break; 475 } 476 } 477 478 /* 479 * The last page in the run pa[i + count - 1] is 480 * guaranteed to be invalid by the trim above, so it 481 * is not replaced with bogus, thus -1 in the loop end 482 * condition. 483 */ 484 MPASS(pa[i + count - 1]->valid != VM_PAGE_BITS_ALL); 485 for (j = i + 1; j < i + count - 1; j++) { 486 if (vm_page_is_valid(pa[j], vmoff(j, off) & PAGE_MASK, 487 xfsize(j, npages, off, len))) { 488 vm_page_xunbusy(pa[j]); 489 SFSTAT_INC(sf_pages_valid); 490 SFSTAT_INC(sf_pages_bogus); 491 pa[j] = bogus_page; 492 } 493 } 494 495 refcount_acquire(&sfio->nios); 496 rv = vm_pager_get_pages_async(obj, pa + i, count, NULL, 497 i + count == npages ? &rhpages : NULL, 498 &sendfile_iodone, sfio); 499 if (__predict_false(rv != VM_PAGER_OK)) { 500 sendfile_iowait(sfio, "sferrio"); 501 502 /* 503 * Perform full pages recovery before returning EIO. 504 * Pages from 0 to npages are wired. 505 * Pages from (i + 1) to (i + count - 1) may be 506 * substituted to bogus page, and not busied. 507 * Pages from (i + count) to (i + count1 - 1) are 508 * not busied. 509 * Rest of the pages from i to npages are busied. 510 */ 511 for (j = 0; j < npages; j++) { 512 if (j >= i + count && j < i + count1) 513 ; 514 else if (j > i && j < i + count - 1 && 515 pa[j] == bogus_page) 516 pa[j] = vm_page_relookup(obj, 517 OFF_TO_IDX(vmoff(j, off))); 518 else if (j >= i) 519 vm_page_xunbusy(pa[j]); 520 KASSERT(pa[j] != NULL && pa[j] != bogus_page, 521 ("%s: page %p[%d] I/O recovery failure", 522 __func__, pa, j)); 523 vm_page_unwire(pa[j], PQ_INACTIVE); 524 } 525 return (EIO); 526 } 527 528 SFSTAT_INC(sf_iocnt); 529 SFSTAT_ADD(sf_pages_read, count); 530 if (i + count == npages) 531 SFSTAT_ADD(sf_rhpages_read, rhpages); 532 533 i += count1; 534 (*nios)++; 535 } 536 537 if (*nios == 0 && npages != 0) 538 SFSTAT_INC(sf_noiocnt); 539 540 return (0); 541 } 542 543 static int 544 sendfile_getobj(struct thread *td, struct file *fp, vm_object_t *obj_res, 545 struct vnode **vp_res, struct shmfd **shmfd_res, off_t *obj_size, 546 int *bsize) 547 { 548 struct vattr va; 549 vm_object_t obj; 550 struct vnode *vp; 551 struct shmfd *shmfd; 552 int error; 553 554 vp = *vp_res = NULL; 555 obj = NULL; 556 shmfd = *shmfd_res = NULL; 557 *bsize = 0; 558 559 /* 560 * The file descriptor must be a regular file and have a 561 * backing VM object. 562 */ 563 if (fp->f_type == DTYPE_VNODE) { 564 vp = fp->f_vnode; 565 vn_lock(vp, LK_SHARED | LK_RETRY); 566 if (vp->v_type != VREG) { 567 error = EINVAL; 568 goto out; 569 } 570 *bsize = vp->v_mount->mnt_stat.f_iosize; 571 error = VOP_GETATTR(vp, &va, td->td_ucred); 572 if (error != 0) 573 goto out; 574 *obj_size = va.va_size; 575 obj = vp->v_object; 576 if (obj == NULL) { 577 error = EINVAL; 578 goto out; 579 } 580 } else if (fp->f_type == DTYPE_SHM) { 581 error = 0; 582 shmfd = fp->f_data; 583 obj = shmfd->shm_object; 584 *obj_size = shmfd->shm_size; 585 } else { 586 error = EINVAL; 587 goto out; 588 } 589 590 VM_OBJECT_WLOCK(obj); 591 if ((obj->flags & OBJ_DEAD) != 0) { 592 VM_OBJECT_WUNLOCK(obj); 593 error = EBADF; 594 goto out; 595 } 596 597 /* 598 * Temporarily increase the backing VM object's reference 599 * count so that a forced reclamation of its vnode does not 600 * immediately destroy it. 601 */ 602 vm_object_reference_locked(obj); 603 VM_OBJECT_WUNLOCK(obj); 604 *obj_res = obj; 605 *vp_res = vp; 606 *shmfd_res = shmfd; 607 608 out: 609 if (vp != NULL) 610 VOP_UNLOCK(vp); 611 return (error); 612 } 613 614 static int 615 sendfile_getsock(struct thread *td, int s, struct file **sock_fp, 616 struct socket **so) 617 { 618 int error; 619 620 *sock_fp = NULL; 621 *so = NULL; 622 623 /* 624 * The socket must be a stream socket and connected. 625 */ 626 error = getsock_cap(td, s, &cap_send_rights, 627 sock_fp, NULL, NULL); 628 if (error != 0) 629 return (error); 630 *so = (*sock_fp)->f_data; 631 if ((*so)->so_type != SOCK_STREAM) 632 return (EINVAL); 633 /* 634 * SCTP one-to-one style sockets currently don't work with 635 * sendfile(). So indicate EINVAL for now. 636 */ 637 if ((*so)->so_proto->pr_protocol == IPPROTO_SCTP) 638 return (EINVAL); 639 if (SOLISTENING(*so)) 640 return (ENOTCONN); 641 return (0); 642 } 643 644 int 645 vn_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio, 646 struct uio *trl_uio, off_t offset, size_t nbytes, off_t *sent, int flags, 647 struct thread *td) 648 { 649 struct file *sock_fp; 650 struct vnode *vp; 651 struct vm_object *obj; 652 struct socket *so; 653 #ifdef KERN_TLS 654 struct ktls_session *tls; 655 #endif 656 struct mbuf_ext_pgs *ext_pgs; 657 struct mbuf *m, *mh, *mhtail; 658 struct sf_buf *sf; 659 struct shmfd *shmfd; 660 struct sendfile_sync *sfs; 661 struct vattr va; 662 off_t off, sbytes, rem, obj_size; 663 int bsize, error, ext_pgs_idx, hdrlen, max_pgs, softerr; 664 #ifdef KERN_TLS 665 int tls_enq_cnt; 666 #endif 667 bool use_ext_pgs; 668 669 obj = NULL; 670 so = NULL; 671 m = mh = NULL; 672 sfs = NULL; 673 #ifdef KERN_TLS 674 tls = NULL; 675 #endif 676 hdrlen = sbytes = 0; 677 softerr = 0; 678 use_ext_pgs = false; 679 680 error = sendfile_getobj(td, fp, &obj, &vp, &shmfd, &obj_size, &bsize); 681 if (error != 0) 682 return (error); 683 684 error = sendfile_getsock(td, sockfd, &sock_fp, &so); 685 if (error != 0) 686 goto out; 687 688 #ifdef MAC 689 error = mac_socket_check_send(td->td_ucred, so); 690 if (error != 0) 691 goto out; 692 #endif 693 694 SFSTAT_INC(sf_syscalls); 695 SFSTAT_ADD(sf_rhpages_requested, SF_READAHEAD(flags)); 696 697 if (flags & SF_SYNC) { 698 sfs = malloc(sizeof(*sfs), M_SENDFILE, M_WAITOK | M_ZERO); 699 mtx_init(&sfs->mtx, "sendfile", NULL, MTX_DEF); 700 cv_init(&sfs->cv, "sendfile"); 701 } 702 703 rem = nbytes ? omin(nbytes, obj_size - offset) : obj_size - offset; 704 705 /* 706 * Protect against multiple writers to the socket. 707 * 708 * XXXRW: Historically this has assumed non-interruptibility, so now 709 * we implement that, but possibly shouldn't. 710 */ 711 (void)sblock(&so->so_snd, SBL_WAIT | SBL_NOINTR); 712 #ifdef KERN_TLS 713 tls = ktls_hold(so->so_snd.sb_tls_info); 714 #endif 715 716 /* 717 * Loop through the pages of the file, starting with the requested 718 * offset. Get a file page (do I/O if necessary), map the file page 719 * into an sf_buf, attach an mbuf header to the sf_buf, and queue 720 * it on the socket. 721 * This is done in two loops. The inner loop turns as many pages 722 * as it can, up to available socket buffer space, without blocking 723 * into mbufs to have it bulk delivered into the socket send buffer. 724 * The outer loop checks the state and available space of the socket 725 * and takes care of the overall progress. 726 */ 727 for (off = offset; rem > 0; ) { 728 struct sf_io *sfio; 729 vm_page_t *pa; 730 struct mbuf *m0, *mtail; 731 int nios, space, npages, rhpages; 732 733 mtail = NULL; 734 /* 735 * Check the socket state for ongoing connection, 736 * no errors and space in socket buffer. 737 * If space is low allow for the remainder of the 738 * file to be processed if it fits the socket buffer. 739 * Otherwise block in waiting for sufficient space 740 * to proceed, or if the socket is nonblocking, return 741 * to userland with EAGAIN while reporting how far 742 * we've come. 743 * We wait until the socket buffer has significant free 744 * space to do bulk sends. This makes good use of file 745 * system read ahead and allows packet segmentation 746 * offloading hardware to take over lots of work. If 747 * we were not careful here we would send off only one 748 * sfbuf at a time. 749 */ 750 SOCKBUF_LOCK(&so->so_snd); 751 if (so->so_snd.sb_lowat < so->so_snd.sb_hiwat / 2) 752 so->so_snd.sb_lowat = so->so_snd.sb_hiwat / 2; 753 retry_space: 754 if (so->so_snd.sb_state & SBS_CANTSENDMORE) { 755 error = EPIPE; 756 SOCKBUF_UNLOCK(&so->so_snd); 757 goto done; 758 } else if (so->so_error) { 759 error = so->so_error; 760 so->so_error = 0; 761 SOCKBUF_UNLOCK(&so->so_snd); 762 goto done; 763 } 764 if ((so->so_state & SS_ISCONNECTED) == 0) { 765 SOCKBUF_UNLOCK(&so->so_snd); 766 error = ENOTCONN; 767 goto done; 768 } 769 770 space = sbspace(&so->so_snd); 771 if (space < rem && 772 (space <= 0 || 773 space < so->so_snd.sb_lowat)) { 774 if (so->so_state & SS_NBIO) { 775 SOCKBUF_UNLOCK(&so->so_snd); 776 error = EAGAIN; 777 goto done; 778 } 779 /* 780 * sbwait drops the lock while sleeping. 781 * When we loop back to retry_space the 782 * state may have changed and we retest 783 * for it. 784 */ 785 error = sbwait(&so->so_snd); 786 /* 787 * An error from sbwait usually indicates that we've 788 * been interrupted by a signal. If we've sent anything 789 * then return bytes sent, otherwise return the error. 790 */ 791 if (error != 0) { 792 SOCKBUF_UNLOCK(&so->so_snd); 793 goto done; 794 } 795 goto retry_space; 796 } 797 SOCKBUF_UNLOCK(&so->so_snd); 798 799 /* 800 * At the beginning of the first loop check if any headers 801 * are specified and copy them into mbufs. Reduce space in 802 * the socket buffer by the size of the header mbuf chain. 803 * Clear hdr_uio here and hdrlen at the end of the first loop. 804 */ 805 if (hdr_uio != NULL && hdr_uio->uio_resid > 0) { 806 hdr_uio->uio_td = td; 807 hdr_uio->uio_rw = UIO_WRITE; 808 #ifdef KERN_TLS 809 if (tls != NULL) 810 mh = m_uiotombuf(hdr_uio, M_WAITOK, space, 811 tls->params.max_frame_len, M_NOMAP); 812 else 813 #endif 814 mh = m_uiotombuf(hdr_uio, M_WAITOK, 815 space, 0, 0); 816 hdrlen = m_length(mh, &mhtail); 817 space -= hdrlen; 818 /* 819 * If header consumed all the socket buffer space, 820 * don't waste CPU cycles and jump to the end. 821 */ 822 if (space == 0) { 823 sfio = NULL; 824 nios = 0; 825 goto prepend_header; 826 } 827 hdr_uio = NULL; 828 } 829 830 if (vp != NULL) { 831 error = vn_lock(vp, LK_SHARED); 832 if (error != 0) 833 goto done; 834 error = VOP_GETATTR(vp, &va, td->td_ucred); 835 if (error != 0 || off >= va.va_size) { 836 VOP_UNLOCK(vp); 837 goto done; 838 } 839 if (va.va_size != obj_size) { 840 obj_size = va.va_size; 841 rem = nbytes ? 842 omin(nbytes + offset, obj_size) : obj_size; 843 rem -= off; 844 } 845 } 846 847 if (space > rem) 848 space = rem; 849 else if (space > PAGE_SIZE) { 850 /* 851 * Use page boundaries when possible for large 852 * requests. 853 */ 854 if (off & PAGE_MASK) 855 space -= (PAGE_SIZE - (off & PAGE_MASK)); 856 space = trunc_page(space); 857 if (off & PAGE_MASK) 858 space += (PAGE_SIZE - (off & PAGE_MASK)); 859 } 860 861 npages = howmany(space + (off & PAGE_MASK), PAGE_SIZE); 862 863 /* 864 * Calculate maximum allowed number of pages for readahead 865 * at this iteration. If SF_USER_READAHEAD was set, we don't 866 * do any heuristics and use exactly the value supplied by 867 * application. Otherwise, we allow readahead up to "rem". 868 * If application wants more, let it be, but there is no 869 * reason to go above MAXPHYS. Also check against "obj_size", 870 * since vm_pager_has_page() can hint beyond EOF. 871 */ 872 if (flags & SF_USER_READAHEAD) { 873 rhpages = SF_READAHEAD(flags); 874 } else { 875 rhpages = howmany(rem + (off & PAGE_MASK), PAGE_SIZE) - 876 npages; 877 rhpages += SF_READAHEAD(flags); 878 } 879 rhpages = min(howmany(MAXPHYS, PAGE_SIZE), rhpages); 880 rhpages = min(howmany(obj_size - trunc_page(off), PAGE_SIZE) - 881 npages, rhpages); 882 883 sfio = malloc(sizeof(struct sf_io) + 884 npages * sizeof(vm_page_t), M_SENDFILE, M_WAITOK); 885 refcount_init(&sfio->nios, 1); 886 sfio->obj = obj; 887 sfio->error = 0; 888 sfio->m = NULL; 889 #ifdef KERN_TLS 890 /* 891 * This doesn't use ktls_hold() because sfio->m will 892 * also have a reference on 'tls' that will be valid 893 * for all of sfio's lifetime. 894 */ 895 sfio->tls = tls; 896 #endif 897 vm_object_pip_add(obj, 1); 898 error = sendfile_swapin(obj, sfio, &nios, off, space, npages, 899 rhpages, flags); 900 if (error != 0) { 901 if (vp != NULL) 902 VOP_UNLOCK(vp); 903 sendfile_iodone(sfio, NULL, 0, error); 904 goto done; 905 } 906 907 /* 908 * Loop and construct maximum sized mbuf chain to be bulk 909 * dumped into socket buffer. 910 */ 911 pa = sfio->pa; 912 913 /* 914 * Use unmapped mbufs if enabled for TCP. Unmapped 915 * bufs are restricted to TCP as that is what has been 916 * tested. In particular, unmapped mbufs have not 917 * been tested with UNIX-domain sockets. 918 * 919 * TLS frames always require unmapped mbufs. 920 */ 921 if ((mb_use_ext_pgs && 922 so->so_proto->pr_protocol == IPPROTO_TCP) 923 #ifdef KERN_TLS 924 || tls != NULL 925 #endif 926 ) { 927 use_ext_pgs = true; 928 #ifdef KERN_TLS 929 if (tls != NULL) 930 max_pgs = num_pages(tls->params.max_frame_len); 931 else 932 #endif 933 max_pgs = MBUF_PEXT_MAX_PGS; 934 935 /* Start at last index, to wrap on first use. */ 936 ext_pgs_idx = max_pgs - 1; 937 } 938 939 for (int i = 0; i < npages; i++) { 940 /* 941 * If a page wasn't grabbed successfully, then 942 * trim the array. Can happen only with SF_NODISKIO. 943 */ 944 if (pa[i] == NULL) { 945 SFSTAT_INC(sf_busy); 946 fixspace(npages, i, off, &space); 947 npages = i; 948 softerr = EBUSY; 949 break; 950 } 951 952 if (use_ext_pgs) { 953 off_t xfs; 954 955 ext_pgs_idx++; 956 if (ext_pgs_idx == max_pgs) { 957 m0 = mb_alloc_ext_pgs(M_WAITOK, false, 958 sendfile_free_mext_pg); 959 960 if (flags & SF_NOCACHE) { 961 m0->m_ext.ext_flags |= 962 EXT_FLAG_NOCACHE; 963 964 /* 965 * See comment below regarding 966 * ignoring SF_NOCACHE for the 967 * last page. 968 */ 969 if ((npages - i <= max_pgs) && 970 ((off + space) & PAGE_MASK) && 971 (rem > space || rhpages > 0)) 972 m0->m_ext.ext_flags |= 973 EXT_FLAG_CACHE_LAST; 974 } 975 if (sfs != NULL) { 976 m0->m_ext.ext_flags |= 977 EXT_FLAG_SYNC; 978 m0->m_ext.ext_arg2 = sfs; 979 mtx_lock(&sfs->mtx); 980 sfs->count++; 981 mtx_unlock(&sfs->mtx); 982 } 983 ext_pgs = m0->m_ext.ext_pgs; 984 ext_pgs_idx = 0; 985 986 /* Append to mbuf chain. */ 987 if (mtail != NULL) 988 mtail->m_next = m0; 989 else 990 m = m0; 991 mtail = m0; 992 ext_pgs->first_pg_off = 993 vmoff(i, off) & PAGE_MASK; 994 } 995 if (nios) { 996 mtail->m_flags |= M_NOTREADY; 997 ext_pgs->nrdy++; 998 } 999 1000 ext_pgs->pa[ext_pgs_idx] = VM_PAGE_TO_PHYS(pa[i]); 1001 ext_pgs->npgs++; 1002 xfs = xfsize(i, npages, off, space); 1003 ext_pgs->last_pg_len = xfs; 1004 MBUF_EXT_PGS_ASSERT_SANITY(ext_pgs); 1005 mtail->m_len += xfs; 1006 mtail->m_ext.ext_size += PAGE_SIZE; 1007 continue; 1008 } 1009 1010 /* 1011 * Get a sendfile buf. When allocating the 1012 * first buffer for mbuf chain, we usually 1013 * wait as long as necessary, but this wait 1014 * can be interrupted. For consequent 1015 * buffers, do not sleep, since several 1016 * threads might exhaust the buffers and then 1017 * deadlock. 1018 */ 1019 sf = sf_buf_alloc(pa[i], 1020 m != NULL ? SFB_NOWAIT : SFB_CATCH); 1021 if (sf == NULL) { 1022 SFSTAT_INC(sf_allocfail); 1023 sendfile_iowait(sfio, "sfnosf"); 1024 for (int j = i; j < npages; j++) 1025 vm_page_unwire(pa[j], PQ_INACTIVE); 1026 if (m == NULL) 1027 softerr = ENOBUFS; 1028 fixspace(npages, i, off, &space); 1029 npages = i; 1030 break; 1031 } 1032 1033 m0 = m_get(M_WAITOK, MT_DATA); 1034 m0->m_ext.ext_buf = (char *)sf_buf_kva(sf); 1035 m0->m_ext.ext_size = PAGE_SIZE; 1036 m0->m_ext.ext_arg1 = sf; 1037 m0->m_ext.ext_type = EXT_SFBUF; 1038 m0->m_ext.ext_flags = EXT_FLAG_EMBREF; 1039 m0->m_ext.ext_free = sendfile_free_mext; 1040 /* 1041 * SF_NOCACHE sets the page as being freed upon send. 1042 * However, we ignore it for the last page in 'space', 1043 * if the page is truncated, and we got more data to 1044 * send (rem > space), or if we have readahead 1045 * configured (rhpages > 0). 1046 */ 1047 if ((flags & SF_NOCACHE) && 1048 (i != npages - 1 || 1049 !((off + space) & PAGE_MASK) || 1050 !(rem > space || rhpages > 0))) 1051 m0->m_ext.ext_flags |= EXT_FLAG_NOCACHE; 1052 if (sfs != NULL) { 1053 m0->m_ext.ext_flags |= EXT_FLAG_SYNC; 1054 m0->m_ext.ext_arg2 = sfs; 1055 mtx_lock(&sfs->mtx); 1056 sfs->count++; 1057 mtx_unlock(&sfs->mtx); 1058 } 1059 m0->m_ext.ext_count = 1; 1060 m0->m_flags |= (M_EXT | M_RDONLY); 1061 if (nios) 1062 m0->m_flags |= M_NOTREADY; 1063 m0->m_data = (char *)sf_buf_kva(sf) + 1064 (vmoff(i, off) & PAGE_MASK); 1065 m0->m_len = xfsize(i, npages, off, space); 1066 1067 /* Append to mbuf chain. */ 1068 if (mtail != NULL) 1069 mtail->m_next = m0; 1070 else 1071 m = m0; 1072 mtail = m0; 1073 } 1074 1075 if (vp != NULL) 1076 VOP_UNLOCK(vp); 1077 1078 /* Keep track of bytes processed. */ 1079 off += space; 1080 rem -= space; 1081 1082 /* 1083 * Prepend header, if any. Save pointer to first mbuf 1084 * with a page. 1085 */ 1086 if (hdrlen) { 1087 prepend_header: 1088 m0 = mhtail->m_next = m; 1089 m = mh; 1090 mh = NULL; 1091 } else 1092 m0 = m; 1093 1094 if (m == NULL) { 1095 KASSERT(softerr, ("%s: m NULL, no error", __func__)); 1096 error = softerr; 1097 sendfile_iodone(sfio, NULL, 0, 0); 1098 goto done; 1099 } 1100 1101 /* Add the buffer chain to the socket buffer. */ 1102 KASSERT(m_length(m, NULL) == space + hdrlen, 1103 ("%s: mlen %u space %d hdrlen %d", 1104 __func__, m_length(m, NULL), space, hdrlen)); 1105 1106 CURVNET_SET(so->so_vnet); 1107 #ifdef KERN_TLS 1108 if (tls != NULL) 1109 ktls_frame(m, tls, &tls_enq_cnt, TLS_RLTYPE_APP); 1110 #endif 1111 if (nios == 0) { 1112 /* 1113 * If sendfile_swapin() didn't initiate any I/Os, 1114 * which happens if all data is cached in VM, or if 1115 * the header consumed all socket buffer space and 1116 * sfio is NULL, then we can send data right now 1117 * without the PRUS_NOTREADY flag. 1118 */ 1119 if (sfio != NULL) 1120 sendfile_iodone(sfio, NULL, 0, 0); 1121 #ifdef KERN_TLS 1122 if (tls != NULL && tls->mode == TCP_TLS_MODE_SW) { 1123 error = (*so->so_proto->pr_usrreqs->pru_send) 1124 (so, PRUS_NOTREADY, m, NULL, NULL, td); 1125 soref(so); 1126 ktls_enqueue(m, so, tls_enq_cnt); 1127 } else 1128 #endif 1129 error = (*so->so_proto->pr_usrreqs->pru_send) 1130 (so, 0, m, NULL, NULL, td); 1131 } else { 1132 sfio->so = so; 1133 sfio->m = m0; 1134 sfio->npages = npages; 1135 soref(so); 1136 error = (*so->so_proto->pr_usrreqs->pru_send) 1137 (so, PRUS_NOTREADY, m, NULL, NULL, td); 1138 sendfile_iodone(sfio, NULL, 0, 0); 1139 } 1140 CURVNET_RESTORE(); 1141 1142 m = NULL; /* pru_send always consumes */ 1143 if (error) 1144 goto done; 1145 sbytes += space + hdrlen; 1146 if (hdrlen) 1147 hdrlen = 0; 1148 if (softerr) { 1149 error = softerr; 1150 goto done; 1151 } 1152 } 1153 1154 /* 1155 * Send trailers. Wimp out and use writev(2). 1156 */ 1157 if (trl_uio != NULL) { 1158 sbunlock(&so->so_snd); 1159 error = kern_writev(td, sockfd, trl_uio); 1160 if (error == 0) 1161 sbytes += td->td_retval[0]; 1162 goto out; 1163 } 1164 1165 done: 1166 sbunlock(&so->so_snd); 1167 out: 1168 /* 1169 * If there was no error we have to clear td->td_retval[0] 1170 * because it may have been set by writev. 1171 */ 1172 if (error == 0) { 1173 td->td_retval[0] = 0; 1174 } 1175 if (sent != NULL) { 1176 (*sent) = sbytes; 1177 } 1178 if (obj != NULL) 1179 vm_object_deallocate(obj); 1180 if (so) 1181 fdrop(sock_fp, td); 1182 if (m) 1183 m_freem(m); 1184 if (mh) 1185 m_freem(mh); 1186 1187 if (sfs != NULL) { 1188 mtx_lock(&sfs->mtx); 1189 if (sfs->count != 0) 1190 cv_wait(&sfs->cv, &sfs->mtx); 1191 KASSERT(sfs->count == 0, ("sendfile sync still busy")); 1192 cv_destroy(&sfs->cv); 1193 mtx_destroy(&sfs->mtx); 1194 free(sfs, M_SENDFILE); 1195 } 1196 #ifdef KERN_TLS 1197 if (tls != NULL) 1198 ktls_free(tls); 1199 #endif 1200 1201 if (error == ERESTART) 1202 error = EINTR; 1203 1204 return (error); 1205 } 1206 1207 static int 1208 sendfile(struct thread *td, struct sendfile_args *uap, int compat) 1209 { 1210 struct sf_hdtr hdtr; 1211 struct uio *hdr_uio, *trl_uio; 1212 struct file *fp; 1213 off_t sbytes; 1214 int error; 1215 1216 /* 1217 * File offset must be positive. If it goes beyond EOF 1218 * we send only the header/trailer and no payload data. 1219 */ 1220 if (uap->offset < 0) 1221 return (EINVAL); 1222 1223 sbytes = 0; 1224 hdr_uio = trl_uio = NULL; 1225 1226 if (uap->hdtr != NULL) { 1227 error = copyin(uap->hdtr, &hdtr, sizeof(hdtr)); 1228 if (error != 0) 1229 goto out; 1230 if (hdtr.headers != NULL) { 1231 error = copyinuio(hdtr.headers, hdtr.hdr_cnt, 1232 &hdr_uio); 1233 if (error != 0) 1234 goto out; 1235 #ifdef COMPAT_FREEBSD4 1236 /* 1237 * In FreeBSD < 5.0 the nbytes to send also included 1238 * the header. If compat is specified subtract the 1239 * header size from nbytes. 1240 */ 1241 if (compat) { 1242 if (uap->nbytes > hdr_uio->uio_resid) 1243 uap->nbytes -= hdr_uio->uio_resid; 1244 else 1245 uap->nbytes = 0; 1246 } 1247 #endif 1248 } 1249 if (hdtr.trailers != NULL) { 1250 error = copyinuio(hdtr.trailers, hdtr.trl_cnt, 1251 &trl_uio); 1252 if (error != 0) 1253 goto out; 1254 } 1255 } 1256 1257 AUDIT_ARG_FD(uap->fd); 1258 1259 /* 1260 * sendfile(2) can start at any offset within a file so we require 1261 * CAP_READ+CAP_SEEK = CAP_PREAD. 1262 */ 1263 if ((error = fget_read(td, uap->fd, &cap_pread_rights, &fp)) != 0) 1264 goto out; 1265 1266 error = fo_sendfile(fp, uap->s, hdr_uio, trl_uio, uap->offset, 1267 uap->nbytes, &sbytes, uap->flags, td); 1268 fdrop(fp, td); 1269 1270 if (uap->sbytes != NULL) 1271 copyout(&sbytes, uap->sbytes, sizeof(off_t)); 1272 1273 out: 1274 free(hdr_uio, M_IOV); 1275 free(trl_uio, M_IOV); 1276 return (error); 1277 } 1278 1279 /* 1280 * sendfile(2) 1281 * 1282 * int sendfile(int fd, int s, off_t offset, size_t nbytes, 1283 * struct sf_hdtr *hdtr, off_t *sbytes, int flags) 1284 * 1285 * Send a file specified by 'fd' and starting at 'offset' to a socket 1286 * specified by 's'. Send only 'nbytes' of the file or until EOF if nbytes == 1287 * 0. Optionally add a header and/or trailer to the socket output. If 1288 * specified, write the total number of bytes sent into *sbytes. 1289 */ 1290 int 1291 sys_sendfile(struct thread *td, struct sendfile_args *uap) 1292 { 1293 1294 return (sendfile(td, uap, 0)); 1295 } 1296 1297 #ifdef COMPAT_FREEBSD4 1298 int 1299 freebsd4_sendfile(struct thread *td, struct freebsd4_sendfile_args *uap) 1300 { 1301 struct sendfile_args args; 1302 1303 args.fd = uap->fd; 1304 args.s = uap->s; 1305 args.offset = uap->offset; 1306 args.nbytes = uap->nbytes; 1307 args.hdtr = uap->hdtr; 1308 args.sbytes = uap->sbytes; 1309 args.flags = uap->flags; 1310 1311 return (sendfile(td, &args, 1)); 1312 } 1313 #endif /* COMPAT_FREEBSD4 */ 1314