/*-
 * Copyright (c) 2013-2015 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 1998, David Greenman. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/rwlock.h>
#include <sys/sf_buf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <net/vnet.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#define EXT_FLAG_SYNC           EXT_FLAG_VENDOR1
#define EXT_FLAG_NOCACHE        EXT_FLAG_VENDOR2

/*
 * Structure describing a single sendfile(2) I/O, which may consist of
 * several underlying pager I/Os.
 *
 * The syscall context allocates the structure and initializes 'nios'
 * to 1.  As sendfile_swapin() runs through pages and starts asynchronous
 * paging operations, it increments 'nios'.
 *
 * Every I/O completion calls sendfile_iodone(), which decrements 'nios',
 * and the syscall also calls sendfile_iodone() after allocating all mbufs,
 * linking them and sending them to the socket.  Whoever reaches zero 'nios'
 * is responsible for calling pru_ready on the socket, to notify it of
 * readiness of the data.
 */
struct sf_io {
        volatile u_int  nios;
        u_int           error;
        int             npages;
        struct socket   *so;
        struct mbuf     *m;
        vm_page_t       pa[];
};

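/*
 * Illustrative timeline (added commentary, not authoritative): suppose
 * sendfile_swapin() starts two asynchronous pager I/Os.  'nios' then goes
 * 1 -> 2 -> 3 while the syscall context still holds its initial reference.
 * Each pager completion calls sendfile_iodone() (3 -> 2 -> 1), the
 * syscall's own sendfile_iodone() call drops the last reference (1 -> 0),
 * and whichever of the three callers reaches zero issues pru_ready().
 */
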
/*
 * Structure used to track requests with SF_SYNC flag.
 */
struct sendfile_sync {
        struct mtx      mtx;
        struct cv       cv;
        unsigned        count;
};

counter_u64_t sfstat[sizeof(struct sfstat) / sizeof(uint64_t)];

static void
sfstat_init(const void *unused)
{

        COUNTER_ARRAY_ALLOC(sfstat, sizeof(struct sfstat) / sizeof(uint64_t),
            M_WAITOK);
}
SYSINIT(sfstat, SI_SUB_MBUF, SI_ORDER_FIRST, sfstat_init, NULL);

static int
sfstat_sysctl(SYSCTL_HANDLER_ARGS)
{
        struct sfstat s;

        COUNTER_ARRAY_COPY(sfstat, &s, sizeof(s) / sizeof(uint64_t));
        if (req->newptr)
                COUNTER_ARRAY_ZERO(sfstat, sizeof(s) / sizeof(uint64_t));
        return (SYSCTL_OUT(req, &s, sizeof(s)));
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, sfstat, CTLTYPE_OPAQUE | CTLFLAG_RW,
    NULL, 0, sfstat_sysctl, "I", "sendfile statistics");

/*
 * Detach a mapped page and release resources back to the system.  Called
 * by mbuf(9) code when the last reference to a page is freed.
 */
static void
sendfile_free_page(vm_page_t pg, bool nocache)
{
        bool freed;

        vm_page_lock(pg);
        /*
         * In either case check for the object going away on us.  This can
         * happen since we don't hold a reference to it.  If so, we're
         * responsible for freeing the page.  In the 'nocache' case try to
         * free the page, but only if it is cheap to do so.
         */
        if (vm_page_unwire_noq(pg)) {
                vm_object_t obj;

                if ((obj = pg->object) == NULL)
                        vm_page_free(pg);
                else {
                        freed = false;
                        if (nocache && !vm_page_xbusied(pg) &&
                            VM_OBJECT_TRYWLOCK(obj)) {
                                /* Only free unmapped pages. */
                                if (obj->ref_count == 0 ||
                                    !pmap_page_is_mapped(pg))
                                        /*
                                         * The busy test before the object is
                                         * locked cannot be relied upon.
                                         */
                                        freed = vm_page_try_to_free(pg);
                                VM_OBJECT_WUNLOCK(obj);
                        }
                        if (!freed) {
                                /*
                                 * If we were asked to not cache the page,
                                 * place it near the head of the inactive
                                 * queue so that it is reclaimed sooner.
                                 * Otherwise, maintain LRU.
                                 */
                                if (nocache)
                                        vm_page_deactivate_noreuse(pg);
                                else if (vm_page_active(pg))
                                        vm_page_reference(pg);
                                else
                                        vm_page_deactivate(pg);
                        }
                }
        }
        vm_page_unlock(pg);
}

static void
sendfile_free_mext(struct mbuf *m)
{
        struct sf_buf *sf;
        vm_page_t pg;
        bool nocache;

        KASSERT(m->m_flags & M_EXT && m->m_ext.ext_type == EXT_SFBUF,
            ("%s: m %p !M_EXT or !EXT_SFBUF", __func__, m));

        sf = m->m_ext.ext_arg1;
        pg = sf_buf_page(sf);
        nocache = m->m_ext.ext_flags & EXT_FLAG_NOCACHE;

        sf_buf_free(sf);
        sendfile_free_page(pg, nocache);

        if (m->m_ext.ext_flags & EXT_FLAG_SYNC) {
                struct sendfile_sync *sfs = m->m_ext.ext_arg2;

                mtx_lock(&sfs->mtx);
                KASSERT(sfs->count > 0, ("Sendfile sync botchup count == 0"));
                if (--sfs->count == 0)
                        cv_signal(&sfs->cv);
                mtx_unlock(&sfs->mtx);
        }
}

/*
 * Helper function to calculate how much data to put into page i of n.
 * Only the first and last pages are special.
 */
static inline off_t
xfsize(int i, int n, off_t off, off_t len)
{

        if (i == 0)
                return (omin(PAGE_SIZE - (off & PAGE_MASK), len));

        if (i == n - 1 && ((off + len) & PAGE_MASK) > 0)
                return ((off + len) & PAGE_MASK);

        return (PAGE_SIZE);
}

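/*
 * Worked example (added commentary, values are illustrative): with
 * PAGE_SIZE = 4096, off = 3000 and len = 10000 the transfer covers
 * n = 4 pages.  xfsize() yields 4096 - 3000 = 1096 for page 0, 4096 for
 * pages 1 and 2, and (3000 + 10000) & PAGE_MASK = 712 for page 3;
 * 1096 + 4096 + 4096 + 712 = 10000 = len.  vmoff() below maps the same
 * indices to object offsets 3000, 4096, 8192 and 12288.
 */
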
/*
 * Helper function to get the offset within the object for page i.
 */
static inline vm_ooffset_t
vmoff(int i, off_t off)
{

        if (i == 0)
                return ((vm_ooffset_t)off);

        return (trunc_page(off + i * PAGE_SIZE));
}

/*
 * Helper function used when allocation of a page or sf_buf failed.
 * Pretend as if we don't have enough space: subtract the xfsize() of
 * all pages that failed.
 */
static inline void
fixspace(int old, int new, off_t off, int *space)
{

        KASSERT(old > new, ("%s: old %d new %d", __func__, old, new));

        /* Subtract last one. */
        *space -= xfsize(old - 1, old, off, *space);
        old--;

        if (new == old)
                /* There was only one page. */
                return;

        /* Subtract first one. */
        if (new == 0) {
                *space -= xfsize(0, old, off, *space);
                new++;
        }

        /* Rest of pages are full sized. */
        *space -= (old - new) * PAGE_SIZE;

        KASSERT(*space >= 0, ("%s: space went backwards", __func__));
}

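/*
 * Worked example (added commentary, values are illustrative): continuing
 * the xfsize() example above with old = 4 pages, off = 3000 and
 * *space = 10000, suppose allocation failed from page 1 on (new = 1).
 * fixspace() first subtracts the last page (712, *space = 9288), then
 * the two full middle pages (2 * 4096, *space = 1096), leaving exactly
 * the 1096 bytes that page 0 still contributes.
 */
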
/*
 * I/O completion callback.
 */
static void
sendfile_iodone(void *arg, vm_page_t *pg, int count, int error)
{
        struct sf_io *sfio = arg;
        struct socket *so = sfio->so;

        for (int i = 0; i < count; i++)
                if (pg[i] != bogus_page)
                        vm_page_xunbusy(pg[i]);

        if (error)
                sfio->error = error;

        if (!refcount_release(&sfio->nios))
                return;

        CURVNET_SET(so->so_vnet);
        if (sfio->error) {
                struct mbuf *m;

                /*
                 * The I/O operation failed.  The state of data in the
                 * socket is now inconsistent, and all we can do is tear
                 * it down.  The protocol abort method tears down the
                 * protocol state, frees all ready mbufs and detaches the
                 * not-ready ones.  We will free the mbufs corresponding
                 * to this I/O manually.
                 *
                 * The socket is marked with EIO and made available for
                 * read, so that the application receives EIO on its next
                 * syscall and eventually closes the socket.
                 */
                so->so_proto->pr_usrreqs->pru_abort(so);
                so->so_error = EIO;

                m = sfio->m;
                for (int i = 0; i < sfio->npages; i++)
                        m = m_free(m);
        } else
                (void)(so->so_proto->pr_usrreqs->pru_ready)(so, sfio->m,
                    sfio->npages);

        SOCK_LOCK(so);
        sorele(so);
        CURVNET_RESTORE();
        free(sfio, M_TEMP);
}

/*
 * Iterate through the pages vector and request paging for non-valid pages.
 */
static int
sendfile_swapin(vm_object_t obj, struct sf_io *sfio, off_t off, off_t len,
    int npages, int rhpages, int flags)
{
        vm_page_t *pa = sfio->pa;
        int grabbed, nios;

        nios = 0;
        flags = (flags & SF_NODISKIO) ? VM_ALLOC_NOWAIT : 0;

        /*
         * First grab all the pages and wire them.  Note that we grab
         * only required pages.  Readahead pages are dealt with later.
         */
        VM_OBJECT_WLOCK(obj);

        grabbed = vm_page_grab_pages(obj, OFF_TO_IDX(off),
            VM_ALLOC_NORMAL | VM_ALLOC_WIRED | flags, pa, npages);
        if (grabbed < npages) {
                for (int i = grabbed; i < npages; i++)
                        pa[i] = NULL;
                npages = grabbed;
                rhpages = 0;
        }

        for (int i = 0; i < npages;) {
                int j, a, count, rv __unused;

                /* Skip valid pages. */
                if (vm_page_is_valid(pa[i], vmoff(i, off) & PAGE_MASK,
                    xfsize(i, npages, off, len))) {
                        vm_page_xunbusy(pa[i]);
                        SFSTAT_INC(sf_pages_valid);
                        i++;
                        continue;
                }

                /*
                 * The next page is invalid.  Check if it belongs to the
                 * pager.  It may not be there, which is a regular situation
                 * for the shmem pager.  For the vnode pager this happens
                 * only in the case of a sparse file.
                 *
                 * An important feature of vm_pager_has_page() is the hint
                 * stored in 'a' about how many pages we can page in after
                 * this page in a single I/O.
                 */
                if (!vm_pager_has_page(obj, OFF_TO_IDX(vmoff(i, off)), NULL,
                    &a)) {
                        pmap_zero_page(pa[i]);
                        pa[i]->valid = VM_PAGE_BITS_ALL;
                        MPASS(pa[i]->dirty == 0);
                        vm_page_xunbusy(pa[i]);
                        i++;
                        continue;
                }

                /*
                 * We want to page in as many pages as possible, limited only
                 * by the 'a' hint and the actual request.
                 */
                count = min(a + 1, npages - i);

                /*
                 * We should not page in into a valid page; thus we first
                 * trim any valid pages off the end of the request, and
                 * substitute bogus_page for those that are in the middle.
                 */
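                /*
                 * Illustrative picture (added commentary): for a run of
                 * pages V I V I V, where V is valid and I is invalid, the
                 * trailing V is trimmed off (count shrinks by one) and the
                 * V in the middle is replaced by bogus_page, so the pager
                 * reads I bogus I as one contiguous I/O.
                 */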
                for (j = i + count - 1; j > i; j--) {
                        if (vm_page_is_valid(pa[j], vmoff(j, off) & PAGE_MASK,
                            xfsize(j, npages, off, len))) {
                                count--;
                                rhpages = 0;
                        } else
                                break;
                }
                for (j = i + 1; j < i + count - 1; j++)
                        if (vm_page_is_valid(pa[j], vmoff(j, off) & PAGE_MASK,
                            xfsize(j, npages, off, len))) {
                                vm_page_xunbusy(pa[j]);
                                SFSTAT_INC(sf_pages_valid);
                                SFSTAT_INC(sf_pages_bogus);
                                pa[j] = bogus_page;
                        }

                refcount_acquire(&sfio->nios);
                rv = vm_pager_get_pages_async(obj, pa + i, count, NULL,
                    i + count == npages ? &rhpages : NULL,
                    &sendfile_iodone, sfio);
                KASSERT(rv == VM_PAGER_OK, ("%s: pager fail obj %p page %p",
                    __func__, obj, pa[i]));

                SFSTAT_INC(sf_iocnt);
                SFSTAT_ADD(sf_pages_read, count);
                if (i + count == npages)
                        SFSTAT_ADD(sf_rhpages_read, rhpages);

                /*
                 * Restore the valid page pointers.  They are already
                 * unbusied, but still wired.
                 */
                for (j = i; j < i + count; j++)
                        if (pa[j] == bogus_page) {
                                pa[j] = vm_page_lookup(obj,
                                    OFF_TO_IDX(vmoff(j, off)));
                                KASSERT(pa[j], ("%s: page %p[%d] disappeared",
                                    __func__, pa, j));
                        }
                i += count;
                nios++;
        }

        VM_OBJECT_WUNLOCK(obj);

        if (nios == 0 && npages != 0)
                SFSTAT_INC(sf_noiocnt);

        return (nios);
}

static int
sendfile_getobj(struct thread *td, struct file *fp, vm_object_t *obj_res,
    struct vnode **vp_res, struct shmfd **shmfd_res, off_t *obj_size,
    int *bsize)
{
        struct vattr va;
        vm_object_t obj;
        struct vnode *vp;
        struct shmfd *shmfd;
        int error;

        vp = *vp_res = NULL;
        obj = NULL;
        shmfd = *shmfd_res = NULL;
        *bsize = 0;

        /*
         * The file descriptor must be a regular file and have a
         * backing VM object.
         */
        if (fp->f_type == DTYPE_VNODE) {
                vp = fp->f_vnode;
                vn_lock(vp, LK_SHARED | LK_RETRY);
                if (vp->v_type != VREG) {
                        error = EINVAL;
                        goto out;
                }
                *bsize = vp->v_mount->mnt_stat.f_iosize;
                error = VOP_GETATTR(vp, &va, td->td_ucred);
                if (error != 0)
                        goto out;
                *obj_size = va.va_size;
                obj = vp->v_object;
                if (obj == NULL) {
                        error = EINVAL;
                        goto out;
                }
        } else if (fp->f_type == DTYPE_SHM) {
                error = 0;
                shmfd = fp->f_data;
                obj = shmfd->shm_object;
                *obj_size = shmfd->shm_size;
        } else {
                error = EINVAL;
                goto out;
        }

        VM_OBJECT_WLOCK(obj);
        if ((obj->flags & OBJ_DEAD) != 0) {
                VM_OBJECT_WUNLOCK(obj);
                error = EBADF;
                goto out;
        }

        /*
         * Temporarily increase the backing VM object's reference
         * count so that a forced reclamation of its vnode does not
         * immediately destroy it.
         */
        vm_object_reference_locked(obj);
        VM_OBJECT_WUNLOCK(obj);
        *obj_res = obj;
        *vp_res = vp;
        *shmfd_res = shmfd;

out:
        if (vp != NULL)
                VOP_UNLOCK(vp, 0);
        return (error);
}

static int
sendfile_getsock(struct thread *td, int s, struct file **sock_fp,
    struct socket **so)
{
        int error;

        *sock_fp = NULL;
        *so = NULL;

        /*
         * The socket must be a stream socket and connected.
         */
        error = getsock_cap(td, s, &cap_send_rights,
            sock_fp, NULL, NULL);
        if (error != 0)
                return (error);
        *so = (*sock_fp)->f_data;
        if ((*so)->so_type != SOCK_STREAM)
                return (EINVAL);
        return (0);
}

int
vn_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
    struct uio *trl_uio, off_t offset, size_t nbytes, off_t *sent, int flags,
    struct thread *td)
{
        struct file *sock_fp;
        struct vnode *vp;
        struct vm_object *obj;
        struct socket *so;
        struct mbuf *m, *mh, *mhtail;
        struct sf_buf *sf;
        struct shmfd *shmfd;
        struct sendfile_sync *sfs;
        struct vattr va;
        off_t off, sbytes, rem, obj_size;
        int error, softerr, bsize, hdrlen;

        obj = NULL;
        so = NULL;
        m = mh = NULL;
        sfs = NULL;
        hdrlen = sbytes = 0;
        softerr = 0;

        error = sendfile_getobj(td, fp, &obj, &vp, &shmfd, &obj_size, &bsize);
        if (error != 0)
                return (error);

        error = sendfile_getsock(td, sockfd, &sock_fp, &so);
        if (error != 0)
                goto out;

#ifdef MAC
        error = mac_socket_check_send(td->td_ucred, so);
        if (error != 0)
                goto out;
#endif

        SFSTAT_INC(sf_syscalls);
        SFSTAT_ADD(sf_rhpages_requested, SF_READAHEAD(flags));

        if (flags & SF_SYNC) {
                sfs = malloc(sizeof *sfs, M_TEMP, M_WAITOK | M_ZERO);
                mtx_init(&sfs->mtx, "sendfile", NULL, MTX_DEF);
                cv_init(&sfs->cv, "sendfile");
        }

        rem = nbytes ? omin(nbytes, obj_size - offset) : obj_size - offset;

        /*
         * Protect against multiple writers to the socket.
         *
         * XXXRW: Historically this has assumed non-interruptibility, so now
         * we implement that, but possibly shouldn't.
         */
        (void)sblock(&so->so_snd, SBL_WAIT | SBL_NOINTR);

        /*
         * Loop through the pages of the file, starting with the requested
         * offset.  Get a file page (do I/O if necessary), map the file page
         * into an sf_buf, attach an mbuf header to the sf_buf, and queue
         * it on the socket.
         * This is done in two loops.  The inner loop turns as many pages
         * as it can into mbufs, up to the available socket buffer space,
         * without blocking, so that they can be bulk-delivered into the
         * socket send buffer.  The outer loop checks the state and available
         * space of the socket and takes care of the overall progress.
         */
        for (off = offset; rem > 0; ) {
                struct sf_io *sfio;
                vm_page_t *pa;
                struct mbuf *mtail;
                int nios, space, npages, rhpages;

                mtail = NULL;
                /*
                 * Check the socket state for an ongoing connection,
                 * no errors and space in the socket buffer.
                 * If space is low, allow for the remainder of the
                 * file to be processed if it fits the socket buffer.
                 * Otherwise block waiting for sufficient space to
                 * proceed, or, if the socket is nonblocking, return
                 * to userland with EAGAIN while reporting how far
                 * we've come.
                 * We wait until the socket buffer has significant free
                 * space to do bulk sends.
                 * This makes good use of file system read-ahead and
                 * allows packet segmentation offloading hardware to
                 * take over lots of work.  If we were not careful here
                 * we would send off only one sfbuf at a time.
                 */
                SOCKBUF_LOCK(&so->so_snd);
                if (so->so_snd.sb_lowat < so->so_snd.sb_hiwat / 2)
                        so->so_snd.sb_lowat = so->so_snd.sb_hiwat / 2;
retry_space:
                if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
                        error = EPIPE;
                        SOCKBUF_UNLOCK(&so->so_snd);
                        goto done;
                } else if (so->so_error) {
                        error = so->so_error;
                        so->so_error = 0;
                        SOCKBUF_UNLOCK(&so->so_snd);
                        goto done;
                }
                if ((so->so_state & SS_ISCONNECTED) == 0) {
                        SOCKBUF_UNLOCK(&so->so_snd);
                        error = ENOTCONN;
                        goto done;
                }

                space = sbspace(&so->so_snd);
                if (space < rem &&
                    (space <= 0 ||
                     space < so->so_snd.sb_lowat)) {
                        if (so->so_state & SS_NBIO) {
                                SOCKBUF_UNLOCK(&so->so_snd);
                                error = EAGAIN;
                                goto done;
                        }
                        /*
                         * sbwait() drops the lock while sleeping.
                         * When we loop back to retry_space the
                         * state may have changed and we retest
                         * for it.
                         */
                        error = sbwait(&so->so_snd);
                        /*
                         * An error from sbwait() usually indicates that
                         * we've been interrupted by a signal.  If we've
                         * sent anything then return the bytes sent,
                         * otherwise return the error.
                         */
                        if (error != 0) {
                                SOCKBUF_UNLOCK(&so->so_snd);
                                goto done;
                        }
                        goto retry_space;
                }
                SOCKBUF_UNLOCK(&so->so_snd);

                /*
                 * At the beginning of the first loop check if any headers
                 * are specified and copy them into mbufs.  Reduce space in
                 * the socket buffer by the size of the header mbuf chain.
                 * Clear hdr_uio here and hdrlen at the end of the first loop.
                 */
                if (hdr_uio != NULL && hdr_uio->uio_resid > 0) {
                        hdr_uio->uio_td = td;
                        hdr_uio->uio_rw = UIO_WRITE;
                        mh = m_uiotombuf(hdr_uio, M_WAITOK, space, 0, 0);
                        hdrlen = m_length(mh, &mhtail);
                        space -= hdrlen;
                        /*
                         * If the header consumed all the socket buffer
                         * space, don't waste CPU cycles and jump to the end.
                         */
                        if (space == 0) {
                                sfio = NULL;
                                nios = 0;
                                npages = 0;
                                goto prepend_header;
                        }
                        hdr_uio = NULL;
                }

                if (vp != NULL) {
                        error = vn_lock(vp, LK_SHARED);
                        if (error != 0)
                                goto done;
                        error = VOP_GETATTR(vp, &va, td->td_ucred);
                        if (error != 0 || off >= va.va_size) {
                                VOP_UNLOCK(vp, 0);
                                goto done;
                        }
                        if (va.va_size != obj_size) {
                                obj_size = va.va_size;
                                rem = nbytes ?
                                    omin(nbytes + offset, obj_size) : obj_size;
                                rem -= off;
                        }
                }

                if (space > rem)
                        space = rem;

                npages = howmany(space + (off & PAGE_MASK), PAGE_SIZE);

                /*
                 * Calculate the maximum allowed number of pages for
                 * readahead at this iteration.  If SF_USER_READAHEAD was
                 * set, we don't do any heuristics and use exactly the value
                 * supplied by the application.  Otherwise, we allow
                 * readahead up to "rem".  If the application wants more,
                 * let it be, but there is no reason to go above MAXPHYS.
                 * Also check against "obj_size", since vm_pager_has_page()
                 * can hint beyond EOF.
                 */
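                /*
                 * Worked example (added commentary, values are
                 * illustrative): with PAGE_SIZE = 4096, MAXPHYS = 128k,
                 * off = 0, rem = 1M, npages = 8 and no SF_READAHEAD hint,
                 * the heuristic asks for 256 - 8 = 248 readahead pages,
                 * which the MAXPHYS clamp cuts to 32, and the EOF clamp
                 * below may reduce further for small objects.
                 */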
                if (flags & SF_USER_READAHEAD) {
                        rhpages = SF_READAHEAD(flags);
                } else {
                        rhpages = howmany(rem + (off & PAGE_MASK),
                            PAGE_SIZE) - npages;
                        rhpages += SF_READAHEAD(flags);
                }
                rhpages = min(howmany(MAXPHYS, PAGE_SIZE), rhpages);
                rhpages = min(howmany(obj_size - trunc_page(off),
                    PAGE_SIZE) - npages, rhpages);

                sfio = malloc(sizeof(struct sf_io) +
                    npages * sizeof(vm_page_t), M_TEMP, M_WAITOK);
                refcount_init(&sfio->nios, 1);
                sfio->so = so;
                sfio->error = 0;

                nios = sendfile_swapin(obj, sfio, off, space, npages,
                    rhpages, flags);

                /*
                 * Loop and construct a maximum sized mbuf chain to be bulk
                 * dumped into the socket buffer.
                 */
                pa = sfio->pa;
                for (int i = 0; i < npages; i++) {
                        struct mbuf *m0;

                        /*
                         * If a page wasn't grabbed successfully, then
                         * trim the array.  Can happen only with SF_NODISKIO.
                         */
                        if (pa[i] == NULL) {
                                SFSTAT_INC(sf_busy);
                                fixspace(npages, i, off, &space);
                                npages = i;
                                softerr = EBUSY;
                                break;
                        }

                        /*
                         * Get a sendfile buf.  When allocating the
                         * first buffer for the mbuf chain, we usually
                         * wait as long as necessary, but this wait
                         * can be interrupted.  For subsequent
                         * buffers, do not sleep, since several
                         * threads might exhaust the buffers and then
                         * deadlock.
                         */
                        sf = sf_buf_alloc(pa[i],
                            m != NULL ? SFB_NOWAIT : SFB_CATCH);
                        if (sf == NULL) {
                                SFSTAT_INC(sf_allocfail);
                                for (int j = i; j < npages; j++) {
                                        vm_page_lock(pa[j]);
                                        vm_page_unwire(pa[j], PQ_INACTIVE);
                                        vm_page_unlock(pa[j]);
                                }
                                if (m == NULL)
                                        softerr = ENOBUFS;
                                fixspace(npages, i, off, &space);
                                npages = i;
                                break;
                        }

                        m0 = m_get(M_WAITOK, MT_DATA);
                        m0->m_ext.ext_buf = (char *)sf_buf_kva(sf);
                        m0->m_ext.ext_size = PAGE_SIZE;
                        m0->m_ext.ext_arg1 = sf;
                        m0->m_ext.ext_type = EXT_SFBUF;
                        m0->m_ext.ext_flags = EXT_FLAG_EMBREF;
                        m0->m_ext.ext_free = sendfile_free_mext;
                        /*
                         * SF_NOCACHE marks the page to be freed upon send.
                         * However, we ignore it for the last page in
                         * 'space' if the page is truncated and either we
                         * have more data to send (rem > space) or readahead
                         * is configured (rhpages > 0).
                         */
                        if ((flags & SF_NOCACHE) &&
                            (i != npages - 1 ||
                            !((off + space) & PAGE_MASK) ||
                            !(rem > space || rhpages > 0)))
                                m0->m_ext.ext_flags |= EXT_FLAG_NOCACHE;
                        if (sfs != NULL) {
                                m0->m_ext.ext_flags |= EXT_FLAG_SYNC;
                                m0->m_ext.ext_arg2 = sfs;
                                mtx_lock(&sfs->mtx);
                                sfs->count++;
                                mtx_unlock(&sfs->mtx);
                        }
                        m0->m_ext.ext_count = 1;
                        m0->m_flags |= (M_EXT | M_RDONLY);
                        if (nios)
                                m0->m_flags |= M_NOTREADY;
                        m0->m_data = (char *)sf_buf_kva(sf) +
                            (vmoff(i, off) & PAGE_MASK);
                        m0->m_len = xfsize(i, npages, off, space);

                        if (i == 0)
                                sfio->m = m0;

                        /* Append to mbuf chain. */
                        if (mtail != NULL)
                                mtail->m_next = m0;
                        else
                                m = m0;
                        mtail = m0;
                }

                if (vp != NULL)
                        VOP_UNLOCK(vp, 0);

                /* Keep track of bytes processed. */
                off += space;
                rem -= space;

                /* Prepend header, if any. */
                if (hdrlen) {
prepend_header:
                        mhtail->m_next = m;
                        m = mh;
                        mh = NULL;
                }

                if (m == NULL) {
                        KASSERT(softerr, ("%s: m NULL, no error", __func__));
                        error = softerr;
                        free(sfio, M_TEMP);
                        goto done;
                }

                /* Add the buffer chain to the socket buffer. */
                KASSERT(m_length(m, NULL) == space + hdrlen,
                    ("%s: mlen %u space %d hdrlen %d",
                    __func__, m_length(m, NULL), space, hdrlen));

                CURVNET_SET(so->so_vnet);
                if (nios == 0) {
                        /*
                         * If sendfile_swapin() didn't initiate any I/Os,
                         * which happens if all data is cached in VM, then
                         * we can send data right now without the
                         * PRUS_NOTREADY flag.
                         */
                        free(sfio, M_TEMP);
                        error = (*so->so_proto->pr_usrreqs->pru_send)
                            (so, 0, m, NULL, NULL, td);
                } else {
                        sfio->npages = npages;
                        soref(so);
                        error = (*so->so_proto->pr_usrreqs->pru_send)
                            (so, PRUS_NOTREADY, m, NULL, NULL, td);
                        sendfile_iodone(sfio, NULL, 0, 0);
                }
                CURVNET_RESTORE();

                m = NULL;       /* pru_send always consumes */
                if (error)
                        goto done;
                sbytes += space + hdrlen;
                if (hdrlen)
                        hdrlen = 0;
                if (softerr) {
                        error = softerr;
                        goto done;
                }
        }

        /*
         * Send trailers.  Wimp out and use writev(2).
         */
        if (trl_uio != NULL) {
                sbunlock(&so->so_snd);
                error = kern_writev(td, sockfd, trl_uio);
                if (error == 0)
                        sbytes += td->td_retval[0];
                goto out;
        }

done:
        sbunlock(&so->so_snd);
out:
        /*
         * If there was no error we have to clear td->td_retval[0]
         * because it may have been set by writev.
         */
        if (error == 0) {
                td->td_retval[0] = 0;
        }
        if (sent != NULL) {
                (*sent) = sbytes;
        }
        if (obj != NULL)
                vm_object_deallocate(obj);
        if (so)
                fdrop(sock_fp, td);
        if (m)
                m_freem(m);
        if (mh)
                m_freem(mh);

        if (sfs != NULL) {
                mtx_lock(&sfs->mtx);
                if (sfs->count != 0)
                        cv_wait(&sfs->cv, &sfs->mtx);
                KASSERT(sfs->count == 0, ("sendfile sync still busy"));
                cv_destroy(&sfs->cv);
                mtx_destroy(&sfs->mtx);
                free(sfs, M_TEMP);
        }

        if (error == ERESTART)
                error = EINTR;

        return (error);
}

static int
sendfile(struct thread *td, struct sendfile_args *uap, int compat)
{
        struct sf_hdtr hdtr;
        struct uio *hdr_uio, *trl_uio;
        struct file *fp;
        off_t sbytes;
        int error;

        /*
         * The file offset must be non-negative.  If it goes beyond EOF
         * we send only the header/trailer and no payload data.
         */
        if (uap->offset < 0)
                return (EINVAL);

        sbytes = 0;
        hdr_uio = trl_uio = NULL;

        if (uap->hdtr != NULL) {
                error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
                if (error != 0)
                        goto out;
                if (hdtr.headers != NULL) {
                        error = copyinuio(hdtr.headers, hdtr.hdr_cnt,
                            &hdr_uio);
                        if (error != 0)
                                goto out;
#ifdef COMPAT_FREEBSD4
                        /*
                         * In FreeBSD < 5.0 the nbytes to send also included
                         * the header.  If compat is specified, subtract the
                         * header size from nbytes.
                         */
                        if (compat) {
                                if (uap->nbytes > hdr_uio->uio_resid)
                                        uap->nbytes -= hdr_uio->uio_resid;
                                else
                                        uap->nbytes = 0;
                        }
#endif
                }
                if (hdtr.trailers != NULL) {
                        error = copyinuio(hdtr.trailers, hdtr.trl_cnt,
                            &trl_uio);
                        if (error != 0)
                                goto out;
                }
        }

        AUDIT_ARG_FD(uap->fd);

        /*
         * sendfile(2) can start at any offset within a file so we require
         * CAP_READ+CAP_SEEK = CAP_PREAD.
         */
        if ((error = fget_read(td, uap->fd, &cap_pread_rights, &fp)) != 0)
                goto out;

        error = fo_sendfile(fp, uap->s, hdr_uio, trl_uio, uap->offset,
            uap->nbytes, &sbytes, uap->flags, td);
        fdrop(fp, td);

        if (uap->sbytes != NULL)
                copyout(&sbytes, uap->sbytes, sizeof(off_t));

out:
        free(hdr_uio, M_IOV);
        free(trl_uio, M_IOV);
        return (error);
}

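/*
 * Illustrative userland usage (added commentary, not part of the original
 * source; error handling abbreviated, 'hdr_iov' is an assumed struct iovec
 * array prepared by the caller):
 *
 *      off_t sbytes;
 *      struct sf_hdtr hdtr = { .headers = hdr_iov, .hdr_cnt = 1,
 *          .trailers = NULL, .trl_cnt = 0 };
 *
 *      if (sendfile(filefd, sockfd, 0, 0, &hdtr, &sbytes, SF_NOCACHE) == -1)
 *              err(1, "sendfile");
 *
 * With nbytes == 0 the whole file from offset 0 is sent, and the total
 * number of bytes sent, including the header, is written to 'sbytes'.
 */
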
/*
 * sendfile(2)
 *
 * int sendfile(int fd, int s, off_t offset, size_t nbytes,
 *      struct sf_hdtr *hdtr, off_t *sbytes, int flags)
 *
 * Send a file specified by 'fd' and starting at 'offset' to a socket
 * specified by 's'.  Send only 'nbytes' of the file or until EOF if
 * nbytes == 0.  Optionally add a header and/or trailer to the socket
 * output.  If specified, write the total number of bytes sent into
 * *sbytes.
 */
int
sys_sendfile(struct thread *td, struct sendfile_args *uap)
{

        return (sendfile(td, uap, 0));
}

#ifdef COMPAT_FREEBSD4
int
freebsd4_sendfile(struct thread *td, struct freebsd4_sendfile_args *uap)
{
        struct sendfile_args args;

        args.fd = uap->fd;
        args.s = uap->s;
        args.offset = uap->offset;
        args.nbytes = uap->nbytes;
        args.hdtr = uap->hdtr;
        args.sbytes = uap->sbytes;
        args.flags = uap->flags;

        return (sendfile(td, &args, 1));
}
#endif /* COMPAT_FREEBSD4 */