/*-
 * Copyright (c) 2013-2015 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 1998, David Greenman. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_kern_tls.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/inotify.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/ktls.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/rwlock.h>
#include <sys/sf_buf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vnode.h>

#include <net/vnet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_log_buf.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SENDFILE, "sendfile", "sendfile dynamic memory");

#define	EXT_FLAG_NOCACHE	EXT_FLAG_VENDOR2
#define	EXT_FLAG_CACHE_LAST	EXT_FLAG_VENDOR3

/*
 * Structure describing a single sendfile(2) I/O, which may consist of
 * several underlying pager I/Os.
 *
 * The syscall context allocates the structure and initializes 'nios'
 * to 1.  As sendfile_swapin() runs through pages and starts asynchronous
 * paging operations, it increments 'nios'.
 *
 * Every I/O completion calls sendfile_iodone(), which decrements 'nios',
 * and the syscall also calls sendfile_iodone() after allocating all mbufs,
 * linking them and sending them to the socket.  Whoever drops 'nios' to
 * zero is responsible for calling pr_ready() on the socket, to notify it
 * of the readiness of the data.
 */
struct sf_io {
	volatile u_int	nios;
	u_int		error;
	int		npages;
	struct socket	*so;
	struct mbuf	*m;
	vm_object_t	obj;
	vm_pindex_t	pindex0;
#ifdef KERN_TLS
	struct ktls_session *tls;
#endif
	vm_page_t	pa[];
};

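/*
 * Illustrative 'nios' timeline for a request that needs two pager I/Os
 * (a sketch of the protocol described above, not additional state):
 *
 *	vn_sendfile():		refcount_init(&sfio->nios, 1)	nios = 1
 *	sendfile_swapin():	two refcount_acquire() calls	nios = 3
 *	vn_sendfile():		sendfile_iodone() after pr_send	nios = 2
 *	1st pager completion:	sendfile_iodone()		nios = 1
 *	2nd pager completion:	sendfile_iodone()		nios = 0;
 *				this last caller calls pr_ready().
 */
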
counter_u64_t sfstat[sizeof(struct sfstat) / sizeof(uint64_t)];

static void
sfstat_init(const void *unused)
{

	COUNTER_ARRAY_ALLOC(sfstat, sizeof(struct sfstat) / sizeof(uint64_t),
	    M_WAITOK);
}
SYSINIT(sfstat, SI_SUB_MBUF, SI_ORDER_FIRST, sfstat_init, NULL);

static int
sfstat_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct sfstat s;

	COUNTER_ARRAY_COPY(sfstat, &s, sizeof(s) / sizeof(uint64_t));
	if (req->newptr)
		COUNTER_ARRAY_ZERO(sfstat, sizeof(s) / sizeof(uint64_t));
	return (SYSCTL_OUT(req, &s, sizeof(s)));
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, sfstat,
    CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
    sfstat_sysctl, "I",
    "sendfile statistics");

static void
sendfile_free_mext(struct mbuf *m)
{
	struct sf_buf *sf;
	vm_page_t pg;
	int flags;

	KASSERT(m->m_flags & M_EXT && m->m_ext.ext_type == EXT_SFBUF,
	    ("%s: m %p !M_EXT or !EXT_SFBUF", __func__, m));

	sf = m->m_ext.ext_arg1;
	pg = sf_buf_page(sf);
	flags = (m->m_ext.ext_flags & EXT_FLAG_NOCACHE) != 0 ? VPR_TRYFREE : 0;

	sf_buf_free(sf);
	vm_page_release(pg, flags);
}

static void
sendfile_free_mext_pg(struct mbuf *m)
{
	vm_page_t pg;
	int flags, i;
	bool cache_last;

	M_ASSERTEXTPG(m);

	cache_last = m->m_ext.ext_flags & EXT_FLAG_CACHE_LAST;
	flags = (m->m_ext.ext_flags & EXT_FLAG_NOCACHE) != 0 ? VPR_TRYFREE : 0;

	for (i = 0; i < m->m_epg_npgs; i++) {
		if (cache_last && i == m->m_epg_npgs - 1)
			flags = 0;
		pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
		vm_page_release(pg, flags);
	}
}

/*
 * Helper function to calculate how much data to put into page i of n.
 * Only the first and last pages are special.
 */
static inline off_t
xfsize(int i, int n, off_t off, off_t len)
{

	if (i == 0)
		return (omin(PAGE_SIZE - (off & PAGE_MASK), len));

	if (i == n - 1 && ((off + len) & PAGE_MASK) > 0)
		return ((off + len) & PAGE_MASK);

	return (PAGE_SIZE);
}

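/*
 * Worked example for xfsize(), assuming 4 KB pages: for off = 0x1800 and
 * len = 0x2000 the transfer spans n = 3 pages.  xfsize(0, 3, ...) returns
 * 0x800 (the tail of the first page), xfsize(1, 3, ...) returns a full
 * 0x1000, and xfsize(2, 3, ...) returns (off + len) & PAGE_MASK = 0x800,
 * for a total of 0x2000.
 */
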
/*
 * Helper function to get the offset within the object for page i.
 */
static inline vm_ooffset_t
vmoff(int i, off_t off)
{

	if (i == 0)
		return ((vm_ooffset_t)off);

	return (trunc_page(off + i * PAGE_SIZE));
}

/*
 * Helper function used when allocation of a page or sf_buf failed.
 * Pretend that we don't have enough space; subtract xfsize() of
 * all pages that failed.
 */
static inline void
fixspace(int old, int new, off_t off, int *space)
{

	KASSERT(old > new, ("%s: old %d new %d", __func__, old, new));

	/* Subtract last one. */
	*space -= xfsize(old - 1, old, off, *space);
	old--;

	if (new == old)
		/* There was only one page. */
		return;

	/* Subtract first one. */
	if (new == 0) {
		*space -= xfsize(0, old, off, *space);
		new++;
	}

	/* Rest of pages are full sized. */
	*space -= (old - new) * PAGE_SIZE;

	KASSERT(*space >= 0, ("%s: space went backwards", __func__));
}

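/*
 * Worked example for fixspace(), assuming 4 KB pages: a request with
 * off = 0x800 and *space = 0x3800 covers old = 4 pages.  If only the
 * first page survives (new = 1), fixspace(4, 1, 0x800, &space) subtracts
 * the full last page (0x1000) and then 2 more full pages, leaving
 * *space = 0x800, which is exactly xfsize(0, 4, 0x800, 0x3800).
 */
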
/*
 * Wait for all in-flight I/Os to complete; we must not unwire pages
 * under them.
 */
static void
sendfile_iowait(struct sf_io *sfio, const char *wmesg)
{
	while (atomic_load_int(&sfio->nios) != 1)
		pause(wmesg, 1);
}

/*
 * I/O completion callback.
 *
 * When called via the I/O path, curvnet is not set and should be obtained
 * from the socket.  When called synchronously from vn_sendfile(), usually
 * to report an error or just release the reference (all pages are valid),
 * curvnet shall already be set.
 */
static void
sendfile_iodone(void *arg, vm_page_t *pa, int count, int error)
{
	struct sf_io *sfio = arg;
	struct socket *so;
	int i;

	if (error != 0)
		sfio->error = error;

	/*
	 * Restore the valid page pointers.  They are already
	 * unbusied, but still wired.
	 *
	 * XXXKIB since pages are only wired, and we do not
	 * own the object lock, other users might have
	 * invalidated them in the meantime.  Similarly, after we
	 * unbusied the swapped-in pages, they can become
	 * invalid under us.
	 */
	MPASS(count == 0 || pa[0] != bogus_page);
	for (i = 0; i < count; i++) {
		if (pa[i] == bogus_page) {
			sfio->pa[(pa[0]->pindex - sfio->pindex0) + i] =
			    pa[i] = vm_page_relookup(sfio->obj,
			    pa[0]->pindex + i);
			KASSERT(pa[i] != NULL,
			    ("%s: page %p[%d] disappeared",
			    __func__, pa, i));
		} else {
			vm_page_xunbusy_unchecked(pa[i]);
		}
	}

	if (!refcount_release(&sfio->nios))
		return;

#ifdef INVARIANTS
	for (i = 1; i < sfio->npages; i++) {
		if (sfio->pa[i] == NULL)
			break;
		KASSERT(vm_page_wired(sfio->pa[i]),
		    ("sfio %p page %d %p not wired", sfio, i, sfio->pa[i]));
		if (i == 0)
			continue;
		KASSERT(sfio->pa[0]->object == sfio->pa[i]->object,
		    ("sfio %p page %d %p wrong owner %p %p", sfio, i,
		    sfio->pa[i], sfio->pa[0]->object, sfio->pa[i]->object));
		KASSERT(sfio->pa[0]->pindex + i == sfio->pa[i]->pindex,
		    ("sfio %p page %d %p wrong index %jx %jx", sfio, i,
		    sfio->pa[i], (uintmax_t)sfio->pa[0]->pindex,
		    (uintmax_t)sfio->pa[i]->pindex));
	}
#endif

	vm_object_pip_wakeup(sfio->obj);

	if (sfio->m == NULL) {
		/*
		 * Either the I/O operation failed, or we failed to allocate
		 * buffers, or we bailed out on the first busy page, or we
		 * succeeded in filling the request without any I/Os.  Either
		 * way, pr_send() was not executed - nothing has been sent
		 * to the socket yet.
		 */
		MPASS((curthread->td_pflags & TDP_KTHREAD) == 0);
		free(sfio, M_SENDFILE);
		return;
	}

#if defined(KERN_TLS) && defined(INVARIANTS)
	if ((sfio->m->m_flags & M_EXTPG) != 0)
		KASSERT(sfio->tls == sfio->m->m_epg_tls,
		    ("TLS session mismatch"));
	else
		KASSERT(sfio->tls == NULL,
		    ("non-ext_pgs mbuf with TLS session"));
#endif
	so = sfio->so;
	CURVNET_SET_QUIET(so->so_vnet);
	if (__predict_false(sfio->error)) {
		/*
		 * The I/O operation failed.  The state of data in the socket
		 * is now inconsistent, and all we can do is tear it down.
		 * The protocol abort method tears down the protocol state,
		 * frees all ready mbufs and detaches the not-ready ones.
		 * We will free the mbufs corresponding to this I/O manually.
		 *
		 * The socket is marked with EIO and made available for
		 * reading, so that the application receives EIO on the next
		 * syscall and eventually closes the socket.
		 */
		so->so_proto->pr_abort(so);
		so->so_error = EIO;

		mb_free_notready(sfio->m, sfio->npages);
#ifdef KERN_TLS
	} else if (sfio->tls != NULL && sfio->tls->mode == TCP_TLS_MODE_SW) {
		/*
		 * The I/O operation is complete, but we still need to
		 * encrypt.  We cannot do this in the interrupt thread
		 * of the disk controller, so forward the mbufs to a
		 * different thread.
		 *
		 * Donate the socket reference from sfio to ktls_enqueue()
		 * rather than explicitly invoking soref().
		 */
		ktls_enqueue(sfio->m, so, sfio->npages);
		goto out_with_ref;
#endif
	} else
		(void)so->so_proto->pr_ready(so, sfio->m, sfio->npages);

	sorele(so);
#ifdef KERN_TLS
out_with_ref:
#endif
	CURVNET_RESTORE();
	free(sfio, M_SENDFILE);
}

/*
 * Iterate through the pages vector and request paging for non-valid pages.
 */
static int
sendfile_swapin(vm_object_t obj, struct sf_io *sfio, int *nios, off_t off,
    off_t len, int rhpages, int flags)
{
	vm_page_t *pa;
	int a, count, count1, grabbed, i, j, npages, rv;

	pa = sfio->pa;
	npages = sfio->npages;
	*nios = 0;
	flags = (flags & SF_NODISKIO) ? VM_ALLOC_NOWAIT : 0;
	sfio->pindex0 = OFF_TO_IDX(off);

	/*
	 * First grab all the pages and wire them.  Note that we grab
	 * only the required pages.  Readahead pages are dealt with later.
	 */
	grabbed = vm_page_grab_pages_unlocked(obj, OFF_TO_IDX(off),
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | flags, pa, npages);
	if (grabbed < npages) {
		for (int i = grabbed; i < npages; i++)
			pa[i] = NULL;
		npages = grabbed;
		rhpages = 0;
	}

	for (i = 0; i < npages;) {
		/* Skip valid pages. */
		if (vm_page_is_valid(pa[i], vmoff(i, off) & PAGE_MASK,
		    xfsize(i, npages, off, len))) {
			vm_page_xunbusy(pa[i]);
			SFSTAT_INC(sf_pages_valid);
			i++;
			continue;
		}

		/*
		 * The next page is invalid.  Check if it belongs to the
		 * pager.  It may not be there, which is a regular situation
		 * for the shmem pager.  For the vnode pager this happens
		 * only in the case of a sparse file.
		 *
		 * An important feature of vm_pager_has_page() is the hint
		 * stored in 'a' about how many pages we can page in after
		 * this page in a single I/O.
		 */
		VM_OBJECT_RLOCK(obj);
		if (!vm_pager_has_page(obj, OFF_TO_IDX(vmoff(i, off)), NULL,
		    &a)) {
			VM_OBJECT_RUNLOCK(obj);
			pmap_zero_page(pa[i]);
			vm_page_valid(pa[i]);
			MPASS(pa[i]->dirty == 0);
			vm_page_xunbusy(pa[i]);
			i++;
			continue;
		}
		VM_OBJECT_RUNLOCK(obj);

		/*
		 * We want to page in as many pages as possible, limited only
		 * by the 'a' hint and the actual request.
		 */
		count = min(a + 1, npages - i);

		/*
		 * We should not page in on top of a valid page because
		 * there might still be an unfinished write tracked by,
		 * e.g., a buffer; thus we substitute any valid pages
		 * with the bogus one.
		 *
		 * We must not leave around xbusy pages which are not
		 * part of the run passed to vm_pager_getpages(),
		 * otherwise the pager might deadlock waiting for the busy
		 * status of the page, e.g. if it constitutes the
		 * buffer needed to validate another page.
		 *
		 * First trim the end of the run consisting of the
		 * valid pages, then replace the rest of the valid
		 * ones with bogus.
		 */
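		/*
		 * For example (a sketch): with count = 5 and a validity
		 * map of [I V I V V] ('I' invalid, 'V' valid), the first
		 * loop below trims the two trailing valid pages (count
		 * becomes 3) and the second loop substitutes the interior
		 * valid page, handing [I bogus I] to the pager.
		 */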
		count1 = count;
		for (j = i + count - 1; j > i; j--) {
			if (vm_page_is_valid(pa[j], vmoff(j, off) & PAGE_MASK,
			    xfsize(j, npages, off, len))) {
				vm_page_xunbusy(pa[j]);
				SFSTAT_INC(sf_pages_valid);
				count--;
			} else {
				break;
			}
		}

		/*
		 * The last page in the run pa[i + count - 1] is
		 * guaranteed to be invalid by the trim above, so it
		 * is not replaced with bogus, thus -1 in the loop end
		 * condition.
		 */
		MPASS(pa[i + count - 1]->valid != VM_PAGE_BITS_ALL);
		for (j = i + 1; j < i + count - 1; j++) {
			if (vm_page_is_valid(pa[j], vmoff(j, off) & PAGE_MASK,
			    xfsize(j, npages, off, len))) {
				vm_page_xunbusy(pa[j]);
				SFSTAT_INC(sf_pages_valid);
				SFSTAT_INC(sf_pages_bogus);
				pa[j] = bogus_page;
			}
		}

		refcount_acquire(&sfio->nios);
		rv = vm_pager_get_pages_async(obj, pa + i, count, NULL,
		    i + count == npages ? &rhpages : NULL,
		    &sendfile_iodone, sfio);
		if (__predict_false(rv != VM_PAGER_OK)) {
			sendfile_iowait(sfio, "sferrio");

			/*
			 * Recover the remaining pages before returning EIO.
			 * Pages from 0 to npages are wired.
			 * Pages from (i + count1) to npages are busied.
			 */
			for (j = 0; j < npages; j++) {
				if (j >= i + count1)
					vm_page_xunbusy(pa[j]);
				KASSERT(pa[j] != NULL && pa[j] != bogus_page,
				    ("%s: page %p[%d] I/O recovery failure",
				    __func__, pa, j));
				vm_page_unwire(pa[j], PQ_INACTIVE);
				pa[j] = NULL;
			}
			return (EIO);
		}

		SFSTAT_INC(sf_iocnt);
		SFSTAT_ADD(sf_pages_read, count);
		if (i + count == npages)
			SFSTAT_ADD(sf_rhpages_read, rhpages);

		i += count1;
		(*nios)++;
	}

	if (*nios == 0 && npages != 0)
		SFSTAT_INC(sf_noiocnt);

	return (0);
}

static int
sendfile_getobj(struct thread *td, struct file *fp, vm_object_t *obj_res,
    struct vnode **vp_res, struct shmfd **shmfd_res, off_t *obj_size,
    int *bsize)
{
	vm_object_t obj;
	struct vnode *vp;
	struct shmfd *shmfd;
	int error;

	error = 0;
	vp = *vp_res = NULL;
	obj = NULL;
	shmfd = *shmfd_res = NULL;
	*bsize = 0;

	/*
	 * The file descriptor must be a regular file and have a
	 * backing VM object.
	 */
	if (fp->f_type == DTYPE_VNODE) {
		vp = fp->f_vnode;
		vn_lock(vp, LK_SHARED | LK_RETRY);
		if (vp->v_type != VREG) {
			error = EINVAL;
			goto out;
		}
		*bsize = vp->v_mount->mnt_stat.f_iosize;
		obj = vp->v_object;
		if (obj == NULL) {
			error = EINVAL;
			goto out;
		}

		/*
		 * Use the pager size when available to simplify synchronization
		 * with filesystems, which otherwise must atomically update both
		 * the vnode pager size and file size.
		 */
		if (obj->type == OBJT_VNODE) {
			VM_OBJECT_RLOCK(obj);
			*obj_size = obj->un_pager.vnp.vnp_size;
		} else {
			error = vn_getsize_locked(vp, obj_size, td->td_ucred);
			if (error != 0)
				goto out;
			VM_OBJECT_RLOCK(obj);
		}
	} else if (fp->f_type == DTYPE_SHM) {
		shmfd = fp->f_data;
		obj = shmfd->shm_object;
		VM_OBJECT_RLOCK(obj);
		*obj_size = shmfd->shm_size;
	} else {
		error = EINVAL;
		goto out;
	}

	if ((obj->flags & OBJ_DEAD) != 0) {
		VM_OBJECT_RUNLOCK(obj);
		error = EBADF;
		goto out;
	}

	/*
	 * Temporarily increase the backing VM object's reference
	 * count so that a forced reclamation of its vnode does not
	 * immediately destroy it.
	 */
	vm_object_reference_locked(obj);
	VM_OBJECT_RUNLOCK(obj);
	*obj_res = obj;
	*vp_res = vp;
	*shmfd_res = shmfd;

out:
	if (vp != NULL)
		VOP_UNLOCK(vp);
	return (error);
}

static int
sendfile_getsock(struct thread *td, int s, struct file **sock_fp,
    struct socket **so)
{
	int error;

	*sock_fp = NULL;
	*so = NULL;

	/*
	 * The socket must be a stream socket and connected.
	 */
	error = getsock(td, s, &cap_send_rights, sock_fp);
	if (error != 0)
		return (error);
	*so = (*sock_fp)->f_data;
	if ((*so)->so_type != SOCK_STREAM)
		return (EINVAL);
	/*
	 * SCTP one-to-one style sockets currently don't work with
	 * sendfile().  So indicate EINVAL for now.
	 */
	if ((*so)->so_proto->pr_protocol == IPPROTO_SCTP)
		return (EINVAL);
	return (0);
}

/*
 * Check the socket state and wait (or return EAGAIN) for the needed
 * amount of space.
 */
int
sendfile_wait_generic(struct socket *so, off_t need, int *space)
{
	int error;

	MPASS(need > 0);
	MPASS(space != NULL);

	/*
	 * XXXGL: the hack with sb_lowat originates from d99b0dd2c5297.  To
	 * achieve high performance sending with sendfile(2) a non-blocking
	 * socket needs a fairly high low watermark.  Otherwise, the socket
	 * will be reported as writable too early, and sendfile(2) will send
	 * just a few bytes each time.  It is important to understand that
	 * we are changing sb_lowat not for the current invocation of the
	 * syscall, but for the *next* syscall.  So there is no way to
	 * work around the problem, e.g. provide a special version of
	 * sbspace().  Since this hack has been in the kernel for a long
	 * time, we anticipate that there is a lot of software that will
	 * suffer if we remove it.  See also b21104487324.
	 */
	error = 0;
	SOCK_SENDBUF_LOCK(so);
	if (so->so_snd.sb_flags & SB_AUTOLOWAT) {
		if (so->so_snd.sb_lowat < so->so_snd.sb_hiwat / 2)
			so->so_snd.sb_lowat = so->so_snd.sb_hiwat / 2;
		if (so->so_snd.sb_lowat < PAGE_SIZE &&
		    so->so_snd.sb_hiwat >= PAGE_SIZE)
			so->so_snd.sb_lowat = PAGE_SIZE;
	}
retry_space:
	if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
		error = EPIPE;
		goto done;
	} else if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		goto done;
	}
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto done;
	}

	*space = sbspace(&so->so_snd);
	if (*space < need && (*space <= 0 || *space < so->so_snd.sb_lowat)) {
		if (so->so_state & SS_NBIO) {
			error = EAGAIN;
			goto done;
		}
		/*
		 * sbwait() drops the lock while sleeping.  When we loop back
		 * to retry_space the state may have changed and we retest
		 * for it.
		 */
		error = sbwait(so, SO_SND);
		/*
		 * An error from sbwait() usually indicates that we've been
		 * interrupted by a signal.  If we've sent anything then return
		 * bytes sent, otherwise return the error.
		 */
		if (error != 0)
			goto done;
		goto retry_space;
	}
done:
	SOCK_SENDBUF_UNLOCK(so);

	return (error);
}

int
vn_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
    struct uio *trl_uio, off_t offset, size_t nbytes, off_t *sent, int flags,
    struct thread *td)
{
	struct file *sock_fp;
	struct vnode *vp;
	struct vm_object *obj;
	vm_page_t pga;
	struct socket *so;
	const struct protosw *pr;
#ifdef KERN_TLS
	struct ktls_session *tls;
#endif
	struct mbuf *m, *mh, *mhtail;
	struct sf_buf *sf;
	struct shmfd *shmfd;
	struct vattr va;
	off_t off, sbytes, rem, obj_size, nobj_size;
	int bsize, error, ext_pgs_idx, hdrlen, max_pgs, softerr;
#ifdef KERN_TLS
	int tls_enq_cnt;
#endif
	bool use_ext_pgs;

	obj = NULL;
	so = NULL;
	m = mh = NULL;
#ifdef KERN_TLS
	tls = NULL;
#endif
	hdrlen = sbytes = 0;
	softerr = 0;
	use_ext_pgs = false;

	error = sendfile_getobj(td, fp, &obj, &vp, &shmfd, &obj_size, &bsize);
	if (error != 0)
		return (error);

	error = sendfile_getsock(td, sockfd, &sock_fp, &so);
	if (error != 0)
		goto out;
	pr = so->so_proto;

#ifdef MAC
	error = mac_socket_check_send(td->td_ucred, so);
	if (error != 0)
		goto out;
#endif

	SFSTAT_INC(sf_syscalls);
	SFSTAT_ADD(sf_rhpages_requested, SF_READAHEAD(flags));

	rem = nbytes ? omin(nbytes, obj_size - offset) : obj_size - offset;

	/*
	 * Protect against multiple writers to the socket.
	 *
	 * XXXRW: Historically this has assumed non-interruptibility, so now
	 * we implement that, but possibly shouldn't.
	 */
	error = SOCK_IO_SEND_LOCK(so, SBL_WAIT | SBL_NOINTR);
	if (error != 0)
		goto out;
	CURVNET_SET(so->so_vnet);
#ifdef KERN_TLS
	tls = ktls_hold(so->so_snd.sb_tls_info);
#endif

	/*
	 * Loop through the pages of the file, starting with the requested
	 * offset.  Get a file page (do I/O if necessary), map the file page
	 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
	 * it on the socket.
	 * This is done in two loops.  The inner loop turns as many pages
	 * as it can into mbufs, up to the available socket buffer space,
	 * without blocking, to have them bulk delivered into the socket
	 * send buffer.  The outer loop checks the state and available
	 * space of the socket and takes care of the overall progress.
	 */
	for (off = offset; rem > 0; ) {
		struct sf_io *sfio;
		vm_page_t *pa;
		struct mbuf *m0, *mtail;
		int nios, space, npages, rhpages;

		mtail = NULL;
		if ((error = pr->pr_sendfile_wait(so, rem, &space)) != 0)
			goto done;
		/*
		 * At the beginning of the first loop check if any headers
		 * are specified and copy them into mbufs.  Reduce space in
		 * the socket buffer by the size of the header mbuf chain.
		 * Clear hdr_uio here and hdrlen at the end of the first loop.
		 */
		if (hdr_uio != NULL && hdr_uio->uio_resid > 0) {
			hdr_uio->uio_td = td;
			hdr_uio->uio_rw = UIO_WRITE;
#ifdef KERN_TLS
			if (tls != NULL)
				mh = m_uiotombuf(hdr_uio, M_WAITOK, space,
				    tls->params.max_frame_len, M_EXTPG);
			else
#endif
				mh = m_uiotombuf(hdr_uio, M_WAITOK,
				    space, 0, 0);
			hdrlen = m_length(mh, &mhtail);
			space -= hdrlen;
			/*
			 * If the header consumed all the socket buffer
			 * space, don't waste CPU cycles and jump to the end.
			 */
			if (space == 0) {
				sfio = NULL;
				nios = 0;
				goto prepend_header;
			}
			hdr_uio = NULL;
		}

		if (vp != NULL) {
			error = vn_lock(vp, LK_SHARED);
			if (error != 0)
				goto done;

			/*
			 * Check to see if the file size has changed.
			 */
			if (obj->type == OBJT_VNODE) {
				VM_OBJECT_RLOCK(obj);
				nobj_size = obj->un_pager.vnp.vnp_size;
				VM_OBJECT_RUNLOCK(obj);
			} else {
				error = VOP_GETATTR(vp, &va, td->td_ucred);
				if (error != 0) {
					VOP_UNLOCK(vp);
					goto done;
				}
				nobj_size = va.va_size;
			}
			if (off >= nobj_size) {
				VOP_UNLOCK(vp);
				goto done;
			}
			if (nobj_size != obj_size) {
				obj_size = nobj_size;
				rem = nbytes ? omin(nbytes + offset, obj_size) :
				    obj_size;
				rem -= off;
			}
		}

		if (space > rem)
			space = rem;
		else if (space > PAGE_SIZE) {
			/*
			 * Use page boundaries when possible for large
			 * requests.
			 */
			if (off & PAGE_MASK)
				space -= (PAGE_SIZE - (off & PAGE_MASK));
			space = trunc_page(space);
			if (off & PAGE_MASK)
				space += (PAGE_SIZE - (off & PAGE_MASK));
		}

		npages = howmany(space + (off & PAGE_MASK), PAGE_SIZE);

		/*
		 * Calculate the maximum allowed number of pages for readahead
		 * at this iteration.  If SF_USER_READAHEAD was set, we don't
		 * do any heuristics and use exactly the value supplied by the
		 * application.  Otherwise, we allow readahead up to "rem".
		 * If the application wants more, let it be, but there is no
		 * reason to go above maxphys.  Also check against "obj_size",
		 * since vm_pager_has_page() can hint beyond EOF.
		 */
		if (flags & SF_USER_READAHEAD) {
			rhpages = SF_READAHEAD(flags);
		} else {
			rhpages = howmany(rem + (off & PAGE_MASK), PAGE_SIZE) -
			    npages;
			rhpages += SF_READAHEAD(flags);
		}
		rhpages = min(howmany(maxphys, PAGE_SIZE), rhpages);
		rhpages = min(howmany(obj_size - trunc_page(off), PAGE_SIZE) -
		    npages, rhpages);
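
		/*
		 * Example (a sketch, 4 KB pages): with maxphys = 1 MB,
		 * space = 64 KB, rem = 1 MB, a page-aligned 'off' and no
		 * SF_READAHEAD bits, npages = 16 and the heuristic asks
		 * for rhpages = 240, which already fits under the
		 * howmany(maxphys, PAGE_SIZE) = 256 clamp and is then
		 * limited by what is left of the object past 'off'.
		 */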

		sfio = malloc(sizeof(struct sf_io) +
		    npages * sizeof(vm_page_t), M_SENDFILE, M_WAITOK);
		refcount_init(&sfio->nios, 1);
		sfio->obj = obj;
		sfio->error = 0;
		sfio->m = NULL;
		sfio->npages = npages;
#ifdef KERN_TLS
		/*
		 * This doesn't use ktls_hold() because sfio->m will
		 * also have a reference on 'tls' that will be valid
		 * for all of sfio's lifetime.
		 */
		sfio->tls = tls;
#endif
		vm_object_pip_add(obj, 1);
		error = sendfile_swapin(obj, sfio, &nios, off, space, rhpages,
		    flags);
		if (error != 0) {
			if (vp != NULL)
				VOP_UNLOCK(vp);
			sendfile_iodone(sfio, NULL, 0, error);
			goto done;
		}

		/*
		 * Loop and construct the maximum sized mbuf chain to be bulk
		 * dumped into the socket buffer.
		 */
		pa = sfio->pa;

		/*
		 * Use unmapped mbufs if enabled for TCP.  Unmapped
		 * bufs are restricted to TCP as that is what has been
		 * tested.  In particular, unmapped mbufs have not
		 * been tested with UNIX-domain sockets.
		 *
		 * TLS frames always require unmapped mbufs.
		 */
		if ((mb_use_ext_pgs && pr->pr_protocol == IPPROTO_TCP)
#ifdef KERN_TLS
		    || tls != NULL
#endif
		    ) {
			use_ext_pgs = true;
#ifdef KERN_TLS
			if (tls != NULL)
				max_pgs = num_pages(tls->params.max_frame_len);
			else
#endif
				max_pgs = MBUF_PEXT_MAX_PGS;

			/* Start at the last index, to wrap on first use. */
			ext_pgs_idx = max_pgs - 1;
		}

		for (int i = 0; i < npages; i++) {
			/*
			 * If a page wasn't grabbed successfully, then
			 * trim the array.  This can happen only with
			 * SF_NODISKIO.
			 */
			if (pa[i] == NULL) {
				SFSTAT_INC(sf_busy);
				fixspace(npages, i, off, &space);
				sfio->npages = i;
				softerr = EBUSY;
				break;
			}
			pga = pa[i];
			if (pga == bogus_page)
				pga = vm_page_relookup(obj, sfio->pindex0 + i);

			if (use_ext_pgs) {
				off_t xfs;

				ext_pgs_idx++;
				if (ext_pgs_idx == max_pgs) {
					m0 = mb_alloc_ext_pgs(M_WAITOK,
					    sendfile_free_mext_pg, M_RDONLY);

					if (flags & SF_NOCACHE) {
						m0->m_ext.ext_flags |=
						    EXT_FLAG_NOCACHE;

						/*
						 * See comment below regarding
						 * ignoring SF_NOCACHE for the
						 * last page.
						 */
						if ((npages - i <= max_pgs) &&
						    ((off + space) & PAGE_MASK) &&
						    (rem > space || rhpages > 0))
							m0->m_ext.ext_flags |=
							    EXT_FLAG_CACHE_LAST;
					}
					ext_pgs_idx = 0;

					/* Append to mbuf chain. */
					if (mtail != NULL)
						mtail->m_next = m0;
					else
						m = m0;
					mtail = m0;
					m0->m_epg_1st_off =
					    vmoff(i, off) & PAGE_MASK;
				}
				if (nios) {
					mtail->m_flags |= M_NOTREADY;
					m0->m_epg_nrdy++;
				}

				m0->m_epg_pa[ext_pgs_idx] = VM_PAGE_TO_PHYS(pga);
				m0->m_epg_npgs++;
				xfs = xfsize(i, npages, off, space);
				m0->m_epg_last_len = xfs;
				MBUF_EXT_PGS_ASSERT_SANITY(m0);
				mtail->m_len += xfs;
				mtail->m_ext.ext_size += PAGE_SIZE;
				continue;
			}

			/*
			 * Get a sendfile buf.  When allocating the
			 * first buffer for the mbuf chain, we usually
			 * wait as long as necessary, but this wait
			 * can be interrupted.  For subsequent
			 * buffers, do not sleep, since several
			 * threads might exhaust the buffers and then
			 * deadlock.
			 */
			sf = sf_buf_alloc(pga,
			    m != NULL ? SFB_NOWAIT : SFB_CATCH);
			if (sf == NULL) {
				SFSTAT_INC(sf_allocfail);
				sendfile_iowait(sfio, "sfnosf");
				for (int j = i; j < npages; j++) {
					vm_page_unwire(pa[j], PQ_INACTIVE);
					pa[j] = NULL;
				}
				if (m == NULL)
					softerr = ENOBUFS;
				fixspace(npages, i, off, &space);
				sfio->npages = i;
				break;
			}

			m0 = m_get(M_WAITOK, MT_DATA);
			m0->m_ext.ext_buf = (char *)sf_buf_kva(sf);
			m0->m_ext.ext_size = PAGE_SIZE;
			m0->m_ext.ext_arg1 = sf;
			m0->m_ext.ext_type = EXT_SFBUF;
			m0->m_ext.ext_flags = EXT_FLAG_EMBREF;
			m0->m_ext.ext_free = sendfile_free_mext;
			/*
			 * SF_NOCACHE sets the page as being freed upon send.
			 * However, we ignore it for the last page in 'space',
			 * if the page is truncated, and we got more data to
			 * send (rem > space), or if we have readahead
			 * configured (rhpages > 0).
			 */
			if ((flags & SF_NOCACHE) &&
			    (i != npages - 1 ||
			    !((off + space) & PAGE_MASK) ||
			    !(rem > space || rhpages > 0)))
				m0->m_ext.ext_flags |= EXT_FLAG_NOCACHE;
			m0->m_ext.ext_count = 1;
			m0->m_flags |= (M_EXT | M_RDONLY);
			if (nios)
				m0->m_flags |= M_NOTREADY;
			m0->m_data = (char *)sf_buf_kva(sf) +
			    (vmoff(i, off) & PAGE_MASK);
			m0->m_len = xfsize(i, npages, off, space);

			/* Append to mbuf chain. */
			if (mtail != NULL)
				mtail->m_next = m0;
			else
				m = m0;
			mtail = m0;
		}

		if (vp != NULL)
			VOP_UNLOCK(vp);

		/* Keep track of bytes processed. */
		off += space;
		rem -= space;

		/*
		 * Prepend the header, if any.  Save a pointer to the
		 * first mbuf with a page.
		 */
		if (hdrlen) {
prepend_header:
			m0 = mhtail->m_next = m;
			m = mh;
			mh = NULL;
		} else
			m0 = m;

		if (m == NULL) {
			KASSERT(softerr, ("%s: m NULL, no error", __func__));
			error = softerr;
			sendfile_iodone(sfio, NULL, 0, 0);
			goto done;
		}

		/* Add the buffer chain to the socket buffer. */
		KASSERT(m_length(m, NULL) == space + hdrlen,
		    ("%s: mlen %u space %d hdrlen %d",
		    __func__, m_length(m, NULL), space, hdrlen));

#ifdef KERN_TLS
		if (tls != NULL)
			ktls_frame(m, tls, &tls_enq_cnt, TLS_RLTYPE_APP);
#endif
		if (nios == 0) {
			/*
			 * If sendfile_swapin() didn't initiate any I/Os,
			 * which happens if all data is cached in VM, or if
			 * the header consumed all socket buffer space and
			 * sfio is NULL, then we can send data right now
			 * without the PRUS_NOTREADY flag.
			 */
			if (sfio != NULL)
				sendfile_iodone(sfio, NULL, 0, 0);
#ifdef KERN_TLS
			if (tls != NULL && tls->mode == TCP_TLS_MODE_SW) {
				error = pr->pr_send(so, PRUS_NOTREADY, m, NULL,
				    NULL, td);
				if (error != 0) {
					m_freem(m);
				} else {
					soref(so);
					ktls_enqueue(m, so, tls_enq_cnt);
				}
			} else
#endif
				error = pr->pr_send(so, 0, m, NULL, NULL, td);
		} else {
			sfio->so = so;
			sfio->m = m0;
			soref(so);
			error = pr->pr_send(so, PRUS_NOTREADY, m, NULL, NULL,
			    td);
			sendfile_iodone(sfio, NULL, 0, error);
		}
#ifdef TCP_REQUEST_TRK
		if (so->so_proto->pr_protocol == IPPROTO_TCP) {
			/* Log the sendfile call to the TCP log, if enabled. */
			tcp_log_sendfile(so, offset, nbytes, flags);
		}
#endif
		m = NULL;
		if (error)
			goto done;
		sbytes += space + hdrlen;
		if (hdrlen)
			hdrlen = 0;
		if (softerr) {
			error = softerr;
			goto done;
		}
	}

	/*
	 * Send trailers.  Wimp out and use writev(2).
	 */
	if (trl_uio != NULL) {
		SOCK_IO_SEND_UNLOCK(so);
		CURVNET_RESTORE();
		error = kern_writev(td, sockfd, trl_uio);
		if (error == 0)
			sbytes += td->td_retval[0];
		goto out;
	}

done:
	SOCK_IO_SEND_UNLOCK(so);
	CURVNET_RESTORE();
out:
	/*
	 * If there was no error we have to clear td->td_retval[0]
	 * because it may have been set by writev.
	 */
	if (error == 0) {
		td->td_retval[0] = 0;
		if (sbytes > 0 && vp != NULL)
			INOTIFY(vp, IN_ACCESS);
	}
	if (sent != NULL) {
		(*sent) = sbytes;
	}
	if (obj != NULL)
		vm_object_deallocate(obj);
	if (so)
		fdrop(sock_fp, td);
	if (m)
		m_freem(m);
	if (mh)
		m_freem(mh);
#ifdef KERN_TLS
	if (tls != NULL)
		ktls_free(tls);
#endif
	if (error == ERESTART)
		error = EINTR;

	return (error);
}

static int
sendfile(struct thread *td, struct sendfile_args *uap, int compat)
{
	struct sf_hdtr hdtr;
	struct uio *hdr_uio, *trl_uio;
	struct file *fp;
	off_t sbytes;
	int error;

	/*
	 * The file offset must not be negative.  If it goes beyond EOF
	 * we send only the header/trailer and no payload data.
	 */
	if (uap->offset < 0)
		return (EINVAL);

	sbytes = 0;
	hdr_uio = trl_uio = NULL;

	if (uap->hdtr != NULL) {
		error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
		if (error != 0)
			goto out;
		if (hdtr.headers != NULL) {
			error = copyinuio(hdtr.headers, hdtr.hdr_cnt,
			    &hdr_uio);
			if (error != 0)
				goto out;
#ifdef COMPAT_FREEBSD4
			/*
			 * In FreeBSD < 5.0 the nbytes to send also included
			 * the header.  If compat is specified, subtract the
			 * header size from nbytes.
			 */
			if (compat) {
				if (uap->nbytes > hdr_uio->uio_resid)
					uap->nbytes -= hdr_uio->uio_resid;
				else
					uap->nbytes = 0;
			}
#endif
		}
		if (hdtr.trailers != NULL) {
			error = copyinuio(hdtr.trailers, hdtr.trl_cnt,
			    &trl_uio);
			if (error != 0)
				goto out;
		}
	}

	AUDIT_ARG_FD(uap->fd);

	/*
	 * sendfile(2) can start at any offset within a file so we require
	 * CAP_READ+CAP_SEEK = CAP_PREAD.
	 */
	if ((error = fget_read(td, uap->fd, &cap_pread_rights, &fp)) != 0)
		goto out;

	error = fo_sendfile(fp, uap->s, hdr_uio, trl_uio, uap->offset,
	    uap->nbytes, &sbytes, uap->flags, td);
	fdrop(fp, td);

	if (uap->sbytes != NULL)
		(void)copyout(&sbytes, uap->sbytes, sizeof(off_t));

out:
	freeuio(hdr_uio);
	freeuio(trl_uio);
	return (error);
}

/*
 * sendfile(2)
 *
 * int sendfile(int fd, int s, off_t offset, size_t nbytes,
 *       struct sf_hdtr *hdtr, off_t *sbytes, int flags)
 *
 * Send a file specified by 'fd' and starting at 'offset' to a socket
 * specified by 's'.  Send only 'nbytes' of the file, or until EOF if
 * nbytes == 0.  Optionally add a header and/or trailer to the socket
 * output.  If specified, write the total number of bytes sent into
 * *sbytes.
 */
int
sys_sendfile(struct thread *td, struct sendfile_args *uap)
{

	return (sendfile(td, uap, 0));
}

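/*
 * A userland usage sketch (illustration only, not part of this file; the
 * descriptors 'fd' and 's' are assumed to be set up by the caller):
 *
 *	off_t sbytes;
 *	if (sendfile(fd, s, 0, 0, NULL, &sbytes, SF_NODISKIO) == -1 &&
 *	    errno == EBUSY) {
 *		// Pages were busy; fall back to read(2) + send(2),
 *		// or retry without SF_NODISKIO.
 *	}
 */
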
#ifdef COMPAT_FREEBSD4
int
freebsd4_sendfile(struct thread *td, struct freebsd4_sendfile_args *uap)
{
	struct sendfile_args args;

	args.fd = uap->fd;
	args.s = uap->s;
	args.offset = uap->offset;
	args.nbytes = uap->nbytes;
	args.hdtr = uap->hdtr;
	args.sbytes = uap->sbytes;
	args.flags = uap->flags;

	return (sendfile(td, &args, 1));
}
#endif /* COMPAT_FREEBSD4 */
1307