/* xref: /freebsd/sys/kern/kern_sendfile.c (revision e795a04083119a5e0229a676c81f92c828ec747b) */
/*-
 * Copyright (c) 2013-2015 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 1998, David Greenman. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kern_tls.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/ktls.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/rwlock.h>
#include <sys/sf_buf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vnode.h>

#include <net/vnet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SENDFILE, "sendfile", "sendfile dynamic memory");

#define	EXT_FLAG_SYNC		EXT_FLAG_VENDOR1
#define	EXT_FLAG_NOCACHE	EXT_FLAG_VENDOR2
#define	EXT_FLAG_CACHE_LAST	EXT_FLAG_VENDOR3

/*
 * Structure describing a single sendfile(2) I/O, which may consist of
 * several underlying pager I/Os.
 *
 * The syscall context allocates the structure and initializes 'nios'
 * to 1.  As sendfile_swapin() runs through pages and starts asynchronous
 * paging operations, it increments 'nios'.
 *
 * Every I/O completion calls sendfile_iodone(), which decrements 'nios',
 * and the syscall also calls sendfile_iodone() after allocating all mbufs,
 * linking them and sending them to the socket.  Whoever reaches zero 'nios'
 * is responsible for calling pru_ready on the socket, to notify it of
 * readiness of the data.
 */
struct sf_io {
	volatile u_int	nios;
	u_int		error;
	int		npages;
	struct socket	*so;
	struct mbuf	*m;
	vm_object_t	obj;
	vm_pindex_t	pindex0;
#ifdef KERN_TLS
	struct ktls_session *tls;
#endif
	vm_page_t	pa[];
};
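
/*
 * Illustrative timeline of the 'nios' hand-off described above (a sketch,
 * assuming one sendfile(2) call that starts two asynchronous pager I/Os):
 *
 *	syscall:		 refcount_init(&sfio->nios, 1)	nios = 1
 *	sendfile_swapin():	 refcount_acquire() per I/O	nios = 3
 *	first I/O completes:	 sendfile_iodone()		nios = 2
 *	syscall, after pru_send: sendfile_iodone()		nios = 1
 *	second I/O completes:	 sendfile_iodone()		nios = 0,
 *				 and this last context calls pru_ready()
 */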

/*
 * Structure used to track requests with SF_SYNC flag.
 */
struct sendfile_sync {
	struct mtx	mtx;
	struct cv	cv;
	unsigned	count;
};

counter_u64_t sfstat[sizeof(struct sfstat) / sizeof(uint64_t)];

static void
sfstat_init(const void *unused)
{

	COUNTER_ARRAY_ALLOC(sfstat, sizeof(struct sfstat) / sizeof(uint64_t),
	    M_WAITOK);
}
SYSINIT(sfstat, SI_SUB_MBUF, SI_ORDER_FIRST, sfstat_init, NULL);

static int
sfstat_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct sfstat s;

	COUNTER_ARRAY_COPY(sfstat, &s, sizeof(s) / sizeof(uint64_t));
	if (req->newptr)
		COUNTER_ARRAY_ZERO(sfstat, sizeof(s) / sizeof(uint64_t));
	return (SYSCTL_OUT(req, &s, sizeof(s)));
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, sfstat,
    CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0,
    sfstat_sysctl, "I",
    "sendfile statistics");
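
/*
 * A minimal userland sketch of consuming this sysctl (an illustration,
 * assuming <sys/sysctl.h> and struct sfstat from <sys/sf_buf.h>, the
 * same layout netstat consumes; not part of the kernel source):
 *
 *	struct sfstat s;
 *	size_t len = sizeof(s);
 *
 *	if (sysctlbyname("kern.ipc.sfstat", &s, &len, NULL, 0) == 0)
 *		printf("sendfile calls: %ju\n", (uintmax_t)s.sf_syscalls);
 */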

static void
sendfile_free_mext(struct mbuf *m)
{
	struct sf_buf *sf;
	vm_page_t pg;
	int flags;

	KASSERT(m->m_flags & M_EXT && m->m_ext.ext_type == EXT_SFBUF,
	    ("%s: m %p !M_EXT or !EXT_SFBUF", __func__, m));

	sf = m->m_ext.ext_arg1;
	pg = sf_buf_page(sf);
	flags = (m->m_ext.ext_flags & EXT_FLAG_NOCACHE) != 0 ? VPR_TRYFREE : 0;

	sf_buf_free(sf);
	vm_page_release(pg, flags);

	if (m->m_ext.ext_flags & EXT_FLAG_SYNC) {
		struct sendfile_sync *sfs = m->m_ext.ext_arg2;

		mtx_lock(&sfs->mtx);
		KASSERT(sfs->count > 0, ("Sendfile sync botchup count == 0"));
		if (--sfs->count == 0)
			cv_signal(&sfs->cv);
		mtx_unlock(&sfs->mtx);
	}
}

static void
sendfile_free_mext_pg(struct mbuf *m)
{
	struct mbuf_ext_pgs *ext_pgs;
	vm_page_t pg;
	int flags, i;
	bool cache_last;

	KASSERT(m->m_flags & M_EXT && m->m_ext.ext_type == EXT_PGS,
	    ("%s: m %p !M_EXT or !EXT_PGS", __func__, m));

	cache_last = m->m_ext.ext_flags & EXT_FLAG_CACHE_LAST;
	ext_pgs = &m->m_ext_pgs;
	flags = (m->m_ext.ext_flags & EXT_FLAG_NOCACHE) != 0 ? VPR_TRYFREE : 0;

	for (i = 0; i < ext_pgs->npgs; i++) {
		if (cache_last && i == ext_pgs->npgs - 1)
			flags = 0;
		pg = PHYS_TO_VM_PAGE(ext_pgs->m_epg_pa[i]);
		vm_page_release(pg, flags);
	}

	if (m->m_ext.ext_flags & EXT_FLAG_SYNC) {
		struct sendfile_sync *sfs = m->m_ext.ext_arg1;

		mtx_lock(&sfs->mtx);
		KASSERT(sfs->count > 0, ("Sendfile sync botchup count == 0"));
		if (--sfs->count == 0)
			cv_signal(&sfs->cv);
		mtx_unlock(&sfs->mtx);
	}
}

/*
 * Helper function to calculate how much data to put into page i of n.
 * Only first and last pages are special.
 */
static inline off_t
xfsize(int i, int n, off_t off, off_t len)
{

	if (i == 0)
		return (omin(PAGE_SIZE - (off & PAGE_MASK), len));

	if (i == n - 1 && ((off + len) & PAGE_MASK) > 0)
		return ((off + len) & PAGE_MASK);

	return (PAGE_SIZE);
}
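
/*
 * Worked example (an illustration, assuming PAGE_SIZE == 4096): for
 * off = 6144, len = 9216 and n = 3, page 0 carries
 * 4096 - (6144 & PAGE_MASK) = 2048 bytes, page 1 carries a full 4096,
 * and page 2 carries (6144 + 9216) & PAGE_MASK = 3072; together the
 * three slices cover exactly len = 9216 bytes.
 */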

/*
 * Helper function to get the offset within the object for page i.
 */
static inline vm_ooffset_t
vmoff(int i, off_t off)
{

	if (i == 0)
		return ((vm_ooffset_t)off);

	return (trunc_page(off + i * PAGE_SIZE));
}

/*
 * Helper function used when allocation of a page or sf_buf failed.
 * Pretend as if we don't have enough space; subtract xfsize() of
 * all pages that failed.
 */
static inline void
fixspace(int old, int new, off_t off, int *space)
{

	KASSERT(old > new, ("%s: old %d new %d", __func__, old, new));

	/* Subtract last one. */
	*space -= xfsize(old - 1, old, off, *space);
	old--;

	if (new == old)
		/* There was only one page. */
		return;

	/* Subtract first one. */
	if (new == 0) {
		*space -= xfsize(0, old, off, *space);
		new++;
	}

	/* Rest of pages are full sized. */
	*space -= (old - new) * PAGE_SIZE;

	KASSERT(*space >= 0, ("%s: space went backwards", __func__));
}
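
/*
 * Worked example, continuing the xfsize() numbers above (PAGE_SIZE ==
 * 4096, off = 6144, *space = 9216): if allocation stopped at old = 3,
 * new = 1, the last page gives back xfsize(2, 3, ...) = 3072, 'new' is
 * nonzero so page 0 is kept, and the single remaining full page gives
 * back 4096, leaving *space = 2048, exactly the slice still backed by
 * page 0.
 */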

/*
 * Wait for all in-flight I/Os to complete; we must not unwire pages
 * under them.
 */
static void
sendfile_iowait(struct sf_io *sfio, const char *wmesg)
{
	while (atomic_load_int(&sfio->nios) != 1)
		pause(wmesg, 1);
}

/*
 * I/O completion callback.
 */
static void
sendfile_iodone(void *arg, vm_page_t *pa, int count, int error)
{
	struct sf_io *sfio = arg;
	struct socket *so;
	int i;

	if (error != 0) {
		sfio->error = error;
		/*
		 * Restoration of the pg[] elements is done by
		 * sendfile_swapin().
		 */
	} else {
		/*
		 * Restore the valid page pointers.  They are already
		 * unbusied, but still wired.  For the error != 0 case,
		 * sendfile_swapin() handles unbusy.
		 *
		 * XXXKIB since pages are only wired, and we do not
		 * own the object lock, other users might have
		 * invalidated them in the meantime.  Similarly, after we
		 * unbusied the swapped-in pages, they can become
		 * invalid under us.
		 */
		MPASS(count == 0 || pa[0] != bogus_page);
		for (i = 0; i < count; i++) {
			if (pa[i] == bogus_page) {
				sfio->pa[(pa[0]->pindex - sfio->pindex0) + i] =
				    pa[i] = vm_page_relookup(sfio->obj,
				    pa[0]->pindex + i);
				KASSERT(pa[i] != NULL,
				    ("%s: page %p[%d] disappeared",
				    __func__, pa, i));
			} else {
				vm_page_xunbusy_unchecked(pa[i]);
			}
		}
	}

	if (!refcount_release(&sfio->nios))
		return;

	vm_object_pip_wakeup(sfio->obj);

	if (sfio->m == NULL) {
		/*
		 * Either the I/O operation failed, or we failed to allocate
		 * buffers, or we bailed out on the first busy page, or we
		 * succeeded in filling the request without any I/Os. Anyway,
		 * pru_send hadn't been executed - nothing had been sent
		 * to the socket yet.
		 */
		MPASS((curthread->td_pflags & TDP_KTHREAD) == 0);
		free(sfio, M_SENDFILE);
		return;
	}

#if defined(KERN_TLS) && defined(INVARIANTS)
	if ((sfio->m->m_flags & M_EXT) != 0 &&
	    sfio->m->m_ext.ext_type == EXT_PGS)
		KASSERT(sfio->tls == sfio->m->m_ext_pgs.tls,
		    ("TLS session mismatch"));
	else
		KASSERT(sfio->tls == NULL,
		    ("non-ext_pgs mbuf with TLS session"));
#endif
	so = sfio->so;
	CURVNET_SET(so->so_vnet);
	if (__predict_false(sfio->error)) {
		/*
		 * The I/O operation failed.  The state of data in the socket
		 * is now inconsistent, and all we can do is tear it
		 * down. The protocol abort method would tear down protocol
		 * state, free all ready mbufs and detach not ready ones.
		 * We will free the mbufs corresponding to this I/O manually.
		 *
		 * The socket would be marked with EIO and made available
		 * for read, so that the application receives EIO on the next
		 * syscall and eventually closes the socket.
		 */
		so->so_proto->pr_usrreqs->pru_abort(so);
		so->so_error = EIO;

		mb_free_notready(sfio->m, sfio->npages);
#ifdef KERN_TLS
	} else if (sfio->tls != NULL && sfio->tls->mode == TCP_TLS_MODE_SW) {
		/*
		 * The I/O operation is complete, but we still need to
		 * encrypt.  We cannot do this in the interrupt thread
		 * of the disk controller, so forward the mbufs to a
		 * different thread.
		 *
		 * Donate the socket reference from sfio rather than
		 * explicitly invoking soref().
		 */
		ktls_enqueue(sfio->m, so, sfio->npages);
		goto out_with_ref;
#endif
	} else
		(void)(so->so_proto->pr_usrreqs->pru_ready)(so, sfio->m,
		    sfio->npages);

	SOCK_LOCK(so);
	sorele(so);
#ifdef KERN_TLS
out_with_ref:
#endif
	CURVNET_RESTORE();
	free(sfio, M_SENDFILE);
}

/*
 * Iterate through the pages vector and request paging for invalid pages.
 */
static int
sendfile_swapin(vm_object_t obj, struct sf_io *sfio, int *nios, off_t off,
    off_t len, int npages, int rhpages, int flags)
{
	vm_page_t *pa;
	int a, count, count1, grabbed, i, j, rv;

	pa = sfio->pa;
	*nios = 0;
	flags = (flags & SF_NODISKIO) ? VM_ALLOC_NOWAIT : 0;
	sfio->pindex0 = OFF_TO_IDX(off);

	/*
	 * First grab all the pages and wire them.  Note that we grab
	 * only required pages.  Readahead pages are dealt with later.
	 */
	grabbed = vm_page_grab_pages_unlocked(obj, OFF_TO_IDX(off),
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | flags, pa, npages);
	if (grabbed < npages) {
		for (int i = grabbed; i < npages; i++)
			pa[i] = NULL;
		npages = grabbed;
		rhpages = 0;
	}

	for (i = 0; i < npages;) {
		/* Skip valid pages. */
		if (vm_page_is_valid(pa[i], vmoff(i, off) & PAGE_MASK,
		    xfsize(i, npages, off, len))) {
			vm_page_xunbusy(pa[i]);
			SFSTAT_INC(sf_pages_valid);
			i++;
			continue;
		}

		/*
		 * Next page is invalid.  Check if it belongs to pager.  It
		 * may not be there, which is a regular situation for shmem
		 * pager.  For vnode pager this happens only in case of
		 * a sparse file.
		 *
		 * Important feature of vm_pager_has_page() is the hint
		 * stored in 'a', about how many pages we can pagein after
		 * this page in a single I/O.
		 */
		VM_OBJECT_RLOCK(obj);
		if (!vm_pager_has_page(obj, OFF_TO_IDX(vmoff(i, off)), NULL,
		    &a)) {
			VM_OBJECT_RUNLOCK(obj);
			pmap_zero_page(pa[i]);
			vm_page_valid(pa[i]);
			MPASS(pa[i]->dirty == 0);
			vm_page_xunbusy(pa[i]);
			i++;
			continue;
		}
		VM_OBJECT_RUNLOCK(obj);

		/*
		 * We want to pagein as many pages as possible, limited only
		 * by the 'a' hint and actual request.
		 */
		count = min(a + 1, npages - i);

		/*
		 * We should not pagein into a valid page because
		 * there might still be an unfinished write tracked by
		 * e.g. a buffer, thus we substitute any valid pages
		 * with the bogus one.
		 *
		 * We must not leave around xbusy pages which are not
		 * part of the run passed to vm_pager_getpages(),
		 * otherwise the pager might deadlock waiting for the busy
		 * status of the page, e.g. if it constitutes the
		 * buffer needed to validate another page.
		 *
		 * First trim the end of the run consisting of the
		 * valid pages, then replace the rest of the valid
		 * with bogus.
		 */
		count1 = count;
		for (j = i + count - 1; j > i; j--) {
			if (vm_page_is_valid(pa[j], vmoff(j, off) & PAGE_MASK,
			    xfsize(j, npages, off, len))) {
				vm_page_xunbusy(pa[j]);
				SFSTAT_INC(sf_pages_valid);
				count--;
			} else {
				break;
			}
		}

		/*
		 * The last page in the run pa[i + count - 1] is
		 * guaranteed to be invalid by the trim above, so it
		 * is not replaced with bogus, thus -1 in the loop end
		 * condition.
		 */
		MPASS(pa[i + count - 1]->valid != VM_PAGE_BITS_ALL);
		for (j = i + 1; j < i + count - 1; j++) {
			if (vm_page_is_valid(pa[j], vmoff(j, off) & PAGE_MASK,
			    xfsize(j, npages, off, len))) {
				vm_page_xunbusy(pa[j]);
				SFSTAT_INC(sf_pages_valid);
				SFSTAT_INC(sf_pages_bogus);
				pa[j] = bogus_page;
			}
		}

		refcount_acquire(&sfio->nios);
		rv = vm_pager_get_pages_async(obj, pa + i, count, NULL,
		    i + count == npages ? &rhpages : NULL,
		    &sendfile_iodone, sfio);
		if (__predict_false(rv != VM_PAGER_OK)) {
			sendfile_iowait(sfio, "sferrio");

			/*
			 * Perform full page recovery before returning EIO.
			 * Pages from 0 to npages are wired.
			 * Pages from (i + 1) to (i + count - 1) may be
			 * substituted with the bogus page, and not busied.
			 * Pages from (i + count) to (i + count1 - 1) are
			 * not busied.
			 * The rest of the pages from i to npages are busied.
			 */
			for (j = 0; j < npages; j++) {
				if (j >= i + count && j < i + count1)
					;
				else if (j > i && j < i + count - 1 &&
				    pa[j] == bogus_page)
					pa[j] = vm_page_relookup(obj,
					    OFF_TO_IDX(vmoff(j, off)));
				else if (j >= i)
					vm_page_xunbusy(pa[j]);
				KASSERT(pa[j] != NULL && pa[j] != bogus_page,
				    ("%s: page %p[%d] I/O recovery failure",
				    __func__, pa, j));
				vm_page_unwire(pa[j], PQ_INACTIVE);
			}
			return (EIO);
		}

		SFSTAT_INC(sf_iocnt);
		SFSTAT_ADD(sf_pages_read, count);
		if (i + count == npages)
			SFSTAT_ADD(sf_rhpages_read, rhpages);

		i += count1;
		(*nios)++;
	}

	if (*nios == 0 && npages != 0)
		SFSTAT_INC(sf_noiocnt);

	return (0);
}
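
/*
 * Illustrative example of the run handling above (a sketch): for i = 0
 * and a 6-page run where pages 2 and 4 are valid ('V') and the rest are
 * invalid ('I'):
 *
 *	index:	0 1 2 3 4 5
 *	state:	I I V I V I
 *
 * the trim loop stops immediately at invalid page 5, so count stays 6,
 * and the substitution loop hands pages 2 and 4 to the pager as
 * bogus_page.  Had pages 4 and 5 both been valid, they would have been
 * unbusied and trimmed instead, giving count = 4 with count1 = 6, and
 * only page 2 would be substituted.
 */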

static int
sendfile_getobj(struct thread *td, struct file *fp, vm_object_t *obj_res,
    struct vnode **vp_res, struct shmfd **shmfd_res, off_t *obj_size,
    int *bsize)
{
	struct vattr va;
	vm_object_t obj;
	struct vnode *vp;
	struct shmfd *shmfd;
	int error;

	vp = *vp_res = NULL;
	obj = NULL;
	shmfd = *shmfd_res = NULL;
	*bsize = 0;

	/*
	 * The file descriptor must be a regular file and have a
	 * backing VM object.
	 */
	if (fp->f_type == DTYPE_VNODE) {
		vp = fp->f_vnode;
		vn_lock(vp, LK_SHARED | LK_RETRY);
		if (vp->v_type != VREG) {
			error = EINVAL;
			goto out;
		}
		*bsize = vp->v_mount->mnt_stat.f_iosize;
		error = VOP_GETATTR(vp, &va, td->td_ucred);
		if (error != 0)
			goto out;
		*obj_size = va.va_size;
		obj = vp->v_object;
		if (obj == NULL) {
			error = EINVAL;
			goto out;
		}
	} else if (fp->f_type == DTYPE_SHM) {
		error = 0;
		shmfd = fp->f_data;
		obj = shmfd->shm_object;
		*obj_size = shmfd->shm_size;
	} else {
		error = EINVAL;
		goto out;
	}

	VM_OBJECT_WLOCK(obj);
	if ((obj->flags & OBJ_DEAD) != 0) {
		VM_OBJECT_WUNLOCK(obj);
		error = EBADF;
		goto out;
	}

	/*
	 * Temporarily increase the backing VM object's reference
	 * count so that a forced reclamation of its vnode does not
	 * immediately destroy it.
	 */
	vm_object_reference_locked(obj);
	VM_OBJECT_WUNLOCK(obj);
	*obj_res = obj;
	*vp_res = vp;
	*shmfd_res = shmfd;

out:
	if (vp != NULL)
		VOP_UNLOCK(vp);
	return (error);
}

static int
sendfile_getsock(struct thread *td, int s, struct file **sock_fp,
    struct socket **so)
{
	int error;

	*sock_fp = NULL;
	*so = NULL;

	/*
	 * The socket must be a stream socket and connected.
	 */
	error = getsock_cap(td, s, &cap_send_rights,
	    sock_fp, NULL, NULL);
	if (error != 0)
		return (error);
	*so = (*sock_fp)->f_data;
	if ((*so)->so_type != SOCK_STREAM)
		return (EINVAL);
	/*
	 * SCTP one-to-one style sockets currently don't work with
	 * sendfile(). So indicate EINVAL for now.
	 */
	if ((*so)->so_proto->pr_protocol == IPPROTO_SCTP)
		return (EINVAL);
	if (SOLISTENING(*so))
		return (ENOTCONN);
	return (0);
}

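/*
 * For context, a minimal userland sketch of the call this implements
 * (a sketch, assuming a connected stream socket 'sock' and a regular
 * file descriptor 'fd'; not part of the kernel source):
 *
 *	off_t sbytes;
 *
 *	if (sendfile(fd, sock, 0, 0, NULL, &sbytes, SF_NODISKIO) == -1 &&
 *	    errno == EBUSY) {
 *		... fall back to read(2) + send(2) ...
 *	}
 *
 * A zero 'nbytes' requests the rest of the file; 'sbytes' reports how
 * much was queued even when the call returns an error mid-transfer.
 */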
int
vn_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
    struct uio *trl_uio, off_t offset, size_t nbytes, off_t *sent, int flags,
    struct thread *td)
{
	struct file *sock_fp;
	struct vnode *vp;
	struct vm_object *obj;
	vm_page_t pga;
	struct socket *so;
#ifdef KERN_TLS
	struct ktls_session *tls;
#endif
	struct mbuf_ext_pgs *ext_pgs;
	struct mbuf *m, *mh, *mhtail;
	struct sf_buf *sf;
	struct shmfd *shmfd;
	struct sendfile_sync *sfs;
	struct vattr va;
	off_t off, sbytes, rem, obj_size;
	int bsize, error, ext_pgs_idx, hdrlen, max_pgs, softerr;
#ifdef KERN_TLS
	int tls_enq_cnt;
#endif
	bool use_ext_pgs;

	obj = NULL;
	so = NULL;
	m = mh = NULL;
	sfs = NULL;
#ifdef KERN_TLS
	tls = NULL;
#endif
	hdrlen = sbytes = 0;
	softerr = 0;
	use_ext_pgs = false;

	error = sendfile_getobj(td, fp, &obj, &vp, &shmfd, &obj_size, &bsize);
	if (error != 0)
		return (error);

	error = sendfile_getsock(td, sockfd, &sock_fp, &so);
	if (error != 0)
		goto out;

#ifdef MAC
	error = mac_socket_check_send(td->td_ucred, so);
	if (error != 0)
		goto out;
#endif

	SFSTAT_INC(sf_syscalls);
	SFSTAT_ADD(sf_rhpages_requested, SF_READAHEAD(flags));

	if (flags & SF_SYNC) {
		sfs = malloc(sizeof(*sfs), M_SENDFILE, M_WAITOK | M_ZERO);
		mtx_init(&sfs->mtx, "sendfile", NULL, MTX_DEF);
		cv_init(&sfs->cv, "sendfile");
	}

	rem = nbytes ? omin(nbytes, obj_size - offset) : obj_size - offset;

	/*
	 * Protect against multiple writers to the socket.
	 *
	 * XXXRW: Historically this has assumed non-interruptibility, so now
	 * we implement that, but possibly shouldn't.
	 */
	(void)sblock(&so->so_snd, SBL_WAIT | SBL_NOINTR);
#ifdef KERN_TLS
	tls = ktls_hold(so->so_snd.sb_tls_info);
#endif

	/*
	 * Loop through the pages of the file, starting with the requested
	 * offset. Get a file page (do I/O if necessary), map the file page
	 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
	 * it on the socket.
	 * This is done in two loops.  The inner loop turns as many pages
	 * as it can into mbufs, up to the available socket buffer space,
	 * without blocking, so that they are bulk delivered into the
	 * socket send buffer.  The outer loop checks the state and
	 * available space of the socket and takes care of the overall
	 * progress.
	 */
	for (off = offset; rem > 0; ) {
		struct sf_io *sfio;
		vm_page_t *pa;
		struct mbuf *m0, *mtail;
		int nios, space, npages, rhpages;

		mtail = NULL;
		/*
		 * Check the socket state for ongoing connection,
		 * no errors and space in socket buffer.
		 * If space is low allow for the remainder of the
		 * file to be processed if it fits the socket buffer.
		 * Otherwise block in waiting for sufficient space
		 * to proceed, or if the socket is nonblocking, return
		 * to userland with EAGAIN while reporting how far
		 * we've come.
		 * We wait until the socket buffer has significant free
		 * space to do bulk sends.  This makes good use of file
		 * system read ahead and allows packet segmentation
		 * offloading hardware to take over lots of work.  If
		 * we were not careful here we would send off only one
		 * sfbuf at a time.
		 */
		SOCKBUF_LOCK(&so->so_snd);
		if (so->so_snd.sb_lowat < so->so_snd.sb_hiwat / 2)
			so->so_snd.sb_lowat = so->so_snd.sb_hiwat / 2;
retry_space:
		if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
			error = EPIPE;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto done;
		} else if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto done;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			SOCKBUF_UNLOCK(&so->so_snd);
			error = ENOTCONN;
			goto done;
		}

		space = sbspace(&so->so_snd);
		if (space < rem &&
		    (space <= 0 ||
		     space < so->so_snd.sb_lowat)) {
			if (so->so_state & SS_NBIO) {
				SOCKBUF_UNLOCK(&so->so_snd);
				error = EAGAIN;
				goto done;
			}
			/*
			 * sbwait drops the lock while sleeping.
			 * When we loop back to retry_space the
			 * state may have changed and we retest
			 * for it.
			 */
			error = sbwait(&so->so_snd);
			/*
			 * An error from sbwait usually indicates that we've
			 * been interrupted by a signal. If we've sent anything
			 * then return bytes sent, otherwise return the error.
			 */
			if (error != 0) {
				SOCKBUF_UNLOCK(&so->so_snd);
				goto done;
			}
			goto retry_space;
		}
		SOCKBUF_UNLOCK(&so->so_snd);

		/*
		 * At the beginning of the first loop check if any headers
		 * are specified and copy them into mbufs.  Reduce space in
		 * the socket buffer by the size of the header mbuf chain.
		 * Clear hdr_uio here and hdrlen at the end of the first loop.
		 */
		if (hdr_uio != NULL && hdr_uio->uio_resid > 0) {
			hdr_uio->uio_td = td;
			hdr_uio->uio_rw = UIO_WRITE;
#ifdef KERN_TLS
			if (tls != NULL)
				mh = m_uiotombuf(hdr_uio, M_WAITOK, space,
				    tls->params.max_frame_len, M_NOMAP);
			else
#endif
				mh = m_uiotombuf(hdr_uio, M_WAITOK,
				    space, 0, 0);
			hdrlen = m_length(mh, &mhtail);
			space -= hdrlen;
			/*
			 * If header consumed all the socket buffer space,
			 * don't waste CPU cycles and jump to the end.
			 */
			if (space == 0) {
				sfio = NULL;
				nios = 0;
				goto prepend_header;
			}
			hdr_uio = NULL;
		}

		if (vp != NULL) {
			error = vn_lock(vp, LK_SHARED);
			if (error != 0)
				goto done;
			error = VOP_GETATTR(vp, &va, td->td_ucred);
			if (error != 0 || off >= va.va_size) {
				VOP_UNLOCK(vp);
				goto done;
			}
			if (va.va_size != obj_size) {
				obj_size = va.va_size;
				rem = nbytes ?
				    omin(nbytes + offset, obj_size) : obj_size;
				rem -= off;
			}
		}

		if (space > rem)
			space = rem;
		else if (space > PAGE_SIZE) {
			/*
			 * Use page boundaries when possible for large
			 * requests.
			 */
			if (off & PAGE_MASK)
				space -= (PAGE_SIZE - (off & PAGE_MASK));
			space = trunc_page(space);
			if (off & PAGE_MASK)
				space += (PAGE_SIZE - (off & PAGE_MASK));
		}
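
		/*
		 * Worked example of the trimming above (an illustration,
		 * assuming PAGE_SIZE == 4096): for off = 6144 and
		 * space = 20000, the partial leading page costs
		 * 4096 - 2048 = 2048 (space 17952), trunc_page() drops
		 * that to 16384, and adding the 2048 back yields
		 * space = 18432, so the transfer ends exactly on the
		 * page boundary at off + space = 24576.
		 */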

		npages = howmany(space + (off & PAGE_MASK), PAGE_SIZE);

		/*
		 * Calculate maximum allowed number of pages for readahead
		 * at this iteration.  If SF_USER_READAHEAD was set, we don't
		 * do any heuristics and use exactly the value supplied by
		 * application.  Otherwise, we allow readahead up to "rem".
		 * If application wants more, let it be, but there is no
		 * reason to go above MAXPHYS.  Also check against "obj_size",
		 * since vm_pager_has_page() can hint beyond EOF.
		 */
		if (flags & SF_USER_READAHEAD) {
			rhpages = SF_READAHEAD(flags);
		} else {
			rhpages = howmany(rem + (off & PAGE_MASK), PAGE_SIZE) -
			    npages;
			rhpages += SF_READAHEAD(flags);
		}
		rhpages = min(howmany(MAXPHYS, PAGE_SIZE), rhpages);
		rhpages = min(howmany(obj_size - trunc_page(off), PAGE_SIZE) -
		    npages, rhpages);

		sfio = malloc(sizeof(struct sf_io) +
		    npages * sizeof(vm_page_t), M_SENDFILE, M_WAITOK);
		refcount_init(&sfio->nios, 1);
		sfio->obj = obj;
		sfio->error = 0;
		sfio->m = NULL;
#ifdef KERN_TLS
		/*
		 * This doesn't use ktls_hold() because sfio->m will
		 * also have a reference on 'tls' that will be valid
		 * for all of sfio's lifetime.
		 */
		sfio->tls = tls;
#endif
		vm_object_pip_add(obj, 1);
		error = sendfile_swapin(obj, sfio, &nios, off, space, npages,
		    rhpages, flags);
		if (error != 0) {
			if (vp != NULL)
				VOP_UNLOCK(vp);
			sendfile_iodone(sfio, NULL, 0, error);
			goto done;
		}

		/*
		 * Loop and construct maximum sized mbuf chain to be bulk
		 * dumped into socket buffer.
		 */
		pa = sfio->pa;

		/*
		 * Use unmapped mbufs if enabled for TCP.  Unmapped
		 * bufs are restricted to TCP as that is what has been
		 * tested.  In particular, unmapped mbufs have not
		 * been tested with UNIX-domain sockets.
		 *
		 * TLS frames always require unmapped mbufs.
		 */
		if ((mb_use_ext_pgs &&
		    so->so_proto->pr_protocol == IPPROTO_TCP)
#ifdef KERN_TLS
		    || tls != NULL
#endif
		    ) {
			use_ext_pgs = true;
#ifdef KERN_TLS
			if (tls != NULL)
				max_pgs = num_pages(tls->params.max_frame_len);
			else
#endif
				max_pgs = MBUF_PEXT_MAX_PGS;

			/* Start at last index, to wrap on first use. */
			ext_pgs_idx = max_pgs - 1;
		}

		for (int i = 0; i < npages; i++) {
			/*
			 * If a page wasn't grabbed successfully, then
			 * trim the array. Can happen only with SF_NODISKIO.
			 */
			if (pa[i] == NULL) {
				SFSTAT_INC(sf_busy);
				fixspace(npages, i, off, &space);
				npages = i;
				softerr = EBUSY;
				break;
			}
			pga = pa[i];
			if (pga == bogus_page)
				pga = vm_page_relookup(obj, sfio->pindex0 + i);

			if (use_ext_pgs) {
				off_t xfs;

				ext_pgs_idx++;
				if (ext_pgs_idx == max_pgs) {
					m0 = mb_alloc_ext_pgs(M_WAITOK,
					    sendfile_free_mext_pg);

					if (flags & SF_NOCACHE) {
						m0->m_ext.ext_flags |=
						    EXT_FLAG_NOCACHE;

						/*
						 * See comment below regarding
						 * ignoring SF_NOCACHE for the
						 * last page.
						 */
						if ((npages - i <= max_pgs) &&
						    ((off + space) & PAGE_MASK) &&
						    (rem > space || rhpages > 0))
							m0->m_ext.ext_flags |=
							    EXT_FLAG_CACHE_LAST;
					}
					if (sfs != NULL) {
						m0->m_ext.ext_flags |=
						    EXT_FLAG_SYNC;
						if (m0->m_ext.ext_type ==
						    EXT_PGS)
							m0->m_ext.ext_arg1 =
								sfs;
						else
							m0->m_ext.ext_arg2 =
								sfs;
						mtx_lock(&sfs->mtx);
						sfs->count++;
						mtx_unlock(&sfs->mtx);
					}
					ext_pgs = &m0->m_ext_pgs;
					ext_pgs_idx = 0;

					/* Append to mbuf chain. */
					if (mtail != NULL)
						mtail->m_next = m0;
					else
						m = m0;
					mtail = m0;
					ext_pgs->first_pg_off =
					    vmoff(i, off) & PAGE_MASK;
				}
				if (nios) {
					mtail->m_flags |= M_NOTREADY;
					ext_pgs->nrdy++;
				}

				ext_pgs->m_epg_pa[ext_pgs_idx] = VM_PAGE_TO_PHYS(pga);
				ext_pgs->npgs++;
				xfs = xfsize(i, npages, off, space);
				ext_pgs->last_pg_len = xfs;
				MBUF_EXT_PGS_ASSERT_SANITY(ext_pgs);
				mtail->m_len += xfs;
				mtail->m_ext.ext_size += PAGE_SIZE;
				continue;
			}

			/*
			 * Get a sendfile buf.  When allocating the
			 * first buffer for mbuf chain, we usually
			 * wait as long as necessary, but this wait
			 * can be interrupted.  For subsequent
			 * buffers, do not sleep, since several
			 * threads might exhaust the buffers and then
			 * deadlock.
			 */
			sf = sf_buf_alloc(pga,
			    m != NULL ? SFB_NOWAIT : SFB_CATCH);
			if (sf == NULL) {
				SFSTAT_INC(sf_allocfail);
				sendfile_iowait(sfio, "sfnosf");
				for (int j = i; j < npages; j++)
					vm_page_unwire(pa[j], PQ_INACTIVE);
				if (m == NULL)
					softerr = ENOBUFS;
				fixspace(npages, i, off, &space);
				npages = i;
				break;
			}

			m0 = m_get(M_WAITOK, MT_DATA);
			m0->m_ext.ext_buf = (char *)sf_buf_kva(sf);
			m0->m_ext.ext_size = PAGE_SIZE;
			m0->m_ext.ext_arg1 = sf;
			m0->m_ext.ext_type = EXT_SFBUF;
			m0->m_ext.ext_flags = EXT_FLAG_EMBREF;
			m0->m_ext.ext_free = sendfile_free_mext;
			/*
			 * SF_NOCACHE sets the page as being freed upon send.
			 * However, we ignore it for the last page in 'space',
			 * if the page is truncated, and we got more data to
			 * send (rem > space), or if we have readahead
			 * configured (rhpages > 0).
			 */
			if ((flags & SF_NOCACHE) &&
			    (i != npages - 1 ||
			    !((off + space) & PAGE_MASK) ||
			    !(rem > space || rhpages > 0)))
				m0->m_ext.ext_flags |= EXT_FLAG_NOCACHE;
			if (sfs != NULL) {
				m0->m_ext.ext_flags |= EXT_FLAG_SYNC;
				if (m0->m_ext.ext_type == EXT_PGS)
					m0->m_ext.ext_arg1 = sfs;
				else
					m0->m_ext.ext_arg2 = sfs;
				mtx_lock(&sfs->mtx);
				sfs->count++;
				mtx_unlock(&sfs->mtx);
			}
			m0->m_ext.ext_count = 1;
			m0->m_flags |= (M_EXT | M_RDONLY);
			if (nios)
				m0->m_flags |= M_NOTREADY;
			m0->m_data = (char *)sf_buf_kva(sf) +
			    (vmoff(i, off) & PAGE_MASK);
			m0->m_len = xfsize(i, npages, off, space);

			/* Append to mbuf chain. */
			if (mtail != NULL)
				mtail->m_next = m0;
			else
				m = m0;
			mtail = m0;
		}
109033a2a37bSGleb Smirnoff 
109133a2a37bSGleb Smirnoff 		if (vp != NULL)
1092b249ce48SMateusz Guzik 			VOP_UNLOCK(vp);
109333a2a37bSGleb Smirnoff 
109433a2a37bSGleb Smirnoff 		/* Keep track of bytes processed. */
109533a2a37bSGleb Smirnoff 		off += space;
109633a2a37bSGleb Smirnoff 		rem -= space;
109733a2a37bSGleb Smirnoff 
10986bc27f08SGleb Smirnoff 		/*
10996bc27f08SGleb Smirnoff 		 * Prepend header, if any.  Save pointer to first mbuf
11006bc27f08SGleb Smirnoff 		 * with a page.
11016bc27f08SGleb Smirnoff 		 */
110233a2a37bSGleb Smirnoff 		if (hdrlen) {
1103a2d8f9d2SGleb Smirnoff prepend_header:
11046bc27f08SGleb Smirnoff 			m0 = mhtail->m_next = m;
110533a2a37bSGleb Smirnoff 			m = mh;
110633a2a37bSGleb Smirnoff 			mh = NULL;
11076bc27f08SGleb Smirnoff 		} else
11086bc27f08SGleb Smirnoff 			m0 = m;
110933a2a37bSGleb Smirnoff 
111033a2a37bSGleb Smirnoff 		if (m == NULL) {
111133a2a37bSGleb Smirnoff 			KASSERT(softerr, ("%s: m NULL, no error", __func__));
111233a2a37bSGleb Smirnoff 			error = softerr;
11136bc27f08SGleb Smirnoff 			sendfile_iodone(sfio, NULL, 0, 0);
111433a2a37bSGleb Smirnoff 			goto done;
111533a2a37bSGleb Smirnoff 		}
111633a2a37bSGleb Smirnoff 
111733a2a37bSGleb Smirnoff 		/* Add the buffer chain to the socket buffer. */
111833a2a37bSGleb Smirnoff 		KASSERT(m_length(m, NULL) == space + hdrlen,
111933a2a37bSGleb Smirnoff 		    ("%s: mlen %u space %d hdrlen %d",
112033a2a37bSGleb Smirnoff 		    __func__, m_length(m, NULL), space, hdrlen));
112133a2a37bSGleb Smirnoff 
112233a2a37bSGleb Smirnoff 		CURVNET_SET(so->so_vnet);
1123b2e60773SJohn Baldwin #ifdef KERN_TLS
1124f85e1a80SGleb Smirnoff 		if (tls != NULL)
1125f85e1a80SGleb Smirnoff 			ktls_frame(m, tls, &tls_enq_cnt, TLS_RLTYPE_APP);
1126b2e60773SJohn Baldwin #endif
112733a2a37bSGleb Smirnoff 		if (nios == 0) {
112833a2a37bSGleb Smirnoff 			/*
112933a2a37bSGleb Smirnoff 			 * If sendfile_swapin() didn't initiate any I/Os,
11306bc27f08SGleb Smirnoff 			 * which happens if all data is cached in VM, or if
11316bc27f08SGleb Smirnoff 			 * the header consumed all socket buffer space and
11326bc27f08SGleb Smirnoff 			 * sfio is NULL, then we can send data right now
11336bc27f08SGleb Smirnoff 			 * without the PRUS_NOTREADY flag.
113433a2a37bSGleb Smirnoff 			 */
11356bc27f08SGleb Smirnoff 			if (sfio != NULL)
11366bc27f08SGleb Smirnoff 				sendfile_iodone(sfio, NULL, 0, 0);
1137b2e60773SJohn Baldwin #ifdef KERN_TLS
11389e14430dSJohn Baldwin 			if (tls != NULL && tls->mode == TCP_TLS_MODE_SW) {
1139b2e60773SJohn Baldwin 				error = (*so->so_proto->pr_usrreqs->pru_send)
1140b2e60773SJohn Baldwin 				    (so, PRUS_NOTREADY, m, NULL, NULL, td);
1141b2e60773SJohn Baldwin 				soref(so);
1142b2e60773SJohn Baldwin 				ktls_enqueue(m, so, tls_enq_cnt);
1143b2e60773SJohn Baldwin 			} else
1144b2e60773SJohn Baldwin #endif
114533a2a37bSGleb Smirnoff 				error = (*so->so_proto->pr_usrreqs->pru_send)
114633a2a37bSGleb Smirnoff 				    (so, 0, m, NULL, NULL, td);
114733a2a37bSGleb Smirnoff 		} else {
11486bc27f08SGleb Smirnoff 			sfio->so = so;
11496bc27f08SGleb Smirnoff 			sfio->m = m0;
115033a2a37bSGleb Smirnoff 			sfio->npages = npages;
1151d37aa3ccSGleb Smirnoff 			soref(so);
115233a2a37bSGleb Smirnoff 			error = (*so->so_proto->pr_usrreqs->pru_send)
115333a2a37bSGleb Smirnoff 			    (so, PRUS_NOTREADY, m, NULL, NULL, td);
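			/*
			 * Release the setup-time reference on sfio.  Once
			 * the last pager I/O completes, sendfile_iodone()
			 * notifies the protocol that the chain is ready.
			 */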
115433a2a37bSGleb Smirnoff 			sendfile_iodone(sfio, NULL, 0, 0);
115533a2a37bSGleb Smirnoff 		}
115633a2a37bSGleb Smirnoff 		CURVNET_RESTORE();
115733a2a37bSGleb Smirnoff 
115833a2a37bSGleb Smirnoff 		m = NULL;	/* pru_send always consumes */
115933a2a37bSGleb Smirnoff 		if (error)
116033a2a37bSGleb Smirnoff 			goto done;
116133a2a37bSGleb Smirnoff 		sbytes += space + hdrlen;
116233a2a37bSGleb Smirnoff 		if (hdrlen)
116333a2a37bSGleb Smirnoff 			hdrlen = 0;
116433a2a37bSGleb Smirnoff 		if (softerr) {
116533a2a37bSGleb Smirnoff 			error = softerr;
116633a2a37bSGleb Smirnoff 			goto done;
116733a2a37bSGleb Smirnoff 		}
116833a2a37bSGleb Smirnoff 	}
116933a2a37bSGleb Smirnoff 
117033a2a37bSGleb Smirnoff 	/*
117133a2a37bSGleb Smirnoff 	 * Send trailers. Wimp out and use writev(2).
117233a2a37bSGleb Smirnoff 	 */
117333a2a37bSGleb Smirnoff 	if (trl_uio != NULL) {
117433a2a37bSGleb Smirnoff 		sbunlock(&so->so_snd);
117533a2a37bSGleb Smirnoff 		error = kern_writev(td, sockfd, trl_uio);
117633a2a37bSGleb Smirnoff 		if (error == 0)
117733a2a37bSGleb Smirnoff 			sbytes += td->td_retval[0];
117833a2a37bSGleb Smirnoff 		goto out;
117933a2a37bSGleb Smirnoff 	}
118033a2a37bSGleb Smirnoff 
118133a2a37bSGleb Smirnoff done:
118233a2a37bSGleb Smirnoff 	sbunlock(&so->so_snd);
118333a2a37bSGleb Smirnoff out:
118433a2a37bSGleb Smirnoff 	/*
118533a2a37bSGleb Smirnoff 	 * If there was no error, we have to clear td->td_retval[0]
118633a2a37bSGleb Smirnoff 	 * because it may have been set by writev.
118733a2a37bSGleb Smirnoff 	 */
118833a2a37bSGleb Smirnoff 	if (error == 0) {
118933a2a37bSGleb Smirnoff 		td->td_retval[0] = 0;
119033a2a37bSGleb Smirnoff 	}
119133a2a37bSGleb Smirnoff 	if (sent != NULL) {
119233a2a37bSGleb Smirnoff 		(*sent) = sbytes;
119333a2a37bSGleb Smirnoff 	}
119433a2a37bSGleb Smirnoff 	if (obj != NULL)
119533a2a37bSGleb Smirnoff 		vm_object_deallocate(obj);
119633a2a37bSGleb Smirnoff 	if (so)
119733a2a37bSGleb Smirnoff 		fdrop(sock_fp, td);
119833a2a37bSGleb Smirnoff 	if (m)
119933a2a37bSGleb Smirnoff 		m_freem(m);
120033a2a37bSGleb Smirnoff 	if (mh)
120133a2a37bSGleb Smirnoff 		m_freem(mh);
120233a2a37bSGleb Smirnoff 
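	/*
	 * For SF_SYNC, wait until the protocol has freed every mbuf
	 * referencing sfs; the mbuf free routine signals the condition
	 * variable when sfs->count drops to zero.
	 */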
120333a2a37bSGleb Smirnoff 	if (sfs != NULL) {
120433a2a37bSGleb Smirnoff 		mtx_lock(&sfs->mtx);
120533a2a37bSGleb Smirnoff 		if (sfs->count != 0)
120633a2a37bSGleb Smirnoff 			cv_wait(&sfs->cv, &sfs->mtx);
120733a2a37bSGleb Smirnoff 		KASSERT(sfs->count == 0, ("sendfile sync still busy"));
120833a2a37bSGleb Smirnoff 		cv_destroy(&sfs->cv);
120933a2a37bSGleb Smirnoff 		mtx_destroy(&sfs->mtx);
12108f0a223cSKonstantin Belousov 		free(sfs, M_SENDFILE);
121133a2a37bSGleb Smirnoff 	}
1212b2e60773SJohn Baldwin #ifdef KERN_TLS
1213b2e60773SJohn Baldwin 	if (tls != NULL)
1214b2e60773SJohn Baldwin 		ktls_free(tls);
1215b2e60773SJohn Baldwin #endif
121633a2a37bSGleb Smirnoff 
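	/*
	 * Map ERESTART to EINTR: some data may already have been
	 * transmitted, so the syscall cannot simply be restarted.
	 */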
121733a2a37bSGleb Smirnoff 	if (error == ERESTART)
121833a2a37bSGleb Smirnoff 		error = EINTR;
121933a2a37bSGleb Smirnoff 
122033a2a37bSGleb Smirnoff 	return (error);
122133a2a37bSGleb Smirnoff }
122233a2a37bSGleb Smirnoff 
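/*
 * Common backend for sendfile(2) and the FreeBSD 4 compat syscall;
 * 'compat' selects the historic interpretation of nbytes.
 */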
122333a2a37bSGleb Smirnoff static int
122433a2a37bSGleb Smirnoff sendfile(struct thread *td, struct sendfile_args *uap, int compat)
122533a2a37bSGleb Smirnoff {
122633a2a37bSGleb Smirnoff 	struct sf_hdtr hdtr;
122733a2a37bSGleb Smirnoff 	struct uio *hdr_uio, *trl_uio;
122833a2a37bSGleb Smirnoff 	struct file *fp;
122933a2a37bSGleb Smirnoff 	off_t sbytes;
123033a2a37bSGleb Smirnoff 	int error;
123133a2a37bSGleb Smirnoff 
123233a2a37bSGleb Smirnoff 	/*
123333a2a37bSGleb Smirnoff 	 * File offset must not be negative.  If it goes beyond EOF,
123433a2a37bSGleb Smirnoff 	 * we send only the header/trailer and no payload data.
123533a2a37bSGleb Smirnoff 	 */
123633a2a37bSGleb Smirnoff 	if (uap->offset < 0)
123733a2a37bSGleb Smirnoff 		return (EINVAL);
123833a2a37bSGleb Smirnoff 
1239ef3266d5SGleb Smirnoff 	sbytes = 0;
124033a2a37bSGleb Smirnoff 	hdr_uio = trl_uio = NULL;
124133a2a37bSGleb Smirnoff 
124233a2a37bSGleb Smirnoff 	if (uap->hdtr != NULL) {
124333a2a37bSGleb Smirnoff 		error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
124433a2a37bSGleb Smirnoff 		if (error != 0)
124533a2a37bSGleb Smirnoff 			goto out;
124633a2a37bSGleb Smirnoff 		if (hdtr.headers != NULL) {
124733a2a37bSGleb Smirnoff 			error = copyinuio(hdtr.headers, hdtr.hdr_cnt,
124833a2a37bSGleb Smirnoff 			    &hdr_uio);
124933a2a37bSGleb Smirnoff 			if (error != 0)
125033a2a37bSGleb Smirnoff 				goto out;
12519c64cfe5SGleb Smirnoff #ifdef COMPAT_FREEBSD4
12529c64cfe5SGleb Smirnoff 			/*
12539c64cfe5SGleb Smirnoff 			 * In FreeBSD < 5.0 the nbytes to send also included
12549c64cfe5SGleb Smirnoff 			 * the header.  If compat is specified, subtract the
12559c64cfe5SGleb Smirnoff 			 * header size from nbytes.
12569c64cfe5SGleb Smirnoff 			 */
12579c64cfe5SGleb Smirnoff 			if (compat) {
12589c64cfe5SGleb Smirnoff 				if (uap->nbytes > hdr_uio->uio_resid)
12599c64cfe5SGleb Smirnoff 					uap->nbytes -= hdr_uio->uio_resid;
12609c64cfe5SGleb Smirnoff 				else
12619c64cfe5SGleb Smirnoff 					uap->nbytes = 0;
12629c64cfe5SGleb Smirnoff 			}
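			/*
			 * For example (hypothetical numbers): nbytes of
			 * 1024 with a 100-byte header leaves 924 bytes of
			 * file payload under the old semantics.
			 */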
12639c64cfe5SGleb Smirnoff #endif
126433a2a37bSGleb Smirnoff 		}
126533a2a37bSGleb Smirnoff 		if (hdtr.trailers != NULL) {
126633a2a37bSGleb Smirnoff 			error = copyinuio(hdtr.trailers, hdtr.trl_cnt,
126733a2a37bSGleb Smirnoff 			    &trl_uio);
126833a2a37bSGleb Smirnoff 			if (error != 0)
126933a2a37bSGleb Smirnoff 				goto out;
127033a2a37bSGleb Smirnoff 		}
127133a2a37bSGleb Smirnoff 	}
127233a2a37bSGleb Smirnoff 
127333a2a37bSGleb Smirnoff 	AUDIT_ARG_FD(uap->fd);
127433a2a37bSGleb Smirnoff 
127533a2a37bSGleb Smirnoff 	/*
127633a2a37bSGleb Smirnoff 	 * sendfile(2) can start at any offset within a file so we require
127733a2a37bSGleb Smirnoff 	 * CAP_READ+CAP_SEEK = CAP_PREAD.
127833a2a37bSGleb Smirnoff 	 */
1279cbd92ce6SMatt Macy 	if ((error = fget_read(td, uap->fd, &cap_pread_rights, &fp)) != 0)
128033a2a37bSGleb Smirnoff 		goto out;
128133a2a37bSGleb Smirnoff 
128233a2a37bSGleb Smirnoff 	error = fo_sendfile(fp, uap->s, hdr_uio, trl_uio, uap->offset,
12839c64cfe5SGleb Smirnoff 	    uap->nbytes, &sbytes, uap->flags, td);
128433a2a37bSGleb Smirnoff 	fdrop(fp, td);
128533a2a37bSGleb Smirnoff 
128633a2a37bSGleb Smirnoff 	if (uap->sbytes != NULL)
128733a2a37bSGleb Smirnoff 		copyout(&sbytes, uap->sbytes, sizeof(off_t));
128833a2a37bSGleb Smirnoff 
128933a2a37bSGleb Smirnoff out:
129033a2a37bSGleb Smirnoff 	free(hdr_uio, M_IOV);
129133a2a37bSGleb Smirnoff 	free(trl_uio, M_IOV);
129233a2a37bSGleb Smirnoff 	return (error);
129333a2a37bSGleb Smirnoff }
129433a2a37bSGleb Smirnoff 
129533a2a37bSGleb Smirnoff /*
129633a2a37bSGleb Smirnoff  * sendfile(2)
129733a2a37bSGleb Smirnoff  *
129833a2a37bSGleb Smirnoff  * int sendfile(int fd, int s, off_t offset, size_t nbytes,
129933a2a37bSGleb Smirnoff  *       struct sf_hdtr *hdtr, off_t *sbytes, int flags)
130033a2a37bSGleb Smirnoff  *
130133a2a37bSGleb Smirnoff  * Send a file specified by 'fd' and starting at 'offset' to a socket
130233a2a37bSGleb Smirnoff  * specified by 's'. Send only 'nbytes' of the file or until EOF if nbytes ==
130333a2a37bSGleb Smirnoff  * 0.  Optionally add a header and/or trailer to the socket output.  If
130433a2a37bSGleb Smirnoff  * specified, write the total number of bytes sent into *sbytes.
130533a2a37bSGleb Smirnoff  */
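/*
 * Illustrative userland call (a sketch; descriptor names are
 * hypothetical and error handling is minimal):
 *
 *	off_t sbytes;
 *	if (sendfile(filefd, sockfd, 0, 0, NULL, &sbytes, 0) == -1)
 *		err(1, "sendfile");
 */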
130633a2a37bSGleb Smirnoff int
130733a2a37bSGleb Smirnoff sys_sendfile(struct thread *td, struct sendfile_args *uap)
130833a2a37bSGleb Smirnoff {
130933a2a37bSGleb Smirnoff 
131033a2a37bSGleb Smirnoff 	return (sendfile(td, uap, 0));
131133a2a37bSGleb Smirnoff }
131233a2a37bSGleb Smirnoff 
131333a2a37bSGleb Smirnoff #ifdef COMPAT_FREEBSD4
131433a2a37bSGleb Smirnoff int
131533a2a37bSGleb Smirnoff freebsd4_sendfile(struct thread *td, struct freebsd4_sendfile_args *uap)
131633a2a37bSGleb Smirnoff {
131733a2a37bSGleb Smirnoff 	struct sendfile_args args;
131833a2a37bSGleb Smirnoff 
131933a2a37bSGleb Smirnoff 	args.fd = uap->fd;
132033a2a37bSGleb Smirnoff 	args.s = uap->s;
132133a2a37bSGleb Smirnoff 	args.offset = uap->offset;
132233a2a37bSGleb Smirnoff 	args.nbytes = uap->nbytes;
132333a2a37bSGleb Smirnoff 	args.hdtr = uap->hdtr;
132433a2a37bSGleb Smirnoff 	args.sbytes = uap->sbytes;
132533a2a37bSGleb Smirnoff 	args.flags = uap->flags;
132633a2a37bSGleb Smirnoff 
132733a2a37bSGleb Smirnoff 	return (sendfile(td, &args, 1));
132833a2a37bSGleb Smirnoff }
132933a2a37bSGleb Smirnoff #endif /* COMPAT_FREEBSD4 */
1330