/*-
 * Copyright (c) 2013-2015 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 1998, David Greenman. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kern_tls.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/kernel.h>
#include <netinet/in.h>
#include <sys/lock.h>
#include <sys/ktls.h>
#include <sys/mutex.h>
#include <sys/sysproto.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/rwlock.h>
#include <sys/sf_buf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <net/vnet.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#define	EXT_FLAG_SYNC		EXT_FLAG_VENDOR1
#define	EXT_FLAG_NOCACHE	EXT_FLAG_VENDOR2
#define	EXT_FLAG_CACHE_LAST	EXT_FLAG_VENDOR3
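
/*
 * These vendor ext_flags bits mark sendfile(2) mbufs: EXT_FLAG_SYNC
 * carries the SF_SYNC bookkeeping pointer in ext_arg2,
 * EXT_FLAG_NOCACHE asks the free routines below to try to free the
 * page once the data has been sent, and EXT_FLAG_CACHE_LAST exempts
 * the last page of an unmapped mbuf from that treatment.
 */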

/*
 * Structure describing a single sendfile(2) I/O, which may consist of
 * several underlying pager I/Os.
 *
 * The syscall context allocates the structure and initializes 'nios'
 * to 1.  As sendfile_swapin() runs through pages and starts asynchronous
 * paging operations, it increments 'nios'.
 *
 * Every I/O completion calls sendfile_iodone(), which decrements 'nios',
 * and the syscall also calls sendfile_iodone() after allocating all mbufs,
 * linking them and sending them to the socket.  Whoever brings 'nios' to
 * zero is responsible for calling pru_ready on the socket, to notify it
 * of the readiness of the data.
 */
struct sf_io {
	volatile u_int	nios;
	u_int		error;
	int		npages;
	struct socket	*so;
	struct mbuf	*m;
#ifdef KERN_TLS
	struct ktls_session *tls;
#endif
	vm_page_t	pa[];
};

/*
 * Structure used to track requests with SF_SYNC flag.
 */
struct sendfile_sync {
	struct mtx	mtx;
	struct cv	cv;
	unsigned	count;
};
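
/*
 * An SF_SYNC request bumps 'count' once for every mbuf carrying
 * EXT_FLAG_SYNC and, before returning, sleeps on 'cv' until the mbuf
 * free routines below have dropped 'count' back to zero, i.e. until
 * the protocol has finished with all of the file pages.
 */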

counter_u64_t sfstat[sizeof(struct sfstat) / sizeof(uint64_t)];

static void
sfstat_init(const void *unused)
{

	COUNTER_ARRAY_ALLOC(sfstat, sizeof(struct sfstat) / sizeof(uint64_t),
	    M_WAITOK);
}
SYSINIT(sfstat, SI_SUB_MBUF, SI_ORDER_FIRST, sfstat_init, NULL);

static int
sfstat_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct sfstat s;

	COUNTER_ARRAY_COPY(sfstat, &s, sizeof(s) / sizeof(uint64_t));
	if (req->newptr)
		COUNTER_ARRAY_ZERO(sfstat, sizeof(s) / sizeof(uint64_t));
	return (SYSCTL_OUT(req, &s, sizeof(s)));
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, sfstat, CTLTYPE_OPAQUE | CTLFLAG_RW,
    NULL, 0, sfstat_sysctl, "I", "sendfile statistics");

static void
sendfile_free_mext(struct mbuf *m)
{
	struct sf_buf *sf;
	vm_page_t pg;
	int flags;

	KASSERT(m->m_flags & M_EXT && m->m_ext.ext_type == EXT_SFBUF,
	    ("%s: m %p !M_EXT or !EXT_SFBUF", __func__, m));

	sf = m->m_ext.ext_arg1;
	pg = sf_buf_page(sf);
	flags = (m->m_ext.ext_flags & EXT_FLAG_NOCACHE) != 0 ? VPR_TRYFREE : 0;

	sf_buf_free(sf);
	vm_page_release(pg, flags);

	if (m->m_ext.ext_flags & EXT_FLAG_SYNC) {
		struct sendfile_sync *sfs = m->m_ext.ext_arg2;

		mtx_lock(&sfs->mtx);
		KASSERT(sfs->count > 0, ("Sendfile sync botchup count == 0"));
		if (--sfs->count == 0)
			cv_signal(&sfs->cv);
		mtx_unlock(&sfs->mtx);
	}
}

static void
sendfile_free_mext_pg(struct mbuf *m)
{
	struct mbuf_ext_pgs *ext_pgs;
	vm_page_t pg;
	int flags, i;
	bool cache_last;

	KASSERT(m->m_flags & M_EXT && m->m_ext.ext_type == EXT_PGS,
	    ("%s: m %p !M_EXT or !EXT_PGS", __func__, m));

	cache_last = m->m_ext.ext_flags & EXT_FLAG_CACHE_LAST;
	ext_pgs = m->m_ext.ext_pgs;
	flags = (m->m_ext.ext_flags & EXT_FLAG_NOCACHE) != 0 ? VPR_TRYFREE : 0;

	for (i = 0; i < ext_pgs->npgs; i++) {
		if (cache_last && i == ext_pgs->npgs - 1)
			flags = 0;
		pg = PHYS_TO_VM_PAGE(ext_pgs->pa[i]);
		vm_page_release(pg, flags);
	}

	if (m->m_ext.ext_flags & EXT_FLAG_SYNC) {
		struct sendfile_sync *sfs = m->m_ext.ext_arg2;

		mtx_lock(&sfs->mtx);
		KASSERT(sfs->count > 0, ("Sendfile sync botchup count == 0"));
		if (--sfs->count == 0)
			cv_signal(&sfs->cv);
		mtx_unlock(&sfs->mtx);
	}
}

/*
 * Helper function to calculate how much data to put into page i of n.
 * Only the first and last pages are special.
 */
static inline off_t
xfsize(int i, int n, off_t off, off_t len)
{

	if (i == 0)
		return (omin(PAGE_SIZE - (off & PAGE_MASK), len));

	if (i == n - 1 && ((off + len) & PAGE_MASK) > 0)
		return ((off + len) & PAGE_MASK);

	return (PAGE_SIZE);
}
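
/*
 * Worked example (illustrative only, assuming 4 KB pages): a request
 * with off = 100 and len = 8000 spans n = 2 pages.  xfsize(0, 2, 100,
 * 8000) returns 4096 - 100 = 3996 bytes for the first page, and
 * xfsize(1, 2, 100, 8000) returns (100 + 8000) & PAGE_MASK = 4004
 * bytes for the last one.
 */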

/*
 * Helper function to get the offset within the object for page i.
 */
static inline vm_ooffset_t
vmoff(int i, off_t off)
{

	if (i == 0)
		return ((vm_ooffset_t)off);

	return (trunc_page(off + i * PAGE_SIZE));
}

/*
 * Helper function used when allocation of a page or sf_buf failed.
 * Pretend that we don't have enough space, and subtract xfsize() of
 * all pages that failed.
 */
static inline void
fixspace(int old, int new, off_t off, int *space)
{

	KASSERT(old > new, ("%s: old %d new %d", __func__, old, new));

	/* Subtract last one. */
	*space -= xfsize(old - 1, old, off, *space);
	old--;

	if (new == old)
		/* There was only one page. */
		return;

	/* Subtract first one. */
	if (new == 0) {
		*space -= xfsize(0, old, off, *space);
		new++;
	}

	/* Rest of pages are full sized. */
	*space -= (old - new) * PAGE_SIZE;

	KASSERT(*space >= 0, ("%s: space went backwards", __func__));
}
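
/*
 * Worked example (illustrative only, assuming 4 KB pages and a
 * page-aligned 'off'): fixspace(4, 1, 0, &space) with space = 16384
 * first subtracts the last page (4096 bytes), then the two full
 * middle pages, leaving space = 4096 for the one page that succeeded.
 */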

/*
 * I/O completion callback.
 */
static void
sendfile_iodone(void *arg, vm_page_t *pg, int count, int error)
{
	struct sf_io *sfio = arg;
	struct socket *so = sfio->so;

	for (int i = 0; i < count; i++)
		if (pg[i] != bogus_page)
			vm_page_xunbusy(pg[i]);

	if (error)
		sfio->error = error;

	if (!refcount_release(&sfio->nios))
		return;

#if defined(KERN_TLS) && defined(INVARIANTS)
	if ((sfio->m->m_flags & M_EXT) != 0 &&
	    sfio->m->m_ext.ext_type == EXT_PGS)
		KASSERT(sfio->tls == sfio->m->m_ext.ext_pgs->tls,
		    ("TLS session mismatch"));
	else
		KASSERT(sfio->tls == NULL,
		    ("non-ext_pgs mbuf with TLS session"));
#endif
	CURVNET_SET(so->so_vnet);
	if (sfio->error) {
		/*
		 * I/O operation failed.  The state of data in the socket
		 * is now inconsistent, and all we can do is tear it
		 * down.  The protocol abort method will tear down the
		 * protocol state, free all ready mbufs and detach the
		 * not-ready ones.  We will free the mbufs corresponding
		 * to this I/O manually.
		 *
		 * The socket is marked with EIO and made available for
		 * reading, so that the application receives EIO on its
		 * next syscall and eventually closes the socket.
		 */
		so->so_proto->pr_usrreqs->pru_abort(so);
		so->so_error = EIO;

		mb_free_notready(sfio->m, sfio->npages);
#ifdef KERN_TLS
	} else if (sfio->tls != NULL && sfio->tls->sw_encrypt != NULL) {
		/*
		 * I/O operation is complete, but we still need to
		 * encrypt.  We cannot do this in the interrupt thread
		 * of the disk controller, so forward the mbufs to a
		 * different thread.
		 *
		 * Donate the socket reference from sfio rather than
		 * explicitly invoking soref().
		 */
		ktls_enqueue(sfio->m, so, sfio->npages);
		goto out_with_ref;
#endif
	} else
		(void)(so->so_proto->pr_usrreqs->pru_ready)(so, sfio->m,
		    sfio->npages);

	SOCK_LOCK(so);
	sorele(so);
#ifdef KERN_TLS
out_with_ref:
#endif
	CURVNET_RESTORE();
	free(sfio, M_TEMP);
}

/*
 * Iterate through the pages vector and request paging for non-valid
 * pages.
 */
static int
sendfile_swapin(vm_object_t obj, struct sf_io *sfio, int *nios, off_t off,
    off_t len, int npages, int rhpages, int flags)
{
	vm_page_t *pa = sfio->pa;
	int grabbed;

	*nios = 0;
	flags = (flags & SF_NODISKIO) ? VM_ALLOC_NOWAIT : 0;

	/*
	 * First grab all the pages and wire them.  Note that we grab
	 * only the required pages.  Readahead pages are dealt with later.
	 */
	VM_OBJECT_WLOCK(obj);

	grabbed = vm_page_grab_pages(obj, OFF_TO_IDX(off),
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | flags, pa, npages);
	if (grabbed < npages) {
		for (int i = grabbed; i < npages; i++)
			pa[i] = NULL;
		npages = grabbed;
		rhpages = 0;
	}

	for (int i = 0; i < npages;) {
		int j, a, count, rv;

		/* Skip valid pages. */
		if (vm_page_is_valid(pa[i], vmoff(i, off) & PAGE_MASK,
		    xfsize(i, npages, off, len))) {
			vm_page_xunbusy(pa[i]);
			SFSTAT_INC(sf_pages_valid);
			i++;
			continue;
		}

		/*
		 * The next page is invalid.  Check if it belongs to the
		 * pager.  It may not be there, which is a regular
		 * situation for the shmem pager.  For the vnode pager
		 * this happens only in the case of a sparse file.
		 *
		 * An important feature of vm_pager_has_page() is the hint
		 * stored in 'a' about how many pages we can page in after
		 * this page in a single I/O.
		 */
		if (!vm_pager_has_page(obj, OFF_TO_IDX(vmoff(i, off)), NULL,
		    &a)) {
			pmap_zero_page(pa[i]);
			pa[i]->valid = VM_PAGE_BITS_ALL;
			MPASS(pa[i]->dirty == 0);
			vm_page_xunbusy(pa[i]);
			i++;
			continue;
		}

		/*
		 * We want to page in as many pages as possible, limited only
		 * by the 'a' hint and the actual request.
		 */
		count = min(a + 1, npages - i);

		/*
		 * We should not page in over a valid page, so we first trim
		 * any valid pages off the end of the request, and substitute
		 * bogus_page for those that are in the middle.
		 */
		for (j = i + count - 1; j > i; j--) {
			if (vm_page_is_valid(pa[j], vmoff(j, off) & PAGE_MASK,
			    xfsize(j, npages, off, len))) {
				count--;
				rhpages = 0;
			} else
				break;
		}
		for (j = i + 1; j < i + count - 1; j++)
			if (vm_page_is_valid(pa[j], vmoff(j, off) & PAGE_MASK,
			    xfsize(j, npages, off, len))) {
				vm_page_xunbusy(pa[j]);
				SFSTAT_INC(sf_pages_valid);
				SFSTAT_INC(sf_pages_bogus);
				pa[j] = bogus_page;
			}

		refcount_acquire(&sfio->nios);
		rv = vm_pager_get_pages_async(obj, pa + i, count, NULL,
		    i + count == npages ? &rhpages : NULL,
		    &sendfile_iodone, sfio);
		if (rv != VM_PAGER_OK) {
			for (j = i; j < i + count; j++) {
				if (pa[j] != bogus_page) {
					vm_page_lock(pa[j]);
					vm_page_unwire(pa[j], PQ_INACTIVE);
					vm_page_unlock(pa[j]);
				}
			}
			VM_OBJECT_WUNLOCK(obj);
			return (EIO);
		}

		SFSTAT_INC(sf_iocnt);
		SFSTAT_ADD(sf_pages_read, count);
		if (i + count == npages)
			SFSTAT_ADD(sf_rhpages_read, rhpages);

		/*
		 * Restore the valid page pointers.  They are already
		 * unbusied, but still wired.
		 */
		for (j = i; j < i + count; j++)
			if (pa[j] == bogus_page) {
				pa[j] = vm_page_lookup(obj,
				    OFF_TO_IDX(vmoff(j, off)));
				KASSERT(pa[j], ("%s: page %p[%d] disappeared",
				    __func__, pa, j));
			}
		i += count;
		(*nios)++;
	}

	VM_OBJECT_WUNLOCK(obj);

	if (*nios == 0 && npages != 0)
		SFSTAT_INC(sf_noiocnt);

	return (0);
}

static int
sendfile_getobj(struct thread *td, struct file *fp, vm_object_t *obj_res,
    struct vnode **vp_res, struct shmfd **shmfd_res, off_t *obj_size,
    int *bsize)
{
	struct vattr va;
	vm_object_t obj;
	struct vnode *vp;
	struct shmfd *shmfd;
	int error;

	vp = *vp_res = NULL;
	obj = NULL;
	shmfd = *shmfd_res = NULL;
	*bsize = 0;

	/*
	 * The file descriptor must be a regular file and have a
	 * backing VM object.
	 */
	if (fp->f_type == DTYPE_VNODE) {
		vp = fp->f_vnode;
		vn_lock(vp, LK_SHARED | LK_RETRY);
		if (vp->v_type != VREG) {
			error = EINVAL;
			goto out;
		}
		*bsize = vp->v_mount->mnt_stat.f_iosize;
		error = VOP_GETATTR(vp, &va, td->td_ucred);
		if (error != 0)
			goto out;
		*obj_size = va.va_size;
		obj = vp->v_object;
		if (obj == NULL) {
			error = EINVAL;
			goto out;
		}
	} else if (fp->f_type == DTYPE_SHM) {
		error = 0;
		shmfd = fp->f_data;
		obj = shmfd->shm_object;
		*obj_size = shmfd->shm_size;
	} else {
		error = EINVAL;
		goto out;
	}

	VM_OBJECT_WLOCK(obj);
	if ((obj->flags & OBJ_DEAD) != 0) {
		VM_OBJECT_WUNLOCK(obj);
		error = EBADF;
		goto out;
	}

	/*
	 * Temporarily increase the backing VM object's reference
	 * count so that a forced reclamation of its vnode does not
	 * immediately destroy it.
	 */
	vm_object_reference_locked(obj);
	VM_OBJECT_WUNLOCK(obj);
	*obj_res = obj;
	*vp_res = vp;
	*shmfd_res = shmfd;

out:
	if (vp != NULL)
		VOP_UNLOCK(vp, 0);
	return (error);
}

static int
sendfile_getsock(struct thread *td, int s, struct file **sock_fp,
    struct socket **so)
{
	int error;

	*sock_fp = NULL;
	*so = NULL;

	/*
	 * The socket must be a stream socket and connected.
	 */
	error = getsock_cap(td, s, &cap_send_rights,
	    sock_fp, NULL, NULL);
	if (error != 0)
		return (error);
	*so = (*sock_fp)->f_data;
	if ((*so)->so_type != SOCK_STREAM)
		return (EINVAL);
	if (SOLISTENING(*so))
		return (ENOTCONN);
	return (0);
}

int
vn_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
    struct uio *trl_uio, off_t offset, size_t nbytes, off_t *sent, int flags,
    struct thread *td)
{
	struct file *sock_fp;
	struct vnode *vp;
	struct vm_object *obj;
	struct socket *so;
#ifdef KERN_TLS
	struct ktls_session *tls;
#endif
	struct mbuf_ext_pgs *ext_pgs;
	struct mbuf *m, *mh, *mhtail;
	struct sf_buf *sf;
	struct shmfd *shmfd;
	struct sendfile_sync *sfs;
	struct vattr va;
	off_t off, sbytes, rem, obj_size;
	int bsize, error, ext_pgs_idx, hdrlen, max_pgs, softerr;
#ifdef KERN_TLS
	int tls_enq_cnt;
#endif
	bool use_ext_pgs;

	obj = NULL;
	so = NULL;
	m = mh = NULL;
	sfs = NULL;
#ifdef KERN_TLS
	tls = NULL;
#endif
	hdrlen = sbytes = 0;
	softerr = 0;
	use_ext_pgs = false;

	error = sendfile_getobj(td, fp, &obj, &vp, &shmfd, &obj_size, &bsize);
	if (error != 0)
		return (error);

	error = sendfile_getsock(td, sockfd, &sock_fp, &so);
	if (error != 0)
		goto out;

#ifdef MAC
	error = mac_socket_check_send(td->td_ucred, so);
	if (error != 0)
		goto out;
#endif

	SFSTAT_INC(sf_syscalls);
	SFSTAT_ADD(sf_rhpages_requested, SF_READAHEAD(flags));

	if (flags & SF_SYNC) {
		sfs = malloc(sizeof *sfs, M_TEMP, M_WAITOK | M_ZERO);
		mtx_init(&sfs->mtx, "sendfile", NULL, MTX_DEF);
		cv_init(&sfs->cv, "sendfile");
	}

	rem = nbytes ? omin(nbytes, obj_size - offset) : obj_size - offset;

	/*
	 * Protect against multiple writers to the socket.
	 *
	 * XXXRW: Historically this has assumed non-interruptibility, so now
	 * we implement that, but possibly shouldn't.
	 */
	(void)sblock(&so->so_snd, SBL_WAIT | SBL_NOINTR);
#ifdef KERN_TLS
	tls = ktls_hold(so->so_snd.sb_tls_info);
#endif

	/*
	 * Loop through the pages of the file, starting with the requested
	 * offset.  Get a file page (do I/O if necessary), map the file page
	 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
	 * it on the socket.
	 *
	 * This is done in two loops.  The inner loop turns as many pages
	 * as it can, up to the available socket buffer space, into mbufs
	 * without blocking, so that they can be bulk delivered into the
	 * socket send buffer.  The outer loop checks the state and the
	 * available space of the socket and takes care of the overall
	 * progress.
	 */
	for (off = offset; rem > 0; ) {
		struct sf_io *sfio;
		vm_page_t *pa;
		struct mbuf *mtail;
		int nios, space, npages, rhpages;

		mtail = NULL;
		/*
		 * Check the socket state for an ongoing connection,
		 * no errors, and space in the socket buffer.
		 * If space is low, allow the remainder of the file to
		 * be processed if it fits the socket buffer.
		 * Otherwise block waiting for sufficient space to
		 * proceed, or, if the socket is nonblocking, return
		 * to userland with EAGAIN while reporting how far
		 * we've come.
		 * We wait until the socket buffer has significant free
		 * space to do bulk sends.  This makes good use of file
		 * system read ahead and allows packet segmentation
		 * offloading hardware to take over lots of work.  If
		 * we were not careful here we would send off only one
		 * sfbuf at a time.
		 */
		SOCKBUF_LOCK(&so->so_snd);
		if (so->so_snd.sb_lowat < so->so_snd.sb_hiwat / 2)
			so->so_snd.sb_lowat = so->so_snd.sb_hiwat / 2;
retry_space:
		if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
			error = EPIPE;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto done;
		} else if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto done;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			SOCKBUF_UNLOCK(&so->so_snd);
			error = ENOTCONN;
			goto done;
		}

		space = sbspace(&so->so_snd);
		if (space < rem &&
		    (space <= 0 ||
		     space < so->so_snd.sb_lowat)) {
			if (so->so_state & SS_NBIO) {
				SOCKBUF_UNLOCK(&so->so_snd);
				error = EAGAIN;
				goto done;
			}
			/*
			 * sbwait drops the lock while sleeping.
			 * When we loop back to retry_space the
			 * state may have changed and we retest
			 * for it.
			 */
			error = sbwait(&so->so_snd);
			/*
			 * An error from sbwait usually indicates that we've
			 * been interrupted by a signal. If we've sent anything
			 * then return bytes sent, otherwise return the error.
			 */
			if (error != 0) {
				SOCKBUF_UNLOCK(&so->so_snd);
				goto done;
			}
			goto retry_space;
		}
		SOCKBUF_UNLOCK(&so->so_snd);

		/*
		 * At the beginning of the first loop check if any headers
		 * are specified and copy them into mbufs.  Reduce space in
		 * the socket buffer by the size of the header mbuf chain.
		 * Clear hdr_uio here and hdrlen at the end of the first loop.
		 */
		if (hdr_uio != NULL && hdr_uio->uio_resid > 0) {
			hdr_uio->uio_td = td;
			hdr_uio->uio_rw = UIO_WRITE;
#ifdef KERN_TLS
			if (tls != NULL)
				mh = m_uiotombuf(hdr_uio, M_WAITOK, space,
				    tls->params.max_frame_len, M_NOMAP);
			else
#endif
				mh = m_uiotombuf(hdr_uio, M_WAITOK,
				    space, 0, 0);
			hdrlen = m_length(mh, &mhtail);
			space -= hdrlen;
			/*
			 * If the header consumed all the socket buffer
			 * space, don't waste CPU cycles and jump to the end.
			 */
			if (space == 0) {
				sfio = NULL;
				nios = 0;
				goto prepend_header;
			}
			hdr_uio = NULL;
		}

		if (vp != NULL) {
			error = vn_lock(vp, LK_SHARED);
			if (error != 0)
				goto done;
			error = VOP_GETATTR(vp, &va, td->td_ucred);
			if (error != 0 || off >= va.va_size) {
				VOP_UNLOCK(vp, 0);
				goto done;
			}
			if (va.va_size != obj_size) {
				obj_size = va.va_size;
				rem = nbytes ?
				    omin(nbytes + offset, obj_size) : obj_size;
				rem -= off;
			}
		}

		if (space > rem)
			space = rem;
		else if (space > PAGE_SIZE) {
			/*
			 * Use page boundaries when possible for large
			 * requests.
			 */
			if (off & PAGE_MASK)
				space -= (PAGE_SIZE - (off & PAGE_MASK));
			space = trunc_page(space);
			if (off & PAGE_MASK)
				space += (PAGE_SIZE - (off & PAGE_MASK));
		}

		npages = howmany(space + (off & PAGE_MASK), PAGE_SIZE);

		/*
		 * Calculate the maximum allowed number of pages for
		 * readahead at this iteration.  If SF_USER_READAHEAD was
		 * set, we don't do any heuristics and use exactly the value
		 * supplied by the application.  Otherwise, we allow
		 * readahead up to "rem".  If the application wants more,
		 * let it be, but there is no reason to go above MAXPHYS.
		 * Also check against "obj_size", since vm_pager_has_page()
		 * can hint beyond EOF.
		 */
		if (flags & SF_USER_READAHEAD) {
			rhpages = SF_READAHEAD(flags);
		} else {
			rhpages = howmany(rem + (off & PAGE_MASK), PAGE_SIZE) -
			    npages;
			rhpages += SF_READAHEAD(flags);
		}
		rhpages = min(howmany(MAXPHYS, PAGE_SIZE), rhpages);
		rhpages = min(howmany(obj_size - trunc_page(off), PAGE_SIZE) -
		    npages, rhpages);
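
		/*
		 * Illustrative example (assuming 4 KB pages and the
		 * customary 128 KB MAXPHYS): if 'rem' extends 100 pages
		 * past this iteration's request and no SF_READAHEAD hint
		 * was given, rhpages starts at 100, is clamped to 32
		 * pages by MAXPHYS, and is clamped again so that the
		 * readahead never extends past EOF of the object.
		 */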

		sfio = malloc(sizeof(struct sf_io) +
		    npages * sizeof(vm_page_t), M_TEMP, M_WAITOK);
		refcount_init(&sfio->nios, 1);
		sfio->so = so;
		sfio->error = 0;

#ifdef KERN_TLS
		/*
		 * This doesn't use ktls_hold() because sfio->m will
		 * also have a reference on 'tls' that will be valid
		 * for all of sfio's lifetime.
		 */
		sfio->tls = tls;
#endif

		error = sendfile_swapin(obj, sfio, &nios, off, space, npages,
		    rhpages, flags);
		if (error != 0) {
			if (vp != NULL)
				VOP_UNLOCK(vp, 0);
			free(sfio, M_TEMP);
			goto done;
		}

		/*
		 * Loop and construct the maximum sized mbuf chain to be
		 * bulk dumped into the socket buffer.
		 */
		pa = sfio->pa;

		/*
		 * Use unmapped mbufs if enabled for TCP.  Unmapped
		 * mbufs are restricted to TCP as that is what has been
		 * tested.  In particular, unmapped mbufs have not
		 * been tested with UNIX-domain sockets.
		 *
		 * TLS frames always require unmapped mbufs.
		 */
		if ((mb_use_ext_pgs &&
		    so->so_proto->pr_protocol == IPPROTO_TCP)
#ifdef KERN_TLS
		    || tls != NULL
#endif
		    ) {
			use_ext_pgs = true;
#ifdef KERN_TLS
			if (tls != NULL)
				max_pgs = num_pages(tls->params.max_frame_len);
			else
#endif
				max_pgs = MBUF_PEXT_MAX_PGS;

			/* Start at last index, to wrap on first use. */
			ext_pgs_idx = max_pgs - 1;
		}

		for (int i = 0; i < npages; i++) {
			struct mbuf *m0;

			/*
			 * If a page wasn't grabbed successfully, then
			 * trim the array. Can happen only with SF_NODISKIO.
			 */
			if (pa[i] == NULL) {
				SFSTAT_INC(sf_busy);
				fixspace(npages, i, off, &space);
				npages = i;
				softerr = EBUSY;
				break;
			}

			if (use_ext_pgs) {
				off_t xfs;

				ext_pgs_idx++;
				if (ext_pgs_idx == max_pgs) {
					m0 = mb_alloc_ext_pgs(M_WAITOK, false,
					    sendfile_free_mext_pg);

					if (flags & SF_NOCACHE) {
						m0->m_ext.ext_flags |=
						    EXT_FLAG_NOCACHE;

						/*
						 * See comment below regarding
						 * ignoring SF_NOCACHE for the
						 * last page.
						 */
						if ((npages - i <= max_pgs) &&
						    ((off + space) & PAGE_MASK) &&
						    (rem > space || rhpages > 0))
							m0->m_ext.ext_flags |=
							    EXT_FLAG_CACHE_LAST;
					}
					if (sfs != NULL) {
						m0->m_ext.ext_flags |=
						    EXT_FLAG_SYNC;
						m0->m_ext.ext_arg2 = sfs;
						mtx_lock(&sfs->mtx);
						sfs->count++;
						mtx_unlock(&sfs->mtx);
					}
					ext_pgs = m0->m_ext.ext_pgs;
					if (i == 0)
						sfio->m = m0;
					ext_pgs_idx = 0;

					/* Append to mbuf chain. */
					if (mtail != NULL)
						mtail->m_next = m0;
					else
						m = m0;
					mtail = m0;
					ext_pgs->first_pg_off =
					    vmoff(i, off) & PAGE_MASK;
				}
				if (nios) {
					mtail->m_flags |= M_NOTREADY;
					ext_pgs->nrdy++;
				}

				ext_pgs->pa[ext_pgs_idx] = VM_PAGE_TO_PHYS(pa[i]);
				ext_pgs->npgs++;
				xfs = xfsize(i, npages, off, space);
				ext_pgs->last_pg_len = xfs;
				MBUF_EXT_PGS_ASSERT_SANITY(ext_pgs);
				mtail->m_len += xfs;
				mtail->m_ext.ext_size += PAGE_SIZE;
				continue;
			}

			/*
			 * Get a sendfile buf.  When allocating the
			 * first buffer for an mbuf chain, we usually
			 * wait as long as necessary, but this wait
			 * can be interrupted.  For subsequent
			 * buffers, do not sleep, since several
			 * threads might exhaust the buffers and then
			 * deadlock.
			 */
			sf = sf_buf_alloc(pa[i],
			    m != NULL ? SFB_NOWAIT : SFB_CATCH);
			if (sf == NULL) {
				SFSTAT_INC(sf_allocfail);
				for (int j = i; j < npages; j++) {
					vm_page_lock(pa[j]);
					vm_page_unwire(pa[j], PQ_INACTIVE);
					vm_page_unlock(pa[j]);
				}
				if (m == NULL)
					softerr = ENOBUFS;
				fixspace(npages, i, off, &space);
				npages = i;
				break;
			}

			m0 = m_get(M_WAITOK, MT_DATA);
			m0->m_ext.ext_buf = (char *)sf_buf_kva(sf);
			m0->m_ext.ext_size = PAGE_SIZE;
			m0->m_ext.ext_arg1 = sf;
			m0->m_ext.ext_type = EXT_SFBUF;
			m0->m_ext.ext_flags = EXT_FLAG_EMBREF;
			m0->m_ext.ext_free = sendfile_free_mext;
			/*
			 * SF_NOCACHE causes the page to be freed upon send.
			 * However, we ignore it for the last page in 'space'
			 * if the page is truncated, and we have more data to
			 * send (rem > space), or if we have readahead
			 * configured (rhpages > 0).
			 */
			if ((flags & SF_NOCACHE) &&
			    (i != npages - 1 ||
			    !((off + space) & PAGE_MASK) ||
			    !(rem > space || rhpages > 0)))
				m0->m_ext.ext_flags |= EXT_FLAG_NOCACHE;
			if (sfs != NULL) {
				m0->m_ext.ext_flags |= EXT_FLAG_SYNC;
				m0->m_ext.ext_arg2 = sfs;
				mtx_lock(&sfs->mtx);
				sfs->count++;
				mtx_unlock(&sfs->mtx);
			}
			m0->m_ext.ext_count = 1;
			m0->m_flags |= (M_EXT | M_RDONLY);
			if (nios)
				m0->m_flags |= M_NOTREADY;
			m0->m_data = (char *)sf_buf_kva(sf) +
			    (vmoff(i, off) & PAGE_MASK);
			m0->m_len = xfsize(i, npages, off, space);

			if (i == 0)
				sfio->m = m0;

			/* Append to mbuf chain. */
			if (mtail != NULL)
				mtail->m_next = m0;
			else
				m = m0;
			mtail = m0;
		}

		if (vp != NULL)
			VOP_UNLOCK(vp, 0);

		/* Keep track of bytes processed. */
		off += space;
		rem -= space;

		/* Prepend header, if any. */
		if (hdrlen) {
prepend_header:
			mhtail->m_next = m;
			m = mh;
			mh = NULL;
		}

		if (m == NULL) {
			KASSERT(softerr, ("%s: m NULL, no error", __func__));
			error = softerr;
			free(sfio, M_TEMP);
			goto done;
		}

		/* Add the buffer chain to the socket buffer. */
		KASSERT(m_length(m, NULL) == space + hdrlen,
		    ("%s: mlen %u space %d hdrlen %d",
		    __func__, m_length(m, NULL), space, hdrlen));

		CURVNET_SET(so->so_vnet);
#ifdef KERN_TLS
		if (tls != NULL) {
			error = ktls_frame(m, tls, &tls_enq_cnt,
			    TLS_RLTYPE_APP);
			if (error != 0)
				goto done;
		}
#endif
		if (nios == 0) {
			/*
			 * If sendfile_swapin() didn't initiate any I/Os,
			 * which happens if all data is cached in VM, then
			 * we can send data right now without the
			 * PRUS_NOTREADY flag.
			 */
			free(sfio, M_TEMP);
#ifdef KERN_TLS
			if (tls != NULL && tls->sw_encrypt != NULL) {
				error = (*so->so_proto->pr_usrreqs->pru_send)
				    (so, PRUS_NOTREADY, m, NULL, NULL, td);
				soref(so);
				ktls_enqueue(m, so, tls_enq_cnt);
			} else
#endif
				error = (*so->so_proto->pr_usrreqs->pru_send)
				    (so, 0, m, NULL, NULL, td);
		} else {
			sfio->npages = npages;
			soref(so);
			error = (*so->so_proto->pr_usrreqs->pru_send)
			    (so, PRUS_NOTREADY, m, NULL, NULL, td);
			sendfile_iodone(sfio, NULL, 0, 0);
		}
		CURVNET_RESTORE();

		m = NULL;	/* pru_send always consumes */
		if (error)
			goto done;
		sbytes += space + hdrlen;
		if (hdrlen)
			hdrlen = 0;
		if (softerr) {
			error = softerr;
			goto done;
		}
	}

	/*
	 * Send trailers. Wimp out and use writev(2).
	 */
	if (trl_uio != NULL) {
		sbunlock(&so->so_snd);
		error = kern_writev(td, sockfd, trl_uio);
		if (error == 0)
			sbytes += td->td_retval[0];
		goto out;
	}

done:
	sbunlock(&so->so_snd);
out:
	/*
	 * If there was no error we have to clear td->td_retval[0]
	 * because it may have been set by writev.
	 */
	if (error == 0) {
		td->td_retval[0] = 0;
	}
	if (sent != NULL) {
		(*sent) = sbytes;
	}
	if (obj != NULL)
		vm_object_deallocate(obj);
	if (so)
		fdrop(sock_fp, td);
	if (m)
		m_freem(m);
	if (mh)
		m_freem(mh);

	if (sfs != NULL) {
		mtx_lock(&sfs->mtx);
		if (sfs->count != 0)
			cv_wait(&sfs->cv, &sfs->mtx);
		KASSERT(sfs->count == 0, ("sendfile sync still busy"));
		cv_destroy(&sfs->cv);
		mtx_destroy(&sfs->mtx);
		free(sfs, M_TEMP);
	}
#ifdef KERN_TLS
	if (tls != NULL)
		ktls_free(tls);
#endif

	if (error == ERESTART)
		error = EINTR;

	return (error);
}

static int
sendfile(struct thread *td, struct sendfile_args *uap, int compat)
{
	struct sf_hdtr hdtr;
	struct uio *hdr_uio, *trl_uio;
	struct file *fp;
	off_t sbytes;
	int error;

	/*
	 * The file offset must not be negative.  If it goes beyond EOF
	 * we send only the header/trailer and no payload data.
	 */
	if (uap->offset < 0)
		return (EINVAL);

	sbytes = 0;
	hdr_uio = trl_uio = NULL;

	if (uap->hdtr != NULL) {
		error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
		if (error != 0)
			goto out;
		if (hdtr.headers != NULL) {
			error = copyinuio(hdtr.headers, hdtr.hdr_cnt,
			    &hdr_uio);
			if (error != 0)
				goto out;
#ifdef COMPAT_FREEBSD4
			/*
			 * In FreeBSD < 5.0 the nbytes to send also included
			 * the header.  If compat is specified subtract the
			 * header size from nbytes.
			 */
			if (compat) {
				if (uap->nbytes > hdr_uio->uio_resid)
					uap->nbytes -= hdr_uio->uio_resid;
				else
					uap->nbytes = 0;
			}
#endif
		}
		if (hdtr.trailers != NULL) {
			error = copyinuio(hdtr.trailers, hdtr.trl_cnt,
			    &trl_uio);
			if (error != 0)
				goto out;
		}
	}

	AUDIT_ARG_FD(uap->fd);

	/*
	 * sendfile(2) can start at any offset within a file so we require
	 * CAP_READ+CAP_SEEK = CAP_PREAD.
	 */
	if ((error = fget_read(td, uap->fd, &cap_pread_rights, &fp)) != 0)
		goto out;

	error = fo_sendfile(fp, uap->s, hdr_uio, trl_uio, uap->offset,
	    uap->nbytes, &sbytes, uap->flags, td);
	fdrop(fp, td);

	if (uap->sbytes != NULL)
		copyout(&sbytes, uap->sbytes, sizeof(off_t));

out:
	free(hdr_uio, M_IOV);
	free(trl_uio, M_IOV);
	return (error);
}

/*
 * sendfile(2)
 *
 * int sendfile(int fd, int s, off_t offset, size_t nbytes,
 *       struct sf_hdtr *hdtr, off_t *sbytes, int flags)
 *
 * Send a file specified by 'fd' and starting at 'offset' to a socket
 * specified by 's'.  Send only 'nbytes' of the file or until EOF if
 * nbytes == 0.  Optionally add a header and/or trailer to the socket
 * output.  If specified, write the total number of bytes sent into
 * *sbytes.
 */
int
sys_sendfile(struct thread *td, struct sendfile_args *uap)
{

	return (sendfile(td, uap, 0));
}
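
/*
 * Example userland usage (a minimal illustrative sketch; error checks
 * and the socket setup are omitted, "file.txt" is a hypothetical name,
 * and 's' is assumed to be a connected stream socket):
 *
 *	off_t sbytes;
 *	int fd = open("file.txt", O_RDONLY);
 *
 *	if (fd == -1 || sendfile(fd, s, 0, 0, NULL, &sbytes, 0) == -1)
 *		err(1, "sendfile");
 *	printf("sent %jd bytes\n", (intmax_t)sbytes);
 */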

#ifdef COMPAT_FREEBSD4
int
freebsd4_sendfile(struct thread *td, struct freebsd4_sendfile_args *uap)
{
	struct sendfile_args args;

	args.fd = uap->fd;
	args.s = uap->s;
	args.offset = uap->offset;
	args.nbytes = uap->nbytes;
	args.hdtr = uap->hdtr;
	args.sbytes = uap->sbytes;
	args.flags = uap->flags;

	return (sendfile(td, &args, 1));
}
#endif /* COMPAT_FREEBSD4 */