xref: /freebsd/sys/kern/vfs_vnops.c (revision ba3c1f5972d7b90feb6e6da47905ff2757e0fe57)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1982, 1986, 1989, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  * (c) UNIX System Laboratories, Inc.
7  * All or some portions of this file are derived from material licensed
8  * to the University of California by American Telephone and Telegraph
9  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10  * the permission of UNIX System Laboratories, Inc.
11  *
12  * Copyright (c) 2012 Konstantin Belousov <kib@FreeBSD.org>
13  * Copyright (c) 2013, 2014 The FreeBSD Foundation
14  *
15  * Portions of this software were developed by Konstantin Belousov
16  * under sponsorship from the FreeBSD Foundation.
17  *
18  * Redistribution and use in source and binary forms, with or without
19  * modification, are permitted provided that the following conditions
20  * are met:
21  * 1. Redistributions of source code must retain the above copyright
22  *    notice, this list of conditions and the following disclaimer.
23  * 2. Redistributions in binary form must reproduce the above copyright
24  *    notice, this list of conditions and the following disclaimer in the
25  *    documentation and/or other materials provided with the distribution.
26  * 3. Neither the name of the University nor the names of its contributors
27  *    may be used to endorse or promote products derived from this software
28  *    without specific prior written permission.
29  *
30  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
31  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
34  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
36  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
37  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
38  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
39  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40  * SUCH DAMAGE.
41  *
42  *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
43  */
44 
45 #include <sys/cdefs.h>
46 __FBSDID("$FreeBSD$");
47 
48 #include "opt_hwpmc_hooks.h"
49 
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/disk.h>
53 #include <sys/fail.h>
54 #include <sys/fcntl.h>
55 #include <sys/file.h>
56 #include <sys/kdb.h>
57 #include <sys/ktr.h>
58 #include <sys/stat.h>
59 #include <sys/priv.h>
60 #include <sys/proc.h>
61 #include <sys/limits.h>
62 #include <sys/lock.h>
63 #include <sys/mman.h>
64 #include <sys/mount.h>
65 #include <sys/mutex.h>
66 #include <sys/namei.h>
67 #include <sys/vnode.h>
68 #include <sys/dirent.h>
69 #include <sys/bio.h>
70 #include <sys/buf.h>
71 #include <sys/filio.h>
72 #include <sys/resourcevar.h>
73 #include <sys/rwlock.h>
74 #include <sys/prng.h>
75 #include <sys/sx.h>
76 #include <sys/sleepqueue.h>
77 #include <sys/sysctl.h>
78 #include <sys/ttycom.h>
79 #include <sys/conf.h>
80 #include <sys/syslog.h>
81 #include <sys/unistd.h>
82 #include <sys/user.h>
83 #include <sys/ktrace.h>
84 
85 #include <security/audit/audit.h>
86 #include <security/mac/mac_framework.h>
87 
88 #include <vm/vm.h>
89 #include <vm/vm_extern.h>
90 #include <vm/pmap.h>
91 #include <vm/vm_map.h>
92 #include <vm/vm_object.h>
93 #include <vm/vm_page.h>
94 #include <vm/vm_pager.h>
95 
96 #ifdef HWPMC_HOOKS
97 #include <sys/pmckern.h>
98 #endif
99 
100 static fo_rdwr_t	vn_read;
101 static fo_rdwr_t	vn_write;
102 static fo_rdwr_t	vn_io_fault;
103 static fo_truncate_t	vn_truncate;
104 static fo_ioctl_t	vn_ioctl;
105 static fo_poll_t	vn_poll;
106 static fo_kqfilter_t	vn_kqfilter;
107 static fo_close_t	vn_closefile;
108 static fo_mmap_t	vn_mmap;
109 static fo_fallocate_t	vn_fallocate;
110 static fo_fspacectl_t	vn_fspacectl;
111 
112 struct 	fileops vnops = {
113 	.fo_read = vn_io_fault,
114 	.fo_write = vn_io_fault,
115 	.fo_truncate = vn_truncate,
116 	.fo_ioctl = vn_ioctl,
117 	.fo_poll = vn_poll,
118 	.fo_kqfilter = vn_kqfilter,
119 	.fo_stat = vn_statfile,
120 	.fo_close = vn_closefile,
121 	.fo_chmod = vn_chmod,
122 	.fo_chown = vn_chown,
123 	.fo_sendfile = vn_sendfile,
124 	.fo_seek = vn_seek,
125 	.fo_fill_kinfo = vn_fill_kinfo,
126 	.fo_mmap = vn_mmap,
127 	.fo_fallocate = vn_fallocate,
128 	.fo_fspacectl = vn_fspacectl,
129 	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
130 };
131 
132 const u_int io_hold_cnt = 16;
133 static int vn_io_fault_enable = 1;
134 SYSCTL_INT(_debug, OID_AUTO, vn_io_fault_enable, CTLFLAG_RWTUN,
135     &vn_io_fault_enable, 0, "Enable vn_io_fault lock avoidance");
136 static int vn_io_fault_prefault = 0;
137 SYSCTL_INT(_debug, OID_AUTO, vn_io_fault_prefault, CTLFLAG_RWTUN,
138     &vn_io_fault_prefault, 0, "Enable vn_io_fault prefaulting");
139 static int vn_io_pgcache_read_enable = 1;
140 SYSCTL_INT(_debug, OID_AUTO, vn_io_pgcache_read_enable, CTLFLAG_RWTUN,
141     &vn_io_pgcache_read_enable, 0,
142     "Enable copying from page cache for reads, avoiding fs");
143 static u_long vn_io_faults_cnt;
144 SYSCTL_ULONG(_debug, OID_AUTO, vn_io_faults, CTLFLAG_RD,
145     &vn_io_faults_cnt, 0, "Count of vn_io_fault lock avoidance triggers");
146 
147 static int vfs_allow_read_dir = 0;
148 SYSCTL_INT(_security_bsd, OID_AUTO, allow_read_dir, CTLFLAG_RW,
149     &vfs_allow_read_dir, 0,
150     "Enable read(2) of directory by root for filesystems that support it");
151 
152 /*
153  * Returns true if vn_io_fault mode of handling the i/o request should
154  * be used.
155  */
156 static bool
157 do_vn_io_fault(struct vnode *vp, struct uio *uio)
158 {
159 	struct mount *mp;
160 
161 	return (uio->uio_segflg == UIO_USERSPACE && vp->v_type == VREG &&
162 	    (mp = vp->v_mount) != NULL &&
163 	    (mp->mnt_kern_flag & MNTK_NO_IOPF) != 0 && vn_io_fault_enable);
164 }
165 
166 /*
167  * Structure used to pass arguments to vn_io_fault1(), to do either
168  * file- or vnode-based I/O calls.
169  */
170 struct vn_io_fault_args {
171 	enum {
172 		VN_IO_FAULT_FOP,
173 		VN_IO_FAULT_VOP
174 	} kind;
175 	struct ucred *cred;
176 	int flags;
177 	union {
178 		struct fop_args_tag {
179 			struct file *fp;
180 			fo_rdwr_t *doio;
181 		} fop_args;
182 		struct vop_args_tag {
183 			struct vnode *vp;
184 		} vop_args;
185 	} args;
186 };
187 
188 static int vn_io_fault1(struct vnode *vp, struct uio *uio,
189     struct vn_io_fault_args *args, struct thread *td);
190 
191 int
192 vn_open(struct nameidata *ndp, int *flagp, int cmode, struct file *fp)
193 {
194 	struct thread *td = curthread;
195 
196 	return (vn_open_cred(ndp, flagp, cmode, 0, td->td_ucred, fp));
197 }
198 
199 static uint64_t
200 open2nameif(int fmode, u_int vn_open_flags)
201 {
202 	uint64_t res;
203 
204 	res = ISOPEN | LOCKLEAF;
205 	if ((fmode & O_RESOLVE_BENEATH) != 0)
206 		res |= RBENEATH;
207 	if ((fmode & O_EMPTY_PATH) != 0)
208 		res |= EMPTYPATH;
209 	if ((fmode & FREAD) != 0)
210 		res |= OPENREAD;
211 	if ((fmode & FWRITE) != 0)
212 		res |= OPENWRITE;
213 	if ((vn_open_flags & VN_OPEN_NOAUDIT) == 0)
214 		res |= AUDITVNODE1;
215 	if ((vn_open_flags & VN_OPEN_NOCAPCHECK) != 0)
216 		res |= NOCAPCHECK;
217 	if ((vn_open_flags & VN_OPEN_WANTIOCTLCAPS) != 0)
218 		res |= WANTIOCTLCAPS;
219 	return (res);
220 }
221 
222 /*
223  * Common code for vnode open operations via a name lookup.
224  * Lookup the vnode and invoke VOP_CREATE if needed.
225  * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
226  *
227  * Note that this does NOT free nameidata for the successful case,
228  * due to the NDINIT being done elsewhere.
229  */
230 int
231 vn_open_cred(struct nameidata *ndp, int *flagp, int cmode, u_int vn_open_flags,
232     struct ucred *cred, struct file *fp)
233 {
234 	struct vnode *vp;
235 	struct mount *mp;
236 	struct vattr vat;
237 	struct vattr *vap = &vat;
238 	int fmode, error;
239 	bool first_open;
240 
241 restart:
242 	first_open = false;
243 	fmode = *flagp;
244 	if ((fmode & (O_CREAT | O_EXCL | O_DIRECTORY)) == (O_CREAT |
245 	    O_EXCL | O_DIRECTORY) ||
246 	    (fmode & (O_CREAT | O_EMPTY_PATH)) == (O_CREAT | O_EMPTY_PATH))
247 		return (EINVAL);
248 	else if ((fmode & (O_CREAT | O_DIRECTORY)) == O_CREAT) {
249 		ndp->ni_cnd.cn_nameiop = CREATE;
250 		ndp->ni_cnd.cn_flags = open2nameif(fmode, vn_open_flags);
251 		/*
252 		 * Set NOCACHE to avoid flushing the cache when
253 		 * rolling in many files at once.
254 		 *
255 		 * Set NC_KEEPPOSENTRY to keep positive entries if they already
256 		 * exist despite NOCACHE.
257 		 */
258 		ndp->ni_cnd.cn_flags |= LOCKPARENT | NOCACHE | NC_KEEPPOSENTRY;
259 		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
260 			ndp->ni_cnd.cn_flags |= FOLLOW;
261 		if ((vn_open_flags & VN_OPEN_INVFS) == 0)
262 			bwillwrite();
263 		if ((error = namei(ndp)) != 0)
264 			return (error);
265 		if (ndp->ni_vp == NULL) {
266 			VATTR_NULL(vap);
267 			vap->va_type = VREG;
268 			vap->va_mode = cmode;
269 			if (fmode & O_EXCL)
270 				vap->va_vaflags |= VA_EXCLUSIVE;
271 			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
272 				NDFREE_PNBUF(ndp);
273 				vput(ndp->ni_dvp);
274 				if ((error = vn_start_write(NULL, &mp,
275 				    V_XSLEEP | V_PCATCH)) != 0)
276 					return (error);
277 				NDREINIT(ndp);
278 				goto restart;
279 			}
280 			if ((vn_open_flags & VN_OPEN_NAMECACHE) != 0)
281 				ndp->ni_cnd.cn_flags |= MAKEENTRY;
282 #ifdef MAC
283 			error = mac_vnode_check_create(cred, ndp->ni_dvp,
284 			    &ndp->ni_cnd, vap);
285 			if (error == 0)
286 #endif
287 				error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
288 				    &ndp->ni_cnd, vap);
289 			vp = ndp->ni_vp;
290 			if (error == 0 && (fmode & O_EXCL) != 0 &&
291 			    (fmode & (O_EXLOCK | O_SHLOCK)) != 0) {
292 				VI_LOCK(vp);
293 				vp->v_iflag |= VI_FOPENING;
294 				VI_UNLOCK(vp);
295 				first_open = true;
296 			}
297 			VOP_VPUT_PAIR(ndp->ni_dvp, error == 0 ? &vp : NULL,
298 			    false);
299 			vn_finished_write(mp);
300 			if (error) {
301 				NDFREE_PNBUF(ndp);
302 				if (error == ERELOOKUP) {
303 					NDREINIT(ndp);
304 					goto restart;
305 				}
306 				return (error);
307 			}
308 			fmode &= ~O_TRUNC;
309 		} else {
310 			if (ndp->ni_dvp == ndp->ni_vp)
311 				vrele(ndp->ni_dvp);
312 			else
313 				vput(ndp->ni_dvp);
314 			ndp->ni_dvp = NULL;
315 			vp = ndp->ni_vp;
316 			if (fmode & O_EXCL) {
317 				error = EEXIST;
318 				goto bad;
319 			}
320 			if (vp->v_type == VDIR) {
321 				error = EISDIR;
322 				goto bad;
323 			}
324 			fmode &= ~O_CREAT;
325 		}
326 	} else {
327 		ndp->ni_cnd.cn_nameiop = LOOKUP;
328 		ndp->ni_cnd.cn_flags = open2nameif(fmode, vn_open_flags);
329 		ndp->ni_cnd.cn_flags |= (fmode & O_NOFOLLOW) != 0 ? NOFOLLOW :
330 		    FOLLOW;
331 		if ((fmode & FWRITE) == 0)
332 			ndp->ni_cnd.cn_flags |= LOCKSHARED;
333 		if ((error = namei(ndp)) != 0)
334 			return (error);
335 		vp = ndp->ni_vp;
336 	}
337 	error = vn_open_vnode(vp, fmode, cred, curthread, fp);
338 	if (first_open) {
339 		VI_LOCK(vp);
340 		vp->v_iflag &= ~VI_FOPENING;
341 		wakeup(vp);
342 		VI_UNLOCK(vp);
343 	}
344 	if (error)
345 		goto bad;
346 	*flagp = fmode;
347 	return (0);
348 bad:
349 	NDFREE_PNBUF(ndp);
350 	vput(vp);
351 	*flagp = fmode;
352 	ndp->ni_vp = NULL;
353 	return (error);
354 }
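
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a typical in-kernel caller pairs vn_open() with vn_close() roughly as
 * follows; the path, open flags, and error handling are hypothetical.
 *
 *	struct nameidata nd;
 *	int flags, error;
 *
 *	flags = FREAD;
 *	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, "/some/path");
 *	error = vn_open(&nd, &flags, 0, NULL);
 *	if (error != 0)
 *		return (error);
 *	NDFREE_PNBUF(&nd);
 *	VOP_UNLOCK(nd.ni_vp);	(vn_open() returns the vnode locked)
 *	... perform I/O on nd.ni_vp, e.g. with vn_rdwr() ...
 *	error = vn_close(nd.ni_vp, FREAD, curthread->td_ucred, curthread);
 */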
355 
356 static int
357 vn_open_vnode_advlock(struct vnode *vp, int fmode, struct file *fp)
358 {
359 	struct flock lf;
360 	int error, lock_flags, type;
361 
362 	ASSERT_VOP_LOCKED(vp, "vn_open_vnode_advlock");
363 	if ((fmode & (O_EXLOCK | O_SHLOCK)) == 0)
364 		return (0);
365 	KASSERT(fp != NULL, ("open with flock requires fp"));
366 	if (fp->f_type != DTYPE_NONE && fp->f_type != DTYPE_VNODE)
367 		return (EOPNOTSUPP);
368 
369 	lock_flags = VOP_ISLOCKED(vp);
370 	VOP_UNLOCK(vp);
371 
372 	lf.l_whence = SEEK_SET;
373 	lf.l_start = 0;
374 	lf.l_len = 0;
375 	lf.l_type = (fmode & O_EXLOCK) != 0 ? F_WRLCK : F_RDLCK;
376 	type = F_FLOCK;
377 	if ((fmode & FNONBLOCK) == 0)
378 		type |= F_WAIT;
379 	if ((fmode & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
380 		type |= F_FIRSTOPEN;
381 	error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type);
382 	if (error == 0)
383 		fp->f_flag |= FHASLOCK;
384 
385 	vn_lock(vp, lock_flags | LK_RETRY);
386 	return (error);
387 }
388 
389 /*
390  * Common code for vnode open operations once a vnode is located.
391  * Check permissions, and call the VOP_OPEN routine.
392  */
393 int
394 vn_open_vnode(struct vnode *vp, int fmode, struct ucred *cred,
395     struct thread *td, struct file *fp)
396 {
397 	accmode_t accmode;
398 	int error;
399 
400 	if (vp->v_type == VLNK) {
401 		if ((fmode & O_PATH) == 0 || (fmode & FEXEC) != 0)
402 			return (EMLINK);
403 	}
404 	if (vp->v_type != VDIR && fmode & O_DIRECTORY)
405 		return (ENOTDIR);
406 
407 	accmode = 0;
408 	if ((fmode & O_PATH) == 0) {
409 		if (vp->v_type == VSOCK)
410 			return (EOPNOTSUPP);
411 		if ((fmode & (FWRITE | O_TRUNC)) != 0) {
412 			if (vp->v_type == VDIR)
413 				return (EISDIR);
414 			accmode |= VWRITE;
415 		}
416 		if ((fmode & FREAD) != 0)
417 			accmode |= VREAD;
418 		if ((fmode & O_APPEND) && (fmode & FWRITE))
419 			accmode |= VAPPEND;
420 #ifdef MAC
421 		if ((fmode & O_CREAT) != 0)
422 			accmode |= VCREAT;
423 #endif
424 	}
425 	if ((fmode & FEXEC) != 0)
426 		accmode |= VEXEC;
427 #ifdef MAC
428 	if ((fmode & O_VERIFY) != 0)
429 		accmode |= VVERIFY;
430 	error = mac_vnode_check_open(cred, vp, accmode);
431 	if (error != 0)
432 		return (error);
433 
434 	accmode &= ~(VCREAT | VVERIFY);
435 #endif
436 	if ((fmode & O_CREAT) == 0 && accmode != 0) {
437 		error = VOP_ACCESS(vp, accmode, cred, td);
438 		if (error != 0)
439 			return (error);
440 	}
441 	if ((fmode & O_PATH) != 0) {
442 		if (vp->v_type != VFIFO && vp->v_type != VSOCK &&
443 		    VOP_ACCESS(vp, VREAD, cred, td) == 0)
444 			fp->f_flag |= FKQALLOWED;
445 		return (0);
446 	}
447 
448 	if (vp->v_type == VFIFO && VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
449 		vn_lock(vp, LK_UPGRADE | LK_RETRY);
450 	error = VOP_OPEN(vp, fmode, cred, td, fp);
451 	if (error != 0)
452 		return (error);
453 
454 	error = vn_open_vnode_advlock(vp, fmode, fp);
455 	if (error == 0 && (fmode & FWRITE) != 0) {
456 		error = VOP_ADD_WRITECOUNT(vp, 1);
457 		if (error == 0) {
458 			CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d",
459 			     __func__, vp, vp->v_writecount);
460 		}
461 	}
462 
463 	/*
464 	 * Error from advlock or VOP_ADD_WRITECOUNT() still requires
465 	 * calling VOP_CLOSE() to pair with earlier VOP_OPEN().
466 	 */
467 	if (error != 0) {
468 		if (fp != NULL) {
469 			/*
470 			 * Arrange for fdrop() to use
471 			 * vn_closefile().  This is to satisfy
472 			 * filesystems like devfs or tmpfs, which
473 			 * override fo_close().
474 			 */
475 			fp->f_flag |= FOPENFAILED;
476 			fp->f_vnode = vp;
477 			if (fp->f_ops == &badfileops) {
478 				fp->f_type = DTYPE_VNODE;
479 				fp->f_ops = &vnops;
480 			}
481 			vref(vp);
482 		} else {
483 			/*
484 			 * If there is no fp, due to kernel-mode open,
485 			 * we can call VOP_CLOSE() now.
486 			 */
487 			if ((vp->v_type == VFIFO ||
488 			    !MNT_EXTENDED_SHARED(vp->v_mount)) &&
489 			    VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
490 				vn_lock(vp, LK_UPGRADE | LK_RETRY);
491 			(void)VOP_CLOSE(vp, fmode & (FREAD | FWRITE | FEXEC),
492 			    cred, td);
493 		}
494 	}
495 
496 	ASSERT_VOP_LOCKED(vp, "vn_open_vnode");
497 	return (error);
498 
499 }
500 
501 /*
502  * Check for write permissions on the specified vnode.
503  * Prototype text segments cannot be written.
504  * It is racy.
505  */
506 int
507 vn_writechk(struct vnode *vp)
508 {
509 
510 	ASSERT_VOP_LOCKED(vp, "vn_writechk");
511 	/*
512 	 * If there's shared text associated with
513 	 * the vnode (it backs an executable's text
514 	 * segment), we can't allow writing.
515 	 */
516 	if (VOP_IS_TEXT(vp))
517 		return (ETXTBSY);
518 
519 	return (0);
520 }
521 
522 /*
523  * Vnode close call
524  */
525 static int
526 vn_close1(struct vnode *vp, int flags, struct ucred *file_cred,
527     struct thread *td, bool keep_ref)
528 {
529 	struct mount *mp;
530 	int error, lock_flags;
531 
532 	lock_flags = vp->v_type != VFIFO && MNT_EXTENDED_SHARED(vp->v_mount) ?
533 	    LK_SHARED : LK_EXCLUSIVE;
534 
535 	vn_start_write(vp, &mp, V_WAIT);
536 	vn_lock(vp, lock_flags | LK_RETRY);
537 	AUDIT_ARG_VNODE1(vp);
538 	if ((flags & (FWRITE | FOPENFAILED)) == FWRITE) {
539 		VOP_ADD_WRITECOUNT_CHECKED(vp, -1);
540 		CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
541 		    __func__, vp, vp->v_writecount);
542 	}
543 	error = VOP_CLOSE(vp, flags, file_cred, td);
544 	if (keep_ref)
545 		VOP_UNLOCK(vp);
546 	else
547 		vput(vp);
548 	vn_finished_write(mp);
549 	return (error);
550 }
551 
552 int
553 vn_close(struct vnode *vp, int flags, struct ucred *file_cred,
554     struct thread *td)
555 {
556 
557 	return (vn_close1(vp, flags, file_cred, td, false));
558 }
559 
560 /*
561  * Heuristic to detect sequential operation.
562  */
563 static int
564 sequential_heuristic(struct uio *uio, struct file *fp)
565 {
566 	enum uio_rw rw;
567 
568 	ASSERT_VOP_LOCKED(fp->f_vnode, __func__);
569 
570 	rw = uio->uio_rw;
571 	if (fp->f_flag & FRDAHEAD)
572 		return (fp->f_seqcount[rw] << IO_SEQSHIFT);
573 
574 	/*
575 	 * Offset 0 is handled specially.  open() sets f_seqcount to 1 so
576 	 * that the first I/O is normally considered to be slightly
577 	 * sequential.  Seeking to offset 0 doesn't change sequentiality
578 	 * unless previous seeks have reduced f_seqcount to 0, in which
579 	 * case offset 0 is not special.
580 	 */
581 	if ((uio->uio_offset == 0 && fp->f_seqcount[rw] > 0) ||
582 	    uio->uio_offset == fp->f_nextoff[rw]) {
583 		/*
584 		 * f_seqcount is in units of fixed-size blocks so that it
585 		 * depends mainly on the amount of sequential I/O and not
586 		 * much on the number of sequential I/O's.  The fixed size
587 		 * of 16384 is hard-coded here since it is (not quite) just
588 		 * a magic size that works well here.  This size is more
589 		 * closely related to the best I/O size for real disks than
590 		 * to any block size used by software.
591 		 */
592 		if (uio->uio_resid >= IO_SEQMAX * 16384)
593 			fp->f_seqcount[rw] = IO_SEQMAX;
594 		else {
595 			fp->f_seqcount[rw] += howmany(uio->uio_resid, 16384);
596 			if (fp->f_seqcount[rw] > IO_SEQMAX)
597 				fp->f_seqcount[rw] = IO_SEQMAX;
598 		}
599 		return (fp->f_seqcount[rw] << IO_SEQSHIFT);
600 	}
601 
602 	/* Not sequential.  Quickly draw-down sequentiality. */
603 	if (fp->f_seqcount[rw] > 1)
604 		fp->f_seqcount[rw] = 1;
605 	else
606 		fp->f_seqcount[rw] = 0;
607 	return (0);
608 }
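
/*
 * Worked example (editorial addition): with the hard-coded 16384-byte
 * block size above, a thread issuing back-to-back 64KB sequential reads
 * advances f_seqcount[UIO_READ] by howmany(65536, 16384) == 4 on each
 * call until it saturates at IO_SEQMAX.  The value returned here,
 * f_seqcount << IO_SEQSHIFT, is OR'ed into the ioflag passed to
 * VOP_READ()/VOP_WRITE(), where the filesystem may use it to scale
 * read-ahead (see the callers vn_read() and vn_write() below).
 */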
609 
610 /*
611  * Package up an I/O request on a vnode into a uio and do it.
612  */
613 int
614 vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset,
615     enum uio_seg segflg, int ioflg, struct ucred *active_cred,
616     struct ucred *file_cred, ssize_t *aresid, struct thread *td)
617 {
618 	struct uio auio;
619 	struct iovec aiov;
620 	struct mount *mp;
621 	struct ucred *cred;
622 	void *rl_cookie;
623 	struct vn_io_fault_args args;
624 	int error, lock_flags;
625 
626 	if (offset < 0 && vp->v_type != VCHR)
627 		return (EINVAL);
628 	auio.uio_iov = &aiov;
629 	auio.uio_iovcnt = 1;
630 	aiov.iov_base = base;
631 	aiov.iov_len = len;
632 	auio.uio_resid = len;
633 	auio.uio_offset = offset;
634 	auio.uio_segflg = segflg;
635 	auio.uio_rw = rw;
636 	auio.uio_td = td;
637 	error = 0;
638 
639 	if ((ioflg & IO_NODELOCKED) == 0) {
640 		if ((ioflg & IO_RANGELOCKED) == 0) {
641 			if (rw == UIO_READ) {
642 				rl_cookie = vn_rangelock_rlock(vp, offset,
643 				    offset + len);
644 			} else if ((ioflg & IO_APPEND) != 0) {
645 				rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
646 			} else {
647 				rl_cookie = vn_rangelock_wlock(vp, offset,
648 				    offset + len);
649 			}
650 		} else
651 			rl_cookie = NULL;
652 		mp = NULL;
653 		if (rw == UIO_WRITE) {
654 			if (vp->v_type != VCHR &&
655 			    (error = vn_start_write(vp, &mp, V_WAIT | V_PCATCH))
656 			    != 0)
657 				goto out;
658 			lock_flags = vn_lktype_write(mp, vp);
659 		} else
660 			lock_flags = LK_SHARED;
661 		vn_lock(vp, lock_flags | LK_RETRY);
662 	} else
663 		rl_cookie = NULL;
664 
665 	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
666 #ifdef MAC
667 	if ((ioflg & IO_NOMACCHECK) == 0) {
668 		if (rw == UIO_READ)
669 			error = mac_vnode_check_read(active_cred, file_cred,
670 			    vp);
671 		else
672 			error = mac_vnode_check_write(active_cred, file_cred,
673 			    vp);
674 	}
675 #endif
676 	if (error == 0) {
677 		if (file_cred != NULL)
678 			cred = file_cred;
679 		else
680 			cred = active_cred;
681 		if (do_vn_io_fault(vp, &auio)) {
682 			args.kind = VN_IO_FAULT_VOP;
683 			args.cred = cred;
684 			args.flags = ioflg;
685 			args.args.vop_args.vp = vp;
686 			error = vn_io_fault1(vp, &auio, &args, td);
687 		} else if (rw == UIO_READ) {
688 			error = VOP_READ(vp, &auio, ioflg, cred);
689 		} else /* if (rw == UIO_WRITE) */ {
690 			error = VOP_WRITE(vp, &auio, ioflg, cred);
691 		}
692 	}
693 	if (aresid)
694 		*aresid = auio.uio_resid;
695 	else
696 		if (auio.uio_resid && error == 0)
697 			error = EIO;
698 	if ((ioflg & IO_NODELOCKED) == 0) {
699 		VOP_UNLOCK(vp);
700 		if (mp != NULL)
701 			vn_finished_write(mp);
702 	}
703  out:
704 	if (rl_cookie != NULL)
705 		vn_rangelock_unlock(vp, rl_cookie);
706 	return (error);
707 }
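
/*
 * Illustrative sketch (editorial addition): reading the first bytes of a
 * referenced, unlocked vnode into a kernel buffer with vn_rdwr(); the
 * buffer and its size are hypothetical.  With ioflg 0 the routine takes
 * the range lock and the vnode lock itself, as shown above.
 *
 *	char buf[128];
 *	ssize_t resid;
 *	int error;
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0, UIO_SYSSPACE,
 *	    0, curthread->td_ucred, NOCRED, &resid, curthread);
 *	if (error == 0)
 *		... sizeof(buf) - resid bytes were copied into buf ...
 */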
708 
709 /*
710  * Package up an I/O request on a vnode into a uio and do it.  The I/O
711  * request is split up into smaller chunks and we try to avoid saturating
712  * the buffer cache while potentially holding a vnode locked, so we
713  * check bwillwrite() before calling vn_rdwr().  We also call kern_yield()
714  * to give other processes a chance to lock the vnode (either other processes
715  * core'ing the same binary, or unrelated processes scanning the directory).
716  */
717 int
718 vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, void *base, size_t len,
719     off_t offset, enum uio_seg segflg, int ioflg, struct ucred *active_cred,
720     struct ucred *file_cred, size_t *aresid, struct thread *td)
721 {
722 	int error = 0;
723 	ssize_t iaresid;
724 
725 	do {
726 		int chunk;
727 
728 		/*
729 		 * Force `offset' to a multiple of MAXBSIZE except possibly
730 		 * for the first chunk, so that filesystems only need to
731 		 * write full blocks except possibly for the first and last
732 		 * chunks.
733 		 */
734 		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;
735 
736 		if (chunk > len)
737 			chunk = len;
738 		if (rw != UIO_READ && vp->v_type == VREG)
739 			bwillwrite();
740 		iaresid = 0;
741 		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
742 		    ioflg, active_cred, file_cred, &iaresid, td);
743 		len -= chunk;	/* aresid calc already includes length */
744 		if (error)
745 			break;
746 		offset += chunk;
747 		base = (char *)base + chunk;
748 		kern_yield(PRI_USER);
749 	} while (len);
750 	if (aresid)
751 		*aresid = len + iaresid;
752 	return (error);
753 }
754 
755 #if OFF_MAX <= LONG_MAX
756 off_t
757 foffset_lock(struct file *fp, int flags)
758 {
759 	volatile short *flagsp;
760 	off_t res;
761 	short state;
762 
763 	KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));
764 
765 	if ((flags & FOF_NOLOCK) != 0)
766 		return (atomic_load_long(&fp->f_offset));
767 
768 	/*
769 	 * According to McKusick the vn lock was protecting f_offset here.
770 	 * It is now protected by the FOFFSET_LOCKED flag.
771 	 */
772 	flagsp = &fp->f_vnread_flags;
773 	if (atomic_cmpset_acq_16(flagsp, 0, FOFFSET_LOCKED))
774 		return (atomic_load_long(&fp->f_offset));
775 
776 	sleepq_lock(&fp->f_vnread_flags);
777 	state = atomic_load_16(flagsp);
778 	for (;;) {
779 		if ((state & FOFFSET_LOCKED) == 0) {
780 			if (!atomic_fcmpset_acq_16(flagsp, &state,
781 			    FOFFSET_LOCKED))
782 				continue;
783 			break;
784 		}
785 		if ((state & FOFFSET_LOCK_WAITING) == 0) {
786 			if (!atomic_fcmpset_acq_16(flagsp, &state,
787 			    state | FOFFSET_LOCK_WAITING))
788 				continue;
789 		}
790 		DROP_GIANT();
791 		sleepq_add(&fp->f_vnread_flags, NULL, "vofflock", 0, 0);
792 		sleepq_wait(&fp->f_vnread_flags, PUSER -1);
793 		PICKUP_GIANT();
794 		sleepq_lock(&fp->f_vnread_flags);
795 		state = atomic_load_16(flagsp);
796 	}
797 	res = atomic_load_long(&fp->f_offset);
798 	sleepq_release(&fp->f_vnread_flags);
799 	return (res);
800 }
801 
802 void
803 foffset_unlock(struct file *fp, off_t val, int flags)
804 {
805 	volatile short *flagsp;
806 	short state;
807 
808 	KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));
809 
810 	if ((flags & FOF_NOUPDATE) == 0)
811 		atomic_store_long(&fp->f_offset, val);
812 	if ((flags & FOF_NEXTOFF_R) != 0)
813 		fp->f_nextoff[UIO_READ] = val;
814 	if ((flags & FOF_NEXTOFF_W) != 0)
815 		fp->f_nextoff[UIO_WRITE] = val;
816 
817 	if ((flags & FOF_NOLOCK) != 0)
818 		return;
819 
820 	flagsp = &fp->f_vnread_flags;
821 	state = atomic_load_16(flagsp);
822 	if ((state & FOFFSET_LOCK_WAITING) == 0 &&
823 	    atomic_cmpset_rel_16(flagsp, state, 0))
824 		return;
825 
826 	sleepq_lock(&fp->f_vnread_flags);
827 	MPASS((fp->f_vnread_flags & FOFFSET_LOCKED) != 0);
828 	MPASS((fp->f_vnread_flags & FOFFSET_LOCK_WAITING) != 0);
829 	fp->f_vnread_flags = 0;
830 	sleepq_broadcast(&fp->f_vnread_flags, SLEEPQ_SLEEP, 0, 0);
831 	sleepq_release(&fp->f_vnread_flags);
832 }
833 #else
834 off_t
835 foffset_lock(struct file *fp, int flags)
836 {
837 	struct mtx *mtxp;
838 	off_t res;
839 
840 	KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));
841 
842 	mtxp = mtx_pool_find(mtxpool_sleep, fp);
843 	mtx_lock(mtxp);
844 	if ((flags & FOF_NOLOCK) == 0) {
845 		while (fp->f_vnread_flags & FOFFSET_LOCKED) {
846 			fp->f_vnread_flags |= FOFFSET_LOCK_WAITING;
847 			msleep(&fp->f_vnread_flags, mtxp, PUSER -1,
848 			    "vofflock", 0);
849 		}
850 		fp->f_vnread_flags |= FOFFSET_LOCKED;
851 	}
852 	res = fp->f_offset;
853 	mtx_unlock(mtxp);
854 	return (res);
855 }
856 
857 void
858 foffset_unlock(struct file *fp, off_t val, int flags)
859 {
860 	struct mtx *mtxp;
861 
862 	KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));
863 
864 	mtxp = mtx_pool_find(mtxpool_sleep, fp);
865 	mtx_lock(mtxp);
866 	if ((flags & FOF_NOUPDATE) == 0)
867 		fp->f_offset = val;
868 	if ((flags & FOF_NEXTOFF_R) != 0)
869 		fp->f_nextoff[UIO_READ] = val;
870 	if ((flags & FOF_NEXTOFF_W) != 0)
871 		fp->f_nextoff[UIO_WRITE] = val;
872 	if ((flags & FOF_NOLOCK) == 0) {
873 		KASSERT((fp->f_vnread_flags & FOFFSET_LOCKED) != 0,
874 		    ("Lost FOFFSET_LOCKED"));
875 		if (fp->f_vnread_flags & FOFFSET_LOCK_WAITING)
876 			wakeup(&fp->f_vnread_flags);
877 		fp->f_vnread_flags = 0;
878 	}
879 	mtx_unlock(mtxp);
880 }
881 #endif
882 
883 void
884 foffset_lock_uio(struct file *fp, struct uio *uio, int flags)
885 {
886 
887 	if ((flags & FOF_OFFSET) == 0)
888 		uio->uio_offset = foffset_lock(fp, flags);
889 }
890 
891 void
892 foffset_unlock_uio(struct file *fp, struct uio *uio, int flags)
893 {
894 
895 	if ((flags & FOF_OFFSET) == 0)
896 		foffset_unlock(fp, uio->uio_offset, flags);
897 }
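
/*
 * Illustrative sketch (editorial addition): a seek-style consumer of the
 * helpers above serializes its update of f_offset like this (the offset
 * computation is hypothetical):
 *
 *	off_t off;
 *
 *	off = foffset_lock(fp, 0);
 *	... compute the new offset from off ...
 *	foffset_unlock(fp, off, 0);
 */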
898 
899 static int
900 get_advice(struct file *fp, struct uio *uio)
901 {
902 	struct mtx *mtxp;
903 	int ret;
904 
905 	ret = POSIX_FADV_NORMAL;
906 	if (fp->f_advice == NULL || fp->f_vnode->v_type != VREG)
907 		return (ret);
908 
909 	mtxp = mtx_pool_find(mtxpool_sleep, fp);
910 	mtx_lock(mtxp);
911 	if (fp->f_advice != NULL &&
912 	    uio->uio_offset >= fp->f_advice->fa_start &&
913 	    uio->uio_offset + uio->uio_resid <= fp->f_advice->fa_end)
914 		ret = fp->f_advice->fa_advice;
915 	mtx_unlock(mtxp);
916 	return (ret);
917 }
918 
919 static int
920 get_write_ioflag(struct file *fp)
921 {
922 	int ioflag;
923 	struct mount *mp;
924 	struct vnode *vp;
925 
926 	ioflag = 0;
927 	vp = fp->f_vnode;
928 	mp = atomic_load_ptr(&vp->v_mount);
929 
930 	if ((fp->f_flag & O_DIRECT) != 0)
931 		ioflag |= IO_DIRECT;
932 
933 	if ((fp->f_flag & O_FSYNC) != 0 ||
934 	    (mp != NULL && (mp->mnt_flag & MNT_SYNCHRONOUS) != 0))
935 		ioflag |= IO_SYNC;
936 
937 	/*
938 	 * For O_DSYNC we set both IO_SYNC and IO_DATASYNC, so that VOP_WRITE()
939 	 * or VOP_DEALLOCATE() implementations that don't understand IO_DATASYNC
940 	 * fall back to full O_SYNC behavior.
941 	 */
942 	if ((fp->f_flag & O_DSYNC) != 0)
943 		ioflag |= IO_SYNC | IO_DATASYNC;
944 
945 	return (ioflag);
946 }
947 
948 int
949 vn_read_from_obj(struct vnode *vp, struct uio *uio)
950 {
951 	vm_object_t obj;
952 	vm_page_t ma[io_hold_cnt + 2];
953 	off_t off, vsz;
954 	ssize_t resid;
955 	int error, i, j;
956 
957 	MPASS(uio->uio_resid <= ptoa(io_hold_cnt + 2));
958 	obj = atomic_load_ptr(&vp->v_object);
959 	if (obj == NULL)
960 		return (EJUSTRETURN);
961 
962 	/*
963 	 * Depends on type stability of vm_objects.
964 	 */
965 	vm_object_pip_add(obj, 1);
966 	if ((obj->flags & OBJ_DEAD) != 0) {
967 		/*
968 		 * Note that the object might already be reused from the
969 		 * vnode, and the OBJ_DEAD flag cleared.  This is fine,
970 		 * we recheck for DOOMED vnode state after all pages
971 		 * are busied, and retract then.
972 		 *
973 		 * But we check for OBJ_DEAD to ensure that we do not
974 		 * busy pages while vm_object_terminate_pages()
975 		 * processes the queue.
976 		 */
977 		error = EJUSTRETURN;
978 		goto out_pip;
979 	}
980 
981 	resid = uio->uio_resid;
982 	off = uio->uio_offset;
983 	for (i = 0; resid > 0; i++) {
984 		MPASS(i < io_hold_cnt + 2);
985 		ma[i] = vm_page_grab_unlocked(obj, atop(off),
986 		    VM_ALLOC_NOCREAT | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY |
987 		    VM_ALLOC_NOWAIT);
988 		if (ma[i] == NULL)
989 			break;
990 
991 		/*
992 		 * Skip invalid pages.  Valid mask can be partial only
993 		 * at EOF, and we clip later.
994 		 */
995 		if (vm_page_none_valid(ma[i])) {
996 			vm_page_sunbusy(ma[i]);
997 			break;
998 		}
999 
1000 		resid -= PAGE_SIZE;
1001 		off += PAGE_SIZE;
1002 	}
1003 	if (i == 0) {
1004 		error = EJUSTRETURN;
1005 		goto out_pip;
1006 	}
1007 
1008 	/*
1009 	 * Check VIRF_DOOMED after we busied our pages.  Since
1010 	 * vgonel() terminates the vnode's vm_object, it cannot
1011 	 * process past pages busied by us.
1012 	 */
1013 	if (VN_IS_DOOMED(vp)) {
1014 		error = EJUSTRETURN;
1015 		goto out;
1016 	}
1017 
1018 	resid = PAGE_SIZE - (uio->uio_offset & PAGE_MASK) + ptoa(i - 1);
1019 	if (resid > uio->uio_resid)
1020 		resid = uio->uio_resid;
1021 
1022 	/*
1023 	 * Unlocked read of vnp_size is safe because truncation cannot
1024 	 * pass busied page.  But we load vnp_size into a local
1025 	 * variable so that possible concurrent extension does not
1026 	 * break calculation.
1027 	 */
1028 #if defined(__powerpc__) && !defined(__powerpc64__)
1029 	vsz = obj->un_pager.vnp.vnp_size;
1030 #else
1031 	vsz = atomic_load_64(&obj->un_pager.vnp.vnp_size);
1032 #endif
1033 	if (uio->uio_offset >= vsz) {
1034 		error = EJUSTRETURN;
1035 		goto out;
1036 	}
1037 	if (uio->uio_offset + resid > vsz)
1038 		resid = vsz - uio->uio_offset;
1039 
1040 	error = vn_io_fault_pgmove(ma, uio->uio_offset & PAGE_MASK, resid, uio);
1041 
1042 out:
1043 	for (j = 0; j < i; j++) {
1044 		if (error == 0)
1045 			vm_page_reference(ma[j]);
1046 		vm_page_sunbusy(ma[j]);
1047 	}
1048 out_pip:
1049 	vm_object_pip_wakeup(obj);
1050 	if (error != 0)
1051 		return (error);
1052 	return (uio->uio_resid == 0 ? 0 : EJUSTRETURN);
1053 }
1054 
1055 /*
1056  * File table vnode read routine.
1057  */
1058 static int
1059 vn_read(struct file *fp, struct uio *uio, struct ucred *active_cred, int flags,
1060     struct thread *td)
1061 {
1062 	struct vnode *vp;
1063 	off_t orig_offset;
1064 	int error, ioflag;
1065 	int advice;
1066 
1067 	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
1068 	    uio->uio_td, td));
1069 	KASSERT(flags & FOF_OFFSET, ("No FOF_OFFSET"));
1070 	vp = fp->f_vnode;
1071 	ioflag = 0;
1072 	if (fp->f_flag & FNONBLOCK)
1073 		ioflag |= IO_NDELAY;
1074 	if (fp->f_flag & O_DIRECT)
1075 		ioflag |= IO_DIRECT;
1076 
1077 	/*
1078 	 * Try to read from page cache.  VIRF_DOOMED check is racy but
1079 	 * allows us to avoid unneeded work outright.
1080 	 */
1081 	if (vn_io_pgcache_read_enable && !mac_vnode_check_read_enabled() &&
1082 	    (vn_irflag_read(vp) & (VIRF_DOOMED | VIRF_PGREAD)) == VIRF_PGREAD) {
1083 		error = VOP_READ_PGCACHE(vp, uio, ioflag, fp->f_cred);
1084 		if (error == 0) {
1085 			fp->f_nextoff[UIO_READ] = uio->uio_offset;
1086 			return (0);
1087 		}
1088 		if (error != EJUSTRETURN)
1089 			return (error);
1090 	}
1091 
1092 	advice = get_advice(fp, uio);
1093 	vn_lock(vp, LK_SHARED | LK_RETRY);
1094 
1095 	switch (advice) {
1096 	case POSIX_FADV_NORMAL:
1097 	case POSIX_FADV_SEQUENTIAL:
1098 	case POSIX_FADV_NOREUSE:
1099 		ioflag |= sequential_heuristic(uio, fp);
1100 		break;
1101 	case POSIX_FADV_RANDOM:
1102 		/* Disable read-ahead for random I/O. */
1103 		break;
1104 	}
1105 	orig_offset = uio->uio_offset;
1106 
1107 #ifdef MAC
1108 	error = mac_vnode_check_read(active_cred, fp->f_cred, vp);
1109 	if (error == 0)
1110 #endif
1111 		error = VOP_READ(vp, uio, ioflag, fp->f_cred);
1112 	fp->f_nextoff[UIO_READ] = uio->uio_offset;
1113 	VOP_UNLOCK(vp);
1114 	if (error == 0 && advice == POSIX_FADV_NOREUSE &&
1115 	    orig_offset != uio->uio_offset)
1116 		/*
1117 		 * Use POSIX_FADV_DONTNEED to flush pages and buffers
1118 		 * for the backing file after a POSIX_FADV_NOREUSE
1119 		 * read(2).
1120 		 */
1121 		error = VOP_ADVISE(vp, orig_offset, uio->uio_offset - 1,
1122 		    POSIX_FADV_DONTNEED);
1123 	return (error);
1124 }
1125 
1126 /*
1127  * File table vnode write routine.
1128  */
1129 static int
1130 vn_write(struct file *fp, struct uio *uio, struct ucred *active_cred, int flags,
1131     struct thread *td)
1132 {
1133 	struct vnode *vp;
1134 	struct mount *mp;
1135 	off_t orig_offset;
1136 	int error, ioflag;
1137 	int advice;
1138 	bool need_finished_write;
1139 
1140 	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
1141 	    uio->uio_td, td));
1142 	KASSERT(flags & FOF_OFFSET, ("No FOF_OFFSET"));
1143 	vp = fp->f_vnode;
1144 	if (vp->v_type == VREG)
1145 		bwillwrite();
1146 	ioflag = IO_UNIT;
1147 	if (vp->v_type == VREG && (fp->f_flag & O_APPEND) != 0)
1148 		ioflag |= IO_APPEND;
1149 	if ((fp->f_flag & FNONBLOCK) != 0)
1150 		ioflag |= IO_NDELAY;
1151 	ioflag |= get_write_ioflag(fp);
1152 
1153 	mp = NULL;
1154 	need_finished_write = false;
1155 	if (vp->v_type != VCHR) {
1156 		error = vn_start_write(vp, &mp, V_WAIT | V_PCATCH);
1157 		if (error != 0)
1158 			goto unlock;
1159 		need_finished_write = true;
1160 	}
1161 
1162 	advice = get_advice(fp, uio);
1163 
1164 	vn_lock(vp, vn_lktype_write(mp, vp) | LK_RETRY);
1165 	switch (advice) {
1166 	case POSIX_FADV_NORMAL:
1167 	case POSIX_FADV_SEQUENTIAL:
1168 	case POSIX_FADV_NOREUSE:
1169 		ioflag |= sequential_heuristic(uio, fp);
1170 		break;
1171 	case POSIX_FADV_RANDOM:
1172 		/* XXX: Is this correct? */
1173 		break;
1174 	}
1175 	orig_offset = uio->uio_offset;
1176 
1177 #ifdef MAC
1178 	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
1179 	if (error == 0)
1180 #endif
1181 		error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
1182 	fp->f_nextoff[UIO_WRITE] = uio->uio_offset;
1183 	VOP_UNLOCK(vp);
1184 	if (need_finished_write)
1185 		vn_finished_write(mp);
1186 	if (error == 0 && advice == POSIX_FADV_NOREUSE &&
1187 	    orig_offset != uio->uio_offset)
1188 		/*
1189 		 * Use POSIX_FADV_DONTNEED to flush pages and buffers
1190 		 * for the backing file after a POSIX_FADV_NOREUSE
1191 		 * write(2).
1192 		 */
1193 		error = VOP_ADVISE(vp, orig_offset, uio->uio_offset - 1,
1194 		    POSIX_FADV_DONTNEED);
1195 unlock:
1196 	return (error);
1197 }
1198 
1199 /*
1200  * The vn_io_fault() is a wrapper around vn_read() and vn_write() to
1201  * prevent the following deadlock:
1202  *
1203  * Assume that the thread A reads from the vnode vp1 into userspace
1204  * buffer buf1 backed by the pages of vnode vp2.  If a page in buf1 is
1205  * currently not resident, then the system ends up with the call chain
1206  *   vn_read() -> VOP_READ(vp1) -> uiomove() -> [Page Fault] ->
1207  *     vm_fault(buf1) -> vnode_pager_getpages(vp2) -> VOP_GETPAGES(vp2)
1208  * which establishes lock order vp1->vn_lock, then vp2->vn_lock.
1209  * If, at the same time, thread B reads from vnode vp2 into buffer buf2
1210  * backed by the pages of vnode vp1, and some page in buf2 is not
1211  * resident, we get a reversed order vp2->vn_lock, then vp1->vn_lock.
1212  *
1213  * To prevent the lock order reversal and deadlock, vn_io_fault() does
1214  * not allow page faults to happen during VOP_READ() or VOP_WRITE().
1215  * Instead, it first tries to do the whole range i/o with pagefaults
1216  * disabled. If all pages in the i/o buffer are resident and mapped,
1217  * VOP will succeed (ignoring the genuine filesystem errors).
1218  * Otherwise, we get back EFAULT, and vn_io_fault() falls back to do
1219  * i/o in chunks, with all pages in the chunk prefaulted and held
1220  * using vm_fault_quick_hold_pages().
1221  *
1222  * Filesystems using this deadlock avoidance scheme should use the
1223  * array of the held pages from uio, saved in the curthread->td_ma,
1224  * instead of doing uiomove().  A helper function
1225  * vn_io_fault_uiomove() converts uiomove request into
1226  * uiomove_fromphys() over td_ma array.
1227  *
1228  * Since vnode locks do not cover the whole i/o anymore, rangelocks
1229  * make the current i/o request atomic with respect to other i/os and
1230  * truncations.
1231  */
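
/*
 * Illustrative sketch (editorial addition): a filesystem that sets
 * MNTK_NO_IOPF substitutes vn_io_fault_uiomove() for plain uiomove() in
 * the copy loop of its VOP_READ()/VOP_WRITE() implementation; the block
 * bookkeeping below (lbn, bsize, blkoff, xfersize) is hypothetical.
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error == 0)
 *		error = vn_io_fault_uiomove((char *)bp->b_data + blkoff,
 *		    xfersize, uio);
 *	brelse(bp);
 *
 * When vn_io_fault1() has held the user pages (TDP_UIOHELD is set), the
 * helper copies through the td_ma page array with uiomove_fromphys();
 * otherwise it falls back to plain uiomove().
 */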
1232 
1233 /*
1234  * Decode vn_io_fault_args and perform the corresponding i/o.
1235  */
1236 static int
1237 vn_io_fault_doio(struct vn_io_fault_args *args, struct uio *uio,
1238     struct thread *td)
1239 {
1240 	int error, save;
1241 
1242 	error = 0;
1243 	save = vm_fault_disable_pagefaults();
1244 	switch (args->kind) {
1245 	case VN_IO_FAULT_FOP:
1246 		error = (args->args.fop_args.doio)(args->args.fop_args.fp,
1247 		    uio, args->cred, args->flags, td);
1248 		break;
1249 	case VN_IO_FAULT_VOP:
1250 		if (uio->uio_rw == UIO_READ) {
1251 			error = VOP_READ(args->args.vop_args.vp, uio,
1252 			    args->flags, args->cred);
1253 		} else if (uio->uio_rw == UIO_WRITE) {
1254 			error = VOP_WRITE(args->args.vop_args.vp, uio,
1255 			    args->flags, args->cred);
1256 		}
1257 		break;
1258 	default:
1259 		panic("vn_io_fault_doio: unknown kind of io %d %d",
1260 		    args->kind, uio->uio_rw);
1261 	}
1262 	vm_fault_enable_pagefaults(save);
1263 	return (error);
1264 }
1265 
1266 static int
1267 vn_io_fault_touch(char *base, const struct uio *uio)
1268 {
1269 	int r;
1270 
1271 	r = fubyte(base);
1272 	if (r == -1 || (uio->uio_rw == UIO_READ && subyte(base, r) == -1))
1273 		return (EFAULT);
1274 	return (0);
1275 }
1276 
1277 static int
1278 vn_io_fault_prefault_user(const struct uio *uio)
1279 {
1280 	char *base;
1281 	const struct iovec *iov;
1282 	size_t len;
1283 	ssize_t resid;
1284 	int error, i;
1285 
1286 	KASSERT(uio->uio_segflg == UIO_USERSPACE,
1287 	    ("vn_io_fault_prefault userspace"));
1288 
1289 	error = i = 0;
1290 	iov = uio->uio_iov;
1291 	resid = uio->uio_resid;
1292 	base = iov->iov_base;
1293 	len = iov->iov_len;
1294 	while (resid > 0) {
1295 		error = vn_io_fault_touch(base, uio);
1296 		if (error != 0)
1297 			break;
1298 		if (len < PAGE_SIZE) {
1299 			if (len != 0) {
1300 				error = vn_io_fault_touch(base + len - 1, uio);
1301 				if (error != 0)
1302 					break;
1303 				resid -= len;
1304 			}
1305 			if (++i >= uio->uio_iovcnt)
1306 				break;
1307 			iov = uio->uio_iov + i;
1308 			base = iov->iov_base;
1309 			len = iov->iov_len;
1310 		} else {
1311 			len -= PAGE_SIZE;
1312 			base += PAGE_SIZE;
1313 			resid -= PAGE_SIZE;
1314 		}
1315 	}
1316 	return (error);
1317 }
1318 
1319 /*
1320  * Common code for vn_io_fault(), agnostic to the kind of i/o request.
1321  * Uses vn_io_fault_doio() to make the call to an actual i/o function.
1322  * Used from vn_rdwr() and vn_io_fault(), which encode the i/o request
1323  * into args and call vn_io_fault1() to handle faults during the user
1324  * mode buffer accesses.
1325  */
1326 static int
1327 vn_io_fault1(struct vnode *vp, struct uio *uio, struct vn_io_fault_args *args,
1328     struct thread *td)
1329 {
1330 	vm_page_t ma[io_hold_cnt + 2];
1331 	struct uio *uio_clone, short_uio;
1332 	struct iovec short_iovec[1];
1333 	vm_page_t *prev_td_ma;
1334 	vm_prot_t prot;
1335 	vm_offset_t addr, end;
1336 	size_t len, resid;
1337 	ssize_t adv;
1338 	int error, cnt, saveheld, prev_td_ma_cnt;
1339 
1340 	if (vn_io_fault_prefault) {
1341 		error = vn_io_fault_prefault_user(uio);
1342 		if (error != 0)
1343 			return (error); /* Or ignore ? */
1344 	}
1345 
1346 	prot = uio->uio_rw == UIO_READ ? VM_PROT_WRITE : VM_PROT_READ;
1347 
1348 	/*
1349 	 * UFS follows the IO_UNIT directive and rolls back both
1350 	 * uio_offset and uio_resid if an error is encountered during the
1351 	 * operation.  But, since the iovec may already be advanced,
1352 	 * uio is still in an inconsistent state.
1353 	 *
1354 	 * Cache a copy of the original uio, which is advanced to the redo
1355 	 * point using UIO_NOCOPY below.
1356 	 */
1357 	uio_clone = cloneuio(uio);
1358 	resid = uio->uio_resid;
1359 
1360 	short_uio.uio_segflg = UIO_USERSPACE;
1361 	short_uio.uio_rw = uio->uio_rw;
1362 	short_uio.uio_td = uio->uio_td;
1363 
1364 	error = vn_io_fault_doio(args, uio, td);
1365 	if (error != EFAULT)
1366 		goto out;
1367 
1368 	atomic_add_long(&vn_io_faults_cnt, 1);
1369 	uio_clone->uio_segflg = UIO_NOCOPY;
1370 	uiomove(NULL, resid - uio->uio_resid, uio_clone);
1371 	uio_clone->uio_segflg = uio->uio_segflg;
1372 
1373 	saveheld = curthread_pflags_set(TDP_UIOHELD);
1374 	prev_td_ma = td->td_ma;
1375 	prev_td_ma_cnt = td->td_ma_cnt;
1376 
1377 	while (uio_clone->uio_resid != 0) {
1378 		len = uio_clone->uio_iov->iov_len;
1379 		if (len == 0) {
1380 			KASSERT(uio_clone->uio_iovcnt >= 1,
1381 			    ("iovcnt underflow"));
1382 			uio_clone->uio_iov++;
1383 			uio_clone->uio_iovcnt--;
1384 			continue;
1385 		}
1386 		if (len > ptoa(io_hold_cnt))
1387 			len = ptoa(io_hold_cnt);
1388 		addr = (uintptr_t)uio_clone->uio_iov->iov_base;
1389 		end = round_page(addr + len);
1390 		if (end < addr) {
1391 			error = EFAULT;
1392 			break;
1393 		}
1394 		/*
1395 		 * A perfectly misaligned address and length could cause
1396 		 * both the start and the end of the chunk to use a partial
1397 		 * page.  The +2 accounts for such a situation.
1398 		 */
1399 		cnt = vm_fault_quick_hold_pages(&td->td_proc->p_vmspace->vm_map,
1400 		    addr, len, prot, ma, io_hold_cnt + 2);
1401 		if (cnt == -1) {
1402 			error = EFAULT;
1403 			break;
1404 		}
1405 		short_uio.uio_iov = &short_iovec[0];
1406 		short_iovec[0].iov_base = (void *)addr;
1407 		short_uio.uio_iovcnt = 1;
1408 		short_uio.uio_resid = short_iovec[0].iov_len = len;
1409 		short_uio.uio_offset = uio_clone->uio_offset;
1410 		td->td_ma = ma;
1411 		td->td_ma_cnt = cnt;
1412 
1413 		error = vn_io_fault_doio(args, &short_uio, td);
1414 		vm_page_unhold_pages(ma, cnt);
1415 		adv = len - short_uio.uio_resid;
1416 
1417 		uio_clone->uio_iov->iov_base =
1418 		    (char *)uio_clone->uio_iov->iov_base + adv;
1419 		uio_clone->uio_iov->iov_len -= adv;
1420 		uio_clone->uio_resid -= adv;
1421 		uio_clone->uio_offset += adv;
1422 
1423 		uio->uio_resid -= adv;
1424 		uio->uio_offset += adv;
1425 
1426 		if (error != 0 || adv == 0)
1427 			break;
1428 	}
1429 	td->td_ma = prev_td_ma;
1430 	td->td_ma_cnt = prev_td_ma_cnt;
1431 	curthread_pflags_restore(saveheld);
1432 out:
1433 	free(uio_clone, M_IOV);
1434 	return (error);
1435 }
1436 
1437 static int
1438 vn_io_fault(struct file *fp, struct uio *uio, struct ucred *active_cred,
1439     int flags, struct thread *td)
1440 {
1441 	fo_rdwr_t *doio;
1442 	struct vnode *vp;
1443 	void *rl_cookie;
1444 	struct vn_io_fault_args args;
1445 	int error;
1446 	bool rl_locked;
1447 
1448 	doio = uio->uio_rw == UIO_READ ? vn_read : vn_write;
1449 	vp = fp->f_vnode;
1450 
1451 	/*
1452 	 * The ability to read(2) on a directory has historically been
1453 	 * allowed for all users, but it can be and has been the source of
1454 	 * at least one security issue in the past.  As such, it is now
1455 	 * hidden behind a sysctl for those who actually need it, and is
1456 	 * restricted to root when enabled, to make it relatively safe to
1457 	 * leave on for longer sessions.
1458 	 */
1459 	if (vp->v_type == VDIR) {
1460 		KASSERT(uio->uio_rw == UIO_READ,
1461 		    ("illegal write attempted on a directory"));
1462 		if (!vfs_allow_read_dir)
1463 			return (EISDIR);
1464 		if ((error = priv_check(td, PRIV_VFS_READ_DIR)) != 0)
1465 			return (EISDIR);
1466 	}
1467 
1468 	foffset_lock_uio(fp, uio, flags);
1469 	if (vp->v_type == VREG) {
1470 		if (uio->uio_rw == UIO_READ) {
1471 			rl_cookie = vn_rangelock_rlock(vp, uio->uio_offset,
1472 			    uio->uio_offset + uio->uio_resid);
1473 		} else if ((fp->f_flag & O_APPEND) != 0 ||
1474 		    (flags & FOF_OFFSET) == 0) {
1475 			/* For appenders, punt and lock the whole range. */
1476 			rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
1477 		} else {
1478 			rl_cookie = vn_rangelock_wlock(vp, uio->uio_offset,
1479 			    uio->uio_offset + uio->uio_resid);
1480 		}
1481 		rl_locked = true;
1482 	} else {
1483 		rl_locked = false;
1484 	}
1485 	if (do_vn_io_fault(vp, uio)) {
1486 		args.kind = VN_IO_FAULT_FOP;
1487 		args.args.fop_args.fp = fp;
1488 		args.args.fop_args.doio = doio;
1489 		args.cred = active_cred;
1490 		args.flags = flags | FOF_OFFSET;
1491 		error = vn_io_fault1(vp, uio, &args, td);
1492 	} else {
1493 		error = doio(fp, uio, active_cred, flags | FOF_OFFSET, td);
1494 	}
1495 	if (rl_locked)
1496 		vn_rangelock_unlock(vp, rl_cookie);
1497 	foffset_unlock_uio(fp, uio, flags);
1498 	return (error);
1499 }
1500 
1501 /*
1502  * Helper function to perform the requested uiomove operation using
1503  * the held pages for io->uio_iov[0].iov_base buffer instead of
1504  * copyin/copyout.  Access to the pages with uiomove_fromphys()
1505  * instead of iov_base prevents page faults that could occur due to
1506  * pmap_collect() invalidating the mapping created by
1507  * vm_fault_quick_hold_pages(), or pageout daemon, page laundry or
1508  * object cleanup revoking the write access from page mappings.
1509  *
1510  * Filesystems specified MNTK_NO_IOPF shall use vn_io_fault_uiomove()
1511  * instead of plain uiomove().
1512  */
1513 int
1514 vn_io_fault_uiomove(char *data, int xfersize, struct uio *uio)
1515 {
1516 	struct uio transp_uio;
1517 	struct iovec transp_iov[1];
1518 	struct thread *td;
1519 	size_t adv;
1520 	int error, pgadv;
1521 
1522 	td = curthread;
1523 	if ((td->td_pflags & TDP_UIOHELD) == 0 ||
1524 	    uio->uio_segflg != UIO_USERSPACE)
1525 		return (uiomove(data, xfersize, uio));
1526 
1527 	KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt));
1528 	transp_iov[0].iov_base = data;
1529 	transp_uio.uio_iov = &transp_iov[0];
1530 	transp_uio.uio_iovcnt = 1;
1531 	if (xfersize > uio->uio_resid)
1532 		xfersize = uio->uio_resid;
1533 	transp_uio.uio_resid = transp_iov[0].iov_len = xfersize;
1534 	transp_uio.uio_offset = 0;
1535 	transp_uio.uio_segflg = UIO_SYSSPACE;
1536 	/*
1537 	 * Since transp_iov points to data, and td_ma page array
1538 	 * corresponds to original uio->uio_iov, we need to invert the
1539 	 * direction of the i/o operation as passed to
1540 	 * uiomove_fromphys().
1541 	 */
1542 	switch (uio->uio_rw) {
1543 	case UIO_WRITE:
1544 		transp_uio.uio_rw = UIO_READ;
1545 		break;
1546 	case UIO_READ:
1547 		transp_uio.uio_rw = UIO_WRITE;
1548 		break;
1549 	}
1550 	transp_uio.uio_td = uio->uio_td;
1551 	error = uiomove_fromphys(td->td_ma,
1552 	    ((vm_offset_t)uio->uio_iov->iov_base) & PAGE_MASK,
1553 	    xfersize, &transp_uio);
1554 	adv = xfersize - transp_uio.uio_resid;
1555 	pgadv =
1556 	    (((vm_offset_t)uio->uio_iov->iov_base + adv) >> PAGE_SHIFT) -
1557 	    (((vm_offset_t)uio->uio_iov->iov_base) >> PAGE_SHIFT);
1558 	td->td_ma += pgadv;
1559 	KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt,
1560 	    pgadv));
1561 	td->td_ma_cnt -= pgadv;
1562 	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + adv;
1563 	uio->uio_iov->iov_len -= adv;
1564 	uio->uio_resid -= adv;
1565 	uio->uio_offset += adv;
1566 	return (error);
1567 }
1568 
1569 int
1570 vn_io_fault_pgmove(vm_page_t ma[], vm_offset_t offset, int xfersize,
1571     struct uio *uio)
1572 {
1573 	struct thread *td;
1574 	vm_offset_t iov_base;
1575 	int cnt, pgadv;
1576 
1577 	td = curthread;
1578 	if ((td->td_pflags & TDP_UIOHELD) == 0 ||
1579 	    uio->uio_segflg != UIO_USERSPACE)
1580 		return (uiomove_fromphys(ma, offset, xfersize, uio));
1581 
1582 	KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt));
1583 	cnt = xfersize > uio->uio_resid ? uio->uio_resid : xfersize;
1584 	iov_base = (vm_offset_t)uio->uio_iov->iov_base;
1585 	switch (uio->uio_rw) {
1586 	case UIO_WRITE:
1587 		pmap_copy_pages(td->td_ma, iov_base & PAGE_MASK, ma,
1588 		    offset, cnt);
1589 		break;
1590 	case UIO_READ:
1591 		pmap_copy_pages(ma, offset, td->td_ma, iov_base & PAGE_MASK,
1592 		    cnt);
1593 		break;
1594 	}
1595 	pgadv = ((iov_base + cnt) >> PAGE_SHIFT) - (iov_base >> PAGE_SHIFT);
1596 	td->td_ma += pgadv;
1597 	KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt,
1598 	    pgadv));
1599 	td->td_ma_cnt -= pgadv;
1600 	uio->uio_iov->iov_base = (char *)(iov_base + cnt);
1601 	uio->uio_iov->iov_len -= cnt;
1602 	uio->uio_resid -= cnt;
1603 	uio->uio_offset += cnt;
1604 	return (0);
1605 }
1606 
1607 /*
1608  * File table truncate routine.
1609  */
1610 static int
1611 vn_truncate(struct file *fp, off_t length, struct ucred *active_cred,
1612     struct thread *td)
1613 {
1614 	struct mount *mp;
1615 	struct vnode *vp;
1616 	void *rl_cookie;
1617 	int error;
1618 
1619 	vp = fp->f_vnode;
1620 
1621 retry:
1622 	/*
1623 	 * Lock the whole range for truncation.  Otherwise split i/o
1624 	 * might happen partly before and partly after the truncation.
1625 	 */
1626 	rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
1627 	error = vn_start_write(vp, &mp, V_WAIT | V_PCATCH);
1628 	if (error)
1629 		goto out1;
1630 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1631 	AUDIT_ARG_VNODE1(vp);
1632 	if (vp->v_type == VDIR) {
1633 		error = EISDIR;
1634 		goto out;
1635 	}
1636 #ifdef MAC
1637 	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
1638 	if (error)
1639 		goto out;
1640 #endif
1641 	error = vn_truncate_locked(vp, length, (fp->f_flag & O_FSYNC) != 0,
1642 	    fp->f_cred);
1643 out:
1644 	VOP_UNLOCK(vp);
1645 	vn_finished_write(mp);
1646 out1:
1647 	vn_rangelock_unlock(vp, rl_cookie);
1648 	if (error == ERELOOKUP)
1649 		goto retry;
1650 	return (error);
1651 }
1652 
1653 /*
1654  * Truncate a file that is already locked.
1655  */
1656 int
1657 vn_truncate_locked(struct vnode *vp, off_t length, bool sync,
1658     struct ucred *cred)
1659 {
1660 	struct vattr vattr;
1661 	int error;
1662 
1663 	error = VOP_ADD_WRITECOUNT(vp, 1);
1664 	if (error == 0) {
1665 		VATTR_NULL(&vattr);
1666 		vattr.va_size = length;
1667 		if (sync)
1668 			vattr.va_vaflags |= VA_SYNC;
1669 		error = VOP_SETATTR(vp, &vattr, cred);
1670 		VOP_ADD_WRITECOUNT_CHECKED(vp, -1);
1671 	}
1672 	return (error);
1673 }
1674 
1675 /*
1676  * File table vnode stat routine.
1677  */
1678 int
1679 vn_statfile(struct file *fp, struct stat *sb, struct ucred *active_cred)
1680 {
1681 	struct vnode *vp = fp->f_vnode;
1682 	int error;
1683 
1684 	vn_lock(vp, LK_SHARED | LK_RETRY);
1685 	error = VOP_STAT(vp, sb, active_cred, fp->f_cred);
1686 	VOP_UNLOCK(vp);
1687 
1688 	return (error);
1689 }
1690 
1691 /*
1692  * File table vnode ioctl routine.
1693  */
1694 static int
1695 vn_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
1696     struct thread *td)
1697 {
1698 	struct vnode *vp;
1699 	struct fiobmap2_arg *bmarg;
1700 	off_t size;
1701 	int error;
1702 
1703 	vp = fp->f_vnode;
1704 	switch (vp->v_type) {
1705 	case VDIR:
1706 	case VREG:
1707 		switch (com) {
1708 		case FIONREAD:
1709 			error = vn_getsize(vp, &size, active_cred);
1710 			if (error == 0)
1711 				*(int *)data = size - fp->f_offset;
1712 			return (error);
1713 		case FIOBMAP2:
1714 			bmarg = (struct fiobmap2_arg *)data;
1715 			vn_lock(vp, LK_SHARED | LK_RETRY);
1716 #ifdef MAC
1717 			error = mac_vnode_check_read(active_cred, fp->f_cred,
1718 			    vp);
1719 			if (error == 0)
1720 #endif
1721 				error = VOP_BMAP(vp, bmarg->bn, NULL,
1722 				    &bmarg->bn, &bmarg->runp, &bmarg->runb);
1723 			VOP_UNLOCK(vp);
1724 			return (error);
1725 		case FIONBIO:
1726 		case FIOASYNC:
1727 			return (0);
1728 		default:
1729 			return (VOP_IOCTL(vp, com, data, fp->f_flag,
1730 			    active_cred, td));
1731 		}
1732 		break;
1733 	case VCHR:
1734 		return (VOP_IOCTL(vp, com, data, fp->f_flag,
1735 		    active_cred, td));
1736 	default:
1737 		return (ENOTTY);
1738 	}
1739 }
1740 
1741 /*
1742  * File table vnode poll routine.
1743  */
1744 static int
1745 vn_poll(struct file *fp, int events, struct ucred *active_cred,
1746     struct thread *td)
1747 {
1748 	struct vnode *vp;
1749 	int error;
1750 
1751 	vp = fp->f_vnode;
1752 #if defined(MAC) || defined(AUDIT)
1753 	if (AUDITING_TD(td) || mac_vnode_check_poll_enabled()) {
1754 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1755 		AUDIT_ARG_VNODE1(vp);
1756 		error = mac_vnode_check_poll(active_cred, fp->f_cred, vp);
1757 		VOP_UNLOCK(vp);
1758 		if (error != 0)
1759 			return (error);
1760 	}
1761 #endif
1762 	error = VOP_POLL(vp, events, fp->f_cred, td);
1763 	return (error);
1764 }
1765 
1766 /*
1767  * Acquire the requested lock and then check for validity.  LK_RETRY
1768  * permits vn_lock to return doomed vnodes.
1769  */
1770 static int __noinline
1771 _vn_lock_fallback(struct vnode *vp, int flags, const char *file, int line,
1772     int error)
1773 {
1774 
1775 	KASSERT((flags & LK_RETRY) == 0 || error == 0,
1776 	    ("vn_lock: error %d incompatible with flags %#x", error, flags));
1777 
1778 	if (error == 0)
1779 		VNASSERT(VN_IS_DOOMED(vp), vp, ("vnode not doomed"));
1780 
1781 	if ((flags & LK_RETRY) == 0) {
1782 		if (error == 0) {
1783 			VOP_UNLOCK(vp);
1784 			error = ENOENT;
1785 		}
1786 		return (error);
1787 	}
1788 
1789 	/*
1790 	 * LK_RETRY case.
1791 	 *
1792 	 * Nothing to do if we got the lock.
1793 	 */
1794 	if (error == 0)
1795 		return (0);
1796 
1797 	/*
1798 	 * Interlock was dropped by the call in _vn_lock.
1799 	 */
1800 	flags &= ~LK_INTERLOCK;
1801 	do {
1802 		error = VOP_LOCK1(vp, flags, file, line);
1803 	} while (error != 0);
1804 	return (0);
1805 }
1806 
1807 int
1808 _vn_lock(struct vnode *vp, int flags, const char *file, int line)
1809 {
1810 	int error;
1811 
1812 	VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
1813 	    ("vn_lock: no locktype (%d passed)", flags));
1814 	VNPASS(vp->v_holdcnt > 0, vp);
1815 	error = VOP_LOCK1(vp, flags, file, line);
1816 	if (__predict_false(error != 0 || VN_IS_DOOMED(vp)))
1817 		return (_vn_lock_fallback(vp, flags, file, line, error));
1818 	return (0);
1819 }
1820 
1821 /*
1822  * File table vnode close routine.
1823  */
1824 static int
1825 vn_closefile(struct file *fp, struct thread *td)
1826 {
1827 	struct vnode *vp;
1828 	struct flock lf;
1829 	int error;
1830 	bool ref;
1831 
1832 	vp = fp->f_vnode;
1833 	fp->f_ops = &badfileops;
1834 	ref = (fp->f_flag & FHASLOCK) != 0;
1835 
1836 	error = vn_close1(vp, fp->f_flag, fp->f_cred, td, ref);
1837 
1838 	if (__predict_false(ref)) {
1839 		lf.l_whence = SEEK_SET;
1840 		lf.l_start = 0;
1841 		lf.l_len = 0;
1842 		lf.l_type = F_UNLCK;
1843 		(void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf, F_FLOCK);
1844 		vrele(vp);
1845 	}
1846 	return (error);
1847 }
1848 
1849 /*
1850  * Preparing to start a filesystem write operation. If the operation is
1851  * permitted, then we bump the count of operations in progress and
1852  * proceed. If a suspend request is in progress, we wait until the
1853  * suspension is over, and then proceed.
1854  */
1855 static int
1856 vn_start_write_refed(struct mount *mp, int flags, bool mplocked)
1857 {
1858 	struct mount_pcpu *mpcpu;
1859 	int error, mflags;
1860 
1861 	if (__predict_true(!mplocked) && (flags & V_XSLEEP) == 0 &&
1862 	    vfs_op_thread_enter(mp, mpcpu)) {
1863 		MPASS((mp->mnt_kern_flag & MNTK_SUSPEND) == 0);
1864 		vfs_mp_count_add_pcpu(mpcpu, writeopcount, 1);
1865 		vfs_op_thread_exit(mp, mpcpu);
1866 		return (0);
1867 	}
1868 
1869 	if (mplocked)
1870 		mtx_assert(MNT_MTX(mp), MA_OWNED);
1871 	else
1872 		MNT_ILOCK(mp);
1873 
1874 	error = 0;
1875 
1876 	/*
1877 	 * Check on status of suspension.
1878 	 */
1879 	if ((curthread->td_pflags & TDP_IGNSUSP) == 0 ||
1880 	    mp->mnt_susp_owner != curthread) {
1881 		mflags = 0;
1882 		if ((mp->mnt_vfc->vfc_flags & VFCF_SBDRY) != 0) {
1883 			if (flags & V_PCATCH)
1884 				mflags |= PCATCH;
1885 		}
1886 		mflags |= (PUSER - 1);
1887 		while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
1888 			if ((flags & V_NOWAIT) != 0) {
1889 				error = EWOULDBLOCK;
1890 				goto unlock;
1891 			}
1892 			error = msleep(&mp->mnt_flag, MNT_MTX(mp), mflags,
1893 			    "suspfs", 0);
1894 			if (error != 0)
1895 				goto unlock;
1896 		}
1897 	}
1898 	if ((flags & V_XSLEEP) != 0)
1899 		goto unlock;
1900 	mp->mnt_writeopcount++;
1901 unlock:
1902 	if (error != 0 || (flags & V_XSLEEP) != 0)
1903 		MNT_REL(mp);
1904 	MNT_IUNLOCK(mp);
1905 	return (error);
1906 }
1907 
1908 int
1909 vn_start_write(struct vnode *vp, struct mount **mpp, int flags)
1910 {
1911 	struct mount *mp;
1912 	int error;
1913 
1914 	KASSERT((flags & ~V_VALID_FLAGS) == 0,
1915 	    ("%s: invalid flags passed %d\n", __func__, flags));
1916 
1917 	error = 0;
1918 	/*
1919 	 * If a vnode is provided, get and return the mount point to
1920 	 * which it will write.
1921 	 */
1922 	if (vp != NULL) {
1923 		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
1924 			*mpp = NULL;
1925 			if (error != EOPNOTSUPP)
1926 				return (error);
1927 			return (0);
1928 		}
1929 	}
1930 	if ((mp = *mpp) == NULL)
1931 		return (0);
1932 
1933 	/*
1934 	 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
1935 	 * a vfs_ref().
1936 	 * If a vnode was not provided, we need to acquire a refcount
1937 	 * on the provided mountpoint ourselves, in order to
1938 	 * emulate a vfs_ref().
1939 	 */
1940 	if (vp == NULL)
1941 		vfs_ref(mp);
1942 
1943 	error = vn_start_write_refed(mp, flags, false);
1944 	if (error != 0 && (flags & V_NOWAIT) == 0)
1945 		*mpp = NULL;
1946 	return (error);
1947 }
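
/*
 * Typical write bracket (an illustrative sketch, not additional API):
 *
 *	error = vn_start_write(vp, &mp, V_WAIT);
 *	if (error != 0)
 *		return (error);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	... modify the vnode ...
 *	VOP_UNLOCK(vp);
 *	vn_finished_write(mp);
 */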
1948 
1949 /*
1950  * Secondary suspension. Used by operations such as vop_inactive
1951  * routines that are needed by the higher level functions. These
1952  * are allowed to proceed until all the higher level functions have
1953  * completed (indicated by mnt_writeopcount dropping to zero). At that
1954  * time, these operations are halted until the suspension is over.
1955  */
1956 int
1957 vn_start_secondary_write(struct vnode *vp, struct mount **mpp, int flags)
1958 {
1959 	struct mount *mp;
1960 	int error, mflags;
1961 
1962 	KASSERT((flags & (~V_VALID_FLAGS | V_XSLEEP)) == 0,
1963 	    ("%s: invalid flags passed %d\n", __func__, flags));
1964 
1965  retry:
1966 	if (vp != NULL) {
1967 		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
1968 			*mpp = NULL;
1969 			if (error != EOPNOTSUPP)
1970 				return (error);
1971 			return (0);
1972 		}
1973 	}
1974 	/*
1975 	 * If we are not suspended or have not yet reached suspended
1976 	 * mode, then let the operation proceed.
1977 	 */
1978 	if ((mp = *mpp) == NULL)
1979 		return (0);
1980 
1981 	/*
1982 	 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
1983 	 * a vfs_ref().
1984 	 * If a vnode was not provided, we need to acquire a refcount
1985 	 * on the provided mountpoint ourselves, in order to
1986 	 * emulate a vfs_ref().
1987 	 */
1988 	MNT_ILOCK(mp);
1989 	if (vp == NULL)
1990 		MNT_REF(mp);
1991 	if ((mp->mnt_kern_flag & (MNTK_SUSPENDED | MNTK_SUSPEND2)) == 0) {
1992 		mp->mnt_secondary_writes++;
1993 		mp->mnt_secondary_accwrites++;
1994 		MNT_IUNLOCK(mp);
1995 		return (0);
1996 	}
1997 	if ((flags & V_NOWAIT) != 0) {
1998 		MNT_REL(mp);
1999 		MNT_IUNLOCK(mp);
2000 		*mpp = NULL;
2001 		return (EWOULDBLOCK);
2002 	}
2003 	/*
2004 	 * Wait for the suspension to finish.
2005 	 */
2006 	mflags = 0;
2007 	if ((mp->mnt_vfc->vfc_flags & VFCF_SBDRY) != 0) {
2008 		if ((flags & V_PCATCH) != 0)
2009 			mflags |= PCATCH;
2010 	}
2011 	mflags |= (PUSER - 1) | PDROP;
2012 	error = msleep(&mp->mnt_flag, MNT_MTX(mp), mflags, "suspfs", 0);
2013 	vfs_rel(mp);
2014 	if (error == 0)
2015 		goto retry;
2016 	*mpp = NULL;
2017 	return (error);
2018 }
2019 
2020 /*
2021  * Filesystem write operation has completed. If we are suspending and this
2022  * operation is the last one, notify the suspender that the suspension is
2023  * now in effect.
2024  */
2025 void
2026 vn_finished_write(struct mount *mp)
2027 {
2028 	struct mount_pcpu *mpcpu;
2029 	int c;
2030 
2031 	if (mp == NULL)
2032 		return;
2033 
2034 	if (vfs_op_thread_enter(mp, mpcpu)) {
2035 		vfs_mp_count_sub_pcpu(mpcpu, writeopcount, 1);
2036 		vfs_mp_count_sub_pcpu(mpcpu, ref, 1);
2037 		vfs_op_thread_exit(mp, mpcpu);
2038 		return;
2039 	}
2040 
2041 	MNT_ILOCK(mp);
2042 	vfs_assert_mount_counters(mp);
2043 	MNT_REL(mp);
2044 	c = --mp->mnt_writeopcount;
2045 	if (mp->mnt_vfs_ops == 0) {
2046 		MPASS((mp->mnt_kern_flag & MNTK_SUSPEND) == 0);
2047 		MNT_IUNLOCK(mp);
2048 		return;
2049 	}
2050 	if (c < 0)
2051 		vfs_dump_mount_counters(mp);
2052 	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 && c == 0)
2053 		wakeup(&mp->mnt_writeopcount);
2054 	MNT_IUNLOCK(mp);
2055 }
2056 
2057 /*
2058  * Filesystem secondary write operation has completed. If we are
2059  * suspending and this operation is the last one, notify the suspender
2060  * that the suspension is now in effect.
2061  */
2062 void
2063 vn_finished_secondary_write(struct mount *mp)
2064 {
2065 	if (mp == NULL)
2066 		return;
2067 	MNT_ILOCK(mp);
2068 	MNT_REL(mp);
2069 	mp->mnt_secondary_writes--;
2070 	if (mp->mnt_secondary_writes < 0)
2071 		panic("vn_finished_secondary_write: neg cnt");
2072 	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
2073 	    mp->mnt_secondary_writes <= 0)
2074 		wakeup(&mp->mnt_secondary_writes);
2075 	MNT_IUNLOCK(mp);
2076 }
2077 
2078 /*
2079  * Request a filesystem to suspend write operations.
2080  */
2081 int
2082 vfs_write_suspend(struct mount *mp, int flags)
2083 {
2084 	int error;
2085 
2086 	vfs_op_enter(mp);
2087 
2088 	MNT_ILOCK(mp);
2089 	vfs_assert_mount_counters(mp);
2090 	if (mp->mnt_susp_owner == curthread) {
2091 		vfs_op_exit_locked(mp);
2092 		MNT_IUNLOCK(mp);
2093 		return (EALREADY);
2094 	}
2095 	while (mp->mnt_kern_flag & MNTK_SUSPEND)
2096 		msleep(&mp->mnt_flag, MNT_MTX(mp), PUSER - 1, "wsuspfs", 0);
2097 
2098 	/*
2099 	 * Unmount holds a write reference on the mount point.  If we
2100 	 * own a busy reference and drain for writers, we deadlock with
2101 	 * the reference draining in the unmount path.  Callers of
2102 	 * vfs_write_suspend() must specify VS_SKIP_UNMOUNT if a
2103 	 * vfs_busy() reference is owned and the caller is not in the
2104 	 * unmount context.
2105 	 */
2106 	if ((flags & VS_SKIP_UNMOUNT) != 0 &&
2107 	    (mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) {
2108 		vfs_op_exit_locked(mp);
2109 		MNT_IUNLOCK(mp);
2110 		return (EBUSY);
2111 	}
2112 
2113 	mp->mnt_kern_flag |= MNTK_SUSPEND;
2114 	mp->mnt_susp_owner = curthread;
2115 	if (mp->mnt_writeopcount > 0)
2116 		(void) msleep(&mp->mnt_writeopcount,
2117 		    MNT_MTX(mp), (PUSER - 1)|PDROP, "suspwt", 0);
2118 	else
2119 		MNT_IUNLOCK(mp);
2120 	if ((error = VFS_SYNC(mp, MNT_SUSPEND)) != 0) {
2121 		vfs_write_resume(mp, 0);
2122 		/* vfs_write_resume does vfs_op_exit() for us */
2123 	}
2124 	return (error);
2125 }
2126 
2127 /*
2128  * Request a filesystem to resume write operations.
2129  */
2130 void
2131 vfs_write_resume(struct mount *mp, int flags)
2132 {
2133 
2134 	MNT_ILOCK(mp);
2135 	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
2136 		KASSERT(mp->mnt_susp_owner == curthread, ("mnt_susp_owner"));
2137 		mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPEND2 |
2138 				       MNTK_SUSPENDED);
2139 		mp->mnt_susp_owner = NULL;
2140 		wakeup(&mp->mnt_writeopcount);
2141 		wakeup(&mp->mnt_flag);
2142 		curthread->td_pflags &= ~TDP_IGNSUSP;
2143 		if ((flags & VR_START_WRITE) != 0) {
2144 			MNT_REF(mp);
2145 			mp->mnt_writeopcount++;
2146 		}
2147 		MNT_IUNLOCK(mp);
2148 		if ((flags & VR_NO_SUSPCLR) == 0)
2149 			VFS_SUSP_CLEAN(mp);
2150 		vfs_op_exit(mp);
2151 	} else if ((flags & VR_START_WRITE) != 0) {
2152 		MNT_REF(mp);
2153 		vn_start_write_refed(mp, 0, true);
2154 	} else {
2155 		MNT_IUNLOCK(mp);
2156 	}
2157 }
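
/*
 * Illustrative suspend/resume bracket (a sketch only; real consumers such
 * as snapshot code pass additional flags):
 *
 *	error = vfs_write_suspend(mp, 0);
 *	if (error != 0)
 *		return (error);
 *	... operate on the quiesced filesystem ...
 *	vfs_write_resume(mp, 0);
 */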
2158 
2159 /*
2160  * Helper loop around vfs_write_suspend() for filesystem unmount VFS
2161  * methods.
2162  */
2163 int
2164 vfs_write_suspend_umnt(struct mount *mp)
2165 {
2166 	int error;
2167 
2168 	KASSERT((curthread->td_pflags & TDP_IGNSUSP) == 0,
2169 	    ("vfs_write_suspend_umnt: recursed"));
2170 
2171 	/* dounmount() already called vn_start_write(). */
2172 	for (;;) {
2173 		vn_finished_write(mp);
2174 		error = vfs_write_suspend(mp, 0);
2175 		if (error != 0) {
2176 			vn_start_write(NULL, &mp, V_WAIT);
2177 			return (error);
2178 		}
2179 		MNT_ILOCK(mp);
2180 		if ((mp->mnt_kern_flag & MNTK_SUSPENDED) != 0)
2181 			break;
2182 		MNT_IUNLOCK(mp);
2183 		vn_start_write(NULL, &mp, V_WAIT);
2184 	}
2185 	mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2);
2186 	wakeup(&mp->mnt_flag);
2187 	MNT_IUNLOCK(mp);
2188 	curthread->td_pflags |= TDP_IGNSUSP;
2189 	return (0);
2190 }
2191 
2192 /*
2193  * Implement kqueues for files by translating them to vnode operations.
2194  */
2195 static int
2196 vn_kqfilter(struct file *fp, struct knote *kn)
2197 {
2198 
2199 	return (VOP_KQFILTER(fp->f_vnode, kn));
2200 }
2201 
2202 int
2203 vn_kqfilter_opath(struct file *fp, struct knote *kn)
2204 {
2205 	if ((fp->f_flag & FKQALLOWED) == 0)
2206 		return (EBADF);
2207 	return (vn_kqfilter(fp, kn));
2208 }
2209 
2210 /*
2211  * Simplified in-kernel wrapper calls for extended attribute access.
2212  * These calls pass in a NULL credential, authorizing as "kernel" access.
2213  * Set IO_NODELOCKED in ioflg if the vnode is already locked.
2214  */
2215 int
2216 vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
2217     const char *attrname, int *buflen, char *buf, struct thread *td)
2218 {
2219 	struct uio	auio;
2220 	struct iovec	iov;
2221 	int	error;
2222 
2223 	iov.iov_len = *buflen;
2224 	iov.iov_base = buf;
2225 
2226 	auio.uio_iov = &iov;
2227 	auio.uio_iovcnt = 1;
2228 	auio.uio_rw = UIO_READ;
2229 	auio.uio_segflg = UIO_SYSSPACE;
2230 	auio.uio_td = td;
2231 	auio.uio_offset = 0;
2232 	auio.uio_resid = *buflen;
2233 
2234 	if ((ioflg & IO_NODELOCKED) == 0)
2235 		vn_lock(vp, LK_SHARED | LK_RETRY);
2236 
2237 	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
2238 
2239 	/* authorize attribute retrieval as kernel */
2240 	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
2241 	    td);
2242 
2243 	if ((ioflg & IO_NODELOCKED) == 0)
2244 		VOP_UNLOCK(vp);
2245 
2246 	if (error == 0) {
2247 		*buflen = *buflen - auio.uio_resid;
2248 	}
2249 
2250 	return (error);
2251 }
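
/*
 * Illustrative in-kernel use (a sketch; the attribute name is only an
 * example): with the vnode already locked,
 *
 *	char buf[64];
 *	int buflen = sizeof(buf);
 *
 *	error = vn_extattr_get(vp, IO_NODELOCKED, EXTATTR_NAMESPACE_SYSTEM,
 *	    "posix1e.acl_access", &buflen, buf, td);
 *
 * On success, buflen is updated to the number of bytes actually read.
 */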
2252 
2253 /*
2254  * XXX failure mode if partially written?
2255  */
2256 int
2257 vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
2258     const char *attrname, int buflen, char *buf, struct thread *td)
2259 {
2260 	struct uio	auio;
2261 	struct iovec	iov;
2262 	struct mount	*mp;
2263 	int	error;
2264 
2265 	iov.iov_len = buflen;
2266 	iov.iov_base = buf;
2267 
2268 	auio.uio_iov = &iov;
2269 	auio.uio_iovcnt = 1;
2270 	auio.uio_rw = UIO_WRITE;
2271 	auio.uio_segflg = UIO_SYSSPACE;
2272 	auio.uio_td = td;
2273 	auio.uio_offset = 0;
2274 	auio.uio_resid = buflen;
2275 
2276 	if ((ioflg & IO_NODELOCKED) == 0) {
2277 		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
2278 			return (error);
2279 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2280 	}
2281 
2282 	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
2283 
2284 	/* authorize attribute setting as kernel */
2285 	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);
2286 
2287 	if ((ioflg & IO_NODELOCKED) == 0) {
2288 		vn_finished_write(mp);
2289 		VOP_UNLOCK(vp);
2290 	}
2291 
2292 	return (error);
2293 }
2294 
2295 int
2296 vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
2297     const char *attrname, struct thread *td)
2298 {
2299 	struct mount	*mp;
2300 	int	error;
2301 
2302 	if ((ioflg & IO_NODELOCKED) == 0) {
2303 		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
2304 			return (error);
2305 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2306 	}
2307 
2308 	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
2309 
2310 	/* authorize attribute removal as kernel */
2311 	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td);
2312 	if (error == EOPNOTSUPP)
2313 		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
2314 		    NULL, td);
2315 
2316 	if ((ioflg & IO_NODELOCKED) == 0) {
2317 		vn_finished_write(mp);
2318 		VOP_UNLOCK(vp);
2319 	}
2320 
2321 	return (error);
2322 }
2323 
2324 static int
2325 vn_get_ino_alloc_vget(struct mount *mp, void *arg, int lkflags,
2326     struct vnode **rvp)
2327 {
2328 
2329 	return (VFS_VGET(mp, *(ino_t *)arg, lkflags, rvp));
2330 }
2331 
2332 int
2333 vn_vget_ino(struct vnode *vp, ino_t ino, int lkflags, struct vnode **rvp)
2334 {
2335 
2336 	return (vn_vget_ino_gen(vp, vn_get_ino_alloc_vget, &ino,
2337 	    lkflags, rvp));
2338 }
2339 
2340 int
2341 vn_vget_ino_gen(struct vnode *vp, vn_get_ino_t alloc, void *alloc_arg,
2342     int lkflags, struct vnode **rvp)
2343 {
2344 	struct mount *mp;
2345 	int ltype, error;
2346 
2347 	ASSERT_VOP_LOCKED(vp, "vn_vget_ino_get");
2348 	mp = vp->v_mount;
2349 	ltype = VOP_ISLOCKED(vp);
2350 	KASSERT(ltype == LK_EXCLUSIVE || ltype == LK_SHARED,
2351 	    ("vn_vget_ino: vp not locked"));
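	/*
	 * Busy the mount point without sleeping while the vnode lock is
	 * held.  If that fails, take a reference on the mount, drop the
	 * vnode lock, retry the busy operation with sleeping allowed,
	 * then reacquire the vnode lock and check that the vnode was not
	 * doomed while it was unlocked.
	 */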
2352 	error = vfs_busy(mp, MBF_NOWAIT);
2353 	if (error != 0) {
2354 		vfs_ref(mp);
2355 		VOP_UNLOCK(vp);
2356 		error = vfs_busy(mp, 0);
2357 		vn_lock(vp, ltype | LK_RETRY);
2358 		vfs_rel(mp);
2359 		if (error != 0)
2360 			return (ENOENT);
2361 		if (VN_IS_DOOMED(vp)) {
2362 			vfs_unbusy(mp);
2363 			return (ENOENT);
2364 		}
2365 	}
2366 	VOP_UNLOCK(vp);
2367 	error = alloc(mp, alloc_arg, lkflags, rvp);
2368 	vfs_unbusy(mp);
2369 	if (error != 0 || *rvp != vp)
2370 		vn_lock(vp, ltype | LK_RETRY);
2371 	if (VN_IS_DOOMED(vp)) {
2372 		if (error == 0) {
2373 			if (*rvp == vp)
2374 				vunref(vp);
2375 			else
2376 				vput(*rvp);
2377 		}
2378 		error = ENOENT;
2379 	}
2380 	return (error);
2381 }
2382 
2383 static void
2384 vn_send_sigxfsz(struct proc *p)
2385 {
2386 	PROC_LOCK(p);
2387 	kern_psignal(p, SIGXFSZ);
2388 	PROC_UNLOCK(p);
2389 }
2390 
2391 int
2392 vn_rlimit_trunc(u_quad_t size, struct thread *td)
2393 {
2394 	if (size <= lim_cur(td, RLIMIT_FSIZE))
2395 		return (0);
2396 	vn_send_sigxfsz(td->td_proc);
2397 	return (EFBIG);
2398 }
2399 
2400 static int
2401 vn_rlimit_fsizex1(const struct vnode *vp, struct uio *uio, off_t maxfsz,
2402     bool adj, struct thread *td)
2403 {
2404 	off_t lim;
2405 	bool ktr_write;
2406 
2407 	if (vp->v_type != VREG)
2408 		return (0);
2409 
2410 	/*
2411 	 * Handle file system maximum file size.
2412 	 */
2413 	if (maxfsz != 0 && uio->uio_offset + uio->uio_resid > maxfsz) {
2414 		if (!adj || uio->uio_offset >= maxfsz)
2415 			return (EFBIG);
2416 		uio->uio_resid = maxfsz - uio->uio_offset;
2417 	}
2418 
2419 	/*
2420 	 * This is a kernel write (e.g. vnode_pager) or an accounting
2421 	 * write; ignore the limit.
2422 	 */
2423 	if (td == NULL || (td->td_pflags2 & TDP2_ACCT) != 0)
2424 		return (0);
2425 
2426 	/*
2427 	 * Calculate file size limit.
2428 	 */
2429 	ktr_write = (td->td_pflags & TDP_INKTRACE) != 0;
2430 	lim = __predict_false(ktr_write) ? td->td_ktr_io_lim :
2431 	    lim_cur(td, RLIMIT_FSIZE);
2432 
2433 	/*
2434 	 * Is the limit reached?
2435 	 */
2436 	if (__predict_true((uoff_t)uio->uio_offset + uio->uio_resid <= lim))
2437 		return (0);
2438 
2439 	/*
2440 	 * Prepared filesystems can handle writes truncated to the
2441 	 * file size limit.
2442 	 */
2443 	if (adj && (uoff_t)uio->uio_offset < lim) {
2444 		uio->uio_resid = lim - (uoff_t)uio->uio_offset;
2445 		return (0);
2446 	}
2447 
2448 	if (!ktr_write || ktr_filesize_limit_signal)
2449 		vn_send_sigxfsz(td->td_proc);
2450 	return (EFBIG);
2451 }
2452 
2453 /*
2454  * Helper for VOP_WRITE() implementations: the common code to
2455  * handle the maximum supported file size on the filesystem and
2456  * RLIMIT_FSIZE, except for special writes from the accounting
2457  * subsystem and ktrace.
2458  *
2459  * For maximum file size (maxfsz argument):
2460  * - return EFBIG if uio_offset is beyond it
2461  * - otherwise, clamp uio_resid if write would extend file beyond maxfsz.
2462  *
2463  * For RLIMIT_FSIZE:
2464  * - return EFBIG and send SIGXFSZ if uio_offset is beyond the limit
2465  * - otherwise, clamp uio_resid if write would extend file beyond limit.
2466  *
2467  * If clamping occurred, the adjustment for uio_resid is stored in
2468  * *resid_adj, to be re-applied by vn_rlimit_fsizex_res() on return
2469  * from the VOP.
2470  */
2471 int
2472 vn_rlimit_fsizex(const struct vnode *vp, struct uio *uio, off_t maxfsz,
2473     ssize_t *resid_adj, struct thread *td)
2474 {
2475 	ssize_t resid_orig;
2476 	int error;
2477 	bool adj;
2478 
2479 	resid_orig = uio->uio_resid;
2480 	adj = resid_adj != NULL;
2481 	error = vn_rlimit_fsizex1(vp, uio, maxfsz, adj, td);
2482 	if (adj)
2483 		*resid_adj = resid_orig - uio->uio_resid;
2484 	return (error);
2485 }
2486 
2487 void
2488 vn_rlimit_fsizex_res(struct uio *uio, ssize_t resid_adj)
2489 {
2490 	uio->uio_resid += resid_adj;
2491 }
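
/*
 * Illustrative use in a VOP_WRITE() implementation (a sketch; "maxfilesize"
 * stands for the filesystem's own limit and is not a real symbol here):
 *
 *	ssize_t resid_adj;
 *
 *	error = vn_rlimit_fsizex(vp, uio, maxfilesize, &resid_adj, td);
 *	if (error != 0)
 *		return (error);
 *	... perform the write with the possibly clamped uio ...
 *	vn_rlimit_fsizex_res(uio, resid_adj);
 */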
2492 
2493 int
2494 vn_rlimit_fsize(const struct vnode *vp, const struct uio *uio,
2495     struct thread *td)
2496 {
2497 	return (vn_rlimit_fsizex(vp, __DECONST(struct uio *, uio), 0, NULL,
2498 	    td));
2499 }
2500 
2501 int
2502 vn_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
2503     struct thread *td)
2504 {
2505 	struct vnode *vp;
2506 
2507 	vp = fp->f_vnode;
2508 #ifdef AUDIT
2509 	vn_lock(vp, LK_SHARED | LK_RETRY);
2510 	AUDIT_ARG_VNODE1(vp);
2511 	VOP_UNLOCK(vp);
2512 #endif
2513 	return (setfmode(td, active_cred, vp, mode));
2514 }
2515 
2516 int
2517 vn_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
2518     struct thread *td)
2519 {
2520 	struct vnode *vp;
2521 
2522 	vp = fp->f_vnode;
2523 #ifdef AUDIT
2524 	vn_lock(vp, LK_SHARED | LK_RETRY);
2525 	AUDIT_ARG_VNODE1(vp);
2526 	VOP_UNLOCK(vp);
2527 #endif
2528 	return (setfown(td, active_cred, vp, uid, gid));
2529 }
2530 
2531 /*
2532  * Remove pages in the range ["start", "end") from the vnode's VM object.  If
2533  * "end" is 0, then the range extends to the end of the object.
2534  */
2535 void
2536 vn_pages_remove(struct vnode *vp, vm_pindex_t start, vm_pindex_t end)
2537 {
2538 	vm_object_t object;
2539 
2540 	if ((object = vp->v_object) == NULL)
2541 		return;
2542 	VM_OBJECT_WLOCK(object);
2543 	vm_object_page_remove(object, start, end, 0);
2544 	VM_OBJECT_WUNLOCK(object);
2545 }
2546 
2547 /*
2548  * Like vn_pages_remove(), but skips invalid pages, which by definition are not
2549  * mapped into any process' address space.  Filesystems may use this in
2550  * preference to vn_pages_remove() to avoid blocking on pages busied in
2551  * preparation for a VOP_GETPAGES.
2552  */
2553 void
2554 vn_pages_remove_valid(struct vnode *vp, vm_pindex_t start, vm_pindex_t end)
2555 {
2556 	vm_object_t object;
2557 
2558 	if ((object = vp->v_object) == NULL)
2559 		return;
2560 	VM_OBJECT_WLOCK(object);
2561 	vm_object_page_remove(object, start, end, OBJPR_VALIDONLY);
2562 	VM_OBJECT_WUNLOCK(object);
2563 }
2564 
2565 int
2566 vn_bmap_seekhole_locked(struct vnode *vp, u_long cmd, off_t *off,
2567     struct ucred *cred)
2568 {
2569 	vm_object_t obj;
2570 	off_t size;
2571 	daddr_t bn, bnp;
2572 	uint64_t bsize;
2573 	off_t noff;
2574 	int error;
2575 
2576 	KASSERT(cmd == FIOSEEKHOLE || cmd == FIOSEEKDATA,
2577 	    ("%s: Wrong command %lu", __func__, cmd));
2578 	ASSERT_VOP_ELOCKED(vp, "vn_bmap_seekhole_locked");
2579 
2580 	if (vp->v_type != VREG) {
2581 		error = ENOTTY;
2582 		goto out;
2583 	}
2584 	error = vn_getsize_locked(vp, &size, cred);
2585 	if (error != 0)
2586 		goto out;
2587 	noff = *off;
2588 	if (noff < 0 || noff >= size) {
2589 		error = ENXIO;
2590 		goto out;
2591 	}
2592 
2593 	/* See the comment in ufs_bmap_seekdata(). */
2594 	obj = vp->v_object;
2595 	if (obj != NULL) {
2596 		VM_OBJECT_WLOCK(obj);
2597 		vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
2598 		VM_OBJECT_WUNLOCK(obj);
2599 	}
2600 
2601 	bsize = vp->v_mount->mnt_stat.f_iosize;
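	/*
	 * Walk the file one filesystem block at a time, using VOP_BMAP()
	 * to distinguish allocated blocks (bnp != -1) from holes
	 * (bnp == -1), until a block of the requested kind or EOF is
	 * reached.
	 */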
2602 	for (bn = noff / bsize; noff < size; bn++, noff += bsize -
2603 	    noff % bsize) {
2604 		error = VOP_BMAP(vp, bn, NULL, &bnp, NULL, NULL);
2605 		if (error == EOPNOTSUPP) {
2606 			error = ENOTTY;
2607 			goto out;
2608 		}
2609 		if ((bnp == -1 && cmd == FIOSEEKHOLE) ||
2610 		    (bnp != -1 && cmd == FIOSEEKDATA)) {
2611 			noff = bn * bsize;
2612 			if (noff < *off)
2613 				noff = *off;
2614 			goto out;
2615 		}
2616 	}
2617 	if (noff > size)
2618 		noff = size;
2619 	/* noff == size. There is an implicit hole at the end of file. */
2620 	if (cmd == FIOSEEKDATA)
2621 		error = ENXIO;
2622 out:
2623 	if (error == 0)
2624 		*off = noff;
2625 	return (error);
2626 }
2627 
2628 int
2629 vn_bmap_seekhole(struct vnode *vp, u_long cmd, off_t *off, struct ucred *cred)
2630 {
2631 	int error;
2632 
2633 	KASSERT(cmd == FIOSEEKHOLE || cmd == FIOSEEKDATA,
2634 	    ("%s: Wrong command %lu", __func__, cmd));
2635 
2636 	if (vn_lock(vp, LK_EXCLUSIVE) != 0)
2637 		return (EBADF);
2638 	error = vn_bmap_seekhole_locked(vp, cmd, off, cred);
2639 	VOP_UNLOCK(vp);
2640 	return (error);
2641 }
2642 
2643 int
2644 vn_seek(struct file *fp, off_t offset, int whence, struct thread *td)
2645 {
2646 	struct ucred *cred;
2647 	struct vnode *vp;
2648 	off_t foffset, fsize, size;
2649 	int error, noneg;
2650 
2651 	cred = td->td_ucred;
2652 	vp = fp->f_vnode;
2653 	foffset = foffset_lock(fp, 0);
2654 	noneg = (vp->v_type != VCHR);
2655 	error = 0;
2656 	switch (whence) {
2657 	case L_INCR:
2658 		if (noneg &&
2659 		    (foffset < 0 ||
2660 		    (offset > 0 && foffset > OFF_MAX - offset))) {
2661 			error = EOVERFLOW;
2662 			break;
2663 		}
2664 		offset += foffset;
2665 		break;
2666 	case L_XTND:
2667 		error = vn_getsize(vp, &fsize, cred);
2668 		if (error != 0)
2669 			break;
2670 
2671 		/*
2672 		 * If the file references a disk device, then fetch
2673 		 * the media size and use that to determine the ending
2674 		 * offset.
2675 		 */
2676 		if (fsize == 0 && vp->v_type == VCHR &&
2677 		    fo_ioctl(fp, DIOCGMEDIASIZE, &size, cred, td) == 0)
2678 			fsize = size;
2679 		if (noneg && offset > 0 && fsize > OFF_MAX - offset) {
2680 			error = EOVERFLOW;
2681 			break;
2682 		}
2683 		offset += fsize;
2684 		break;
2685 	case L_SET:
2686 		break;
2687 	case SEEK_DATA:
2688 		error = fo_ioctl(fp, FIOSEEKDATA, &offset, cred, td);
2689 		if (error == ENOTTY)
2690 			error = EINVAL;
2691 		break;
2692 	case SEEK_HOLE:
2693 		error = fo_ioctl(fp, FIOSEEKHOLE, &offset, cred, td);
2694 		if (error == ENOTTY)
2695 			error = EINVAL;
2696 		break;
2697 	default:
2698 		error = EINVAL;
2699 	}
2700 	if (error == 0 && noneg && offset < 0)
2701 		error = EINVAL;
2702 	if (error != 0)
2703 		goto drop;
2704 	VFS_KNOTE_UNLOCKED(vp, 0);
2705 	td->td_uretoff.tdu_off = offset;
2706 drop:
2707 	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
2708 	return (error);
2709 }
2710 
2711 int
2712 vn_utimes_perm(struct vnode *vp, struct vattr *vap, struct ucred *cred,
2713     struct thread *td)
2714 {
2715 	int error;
2716 
2717 	/*
2718 	 * Grant permission if the caller is the owner of the file, or
2719 	 * the super-user, or has ACL_WRITE_ATTRIBUTES permission on
2720 	 * the file.  If the time pointer is null, then write
2721 	 * permission on the file is also sufficient.
2722 	 *
2723 	 * From NFSv4.1, draft 21, 6.2.1.3.1, Discussion of Mask Attributes:
2724 	 * A user having ACL_WRITE_DATA or ACL_WRITE_ATTRIBUTES
2725 	 * will be allowed to set the times [..] to the current
2726 	 * server time.
2727 	 */
2728 	error = VOP_ACCESSX(vp, VWRITE_ATTRIBUTES, cred, td);
2729 	if (error != 0 && (vap->va_vaflags & VA_UTIMES_NULL) != 0)
2730 		error = VOP_ACCESS(vp, VWRITE, cred, td);
2731 	return (error);
2732 }
2733 
2734 int
2735 vn_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
2736 {
2737 	struct vnode *vp;
2738 	int error;
2739 
2740 	if (fp->f_type == DTYPE_FIFO)
2741 		kif->kf_type = KF_TYPE_FIFO;
2742 	else
2743 		kif->kf_type = KF_TYPE_VNODE;
2744 	vp = fp->f_vnode;
2745 	vref(vp);
2746 	FILEDESC_SUNLOCK(fdp);
2747 	error = vn_fill_kinfo_vnode(vp, kif);
2748 	vrele(vp);
2749 	FILEDESC_SLOCK(fdp);
2750 	return (error);
2751 }
2752 
2753 static inline void
2754 vn_fill_junk(struct kinfo_file *kif)
2755 {
2756 	size_t len, olen;
2757 
2758 	/*
2759 	 * Simulate vn_fullpath returning changing values for a given
2760 	 * vp during e.g. coredump.
2761 	 */
2762 	len = (arc4random() % (sizeof(kif->kf_path) - 2)) + 1;
2763 	olen = strlen(kif->kf_path);
2764 	if (len < olen)
2765 		strcpy(&kif->kf_path[len - 1], "$");
2766 	else
2767 		for (; olen < len; olen++)
2768 			strcpy(&kif->kf_path[olen], "A");
2769 }
2770 
2771 int
2772 vn_fill_kinfo_vnode(struct vnode *vp, struct kinfo_file *kif)
2773 {
2774 	struct vattr va;
2775 	char *fullpath, *freepath;
2776 	int error;
2777 
2778 	kif->kf_un.kf_file.kf_file_type = vntype_to_kinfo(vp->v_type);
2779 	freepath = NULL;
2780 	fullpath = "-";
2781 	error = vn_fullpath(vp, &fullpath, &freepath);
2782 	if (error == 0) {
2783 		strlcpy(kif->kf_path, fullpath, sizeof(kif->kf_path));
2784 	}
2785 	if (freepath != NULL)
2786 		free(freepath, M_TEMP);
2787 
2788 	KFAIL_POINT_CODE(DEBUG_FP, fill_kinfo_vnode__random_path,
2789 		vn_fill_junk(kif);
2790 	);
2791 
2792 	/*
2793 	 * Retrieve vnode attributes.
2794 	 */
2795 	va.va_fsid = VNOVAL;
2796 	va.va_rdev = NODEV;
2797 	vn_lock(vp, LK_SHARED | LK_RETRY);
2798 	error = VOP_GETATTR(vp, &va, curthread->td_ucred);
2799 	VOP_UNLOCK(vp);
2800 	if (error != 0)
2801 		return (error);
2802 	if (va.va_fsid != VNOVAL)
2803 		kif->kf_un.kf_file.kf_file_fsid = va.va_fsid;
2804 	else
2805 		kif->kf_un.kf_file.kf_file_fsid =
2806 		    vp->v_mount->mnt_stat.f_fsid.val[0];
2807 	kif->kf_un.kf_file.kf_file_fsid_freebsd11 =
2808 	    kif->kf_un.kf_file.kf_file_fsid; /* truncate */
2809 	kif->kf_un.kf_file.kf_file_fileid = va.va_fileid;
2810 	kif->kf_un.kf_file.kf_file_mode = MAKEIMODE(va.va_type, va.va_mode);
2811 	kif->kf_un.kf_file.kf_file_size = va.va_size;
2812 	kif->kf_un.kf_file.kf_file_rdev = va.va_rdev;
2813 	kif->kf_un.kf_file.kf_file_rdev_freebsd11 =
2814 	    kif->kf_un.kf_file.kf_file_rdev; /* truncate */
2815 	kif->kf_un.kf_file.kf_file_nlink = va.va_nlink;
2816 	return (0);
2817 }
2818 
2819 int
2820 vn_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size,
2821     vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff,
2822     struct thread *td)
2823 {
2824 #ifdef HWPMC_HOOKS
2825 	struct pmckern_map_in pkm;
2826 #endif
2827 	struct mount *mp;
2828 	struct vnode *vp;
2829 	vm_object_t object;
2830 	vm_prot_t maxprot;
2831 	boolean_t writecounted;
2832 	int error;
2833 
2834 #if defined(COMPAT_FREEBSD7) || defined(COMPAT_FREEBSD6) || \
2835     defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4)
2836 	/*
2837 	 * POSIX shared-memory objects are defined to have
2838 	 * kernel persistence, and are not defined to support
2839 	 * read(2)/write(2) -- or even open(2).  Thus, we can
2840 	 * use MAP_NOSYNC to trade on-disk coherence for speed.
2841 	 * The shm_open(3) library routine turns on the FPOSIXSHM
2842 	 * flag to request this behavior.
2843 	 */
2844 	if ((fp->f_flag & FPOSIXSHM) != 0)
2845 		flags |= MAP_NOSYNC;
2846 #endif
2847 	vp = fp->f_vnode;
2848 
2849 	/*
2850 	 * Ensure that file and memory protections are
2851 	 * compatible.  Note that we only worry about
2852 	 * writability if mapping is shared; in this case,
2853 	 * current and max prot are dictated by the open file.
2854 	 * XXX use the vnode instead?  Problem is: what
2855 	 * credentials do we use for determination? What if
2856 	 * proc does a setuid?
2857 	 */
2858 	mp = vp->v_mount;
2859 	if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) {
2860 		maxprot = VM_PROT_NONE;
2861 		if ((prot & VM_PROT_EXECUTE) != 0)
2862 			return (EACCES);
2863 	} else
2864 		maxprot = VM_PROT_EXECUTE;
2865 	if ((fp->f_flag & FREAD) != 0)
2866 		maxprot |= VM_PROT_READ;
2867 	else if ((prot & VM_PROT_READ) != 0)
2868 		return (EACCES);
2869 
2870 	/*
2871 	 * If we are sharing potential changes via MAP_SHARED and we
2872 	 * are trying to get write permission although we opened it
2873 	 * without asking for it, bail out.
2874 	 */
2875 	if ((flags & MAP_SHARED) != 0) {
2876 		if ((fp->f_flag & FWRITE) != 0)
2877 			maxprot |= VM_PROT_WRITE;
2878 		else if ((prot & VM_PROT_WRITE) != 0)
2879 			return (EACCES);
2880 	} else {
2881 		maxprot |= VM_PROT_WRITE;
2882 		cap_maxprot |= VM_PROT_WRITE;
2883 	}
2884 	maxprot &= cap_maxprot;
2885 
2886 	/*
2887 	 * For regular files and shared memory, POSIX requires that
2888 	 * the value of foff be a legitimate offset within the data
2889 	 * object.  In particular, negative offsets are invalid.
2890 	 * Blocking negative offsets and overflows here avoids
2891 	 * possible wraparound or user-level access into reserved
2892 	 * ranges of the data object later.  In contrast, POSIX does
2893 	 * not dictate how offsets are used by device drivers, so in
2894 	 * the case of a device mapping a negative offset is passed
2895 	 * on.
2896 	 */
2897 	if (
2898 #ifdef _LP64
2899 	    size > OFF_MAX ||
2900 #endif
2901 	    foff > OFF_MAX - size)
2902 		return (EINVAL);
2903 
2904 	writecounted = FALSE;
2905 	error = vm_mmap_vnode(td, size, prot, &maxprot, &flags, vp,
2906 	    &foff, &object, &writecounted);
2907 	if (error != 0)
2908 		return (error);
2909 	error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
2910 	    foff, writecounted, td);
2911 	if (error != 0) {
2912 		/*
2913 		 * If this mapping was accounted for in the vnode's
2914 		 * writecount, then undo that now.
2915 		 */
2916 		if (writecounted)
2917 			vm_pager_release_writecount(object, 0, size);
2918 		vm_object_deallocate(object);
2919 	}
2920 #ifdef HWPMC_HOOKS
2921 	/* Inform hwpmc(4) if an executable is being mapped. */
2922 	if (PMC_HOOK_INSTALLED(PMC_FN_MMAP)) {
2923 		if ((prot & VM_PROT_EXECUTE) != 0 && error == 0) {
2924 			pkm.pm_file = vp;
2925 			pkm.pm_address = (uintptr_t) *addr;
2926 			PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_MMAP, (void *) &pkm);
2927 		}
2928 	}
2929 #endif
2930 	return (error);
2931 }
2932 
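/*
 * Synthesize the 64-bit va_fsid from the mount's fsid_t, placing val[1]
 * in the upper 32 bits and val[0] in the lower 32 bits.
 */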
2933 void
2934 vn_fsid(struct vnode *vp, struct vattr *va)
2935 {
2936 	fsid_t *f;
2937 
2938 	f = &vp->v_mount->mnt_stat.f_fsid;
2939 	va->va_fsid = (uint32_t)f->val[1];
2940 	va->va_fsid <<= sizeof(f->val[1]) * NBBY;
2941 	va->va_fsid += (uint32_t)f->val[0];
2942 }
2943 
2944 int
2945 vn_fsync_buf(struct vnode *vp, int waitfor)
2946 {
2947 	struct buf *bp, *nbp;
2948 	struct bufobj *bo;
2949 	struct mount *mp;
2950 	int error, maxretry;
2951 
2952 	error = 0;
2953 	maxretry = 10000;     /* large, arbitrarily chosen */
2954 	mp = NULL;
2955 	if (vp->v_type == VCHR) {
2956 		VI_LOCK(vp);
2957 		mp = vp->v_rdev->si_mountpt;
2958 		VI_UNLOCK(vp);
2959 	}
2960 	bo = &vp->v_bufobj;
2961 	BO_LOCK(bo);
2962 loop1:
2963 	/*
2964 	 * MARK/SCAN initialization to avoid infinite loops.
2965 	 */
2966 	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
2967 		bp->b_vflags &= ~BV_SCANNED;
2968 		bp->b_error = 0;
2969 	}
2970 
2971 	/*
2972 	 * Flush all dirty buffers associated with a vnode.
2973 	 */
2974 loop2:
2975 	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
2976 		if ((bp->b_vflags & BV_SCANNED) != 0)
2977 			continue;
2978 		bp->b_vflags |= BV_SCANNED;
2979 		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
2980 			if (waitfor != MNT_WAIT)
2981 				continue;
2982 			if (BUF_LOCK(bp,
2983 			    LK_EXCLUSIVE | LK_INTERLOCK | LK_SLEEPFAIL,
2984 			    BO_LOCKPTR(bo)) != 0) {
2985 				BO_LOCK(bo);
2986 				goto loop1;
2987 			}
2988 			BO_LOCK(bo);
2989 		}
2990 		BO_UNLOCK(bo);
2991 		KASSERT(bp->b_bufobj == bo,
2992 		    ("bp %p wrong b_bufobj %p should be %p",
2993 		    bp, bp->b_bufobj, bo));
2994 		if ((bp->b_flags & B_DELWRI) == 0)
2995 			panic("fsync: not dirty");
2996 		if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) {
2997 			vfs_bio_awrite(bp);
2998 		} else {
2999 			bremfree(bp);
3000 			bawrite(bp);
3001 		}
3002 		if (maxretry < 1000)
3003 			pause("dirty", hz < 1000 ? 1 : hz / 1000);
3004 		BO_LOCK(bo);
3005 		goto loop2;
3006 	}
3007 
3008 	/*
3009 	 * If synchronous the caller expects us to completely resolve all
3010 	 * dirty buffers in the system.  Wait for in-progress I/O to
3011 	 * complete (which could include background bitmap writes), then
3012 	 * retry if dirty blocks still exist.
3013 	 */
3014 	if (waitfor == MNT_WAIT) {
3015 		bufobj_wwait(bo, 0, 0);
3016 		if (bo->bo_dirty.bv_cnt > 0) {
3017 			/*
3018 			 * If we are unable to write any of these buffers
3019 			 * then we fail now rather than trying endlessly
3020 			 * to write them out.
3021 			 */
3022 			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
3023 				if ((error = bp->b_error) != 0)
3024 					break;
3025 			if ((mp != NULL && mp->mnt_secondary_writes > 0) ||
3026 			    (error == 0 && --maxretry >= 0))
3027 				goto loop1;
3028 			if (error == 0)
3029 				error = EAGAIN;
3030 		}
3031 	}
3032 	BO_UNLOCK(bo);
3033 	if (error != 0)
3034 		vn_printf(vp, "fsync: giving up on dirty (error = %d) ", error);
3035 
3036 	return (error);
3037 }
3038 
3039 /*
3040  * Copies a byte range from invp to outvp.  Calls VOP_COPY_FILE_RANGE()
3041  * or vn_generic_copy_file_range() after rangelocking the byte ranges,
3042  * to do the actual copy.
3043  * vn_generic_copy_file_range() is factored out so that it can also be
3044  * called from a VOP_COPY_FILE_RANGE() implementation; it handles vnodes
3045  * from different file systems.
3046  */
3047 int
3048 vn_copy_file_range(struct vnode *invp, off_t *inoffp, struct vnode *outvp,
3049     off_t *outoffp, size_t *lenp, unsigned int flags, struct ucred *incred,
3050     struct ucred *outcred, struct thread *fsize_td)
3051 {
3052 	int error;
3053 	size_t len;
3054 	uint64_t uval;
3055 
3056 	len = *lenp;
3057 	*lenp = 0;		/* For error returns. */
3058 	error = 0;
3059 
3060 	/* Do some sanity checks on the arguments. */
3061 	if (invp->v_type == VDIR || outvp->v_type == VDIR)
3062 		error = EISDIR;
3063 	else if (*inoffp < 0 || *outoffp < 0 ||
3064 	    invp->v_type != VREG || outvp->v_type != VREG)
3065 		error = EINVAL;
3066 	if (error != 0)
3067 		goto out;
3068 
3069 	/* Ensure offset + len does not wrap around. */
3070 	uval = *inoffp;
3071 	uval += len;
3072 	if (uval > INT64_MAX)
3073 		len = INT64_MAX - *inoffp;
3074 	uval = *outoffp;
3075 	uval += len;
3076 	if (uval > INT64_MAX)
3077 		len = INT64_MAX - *outoffp;
3078 	if (len == 0)
3079 		goto out;
3080 
3081 	/*
3082 	 * If the two vnodes are for the same file system, call
3083 	 * VOP_COPY_FILE_RANGE(), otherwise call vn_generic_copy_file_range()
3084 	 * which can handle copies across multiple file systems.
3085 	 */
3086 	*lenp = len;
3087 	if (invp->v_mount == outvp->v_mount)
3088 		error = VOP_COPY_FILE_RANGE(invp, inoffp, outvp, outoffp,
3089 		    lenp, flags, incred, outcred, fsize_td);
3090 	else
3091 		error = vn_generic_copy_file_range(invp, inoffp, outvp,
3092 		    outoffp, lenp, flags, incred, outcred, fsize_td);
3093 out:
3094 	return (error);
3095 }
3096 
3097 /*
3098  * Test len bytes of data starting at dat for all bytes == 0.
3099  * Return true if all bytes are zero, false otherwise.
3100  * Expects dat to be well aligned.
3101  */
3102 static bool
3103 mem_iszero(void *dat, int len)
3104 {
3105 	int i;
3106 	const u_int *p;
3107 	const char *cp;
3108 
3109 	for (p = dat; len > 0; len -= sizeof(*p), p++) {
3110 		if (len >= sizeof(*p)) {
3111 			if (*p != 0)
3112 				return (false);
3113 		} else {
3114 			cp = (const char *)p;
3115 			for (i = 0; i < len; i++, cp++)
3116 				if (*cp != '\0')
3117 					return (false);
3118 		}
3119 	}
3120 	return (true);
3121 }
3122 
3123 /*
3124  * Look for a hole in the output file and, if found, adjust *outoffp
3125  * and *xferp to skip past the hole.
3126  * *xferp is the entire hole length to be written; the returned xfer2 is how
3127  * many bytes are to be written as 0's.
3128  */
3129 static off_t
3130 vn_skip_hole(struct vnode *outvp, off_t xfer2, off_t *outoffp, off_t *xferp,
3131     off_t *dataoffp, off_t *holeoffp, struct ucred *cred)
3132 {
3133 	int error;
3134 	off_t delta;
3135 
3136 	if (*holeoffp == 0 || *holeoffp <= *outoffp) {
3137 		*dataoffp = *outoffp;
3138 		error = VOP_IOCTL(outvp, FIOSEEKDATA, dataoffp, 0, cred,
3139 		    curthread);
3140 		if (error == 0) {
3141 			*holeoffp = *dataoffp;
3142 			error = VOP_IOCTL(outvp, FIOSEEKHOLE, holeoffp, 0, cred,
3143 			    curthread);
3144 		}
3145 		if (error != 0 || *holeoffp == *dataoffp) {
3146 			/*
3147 			 * Since outvp is unlocked, it may be possible for
3148 			 * another thread to do a truncate(), lseek(), write()
3149 			 * creating a hole at startoff between the above
3150 			 * VOP_IOCTL() calls, if the other thread does not do
3151 			 * rangelocking.
3152 			 * If that happens, *holeoffp == *dataoffp and finding
3153 			 * the hole has failed, so disable vn_skip_hole().
3154 			 */
3155 			*holeoffp = -1;	/* Disable use of vn_skip_hole(). */
3156 			return (xfer2);
3157 		}
3158 		KASSERT(*dataoffp >= *outoffp,
3159 		    ("vn_skip_hole: dataoff=%jd < outoff=%jd",
3160 		    (intmax_t)*dataoffp, (intmax_t)*outoffp));
3161 		KASSERT(*holeoffp > *dataoffp,
3162 		    ("vn_skip_hole: holeoff=%jd <= dataoff=%jd",
3163 		    (intmax_t)*holeoffp, (intmax_t)*dataoffp));
3164 	}
3165 
3166 	/*
3167 	 * If there is a hole before the data starts, advance *outoffp and
3168 	 * *xferp past the hole.
3169 	 */
3170 	if (*dataoffp > *outoffp) {
3171 		delta = *dataoffp - *outoffp;
3172 		if (delta >= *xferp) {
3173 			/* Entire *xferp is a hole. */
3174 			*outoffp += *xferp;
3175 			*xferp = 0;
3176 			return (0);
3177 		}
3178 		*xferp -= delta;
3179 		*outoffp += delta;
3180 		xfer2 = MIN(xfer2, *xferp);
3181 	}
3182 
3183 	/*
3184 	 * If a hole starts before the end of this xfer2, reduce this xfer2 so
3185 	 * that the write ends at the start of the hole.
3186 	 * *holeoffp should always be greater than *outoffp, but for the
3187 	 * non-INVARIANTS case, check this to make sure xfer2 remains a sane
3188 	 * value.
3189 	 */
3190 	if (*holeoffp > *outoffp && *holeoffp < *outoffp + xfer2)
3191 		xfer2 = *holeoffp - *outoffp;
3192 	return (xfer2);
3193 }
3194 
3195 /*
3196  * Write an xfer sized chunk to outvp in blksize blocks from dat.
3197  * dat is a maximum of blksize in length and can be written repeatedly in
3198  * the chunk.
3199  * If growfile == true, just grow the file via vn_truncate_locked() instead
3200  * of doing actual writes.
3201  * If checkhole == true, a hole is being punched, so skip over any hole
3202  * already in the output file.
3203  */
3204 static int
3205 vn_write_outvp(struct vnode *outvp, char *dat, off_t outoff, off_t xfer,
3206     u_long blksize, bool growfile, bool checkhole, struct ucred *cred)
3207 {
3208 	struct mount *mp;
3209 	off_t dataoff, holeoff, xfer2;
3210 	int error;
3211 
3212 	/*
3213 	 * Loop around doing writes of blksize until write has been completed.
3214 	 * Lock/unlock on each loop iteration so that a bwillwrite() can be
3215 	 * done for each iteration, since the xfer argument can be very
3216 	 * large if there is a large hole to punch in the output file.
3217 	 */
3218 	error = 0;
3219 	holeoff = 0;
3220 	do {
3221 		xfer2 = MIN(xfer, blksize);
3222 		if (checkhole) {
3223 			/*
3224 			 * Punching a hole.  Skip writing if there is
3225 			 * already a hole in the output file.
3226 			 */
3227 			xfer2 = vn_skip_hole(outvp, xfer2, &outoff, &xfer,
3228 			    &dataoff, &holeoff, cred);
3229 			if (xfer == 0)
3230 				break;
3231 			if (holeoff < 0)
3232 				checkhole = false;
3233 			KASSERT(xfer2 > 0, ("vn_write_outvp: xfer2=%jd",
3234 			    (intmax_t)xfer2));
3235 		}
3236 		bwillwrite();
3237 		mp = NULL;
3238 		error = vn_start_write(outvp, &mp, V_WAIT);
3239 		if (error != 0)
3240 			break;
3241 		if (growfile) {
3242 			error = vn_lock(outvp, LK_EXCLUSIVE);
3243 			if (error == 0) {
3244 				error = vn_truncate_locked(outvp, outoff + xfer,
3245 				    false, cred);
3246 				VOP_UNLOCK(outvp);
3247 			}
3248 		} else {
3249 			error = vn_lock(outvp, vn_lktype_write(mp, outvp));
3250 			if (error == 0) {
3251 				error = vn_rdwr(UIO_WRITE, outvp, dat, xfer2,
3252 				    outoff, UIO_SYSSPACE, IO_NODELOCKED,
3253 				    curthread->td_ucred, cred, NULL, curthread);
3254 				outoff += xfer2;
3255 				xfer -= xfer2;
3256 				VOP_UNLOCK(outvp);
3257 			}
3258 		}
3259 		if (mp != NULL)
3260 			vn_finished_write(mp);
3261 	} while (!growfile && xfer > 0 && error == 0);
3262 	return (error);
3263 }
3264 
3265 /*
3266  * Copy a byte range of one file to another.  This function can handle the
3267  * case where invp and outvp are on different file systems.
3268  * It can also be called by a VOP_COPY_FILE_RANGE() to do the work, if there
3269  * is no better file system specific way to do it.
3270  */
3271 int
3272 vn_generic_copy_file_range(struct vnode *invp, off_t *inoffp,
3273     struct vnode *outvp, off_t *outoffp, size_t *lenp, unsigned int flags,
3274     struct ucred *incred, struct ucred *outcred, struct thread *fsize_td)
3275 {
3276 	struct mount *mp;
3277 	off_t startoff, endoff, xfer, xfer2;
3278 	u_long blksize;
3279 	int error, interrupted;
3280 	bool cantseek, readzeros, eof, lastblock, holetoeof;
3281 	ssize_t aresid, r = 0;
3282 	size_t copylen, len, savlen;
3283 	off_t insize, outsize;
3284 	char *dat;
3285 	long holein, holeout;
3286 	struct timespec curts, endts;
3287 
3288 	holein = holeout = 0;
3289 	savlen = len = *lenp;
3290 	error = 0;
3291 	interrupted = 0;
3292 	dat = NULL;
3293 
3294 	error = vn_lock(invp, LK_SHARED);
3295 	if (error != 0)
3296 		goto out;
3297 	if (VOP_PATHCONF(invp, _PC_MIN_HOLE_SIZE, &holein) != 0)
3298 		holein = 0;
3299 	if (holein > 0)
3300 		error = vn_getsize_locked(invp, &insize, incred);
3301 	VOP_UNLOCK(invp);
3302 	if (error != 0)
3303 		goto out;
3304 
3305 	mp = NULL;
3306 	error = vn_start_write(outvp, &mp, V_WAIT);
3307 	if (error == 0)
3308 		error = vn_lock(outvp, LK_EXCLUSIVE);
3309 	if (error == 0) {
3310 		/*
3311 		 * If fsize_td != NULL, do a vn_rlimit_fsizex() call,
3312 		 * now that outvp is locked.
3313 		 */
3314 		if (fsize_td != NULL) {
3315 			struct uio io;
3316 
3317 			io.uio_offset = *outoffp;
3318 			io.uio_resid = len;
3319 			error = vn_rlimit_fsizex(outvp, &io, 0, &r, fsize_td);
3320 			len = savlen = io.uio_resid;
3321 			/*
3322 			 * No need to call vn_rlimit_fsizex_res before return,
3323 			 * since the uio is local.
3324 			 */
3325 		}
3326 		if (VOP_PATHCONF(outvp, _PC_MIN_HOLE_SIZE, &holeout) != 0)
3327 			holeout = 0;
3328 		/*
3329 		 * Holes that are past EOF do not need to be written as a block
3330 		 * of zero bytes.  So, truncate the output file as far as
3331 		 * possible and then use outsize to decide if writing 0
3332 		 * bytes is necessary in the loop below.
3333 		 */
3334 		if (error == 0)
3335 			error = vn_getsize_locked(outvp, &outsize, outcred);
3336 		if (error == 0 && outsize > *outoffp && outsize <= *outoffp + len) {
3337 #ifdef MAC
3338 			error = mac_vnode_check_write(curthread->td_ucred,
3339 			    outcred, outvp);
3340 			if (error == 0)
3341 #endif
3342 				error = vn_truncate_locked(outvp, *outoffp,
3343 				    false, outcred);
3344 			if (error == 0)
3345 				outsize = *outoffp;
3346 		}
3347 		VOP_UNLOCK(outvp);
3348 	}
3349 	if (mp != NULL)
3350 		vn_finished_write(mp);
3351 	if (error != 0)
3352 		goto out;
3353 
3354 	if (holein == 0 && holeout > 0) {
3355 		/*
3356 		 * For this special case, the input data will be scanned
3357 		 * for blocks of all 0 bytes.  For these blocks, the
3358 		 * write can be skipped for the output file to create
3359 		 * an unallocated region.
3360 		 * Therefore, use the appropriate size for the output file.
3361 		 */
3362 		blksize = holeout;
3363 		if (blksize <= 512) {
3364 			/*
3365 			 * Use f_iosize, since ZFS reports a _PC_MIN_HOLE_SIZE
3366 			 * of 512, although it actually only creates
3367 			 * unallocated regions for blocks >= f_iosize.
3368 			 */
3369 			blksize = outvp->v_mount->mnt_stat.f_iosize;
3370 		}
3371 	} else {
3372 		/*
3373 		 * Use the larger of the two f_iosize values.  If they are
3374 		 * not the same size, one will normally be an exact multiple of
3375 		 * the other, since they are both likely to be a power of 2.
3376 		 */
3377 		blksize = MAX(invp->v_mount->mnt_stat.f_iosize,
3378 		    outvp->v_mount->mnt_stat.f_iosize);
3379 	}
3380 
3381 	/* Clip to sane limits. */
3382 	if (blksize < 4096)
3383 		blksize = 4096;
3384 	else if (blksize > maxphys)
3385 		blksize = maxphys;
3386 	dat = malloc(blksize, M_TEMP, M_WAITOK);
3387 
3388 	/*
3389 	 * If VOP_IOCTL(FIOSEEKHOLE) works for invp, use it and FIOSEEKDATA
3390 	 * to find holes.  Otherwise, just scan the read block for all 0s
3391 	 * in the inner loop where the data copying is done.
3392 	 * Note that some file systems such as NFSv3, NFSv4.0 and NFSv4.1 may
3393 	 * support holes on the server, but do not support FIOSEEKHOLE.
3394 	 * The kernel flag COPY_FILE_RANGE_TIMEO1SEC is used to indicate
3395  * that this function should return after 1 second with a partial
3396 	 * completion.
3397 	 */
3398 	if ((flags & COPY_FILE_RANGE_TIMEO1SEC) != 0) {
3399 		getnanouptime(&endts);
3400 		endts.tv_sec++;
3401 	} else
3402 		timespecclear(&endts);
3403 	holetoeof = eof = false;
3404 	while (len > 0 && error == 0 && !eof && interrupted == 0) {
3405 		endoff = 0;			/* To shut up compilers. */
3406 		cantseek = true;
3407 		startoff = *inoffp;
3408 		copylen = len;
3409 
3410 		/*
3411 		 * Find the next data area.  If there is just a hole to EOF,
3412 		 * FIOSEEKDATA should fail with ENXIO.
3413 		 * (I do not know if any file system will report a hole to
3414 		 *  EOF via FIOSEEKHOLE, but I am pretty sure FIOSEEKDATA
3415 		 *  will fail for those file systems.)
3416 		 *
3417 		 * For input files that don't support FIOSEEKDATA/FIOSEEKHOLE,
3418 		 * the code just falls through to the inner copy loop.
3419 		 */
3420 		error = EINVAL;
3421 		if (holein > 0) {
3422 			error = VOP_IOCTL(invp, FIOSEEKDATA, &startoff, 0,
3423 			    incred, curthread);
3424 			if (error == ENXIO) {
3425 				startoff = endoff = insize;
3426 				eof = holetoeof = true;
3427 				error = 0;
3428 			}
3429 		}
3430 		if (error == 0 && !holetoeof) {
3431 			endoff = startoff;
3432 			error = VOP_IOCTL(invp, FIOSEEKHOLE, &endoff, 0,
3433 			    incred, curthread);
3434 			/*
3435 			 * Since invp is unlocked, it may be possible for
3436 			 * another thread to do a truncate(), lseek(), write()
3437 			 * creating a hole at startoff between the above
3438 			 * VOP_IOCTL() calls, if the other thread does not do
3439 			 * rangelocking.
3440 			 * If that happens, startoff == endoff and finding
3441 			 * the hole has failed, so set an error.
3442 			 */
3443 			if (error == 0 && startoff == endoff)
3444 				error = EINVAL; /* Any error; reset to 0 below. */
3445 		}
3446 		if (error == 0) {
3447 			if (startoff > *inoffp) {
3448 				/* Found hole before data block. */
3449 				xfer = MIN(startoff - *inoffp, len);
3450 				if (*outoffp < outsize) {
3451 					/* Must write 0s to punch hole. */
3452 					xfer2 = MIN(outsize - *outoffp,
3453 					    xfer);
3454 					memset(dat, 0, MIN(xfer2, blksize));
3455 					error = vn_write_outvp(outvp, dat,
3456 					    *outoffp, xfer2, blksize, false,
3457 					    holeout > 0, outcred);
3458 				}
3459 
3460 				if (error == 0 && *outoffp + xfer >
3461 				    outsize && (xfer == len || holetoeof)) {
3462 					/* Grow output file (hole at end). */
3463 					error = vn_write_outvp(outvp, dat,
3464 					    *outoffp, xfer, blksize, true,
3465 					    false, outcred);
3466 				}
3467 				if (error == 0) {
3468 					*inoffp += xfer;
3469 					*outoffp += xfer;
3470 					len -= xfer;
3471 					if (len < savlen) {
3472 						interrupted = sig_intr();
3473 						if (timespecisset(&endts) &&
3474 						    interrupted == 0) {
3475 							getnanouptime(&curts);
3476 							if (timespeccmp(&curts,
3477 							    &endts, >=))
3478 								interrupted =
3479 								    EINTR;
3480 						}
3481 					}
3482 				}
3483 			}
3484 			copylen = MIN(len, endoff - startoff);
3485 			cantseek = false;
3486 		} else {
3487 			cantseek = true;
3488 			startoff = *inoffp;
3489 			copylen = len;
3490 			error = 0;
3491 		}
3492 
3493 		xfer = blksize;
3494 		if (cantseek) {
3495 			/*
3496 			 * Set first xfer to end at a block boundary, so that
3497 			 * holes are more likely detected in the loop below via
3498 			 * the for all bytes 0 method.
3499 			 */
3500 			xfer -= (*inoffp % blksize);
3501 		}
3502 		/* Loop copying the data block. */
3503 		while (copylen > 0 && error == 0 && !eof && interrupted == 0) {
3504 			if (copylen < xfer)
3505 				xfer = copylen;
3506 			error = vn_lock(invp, LK_SHARED);
3507 			if (error != 0)
3508 				goto out;
3509 			error = vn_rdwr(UIO_READ, invp, dat, xfer,
3510 			    startoff, UIO_SYSSPACE, IO_NODELOCKED,
3511 			    curthread->td_ucred, incred, &aresid,
3512 			    curthread);
3513 			VOP_UNLOCK(invp);
3514 			lastblock = false;
3515 			if (error == 0 && aresid > 0) {
3516 				/* Stop the copy at EOF on the input file. */
3517 				xfer -= aresid;
3518 				eof = true;
3519 				lastblock = true;
3520 			}
3521 			if (error == 0) {
3522 				/*
3523 				 * Skip the write for holes past the initial EOF
3524 				 * of the output file, unless this is the last
3525 				 * write of the output file at EOF.
3526 				 */
3527 				readzeros = cantseek ? mem_iszero(dat, xfer) :
3528 				    false;
3529 				if (xfer == len)
3530 					lastblock = true;
3531 				if (!cantseek || *outoffp < outsize ||
3532 				    lastblock || !readzeros)
3533 					error = vn_write_outvp(outvp, dat,
3534 					    *outoffp, xfer, blksize,
3535 					    readzeros && lastblock &&
3536 					    *outoffp >= outsize, false,
3537 					    outcred);
3538 				if (error == 0) {
3539 					*inoffp += xfer;
3540 					startoff += xfer;
3541 					*outoffp += xfer;
3542 					copylen -= xfer;
3543 					len -= xfer;
3544 					if (len < savlen) {
3545 						interrupted = sig_intr();
3546 						if (timespecisset(&endts) &&
3547 						    interrupted == 0) {
3548 							getnanouptime(&curts);
3549 							if (timespeccmp(&curts,
3550 							    &endts, >=))
3551 								interrupted =
3552 								    EINTR;
3553 						}
3554 					}
3555 				}
3556 			}
3557 			xfer = blksize;
3558 		}
3559 	}
3560 out:
3561 	*lenp = savlen - len;
3562 	free(dat, M_TEMP);
3563 	return (error);
3564 }
3565 
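/*
 * Allocate backing store for the range [offset, offset + len) of a regular
 * file by calling VOP_ALLOCATE() repeatedly until the whole range has been
 * allocated, yielding between iterations.
 */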
3566 static int
3567 vn_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td)
3568 {
3569 	struct mount *mp;
3570 	struct vnode *vp;
3571 	off_t olen, ooffset;
3572 	int error;
3573 #ifdef AUDIT
3574 	int audited_vnode1 = 0;
3575 #endif
3576 
3577 	vp = fp->f_vnode;
3578 	if (vp->v_type != VREG)
3579 		return (ENODEV);
3580 
3581 	/* Allocating blocks may take a long time, so iterate. */
3582 	for (;;) {
3583 		olen = len;
3584 		ooffset = offset;
3585 
3586 		bwillwrite();
3587 		mp = NULL;
3588 		error = vn_start_write(vp, &mp, V_WAIT | V_PCATCH);
3589 		if (error != 0)
3590 			break;
3591 		error = vn_lock(vp, LK_EXCLUSIVE);
3592 		if (error != 0) {
3593 			vn_finished_write(mp);
3594 			break;
3595 		}
3596 #ifdef AUDIT
3597 		if (!audited_vnode1) {
3598 			AUDIT_ARG_VNODE1(vp);
3599 			audited_vnode1 = 1;
3600 		}
3601 #endif
3602 #ifdef MAC
3603 		error = mac_vnode_check_write(td->td_ucred, fp->f_cred, vp);
3604 		if (error == 0)
3605 #endif
3606 			error = VOP_ALLOCATE(vp, &offset, &len, 0,
3607 			    td->td_ucred);
3608 		VOP_UNLOCK(vp);
3609 		vn_finished_write(mp);
3610 
3611 		if (olen + ooffset != offset + len) {
3612 			panic("offset + len changed from %jx/%jx to %jx/%jx",
3613 			    ooffset, olen, offset, len);
3614 		}
3615 		if (error != 0 || len == 0)
3616 			break;
3617 		KASSERT(olen > len, ("Iteration did not make progress?"));
3618 		maybe_yield();
3619 	}
3620 
3621 	return (error);
3622 }
3623 
3624 static int
3625 vn_deallocate_impl(struct vnode *vp, off_t *offset, off_t *length, int flags,
3626     int ioflag, struct ucred *cred, struct ucred *active_cred,
3627     struct ucred *file_cred)
3628 {
3629 	struct mount *mp;
3630 	void *rl_cookie;
3631 	off_t off, len;
3632 	int error;
3633 #ifdef AUDIT
3634 	bool audited_vnode1 = false;
3635 #endif
3636 
3637 	rl_cookie = NULL;
3638 	error = 0;
3639 	mp = NULL;
3640 	off = *offset;
3641 	len = *length;
3642 
3643 	if ((ioflag & (IO_NODELOCKED | IO_RANGELOCKED)) == 0)
3644 		rl_cookie = vn_rangelock_wlock(vp, off, off + len);
3645 	while (len > 0 && error == 0) {
3646 		/*
3647 		 * Try to deallocate the longest range in one pass.
3648 		 * If a pass takes too long to execute, it returns a partial
3649 		 * result.  The residue will be processed in the next
3650 		 * pass.
3651 		 */
3652 
3653 		if ((ioflag & IO_NODELOCKED) == 0) {
3654 			bwillwrite();
3655 			if ((error = vn_start_write(vp, &mp,
3656 			    V_WAIT | V_PCATCH)) != 0)
3657 				goto out;
3658 			vn_lock(vp, vn_lktype_write(mp, vp) | LK_RETRY);
3659 		}
3660 #ifdef AUDIT
3661 		if (!audited_vnode1) {
3662 			AUDIT_ARG_VNODE1(vp);
3663 			audited_vnode1 = true;
3664 		}
3665 #endif
3666 
3667 #ifdef MAC
3668 		if ((ioflag & IO_NOMACCHECK) == 0)
3669 			error = mac_vnode_check_write(active_cred, file_cred,
3670 			    vp);
3671 #endif
3672 		if (error == 0)
3673 			error = VOP_DEALLOCATE(vp, &off, &len, flags, ioflag,
3674 			    cred);
3675 
3676 		if ((ioflag & IO_NODELOCKED) == 0) {
3677 			VOP_UNLOCK(vp);
3678 			if (mp != NULL) {
3679 				vn_finished_write(mp);
3680 				mp = NULL;
3681 			}
3682 		}
3683 		if (error == 0 && len != 0)
3684 			maybe_yield();
3685 	}
3686 out:
3687 	if (rl_cookie != NULL)
3688 		vn_rangelock_unlock(vp, rl_cookie);
3689 	*offset = off;
3690 	*length = len;
3691 	return (error);
3692 }
3693 
3694 /*
3695  * This function is intended for situations where the deallocation
3696  * is not triggered by a user request.
3697  */
3698 int
3699 vn_deallocate(struct vnode *vp, off_t *offset, off_t *length, int flags,
3700     int ioflag, struct ucred *active_cred, struct ucred *file_cred)
3701 {
3702 	struct ucred *cred;
3703 
3704 	if (*offset < 0 || *length <= 0 || *length > OFF_MAX - *offset ||
3705 	    flags != 0)
3706 		return (EINVAL);
3707 	if (vp->v_type != VREG)
3708 		return (ENODEV);
3709 
3710 	cred = file_cred != NOCRED ? file_cred : active_cred;
3711 	return (vn_deallocate_impl(vp, offset, length, flags, ioflag, cred,
3712 	    active_cred, file_cred));
3713 }
3714 
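/*
 * Space-control handler for vnode-backed files; only SPACECTL_DEALLOC is
 * handled, by delegating to vn_deallocate_impl().
 */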
3715 static int
3716 vn_fspacectl(struct file *fp, int cmd, off_t *offset, off_t *length, int flags,
3717     struct ucred *active_cred, struct thread *td)
3718 {
3719 	int error;
3720 	struct vnode *vp;
3721 	int ioflag;
3722 
3723 	KASSERT(cmd == SPACECTL_DEALLOC, ("vn_fspacectl: Invalid cmd"));
3724 	KASSERT((flags & ~SPACECTL_F_SUPPORTED) == 0,
3725 	    ("vn_fspacectl: non-zero flags"));
3726 	KASSERT(*offset >= 0 && *length > 0 && *length <= OFF_MAX - *offset,
3727 	    ("vn_fspacectl: offset/length overflow or underflow"));
3728 	vp = fp->f_vnode;
3729 
3730 	if (vp->v_type != VREG)
3731 		return (ENODEV);
3732 
3733 	ioflag = get_write_ioflag(fp);
3734 
3735 	switch (cmd) {
3736 	case SPACECTL_DEALLOC:
3737 		error = vn_deallocate_impl(vp, offset, length, flags, ioflag,
3738 		    active_cred, active_cred, fp->f_cred);
3739 		break;
3740 	default:
3741 		panic("vn_fspacectl: unknown cmd %d", cmd);
3742 	}
3743 
3744 	return (error);
3745 }
3746 
3747 /*
3748  * Keep this assert as long as sizeof(struct dirent) is used as the maximum
3749  * entry size.
3750  */
3751 _Static_assert(_GENERIC_MAXDIRSIZ == sizeof(struct dirent),
3752     "'struct dirent' size must be a multiple of its alignment "
3753     "(see _GENERIC_DIRLEN())");
3754 
3755 /*
3756  * Returns successive directory entries through some caller's provided buffer.
3757  *
3758  * This function automatically refills the provided buffer with calls to
3759  * VOP_READDIR() (after MAC permission checks).
3760  *
3761  * 'td' is used for credentials and passed to uiomove().  'dirbuf' is the
3762  * caller's buffer to fill and 'dirbuflen' its allocated size.  'dirbuf' must
3763  * be properly aligned to access 'struct dirent' structures and 'dirbuflen'
3764  * must be at least GENERIC_MAXDIRSIZ to avoid VOP_READDIR() returning
3765  * EINVAL (the latter is not a strong guarantee (yet), but EINVAL will always
3766  * be returned if this requirement is not met).  '*dpp' points to the
3767  * current directory entry in the buffer and '*len' contains the remaining
3768  * valid bytes in 'dirbuf' after 'dpp' (including the pointed entry).
3769  *
3770  * On the first call (or when restarting the read), '*len' must have been set
3771  * to 0, '*off' to 0 (or any valid start offset) and '*eofflag' to 0.  There
3772  * are no more entries as soon as '*len' is 0 after a call that returned 0.
3773  * Calling this function again after such a condition is considered an error
3774  * and EINVAL will be returned.  Other possible error codes are those of
3775  * VOP_READDIR(), EINTEGRITY if the returned entries do not pass coherency
3776  * checks, or EINVAL (bad call).  All errors are unrecoverable, i.e., the
3777  * state ('*len', '*off' and '*eofflag') must be re-initialized before a
3778  * subsequent call.  On error or at end of directory, '*dpp' is reset to NULL.
3779  *
3780  * '*len', '*off' and '*eofflag' are internal state the caller should not
3781  * tamper with except as explained above.  '*off' is the next directory offset
3782  * to read from to refill the buffer.  '*eofflag' is set to 0 or 1 by the last
3783  * internal call to VOP_READDIR() that returned without error, indicating
3784  * whether it reached the end of the directory, and to 2 by this function after
3785  * all entries have been read.
3786  */
3787 int
3788 vn_dir_next_dirent(struct vnode *vp, struct thread *td,
3789     char *dirbuf, size_t dirbuflen,
3790     struct dirent **dpp, size_t *len, off_t *off, int *eofflag)
3791 {
3792 	struct dirent *dp = NULL;
3793 	int reclen;
3794 	int error;
3795 	struct uio uio;
3796 	struct iovec iov;
3797 
3798 	ASSERT_VOP_LOCKED(vp, "vnode not locked");
3799 	VNASSERT(vp->v_type == VDIR, vp, ("vnode is not a directory"));
3800 	MPASS2((uintptr_t)dirbuf < (uintptr_t)dirbuf + dirbuflen,
3801 	    "Address space overflow");
3802 
3803 	if (__predict_false(dirbuflen < GENERIC_MAXDIRSIZ)) {
3804 		/* Don't take any chances in this case */
3805 		error = EINVAL;
3806 		goto out;
3807 	}
3808 
3809 	if (*len != 0) {
3810 		dp = *dpp;
3811 
3812 		/*
3813 		 * The caller continued to call us after an error (we set dp to
3814 		 * NULL in a previous iteration).  Bail out right now.
3815 		 */
3816 		if (__predict_false(dp == NULL))
3817 			return (EINVAL);
3818 
3819 		MPASS(*len <= dirbuflen);
3820 		MPASS2((uintptr_t)dirbuf <= (uintptr_t)dp &&
3821 		    (uintptr_t)dp + *len <= (uintptr_t)dirbuf + dirbuflen,
3822 		    "Filled range not inside buffer");
3823 
3824 		reclen = dp->d_reclen;
3825 		if (reclen >= *len) {
3826 			/* End of buffer reached */
3827 			*len = 0;
3828 		} else {
3829 			dp = (struct dirent *)((char *)dp + reclen);
3830 			*len -= reclen;
3831 		}
3832 	}
3833 
3834 	if (*len == 0) {
3835 		dp = NULL;
3836 
3837 		/* Have to refill. */
3838 		switch (*eofflag) {
3839 		case 0:
3840 			break;
3841 
3842 		case 1:
3843 			/* Nothing more to read. */
3844 			*eofflag = 2; /* Remember the caller reached EOF. */
3845 			goto success;
3846 
3847 		default:
3848 			/* The caller didn't test for EOF. */
3849 			error = EINVAL;
3850 			goto out;
3851 		}
3852 
3853 		iov.iov_base = dirbuf;
3854 		iov.iov_len = dirbuflen;
3855 
3856 		uio.uio_iov = &iov;
3857 		uio.uio_iovcnt = 1;
3858 		uio.uio_offset = *off;
3859 		uio.uio_resid = dirbuflen;
3860 		uio.uio_segflg = UIO_SYSSPACE;
3861 		uio.uio_rw = UIO_READ;
3862 		uio.uio_td = td;
3863 
3864 #ifdef MAC
3865 		error = mac_vnode_check_readdir(td->td_ucred, vp);
3866 		if (error == 0)
3867 #endif
3868 			error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
3869 			    NULL, NULL);
3870 		if (error != 0)
3871 			goto out;
3872 
3873 		*len = dirbuflen - uio.uio_resid;
3874 		*off = uio.uio_offset;
3875 
3876 		if (*len == 0) {
3877 			/* Sanity check on INVARIANTS. */
3878 			MPASS(*eofflag != 0);
3879 			*eofflag = 1;
3880 			goto success;
3881 		}
3882 
3883 		/*
3884 		 * Normalize the flag returned by VOP_READDIR(), since we use 2
3885 		 * as a sentinel value.
3886 		 */
3887 		if (*eofflag != 0)
3888 			*eofflag = 1;
3889 
3890 		dp = (struct dirent *)dirbuf;
3891 	}
3892 
3893 	if (__predict_false(*len < GENERIC_MINDIRSIZ ||
3894 	    dp->d_reclen < GENERIC_MINDIRSIZ)) {
3895 		error = EINTEGRITY;
3896 		dp = NULL;
3897 		goto out;
3898 	}
3899 
3900 success:
3901 	error = 0;
3902 out:
3903 	*dpp = dp;
3904 	return (error);
3905 }
3906 
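/*
 * Example (illustrative sketch only) of the calling protocol described above:
 * iterate over a locked directory vnode and count its entries (including "."
 * and ".." if the filesystem returns them).  The helper name is hypothetical;
 * vn_dir_check_empty() below is an actual in-tree consumer following the same
 * pattern.
 */
#if 0
static int
example_count_entries(struct vnode *vp, u_long *countp)
{
	struct thread *const td = curthread;
	struct dirent *dp;
	char *dirbuf;
	size_t dirbuflen, len;
	off_t off;
	int eofflag, error;

	ASSERT_VOP_LOCKED(vp, __func__);

	/* Buffer must be at least GENERIC_MAXDIRSIZ, as documented above. */
	dirbuflen = max(DEV_BSIZE, GENERIC_MAXDIRSIZ);
	dirbuf = malloc(dirbuflen, M_TEMP, M_WAITOK);

	/* Initial state for a fresh read. */
	*countp = 0;
	len = 0;
	off = 0;
	eofflag = 0;

	for (;;) {
		error = vn_dir_next_dirent(vp, td, dirbuf, dirbuflen,
		    &dp, &len, &off, &eofflag);
		if (error != 0 || len == 0)
			break;
		(*countp)++;
	}

	free(dirbuf, M_TEMP);
	return (error);
}
#endif
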
3907 /*
3908  * Checks whether a directory is empty or not.
3909  *
3910  * If the directory is empty, returns 0, and if it is not, ENOTEMPTY.  Other
3911  * values are genuine errors preventing the check.
3912  */
3913 int
3914 vn_dir_check_empty(struct vnode *vp)
3915 {
3916 	struct thread *const td = curthread;
3917 	char *dirbuf;
3918 	size_t dirbuflen, len;
3919 	off_t off;
3920 	int eofflag, error;
3921 	struct dirent *dp;
3922 	struct vattr va;
3923 
3924 	ASSERT_VOP_LOCKED(vp, "vfs_emptydir");
3925 	VNPASS(vp->v_type == VDIR, vp);
3926 
3927 	error = VOP_GETATTR(vp, &va, td->td_ucred);
3928 	if (error != 0)
3929 		return (error);
3930 
3931 	dirbuflen = max(DEV_BSIZE, GENERIC_MAXDIRSIZ);
3932 	if (dirbuflen < va.va_blocksize)
3933 		dirbuflen = va.va_blocksize;
3934 	dirbuf = malloc(dirbuflen, M_TEMP, M_WAITOK);
3935 
3936 	len = 0;
3937 	off = 0;
3938 	eofflag = 0;
3939 
3940 	for (;;) {
3941 		error = vn_dir_next_dirent(vp, td, dirbuf, dirbuflen,
3942 		    &dp, &len, &off, &eofflag);
3943 		if (error != 0)
3944 			goto end;
3945 
3946 		if (len == 0) {
3947 			/* EOF */
3948 			error = 0;
3949 			goto end;
3950 		}
3951 
3952 		/*
3953 		 * Skip whiteouts.  Unionfs operates on filesystems only and
3954 		 * not on hierarchies, so these whiteouts would be shadowed in
3955 		 * the system hierarchy, but not in a union that uses the
3956 		 * filesystem containing them as its upper layer.
3957 		 * Additionally, unionfs currently transparently exposes
3958 		 * union-specific metadata of its upper layer, meaning that
3959 		 * whiteouts can be seen through the union view in empty
3960 		 * directories.  Taking these whiteouts into account would
3961 		 * then prevent mounting another filesystem on such
3962 		 * effectively empty directories.
3963 		 */
3964 		if (dp->d_type == DT_WHT)
3965 			continue;
3966 
3967 		/*
3968 		 * Any file in the directory which is not '.' or '..' indicates
3969 		 * the directory is not empty.
3970 		 */
3971 		switch (dp->d_namlen) {
3972 		case 2:
3973 			if (dp->d_name[1] != '.') {
3974 				/* Can't be '..' (nor '.') */
3975 				error = ENOTEMPTY;
3976 				goto end;
3977 			}
3978 			/* FALLTHROUGH */
3979 		case 1:
3980 			if (dp->d_name[0] != '.') {
3981 				/* Can't be '..' nor '.' */
3982 				error = ENOTEMPTY;
3983 				goto end;
3984 			}
3985 			break;
3986 
3987 		default:
3988 			error = ENOTEMPTY;
3989 			goto end;
3990 		}
3991 	}
3992 
3993 end:
3994 	free(dirbuf, M_TEMP);
3995 	return (error);
3996 }
3997 
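/*
 * Example (illustrative sketch only): a caller would typically treat
 * ENOTEMPTY as "directory has real entries" and any other non-zero value as a
 * genuine failure.  The helper name is hypothetical; 'dvp' and 'vp' are
 * assumed to be locked, as VOP_RMDIR() and vn_dir_check_empty() require.
 */
#if 0
static int
example_rmdir_if_empty(struct vnode *dvp, struct vnode *vp,
    struct componentname *cnp)
{
	int error;

	/* 0: empty, ENOTEMPTY: not empty, anything else: check failed. */
	error = vn_dir_check_empty(vp);
	if (error != 0)
		return (error);
	return (VOP_RMDIR(dvp, vp, cnp));
}
#endif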
3998 
3999 static u_long vn_lock_pair_pause_cnt;
4000 SYSCTL_ULONG(_debug, OID_AUTO, vn_lock_pair_pause, CTLFLAG_RD,
4001     &vn_lock_pair_pause_cnt, 0,
4002     "Count of vn_lock_pair deadlocks");
4003 
4004 u_int vn_lock_pair_pause_max;
4005 SYSCTL_UINT(_debug, OID_AUTO, vn_lock_pair_pause_max, CTLFLAG_RW,
4006     &vn_lock_pair_pause_max, 0,
4007     "Max ticks for vn_lock_pair deadlock avoidance sleep");
4008 
4009 static void
4010 vn_lock_pair_pause(const char *wmesg)
4011 {
4012 	atomic_add_long(&vn_lock_pair_pause_cnt, 1);
4013 	pause(wmesg, prng32_bounded(vn_lock_pair_pause_max));
4014 }
4015 
4016 /*
4017  * Lock pair of vnodes vp1, vp2, avoiding lock order reversal.
4018  * vp1_locked indicates whether vp1 is locked; if not, vp1 must be
4019  * unlocked.  Same for vp2 and vp2_locked.  One of the vnodes can be
4020  * NULL.
4021  *
4022  * The function returns with both vnodes exclusively or shared locked,
4023  * according to the corresponding lkflags, and guarantees that it does not
4024  * create a lock order reversal with other threads during its execution.
4025  * Both vnodes may be unlocked temporarily (and reclaimed).
4026  *
4027  * If requesting shared locking, the locked vnode's lock must not be recursed.
4028  *
4029  * Exactly one of LK_SHARED and LK_EXCLUSIVE must be specified.
4030  * LK_NODDLKTREAT can be optionally passed.
4031  */
4032 void
4033 vn_lock_pair(struct vnode *vp1, bool vp1_locked, int lkflags1,
4034     struct vnode *vp2, bool vp2_locked, int lkflags2)
4035 {
4036 	int error;
4037 
4038 	MPASS(((lkflags1 & LK_SHARED) != 0) ^ ((lkflags1 & LK_EXCLUSIVE) != 0));
4039 	MPASS((lkflags1 & ~(LK_SHARED | LK_EXCLUSIVE | LK_NODDLKTREAT)) == 0);
4040 	MPASS(((lkflags2 & LK_SHARED) != 0) ^ ((lkflags2 & LK_EXCLUSIVE) != 0));
4041 	MPASS((lkflags2 & ~(LK_SHARED | LK_EXCLUSIVE | LK_NODDLKTREAT)) == 0);
4042 
4043 	if (vp1 == NULL && vp2 == NULL)
4044 		return;
4045 
4046 	if (vp1 != NULL) {
4047 		if ((lkflags1 & LK_SHARED) != 0 &&
4048 		    (vp1->v_vnlock->lock_object.lo_flags & LK_NOSHARE) != 0)
4049 			lkflags1 = (lkflags1 & ~LK_SHARED) | LK_EXCLUSIVE;
4050 		if (vp1_locked && VOP_ISLOCKED(vp1) != LK_EXCLUSIVE) {
4051 			ASSERT_VOP_LOCKED(vp1, "vp1");
4052 			if ((lkflags1 & LK_EXCLUSIVE) != 0) {
4053 				VOP_UNLOCK(vp1);
4054 				ASSERT_VOP_UNLOCKED(vp1,
4055 				    "vp1 shared recursed");
4056 				vp1_locked = false;
4057 			}
4058 		} else if (!vp1_locked)
4059 			ASSERT_VOP_UNLOCKED(vp1, "vp1");
4060 	} else {
4061 		vp1_locked = true;
4062 	}
4063 
4064 	if (vp2 != NULL) {
4065 		if ((lkflags2 & LK_SHARED) != 0 &&
4066 		    (vp2->v_vnlock->lock_object.lo_flags & LK_NOSHARE) != 0)
4067 			lkflags2 = (lkflags2 & ~LK_SHARED) | LK_EXCLUSIVE;
4068 		if (vp2_locked && VOP_ISLOCKED(vp2) != LK_EXCLUSIVE) {
4069 			ASSERT_VOP_LOCKED(vp2, "vp2");
4070 			if ((lkflags2 & LK_EXCLUSIVE) != 0) {
4071 				VOP_UNLOCK(vp2);
4072 				ASSERT_VOP_UNLOCKED(vp2,
4073 				    "vp2 shared recursed");
4074 				vp2_locked = false;
4075 			}
4076 		} else if (!vp2_locked)
4077 			ASSERT_VOP_UNLOCKED(vp2, "vp2");
4078 	} else {
4079 		vp2_locked = true;
4080 	}
4081 
4082 	if (!vp1_locked && !vp2_locked) {
4083 		vn_lock(vp1, lkflags1 | LK_RETRY);
4084 		vp1_locked = true;
4085 	}
4086 
4087 	while (!vp1_locked || !vp2_locked) {
4088 		if (vp1_locked && vp2 != NULL) {
4089 			if (vp1 != NULL) {
4090 				error = VOP_LOCK1(vp2, lkflags2 | LK_NOWAIT,
4091 				    __FILE__, __LINE__);
4092 				if (error == 0)
4093 					break;
4094 				VOP_UNLOCK(vp1);
4095 				vp1_locked = false;
4096 				vn_lock_pair_pause("vlp1");
4097 			}
4098 			vn_lock(vp2, lkflags2 | LK_RETRY);
4099 			vp2_locked = true;
4100 		}
4101 		if (vp2_locked && vp1 != NULL) {
4102 			if (vp2 != NULL) {
4103 				error = VOP_LOCK1(vp1, lkflags1 | LK_NOWAIT,
4104 				    __FILE__, __LINE__);
4105 				if (error == 0)
4106 					break;
4107 				VOP_UNLOCK(vp2);
4108 				vp2_locked = false;
4109 				vn_lock_pair_pause("vlp2");
4110 			}
4111 			vn_lock(vp1, lkflags1 | LK_RETRY);
4112 			vp1_locked = true;
4113 		}
4114 	}
4115 	if (vp1 != NULL) {
4116 		if (lkflags1 == LK_EXCLUSIVE)
4117 			ASSERT_VOP_ELOCKED(vp1, "vp1 ret");
4118 		else
4119 			ASSERT_VOP_LOCKED(vp1, "vp1 ret");
4120 	}
4121 	if (vp2 != NULL) {
4122 		if (lkflags2 == LK_EXCLUSIVE)
4123 			ASSERT_VOP_ELOCKED(vp2, "vp2 ret");
4124 		else
4125 			ASSERT_VOP_LOCKED(vp2, "vp2 ret");
4126 	}
4127 }
4128 
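/*
 * Example (illustrative sketch only): exclusively lock two unrelated vnodes,
 * neither of which is currently locked, without risking a lock order
 * reversal.  The helper and parameter names are hypothetical.
 */
#if 0
static void
example_lock_both(struct vnode *fdvp, struct vnode *tdvp)
{
	vn_lock_pair(fdvp, false, LK_EXCLUSIVE, tdvp, false, LK_EXCLUSIVE);

	/* Both vnodes are now exclusively locked, but may have been reclaimed. */

	VOP_UNLOCK(fdvp);
	VOP_UNLOCK(tdvp);
}
#endif
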
4129 int
4130 vn_lktype_write(struct mount *mp, struct vnode *vp)
4131 {
4132 	if (MNT_SHARED_WRITES(mp) ||
4133 	    (mp == NULL && MNT_SHARED_WRITES(vp->v_mount)))
4134 		return (LK_SHARED);
4135 	return (LK_EXCLUSIVE);
4136 }
4137