xref: /freebsd/sys/fs/tmpfs/tmpfs_vnops.c (revision 2878d99dfcfbdd7a415a7f31cf95fbd53fc8e581)
1 /*	$NetBSD: tmpfs_vnops.c,v 1.39 2007/07/23 15:41:01 jmmv Exp $	*/
2 
3 /*-
4  * SPDX-License-Identifier: BSD-2-Clause
5  *
6  * Copyright (c) 2005, 2006 The NetBSD Foundation, Inc.
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to The NetBSD Foundation
10  * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
11  * 2005 program.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 /*
36  * tmpfs vnode interface.
37  */
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/dirent.h>
42 #include <sys/extattr.h>
43 #include <sys/fcntl.h>
44 #include <sys/file.h>
45 #include <sys/filio.h>
46 #include <sys/limits.h>
47 #include <sys/lockf.h>
48 #include <sys/lock.h>
49 #include <sys/mount.h>
50 #include <sys/namei.h>
51 #include <sys/priv.h>
52 #include <sys/proc.h>
53 #include <sys/rwlock.h>
54 #include <sys/sched.h>
55 #include <sys/smr.h>
56 #include <sys/stat.h>
57 #include <sys/sysctl.h>
58 #include <sys/unistd.h>
59 #include <sys/vnode.h>
60 #include <security/audit/audit.h>
61 #include <security/mac/mac_framework.h>
62 
63 #include <vm/vm.h>
64 #include <vm/vm_param.h>
65 #include <vm/vm_object.h>
66 #include <vm/vm_page.h>
67 #include <vm/vm_pager.h>
68 #include <vm/swap_pager.h>
69 
70 #include <fs/tmpfs/tmpfs_vnops.h>
71 #include <fs/tmpfs/tmpfs.h>
72 
73 SYSCTL_DECL(_vfs_tmpfs);
74 VFS_SMR_DECLARE;
75 
76 static volatile int tmpfs_rename_restarts;
77 SYSCTL_INT(_vfs_tmpfs, OID_AUTO, rename_restarts, CTLFLAG_RD,
78     __DEVOLATILE(int *, &tmpfs_rename_restarts), 0,
79     "Times rename had to restart due to lock contention");
80 
81 MALLOC_DEFINE(M_TMPFSEA, "tmpfs extattr", "tmpfs extattr structure");
82 
83 static int
84 tmpfs_vn_get_ino_alloc(struct mount *mp, void *arg, int lkflags,
85     struct vnode **rvp)
86 {
87 
88 	return (tmpfs_alloc_vp(mp, arg, lkflags, rvp));
89 }
90 
91 static int
92 tmpfs_lookup1(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
93 {
94 	struct tmpfs_dirent *de;
95 	struct tmpfs_node *dnode, *pnode;
96 	struct tmpfs_mount *tm;
97 	int error;
98 
99 	/* Caller assumes responsibility for ensuring access (VEXEC). */
100 	dnode = VP_TO_TMPFS_DIR(dvp);
101 	*vpp = NULLVP;
102 
103 	/* We cannot be requesting the parent directory of the root node. */
104 	MPASS(IMPLIES(dnode->tn_type == VDIR &&
105 	    dnode->tn_dir.tn_parent == dnode,
106 	    !(cnp->cn_flags & ISDOTDOT)));
107 
108 	TMPFS_ASSERT_LOCKED(dnode);
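	/*
	 * A directory whose parent link has been cleared has already been
	 * removed (see tmpfs_rmdir()); name lookups under it must fail.
	 */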
109 	if (dnode->tn_dir.tn_parent == NULL) {
110 		error = ENOENT;
111 		goto out;
112 	}
113 	if (cnp->cn_flags & ISDOTDOT) {
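		/*
		 * Lookup of "..": vn_vget_ino_gen() takes care of the lock
		 * order (it may temporarily drop the lock on dvp while
		 * locking the parent).  Hold a reference on the parent node
		 * so that it cannot be freed during that window.
		 */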
114 		tm = VFS_TO_TMPFS(dvp->v_mount);
115 		pnode = dnode->tn_dir.tn_parent;
116 		tmpfs_ref_node(pnode);
117 		error = vn_vget_ino_gen(dvp, tmpfs_vn_get_ino_alloc,
118 		    pnode, cnp->cn_lkflags, vpp);
119 		tmpfs_free_node(tm, pnode);
120 		if (error != 0)
121 			goto out;
122 	} else if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
123 		VREF(dvp);
124 		*vpp = dvp;
125 		error = 0;
126 	} else {
127 		de = tmpfs_dir_lookup(dnode, NULL, cnp);
128 		if (de != NULL && de->td_node == NULL)
129 			cnp->cn_flags |= ISWHITEOUT;
130 		if (de == NULL || de->td_node == NULL) {
131 			/*
132 			 * The entry was not found in the directory.
133 			 * This is OK if we are creating or renaming an
134 			 * entry and are working on the last component of
135 			 * the path name.
136 			 */
137 			if ((cnp->cn_flags & ISLASTCN) &&
138 			    (cnp->cn_nameiop == CREATE ||
139 			    cnp->cn_nameiop == RENAME ||
140 			    (cnp->cn_nameiop == DELETE &&
141 			    cnp->cn_flags & DOWHITEOUT &&
142 			    cnp->cn_flags & ISWHITEOUT))) {
143 				error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred,
144 				    curthread);
145 				if (error != 0)
146 					goto out;
147 
148 				error = EJUSTRETURN;
149 			} else
150 				error = ENOENT;
151 		} else {
152 			struct tmpfs_node *tnode;
153 
154 			/*
155 			 * The entry was found, so get its associated
156 			 * tmpfs_node.
157 			 */
158 			tnode = de->td_node;
159 
160 			/*
161 			 * If we are not at the last path component and
162 			 * If we are not at the last path component and the
163 			 * entry found is neither a directory nor a symbolic
164 			 * link (which may itself resolve to a directory),
165 			 * fail with ENOTDIR.
166 			if ((tnode->tn_type != VDIR &&
167 			    tnode->tn_type != VLNK) &&
168 			    !(cnp->cn_flags & ISLASTCN)) {
169 				error = ENOTDIR;
170 				goto out;
171 			}
172 
173 			/*
174 			 * If we are deleting or renaming the entry, keep
175 			 * track of its tmpfs_dirent so that it can be
176 			 * easily deleted later.
177 			 */
178 			if ((cnp->cn_flags & ISLASTCN) &&
179 			    (cnp->cn_nameiop == DELETE ||
180 			    cnp->cn_nameiop == RENAME)) {
181 				error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred,
182 				    curthread);
183 				if (error != 0)
184 					goto out;
185 
186 				/* Allocate a new vnode on the matching entry. */
187 				error = tmpfs_alloc_vp(dvp->v_mount, tnode,
188 				    cnp->cn_lkflags, vpp);
189 				if (error != 0)
190 					goto out;
191 
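				/*
				 * Sticky directory: removing or renaming an
				 * entry requires ownership of either the
				 * directory or the entry itself (VADMIN), or
				 * appropriate privilege.
				 */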
192 				if ((dnode->tn_mode & S_ISTXT) &&
193 				  VOP_ACCESS(dvp, VADMIN, cnp->cn_cred,
194 				  curthread) && VOP_ACCESS(*vpp, VADMIN,
195 				  cnp->cn_cred, curthread)) {
196 					error = EPERM;
197 					vput(*vpp);
198 					*vpp = NULL;
199 					goto out;
200 				}
201 			} else {
202 				error = tmpfs_alloc_vp(dvp->v_mount, tnode,
203 				    cnp->cn_lkflags, vpp);
204 				if (error != 0)
205 					goto out;
206 			}
207 		}
208 	}
209 
210 	/*
211 	 * Store the result of this lookup in the cache.  Avoid this if the
212 	 * request was for creation, as it does not improve timings on
213 	 * empirical tests.
214 	 */
215 	if ((cnp->cn_flags & MAKEENTRY) != 0 && tmpfs_use_nc(dvp))
216 		cache_enter(dvp, *vpp, cnp);
217 
218 out:
219 #ifdef INVARIANTS
220 	/*
221 	 * If there were no errors, *vpp cannot be null and it must be
222 	 * locked.
223 	 */
224 	if (error == 0) {
225 		MPASS(*vpp != NULLVP);
226 		ASSERT_VOP_LOCKED(*vpp, __func__);
227 	} else {
228 		MPASS(*vpp == NULL);
229 	}
230 #endif
231 
232 	return (error);
233 }
234 
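/*
 * vfs_cache_lookup() verifies execute access on the directory before
 * invoking this entry point, so the check is not repeated here.
 */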
235 static int
236 tmpfs_cached_lookup(struct vop_cachedlookup_args *v)
237 {
238 
239 	return (tmpfs_lookup1(v->a_dvp, v->a_vpp, v->a_cnp));
240 }
241 
242 static int
243 tmpfs_lookup(struct vop_lookup_args *v)
244 {
245 	struct vnode *dvp = v->a_dvp;
246 	struct vnode **vpp = v->a_vpp;
247 	struct componentname *cnp = v->a_cnp;
248 	int error;
249 
250 	/* Check accessibility of requested node as a first step. */
251 	error = vn_dir_check_exec(dvp, cnp);
252 	if (error != 0)
253 		return (error);
254 
255 	return (tmpfs_lookup1(dvp, vpp, cnp));
256 }
257 
258 static int
259 tmpfs_create(struct vop_create_args *v)
260 {
261 	struct vnode *dvp = v->a_dvp;
262 	struct vnode **vpp = v->a_vpp;
263 	struct componentname *cnp = v->a_cnp;
264 	struct vattr *vap = v->a_vap;
265 	int error;
266 
267 	MPASS(vap->va_type == VREG || vap->va_type == VSOCK);
268 
269 	error = tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL);
270 	if (error == 0 && (cnp->cn_flags & MAKEENTRY) != 0 && tmpfs_use_nc(dvp))
271 		cache_enter(dvp, *vpp, cnp);
272 	return (error);
273 }
274 
275 static int
276 tmpfs_mknod(struct vop_mknod_args *v)
277 {
278 	struct vnode *dvp = v->a_dvp;
279 	struct vnode **vpp = v->a_vpp;
280 	struct componentname *cnp = v->a_cnp;
281 	struct vattr *vap = v->a_vap;
282 
283 	if (vap->va_type != VBLK && vap->va_type != VCHR &&
284 	    vap->va_type != VFIFO)
285 		return (EINVAL);
286 
287 	return (tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL));
288 }
289 
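/*
 * Private fileops for regular files opened through tmpfs_open(); the
 * structure is expected to mirror the stock vnode fileops with fo_close
 * replaced by tmpfs_fo_close() below, which drops the node reference
 * taken at open time.
 */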
290 struct fileops tmpfs_fnops;
291 
292 static int
293 tmpfs_open(struct vop_open_args *v)
294 {
295 	struct vnode *vp;
296 	struct tmpfs_node *node;
297 	struct file *fp;
298 	int error, mode;
299 
300 	vp = v->a_vp;
301 	mode = v->a_mode;
302 	node = VP_TO_TMPFS_NODE(vp);
303 
304 	/*
305 	 * The file is still active but all its names have been removed
306 	 * (e.g. by a "rmdir $(pwd)").  It cannot be opened any more as
307 	 * it is about to die.
308 	 */
309 	if (node->tn_links < 1)
310 		return (ENOENT);
311 
312 	/* If the file is marked append-only, deny write requests. */
313 	if (node->tn_flags & APPEND && (mode & (FWRITE | O_APPEND)) == FWRITE)
314 		error = EPERM;
315 	else {
316 		error = 0;
317 		/* For regular files, the call below is a nop. */
318 		KASSERT(vp->v_type != VREG || (node->tn_reg.tn_aobj->flags &
319 		    OBJ_DEAD) == 0, ("dead object"));
320 		vnode_create_vobject(vp, node->tn_size, v->a_td);
321 	}
322 
323 	fp = v->a_fp;
324 	MPASS(fp == NULL || fp->f_data == NULL);
325 	if (error == 0 && fp != NULL && vp->v_type == VREG) {
326 		tmpfs_ref_node(node);
327 		finit_vnode(fp, mode, node, &tmpfs_fnops);
328 	}
329 
330 	return (error);
331 }
332 
333 static int
334 tmpfs_close(struct vop_close_args *v)
335 {
336 	struct vnode *vp = v->a_vp;
337 
338 	/* Update node times. */
339 	tmpfs_update(vp);
340 
341 	return (0);
342 }
343 
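/*
 * fo_close for the private tmpfs fileops: release the node reference
 * acquired in tmpfs_open() and fall through to the stock vnode close.
 */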
344 int
345 tmpfs_fo_close(struct file *fp, struct thread *td)
346 {
347 	struct tmpfs_node *node;
348 
349 	node = fp->f_data;
350 	if (node != NULL) {
351 		MPASS(node->tn_type == VREG);
352 		tmpfs_free_node(node->tn_reg.tn_tmp, node);
353 	}
354 	return (vnops.fo_close(fp, td));
355 }
356 
357 /*
358  * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see
359  * the comment above cache_fplookup for details.
360  */
361 int
362 tmpfs_fplookup_vexec(struct vop_fplookup_vexec_args *v)
363 {
364 	struct vnode *vp;
365 	struct tmpfs_node *node;
366 	struct ucred *cred;
367 	mode_t all_x, mode;
368 
369 	vp = v->a_vp;
370 	node = VP_TO_TMPFS_NODE_SMR(vp);
371 	if (__predict_false(node == NULL))
372 		return (EAGAIN);
373 
374 	all_x = S_IXUSR | S_IXGRP | S_IXOTH;
375 	mode = atomic_load_short(&node->tn_mode);
376 	if (__predict_true((mode & all_x) == all_x))
377 		return (0);
378 
379 	cred = v->a_cred;
380 	return (vaccess_vexec_smr(mode, node->tn_uid, node->tn_gid, cred));
381 }
382 
383 static int
384 tmpfs_access_locked(struct vnode *vp, struct tmpfs_node *node,
385     accmode_t accmode, struct ucred *cred)
386 {
387 #ifdef DEBUG_VFS_LOCKS
388 	if (!mtx_owned(TMPFS_NODE_MTX(node))) {
389 		ASSERT_VOP_LOCKED(vp,
390 		    "tmpfs_access_locked needs locked vnode or node");
391 	}
392 #endif
393 
394 	if ((accmode & VWRITE) != 0 && (node->tn_flags & IMMUTABLE) != 0)
395 		return (EPERM);
396 	return (vaccess(vp->v_type, node->tn_mode, node->tn_uid, node->tn_gid,
397 	    accmode, cred));
398 }
399 
400 int
401 tmpfs_access(struct vop_access_args *v)
402 {
403 	struct vnode *vp = v->a_vp;
404 	struct ucred *cred = v->a_cred;
405 	struct tmpfs_node *node = VP_TO_TMPFS_NODE(vp);
406 	mode_t all_x = S_IXUSR | S_IXGRP | S_IXOTH;
407 	accmode_t accmode = v->a_accmode;
408 
409 	/*
410 	 * Common case path lookup.
411 	 */
412 	if (__predict_true(accmode == VEXEC &&
413 	    (node->tn_mode & all_x) == all_x))
414 		return (0);
415 
416 	switch (vp->v_type) {
417 	case VDIR:
418 		/* FALLTHROUGH */
419 	case VLNK:
420 		/* FALLTHROUGH */
421 	case VREG:
422 		if ((accmode & VWRITE) != 0 &&
423 		    (vp->v_mount->mnt_flag & MNT_RDONLY) != 0)
424 			return (EROFS);
425 		break;
426 
427 	case VBLK:
428 		/* FALLTHROUGH */
429 	case VCHR:
430 		/* FALLTHROUGH */
431 	case VSOCK:
432 		/* FALLTHROUGH */
433 	case VFIFO:
434 		break;
435 
436 	default:
437 		return (EINVAL);
438 	}
439 
440 	return (tmpfs_access_locked(vp, node, accmode, cred));
441 }
442 
443 int
444 tmpfs_stat(struct vop_stat_args *v)
445 {
446 	struct vnode *vp = v->a_vp;
447 	struct stat *sb = v->a_sb;
448 	struct tmpfs_node *node;
449 	int error;
450 
451 	node = VP_TO_TMPFS_NODE(vp);
452 
453 	tmpfs_update_getattr(vp);
454 
455 	error = vop_stat_helper_pre(v);
456 	if (__predict_false(error))
457 		return (error);
458 
459 	sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
460 	sb->st_ino = node->tn_id;
461 	sb->st_mode = node->tn_mode | VTTOIF(vp->v_type);
462 	sb->st_nlink = node->tn_links;
463 	sb->st_uid = node->tn_uid;
464 	sb->st_gid = node->tn_gid;
465 	sb->st_rdev = (vp->v_type == VBLK || vp->v_type == VCHR) ?
466 		node->tn_rdev : NODEV;
467 	sb->st_size = node->tn_size;
468 	sb->st_atim.tv_sec = node->tn_atime.tv_sec;
469 	sb->st_atim.tv_nsec = node->tn_atime.tv_nsec;
470 	sb->st_mtim.tv_sec = node->tn_mtime.tv_sec;
471 	sb->st_mtim.tv_nsec = node->tn_mtime.tv_nsec;
472 	sb->st_ctim.tv_sec = node->tn_ctime.tv_sec;
473 	sb->st_ctim.tv_nsec = node->tn_ctime.tv_nsec;
474 	sb->st_birthtim.tv_sec = node->tn_birthtime.tv_sec;
475 	sb->st_birthtim.tv_nsec = node->tn_birthtime.tv_nsec;
476 	sb->st_blksize = PAGE_SIZE;
477 	sb->st_flags = node->tn_flags;
478 	sb->st_gen = node->tn_gen;
479 	if (vp->v_type == VREG) {
480 #ifdef __ILP32__
481 		vm_object_t obj = node->tn_reg.tn_aobj;
482 
483 		/* Handle torn read */
484 		VM_OBJECT_RLOCK(obj);
485 #endif
486 		sb->st_blocks = ptoa(node->tn_reg.tn_pages);
487 #ifdef __ILP32__
488 		VM_OBJECT_RUNLOCK(obj);
489 #endif
490 	} else {
491 		sb->st_blocks = node->tn_size;
492 	}
493 	sb->st_blocks /= S_BLKSIZE;
494 	return (vop_stat_helper_post(v, error));
495 }
496 
497 int
498 tmpfs_getattr(struct vop_getattr_args *v)
499 {
500 	struct vnode *vp = v->a_vp;
501 	struct vattr *vap = v->a_vap;
502 	struct tmpfs_node *node;
503 
504 	node = VP_TO_TMPFS_NODE(vp);
505 
506 	tmpfs_update_getattr(vp);
507 
508 	vap->va_type = vp->v_type;
509 	vap->va_mode = node->tn_mode;
510 	vap->va_nlink = node->tn_links;
511 	vap->va_uid = node->tn_uid;
512 	vap->va_gid = node->tn_gid;
513 	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
514 	vap->va_fileid = node->tn_id;
515 	vap->va_size = node->tn_size;
516 	vap->va_blocksize = PAGE_SIZE;
517 	vap->va_atime = node->tn_atime;
518 	vap->va_mtime = node->tn_mtime;
519 	vap->va_ctime = node->tn_ctime;
520 	vap->va_birthtime = node->tn_birthtime;
521 	vap->va_gen = node->tn_gen;
522 	vap->va_flags = node->tn_flags;
523 	vap->va_rdev = (vp->v_type == VBLK || vp->v_type == VCHR) ?
524 	    node->tn_rdev : NODEV;
525 	if (vp->v_type == VREG) {
526 #ifdef __ILP32__
527 		vm_object_t obj = node->tn_reg.tn_aobj;
528 
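		/* Handle torn read of tn_pages (see tmpfs_stat()). */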
529 		VM_OBJECT_RLOCK(obj);
530 #endif
531 		vap->va_bytes = ptoa(node->tn_reg.tn_pages);
532 #ifdef __ILP32__
533 		VM_OBJECT_RUNLOCK(obj);
534 #endif
535 	} else {
536 		vap->va_bytes = node->tn_size;
537 	}
538 	vap->va_filerev = 0;
539 
540 	return (0);
541 }
542 
543 int
544 tmpfs_setattr(struct vop_setattr_args *v)
545 {
546 	struct vnode *vp = v->a_vp;
547 	struct vattr *vap = v->a_vap;
548 	struct ucred *cred = v->a_cred;
549 	struct thread *td = curthread;
550 
551 	int error;
552 
553 	ASSERT_VOP_IN_SEQC(vp);
554 
555 	error = 0;
556 
557 	/* Abort if any unsettable attribute is given. */
558 	if (vap->va_type != VNON ||
559 	    vap->va_nlink != VNOVAL ||
560 	    vap->va_fsid != VNOVAL ||
561 	    vap->va_fileid != VNOVAL ||
562 	    vap->va_blocksize != VNOVAL ||
563 	    vap->va_gen != VNOVAL ||
564 	    vap->va_rdev != VNOVAL ||
565 	    vap->va_bytes != VNOVAL)
566 		error = EINVAL;
567 
568 	if (error == 0 && (vap->va_flags != VNOVAL))
569 		error = tmpfs_chflags(vp, vap->va_flags, cred, td);
570 
571 	if (error == 0 && (vap->va_size != VNOVAL))
572 		error = tmpfs_chsize(vp, vap->va_size, cred, td);
573 
574 	if (error == 0 && (vap->va_uid != VNOVAL || vap->va_gid != VNOVAL))
575 		error = tmpfs_chown(vp, vap->va_uid, vap->va_gid, cred, td);
576 
577 	if (error == 0 && (vap->va_mode != (mode_t)VNOVAL))
578 		error = tmpfs_chmod(vp, vap->va_mode, cred, td);
579 
580 	if (error == 0 && ((vap->va_atime.tv_sec != VNOVAL &&
581 	    vap->va_atime.tv_nsec != VNOVAL) ||
582 	    (vap->va_mtime.tv_sec != VNOVAL &&
583 	    vap->va_mtime.tv_nsec != VNOVAL) ||
584 	    (vap->va_birthtime.tv_sec != VNOVAL &&
585 	    vap->va_birthtime.tv_nsec != VNOVAL)))
586 		error = tmpfs_chtimes(vp, vap, cred, td);
587 
588 	/*
589 	 * Update the node times.  We give preference to the error codes
590 	 * generated by this function rather than the ones that may arise
591 	 * from tmpfs_update.
592 	 */
593 	tmpfs_update(vp);
594 
595 	return (error);
596 }
597 
598 static int
599 tmpfs_read(struct vop_read_args *v)
600 {
601 	struct vnode *vp;
602 	struct uio *uio;
603 	struct tmpfs_node *node;
604 
605 	vp = v->a_vp;
606 	if (vp->v_type != VREG)
607 		return (EISDIR);
608 	uio = v->a_uio;
609 	if (uio->uio_offset < 0)
610 		return (EINVAL);
611 	node = VP_TO_TMPFS_NODE(vp);
612 	tmpfs_set_accessed(VFS_TO_TMPFS(vp->v_mount), node);
613 	return (uiomove_object(node->tn_reg.tn_aobj, node->tn_size, uio));
614 }
615 
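/*
 * Lockless read path used when the vnode has VIRF_PGREAD set: the node and
 * its anonymous object are inspected under vfs SMR protection, and
 * EJUSTRETURN tells the caller to fall back to the locked tmpfs_read()
 * path when the fast path cannot be used.
 */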
616 static int
617 tmpfs_read_pgcache(struct vop_read_pgcache_args *v)
618 {
619 	struct vnode *vp;
620 	struct tmpfs_node *node;
621 	vm_object_t object;
622 	off_t size;
623 	int error;
624 
625 	vp = v->a_vp;
626 	VNPASS((vn_irflag_read(vp) & VIRF_PGREAD) != 0, vp);
627 
628 	if (v->a_uio->uio_offset < 0)
629 		return (EINVAL);
630 
631 	error = EJUSTRETURN;
632 	vfs_smr_enter();
633 
634 	node = VP_TO_TMPFS_NODE_SMR(vp);
635 	if (node == NULL)
636 		goto out_smr;
637 	MPASS(node->tn_type == VREG);
638 	MPASS(node->tn_refcount >= 1);
639 	object = node->tn_reg.tn_aobj;
640 	if (object == NULL)
641 		goto out_smr;
642 
643 	MPASS(object->type == tmpfs_pager_type);
644 	MPASS((object->flags & (OBJ_ANON | OBJ_DEAD | OBJ_SWAP)) ==
645 	    OBJ_SWAP);
646 	if (!VN_IS_DOOMED(vp)) {
647 		/* The size cannot become shorter due to the rangelock. */
648 		size = node->tn_size;
649 		tmpfs_set_accessed(node->tn_reg.tn_tmp, node);
650 		vfs_smr_exit();
651 		error = uiomove_object(object, size, v->a_uio);
652 		return (error);
653 	}
654 out_smr:
655 	vfs_smr_exit();
656 	return (error);
657 }
658 
659 static int
660 tmpfs_write(struct vop_write_args *v)
661 {
662 	struct vnode *vp;
663 	struct uio *uio;
664 	struct tmpfs_node *node;
665 	off_t oldsize;
666 	ssize_t r;
667 	int error, ioflag;
668 	mode_t newmode;
669 
670 	vp = v->a_vp;
671 	uio = v->a_uio;
672 	ioflag = v->a_ioflag;
673 	error = 0;
674 	node = VP_TO_TMPFS_NODE(vp);
675 	oldsize = node->tn_size;
676 
677 	if (uio->uio_offset < 0 || vp->v_type != VREG)
678 		return (EINVAL);
679 	if (uio->uio_resid == 0)
680 		return (0);
681 	if (ioflag & IO_APPEND)
682 		uio->uio_offset = node->tn_size;
683 	error = vn_rlimit_fsizex(vp, uio, VFS_TO_TMPFS(vp->v_mount)->
684 	    tm_maxfilesize, &r, uio->uio_td);
685 	if (error != 0) {
686 		vn_rlimit_fsizex_res(uio, r);
687 		return (error);
688 	}
689 
690 	if (uio->uio_offset + uio->uio_resid > node->tn_size) {
691 		error = tmpfs_reg_resize(vp, uio->uio_offset + uio->uio_resid,
692 		    FALSE);
693 		if (error != 0)
694 			goto out;
695 	}
696 
697 	error = uiomove_object(node->tn_reg.tn_aobj, node->tn_size, uio);
698 	node->tn_status |= TMPFS_NODE_MODIFIED | TMPFS_NODE_CHANGED;
699 	node->tn_accessed = true;
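	/*
	 * Writing to the file clears the setuid/setgid bits unless the
	 * writer is privileged (PRIV_VFS_RETAINSUGID).
	 */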
700 	if (node->tn_mode & (S_ISUID | S_ISGID)) {
701 		if (priv_check_cred(v->a_cred, PRIV_VFS_RETAINSUGID)) {
702 			newmode = node->tn_mode & ~(S_ISUID | S_ISGID);
703 			vn_seqc_write_begin(vp);
704 			atomic_store_short(&node->tn_mode, newmode);
705 			vn_seqc_write_end(vp);
706 		}
707 	}
708 	if (error != 0)
709 		(void)tmpfs_reg_resize(vp, oldsize, TRUE);
710 
711 out:
712 	MPASS(IMPLIES(error == 0, uio->uio_resid == 0));
713 	MPASS(IMPLIES(error != 0, oldsize == node->tn_size));
714 
715 	vn_rlimit_fsizex_res(uio, r);
716 	return (error);
717 }
718 
719 static int
720 tmpfs_deallocate(struct vop_deallocate_args *v)
721 {
722 	return (tmpfs_reg_punch_hole(v->a_vp, v->a_offset, v->a_len));
723 }
724 
725 static int
726 tmpfs_fsync(struct vop_fsync_args *v)
727 {
728 	struct vnode *vp = v->a_vp;
729 
730 	tmpfs_check_mtime(vp);
731 	tmpfs_update(vp);
732 
733 	return (0);
734 }
735 
736 static int
737 tmpfs_remove(struct vop_remove_args *v)
738 {
739 	struct vnode *dvp = v->a_dvp;
740 	struct vnode *vp = v->a_vp;
741 
742 	int error;
743 	struct tmpfs_dirent *de;
744 	struct tmpfs_mount *tmp;
745 	struct tmpfs_node *dnode;
746 	struct tmpfs_node *node;
747 
748 	if (vp->v_type == VDIR) {
749 		error = EISDIR;
750 		goto out;
751 	}
752 
753 	dnode = VP_TO_TMPFS_DIR(dvp);
754 	node = VP_TO_TMPFS_NODE(vp);
755 	tmp = VFS_TO_TMPFS(vp->v_mount);
756 	de = tmpfs_dir_lookup(dnode, node, v->a_cnp);
757 	MPASS(de != NULL);
758 
759 	/* Files marked as immutable or append-only cannot be deleted. */
760 	if ((node->tn_flags & (IMMUTABLE | APPEND | NOUNLINK)) ||
761 	    (dnode->tn_flags & APPEND)) {
762 		error = EPERM;
763 		goto out;
764 	}
765 
766 	/* Remove the entry from the directory; as it is a file, we do not
767 	 * have to change the number of hard links of the directory. */
768 	tmpfs_dir_detach(dvp, de);
769 	if (v->a_cnp->cn_flags & DOWHITEOUT)
770 		tmpfs_dir_whiteout_add(dvp, v->a_cnp);
771 
772 	/* Free the directory entry we just deleted.  Note that the node
773 	 * referred to by it will not be removed until the vnode is really
774 	 * reclaimed. */
775 	tmpfs_free_dirent(tmp, de);
776 
777 	node->tn_status |= TMPFS_NODE_CHANGED;
778 	node->tn_accessed = true;
779 	error = 0;
780 
781 out:
782 	return (error);
783 }
784 
785 static int
786 tmpfs_link(struct vop_link_args *v)
787 {
788 	struct vnode *dvp = v->a_tdvp;
789 	struct vnode *vp = v->a_vp;
790 	struct componentname *cnp = v->a_cnp;
791 
792 	int error;
793 	struct tmpfs_dirent *de;
794 	struct tmpfs_node *node;
795 
796 	MPASS(dvp != vp); /* XXX When can this be false? */
797 	node = VP_TO_TMPFS_NODE(vp);
798 
799 	/* Ensure that we do not overflow the maximum number of links imposed
800 	 * by the system. */
801 	MPASS(node->tn_links <= TMPFS_LINK_MAX);
802 	if (node->tn_links == TMPFS_LINK_MAX) {
803 		error = EMLINK;
804 		goto out;
805 	}
806 
807 	/* We cannot create links to files marked immutable or append-only. */
808 	if (node->tn_flags & (IMMUTABLE | APPEND)) {
809 		error = EPERM;
810 		goto out;
811 	}
812 
813 	/* Allocate a new directory entry to represent the node. */
814 	error = tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), node,
815 	    cnp->cn_nameptr, cnp->cn_namelen, &de);
816 	if (error != 0)
817 		goto out;
818 
819 	/* Insert the new directory entry into the appropriate directory. */
820 	if (cnp->cn_flags & ISWHITEOUT)
821 		tmpfs_dir_whiteout_remove(dvp, cnp);
822 	tmpfs_dir_attach(dvp, de);
823 
824 	/* vp link count has changed, so update node times. */
825 	node->tn_status |= TMPFS_NODE_CHANGED;
826 	tmpfs_update(vp);
827 
828 	error = 0;
829 
830 out:
831 	return (error);
832 }
833 
834 /*
835  * We acquire all but fdvp locks using non-blocking acquisitions.  If we
836  * fail to acquire any lock in the path we will drop all held locks,
837  * acquire the new lock in a blocking fashion, and then release it and
838  * restart the rename.  This acquire/release step ensures that we do not
839  * spin on a lock waiting for release.  On error release all vnode locks
840  * and decrement references the way tmpfs_rename() would do.
841  */
842 static int
843 tmpfs_rename_relock(struct vnode *fdvp, struct vnode **fvpp,
844     struct vnode *tdvp, struct vnode **tvpp,
845     struct componentname *fcnp, struct componentname *tcnp)
846 {
847 	struct vnode *nvp;
848 	struct mount *mp;
849 	struct tmpfs_dirent *de;
850 	int error, restarts = 0;
851 
852 	VOP_UNLOCK(tdvp);
853 	if (*tvpp != NULL && *tvpp != tdvp)
854 		VOP_UNLOCK(*tvpp);
855 	mp = fdvp->v_mount;
856 
857 relock:
858 	restarts += 1;
859 	error = vn_lock(fdvp, LK_EXCLUSIVE);
860 	if (error)
861 		goto releout;
862 	if (vn_lock(tdvp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
863 		VOP_UNLOCK(fdvp);
864 		error = vn_lock(tdvp, LK_EXCLUSIVE);
865 		if (error)
866 			goto releout;
867 		VOP_UNLOCK(tdvp);
868 		goto relock;
869 	}
870 	/*
871 	 * Re-resolve fvp to be certain it still exists and fetch the
872 	 * correct vnode.
873 	 */
874 	de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(fdvp), NULL, fcnp);
875 	if (de == NULL) {
876 		VOP_UNLOCK(fdvp);
877 		VOP_UNLOCK(tdvp);
878 		if ((fcnp->cn_flags & ISDOTDOT) != 0 ||
879 		    (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.'))
880 			error = EINVAL;
881 		else
882 			error = ENOENT;
883 		goto releout;
884 	}
885 	error = tmpfs_alloc_vp(mp, de->td_node, LK_EXCLUSIVE | LK_NOWAIT, &nvp);
886 	if (error != 0) {
887 		VOP_UNLOCK(fdvp);
888 		VOP_UNLOCK(tdvp);
889 		if (error != EBUSY)
890 			goto releout;
891 		error = tmpfs_alloc_vp(mp, de->td_node, LK_EXCLUSIVE, &nvp);
892 		if (error != 0)
893 			goto releout;
894 		VOP_UNLOCK(nvp);
895 		/*
896 		 * Concurrent rename race.
897 		 */
898 		if (nvp == tdvp) {
899 			vrele(nvp);
900 			error = EINVAL;
901 			goto releout;
902 		}
903 		vrele(*fvpp);
904 		*fvpp = nvp;
905 		goto relock;
906 	}
907 	vrele(*fvpp);
908 	*fvpp = nvp;
909 	VOP_UNLOCK(*fvpp);
910 	/*
911 	 * Re-resolve tvp and acquire the vnode lock if present.
912 	 */
913 	de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(tdvp), NULL, tcnp);
914 	/*
915 	 * If tvp disappeared we just carry on.
916 	 */
917 	if (de == NULL && *tvpp != NULL) {
918 		vrele(*tvpp);
919 		*tvpp = NULL;
920 	}
921 	/*
922 	 * Get the tvp ino if the lookup succeeded.  We may have to restart
923 	 * if the non-blocking acquire fails.
924 	 */
925 	if (de != NULL) {
926 		nvp = NULL;
927 		error = tmpfs_alloc_vp(mp, de->td_node,
928 		    LK_EXCLUSIVE | LK_NOWAIT, &nvp);
929 		if (*tvpp != NULL)
930 			vrele(*tvpp);
931 		*tvpp = nvp;
932 		if (error != 0) {
933 			VOP_UNLOCK(fdvp);
934 			VOP_UNLOCK(tdvp);
935 			if (error != EBUSY)
936 				goto releout;
937 			error = tmpfs_alloc_vp(mp, de->td_node, LK_EXCLUSIVE,
938 			    &nvp);
939 			if (error != 0)
940 				goto releout;
941 			VOP_UNLOCK(nvp);
942 			/*
943 			 * fdvp contains fvp, thus tvp (=fdvp) is not empty.
944 			 */
945 			if (nvp == fdvp) {
946 				error = ENOTEMPTY;
947 				goto releout;
948 			}
949 			goto relock;
950 		}
951 	}
952 	tmpfs_rename_restarts += restarts;
953 
954 	return (0);
955 
956 releout:
957 	vrele(fdvp);
958 	vrele(*fvpp);
959 	vrele(tdvp);
960 	if (*tvpp != NULL)
961 		vrele(*tvpp);
962 	tmpfs_rename_restarts += restarts;
963 
964 	return (error);
965 }
966 
967 static int
968 tmpfs_rename(struct vop_rename_args *v)
969 {
970 	struct vnode *fdvp = v->a_fdvp;
971 	struct vnode *fvp = v->a_fvp;
972 	struct componentname *fcnp = v->a_fcnp;
973 	struct vnode *tdvp = v->a_tdvp;
974 	struct vnode *tvp = v->a_tvp;
975 	struct componentname *tcnp = v->a_tcnp;
976 	char *newname;
977 	struct tmpfs_dirent *de;
978 	struct tmpfs_mount *tmp;
979 	struct tmpfs_node *fdnode;
980 	struct tmpfs_node *fnode;
981 	struct tmpfs_node *tnode;
982 	struct tmpfs_node *tdnode;
983 	int error;
984 	bool want_seqc_end;
985 
986 	want_seqc_end = false;
987 
988 	/*
989 	 * Disallow cross-device renames.
990 	 * XXX Why isn't this done by the caller?
991 	 */
992 	if (fvp->v_mount != tdvp->v_mount ||
993 	    (tvp != NULL && fvp->v_mount != tvp->v_mount)) {
994 		error = EXDEV;
995 		goto out;
996 	}
997 
998 	/* If source and target are the same file, there is nothing to do. */
999 	if (fvp == tvp) {
1000 		error = 0;
1001 		goto out;
1002 	}
1003 
1004 	/*
1005 	 * If we need to move the directory between entries, lock the
1006 	 * source so that we can safely operate on it.
1007 	 */
1008 	if (fdvp != tdvp && fdvp != tvp) {
1009 		if (vn_lock(fdvp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
1010 			error = tmpfs_rename_relock(fdvp, &fvp, tdvp, &tvp,
1011 			    fcnp, tcnp);
1012 			if (error != 0)
1013 				return (error);
1014 			ASSERT_VOP_ELOCKED(fdvp,
1015 			    "tmpfs_rename: fdvp not locked");
1016 			ASSERT_VOP_ELOCKED(tdvp,
1017 			    "tmpfs_rename: tdvp not locked");
1018 			if (tvp != NULL)
1019 				ASSERT_VOP_ELOCKED(tvp,
1020 				    "tmpfs_rename: tvp not locked");
1021 			if (fvp == tvp) {
1022 				error = 0;
1023 				goto out_locked;
1024 			}
1025 		}
1026 	}
1027 
1028 	/*
1029 	 * Avoid manipulating '.' and '..' entries.
1030 	 */
1031 	if ((fcnp->cn_flags & ISDOTDOT) != 0 ||
1032 	    (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.')) {
1033 		error = EINVAL;
1034 		goto out_locked;
1035 	}
1036 
1037 	if (tvp != NULL)
1038 		vn_seqc_write_begin(tvp);
1039 	vn_seqc_write_begin(tdvp);
1040 	vn_seqc_write_begin(fvp);
1041 	vn_seqc_write_begin(fdvp);
1042 	want_seqc_end = true;
1043 
1044 	tmp = VFS_TO_TMPFS(tdvp->v_mount);
1045 	tdnode = VP_TO_TMPFS_DIR(tdvp);
1046 	tnode = (tvp == NULL) ? NULL : VP_TO_TMPFS_NODE(tvp);
1047 	fdnode = VP_TO_TMPFS_DIR(fdvp);
1048 	fnode = VP_TO_TMPFS_NODE(fvp);
1049 	de = tmpfs_dir_lookup(fdnode, fnode, fcnp);
1050 
1051 	/*
1052 	 * Entry can disappear before we lock fdvp.
1053 	 */
1054 	if (de == NULL) {
1055 		if ((fcnp->cn_flags & ISDOTDOT) != 0 ||
1056 		    (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.'))
1057 			error = EINVAL;
1058 		else
1059 			error = ENOENT;
1060 		goto out_locked;
1061 	}
1062 	MPASS(de->td_node == fnode);
1063 
1064 	/*
1065 	 * If renaming a directory onto another preexisting directory,
1066 	 * ensure that the target directory is empty so that its
1067 	 * removal causes no side effects.
1068 	 * kern_rename() guarantees that the destination is a directory
1069 	 * if the source is one.
1070 	 */
1071 	if (tvp != NULL) {
1072 		MPASS(tnode != NULL);
1073 
1074 		if ((tnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
1075 		    (tdnode->tn_flags & (APPEND | IMMUTABLE))) {
1076 			error = EPERM;
1077 			goto out_locked;
1078 		}
1079 
1080 		if (fnode->tn_type == VDIR && tnode->tn_type == VDIR) {
1081 			if (tnode->tn_size != 0 &&
1082 			    ((tcnp->cn_flags & IGNOREWHITEOUT) == 0 ||
1083 			    tnode->tn_size > tnode->tn_dir.tn_wht_size)) {
1084 				error = ENOTEMPTY;
1085 				goto out_locked;
1086 			}
1087 		} else if (fnode->tn_type == VDIR && tnode->tn_type != VDIR) {
1088 			error = ENOTDIR;
1089 			goto out_locked;
1090 		} else if (fnode->tn_type != VDIR && tnode->tn_type == VDIR) {
1091 			error = EISDIR;
1092 			goto out_locked;
1093 		} else {
1094 			MPASS(fnode->tn_type != VDIR &&
1095 				tnode->tn_type != VDIR);
1096 		}
1097 	}
1098 
1099 	if ((fnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))
1100 	    || (fdnode->tn_flags & (APPEND | IMMUTABLE))) {
1101 		error = EPERM;
1102 		goto out_locked;
1103 	}
1104 
1105 	/*
1106 	 * Ensure that we have enough memory to hold the new name, if it
1107 	 * has to be changed.
1108 	 */
1109 	if (fcnp->cn_namelen != tcnp->cn_namelen ||
1110 	    bcmp(fcnp->cn_nameptr, tcnp->cn_nameptr, fcnp->cn_namelen) != 0) {
1111 		newname = malloc(tcnp->cn_namelen, M_TMPFSNAME, M_WAITOK);
1112 	} else
1113 		newname = NULL;
1114 
1115 	/*
1116 	 * If the node is being moved to another directory, we have to do
1117 	 * the move.
1118 	 */
1119 	if (fdnode != tdnode) {
1120 		/*
1121 		 * In case we are moving a directory, we have to adjust its
1122 		 * parent to point to the new parent.
1123 		 */
1124 		if (de->td_node->tn_type == VDIR) {
1125 			struct tmpfs_node *n;
1126 
1127 			TMPFS_NODE_LOCK(fnode);
1128 			error = tmpfs_access_locked(fvp, fnode, VWRITE,
1129 			    tcnp->cn_cred);
1130 			TMPFS_NODE_UNLOCK(fnode);
1131 			if (error) {
1132 				if (newname != NULL)
1133 					free(newname, M_TMPFSNAME);
1134 				goto out_locked;
1135 			}
1136 
1137 			/*
1138 			 * Ensure the target directory is not a descendant of
1139 			 * the directory being moved.  Otherwise, we'd end up
1140 			 * with stale nodes.
1141 			 */
1142 			n = tdnode;
1143 			/*
1144 			 * TMPFS_LOCK guarantees that no nodes are freed while
1145 			 * traversing the list. Nodes can only be marked as
1146 			 * removed: tn_parent == NULL.
1147 			 */
1148 			TMPFS_LOCK(tmp);
1149 			TMPFS_NODE_LOCK(n);
1150 			while (n != n->tn_dir.tn_parent) {
1151 				struct tmpfs_node *parent;
1152 
1153 				if (n == fnode) {
1154 					TMPFS_NODE_UNLOCK(n);
1155 					TMPFS_UNLOCK(tmp);
1156 					error = EINVAL;
1157 					if (newname != NULL)
1158 						free(newname, M_TMPFSNAME);
1159 					goto out_locked;
1160 				}
1161 				parent = n->tn_dir.tn_parent;
1162 				TMPFS_NODE_UNLOCK(n);
1163 				if (parent == NULL) {
1164 					n = NULL;
1165 					break;
1166 				}
1167 				TMPFS_NODE_LOCK(parent);
1168 				if (parent->tn_dir.tn_parent == NULL) {
1169 					TMPFS_NODE_UNLOCK(parent);
1170 					n = NULL;
1171 					break;
1172 				}
1173 				n = parent;
1174 			}
1175 			TMPFS_UNLOCK(tmp);
1176 			if (n == NULL) {
1177 				error = EINVAL;
1178 				if (newname != NULL)
1179 					free(newname, M_TMPFSNAME);
1180 				goto out_locked;
1181 			}
1182 			TMPFS_NODE_UNLOCK(n);
1183 
1184 			/* Adjust the parent pointer. */
1185 			TMPFS_VALIDATE_DIR(fnode);
1186 			TMPFS_NODE_LOCK(de->td_node);
1187 			de->td_node->tn_dir.tn_parent = tdnode;
1188 			TMPFS_NODE_UNLOCK(de->td_node);
1189 
1190 			/*
1191 			 * As a result of changing the target of the '..'
1192 			 * entry, the link count of the source and target
1193 			 * directories has to be adjusted.
1194 			 */
1195 			TMPFS_NODE_LOCK(tdnode);
1196 			TMPFS_ASSERT_LOCKED(tdnode);
1197 			tdnode->tn_links++;
1198 			TMPFS_NODE_UNLOCK(tdnode);
1199 
1200 			TMPFS_NODE_LOCK(fdnode);
1201 			TMPFS_ASSERT_LOCKED(fdnode);
1202 			fdnode->tn_links--;
1203 			TMPFS_NODE_UNLOCK(fdnode);
1204 		}
1205 	}
1206 
1207 	/*
1208 	 * Do the move: just remove the entry from the source directory
1209 	 * and insert it into the target one.
1210 	 */
1211 	tmpfs_dir_detach(fdvp, de);
1212 
1213 	if (fcnp->cn_flags & DOWHITEOUT)
1214 		tmpfs_dir_whiteout_add(fdvp, fcnp);
1215 	if (tcnp->cn_flags & ISWHITEOUT)
1216 		tmpfs_dir_whiteout_remove(tdvp, tcnp);
1217 
1218 	/*
1219 	 * If the name has changed, we need to make it effective by changing
1220 	 * it in the directory entry.
1221 	 */
1222 	if (newname != NULL) {
1223 		MPASS(tcnp->cn_namelen <= MAXNAMLEN);
1224 
1225 		free(de->ud.td_name, M_TMPFSNAME);
1226 		de->ud.td_name = newname;
1227 		tmpfs_dirent_init(de, tcnp->cn_nameptr, tcnp->cn_namelen);
1228 
1229 		fnode->tn_status |= TMPFS_NODE_CHANGED;
1230 		tdnode->tn_status |= TMPFS_NODE_MODIFIED;
1231 	}
1232 
1233 	/*
1234 	 * If we are overwriting an entry, we have to remove the old one
1235 	 * from the target directory.
1236 	 */
1237 	if (tvp != NULL) {
1238 		struct tmpfs_dirent *tde;
1239 
1240 		/* Remove the old entry from the target directory. */
1241 		tde = tmpfs_dir_lookup(tdnode, tnode, tcnp);
1242 		tmpfs_dir_detach(tdvp, tde);
1243 
1244 		/*
1245 		 * If we are overwriting a directory, per the ENOTEMPTY check
1246 		 * above it must either be empty or contain only whiteout
1247 		 * entries.  In the latter case (which can only happen if
1248 		 * IGNOREWHITEOUT was passed in tcnp->cn_flags), clear the
1249 		 * whiteout entries to avoid leaking memory.
1250 		 */
1251 		if (tnode->tn_type == VDIR && tnode->tn_size > 0)
1252 			tmpfs_dir_clear_whiteouts(tvp);
1253 
1254 		/* Update node's ctime because of possible hardlinks. */
1255 		tnode->tn_status |= TMPFS_NODE_CHANGED;
1256 		tmpfs_update(tvp);
1257 
1258 		/*
1259 		 * node referred to by it will not be removed until the vnode is
1260 		 * node referred by it will not be removed until the vnode is
1261 		 * really reclaimed.
1262 		 */
1263 		tmpfs_free_dirent(VFS_TO_TMPFS(tvp->v_mount), tde);
1264 	}
1265 
1266 	tmpfs_dir_attach(tdvp, de);
1267 
1268 	if (tmpfs_use_nc(fvp)) {
1269 		cache_vop_rename(fdvp, fvp, tdvp, tvp, fcnp, tcnp);
1270 	}
1271 
1272 	error = 0;
1273 
1274 out_locked:
1275 	if (fdvp != tdvp && fdvp != tvp)
1276 		VOP_UNLOCK(fdvp);
1277 
1278 out:
1279 	if (want_seqc_end) {
1280 		if (tvp != NULL)
1281 			vn_seqc_write_end(tvp);
1282 		vn_seqc_write_end(tdvp);
1283 		vn_seqc_write_end(fvp);
1284 		vn_seqc_write_end(fdvp);
1285 	}
1286 
1287 	/*
1288 	 * Release target nodes.
1289 	 * XXX: I don't understand when tdvp can be the same as tvp, but
1290 	 * other code takes care of this...
1291 	 */
1292 	if (tdvp == tvp)
1293 		vrele(tdvp);
1294 	else
1295 		vput(tdvp);
1296 	if (tvp != NULL)
1297 		vput(tvp);
1298 
1299 	/* Release source nodes. */
1300 	vrele(fdvp);
1301 	vrele(fvp);
1302 
1303 	return (error);
1304 }
1305 
1306 static int
1307 tmpfs_mkdir(struct vop_mkdir_args *v)
1308 {
1309 	struct vnode *dvp = v->a_dvp;
1310 	struct vnode **vpp = v->a_vpp;
1311 	struct componentname *cnp = v->a_cnp;
1312 	struct vattr *vap = v->a_vap;
1313 
1314 	MPASS(vap->va_type == VDIR);
1315 
1316 	return (tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL));
1317 }
1318 
1319 static int
1320 tmpfs_rmdir(struct vop_rmdir_args *v)
1321 {
1322 	struct vnode *dvp = v->a_dvp;
1323 	struct vnode *vp = v->a_vp;
1324 	struct componentname *cnp = v->a_cnp;
1325 
1326 	int error;
1327 	struct tmpfs_dirent *de;
1328 	struct tmpfs_mount *tmp;
1329 	struct tmpfs_node *dnode;
1330 	struct tmpfs_node *node;
1331 
1332 	tmp = VFS_TO_TMPFS(dvp->v_mount);
1333 	dnode = VP_TO_TMPFS_DIR(dvp);
1334 	node = VP_TO_TMPFS_DIR(vp);
1335 
1336 	/*
1337 	 * The directory must be empty or, when IGNOREWHITEOUT is requested,
1338 	 * contain nothing but whiteout entries; otherwise it cannot be removed.
1339 	 */
1340 	if (node->tn_size != 0 &&
1341 	    ((cnp->cn_flags & IGNOREWHITEOUT) == 0 ||
1342 	    node->tn_size > node->tn_dir.tn_wht_size)) {
1343 		error = ENOTEMPTY;
1344 		goto out;
1345 	}
1346 
1347 	if ((dnode->tn_flags & APPEND)
1348 	    || (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))) {
1349 		error = EPERM;
1350 		goto out;
1351 	}
1352 
1353 	/* This invariant holds only if we are not trying to remove "..".
1354 	 * We checked for that above so this is safe now. */
1355 	MPASS(node->tn_dir.tn_parent == dnode);
1356 
1357 	/* Get the directory entry associated with node (vp).  This was
1358 	 * filled by tmpfs_lookup while looking up the entry. */
1359 	de = tmpfs_dir_lookup(dnode, node, cnp);
1360 	MPASS(TMPFS_DIRENT_MATCHES(de,
1361 	    cnp->cn_nameptr,
1362 	    cnp->cn_namelen));
1363 
1364 	/* Check flags to see if we are allowed to remove the directory. */
1365 	if ((dnode->tn_flags & APPEND) != 0 ||
1366 	    (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) != 0) {
1367 		error = EPERM;
1368 		goto out;
1369 	}
1370 
1371 	/* Detach the directory entry from the directory (dnode). */
1372 	tmpfs_dir_detach(dvp, de);
1373 
1374 	/*
1375 	 * If we are removing a directory, per the ENOTEMPTY check above it
1376 	 * must either be empty or contain only whiteout entries.  In the
1377 	 * latter case (which can only happen if IGNOREWHITEOUT was passed
1378 	 * in cnp->cn_flags), clear the whiteout entries to avoid leaking
1379 	 * memory.
1380 	 */
1381 	if (node->tn_size > 0)
1382 		tmpfs_dir_clear_whiteouts(vp);
1383 
1384 	if (cnp->cn_flags & DOWHITEOUT)
1385 		tmpfs_dir_whiteout_add(dvp, cnp);
1386 
1387 	/* No vnode should be allocated for this entry from this point */
1388 	TMPFS_NODE_LOCK(node);
1389 	node->tn_links--;
1390 	node->tn_dir.tn_parent = NULL;
1391 	node->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1392 	node->tn_accessed = true;
1393 
1394 	TMPFS_NODE_UNLOCK(node);
1395 
1396 	TMPFS_NODE_LOCK(dnode);
1397 	dnode->tn_links--;
1398 	dnode->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1399 	dnode->tn_accessed = true;
1400 	TMPFS_NODE_UNLOCK(dnode);
1401 
1402 	if (tmpfs_use_nc(dvp)) {
1403 		cache_vop_rmdir(dvp, vp);
1404 	}
1405 
1406 	/* Free the directory entry we just deleted.  Note that the node
1407 	 * referred to by it will not be removed until the vnode is really
1408 	 * reclaimed. */
1409 	tmpfs_free_dirent(tmp, de);
1410 
1411 	/* The deleted directory's vnode is released by the caller; the node
1412 	 * itself will not be destroyed until that vnode is reclaimed. */
1413 
1414 	dnode->tn_status |= TMPFS_NODE_CHANGED;
1415 	tmpfs_update(dvp);
1416 
1417 	error = 0;
1418 
1419 out:
1420 	return (error);
1421 }
1422 
1423 static int
1424 tmpfs_symlink(struct vop_symlink_args *v)
1425 {
1426 	struct vnode *dvp = v->a_dvp;
1427 	struct vnode **vpp = v->a_vpp;
1428 	struct componentname *cnp = v->a_cnp;
1429 	struct vattr *vap = v->a_vap;
1430 	const char *target = v->a_target;
1431 
1432 #ifdef notyet /* XXX FreeBSD BUG: kern_symlink is not setting VLNK */
1433 	MPASS(vap->va_type == VLNK);
1434 #else
1435 	vap->va_type = VLNK;
1436 #endif
1437 
1438 	return (tmpfs_alloc_file(dvp, vpp, vap, cnp, target));
1439 }
1440 
1441 static int
1442 tmpfs_readdir(struct vop_readdir_args *va)
1443 {
1444 	struct vnode *vp;
1445 	struct uio *uio;
1446 	struct tmpfs_mount *tm;
1447 	struct tmpfs_node *node;
1448 	uint64_t **cookies;
1449 	int *eofflag, *ncookies;
1450 	ssize_t startresid;
1451 	int error, maxcookies;
1452 
1453 	vp = va->a_vp;
1454 	uio = va->a_uio;
1455 	eofflag = va->a_eofflag;
1456 	cookies = va->a_cookies;
1457 	ncookies = va->a_ncookies;
1458 
1459 	/* This operation only makes sense on directory nodes. */
1460 	if (vp->v_type != VDIR)
1461 		return (ENOTDIR);
1462 
1463 	maxcookies = 0;
1464 	node = VP_TO_TMPFS_DIR(vp);
1465 	tm = VFS_TO_TMPFS(vp->v_mount);
1466 
1467 	startresid = uio->uio_resid;
1468 
1469 	/* Allocate cookies for NFS and compat modules. */
1470 	if (cookies != NULL && ncookies != NULL) {
1471 		maxcookies = howmany(node->tn_size,
1472 		    sizeof(struct tmpfs_dirent)) + 2;
1473 		*cookies = malloc(maxcookies * sizeof(**cookies), M_TEMP,
1474 		    M_WAITOK);
1475 		*ncookies = 0;
1476 	}
1477 
1478 	if (cookies == NULL)
1479 		error = tmpfs_dir_getdents(tm, node, uio, 0, NULL, NULL);
1480 	else
1481 		error = tmpfs_dir_getdents(tm, node, uio, maxcookies, *cookies,
1482 		    ncookies);
1483 
1484 	/* Buffer was filled without hitting EOF. */
1485 	if (error == EJUSTRETURN)
1486 		error = (uio->uio_resid != startresid) ? 0 : EINVAL;
1487 
1488 	if (error != 0 && cookies != NULL && ncookies != NULL) {
1489 		free(*cookies, M_TEMP);
1490 		*cookies = NULL;
1491 		*ncookies = 0;
1492 	}
1493 
1494 	if (eofflag != NULL)
1495 		*eofflag =
1496 		    (error == 0 && uio->uio_offset == TMPFS_DIRCOOKIE_EOF);
1497 
1498 	return (error);
1499 }
1500 
1501 static int
1502 tmpfs_readlink(struct vop_readlink_args *v)
1503 {
1504 	struct vnode *vp = v->a_vp;
1505 	struct uio *uio = v->a_uio;
1506 
1507 	int error;
1508 	struct tmpfs_node *node;
1509 
1510 	MPASS(uio->uio_offset == 0);
1511 	MPASS(vp->v_type == VLNK);
1512 
1513 	node = VP_TO_TMPFS_NODE(vp);
1514 
1515 	error = uiomove(node->tn_link_target, MIN(node->tn_size, uio->uio_resid),
1516 	    uio);
1517 	tmpfs_set_accessed(VFS_TO_TMPFS(vp->v_mount), node);
1518 
1519 	return (error);
1520 }
1521 
1522 /*
1523  * VOP_FPLOOKUP_SYMLINK routines are subject to special circumstances, see
1524  * the comment above cache_fplookup for details.
1525  *
1526  * Check tmpfs_alloc_node for tmpfs-specific synchronisation notes.
1527  */
1528 static int
1529 tmpfs_fplookup_symlink(struct vop_fplookup_symlink_args *v)
1530 {
1531 	struct vnode *vp;
1532 	struct tmpfs_node *node;
1533 	char *symlink;
1534 
1535 	vp = v->a_vp;
1536 	node = VP_TO_TMPFS_NODE_SMR(vp);
1537 	if (__predict_false(node == NULL))
1538 		return (EAGAIN);
1539 	if (!atomic_load_char(&node->tn_link_smr))
1540 		return (EAGAIN);
1541 	symlink = atomic_load_ptr(&node->tn_link_target);
1542 	if (symlink == NULL)
1543 		return (EAGAIN);
1544 
1545 	return (cache_symlink_resolve(v->a_fpl, symlink, node->tn_size));
1546 }
1547 
1548 static int
1549 tmpfs_inactive(struct vop_inactive_args *v)
1550 {
1551 	struct vnode *vp;
1552 	struct tmpfs_node *node;
1553 
1554 	vp = v->a_vp;
1555 	node = VP_TO_TMPFS_NODE(vp);
1556 	if (node->tn_links == 0)
1557 		vrecycle(vp);
1558 	else
1559 		tmpfs_check_mtime(vp);
1560 	return (0);
1561 }
1562 
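/*
 * Decide whether VOP_INACTIVE is worth calling: only when the last link is
 * gone, or when a regular file may carry dirty pages whose modification
 * time still needs to be synced (object generation differs from its clean
 * generation).
 */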
1563 static int
1564 tmpfs_need_inactive(struct vop_need_inactive_args *ap)
1565 {
1566 	struct vnode *vp;
1567 	struct tmpfs_node *node;
1568 	struct vm_object *obj;
1569 
1570 	vp = ap->a_vp;
1571 	node = VP_TO_TMPFS_NODE(vp);
1572 	if (node->tn_links == 0)
1573 		goto need;
1574 	if (vp->v_type == VREG) {
1575 		obj = vp->v_object;
1576 		if (obj->generation != obj->cleangeneration)
1577 			goto need;
1578 	}
1579 	return (0);
1580 need:
1581 	return (1);
1582 }
1583 
1584 int
1585 tmpfs_reclaim(struct vop_reclaim_args *v)
1586 {
1587 	struct vnode *vp;
1588 	struct tmpfs_mount *tmp;
1589 	struct tmpfs_node *node;
1590 	bool unlock;
1591 
1592 	vp = v->a_vp;
1593 	node = VP_TO_TMPFS_NODE(vp);
1594 	tmp = VFS_TO_TMPFS(vp->v_mount);
1595 
1596 	if (vp->v_type == VREG)
1597 		tmpfs_destroy_vobject(vp, node->tn_reg.tn_aobj);
1598 	vp->v_object = NULL;
1599 
1600 	TMPFS_LOCK(tmp);
1601 	TMPFS_NODE_LOCK(node);
1602 	tmpfs_free_vp(vp);
1603 
1604 	/*
1605 	 * If the node referenced by this vnode was deleted by the user,
1606 	 * we must free its associated data structures (now that the vnode
1607 	 * is being reclaimed).
1608 	 */
1609 	unlock = true;
1610 	if (node->tn_links == 0 &&
1611 	    (node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0) {
1612 		node->tn_vpstate = TMPFS_VNODE_DOOMED;
1613 		unlock = !tmpfs_free_node_locked(tmp, node, true);
1614 	}
1615 
1616 	if (unlock) {
1617 		TMPFS_NODE_UNLOCK(node);
1618 		TMPFS_UNLOCK(tmp);
1619 	}
1620 
1621 	MPASS(vp->v_data == NULL);
1622 	return (0);
1623 }
1624 
1625 int
1626 tmpfs_print(struct vop_print_args *v)
1627 {
1628 	struct vnode *vp = v->a_vp;
1629 
1630 	struct tmpfs_node *node;
1631 
1632 	node = VP_TO_TMPFS_NODE(vp);
1633 
1634 	printf("tag VT_TMPFS, tmpfs_node %p, flags 0x%lx, links %jd\n",
1635 	    node, node->tn_flags, (uintmax_t)node->tn_links);
1636 	printf("\tmode 0%o, owner %d, group %d, size %jd, status 0x%x\n",
1637 	    node->tn_mode, node->tn_uid, node->tn_gid,
1638 	    (intmax_t)node->tn_size, node->tn_status);
1639 
1640 	if (vp->v_type == VFIFO)
1641 		fifo_printinfo(vp);
1642 
1643 	printf("\n");
1644 
1645 	return (0);
1646 }
1647 
1648 int
1649 tmpfs_pathconf(struct vop_pathconf_args *v)
1650 {
1651 	struct vnode *vp = v->a_vp;
1652 	int name = v->a_name;
1653 	long *retval = v->a_retval;
1654 
1655 	int error;
1656 
1657 	error = 0;
1658 
1659 	switch (name) {
1660 	case _PC_LINK_MAX:
1661 		*retval = TMPFS_LINK_MAX;
1662 		break;
1663 
1664 	case _PC_SYMLINK_MAX:
1665 		*retval = MAXPATHLEN;
1666 		break;
1667 
1668 	case _PC_NAME_MAX:
1669 		*retval = NAME_MAX;
1670 		break;
1671 
1672 	case _PC_PIPE_BUF:
1673 		if (vp->v_type == VDIR || vp->v_type == VFIFO)
1674 			*retval = PIPE_BUF;
1675 		else
1676 			error = EINVAL;
1677 		break;
1678 
1679 	case _PC_CHOWN_RESTRICTED:
1680 		*retval = 1;
1681 		break;
1682 
1683 	case _PC_NO_TRUNC:
1684 		*retval = 1;
1685 		break;
1686 
1687 	case _PC_SYNC_IO:
1688 		*retval = 1;
1689 		break;
1690 
1691 	case _PC_FILESIZEBITS:
1692 		*retval = 64;
1693 		break;
1694 
1695 	case _PC_MIN_HOLE_SIZE:
1696 		*retval = PAGE_SIZE;
1697 		break;
1698 
1699 	default:
1700 		error = vop_stdpathconf(v);
1701 	}
1702 
1703 	return (error);
1704 }
1705 
1706 static int
1707 tmpfs_vptofh(struct vop_vptofh_args *ap)
1708 /*
1709 vop_vptofh {
1710 	IN struct vnode *a_vp;
1711 	IN struct fid *a_fhp;
1712 };
1713 */
1714 {
1715 	struct tmpfs_fid_data tfd;
1716 	struct tmpfs_node *node;
1717 	struct fid *fhp;
1718 
1719 	node = VP_TO_TMPFS_NODE(ap->a_vp);
1720 	fhp = ap->a_fhp;
1721 	fhp->fid_len = sizeof(tfd);
1722 
1723 	/*
1724 	 * Copy into fid_data from the stack to avoid unaligned pointer use.
1725 	 * See the comment in sys/mount.h on struct fid for details.
1726 	 */
1727 	tfd.tfd_id = node->tn_id;
1728 	tfd.tfd_gen = node->tn_gen;
1729 	memcpy(fhp->fid_data, &tfd, fhp->fid_len);
1730 
1731 	return (0);
1732 }
1733 
1734 static int
1735 tmpfs_whiteout(struct vop_whiteout_args *ap)
1736 {
1737 	struct vnode *dvp = ap->a_dvp;
1738 	struct componentname *cnp = ap->a_cnp;
1739 	struct tmpfs_dirent *de;
1740 
1741 	switch (ap->a_flags) {
1742 	case LOOKUP:
1743 		return (0);
1744 	case CREATE:
1745 		de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(dvp), NULL, cnp);
1746 		if (de != NULL)
1747 			return (de->td_node == NULL ? 0 : EEXIST);
1748 		return (tmpfs_dir_whiteout_add(dvp, cnp));
1749 	case DELETE:
1750 		tmpfs_dir_whiteout_remove(dvp, cnp);
1751 		return (0);
1752 	default:
1753 		panic("tmpfs_whiteout: unknown op");
1754 	}
1755 }
1756 
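/*
 * Scan directory tnp for an entry referencing node tn; used below by the
 * vptocnp code to recover a name for a vnode.
 */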
1757 static int
1758 tmpfs_vptocnp_dir(struct tmpfs_node *tn, struct tmpfs_node *tnp,
1759     struct tmpfs_dirent **pde)
1760 {
1761 	struct tmpfs_dir_cursor dc;
1762 	struct tmpfs_dirent *de;
1763 
1764 	for (de = tmpfs_dir_first(tnp, &dc); de != NULL;
1765 	     de = tmpfs_dir_next(tnp, &dc)) {
1766 		if (de->td_node == tn) {
1767 			*pde = de;
1768 			return (0);
1769 		}
1770 	}
1771 	return (ENOENT);
1772 }
1773 
1774 static int
1775 tmpfs_vptocnp_fill(struct vnode *vp, struct tmpfs_node *tn,
1776     struct tmpfs_node *tnp, char *buf, size_t *buflen, struct vnode **dvp)
1777 {
1778 	struct tmpfs_dirent *de;
1779 	int error, i;
1780 
1781 	error = vn_vget_ino_gen(vp, tmpfs_vn_get_ino_alloc, tnp, LK_SHARED,
1782 	    dvp);
1783 	if (error != 0)
1784 		return (error);
1785 	error = tmpfs_vptocnp_dir(tn, tnp, &de);
1786 	if (error == 0) {
1787 		i = *buflen;
1788 		i -= de->td_namelen;
1789 		if (i < 0) {
1790 			error = ENOMEM;
1791 		} else {
1792 			bcopy(de->ud.td_name, buf + i, de->td_namelen);
1793 			*buflen = i;
1794 		}
1795 	}
1796 	if (error == 0) {
1797 		if (vp != *dvp)
1798 			VOP_UNLOCK(*dvp);
1799 	} else {
1800 		if (vp != *dvp)
1801 			vput(*dvp);
1802 		else
1803 			vrele(vp);
1804 	}
1805 	return (error);
1806 }
1807 
1808 static int
1809 tmpfs_vptocnp(struct vop_vptocnp_args *ap)
1810 {
1811 	struct vnode *vp, **dvp;
1812 	struct tmpfs_node *tn, *tnp, *tnp1;
1813 	struct tmpfs_dirent *de;
1814 	struct tmpfs_mount *tm;
1815 	char *buf;
1816 	size_t *buflen;
1817 	int error;
1818 
1819 	vp = ap->a_vp;
1820 	dvp = ap->a_vpp;
1821 	buf = ap->a_buf;
1822 	buflen = ap->a_buflen;
1823 
1824 	tm = VFS_TO_TMPFS(vp->v_mount);
1825 	tn = VP_TO_TMPFS_NODE(vp);
1826 	if (tn->tn_type == VDIR) {
1827 		tnp = tn->tn_dir.tn_parent;
1828 		if (tnp == NULL)
1829 			return (ENOENT);
1830 		tmpfs_ref_node(tnp);
1831 		error = tmpfs_vptocnp_fill(vp, tn, tn->tn_dir.tn_parent, buf,
1832 		    buflen, dvp);
1833 		tmpfs_free_node(tm, tnp);
1834 		return (error);
1835 	}
1836 restart:
1837 	TMPFS_LOCK(tm);
1838 restart_locked:
1839 	LIST_FOREACH_SAFE(tnp, &tm->tm_nodes_used, tn_entries, tnp1) {
1840 		if (tnp->tn_type != VDIR)
1841 			continue;
1842 		TMPFS_NODE_LOCK(tnp);
1843 		tmpfs_ref_node(tnp);
1844 
1845 		/*
1846 		 * tn_vnode cannot be instantiated while we hold the
1847 		 * node lock, so the directory cannot be changed while
1848 		 * we iterate over it.  Doing the scan first avoids
1849 		 * instantiating a vnode for directories that cannot
1850 		 * reference our node.
1851 		 */
1852 		error = tnp->tn_vnode == NULL ? tmpfs_vptocnp_dir(tn, tnp,
1853 		    &de) : 0;
1854 
1855 		if (error == 0) {
1856 			TMPFS_NODE_UNLOCK(tnp);
1857 			TMPFS_UNLOCK(tm);
1858 			error = tmpfs_vptocnp_fill(vp, tn, tnp, buf, buflen,
1859 			    dvp);
1860 			if (error == 0) {
1861 				tmpfs_free_node(tm, tnp);
1862 				return (0);
1863 			}
1864 			if (VN_IS_DOOMED(vp)) {
1865 				tmpfs_free_node(tm, tnp);
1866 				return (ENOENT);
1867 			}
1868 			TMPFS_LOCK(tm);
1869 			TMPFS_NODE_LOCK(tnp);
1870 		}
1871 		if (tmpfs_free_node_locked(tm, tnp, false)) {
1872 			goto restart;
1873 		} else {
1874 			KASSERT(tnp->tn_refcount > 0,
1875 			    ("node %p refcount zero", tnp));
1876 			if (tnp->tn_attached) {
1877 				tnp1 = LIST_NEXT(tnp, tn_entries);
1878 				TMPFS_NODE_UNLOCK(tnp);
1879 			} else {
1880 				TMPFS_NODE_UNLOCK(tnp);
1881 				goto restart_locked;
1882 			}
1883 		}
1884 	}
1885 	TMPFS_UNLOCK(tm);
1886 	return (ENOENT);
1887 }
1888 
1889 void
1890 tmpfs_extattr_free(struct tmpfs_extattr *ea)
1891 {
1892 	free(ea->ea_name, M_TMPFSEA);
1893 	free(ea->ea_value, M_TMPFSEA);
1894 	free(ea, M_TMPFSEA);
1895 }
1896 
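/*
 * Account a change of "size" bytes of extended attribute memory against the
 * per-mount limits.  Returns false if the change would exceed either the
 * pages available to the mount or the extended attribute memory limit;
 * negative sizes release previously accounted memory.
 */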
1897 static bool
1898 tmpfs_extattr_update_mem(struct tmpfs_mount *tmp, ssize_t size)
1899 {
1900 	TMPFS_LOCK(tmp);
1901 	if (size > 0 &&
1902 	    !tmpfs_pages_check_avail(tmp, howmany(size, PAGE_SIZE))) {
1903 		TMPFS_UNLOCK(tmp);
1904 		return (false);
1905 	}
1906 	if (tmp->tm_ea_memory_inuse + size > tmp->tm_ea_memory_max) {
1907 		TMPFS_UNLOCK(tmp);
1908 		return (false);
1909 	}
1910 	tmp->tm_ea_memory_inuse += size;
1911 	TMPFS_UNLOCK(tmp);
1912 	return (true);
1913 }
1914 
1915 static int
1916 tmpfs_deleteextattr(struct vop_deleteextattr_args *ap)
1917 {
1918 	struct vnode *vp = ap->a_vp;
1919 	struct tmpfs_mount *tmp;
1920 	struct tmpfs_node *node;
1921 	struct tmpfs_extattr *ea;
1922 	size_t namelen;
1923 	ssize_t diff;
1924 	int error;
1925 
1926 	node = VP_TO_TMPFS_NODE(vp);
1927 	tmp = VFS_TO_TMPFS(vp->v_mount);
1928 	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
1929 		return (EOPNOTSUPP);
1930 	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1931 	    ap->a_cred, ap->a_td, VWRITE);
1932 	if (error != 0)
1933 		return (error);
1934 	if (ap->a_name == NULL || ap->a_name[0] == '\0')
1935 		return (EINVAL);
1936 	namelen = strlen(ap->a_name);
1937 	if (namelen > EXTATTR_MAXNAMELEN)
1938 		return (EINVAL);
1939 
1940 	LIST_FOREACH(ea, &node->tn_extattrs, ea_extattrs) {
1941 		if (ea->ea_namespace == ap->a_attrnamespace &&
1942 		    namelen == ea->ea_namelen &&
1943 		    memcmp(ap->a_name, ea->ea_name, namelen) == 0)
1944 			break;
1945 	}
1946 
1947 	if (ea == NULL)
1948 		return (ENOATTR);
1949 	LIST_REMOVE(ea, ea_extattrs);
1950 	diff = -(sizeof(struct tmpfs_extattr) + namelen + ea->ea_size);
1951 	tmpfs_extattr_update_mem(tmp, diff);
1952 	tmpfs_extattr_free(ea);
1953 	return (0);
1954 }
1955 
1956 static int
1957 tmpfs_getextattr(struct vop_getextattr_args *ap)
1958 {
1959 	struct vnode *vp = ap->a_vp;
1960 	struct tmpfs_node *node;
1961 	struct tmpfs_extattr *ea;
1962 	size_t namelen;
1963 	int error;
1964 
1965 	node = VP_TO_TMPFS_NODE(vp);
1966 	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
1967 		return (EOPNOTSUPP);
1968 	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
1969 	    ap->a_cred, ap->a_td, VREAD);
1970 	if (error != 0)
1971 		return (error);
1972 	if (ap->a_name == NULL || ap->a_name[0] == '\0')
1973 		return (EINVAL);
1974 	namelen = strlen(ap->a_name);
1975 	if (namelen > EXTATTR_MAXNAMELEN)
1976 		return (EINVAL);
1977 
1978 	LIST_FOREACH(ea, &node->tn_extattrs, ea_extattrs) {
1979 		if (ea->ea_namespace == ap->a_attrnamespace &&
1980 		    namelen == ea->ea_namelen &&
1981 		    memcmp(ap->a_name, ea->ea_name, namelen) == 0)
1982 			break;
1983 	}
1984 
1985 	if (ea == NULL)
1986 		return (ENOATTR);
1987 	if (ap->a_size != NULL)
1988 		*ap->a_size = ea->ea_size;
1989 	if (ap->a_uio != NULL && ea->ea_size != 0)
1990 		error = uiomove(ea->ea_value, ea->ea_size, ap->a_uio);
1991 	return (error);
1992 }
1993 
1994 static int
1995 tmpfs_listextattr(struct vop_listextattr_args *ap)
1996 {
1997 	struct vnode *vp = ap->a_vp;
1998 	struct tmpfs_node *node;
1999 	struct tmpfs_extattr *ea;
2000 	int error;
2001 
2002 	node = VP_TO_TMPFS_NODE(vp);
2003 	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
2004 		return (EOPNOTSUPP);
2005 	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
2006 	    ap->a_cred, ap->a_td, VREAD);
2007 	if (error != 0)
2008 		return (error);
2009 	if (ap->a_size != NULL)
2010 		*ap->a_size = 0;
2011 
2012 	LIST_FOREACH(ea, &node->tn_extattrs, ea_extattrs) {
2013 		if (ea->ea_namespace != ap->a_attrnamespace)
2014 			continue;
2015 		if (ap->a_size != NULL)
2016 			*ap->a_size += ea->ea_namelen + 1;
2017 		if (ap->a_uio != NULL) {
2018 			error = uiomove(&ea->ea_namelen, 1, ap->a_uio);
2019 			if (error != 0)
2020 				break;
2021 			error = uiomove(ea->ea_name, ea->ea_namelen, ap->a_uio);
2022 			if (error != 0)
2023 				break;
2024 		}
2025 	}
2026 
2027 	return (error);
2028 }
2029 
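/*
 * Create or replace the named extended attribute.  The per-mount
 * extattr memory accounting is charged up front for the difference
 * between the new attribute and any attribute it replaces; ENOSPC is
 * returned if the mount's limit would be exceeded, and the charge is
 * rolled back if copying the value in fails.
 */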
2030 static int
2031 tmpfs_setextattr(struct vop_setextattr_args *ap)
2032 {
2033 	struct vnode *vp = ap->a_vp;
2034 	struct tmpfs_mount *tmp;
2035 	struct tmpfs_node *node;
2036 	struct tmpfs_extattr *ea;
2037 	struct tmpfs_extattr *new_ea;
2038 	size_t attr_size;
2039 	size_t namelen;
2040 	ssize_t diff;
2041 	int error;
2042 
2043 	node = VP_TO_TMPFS_NODE(vp);
2044 	tmp = VFS_TO_TMPFS(vp->v_mount);
2045 	attr_size = ap->a_uio->uio_resid;
2046 	diff = 0;
2047 	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
2048 		return (EOPNOTSUPP);
2049 	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
2050 	    ap->a_cred, ap->a_td, VWRITE);
2051 	if (error != 0)
2052 		return (error);
2053 	if (ap->a_name == NULL || ap->a_name[0] == '\0')
2054 		return (EINVAL);
2055 	namelen = strlen(ap->a_name);
2056 	if (namelen > EXTATTR_MAXNAMELEN)
2057 		return (EINVAL);
2058 
2059 	LIST_FOREACH(ea, &node->tn_extattrs, ea_extattrs) {
2060 		if (ea->ea_namespace == ap->a_attrnamespace &&
2061 		    namelen == ea->ea_namelen &&
2062 		    memcmp(ap->a_name, ea->ea_name, namelen) == 0) {
2063 			diff -= sizeof(struct tmpfs_extattr) + ea->ea_namelen +
2064 			    ea->ea_size;
2065 			break;
2066 		}
2067 	}
2068 
2069 	diff += sizeof(struct tmpfs_extattr) + namelen + attr_size;
2070 	if (!tmpfs_extattr_update_mem(tmp, diff))
2071 		return (ENOSPC);
2072 	new_ea = malloc(sizeof(struct tmpfs_extattr), M_TMPFSEA, M_WAITOK);
2073 	new_ea->ea_namespace = ap->a_attrnamespace;
2074 	new_ea->ea_name = malloc(namelen, M_TMPFSEA, M_WAITOK);
2075 	new_ea->ea_namelen = namelen;
2076 	memcpy(new_ea->ea_name, ap->a_name, namelen);
2077 	if (attr_size != 0) {
2078 		new_ea->ea_value = malloc(attr_size, M_TMPFSEA, M_WAITOK);
2079 		new_ea->ea_size = attr_size;
2080 		error = uiomove(new_ea->ea_value, attr_size, ap->a_uio);
2081 	} else {
2082 		new_ea->ea_value = NULL;
2083 		new_ea->ea_size = 0;
2084 	}
2085 	if (error != 0) {
2086 		tmpfs_extattr_update_mem(tmp, -diff);
2087 		tmpfs_extattr_free(new_ea);
2088 		return (error);
2089 	}
2090 	if (ea != NULL) {
2091 		LIST_REMOVE(ea, ea_extattrs);
2092 		tmpfs_extattr_free(ea);
2093 	}
2094 	LIST_INSERT_HEAD(&node->tn_extattrs, new_ea, ea_extattrs);
2095 	return (0);
2096 }
2097 
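/*
 * Return the first offset at or after noff where the object has data,
 * i.e. where either a resident valid page or a swap block exists.  The
 * caller holds the object lock.  The result may land past EOF; the
 * caller is expected to clamp it against the node size.
 */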
2098 static off_t
2099 tmpfs_seek_data_locked(vm_object_t obj, off_t noff)
2100 {
2101 	vm_page_t m;
2102 	vm_pindex_t p, p_m, p_swp;
2103 
2104 	p = OFF_TO_IDX(noff);
2105 	m = vm_page_find_least(obj, p);
2106 
2107 	/*
2108 	 * Microoptimize the most common case for SEEK_DATA, where
2109 	 * there is no hole and the page at the offset is resident and valid.
2110 	 */
2111 	if (m != NULL && vm_page_any_valid(m) && m->pindex == p)
2112 		return (noff);
2113 
2114 	p_swp = swap_pager_find_least(obj, p);
2115 	if (p_swp == p)
2116 		return (noff);
2117 
2118 	p_m = m == NULL ? obj->size : m->pindex;
2119 	return (IDX_TO_OFF(MIN(p_m, p_swp)));
2120 }
2121 
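/*
 * Advance noff to the beginning of the next page.
 */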
2122 static off_t
2123 tmpfs_seek_next(off_t noff)
2124 {
2125 	return (noff + PAGE_SIZE - (noff & PAGE_MASK));
2126 }
2127 
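/*
 * Clamp an offset against the file size: for SEEK_DATA an offset at or
 * past EOF is an error (ENXIO), while for SEEK_HOLE it is truncated to
 * the file size, since the region past EOF always counts as a hole.
 */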
2128 static int
2129 tmpfs_seek_clamp(struct tmpfs_node *tn, off_t *noff, bool seekdata)
2130 {
2131 	if (*noff < tn->tn_size)
2132 		return (0);
2133 	if (seekdata)
2134 		return (ENXIO);
2135 	*noff = tn->tn_size;
2136 	return (0);
2137 }
2138 
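/*
 * Return the first offset at or after noff that is backed by neither a
 * valid resident page nor a swap block.  The caller holds the object
 * lock and clamps the result against the node size.
 */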
2139 static off_t
2140 tmpfs_seek_hole_locked(vm_object_t obj, off_t noff)
2141 {
2142 	vm_page_t m;
2143 	vm_pindex_t p, p_swp;
2144 
2145 	for (;; noff = tmpfs_seek_next(noff)) {
2146 		/*
2147 		 * Walk over the largest sequential run of valid resident pages.
2148 		 */
2149 		for (m = vm_page_lookup(obj, OFF_TO_IDX(noff));
2150 		    m != NULL && vm_page_any_valid(m);
2151 		    m = vm_page_next(m), noff = tmpfs_seek_next(noff))
2152 			;
2153 
2154 		/*
2155 		 * Found a hole in the object's page queue.  Check if
2156 		 * there is a hole in the swap at the same place.
2157 		 */
2158 		p = OFF_TO_IDX(noff);
2159 		p_swp = swap_pager_find_least(obj, p);
2160 		if (p_swp != p) {
2161 			noff = IDX_TO_OFF(p);
2162 			break;
2163 		}
2164 	}
2165 	return (noff);
2166 }
2167 
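/*
 * Common implementation of FIOSEEKDATA and FIOSEEKHOLE: translate *off
 * into the next data or hole offset of the backing VM object and write
 * the result back through *off on success.
 */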
2168 static int
2169 tmpfs_seek_datahole(struct vnode *vp, off_t *off, bool seekdata)
2170 {
2171 	struct tmpfs_node *tn;
2172 	vm_object_t obj;
2173 	off_t noff;
2174 	int error;
2175 
2176 	if (vp->v_type != VREG)
2177 		return (ENOTTY);
2178 	tn = VP_TO_TMPFS_NODE(vp);
2179 	noff = *off;
2180 	if (noff < 0)
2181 		return (ENXIO);
2182 	error = tmpfs_seek_clamp(tn, &noff, seekdata);
2183 	if (error != 0)
2184 		return (error);
2185 	obj = tn->tn_reg.tn_aobj;
2186 
2187 	VM_OBJECT_RLOCK(obj);
2188 	noff = seekdata ? tmpfs_seek_data_locked(obj, noff) :
2189 	    tmpfs_seek_hole_locked(obj, noff);
2190 	VM_OBJECT_RUNLOCK(obj);
2191 
2192 	error = tmpfs_seek_clamp(tn, &noff, seekdata);
2193 	if (error == 0)
2194 		*off = noff;
2195 	return (error);
2196 }
2197 
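/*
 * Handle the FIOSEEKDATA and FIOSEEKHOLE ioctls, which are how
 * lseek(2)'s SEEK_DATA and SEEK_HOLE requests reach the filesystem;
 * e.g. a userland lseek(fd, 0, SEEK_DATA) arrives here as FIOSEEKDATA.
 * Any other command is rejected with ENOTTY.
 */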
2198 static int
2199 tmpfs_ioctl(struct vop_ioctl_args *ap)
2200 {
2201 	struct vnode *vp = ap->a_vp;
2202 	int error = 0;
2203 
2204 	switch (ap->a_command) {
2205 	case FIOSEEKDATA:
2206 	case FIOSEEKHOLE:
2207 		error = vn_lock(vp, LK_SHARED);
2208 		if (error != 0) {
2209 			error = EBADF;
2210 			break;
2211 		}
2212 		error = tmpfs_seek_datahole(vp, (off_t *)ap->a_data,
2213 		    ap->a_command == FIOSEEKDATA);
2214 		VOP_UNLOCK(vp);
2215 		break;
2216 	default:
2217 		error = ENOTTY;
2218 		break;
2219 	}
2220 	return (error);
2221 }
2222 
2223 /*
2224  * Vnode operations vector used for files stored in a tmpfs file system.
2225  */
2226 struct vop_vector tmpfs_vnodeop_entries = {
2227 	.vop_default =			&default_vnodeops,
2228 	.vop_lookup =			vfs_cache_lookup,
2229 	.vop_cachedlookup =		tmpfs_cached_lookup,
2230 	.vop_create =			tmpfs_create,
2231 	.vop_mknod =			tmpfs_mknod,
2232 	.vop_open =			tmpfs_open,
2233 	.vop_close =			tmpfs_close,
2234 	.vop_fplookup_vexec =		tmpfs_fplookup_vexec,
2235 	.vop_fplookup_symlink =		tmpfs_fplookup_symlink,
2236 	.vop_access =			tmpfs_access,
2237 	.vop_stat =			tmpfs_stat,
2238 	.vop_getattr =			tmpfs_getattr,
2239 	.vop_setattr =			tmpfs_setattr,
2240 	.vop_read =			tmpfs_read,
2241 	.vop_read_pgcache =		tmpfs_read_pgcache,
2242 	.vop_write =			tmpfs_write,
2243 	.vop_deallocate =		tmpfs_deallocate,
2244 	.vop_fsync =			tmpfs_fsync,
2245 	.vop_remove =			tmpfs_remove,
2246 	.vop_link =			tmpfs_link,
2247 	.vop_rename =			tmpfs_rename,
2248 	.vop_mkdir =			tmpfs_mkdir,
2249 	.vop_rmdir =			tmpfs_rmdir,
2250 	.vop_symlink =			tmpfs_symlink,
2251 	.vop_readdir =			tmpfs_readdir,
2252 	.vop_readlink =			tmpfs_readlink,
2253 	.vop_inactive =			tmpfs_inactive,
2254 	.vop_need_inactive =		tmpfs_need_inactive,
2255 	.vop_reclaim =			tmpfs_reclaim,
2256 	.vop_print =			tmpfs_print,
2257 	.vop_pathconf =			tmpfs_pathconf,
2258 	.vop_vptofh =			tmpfs_vptofh,
2259 	.vop_whiteout =			tmpfs_whiteout,
2260 	.vop_bmap =			VOP_EOPNOTSUPP,
2261 	.vop_vptocnp =			tmpfs_vptocnp,
2262 	.vop_lock1 =			vop_lock,
2263 	.vop_unlock = 			vop_unlock,
2264 	.vop_islocked = 		vop_islocked,
2265 	.vop_deleteextattr =		tmpfs_deleteextattr,
2266 	.vop_getextattr =		tmpfs_getextattr,
2267 	.vop_listextattr =		tmpfs_listextattr,
2268 	.vop_setextattr =		tmpfs_setextattr,
2269 	.vop_add_writecount =		vop_stdadd_writecount_nomsync,
2270 	.vop_ioctl =			tmpfs_ioctl,
2271 };
2272 VFS_VOP_VECTOR_REGISTER(tmpfs_vnodeop_entries);
2273 
2274 /*
2275  * Same vector for mounts that do not use the namecache.
2276  */
2277 struct vop_vector tmpfs_vnodeop_nonc_entries = {
2278 	.vop_default =			&tmpfs_vnodeop_entries,
2279 	.vop_lookup =			tmpfs_lookup,
2280 };
2281 VFS_VOP_VECTOR_REGISTER(tmpfs_vnodeop_nonc_entries);
2282