xref: /freebsd/sys/fs/tmpfs/tmpfs_vnops.c (revision 657729a89dd578d8cfc70d6616f5c65a48a8b33a)
1 /*	$NetBSD: tmpfs_vnops.c,v 1.39 2007/07/23 15:41:01 jmmv Exp $	*/
2 
3 /*-
4  * SPDX-License-Identifier: BSD-2-Clause-NetBSD
5  *
6  * Copyright (c) 2005, 2006 The NetBSD Foundation, Inc.
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to The NetBSD Foundation
10  * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
11  * 2005 program.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 /*
36  * tmpfs vnode interface.
37  */
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD$");
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/dirent.h>
44 #include <sys/fcntl.h>
45 #include <sys/file.h>
46 #include <sys/filio.h>
47 #include <sys/limits.h>
48 #include <sys/lockf.h>
49 #include <sys/lock.h>
50 #include <sys/mount.h>
51 #include <sys/namei.h>
52 #include <sys/priv.h>
53 #include <sys/proc.h>
54 #include <sys/rwlock.h>
55 #include <sys/sched.h>
56 #include <sys/smr.h>
57 #include <sys/stat.h>
58 #include <sys/sysctl.h>
59 #include <sys/unistd.h>
60 #include <sys/vnode.h>
61 #include <security/audit/audit.h>
62 #include <security/mac/mac_framework.h>
63 
64 #include <vm/vm.h>
65 #include <vm/vm_param.h>
66 #include <vm/vm_object.h>
67 #include <vm/vm_page.h>
68 #include <vm/vm_pager.h>
69 #include <vm/swap_pager.h>
70 
71 #include <fs/tmpfs/tmpfs_vnops.h>
72 #include <fs/tmpfs/tmpfs.h>
73 
74 SYSCTL_DECL(_vfs_tmpfs);
75 VFS_SMR_DECLARE;
76 
77 static volatile int tmpfs_rename_restarts;
78 SYSCTL_INT(_vfs_tmpfs, OID_AUTO, rename_restarts, CTLFLAG_RD,
79     __DEVOLATILE(int *, &tmpfs_rename_restarts), 0,
80     "Times rename had to restart due to lock contention");
81 
82 static int
83 tmpfs_vn_get_ino_alloc(struct mount *mp, void *arg, int lkflags,
84     struct vnode **rvp)
85 {
86 
87 	return (tmpfs_alloc_vp(mp, arg, lkflags, rvp));
88 }
89 
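/*
 * Common lookup code shared by the cached (tmpfs_cached_lookup) and
 * non-cached (tmpfs_lookup) entry points.  Resolves '.', '..' and plain
 * names in dvp, returning EJUSTRETURN when the last component is missing
 * but the operation is a CREATE, RENAME or whiteout DELETE, so that the
 * caller may go on to create the entry.
 */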
90 static int
91 tmpfs_lookup1(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
92 {
93 	struct tmpfs_dirent *de;
94 	struct tmpfs_node *dnode, *pnode;
95 	struct tmpfs_mount *tm;
96 	int error;
97 
98 	/* Caller assumes responsibility for ensuring access (VEXEC). */
99 	dnode = VP_TO_TMPFS_DIR(dvp);
100 	*vpp = NULLVP;
101 
102 	/* We cannot be requesting the parent directory of the root node. */
103 	MPASS(IMPLIES(dnode->tn_type == VDIR &&
104 	    dnode->tn_dir.tn_parent == dnode,
105 	    !(cnp->cn_flags & ISDOTDOT)));
106 
107 	TMPFS_ASSERT_LOCKED(dnode);
108 	if (dnode->tn_dir.tn_parent == NULL) {
109 		error = ENOENT;
110 		goto out;
111 	}
112 	if (cnp->cn_flags & ISDOTDOT) {
113 		tm = VFS_TO_TMPFS(dvp->v_mount);
114 		pnode = dnode->tn_dir.tn_parent;
115 		tmpfs_ref_node(pnode);
116 		error = vn_vget_ino_gen(dvp, tmpfs_vn_get_ino_alloc,
117 		    pnode, cnp->cn_lkflags, vpp);
118 		tmpfs_free_node(tm, pnode);
119 		if (error != 0)
120 			goto out;
121 	} else if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
122 		VREF(dvp);
123 		*vpp = dvp;
124 		error = 0;
125 	} else {
126 		de = tmpfs_dir_lookup(dnode, NULL, cnp);
127 		if (de != NULL && de->td_node == NULL)
128 			cnp->cn_flags |= ISWHITEOUT;
129 		if (de == NULL || de->td_node == NULL) {
130 			/*
131 			 * The entry was not found in the directory.
132 			 * This is OK if we are creating or renaming an
133 			 * entry and are working on the last component of
134 			 * the path name.
135 			 */
136 			if ((cnp->cn_flags & ISLASTCN) &&
137 			    (cnp->cn_nameiop == CREATE ||
138 			    cnp->cn_nameiop == RENAME ||
139 			    (cnp->cn_nameiop == DELETE &&
140 			    cnp->cn_flags & DOWHITEOUT &&
141 			    cnp->cn_flags & ISWHITEOUT))) {
142 				error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred,
143 				    curthread);
144 				if (error != 0)
145 					goto out;
146 
147 				error = EJUSTRETURN;
148 			} else
149 				error = ENOENT;
150 		} else {
151 			struct tmpfs_node *tnode;
152 
153 			/*
154 			 * The entry was found, so get its associated
155 			 * tmpfs_node.
156 			 */
157 			tnode = de->td_node;
158 
159 			/*
160 			 * If we are not at the last path component and
161 			 * found a non-directory or non-link entry (which
162 			 * may itself be pointing to a directory), raise
163 			 * an error.
164 			 */
165 			if ((tnode->tn_type != VDIR &&
166 			    tnode->tn_type != VLNK) &&
167 			    !(cnp->cn_flags & ISLASTCN)) {
168 				error = ENOTDIR;
169 				goto out;
170 			}
171 
172 			/*
173 			 * If we are deleting or renaming the entry, keep
174 			 * track of its tmpfs_dirent so that it can be
175 			 * easily deleted later.
176 			 */
177 			if ((cnp->cn_flags & ISLASTCN) &&
178 			    (cnp->cn_nameiop == DELETE ||
179 			    cnp->cn_nameiop == RENAME)) {
180 				error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred,
181 				    curthread);
182 				if (error != 0)
183 					goto out;
184 
185 				/* Allocate a new vnode on the matching entry. */
186 				error = tmpfs_alloc_vp(dvp->v_mount, tnode,
187 				    cnp->cn_lkflags, vpp);
188 				if (error != 0)
189 					goto out;
190 
191 				if ((dnode->tn_mode & S_ISTXT) &&
192 				  VOP_ACCESS(dvp, VADMIN, cnp->cn_cred,
193 				  curthread) && VOP_ACCESS(*vpp, VADMIN,
194 				  cnp->cn_cred, curthread)) {
195 					error = EPERM;
196 					vput(*vpp);
197 					*vpp = NULL;
198 					goto out;
199 				}
200 			} else {
201 				error = tmpfs_alloc_vp(dvp->v_mount, tnode,
202 				    cnp->cn_lkflags, vpp);
203 				if (error != 0)
204 					goto out;
205 			}
206 		}
207 	}
208 
209 	/*
210 	 * Store the result of this lookup in the cache.  Avoid this if the
211 	 * request was for creation, as it does not improve timings in
212 	 * empirical tests.
213 	 */
214 	if ((cnp->cn_flags & MAKEENTRY) != 0 && tmpfs_use_nc(dvp))
215 		cache_enter(dvp, *vpp, cnp);
216 
217 out:
218 	/*
219 	 * If there were no errors, *vpp cannot be null and it must be
220 	 * locked.
221 	 */
222 	MPASS(IFF(error == 0, *vpp != NULLVP && VOP_ISLOCKED(*vpp)));
223 
224 	return (error);
225 }
226 
227 static int
228 tmpfs_cached_lookup(struct vop_cachedlookup_args *v)
229 {
230 
231 	return (tmpfs_lookup1(v->a_dvp, v->a_vpp, v->a_cnp));
232 }
233 
234 static int
235 tmpfs_lookup(struct vop_lookup_args *v)
236 {
237 	struct vnode *dvp = v->a_dvp;
238 	struct vnode **vpp = v->a_vpp;
239 	struct componentname *cnp = v->a_cnp;
240 	int error;
241 
242 	/* Check accessibility of requested node as a first step. */
243 	error = vn_dir_check_exec(dvp, cnp);
244 	if (error != 0)
245 		return (error);
246 
247 	return (tmpfs_lookup1(dvp, vpp, cnp));
248 }
249 
250 static int
251 tmpfs_create(struct vop_create_args *v)
252 {
253 	struct vnode *dvp = v->a_dvp;
254 	struct vnode **vpp = v->a_vpp;
255 	struct componentname *cnp = v->a_cnp;
256 	struct vattr *vap = v->a_vap;
257 	int error;
258 
259 	MPASS(vap->va_type == VREG || vap->va_type == VSOCK);
260 
261 	error = tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL);
262 	if (error == 0 && (cnp->cn_flags & MAKEENTRY) != 0 && tmpfs_use_nc(dvp))
263 		cache_enter(dvp, *vpp, cnp);
264 	return (error);
265 }
266 
267 static int
268 tmpfs_mknod(struct vop_mknod_args *v)
269 {
270 	struct vnode *dvp = v->a_dvp;
271 	struct vnode **vpp = v->a_vpp;
272 	struct componentname *cnp = v->a_cnp;
273 	struct vattr *vap = v->a_vap;
274 
275 	if (vap->va_type != VBLK && vap->va_type != VCHR &&
276 	    vap->va_type != VFIFO)
277 		return (EINVAL);
278 
279 	return (tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL));
280 }
281 
282 struct fileops tmpfs_fnops;
283 
284 static int
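/*
 * Open a tmpfs vnode.  Unlinked nodes cannot be opened again, and append-only
 * files reject plain write opens.  For regular files the struct file is
 * switched to tmpfs_fnops with a reference on the tmpfs node, so the node and
 * its backing object stay reachable for as long as the file remains open.
 */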
285 tmpfs_open(struct vop_open_args *v)
286 {
287 	struct vnode *vp;
288 	struct tmpfs_node *node;
289 	struct file *fp;
290 	int error, mode;
291 
292 	vp = v->a_vp;
293 	mode = v->a_mode;
294 	node = VP_TO_TMPFS_NODE(vp);
295 
296 	/*
297 	 * The file is still active but all its names have been removed
298 	 * (e.g. by a "rmdir $(pwd)").  It cannot be opened any more as
299 	 * it is about to die.
300 	 */
301 	if (node->tn_links < 1)
302 		return (ENOENT);
303 
304 	/* If the file is marked append-only, deny write requests. */
305 	if (node->tn_flags & APPEND && (mode & (FWRITE | O_APPEND)) == FWRITE)
306 		error = EPERM;
307 	else {
308 		error = 0;
309 		/* For regular files, the call below is a nop. */
310 		KASSERT(vp->v_type != VREG || (node->tn_reg.tn_aobj->flags &
311 		    OBJ_DEAD) == 0, ("dead object"));
312 		vnode_create_vobject(vp, node->tn_size, v->a_td);
313 	}
314 
315 	fp = v->a_fp;
316 	MPASS(fp == NULL || fp->f_data == NULL);
317 	if (error == 0 && fp != NULL && vp->v_type == VREG) {
318 		tmpfs_ref_node(node);
319 		finit_vnode(fp, mode, node, &tmpfs_fnops);
320 	}
321 
322 	return (error);
323 }
324 
325 static int
326 tmpfs_close(struct vop_close_args *v)
327 {
328 	struct vnode *vp = v->a_vp;
329 
330 	/* Update node times. */
331 	tmpfs_update(vp);
332 
333 	return (0);
334 }
335 
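/*
 * fo_close method for the tmpfs fileops installed by tmpfs_open(): drop the
 * node reference taken there, then fall back to the regular vnode fileops.
 */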
336 int
337 tmpfs_fo_close(struct file *fp, struct thread *td)
338 {
339 	struct tmpfs_node *node;
340 
341 	node = fp->f_data;
342 	if (node != NULL) {
343 		MPASS(node->tn_type == VREG);
344 		tmpfs_free_node(node->tn_reg.tn_tmp, node);
345 	}
346 	return (vnops.fo_close(fp, td));
347 }
348 
349 /*
350  * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see
351  * the comment above cache_fplookup for details.
352  */
353 int
354 tmpfs_fplookup_vexec(struct vop_fplookup_vexec_args *v)
355 {
356 	struct vnode *vp;
357 	struct tmpfs_node *node;
358 	struct ucred *cred;
359 	mode_t all_x, mode;
360 
361 	vp = v->a_vp;
362 	node = VP_TO_TMPFS_NODE_SMR(vp);
363 	if (__predict_false(node == NULL))
364 		return (EAGAIN);
365 
366 	all_x = S_IXUSR | S_IXGRP | S_IXOTH;
367 	mode = atomic_load_short(&node->tn_mode);
368 	if (__predict_true((mode & all_x) == all_x))
369 		return (0);
370 
371 	cred = v->a_cred;
372 	return (vaccess_vexec_smr(mode, node->tn_uid, node->tn_gid, cred));
373 }
374 
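/*
 * Locked access check.  The all-executable VEXEC case is special-cased to
 * match the lockless fast path in tmpfs_fplookup_vexec() above.
 */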
375 int
376 tmpfs_access(struct vop_access_args *v)
377 {
378 	struct vnode *vp = v->a_vp;
379 	accmode_t accmode = v->a_accmode;
380 	struct ucred *cred = v->a_cred;
381 	mode_t all_x = S_IXUSR | S_IXGRP | S_IXOTH;
382 	int error;
383 	struct tmpfs_node *node;
384 
385 	MPASS(VOP_ISLOCKED(vp));
386 
387 	node = VP_TO_TMPFS_NODE(vp);
388 
389 	/*
390 	 * Common case path lookup.
391 	 */
392 	if (__predict_true(accmode == VEXEC && (node->tn_mode & all_x) == all_x))
393 		return (0);
394 
395 	switch (vp->v_type) {
396 	case VDIR:
397 		/* FALLTHROUGH */
398 	case VLNK:
399 		/* FALLTHROUGH */
400 	case VREG:
401 		if (accmode & VWRITE && vp->v_mount->mnt_flag & MNT_RDONLY) {
402 			error = EROFS;
403 			goto out;
404 		}
405 		break;
406 
407 	case VBLK:
408 		/* FALLTHROUGH */
409 	case VCHR:
410 		/* FALLTHROUGH */
411 	case VSOCK:
412 		/* FALLTHROUGH */
413 	case VFIFO:
414 		break;
415 
416 	default:
417 		error = EINVAL;
418 		goto out;
419 	}
420 
421 	if (accmode & VWRITE && node->tn_flags & IMMUTABLE) {
422 		error = EPERM;
423 		goto out;
424 	}
425 
426 	error = vaccess(vp->v_type, node->tn_mode, node->tn_uid, node->tn_gid,
427 	    accmode, cred);
428 
429 out:
430 	MPASS(VOP_ISLOCKED(vp));
431 
432 	return (error);
433 }
434 
435 int
436 tmpfs_stat(struct vop_stat_args *v)
437 {
438 	struct vnode *vp = v->a_vp;
439 	struct stat *sb = v->a_sb;
440 	vm_object_t obj;
441 	struct tmpfs_node *node;
442 	int error;
443 
444 	node = VP_TO_TMPFS_NODE(vp);
445 
446 	tmpfs_update_getattr(vp);
447 
448 	error = vop_stat_helper_pre(v);
449 	if (__predict_false(error))
450 		return (error);
451 
452 	sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
453 	sb->st_ino = node->tn_id;
454 	sb->st_mode = node->tn_mode | VTTOIF(vp->v_type);
455 	sb->st_nlink = node->tn_links;
456 	sb->st_uid = node->tn_uid;
457 	sb->st_gid = node->tn_gid;
458 	sb->st_rdev = (vp->v_type == VBLK || vp->v_type == VCHR) ?
459 		node->tn_rdev : NODEV;
460 	sb->st_size = node->tn_size;
461 	sb->st_atim.tv_sec = node->tn_atime.tv_sec;
462 	sb->st_atim.tv_nsec = node->tn_atime.tv_nsec;
463 	sb->st_mtim.tv_sec = node->tn_mtime.tv_sec;
464 	sb->st_mtim.tv_nsec = node->tn_mtime.tv_nsec;
465 	sb->st_ctim.tv_sec = node->tn_ctime.tv_sec;
466 	sb->st_ctim.tv_nsec = node->tn_ctime.tv_nsec;
467 	sb->st_birthtim.tv_sec = node->tn_birthtime.tv_sec;
468 	sb->st_birthtim.tv_nsec = node->tn_birthtime.tv_nsec;
469 	sb->st_blksize = PAGE_SIZE;
470 	sb->st_flags = node->tn_flags;
471 	sb->st_gen = node->tn_gen;
472 	if (vp->v_type == VREG) {
473 		obj = node->tn_reg.tn_aobj;
474 		sb->st_blocks = (u_quad_t)obj->resident_page_count * PAGE_SIZE;
475 	} else
476 		sb->st_blocks = node->tn_size;
477 	sb->st_blocks /= S_BLKSIZE;
478 	return (vop_stat_helper_post(v, error));
479 }
480 
481 int
482 tmpfs_getattr(struct vop_getattr_args *v)
483 {
484 	struct vnode *vp = v->a_vp;
485 	struct vattr *vap = v->a_vap;
486 	vm_object_t obj;
487 	struct tmpfs_node *node;
488 
489 	node = VP_TO_TMPFS_NODE(vp);
490 
491 	tmpfs_update_getattr(vp);
492 
493 	vap->va_type = vp->v_type;
494 	vap->va_mode = node->tn_mode;
495 	vap->va_nlink = node->tn_links;
496 	vap->va_uid = node->tn_uid;
497 	vap->va_gid = node->tn_gid;
498 	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
499 	vap->va_fileid = node->tn_id;
500 	vap->va_size = node->tn_size;
501 	vap->va_blocksize = PAGE_SIZE;
502 	vap->va_atime = node->tn_atime;
503 	vap->va_mtime = node->tn_mtime;
504 	vap->va_ctime = node->tn_ctime;
505 	vap->va_birthtime = node->tn_birthtime;
506 	vap->va_gen = node->tn_gen;
507 	vap->va_flags = node->tn_flags;
508 	vap->va_rdev = (vp->v_type == VBLK || vp->v_type == VCHR) ?
509 		node->tn_rdev : NODEV;
510 	if (vp->v_type == VREG) {
511 		obj = node->tn_reg.tn_aobj;
512 		vap->va_bytes = (u_quad_t)obj->resident_page_count * PAGE_SIZE;
513 	} else
514 		vap->va_bytes = node->tn_size;
515 	vap->va_filerev = 0;
516 
517 	return (0);
518 }
519 
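/*
 * Apply each settable attribute in turn through the tmpfs_ch*() helpers;
 * attributes that can never be set explicitly yield EINVAL.
 */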
520 int
521 tmpfs_setattr(struct vop_setattr_args *v)
522 {
523 	struct vnode *vp = v->a_vp;
524 	struct vattr *vap = v->a_vap;
525 	struct ucred *cred = v->a_cred;
526 	struct thread *td = curthread;
527 
528 	int error;
529 
530 	MPASS(VOP_ISLOCKED(vp));
531 	ASSERT_VOP_IN_SEQC(vp);
532 
533 	error = 0;
534 
535 	/* Abort if any unsettable attribute is given. */
536 	if (vap->va_type != VNON ||
537 	    vap->va_nlink != VNOVAL ||
538 	    vap->va_fsid != VNOVAL ||
539 	    vap->va_fileid != VNOVAL ||
540 	    vap->va_blocksize != VNOVAL ||
541 	    vap->va_gen != VNOVAL ||
542 	    vap->va_rdev != VNOVAL ||
543 	    vap->va_bytes != VNOVAL)
544 		error = EINVAL;
545 
546 	if (error == 0 && (vap->va_flags != VNOVAL))
547 		error = tmpfs_chflags(vp, vap->va_flags, cred, td);
548 
549 	if (error == 0 && (vap->va_size != VNOVAL))
550 		error = tmpfs_chsize(vp, vap->va_size, cred, td);
551 
552 	if (error == 0 && (vap->va_uid != VNOVAL || vap->va_gid != VNOVAL))
553 		error = tmpfs_chown(vp, vap->va_uid, vap->va_gid, cred, td);
554 
555 	if (error == 0 && (vap->va_mode != (mode_t)VNOVAL))
556 		error = tmpfs_chmod(vp, vap->va_mode, cred, td);
557 
558 	if (error == 0 && ((vap->va_atime.tv_sec != VNOVAL &&
559 	    vap->va_atime.tv_nsec != VNOVAL) ||
560 	    (vap->va_mtime.tv_sec != VNOVAL &&
561 	    vap->va_mtime.tv_nsec != VNOVAL) ||
562 	    (vap->va_birthtime.tv_sec != VNOVAL &&
563 	    vap->va_birthtime.tv_nsec != VNOVAL)))
564 		error = tmpfs_chtimes(vp, vap, cred, td);
565 
566 	/*
567 	 * Update the node times.  We give preference to the error codes
568 	 * generated by this function rather than the ones that may arise
569 	 * from tmpfs_update.
570 	 */
571 	tmpfs_update(vp);
572 
573 	MPASS(VOP_ISLOCKED(vp));
574 
575 	return (error);
576 }
577 
578 static int
579 tmpfs_read(struct vop_read_args *v)
580 {
581 	struct vnode *vp;
582 	struct uio *uio;
583 	struct tmpfs_node *node;
584 
585 	vp = v->a_vp;
586 	if (vp->v_type != VREG)
587 		return (EISDIR);
588 	uio = v->a_uio;
589 	if (uio->uio_offset < 0)
590 		return (EINVAL);
591 	node = VP_TO_TMPFS_NODE(vp);
592 	tmpfs_set_accessed(VFS_TO_TMPFS(vp->v_mount), node);
593 	return (uiomove_object(node->tn_reg.tn_aobj, node->tn_size, uio));
594 }
595 
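/*
 * Lockless read from the page cache.  This runs under SMR without the vnode
 * lock, so the node and its object are revalidated and EJUSTRETURN is
 * returned to make the caller fall back to the locked tmpfs_read() path
 * whenever the vnode is doomed or the node is going away.
 */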
596 static int
597 tmpfs_read_pgcache(struct vop_read_pgcache_args *v)
598 {
599 	struct vnode *vp;
600 	struct tmpfs_node *node;
601 	vm_object_t object;
602 	off_t size;
603 	int error;
604 
605 	vp = v->a_vp;
606 	VNPASS((vn_irflag_read(vp) & VIRF_PGREAD) != 0, vp);
607 
608 	if (v->a_uio->uio_offset < 0)
609 		return (EINVAL);
610 
611 	error = EJUSTRETURN;
612 	vfs_smr_enter();
613 
614 	node = VP_TO_TMPFS_NODE_SMR(vp);
615 	if (node == NULL)
616 		goto out_smr;
617 	MPASS(node->tn_type == VREG);
618 	MPASS(node->tn_refcount >= 1);
619 	object = node->tn_reg.tn_aobj;
620 	if (object == NULL)
621 		goto out_smr;
622 
623 	MPASS(object->type == tmpfs_pager_type);
624 	MPASS((object->flags & (OBJ_ANON | OBJ_DEAD | OBJ_SWAP)) ==
625 	    OBJ_SWAP);
626 	if (!VN_IS_DOOMED(vp)) {
627 		/* The file size cannot shrink due to the rangelock. */
628 		size = node->tn_size;
629 		tmpfs_set_accessed(node->tn_reg.tn_tmp, node);
630 		vfs_smr_exit();
631 		error = uiomove_object(object, size, v->a_uio);
632 		return (error);
633 	}
634 out_smr:
635 	vfs_smr_exit();
636 	return (error);
637 }
638 
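/*
 * Write to a regular file: extend the backing object first if the write goes
 * past EOF, copy the data in, clear SUID/SGID for unprivileged writers and
 * roll the size back to its old value if the copy fails.
 */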
639 static int
640 tmpfs_write(struct vop_write_args *v)
641 {
642 	struct vnode *vp;
643 	struct uio *uio;
644 	struct tmpfs_node *node;
645 	off_t oldsize;
646 	ssize_t r;
647 	int error, ioflag;
648 	mode_t newmode;
649 
650 	vp = v->a_vp;
651 	uio = v->a_uio;
652 	ioflag = v->a_ioflag;
653 	error = 0;
654 	node = VP_TO_TMPFS_NODE(vp);
655 	oldsize = node->tn_size;
656 
657 	if (uio->uio_offset < 0 || vp->v_type != VREG)
658 		return (EINVAL);
659 	if (uio->uio_resid == 0)
660 		return (0);
661 	if (ioflag & IO_APPEND)
662 		uio->uio_offset = node->tn_size;
663 	error = vn_rlimit_fsizex(vp, uio, VFS_TO_TMPFS(vp->v_mount)->
664 	    tm_maxfilesize, &r, uio->uio_td);
665 	if (error != 0) {
666 		vn_rlimit_fsizex_res(uio, r);
667 		return (error);
668 	}
669 
670 	if (uio->uio_offset + uio->uio_resid > node->tn_size) {
671 		error = tmpfs_reg_resize(vp, uio->uio_offset + uio->uio_resid,
672 		    FALSE);
673 		if (error != 0)
674 			goto out;
675 	}
676 
677 	error = uiomove_object(node->tn_reg.tn_aobj, node->tn_size, uio);
678 	node->tn_status |= TMPFS_NODE_MODIFIED | TMPFS_NODE_CHANGED;
679 	node->tn_accessed = true;
680 	if (node->tn_mode & (S_ISUID | S_ISGID)) {
681 		if (priv_check_cred(v->a_cred, PRIV_VFS_RETAINSUGID)) {
682 			newmode = node->tn_mode & ~(S_ISUID | S_ISGID);
683 			vn_seqc_write_begin(vp);
684 			atomic_store_short(&node->tn_mode, newmode);
685 			vn_seqc_write_end(vp);
686 		}
687 	}
688 	if (error != 0)
689 		(void)tmpfs_reg_resize(vp, oldsize, TRUE);
690 
691 out:
692 	MPASS(IMPLIES(error == 0, uio->uio_resid == 0));
693 	MPASS(IMPLIES(error != 0, oldsize == node->tn_size));
694 
695 	vn_rlimit_fsizex_res(uio, r);
696 	return (error);
697 }
698 
699 static int
700 tmpfs_deallocate(struct vop_deallocate_args *v)
701 {
702 	return (tmpfs_reg_punch_hole(v->a_vp, v->a_offset, v->a_len));
703 }
704 
705 static int
706 tmpfs_fsync(struct vop_fsync_args *v)
707 {
708 	struct vnode *vp = v->a_vp;
709 
710 	MPASS(VOP_ISLOCKED(vp));
711 
712 	tmpfs_check_mtime(vp);
713 	tmpfs_update(vp);
714 
715 	return (0);
716 }
717 
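/*
 * Unlink a non-directory entry.  Only the directory entry is freed here; the
 * node itself lives on until its vnode is reclaimed.
 */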
718 static int
719 tmpfs_remove(struct vop_remove_args *v)
720 {
721 	struct vnode *dvp = v->a_dvp;
722 	struct vnode *vp = v->a_vp;
723 
724 	int error;
725 	struct tmpfs_dirent *de;
726 	struct tmpfs_mount *tmp;
727 	struct tmpfs_node *dnode;
728 	struct tmpfs_node *node;
729 
730 	MPASS(VOP_ISLOCKED(dvp));
731 	MPASS(VOP_ISLOCKED(vp));
732 
733 	if (vp->v_type == VDIR) {
734 		error = EISDIR;
735 		goto out;
736 	}
737 
738 	dnode = VP_TO_TMPFS_DIR(dvp);
739 	node = VP_TO_TMPFS_NODE(vp);
740 	tmp = VFS_TO_TMPFS(vp->v_mount);
741 	de = tmpfs_dir_lookup(dnode, node, v->a_cnp);
742 	MPASS(de != NULL);
743 
744 	/* Files marked as immutable or append-only cannot be deleted. */
745 	if ((node->tn_flags & (IMMUTABLE | APPEND | NOUNLINK)) ||
746 	    (dnode->tn_flags & APPEND)) {
747 		error = EPERM;
748 		goto out;
749 	}
750 
751 	/* Remove the entry from the directory; as it is a file, we do not
752 	 * have to change the number of hard links of the directory. */
753 	tmpfs_dir_detach(dvp, de);
754 	if (v->a_cnp->cn_flags & DOWHITEOUT)
755 		tmpfs_dir_whiteout_add(dvp, v->a_cnp);
756 
757 	/* Free the directory entry we just deleted.  Note that the node
758 	 * referred by it will not be removed until the vnode is really
759 	 * reclaimed. */
760 	tmpfs_free_dirent(tmp, de);
761 
762 	node->tn_status |= TMPFS_NODE_CHANGED;
763 	node->tn_accessed = true;
764 	error = 0;
765 
766 out:
767 	return (error);
768 }
769 
770 static int
771 tmpfs_link(struct vop_link_args *v)
772 {
773 	struct vnode *dvp = v->a_tdvp;
774 	struct vnode *vp = v->a_vp;
775 	struct componentname *cnp = v->a_cnp;
776 
777 	int error;
778 	struct tmpfs_dirent *de;
779 	struct tmpfs_node *node;
780 
781 	MPASS(VOP_ISLOCKED(dvp));
782 	MPASS(dvp != vp); /* XXX When can this be false? */
783 	node = VP_TO_TMPFS_NODE(vp);
784 
785 	/* Ensure that we do not overflow the maximum number of links imposed
786 	 * by the system. */
787 	MPASS(node->tn_links <= TMPFS_LINK_MAX);
788 	if (node->tn_links == TMPFS_LINK_MAX) {
789 		error = EMLINK;
790 		goto out;
791 	}
792 
793 	/* We cannot create links of files marked immutable or append-only. */
794 	if (node->tn_flags & (IMMUTABLE | APPEND)) {
795 		error = EPERM;
796 		goto out;
797 	}
798 
799 	/* Allocate a new directory entry to represent the node. */
800 	error = tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), node,
801 	    cnp->cn_nameptr, cnp->cn_namelen, &de);
802 	if (error != 0)
803 		goto out;
804 
805 	/* Insert the new directory entry into the appropriate directory. */
806 	if (cnp->cn_flags & ISWHITEOUT)
807 		tmpfs_dir_whiteout_remove(dvp, cnp);
808 	tmpfs_dir_attach(dvp, de);
809 
810 	/* vp link count has changed, so update node times. */
811 	node->tn_status |= TMPFS_NODE_CHANGED;
812 	tmpfs_update(vp);
813 
814 	error = 0;
815 
816 out:
817 	return (error);
818 }
819 
820 /*
821  * We acquire all but the fdvp lock using non-blocking acquisitions.  If we
822  * fail to acquire any lock in the path, we drop all held locks, acquire the
823  * new lock in a blocking fashion, and then release it and restart the
824  * rename.  This acquire/release step ensures that we do not spin on a lock
825  * waiting for release.  On error we release all vnode locks and drop the
826  * references the same way tmpfs_rename() would.
827  */
828 static int
829 tmpfs_rename_relock(struct vnode *fdvp, struct vnode **fvpp,
830     struct vnode *tdvp, struct vnode **tvpp,
831     struct componentname *fcnp, struct componentname *tcnp)
832 {
833 	struct vnode *nvp;
834 	struct mount *mp;
835 	struct tmpfs_dirent *de;
836 	int error, restarts = 0;
837 
838 	VOP_UNLOCK(tdvp);
839 	if (*tvpp != NULL && *tvpp != tdvp)
840 		VOP_UNLOCK(*tvpp);
841 	mp = fdvp->v_mount;
842 
843 relock:
844 	restarts += 1;
845 	error = vn_lock(fdvp, LK_EXCLUSIVE);
846 	if (error)
847 		goto releout;
848 	if (vn_lock(tdvp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
849 		VOP_UNLOCK(fdvp);
850 		error = vn_lock(tdvp, LK_EXCLUSIVE);
851 		if (error)
852 			goto releout;
853 		VOP_UNLOCK(tdvp);
854 		goto relock;
855 	}
856 	/*
857 	 * Re-resolve fvp to be certain it still exists and fetch the
858 	 * correct vnode.
859 	 */
860 	de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(fdvp), NULL, fcnp);
861 	if (de == NULL) {
862 		VOP_UNLOCK(fdvp);
863 		VOP_UNLOCK(tdvp);
864 		if ((fcnp->cn_flags & ISDOTDOT) != 0 ||
865 		    (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.'))
866 			error = EINVAL;
867 		else
868 			error = ENOENT;
869 		goto releout;
870 	}
871 	error = tmpfs_alloc_vp(mp, de->td_node, LK_EXCLUSIVE | LK_NOWAIT, &nvp);
872 	if (error != 0) {
873 		VOP_UNLOCK(fdvp);
874 		VOP_UNLOCK(tdvp);
875 		if (error != EBUSY)
876 			goto releout;
877 		error = tmpfs_alloc_vp(mp, de->td_node, LK_EXCLUSIVE, &nvp);
878 		if (error != 0)
879 			goto releout;
880 		VOP_UNLOCK(nvp);
881 		/*
882 		 * Concurrent rename race.
883 		 */
884 		if (nvp == tdvp) {
885 			vrele(nvp);
886 			error = EINVAL;
887 			goto releout;
888 		}
889 		vrele(*fvpp);
890 		*fvpp = nvp;
891 		goto relock;
892 	}
893 	vrele(*fvpp);
894 	*fvpp = nvp;
895 	VOP_UNLOCK(*fvpp);
896 	/*
897 	 * Re-resolve tvp and acquire the vnode lock if present.
898 	 */
899 	de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(tdvp), NULL, tcnp);
900 	/*
901 	 * If tvp disappeared we just carry on.
902 	 */
903 	if (de == NULL && *tvpp != NULL) {
904 		vrele(*tvpp);
905 		*tvpp = NULL;
906 	}
907 	/*
908 	 * Get the tvp ino if the lookup succeeded.  We may have to restart
909 	 * if the non-blocking acquire fails.
910 	 */
911 	if (de != NULL) {
912 		nvp = NULL;
913 		error = tmpfs_alloc_vp(mp, de->td_node,
914 		    LK_EXCLUSIVE | LK_NOWAIT, &nvp);
915 		if (*tvpp != NULL)
916 			vrele(*tvpp);
917 		*tvpp = nvp;
918 		if (error != 0) {
919 			VOP_UNLOCK(fdvp);
920 			VOP_UNLOCK(tdvp);
921 			if (error != EBUSY)
922 				goto releout;
923 			error = tmpfs_alloc_vp(mp, de->td_node, LK_EXCLUSIVE,
924 			    &nvp);
925 			if (error != 0)
926 				goto releout;
927 			VOP_UNLOCK(nvp);
928 			/*
929 			 * fdvp contains fvp, thus tvp (=fdvp) is not empty.
930 			 */
931 			if (nvp == fdvp) {
932 				error = ENOTEMPTY;
933 				goto releout;
934 			}
935 			goto relock;
936 		}
937 	}
938 	tmpfs_rename_restarts += restarts;
939 
940 	return (0);
941 
942 releout:
943 	vrele(fdvp);
944 	vrele(*fvpp);
945 	vrele(tdvp);
946 	if (*tvpp != NULL)
947 		vrele(*tvpp);
948 	tmpfs_rename_restarts += restarts;
949 
950 	return (error);
951 }
952 
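/*
 * Rename.  tdvp and tvp (if any) arrive locked; fdvp is locked here, going
 * through tmpfs_rename_relock() when the non-blocking attempt fails.  All
 * four vnodes are bracketed by seqc write sections while directory entries
 * are rewired, so that lockless lookups do not observe partial updates.
 */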
953 static int
954 tmpfs_rename(struct vop_rename_args *v)
955 {
956 	struct vnode *fdvp = v->a_fdvp;
957 	struct vnode *fvp = v->a_fvp;
958 	struct componentname *fcnp = v->a_fcnp;
959 	struct vnode *tdvp = v->a_tdvp;
960 	struct vnode *tvp = v->a_tvp;
961 	struct componentname *tcnp = v->a_tcnp;
962 	char *newname;
963 	struct tmpfs_dirent *de;
964 	struct tmpfs_mount *tmp;
965 	struct tmpfs_node *fdnode;
966 	struct tmpfs_node *fnode;
967 	struct tmpfs_node *tnode;
968 	struct tmpfs_node *tdnode;
969 	int error;
970 	bool want_seqc_end;
971 
972 	MPASS(VOP_ISLOCKED(tdvp));
973 	MPASS(IMPLIES(tvp != NULL, VOP_ISLOCKED(tvp)));
974 
975 	want_seqc_end = false;
976 
977 	/*
978 	 * Disallow cross-device renames.
979 	 * XXX Why isn't this done by the caller?
980 	 */
981 	if (fvp->v_mount != tdvp->v_mount ||
982 	    (tvp != NULL && fvp->v_mount != tvp->v_mount)) {
983 		error = EXDEV;
984 		goto out;
985 	}
986 
987 	/* If source and target are the same file, there is nothing to do. */
988 	if (fvp == tvp) {
989 		error = 0;
990 		goto out;
991 	}
992 
993 	/*
994 	 * If we need to move the directory between entries, lock the
995 	 * source so that we can safely operate on it.
996 	 */
997 	if (fdvp != tdvp && fdvp != tvp) {
998 		if (vn_lock(fdvp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
999 			error = tmpfs_rename_relock(fdvp, &fvp, tdvp, &tvp,
1000 			    fcnp, tcnp);
1001 			if (error != 0)
1002 				return (error);
1003 			ASSERT_VOP_ELOCKED(fdvp,
1004 			    "tmpfs_rename: fdvp not locked");
1005 			ASSERT_VOP_ELOCKED(tdvp,
1006 			    "tmpfs_rename: tdvp not locked");
1007 			if (tvp != NULL)
1008 				ASSERT_VOP_ELOCKED(tvp,
1009 				    "tmpfs_rename: tvp not locked");
1010 			if (fvp == tvp) {
1011 				error = 0;
1012 				goto out_locked;
1013 			}
1014 		}
1015 	}
1016 
1017 	if (tvp != NULL)
1018 		vn_seqc_write_begin(tvp);
1019 	vn_seqc_write_begin(tdvp);
1020 	vn_seqc_write_begin(fvp);
1021 	vn_seqc_write_begin(fdvp);
1022 	want_seqc_end = true;
1023 
1024 	tmp = VFS_TO_TMPFS(tdvp->v_mount);
1025 	tdnode = VP_TO_TMPFS_DIR(tdvp);
1026 	tnode = (tvp == NULL) ? NULL : VP_TO_TMPFS_NODE(tvp);
1027 	fdnode = VP_TO_TMPFS_DIR(fdvp);
1028 	fnode = VP_TO_TMPFS_NODE(fvp);
1029 	de = tmpfs_dir_lookup(fdnode, fnode, fcnp);
1030 
1031 	/*
1032 	 * The entry can disappear before we lock fdvp.  Also refuse to
1033 	 * manipulate the '.' and '..' entries.
1034 	 */
1035 	if (de == NULL) {
1036 		if ((fcnp->cn_flags & ISDOTDOT) != 0 ||
1037 		    (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.'))
1038 			error = EINVAL;
1039 		else
1040 			error = ENOENT;
1041 		goto out_locked;
1042 	}
1043 	MPASS(de->td_node == fnode);
1044 
1045 	/*
1046 	 * If renaming a directory onto another preexisting directory,
1047 	 * ensure that the target directory is empty so that its
1048 	 * removal causes no side effects.
1049 	 * kern_rename() guarantees that the destination is a directory
1050 	 * if the source is one.
1051 	 */
1052 	if (tvp != NULL) {
1053 		MPASS(tnode != NULL);
1054 
1055 		if ((tnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
1056 		    (tdnode->tn_flags & (APPEND | IMMUTABLE))) {
1057 			error = EPERM;
1058 			goto out_locked;
1059 		}
1060 
1061 		if (fnode->tn_type == VDIR && tnode->tn_type == VDIR) {
1062 			if (tnode->tn_size > 0) {
1063 				error = ENOTEMPTY;
1064 				goto out_locked;
1065 			}
1066 		} else if (fnode->tn_type == VDIR && tnode->tn_type != VDIR) {
1067 			error = ENOTDIR;
1068 			goto out_locked;
1069 		} else if (fnode->tn_type != VDIR && tnode->tn_type == VDIR) {
1070 			error = EISDIR;
1071 			goto out_locked;
1072 		} else {
1073 			MPASS(fnode->tn_type != VDIR &&
1074 				tnode->tn_type != VDIR);
1075 		}
1076 	}
1077 
1078 	if ((fnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))
1079 	    || (fdnode->tn_flags & (APPEND | IMMUTABLE))) {
1080 		error = EPERM;
1081 		goto out_locked;
1082 	}
1083 
1084 	/*
1085 	 * Ensure that we have enough memory to hold the new name, if it
1086 	 * has to be changed.
1087 	 */
1088 	if (fcnp->cn_namelen != tcnp->cn_namelen ||
1089 	    bcmp(fcnp->cn_nameptr, tcnp->cn_nameptr, fcnp->cn_namelen) != 0) {
1090 		newname = malloc(tcnp->cn_namelen, M_TMPFSNAME, M_WAITOK);
1091 	} else
1092 		newname = NULL;
1093 
1094 	/*
1095 	 * If the node is being moved to another directory, we have to do
1096 	 * the move.
1097 	 */
1098 	if (fdnode != tdnode) {
1099 		/*
1100 		 * In case we are moving a directory, we have to adjust its
1101 		 * parent to point to the new parent.
1102 		 */
1103 		if (de->td_node->tn_type == VDIR) {
1104 			struct tmpfs_node *n;
1105 
1106 			/*
1107 			 * Ensure the target directory is not a child of the
1108 			 * directory being moved.  Otherwise, we'd end up
1109 			 * with stale nodes.
1110 			 */
1111 			n = tdnode;
1112 			/*
1113 			 * TMPFS_LOCK guarantees that no nodes are freed while
1114 			 * traversing the list. Nodes can only be marked as
1115 			 * removed: tn_parent == NULL.
1116 			 */
1117 			TMPFS_LOCK(tmp);
1118 			TMPFS_NODE_LOCK(n);
1119 			while (n != n->tn_dir.tn_parent) {
1120 				struct tmpfs_node *parent;
1121 
1122 				if (n == fnode) {
1123 					TMPFS_NODE_UNLOCK(n);
1124 					TMPFS_UNLOCK(tmp);
1125 					error = EINVAL;
1126 					if (newname != NULL)
1127 						free(newname, M_TMPFSNAME);
1128 					goto out_locked;
1129 				}
1130 				parent = n->tn_dir.tn_parent;
1131 				TMPFS_NODE_UNLOCK(n);
1132 				if (parent == NULL) {
1133 					n = NULL;
1134 					break;
1135 				}
1136 				TMPFS_NODE_LOCK(parent);
1137 				if (parent->tn_dir.tn_parent == NULL) {
1138 					TMPFS_NODE_UNLOCK(parent);
1139 					n = NULL;
1140 					break;
1141 				}
1142 				n = parent;
1143 			}
1144 			TMPFS_UNLOCK(tmp);
1145 			if (n == NULL) {
1146 				error = EINVAL;
1147 				if (newname != NULL)
1148 					free(newname, M_TMPFSNAME);
1149 				goto out_locked;
1150 			}
1151 			TMPFS_NODE_UNLOCK(n);
1152 
1153 			/* Adjust the parent pointer. */
1154 			TMPFS_VALIDATE_DIR(fnode);
1155 			TMPFS_NODE_LOCK(de->td_node);
1156 			de->td_node->tn_dir.tn_parent = tdnode;
1157 			TMPFS_NODE_UNLOCK(de->td_node);
1158 
1159 			/*
1160 			 * As a result of changing the target of the '..'
1161 			 * entry, the link count of the source and target
1162 			 * directories has to be adjusted.
1163 			 */
1164 			TMPFS_NODE_LOCK(tdnode);
1165 			TMPFS_ASSERT_LOCKED(tdnode);
1166 			tdnode->tn_links++;
1167 			TMPFS_NODE_UNLOCK(tdnode);
1168 
1169 			TMPFS_NODE_LOCK(fdnode);
1170 			TMPFS_ASSERT_LOCKED(fdnode);
1171 			fdnode->tn_links--;
1172 			TMPFS_NODE_UNLOCK(fdnode);
1173 		}
1174 	}
1175 
1176 	/*
1177 	 * Do the move: just remove the entry from the source directory
1178 	 * and insert it into the target one.
1179 	 */
1180 	tmpfs_dir_detach(fdvp, de);
1181 
1182 	if (fcnp->cn_flags & DOWHITEOUT)
1183 		tmpfs_dir_whiteout_add(fdvp, fcnp);
1184 	if (tcnp->cn_flags & ISWHITEOUT)
1185 		tmpfs_dir_whiteout_remove(tdvp, tcnp);
1186 
1187 	/*
1188 	 * If the name has changed, we need to make it effective by changing
1189 	 * it in the directory entry.
1190 	 */
1191 	if (newname != NULL) {
1192 		MPASS(tcnp->cn_namelen <= MAXNAMLEN);
1193 
1194 		free(de->ud.td_name, M_TMPFSNAME);
1195 		de->ud.td_name = newname;
1196 		tmpfs_dirent_init(de, tcnp->cn_nameptr, tcnp->cn_namelen);
1197 
1198 		fnode->tn_status |= TMPFS_NODE_CHANGED;
1199 		tdnode->tn_status |= TMPFS_NODE_MODIFIED;
1200 	}
1201 
1202 	/*
1203 	 * If we are overwriting an entry, we have to remove the old one
1204 	 * from the target directory.
1205 	 */
1206 	if (tvp != NULL) {
1207 		struct tmpfs_dirent *tde;
1208 
1209 		/* Remove the old entry from the target directory. */
1210 		tde = tmpfs_dir_lookup(tdnode, tnode, tcnp);
1211 		tmpfs_dir_detach(tdvp, tde);
1212 
1213 		/*
1214 		 * Free the directory entry we just deleted.  Note that the
1215 		 * node referred by it will not be removed until the vnode is
1216 		 * really reclaimed.
1217 		 */
1218 		tmpfs_free_dirent(VFS_TO_TMPFS(tvp->v_mount), tde);
1219 	}
1220 
1221 	tmpfs_dir_attach(tdvp, de);
1222 
1223 	if (tmpfs_use_nc(fvp)) {
1224 		cache_vop_rename(fdvp, fvp, tdvp, tvp, fcnp, tcnp);
1225 	}
1226 
1227 	error = 0;
1228 
1229 out_locked:
1230 	if (fdvp != tdvp && fdvp != tvp)
1231 		VOP_UNLOCK(fdvp);
1232 
1233 out:
1234 	if (want_seqc_end) {
1235 		if (tvp != NULL)
1236 			vn_seqc_write_end(tvp);
1237 		vn_seqc_write_end(tdvp);
1238 		vn_seqc_write_end(fvp);
1239 		vn_seqc_write_end(fdvp);
1240 	}
1241 
1242 	/*
1243 	 * Release target nodes.
1244 	 * XXX: I don't understand when tdvp can be the same as tvp, but
1245 	 * other code takes care of this...
1246 	 */
1247 	if (tdvp == tvp)
1248 		vrele(tdvp);
1249 	else
1250 		vput(tdvp);
1251 	if (tvp != NULL)
1252 		vput(tvp);
1253 
1254 	/* Release source nodes. */
1255 	vrele(fdvp);
1256 	vrele(fvp);
1257 
1258 	return (error);
1259 }
1260 
1261 static int
1262 tmpfs_mkdir(struct vop_mkdir_args *v)
1263 {
1264 	struct vnode *dvp = v->a_dvp;
1265 	struct vnode **vpp = v->a_vpp;
1266 	struct componentname *cnp = v->a_cnp;
1267 	struct vattr *vap = v->a_vap;
1268 
1269 	MPASS(vap->va_type == VDIR);
1270 
1271 	return (tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL));
1272 }
1273 
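/*
 * Remove an empty directory.  tmpfs directories carry no literal '.' or '..'
 * entries, so tn_size == 0 means empty.  The entry is detached, the extra
 * link counts on both directories are dropped and the namecache is purged.
 */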
1274 static int
1275 tmpfs_rmdir(struct vop_rmdir_args *v)
1276 {
1277 	struct vnode *dvp = v->a_dvp;
1278 	struct vnode *vp = v->a_vp;
1279 
1280 	int error;
1281 	struct tmpfs_dirent *de;
1282 	struct tmpfs_mount *tmp;
1283 	struct tmpfs_node *dnode;
1284 	struct tmpfs_node *node;
1285 
1286 	MPASS(VOP_ISLOCKED(dvp));
1287 	MPASS(VOP_ISLOCKED(vp));
1288 
1289 	tmp = VFS_TO_TMPFS(dvp->v_mount);
1290 	dnode = VP_TO_TMPFS_DIR(dvp);
1291 	node = VP_TO_TMPFS_DIR(vp);
1292 
1293 	/* Directories with entries other than '.' and '..' cannot be
1294 	 * removed. */
1295 	if (node->tn_size > 0) {
1296 		error = ENOTEMPTY;
1297 		goto out;
1298 	}
1299 
1300 	if ((dnode->tn_flags & APPEND)
1301 	    || (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))) {
1302 		error = EPERM;
1303 		goto out;
1304 	}
1305 
1306 	/* This invariant holds only if we are not trying to remove "..".
1307 	 * We checked for that above so this is safe now. */
1308 	MPASS(node->tn_dir.tn_parent == dnode);
1309 
1310 	/* Get the directory entry associated with node (vp).  This was
1311 	 * filled by tmpfs_lookup while looking up the entry. */
1312 	de = tmpfs_dir_lookup(dnode, node, v->a_cnp);
1313 	MPASS(TMPFS_DIRENT_MATCHES(de,
1314 	    v->a_cnp->cn_nameptr,
1315 	    v->a_cnp->cn_namelen));
1316 
1317 	/* Check flags to see if we are allowed to remove the directory. */
1318 	if ((dnode->tn_flags & APPEND) != 0 ||
1319 	    (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) != 0) {
1320 		error = EPERM;
1321 		goto out;
1322 	}
1323 
1324 	/* Detach the directory entry from the directory (dnode). */
1325 	tmpfs_dir_detach(dvp, de);
1326 	if (v->a_cnp->cn_flags & DOWHITEOUT)
1327 		tmpfs_dir_whiteout_add(dvp, v->a_cnp);
1328 
1329 	/* No vnode should be allocated for this entry from this point on. */
1330 	TMPFS_NODE_LOCK(node);
1331 	node->tn_links--;
1332 	node->tn_dir.tn_parent = NULL;
1333 	node->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1334 	node->tn_accessed = true;
1335 
1336 	TMPFS_NODE_UNLOCK(node);
1337 
1338 	TMPFS_NODE_LOCK(dnode);
1339 	dnode->tn_links--;
1340 	dnode->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;
1341 	dnode->tn_accessed = true;
1342 	TMPFS_NODE_UNLOCK(dnode);
1343 
1344 	if (tmpfs_use_nc(dvp)) {
1345 		cache_vop_rmdir(dvp, vp);
1346 	}
1347 
1348 	/* Free the directory entry we just deleted.  Note that the node
1349 	 * referred by it will not be removed until the vnode is really
1350 	 * reclaimed. */
1351 	tmpfs_free_dirent(tmp, de);
1352 
1353 	/* Release the deleted vnode (will destroy the node, notify
1354 	 * interested parties and clean it from the cache). */
1355 
1356 	dnode->tn_status |= TMPFS_NODE_CHANGED;
1357 	tmpfs_update(dvp);
1358 
1359 	error = 0;
1360 
1361 out:
1362 	return (error);
1363 }
1364 
1365 static int
1366 tmpfs_symlink(struct vop_symlink_args *v)
1367 {
1368 	struct vnode *dvp = v->a_dvp;
1369 	struct vnode **vpp = v->a_vpp;
1370 	struct componentname *cnp = v->a_cnp;
1371 	struct vattr *vap = v->a_vap;
1372 	const char *target = v->a_target;
1373 
1374 #ifdef notyet /* XXX FreeBSD BUG: kern_symlink is not setting VLNK */
1375 	MPASS(vap->va_type == VLNK);
1376 #else
1377 	vap->va_type = VLNK;
1378 #endif
1379 
1380 	return (tmpfs_alloc_file(dvp, vpp, vap, cnp, target));
1381 }
1382 
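/*
 * Read directory entries.  Cookies for NFS and the compat layers are sized
 * generously: one per potential entry plus two for the synthetic '.' and
 * '..' entries emitted by tmpfs_dir_getdents().
 */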
1383 static int
1384 tmpfs_readdir(struct vop_readdir_args *va)
1385 {
1386 	struct vnode *vp;
1387 	struct uio *uio;
1388 	struct tmpfs_mount *tm;
1389 	struct tmpfs_node *node;
1390 	uint64_t **cookies;
1391 	int *eofflag, *ncookies;
1392 	ssize_t startresid;
1393 	int error, maxcookies;
1394 
1395 	vp = va->a_vp;
1396 	uio = va->a_uio;
1397 	eofflag = va->a_eofflag;
1398 	cookies = va->a_cookies;
1399 	ncookies = va->a_ncookies;
1400 
1401 	/* This operation only makes sense on directory nodes. */
1402 	if (vp->v_type != VDIR)
1403 		return (ENOTDIR);
1404 
1405 	maxcookies = 0;
1406 	node = VP_TO_TMPFS_DIR(vp);
1407 	tm = VFS_TO_TMPFS(vp->v_mount);
1408 
1409 	startresid = uio->uio_resid;
1410 
1411 	/* Allocate cookies for NFS and compat modules. */
1412 	if (cookies != NULL && ncookies != NULL) {
1413 		maxcookies = howmany(node->tn_size,
1414 		    sizeof(struct tmpfs_dirent)) + 2;
1415 		*cookies = malloc(maxcookies * sizeof(**cookies), M_TEMP,
1416 		    M_WAITOK);
1417 		*ncookies = 0;
1418 	}
1419 
1420 	if (cookies == NULL)
1421 		error = tmpfs_dir_getdents(tm, node, uio, 0, NULL, NULL);
1422 	else
1423 		error = tmpfs_dir_getdents(tm, node, uio, maxcookies, *cookies,
1424 		    ncookies);
1425 
1426 	/* Buffer was filled without hitting EOF. */
1427 	if (error == EJUSTRETURN)
1428 		error = (uio->uio_resid != startresid) ? 0 : EINVAL;
1429 
1430 	if (error != 0 && cookies != NULL && ncookies != NULL) {
1431 		free(*cookies, M_TEMP);
1432 		*cookies = NULL;
1433 		*ncookies = 0;
1434 	}
1435 
1436 	if (eofflag != NULL)
1437 		*eofflag =
1438 		    (error == 0 && uio->uio_offset == TMPFS_DIRCOOKIE_EOF);
1439 
1440 	return (error);
1441 }
1442 
1443 static int
1444 tmpfs_readlink(struct vop_readlink_args *v)
1445 {
1446 	struct vnode *vp = v->a_vp;
1447 	struct uio *uio = v->a_uio;
1448 
1449 	int error;
1450 	struct tmpfs_node *node;
1451 
1452 	MPASS(uio->uio_offset == 0);
1453 	MPASS(vp->v_type == VLNK);
1454 
1455 	node = VP_TO_TMPFS_NODE(vp);
1456 
1457 	error = uiomove(node->tn_link_target, MIN(node->tn_size, uio->uio_resid),
1458 	    uio);
1459 	tmpfs_set_accessed(VFS_TO_TMPFS(vp->v_mount), node);
1460 
1461 	return (error);
1462 }
1463 
1464 /*
1465  * VOP_FPLOOKUP_SYMLINK routines are subject to special circumstances, see
1466  * the comment above cache_fplookup for details.
1467  *
1468  * Check tmpfs_alloc_node for tmpfs-specific synchronisation notes.
1469  */
1470 static int
1471 tmpfs_fplookup_symlink(struct vop_fplookup_symlink_args *v)
1472 {
1473 	struct vnode *vp;
1474 	struct tmpfs_node *node;
1475 	char *symlink;
1476 
1477 	vp = v->a_vp;
1478 	node = VP_TO_TMPFS_NODE_SMR(vp);
1479 	if (__predict_false(node == NULL))
1480 		return (EAGAIN);
1481 	if (!atomic_load_char(&node->tn_link_smr))
1482 		return (EAGAIN);
1483 	symlink = atomic_load_ptr(&node->tn_link_target);
1484 	if (symlink == NULL)
1485 		return (EAGAIN);
1486 
1487 	return (cache_symlink_resolve(v->a_fpl, symlink, node->tn_size));
1488 }
1489 
1490 static int
1491 tmpfs_inactive(struct vop_inactive_args *v)
1492 {
1493 	struct vnode *vp;
1494 	struct tmpfs_node *node;
1495 
1496 	vp = v->a_vp;
1497 	node = VP_TO_TMPFS_NODE(vp);
1498 	if (node->tn_links == 0)
1499 		vrecycle(vp);
1500 	else
1501 		tmpfs_check_mtime(vp);
1502 	return (0);
1503 }
1504 
1505 static int
1506 tmpfs_need_inactive(struct vop_need_inactive_args *ap)
1507 {
1508 	struct vnode *vp;
1509 	struct tmpfs_node *node;
1510 	struct vm_object *obj;
1511 
1512 	vp = ap->a_vp;
1513 	node = VP_TO_TMPFS_NODE(vp);
1514 	if (node->tn_links == 0)
1515 		goto need;
1516 	if (vp->v_type == VREG) {
1517 		obj = vp->v_object;
1518 		if (obj->generation != obj->cleangeneration)
1519 			goto need;
1520 	}
1521 	return (0);
1522 need:
1523 	return (1);
1524 }
1525 
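/*
 * Reclaim a vnode: detach the backing VM object, break the vnode/node
 * association and, if the node was already unlinked, free the node itself.
 */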
1526 int
1527 tmpfs_reclaim(struct vop_reclaim_args *v)
1528 {
1529 	struct vnode *vp;
1530 	struct tmpfs_mount *tmp;
1531 	struct tmpfs_node *node;
1532 	bool unlock;
1533 
1534 	vp = v->a_vp;
1535 	node = VP_TO_TMPFS_NODE(vp);
1536 	tmp = VFS_TO_TMPFS(vp->v_mount);
1537 
1538 	if (vp->v_type == VREG)
1539 		tmpfs_destroy_vobject(vp, node->tn_reg.tn_aobj);
1540 	vp->v_object = NULL;
1541 
1542 	TMPFS_LOCK(tmp);
1543 	TMPFS_NODE_LOCK(node);
1544 	tmpfs_free_vp(vp);
1545 
1546 	/*
1547 	 * If the node referenced by this vnode was deleted by the user,
1548 	 * we must free its associated data structures (now that the vnode
1549 	 * is being reclaimed).
1550 	 */
1551 	unlock = true;
1552 	if (node->tn_links == 0 &&
1553 	    (node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0) {
1554 		node->tn_vpstate = TMPFS_VNODE_DOOMED;
1555 		unlock = !tmpfs_free_node_locked(tmp, node, true);
1556 	}
1557 
1558 	if (unlock) {
1559 		TMPFS_NODE_UNLOCK(node);
1560 		TMPFS_UNLOCK(tmp);
1561 	}
1562 
1563 	MPASS(vp->v_data == NULL);
1564 	return (0);
1565 }
1566 
1567 int
1568 tmpfs_print(struct vop_print_args *v)
1569 {
1570 	struct vnode *vp = v->a_vp;
1571 
1572 	struct tmpfs_node *node;
1573 
1574 	node = VP_TO_TMPFS_NODE(vp);
1575 
1576 	printf("tag VT_TMPFS, tmpfs_node %p, flags 0x%lx, links %jd\n",
1577 	    node, node->tn_flags, (uintmax_t)node->tn_links);
1578 	printf("\tmode 0%o, owner %d, group %d, size %jd, status 0x%x\n",
1579 	    node->tn_mode, node->tn_uid, node->tn_gid,
1580 	    (intmax_t)node->tn_size, node->tn_status);
1581 
1582 	if (vp->v_type == VFIFO)
1583 		fifo_printinfo(vp);
1584 
1585 	printf("\n");
1586 
1587 	return (0);
1588 }
1589 
1590 int
1591 tmpfs_pathconf(struct vop_pathconf_args *v)
1592 {
1593 	struct vnode *vp = v->a_vp;
1594 	int name = v->a_name;
1595 	long *retval = v->a_retval;
1596 
1597 	int error;
1598 
1599 	error = 0;
1600 
1601 	switch (name) {
1602 	case _PC_LINK_MAX:
1603 		*retval = TMPFS_LINK_MAX;
1604 		break;
1605 
1606 	case _PC_SYMLINK_MAX:
1607 		*retval = MAXPATHLEN;
1608 		break;
1609 
1610 	case _PC_NAME_MAX:
1611 		*retval = NAME_MAX;
1612 		break;
1613 
1614 	case _PC_PIPE_BUF:
1615 		if (vp->v_type == VDIR || vp->v_type == VFIFO)
1616 			*retval = PIPE_BUF;
1617 		else
1618 			error = EINVAL;
1619 		break;
1620 
1621 	case _PC_CHOWN_RESTRICTED:
1622 		*retval = 1;
1623 		break;
1624 
1625 	case _PC_NO_TRUNC:
1626 		*retval = 1;
1627 		break;
1628 
1629 	case _PC_SYNC_IO:
1630 		*retval = 1;
1631 		break;
1632 
1633 	case _PC_FILESIZEBITS:
1634 		*retval = 64;
1635 		break;
1636 
1637 	case _PC_MIN_HOLE_SIZE:
1638 		*retval = PAGE_SIZE;
1639 		break;
1640 
1641 	default:
1642 		error = vop_stdpathconf(v);
1643 	}
1644 
1645 	return (error);
1646 }
1647 
1648 static int
1649 tmpfs_vptofh(struct vop_vptofh_args *ap)
1650 /*
1651 vop_vptofh {
1652 	IN struct vnode *a_vp;
1653 	IN struct fid *a_fhp;
1654 };
1655 */
1656 {
1657 	struct tmpfs_fid_data tfd;
1658 	struct tmpfs_node *node;
1659 	struct fid *fhp;
1660 
1661 	node = VP_TO_TMPFS_NODE(ap->a_vp);
1662 	fhp = ap->a_fhp;
1663 	fhp->fid_len = sizeof(tfd);
1664 
1665 	/*
1666 	 * Copy into fid_data from the stack to avoid unaligned pointer use.
1667 	 * See the comment in sys/mount.h on struct fid for details.
1668 	 */
1669 	tfd.tfd_id = node->tn_id;
1670 	tfd.tfd_gen = node->tn_gen;
1671 	memcpy(fhp->fid_data, &tfd, fhp->fid_len);
1672 
1673 	return (0);
1674 }
1675 
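/*
 * Manage whiteout directory entries (dirents with td_node == NULL), as used
 * by unionfs: LOOKUP is a no-op, CREATE inserts a whiteout unless a real
 * entry already exists, and DELETE removes one.
 */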
1676 static int
1677 tmpfs_whiteout(struct vop_whiteout_args *ap)
1678 {
1679 	struct vnode *dvp = ap->a_dvp;
1680 	struct componentname *cnp = ap->a_cnp;
1681 	struct tmpfs_dirent *de;
1682 
1683 	switch (ap->a_flags) {
1684 	case LOOKUP:
1685 		return (0);
1686 	case CREATE:
1687 		de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(dvp), NULL, cnp);
1688 		if (de != NULL)
1689 			return (de->td_node == NULL ? 0 : EEXIST);
1690 		return (tmpfs_dir_whiteout_add(dvp, cnp));
1691 	case DELETE:
1692 		tmpfs_dir_whiteout_remove(dvp, cnp);
1693 		return (0);
1694 	default:
1695 		panic("tmpfs_whiteout: unknown op");
1696 	}
1697 }
1698 
1699 static int
1700 tmpfs_vptocnp_dir(struct tmpfs_node *tn, struct tmpfs_node *tnp,
1701     struct tmpfs_dirent **pde)
1702 {
1703 	struct tmpfs_dir_cursor dc;
1704 	struct tmpfs_dirent *de;
1705 
1706 	for (de = tmpfs_dir_first(tnp, &dc); de != NULL;
1707 	     de = tmpfs_dir_next(tnp, &dc)) {
1708 		if (de->td_node == tn) {
1709 			*pde = de;
1710 			return (0);
1711 		}
1712 	}
1713 	return (ENOENT);
1714 }
1715 
1716 static int
1717 tmpfs_vptocnp_fill(struct vnode *vp, struct tmpfs_node *tn,
1718     struct tmpfs_node *tnp, char *buf, size_t *buflen, struct vnode **dvp)
1719 {
1720 	struct tmpfs_dirent *de;
1721 	int error, i;
1722 
1723 	error = vn_vget_ino_gen(vp, tmpfs_vn_get_ino_alloc, tnp, LK_SHARED,
1724 	    dvp);
1725 	if (error != 0)
1726 		return (error);
1727 	error = tmpfs_vptocnp_dir(tn, tnp, &de);
1728 	if (error == 0) {
1729 		i = *buflen;
1730 		i -= de->td_namelen;
1731 		if (i < 0) {
1732 			error = ENOMEM;
1733 		} else {
1734 			bcopy(de->ud.td_name, buf + i, de->td_namelen);
1735 			*buflen = i;
1736 		}
1737 	}
1738 	if (error == 0) {
1739 		if (vp != *dvp)
1740 			VOP_UNLOCK(*dvp);
1741 	} else {
1742 		if (vp != *dvp)
1743 			vput(*dvp);
1744 		else
1745 			vrele(vp);
1746 	}
1747 	return (error);
1748 }
1749 
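/*
 * Reverse lookup (vnode-to-name).  Directories simply follow tn_parent; for
 * other nodes every directory on the mount is scanned for an entry pointing
 * at the node, restarting the walk when the node list changes underneath us.
 */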
1750 static int
1751 tmpfs_vptocnp(struct vop_vptocnp_args *ap)
1752 {
1753 	struct vnode *vp, **dvp;
1754 	struct tmpfs_node *tn, *tnp, *tnp1;
1755 	struct tmpfs_dirent *de;
1756 	struct tmpfs_mount *tm;
1757 	char *buf;
1758 	size_t *buflen;
1759 	int error;
1760 
1761 	vp = ap->a_vp;
1762 	dvp = ap->a_vpp;
1763 	buf = ap->a_buf;
1764 	buflen = ap->a_buflen;
1765 
1766 	tm = VFS_TO_TMPFS(vp->v_mount);
1767 	tn = VP_TO_TMPFS_NODE(vp);
1768 	if (tn->tn_type == VDIR) {
1769 		tnp = tn->tn_dir.tn_parent;
1770 		if (tnp == NULL)
1771 			return (ENOENT);
1772 		tmpfs_ref_node(tnp);
1773 		error = tmpfs_vptocnp_fill(vp, tn, tn->tn_dir.tn_parent, buf,
1774 		    buflen, dvp);
1775 		tmpfs_free_node(tm, tnp);
1776 		return (error);
1777 	}
1778 restart:
1779 	TMPFS_LOCK(tm);
1780 restart_locked:
1781 	LIST_FOREACH_SAFE(tnp, &tm->tm_nodes_used, tn_entries, tnp1) {
1782 		if (tnp->tn_type != VDIR)
1783 			continue;
1784 		TMPFS_NODE_LOCK(tnp);
1785 		tmpfs_ref_node(tnp);
1786 
1787 		/*
1788 		 * tn_vnode cannot be instantiated while we hold the
1789 		 * node lock, so the directory cannot be changed while
1790 		 * we iterate over it.  Do this to avoid instantiating
1791 		 * a vnode for directories that cannot point to our
1792 		 * node.
1793 		 */
1794 		error = tnp->tn_vnode == NULL ? tmpfs_vptocnp_dir(tn, tnp,
1795 		    &de) : 0;
1796 
1797 		if (error == 0) {
1798 			TMPFS_NODE_UNLOCK(tnp);
1799 			TMPFS_UNLOCK(tm);
1800 			error = tmpfs_vptocnp_fill(vp, tn, tnp, buf, buflen,
1801 			    dvp);
1802 			if (error == 0) {
1803 				tmpfs_free_node(tm, tnp);
1804 				return (0);
1805 			}
1806 			if (VN_IS_DOOMED(vp)) {
1807 				tmpfs_free_node(tm, tnp);
1808 				return (ENOENT);
1809 			}
1810 			TMPFS_LOCK(tm);
1811 			TMPFS_NODE_LOCK(tnp);
1812 		}
1813 		if (tmpfs_free_node_locked(tm, tnp, false)) {
1814 			goto restart;
1815 		} else {
1816 			KASSERT(tnp->tn_refcount > 0,
1817 			    ("node %p refcount zero", tnp));
1818 			if (tnp->tn_attached) {
1819 				tnp1 = LIST_NEXT(tnp, tn_entries);
1820 				TMPFS_NODE_UNLOCK(tnp);
1821 			} else {
1822 				TMPFS_NODE_UNLOCK(tnp);
1823 				goto restart_locked;
1824 			}
1825 		}
1826 	}
1827 	TMPFS_UNLOCK(tm);
1828 	return (ENOENT);
1829 }
1830 
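/*
 * SEEK_DATA/SEEK_HOLE helpers.  A page offset counts as data when the page
 * is resident and valid or is backed by swap; anything else is a hole.
 */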
1831 static off_t
1832 tmpfs_seek_data_locked(vm_object_t obj, off_t noff)
1833 {
1834 	vm_page_t m;
1835 	vm_pindex_t p, p_m, p_swp;
1836 
1837 	p = OFF_TO_IDX(noff);
1838 	m = vm_page_find_least(obj, p);
1839 
1840 	/*
1841 	 * Microoptimize the most common case for SEEK_DATA, where
1842 	 * there is no hole and the page is resident.
1843 	 */
1844 	if (m != NULL && vm_page_any_valid(m) && m->pindex == p)
1845 		return (noff);
1846 
1847 	p_swp = swap_pager_find_least(obj, p);
1848 	if (p_swp == p)
1849 		return (noff);
1850 
1851 	p_m = m == NULL ? obj->size : m->pindex;
1852 	return (IDX_TO_OFF(MIN(p_m, p_swp)));
1853 }
1854 
1855 static off_t
1856 tmpfs_seek_next(off_t noff)
1857 {
1858 	return (noff + PAGE_SIZE - (noff & PAGE_MASK));
1859 }
1860 
1861 static int
1862 tmpfs_seek_clamp(struct tmpfs_node *tn, off_t *noff, bool seekdata)
1863 {
1864 	if (*noff < tn->tn_size)
1865 		return (0);
1866 	if (seekdata)
1867 		return (ENXIO);
1868 	*noff = tn->tn_size;
1869 	return (0);
1870 }
1871 
1872 static off_t
1873 tmpfs_seek_hole_locked(vm_object_t obj, off_t noff)
1874 {
1875 	vm_page_t m;
1876 	vm_pindex_t p, p_swp;
1877 
1878 	for (;; noff = tmpfs_seek_next(noff)) {
1879 		/*
1880 		 * Walk over the largest sequential run of the valid pages.
1881 		 */
1882 		for (m = vm_page_lookup(obj, OFF_TO_IDX(noff));
1883 		    m != NULL && vm_page_any_valid(m);
1884 		    m = vm_page_next(m), noff = tmpfs_seek_next(noff))
1885 			;
1886 
1887 		/*
1888 		 * Found a hole in the object's page queue.  Check if
1889 		 * there is a hole in the swap at the same place.
1890 		 */
1891 		p = OFF_TO_IDX(noff);
1892 		p_swp = swap_pager_find_least(obj, p);
1893 		if (p_swp != p) {
1894 			noff = IDX_TO_OFF(p);
1895 			break;
1896 		}
1897 	}
1898 	return (noff);
1899 }
1900 
1901 static int
1902 tmpfs_seek_datahole(struct vnode *vp, off_t *off, bool seekdata)
1903 {
1904 	struct tmpfs_node *tn;
1905 	vm_object_t obj;
1906 	off_t noff;
1907 	int error;
1908 
1909 	if (vp->v_type != VREG)
1910 		return (ENOTTY);
1911 	tn = VP_TO_TMPFS_NODE(vp);
1912 	noff = *off;
1913 	if (noff < 0)
1914 		return (ENXIO);
1915 	error = tmpfs_seek_clamp(tn, &noff, seekdata);
1916 	if (error != 0)
1917 		return (error);
1918 	obj = tn->tn_reg.tn_aobj;
1919 
1920 	VM_OBJECT_RLOCK(obj);
1921 	noff = seekdata ? tmpfs_seek_data_locked(obj, noff) :
1922 	    tmpfs_seek_hole_locked(obj, noff);
1923 	VM_OBJECT_RUNLOCK(obj);
1924 
1925 	error = tmpfs_seek_clamp(tn, &noff, seekdata);
1926 	if (error == 0)
1927 		*off = noff;
1928 	return (error);
1929 }
1930 
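/*
 * FIOSEEKDATA and FIOSEEKHOLE are how lseek(2) implements SEEK_DATA and
 * SEEK_HOLE, e.g. lseek(fd, 0, SEEK_HOLE) on a tmpfs file ends up here
 * with the target offset passed through a_data.
 */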
1931 static int
1932 tmpfs_ioctl(struct vop_ioctl_args *ap)
1933 {
1934 	struct vnode *vp = ap->a_vp;
1935 	int error = 0;
1936 
1937 	switch (ap->a_command) {
1938 	case FIOSEEKDATA:
1939 	case FIOSEEKHOLE:
1940 		error = vn_lock(vp, LK_SHARED);
1941 		if (error != 0) {
1942 			error = EBADF;
1943 			break;
1944 		}
1945 		error = tmpfs_seek_datahole(vp, (off_t *)ap->a_data,
1946 		    ap->a_command == FIOSEEKDATA);
1947 		VOP_UNLOCK(vp);
1948 		break;
1949 	default:
1950 		error = ENOTTY;
1951 		break;
1952 	}
1953 	return (error);
1954 }
1955 
1956 /*
1957  * Vnode operations vector used for files stored in a tmpfs file system.
1958  */
1959 struct vop_vector tmpfs_vnodeop_entries = {
1960 	.vop_default =			&default_vnodeops,
1961 	.vop_lookup =			vfs_cache_lookup,
1962 	.vop_cachedlookup =		tmpfs_cached_lookup,
1963 	.vop_create =			tmpfs_create,
1964 	.vop_mknod =			tmpfs_mknod,
1965 	.vop_open =			tmpfs_open,
1966 	.vop_close =			tmpfs_close,
1967 	.vop_fplookup_vexec =		tmpfs_fplookup_vexec,
1968 	.vop_fplookup_symlink =		tmpfs_fplookup_symlink,
1969 	.vop_access =			tmpfs_access,
1970 	.vop_stat =			tmpfs_stat,
1971 	.vop_getattr =			tmpfs_getattr,
1972 	.vop_setattr =			tmpfs_setattr,
1973 	.vop_read =			tmpfs_read,
1974 	.vop_read_pgcache =		tmpfs_read_pgcache,
1975 	.vop_write =			tmpfs_write,
1976 	.vop_deallocate =		tmpfs_deallocate,
1977 	.vop_fsync =			tmpfs_fsync,
1978 	.vop_remove =			tmpfs_remove,
1979 	.vop_link =			tmpfs_link,
1980 	.vop_rename =			tmpfs_rename,
1981 	.vop_mkdir =			tmpfs_mkdir,
1982 	.vop_rmdir =			tmpfs_rmdir,
1983 	.vop_symlink =			tmpfs_symlink,
1984 	.vop_readdir =			tmpfs_readdir,
1985 	.vop_readlink =			tmpfs_readlink,
1986 	.vop_inactive =			tmpfs_inactive,
1987 	.vop_need_inactive =		tmpfs_need_inactive,
1988 	.vop_reclaim =			tmpfs_reclaim,
1989 	.vop_print =			tmpfs_print,
1990 	.vop_pathconf =			tmpfs_pathconf,
1991 	.vop_vptofh =			tmpfs_vptofh,
1992 	.vop_whiteout =			tmpfs_whiteout,
1993 	.vop_bmap =			VOP_EOPNOTSUPP,
1994 	.vop_vptocnp =			tmpfs_vptocnp,
1995 	.vop_lock1 =			vop_lock,
1996 	.vop_unlock = 			vop_unlock,
1997 	.vop_islocked = 		vop_islocked,
1998 	.vop_add_writecount =		vop_stdadd_writecount_nomsync,
1999 	.vop_ioctl =			tmpfs_ioctl,
2000 };
2001 VFS_VOP_VECTOR_REGISTER(tmpfs_vnodeop_entries);
2002 
2003 /*
2004  * Same vector for mounts which do not use namecache.
2005  */
2006 struct vop_vector tmpfs_vnodeop_nonc_entries = {
2007 	.vop_default =			&tmpfs_vnodeop_entries,
2008 	.vop_lookup =			tmpfs_lookup,
2009 };
2010 VFS_VOP_VECTOR_REGISTER(tmpfs_vnodeop_nonc_entries);
2011