/*	$NetBSD: tmpfs_vfsops.c,v 1.10 2005/12/11 12:24:29 christos Exp $	*/

/*-
 * SPDX-License-Identifier: BSD-2-Clause-NetBSD
 *
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Efficient memory file system.
 *
 * tmpfs is a file system that uses FreeBSD's virtual memory
 * sub-system to store file data and metadata in an efficient way.
 * This means that it does not follow the structure of an on-disk file
 * system because it simply does not need to.  Instead, it uses
 * memory-specific data structures and algorithms to automatically
 * allocate and release resources.
 */

#include "opt_tmpfs.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dirent.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

#include <fs/tmpfs/tmpfs.h>

/*
 * Default permission for root node
 */
#define TMPFS_DEFAULT_ROOT_MODE	(S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)

MALLOC_DEFINE(M_TMPFSMNT, "tmpfs mount", "tmpfs mount structures");
MALLOC_DEFINE(M_TMPFSNAME, "tmpfs name", "tmpfs file names");

static int	tmpfs_mount(struct mount *);
static int	tmpfs_unmount(struct mount *, int);
static int	tmpfs_root(struct mount *, int flags, struct vnode **);
static int	tmpfs_fhtovp(struct mount *, struct fid *, int,
		    struct vnode **);
static int	tmpfs_statfs(struct mount *, struct statfs *);

static const char *tmpfs_opts[] = {
	"from", "size", "maxfilesize", "inodes", "uid", "gid", "mode", "export",
	"union", "nonc", "nomtime", NULL
};

static const char *tmpfs_updateopts[] = {
	"from", "export", "nomtime", "size", NULL
};

/*
 * Handle updates of time from writes to mmapped regions, if allowed.
 * Use MNT_VNODE_FOREACH_ALL instead of MNT_VNODE_FOREACH_LAZY, since
 * unmap of the tmpfs-backed vnode does not call vinactive(), because
 * the vm object type is OBJT_SWAP.  If lazy, only handle the delayed
 * mtime updates caused by writes to mapped files.
 */
static void
tmpfs_update_mtime(struct mount *mp, bool lazy)
{
	struct vnode *vp, *mvp;
	struct vm_object *obj;

	if (VFS_TO_TMPFS(mp)->tm_nomtime)
		return;
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_type != VREG) {
			VI_UNLOCK(vp);
			continue;
		}
		obj = vp->v_object;
		KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) ==
		    (OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj"));

		/*
		 * In the lazy case, do an unlocked read and avoid
		 * taking the vnode lock if not needed.  A lost update
		 * will be handled on the next call.
		 * In the non-lazy case, we must flush all pending
		 * metadata changes now.
		 */
		if (!lazy || obj->generation != obj->cleangeneration) {
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) != 0)
				continue;
			tmpfs_check_mtime(vp);
			if (!lazy)
				tmpfs_update(vp);
			vput(vp);
		} else {
			VI_UNLOCK(vp);
			continue;
		}
	}
}

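/*
 * Callback and argument for tmpfs_all_rw_maps(), used by
 * tmpfs_check_rw_maps().  Finding a single writeable mapping is
 * enough, so the callback records the hit and returns true to
 * terminate the scan early.
 */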
struct tmpfs_check_rw_maps_arg {
	bool found;
};

static bool
tmpfs_check_rw_maps_cb(struct mount *mp __unused, vm_map_t map __unused,
    vm_map_entry_t entry __unused, void *arg)
{
	struct tmpfs_check_rw_maps_arg *a;

	a = arg;
	a->found = true;
	return (true);
}

/*
 * Callback that revokes write permission from one mapping of a regular
 * file belonging to the specified tmpfs mount.
 */
static bool
tmpfs_revoke_rw_maps_cb(struct mount *mp __unused, vm_map_t map,
    vm_map_entry_t entry, void *arg __unused)
{

	/*
	 * XXXKIB: might it be better to invalidate the mapping
	 * instead?  The process is not going to be happy in
	 * any case.
	 */
	entry->max_protection &= ~VM_PROT_WRITE;
	if ((entry->protection & VM_PROT_WRITE) != 0) {
		entry->protection &= ~VM_PROT_WRITE;
		pmap_protect(map->pmap, entry->start, entry->end,
		    entry->protection);
	}
	return (false);
}

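/*
 * Walk all processes and invoke the callback for every writeable map
 * entry that is backed by a regular file on the given tmpfs mount.
 * The allproc lock is dropped while an individual map is examined; if
 * the process list changes in the meantime (allproc_gen is bumped),
 * the whole scan restarts.  A callback returning true terminates the
 * scan.
 */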
static void
tmpfs_all_rw_maps(struct mount *mp, bool (*cb)(struct mount *mp, vm_map_t,
    vm_map_entry_t, void *), void *cb_arg)
{
	struct proc *p;
	struct vmspace *vm;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t object;
	struct vnode *vp;
	int gen;
	bool terminate;

	terminate = false;
	sx_slock(&allproc_lock);
again:
	gen = allproc_gen;
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
		    P_SYSTEM | P_WEXIT)) != 0) {
			PROC_UNLOCK(p);
			continue;
		}
		vm = vmspace_acquire_ref(p);
		_PHOLD_LITE(p);
		PROC_UNLOCK(p);
		if (vm == NULL) {
			PRELE(p);
			continue;
		}
		sx_sunlock(&allproc_lock);
		map = &vm->vm_map;

		vm_map_lock(map);
		if (map->busy)
			vm_map_wait_busy(map);
		VM_MAP_ENTRY_FOREACH(entry, map) {
			if ((entry->eflags & (MAP_ENTRY_GUARD |
			    MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_COW)) != 0 ||
			    (entry->max_protection & VM_PROT_WRITE) == 0)
				continue;
			object = entry->object.vm_object;
			if (object == NULL || object->type != OBJT_SWAP ||
			    (object->flags & OBJ_TMPFS_NODE) == 0)
				continue;
			/*
			 * No need to dig into the shadow chain; a
			 * mapping of an object that is not at the top
			 * is read-only.
			 */

			VM_OBJECT_RLOCK(object);
			if (object->type == OBJT_DEAD) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}
			MPASS(object->ref_count > 1);
			if ((object->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) !=
			    (OBJ_TMPFS_NODE | OBJ_TMPFS)) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}
			vp = object->un_pager.swp.swp_tmpfs;
			if (vp->v_mount != mp) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}

			terminate = cb(mp, map, entry, cb_arg);
			VM_OBJECT_RUNLOCK(object);
			if (terminate)
				break;
		}
		vm_map_unlock(map);

		vmspace_free(vm);
		sx_slock(&allproc_lock);
		PRELE(p);
		if (terminate)
			break;
	}
	if (!terminate && gen != allproc_gen)
		goto again;
	sx_sunlock(&allproc_lock);
}

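/*
 * Return true if any process maps a regular file from this mount with
 * write access permitted.
 */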
static bool
tmpfs_check_rw_maps(struct mount *mp)
{
	struct tmpfs_check_rw_maps_arg ca;

	ca.found = false;
	tmpfs_all_rw_maps(mp, tmpfs_check_rw_maps_cb, &ca);
	return (ca.found);
}

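/*
 * Downgrade a read-write mount to read-only: suspend writers, fail
 * with EBUSY if writeable mappings exist and the downgrade is not
 * forced, then loop revoking write access from mappings and flushing
 * vnodes until no new writeable mappings appear.
 */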
static int
tmpfs_rw_to_ro(struct mount *mp)
{
	int error, flags;
	bool forced;

	forced = (mp->mnt_flag & MNT_FORCE) != 0;
	flags = WRITECLOSE | (forced ? FORCECLOSE : 0);

	if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
		return (error);
	error = vfs_write_suspend_umnt(mp);
	if (error != 0)
		return (error);
	if (!forced && tmpfs_check_rw_maps(mp)) {
		error = EBUSY;
		goto out;
	}
	VFS_TO_TMPFS(mp)->tm_ronly = 1;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_RDONLY;
	MNT_IUNLOCK(mp);
	for (;;) {
		tmpfs_all_rw_maps(mp, tmpfs_revoke_rw_maps_cb, NULL);
		tmpfs_update_mtime(mp, false);
		error = vflush(mp, 0, flags, curthread);
		if (error != 0) {
			VFS_TO_TMPFS(mp)->tm_ronly = 0;
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_RDONLY;
			MNT_IUNLOCK(mp);
			goto out;
		}
		if (!tmpfs_check_rw_maps(mp))
			break;
	}
out:
	vfs_write_resume(mp, 0);
	return (error);
}

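/*
 * Parse mount options and set up a new tmpfs mount.  For MNT_UPDATE
 * remounts only the options in tmpfs_updateopts are accepted, and
 * on-the-fly resizing is refused.  A typical invocation would be,
 * for example:
 *
 *	mount -t tmpfs -o size=64m,mode=1777 tmpfs /mnt
 */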
static int
tmpfs_mount(struct mount *mp)
{
	const size_t nodes_per_page = howmany(PAGE_SIZE,
	    sizeof(struct tmpfs_dirent) + sizeof(struct tmpfs_node));
	struct tmpfs_mount *tmp;
	struct tmpfs_node *root;
	int error;
	bool nomtime, nonc;
	/* Size counters. */
	u_quad_t pages;
	off_t nodes_max, size_max, maxfilesize;

	/* Root node attributes. */
	uid_t root_uid;
	gid_t root_gid;
	mode_t root_mode;

	struct vattr va;

	if (vfs_filteropt(mp->mnt_optnew, tmpfs_opts))
		return (EINVAL);

	if (mp->mnt_flag & MNT_UPDATE) {
		/* Only support update mounts for certain options. */
		if (vfs_filteropt(mp->mnt_optnew, tmpfs_updateopts) != 0)
			return (EOPNOTSUPP);
		tmp = VFS_TO_TMPFS(mp);
		if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) == 0) {
			/*
			 * On-the-fly resizing is not supported (yet).  We
			 * still need to have "size" listed as "supported";
			 * otherwise, trying to update a file system that is
			 * listed in fstab with a size parameter, say when
			 * changing rw to ro or vice versa, would cause
			 * vfs_filteropt() to bail.
			 */
			if (size_max != tmp->tm_size_max)
				return (EOPNOTSUPP);
		}
		if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) &&
		    !tmp->tm_ronly) {
			/* RW -> RO */
			return (tmpfs_rw_to_ro(mp));
		} else if (!vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) &&
		    tmp->tm_ronly) {
			/* RO -> RW */
			tmp->tm_ronly = 0;
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_RDONLY;
			MNT_IUNLOCK(mp);
		}
		tmp->tm_nomtime = vfs_getopt(mp->mnt_optnew, "nomtime", NULL,
		    0) == 0;
		MNT_ILOCK(mp);
		if ((mp->mnt_flag & MNT_UNION) == 0) {
			mp->mnt_kern_flag |= MNTK_FPLOOKUP;
		} else {
			mp->mnt_kern_flag &= ~MNTK_FPLOOKUP;
		}
		MNT_IUNLOCK(mp);
		return (0);
	}

	vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred);
	VOP_UNLOCK(mp->mnt_vnodecovered);
	if (error)
		return (error);

	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "gid", "%d", &root_gid) != 1)
		root_gid = va.va_gid;
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "uid", "%d", &root_uid) != 1)
		root_uid = va.va_uid;
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "mode", "%ho", &root_mode) != 1)
		root_mode = va.va_mode;
	if (vfs_getopt_size(mp->mnt_optnew, "inodes", &nodes_max) != 0)
		nodes_max = 0;
	if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) != 0)
		size_max = 0;
	if (vfs_getopt_size(mp->mnt_optnew, "maxfilesize", &maxfilesize) != 0)
		maxfilesize = 0;
	nonc = vfs_getopt(mp->mnt_optnew, "nonc", NULL, NULL) == 0;
	nomtime = vfs_getopt(mp->mnt_optnew, "nomtime", NULL, NULL) == 0;

	/*
	 * Do not allow mounts if we do not have enough memory to preserve
	 * the minimum reserved pages.
	 */
	if (tmpfs_mem_avail() < TMPFS_PAGES_MINRESERVED)
		return (ENOSPC);

	/*
	 * Get the maximum number of memory pages this file system is
	 * allowed to use, based on the maximum size the user passed in
	 * the mount structure.  A value of zero is treated as if the
	 * maximum available space was requested.
	 */
	if (size_max == 0 || size_max > OFF_MAX - PAGE_SIZE ||
	    (SIZE_MAX < OFF_MAX && size_max / PAGE_SIZE >= SIZE_MAX))
		pages = SIZE_MAX;
	else {
		size_max = roundup(size_max, PAGE_SIZE);
		pages = howmany(size_max, PAGE_SIZE);
	}
	MPASS(pages > 0);
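
	/*
	 * For example, assuming 4 KiB pages, "size=64m" rounds up to
	 * 67108864 bytes and yields pages = 16384.
	 */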

	if (nodes_max <= 3) {
		if (pages < INT_MAX / nodes_per_page)
			nodes_max = pages * nodes_per_page;
		else
			nodes_max = INT_MAX;
	}
	if (nodes_max > INT_MAX)
		nodes_max = INT_MAX;
	MPASS(nodes_max >= 3);

	/* Allocate the tmpfs mount structure and fill it. */
	tmp = (struct tmpfs_mount *)malloc(sizeof(struct tmpfs_mount),
	    M_TMPFSMNT, M_WAITOK | M_ZERO);

	mtx_init(&tmp->tm_allnode_lock, "tmpfs allnode lock", NULL, MTX_DEF);
	tmp->tm_nodes_max = nodes_max;
	tmp->tm_nodes_inuse = 0;
	tmp->tm_refcount = 1;
	tmp->tm_maxfilesize = maxfilesize > 0 ? maxfilesize : OFF_MAX;
	LIST_INIT(&tmp->tm_nodes_used);

	tmp->tm_size_max = size_max;
	tmp->tm_pages_max = pages;
	tmp->tm_pages_used = 0;
	new_unrhdr64(&tmp->tm_ino_unr, 2);
	tmp->tm_ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	tmp->tm_nonc = nonc;
	tmp->tm_nomtime = nomtime;

	/* Allocate the root node. */
	error = tmpfs_alloc_node(mp, tmp, VDIR, root_uid, root_gid,
	    root_mode & ALLPERMS, NULL, NULL, VNOVAL, &root);

	if (error != 0 || root == NULL) {
		free(tmp, M_TMPFSMNT);
		return (error);
	}
	KASSERT(root->tn_id == 2,
	    ("tmpfs root with invalid ino: %ju", (uintmax_t)root->tn_id));
	tmp->tm_root = root;

	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
	    MNTK_TEXT_REFS | MNTK_NOMSYNC;
	if (!nonc && (mp->mnt_flag & MNT_UNION) == 0)
		mp->mnt_kern_flag |= MNTK_FPLOOKUP;
	MNT_IUNLOCK(mp);

	mp->mnt_data = tmp;
	mp->mnt_stat.f_namemax = MAXNAMLEN;
	vfs_getnewfsid(mp);
	vfs_mountedfrom(mp, "tmpfs");

	return (0);
}

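/*
 * Unmount: suspend writers, flush all vnodes (repeating for a forced
 * unmount until the mount's vnode list is empty), then destroy the
 * remaining nodes and release the per-mount structure.
 */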
/* ARGSUSED2 */
static int
tmpfs_unmount(struct mount *mp, int mntflags)
{
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	int error, flags;

	flags = (mntflags & MNT_FORCE) != 0 ? FORCECLOSE : 0;
	tmp = VFS_TO_TMPFS(mp);

	/* Stop writers. */
	error = vfs_write_suspend_umnt(mp);
	if (error != 0)
		return (error);
	/*
	 * At this point, nodes cannot be destroyed by any other
	 * thread because write suspension has started.
	 */

	for (;;) {
		error = vflush(mp, 0, flags, curthread);
		if (error != 0) {
			vfs_write_resume(mp, VR_START_WRITE);
			return (error);
		}
		MNT_ILOCK(mp);
		if (mp->mnt_nvnodelistsize == 0) {
			MNT_IUNLOCK(mp);
			break;
		}
		MNT_IUNLOCK(mp);
		if ((mntflags & MNT_FORCE) == 0) {
			vfs_write_resume(mp, VR_START_WRITE);
			return (EBUSY);
		}
	}

	TMPFS_LOCK(tmp);
	while ((node = LIST_FIRST(&tmp->tm_nodes_used)) != NULL) {
		TMPFS_NODE_LOCK(node);
		if (node->tn_type == VDIR)
			tmpfs_dir_destroy(tmp, node);
		if (tmpfs_free_node_locked(tmp, node, true))
			TMPFS_LOCK(tmp);
		else
			TMPFS_NODE_UNLOCK(node);
	}

	mp->mnt_data = NULL;
	tmpfs_free_tmp(tmp);
	vfs_write_resume(mp, VR_START_WRITE);

	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);

	return (0);
}

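/*
 * Drop a reference on the per-mount structure, freeing it when the
 * last reference goes away.  Called with the tmpfs mount lock held;
 * the lock is released before returning.
 */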
void
tmpfs_free_tmp(struct tmpfs_mount *tmp)
{

	MPASS(tmp->tm_refcount > 0);
	tmp->tm_refcount--;
	if (tmp->tm_refcount > 0) {
		TMPFS_UNLOCK(tmp);
		return;
	}
	TMPFS_UNLOCK(tmp);

	mtx_destroy(&tmp->tm_allnode_lock);
	MPASS(tmp->tm_pages_used == 0);
	MPASS(tmp->tm_nodes_inuse == 0);

	free(tmp, M_TMPFSMNT);
}

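/*
 * Return the root vnode of the mount, marked with VV_ROOT.
 */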
static int
tmpfs_root(struct mount *mp, int flags, struct vnode **vpp)
{
	int error;

	error = tmpfs_alloc_vp(mp, VFS_TO_TMPFS(mp)->tm_root, flags, vpp);
	if (error == 0)
		(*vpp)->v_vflag |= VV_ROOT;
	return (error);
}

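/*
 * Translate a file handle (e.g. from an NFS request) to a vnode by
 * searching the in-use node list for a matching inode number and
 * generation.
 */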
static int
tmpfs_fhtovp(struct mount *mp, struct fid *fhp, int flags,
    struct vnode **vpp)
{
	struct tmpfs_fid_data tfd;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	int error;

	if (fhp->fid_len != sizeof(tfd))
		return (EINVAL);

	/*
	 * Copy from fid_data onto the stack to avoid unaligned pointer use.
	 * See the comment in sys/mount.h on struct fid for details.
	 */
	memcpy(&tfd, fhp->fid_data, fhp->fid_len);

	tmp = VFS_TO_TMPFS(mp);

	if (tfd.tfd_id >= tmp->tm_nodes_max)
		return (EINVAL);

	TMPFS_LOCK(tmp);
	LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
		if (node->tn_id == tfd.tfd_id &&
		    node->tn_gen == tfd.tfd_gen) {
			tmpfs_ref_node(node);
			break;
		}
	}
	TMPFS_UNLOCK(tmp);

	if (node != NULL) {
		error = tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, vpp);
		tmpfs_free_node(tmp, node);
	} else
		error = EINVAL;
	return (error);
}

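/*
 * Fill in file system statistics.  Block counts are reported in
 * PAGE_SIZE units; when no "size" limit was set (tm_pages_max is
 * unlimited), the total is the sum of the pages in use and the pages
 * still available to tmpfs.
 */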
/* ARGSUSED2 */
static int
tmpfs_statfs(struct mount *mp, struct statfs *sbp)
{
	struct tmpfs_mount *tmp;
	size_t used;

	tmp = VFS_TO_TMPFS(mp);

	sbp->f_iosize = PAGE_SIZE;
	sbp->f_bsize = PAGE_SIZE;

	used = tmpfs_pages_used(tmp);
	if (tmp->tm_pages_max != ULONG_MAX)
		sbp->f_blocks = tmp->tm_pages_max;
	else
		sbp->f_blocks = used + tmpfs_mem_avail();
	if (sbp->f_blocks <= used)
		sbp->f_bavail = 0;
	else
		sbp->f_bavail = sbp->f_blocks - used;
	sbp->f_bfree = sbp->f_bavail;
	used = tmp->tm_nodes_inuse;
	sbp->f_files = tmp->tm_nodes_max;
	if (sbp->f_files <= used)
		sbp->f_ffree = 0;
	else
		sbp->f_ffree = sbp->f_files - used;
	/* sbp->f_owner = tmp->tn_uid; */

	return (0);
}

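/*
 * tmpfs has no backing store to write back to, so sync only has to
 * acknowledge suspension requests and, for MNT_LAZY, push delayed
 * mtime updates caused by writes to mapped files.
 */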
static int
tmpfs_sync(struct mount *mp, int waitfor)
{

	if (waitfor == MNT_SUSPEND) {
		MNT_ILOCK(mp);
		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
		MNT_IUNLOCK(mp);
	} else if (waitfor == MNT_LAZY) {
		tmpfs_update_mtime(mp, true);
	}
	return (0);
}

static int
tmpfs_init(struct vfsconf *conf)
{

	tmpfs_subr_init();
	return (0);
}

static int
tmpfs_uninit(struct vfsconf *conf)
{

	tmpfs_subr_uninit();
	return (0);
}

/*
 * tmpfs vfs operations.
 *
 * vfs_root is serviced by the generic vfs_cache_root(), which caches
 * the root vnode; tmpfs_root() is only called to populate that cache.
 */
struct vfsops tmpfs_vfsops = {
	.vfs_mount =			tmpfs_mount,
	.vfs_unmount =			tmpfs_unmount,
	.vfs_root =			vfs_cache_root,
	.vfs_cachedroot =		tmpfs_root,
	.vfs_statfs =			tmpfs_statfs,
	.vfs_fhtovp =			tmpfs_fhtovp,
	.vfs_sync =			tmpfs_sync,
	.vfs_init =			tmpfs_init,
	.vfs_uninit =			tmpfs_uninit,
};
VFS_SET(tmpfs_vfsops, tmpfs, VFCF_JAIL);