/*	$NetBSD: tmpfs_vfsops.c,v 1.10 2005/12/11 12:24:29 christos Exp $	*/

/*-
 * SPDX-License-Identifier: BSD-2-Clause-NetBSD
 *
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Efficient memory file system.
 *
 * tmpfs is a file system that uses FreeBSD's virtual memory
 * sub-system to store file data and metadata in an efficient way.
 * This means that it does not follow the structure of an on-disk file
 * system because it simply does not need to.  Instead, it uses
 * memory-specific data structures and algorithms to automatically
 * allocate and release resources.
 */

#include "opt_tmpfs.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dirent.h>
#include <sys/file.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

#include <fs/tmpfs/tmpfs.h>

/*
 * Default permission for root node
 */
#define TMPFS_DEFAULT_ROOT_MODE	(S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)

static MALLOC_DEFINE(M_TMPFSMNT, "tmpfs mount", "tmpfs mount structures");
MALLOC_DEFINE(M_TMPFSNAME, "tmpfs name", "tmpfs file names");

static int	tmpfs_mount(struct mount *);
static int	tmpfs_unmount(struct mount *, int);
static int	tmpfs_root(struct mount *, int flags, struct vnode **);
static int	tmpfs_fhtovp(struct mount *, struct fid *, int,
		    struct vnode **);
static int	tmpfs_statfs(struct mount *, struct statfs *);

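/*
 * Mount options understood by tmpfs, checked against the incoming option
 * list with vfs_filteropt().  A typical invocation is, e.g.:
 *
 *	mount -t tmpfs -o size=64m,mode=1777 tmpfs /tmp
 */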
static const char *tmpfs_opts[] = {
	"from", "size", "maxfilesize", "inodes", "uid", "gid", "mode", "export",
	"union", "nonc", "nomtime", NULL
};

static const char *tmpfs_updateopts[] = {
	"from", "export", "nomtime", "size", NULL
};

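/*
 * Filter for the lazy vnode iterator: only visit regular files whose VM
 * object might hold pages dirtied through a writable mapping.
 */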
static int
tmpfs_update_mtime_lazy_filter(struct vnode *vp, void *arg)
{
	struct vm_object *obj;

	if (vp->v_type != VREG)
		return (0);

	obj = atomic_load_ptr(&vp->v_object);
	if (obj == NULL)
		return (0);

	return (vm_object_mightbedirty_(obj));
}

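/*
 * Update the mtime of regular files that may have been written through
 * their mappings, visiting only the vnodes selected by the filter above.
 */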
static void
tmpfs_update_mtime_lazy(struct mount *mp)
{
	struct vnode *vp, *mvp;

	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, tmpfs_update_mtime_lazy_filter, NULL) {
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) != 0)
			continue;
		tmpfs_check_mtime(vp);
		vput(vp);
	}
}

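/*
 * As above, but walk every vnode on the mount and also flush the updated
 * timestamps into the nodes; skipped entirely when "nomtime" is in effect.
 */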
static void
tmpfs_update_mtime_all(struct mount *mp)
{
	struct vnode *vp, *mvp;

	if (VFS_TO_TMPFS(mp)->tm_nomtime)
		return;
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_type != VREG) {
			VI_UNLOCK(vp);
			continue;
		}
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) != 0)
			continue;
		tmpfs_check_mtime(vp);
		tmpfs_update(vp);
		vput(vp);
	}
}

struct tmpfs_check_rw_maps_arg {
	bool found;
};

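/*
 * Callback for tmpfs_all_rw_maps(): record that a writable mapping was
 * found and terminate the scan.
 */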
static bool
tmpfs_check_rw_maps_cb(struct mount *mp __unused, vm_map_t map __unused,
    vm_map_entry_t entry __unused, void *arg)
{
	struct tmpfs_check_rw_maps_arg *a;

	a = arg;
	a->found = true;
	return (true);
}

/*
 * Revoke write permissions from all mappings of regular files
 * belonging to the specified tmpfs mount.
 */
static bool
tmpfs_revoke_rw_maps_cb(struct mount *mp __unused, vm_map_t map,
    vm_map_entry_t entry, void *arg __unused)
{

	/*
	 * XXXKIB: might it be better to invalidate the mapping instead?
	 * The process is not going to be happy in any case.
	 */
	entry->max_protection &= ~VM_PROT_WRITE;
	if ((entry->protection & VM_PROT_WRITE) != 0) {
		entry->protection &= ~VM_PROT_WRITE;
		pmap_protect(map->pmap, entry->start, entry->end,
		    entry->protection);
	}
	return (false);
}

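/*
 * Walk the address spaces of all processes and invoke the callback on
 * every writable map entry backed by a top-level OBJ_TMPFS object that
 * belongs to the given mount.  The scan restarts if the process list
 * changes while the allproc lock is dropped.
 */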
static void
tmpfs_all_rw_maps(struct mount *mp, bool (*cb)(struct mount *mp, vm_map_t,
    vm_map_entry_t, void *), void *cb_arg)
{
	struct proc *p;
	struct vmspace *vm;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t object;
	struct vnode *vp;
	int gen;
	bool terminate;

	terminate = false;
	sx_slock(&allproc_lock);
again:
	gen = allproc_gen;
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
		    P_SYSTEM | P_WEXIT)) != 0) {
			PROC_UNLOCK(p);
			continue;
		}
		vm = vmspace_acquire_ref(p);
		_PHOLD_LITE(p);
		PROC_UNLOCK(p);
		if (vm == NULL) {
			PRELE(p);
			continue;
		}
		sx_sunlock(&allproc_lock);
		map = &vm->vm_map;

		vm_map_lock(map);
		if (map->busy)
			vm_map_wait_busy(map);
		VM_MAP_ENTRY_FOREACH(entry, map) {
			if ((entry->eflags & (MAP_ENTRY_GUARD |
			    MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_COW)) != 0 ||
			    (entry->max_protection & VM_PROT_WRITE) == 0)
				continue;
			object = entry->object.vm_object;
			if (object == NULL || object->type != tmpfs_pager_type)
				continue;
			/*
			 * There is no need to dig into the shadow chain:
			 * a mapping of an object that is not at the top of
			 * the chain is read-only.
			 */

			VM_OBJECT_RLOCK(object);
			if (object->type == OBJT_DEAD) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}
			MPASS(object->ref_count > 1);
			if ((object->flags & OBJ_TMPFS) == 0) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}
			vp = object->un_pager.swp.swp_tmpfs;
			if (vp->v_mount != mp) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}

			terminate = cb(mp, map, entry, cb_arg);
			VM_OBJECT_RUNLOCK(object);
			if (terminate)
				break;
		}
		vm_map_unlock(map);

		vmspace_free(vm);
		sx_slock(&allproc_lock);
		PRELE(p);
		if (terminate)
			break;
	}
	if (!terminate && gen != allproc_gen)
		goto again;
	sx_sunlock(&allproc_lock);
}

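/*
 * Return true if any process still holds a writable mapping of a file
 * that lives on this mount.
 */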
static bool
tmpfs_check_rw_maps(struct mount *mp)
{
	struct tmpfs_check_rw_maps_arg ca;

	ca.found = false;
	tmpfs_all_rw_maps(mp, tmpfs_check_rw_maps_cb, &ca);
	return (ca.found);
}

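/*
 * Remount from read-write to read-only: suspend writers, refuse with
 * EBUSY if writable mappings remain (unless MNT_FORCE was given), then
 * mark the mount read-only and keep revoking write access from existing
 * mappings, syncing mtimes and flushing vnodes until no new writable
 * mappings appear.
 */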
static int
tmpfs_rw_to_ro(struct mount *mp)
{
	int error, flags;
	bool forced;

	forced = (mp->mnt_flag & MNT_FORCE) != 0;
	flags = WRITECLOSE | (forced ? FORCECLOSE : 0);

	if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
		return (error);
	error = vfs_write_suspend_umnt(mp);
	if (error != 0)
		return (error);
	if (!forced && tmpfs_check_rw_maps(mp)) {
		error = EBUSY;
		goto out;
	}
	VFS_TO_TMPFS(mp)->tm_ronly = 1;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_RDONLY;
	MNT_IUNLOCK(mp);
	for (;;) {
		tmpfs_all_rw_maps(mp, tmpfs_revoke_rw_maps_cb, NULL);
		tmpfs_update_mtime_all(mp);
		error = vflush(mp, 0, flags, curthread);
		if (error != 0) {
			VFS_TO_TMPFS(mp)->tm_ronly = 0;
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_RDONLY;
			MNT_IUNLOCK(mp);
			goto out;
		}
		if (!tmpfs_check_rw_maps(mp))
			break;
	}
out:
	vfs_write_resume(mp, 0);
	return (error);
}

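/*
 * VFS_MOUNT(9) entry point.  Handles both initial mounts and MNT_UPDATE
 * requests; only a small subset of options may change on update.
 */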
static int
tmpfs_mount(struct mount *mp)
{
	const size_t nodes_per_page = howmany(PAGE_SIZE,
	    sizeof(struct tmpfs_dirent) + sizeof(struct tmpfs_node));
	struct tmpfs_mount *tmp;
	struct tmpfs_node *root;
	int error;
	bool nomtime, nonc;
	/* Size counters. */
	u_quad_t pages;
	off_t nodes_max, size_max, maxfilesize;

	/* Root node attributes. */
	uid_t root_uid;
	gid_t root_gid;
	mode_t root_mode;

	struct vattr va;

	if (vfs_filteropt(mp->mnt_optnew, tmpfs_opts))
		return (EINVAL);

	if (mp->mnt_flag & MNT_UPDATE) {
		/* Only support update mounts for certain options. */
		if (vfs_filteropt(mp->mnt_optnew, tmpfs_updateopts) != 0)
			return (EOPNOTSUPP);
		tmp = VFS_TO_TMPFS(mp);
		if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) == 0) {
			/*
			 * On-the-fly resizing is not supported (yet).  We
			 * still need to have "size" listed as "supported";
			 * otherwise trying to update a file system that is
			 * listed in fstab with a size parameter, say when
			 * changing rw to ro or vice versa, would cause
			 * vfs_filteropt() to bail.
			 */
			if (size_max != tmp->tm_size_max)
				return (EOPNOTSUPP);
		}
		if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) &&
		    !tmp->tm_ronly) {
			/* RW -> RO */
			return (tmpfs_rw_to_ro(mp));
		} else if (!vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) &&
		    tmp->tm_ronly) {
			/* RO -> RW */
			tmp->tm_ronly = 0;
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_RDONLY;
			MNT_IUNLOCK(mp);
		}
		tmp->tm_nomtime = vfs_getopt(mp->mnt_optnew, "nomtime", NULL,
		    0) == 0;
		MNT_ILOCK(mp);
		if ((mp->mnt_flag & MNT_UNION) == 0) {
			mp->mnt_kern_flag |= MNTK_FPLOOKUP;
		} else {
			mp->mnt_kern_flag &= ~MNTK_FPLOOKUP;
		}
		MNT_IUNLOCK(mp);
		return (0);
	}

	vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred);
	VOP_UNLOCK(mp->mnt_vnodecovered);
	if (error)
		return (error);

	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "gid", "%d", &root_gid) != 1)
		root_gid = va.va_gid;
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "uid", "%d", &root_uid) != 1)
		root_uid = va.va_uid;
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "mode", "%ho", &root_mode) != 1)
		root_mode = va.va_mode;
	if (vfs_getopt_size(mp->mnt_optnew, "inodes", &nodes_max) != 0)
		nodes_max = 0;
	if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) != 0)
		size_max = 0;
	if (vfs_getopt_size(mp->mnt_optnew, "maxfilesize", &maxfilesize) != 0)
		maxfilesize = 0;
	nonc = vfs_getopt(mp->mnt_optnew, "nonc", NULL, NULL) == 0;
	nomtime = vfs_getopt(mp->mnt_optnew, "nomtime", NULL, NULL) == 0;

	/* Do not allow mounts if we do not have enough memory to preserve
	 * the minimum reserved pages. */
	if (tmpfs_mem_avail() < TMPFS_PAGES_MINRESERVED)
		return (ENOSPC);

	/* Get the maximum number of memory pages this file system is
	 * allowed to use, based on the maximum size the user passed in
	 * the mount structure.  A value of zero is treated as if the
	 * maximum available space was requested. */
	if (size_max == 0 || size_max > OFF_MAX - PAGE_SIZE ||
	    (SIZE_MAX < OFF_MAX && size_max / PAGE_SIZE >= SIZE_MAX))
		pages = SIZE_MAX;
	else {
		size_max = roundup(size_max, PAGE_SIZE);
		pages = howmany(size_max, PAGE_SIZE);
	}
	MPASS(pages > 0);

	if (nodes_max <= 3) {
		if (pages < INT_MAX / nodes_per_page)
			nodes_max = pages * nodes_per_page;
		else
			nodes_max = INT_MAX;
	}
	if (nodes_max > INT_MAX)
		nodes_max = INT_MAX;
	MPASS(nodes_max >= 3);

	/* Allocate the tmpfs mount structure and fill it. */
	tmp = (struct tmpfs_mount *)malloc(sizeof(struct tmpfs_mount),
	    M_TMPFSMNT, M_WAITOK | M_ZERO);

	mtx_init(&tmp->tm_allnode_lock, "tmpfs allnode lock", NULL, MTX_DEF);
	tmp->tm_nodes_max = nodes_max;
	tmp->tm_nodes_inuse = 0;
	tmp->tm_refcount = 1;
	tmp->tm_maxfilesize = maxfilesize > 0 ? maxfilesize : OFF_MAX;
	LIST_INIT(&tmp->tm_nodes_used);

	tmp->tm_size_max = size_max;
	tmp->tm_pages_max = pages;
	tmp->tm_pages_used = 0;
	new_unrhdr64(&tmp->tm_ino_unr, 2);
	tmp->tm_ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	tmp->tm_nonc = nonc;
	tmp->tm_nomtime = nomtime;

	/* Allocate the root node. */
	error = tmpfs_alloc_node(mp, tmp, VDIR, root_uid, root_gid,
	    root_mode & ALLPERMS, NULL, NULL, VNOVAL, &root);

	if (error != 0 || root == NULL) {
		free(tmp, M_TMPFSMNT);
		return (error);
	}
	KASSERT(root->tn_id == 2,
	    ("tmpfs root with invalid ino: %ju", (uintmax_t)root->tn_id));
	tmp->tm_root = root;

	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
	    MNTK_TEXT_REFS | MNTK_NOMSYNC;
	if (!nonc && (mp->mnt_flag & MNT_UNION) == 0)
		mp->mnt_kern_flag |= MNTK_FPLOOKUP;
	MNT_IUNLOCK(mp);

	mp->mnt_data = tmp;
	mp->mnt_stat.f_namemax = MAXNAMLEN;
	vfs_getnewfsid(mp);
	vfs_mountedfrom(mp, "tmpfs");

	return (0);
}

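/*
 * VFS_UNMOUNT(9) entry point: suspend writers, flush all vnodes (retrying
 * for forced unmounts), then destroy the remaining nodes and drop the
 * reference on the per-mount structure.
 */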
/* ARGSUSED2 */
static int
tmpfs_unmount(struct mount *mp, int mntflags)
{
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	int error, flags;

	flags = (mntflags & MNT_FORCE) != 0 ? FORCECLOSE : 0;
	tmp = VFS_TO_TMPFS(mp);

	/* Stop writers */
	error = vfs_write_suspend_umnt(mp);
	if (error != 0)
		return (error);
	/*
	 * At this point, nodes cannot be destroyed by any other
	 * thread because write suspension is started.
	 */

	for (;;) {
		error = vflush(mp, 0, flags, curthread);
		if (error != 0) {
			vfs_write_resume(mp, VR_START_WRITE);
			return (error);
		}
		MNT_ILOCK(mp);
		if (mp->mnt_nvnodelistsize == 0) {
			MNT_IUNLOCK(mp);
			break;
		}
		MNT_IUNLOCK(mp);
		if ((mntflags & MNT_FORCE) == 0) {
			vfs_write_resume(mp, VR_START_WRITE);
			return (EBUSY);
		}
	}

	TMPFS_LOCK(tmp);
	while ((node = LIST_FIRST(&tmp->tm_nodes_used)) != NULL) {
		TMPFS_NODE_LOCK(node);
		if (node->tn_type == VDIR)
			tmpfs_dir_destroy(tmp, node);
		if (tmpfs_free_node_locked(tmp, node, true))
			TMPFS_LOCK(tmp);
		else
			TMPFS_NODE_UNLOCK(node);
	}

	mp->mnt_data = NULL;
	tmpfs_free_tmp(tmp);
	vfs_write_resume(mp, VR_START_WRITE);

	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);

	return (0);
}

void
tmpfs_free_tmp(struct tmpfs_mount *tmp)
{
	TMPFS_MP_ASSERT_LOCKED(tmp);
	MPASS(tmp->tm_refcount > 0);

	tmp->tm_refcount--;
	if (tmp->tm_refcount > 0) {
		TMPFS_UNLOCK(tmp);
		return;
	}
	TMPFS_UNLOCK(tmp);

	mtx_destroy(&tmp->tm_allnode_lock);
	MPASS(tmp->tm_pages_used == 0);
	MPASS(tmp->tm_nodes_inuse == 0);

	free(tmp, M_TMPFSMNT);
}

static int
tmpfs_root(struct mount *mp, int flags, struct vnode **vpp)
{
	int error;

	error = tmpfs_alloc_vp(mp, VFS_TO_TMPFS(mp)->tm_root, flags, vpp);
	if (error == 0)
		(*vpp)->v_vflag |= VV_ROOT;
	return (error);
}

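/*
 * Translate an NFS-style file handle into a vnode by matching the
 * (inode number, generation) pair against the list of in-use nodes.
 */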
static int
tmpfs_fhtovp(struct mount *mp, struct fid *fhp, int flags,
    struct vnode **vpp)
{
	struct tmpfs_fid_data tfd;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	int error;

	if (fhp->fid_len != sizeof(tfd))
		return (EINVAL);

	/*
	 * Copy from fid_data onto the stack to avoid unaligned pointer use.
	 * See the comment in sys/mount.h on struct fid for details.
	 */
	memcpy(&tfd, fhp->fid_data, fhp->fid_len);

	tmp = VFS_TO_TMPFS(mp);

	if (tfd.tfd_id >= tmp->tm_nodes_max)
		return (EINVAL);

	TMPFS_LOCK(tmp);
	LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
		if (node->tn_id == tfd.tfd_id &&
		    node->tn_gen == tfd.tfd_gen) {
			tmpfs_ref_node(node);
			break;
		}
	}
	TMPFS_UNLOCK(tmp);

	if (node != NULL) {
		error = tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, vpp);
		tmpfs_free_node(tmp, node);
	} else
		error = EINVAL;
	return (error);
}

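/*
 * Report file system statistics.  Block counts are in PAGE_SIZE units and
 * bounded either by the configured size limit or by available memory.
 */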
/* ARGSUSED2 */
static int
tmpfs_statfs(struct mount *mp, struct statfs *sbp)
{
	struct tmpfs_mount *tmp;
	size_t used;

	tmp = VFS_TO_TMPFS(mp);

	sbp->f_iosize = PAGE_SIZE;
	sbp->f_bsize = PAGE_SIZE;

	used = tmpfs_pages_used(tmp);
	if (tmp->tm_pages_max != ULONG_MAX)
		sbp->f_blocks = tmp->tm_pages_max;
	else
		sbp->f_blocks = used + tmpfs_mem_avail();
	if (sbp->f_blocks <= used)
		sbp->f_bavail = 0;
	else
		sbp->f_bavail = sbp->f_blocks - used;
	sbp->f_bfree = sbp->f_bavail;
	used = tmp->tm_nodes_inuse;
	sbp->f_files = tmp->tm_nodes_max;
	if (sbp->f_files <= used)
		sbp->f_ffree = 0;
	else
		sbp->f_ffree = sbp->f_files - used;
	/* sbp->f_owner = tmp->tn_uid; */

	return (0);
}

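/*
 * tmpfs has no backing store to flush.  Sync only needs to acknowledge
 * suspension requests and, for MNT_LAZY, refresh mtimes of mapped files.
 */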
static int
tmpfs_sync(struct mount *mp, int waitfor)
{

	if (waitfor == MNT_SUSPEND) {
		MNT_ILOCK(mp);
		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
		MNT_IUNLOCK(mp);
	} else if (waitfor == MNT_LAZY) {
		tmpfs_update_mtime_lazy(mp);
	}
	return (0);
}

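/*
 * One-time initialization: set up the tmpfs subroutines and clone the
 * generic vnode fileops, overriding fo_close with the tmpfs version.
 */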
static int
tmpfs_init(struct vfsconf *conf)
{
	int res;

	res = tmpfs_subr_init();
	if (res != 0)
		return (res);
	memcpy(&tmpfs_fnops, &vnops, sizeof(struct fileops));
	tmpfs_fnops.fo_close = tmpfs_fo_close;
	return (0);
}

static int
tmpfs_uninit(struct vfsconf *conf)
{
	tmpfs_subr_uninit();
	return (0);
}

/*
 * tmpfs vfs operations.
 */
struct vfsops tmpfs_vfsops = {
	.vfs_mount =			tmpfs_mount,
	.vfs_unmount =			tmpfs_unmount,
	.vfs_root =			vfs_cache_root,
	.vfs_cachedroot =		tmpfs_root,
	.vfs_statfs =			tmpfs_statfs,
	.vfs_fhtovp =			tmpfs_fhtovp,
	.vfs_sync =			tmpfs_sync,
	.vfs_init =			tmpfs_init,
	.vfs_uninit =			tmpfs_uninit,
};
VFS_SET(tmpfs_vfsops, tmpfs, VFCF_JAIL);