/*	$NetBSD: tmpfs_vfsops.c,v 1.10 2005/12/11 12:24:29 christos Exp $	*/

/*-
 * SPDX-License-Identifier: BSD-2-Clause-NetBSD
 *
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Efficient memory file system.
 *
 * tmpfs is a file system that uses FreeBSD's virtual memory
 * sub-system to store file data and metadata in an efficient way.
 * This means that it does not follow the structure of an on-disk file
 * system because it simply does not need to.  Instead, it uses
 * memory-specific data structures and algorithms to automatically
 * allocate and release resources.
 */

#include "opt_tmpfs.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dirent.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

#include <fs/tmpfs/tmpfs.h>

/*
 * Default permission for root node
 */
#define TMPFS_DEFAULT_ROOT_MODE	(S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)

MALLOC_DEFINE(M_TMPFSMNT, "tmpfs mount", "tmpfs mount structures");
MALLOC_DEFINE(M_TMPFSNAME, "tmpfs name", "tmpfs file names");

static int	tmpfs_mount(struct mount *);
static int	tmpfs_unmount(struct mount *, int);
static int	tmpfs_root(struct mount *, int flags, struct vnode **);
static int	tmpfs_fhtovp(struct mount *, struct fid *, int,
		    struct vnode **);
static int	tmpfs_statfs(struct mount *, struct statfs *);
static void	tmpfs_susp_clean(struct mount *);

static const char *tmpfs_opts[] = {
	"from", "size", "maxfilesize", "inodes", "uid", "gid", "mode", "export",
	"union", "nonc", "nomtime", NULL
};

static const char *tmpfs_updateopts[] = {
	"from", "export", "nomtime", "size", NULL
};

/*
 * Handle updates of time from writes to mmaped regions, if allowed.
 * Use MNT_VNODE_FOREACH_ALL instead of MNT_VNODE_FOREACH_LAZY, since
 * unmapping of the tmpfs-backed vnode does not call vinactive(),
 * because the vm object type is OBJT_SWAP.  In the lazy case, only
 * handle the delayed mtime updates caused by writes to mapped files.
 */
static void
tmpfs_update_mtime(struct mount *mp, bool lazy)
{
	struct vnode *vp, *mvp;
	struct vm_object *obj;

	if (VFS_TO_TMPFS(mp)->tm_nomtime)
		return;
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_type != VREG) {
			VI_UNLOCK(vp);
			continue;
		}
		obj = vp->v_object;
		KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) ==
		    (OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj"));

		/*
		 * In the lazy case, do an unlocked read to avoid
		 * taking the vnode lock if it is not needed.  A lost
		 * update will be handled on the next call.
		 * In the non-lazy case, we must flush all pending
		 * metadata changes now.
		 */
		if (!lazy || obj->generation != obj->cleangeneration) {
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK,
			    curthread) != 0)
				continue;
			tmpfs_check_mtime(vp);
			if (!lazy)
				tmpfs_update(vp);
			vput(vp);
		} else {
			VI_UNLOCK(vp);
			continue;
		}
	}
}

struct tmpfs_check_rw_maps_arg {
	bool found;
};

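/*
 * Callback for tmpfs_all_rw_maps(): record that a writable mapping
 * was found and terminate the scan.
 */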
static bool
tmpfs_check_rw_maps_cb(struct mount *mp __unused, vm_map_t map __unused,
    vm_map_entry_t entry __unused, void *arg)
{
	struct tmpfs_check_rw_maps_arg *a;

	a = arg;
	a->found = true;
	return (true);
}

/*
 * Callback for tmpfs_all_rw_maps() that revokes write permissions
 * from the given mapping of a regular file belonging to the
 * specified tmpfs mount.
 */
static bool
tmpfs_revoke_rw_maps_cb(struct mount *mp __unused, vm_map_t map,
    vm_map_entry_t entry, void *arg __unused)
{

	/*
	 * XXXKIB: might it be better to invalidate the mapping
	 * instead?  The process is not going to be happy in any
	 * case.
	 */
	entry->max_protection &= ~VM_PROT_WRITE;
	if ((entry->protection & VM_PROT_WRITE) != 0) {
		entry->protection &= ~VM_PROT_WRITE;
		pmap_protect(map->pmap, entry->start, entry->end,
		    entry->protection);
	}
	return (false);
}

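/*
 * Iterate over the writable mappings of regular files belonging to
 * the given tmpfs mount, in all process address spaces, and invoke
 * the callback on each matching map entry.  The scan is restarted if
 * the set of processes changes while the allproc lock is dropped.
 */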
static void
tmpfs_all_rw_maps(struct mount *mp, bool (*cb)(struct mount *mp, vm_map_t,
    vm_map_entry_t, void *), void *cb_arg)
{
	struct proc *p;
	struct vmspace *vm;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t object;
	struct vnode *vp;
	int gen;
	bool terminate;

	terminate = false;
	sx_slock(&allproc_lock);
again:
	gen = allproc_gen;
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
		    P_SYSTEM | P_WEXIT)) != 0) {
			PROC_UNLOCK(p);
			continue;
		}
		vm = vmspace_acquire_ref(p);
		_PHOLD_LITE(p);
		PROC_UNLOCK(p);
		if (vm == NULL) {
			PRELE(p);
			continue;
		}
		sx_sunlock(&allproc_lock);
		map = &vm->vm_map;

		vm_map_lock(map);
		if (map->busy)
			vm_map_wait_busy(map);
		VM_MAP_ENTRY_FOREACH(entry, map) {
			if ((entry->eflags & (MAP_ENTRY_GUARD |
			    MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_COW)) != 0 ||
			    (entry->max_protection & VM_PROT_WRITE) == 0)
				continue;
			object = entry->object.vm_object;
			if (object == NULL || object->type != OBJT_SWAP ||
			    (object->flags & OBJ_TMPFS_NODE) == 0)
				continue;
			/*
			 * No need to dig into the shadow chain; a
			 * mapping of an object that is not at the top
			 * of the chain is read-only.
			 */

			VM_OBJECT_RLOCK(object);
			if (object->type == OBJT_DEAD) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}
			MPASS(object->ref_count > 1);
			if ((object->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) !=
			    (OBJ_TMPFS_NODE | OBJ_TMPFS)) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}
			vp = object->un_pager.swp.swp_tmpfs;
			if (vp->v_mount != mp) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}

			terminate = cb(mp, map, entry, cb_arg);
			VM_OBJECT_RUNLOCK(object);
			if (terminate)
				break;
		}
		vm_map_unlock(map);

		vmspace_free(vm);
		sx_slock(&allproc_lock);
		PRELE(p);
		if (terminate)
			break;
	}
	if (!terminate && gen != allproc_gen)
		goto again;
	sx_sunlock(&allproc_lock);
}

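/*
 * Report whether any process has a writable mapping of a regular file
 * belonging to the given tmpfs mount.
 */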
static bool
tmpfs_check_rw_maps(struct mount *mp)
{
	struct tmpfs_check_rw_maps_arg ca;

	ca.found = false;
	tmpfs_all_rw_maps(mp, tmpfs_check_rw_maps_cb, &ca);
	return (ca.found);
}

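/*
 * Remount the tmpfs mount read-only: suspend writers, refuse (unless
 * forced) if writable mappings exist, then repeatedly revoke write
 * permissions from mappings, sync mtimes, and flush vnodes until no
 * new writable mappings appear.
 */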
static int
tmpfs_rw_to_ro(struct mount *mp)
{
	int error, flags;
	bool forced;

	forced = (mp->mnt_flag & MNT_FORCE) != 0;
	flags = WRITECLOSE | (forced ? FORCECLOSE : 0);

	if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
		return (error);
	error = vfs_write_suspend_umnt(mp);
	if (error != 0)
		return (error);
	if (!forced && tmpfs_check_rw_maps(mp)) {
		error = EBUSY;
		goto out;
	}
	VFS_TO_TMPFS(mp)->tm_ronly = 1;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_RDONLY;
	MNT_IUNLOCK(mp);
	for (;;) {
		tmpfs_all_rw_maps(mp, tmpfs_revoke_rw_maps_cb, NULL);
		tmpfs_update_mtime(mp, false);
		error = vflush(mp, 0, flags, curthread);
		if (error != 0) {
			VFS_TO_TMPFS(mp)->tm_ronly = 0;
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_RDONLY;
			MNT_IUNLOCK(mp);
			goto out;
		}
		if (!tmpfs_check_rw_maps(mp))
			break;
	}
out:
	vfs_write_resume(mp, 0);
	return (error);
}

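/*
 * Mount entry point.  Handles both update mounts, for which only a
 * few options may change (including the rw/ro transition), and fresh
 * mounts, for which the mount structure and the root node are
 * allocated.
 */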
static int
tmpfs_mount(struct mount *mp)
{
	const size_t nodes_per_page = howmany(PAGE_SIZE,
	    sizeof(struct tmpfs_dirent) + sizeof(struct tmpfs_node));
	struct tmpfs_mount *tmp;
	struct tmpfs_node *root;
	int error;
	bool nomtime, nonc;
	/* Size counters. */
	u_quad_t pages;
	off_t nodes_max, size_max, maxfilesize;

	/* Root node attributes. */
	uid_t root_uid;
	gid_t root_gid;
	mode_t root_mode;

	struct vattr va;

	if (vfs_filteropt(mp->mnt_optnew, tmpfs_opts))
		return (EINVAL);

	if (mp->mnt_flag & MNT_UPDATE) {
		/* Only support update mounts for certain options. */
		if (vfs_filteropt(mp->mnt_optnew, tmpfs_updateopts) != 0)
			return (EOPNOTSUPP);
		tmp = VFS_TO_TMPFS(mp);
		if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) == 0) {
			/*
			 * On-the-fly resizing is not supported (yet).  We
			 * still need to list "size" as supported; otherwise,
			 * trying to update a file system that is listed in
			 * fstab with a size parameter, say when changing rw
			 * to ro or vice versa, would cause vfs_filteropt()
			 * to bail.
			 */
			if (size_max != tmp->tm_size_max)
				return (EOPNOTSUPP);
		}
		if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) &&
		    !tmp->tm_ronly) {
			/* RW -> RO */
			return (tmpfs_rw_to_ro(mp));
		} else if (!vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) &&
		    tmp->tm_ronly) {
			/* RO -> RW */
			tmp->tm_ronly = 0;
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_RDONLY;
			MNT_IUNLOCK(mp);
		}
		tmp->tm_nomtime = vfs_getopt(mp->mnt_optnew, "nomtime", NULL,
		    0) == 0;
		return (0);
	}

	vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred);
	VOP_UNLOCK(mp->mnt_vnodecovered);
	if (error)
		return (error);

	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "gid", "%d", &root_gid) != 1)
		root_gid = va.va_gid;
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "uid", "%d", &root_uid) != 1)
		root_uid = va.va_uid;
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "mode", "%ho", &root_mode) != 1)
		root_mode = va.va_mode;
	if (vfs_getopt_size(mp->mnt_optnew, "inodes", &nodes_max) != 0)
		nodes_max = 0;
	if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) != 0)
		size_max = 0;
	if (vfs_getopt_size(mp->mnt_optnew, "maxfilesize", &maxfilesize) != 0)
		maxfilesize = 0;
	nonc = vfs_getopt(mp->mnt_optnew, "nonc", NULL, NULL) == 0;
	nomtime = vfs_getopt(mp->mnt_optnew, "nomtime", NULL, NULL) == 0;

	/*
	 * Do not allow mounts if we do not have enough memory to preserve
	 * the minimum reserved pages.
	 */
	if (tmpfs_mem_avail() < TMPFS_PAGES_MINRESERVED)
		return (ENOSPC);

	/*
	 * Get the maximum number of memory pages this file system is
	 * allowed to use, based on the maximum size the user passed in
	 * the mount structure.  A value of zero is treated as if the
	 * maximum available space was requested.
	 */
	if (size_max == 0 || size_max > OFF_MAX - PAGE_SIZE ||
	    (SIZE_MAX < OFF_MAX && size_max / PAGE_SIZE >= SIZE_MAX))
		pages = SIZE_MAX;
	else {
		size_max = roundup(size_max, PAGE_SIZE);
		pages = howmany(size_max, PAGE_SIZE);
	}
	MPASS(pages > 0);

	if (nodes_max <= 3) {
		if (pages < INT_MAX / nodes_per_page)
			nodes_max = pages * nodes_per_page;
		else
			nodes_max = INT_MAX;
	}
	if (nodes_max > INT_MAX)
		nodes_max = INT_MAX;
	MPASS(nodes_max >= 3);

	/* Allocate the tmpfs mount structure and fill it. */
	tmp = (struct tmpfs_mount *)malloc(sizeof(struct tmpfs_mount),
	    M_TMPFSMNT, M_WAITOK | M_ZERO);

	mtx_init(&tmp->tm_allnode_lock, "tmpfs allnode lock", NULL, MTX_DEF);
	tmp->tm_nodes_max = nodes_max;
	tmp->tm_nodes_inuse = 0;
	tmp->tm_refcount = 1;
	tmp->tm_maxfilesize = maxfilesize > 0 ? maxfilesize : OFF_MAX;
	LIST_INIT(&tmp->tm_nodes_used);

	tmp->tm_size_max = size_max;
	tmp->tm_pages_max = pages;
	tmp->tm_pages_used = 0;
	new_unrhdr64(&tmp->tm_ino_unr, 2);
	tmp->tm_ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	tmp->tm_nonc = nonc;
	tmp->tm_nomtime = nomtime;

	/* Allocate the root node. */
	error = tmpfs_alloc_node(mp, tmp, VDIR, root_uid, root_gid,
	    root_mode & ALLPERMS, NULL, NULL, VNOVAL, &root);

	if (error != 0 || root == NULL) {
		free(tmp, M_TMPFSMNT);
		return (error);
	}
	KASSERT(root->tn_id == 2,
	    ("tmpfs root with invalid ino: %ju", (uintmax_t)root->tn_id));
	tmp->tm_root = root;

	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
	    MNTK_TEXT_REFS | MNTK_NOMSYNC;
	MNT_IUNLOCK(mp);

	mp->mnt_data = tmp;
	mp->mnt_stat.f_namemax = MAXNAMLEN;
	vfs_getnewfsid(mp);
	vfs_mountedfrom(mp, "tmpfs");

	return (0);
}

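/*
 * Unmount entry point.  Writers are suspended first, so nodes cannot
 * be created or destroyed behind our back while vnodes are flushed
 * and the remaining nodes are released.
 */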
/* ARGSUSED2 */
static int
tmpfs_unmount(struct mount *mp, int mntflags)
{
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	int error, flags;

	flags = (mntflags & MNT_FORCE) != 0 ? FORCECLOSE : 0;
	tmp = VFS_TO_TMPFS(mp);

	/* Stop writers */
	error = vfs_write_suspend_umnt(mp);
	if (error != 0)
		return (error);
	/*
	 * At this point, nodes cannot be destroyed by any other
	 * thread because write suspension is started.
	 */

	for (;;) {
		error = vflush(mp, 0, flags, curthread);
		if (error != 0) {
			vfs_write_resume(mp, VR_START_WRITE);
			return (error);
		}
		MNT_ILOCK(mp);
		if (mp->mnt_nvnodelistsize == 0) {
			MNT_IUNLOCK(mp);
			break;
		}
		MNT_IUNLOCK(mp);
		if ((mntflags & MNT_FORCE) == 0) {
			vfs_write_resume(mp, VR_START_WRITE);
			return (EBUSY);
		}
	}

	TMPFS_LOCK(tmp);
	while ((node = LIST_FIRST(&tmp->tm_nodes_used)) != NULL) {
		TMPFS_NODE_LOCK(node);
		if (node->tn_type == VDIR)
			tmpfs_dir_destroy(tmp, node);
		if (tmpfs_free_node_locked(tmp, node, true))
			TMPFS_LOCK(tmp);
		else
			TMPFS_NODE_UNLOCK(node);
	}

	mp->mnt_data = NULL;
	tmpfs_free_tmp(tmp);
	vfs_write_resume(mp, VR_START_WRITE);

	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);

	return (0);
}

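/*
 * Drop a reference on the tmpfs mount structure, freeing it when the
 * last reference goes away.  Called with the tmpfs mount lock held,
 * which is always dropped before return.
 */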
void
tmpfs_free_tmp(struct tmpfs_mount *tmp)
{

	MPASS(tmp->tm_refcount > 0);
	tmp->tm_refcount--;
	if (tmp->tm_refcount > 0) {
		TMPFS_UNLOCK(tmp);
		return;
	}
	TMPFS_UNLOCK(tmp);

	mtx_destroy(&tmp->tm_allnode_lock);
	MPASS(tmp->tm_pages_used == 0);
	MPASS(tmp->tm_nodes_inuse == 0);

	free(tmp, M_TMPFSMNT);
}

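/*
 * Return a vnode for the root node of the file system.
 */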
static int
tmpfs_root(struct mount *mp, int flags, struct vnode **vpp)
{
	int error;

	error = tmpfs_alloc_vp(mp, VFS_TO_TMPFS(mp)->tm_root, flags, vpp);
	if (error == 0)
		(*vpp)->v_vflag |= VV_ROOT;
	return (error);
}

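/*
 * Translate an NFS file handle into a vnode by searching the list of
 * live nodes for one with a matching inode number and generation.
 */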
static int
tmpfs_fhtovp(struct mount *mp, struct fid *fhp, int flags,
    struct vnode **vpp)
{
	struct tmpfs_fid *tfhp;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	int error;

	tmp = VFS_TO_TMPFS(mp);

	tfhp = (struct tmpfs_fid *)fhp;
	if (tfhp->tf_len != sizeof(struct tmpfs_fid))
		return (EINVAL);

	if (tfhp->tf_id >= tmp->tm_nodes_max)
		return (EINVAL);

	TMPFS_LOCK(tmp);
	LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
		if (node->tn_id == tfhp->tf_id &&
		    node->tn_gen == tfhp->tf_gen) {
			tmpfs_ref_node(node);
			break;
		}
	}
	TMPFS_UNLOCK(tmp);

	if (node != NULL) {
		error = tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, vpp);
		tmpfs_free_node(tmp, node);
	} else
		error = EINVAL;
	return (error);
}

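/*
 * Fill in file system statistics.  Block counts are reported in
 * PAGE_SIZE units.
 */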
/* ARGSUSED2 */
static int
tmpfs_statfs(struct mount *mp, struct statfs *sbp)
{
	struct tmpfs_mount *tmp;
	size_t used;

	tmp = VFS_TO_TMPFS(mp);

	sbp->f_iosize = PAGE_SIZE;
	sbp->f_bsize = PAGE_SIZE;

	used = tmpfs_pages_used(tmp);
	if (tmp->tm_pages_max != ULONG_MAX)
		sbp->f_blocks = tmp->tm_pages_max;
	else
		sbp->f_blocks = used + tmpfs_mem_avail();
	if (sbp->f_blocks <= used)
		sbp->f_bavail = 0;
	else
		sbp->f_bavail = sbp->f_blocks - used;
	sbp->f_bfree = sbp->f_bavail;
	used = tmp->tm_nodes_inuse;
	sbp->f_files = tmp->tm_nodes_max;
	if (sbp->f_files <= used)
		sbp->f_ffree = 0;
	else
		sbp->f_ffree = sbp->f_files - used;
	/* sbp->f_owner = tmp->tn_uid; */

	return (0);
}

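/*
 * Sync entry point.  tmpfs has no dirty data to write back, so
 * MNT_SUSPEND can be acknowledged immediately; MNT_LAZY triggers the
 * delayed mtime updates for mmaped files.
 */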
static int
tmpfs_sync(struct mount *mp, int waitfor)
{

	if (waitfor == MNT_SUSPEND) {
		MNT_ILOCK(mp);
		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
		MNT_IUNLOCK(mp);
	} else if (waitfor == MNT_LAZY) {
		tmpfs_update_mtime(mp, true);
	}
	return (0);
}

/*
 * The presence of a susp_clean method tells the VFS to track writes.
 */
static void
tmpfs_susp_clean(struct mount *mp __unused)
{
}

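/*
 * Initialize subsystem-wide tmpfs state when the file system is
 * registered.
 */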
static int
tmpfs_init(struct vfsconf *conf)
{
	tmpfs_subr_init();
	return (0);
}

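/*
 * Release subsystem-wide tmpfs state when the file system is
 * unregistered.
 */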
static int
tmpfs_uninit(struct vfsconf *conf)
{
	tmpfs_subr_uninit();
	return (0);
}

/*
 * tmpfs vfs operations.
 */
struct vfsops tmpfs_vfsops = {
	.vfs_mount =			tmpfs_mount,
	.vfs_unmount =			tmpfs_unmount,
	.vfs_root =			vfs_cache_root,
	.vfs_cachedroot =		tmpfs_root,
	.vfs_statfs =			tmpfs_statfs,
	.vfs_fhtovp =			tmpfs_fhtovp,
	.vfs_sync =			tmpfs_sync,
	.vfs_susp_clean =		tmpfs_susp_clean,
	.vfs_init =			tmpfs_init,
	.vfs_uninit =			tmpfs_uninit,
};
VFS_SET(tmpfs_vfsops, tmpfs, VFCF_JAIL);