/*	$NetBSD: tmpfs_vfsops.c,v 1.10 2005/12/11 12:24:29 christos Exp $	*/

/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Efficient memory file system.
 *
 * tmpfs is a file system that uses FreeBSD's virtual memory
 * sub-system to store file data and metadata in an efficient way.
 * This means that it does not follow the structure of an on-disk file
 * system because it simply does not need to.  Instead, it uses
 * memory-specific data structures and algorithms to automatically
 * allocate and release resources.
 */

#include "opt_ddb.h"
#include "opt_tmpfs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dirent.h>
#include <sys/file.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

#include <fs/tmpfs/tmpfs.h>

/*
 * Default permissions for the root node.
 */
#define TMPFS_DEFAULT_ROOT_MODE	(S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)

static MALLOC_DEFINE(M_TMPFSMNT, "tmpfs mount", "tmpfs mount structures");
MALLOC_DEFINE(M_TMPFSNAME, "tmpfs name", "tmpfs file names");

static int	tmpfs_mount(struct mount *);
static int	tmpfs_unmount(struct mount *, int);
static int	tmpfs_root(struct mount *, int flags, struct vnode **);
static int	tmpfs_fhtovp(struct mount *, struct fid *, int,
		    struct vnode **);
static int	tmpfs_statfs(struct mount *, struct statfs *);

static const char *tmpfs_opts[] = {
	"from", "easize", "size", "maxfilesize", "inodes", "uid", "gid", "mode",
	"export", "union", "nonc", "nomtime", "nosymfollow", "pgread", NULL
};

static const char *tmpfs_updateopts[] = {
	"from", "easize", "export", "nomtime", "size", "nosymfollow", NULL
};

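/*
 * Filter for the lazy vnode iterator below: only regular files whose
 * backing VM object might contain dirty pages need an mtime update.
 */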
static int
tmpfs_update_mtime_lazy_filter(struct vnode *vp, void *arg)
{
	struct vm_object *obj;

	if (vp->v_type != VREG)
		return (0);

	obj = atomic_load_ptr(&vp->v_object);
	if (obj == NULL)
		return (0);

	return (vm_object_mightbedirty_(obj));
}

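/*
 * Update the mtime of mapped regular files on this mount, visiting only
 * vnodes that pass the dirtiness filter above.
 */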
static void
tmpfs_update_mtime_lazy(struct mount *mp)
{
	struct vnode *vp, *mvp;

	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, tmpfs_update_mtime_lazy_filter, NULL) {
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) != 0)
			continue;
		tmpfs_check_mtime(vp);
		vput(vp);
	}
}

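/*
 * Synchronize the mtime of every regular file on the mount, unless the
 * mount was created with the "nomtime" option.
 */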
static void
tmpfs_update_mtime_all(struct mount *mp)
{
	struct vnode *vp, *mvp;

	if (VFS_TO_TMPFS(mp)->tm_nomtime)
		return;
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_type != VREG) {
			VI_UNLOCK(vp);
			continue;
		}
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) != 0)
			continue;
		tmpfs_check_mtime(vp);
		tmpfs_update(vp);
		vput(vp);
	}
}

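/*
 * Argument and callback used by tmpfs_check_rw_maps() to detect whether
 * any process still holds a writable mapping of a file on this mount;
 * returning true stops the scan early.
 */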
struct tmpfs_check_rw_maps_arg {
	bool found;
};

static bool
tmpfs_check_rw_maps_cb(struct mount *mp __unused, vm_map_t map __unused,
    vm_map_entry_t entry __unused, void *arg)
{
	struct tmpfs_check_rw_maps_arg *a;

	a = arg;
	a->found = true;
	return (true);
}

/*
 * Revoke write permissions from all mappings of regular files
 * belonging to the specified tmpfs mount.
 */
static bool
tmpfs_revoke_rw_maps_cb(struct mount *mp __unused, vm_map_t map,
    vm_map_entry_t entry, void *arg __unused)
{

	/*
	 * XXXKIB: might it be better to invalidate the mapping
	 * instead?  The process is not going to be happy in
	 * any case.
	 */
	entry->max_protection &= ~VM_PROT_WRITE;
	if ((entry->protection & VM_PROT_WRITE) != 0) {
		entry->protection &= ~VM_PROT_WRITE;
		pmap_protect(map->pmap, entry->start, entry->end,
		    entry->protection);
	}
	return (false);
}

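/*
 * Walk the address space of every process in the system and invoke the
 * callback on each writable map entry that is backed by a regular file
 * on the given tmpfs mount.  The scan is restarted if the set of
 * processes changes while the allproc lock is dropped.
 */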
static void
tmpfs_all_rw_maps(struct mount *mp, bool (*cb)(struct mount *mp, vm_map_t,
    vm_map_entry_t, void *), void *cb_arg)
{
	struct proc *p;
	struct vmspace *vm;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t object;
	struct vnode *vp;
	int gen;
	bool terminate;

	terminate = false;
	sx_slock(&allproc_lock);
again:
	gen = allproc_gen;
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
		    P_SYSTEM | P_WEXIT)) != 0) {
			PROC_UNLOCK(p);
			continue;
		}
		vm = vmspace_acquire_ref(p);
		_PHOLD_LITE(p);
		PROC_UNLOCK(p);
		if (vm == NULL) {
			PRELE(p);
			continue;
		}
		sx_sunlock(&allproc_lock);
		map = &vm->vm_map;

		vm_map_lock(map);
		if (map->busy)
			vm_map_wait_busy(map);
		VM_MAP_ENTRY_FOREACH(entry, map) {
			if ((entry->eflags & (MAP_ENTRY_GUARD |
			    MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_COW)) != 0 ||
			    (entry->max_protection & VM_PROT_WRITE) == 0)
				continue;
			object = entry->object.vm_object;
			if (object == NULL || object->type != tmpfs_pager_type)
				continue;
			/*
			 * No need to dig into the shadow chain; a mapping
			 * of an object that is not at the top of the chain
			 * is read-only.
			 */

			VM_OBJECT_RLOCK(object);
			if (object->type == OBJT_DEAD) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}
			MPASS(object->ref_count > 1);
			if ((object->flags & OBJ_TMPFS) == 0) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}
			vp = VM_TO_TMPFS_VP(object);
			if (vp->v_mount != mp) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}

			terminate = cb(mp, map, entry, cb_arg);
			VM_OBJECT_RUNLOCK(object);
			if (terminate)
				break;
		}
		vm_map_unlock(map);

		vmspace_free(vm);
		sx_slock(&allproc_lock);
		PRELE(p);
		if (terminate)
			break;
	}
	if (!terminate && gen != allproc_gen)
		goto again;
	sx_sunlock(&allproc_lock);
}

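/*
 * Report whether any process maps a file from this mount with write
 * permission.
 */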
static bool
tmpfs_check_rw_maps(struct mount *mp)
{
	struct tmpfs_check_rw_maps_arg ca;

	ca.found = false;
	tmpfs_all_rw_maps(mp, tmpfs_check_rw_maps_cb, &ca);
	return (ca.found);
}

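/*
 * Downgrade a mounted tmpfs from read-write to read-only: suspend
 * writes, refuse (unless forced) while writable mappings exist, then
 * revoke write access from the remaining mappings, sync mtimes and
 * flush vnodes until no new writable mappings appear.
 */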
static int
tmpfs_rw_to_ro(struct mount *mp)
{
	int error, flags;
	bool forced;

	forced = (mp->mnt_flag & MNT_FORCE) != 0;
	flags = WRITECLOSE | (forced ? FORCECLOSE : 0);

	if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
		return (error);
	error = vfs_write_suspend_umnt(mp);
	if (error != 0)
		return (error);
	if (!forced && tmpfs_check_rw_maps(mp)) {
		error = EBUSY;
		goto out;
	}
	VFS_TO_TMPFS(mp)->tm_ronly = 1;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_RDONLY;
	MNT_IUNLOCK(mp);
	for (;;) {
		tmpfs_all_rw_maps(mp, tmpfs_revoke_rw_maps_cb, NULL);
		tmpfs_update_mtime_all(mp);
		error = vflush(mp, 0, flags, curthread);
		if (error != 0) {
			VFS_TO_TMPFS(mp)->tm_ronly = 0;
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_RDONLY;
			MNT_IUNLOCK(mp);
			goto out;
		}
		if (!tmpfs_check_rw_maps(mp))
			break;
	}
out:
	vfs_write_resume(mp, 0);
	return (error);
}

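/*
 * Mount a tmpfs instance.  MNT_UPDATE requests are limited to the
 * options in tmpfs_updateopts; a fresh mount parses the size, inode,
 * ownership and behavior options, allocates the struct tmpfs_mount and
 * creates the root directory node.
 */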
static int
tmpfs_mount(struct mount *mp)
{
	const size_t nodes_per_page = howmany(PAGE_SIZE,
	    sizeof(struct tmpfs_dirent) + sizeof(struct tmpfs_node));
	struct tmpfs_mount *tmp;
	struct tmpfs_node *root;
	int error;
	bool nomtime, nonc, pgread;
	/* Size counters. */
	u_quad_t pages;
	off_t nodes_max, size_max, maxfilesize, ea_max_size;

	/* Root node attributes. */
	uid_t root_uid;
	gid_t root_gid;
	mode_t root_mode;

	struct vattr va;

	if (vfs_filteropt(mp->mnt_optnew, tmpfs_opts))
		return (EINVAL);

	if (mp->mnt_flag & MNT_UPDATE) {
		/* Only support update mounts for certain options. */
		if (vfs_filteropt(mp->mnt_optnew, tmpfs_updateopts) != 0)
			return (EOPNOTSUPP);
		tmp = VFS_TO_TMPFS(mp);
		if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) == 0) {
			/*
			 * On-the-fly resizing is not supported (yet).  We
			 * still need to have "size" listed as "supported";
			 * otherwise, trying to update a file system that is
			 * listed in fstab with a size parameter, say to
			 * change rw to ro or vice versa, would cause
			 * vfs_filteropt() to bail.
			 */
			if (size_max != tmp->tm_size_max)
				return (EOPNOTSUPP);
		}
		if (vfs_getopt_size(mp->mnt_optnew, "easize", &ea_max_size) == 0) {
			tmp->tm_ea_memory_max = ea_max_size;
		}
		if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) &&
		    !tmp->tm_ronly) {
			/* RW -> RO */
			return (tmpfs_rw_to_ro(mp));
		} else if (!vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) &&
		    tmp->tm_ronly) {
			/* RO -> RW */
			tmp->tm_ronly = 0;
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_RDONLY;
			MNT_IUNLOCK(mp);
		}
		tmp->tm_nomtime = vfs_getopt(mp->mnt_optnew, "nomtime", NULL,
		    0) == 0;
		MNT_ILOCK(mp);
		if ((mp->mnt_flag & MNT_UNION) == 0) {
			mp->mnt_kern_flag |= MNTK_FPLOOKUP;
		} else {
			mp->mnt_kern_flag &= ~MNTK_FPLOOKUP;
		}
		MNT_IUNLOCK(mp);
		return (0);
	}

	vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred);
	VOP_UNLOCK(mp->mnt_vnodecovered);
	if (error)
		return (error);

	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "gid", "%d", &root_gid) != 1)
		root_gid = va.va_gid;
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "uid", "%d", &root_uid) != 1)
		root_uid = va.va_uid;
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "mode", "%ho", &root_mode) != 1)
		root_mode = va.va_mode;
	if (vfs_getopt_size(mp->mnt_optnew, "inodes", &nodes_max) != 0)
		nodes_max = 0;
	if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) != 0)
		size_max = 0;
	if (vfs_getopt_size(mp->mnt_optnew, "maxfilesize", &maxfilesize) != 0)
		maxfilesize = 0;
	if (vfs_getopt_size(mp->mnt_optnew, "easize", &ea_max_size) != 0)
		ea_max_size = 0;
	nonc = vfs_getopt(mp->mnt_optnew, "nonc", NULL, NULL) == 0;
	nomtime = vfs_getopt(mp->mnt_optnew, "nomtime", NULL, NULL) == 0;
	pgread = vfs_getopt(mp->mnt_optnew, "pgread", NULL, NULL) == 0;

	/* Do not allow mounts if we do not have enough memory to preserve
	 * the minimum reserved pages. */
	if (tmpfs_mem_avail() < TMPFS_PAGES_MINRESERVED)
		return (ENOSPC);

	/* Get the maximum number of memory pages this file system is
	 * allowed to use, based on the maximum size the user passed in
	 * the mount structure.  A value of zero is treated as if the
	 * maximum available space was requested. */
	if (size_max == 0 || size_max > OFF_MAX - PAGE_SIZE ||
	    (SIZE_MAX < OFF_MAX && size_max / PAGE_SIZE >= SIZE_MAX))
		pages = SIZE_MAX;
	else {
		size_max = roundup(size_max, PAGE_SIZE);
		pages = howmany(size_max, PAGE_SIZE);
	}
	MPASS(pages > 0);

	if (nodes_max <= 3) {
		if (pages < INT_MAX / nodes_per_page)
			nodes_max = pages * nodes_per_page;
		else
			nodes_max = INT_MAX;
	}
	if (nodes_max > INT_MAX)
		nodes_max = INT_MAX;
	MPASS(nodes_max >= 3);

	/* Allocate the tmpfs mount structure and fill it. */
	tmp = (struct tmpfs_mount *)malloc(sizeof(struct tmpfs_mount),
	    M_TMPFSMNT, M_WAITOK | M_ZERO);

	mtx_init(&tmp->tm_allnode_lock, "tmpfs allnode lock", NULL, MTX_DEF);
	tmp->tm_nodes_max = nodes_max;
	tmp->tm_nodes_inuse = 0;
	tmp->tm_ea_memory_inuse = 0;
	tmp->tm_refcount = 1;
	tmp->tm_maxfilesize = maxfilesize > 0 ? maxfilesize : OFF_MAX;
	tmp->tm_ea_memory_max = ea_max_size > 0 ?
	    ea_max_size : TMPFS_EA_MEMORY_RESERVED;
	LIST_INIT(&tmp->tm_nodes_used);

	tmp->tm_size_max = size_max;
	tmp->tm_pages_max = pages;
	tmp->tm_pages_used = 0;
	new_unrhdr64(&tmp->tm_ino_unr, 2);
	tmp->tm_ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	tmp->tm_nonc = nonc;
	tmp->tm_nomtime = nomtime;
	tmp->tm_pgread = pgread;

	/* Allocate the root node. */
	error = tmpfs_alloc_node(mp, tmp, VDIR, root_uid, root_gid,
	    root_mode & ALLPERMS, NULL, NULL, VNOVAL, &root);

	if (error != 0 || root == NULL) {
		free(tmp, M_TMPFSMNT);
		return (error);
	}
	KASSERT(root->tn_id == 2,
	    ("tmpfs root with invalid ino: %ju", (uintmax_t)root->tn_id));
	tmp->tm_root = root;

	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
	    MNTK_NOMSYNC;
	if (!nonc && (mp->mnt_flag & MNT_UNION) == 0)
		mp->mnt_kern_flag |= MNTK_FPLOOKUP;
	MNT_IUNLOCK(mp);

	mp->mnt_data = tmp;
	mp->mnt_stat.f_namemax = MAXNAMLEN;
	vfs_getnewfsid(mp);
	vfs_mountedfrom(mp, "tmpfs");

	return (0);
}

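/*
 * Unmount: suspend writes, flush all vnodes (repeating while new ones
 * appear for forced unmounts), then destroy the remaining nodes and
 * drop the mount's reference on the tmpfs_mount structure.
 */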
/* ARGSUSED2 */
static int
tmpfs_unmount(struct mount *mp, int mntflags)
{
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	int error, flags;

	flags = (mntflags & MNT_FORCE) != 0 ? FORCECLOSE : 0;
	tmp = VFS_TO_TMPFS(mp);

	/* Stop writers */
	error = vfs_write_suspend_umnt(mp);
	if (error != 0)
		return (error);
	/*
	 * At this point, nodes cannot be destroyed by any other
	 * thread because write suspension has started.
	 */

	for (;;) {
		error = vflush(mp, 0, flags, curthread);
		if (error != 0) {
			vfs_write_resume(mp, VR_START_WRITE);
			return (error);
		}
		MNT_ILOCK(mp);
		if (mp->mnt_nvnodelistsize == 0) {
			MNT_IUNLOCK(mp);
			break;
		}
		MNT_IUNLOCK(mp);
		if ((mntflags & MNT_FORCE) == 0) {
			vfs_write_resume(mp, VR_START_WRITE);
			return (EBUSY);
		}
	}

	TMPFS_LOCK(tmp);
	while ((node = LIST_FIRST(&tmp->tm_nodes_used)) != NULL) {
		TMPFS_NODE_LOCK(node);
		if (node->tn_type == VDIR)
			tmpfs_dir_destroy(tmp, node);
		if (tmpfs_free_node_locked(tmp, node, true))
			TMPFS_LOCK(tmp);
		else
			TMPFS_NODE_UNLOCK(node);
	}

	mp->mnt_data = NULL;
	tmpfs_free_tmp(tmp);
	vfs_write_resume(mp, VR_START_WRITE);

	return (0);
}

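/*
 * Drop a reference on the tmpfs_mount; the last reference frees the
 * structure.  Called with the tmpfs mount lock held, which is released
 * here.
 */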
void
tmpfs_free_tmp(struct tmpfs_mount *tmp)
{
	TMPFS_MP_ASSERT_LOCKED(tmp);
	MPASS(tmp->tm_refcount > 0);

	tmp->tm_refcount--;
	if (tmp->tm_refcount > 0) {
		TMPFS_UNLOCK(tmp);
		return;
	}
	TMPFS_UNLOCK(tmp);

	mtx_destroy(&tmp->tm_allnode_lock);
	/*
	 * We cannot assert that tmp->tm_pages_used == 0 here,
	 * because tmpfs vm_objects might still be mapped by some
	 * process and outlive the mount due to reference counting.
	 */
	MPASS(tmp->tm_nodes_inuse == 0);

	free(tmp, M_TMPFSMNT);
}

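/*
 * Return a locked vnode for the root directory of the mount.
 */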
static int
tmpfs_root(struct mount *mp, int flags, struct vnode **vpp)
{
	int error;

	error = tmpfs_alloc_vp(mp, VFS_TO_TMPFS(mp)->tm_root, flags, vpp);
	if (error == 0)
		(*vpp)->v_vflag |= VV_ROOT;
	return (error);
}

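/*
 * Translate an NFS-style file handle into a vnode: look up the node by
 * inode number and generation among the nodes still in use.
 */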
static int
tmpfs_fhtovp(struct mount *mp, struct fid *fhp, int flags,
    struct vnode **vpp)
{
	struct tmpfs_fid_data tfd;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	int error;

	if (fhp->fid_len != sizeof(tfd))
		return (EINVAL);

	/*
	 * Copy from fid_data onto the stack to avoid unaligned pointer use.
	 * See the comment in sys/mount.h on struct fid for details.
	 */
	memcpy(&tfd, fhp->fid_data, fhp->fid_len);

	tmp = VFS_TO_TMPFS(mp);

	if (tfd.tfd_id >= tmp->tm_nodes_max)
		return (EINVAL);

	TMPFS_LOCK(tmp);
	LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
		if (node->tn_id == tfd.tfd_id &&
		    node->tn_gen == tfd.tfd_gen) {
			tmpfs_ref_node(node);
			break;
		}
	}
	TMPFS_UNLOCK(tmp);

	if (node != NULL) {
		error = tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, vpp);
		tmpfs_free_node(tmp, node);
	} else
		error = EINVAL;
	return (error);
}

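/*
 * Report file system statistics in units of PAGE_SIZE blocks; when no
 * explicit size limit was given, the block total reflects the pages
 * currently used plus the memory still available to tmpfs.
 */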
/* ARGSUSED2 */
static int
tmpfs_statfs(struct mount *mp, struct statfs *sbp)
{
	struct tmpfs_mount *tmp;
	size_t used;

	tmp = VFS_TO_TMPFS(mp);

	sbp->f_iosize = PAGE_SIZE;
	sbp->f_bsize = PAGE_SIZE;

	used = tmpfs_pages_used(tmp);
	if (tmp->tm_pages_max != ULONG_MAX)
		sbp->f_blocks = tmp->tm_pages_max;
	else
		sbp->f_blocks = used + tmpfs_mem_avail();
	if (sbp->f_blocks <= used)
		sbp->f_bavail = 0;
	else
		sbp->f_bavail = sbp->f_blocks - used;
	sbp->f_bfree = sbp->f_bavail;
	used = tmp->tm_nodes_inuse;
	sbp->f_files = tmp->tm_nodes_max;
	if (sbp->f_files <= used)
		sbp->f_ffree = 0;
	else
		sbp->f_ffree = sbp->f_files - used;
	/* sbp->f_owner = tmp->tn_uid; */

	return (0);
}

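/*
 * VFS sync: tmpfs has no backing store to flush, so only mark the mount
 * suspended when asked, or lazily refresh mtimes of mapped files.
 */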
static int
tmpfs_sync(struct mount *mp, int waitfor)
{

	if (waitfor == MNT_SUSPEND) {
		MNT_ILOCK(mp);
		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
		MNT_IUNLOCK(mp);
	} else if (waitfor == MNT_LAZY) {
		tmpfs_update_mtime_lazy(mp);
	}
	return (0);
}

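/*
 * One-time initialization at module load: set up tmpfs internals and
 * clone the generic vnode fileops, overriding only the close method.
 */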
static int
tmpfs_init(struct vfsconf *conf)
{
	int res;

	res = tmpfs_subr_init();
	if (res != 0)
		return (res);
	memcpy(&tmpfs_fnops, &vnops, sizeof(struct fileops));
	tmpfs_fnops.fo_close = tmpfs_fo_close;
	return (0);
}

static int
tmpfs_uninit(struct vfsconf *conf)
{
	tmpfs_subr_uninit();
	return (0);
}

/*
 * tmpfs vfs operations.
 */
struct vfsops tmpfs_vfsops = {
	.vfs_mount =			tmpfs_mount,
	.vfs_unmount =			tmpfs_unmount,
	.vfs_root =			vfs_cache_root,
	.vfs_cachedroot =		tmpfs_root,
	.vfs_statfs =			tmpfs_statfs,
	.vfs_fhtovp =			tmpfs_fhtovp,
	.vfs_sync =			tmpfs_sync,
	.vfs_init =			tmpfs_init,
	.vfs_uninit =			tmpfs_uninit,
};
VFS_SET(tmpfs_vfsops, tmpfs, VFCF_JAIL);

#ifdef DDB
#include <ddb/ddb.h>

static void
db_print_tmpfs(struct mount *mp, struct tmpfs_mount *tmp)
{
	db_printf("mp %p (%s) tmp %p\n", mp,
	    mp->mnt_stat.f_mntonname, tmp);
	db_printf(
	    "\tsize max %ju pages max %lu pages used %lu\n"
	    "\tinodes max %ju inodes inuse %ju ea inuse %ju refcount %ju\n"
	    "\tmaxfilesize %ju r%c %snamecache %smtime\n",
	    (uintmax_t)tmp->tm_size_max, tmp->tm_pages_max, tmp->tm_pages_used,
	    (uintmax_t)tmp->tm_nodes_max, (uintmax_t)tmp->tm_nodes_inuse,
	    (uintmax_t)tmp->tm_ea_memory_inuse, (uintmax_t)tmp->tm_refcount,
	    (uintmax_t)tmp->tm_maxfilesize,
	    tmp->tm_ronly ? 'o' : 'w', tmp->tm_nonc ? "no" : "",
	    tmp->tm_nomtime ? "no" : "");
}

DB_SHOW_COMMAND(tmpfs, db_show_tmpfs)
{
	struct mount *mp;
	struct tmpfs_mount *tmp;

	if (have_addr) {
		mp = (struct mount *)addr;
		tmp = VFS_TO_TMPFS(mp);
		db_print_tmpfs(mp, tmp);
		return;
	}

	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (strcmp(mp->mnt_stat.f_fstypename, tmpfs_vfsconf.vfc_name) ==
		    0) {
			tmp = VFS_TO_TMPFS(mp);
			db_print_tmpfs(mp, tmp);
		}
	}
}
#endif	/* DDB */
749