1 /* $NetBSD: tmpfs_vfsops.c,v 1.10 2005/12/11 12:24:29 christos Exp $ */ 2 3 /*- 4 * SPDX-License-Identifier: BSD-2-Clause-NetBSD 5 * 6 * Copyright (c) 2005 The NetBSD Foundation, Inc. 7 * All rights reserved. 8 * 9 * This code is derived from software contributed to The NetBSD Foundation 10 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code 11 * 2005 program. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 24 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 * POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 /* 36 * Efficient memory file system. 37 * 38 * tmpfs is a file system that uses FreeBSD's virtual memory 39 * sub-system to store file data and metadata in an efficient way. 40 * This means that it does not follow the structure of an on-disk file 41 * system because it simply does not need to. Instead, it uses 42 * memory-specific data structures and algorithms to automatically 43 * allocate and release resources. 44 */ 45 46 #include "opt_tmpfs.h" 47 48 #include <sys/cdefs.h> 49 __FBSDID("$FreeBSD$"); 50 51 #include <sys/param.h> 52 #include <sys/systm.h> 53 #include <sys/dirent.h> 54 #include <sys/limits.h> 55 #include <sys/lock.h> 56 #include <sys/mount.h> 57 #include <sys/mutex.h> 58 #include <sys/proc.h> 59 #include <sys/jail.h> 60 #include <sys/kernel.h> 61 #include <sys/rwlock.h> 62 #include <sys/stat.h> 63 #include <sys/sx.h> 64 #include <sys/sysctl.h> 65 #include <sys/vnode.h> 66 67 #include <vm/vm.h> 68 #include <vm/vm_param.h> 69 #include <vm/pmap.h> 70 #include <vm/vm_extern.h> 71 #include <vm/vm_map.h> 72 #include <vm/vm_object.h> 73 #include <vm/vm_param.h> 74 75 #include <fs/tmpfs/tmpfs.h> 76 77 /* 78 * Default permission for root node 79 */ 80 #define TMPFS_DEFAULT_ROOT_MODE (S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH) 81 82 MALLOC_DEFINE(M_TMPFSMNT, "tmpfs mount", "tmpfs mount structures"); 83 MALLOC_DEFINE(M_TMPFSNAME, "tmpfs name", "tmpfs file names"); 84 85 static int tmpfs_mount(struct mount *); 86 static int tmpfs_unmount(struct mount *, int); 87 static int tmpfs_root(struct mount *, int flags, struct vnode **); 88 static int tmpfs_fhtovp(struct mount *, struct fid *, int, 89 struct vnode **); 90 static int tmpfs_statfs(struct mount *, struct statfs *); 91 92 static const char *tmpfs_opts[] = { 93 "from", "size", "maxfilesize", "inodes", "uid", "gid", "mode", "export", 94 "union", "nonc", "nomtime", NULL 95 }; 96 97 static const char *tmpfs_updateopts[] = { 98 "from", "export", "nomtime", "size", NULL 99 }; 100 101 /* 102 * Handle updates of time from writes to mmaped regions, if allowed. 103 * Use MNT_VNODE_FOREACH_ALL instead of MNT_VNODE_FOREACH_LAZY, since 104 * unmap of the tmpfs-backed vnode does not call vinactive(), due to 105 * vm object type is OBJT_SWAP. If lazy, only handle delayed update 106 * of mtime due to the writes to mapped files. 107 */ 108 static void 109 tmpfs_update_mtime(struct mount *mp, bool lazy) 110 { 111 struct vnode *vp, *mvp; 112 struct vm_object *obj; 113 114 if (VFS_TO_TMPFS(mp)->tm_nomtime) 115 return; 116 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 117 if (vp->v_type != VREG) { 118 VI_UNLOCK(vp); 119 continue; 120 } 121 obj = vp->v_object; 122 KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) == 123 (OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj")); 124 125 /* 126 * In lazy case, do unlocked read, avoid taking vnode 127 * lock if not needed. Lost update will be handled on 128 * the next call. 129 * For non-lazy case, we must flush all pending 130 * metadata changes now. 131 */ 132 if (!lazy || obj->generation != obj->cleangeneration) { 133 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, 134 curthread) != 0) 135 continue; 136 tmpfs_check_mtime(vp); 137 if (!lazy) 138 tmpfs_update(vp); 139 vput(vp); 140 } else { 141 VI_UNLOCK(vp); 142 continue; 143 } 144 } 145 } 146 147 struct tmpfs_check_rw_maps_arg { 148 bool found; 149 }; 150 151 static bool 152 tmpfs_check_rw_maps_cb(struct mount *mp __unused, vm_map_t map __unused, 153 vm_map_entry_t entry __unused, void *arg) 154 { 155 struct tmpfs_check_rw_maps_arg *a; 156 157 a = arg; 158 a->found = true; 159 return (true); 160 } 161 162 /* 163 * Revoke write permissions from all mappings of regular files 164 * belonging to the specified tmpfs mount. 165 */ 166 static bool 167 tmpfs_revoke_rw_maps_cb(struct mount *mp __unused, vm_map_t map, 168 vm_map_entry_t entry, void *arg __unused) 169 { 170 171 /* 172 * XXXKIB: might be invalidate the mapping 173 * instead ? The process is not going to be 174 * happy in any case. 175 */ 176 entry->max_protection &= ~VM_PROT_WRITE; 177 if ((entry->protection & VM_PROT_WRITE) != 0) { 178 entry->protection &= ~VM_PROT_WRITE; 179 pmap_protect(map->pmap, entry->start, entry->end, 180 entry->protection); 181 } 182 return (false); 183 } 184 185 static void 186 tmpfs_all_rw_maps(struct mount *mp, bool (*cb)(struct mount *mp, vm_map_t, 187 vm_map_entry_t, void *), void *cb_arg) 188 { 189 struct proc *p; 190 struct vmspace *vm; 191 vm_map_t map; 192 vm_map_entry_t entry; 193 vm_object_t object; 194 struct vnode *vp; 195 int gen; 196 bool terminate; 197 198 terminate = false; 199 sx_slock(&allproc_lock); 200 again: 201 gen = allproc_gen; 202 FOREACH_PROC_IN_SYSTEM(p) { 203 PROC_LOCK(p); 204 if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC | 205 P_SYSTEM | P_WEXIT)) != 0) { 206 PROC_UNLOCK(p); 207 continue; 208 } 209 vm = vmspace_acquire_ref(p); 210 _PHOLD_LITE(p); 211 PROC_UNLOCK(p); 212 if (vm == NULL) { 213 PRELE(p); 214 continue; 215 } 216 sx_sunlock(&allproc_lock); 217 map = &vm->vm_map; 218 219 vm_map_lock(map); 220 if (map->busy) 221 vm_map_wait_busy(map); 222 VM_MAP_ENTRY_FOREACH(entry, map) { 223 if ((entry->eflags & (MAP_ENTRY_GUARD | 224 MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_COW)) != 0 || 225 (entry->max_protection & VM_PROT_WRITE) == 0) 226 continue; 227 object = entry->object.vm_object; 228 if (object == NULL || object->type != OBJT_SWAP || 229 (object->flags & OBJ_TMPFS_NODE) == 0) 230 continue; 231 /* 232 * No need to dig into shadow chain, mapping 233 * of the object not at top is readonly. 234 */ 235 236 VM_OBJECT_RLOCK(object); 237 if (object->type == OBJT_DEAD) { 238 VM_OBJECT_RUNLOCK(object); 239 continue; 240 } 241 MPASS(object->ref_count > 1); 242 if ((object->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) != 243 (OBJ_TMPFS_NODE | OBJ_TMPFS)) { 244 VM_OBJECT_RUNLOCK(object); 245 continue; 246 } 247 vp = object->un_pager.swp.swp_tmpfs; 248 if (vp->v_mount != mp) { 249 VM_OBJECT_RUNLOCK(object); 250 continue; 251 } 252 253 terminate = cb(mp, map, entry, cb_arg); 254 VM_OBJECT_RUNLOCK(object); 255 if (terminate) 256 break; 257 } 258 vm_map_unlock(map); 259 260 vmspace_free(vm); 261 sx_slock(&allproc_lock); 262 PRELE(p); 263 if (terminate) 264 break; 265 } 266 if (!terminate && gen != allproc_gen) 267 goto again; 268 sx_sunlock(&allproc_lock); 269 } 270 271 static bool 272 tmpfs_check_rw_maps(struct mount *mp) 273 { 274 struct tmpfs_check_rw_maps_arg ca; 275 276 ca.found = false; 277 tmpfs_all_rw_maps(mp, tmpfs_check_rw_maps_cb, &ca); 278 return (ca.found); 279 } 280 281 static int 282 tmpfs_rw_to_ro(struct mount *mp) 283 { 284 int error, flags; 285 bool forced; 286 287 forced = (mp->mnt_flag & MNT_FORCE) != 0; 288 flags = WRITECLOSE | (forced ? FORCECLOSE : 0); 289 290 if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0) 291 return (error); 292 error = vfs_write_suspend_umnt(mp); 293 if (error != 0) 294 return (error); 295 if (!forced && tmpfs_check_rw_maps(mp)) { 296 error = EBUSY; 297 goto out; 298 } 299 VFS_TO_TMPFS(mp)->tm_ronly = 1; 300 MNT_ILOCK(mp); 301 mp->mnt_flag |= MNT_RDONLY; 302 MNT_IUNLOCK(mp); 303 for (;;) { 304 tmpfs_all_rw_maps(mp, tmpfs_revoke_rw_maps_cb, NULL); 305 tmpfs_update_mtime(mp, false); 306 error = vflush(mp, 0, flags, curthread); 307 if (error != 0) { 308 VFS_TO_TMPFS(mp)->tm_ronly = 0; 309 MNT_ILOCK(mp); 310 mp->mnt_flag &= ~MNT_RDONLY; 311 MNT_IUNLOCK(mp); 312 goto out; 313 } 314 if (!tmpfs_check_rw_maps(mp)) 315 break; 316 } 317 out: 318 vfs_write_resume(mp, 0); 319 return (error); 320 } 321 322 static int 323 tmpfs_mount(struct mount *mp) 324 { 325 const size_t nodes_per_page = howmany(PAGE_SIZE, 326 sizeof(struct tmpfs_dirent) + sizeof(struct tmpfs_node)); 327 struct tmpfs_mount *tmp; 328 struct tmpfs_node *root; 329 int error; 330 bool nomtime, nonc; 331 /* Size counters. */ 332 u_quad_t pages; 333 off_t nodes_max, size_max, maxfilesize; 334 335 /* Root node attributes. */ 336 uid_t root_uid; 337 gid_t root_gid; 338 mode_t root_mode; 339 340 struct vattr va; 341 342 if (vfs_filteropt(mp->mnt_optnew, tmpfs_opts)) 343 return (EINVAL); 344 345 if (mp->mnt_flag & MNT_UPDATE) { 346 /* Only support update mounts for certain options. */ 347 if (vfs_filteropt(mp->mnt_optnew, tmpfs_updateopts) != 0) 348 return (EOPNOTSUPP); 349 tmp = VFS_TO_TMPFS(mp); 350 if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) == 0) { 351 /* 352 * On-the-fly resizing is not supported (yet). We still 353 * need to have "size" listed as "supported", otherwise 354 * trying to update fs that is listed in fstab with size 355 * parameter, say trying to change rw to ro or vice 356 * versa, would cause vfs_filteropt() to bail. 357 */ 358 if (size_max != tmp->tm_size_max) 359 return (EOPNOTSUPP); 360 } 361 if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) && 362 !tmp->tm_ronly) { 363 /* RW -> RO */ 364 return (tmpfs_rw_to_ro(mp)); 365 } else if (!vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) && 366 tmp->tm_ronly) { 367 /* RO -> RW */ 368 tmp->tm_ronly = 0; 369 MNT_ILOCK(mp); 370 mp->mnt_flag &= ~MNT_RDONLY; 371 MNT_IUNLOCK(mp); 372 } 373 tmp->tm_nomtime = vfs_getopt(mp->mnt_optnew, "nomtime", NULL, 374 0) == 0; 375 return (0); 376 } 377 378 vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY); 379 error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred); 380 VOP_UNLOCK(mp->mnt_vnodecovered); 381 if (error) 382 return (error); 383 384 if (mp->mnt_cred->cr_ruid != 0 || 385 vfs_scanopt(mp->mnt_optnew, "gid", "%d", &root_gid) != 1) 386 root_gid = va.va_gid; 387 if (mp->mnt_cred->cr_ruid != 0 || 388 vfs_scanopt(mp->mnt_optnew, "uid", "%d", &root_uid) != 1) 389 root_uid = va.va_uid; 390 if (mp->mnt_cred->cr_ruid != 0 || 391 vfs_scanopt(mp->mnt_optnew, "mode", "%ho", &root_mode) != 1) 392 root_mode = va.va_mode; 393 if (vfs_getopt_size(mp->mnt_optnew, "inodes", &nodes_max) != 0) 394 nodes_max = 0; 395 if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) != 0) 396 size_max = 0; 397 if (vfs_getopt_size(mp->mnt_optnew, "maxfilesize", &maxfilesize) != 0) 398 maxfilesize = 0; 399 nonc = vfs_getopt(mp->mnt_optnew, "nonc", NULL, NULL) == 0; 400 nomtime = vfs_getopt(mp->mnt_optnew, "nomtime", NULL, NULL) == 0; 401 402 /* Do not allow mounts if we do not have enough memory to preserve 403 * the minimum reserved pages. */ 404 if (tmpfs_mem_avail() < TMPFS_PAGES_MINRESERVED) 405 return (ENOSPC); 406 407 /* Get the maximum number of memory pages this file system is 408 * allowed to use, based on the maximum size the user passed in 409 * the mount structure. A value of zero is treated as if the 410 * maximum available space was requested. */ 411 if (size_max == 0 || size_max > OFF_MAX - PAGE_SIZE || 412 (SIZE_MAX < OFF_MAX && size_max / PAGE_SIZE >= SIZE_MAX)) 413 pages = SIZE_MAX; 414 else { 415 size_max = roundup(size_max, PAGE_SIZE); 416 pages = howmany(size_max, PAGE_SIZE); 417 } 418 MPASS(pages > 0); 419 420 if (nodes_max <= 3) { 421 if (pages < INT_MAX / nodes_per_page) 422 nodes_max = pages * nodes_per_page; 423 else 424 nodes_max = INT_MAX; 425 } 426 if (nodes_max > INT_MAX) 427 nodes_max = INT_MAX; 428 MPASS(nodes_max >= 3); 429 430 /* Allocate the tmpfs mount structure and fill it. */ 431 tmp = (struct tmpfs_mount *)malloc(sizeof(struct tmpfs_mount), 432 M_TMPFSMNT, M_WAITOK | M_ZERO); 433 434 mtx_init(&tmp->tm_allnode_lock, "tmpfs allnode lock", NULL, MTX_DEF); 435 tmp->tm_nodes_max = nodes_max; 436 tmp->tm_nodes_inuse = 0; 437 tmp->tm_refcount = 1; 438 tmp->tm_maxfilesize = maxfilesize > 0 ? maxfilesize : OFF_MAX; 439 LIST_INIT(&tmp->tm_nodes_used); 440 441 tmp->tm_size_max = size_max; 442 tmp->tm_pages_max = pages; 443 tmp->tm_pages_used = 0; 444 new_unrhdr64(&tmp->tm_ino_unr, 2); 445 tmp->tm_ronly = (mp->mnt_flag & MNT_RDONLY) != 0; 446 tmp->tm_nonc = nonc; 447 tmp->tm_nomtime = nomtime; 448 449 /* Allocate the root node. */ 450 error = tmpfs_alloc_node(mp, tmp, VDIR, root_uid, root_gid, 451 root_mode & ALLPERMS, NULL, NULL, VNOVAL, &root); 452 453 if (error != 0 || root == NULL) { 454 free(tmp, M_TMPFSMNT); 455 return (error); 456 } 457 KASSERT(root->tn_id == 2, 458 ("tmpfs root with invalid ino: %ju", (uintmax_t)root->tn_id)); 459 tmp->tm_root = root; 460 461 MNT_ILOCK(mp); 462 mp->mnt_flag |= MNT_LOCAL; 463 mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED | 464 MNTK_TEXT_REFS | MNTK_NOMSYNC; 465 if (!nonc) 466 mp->mnt_kern_flag |= MNTK_FPLOOKUP; 467 MNT_IUNLOCK(mp); 468 469 mp->mnt_data = tmp; 470 mp->mnt_stat.f_namemax = MAXNAMLEN; 471 vfs_getnewfsid(mp); 472 vfs_mountedfrom(mp, "tmpfs"); 473 474 return 0; 475 } 476 477 /* ARGSUSED2 */ 478 static int 479 tmpfs_unmount(struct mount *mp, int mntflags) 480 { 481 struct tmpfs_mount *tmp; 482 struct tmpfs_node *node; 483 int error, flags; 484 485 flags = (mntflags & MNT_FORCE) != 0 ? FORCECLOSE : 0; 486 tmp = VFS_TO_TMPFS(mp); 487 488 /* Stop writers */ 489 error = vfs_write_suspend_umnt(mp); 490 if (error != 0) 491 return (error); 492 /* 493 * At this point, nodes cannot be destroyed by any other 494 * thread because write suspension is started. 495 */ 496 497 for (;;) { 498 error = vflush(mp, 0, flags, curthread); 499 if (error != 0) { 500 vfs_write_resume(mp, VR_START_WRITE); 501 return (error); 502 } 503 MNT_ILOCK(mp); 504 if (mp->mnt_nvnodelistsize == 0) { 505 MNT_IUNLOCK(mp); 506 break; 507 } 508 MNT_IUNLOCK(mp); 509 if ((mntflags & MNT_FORCE) == 0) { 510 vfs_write_resume(mp, VR_START_WRITE); 511 return (EBUSY); 512 } 513 } 514 515 TMPFS_LOCK(tmp); 516 while ((node = LIST_FIRST(&tmp->tm_nodes_used)) != NULL) { 517 TMPFS_NODE_LOCK(node); 518 if (node->tn_type == VDIR) 519 tmpfs_dir_destroy(tmp, node); 520 if (tmpfs_free_node_locked(tmp, node, true)) 521 TMPFS_LOCK(tmp); 522 else 523 TMPFS_NODE_UNLOCK(node); 524 } 525 526 mp->mnt_data = NULL; 527 tmpfs_free_tmp(tmp); 528 vfs_write_resume(mp, VR_START_WRITE); 529 530 MNT_ILOCK(mp); 531 mp->mnt_flag &= ~MNT_LOCAL; 532 MNT_IUNLOCK(mp); 533 534 return (0); 535 } 536 537 void 538 tmpfs_free_tmp(struct tmpfs_mount *tmp) 539 { 540 541 MPASS(tmp->tm_refcount > 0); 542 tmp->tm_refcount--; 543 if (tmp->tm_refcount > 0) { 544 TMPFS_UNLOCK(tmp); 545 return; 546 } 547 TMPFS_UNLOCK(tmp); 548 549 mtx_destroy(&tmp->tm_allnode_lock); 550 MPASS(tmp->tm_pages_used == 0); 551 MPASS(tmp->tm_nodes_inuse == 0); 552 553 free(tmp, M_TMPFSMNT); 554 } 555 556 static int 557 tmpfs_root(struct mount *mp, int flags, struct vnode **vpp) 558 { 559 int error; 560 561 error = tmpfs_alloc_vp(mp, VFS_TO_TMPFS(mp)->tm_root, flags, vpp); 562 if (error == 0) 563 (*vpp)->v_vflag |= VV_ROOT; 564 return (error); 565 } 566 567 static int 568 tmpfs_fhtovp(struct mount *mp, struct fid *fhp, int flags, 569 struct vnode **vpp) 570 { 571 struct tmpfs_fid_data tfd; 572 struct tmpfs_mount *tmp; 573 struct tmpfs_node *node; 574 int error; 575 576 if (fhp->fid_len != sizeof(tfd)) 577 return (EINVAL); 578 579 /* 580 * Copy from fid_data onto the stack to avoid unaligned pointer use. 581 * See the comment in sys/mount.h on struct fid for details. 582 */ 583 memcpy(&tfd, fhp->fid_data, fhp->fid_len); 584 585 tmp = VFS_TO_TMPFS(mp); 586 587 if (tfd.tfd_id >= tmp->tm_nodes_max) 588 return (EINVAL); 589 590 TMPFS_LOCK(tmp); 591 LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) { 592 if (node->tn_id == tfd.tfd_id && 593 node->tn_gen == tfd.tfd_gen) { 594 tmpfs_ref_node(node); 595 break; 596 } 597 } 598 TMPFS_UNLOCK(tmp); 599 600 if (node != NULL) { 601 error = tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, vpp); 602 tmpfs_free_node(tmp, node); 603 } else 604 error = EINVAL; 605 return (error); 606 } 607 608 /* ARGSUSED2 */ 609 static int 610 tmpfs_statfs(struct mount *mp, struct statfs *sbp) 611 { 612 struct tmpfs_mount *tmp; 613 size_t used; 614 615 tmp = VFS_TO_TMPFS(mp); 616 617 sbp->f_iosize = PAGE_SIZE; 618 sbp->f_bsize = PAGE_SIZE; 619 620 used = tmpfs_pages_used(tmp); 621 if (tmp->tm_pages_max != ULONG_MAX) 622 sbp->f_blocks = tmp->tm_pages_max; 623 else 624 sbp->f_blocks = used + tmpfs_mem_avail(); 625 if (sbp->f_blocks <= used) 626 sbp->f_bavail = 0; 627 else 628 sbp->f_bavail = sbp->f_blocks - used; 629 sbp->f_bfree = sbp->f_bavail; 630 used = tmp->tm_nodes_inuse; 631 sbp->f_files = tmp->tm_nodes_max; 632 if (sbp->f_files <= used) 633 sbp->f_ffree = 0; 634 else 635 sbp->f_ffree = sbp->f_files - used; 636 /* sbp->f_owner = tmp->tn_uid; */ 637 638 return 0; 639 } 640 641 static int 642 tmpfs_sync(struct mount *mp, int waitfor) 643 { 644 645 if (waitfor == MNT_SUSPEND) { 646 MNT_ILOCK(mp); 647 mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED; 648 MNT_IUNLOCK(mp); 649 } else if (waitfor == MNT_LAZY) { 650 tmpfs_update_mtime(mp, true); 651 } 652 return (0); 653 } 654 655 static int 656 tmpfs_init(struct vfsconf *conf) 657 { 658 tmpfs_subr_init(); 659 return (0); 660 } 661 662 static int 663 tmpfs_uninit(struct vfsconf *conf) 664 { 665 tmpfs_subr_uninit(); 666 return (0); 667 } 668 669 /* 670 * tmpfs vfs operations. 671 */ 672 struct vfsops tmpfs_vfsops = { 673 .vfs_mount = tmpfs_mount, 674 .vfs_unmount = tmpfs_unmount, 675 .vfs_root = vfs_cache_root, 676 .vfs_cachedroot = tmpfs_root, 677 .vfs_statfs = tmpfs_statfs, 678 .vfs_fhtovp = tmpfs_fhtovp, 679 .vfs_sync = tmpfs_sync, 680 .vfs_init = tmpfs_init, 681 .vfs_uninit = tmpfs_uninit, 682 }; 683 VFS_SET(tmpfs_vfsops, tmpfs, VFCF_JAIL); 684