/*	$NetBSD: tmpfs_vfsops.c,v 1.10 2005/12/11 12:24:29 christos Exp $	*/

/*-
 * SPDX-License-Identifier: BSD-2-Clause-NetBSD
 *
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Efficient memory file system.
 *
 * tmpfs is a file system that uses FreeBSD's virtual memory
 * sub-system to store file data and metadata in an efficient way.
 * This means that it does not follow the structure of an on-disk file
 * system because it simply does not need to.  Instead, it uses
 * memory-specific data structures and algorithms to automatically
 * allocate and release resources.
 */
#include "opt_tmpfs.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dirent.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

#include <fs/tmpfs/tmpfs.h>

/*
 * Default permissions for the root node.
 */
#define TMPFS_DEFAULT_ROOT_MODE	(S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)

MALLOC_DEFINE(M_TMPFSMNT, "tmpfs mount", "tmpfs mount structures");
MALLOC_DEFINE(M_TMPFSNAME, "tmpfs name", "tmpfs file names");

static int	tmpfs_mount(struct mount *);
static int	tmpfs_unmount(struct mount *, int);
static int	tmpfs_root(struct mount *, int flags, struct vnode **);
static int	tmpfs_fhtovp(struct mount *, struct fid *, int,
		    struct vnode **);
static int	tmpfs_statfs(struct mount *, struct statfs *);
static void	tmpfs_susp_clean(struct mount *);

static const char *tmpfs_opts[] = {
	"from", "size", "maxfilesize", "inodes", "uid", "gid", "mode", "export",
	"union", "nonc", NULL
};

static const char *tmpfs_updateopts[] = {
	"from", "export", "size", NULL
};
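/*
 * Illustrative example (not part of the build): the option names above
 * correspond directly to mount(8) arguments, so a command such as
 *
 *	mount -t tmpfs -o size=64m,inodes=4096,mode=1777 tmpfs /mnt
 *
 * would create a 64MB file system limited to 4096 inodes whose root
 * directory is sticky and world-writable.
 */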
/*
 * Handle updates of time from writes to mmaped regions.  Use
 * MNT_VNODE_FOREACH_ALL instead of MNT_VNODE_FOREACH_LAZY, since
 * unmap of the tmpfs-backed vnode does not call vinactive(), because
 * the vm object type is OBJT_SWAP.
 * If lazy, only handle delayed updates of mtime due to writes to
 * mapped files.
 */
static void
tmpfs_update_mtime(struct mount *mp, bool lazy)
{
	struct vnode *vp, *mvp;
	struct vm_object *obj;

	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_type != VREG) {
			VI_UNLOCK(vp);
			continue;
		}
		obj = vp->v_object;
		KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) ==
		    (OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj"));

		/*
		 * In the lazy case, do an unlocked read to avoid
		 * taking the vnode lock if it is not needed.  A lost
		 * update will be handled on the next call.
		 * In the non-lazy case, we must flush all pending
		 * metadata changes now.
		 */
		if (!lazy || obj->generation != obj->cleangeneration) {
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK,
			    curthread) != 0)
				continue;
			tmpfs_check_mtime(vp);
			if (!lazy)
				tmpfs_update(vp);
			vput(vp);
		} else {
			VI_UNLOCK(vp);
			continue;
		}
	}
}

struct tmpfs_check_rw_maps_arg {
	bool found;
};

static bool
tmpfs_check_rw_maps_cb(struct mount *mp __unused, vm_map_t map __unused,
    vm_map_entry_t entry __unused, void *arg)
{
	struct tmpfs_check_rw_maps_arg *a;

	a = arg;
	a->found = true;
	return (true);
}

/*
 * Revoke write permissions from all mappings of regular files
 * belonging to the specified tmpfs mount.
 */
static bool
tmpfs_revoke_rw_maps_cb(struct mount *mp __unused, vm_map_t map,
    vm_map_entry_t entry, void *arg __unused)
{

	/*
	 * XXXKIB: Should we invalidate the mapping instead?
	 * The process is not going to be happy in any case.
	 */
	entry->max_protection &= ~VM_PROT_WRITE;
	if ((entry->protection & VM_PROT_WRITE) != 0) {
		entry->protection &= ~VM_PROT_WRITE;
		pmap_protect(map->pmap, entry->start, entry->end,
		    entry->protection);
	}
	return (false);
}

/*
 * Apply the callback to every writeable mapping of a regular file
 * belonging to the given tmpfs mount, across all processes.  Stop
 * early if the callback returns true.
 */
static void
tmpfs_all_rw_maps(struct mount *mp, bool (*cb)(struct mount *mp, vm_map_t,
    vm_map_entry_t, void *), void *cb_arg)
{
	struct proc *p;
	struct vmspace *vm;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t object;
	struct vnode *vp;
	int gen;
	bool terminate;

	terminate = false;
	sx_slock(&allproc_lock);
again:
	gen = allproc_gen;
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
		    P_SYSTEM | P_WEXIT)) != 0) {
			PROC_UNLOCK(p);
			continue;
		}
		vm = vmspace_acquire_ref(p);
		_PHOLD_LITE(p);
		PROC_UNLOCK(p);
		if (vm == NULL) {
			PRELE(p);
			continue;
		}
		sx_sunlock(&allproc_lock);
		map = &vm->vm_map;

		vm_map_lock(map);
		if (map->busy)
			vm_map_wait_busy(map);
		VM_MAP_ENTRY_FOREACH(entry, map) {
			if ((entry->eflags & (MAP_ENTRY_GUARD |
			    MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_COW)) != 0 ||
			    (entry->max_protection & VM_PROT_WRITE) == 0)
				continue;
			object = entry->object.vm_object;
			if (object == NULL || object->type != OBJT_SWAP ||
			    (object->flags & OBJ_TMPFS_NODE) == 0)
				continue;
			/*
			 * There is no need to dig into shadow chains:
			 * a mapping of an object that is not at the
			 * top of the chain is read-only.
			 */

			VM_OBJECT_RLOCK(object);
			if (object->type == OBJT_DEAD) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}
			MPASS(object->ref_count > 1);
			if ((object->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) !=
			    (OBJ_TMPFS_NODE | OBJ_TMPFS)) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}
			vp = object->un_pager.swp.swp_tmpfs;
			if (vp->v_mount != mp) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}

			terminate = cb(mp, map, entry, cb_arg);
			VM_OBJECT_RUNLOCK(object);
			if (terminate)
				break;
		}
		vm_map_unlock(map);

		vmspace_free(vm);
		sx_slock(&allproc_lock);
		PRELE(p);
		if (terminate)
			break;
	}
	if (!terminate && gen != allproc_gen)
		goto again;
	sx_sunlock(&allproc_lock);
}

static bool
tmpfs_check_rw_maps(struct mount *mp)
{
	struct tmpfs_check_rw_maps_arg ca;

	ca.found = false;
	tmpfs_all_rw_maps(mp, tmpfs_check_rw_maps_cb, &ca);
	return (ca.found);
}

/*
 * Downgrade a read-write tmpfs mount to read-only.  Unless the
 * downgrade is forced, fail with EBUSY while writeable mappings of
 * the mount's files exist; otherwise revoke write access from any
 * mappings that appear during the downgrade and retry until none
 * remain.
 */
static int
tmpfs_rw_to_ro(struct mount *mp)
{
	int error, flags;
	bool forced;

	forced = (mp->mnt_flag & MNT_FORCE) != 0;
	flags = WRITECLOSE | (forced ? FORCECLOSE : 0);

	if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
		return (error);
	error = vfs_write_suspend_umnt(mp);
	if (error != 0)
		return (error);
	if (!forced && tmpfs_check_rw_maps(mp)) {
		error = EBUSY;
		goto out;
	}
	VFS_TO_TMPFS(mp)->tm_ronly = 1;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_RDONLY;
	MNT_IUNLOCK(mp);
	for (;;) {
		tmpfs_all_rw_maps(mp, tmpfs_revoke_rw_maps_cb, NULL);
		tmpfs_update_mtime(mp, false);
		error = vflush(mp, 0, flags, curthread);
		if (error != 0) {
			VFS_TO_TMPFS(mp)->tm_ronly = 0;
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_RDONLY;
			MNT_IUNLOCK(mp);
			goto out;
		}
		if (!tmpfs_check_rw_maps(mp))
			break;
	}
out:
	vfs_write_resume(mp, 0);
	return (error);
}
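/*
 * Illustrative example (not part of the build): a downgrade such as
 *
 *	mount -u -o ro /mnt
 *
 * arrives in tmpfs_mount() below with MNT_UPDATE set and is handed to
 * tmpfs_rw_to_ro() above.
 */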
static int
tmpfs_mount(struct mount *mp)
{
	const size_t nodes_per_page = howmany(PAGE_SIZE,
	    sizeof(struct tmpfs_dirent) + sizeof(struct tmpfs_node));
	struct tmpfs_mount *tmp;
	struct tmpfs_node *root;
	int error;
	bool nonc;
	/* Size counters. */
	u_quad_t pages;
	off_t nodes_max, size_max, maxfilesize;

	/* Root node attributes. */
	uid_t root_uid;
	gid_t root_gid;
	mode_t root_mode;

	struct vattr va;

	if (vfs_filteropt(mp->mnt_optnew, tmpfs_opts))
		return (EINVAL);

	if (mp->mnt_flag & MNT_UPDATE) {
		/* Only support update mounts for certain options. */
		if (vfs_filteropt(mp->mnt_optnew, tmpfs_updateopts) != 0)
			return (EOPNOTSUPP);
		if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) == 0) {
			/*
			 * On-the-fly resizing is not supported (yet).  We
			 * still need to have "size" listed as "supported";
			 * otherwise, trying to update a file system that is
			 * listed in fstab with a size parameter, say when
			 * changing rw to ro or vice versa, would cause
			 * vfs_filteropt() to bail.
			 */
			if (size_max != VFS_TO_TMPFS(mp)->tm_size_max)
				return (EOPNOTSUPP);
		}
		if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) &&
		    !(VFS_TO_TMPFS(mp)->tm_ronly)) {
			/* RW -> RO */
			return (tmpfs_rw_to_ro(mp));
		} else if (!vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) &&
		    VFS_TO_TMPFS(mp)->tm_ronly) {
			/* RO -> RW */
			VFS_TO_TMPFS(mp)->tm_ronly = 0;
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_RDONLY;
			MNT_IUNLOCK(mp);
		}
		return (0);
	}

	vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred);
	VOP_UNLOCK(mp->mnt_vnodecovered);
	if (error)
		return (error);

	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "gid", "%d", &root_gid) != 1)
		root_gid = va.va_gid;
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "uid", "%d", &root_uid) != 1)
		root_uid = va.va_uid;
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "mode", "%ho", &root_mode) != 1)
		root_mode = va.va_mode;
	if (vfs_getopt_size(mp->mnt_optnew, "inodes", &nodes_max) != 0)
		nodes_max = 0;
	if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) != 0)
		size_max = 0;
	if (vfs_getopt_size(mp->mnt_optnew, "maxfilesize", &maxfilesize) != 0)
		maxfilesize = 0;
	nonc = vfs_getopt(mp->mnt_optnew, "nonc", NULL, NULL) == 0;

	/*
	 * Do not allow mounts if we do not have enough memory to preserve
	 * the minimum reserved pages.
	 */
	if (tmpfs_mem_avail() < TMPFS_PAGES_MINRESERVED)
		return (ENOSPC);

	/*
	 * Get the maximum number of memory pages this file system is
	 * allowed to use, based on the maximum size the user passed in
	 * the mount structure.  A value of zero is treated as if the
	 * maximum available space was requested.
	 */
	if (size_max == 0 || size_max > OFF_MAX - PAGE_SIZE ||
	    (SIZE_MAX < OFF_MAX && size_max / PAGE_SIZE >= SIZE_MAX))
		pages = SIZE_MAX;
	else {
		size_max = roundup(size_max, PAGE_SIZE);
		pages = howmany(size_max, PAGE_SIZE);
	}
	MPASS(pages > 0);

	if (nodes_max <= 3) {
		if (pages < INT_MAX / nodes_per_page)
			nodes_max = pages * nodes_per_page;
		else
			nodes_max = INT_MAX;
	}
	if (nodes_max > INT_MAX)
		nodes_max = INT_MAX;
	MPASS(nodes_max >= 3);

	/* Allocate the tmpfs mount structure and fill it. */
	tmp = (struct tmpfs_mount *)malloc(sizeof(struct tmpfs_mount),
	    M_TMPFSMNT, M_WAITOK | M_ZERO);

	mtx_init(&tmp->tm_allnode_lock, "tmpfs allnode lock", NULL, MTX_DEF);
	tmp->tm_nodes_max = nodes_max;
	tmp->tm_nodes_inuse = 0;
	tmp->tm_refcount = 1;
	tmp->tm_maxfilesize = maxfilesize > 0 ? maxfilesize : OFF_MAX;
	LIST_INIT(&tmp->tm_nodes_used);

	tmp->tm_size_max = size_max;
	tmp->tm_pages_max = pages;
	tmp->tm_pages_used = 0;
	new_unrhdr64(&tmp->tm_ino_unr, 2);
	tmp->tm_ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	tmp->tm_nonc = nonc;

	/* Allocate the root node. */
	error = tmpfs_alloc_node(mp, tmp, VDIR, root_uid, root_gid,
	    root_mode & ALLPERMS, NULL, NULL, VNOVAL, &root);

	if (error != 0 || root == NULL) {
		free(tmp, M_TMPFSMNT);
		return (error);
	}
	KASSERT(root->tn_id == 2,
	    ("tmpfs root with invalid ino: %ju", (uintmax_t)root->tn_id));
	tmp->tm_root = root;

	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
	    MNTK_TEXT_REFS | MNTK_NOMSYNC;
	MNT_IUNLOCK(mp);

	mp->mnt_data = tmp;
	mp->mnt_stat.f_namemax = MAXNAMLEN;
	vfs_getnewfsid(mp);
	vfs_mountedfrom(mp, "tmpfs");

	return (0);
}
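/*
 * Worked example (illustrative): with PAGE_SIZE of 4096 and a mount
 * option of "size=64m", tmpfs_mount() above rounds size_max up to
 * 67108864 bytes and computes pages = 16384.  With no "inodes" option,
 * nodes_max then defaults to pages * nodes_per_page, capped at
 * INT_MAX.
 */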
/* ARGSUSED2 */
static int
tmpfs_unmount(struct mount *mp, int mntflags)
{
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	int error, flags;

	flags = (mntflags & MNT_FORCE) != 0 ? FORCECLOSE : 0;
	tmp = VFS_TO_TMPFS(mp);

	/* Stop writers. */
	error = vfs_write_suspend_umnt(mp);
	if (error != 0)
		return (error);
	/*
	 * At this point, nodes cannot be destroyed by any other
	 * thread because write suspension has started.
	 */

	for (;;) {
		error = vflush(mp, 0, flags, curthread);
		if (error != 0) {
			vfs_write_resume(mp, VR_START_WRITE);
			return (error);
		}
		MNT_ILOCK(mp);
		if (mp->mnt_nvnodelistsize == 0) {
			MNT_IUNLOCK(mp);
			break;
		}
		MNT_IUNLOCK(mp);
		if ((mntflags & MNT_FORCE) == 0) {
			vfs_write_resume(mp, VR_START_WRITE);
			return (EBUSY);
		}
	}

	TMPFS_LOCK(tmp);
	while ((node = LIST_FIRST(&tmp->tm_nodes_used)) != NULL) {
		TMPFS_NODE_LOCK(node);
		if (node->tn_type == VDIR)
			tmpfs_dir_destroy(tmp, node);
		if (tmpfs_free_node_locked(tmp, node, true))
			TMPFS_LOCK(tmp);
		else
			TMPFS_NODE_UNLOCK(node);
	}

	mp->mnt_data = NULL;
	tmpfs_free_tmp(tmp);
	vfs_write_resume(mp, VR_START_WRITE);

	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);

	return (0);
}
/*
 * Drop a reference on the given tmpfs mount, which must be entered
 * with its lock held; free the structure once the last reference is
 * gone.  The lock is released in either case.
 */
void
tmpfs_free_tmp(struct tmpfs_mount *tmp)
{

	MPASS(tmp->tm_refcount > 0);
	tmp->tm_refcount--;
	if (tmp->tm_refcount > 0) {
		TMPFS_UNLOCK(tmp);
		return;
	}
	TMPFS_UNLOCK(tmp);

	mtx_destroy(&tmp->tm_allnode_lock);
	MPASS(tmp->tm_pages_used == 0);
	MPASS(tmp->tm_nodes_inuse == 0);

	free(tmp, M_TMPFSMNT);
}

static int
tmpfs_root(struct mount *mp, int flags, struct vnode **vpp)
{
	int error;

	error = tmpfs_alloc_vp(mp, VFS_TO_TMPFS(mp)->tm_root, flags, vpp);
	if (error == 0)
		(*vpp)->v_vflag |= VV_ROOT;
	return (error);
}

static int
tmpfs_fhtovp(struct mount *mp, struct fid *fhp, int flags,
    struct vnode **vpp)
{
	struct tmpfs_fid *tfhp;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	int error;

	tmp = VFS_TO_TMPFS(mp);

	tfhp = (struct tmpfs_fid *)fhp;
	if (tfhp->tf_len != sizeof(struct tmpfs_fid))
		return (EINVAL);

	if (tfhp->tf_id >= tmp->tm_nodes_max)
		return (EINVAL);

	TMPFS_LOCK(tmp);
	LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
		if (node->tn_id == tfhp->tf_id &&
		    node->tn_gen == tfhp->tf_gen) {
			tmpfs_ref_node(node);
			break;
		}
	}
	TMPFS_UNLOCK(tmp);

	if (node != NULL) {
		error = tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, vpp);
		tmpfs_free_node(tmp, node);
	} else
		error = EINVAL;
	return (error);
}

/* ARGSUSED2 */
static int
tmpfs_statfs(struct mount *mp, struct statfs *sbp)
{
	struct tmpfs_mount *tmp;
	size_t used;

	tmp = VFS_TO_TMPFS(mp);

	sbp->f_iosize = PAGE_SIZE;
	sbp->f_bsize = PAGE_SIZE;

	used = tmpfs_pages_used(tmp);
	if (tmp->tm_pages_max != ULONG_MAX)
		sbp->f_blocks = tmp->tm_pages_max;
	else
		sbp->f_blocks = used + tmpfs_mem_avail();
	if (sbp->f_blocks <= used)
		sbp->f_bavail = 0;
	else
		sbp->f_bavail = sbp->f_blocks - used;
	sbp->f_bfree = sbp->f_bavail;
	used = tmp->tm_nodes_inuse;
	sbp->f_files = tmp->tm_nodes_max;
	if (sbp->f_files <= used)
		sbp->f_ffree = 0;
	else
		sbp->f_ffree = sbp->f_files - used;
	/* sbp->f_owner = tmp->tn_uid; */

	return (0);
}
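/*
 * Illustrative example (not normative): for a mount created with
 * "-o size=64m" on a machine with 4096-byte pages, tmpfs_statfs()
 * above reports f_blocks = 16384 blocks of f_bsize = 4096 bytes, and
 * f_bavail shrinks by one block for each page of file data in use.
 */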
static int
tmpfs_sync(struct mount *mp, int waitfor)
{

	if (waitfor == MNT_SUSPEND) {
		MNT_ILOCK(mp);
		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
		MNT_IUNLOCK(mp);
	} else if (waitfor == MNT_LAZY) {
		tmpfs_update_mtime(mp, true);
	}
	return (0);
}

/*
 * The presence of a susp_clean method tells the VFS to track writes.
 */
static void
tmpfs_susp_clean(struct mount *mp __unused)
{
}

static int
tmpfs_init(struct vfsconf *conf)
{

	tmpfs_subr_init();
	return (0);
}

static int
tmpfs_uninit(struct vfsconf *conf)
{

	tmpfs_subr_uninit();
	return (0);
}

/*
 * tmpfs vfs operations.
 */
struct vfsops tmpfs_vfsops = {
	.vfs_mount =		tmpfs_mount,
	.vfs_unmount =		tmpfs_unmount,
	.vfs_root =		vfs_cache_root,
	.vfs_cachedroot =	tmpfs_root,
	.vfs_statfs =		tmpfs_statfs,
	.vfs_fhtovp =		tmpfs_fhtovp,
	.vfs_sync =		tmpfs_sync,
	.vfs_susp_clean =	tmpfs_susp_clean,
	.vfs_init =		tmpfs_init,
	.vfs_uninit =		tmpfs_uninit,
};
VFS_SET(tmpfs_vfsops, tmpfs, VFCF_JAIL);