/* $NetBSD: tmpfs_vfsops.c,v 1.10 2005/12/11 12:24:29 christos Exp $ */

/*-
 * SPDX-License-Identifier: BSD-2-Clause-NetBSD
 *
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Efficient memory file system.
 *
 * tmpfs is a file system that uses FreeBSD's virtual memory
 * sub-system to store file data and metadata in an efficient way.
 * This means that it does not follow the structure of an on-disk file
 * system because it simply does not need to.  Instead, it uses
 * memory-specific data structures and algorithms to automatically
 * allocate and release resources.
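 *
 * (File data is kept in anonymous, swap-backed VM objects, so under
 * memory pressure pages can be paged out to swap rather than staying
 * pinned in RAM.)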
 */

#include "opt_tmpfs.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dirent.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

#include <fs/tmpfs/tmpfs.h>

/*
 * Default permission for the root node.
 */
#define	TMPFS_DEFAULT_ROOT_MODE	(S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)

MALLOC_DEFINE(M_TMPFSMNT, "tmpfs mount", "tmpfs mount structures");
MALLOC_DEFINE(M_TMPFSNAME, "tmpfs name", "tmpfs file names");

static int	tmpfs_mount(struct mount *);
static int	tmpfs_unmount(struct mount *, int);
static int	tmpfs_root(struct mount *, int flags, struct vnode **);
static int	tmpfs_fhtovp(struct mount *, struct fid *, int,
		    struct vnode **);
static int	tmpfs_statfs(struct mount *, struct statfs *);
static void	tmpfs_susp_clean(struct mount *);

static const char *tmpfs_opts[] = {
	"from", "size", "maxfilesize", "inodes", "uid", "gid", "mode", "export",
	"union", "nonc", NULL
};

static const char *tmpfs_updateopts[] = {
	"from", "export", "size", NULL
};

static int
tmpfs_node_ctor(void *mem, int size, void *arg, int flags)
{
	struct tmpfs_node *node = (struct tmpfs_node *)mem;

	node->tn_gen++;
	node->tn_size = 0;
	node->tn_status = 0;
	node->tn_flags = 0;
	node->tn_links = 0;
	node->tn_vnode = NULL;
	node->tn_vpstate = 0;

	return (0);
}

static void
tmpfs_node_dtor(void *mem, int size, void *arg)
{
	struct tmpfs_node *node = (struct tmpfs_node *)mem;

	node->tn_type = VNON;
}

static int
tmpfs_node_init(void *mem, int size, int flags)
{
	struct tmpfs_node *node = (struct tmpfs_node *)mem;

	node->tn_id = 0;
	mtx_init(&node->tn_interlock, "tmpfs node interlock", NULL, MTX_DEF);
	node->tn_gen = arc4random();

	return (0);
}

static void
tmpfs_node_fini(void *mem, int size)
{
	struct tmpfs_node *node = (struct tmpfs_node *)mem;

	mtx_destroy(&node->tn_interlock);
}

/*
 * Handle updates of time from writes to mmapped regions.  Use
 * MNT_VNODE_FOREACH_ALL instead of MNT_VNODE_FOREACH_ACTIVE, since
 * unmap of a tmpfs-backed vnode does not call vinactive(), because
 * the vm object type is OBJT_SWAP.
 * If lazy, only handle delayed updates of mtime due to writes to
 * mapped files.
 */
static void
tmpfs_update_mtime(struct mount *mp, bool lazy)
{
	struct vnode *vp, *mvp;
	struct vm_object *obj;

	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_type != VREG) {
			VI_UNLOCK(vp);
			continue;
		}
		obj = vp->v_object;
		KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) ==
		    (OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj"));

		/*
		 * In the lazy case, do an unlocked read and avoid
		 * taking the vnode lock if not needed.  A lost update
		 * will be handled on the next call.
		 * In the non-lazy case, we must flush all pending
		 * metadata changes now.
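		 * (OBJ_TMPFS_DIRTY is set by the VM layer when the
		 * object is dirtied through a writeable mapping, which
		 * is why the unlocked check below suffices.)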
		 */
		if (!lazy || (obj->flags & OBJ_TMPFS_DIRTY) != 0) {
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK,
			    curthread) != 0)
				continue;
			tmpfs_check_mtime(vp);
			if (!lazy)
				tmpfs_update(vp);
			vput(vp);
		} else {
			VI_UNLOCK(vp);
			continue;
		}
	}
}

struct tmpfs_check_rw_maps_arg {
	bool found;
};

static bool
tmpfs_check_rw_maps_cb(struct mount *mp __unused, vm_map_t map __unused,
    vm_map_entry_t entry __unused, void *arg)
{
	struct tmpfs_check_rw_maps_arg *a;

	a = arg;
	a->found = true;
	return (true);
}

/*
 * Revoke write permissions from all mappings of regular files
 * belonging to the specified tmpfs mount.
 */
static bool
tmpfs_revoke_rw_maps_cb(struct mount *mp __unused, vm_map_t map,
    vm_map_entry_t entry, void *arg __unused)
{

	/*
	 * XXXKIB: might it be better to invalidate the mapping
	 * instead?  The process is not going to be happy in any case.
	 */
	entry->max_protection &= ~VM_PROT_WRITE;
	if ((entry->protection & VM_PROT_WRITE) != 0) {
		entry->protection &= ~VM_PROT_WRITE;
		pmap_protect(map->pmap, entry->start, entry->end,
		    entry->protection);
	}
	return (false);
}

static void
tmpfs_all_rw_maps(struct mount *mp, bool (*cb)(struct mount *mp, vm_map_t,
    vm_map_entry_t, void *), void *cb_arg)
{
	struct proc *p;
	struct vmspace *vm;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t object;
	struct vnode *vp;
	int gen;
	bool terminate;

	terminate = false;
	sx_slock(&allproc_lock);
again:
	gen = allproc_gen;
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
		    P_SYSTEM | P_WEXIT)) != 0) {
			PROC_UNLOCK(p);
			continue;
		}
		vm = vmspace_acquire_ref(p);
		_PHOLD_LITE(p);
		PROC_UNLOCK(p);
		if (vm == NULL) {
			PRELE(p);
			continue;
		}
		sx_sunlock(&allproc_lock);
		map = &vm->vm_map;

		vm_map_lock(map);
		if (map->busy)
			vm_map_wait_busy(map);
		for (entry = map->header.next; entry != &map->header;
		    entry = entry->next) {
			if ((entry->eflags & (MAP_ENTRY_GUARD |
			    MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_COW)) != 0 ||
			    (entry->max_protection & VM_PROT_WRITE) == 0)
				continue;
			object = entry->object.vm_object;
			if (object == NULL || object->type != OBJT_SWAP ||
			    (object->flags & OBJ_TMPFS_NODE) == 0)
				continue;
			/*
			 * No need to dig into the shadow chain; a
			 * mapping of an object that is not at the top
			 * is read-only.
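			 * (A write fault on a shadowed entry copies the
			 * page into the shadow object, so writes never
			 * reach the underlying tmpfs object.)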
			 */

			VM_OBJECT_RLOCK(object);
			if (object->type == OBJT_DEAD) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}
			MPASS(object->ref_count > 1);
			if ((object->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) !=
			    (OBJ_TMPFS_NODE | OBJ_TMPFS)) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}
			vp = object->un_pager.swp.swp_tmpfs;
			if (vp->v_mount != mp) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}

			terminate = cb(mp, map, entry, cb_arg);
			VM_OBJECT_RUNLOCK(object);
			if (terminate)
				break;
		}
		vm_map_unlock(map);

		vmspace_free(vm);
		sx_slock(&allproc_lock);
		PRELE(p);
		if (terminate)
			break;
	}
	if (!terminate && gen != allproc_gen)
		goto again;
	sx_sunlock(&allproc_lock);
}

static bool
tmpfs_check_rw_maps(struct mount *mp)
{
	struct tmpfs_check_rw_maps_arg ca;

	ca.found = false;
	tmpfs_all_rw_maps(mp, tmpfs_check_rw_maps_cb, &ca);
	return (ca.found);
}

static int
tmpfs_rw_to_ro(struct mount *mp)
{
	int error, flags;
	bool forced;

	forced = (mp->mnt_flag & MNT_FORCE) != 0;
	flags = WRITECLOSE | (forced ? FORCECLOSE : 0);

	if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
		return (error);
	error = vfs_write_suspend_umnt(mp);
	if (error != 0)
		return (error);
	if (!forced && tmpfs_check_rw_maps(mp)) {
		error = EBUSY;
		goto out;
	}
	VFS_TO_TMPFS(mp)->tm_ronly = 1;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_RDONLY;
	MNT_IUNLOCK(mp);
	for (;;) {
		tmpfs_all_rw_maps(mp, tmpfs_revoke_rw_maps_cb, NULL);
		tmpfs_update_mtime(mp, false);
		error = vflush(mp, 0, flags, curthread);
		if (error != 0) {
			VFS_TO_TMPFS(mp)->tm_ronly = 0;
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_RDONLY;
			MNT_IUNLOCK(mp);
			goto out;
		}
		if (!tmpfs_check_rw_maps(mp))
			break;
	}
out:
	vfs_write_resume(mp, 0);
	return (error);
}

static int
tmpfs_mount(struct mount *mp)
{
	const size_t nodes_per_page = howmany(PAGE_SIZE,
	    sizeof(struct tmpfs_dirent) + sizeof(struct tmpfs_node));
	struct tmpfs_mount *tmp;
	struct tmpfs_node *root;
	int error;
	bool nonc;
	/* Size counters. */
	u_quad_t pages;
	off_t nodes_max, size_max, maxfilesize;

	/* Root node attributes. */
	uid_t root_uid;
	gid_t root_gid;
	mode_t root_mode;

	struct vattr va;

	if (vfs_filteropt(mp->mnt_optnew, tmpfs_opts))
		return (EINVAL);

	if (mp->mnt_flag & MNT_UPDATE) {
		/* Only support update mounts for certain options. */
		if (vfs_filteropt(mp->mnt_optnew, tmpfs_updateopts) != 0)
			return (EOPNOTSUPP);
		if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) == 0) {
			/*
			 * On-the-fly resizing is not supported (yet).  We
			 * still need to have "size" listed as "supported";
			 * otherwise, updating a file system that is listed
			 * in fstab with a size parameter, say to change rw
			 * to ro or vice versa, would cause vfs_filteropt()
			 * to bail.
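			 * (For example, "mount -u -o ro" on such an fstab
			 * entry re-submits the size option, so it must pass
			 * the filter even though the value cannot change.)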
			 */
			if (size_max != VFS_TO_TMPFS(mp)->tm_size_max)
				return (EOPNOTSUPP);
		}
		if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) &&
		    !(VFS_TO_TMPFS(mp)->tm_ronly)) {
			/* RW -> RO */
			return (tmpfs_rw_to_ro(mp));
		} else if (!vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) &&
		    VFS_TO_TMPFS(mp)->tm_ronly) {
			/* RO -> RW */
			VFS_TO_TMPFS(mp)->tm_ronly = 0;
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_RDONLY;
			MNT_IUNLOCK(mp);
		}
		return (0);
	}

	vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred);
	VOP_UNLOCK(mp->mnt_vnodecovered, 0);
	if (error)
		return (error);

	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "gid", "%d", &root_gid) != 1)
		root_gid = va.va_gid;
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "uid", "%d", &root_uid) != 1)
		root_uid = va.va_uid;
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "mode", "%ho", &root_mode) != 1)
		root_mode = va.va_mode;
	if (vfs_getopt_size(mp->mnt_optnew, "inodes", &nodes_max) != 0)
		nodes_max = 0;
	if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) != 0)
		size_max = 0;
	if (vfs_getopt_size(mp->mnt_optnew, "maxfilesize", &maxfilesize) != 0)
		maxfilesize = 0;
	nonc = vfs_getopt(mp->mnt_optnew, "nonc", NULL, NULL) == 0;

	/*
	 * Do not allow mounts if we do not have enough memory to preserve
	 * the minimum reserved pages.
	 */
	if (tmpfs_mem_avail() < TMPFS_PAGES_MINRESERVED)
		return (ENOSPC);

	/*
	 * Get the maximum number of memory pages this file system is
	 * allowed to use, based on the maximum size the user passed in
	 * the mount structure.  A value of zero is treated as if the
	 * maximum available space was requested.
	 */
	if (size_max == 0 || size_max > OFF_MAX - PAGE_SIZE ||
	    (SIZE_MAX < OFF_MAX && size_max / PAGE_SIZE >= SIZE_MAX))
		pages = SIZE_MAX;
	else {
		size_max = roundup(size_max, PAGE_SIZE);
		pages = howmany(size_max, PAGE_SIZE);
	}
	MPASS(pages > 0);

	if (nodes_max <= 3) {
		if (pages < INT_MAX / nodes_per_page)
			nodes_max = pages * nodes_per_page;
		else
			nodes_max = INT_MAX;
	}
	if (nodes_max > INT_MAX)
		nodes_max = INT_MAX;
	MPASS(nodes_max >= 3);

	/* Allocate the tmpfs mount structure and fill it. */
	tmp = (struct tmpfs_mount *)malloc(sizeof(struct tmpfs_mount),
	    M_TMPFSMNT, M_WAITOK | M_ZERO);

	mtx_init(&tmp->tm_allnode_lock, "tmpfs allnode lock", NULL, MTX_DEF);
	tmp->tm_nodes_max = nodes_max;
	tmp->tm_nodes_inuse = 0;
	tmp->tm_refcount = 1;
	tmp->tm_maxfilesize = maxfilesize > 0 ? maxfilesize : OFF_MAX;
	LIST_INIT(&tmp->tm_nodes_used);

	tmp->tm_size_max = size_max;
	tmp->tm_pages_max = pages;
	tmp->tm_pages_used = 0;
	new_unrhdr64(&tmp->tm_ino_unr, 2);
	tmp->tm_dirent_pool = uma_zcreate("TMPFS dirent",
	    sizeof(struct tmpfs_dirent), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	tmp->tm_node_pool = uma_zcreate("TMPFS node",
	    sizeof(struct tmpfs_node), tmpfs_node_ctor, tmpfs_node_dtor,
	    tmpfs_node_init, tmpfs_node_fini, UMA_ALIGN_PTR, 0);
	tmp->tm_ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	tmp->tm_nonc = nonc;

	/*
	 * Allocate the root node.
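	 * The inode number allocator was seeded at 2 above, so the root
	 * node always receives inode number 2 (the KASSERT below checks
	 * this).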
	 */
	error = tmpfs_alloc_node(mp, tmp, VDIR, root_uid, root_gid,
	    root_mode & ALLPERMS, NULL, NULL, VNOVAL, &root);

	if (error != 0 || root == NULL) {
		uma_zdestroy(tmp->tm_node_pool);
		uma_zdestroy(tmp->tm_dirent_pool);
		free(tmp, M_TMPFSMNT);
		return (error);
	}
	KASSERT(root->tn_id == 2,
	    ("tmpfs root with invalid ino: %ju", (uintmax_t)root->tn_id));
	tmp->tm_root = root;

	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
	    MNTK_TEXT_REFS;
	MNT_IUNLOCK(mp);

	mp->mnt_data = tmp;
	mp->mnt_stat.f_namemax = MAXNAMLEN;
	vfs_getnewfsid(mp);
	vfs_mountedfrom(mp, "tmpfs");

	return (0);
}

/* ARGSUSED2 */
static int
tmpfs_unmount(struct mount *mp, int mntflags)
{
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	int error, flags;

	flags = (mntflags & MNT_FORCE) != 0 ? FORCECLOSE : 0;
	tmp = VFS_TO_TMPFS(mp);

	/* Stop writers. */
	error = vfs_write_suspend_umnt(mp);
	if (error != 0)
		return (error);
	/*
	 * At this point, nodes cannot be destroyed by any other
	 * thread because write suspension is started.
	 */

	for (;;) {
		error = vflush(mp, 0, flags, curthread);
		if (error != 0) {
			vfs_write_resume(mp, VR_START_WRITE);
			return (error);
		}
		MNT_ILOCK(mp);
		if (mp->mnt_nvnodelistsize == 0) {
			MNT_IUNLOCK(mp);
			break;
		}
		MNT_IUNLOCK(mp);
		if ((mntflags & MNT_FORCE) == 0) {
			vfs_write_resume(mp, VR_START_WRITE);
			return (EBUSY);
		}
	}

	TMPFS_LOCK(tmp);
	while ((node = LIST_FIRST(&tmp->tm_nodes_used)) != NULL) {
		TMPFS_NODE_LOCK(node);
		if (node->tn_type == VDIR)
			tmpfs_dir_destroy(tmp, node);
		if (tmpfs_free_node_locked(tmp, node, true))
			TMPFS_LOCK(tmp);
		else
			TMPFS_NODE_UNLOCK(node);
	}

	mp->mnt_data = NULL;
	tmpfs_free_tmp(tmp);
	vfs_write_resume(mp, VR_START_WRITE);

	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);

	return (0);
}

void
tmpfs_free_tmp(struct tmpfs_mount *tmp)
{

	MPASS(tmp->tm_refcount > 0);
	tmp->tm_refcount--;
	if (tmp->tm_refcount > 0) {
		TMPFS_UNLOCK(tmp);
		return;
	}
	TMPFS_UNLOCK(tmp);

	uma_zdestroy(tmp->tm_dirent_pool);
	uma_zdestroy(tmp->tm_node_pool);

	mtx_destroy(&tmp->tm_allnode_lock);
	MPASS(tmp->tm_pages_used == 0);
	MPASS(tmp->tm_nodes_inuse == 0);

	free(tmp, M_TMPFSMNT);
}

static int
tmpfs_root(struct mount *mp, int flags, struct vnode **vpp)
{
	int error;

	error = tmpfs_alloc_vp(mp, VFS_TO_TMPFS(mp)->tm_root, flags, vpp);
	if (error == 0)
		(*vpp)->v_vflag |= VV_ROOT;
	return (error);
}

static int
tmpfs_fhtovp(struct mount *mp, struct fid *fhp, int flags,
    struct vnode **vpp)
{
	struct tmpfs_fid *tfhp;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	int error;

	tmp = VFS_TO_TMPFS(mp);

	tfhp = (struct tmpfs_fid *)fhp;
	if (tfhp->tf_len != sizeof(struct tmpfs_fid))
		return (EINVAL);

	if (tfhp->tf_id >= tmp->tm_nodes_max)
		return (EINVAL);

	TMPFS_LOCK(tmp);
	LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
		if (node->tn_id == tfhp->tf_id &&
		    node->tn_gen == tfhp->tf_gen) {
			tmpfs_ref_node(node);
			break;
		}
	}
	TMPFS_UNLOCK(tmp);

	if (node != NULL) {
		error = tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, vpp);
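		/* Drop the hold acquired by tmpfs_ref_node() above. */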
		tmpfs_free_node(tmp, node);
	} else
		error = EINVAL;
	return (error);
}

/* ARGSUSED2 */
static int
tmpfs_statfs(struct mount *mp, struct statfs *sbp)
{
	struct tmpfs_mount *tmp;
	size_t used;

	tmp = VFS_TO_TMPFS(mp);

	sbp->f_iosize = PAGE_SIZE;
	sbp->f_bsize = PAGE_SIZE;

	used = tmpfs_pages_used(tmp);
	if (tmp->tm_pages_max != ULONG_MAX)
		sbp->f_blocks = tmp->tm_pages_max;
	else
		sbp->f_blocks = used + tmpfs_mem_avail();
	if (sbp->f_blocks <= used)
		sbp->f_bavail = 0;
	else
		sbp->f_bavail = sbp->f_blocks - used;
	sbp->f_bfree = sbp->f_bavail;
	used = tmp->tm_nodes_inuse;
	sbp->f_files = tmp->tm_nodes_max;
	if (sbp->f_files <= used)
		sbp->f_ffree = 0;
	else
		sbp->f_ffree = sbp->f_files - used;
	/* sbp->f_owner = tmp->tn_uid; */

	return (0);
}

static int
tmpfs_sync(struct mount *mp, int waitfor)
{

	if (waitfor == MNT_SUSPEND) {
		MNT_ILOCK(mp);
		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
		MNT_IUNLOCK(mp);
	} else if (waitfor == MNT_LAZY) {
		tmpfs_update_mtime(mp, true);
	}
	return (0);
}

/*
 * The presence of a susp_clean method tells the VFS to track writes.
 */
static void
tmpfs_susp_clean(struct mount *mp __unused)
{
}

/*
 * tmpfs vfs operations.
 */

struct vfsops tmpfs_vfsops = {
	.vfs_mount = tmpfs_mount,
	.vfs_unmount = tmpfs_unmount,
	.vfs_root = tmpfs_root,
	.vfs_statfs = tmpfs_statfs,
	.vfs_fhtovp = tmpfs_fhtovp,
	.vfs_sync = tmpfs_sync,
	.vfs_susp_clean = tmpfs_susp_clean,
};
VFS_SET(tmpfs_vfsops, tmpfs, VFCF_JAIL);
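
/*
 * VFCF_JAIL above marks tmpfs as eligible for mounting from within a
 * jail; the jail additionally needs the allow.mount.tmpfs permission.
 */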