1 /* $NetBSD: tmpfs_vfsops.c,v 1.10 2005/12/11 12:24:29 christos Exp $ */ 2 3 /*- 4 * SPDX-License-Identifier: BSD-2-Clause 5 * 6 * Copyright (c) 2005 The NetBSD Foundation, Inc. 7 * All rights reserved. 8 * 9 * This code is derived from software contributed to The NetBSD Foundation 10 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code 11 * 2005 program. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 24 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 * POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 /* 36 * Efficient memory file system. 37 * 38 * tmpfs is a file system that uses FreeBSD's virtual memory 39 * sub-system to store file data and metadata in an efficient way. 40 * This means that it does not follow the structure of an on-disk file 41 * system because it simply does not need to. Instead, it uses 42 * memory-specific data structures and algorithms to automatically 43 * allocate and release resources. 44 */ 45 46 #include "opt_ddb.h" 47 #include "opt_tmpfs.h" 48 49 #include <sys/cdefs.h> 50 __FBSDID("$FreeBSD$"); 51 52 #include <sys/param.h> 53 #include <sys/systm.h> 54 #include <sys/dirent.h> 55 #include <sys/file.h> 56 #include <sys/limits.h> 57 #include <sys/lock.h> 58 #include <sys/mount.h> 59 #include <sys/mutex.h> 60 #include <sys/proc.h> 61 #include <sys/jail.h> 62 #include <sys/kernel.h> 63 #include <sys/rwlock.h> 64 #include <sys/stat.h> 65 #include <sys/sx.h> 66 #include <sys/sysctl.h> 67 #include <sys/vnode.h> 68 69 #include <vm/vm.h> 70 #include <vm/vm_param.h> 71 #include <vm/pmap.h> 72 #include <vm/vm_extern.h> 73 #include <vm/vm_map.h> 74 #include <vm/vm_object.h> 75 #include <vm/vm_param.h> 76 77 #include <fs/tmpfs/tmpfs.h> 78 79 /* 80 * Default permission for root node 81 */ 82 #define TMPFS_DEFAULT_ROOT_MODE (S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH) 83 84 static MALLOC_DEFINE(M_TMPFSMNT, "tmpfs mount", "tmpfs mount structures"); 85 MALLOC_DEFINE(M_TMPFSNAME, "tmpfs name", "tmpfs file names"); 86 87 static int tmpfs_mount(struct mount *); 88 static int tmpfs_unmount(struct mount *, int); 89 static int tmpfs_root(struct mount *, int flags, struct vnode **); 90 static int tmpfs_fhtovp(struct mount *, struct fid *, int, 91 struct vnode **); 92 static int tmpfs_statfs(struct mount *, struct statfs *); 93 94 static const char *tmpfs_opts[] = { 95 "from", "easize", "size", "maxfilesize", "inodes", "uid", "gid", "mode", 96 "export", "union", "nonc", "nomtime", "nosymfollow", "pgread", NULL 97 }; 98 99 static const char *tmpfs_updateopts[] = { 100 "from", "easize", "export", "nomtime", "size", "nosymfollow", NULL 101 }; 102 103 static int 104 tmpfs_update_mtime_lazy_filter(struct vnode *vp, void *arg) 105 { 106 struct vm_object *obj; 107 108 if (vp->v_type != VREG) 109 return (0); 110 111 obj = atomic_load_ptr(&vp->v_object); 112 if (obj == NULL) 113 return (0); 114 115 return (vm_object_mightbedirty_(obj)); 116 } 117 118 static void 119 tmpfs_update_mtime_lazy(struct mount *mp) 120 { 121 struct vnode *vp, *mvp; 122 123 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, tmpfs_update_mtime_lazy_filter, NULL) { 124 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) != 0) 125 continue; 126 tmpfs_check_mtime(vp); 127 vput(vp); 128 } 129 } 130 131 static void 132 tmpfs_update_mtime_all(struct mount *mp) 133 { 134 struct vnode *vp, *mvp; 135 136 if (VFS_TO_TMPFS(mp)->tm_nomtime) 137 return; 138 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 139 if (vp->v_type != VREG) { 140 VI_UNLOCK(vp); 141 continue; 142 } 143 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) != 0) 144 continue; 145 tmpfs_check_mtime(vp); 146 tmpfs_update(vp); 147 vput(vp); 148 } 149 } 150 151 struct tmpfs_check_rw_maps_arg { 152 bool found; 153 }; 154 155 static bool 156 tmpfs_check_rw_maps_cb(struct mount *mp __unused, vm_map_t map __unused, 157 vm_map_entry_t entry __unused, void *arg) 158 { 159 struct tmpfs_check_rw_maps_arg *a; 160 161 a = arg; 162 a->found = true; 163 return (true); 164 } 165 166 /* 167 * Revoke write permissions from all mappings of regular files 168 * belonging to the specified tmpfs mount. 169 */ 170 static bool 171 tmpfs_revoke_rw_maps_cb(struct mount *mp __unused, vm_map_t map, 172 vm_map_entry_t entry, void *arg __unused) 173 { 174 175 /* 176 * XXXKIB: might be invalidate the mapping 177 * instead ? The process is not going to be 178 * happy in any case. 179 */ 180 entry->max_protection &= ~VM_PROT_WRITE; 181 if ((entry->protection & VM_PROT_WRITE) != 0) { 182 entry->protection &= ~VM_PROT_WRITE; 183 pmap_protect(map->pmap, entry->start, entry->end, 184 entry->protection); 185 } 186 return (false); 187 } 188 189 static void 190 tmpfs_all_rw_maps(struct mount *mp, bool (*cb)(struct mount *mp, vm_map_t, 191 vm_map_entry_t, void *), void *cb_arg) 192 { 193 struct proc *p; 194 struct vmspace *vm; 195 vm_map_t map; 196 vm_map_entry_t entry; 197 vm_object_t object; 198 struct vnode *vp; 199 int gen; 200 bool terminate; 201 202 terminate = false; 203 sx_slock(&allproc_lock); 204 again: 205 gen = allproc_gen; 206 FOREACH_PROC_IN_SYSTEM(p) { 207 PROC_LOCK(p); 208 if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC | 209 P_SYSTEM | P_WEXIT)) != 0) { 210 PROC_UNLOCK(p); 211 continue; 212 } 213 vm = vmspace_acquire_ref(p); 214 _PHOLD_LITE(p); 215 PROC_UNLOCK(p); 216 if (vm == NULL) { 217 PRELE(p); 218 continue; 219 } 220 sx_sunlock(&allproc_lock); 221 map = &vm->vm_map; 222 223 vm_map_lock(map); 224 if (map->busy) 225 vm_map_wait_busy(map); 226 VM_MAP_ENTRY_FOREACH(entry, map) { 227 if ((entry->eflags & (MAP_ENTRY_GUARD | 228 MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_COW)) != 0 || 229 (entry->max_protection & VM_PROT_WRITE) == 0) 230 continue; 231 object = entry->object.vm_object; 232 if (object == NULL || object->type != tmpfs_pager_type) 233 continue; 234 /* 235 * No need to dig into shadow chain, mapping 236 * of the object not at top is readonly. 237 */ 238 239 VM_OBJECT_RLOCK(object); 240 if (object->type == OBJT_DEAD) { 241 VM_OBJECT_RUNLOCK(object); 242 continue; 243 } 244 MPASS(object->ref_count > 1); 245 if ((object->flags & OBJ_TMPFS) == 0) { 246 VM_OBJECT_RUNLOCK(object); 247 continue; 248 } 249 vp = VM_TO_TMPFS_VP(object); 250 if (vp->v_mount != mp) { 251 VM_OBJECT_RUNLOCK(object); 252 continue; 253 } 254 255 terminate = cb(mp, map, entry, cb_arg); 256 VM_OBJECT_RUNLOCK(object); 257 if (terminate) 258 break; 259 } 260 vm_map_unlock(map); 261 262 vmspace_free(vm); 263 sx_slock(&allproc_lock); 264 PRELE(p); 265 if (terminate) 266 break; 267 } 268 if (!terminate && gen != allproc_gen) 269 goto again; 270 sx_sunlock(&allproc_lock); 271 } 272 273 static bool 274 tmpfs_check_rw_maps(struct mount *mp) 275 { 276 struct tmpfs_check_rw_maps_arg ca; 277 278 ca.found = false; 279 tmpfs_all_rw_maps(mp, tmpfs_check_rw_maps_cb, &ca); 280 return (ca.found); 281 } 282 283 static int 284 tmpfs_rw_to_ro(struct mount *mp) 285 { 286 int error, flags; 287 bool forced; 288 289 forced = (mp->mnt_flag & MNT_FORCE) != 0; 290 flags = WRITECLOSE | (forced ? FORCECLOSE : 0); 291 292 if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0) 293 return (error); 294 error = vfs_write_suspend_umnt(mp); 295 if (error != 0) 296 return (error); 297 if (!forced && tmpfs_check_rw_maps(mp)) { 298 error = EBUSY; 299 goto out; 300 } 301 VFS_TO_TMPFS(mp)->tm_ronly = 1; 302 MNT_ILOCK(mp); 303 mp->mnt_flag |= MNT_RDONLY; 304 MNT_IUNLOCK(mp); 305 for (;;) { 306 tmpfs_all_rw_maps(mp, tmpfs_revoke_rw_maps_cb, NULL); 307 tmpfs_update_mtime_all(mp); 308 error = vflush(mp, 0, flags, curthread); 309 if (error != 0) { 310 VFS_TO_TMPFS(mp)->tm_ronly = 0; 311 MNT_ILOCK(mp); 312 mp->mnt_flag &= ~MNT_RDONLY; 313 MNT_IUNLOCK(mp); 314 goto out; 315 } 316 if (!tmpfs_check_rw_maps(mp)) 317 break; 318 } 319 out: 320 vfs_write_resume(mp, 0); 321 return (error); 322 } 323 324 static int 325 tmpfs_mount(struct mount *mp) 326 { 327 const size_t nodes_per_page = howmany(PAGE_SIZE, 328 sizeof(struct tmpfs_dirent) + sizeof(struct tmpfs_node)); 329 struct tmpfs_mount *tmp; 330 struct tmpfs_node *root; 331 int error; 332 bool nomtime, nonc, pgread; 333 /* Size counters. */ 334 u_quad_t pages; 335 off_t nodes_max, size_max, maxfilesize, ea_max_size; 336 337 /* Root node attributes. */ 338 uid_t root_uid; 339 gid_t root_gid; 340 mode_t root_mode; 341 342 struct vattr va; 343 344 if (vfs_filteropt(mp->mnt_optnew, tmpfs_opts)) 345 return (EINVAL); 346 347 if (mp->mnt_flag & MNT_UPDATE) { 348 /* Only support update mounts for certain options. */ 349 if (vfs_filteropt(mp->mnt_optnew, tmpfs_updateopts) != 0) 350 return (EOPNOTSUPP); 351 tmp = VFS_TO_TMPFS(mp); 352 if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) == 0) { 353 /* 354 * On-the-fly resizing is not supported (yet). We still 355 * need to have "size" listed as "supported", otherwise 356 * trying to update fs that is listed in fstab with size 357 * parameter, say trying to change rw to ro or vice 358 * versa, would cause vfs_filteropt() to bail. 359 */ 360 if (size_max != tmp->tm_size_max) 361 return (EOPNOTSUPP); 362 } 363 if (vfs_getopt_size(mp->mnt_optnew, "easize", &ea_max_size) == 0) { 364 tmp->tm_ea_memory_max = ea_max_size; 365 } 366 if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) && 367 !tmp->tm_ronly) { 368 /* RW -> RO */ 369 return (tmpfs_rw_to_ro(mp)); 370 } else if (!vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) && 371 tmp->tm_ronly) { 372 /* RO -> RW */ 373 tmp->tm_ronly = 0; 374 MNT_ILOCK(mp); 375 mp->mnt_flag &= ~MNT_RDONLY; 376 MNT_IUNLOCK(mp); 377 } 378 tmp->tm_nomtime = vfs_getopt(mp->mnt_optnew, "nomtime", NULL, 379 0) == 0; 380 MNT_ILOCK(mp); 381 if ((mp->mnt_flag & MNT_UNION) == 0) { 382 mp->mnt_kern_flag |= MNTK_FPLOOKUP; 383 } else { 384 mp->mnt_kern_flag &= ~MNTK_FPLOOKUP; 385 } 386 MNT_IUNLOCK(mp); 387 return (0); 388 } 389 390 vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY); 391 error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred); 392 VOP_UNLOCK(mp->mnt_vnodecovered); 393 if (error) 394 return (error); 395 396 if (mp->mnt_cred->cr_ruid != 0 || 397 vfs_scanopt(mp->mnt_optnew, "gid", "%d", &root_gid) != 1) 398 root_gid = va.va_gid; 399 if (mp->mnt_cred->cr_ruid != 0 || 400 vfs_scanopt(mp->mnt_optnew, "uid", "%d", &root_uid) != 1) 401 root_uid = va.va_uid; 402 if (mp->mnt_cred->cr_ruid != 0 || 403 vfs_scanopt(mp->mnt_optnew, "mode", "%ho", &root_mode) != 1) 404 root_mode = va.va_mode; 405 if (vfs_getopt_size(mp->mnt_optnew, "inodes", &nodes_max) != 0) 406 nodes_max = 0; 407 if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) != 0) 408 size_max = 0; 409 if (vfs_getopt_size(mp->mnt_optnew, "maxfilesize", &maxfilesize) != 0) 410 maxfilesize = 0; 411 if (vfs_getopt_size(mp->mnt_optnew, "easize", &ea_max_size) != 0) 412 ea_max_size = 0; 413 nonc = vfs_getopt(mp->mnt_optnew, "nonc", NULL, NULL) == 0; 414 nomtime = vfs_getopt(mp->mnt_optnew, "nomtime", NULL, NULL) == 0; 415 pgread = vfs_getopt(mp->mnt_optnew, "pgread", NULL, NULL) == 0; 416 417 /* Do not allow mounts if we do not have enough memory to preserve 418 * the minimum reserved pages. */ 419 if (tmpfs_mem_avail() < TMPFS_PAGES_MINRESERVED) 420 return (ENOSPC); 421 422 /* Get the maximum number of memory pages this file system is 423 * allowed to use, based on the maximum size the user passed in 424 * the mount structure. A value of zero is treated as if the 425 * maximum available space was requested. */ 426 if (size_max == 0 || size_max > OFF_MAX - PAGE_SIZE || 427 (SIZE_MAX < OFF_MAX && size_max / PAGE_SIZE >= SIZE_MAX)) 428 pages = SIZE_MAX; 429 else { 430 size_max = roundup(size_max, PAGE_SIZE); 431 pages = howmany(size_max, PAGE_SIZE); 432 } 433 MPASS(pages > 0); 434 435 if (nodes_max <= 3) { 436 if (pages < INT_MAX / nodes_per_page) 437 nodes_max = pages * nodes_per_page; 438 else 439 nodes_max = INT_MAX; 440 } 441 if (nodes_max > INT_MAX) 442 nodes_max = INT_MAX; 443 MPASS(nodes_max >= 3); 444 445 /* Allocate the tmpfs mount structure and fill it. */ 446 tmp = (struct tmpfs_mount *)malloc(sizeof(struct tmpfs_mount), 447 M_TMPFSMNT, M_WAITOK | M_ZERO); 448 449 mtx_init(&tmp->tm_allnode_lock, "tmpfs allnode lock", NULL, MTX_DEF); 450 tmp->tm_nodes_max = nodes_max; 451 tmp->tm_nodes_inuse = 0; 452 tmp->tm_ea_memory_inuse = 0; 453 tmp->tm_refcount = 1; 454 tmp->tm_maxfilesize = maxfilesize > 0 ? maxfilesize : OFF_MAX; 455 tmp->tm_ea_memory_max = ea_max_size > 0 ? 456 ea_max_size : TMPFS_EA_MEMORY_RESERVED; 457 LIST_INIT(&tmp->tm_nodes_used); 458 459 tmp->tm_size_max = size_max; 460 tmp->tm_pages_max = pages; 461 tmp->tm_pages_used = 0; 462 new_unrhdr64(&tmp->tm_ino_unr, 2); 463 tmp->tm_ronly = (mp->mnt_flag & MNT_RDONLY) != 0; 464 tmp->tm_nonc = nonc; 465 tmp->tm_nomtime = nomtime; 466 tmp->tm_pgread = pgread; 467 468 /* Allocate the root node. */ 469 error = tmpfs_alloc_node(mp, tmp, VDIR, root_uid, root_gid, 470 root_mode & ALLPERMS, NULL, NULL, VNOVAL, &root); 471 472 if (error != 0 || root == NULL) { 473 free(tmp, M_TMPFSMNT); 474 return (error); 475 } 476 KASSERT(root->tn_id == 2, 477 ("tmpfs root with invalid ino: %ju", (uintmax_t)root->tn_id)); 478 tmp->tm_root = root; 479 480 MNT_ILOCK(mp); 481 mp->mnt_flag |= MNT_LOCAL; 482 mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED | 483 MNTK_NOMSYNC; 484 if (!nonc && (mp->mnt_flag & MNT_UNION) == 0) 485 mp->mnt_kern_flag |= MNTK_FPLOOKUP; 486 MNT_IUNLOCK(mp); 487 488 mp->mnt_data = tmp; 489 mp->mnt_stat.f_namemax = MAXNAMLEN; 490 vfs_getnewfsid(mp); 491 vfs_mountedfrom(mp, "tmpfs"); 492 493 return (0); 494 } 495 496 /* ARGSUSED2 */ 497 static int 498 tmpfs_unmount(struct mount *mp, int mntflags) 499 { 500 struct tmpfs_mount *tmp; 501 struct tmpfs_node *node; 502 int error, flags; 503 504 flags = (mntflags & MNT_FORCE) != 0 ? FORCECLOSE : 0; 505 tmp = VFS_TO_TMPFS(mp); 506 507 /* Stop writers */ 508 error = vfs_write_suspend_umnt(mp); 509 if (error != 0) 510 return (error); 511 /* 512 * At this point, nodes cannot be destroyed by any other 513 * thread because write suspension is started. 514 */ 515 516 for (;;) { 517 error = vflush(mp, 0, flags, curthread); 518 if (error != 0) { 519 vfs_write_resume(mp, VR_START_WRITE); 520 return (error); 521 } 522 MNT_ILOCK(mp); 523 if (mp->mnt_nvnodelistsize == 0) { 524 MNT_IUNLOCK(mp); 525 break; 526 } 527 MNT_IUNLOCK(mp); 528 if ((mntflags & MNT_FORCE) == 0) { 529 vfs_write_resume(mp, VR_START_WRITE); 530 return (EBUSY); 531 } 532 } 533 534 TMPFS_LOCK(tmp); 535 while ((node = LIST_FIRST(&tmp->tm_nodes_used)) != NULL) { 536 TMPFS_NODE_LOCK(node); 537 if (node->tn_type == VDIR) 538 tmpfs_dir_destroy(tmp, node); 539 if (tmpfs_free_node_locked(tmp, node, true)) 540 TMPFS_LOCK(tmp); 541 else 542 TMPFS_NODE_UNLOCK(node); 543 } 544 545 mp->mnt_data = NULL; 546 tmpfs_free_tmp(tmp); 547 vfs_write_resume(mp, VR_START_WRITE); 548 549 return (0); 550 } 551 552 void 553 tmpfs_free_tmp(struct tmpfs_mount *tmp) 554 { 555 TMPFS_MP_ASSERT_LOCKED(tmp); 556 MPASS(tmp->tm_refcount > 0); 557 558 tmp->tm_refcount--; 559 if (tmp->tm_refcount > 0) { 560 TMPFS_UNLOCK(tmp); 561 return; 562 } 563 TMPFS_UNLOCK(tmp); 564 565 mtx_destroy(&tmp->tm_allnode_lock); 566 /* 567 * We cannot assert that tmp->tm_pages_used == 0 there, 568 * because tmpfs vm_objects might be still mapped by some 569 * process and outlive the mount due to reference counting. 570 */ 571 MPASS(tmp->tm_nodes_inuse == 0); 572 573 free(tmp, M_TMPFSMNT); 574 } 575 576 static int 577 tmpfs_root(struct mount *mp, int flags, struct vnode **vpp) 578 { 579 int error; 580 581 error = tmpfs_alloc_vp(mp, VFS_TO_TMPFS(mp)->tm_root, flags, vpp); 582 if (error == 0) 583 (*vpp)->v_vflag |= VV_ROOT; 584 return (error); 585 } 586 587 static int 588 tmpfs_fhtovp(struct mount *mp, struct fid *fhp, int flags, 589 struct vnode **vpp) 590 { 591 struct tmpfs_fid_data tfd; 592 struct tmpfs_mount *tmp; 593 struct tmpfs_node *node; 594 int error; 595 596 if (fhp->fid_len != sizeof(tfd)) 597 return (EINVAL); 598 599 /* 600 * Copy from fid_data onto the stack to avoid unaligned pointer use. 601 * See the comment in sys/mount.h on struct fid for details. 602 */ 603 memcpy(&tfd, fhp->fid_data, fhp->fid_len); 604 605 tmp = VFS_TO_TMPFS(mp); 606 607 if (tfd.tfd_id >= tmp->tm_nodes_max) 608 return (EINVAL); 609 610 TMPFS_LOCK(tmp); 611 LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) { 612 if (node->tn_id == tfd.tfd_id && 613 node->tn_gen == tfd.tfd_gen) { 614 tmpfs_ref_node(node); 615 break; 616 } 617 } 618 TMPFS_UNLOCK(tmp); 619 620 if (node != NULL) { 621 error = tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, vpp); 622 tmpfs_free_node(tmp, node); 623 } else 624 error = EINVAL; 625 return (error); 626 } 627 628 /* ARGSUSED2 */ 629 static int 630 tmpfs_statfs(struct mount *mp, struct statfs *sbp) 631 { 632 struct tmpfs_mount *tmp; 633 size_t used; 634 635 tmp = VFS_TO_TMPFS(mp); 636 637 sbp->f_iosize = PAGE_SIZE; 638 sbp->f_bsize = PAGE_SIZE; 639 640 used = tmpfs_pages_used(tmp); 641 if (tmp->tm_pages_max != ULONG_MAX) 642 sbp->f_blocks = tmp->tm_pages_max; 643 else 644 sbp->f_blocks = used + tmpfs_mem_avail(); 645 if (sbp->f_blocks <= used) 646 sbp->f_bavail = 0; 647 else 648 sbp->f_bavail = sbp->f_blocks - used; 649 sbp->f_bfree = sbp->f_bavail; 650 used = tmp->tm_nodes_inuse; 651 sbp->f_files = tmp->tm_nodes_max; 652 if (sbp->f_files <= used) 653 sbp->f_ffree = 0; 654 else 655 sbp->f_ffree = sbp->f_files - used; 656 /* sbp->f_owner = tmp->tn_uid; */ 657 658 return (0); 659 } 660 661 static int 662 tmpfs_sync(struct mount *mp, int waitfor) 663 { 664 665 if (waitfor == MNT_SUSPEND) { 666 MNT_ILOCK(mp); 667 mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED; 668 MNT_IUNLOCK(mp); 669 } else if (waitfor == MNT_LAZY) { 670 tmpfs_update_mtime_lazy(mp); 671 } 672 return (0); 673 } 674 675 static int 676 tmpfs_init(struct vfsconf *conf) 677 { 678 int res; 679 680 res = tmpfs_subr_init(); 681 if (res != 0) 682 return (res); 683 memcpy(&tmpfs_fnops, &vnops, sizeof(struct fileops)); 684 tmpfs_fnops.fo_close = tmpfs_fo_close; 685 return (0); 686 } 687 688 static int 689 tmpfs_uninit(struct vfsconf *conf) 690 { 691 tmpfs_subr_uninit(); 692 return (0); 693 } 694 695 /* 696 * tmpfs vfs operations. 697 */ 698 struct vfsops tmpfs_vfsops = { 699 .vfs_mount = tmpfs_mount, 700 .vfs_unmount = tmpfs_unmount, 701 .vfs_root = vfs_cache_root, 702 .vfs_cachedroot = tmpfs_root, 703 .vfs_statfs = tmpfs_statfs, 704 .vfs_fhtovp = tmpfs_fhtovp, 705 .vfs_sync = tmpfs_sync, 706 .vfs_init = tmpfs_init, 707 .vfs_uninit = tmpfs_uninit, 708 }; 709 VFS_SET(tmpfs_vfsops, tmpfs, VFCF_JAIL); 710 711 #ifdef DDB 712 #include <ddb/ddb.h> 713 714 static void 715 db_print_tmpfs(struct mount *mp, struct tmpfs_mount *tmp) 716 { 717 db_printf("mp %p (%s) tmp %p\n", mp, 718 mp->mnt_stat.f_mntonname, tmp); 719 db_printf( 720 "\tsize max %ju pages max %lu pages used %lu\n" 721 "\tinodes max %ju inodes inuse %ju ea inuse %ju refcount %ju\n" 722 "\tmaxfilesize %ju r%c %snamecache %smtime\n", 723 (uintmax_t)tmp->tm_size_max, tmp->tm_pages_max, tmp->tm_pages_used, 724 (uintmax_t)tmp->tm_nodes_max, (uintmax_t)tmp->tm_nodes_inuse, 725 (uintmax_t)tmp->tm_ea_memory_inuse, (uintmax_t)tmp->tm_refcount, 726 (uintmax_t)tmp->tm_maxfilesize, 727 tmp->tm_ronly ? 'o' : 'w', tmp->tm_nonc ? "no" : "", 728 tmp->tm_nomtime ? "no" : ""); 729 } 730 731 DB_SHOW_COMMAND(tmpfs, db_show_tmpfs) 732 { 733 struct mount *mp; 734 struct tmpfs_mount *tmp; 735 736 if (have_addr) { 737 mp = (struct mount *)addr; 738 tmp = VFS_TO_TMPFS(mp); 739 db_print_tmpfs(mp, tmp); 740 return; 741 } 742 743 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 744 if (strcmp(mp->mnt_stat.f_fstypename, tmpfs_vfsconf.vfc_name) == 745 0) { 746 tmp = VFS_TO_TMPFS(mp); 747 db_print_tmpfs(mp, tmp); 748 } 749 } 750 } 751 #endif /* DDB */ 752