/*	$NetBSD: tmpfs_vfsops.c,v 1.10 2005/12/11 12:24:29 christos Exp $	*/

/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Efficient memory file system.
 *
 * tmpfs is a file system that uses FreeBSD's virtual memory
 * sub-system to store file data and metadata in an efficient way.
 * This means that it does not follow the structure of an on-disk file
 * system because it simply does not need to.  Instead, it uses
 * memory-specific data structures and algorithms to automatically
 * allocate and release resources.
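 *
 * As an illustrative usage sketch (see the tmpfs manual page for the
 * full option list), an instance capped at 64 MB with a sticky,
 * world-writable root could be mounted with:
 *
 *	mount -t tmpfs -o size=64m,mode=1777 tmpfs /tmp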
 */

#include "opt_ddb.h"
#include "opt_tmpfs.h"

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dirent.h>
#include <sys/file.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>

#include <fs/tmpfs/tmpfs.h>

/*
 * Default permission for root node.
 */
#define	TMPFS_DEFAULT_ROOT_MODE	(S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)

static MALLOC_DEFINE(M_TMPFSMNT, "tmpfs mount", "tmpfs mount structures");
MALLOC_DEFINE(M_TMPFSNAME, "tmpfs name", "tmpfs file names");

static int	tmpfs_mount(struct mount *);
static int	tmpfs_unmount(struct mount *, int);
static int	tmpfs_root(struct mount *, int flags, struct vnode **);
static int	tmpfs_fhtovp(struct mount *, struct fid *, int,
		    struct vnode **);
static int	tmpfs_statfs(struct mount *, struct statfs *);

static const char *tmpfs_opts[] = {
	"from", "easize", "size", "maxfilesize", "inodes", "uid", "gid", "mode",
	"export", "union", "nonc", "nomtime", "nosymfollow", "pgread", NULL
};

static const char *tmpfs_updateopts[] = {
	"from", "easize", "export", "nomtime", "size", "nosymfollow", NULL
};

/*
 * Returns non-zero if the vnode is a regular file whose VM object may
 * carry dirty pages; used to filter the lazy vnode iteration below.
 */
static int
tmpfs_update_mtime_lazy_filter(struct vnode *vp, void *arg)
{
	struct vm_object *obj;

	if (vp->v_type != VREG)
		return (0);

	obj = atomic_load_ptr(&vp->v_object);
	if (obj == NULL)
		return (0);

	return (vm_object_mightbedirty_(obj));
}

static void
tmpfs_update_mtime_lazy(struct mount *mp)
{
	struct vnode *vp, *mvp;

	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, tmpfs_update_mtime_lazy_filter,
	    NULL) {
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) != 0)
			continue;
		tmpfs_check_mtime(vp);
		vput(vp);
	}
}

static void
tmpfs_update_mtime_all(struct mount *mp)
{
	struct vnode *vp, *mvp;

	if (VFS_TO_TMPFS(mp)->tm_nomtime)
		return;
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_type != VREG) {
			VI_UNLOCK(vp);
			continue;
		}
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK) != 0)
			continue;
		tmpfs_check_mtime(vp);
		tmpfs_update(vp);
		vput(vp);
	}
}

struct tmpfs_check_rw_maps_arg {
	bool found;
};

static bool
tmpfs_check_rw_maps_cb(struct mount *mp __unused, vm_map_t map __unused,
    vm_map_entry_t entry __unused, void *arg)
{
	struct tmpfs_check_rw_maps_arg *a;

	a = arg;
	a->found = true;
	return (true);
}

/*
 * Revoke write permissions from all mappings of regular files
 * belonging to the specified tmpfs mount.
 */
static bool
tmpfs_revoke_rw_maps_cb(struct mount *mp __unused, vm_map_t map,
    vm_map_entry_t entry, void *arg __unused)
{

	/*
	 * XXXKIB: might it be better to invalidate the mapping
	 * instead?  The process is not going to be happy in
	 * any case.
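	 *
	 * Once VM_PROT_WRITE is cleared from both the entry's
	 * protection and its max_protection, later write attempts
	 * through the mapping fault without being able to
	 * re-upgrade, so the writer takes a signal rather than
	 * modifying a now read-only file system.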
	 */
	entry->max_protection &= ~VM_PROT_WRITE;
	if ((entry->protection & VM_PROT_WRITE) != 0) {
		entry->protection &= ~VM_PROT_WRITE;
		pmap_protect(map->pmap, entry->start, entry->end,
		    entry->protection);
	}
	return (false);
}

/*
 * Walk the map entries of every process that has a writable mapping
 * of a regular file on the given tmpfs mount, invoking the callback
 * on each such entry.  The callback returns true to stop the walk.
 */
static void
tmpfs_all_rw_maps(struct mount *mp, bool (*cb)(struct mount *mp, vm_map_t,
    vm_map_entry_t, void *), void *cb_arg)
{
	struct proc *p;
	struct vmspace *vm;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t object;
	struct vnode *vp;
	int gen;
	bool terminate;

	terminate = false;
	sx_slock(&allproc_lock);
again:
	gen = allproc_gen;
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_state != PRS_NORMAL || (p->p_flag & (P_INEXEC |
		    P_SYSTEM | P_WEXIT)) != 0) {
			PROC_UNLOCK(p);
			continue;
		}
		vm = vmspace_acquire_ref(p);
		_PHOLD_LITE(p);
		PROC_UNLOCK(p);
		if (vm == NULL) {
			PRELE(p);
			continue;
		}
		sx_sunlock(&allproc_lock);
		map = &vm->vm_map;

		vm_map_lock(map);
		if (map->busy)
			vm_map_wait_busy(map);
		VM_MAP_ENTRY_FOREACH(entry, map) {
			if ((entry->eflags & (MAP_ENTRY_GUARD |
			    MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_COW)) != 0 ||
			    (entry->max_protection & VM_PROT_WRITE) == 0)
				continue;
			object = entry->object.vm_object;
			if (object == NULL || object->type != tmpfs_pager_type)
				continue;
			/*
			 * No need to dig into shadow chain, mapping
			 * of the object not at top is readonly.
			 */

			VM_OBJECT_RLOCK(object);
			if (object->type == OBJT_DEAD) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}
			MPASS(object->ref_count > 1);
			if ((object->flags & OBJ_TMPFS) == 0) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}
			vp = VM_TO_TMPFS_VP(object);
			if (vp->v_mount != mp) {
				VM_OBJECT_RUNLOCK(object);
				continue;
			}

			terminate = cb(mp, map, entry, cb_arg);
			VM_OBJECT_RUNLOCK(object);
			if (terminate)
				break;
		}
		vm_map_unlock(map);

		vmspace_free(vm);
		sx_slock(&allproc_lock);
		PRELE(p);
		if (terminate)
			break;
	}
	if (!terminate && gen != allproc_gen)
		goto again;
	sx_sunlock(&allproc_lock);
}

/*
 * Returns true if at least one process maps a regular file on this
 * mount with write permission.
 */
static bool
tmpfs_check_rw_maps(struct mount *mp)
{
	struct tmpfs_check_rw_maps_arg ca;

	ca.found = false;
	tmpfs_all_rw_maps(mp, tmpfs_check_rw_maps_cb, &ca);
	return (ca.found);
}

static int
tmpfs_rw_to_ro(struct mount *mp)
{
	int error, flags;
	bool forced;

	forced = (mp->mnt_flag & MNT_FORCE) != 0;
	flags = WRITECLOSE | (forced ?
	    FORCECLOSE : 0);

	if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
		return (error);
	error = vfs_write_suspend_umnt(mp);
	if (error != 0)
		return (error);
	if (!forced && tmpfs_check_rw_maps(mp)) {
		error = EBUSY;
		goto out;
	}
	VFS_TO_TMPFS(mp)->tm_ronly = 1;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_RDONLY;
	MNT_IUNLOCK(mp);
	for (;;) {
		tmpfs_all_rw_maps(mp, tmpfs_revoke_rw_maps_cb, NULL);
		tmpfs_update_mtime_all(mp);
		error = vflush(mp, 0, flags, curthread);
		if (error != 0) {
			VFS_TO_TMPFS(mp)->tm_ronly = 0;
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_RDONLY;
			MNT_IUNLOCK(mp);
			goto out;
		}
		if (!tmpfs_check_rw_maps(mp))
			break;
	}
out:
	vfs_write_resume(mp, 0);
	return (error);
}

static int
tmpfs_mount(struct mount *mp)
{
	const size_t nodes_per_page = howmany(PAGE_SIZE,
	    sizeof(struct tmpfs_dirent) + sizeof(struct tmpfs_node));
	struct tmpfs_mount *tmp;
	struct tmpfs_node *root;
	int error;
	bool nomtime, nonc, pgread;
	/* Size counters. */
	u_quad_t pages;
	off_t nodes_max, size_max, maxfilesize, ea_max_size;

	/* Root node attributes. */
	uid_t root_uid;
	gid_t root_gid;
	mode_t root_mode;

	struct vattr va;

	if (vfs_filteropt(mp->mnt_optnew, tmpfs_opts))
		return (EINVAL);

	if (mp->mnt_flag & MNT_UPDATE) {
		/* Only support update mounts for certain options. */
		if (vfs_filteropt(mp->mnt_optnew, tmpfs_updateopts) != 0)
			return (EOPNOTSUPP);
		tmp = VFS_TO_TMPFS(mp);
		if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) == 0) {
			/*
			 * On-the-fly resizing is not supported (yet).  We
			 * still need to have "size" listed as "supported";
			 * otherwise, trying to update a file system that is
			 * listed in fstab with a size parameter, say to
			 * change rw to ro or vice versa, would cause
			 * vfs_filteropt() to bail.
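			 *
			 * For instance, with an fstab line such as
			 * "tmpfs /tmp tmpfs rw,size=1g 0 0", a later
			 * "mount -u -o ro /tmp" passes size=1g again and
			 * is accepted here as long as the value matches.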
			 */
			if (size_max != tmp->tm_size_max)
				return (EOPNOTSUPP);
		}
		if (vfs_getopt_size(mp->mnt_optnew, "easize",
		    &ea_max_size) == 0) {
			tmp->tm_ea_memory_max = ea_max_size;
		}
		if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) &&
		    !tmp->tm_ronly) {
			/* RW -> RO */
			return (tmpfs_rw_to_ro(mp));
		} else if (!vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) &&
		    tmp->tm_ronly) {
			/* RO -> RW */
			tmp->tm_ronly = 0;
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_RDONLY;
			MNT_IUNLOCK(mp);
		}
		tmp->tm_nomtime = vfs_getopt(mp->mnt_optnew, "nomtime", NULL,
		    0) == 0;
		MNT_ILOCK(mp);
		if ((mp->mnt_flag & MNT_UNION) == 0) {
			mp->mnt_kern_flag |= MNTK_FPLOOKUP;
		} else {
			mp->mnt_kern_flag &= ~MNTK_FPLOOKUP;
		}
		MNT_IUNLOCK(mp);
		return (0);
	}

	vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred);
	VOP_UNLOCK(mp->mnt_vnodecovered);
	if (error)
		return (error);

	/*
	 * Unprivileged mounts ignore the uid/gid/mode options and
	 * inherit the root node's attributes from the covered vnode.
	 */
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "gid", "%d", &root_gid) != 1)
		root_gid = va.va_gid;
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "uid", "%d", &root_uid) != 1)
		root_uid = va.va_uid;
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "mode", "%ho", &root_mode) != 1)
		root_mode = va.va_mode;
	if (vfs_getopt_size(mp->mnt_optnew, "inodes", &nodes_max) != 0)
		nodes_max = 0;
	if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) != 0)
		size_max = 0;
	if (vfs_getopt_size(mp->mnt_optnew, "maxfilesize", &maxfilesize) != 0)
		maxfilesize = 0;
	if (vfs_getopt_size(mp->mnt_optnew, "easize", &ea_max_size) != 0)
		ea_max_size = 0;
	nonc = vfs_getopt(mp->mnt_optnew, "nonc", NULL, NULL) == 0;
	nomtime = vfs_getopt(mp->mnt_optnew, "nomtime", NULL, NULL) == 0;
	pgread = vfs_getopt(mp->mnt_optnew, "pgread", NULL, NULL) == 0;

	/*
	 * Do not allow mounts if we do not have enough memory to preserve
	 * the minimum reserved pages.
	 */
	if (tmpfs_mem_avail() < TMPFS_PAGES_MINRESERVED)
		return (ENOSPC);

	/*
	 * Get the maximum number of memory pages this file system is
	 * allowed to use, based on the maximum size the user passed in
	 * the mount structure.  A value of zero is treated as if the
	 * maximum available space was requested.
	 */
	if (size_max == 0 || size_max > OFF_MAX - PAGE_SIZE ||
	    (SIZE_MAX < OFF_MAX && size_max / PAGE_SIZE >= SIZE_MAX))
		pages = SIZE_MAX;
	else {
		size_max = roundup(size_max, PAGE_SIZE);
		pages = howmany(size_max, PAGE_SIZE);
	}
	MPASS(pages > 0);

	if (nodes_max <= 3) {
		if (pages < INT_MAX / nodes_per_page)
			nodes_max = pages * nodes_per_page;
		else
			nodes_max = INT_MAX;
	}
	if (nodes_max > INT_MAX)
		nodes_max = INT_MAX;
	MPASS(nodes_max >= 3);

	/* Allocate the tmpfs mount structure and fill it. */
	tmp = (struct tmpfs_mount *)malloc(sizeof(struct tmpfs_mount),
	    M_TMPFSMNT, M_WAITOK | M_ZERO);

	mtx_init(&tmp->tm_allnode_lock, "tmpfs allnode lock", NULL, MTX_DEF);
	tmp->tm_nodes_max = nodes_max;
	tmp->tm_nodes_inuse = 0;
	tmp->tm_ea_memory_inuse = 0;
	tmp->tm_refcount = 1;
	tmp->tm_maxfilesize = maxfilesize > 0 ? maxfilesize : OFF_MAX;
	tmp->tm_ea_memory_max = ea_max_size > 0 ?
	    ea_max_size : TMPFS_EA_MEMORY_RESERVED;
	LIST_INIT(&tmp->tm_nodes_used);

	tmp->tm_size_max = size_max;
	tmp->tm_pages_max = pages;
	tmp->tm_pages_used = 0;
	new_unrhdr64(&tmp->tm_ino_unr, 2);
	tmp->tm_ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	tmp->tm_nonc = nonc;
	tmp->tm_nomtime = nomtime;
	tmp->tm_pgread = pgread;

	/* Allocate the root node. */
	error = tmpfs_alloc_node(mp, tmp, VDIR, root_uid, root_gid,
	    root_mode & ALLPERMS, NULL, NULL, VNOVAL, &root);

	if (error != 0 || root == NULL) {
		free(tmp, M_TMPFSMNT);
		return (error);
	}
	KASSERT(root->tn_id == 2,
	    ("tmpfs root with invalid ino: %ju", (uintmax_t)root->tn_id));
	tmp->tm_root = root;

	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
	    MNTK_NOMSYNC;
	if (!nonc && (mp->mnt_flag & MNT_UNION) == 0)
		mp->mnt_kern_flag |= MNTK_FPLOOKUP;
	MNT_IUNLOCK(mp);

	mp->mnt_data = tmp;
	mp->mnt_stat.f_namemax = MAXNAMLEN;
	vfs_getnewfsid(mp);
	vfs_mountedfrom(mp, "tmpfs");

	return (0);
}

/* ARGSUSED2 */
static int
tmpfs_unmount(struct mount *mp, int mntflags)
{
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	int error, flags;

	flags = (mntflags & MNT_FORCE) != 0 ? FORCECLOSE : 0;
	tmp = VFS_TO_TMPFS(mp);

	/* Stop writers. */
	error = vfs_write_suspend_umnt(mp);
	if (error != 0)
		return (error);
	/*
	 * At this point, nodes cannot be destroyed by any other
	 * thread because write suspension is started.
	 */

	for (;;) {
		error = vflush(mp, 0, flags, curthread);
		if (error != 0) {
			vfs_write_resume(mp, VR_START_WRITE);
			return (error);
		}
		MNT_ILOCK(mp);
		if (mp->mnt_nvnodelistsize == 0) {
			MNT_IUNLOCK(mp);
			break;
		}
		MNT_IUNLOCK(mp);
		if ((mntflags & MNT_FORCE) == 0) {
			vfs_write_resume(mp, VR_START_WRITE);
			return (EBUSY);
		}
	}

	TMPFS_LOCK(tmp);
	while ((node = LIST_FIRST(&tmp->tm_nodes_used)) != NULL) {
		TMPFS_NODE_LOCK(node);
		if (node->tn_type == VDIR)
			tmpfs_dir_destroy(tmp, node);
		if (tmpfs_free_node_locked(tmp, node, true))
			TMPFS_LOCK(tmp);
		else
			TMPFS_NODE_UNLOCK(node);
	}

	mp->mnt_data = NULL;
	tmpfs_free_tmp(tmp);
	vfs_write_resume(mp, VR_START_WRITE);

	return (0);
}

/*
 * Drop a reference on the given tmpfs mount, freeing it when the last
 * reference goes away.  Called with the tmpfs mount lock held; the
 * lock is released before returning on all paths.
 */
void
tmpfs_free_tmp(struct tmpfs_mount *tmp)
{
	TMPFS_MP_ASSERT_LOCKED(tmp);
	MPASS(tmp->tm_refcount > 0);

	tmp->tm_refcount--;
	if (tmp->tm_refcount > 0) {
		TMPFS_UNLOCK(tmp);
		return;
	}
	TMPFS_UNLOCK(tmp);

	mtx_destroy(&tmp->tm_allnode_lock);
	/*
	 * We cannot assert that tmp->tm_pages_used == 0 here,
	 * because tmpfs vm_objects might still be mapped by some
	 * process and outlive the mount due to reference counting.
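	 * Such orphaned objects release their pages when the last
	 * mapping goes away and the object is terminated, so the
	 * memory is eventually returned even though the mount is
	 * long gone by then.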
	 */
	MPASS(tmp->tm_nodes_inuse == 0);

	free(tmp, M_TMPFSMNT);
}

static int
tmpfs_root(struct mount *mp, int flags, struct vnode **vpp)
{
	int error;

	error = tmpfs_alloc_vp(mp, VFS_TO_TMPFS(mp)->tm_root, flags, vpp);
	if (error == 0)
		(*vpp)->v_vflag |= VV_ROOT;
	return (error);
}

static int
tmpfs_fhtovp(struct mount *mp, struct fid *fhp, int flags,
    struct vnode **vpp)
{
	struct tmpfs_fid_data tfd;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	int error;

	if (fhp->fid_len != sizeof(tfd))
		return (EINVAL);

	/*
	 * Copy from fid_data onto the stack to avoid unaligned pointer use.
	 * See the comment in sys/mount.h on struct fid for details.
	 */
	memcpy(&tfd, fhp->fid_data, fhp->fid_len);

	tmp = VFS_TO_TMPFS(mp);

	if (tfd.tfd_id >= tmp->tm_nodes_max)
		return (EINVAL);

	TMPFS_LOCK(tmp);
	LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
		if (node->tn_id == tfd.tfd_id &&
		    node->tn_gen == tfd.tfd_gen) {
			tmpfs_ref_node(node);
			break;
		}
	}
	TMPFS_UNLOCK(tmp);

	if (node != NULL) {
		error = tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, vpp);
		tmpfs_free_node(tmp, node);
	} else
		error = EINVAL;
	return (error);
}

/* ARGSUSED2 */
static int
tmpfs_statfs(struct mount *mp, struct statfs *sbp)
{
	struct tmpfs_mount *tmp;
	size_t used;

	tmp = VFS_TO_TMPFS(mp);

	sbp->f_iosize = PAGE_SIZE;
	sbp->f_bsize = PAGE_SIZE;

	used = tmpfs_pages_used(tmp);
	if (tmp->tm_pages_max != ULONG_MAX)
		sbp->f_blocks = tmp->tm_pages_max;
	else
		sbp->f_blocks = used + tmpfs_mem_avail();
	if (sbp->f_blocks <= used)
		sbp->f_bavail = 0;
	else
		sbp->f_bavail = sbp->f_blocks - used;
	sbp->f_bfree = sbp->f_bavail;
	used = tmp->tm_nodes_inuse;
	sbp->f_files = tmp->tm_nodes_max;
	if (sbp->f_files <= used)
		sbp->f_ffree = 0;
	else
		sbp->f_ffree = sbp->f_files - used;
	/* sbp->f_owner = tmp->tn_uid; */

	return (0);
}

static int
tmpfs_sync(struct mount *mp, int waitfor)
{

	if (waitfor == MNT_SUSPEND) {
		MNT_ILOCK(mp);
		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
		MNT_IUNLOCK(mp);
	} else if (waitfor == MNT_LAZY) {
		tmpfs_update_mtime_lazy(mp);
	}
	return (0);
}

static int
tmpfs_init(struct vfsconf *conf)
{
	int res;

	res = tmpfs_subr_init();
	if (res != 0)
		return (res);
	memcpy(&tmpfs_fnops, &vnops, sizeof(struct fileops));
	tmpfs_fnops.fo_close = tmpfs_fo_close;
	return (0);
}

static int
tmpfs_uninit(struct vfsconf *conf)
{
	tmpfs_subr_uninit();
	return (0);
}

/*
 * tmpfs vfs operations.
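 *
 * Note that vfs_root points at the generic vfs_cache_root(), which
 * returns the cached root vnode when possible; vfs_cachedroot is the
 * fall-back that actually allocates the root vnode via tmpfs_root().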
 */
struct vfsops tmpfs_vfsops = {
	.vfs_mount =		tmpfs_mount,
	.vfs_unmount =		tmpfs_unmount,
	.vfs_root =		vfs_cache_root,
	.vfs_cachedroot =	tmpfs_root,
	.vfs_statfs =		tmpfs_statfs,
	.vfs_fhtovp =		tmpfs_fhtovp,
	.vfs_sync =		tmpfs_sync,
	.vfs_init =		tmpfs_init,
	.vfs_uninit =		tmpfs_uninit,
};
VFS_SET(tmpfs_vfsops, tmpfs, VFCF_JAIL);

#ifdef DDB
#include <ddb/ddb.h>

static void
db_print_tmpfs(struct mount *mp, struct tmpfs_mount *tmp)
{
	db_printf("mp %p (%s) tmp %p\n", mp,
	    mp->mnt_stat.f_mntonname, tmp);
	db_printf(
	    "\tsize max %ju pages max %lu pages used %lu\n"
	    "\tinodes max %ju inodes inuse %ju ea inuse %ju refcount %ju\n"
	    "\tmaxfilesize %ju r%c %snamecache %smtime\n",
	    (uintmax_t)tmp->tm_size_max, tmp->tm_pages_max,
	    tmp->tm_pages_used,
	    (uintmax_t)tmp->tm_nodes_max, (uintmax_t)tmp->tm_nodes_inuse,
	    (uintmax_t)tmp->tm_ea_memory_inuse, (uintmax_t)tmp->tm_refcount,
	    (uintmax_t)tmp->tm_maxfilesize,
	    tmp->tm_ronly ? 'o' : 'w', tmp->tm_nonc ? "no" : "",
	    tmp->tm_nomtime ? "no" : "");
}

/*
 * "show tmpfs [addr]": dump the given tmpfs mount or, without an
 * address, every tmpfs mount in the system.
 */
DB_SHOW_COMMAND(tmpfs, db_show_tmpfs)
{
	struct mount *mp;
	struct tmpfs_mount *tmp;

	if (have_addr) {
		mp = (struct mount *)addr;
		tmp = VFS_TO_TMPFS(mp);
		db_print_tmpfs(mp, tmp);
		return;
	}

	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (strcmp(mp->mnt_stat.f_fstypename,
		    tmpfs_vfsconf.vfc_name) == 0) {
			tmp = VFS_TO_TMPFS(mp);
			db_print_tmpfs(mp, tmp);
		}
	}
}
#endif /* DDB */