/*	$NetBSD: tmpfs_vfsops.c,v 1.10 2005/12/11 12:24:29 christos Exp $	*/

/*-
 * SPDX-License-Identifier: BSD-2-Clause-NetBSD
 *
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Efficient memory file system.
 *
 * tmpfs is a file system that uses FreeBSD's virtual memory
 * sub-system to store file data and metadata in an efficient way.
 * This means that it does not follow the structure of an on-disk file
 * system because it simply does not need to.  Instead, it uses
 * memory-specific data structures and algorithms to automatically
 * allocate and release resources.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>

#include <fs/tmpfs/tmpfs.h>

/*
 * Default permission for root node: rwxr-xr-x (0755).
 */
#define TMPFS_DEFAULT_ROOT_MODE	(S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)

MALLOC_DEFINE(M_TMPFSMNT, "tmpfs mount", "tmpfs mount structures");
MALLOC_DEFINE(M_TMPFSNAME, "tmpfs name", "tmpfs file names");

static int	tmpfs_mount(struct mount *);
static int	tmpfs_unmount(struct mount *, int);
static int	tmpfs_root(struct mount *, int flags, struct vnode **);
static int	tmpfs_fhtovp(struct mount *, struct fid *, int,
		    struct vnode **);
static int	tmpfs_statfs(struct mount *, struct statfs *);
static void	tmpfs_susp_clean(struct mount *);

/* Mount options recognized at initial mount time. */
static const char *tmpfs_opts[] = {
	"from", "size", "maxfilesize", "inodes", "uid", "gid", "mode", "export",
	"union", "nonc", NULL
};

/* Subset of options that may be changed on an MNT_UPDATE remount. */
static const char *tmpfs_updateopts[] = {
	"from", "export", NULL
};

/*
 * UMA constructor for tmpfs nodes: runs on every allocation from the
 * zone.  Resets the per-use fields and bumps the generation counter so
 * stale file handles referring to a recycled node fail to match in
 * tmpfs_fhtovp().  tn_interlock and tn_gen's initial value are set up
 * once per slab item by tmpfs_node_init() instead.
 */
static int
tmpfs_node_ctor(void *mem, int size, void *arg, int flags)
{
	struct tmpfs_node *node = (struct tmpfs_node *)mem;

	node->tn_gen++;
	node->tn_size = 0;
	node->tn_status = 0;
	node->tn_flags = 0;
	node->tn_links = 0;
	node->tn_vnode = NULL;
	node->tn_vpstate = 0;

	return (0);
}

/*
 * UMA destructor: mark the node as holding no file type while it sits
 * free in the zone.
 */
static void
tmpfs_node_dtor(void *mem, int size, void *arg)
{
	struct tmpfs_node *node = (struct tmpfs_node *)mem;

	node->tn_type = VNON;
}

/*
 * UMA init: one-time initialization when an item is first imported into
 * the zone.  The generation starts at a random value so file handles
 * are not trivially guessable.
 */
static int
tmpfs_node_init(void *mem, int size, int flags)
{
	struct tmpfs_node *node = (struct tmpfs_node *)mem;

	node->tn_id = 0;
	mtx_init(&node->tn_interlock, "tmpfs node interlock", NULL, MTX_DEF);
	node->tn_gen = arc4random();

	return (0);
}

/*
 * UMA fini: counterpart of tmpfs_node_init(), run when the item is
 * released back to the VM.
 */
static void
tmpfs_node_fini(void *mem, int size)
{
	struct tmpfs_node *node = (struct tmpfs_node *)mem;

	mtx_destroy(&node->tn_interlock);
}

/*
 * VFS_MOUNT(9) implementation: parse the options, size the file system,
 * allocate the struct tmpfs_mount together with its UMA zones and inode
 * unit-number allocator, and create the root directory node.
 *
 * Returns 0 on success or an errno on failure.
 */
static int
tmpfs_mount(struct mount *mp)
{
	/*
	 * Estimate of how many nodes one page of memory can back; used to
	 * derive a default inode limit from the page limit below.
	 */
	const size_t nodes_per_page = howmany(PAGE_SIZE,
	    sizeof(struct tmpfs_dirent) + sizeof(struct tmpfs_node));
	struct tmpfs_mount *tmp;
	struct tmpfs_node *root;
	struct thread *td = curthread;
	int error;
	bool nonc;
	/* Size counters. */
	u_quad_t pages;
	off_t nodes_max, size_max, maxfilesize;

	/* Root node attributes. */
	uid_t root_uid;
	gid_t root_gid;
	mode_t root_mode;

	struct vattr va;

	/* Jails must be explicitly allowed to mount tmpfs. */
	if (!prison_allow(td->td_ucred, PR_ALLOW_MOUNT_TMPFS))
		return (EPERM);

	if (vfs_filteropt(mp->mnt_optnew, tmpfs_opts))
		return (EINVAL);

	if (mp->mnt_flag & MNT_UPDATE) {
		/* Only support update mounts for certain options. */
		if (vfs_filteropt(mp->mnt_optnew, tmpfs_updateopts) != 0)
			return (EOPNOTSUPP);
		/* Flipping ro/rw on remount is not supported. */
		if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) !=
		    ((struct tmpfs_mount *)mp->mnt_data)->tm_ronly)
			return (EOPNOTSUPP);
		return (0);
	}

	/*
	 * Fetch the attributes of the covered vnode: they supply the
	 * defaults for the root node's owner, group and mode.
	 */
	vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred);
	VOP_UNLOCK(mp->mnt_vnodecovered, 0);
	if (error)
		return (error);

	/*
	 * Only a mount initiated by real uid 0 may override the root
	 * node attributes; unprivileged mounters always inherit them
	 * from the covered vnode.
	 */
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "gid", "%d", &root_gid) != 1)
		root_gid = va.va_gid;
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "uid", "%d", &root_uid) != 1)
		root_uid = va.va_uid;
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "mode", "%ho", &root_mode) != 1)
		root_mode = va.va_mode;
	/* A value of 0 for any size option means "no explicit limit". */
	if (vfs_getopt_size(mp->mnt_optnew, "inodes", &nodes_max) != 0)
		nodes_max = 0;
	if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) != 0)
		size_max = 0;
	if (vfs_getopt_size(mp->mnt_optnew, "maxfilesize", &maxfilesize) != 0)
		maxfilesize = 0;
	/* "nonc" disables the use of the name cache for this mount. */
	nonc = vfs_getopt(mp->mnt_optnew, "nonc", NULL, NULL) == 0;

	/* Do not allow mounts if we do not have enough memory to preserve
	 * the minimum reserved pages. */
	if (tmpfs_mem_avail() < TMPFS_PAGES_MINRESERVED)
		return (ENOSPC);

	/* Get the maximum number of memory pages this file system is
	 * allowed to use, based on the maximum size the user passed in
	 * the mount structure.  A value of zero is treated as if the
	 * maximum available space was requested. */
	if (size_max == 0 || size_max > OFF_MAX - PAGE_SIZE ||
	    (SIZE_MAX < OFF_MAX && size_max / PAGE_SIZE >= SIZE_MAX))
		/* Unlimited, or rounding/division below would overflow. */
		pages = SIZE_MAX;
	else {
		size_max = roundup(size_max, PAGE_SIZE);
		pages = howmany(size_max, PAGE_SIZE);
	}
	MPASS(pages > 0);

	/*
	 * If no usable inode limit was given, derive one from the page
	 * limit, clamped to INT_MAX (inode numbers are ints).
	 */
	if (nodes_max <= 3) {
		if (pages < INT_MAX / nodes_per_page)
			nodes_max = pages * nodes_per_page;
		else
			nodes_max = INT_MAX;
	}
	if (nodes_max > INT_MAX)
		nodes_max = INT_MAX;
	MPASS(nodes_max >= 3);

	/* Allocate the tmpfs mount structure and fill it. */
	tmp = (struct tmpfs_mount *)malloc(sizeof(struct tmpfs_mount),
	    M_TMPFSMNT, M_WAITOK | M_ZERO);

	mtx_init(&tmp->tm_allnode_lock, "tmpfs allnode lock", NULL, MTX_DEF);
	tmp->tm_nodes_max = nodes_max;
	tmp->tm_nodes_inuse = 0;
	tmp->tm_refcount = 1;
	tmp->tm_maxfilesize = maxfilesize > 0 ? maxfilesize : OFF_MAX;
	LIST_INIT(&tmp->tm_nodes_used);

	tmp->tm_pages_max = pages;
	tmp->tm_pages_used = 0;
	/* Inode numbers start at 2; ino 2 is reserved for the root below. */
	tmp->tm_ino_unr = new_unrhdr(2, INT_MAX, &tmp->tm_allnode_lock);
	tmp->tm_dirent_pool = uma_zcreate("TMPFS dirent",
	    sizeof(struct tmpfs_dirent), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	tmp->tm_node_pool = uma_zcreate("TMPFS node",
	    sizeof(struct tmpfs_node), tmpfs_node_ctor, tmpfs_node_dtor,
	    tmpfs_node_init, tmpfs_node_fini, UMA_ALIGN_PTR, 0);
	tmp->tm_ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	tmp->tm_nonc = nonc;

	/* Allocate the root node. */
	error = tmpfs_alloc_node(mp, tmp, VDIR, root_uid, root_gid,
	    root_mode & ALLPERMS, NULL, NULL, VNOVAL, &root);

	if (error != 0 || root == NULL) {
		/* Undo everything allocated above before failing. */
		uma_zdestroy(tmp->tm_node_pool);
		uma_zdestroy(tmp->tm_dirent_pool);
		delete_unrhdr(tmp->tm_ino_unr);
		free(tmp, M_TMPFSMNT);
		return (error);
	}
	KASSERT(root->tn_id == 2,
	    ("tmpfs root with invalid ino: %ju", (uintmax_t)root->tn_id));
	tmp->tm_root = root;

	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	/* tmpfs supports shared lookups and shared extending writes. */
	mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED;
	MNT_IUNLOCK(mp);

	mp->mnt_data = tmp;
	mp->mnt_stat.f_namemax = MAXNAMLEN;
	vfs_getnewfsid(mp);
	vfs_mountedfrom(mp, "tmpfs");

	return 0;
}

/* ARGSUSED2 */
/*
 * VFS_UNMOUNT(9) implementation: flush all vnodes, destroy every node
 * and release the per-mount state.
 */
static int
tmpfs_unmount(struct mount *mp, int mntflags)
{
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	int error, flags;

	flags = (mntflags & MNT_FORCE) != 0 ? FORCECLOSE : 0;
	tmp = VFS_TO_TMPFS(mp);

	/* Stop writers */
	error = vfs_write_suspend_umnt(mp);
	if (error != 0)
		return (error);
	/*
	 * At this point, nodes cannot be destroyed by any other
	 * thread because write suspension is started.
	 */

	for (;;) {
		error = vflush(mp, 0, flags, curthread);
		if (error != 0) {
			vfs_write_resume(mp, VR_START_WRITE);
			return (error);
		}
		MNT_ILOCK(mp);
		if (mp->mnt_nvnodelistsize == 0) {
			MNT_IUNLOCK(mp);
			break;
		}
		MNT_IUNLOCK(mp);
		/*
		 * Without MNT_FORCE, any vnode that reappeared means the
		 * file system is busy; with it, retry the flush.
		 */
		if ((mntflags & MNT_FORCE) == 0) {
			vfs_write_resume(mp, VR_START_WRITE);
			return (EBUSY);
		}
	}

	/*
	 * Destroy all remaining nodes.  tmpfs_free_node_locked() returns
	 * with the all-node lock dropped when it freed the node (so it is
	 * re-taken), and with the node interlock still held otherwise.
	 */
	TMPFS_LOCK(tmp);
	while ((node = LIST_FIRST(&tmp->tm_nodes_used)) != NULL) {
		TMPFS_NODE_LOCK(node);
		if (node->tn_type == VDIR)
			tmpfs_dir_destroy(tmp, node);
		if (tmpfs_free_node_locked(tmp, node, true))
			TMPFS_LOCK(tmp);
		else
			TMPFS_NODE_UNLOCK(node);
	}

	/* Drops the all-node lock and frees tmp when refcount hits 0. */
	mp->mnt_data = NULL;
	tmpfs_free_tmp(tmp);
	vfs_write_resume(mp, VR_START_WRITE);

	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);

	return (0);
}

/*
 * Drop a reference on the tmpfs mount structure and destroy it when the
 * last reference goes away.
 *
 * Must be called with TMPFS_LOCK(tmp) held; the lock is always released
 * before returning (and destroyed on the final reference).
 */
void
tmpfs_free_tmp(struct tmpfs_mount *tmp)
{

	MPASS(tmp->tm_refcount > 0);
	tmp->tm_refcount--;
	if (tmp->tm_refcount > 0) {
		TMPFS_UNLOCK(tmp);
		return;
	}
	TMPFS_UNLOCK(tmp);

	uma_zdestroy(tmp->tm_dirent_pool);
	uma_zdestroy(tmp->tm_node_pool);
	/* Release all allocated inode numbers before deleting the header. */
	clear_unrhdr(tmp->tm_ino_unr);
	delete_unrhdr(tmp->tm_ino_unr);

	mtx_destroy(&tmp->tm_allnode_lock);
	MPASS(tmp->tm_pages_used == 0);
	MPASS(tmp->tm_nodes_inuse == 0);

	free(tmp, M_TMPFSMNT);
}

/*
 * VFS_ROOT(9) implementation: return a locked, referenced vnode for the
 * root directory.
 */
static int
tmpfs_root(struct mount *mp, int flags, struct vnode **vpp)
{
	int error;

	error = tmpfs_alloc_vp(mp, VFS_TO_TMPFS(mp)->tm_root, flags, vpp);
	if (error == 0)
		(*vpp)->v_vflag |= VV_ROOT;
	return (error);
}

/*
 * VFS_FHTOVP(9) implementation: translate an NFS file handle into a
 * vnode.  The handle must match both the node's inode number and its
 * generation, so handles to recycled nodes are rejected.
 */
static int
tmpfs_fhtovp(struct mount *mp, struct fid *fhp, int flags,
    struct vnode **vpp)
{
	struct tmpfs_fid *tfhp;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	int error;

	tmp = VFS_TO_TMPFS(mp);

	tfhp = (struct tmpfs_fid *)fhp;
	if (tfhp->tf_len != sizeof(struct tmpfs_fid))
		return (EINVAL);

	if (tfhp->tf_id >= tmp->tm_nodes_max)
		return (EINVAL);

	/*
	 * Linear scan of all live nodes; take a reference under the
	 * all-node lock so the node cannot be destroyed once found.
	 */
	TMPFS_LOCK(tmp);
	LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
		if (node->tn_id == tfhp->tf_id &&
		    node->tn_gen == tfhp->tf_gen) {
			tmpfs_ref_node(node);
			break;
		}
	}
	TMPFS_UNLOCK(tmp);

	if (node != NULL) {
		error = tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, vpp);
		tmpfs_free_node(tmp, node);
	} else
		error = EINVAL;
	return (error);
}

/* ARGSUSED2 */
/*
 * VFS_STATFS(9) implementation: report block and inode usage.  Blocks
 * are counted in PAGE_SIZE units; when the mount has no explicit size
 * limit, the total is the current usage plus the memory still
 * available to tmpfs.
 */
static int
tmpfs_statfs(struct mount *mp, struct statfs *sbp)
{
	struct tmpfs_mount *tmp;
	size_t used;

	tmp = VFS_TO_TMPFS(mp);

	sbp->f_iosize = PAGE_SIZE;
	sbp->f_bsize = PAGE_SIZE;

	used = tmpfs_pages_used(tmp);
	if (tmp->tm_pages_max != ULONG_MAX)
		sbp->f_blocks = tmp->tm_pages_max;
	else
		sbp->f_blocks = used + tmpfs_mem_avail();
	if (sbp->f_blocks <= used)
		sbp->f_bavail = 0;
	else
		sbp->f_bavail = sbp->f_blocks - used;
	/* No reserved blocks: free == available. */
	sbp->f_bfree = sbp->f_bavail;
	used = tmp->tm_nodes_inuse;
	sbp->f_files = tmp->tm_nodes_max;
	if (sbp->f_files <= used)
		sbp->f_ffree = 0;
	else
		sbp->f_ffree = sbp->f_files - used;
	/* sbp->f_owner = tmp->tn_uid; */

	return 0;
}

/*
 * VFS_SYNC(9) implementation.  tmpfs has no backing store to flush;
 * this only participates in the suspension protocol and propagates
 * mtime updates from writes through mmap(2)ed regions.
 */
static int
tmpfs_sync(struct mount *mp, int waitfor)
{
	struct vnode *vp, *mvp;
	struct vm_object *obj;

	if (waitfor == MNT_SUSPEND) {
		/* Nothing to flush: suspend immediately. */
		MNT_ILOCK(mp);
		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
		MNT_IUNLOCK(mp);
	} else if (waitfor == MNT_LAZY) {
		/*
		 * Handle lazy updates of mtime from writes to mmaped
		 * regions.  Use MNT_VNODE_FOREACH_ALL instead of
		 * MNT_VNODE_FOREACH_ACTIVE, since unmap of the
		 * tmpfs-backed vnode does not call vinactive(), due
		 * to vm object type is OBJT_SWAP.
		 */
		MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
			if (vp->v_type != VREG) {
				VI_UNLOCK(vp);
				continue;
			}
			obj = vp->v_object;
			KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) ==
			    (OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj"));

			/*
			 * Unlocked read, avoid taking vnode lock if
			 * not needed.  Lost update will be handled on
			 * the next call.
			 */
			if ((obj->flags & OBJ_TMPFS_DIRTY) == 0) {
				VI_UNLOCK(vp);
				continue;
			}
			if (vget(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
			    curthread) != 0)
				continue;
			tmpfs_check_mtime(vp);
			vput(vp);
		}
	}
	return (0);
}

/*
 * The presence of a susp_clean method tells the VFS to track writes.
 */
static void
tmpfs_susp_clean(struct mount *mp __unused)
{
}

/*
 * tmpfs vfs operations.
 */

struct vfsops tmpfs_vfsops = {
	.vfs_mount =			tmpfs_mount,
	.vfs_unmount =			tmpfs_unmount,
	.vfs_root =			tmpfs_root,
	.vfs_statfs =			tmpfs_statfs,
	.vfs_fhtovp =			tmpfs_fhtovp,
	.vfs_sync =			tmpfs_sync,
	.vfs_susp_clean =		tmpfs_susp_clean,
};
VFS_SET(tmpfs_vfsops, tmpfs, VFCF_JAIL);