/*	$NetBSD: tmpfs_vfsops.c,v 1.10 2005/12/11 12:24:29 christos Exp $	*/

/*-
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Efficient memory file system.
 *
 * tmpfs is a file system that uses FreeBSD's virtual memory
 * sub-system to store file data and metadata in an efficient way.
 * This means that it does not follow the structure of an on-disk file
 * system because it simply does not need to.  Instead, it uses
 * memory-specific data structures and algorithms to automatically
 * allocate and release resources.
 */
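/*
 * A typical mount request (illustrative) looks like
 *	mount -t tmpfs -o size=64m,mode=1777 tmpfs /tmp
 * where the options are parsed from mnt_optnew by tmpfs_mount() below;
 * see the tmpfs_opts[] table for the full list of accepted options.
 */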
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>

#include <fs/tmpfs/tmpfs.h>

/*
 * Default permission for root node
 */
#define	TMPFS_DEFAULT_ROOT_MODE	(S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)

MALLOC_DEFINE(M_TMPFSMNT, "tmpfs mount", "tmpfs mount structures");
MALLOC_DEFINE(M_TMPFSNAME, "tmpfs name", "tmpfs file names");

static int	tmpfs_mount(struct mount *);
static int	tmpfs_unmount(struct mount *, int);
static int	tmpfs_root(struct mount *, int flags, struct vnode **);
static int	tmpfs_fhtovp(struct mount *, struct fid *, int,
		    struct vnode **);
static int	tmpfs_statfs(struct mount *, struct statfs *);
static void	tmpfs_susp_clean(struct mount *);

static const char *tmpfs_opts[] = {
	"from", "size", "maxfilesize", "inodes", "uid", "gid", "mode", "export",
	"union", "nonc", NULL
};

static const char *tmpfs_updateopts[] = {
	"from", "export", NULL
};

static int
tmpfs_node_ctor(void *mem, int size, void *arg, int flags)
{
	struct tmpfs_node *node = (struct tmpfs_node *)mem;

	node->tn_gen++;
	node->tn_size = 0;
	node->tn_status = 0;
	node->tn_flags = 0;
	node->tn_links = 0;
	node->tn_vnode = NULL;
	node->tn_vpstate = 0;

	return (0);
}

static void
tmpfs_node_dtor(void *mem, int size, void *arg)
{
	struct tmpfs_node *node = (struct tmpfs_node *)mem;

	node->tn_type = VNON;
}

static int
tmpfs_node_init(void *mem, int size, int flags)
{
	struct tmpfs_node *node = (struct tmpfs_node *)mem;

	node->tn_id = 0;

	mtx_init(&node->tn_interlock, "tmpfs node interlock", NULL, MTX_DEF);
	node->tn_gen = arc4random();

	return (0);
}

static void
tmpfs_node_fini(void *mem, int size)
{
	struct tmpfs_node *node = (struct tmpfs_node *)mem;

	mtx_destroy(&node->tn_interlock);
}

static int
tmpfs_mount(struct mount *mp)
{
	const size_t nodes_per_page = howmany(PAGE_SIZE,
	    sizeof(struct tmpfs_dirent) + sizeof(struct tmpfs_node));
	struct tmpfs_mount *tmp;
	struct tmpfs_node *root;
	struct thread *td = curthread;
	int error;
	bool nonc;
	/* Size counters. */
	u_quad_t pages;
	off_t nodes_max, size_max, maxfilesize;

	/* Root node attributes. */
	uid_t root_uid;
	gid_t root_gid;
	mode_t root_mode;

	struct vattr va;

	if (!prison_allow(td->td_ucred, PR_ALLOW_MOUNT_TMPFS))
		return (EPERM);

	if (vfs_filteropt(mp->mnt_optnew, tmpfs_opts))
		return (EINVAL);

	if (mp->mnt_flag & MNT_UPDATE) {
		/* Only support update mounts for certain options. */
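		/*
		 * Anything outside tmpfs_updateopts (currently "from" and
		 * "export"), and in particular an attempt to flip the
		 * read-only state, is rejected with EOPNOTSUPP.
		 */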
		if (vfs_filteropt(mp->mnt_optnew, tmpfs_updateopts) != 0)
			return (EOPNOTSUPP);
		if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) !=
		    ((struct tmpfs_mount *)mp->mnt_data)->tm_ronly)
			return (EOPNOTSUPP);
		return (0);
	}

	vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred);
	VOP_UNLOCK(mp->mnt_vnodecovered, 0);
	if (error)
		return (error);

	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "gid", "%d", &root_gid) != 1)
		root_gid = va.va_gid;
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "uid", "%d", &root_uid) != 1)
		root_uid = va.va_uid;
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "mode", "%ho", &root_mode) != 1)
		root_mode = va.va_mode;
	if (vfs_getopt_size(mp->mnt_optnew, "inodes", &nodes_max) != 0)
		nodes_max = 0;
	if (vfs_getopt_size(mp->mnt_optnew, "size", &size_max) != 0)
		size_max = 0;
	if (vfs_getopt_size(mp->mnt_optnew, "maxfilesize", &maxfilesize) != 0)
		maxfilesize = 0;
	nonc = vfs_getopt(mp->mnt_optnew, "nonc", NULL, NULL) == 0;

	/* Do not allow mounts if we do not have enough memory to preserve
	 * the minimum reserved pages. */
	if (tmpfs_mem_avail() < TMPFS_PAGES_MINRESERVED)
		return (ENOSPC);

	/* Get the maximum number of memory pages this file system is
	 * allowed to use, based on the maximum size the user passed in
	 * the mount structure.  A value of zero is treated as if the
	 * maximum available space was requested. */
	if (size_max == 0 || size_max > OFF_MAX - PAGE_SIZE ||
	    (SIZE_MAX < OFF_MAX && size_max / PAGE_SIZE >= SIZE_MAX))
		pages = SIZE_MAX;
	else {
		size_max = roundup(size_max, PAGE_SIZE);
		pages = howmany(size_max, PAGE_SIZE);
	}
	MPASS(pages > 0);

	if (nodes_max <= 3) {
		if (pages < INT_MAX / nodes_per_page)
			nodes_max = pages * nodes_per_page;
		else
			nodes_max = INT_MAX;
	}
	if (nodes_max > INT_MAX)
		nodes_max = INT_MAX;
	MPASS(nodes_max >= 3);

	/* Allocate the tmpfs mount structure and fill it. */
	tmp = (struct tmpfs_mount *)malloc(sizeof(struct tmpfs_mount),
	    M_TMPFSMNT, M_WAITOK | M_ZERO);

	mtx_init(&tmp->tm_allnode_lock, "tmpfs allnode lock", NULL, MTX_DEF);
	tmp->tm_nodes_max = nodes_max;
	tmp->tm_nodes_inuse = 0;
	tmp->tm_refcount = 1;
	tmp->tm_maxfilesize = maxfilesize > 0 ? maxfilesize : OFF_MAX;
	LIST_INIT(&tmp->tm_nodes_used);

	tmp->tm_pages_max = pages;
	tmp->tm_pages_used = 0;
	tmp->tm_ino_unr = new_unrhdr(2, INT_MAX, &tmp->tm_allnode_lock);
	tmp->tm_dirent_pool = uma_zcreate("TMPFS dirent",
	    sizeof(struct tmpfs_dirent), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	tmp->tm_node_pool = uma_zcreate("TMPFS node",
	    sizeof(struct tmpfs_node), tmpfs_node_ctor, tmpfs_node_dtor,
	    tmpfs_node_init, tmpfs_node_fini, UMA_ALIGN_PTR, 0);
	tmp->tm_ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	tmp->tm_nonc = nonc;

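	/*
	 * Inode numbers come from the unit number allocator initialized
	 * above with new_unrhdr(2, INT_MAX, ...), so the root directory
	 * created next is expected to receive inode number 2; this is
	 * asserted right after tmpfs_alloc_node().
	 */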
	/* Allocate the root node. */
	error = tmpfs_alloc_node(mp, tmp, VDIR, root_uid, root_gid,
	    root_mode & ALLPERMS, NULL, NULL, VNOVAL, &root);

	if (error != 0 || root == NULL) {
		uma_zdestroy(tmp->tm_node_pool);
		uma_zdestroy(tmp->tm_dirent_pool);
		delete_unrhdr(tmp->tm_ino_unr);
		free(tmp, M_TMPFSMNT);
		return (error);
	}
	KASSERT(root->tn_id == 2,
	    ("tmpfs root with invalid ino: %ju", (uintmax_t)root->tn_id));
	tmp->tm_root = root;

	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED;
	MNT_IUNLOCK(mp);

	mp->mnt_data = tmp;
	mp->mnt_stat.f_namemax = MAXNAMLEN;
	vfs_getnewfsid(mp);
	vfs_mountedfrom(mp, "tmpfs");

	return (0);
}

/* ARGSUSED2 */
static int
tmpfs_unmount(struct mount *mp, int mntflags)
{
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	int error, flags;

	flags = (mntflags & MNT_FORCE) != 0 ? FORCECLOSE : 0;
	tmp = VFS_TO_TMPFS(mp);

	/* Stop writers */
	error = vfs_write_suspend_umnt(mp);
	if (error != 0)
		return (error);
	/*
	 * At this point, nodes cannot be destroyed by any other
	 * thread because write suspension is started.
	 */

	for (;;) {
		error = vflush(mp, 0, flags, curthread);
		if (error != 0) {
			vfs_write_resume(mp, VR_START_WRITE);
			return (error);
		}
		MNT_ILOCK(mp);
		if (mp->mnt_nvnodelistsize == 0) {
			MNT_IUNLOCK(mp);
			break;
		}
		MNT_IUNLOCK(mp);
		if ((mntflags & MNT_FORCE) == 0) {
			vfs_write_resume(mp, VR_START_WRITE);
			return (EBUSY);
		}
	}

	TMPFS_LOCK(tmp);
	while ((node = LIST_FIRST(&tmp->tm_nodes_used)) != NULL) {
		TMPFS_NODE_LOCK(node);
		if (node->tn_type == VDIR)
			tmpfs_dir_destroy(tmp, node);
		if (tmpfs_free_node_locked(tmp, node, true))
			TMPFS_LOCK(tmp);
		else
			TMPFS_NODE_UNLOCK(node);
	}

	mp->mnt_data = NULL;
	tmpfs_free_tmp(tmp);
	vfs_write_resume(mp, VR_START_WRITE);

	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);

	return (0);
}

void
tmpfs_free_tmp(struct tmpfs_mount *tmp)
{

	MPASS(tmp->tm_refcount > 0);
	tmp->tm_refcount--;
	if (tmp->tm_refcount > 0) {
		TMPFS_UNLOCK(tmp);
		return;
	}
	TMPFS_UNLOCK(tmp);

	uma_zdestroy(tmp->tm_dirent_pool);
	uma_zdestroy(tmp->tm_node_pool);
	clear_unrhdr(tmp->tm_ino_unr);
	delete_unrhdr(tmp->tm_ino_unr);

	mtx_destroy(&tmp->tm_allnode_lock);
	MPASS(tmp->tm_pages_used == 0);
	MPASS(tmp->tm_nodes_inuse == 0);

	free(tmp, M_TMPFSMNT);
}

static int
tmpfs_root(struct mount *mp, int flags, struct vnode **vpp)
{
	int error;

	error = tmpfs_alloc_vp(mp, VFS_TO_TMPFS(mp)->tm_root, flags, vpp);
	if (error == 0)
		(*vpp)->v_vflag |= VV_ROOT;
	return (error);
}

static int
tmpfs_fhtovp(struct mount *mp, struct fid *fhp, int flags,
    struct vnode **vpp)
{
	struct tmpfs_fid *tfhp;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	int error;

	tmp = VFS_TO_TMPFS(mp);

	tfhp = (struct tmpfs_fid *)fhp;
	if (tfhp->tf_len != sizeof(struct tmpfs_fid))
		return (EINVAL);

	if (tfhp->tf_id >= tmp->tm_nodes_max)
		return (EINVAL);

	TMPFS_LOCK(tmp);
	LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
		if (node->tn_id == tfhp->tf_id &&
		    node->tn_gen == tfhp->tf_gen) {
			tmpfs_ref_node(node);
			break;
		}
	}
	TMPFS_UNLOCK(tmp);

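	/*
	 * If a matching node was found it was referenced while the mount
	 * lock was held, so it cannot go away before tmpfs_alloc_vp();
	 * the reference is dropped again below.
	 */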
	if (node != NULL) {
		error = tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, vpp);
		tmpfs_free_node(tmp, node);
	} else
		error = EINVAL;
	return (error);
}

/* ARGSUSED2 */
static int
tmpfs_statfs(struct mount *mp, struct statfs *sbp)
{
	struct tmpfs_mount *tmp;
	size_t used;

	tmp = VFS_TO_TMPFS(mp);

	sbp->f_iosize = PAGE_SIZE;
	sbp->f_bsize = PAGE_SIZE;

	used = tmpfs_pages_used(tmp);
	if (tmp->tm_pages_max != ULONG_MAX)
		sbp->f_blocks = tmp->tm_pages_max;
	else
		sbp->f_blocks = used + tmpfs_mem_avail();
	if (sbp->f_blocks <= used)
		sbp->f_bavail = 0;
	else
		sbp->f_bavail = sbp->f_blocks - used;
	sbp->f_bfree = sbp->f_bavail;
	used = tmp->tm_nodes_inuse;
	sbp->f_files = tmp->tm_nodes_max;
	if (sbp->f_files <= used)
		sbp->f_ffree = 0;
	else
		sbp->f_ffree = sbp->f_files - used;
	/* sbp->f_owner = tmp->tn_uid; */

	return (0);
}

static int
tmpfs_sync(struct mount *mp, int waitfor)
{
	struct vnode *vp, *mvp;
	struct vm_object *obj;

	if (waitfor == MNT_SUSPEND) {
		MNT_ILOCK(mp);
		mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
		MNT_IUNLOCK(mp);
	} else if (waitfor == MNT_LAZY) {
		/*
		 * Handle lazy updates of mtime from writes to mmapped
		 * regions.  Use MNT_VNODE_FOREACH_ALL instead of
		 * MNT_VNODE_FOREACH_ACTIVE, since unmap of the
		 * tmpfs-backed vnode does not call vinactive(), because
		 * the vm object type is OBJT_SWAP.
		 */
		MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
			if (vp->v_type != VREG) {
				VI_UNLOCK(vp);
				continue;
			}
			obj = vp->v_object;
			KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) ==
			    (OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj"));

			/*
			 * Unlocked read, avoid taking vnode lock if
			 * not needed.  Lost update will be handled on
			 * the next call.
			 */
			if ((obj->flags & OBJ_TMPFS_DIRTY) == 0) {
				VI_UNLOCK(vp);
				continue;
			}
			if (vget(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
			    curthread) != 0)
				continue;
			tmpfs_check_mtime(vp);
			vput(vp);
		}
	}
	return (0);
}

/*
 * The presence of a susp_clean method tells the VFS to track writes.
 */
static void
tmpfs_susp_clean(struct mount *mp __unused)
{
}

/*
 * tmpfs vfs operations.
 */

struct vfsops tmpfs_vfsops = {
	.vfs_mount =		tmpfs_mount,
	.vfs_unmount =		tmpfs_unmount,
	.vfs_root =		tmpfs_root,
	.vfs_statfs =		tmpfs_statfs,
	.vfs_fhtovp =		tmpfs_fhtovp,
	.vfs_sync =		tmpfs_sync,
	.vfs_susp_clean =	tmpfs_susp_clean,
};
VFS_SET(tmpfs_vfsops, tmpfs, VFCF_JAIL);