/*	$NetBSD: tmpfs_vfsops.c,v 1.10 2005/12/11 12:24:29 christos Exp $	*/

/*
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Efficient memory file system.
 *
 * tmpfs is a file system that uses NetBSD's virtual memory sub-system
 * (the well-known UVM) to store file data and metadata in an efficient
 * way.  This means that it does not follow the structure of an on-disk
 * file system because it simply does not need to.  Instead, it uses
 * memory-specific data structures and algorithms to automatically
 * allocate and release resources.
 */
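
/*
 * Illustrative usage sketch (not part of the original sources; names and
 * paths are examples only): assuming the file system is built as a module
 * named tmpfs.ko, a size-limited mount looks roughly like
 *
 *	kldload tmpfs
 *	mount -t tmpfs -o size=67108864 tmpfs /tmp/scratch
 *
 * The option names match the tmpfs_opts[] table below; "size" is a byte
 * count here (it is parsed with a plain "%qu" conversion), and a value of
 * zero or an omitted option means "use as much memory as available".
 */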

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>

#include <fs/tmpfs/tmpfs.h>

/*
 * Default permission for root node
 */
#define TMPFS_DEFAULT_ROOT_MODE	(S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)

MALLOC_DEFINE(M_TMPFSMNT, "tmpfs mount", "tmpfs mount structures");
MALLOC_DEFINE(M_TMPFSNAME, "tmpfs name", "tmpfs file names");

/* --------------------------------------------------------------------- */

static int	tmpfs_mount(struct mount *, struct thread *);
static int	tmpfs_unmount(struct mount *, int, struct thread *);
static int	tmpfs_root(struct mount *, int flags, struct vnode **,
		    struct thread *);
static int	tmpfs_fhtovp(struct mount *, struct fid *, struct vnode **);
static int	tmpfs_statfs(struct mount *, struct statfs *, struct thread *);

/* --------------------------------------------------------------------- */

static const char *tmpfs_opts[] = {
	"from", "size", "inodes", "uid", "gid", "mode", "export",
	NULL
};

/* --------------------------------------------------------------------- */

#define SWI_MAXMIB	3

/*
 * Return the total number of pages of swap space configured on the system,
 * or zero if it cannot be determined.
 */
static u_int
get_swpgtotal(void)
{
	struct xswdev xsd;
	char *sname = "vm.swap_info";
	int soid[SWI_MAXMIB], oid[2];
	u_int unswdev, total, dmmax, nswapdev;
	size_t mibi, len;

	total = 0;

	len = sizeof(dmmax);
	if (kernel_sysctlbyname(curthread, "vm.dmmax", &dmmax, &len,
	    NULL, 0, NULL, 0) != 0)
		return total;

	len = sizeof(nswapdev);
	if (kernel_sysctlbyname(curthread, "vm.nswapdev", &nswapdev, &len,
	    NULL, 0, NULL, 0) != 0)
		return total;

	/* Translate "vm.swap_info" into its numeric OID; {0, 3} is the
	 * sysctl name-to-OID lookup node. */
	mibi = (SWI_MAXMIB - 1) * sizeof(int);
	oid[0] = 0;
	oid[1] = 3;
	if (kernel_sysctl(curthread, oid, 2,
	    soid, &mibi, (void *)sname, strlen(sname),
	    NULL, 0) != 0)
		return total;

	/* Query each swap device in turn and accumulate its size. */
	mibi = (SWI_MAXMIB - 1);
	for (unswdev = 0; unswdev < nswapdev; ++unswdev) {
		soid[mibi] = unswdev;
		len = sizeof(struct xswdev);
		if (kernel_sysctl(curthread,
		    soid, mibi + 1, &xsd, &len, NULL, 0,
		    NULL, 0) != 0)
			return total;
		if (len == sizeof(struct xswdev))
			total += (xsd.xsw_nblks - dmmax);
	}

	return total;
}
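
/*
 * For reference only (an illustrative sketch, not code used by this module):
 * a userland program would perform roughly the same enumeration with
 * sysctlnametomib(3) and sysctl(3), e.g.
 *
 *	int mib[CTL_MAXNAME];
 *	size_t len = CTL_MAXNAME;
 *	struct xswdev xsd;
 *	size_t xlen = sizeof(xsd);
 *
 *	sysctlnametomib("vm.swap_info", mib, &len);
 *	mib[len] = 0;			(index of the first swap device)
 *	sysctl(mib, len + 1, &xsd, &xlen, NULL, 0);
 *
 * get_swpgtotal() has to do the name-to-OID translation by hand through the
 * {0, 3} node because it runs in the kernel.
 */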

/* --------------------------------------------------------------------- */

/*
 * UMA callbacks for the tmpfs node zone: tmpfs_node_init()/tmpfs_node_fini()
 * run once per backing allocation and manage the node's interlock, while
 * tmpfs_node_ctor()/tmpfs_node_dtor() run on every allocation from and
 * release to the zone and reset the per-use fields.
 */
static int
tmpfs_node_ctor(void *mem, int size, void *arg, int flags)
{
	struct tmpfs_node *node = (struct tmpfs_node *)mem;

	node->tn_gen++;
	node->tn_size = 0;
	node->tn_status = 0;
	node->tn_flags = 0;
	node->tn_links = 0;
	node->tn_lockf = NULL;
	node->tn_vnode = NULL;
	node->tn_vpstate = 0;
	node->tn_lookup_dirent = NULL;

	return (0);
}

static void
tmpfs_node_dtor(void *mem, int size, void *arg)
{
	struct tmpfs_node *node = (struct tmpfs_node *)mem;

	node->tn_type = VNON;
}

static int
tmpfs_node_init(void *mem, int size, int flags)
{
	struct tmpfs_node *node = (struct tmpfs_node *)mem;

	node->tn_id = 0;
	mtx_init(&node->tn_interlock, "tmpfs node interlock", NULL, MTX_DEF);
	node->tn_gen = arc4random();

	return (0);
}

static void
tmpfs_node_fini(void *mem, int size)
{
	struct tmpfs_node *node = (struct tmpfs_node *)mem;

	mtx_destroy(&node->tn_interlock);
}

static int
tmpfs_mount(struct mount *mp, struct thread *td)
{
	struct tmpfs_mount *tmp;
	struct tmpfs_node *root;
	size_t pages, mem_size;
	ino_t nodes;
	int error;
	/* Size counters. */
	ino_t nodes_max;
	off_t size_max;

	/* Root node attributes. */
	uid_t root_uid;
	gid_t root_gid;
	mode_t root_mode;

	struct vattr va;

	if (vfs_filteropt(mp->mnt_optnew, tmpfs_opts))
		return (EINVAL);

	if (mp->mnt_flag & MNT_UPDATE) {
		/* XXX: There is no support yet to update file system
		 * settings.  Should be added. */
		return EOPNOTSUPP;
	}

	printf("WARNING: TMPFS is considered to be a highly experimental "
	    "feature in FreeBSD.\n");

	/* Fetch the attributes of the covered vnode; they are used as the
	 * defaults for the root node's owner, group and mode. */
	vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY, td);
	error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred, td);
	VOP_UNLOCK(mp->mnt_vnodecovered, 0, td);
	if (error)
		return (error);

	/* Only the super-user may override those defaults with the "uid",
	 * "gid" and "mode" options. */
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "gid", "%d", &root_gid) != 1)
		root_gid = va.va_gid;
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "uid", "%d", &root_uid) != 1)
		root_uid = va.va_uid;
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "mode", "%o", &root_mode) != 1)
		root_mode = va.va_mode;
	if (vfs_scanopt(mp->mnt_optnew, "inodes", "%d", &nodes_max) != 1)
		nodes_max = 0;
	if (vfs_scanopt(mp->mnt_optnew, "size", "%qu", &size_max) != 1)
		size_max = 0;

	/* Do not allow mounts if we do not have enough memory to preserve
	 * the minimum reserved pages. */
	mem_size = cnt.v_free_count + cnt.v_inactive_count + get_swpgtotal();
	mem_size -= mem_size > cnt.v_wire_count ? cnt.v_wire_count : mem_size;
	if (mem_size < TMPFS_PAGES_RESERVED)
		return ENOSPC;

	/* Get the maximum number of memory pages this file system is
	 * allowed to use, based on the maximum size the user passed in
	 * the mount structure.  A value of zero is treated as if the
	 * maximum available space was requested. */
	if (size_max < PAGE_SIZE || size_max >= SIZE_MAX)
		pages = SIZE_MAX;
	else
		pages = howmany(size_max, PAGE_SIZE);
	MPASS(pages > 0);

	if (nodes_max <= 3)
		nodes = 3 + pages * PAGE_SIZE / 1024;
	else
		nodes = nodes_max;
	MPASS(nodes >= 3);

	/* Allocate the tmpfs mount structure and fill it. */
	tmp = (struct tmpfs_mount *)malloc(sizeof(struct tmpfs_mount),
	    M_TMPFSMNT, M_WAITOK | M_ZERO);

	mtx_init(&tmp->allnode_lock, "tmpfs allnode lock", NULL, MTX_DEF);
	tmp->tm_nodes_max = nodes;
	tmp->tm_nodes_inuse = 0;
	tmp->tm_maxfilesize = (u_int64_t)(cnt.v_page_count + get_swpgtotal()) *
	    PAGE_SIZE;
	LIST_INIT(&tmp->tm_nodes_used);

	tmp->tm_pages_max = pages;
	tmp->tm_pages_used = 0;
	tmp->tm_ino_unr = new_unrhdr(2, INT_MAX, &tmp->allnode_lock);
	tmp->tm_dirent_pool = uma_zcreate("TMPFS dirent",
	    sizeof(struct tmpfs_dirent),
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	tmp->tm_node_pool = uma_zcreate("TMPFS node",
	    sizeof(struct tmpfs_node),
	    tmpfs_node_ctor, tmpfs_node_dtor,
	    tmpfs_node_init, tmpfs_node_fini,
	    UMA_ALIGN_PTR, 0);

	/* Allocate the root node. */
	error = tmpfs_alloc_node(tmp, VDIR, root_uid,
	    root_gid, root_mode & ALLPERMS, NULL, NULL,
	    VNOVAL, td, &root);

	if (error != 0 || root == NULL) {
		uma_zdestroy(tmp->tm_node_pool);
		uma_zdestroy(tmp->tm_dirent_pool);
		delete_unrhdr(tmp->tm_ino_unr);
		free(tmp, M_TMPFSMNT);
		return error;
	}
	KASSERT(root->tn_id == 2,
	    ("tmpfs root with invalid ino: %d", root->tn_id));
	tmp->tm_root = root;

	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_MPSAFE;
	MNT_IUNLOCK(mp);

	mp->mnt_data = tmp;
	mp->mnt_stat.f_namemax = MAXNAMLEN;
	vfs_getnewfsid(mp);
	vfs_mountedfrom(mp, "tmpfs");

	return 0;
}

/* --------------------------------------------------------------------- */

/* ARGSUSED2 */
static int
tmpfs_unmount(struct mount *mp, int mntflags, struct thread *l)
{
	int error;
	int flags = 0;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;

	/* Handle forced unmounts. */
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	/* Finalize all pending I/O. */
	error = vflush(mp, 0, flags, l);
	if (error != 0)
		return error;

	tmp = VFS_TO_TMPFS(mp);

	/* Free all associated data.  The loop iterates over the linked list
	 * we have containing all used nodes.  For each of them that is
	 * a directory, we free all its directory entries.  Note that after
	 * freeing a node, it will automatically go to the available list,
	 * so we will later have to iterate over it to release its items. */
	node = LIST_FIRST(&tmp->tm_nodes_used);
	while (node != NULL) {
		struct tmpfs_node *next;

		if (node->tn_type == VDIR) {
			struct tmpfs_dirent *de;

			de = TAILQ_FIRST(&node->tn_dir.tn_dirhead);
			while (de != NULL) {
				struct tmpfs_dirent *nde;

				nde = TAILQ_NEXT(de, td_entries);
				tmpfs_free_dirent(tmp, de, FALSE);
				de = nde;
				node->tn_size -= sizeof(struct tmpfs_dirent);
			}
		}

		next = LIST_NEXT(node, tn_entries);
		tmpfs_free_node(tmp, node);
		node = next;
	}

	uma_zdestroy(tmp->tm_dirent_pool);
	uma_zdestroy(tmp->tm_node_pool);
	delete_unrhdr(tmp->tm_ino_unr);

	mtx_destroy(&tmp->allnode_lock);
	MPASS(tmp->tm_pages_used == 0);
	MPASS(tmp->tm_nodes_inuse == 0);

	/* Throw away the tmpfs_mount structure. */
	free(mp->mnt_data, M_TMPFSMNT);
	mp->mnt_data = NULL;

	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);

	return 0;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_root(struct mount *mp, int flags, struct vnode **vpp, struct thread *td)
{
	int error;

	error = tmpfs_alloc_vp(mp, VFS_TO_TMPFS(mp)->tm_root, flags, vpp, td);
	if (!error)
		(*vpp)->v_vflag |= VV_ROOT;

	return error;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{
	boolean_t found;
	struct tmpfs_fid *tfhp;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;

	tmp = VFS_TO_TMPFS(mp);

	tfhp = (struct tmpfs_fid *)fhp;
	if (tfhp->tf_len != sizeof(struct tmpfs_fid))
		return EINVAL;

	if (tfhp->tf_id >= tmp->tm_nodes_max)
		return EINVAL;

	found = FALSE;

	/* Look the node up by the inode number and generation stored in
	 * the file handle. */
	TMPFS_LOCK(tmp);
	LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
		if (node->tn_id == tfhp->tf_id &&
		    node->tn_gen == tfhp->tf_gen) {
			found = TRUE;
			break;
		}
	}
	TMPFS_UNLOCK(tmp);

	if (found)
		return (tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, vpp, curthread));

	return (EINVAL);
}

/* --------------------------------------------------------------------- */

/* ARGSUSED2 */
static int
tmpfs_statfs(struct mount *mp, struct statfs *sbp, struct thread *l)
{
	fsfilcnt_t freenodes;
	struct tmpfs_mount *tmp;

	tmp = VFS_TO_TMPFS(mp);

	sbp->f_iosize = PAGE_SIZE;
	sbp->f_bsize = PAGE_SIZE;

	sbp->f_blocks = TMPFS_PAGES_MAX(tmp);
	sbp->f_bavail = sbp->f_bfree = TMPFS_PAGES_AVAIL(tmp);

	freenodes = MIN(tmp->tm_nodes_max - tmp->tm_nodes_inuse,
	    TMPFS_PAGES_AVAIL(tmp) * PAGE_SIZE / sizeof(struct tmpfs_node));

	sbp->f_files = freenodes + tmp->tm_nodes_inuse;
	sbp->f_ffree = freenodes;
	/* sbp->f_owner = tmp->tn_uid; */

	return 0;
}

/* --------------------------------------------------------------------- */

/*
 * tmpfs vfs operations.
 */
struct vfsops tmpfs_vfsops = {
	.vfs_mount =	tmpfs_mount,
	.vfs_unmount =	tmpfs_unmount,
	.vfs_root =	tmpfs_root,
	.vfs_statfs =	tmpfs_statfs,
	.vfs_fhtovp =	tmpfs_fhtovp,
};
VFS_SET(tmpfs_vfsops, tmpfs, 0);
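
/*
 * Worked example (illustrative numbers only, assuming 4 KB pages): mounting
 * with "size" set to 67108864 bytes (64 MB) makes tmpfs_mount() compute
 * pages = howmany(67108864, 4096) = 16384 and, with no "inodes" option,
 * nodes = 3 + 16384 * 4096 / 1024 = 65539.  tmpfs_statfs() reports blocks in
 * PAGE_SIZE units, so such a mount shows up as at most 64 MB to df(1), and
 * f_files/f_ffree are derived from whichever is smaller: the remaining node
 * quota or the number of nodes that still fit in the pages left.
 */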