/*	$NetBSD: tmpfs_vfsops.c,v 1.10 2005/12/11 12:24:29 christos Exp $	*/

/*
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Efficient memory file system.
 *
 * tmpfs is a file system that uses NetBSD's virtual memory sub-system
 * (the well-known UVM) to store file data and metadata in an efficient
 * way.  This means that it does not follow the structure of an on-disk
 * file system because it simply does not need to.  Instead, it uses
 * memory-specific data structures and algorithms to automatically
 * allocate and release resources.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>

#include <fs/tmpfs/tmpfs.h>

/*
 * Default permission for root node
 */
#define TMPFS_DEFAULT_ROOT_MODE	(S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)

MALLOC_DEFINE(M_TMPFSMNT, "tmpfs mount", "tmpfs mount structures");
MALLOC_DEFINE(M_TMPFSNAME, "tmpfs name", "tmpfs file names");

/* --------------------------------------------------------------------- */

static int	tmpfs_mount(struct mount *, struct thread *);
static int	tmpfs_unmount(struct mount *, int, struct thread *);
static int	tmpfs_root(struct mount *, int flags, struct vnode **,
		    struct thread *);
static int	tmpfs_fhtovp(struct mount *, struct fid *, struct vnode **);
static int	tmpfs_statfs(struct mount *, struct statfs *, struct thread *);

/* --------------------------------------------------------------------- */

static const char *tmpfs_opts[] = {
	"from", "size", "inodes", "uid", "gid", "mode",
	NULL
};

/* --------------------------------------------------------------------- */

#define SWI_MAXMIB	3

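/*
 * Return the total number of swap pages configured in the system.  Every
 * swap device is visited through the vm.swap_info sysctl and its block
 * count, less dmmax blocks, is added to the running total.  If any
 * sysctl lookup fails, the total accumulated so far (possibly zero) is
 * returned.
 */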
static u_int
get_swpgtotal(void)
{
	struct xswdev xsd;
	char *sname = "vm.swap_info";
	int soid[SWI_MAXMIB], oid[2];
	u_int unswdev, total, dmmax, nswapdev;
	size_t mibi, len;

	total = 0;

	len = sizeof(dmmax);
	if (kernel_sysctlbyname(curthread, "vm.dmmax", &dmmax, &len,
	    NULL, 0, NULL, 0) != 0)
		return total;

	len = sizeof(nswapdev);
	if (kernel_sysctlbyname(curthread, "vm.nswapdev", &nswapdev, &len,
	    NULL, 0, NULL, 0) != 0)
		return total;

	mibi = (SWI_MAXMIB - 1) * sizeof(int);
	oid[0] = 0;
	oid[1] = 3;

	if (kernel_sysctl(curthread, oid, 2,
	    soid, &mibi, (void *)sname, strlen(sname),
	    NULL, 0) != 0)
		return total;

	mibi = (SWI_MAXMIB - 1);
	for (unswdev = 0; unswdev < nswapdev; ++unswdev) {
		soid[mibi] = unswdev;
		len = sizeof(struct xswdev);
		if (kernel_sysctl(curthread,
		    soid, mibi + 1, &xsd, &len, NULL, 0,
		    NULL, 0) != 0)
			return total;
		if (len == sizeof(struct xswdev))
			total += (xsd.xsw_nblks - dmmax);
	}

	return total;
}

/* --------------------------------------------------------------------- */

/*
 * UMA callbacks for the tmpfs node pool.  tmpfs_node_init() and
 * tmpfs_node_fini() run when an item enters or leaves the zone and
 * create/destroy the per-node interlock and seed the generation number;
 * tmpfs_node_ctor() and tmpfs_node_dtor() run on every allocation and
 * free and reset the fields that must not survive reuse.  The ctor also
 * bumps tn_gen so that stale file handles no longer match (see
 * tmpfs_fhtovp()).
 */
static int
tmpfs_node_ctor(void *mem, int size, void *arg, int flags)
{
	struct tmpfs_node *node = (struct tmpfs_node *)mem;

	node->tn_gen++;
	node->tn_size = 0;
	node->tn_status = 0;
	node->tn_flags = 0;
	node->tn_links = 0;
	node->tn_lockf = NULL;
	node->tn_vnode = NULL;
	node->tn_vpstate = 0;
	node->tn_lookup_dirent = NULL;

	return (0);
}

static void
tmpfs_node_dtor(void *mem, int size, void *arg)
{
	struct tmpfs_node *node = (struct tmpfs_node *)mem;

	node->tn_type = VNON;
}

static int
tmpfs_node_init(void *mem, int size, int flags)
{
	struct tmpfs_node *node = (struct tmpfs_node *)mem;

	node->tn_id = 0;
	mtx_init(&node->tn_interlock, "tmpfs node interlock", NULL, MTX_DEF);
	node->tn_gen = arc4random();

	return (0);
}

static void
tmpfs_node_fini(void *mem, int size)
{
	struct tmpfs_node *node = (struct tmpfs_node *)mem;

	mtx_destroy(&node->tn_interlock);
}

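/*
 * Mount entry point.  MNT_UPDATE is not supported yet; otherwise the
 * "uid", "gid", "mode", "inodes" and "size" options are read (falling
 * back to the covered vnode's attributes and to defaults derived from
 * the available memory), the mount is sized against the memory and swap
 * currently available, the UMA pools for nodes and directory entries
 * are created, and the root directory node is allocated.
 *
 * Illustrative invocation only (option values are arbitrary; "size" is
 * parsed as a byte count and "mode" as an octal number):
 *
 *	mount -t tmpfs -o size=67108864,inodes=4096,mode=1777 tmpfs /mnt
 */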
static int
tmpfs_mount(struct mount *mp, struct thread *td)
{
	struct tmpfs_mount *tmp;
	struct tmpfs_node *root;
	size_t pages, mem_size;
	ino_t nodes;
	int error;
	/* Size counters. */
	ino_t nodes_max;
	off_t size_max;

	/* Root node attributes. */
	uid_t root_uid;
	gid_t root_gid;
	mode_t root_mode;

	struct vattr va;

	if (vfs_filteropt(mp->mnt_optnew, tmpfs_opts))
		return (EINVAL);

	if (mp->mnt_flag & MNT_UPDATE) {
		/* XXX: There is no support yet to update file system
		 * settings.  Should be added. */
		return EOPNOTSUPP;
	}

	vn_lock(mp->mnt_vnodecovered, LK_SHARED | LK_RETRY, td);
	error = VOP_GETATTR(mp->mnt_vnodecovered, &va, mp->mnt_cred, td);
	VOP_UNLOCK(mp->mnt_vnodecovered, 0, td);
	if (error)
		return (error);

	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "gid", "%d", &root_gid) != 1)
		root_gid = va.va_gid;
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "uid", "%d", &root_uid) != 1)
		root_uid = va.va_uid;
	if (mp->mnt_cred->cr_ruid != 0 ||
	    vfs_scanopt(mp->mnt_optnew, "mode", "%o", &root_mode) != 1)
		root_mode = va.va_mode;
	if (vfs_scanopt(mp->mnt_optnew, "inodes", "%d", &nodes_max) != 1)
		nodes_max = 0;
	if (vfs_scanopt(mp->mnt_optnew, "size", "%qu", &size_max) != 1)
		size_max = 0;

	/* Do not allow mounts if we do not have enough memory to preserve
	 * the minimum reserved pages. */
	mem_size = cnt.v_free_count + cnt.v_inactive_count + get_swpgtotal();
	mem_size -= mem_size > cnt.v_wire_count ? cnt.v_wire_count : mem_size;
	if (mem_size < TMPFS_PAGES_RESERVED)
		return ENOSPC;

	/* Get the maximum number of memory pages this file system is
	 * allowed to use, based on the maximum size the user passed in
	 * the mount structure.  A value of zero is treated as if the
	 * maximum available space was requested. */
	if (size_max < PAGE_SIZE || size_max >= SIZE_MAX)
		pages = SIZE_MAX;
	else
		pages = howmany(size_max, PAGE_SIZE);
	MPASS(pages > 0);

	if (nodes_max <= 3)
		nodes = 3 + pages * PAGE_SIZE / 1024;
	else
		nodes = nodes_max;
	MPASS(nodes >= 3);

	/* Allocate the tmpfs mount structure and fill it. */
	tmp = (struct tmpfs_mount *)malloc(sizeof(struct tmpfs_mount),
	    M_TMPFSMNT, M_WAITOK | M_ZERO);

	mtx_init(&tmp->allnode_lock, "tmpfs allnode lock", NULL, MTX_DEF);
	tmp->tm_nodes_max = nodes;
	tmp->tm_nodes_inuse = 0;
	tmp->tm_maxfilesize = (u_int64_t)(cnt.v_page_count + get_swpgtotal()) *
	    PAGE_SIZE;
	LIST_INIT(&tmp->tm_nodes_used);

	tmp->tm_pages_max = pages;
	tmp->tm_pages_used = 0;
	tmp->tm_ino_unr = new_unrhdr(2, INT_MAX, &tmp->allnode_lock);
	tmp->tm_dirent_pool = uma_zcreate("TMPFS dirent",
	    sizeof(struct tmpfs_dirent),
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	tmp->tm_node_pool = uma_zcreate("TMPFS node",
	    sizeof(struct tmpfs_node),
	    tmpfs_node_ctor, tmpfs_node_dtor,
	    tmpfs_node_init, tmpfs_node_fini,
	    UMA_ALIGN_PTR, 0);

	/* Allocate the root node. */
	error = tmpfs_alloc_node(tmp, VDIR, root_uid,
	    root_gid, root_mode & ALLPERMS, NULL, NULL,
	    VNOVAL, td, &root);

	if (error != 0 || root == NULL) {
		uma_zdestroy(tmp->tm_node_pool);
		uma_zdestroy(tmp->tm_dirent_pool);
		delete_unrhdr(tmp->tm_ino_unr);
		free(tmp, M_TMPFSMNT);
		return error;
	}
	KASSERT(root->tn_id == 2,
	    ("tmpfs root with invalid ino: %d", root->tn_id));
	tmp->tm_root = root;

	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_MPSAFE;
	MNT_IUNLOCK(mp);

	mp->mnt_data = tmp;
	mp->mnt_stat.f_namemax = MAXNAMLEN;
	vfs_getnewfsid(mp);
	vfs_mountedfrom(mp, "tmpfs");

	return 0;
}

/* --------------------------------------------------------------------- */

/* ARGSUSED2 */
static int
tmpfs_unmount(struct mount *mp, int mntflags, struct thread *l)
{
	int error;
	int flags = 0;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;

	/* Handle forced unmounts. */
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	/* Finalize all pending I/O. */
	error = vflush(mp, 0, flags, l);
	if (error != 0)
		return error;

	tmp = VFS_TO_TMPFS(mp);

	/* Free all associated data.  The loop walks the list of all nodes
	 * in use; for each node that is a directory, its directory entries
	 * are released first, and then the node itself is freed. */
	node = LIST_FIRST(&tmp->tm_nodes_used);
	while (node != NULL) {
		struct tmpfs_node *next;

		if (node->tn_type == VDIR) {
			struct tmpfs_dirent *de;

			de = TAILQ_FIRST(&node->tn_dir.tn_dirhead);
			while (de != NULL) {
				struct tmpfs_dirent *nde;

				nde = TAILQ_NEXT(de, td_entries);
				tmpfs_free_dirent(tmp, de, FALSE);
				de = nde;
				node->tn_size -= sizeof(struct tmpfs_dirent);
			}
		}

		next = LIST_NEXT(node, tn_entries);
		tmpfs_free_node(tmp, node);
		node = next;
	}

	uma_zdestroy(tmp->tm_dirent_pool);
	uma_zdestroy(tmp->tm_node_pool);
	delete_unrhdr(tmp->tm_ino_unr);

	mtx_destroy(&tmp->allnode_lock);
	MPASS(tmp->tm_pages_used == 0);
	MPASS(tmp->tm_nodes_inuse == 0);

	/* Throw away the tmpfs_mount structure. */
	free(mp->mnt_data, M_TMPFSMNT);
	mp->mnt_data = NULL;

	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);

	return 0;
}

/* --------------------------------------------------------------------- */

static int
tmpfs_root(struct mount *mp, int flags, struct vnode **vpp, struct thread *td)
{
	int error;

	error = tmpfs_alloc_vp(mp, VFS_TO_TMPFS(mp)->tm_root, flags, vpp, td);
	if (!error)
		(*vpp)->v_vflag |= VV_ROOT;

	return error;
}

/* --------------------------------------------------------------------- */

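/*
 * Translate an NFS file handle into a vnode.  The handle is validated by
 * length and inode range, and the list of in-use nodes is searched for a
 * node whose id and generation number both match; only then is a vnode
 * allocated for it.
 */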
static int
tmpfs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
{
	boolean_t found;
	struct tmpfs_fid *tfhp;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;

	tmp = VFS_TO_TMPFS(mp);

	tfhp = (struct tmpfs_fid *)fhp;
	if (tfhp->tf_len != sizeof(struct tmpfs_fid))
		return EINVAL;

	if (tfhp->tf_id >= tmp->tm_nodes_max)
		return EINVAL;

	found = FALSE;

	TMPFS_LOCK(tmp);
	LIST_FOREACH(node, &tmp->tm_nodes_used, tn_entries) {
		if (node->tn_id == tfhp->tf_id &&
		    node->tn_gen == tfhp->tf_gen) {
			found = TRUE;
			break;
		}
	}
	TMPFS_UNLOCK(tmp);

	if (found)
		return (tmpfs_alloc_vp(mp, node, LK_EXCLUSIVE, vpp, curthread));

	return (EINVAL);
}

/* --------------------------------------------------------------------- */

/* ARGSUSED2 */
static int
tmpfs_statfs(struct mount *mp, struct statfs *sbp, struct thread *l)
{
	fsfilcnt_t freenodes;
	struct tmpfs_mount *tmp;

	tmp = VFS_TO_TMPFS(mp);

	sbp->f_iosize = PAGE_SIZE;
	sbp->f_bsize = PAGE_SIZE;

	sbp->f_blocks = TMPFS_PAGES_MAX(tmp);
	sbp->f_bavail = sbp->f_bfree = TMPFS_PAGES_AVAIL(tmp);

	freenodes = MIN(tmp->tm_nodes_max - tmp->tm_nodes_inuse,
	    TMPFS_PAGES_AVAIL(tmp) * PAGE_SIZE / sizeof(struct tmpfs_node));

	sbp->f_files = freenodes + tmp->tm_nodes_inuse;
	sbp->f_ffree = freenodes;
	/* sbp->f_owner = tmp->tn_uid; */

	return 0;
}

/* --------------------------------------------------------------------- */

/*
 * tmpfs vfs operations.
 */

struct vfsops tmpfs_vfsops = {
	.vfs_mount =	tmpfs_mount,
	.vfs_unmount =	tmpfs_unmount,
	.vfs_root =	tmpfs_root,
	.vfs_statfs =	tmpfs_statfs,
	.vfs_fhtovp =	tmpfs_fhtovp,
};
VFS_SET(tmpfs_vfsops, tmpfs, 0);