/* $NetBSD: tmpfs_subr.c,v 1.35 2007/07/09 21:10:50 ad Exp $ */

/*-
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
 * 2005 program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Efficient memory file system supporting functions.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/fnv_hash.h>
#include <sys/lock.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <fs/tmpfs/tmpfs.h>
#include <fs/tmpfs/tmpfs_fifoops.h>
#include <fs/tmpfs/tmpfs_vnops.h>

struct tmpfs_dir_cursor {
	struct tmpfs_dirent	*tdc_current;
	struct tmpfs_dirent	*tdc_tree;
};

SYSCTL_NODE(_vfs, OID_AUTO, tmpfs, CTLFLAG_RW, 0, "tmpfs file system");

static long tmpfs_pages_reserved = TMPFS_PAGES_MINRESERVED;

static int
sysctl_mem_reserved(SYSCTL_HANDLER_ARGS)
{
	int error;
	long pages, bytes;

	pages = *(long *)arg1;
	bytes = pages * PAGE_SIZE;

	error = sysctl_handle_long(oidp, &bytes, 0, req);
	if (error || !req->newptr)
		return (error);

	pages = bytes / PAGE_SIZE;
	if (pages < TMPFS_PAGES_MINRESERVED)
		return (EINVAL);

	*(long *)arg1 = pages;
	return (0);
}

SYSCTL_PROC(_vfs_tmpfs, OID_AUTO, memory_reserved, CTLTYPE_LONG|CTLFLAG_RW,
    &tmpfs_pages_reserved, 0, sysctl_mem_reserved, "L",
    "Amount of available memory and swap below which tmpfs growth stops");
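/*
 * Usage sketch for the knob above (not from the original sources): the
 * value is exposed in bytes but stored internally in pages, so written
 * values are rounded down to a whole number of pages.  Assuming a 4 KB
 * PAGE_SIZE,
 *
 *	sysctl vfs.tmpfs.memory_reserved=67108864
 *
 * would reserve 16384 pages; tmpfs_mem_avail() below then refuses to let
 * any tmpfs grow once free memory plus swap falls under that amount.
 */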
static __inline int tmpfs_dirtree_cmp(struct tmpfs_dirent *a,
    struct tmpfs_dirent *b);
RB_PROTOTYPE_STATIC(tmpfs_dir, tmpfs_dirent, uh.td_entries, tmpfs_dirtree_cmp);

size_t
tmpfs_mem_avail(void)
{
	vm_ooffset_t avail;

	avail = swap_pager_avail + vm_cnt.v_free_count - tmpfs_pages_reserved;
	if (__predict_false(avail < 0))
		avail = 0;
	return (avail);
}

size_t
tmpfs_pages_used(struct tmpfs_mount *tmp)
{
	const size_t node_size = sizeof(struct tmpfs_node) +
	    sizeof(struct tmpfs_dirent);
	size_t meta_pages;

	meta_pages = howmany((uintmax_t)tmp->tm_nodes_inuse * node_size,
	    PAGE_SIZE);
	return (meta_pages + tmp->tm_pages_used);
}

static size_t
tmpfs_pages_check_avail(struct tmpfs_mount *tmp, size_t req_pages)
{
	if (tmpfs_mem_avail() < req_pages)
		return (0);

	if (tmp->tm_pages_max != ULONG_MAX &&
	    tmp->tm_pages_max < req_pages + tmpfs_pages_used(tmp))
		return (0);

	return (1);
}

/*
 * Allocates a new node of type 'type' inside the 'tmp' mount point, with
 * its owner set to 'uid', its group to 'gid' and its mode set to 'mode',
 * using the credentials of the process 'p'.
 *
 * If the node type is set to 'VDIR', then the parent parameter must point
 * to the parent directory of the node being created.  It may only be NULL
 * while allocating the root node.
 *
 * If the node type is set to 'VBLK' or 'VCHR', then the rdev parameter
 * specifies the device the node represents.
 *
 * If the node type is set to 'VLNK', then the parameter target specifies
 * the file name of the target file for the symbolic link that is being
 * created.
 *
 * Note that new nodes are retrieved from the available list if it has
 * items or, if it is empty, from the node pool as long as there is enough
 * space to create them.
 *
 * Returns zero on success or an appropriate error code on failure.
 */
int
tmpfs_alloc_node(struct mount *mp, struct tmpfs_mount *tmp, enum vtype type,
    uid_t uid, gid_t gid, mode_t mode, struct tmpfs_node *parent,
    char *target, dev_t rdev, struct tmpfs_node **node)
{
	struct tmpfs_node *nnode;
	vm_object_t obj;

	/* If the root directory of the 'tmp' file system is not yet
	 * allocated, this must be the request to do it. */
	MPASS(IMPLIES(tmp->tm_root == NULL, parent == NULL && type == VDIR));
	KASSERT(tmp->tm_root == NULL || mp->mnt_writeopcount > 0,
	    ("creating node not under vn_start_write"));

	MPASS(IFF(type == VLNK, target != NULL));
	MPASS(IFF(type == VBLK || type == VCHR, rdev != VNOVAL));

	if (tmp->tm_nodes_inuse >= tmp->tm_nodes_max)
		return (ENOSPC);
	if (tmpfs_pages_check_avail(tmp, 1) == 0)
		return (ENOSPC);

	if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) {
		/*
		 * When a new tmpfs node is created for a fully
		 * constructed mount point, there must be a parent
		 * node, whose vnode is locked exclusively.  As a
		 * consequence, if the unmount is executing in
		 * parallel, vflush() cannot reclaim the parent vnode.
		 * Due to this, the check for the MNTK_UNMOUNT flag is
		 * not racy: if we did not see the MNTK_UNMOUNT flag,
		 * then tmp cannot be destroyed until node construction
		 * is finished and the parent vnode unlocked.
		 *
		 * Tmpfs does not need to instantiate new nodes during
		 * unmount.
		 */
		return (EBUSY);
	}

	nnode = (struct tmpfs_node *)uma_zalloc_arg(
	    tmp->tm_node_pool, tmp, M_WAITOK);
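	/*
	 * Note: with M_WAITOK the zone allocator is expected to sleep
	 * until memory becomes available rather than fail, so nnode
	 * does not need a NULL check here.
	 */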

	/* Generic initialization. */
	nnode->tn_type = type;
	vfs_timestamp(&nnode->tn_atime);
	nnode->tn_birthtime = nnode->tn_ctime = nnode->tn_mtime =
	    nnode->tn_atime;
	nnode->tn_uid = uid;
	nnode->tn_gid = gid;
	nnode->tn_mode = mode;
	nnode->tn_id = alloc_unr(tmp->tm_ino_unr);

	/* Type-specific initialization. */
	switch (nnode->tn_type) {
	case VBLK:
	case VCHR:
		nnode->tn_rdev = rdev;
		break;

	case VDIR:
		RB_INIT(&nnode->tn_dir.tn_dirhead);
		LIST_INIT(&nnode->tn_dir.tn_dupindex);
		MPASS(parent != nnode);
		MPASS(IMPLIES(parent == NULL, tmp->tm_root == NULL));
		nnode->tn_dir.tn_parent = (parent == NULL) ? nnode : parent;
		nnode->tn_dir.tn_readdir_lastn = 0;
		nnode->tn_dir.tn_readdir_lastp = NULL;
		nnode->tn_links++;
		TMPFS_NODE_LOCK(nnode->tn_dir.tn_parent);
		nnode->tn_dir.tn_parent->tn_links++;
		TMPFS_NODE_UNLOCK(nnode->tn_dir.tn_parent);
		break;

	case VFIFO:
		/* FALLTHROUGH */
	case VSOCK:
		break;

	case VLNK:
		MPASS(strlen(target) < MAXPATHLEN);
		nnode->tn_size = strlen(target);
		nnode->tn_link = malloc(nnode->tn_size, M_TMPFSNAME,
		    M_WAITOK);
		memcpy(nnode->tn_link, target, nnode->tn_size);
		break;

	case VREG:
		obj = nnode->tn_reg.tn_aobj =
		    vm_pager_allocate(OBJT_SWAP, NULL, 0, VM_PROT_DEFAULT, 0,
			NULL /* XXXKIB - tmpfs needs swap reservation */);
		VM_OBJECT_WLOCK(obj);
		/* OBJ_TMPFS is set together with the setting of vp->v_object */
		vm_object_set_flag(obj, OBJ_NOSPLIT | OBJ_TMPFS_NODE);
		vm_object_clear_flag(obj, OBJ_ONEMAPPING);
		VM_OBJECT_WUNLOCK(obj);
		break;

	default:
		panic("tmpfs_alloc_node: type %p %d", nnode, (int)nnode->tn_type);
	}

	TMPFS_LOCK(tmp);
	LIST_INSERT_HEAD(&tmp->tm_nodes_used, nnode, tn_entries);
	tmp->tm_nodes_inuse++;
	TMPFS_UNLOCK(tmp);

	*node = nnode;
	return 0;
}

/*
 * Destroys the node pointed to by node from the file system 'tmp'.
 * If the node does not belong to the given mount point, the results are
 * unpredictable.
 *
 * If the node references a directory, it must have no entries, because
 * their removal could require a recursive algorithm, something forbidden
 * in kernel space.  Furthermore, there is no need to provide such
 * functionality (recursive removal) because the only primitives offered
 * to the user are the removal of empty directories and the deletion of
 * individual files.
 *
 * Note that nodes are not really deleted; in fact, when a node has been
 * allocated, it cannot be deleted during the whole life of the file
 * system.  Instead, they are moved to the available list and remain there
 * until reused.
 */
void
tmpfs_free_node(struct tmpfs_mount *tmp, struct tmpfs_node *node)
{
	vm_object_t uobj;

#ifdef INVARIANTS
	TMPFS_NODE_LOCK(node);
	MPASS(node->tn_vnode == NULL);
	MPASS((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0);
	TMPFS_NODE_UNLOCK(node);
#endif

	TMPFS_LOCK(tmp);
	LIST_REMOVE(node, tn_entries);
	tmp->tm_nodes_inuse--;
	TMPFS_UNLOCK(tmp);

	switch (node->tn_type) {
	case VNON:
		/* Do not do anything.  VNON is provided to let the
		 * allocation routine clean itself easily by avoiding
		 * duplicating code in it. */
		/* FALLTHROUGH */
	case VBLK:
		/* FALLTHROUGH */
	case VCHR:
		/* FALLTHROUGH */
	case VDIR:
		/* FALLTHROUGH */
	case VFIFO:
		/* FALLTHROUGH */
	case VSOCK:
		break;

	case VLNK:
		free(node->tn_link, M_TMPFSNAME);
		break;

	case VREG:
		uobj = node->tn_reg.tn_aobj;
		if (uobj != NULL) {
			atomic_subtract_long(&tmp->tm_pages_used, uobj->size);
			KASSERT((uobj->flags & OBJ_TMPFS) == 0,
			    ("leaked OBJ_TMPFS node %p vm_obj %p", node, uobj));
			vm_object_deallocate(uobj);
		}
		break;

	default:
		panic("tmpfs_free_node: type %p %d", node, (int)node->tn_type);
	}

	free_unr(tmp->tm_ino_unr, node->tn_id);
	uma_zfree(tmp->tm_node_pool, node);
}
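/*
 * Overview of the directory cookie scheme used below (a summary derived
 * from the code, not from the original sources): the cookies
 * TMPFS_DIRCOOKIE_DOT, TMPFS_DIRCOOKIE_DOTDOT and TMPFS_DIRCOOKIE_EOF
 * are reserved for '.', '..' and end-of-directory.  Every real entry
 * gets a 30-bit FNV hash of its name, clamped to at least
 * TMPFS_DIRCOOKIE_MIN, which doubles as its cookie and as its key in the
 * per-directory red-black tree.  Entries whose names collide on the same
 * hash are chained under a "duphead" entry and receive cookies with the
 * TMPFS_DIRCOOKIE_DUP bit set, allocated from a per-directory dupindex
 * list.
 */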
static __inline uint32_t
tmpfs_dirent_hash(const char *name, u_int len)
{
	uint32_t hash;

	hash = fnv_32_buf(name, len, FNV1_32_INIT + len) & TMPFS_DIRCOOKIE_MASK;
#ifdef TMPFS_DEBUG_DIRCOOKIE_DUP
	hash &= 0xf;
#endif
	if (hash < TMPFS_DIRCOOKIE_MIN)
		hash += TMPFS_DIRCOOKIE_MIN;

	return (hash);
}

static __inline off_t
tmpfs_dirent_cookie(struct tmpfs_dirent *de)
{
	if (de == NULL)
		return (TMPFS_DIRCOOKIE_EOF);

	MPASS(de->td_cookie >= TMPFS_DIRCOOKIE_MIN);

	return (de->td_cookie);
}

static __inline boolean_t
tmpfs_dirent_dup(struct tmpfs_dirent *de)
{
	return ((de->td_cookie & TMPFS_DIRCOOKIE_DUP) != 0);
}

static __inline boolean_t
tmpfs_dirent_duphead(struct tmpfs_dirent *de)
{
	return ((de->td_cookie & TMPFS_DIRCOOKIE_DUPHEAD) != 0);
}

void
tmpfs_dirent_init(struct tmpfs_dirent *de, const char *name, u_int namelen)
{
	de->td_hash = de->td_cookie = tmpfs_dirent_hash(name, namelen);
	memcpy(de->ud.td_name, name, namelen);
	de->td_namelen = namelen;
}

/*
 * Allocates a new directory entry for the node 'node' with the name
 * 'name'.  The new directory entry is returned in *de.
 *
 * The link count of node is increased by one to reflect the new object
 * referencing it.
 *
 * Returns zero on success or an appropriate error code on failure.
 */
int
tmpfs_alloc_dirent(struct tmpfs_mount *tmp, struct tmpfs_node *node,
    const char *name, u_int len, struct tmpfs_dirent **de)
{
	struct tmpfs_dirent *nde;

	nde = uma_zalloc(tmp->tm_dirent_pool, M_WAITOK);
	nde->td_node = node;
	if (name != NULL) {
		nde->ud.td_name = malloc(len, M_TMPFSNAME, M_WAITOK);
		tmpfs_dirent_init(nde, name, len);
	} else
		nde->td_namelen = 0;
	if (node != NULL)
		node->tn_links++;

	*de = nde;

	return 0;
}
/*
 * Frees a directory entry.  It is the caller's responsibility to destroy
 * the node referenced by it if needed.
 *
 * The link count of node is decreased by one to reflect the removal of an
 * object that referenced it.  This only happens if the entry actually
 * points to a node; otherwise the function does not access the node
 * referred to by the directory entry, as it may already have been
 * released from the outside.
 */
void
tmpfs_free_dirent(struct tmpfs_mount *tmp, struct tmpfs_dirent *de)
{
	struct tmpfs_node *node;

	node = de->td_node;
	if (node != NULL) {
		MPASS(node->tn_links > 0);
		node->tn_links--;
	}
	if (!tmpfs_dirent_duphead(de) && de->ud.td_name != NULL)
		free(de->ud.td_name, M_TMPFSNAME);
	uma_zfree(tmp->tm_dirent_pool, de);
}

void
tmpfs_destroy_vobject(struct vnode *vp, vm_object_t obj)
{

	ASSERT_VOP_ELOCKED(vp, "tmpfs_destroy_vobject");
	if (vp->v_type != VREG || obj == NULL)
		return;

	VM_OBJECT_WLOCK(obj);
	VI_LOCK(vp);
	vm_object_clear_flag(obj, OBJ_TMPFS);
	obj->un_pager.swp.swp_tmpfs = NULL;
	VI_UNLOCK(vp);
	VM_OBJECT_WUNLOCK(obj);
}

/*
 * Need to clear v_object for insmntque failure.
 */
static void
tmpfs_insmntque_dtr(struct vnode *vp, void *dtr_arg)
{

	tmpfs_destroy_vobject(vp, vp->v_object);
	vp->v_object = NULL;
	vp->v_data = NULL;
	vp->v_op = &dead_vnodeops;
	vgone(vp);
	vput(vp);
}

/*
 * Allocates a new vnode for the node 'node' or returns a new reference to
 * an existing one if the node already had a vnode referencing it.  The
 * resulting locked vnode is returned in *vpp.
 *
 * Returns zero on success or an appropriate error code on failure.
 */
int
tmpfs_alloc_vp(struct mount *mp, struct tmpfs_node *node, int lkflag,
    struct vnode **vpp)
{
	struct vnode *vp;
	vm_object_t object;
	int error;

	error = 0;
loop:
	TMPFS_NODE_LOCK(node);
loop1:
	if ((vp = node->tn_vnode) != NULL) {
		MPASS((node->tn_vpstate & TMPFS_VNODE_DOOMED) == 0);
		VI_LOCK(vp);
		if ((node->tn_type == VDIR && node->tn_dir.tn_parent == NULL) ||
		    ((vp->v_iflag & VI_DOOMED) != 0 &&
		    (lkflag & LK_NOWAIT) != 0)) {
			VI_UNLOCK(vp);
			TMPFS_NODE_UNLOCK(node);
			error = ENOENT;
			vp = NULL;
			goto out;
		}
		if ((vp->v_iflag & VI_DOOMED) != 0) {
			VI_UNLOCK(vp);
			node->tn_vpstate |= TMPFS_VNODE_WRECLAIM;
			while ((node->tn_vpstate & TMPFS_VNODE_WRECLAIM) != 0) {
				msleep(&node->tn_vnode, TMPFS_NODE_MTX(node),
				    0, "tmpfsE", 0);
			}
			goto loop1;
		}
		TMPFS_NODE_UNLOCK(node);
		error = vget(vp, lkflag | LK_INTERLOCK, curthread);
		if (error == ENOENT)
			goto loop;
		if (error != 0) {
			vp = NULL;
			goto out;
		}

		/*
		 * Make sure the vnode is still there after
		 * getting the interlock to avoid racing a free.
		 */
		if (node->tn_vnode == NULL || node->tn_vnode != vp) {
			vput(vp);
			goto loop;
		}

		goto out;
	}

	if ((node->tn_vpstate & TMPFS_VNODE_DOOMED) ||
	    (node->tn_type == VDIR && node->tn_dir.tn_parent == NULL)) {
		TMPFS_NODE_UNLOCK(node);
		error = ENOENT;
		vp = NULL;
		goto out;
	}

	/*
	 * Otherwise serialize vnode allocation: only one thread may call
	 * getnewvnode() for this node, since that call can block.
	 */
	if (node->tn_vpstate & TMPFS_VNODE_ALLOCATING) {
		node->tn_vpstate |= TMPFS_VNODE_WANT;
		error = msleep((caddr_t) &node->tn_vpstate,
		    TMPFS_NODE_MTX(node), PDROP | PCATCH,
		    "tmpfs_alloc_vp", 0);
		if (error)
			return error;

		goto loop;
	} else
		node->tn_vpstate |= TMPFS_VNODE_ALLOCATING;

	TMPFS_NODE_UNLOCK(node);
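	/*
	 * At this point the node lock has been dropped: getnewvnode()
	 * may sleep, and the TMPFS_VNODE_ALLOCATING flag set above keeps
	 * any concurrent caller waiting in the msleep() loop until the
	 * vnode below is fully constructed.
	 */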
	/* Get a new vnode and associate it with our node. */
	error = getnewvnode("tmpfs", mp, &tmpfs_vnodeop_entries, &vp);
	if (error != 0)
		goto unlock;
	MPASS(vp != NULL);

	/* lkflag is ignored, the lock is exclusive */
	(void) vn_lock(vp, lkflag | LK_RETRY);

	vp->v_data = node;
	vp->v_type = node->tn_type;

	/* Type-specific initialization. */
	switch (node->tn_type) {
	case VBLK:
		/* FALLTHROUGH */
	case VCHR:
		/* FALLTHROUGH */
	case VLNK:
		/* FALLTHROUGH */
	case VSOCK:
		break;
	case VFIFO:
		vp->v_op = &tmpfs_fifoop_entries;
		break;
	case VREG:
		object = node->tn_reg.tn_aobj;
		VM_OBJECT_WLOCK(object);
		VI_LOCK(vp);
		KASSERT(vp->v_object == NULL, ("Not NULL v_object in tmpfs"));
		vp->v_object = object;
		object->un_pager.swp.swp_tmpfs = vp;
		vm_object_set_flag(object, OBJ_TMPFS);
		VI_UNLOCK(vp);
		VM_OBJECT_WUNLOCK(object);
		break;
	case VDIR:
		MPASS(node->tn_dir.tn_parent != NULL);
		if (node->tn_dir.tn_parent == node)
			vp->v_vflag |= VV_ROOT;
		break;

	default:
		panic("tmpfs_alloc_vp: type %p %d", node, (int)node->tn_type);
	}
	if (vp->v_type != VFIFO)
		VN_LOCK_ASHARE(vp);

	error = insmntque1(vp, mp, tmpfs_insmntque_dtr, NULL);
	if (error)
		vp = NULL;

unlock:
	TMPFS_NODE_LOCK(node);

	MPASS(node->tn_vpstate & TMPFS_VNODE_ALLOCATING);
	node->tn_vpstate &= ~TMPFS_VNODE_ALLOCATING;
	node->tn_vnode = vp;

	if (node->tn_vpstate & TMPFS_VNODE_WANT) {
		node->tn_vpstate &= ~TMPFS_VNODE_WANT;
		TMPFS_NODE_UNLOCK(node);
		wakeup((caddr_t) &node->tn_vpstate);
	} else
		TMPFS_NODE_UNLOCK(node);

out:
	*vpp = vp;

#ifdef INVARIANTS
	if (error == 0) {
		MPASS(*vpp != NULL && VOP_ISLOCKED(*vpp));
		TMPFS_NODE_LOCK(node);
		MPASS(*vpp == node->tn_vnode);
		TMPFS_NODE_UNLOCK(node);
	}
#endif

	return error;
}

/*
 * Destroys the association between the vnode vp and the node it
 * references.
 */
void
tmpfs_free_vp(struct vnode *vp)
{
	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);

	TMPFS_NODE_ASSERT_LOCKED(node);
	node->tn_vnode = NULL;
	if ((node->tn_vpstate & TMPFS_VNODE_WRECLAIM) != 0)
		wakeup(&node->tn_vnode);
	node->tn_vpstate &= ~TMPFS_VNODE_WRECLAIM;
	vp->v_data = NULL;
}

/*
 * Allocates a new file of type 'type' and adds it to the parent directory
 * 'dvp'; this addition is done using the component name given in 'cnp'.
 * The ownership of the new file is automatically assigned based on the
 * credentials of the caller (through 'cnp'), the group is set based on
 * the parent directory and the mode is determined from the 'vap' argument.
 * If successful, *vpp holds a vnode to the newly created file and zero
 * is returned.  Otherwise *vpp is NULL and the function returns an
 * appropriate error code.
 */
int
tmpfs_alloc_file(struct vnode *dvp, struct vnode **vpp, struct vattr *vap,
    struct componentname *cnp, char *target)
{
	int error;
	struct tmpfs_dirent *de;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *dnode;
	struct tmpfs_node *node;
	struct tmpfs_node *parent;

	MPASS(VOP_ISLOCKED(dvp));
	MPASS(cnp->cn_flags & HASBUF);

	tmp = VFS_TO_TMPFS(dvp->v_mount);
	dnode = VP_TO_TMPFS_DIR(dvp);
	*vpp = NULL;
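	/*
	 * The node, its directory entry and its vnode are allocated in
	 * that order below; each error path undoes the allocations made
	 * so far, and the entry is attached to the directory only once
	 * nothing can fail anymore.
	 */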
	/* If the entry we are creating is a directory, the parent gains a
	 * new link, so ensure that we do not overflow the maximum number
	 * of links imposed by the system. */
	if (vap->va_type == VDIR) {
		MPASS(dnode->tn_links <= LINK_MAX);
		if (dnode->tn_links == LINK_MAX) {
			return (EMLINK);
		}

		parent = dnode;
		MPASS(parent != NULL);
	} else
		parent = NULL;

	/* Allocate a node that represents the new file. */
	error = tmpfs_alloc_node(dvp->v_mount, tmp, vap->va_type,
	    cnp->cn_cred->cr_uid,
	    dnode->tn_gid, vap->va_mode, parent, target, vap->va_rdev, &node);
	if (error != 0)
		return (error);

	/* Allocate a directory entry that points to the new file. */
	error = tmpfs_alloc_dirent(tmp, node, cnp->cn_nameptr, cnp->cn_namelen,
	    &de);
	if (error != 0) {
		tmpfs_free_node(tmp, node);
		return (error);
	}

	/* Allocate a vnode for the new file. */
	error = tmpfs_alloc_vp(dvp->v_mount, node, LK_EXCLUSIVE, vpp);
	if (error != 0) {
		tmpfs_free_dirent(tmp, de);
		tmpfs_free_node(tmp, node);
		return (error);
	}

	/* Now that all required items are allocated, we can proceed to
	 * insert the new node into the directory, an operation that
	 * cannot fail. */
	if (cnp->cn_flags & ISWHITEOUT)
		tmpfs_dir_whiteout_remove(dvp, cnp);
	tmpfs_dir_attach(dvp, de);
	return (0);
}

static struct tmpfs_dirent *
tmpfs_dir_first(struct tmpfs_node *dnode, struct tmpfs_dir_cursor *dc)
{
	struct tmpfs_dirent *de;

	de = RB_MIN(tmpfs_dir, &dnode->tn_dir.tn_dirhead);
	dc->tdc_tree = de;
	if (de != NULL && tmpfs_dirent_duphead(de))
		de = LIST_FIRST(&de->ud.td_duphead);
	dc->tdc_current = de;

	return (dc->tdc_current);
}

static struct tmpfs_dirent *
tmpfs_dir_next(struct tmpfs_node *dnode, struct tmpfs_dir_cursor *dc)
{
	struct tmpfs_dirent *de;

	MPASS(dc->tdc_tree != NULL);
	if (tmpfs_dirent_dup(dc->tdc_current)) {
		dc->tdc_current = LIST_NEXT(dc->tdc_current, uh.td_dup.entries);
		if (dc->tdc_current != NULL)
			return (dc->tdc_current);
	}
	dc->tdc_tree = dc->tdc_current = RB_NEXT(tmpfs_dir,
	    &dnode->tn_dir.tn_dirhead, dc->tdc_tree);
	if ((de = dc->tdc_current) != NULL && tmpfs_dirent_duphead(de)) {
		dc->tdc_current = LIST_FIRST(&de->ud.td_duphead);
		MPASS(dc->tdc_current != NULL);
	}

	return (dc->tdc_current);
}

/* Lookup directory entry in RB-Tree. Function may return duphead entry. */
static struct tmpfs_dirent *
tmpfs_dir_xlookup_hash(struct tmpfs_node *dnode, uint32_t hash)
{
	struct tmpfs_dirent *de, dekey;

	dekey.td_hash = hash;
	de = RB_FIND(tmpfs_dir, &dnode->tn_dir.tn_dirhead, &dekey);
	return (de);
}
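/*
 * Usage sketch for the cursor helpers above (not from the original
 * sources): a full traversal that transparently descends into duplicate
 * chains looks like
 *
 *	struct tmpfs_dir_cursor dc;
 *	struct tmpfs_dirent *de;
 *
 *	for (de = tmpfs_dir_first(dnode, &dc); de != NULL;
 *	    de = tmpfs_dir_next(dnode, &dc))
 *		...;
 *
 * tdc_tree tracks the position in the red-black tree while tdc_current
 * may point into a duphead's list.
 */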
/* Lookup directory entry by cookie, initialize directory cursor accordingly. */
static struct tmpfs_dirent *
tmpfs_dir_lookup_cookie(struct tmpfs_node *node, off_t cookie,
    struct tmpfs_dir_cursor *dc)
{
	struct tmpfs_dir *dirhead = &node->tn_dir.tn_dirhead;
	struct tmpfs_dirent *de, dekey;

	MPASS(cookie >= TMPFS_DIRCOOKIE_MIN);

	if (cookie == node->tn_dir.tn_readdir_lastn &&
	    (de = node->tn_dir.tn_readdir_lastp) != NULL) {
		/* Protect against possible race, tn_readdir_last[pn]
		 * may be updated with only shared vnode lock held. */
		if (cookie == tmpfs_dirent_cookie(de))
			goto out;
	}

	if ((cookie & TMPFS_DIRCOOKIE_DUP) != 0) {
		LIST_FOREACH(de, &node->tn_dir.tn_dupindex,
		    uh.td_dup.index_entries) {
			MPASS(tmpfs_dirent_dup(de));
			if (de->td_cookie == cookie)
				goto out;
			/* dupindex list is sorted. */
			if (de->td_cookie < cookie) {
				de = NULL;
				goto out;
			}
		}
		MPASS(de == NULL);
		goto out;
	}

	if ((cookie & TMPFS_DIRCOOKIE_MASK) != cookie) {
		de = NULL;
	} else {
		dekey.td_hash = cookie;
		/* Recover if direntry for cookie was removed */
		de = RB_NFIND(tmpfs_dir, dirhead, &dekey);
	}
	dc->tdc_tree = de;
	dc->tdc_current = de;
	if (de != NULL && tmpfs_dirent_duphead(de)) {
		dc->tdc_current = LIST_FIRST(&de->ud.td_duphead);
		MPASS(dc->tdc_current != NULL);
	}
	return (dc->tdc_current);

out:
	dc->tdc_tree = de;
	dc->tdc_current = de;
	if (de != NULL && tmpfs_dirent_dup(de))
		dc->tdc_tree = tmpfs_dir_xlookup_hash(node,
		    de->td_hash);
	return (dc->tdc_current);
}

/*
 * Looks for a directory entry in the directory represented by node.
 * 'cnp' describes the name of the entry to look for.  Note that the .
 * and .. components are not allowed as they do not physically exist
 * within directories.
 *
 * Returns a pointer to the entry when found, otherwise NULL.
 */
struct tmpfs_dirent *
tmpfs_dir_lookup(struct tmpfs_node *node, struct tmpfs_node *f,
    struct componentname *cnp)
{
	struct tmpfs_dir_duphead *duphead;
	struct tmpfs_dirent *de;
	uint32_t hash;

	MPASS(IMPLIES(cnp->cn_namelen == 1, cnp->cn_nameptr[0] != '.'));
	MPASS(IMPLIES(cnp->cn_namelen == 2, !(cnp->cn_nameptr[0] == '.' &&
	    cnp->cn_nameptr[1] == '.')));
	TMPFS_VALIDATE_DIR(node);

	hash = tmpfs_dirent_hash(cnp->cn_nameptr, cnp->cn_namelen);
	de = tmpfs_dir_xlookup_hash(node, hash);
	if (de != NULL && tmpfs_dirent_duphead(de)) {
		duphead = &de->ud.td_duphead;
		LIST_FOREACH(de, duphead, uh.td_dup.entries) {
			if (TMPFS_DIRENT_MATCHES(de, cnp->cn_nameptr,
			    cnp->cn_namelen))
				break;
		}
	} else if (de != NULL) {
		if (!TMPFS_DIRENT_MATCHES(de, cnp->cn_nameptr,
		    cnp->cn_namelen))
			de = NULL;
	}
	if (de != NULL && f != NULL && de->td_node != f)
		de = NULL;

	return (de);
}
/*
 * Attach duplicate-cookie directory entry nde to dnode and insert to dupindex
 * list, allocate new cookie value.
 */
static void
tmpfs_dir_attach_dup(struct tmpfs_node *dnode,
    struct tmpfs_dir_duphead *duphead, struct tmpfs_dirent *nde)
{
	struct tmpfs_dir_duphead *dupindex;
	struct tmpfs_dirent *de, *pde;

	dupindex = &dnode->tn_dir.tn_dupindex;
	de = LIST_FIRST(dupindex);
	if (de == NULL || de->td_cookie < TMPFS_DIRCOOKIE_DUP_MAX) {
		if (de == NULL)
			nde->td_cookie = TMPFS_DIRCOOKIE_DUP_MIN;
		else
			nde->td_cookie = de->td_cookie + 1;
		MPASS(tmpfs_dirent_dup(nde));
		LIST_INSERT_HEAD(dupindex, nde, uh.td_dup.index_entries);
		LIST_INSERT_HEAD(duphead, nde, uh.td_dup.entries);
		return;
	}

	/*
	 * Cookie numbers are near exhaustion.  Scan dupindex list for unused
	 * numbers.  dupindex list is sorted in descending order.  Keep it so
	 * after inserting nde.
	 */
	while (1) {
		pde = de;
		de = LIST_NEXT(de, uh.td_dup.index_entries);
		if (de == NULL && pde->td_cookie != TMPFS_DIRCOOKIE_DUP_MIN) {
			/*
			 * Last element of the index doesn't have minimal cookie
			 * value, use it.
			 */
			nde->td_cookie = TMPFS_DIRCOOKIE_DUP_MIN;
			LIST_INSERT_AFTER(pde, nde, uh.td_dup.index_entries);
			LIST_INSERT_HEAD(duphead, nde, uh.td_dup.entries);
			return;
		} else if (de == NULL) {
			/*
			 * We are so lucky to have 2^30 hash duplicates in a
			 * single directory :)  Return the largest possible
			 * cookie value.  It should be fine except for possible
			 * issues with VOP_READDIR restart.
			 */
			nde->td_cookie = TMPFS_DIRCOOKIE_DUP_MAX;
			LIST_INSERT_HEAD(dupindex, nde,
			    uh.td_dup.index_entries);
			LIST_INSERT_HEAD(duphead, nde, uh.td_dup.entries);
			return;
		}
		if (de->td_cookie + 1 == pde->td_cookie ||
		    de->td_cookie >= TMPFS_DIRCOOKIE_DUP_MAX)
			continue;	/* No hole or invalid cookie. */
		nde->td_cookie = de->td_cookie + 1;
		MPASS(tmpfs_dirent_dup(nde));
		MPASS(pde->td_cookie > nde->td_cookie);
		MPASS(nde->td_cookie > de->td_cookie);
		LIST_INSERT_BEFORE(de, nde, uh.td_dup.index_entries);
		LIST_INSERT_HEAD(duphead, nde, uh.td_dup.entries);
		return;
	}
}

/*
 * Attaches the directory entry de to the directory represented by vp.
 * Note that this does not change the link count of the node pointed to
 * by the directory entry, as this is done by tmpfs_alloc_dirent.
 */
void
tmpfs_dir_attach(struct vnode *vp, struct tmpfs_dirent *de)
{
	struct tmpfs_node *dnode;
	struct tmpfs_dirent *xde, *nde;

	ASSERT_VOP_ELOCKED(vp, __func__);
	MPASS(de->td_namelen > 0);
	MPASS(de->td_hash >= TMPFS_DIRCOOKIE_MIN);
	MPASS(de->td_cookie == de->td_hash);

	dnode = VP_TO_TMPFS_DIR(vp);
	dnode->tn_dir.tn_readdir_lastn = 0;
	dnode->tn_dir.tn_readdir_lastp = NULL;

	MPASS(!tmpfs_dirent_dup(de));
	xde = RB_INSERT(tmpfs_dir, &dnode->tn_dir.tn_dirhead, de);
	if (xde != NULL && tmpfs_dirent_duphead(xde))
		tmpfs_dir_attach_dup(dnode, &xde->ud.td_duphead, de);
	else if (xde != NULL) {
		/*
		 * Allocate new duphead.  Swap xde with duphead to avoid
		 * adding/removing elements with the same hash.
		 */
		MPASS(!tmpfs_dirent_dup(xde));
		tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), NULL, NULL, 0,
		    &nde);
		/* *nde = *xde; XXX gcc 4.2.1 may generate invalid code. */
		memcpy(nde, xde, sizeof(*xde));
		xde->td_cookie |= TMPFS_DIRCOOKIE_DUPHEAD;
		LIST_INIT(&xde->ud.td_duphead);
		xde->td_namelen = 0;
		xde->td_node = NULL;
		tmpfs_dir_attach_dup(dnode, &xde->ud.td_duphead, nde);
		tmpfs_dir_attach_dup(dnode, &xde->ud.td_duphead, de);
	}
	dnode->tn_size += sizeof(struct tmpfs_dirent);
	dnode->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED |
	    TMPFS_NODE_MODIFIED;
	tmpfs_update(vp);
}
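/*
 * Both attach above and detach below reset tn_readdir_lastn and
 * tn_readdir_lastp: the cached cookie and entry pointer may refer to an
 * entry whose cookie is about to change (duplicate promotion or
 * demotion) or that is about to be freed, so a stale cache could
 * misdirect the next readdir.
 */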
/*
 * Detaches the directory entry de from the directory represented by vp.
 * Note that this does not change the link count of the node pointed to
 * by the directory entry, as this is done by tmpfs_free_dirent.
 */
void
tmpfs_dir_detach(struct vnode *vp, struct tmpfs_dirent *de)
{
	struct tmpfs_mount *tmp;
	struct tmpfs_dir *head;
	struct tmpfs_node *dnode;
	struct tmpfs_dirent *xde;

	ASSERT_VOP_ELOCKED(vp, __func__);

	dnode = VP_TO_TMPFS_DIR(vp);
	head = &dnode->tn_dir.tn_dirhead;
	dnode->tn_dir.tn_readdir_lastn = 0;
	dnode->tn_dir.tn_readdir_lastp = NULL;

	if (tmpfs_dirent_dup(de)) {
		/* Remove duphead if de was last entry. */
		if (LIST_NEXT(de, uh.td_dup.entries) == NULL) {
			xde = tmpfs_dir_xlookup_hash(dnode, de->td_hash);
			MPASS(tmpfs_dirent_duphead(xde));
		} else
			xde = NULL;
		LIST_REMOVE(de, uh.td_dup.entries);
		LIST_REMOVE(de, uh.td_dup.index_entries);
		if (xde != NULL) {
			if (LIST_EMPTY(&xde->ud.td_duphead)) {
				RB_REMOVE(tmpfs_dir, head, xde);
				tmp = VFS_TO_TMPFS(vp->v_mount);
				MPASS(xde->td_node == NULL);
				tmpfs_free_dirent(tmp, xde);
			}
		}
		de->td_cookie = de->td_hash;
	} else
		RB_REMOVE(tmpfs_dir, head, de);

	dnode->tn_size -= sizeof(struct tmpfs_dirent);
	dnode->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED |
	    TMPFS_NODE_MODIFIED;
	tmpfs_update(vp);
}

void
tmpfs_dir_destroy(struct tmpfs_mount *tmp, struct tmpfs_node *dnode)
{
	struct tmpfs_dirent *de, *dde, *nde;

	RB_FOREACH_SAFE(de, tmpfs_dir, &dnode->tn_dir.tn_dirhead, nde) {
		RB_REMOVE(tmpfs_dir, &dnode->tn_dir.tn_dirhead, de);
		/* Node may already be destroyed. */
		de->td_node = NULL;
		if (tmpfs_dirent_duphead(de)) {
			while ((dde = LIST_FIRST(&de->ud.td_duphead)) != NULL) {
				LIST_REMOVE(dde, uh.td_dup.entries);
				dde->td_node = NULL;
				tmpfs_free_dirent(tmp, dde);
			}
		}
		tmpfs_free_dirent(tmp, de);
	}
}

/*
 * Helper function for tmpfs_readdir.  Creates a '.' entry for the given
 * directory and returns it in the uio space.  The function returns 0
 * on success, EJUSTRETURN if there was not enough space in the uio
 * structure to hold the directory entry, or an appropriate error code if
 * another error happens.
 */
static int
tmpfs_dir_getdotdent(struct tmpfs_node *node, struct uio *uio)
{
	int error;
	struct dirent dent;

	TMPFS_VALIDATE_DIR(node);
	MPASS(uio->uio_offset == TMPFS_DIRCOOKIE_DOT);

	dent.d_fileno = node->tn_id;
	dent.d_type = DT_DIR;
	dent.d_namlen = 1;
	dent.d_name[0] = '.';
	dent.d_name[1] = '\0';
	dent.d_reclen = GENERIC_DIRSIZ(&dent);

	if (dent.d_reclen > uio->uio_resid)
		error = EJUSTRETURN;
	else
		error = uiomove(&dent, dent.d_reclen, uio);

	tmpfs_set_status(node, TMPFS_NODE_ACCESSED);

	return (error);
}
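/*
 * A note on the readdir helpers (derived from the code, not the original
 * sources): GENERIC_DIRSIZ() computes the record length of a struct
 * dirent for the given d_namlen, so "d_reclen > uio_resid" is the
 * canonical does-the-next-record-fit test, and EJUSTRETURN tells the
 * caller to stop filling the buffer without reporting an error to
 * userland.
 */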
/*
 * Helper function for tmpfs_readdir.  Creates a '..' entry for the given
 * directory and returns it in the uio space.  The function returns 0
 * on success, EJUSTRETURN if there was not enough space in the uio
 * structure to hold the directory entry, or an appropriate error code if
 * another error happens.
 */
static int
tmpfs_dir_getdotdotdent(struct tmpfs_node *node, struct uio *uio)
{
	int error;
	struct dirent dent;

	TMPFS_VALIDATE_DIR(node);
	MPASS(uio->uio_offset == TMPFS_DIRCOOKIE_DOTDOT);

	/*
	 * Return ENOENT if the current node is already removed.
	 */
	TMPFS_ASSERT_LOCKED(node);
	if (node->tn_dir.tn_parent == NULL) {
		return (ENOENT);
	}

	TMPFS_NODE_LOCK(node->tn_dir.tn_parent);
	dent.d_fileno = node->tn_dir.tn_parent->tn_id;
	TMPFS_NODE_UNLOCK(node->tn_dir.tn_parent);

	dent.d_type = DT_DIR;
	dent.d_namlen = 2;
	dent.d_name[0] = '.';
	dent.d_name[1] = '.';
	dent.d_name[2] = '\0';
	dent.d_reclen = GENERIC_DIRSIZ(&dent);

	if (dent.d_reclen > uio->uio_resid)
		error = EJUSTRETURN;
	else
		error = uiomove(&dent, dent.d_reclen, uio);

	tmpfs_set_status(node, TMPFS_NODE_ACCESSED);

	return (error);
}

/*
 * Helper function for tmpfs_readdir.  Returns as many directory entries
 * as can fit in the uio space.  The read starts at uio->uio_offset.
 * The function returns 0 on success, EJUSTRETURN if there was not enough
 * space in the uio structure to hold a directory entry, or an appropriate
 * error code if another error happens.
 */
int
tmpfs_dir_getdents(struct tmpfs_node *node, struct uio *uio, int maxcookies,
    u_long *cookies, int *ncookies)
{
	struct tmpfs_dir_cursor dc;
	struct tmpfs_dirent *de;
	off_t off;
	int error;

	TMPFS_VALIDATE_DIR(node);

	off = 0;

	/*
	 * Lookup the node from the current offset.  The starting offset of
	 * 0 will lookup both '.' and '..', and then the first real entry,
	 * or EOF if there are none.  Then find all entries for the dir that
	 * fit into the buffer.  Once no more entries are found (de == NULL),
	 * the offset is set to TMPFS_DIRCOOKIE_EOF, which will cause the next
	 * call to return 0.
	 */
	switch (uio->uio_offset) {
	case TMPFS_DIRCOOKIE_DOT:
		error = tmpfs_dir_getdotdent(node, uio);
		if (error != 0)
			return (error);
		uio->uio_offset = TMPFS_DIRCOOKIE_DOTDOT;
		if (cookies != NULL)
			cookies[(*ncookies)++] = off = uio->uio_offset;
		/* FALLTHROUGH */
	case TMPFS_DIRCOOKIE_DOTDOT:
		error = tmpfs_dir_getdotdotdent(node, uio);
		if (error != 0)
			return (error);
		de = tmpfs_dir_first(node, &dc);
		uio->uio_offset = tmpfs_dirent_cookie(de);
		if (cookies != NULL)
			cookies[(*ncookies)++] = off = uio->uio_offset;
		/* EOF. */
		if (de == NULL)
			return (0);
		break;
	case TMPFS_DIRCOOKIE_EOF:
		return (0);
	default:
		de = tmpfs_dir_lookup_cookie(node, uio->uio_offset, &dc);
		if (de == NULL)
			return (EINVAL);
		if (cookies != NULL)
			off = tmpfs_dirent_cookie(de);
	}
	/* Read as many entries as possible; i.e., until we reach the end of
	 * the directory or we exhaust uio space. */
	do {
		struct dirent d;

		/* Create a dirent structure representing the current
		 * tmpfs_node and fill it. */
		if (de->td_node == NULL) {
			d.d_fileno = 1;
			d.d_type = DT_WHT;
		} else {
			d.d_fileno = de->td_node->tn_id;
			switch (de->td_node->tn_type) {
			case VBLK:
				d.d_type = DT_BLK;
				break;

			case VCHR:
				d.d_type = DT_CHR;
				break;

			case VDIR:
				d.d_type = DT_DIR;
				break;

			case VFIFO:
				d.d_type = DT_FIFO;
				break;

			case VLNK:
				d.d_type = DT_LNK;
				break;

			case VREG:
				d.d_type = DT_REG;
				break;

			case VSOCK:
				d.d_type = DT_SOCK;
				break;

			default:
				panic("tmpfs_dir_getdents: type %p %d",
				    de->td_node, (int)de->td_node->tn_type);
			}
		}
		d.d_namlen = de->td_namelen;
		MPASS(de->td_namelen < sizeof(d.d_name));
		(void)memcpy(d.d_name, de->ud.td_name, de->td_namelen);
		d.d_name[de->td_namelen] = '\0';
		d.d_reclen = GENERIC_DIRSIZ(&d);

		/* Stop reading if the directory entry we are processing is
		 * bigger than the amount of data that can be returned. */
		if (d.d_reclen > uio->uio_resid) {
			error = EJUSTRETURN;
			break;
		}

		/* Copy the new dirent structure into the output buffer and
		 * advance pointers. */
		error = uiomove(&d, d.d_reclen, uio);
		if (error == 0) {
			de = tmpfs_dir_next(node, &dc);
			if (cookies != NULL) {
				off = tmpfs_dirent_cookie(de);
				MPASS(*ncookies < maxcookies);
				cookies[(*ncookies)++] = off;
			}
		}
	} while (error == 0 && uio->uio_resid > 0 && de != NULL);

	/* Skip setting off when using cookies as it is already done above. */
	if (cookies == NULL)
		off = tmpfs_dirent_cookie(de);

	/* Update the offset and cache. */
	uio->uio_offset = off;
	node->tn_dir.tn_readdir_lastn = off;
	node->tn_dir.tn_readdir_lastp = de;

	tmpfs_set_status(node, TMPFS_NODE_ACCESSED);
	return error;
}

int
tmpfs_dir_whiteout_add(struct vnode *dvp, struct componentname *cnp)
{
	struct tmpfs_dirent *de;
	int error;

	error = tmpfs_alloc_dirent(VFS_TO_TMPFS(dvp->v_mount), NULL,
	    cnp->cn_nameptr, cnp->cn_namelen, &de);
	if (error != 0)
		return (error);
	tmpfs_dir_attach(dvp, de);
	return (0);
}

void
tmpfs_dir_whiteout_remove(struct vnode *dvp, struct componentname *cnp)
{
	struct tmpfs_dirent *de;

	de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(dvp), NULL, cnp);
	MPASS(de != NULL && de->td_node == NULL);
	tmpfs_dir_detach(dvp, de);
	tmpfs_free_dirent(VFS_TO_TMPFS(dvp->v_mount), de);
}
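/*
 * Worked example for the size-to-page conversion below (a sketch,
 * assuming a 4 KB PAGE_SIZE): OFF_TO_IDX(size + PAGE_MASK) rounds up, so
 * a file of 5000 bytes occupies 2 pages and one of 9000 bytes occupies
 * 3.  Truncating 9000 -> 5000 therefore frees one whole page and zeroes
 * the tail of the new last page; truncating 9000 -> 8200 frees nothing
 * and only zeroes the tail.
 */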
/*
 * Resizes the aobj associated with the regular file pointed to by 'vp'
 * to the size 'newsize'.  'vp' must point to a vnode that represents a
 * regular file.  'newsize' must not be negative.
 *
 * Returns zero on success or an appropriate error code on failure.
 */
int
tmpfs_reg_resize(struct vnode *vp, off_t newsize, boolean_t ignerr)
{
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	vm_object_t uobj;
	vm_page_t m;
	vm_pindex_t idx, newpages, oldpages;
	off_t oldsize;
	int base, rv;

	MPASS(vp->v_type == VREG);
	MPASS(newsize >= 0);

	node = VP_TO_TMPFS_NODE(vp);
	uobj = node->tn_reg.tn_aobj;
	tmp = VFS_TO_TMPFS(vp->v_mount);

	/*
	 * Convert the old and new sizes to the number of pages needed to
	 * store them.  It may happen that we do not need to do anything
	 * because the last allocated page can accommodate the change on
	 * its own.
	 */
	oldsize = node->tn_size;
	oldpages = OFF_TO_IDX(oldsize + PAGE_MASK);
	MPASS(oldpages == uobj->size);
	newpages = OFF_TO_IDX(newsize + PAGE_MASK);
	if (newpages > oldpages &&
	    tmpfs_pages_check_avail(tmp, newpages - oldpages) == 0)
		return (ENOSPC);

	VM_OBJECT_WLOCK(uobj);
	if (newsize < oldsize) {
		/*
		 * Zero the truncated part of the last page.
		 */
		base = newsize & PAGE_MASK;
		if (base != 0) {
			idx = OFF_TO_IDX(newsize);
retry:
			m = vm_page_lookup(uobj, idx);
			if (m != NULL) {
				if (vm_page_sleep_if_busy(m, "tmfssz"))
					goto retry;
				MPASS(m->valid == VM_PAGE_BITS_ALL);
			} else if (vm_pager_has_page(uobj, idx, NULL, NULL)) {
				m = vm_page_alloc(uobj, idx, VM_ALLOC_NORMAL);
				if (m == NULL) {
					VM_OBJECT_WUNLOCK(uobj);
					VM_WAIT;
					VM_OBJECT_WLOCK(uobj);
					goto retry;
				}
				rv = vm_pager_get_pages(uobj, &m, 1, NULL,
				    NULL);
				vm_page_lock(m);
				if (rv == VM_PAGER_OK) {
					/*
					 * Since the page was not resident,
					 * and therefore not recently
					 * accessed, immediately enqueue it
					 * for asynchronous laundering.  The
					 * current operation is not regarded
					 * as an access.
					 */
					vm_page_launder(m);
					vm_page_unlock(m);
					vm_page_xunbusy(m);
				} else {
					vm_page_free(m);
					vm_page_unlock(m);
					if (ignerr)
						m = NULL;
					else {
						VM_OBJECT_WUNLOCK(uobj);
						return (EIO);
					}
				}
			}
			if (m != NULL) {
				pmap_zero_page_area(m, base, PAGE_SIZE - base);
				vm_page_dirty(m);
				vm_pager_page_unswapped(m);
			}
		}

		/*
		 * Release any swap space and free any whole pages.
		 */
		if (newpages < oldpages) {
			swap_pager_freespace(uobj, newpages, oldpages -
			    newpages);
			vm_object_page_remove(uobj, newpages, 0, 0);
		}
	}
	uobj->size = newpages;
	VM_OBJECT_WUNLOCK(uobj);

	atomic_add_long(&tmp->tm_pages_used, newpages - oldpages);

	node->tn_size = newsize;
	return (0);
}

void
tmpfs_check_mtime(struct vnode *vp)
{
	struct tmpfs_node *node;
	struct vm_object *obj;

	ASSERT_VOP_ELOCKED(vp, "check_mtime");
	if (vp->v_type != VREG)
		return;
	obj = vp->v_object;
	KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) ==
	    (OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj"));
	/* unlocked read */
	if ((obj->flags & OBJ_TMPFS_DIRTY) != 0) {
		VM_OBJECT_WLOCK(obj);
		if ((obj->flags & OBJ_TMPFS_DIRTY) != 0) {
			obj->flags &= ~OBJ_TMPFS_DIRTY;
			node = VP_TO_TMPFS_NODE(vp);
			node->tn_status |= TMPFS_NODE_MODIFIED |
			    TMPFS_NODE_CHANGED;
		}
		VM_OBJECT_WUNLOCK(obj);
	}
}
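/*
 * The tmpfs_ch*() helpers below share a pattern: reject read-only mounts
 * and (where applicable) immutable or append-only nodes, require VADMIN
 * on the vnode for attribute changes, apply any extra privilege checks,
 * then update the in-memory attributes and mark the node
 * TMPFS_NODE_CHANGED.  Callers are expected to run tmpfs_update()
 * afterwards.
 */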
/*
 * Change flags of the given vnode.
 * Caller should execute tmpfs_update on vp after a successful execution.
 * The vnode must be locked on entry and remain locked on exit.
 */
int
tmpfs_chflags(struct vnode *vp, u_long flags, struct ucred *cred,
    struct thread *p)
{
	int error;
	struct tmpfs_node *node;

	ASSERT_VOP_ELOCKED(vp, "chflags");

	node = VP_TO_TMPFS_NODE(vp);

	if ((flags & ~(SF_APPEND | SF_ARCHIVED | SF_IMMUTABLE | SF_NOUNLINK |
	    UF_APPEND | UF_ARCHIVE | UF_HIDDEN | UF_IMMUTABLE | UF_NODUMP |
	    UF_NOUNLINK | UF_OFFLINE | UF_OPAQUE | UF_READONLY | UF_REPARSE |
	    UF_SPARSE | UF_SYSTEM)) != 0)
		return (EOPNOTSUPP);

	/* Disallow this operation if the file system is mounted read-only. */
	if (vp->v_mount->mnt_flag & MNT_RDONLY)
		return EROFS;

	/*
	 * Callers may only modify the file flags on objects they
	 * have VADMIN rights for.
	 */
	if ((error = VOP_ACCESS(vp, VADMIN, cred, p)))
		return (error);
	/*
	 * Unprivileged processes are not permitted to unset system
	 * flags, or modify flags if any system flags are set.
	 */
	if (!priv_check_cred(cred, PRIV_VFS_SYSFLAGS, 0)) {
		if (node->tn_flags &
		    (SF_NOUNLINK | SF_IMMUTABLE | SF_APPEND)) {
			error = securelevel_gt(cred, 0);
			if (error)
				return (error);
		}
	} else {
		if (node->tn_flags &
		    (SF_NOUNLINK | SF_IMMUTABLE | SF_APPEND) ||
		    ((flags ^ node->tn_flags) & SF_SETTABLE))
			return (EPERM);
	}
	node->tn_flags = flags;
	node->tn_status |= TMPFS_NODE_CHANGED;

	ASSERT_VOP_ELOCKED(vp, "chflags2");

	return (0);
}

/*
 * Change access mode on the given vnode.
 * Caller should execute tmpfs_update on vp after a successful execution.
 * The vnode must be locked on entry and remain locked on exit.
 */
int
tmpfs_chmod(struct vnode *vp, mode_t mode, struct ucred *cred, struct thread *p)
{
	int error;
	struct tmpfs_node *node;

	ASSERT_VOP_ELOCKED(vp, "chmod");

	node = VP_TO_TMPFS_NODE(vp);

	/* Disallow this operation if the file system is mounted read-only. */
	if (vp->v_mount->mnt_flag & MNT_RDONLY)
		return EROFS;

	/* Immutable or append-only files cannot be modified, either. */
	if (node->tn_flags & (IMMUTABLE | APPEND))
		return EPERM;

	/*
	 * To modify the permissions on a file, the caller must possess
	 * VADMIN for that file.
	 */
	if ((error = VOP_ACCESS(vp, VADMIN, cred, p)))
		return (error);

	/*
	 * Privileged processes may set the sticky bit on non-directories,
	 * as well as set the setgid bit on a file with a group that the
	 * process is not a member of.
	 */
	if (vp->v_type != VDIR && (mode & S_ISTXT)) {
		if (priv_check_cred(cred, PRIV_VFS_STICKYFILE, 0))
			return (EFTYPE);
	}
	if (!groupmember(node->tn_gid, cred) && (mode & S_ISGID)) {
		error = priv_check_cred(cred, PRIV_VFS_SETGID, 0);
		if (error)
			return (error);
	}

	node->tn_mode &= ~ALLPERMS;
	node->tn_mode |= mode & ALLPERMS;

	node->tn_status |= TMPFS_NODE_CHANGED;

	ASSERT_VOP_ELOCKED(vp, "chmod2");

	return (0);
}
/*
 * Change ownership of the given vnode.  At least one of uid or gid must
 * be different from VNOVAL.  If one is set to that value, the attribute
 * is unchanged.
 * Caller should execute tmpfs_update on vp after a successful execution.
 * The vnode must be locked on entry and remain locked on exit.
 */
int
tmpfs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred,
    struct thread *p)
{
	int error;
	struct tmpfs_node *node;
	uid_t ouid;
	gid_t ogid;

	ASSERT_VOP_ELOCKED(vp, "chown");

	node = VP_TO_TMPFS_NODE(vp);

	/* Assign default values if they are unknown. */
	MPASS(uid != VNOVAL || gid != VNOVAL);
	if (uid == VNOVAL)
		uid = node->tn_uid;
	if (gid == VNOVAL)
		gid = node->tn_gid;
	MPASS(uid != VNOVAL && gid != VNOVAL);

	/* Disallow this operation if the file system is mounted read-only. */
	if (vp->v_mount->mnt_flag & MNT_RDONLY)
		return EROFS;

	/* Immutable or append-only files cannot be modified, either. */
	if (node->tn_flags & (IMMUTABLE | APPEND))
		return EPERM;

	/*
	 * To modify the ownership of a file, the caller must possess
	 * VADMIN for that file.
	 */
	if ((error = VOP_ACCESS(vp, VADMIN, cred, p)))
		return (error);

	/*
	 * To change the owner of a file, or change the group of a file to a
	 * group of which we are not a member, the caller must have
	 * privilege.
	 */
	if ((uid != node->tn_uid ||
	    (gid != node->tn_gid && !groupmember(gid, cred))) &&
	    (error = priv_check_cred(cred, PRIV_VFS_CHOWN, 0)))
		return (error);

	ogid = node->tn_gid;
	ouid = node->tn_uid;

	node->tn_uid = uid;
	node->tn_gid = gid;

	node->tn_status |= TMPFS_NODE_CHANGED;

	if ((node->tn_mode & (S_ISUID | S_ISGID)) &&
	    (ouid != uid || ogid != gid)) {
		if (priv_check_cred(cred, PRIV_VFS_RETAINSUGID, 0))
			node->tn_mode &= ~(S_ISUID | S_ISGID);
	}

	ASSERT_VOP_ELOCKED(vp, "chown2");

	return (0);
}

/*
 * Change size of the given vnode.
 * Caller should execute tmpfs_update on vp after a successful execution.
 * The vnode must be locked on entry and remain locked on exit.
 */
int
tmpfs_chsize(struct vnode *vp, u_quad_t size, struct ucred *cred,
    struct thread *p)
{
	int error;
	struct tmpfs_node *node;

	ASSERT_VOP_ELOCKED(vp, "chsize");

	node = VP_TO_TMPFS_NODE(vp);

	/* Decide whether this is a valid operation based on the file type. */
	error = 0;
	switch (vp->v_type) {
	case VDIR:
		return EISDIR;

	case VREG:
		if (vp->v_mount->mnt_flag & MNT_RDONLY)
			return EROFS;
		break;

	case VBLK:
		/* FALLTHROUGH */
	case VCHR:
		/* FALLTHROUGH */
	case VFIFO:
		/* Allow modifications of special files even if the file
		 * system is mounted read-only (we are not modifying the
		 * files themselves, but the objects they represent). */
		return 0;

	default:
		/* Anything else is unsupported. */
		return EOPNOTSUPP;
	}

	/* Immutable or append-only files cannot be modified, either. */
	if (node->tn_flags & (IMMUTABLE | APPEND))
		return EPERM;

	error = tmpfs_truncate(vp, size);
	/* tmpfs_truncate will raise the NOTE_EXTEND and NOTE_ATTRIB kevents
	 * for us, as well as update tn_status; no need to do that here. */

	ASSERT_VOP_ELOCKED(vp, "chsize2");

	return (error);
}
/*
 * Change access and modification times of the given vnode.
 * Caller should execute tmpfs_update on vp after a successful execution.
 * The vnode must be locked on entry and remain locked on exit.
 */
int
tmpfs_chtimes(struct vnode *vp, struct vattr *vap,
    struct ucred *cred, struct thread *l)
{
	int error;
	struct tmpfs_node *node;

	ASSERT_VOP_ELOCKED(vp, "chtimes");

	node = VP_TO_TMPFS_NODE(vp);

	/* Disallow this operation if the file system is mounted read-only. */
	if (vp->v_mount->mnt_flag & MNT_RDONLY)
		return EROFS;

	/* Immutable or append-only files cannot be modified, either. */
	if (node->tn_flags & (IMMUTABLE | APPEND))
		return EPERM;

	error = vn_utimes_perm(vp, vap, cred, l);
	if (error != 0)
		return (error);

	if (vap->va_atime.tv_sec != VNOVAL)
		node->tn_status |= TMPFS_NODE_ACCESSED;

	if (vap->va_mtime.tv_sec != VNOVAL)
		node->tn_status |= TMPFS_NODE_MODIFIED;

	if (vap->va_birthtime.tv_sec != VNOVAL)
		node->tn_status |= TMPFS_NODE_MODIFIED;

	tmpfs_itimes(vp, &vap->va_atime, &vap->va_mtime);

	if (vap->va_birthtime.tv_sec != VNOVAL)
		node->tn_birthtime = vap->va_birthtime;
	ASSERT_VOP_ELOCKED(vp, "chtimes2");

	return (0);
}

void
tmpfs_set_status(struct tmpfs_node *node, int status)
{

	if ((node->tn_status & status) == status)
		return;
	TMPFS_NODE_LOCK(node);
	node->tn_status |= status;
	TMPFS_NODE_UNLOCK(node);
}

/* Sync timestamps */
void
tmpfs_itimes(struct vnode *vp, const struct timespec *acc,
    const struct timespec *mod)
{
	struct tmpfs_node *node;
	struct timespec now;

	ASSERT_VOP_LOCKED(vp, "tmpfs_itimes");
	node = VP_TO_TMPFS_NODE(vp);

	if ((node->tn_status & (TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED |
	    TMPFS_NODE_CHANGED)) == 0)
		return;

	vfs_timestamp(&now);
	TMPFS_NODE_LOCK(node);
	if (node->tn_status & TMPFS_NODE_ACCESSED) {
		if (acc == NULL)
			acc = &now;
		node->tn_atime = *acc;
	}
	if (node->tn_status & TMPFS_NODE_MODIFIED) {
		if (mod == NULL)
			mod = &now;
		node->tn_mtime = *mod;
	}
	if (node->tn_status & TMPFS_NODE_CHANGED)
		node->tn_ctime = now;
	node->tn_status &= ~(TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED |
	    TMPFS_NODE_CHANGED);
	TMPFS_NODE_UNLOCK(node);

	/* XXX: FIX? The entropy here is desirable, but the harvesting may be expensive */
	random_harvest_queue(node, sizeof(*node), 1, RANDOM_FS_ATIME);
}

void
tmpfs_update(struct vnode *vp)
{

	tmpfs_itimes(vp, NULL, NULL);
}

int
tmpfs_truncate(struct vnode *vp, off_t length)
{
	int error;
	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);

	if (length < 0) {
		error = EINVAL;
		goto out;
	}

	if (node->tn_size == length) {
		error = 0;
		goto out;
	}

	if (length > VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize)
		return (EFBIG);

	error = tmpfs_reg_resize(vp, length, FALSE);
	if (error == 0)
		node->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;

out:
	tmpfs_update(vp);

	return (error);
}

static __inline int
tmpfs_dirtree_cmp(struct tmpfs_dirent *a, struct tmpfs_dirent *b)
{
	if (a->td_hash > b->td_hash)
		return (1);
	else if (a->td_hash < b->td_hash)
		return (-1);
	return (0);
}

RB_GENERATE_STATIC(tmpfs_dir, tmpfs_dirent, uh.td_entries, tmpfs_dirtree_cmp);