1 /* $NetBSD: tmpfs_vnops.c,v 1.39 2007/07/23 15:41:01 jmmv Exp $ */ 2 3 /*- 4 * SPDX-License-Identifier: BSD-2-Clause-NetBSD 5 * 6 * Copyright (c) 2005, 2006 The NetBSD Foundation, Inc. 7 * All rights reserved. 8 * 9 * This code is derived from software contributed to The NetBSD Foundation 10 * by Julio M. Merino Vidal, developed as part of Google's Summer of Code 11 * 2005 program. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 24 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 * POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 /* 36 * tmpfs vnode interface. 
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/limits.h>
#include <sys/lockf.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smr.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <fs/tmpfs/tmpfs_vnops.h>
#include <fs/tmpfs/tmpfs.h>

SYSCTL_DECL(_vfs_tmpfs);
VFS_SMR_DECLARE;

/* Restart counter exported read-only as vfs.tmpfs.rename_restarts. */
static volatile int tmpfs_rename_restarts;
SYSCTL_INT(_vfs_tmpfs, OID_AUTO, rename_restarts, CTLFLAG_RD,
    __DEVOLATILE(int *, &tmpfs_rename_restarts), 0,
    "Times rename had to restart due to lock contention");

/*
 * Callback for vn_vget_ino_gen(): allocate or find the vnode for the
 * tmpfs node passed as 'arg'.
 */
static int
tmpfs_vn_get_ino_alloc(struct mount *mp, void *arg, int lkflags,
    struct vnode **rvp)
{

	return (tmpfs_alloc_vp(mp, arg, lkflags, rvp));
}

/*
 * Lookup routine shared by tmpfs_lookup() and tmpfs_cached_lookup().
 * On success *vpp holds a locked, referenced vnode; EJUSTRETURN is
 * returned for a missing last component of a CREATE/RENAME lookup.
 * On any other error *vpp is NULL.
 */
static int
tmpfs_lookup1(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
{
	struct tmpfs_dirent *de;
	struct tmpfs_node *dnode, *pnode;
	struct tmpfs_mount *tm;
	int error;

	/* Caller assumes responsibility for ensuring access (VEXEC). */
	dnode = VP_TO_TMPFS_DIR(dvp);
	*vpp = NULLVP;

	/* We cannot be requesting the parent directory of the root node. */
	MPASS(IMPLIES(dnode->tn_type == VDIR &&
	    dnode->tn_dir.tn_parent == dnode,
	    !(cnp->cn_flags & ISDOTDOT)));

	TMPFS_ASSERT_LOCKED(dnode);
	if (dnode->tn_dir.tn_parent == NULL) {
		/* The directory was removed; nothing left to look up in. */
		error = ENOENT;
		goto out;
	}
	if (cnp->cn_flags & ISDOTDOT) {
		tm = VFS_TO_TMPFS(dvp->v_mount);
		pnode = dnode->tn_dir.tn_parent;
		/* Hold the parent node across the unlock/relock dance. */
		tmpfs_ref_node(pnode);
		error = vn_vget_ino_gen(dvp, tmpfs_vn_get_ino_alloc,
		    pnode, cnp->cn_lkflags, vpp);
		tmpfs_free_node(tm, pnode);
		if (error != 0)
			goto out;
	} else if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
		VREF(dvp);
		*vpp = dvp;
		error = 0;
	} else {
		de = tmpfs_dir_lookup(dnode, NULL, cnp);
		if (de != NULL && de->td_node == NULL)
			cnp->cn_flags |= ISWHITEOUT;
		if (de == NULL || de->td_node == NULL) {
			/*
			 * The entry was not found in the directory.
			 * This is OK if we are creating or renaming an
			 * entry and are working on the last component of
			 * the path name.
			 */
			if ((cnp->cn_flags & ISLASTCN) &&
			    (cnp->cn_nameiop == CREATE ||
			    cnp->cn_nameiop == RENAME ||
			    (cnp->cn_nameiop == DELETE &&
			    cnp->cn_flags & DOWHITEOUT &&
			    cnp->cn_flags & ISWHITEOUT))) {
				error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred,
				    curthread);
				if (error != 0)
					goto out;

				error = EJUSTRETURN;
			} else
				error = ENOENT;
		} else {
			struct tmpfs_node *tnode;

			/*
			 * The entry was found, so get its associated
			 * tmpfs_node.
			 */
			tnode = de->td_node;

			/*
			 * If we are not at the last path component and
			 * found a non-directory or non-link entry (which
			 * may itself be pointing to a directory), raise
			 * an error.
			 */
			if ((tnode->tn_type != VDIR &&
			    tnode->tn_type != VLNK) &&
			    !(cnp->cn_flags & ISLASTCN)) {
				error = ENOTDIR;
				goto out;
			}

			/*
			 * If we are deleting or renaming the entry, keep
			 * track of its tmpfs_dirent so that it can be
			 * easily deleted later.
			 */
			if ((cnp->cn_flags & ISLASTCN) &&
			    (cnp->cn_nameiop == DELETE ||
			    cnp->cn_nameiop == RENAME)) {
				error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred,
				    curthread);
				if (error != 0)
					goto out;

				/* Allocate a new vnode on the matching entry. */
				error = tmpfs_alloc_vp(dvp->v_mount, tnode,
				    cnp->cn_lkflags, vpp);
				if (error != 0)
					goto out;

				/*
				 * Sticky directory: deny remove/rename unless
				 * the caller owns the directory or the entry
				 * (both VADMIN checks fail otherwise).
				 */
				if ((dnode->tn_mode & S_ISTXT) &&
				    VOP_ACCESS(dvp, VADMIN, cnp->cn_cred,
				    curthread) && VOP_ACCESS(*vpp, VADMIN,
				    cnp->cn_cred, curthread)) {
					error = EPERM;
					vput(*vpp);
					*vpp = NULL;
					goto out;
				}
			} else {
				error = tmpfs_alloc_vp(dvp->v_mount, tnode,
				    cnp->cn_lkflags, vpp);
				if (error != 0)
					goto out;
			}
		}
	}

	/*
	 * Store the result of this lookup in the cache.  Avoid this if the
	 * request was for creation, as it does not improve timings on
	 * empirical tests.
	 */
	if ((cnp->cn_flags & MAKEENTRY) != 0 && tmpfs_use_nc(dvp))
		cache_enter(dvp, *vpp, cnp);

out:
	/*
	 * If there were no errors, *vpp cannot be null and it must be
	 * locked.
	 */
	MPASS(IFF(error == 0, *vpp != NULLVP && VOP_ISLOCKED(*vpp)));

	return (error);
}

/* VOP_CACHEDLOOKUP: namecache miss, go to the real lookup. */
static int
tmpfs_cached_lookup(struct vop_cachedlookup_args *v)
{

	return (tmpfs_lookup1(v->a_dvp, v->a_vpp, v->a_cnp));
}

/* VOP_LOOKUP: full lookup, used when the namecache is bypassed. */
static int
tmpfs_lookup(struct vop_lookup_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode **vpp = v->a_vpp;
	struct componentname *cnp = v->a_cnp;
	int error;

	/* Check accessibility of requested node as a first step. */
	error = vn_dir_check_exec(dvp, cnp);
	if (error != 0)
		return (error);

	return (tmpfs_lookup1(dvp, vpp, cnp));
}

/* VOP_CREATE: create a regular file or a socket node. */
static int
tmpfs_create(struct vop_create_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode **vpp = v->a_vpp;
	struct componentname *cnp = v->a_cnp;
	struct vattr *vap = v->a_vap;
	int error;

	MPASS(vap->va_type == VREG || vap->va_type == VSOCK);

	error = tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL);
	if (error == 0 && (cnp->cn_flags & MAKEENTRY) != 0 && tmpfs_use_nc(dvp))
		cache_enter(dvp, *vpp, cnp);
	return (error);
}

/* VOP_MKNOD: create a block/character device or fifo node. */
static int
tmpfs_mknod(struct vop_mknod_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode **vpp = v->a_vpp;
	struct componentname *cnp = v->a_cnp;
	struct vattr *vap = v->a_vap;

	if (vap->va_type != VBLK && vap->va_type != VCHR &&
	    vap->va_type != VFIFO)
		return (EINVAL);

	return (tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL));
}

/* fileops installed on regular tmpfs files in tmpfs_open(). */
struct fileops tmpfs_fnops;

/* VOP_OPEN: validate the open mode and bind the node to the file. */
static int
tmpfs_open(struct vop_open_args *v)
{
	struct vnode *vp;
	struct tmpfs_node *node;
	struct file *fp;
	int error, mode;

	vp = v->a_vp;
	mode = v->a_mode;
	node = VP_TO_TMPFS_NODE(vp);

	/*
	 * The file is still active but all its names have been removed
	 * (e.g. by a "rmdir $(pwd)").  It cannot be opened any more as
	 * it is about to die.
	 */
	if (node->tn_links < 1)
		return (ENOENT);

	/* If the file is marked append-only, deny write requests. */
	if (node->tn_flags & APPEND && (mode & (FWRITE | O_APPEND)) == FWRITE)
		error = EPERM;
	else {
		error = 0;
		/* For regular files, the call below is nop.
		 */
		KASSERT(vp->v_type != VREG || (node->tn_reg.tn_aobj->flags &
		    OBJ_DEAD) == 0, ("dead object"));
		vnode_create_vobject(vp, node->tn_size, v->a_td);
	}

	fp = v->a_fp;
	MPASS(fp == NULL || fp->f_data == NULL);
	if (error == 0 && fp != NULL && vp->v_type == VREG) {
		/* Reference handed to the file; dropped in tmpfs_fo_close(). */
		tmpfs_ref_node(node);
		finit_vnode(fp, mode, node, &tmpfs_fnops);
	}

	return (error);
}

/* VOP_CLOSE: nothing to tear down, just refresh node times. */
static int
tmpfs_close(struct vop_close_args *v)
{
	struct vnode *vp = v->a_vp;

	/* Update node times. */
	tmpfs_update(vp);

	return (0);
}

/*
 * fo_close() for tmpfs file descriptors: release the node reference
 * taken in tmpfs_open(), then chain to the generic vnode fileops.
 */
int
tmpfs_fo_close(struct file *fp, struct thread *td)
{
	struct tmpfs_node *node;

	node = fp->f_data;
	if (node != NULL) {
		MPASS(node->tn_type == VREG);
		tmpfs_free_node(node->tn_reg.tn_tmp, node);
	}
	return (vnops.fo_close(fp, td));
}

/*
 * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see
 * the comment above cache_fplookup for details.
 */
int
tmpfs_fplookup_vexec(struct vop_fplookup_vexec_args *v)
{
	struct vnode *vp;
	struct tmpfs_node *node;
	struct ucred *cred;
	mode_t all_x, mode;

	vp = v->a_vp;
	node = VP_TO_TMPFS_NODE_SMR(vp);
	if (__predict_false(node == NULL))
		return (EAGAIN);

	/* Fast path: everyone may execute, no credential check needed. */
	all_x = S_IXUSR | S_IXGRP | S_IXOTH;
	mode = atomic_load_short(&node->tn_mode);
	if (__predict_true((mode & all_x) == all_x))
		return (0);

	cred = v->a_cred;
	return (vaccess_vexec_smr(mode, node->tn_uid, node->tn_gid, cred));
}

/* VOP_ACCESS: permission checking against the node attributes. */
int
tmpfs_access(struct vop_access_args *v)
{
	struct vnode *vp = v->a_vp;
	accmode_t accmode = v->a_accmode;
	struct ucred *cred = v->a_cred;
	mode_t all_x = S_IXUSR | S_IXGRP | S_IXOTH;
	int error;
	struct tmpfs_node *node;

	MPASS(VOP_ISLOCKED(vp));

	node = VP_TO_TMPFS_NODE(vp);

	/*
	 * Common case path lookup.
	 */
	if (__predict_true(accmode == VEXEC && (node->tn_mode & all_x) == all_x))
		return (0);

	switch (vp->v_type) {
	case VDIR:
		/* FALLTHROUGH */
	case VLNK:
		/* FALLTHROUGH */
	case VREG:
		/* Writes are refused on read-only mounts. */
		if (accmode & VWRITE && vp->v_mount->mnt_flag & MNT_RDONLY) {
			error = EROFS;
			goto out;
		}
		break;

	case VBLK:
		/* FALLTHROUGH */
	case VCHR:
		/* FALLTHROUGH */
	case VSOCK:
		/* FALLTHROUGH */
	case VFIFO:
		break;

	default:
		error = EINVAL;
		goto out;
	}

	if (accmode & VWRITE && node->tn_flags & IMMUTABLE) {
		error = EPERM;
		goto out;
	}

	error = vaccess(vp->v_type, node->tn_mode, node->tn_uid, node->tn_gid,
	    accmode, cred);

out:
	MPASS(VOP_ISLOCKED(vp));

	return (error);
}

/* VOP_STAT: fill in struct stat directly from the tmpfs node. */
int
tmpfs_stat(struct vop_stat_args *v)
{
	struct vnode *vp = v->a_vp;
	struct stat *sb = v->a_sb;
	struct tmpfs_node *node;
	int error;

	node = VP_TO_TMPFS_NODE(vp);

	tmpfs_update_getattr(vp);

	error = vop_stat_helper_pre(v);
	if (__predict_false(error))
		return (error);

	sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = node->tn_id;
	sb->st_mode = node->tn_mode | VTTOIF(vp->v_type);
	sb->st_nlink = node->tn_links;
	sb->st_uid = node->tn_uid;
	sb->st_gid = node->tn_gid;
	sb->st_rdev = (vp->v_type == VBLK || vp->v_type == VCHR) ?
	    node->tn_rdev : NODEV;
	sb->st_size = node->tn_size;
	sb->st_atim.tv_sec = node->tn_atime.tv_sec;
	sb->st_atim.tv_nsec = node->tn_atime.tv_nsec;
	sb->st_mtim.tv_sec = node->tn_mtime.tv_sec;
	sb->st_mtim.tv_nsec = node->tn_mtime.tv_nsec;
	sb->st_ctim.tv_sec = node->tn_ctime.tv_sec;
	sb->st_ctim.tv_nsec = node->tn_ctime.tv_nsec;
	sb->st_birthtim.tv_sec = node->tn_birthtime.tv_sec;
	sb->st_birthtim.tv_nsec = node->tn_birthtime.tv_nsec;
	sb->st_blksize = PAGE_SIZE;
	sb->st_flags = node->tn_flags;
	sb->st_gen = node->tn_gen;
	if (vp->v_type == VREG) {
#ifdef __ILP32__
		vm_object_t obj = node->tn_reg.tn_aobj;

		/* Handle torn read */
		VM_OBJECT_RLOCK(obj);
#endif
		sb->st_blocks = ptoa(node->tn_reg.tn_pages);
#ifdef __ILP32__
		VM_OBJECT_RUNLOCK(obj);
#endif
	} else {
		sb->st_blocks = node->tn_size;
	}
	sb->st_blocks /= S_BLKSIZE;
	return (vop_stat_helper_post(v, error));
}

/* VOP_GETATTR: report the node attributes. */
int
tmpfs_getattr(struct vop_getattr_args *v)
{
	struct vnode *vp = v->a_vp;
	struct vattr *vap = v->a_vap;
	vm_object_t obj;
	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);

	tmpfs_update_getattr(vp);

	vap->va_type = vp->v_type;
	vap->va_mode = node->tn_mode;
	vap->va_nlink = node->tn_links;
	vap->va_uid = node->tn_uid;
	vap->va_gid = node->tn_gid;
	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
	vap->va_fileid = node->tn_id;
	vap->va_size = node->tn_size;
	vap->va_blocksize = PAGE_SIZE;
	vap->va_atime = node->tn_atime;
	vap->va_mtime = node->tn_mtime;
	vap->va_ctime = node->tn_ctime;
	vap->va_birthtime = node->tn_birthtime;
	vap->va_gen = node->tn_gen;
	vap->va_flags = node->tn_flags;
	vap->va_rdev = (vp->v_type == VBLK || vp->v_type == VCHR) ?
	    node->tn_rdev : NODEV;
	if (vp->v_type == VREG) {
		obj = node->tn_reg.tn_aobj;
		/* tn_pages is read under the object lock for consistency. */
		VM_OBJECT_RLOCK(obj);
		vap->va_bytes = ptoa(node->tn_reg.tn_pages);
		VM_OBJECT_RUNLOCK(obj);
	} else {
		vap->va_bytes = node->tn_size;
	}
	vap->va_filerev = 0;

	return (0);
}

/* VOP_SETATTR: apply each settable attribute group from *vap in turn. */
int
tmpfs_setattr(struct vop_setattr_args *v)
{
	struct vnode *vp = v->a_vp;
	struct vattr *vap = v->a_vap;
	struct ucred *cred = v->a_cred;
	struct thread *td = curthread;

	int error;

	MPASS(VOP_ISLOCKED(vp));
	ASSERT_VOP_IN_SEQC(vp);

	error = 0;

	/* Abort if any unsettable attribute is given. */
	if (vap->va_type != VNON ||
	    vap->va_nlink != VNOVAL ||
	    vap->va_fsid != VNOVAL ||
	    vap->va_fileid != VNOVAL ||
	    vap->va_blocksize != VNOVAL ||
	    vap->va_gen != VNOVAL ||
	    vap->va_rdev != VNOVAL ||
	    vap->va_bytes != VNOVAL)
		error = EINVAL;

	if (error == 0 && (vap->va_flags != VNOVAL))
		error = tmpfs_chflags(vp, vap->va_flags, cred, td);

	if (error == 0 && (vap->va_size != VNOVAL))
		error = tmpfs_chsize(vp, vap->va_size, cred, td);

	if (error == 0 && (vap->va_uid != VNOVAL || vap->va_gid != VNOVAL))
		error = tmpfs_chown(vp, vap->va_uid, vap->va_gid, cred, td);

	if (error == 0 && (vap->va_mode != (mode_t)VNOVAL))
		error = tmpfs_chmod(vp, vap->va_mode, cred, td);

	if (error == 0 && ((vap->va_atime.tv_sec != VNOVAL &&
	    vap->va_atime.tv_nsec != VNOVAL) ||
	    (vap->va_mtime.tv_sec != VNOVAL &&
	    vap->va_mtime.tv_nsec != VNOVAL) ||
	    (vap->va_birthtime.tv_sec != VNOVAL &&
	    vap->va_birthtime.tv_nsec != VNOVAL)))
		error = tmpfs_chtimes(vp, vap, cred, td);

	/*
	 * Update the node times.  We give preference to the error codes
	 * generated by this function rather than the ones that may arise
	 * from tmpfs_update.
	 */
	tmpfs_update(vp);

	MPASS(VOP_ISLOCKED(vp));

	return (error);
}

/* VOP_READ: copy data out of the node's backing VM object. */
static int
tmpfs_read(struct vop_read_args *v)
{
	struct vnode *vp;
	struct uio *uio;
	struct tmpfs_node *node;

	vp = v->a_vp;
	if (vp->v_type != VREG)
		return (EISDIR);
	uio = v->a_uio;
	if (uio->uio_offset < 0)
		return (EINVAL);
	node = VP_TO_TMPFS_NODE(vp);
	tmpfs_set_accessed(VFS_TO_TMPFS(vp->v_mount), node);
	return (uiomove_object(node->tn_reg.tn_aobj, node->tn_size, uio));
}

/*
 * VOP_READ_PGCACHE: lockless read path executed inside a vfs_smr
 * section.  Returns EJUSTRETURN to make the caller fall back to the
 * locked read path whenever the node or object cannot be used safely.
 */
static int
tmpfs_read_pgcache(struct vop_read_pgcache_args *v)
{
	struct vnode *vp;
	struct tmpfs_node *node;
	vm_object_t object;
	off_t size;
	int error;

	vp = v->a_vp;
	VNPASS((vn_irflag_read(vp) & VIRF_PGREAD) != 0, vp);

	if (v->a_uio->uio_offset < 0)
		return (EINVAL);

	error = EJUSTRETURN;
	vfs_smr_enter();

	node = VP_TO_TMPFS_NODE_SMR(vp);
	if (node == NULL)
		goto out_smr;
	MPASS(node->tn_type == VREG);
	MPASS(node->tn_refcount >= 1);
	object = node->tn_reg.tn_aobj;
	if (object == NULL)
		goto out_smr;

	MPASS(object->type == tmpfs_pager_type);
	MPASS((object->flags & (OBJ_ANON | OBJ_DEAD | OBJ_SWAP)) ==
	    OBJ_SWAP);
	if (!VN_IS_DOOMED(vp)) {
		/* size cannot become shorter due to rangelock. */
		size = node->tn_size;
		tmpfs_set_accessed(node->tn_reg.tn_tmp, node);
		vfs_smr_exit();
		error = uiomove_object(object, size, v->a_uio);
		return (error);
	}
out_smr:
	vfs_smr_exit();
	return (error);
}

/* VOP_WRITE: grow the file if needed, then copy data into the object. */
static int
tmpfs_write(struct vop_write_args *v)
{
	struct vnode *vp;
	struct uio *uio;
	struct tmpfs_node *node;
	off_t oldsize;
	ssize_t r;
	int error, ioflag;
	mode_t newmode;

	vp = v->a_vp;
	uio = v->a_uio;
	ioflag = v->a_ioflag;
	error = 0;
	node = VP_TO_TMPFS_NODE(vp);
	oldsize = node->tn_size;

	if (uio->uio_offset < 0 || vp->v_type != VREG)
		return (EINVAL);
	if (uio->uio_resid == 0)
		return (0);
	if (ioflag & IO_APPEND)
		uio->uio_offset = node->tn_size;
	error = vn_rlimit_fsizex(vp, uio, VFS_TO_TMPFS(vp->v_mount)->
	    tm_maxfilesize, &r, uio->uio_td);
	if (error != 0) {
		vn_rlimit_fsizex_res(uio, r);
		return (error);
	}

	/* Extend the file first if the write goes past the current EOF. */
	if (uio->uio_offset + uio->uio_resid > node->tn_size) {
		error = tmpfs_reg_resize(vp, uio->uio_offset + uio->uio_resid,
		    FALSE);
		if (error != 0)
			goto out;
	}

	error = uiomove_object(node->tn_reg.tn_aobj, node->tn_size, uio);
	node->tn_status |= TMPFS_NODE_MODIFIED | TMPFS_NODE_CHANGED;
	node->tn_accessed = true;
	/* Strip setuid/setgid on write unless the caller is privileged. */
	if (node->tn_mode & (S_ISUID | S_ISGID)) {
		if (priv_check_cred(v->a_cred, PRIV_VFS_RETAINSUGID)) {
			newmode = node->tn_mode & ~(S_ISUID | S_ISGID);
			vn_seqc_write_begin(vp);
			atomic_store_short(&node->tn_mode, newmode);
			vn_seqc_write_end(vp);
		}
	}
	/* On failure roll the size back so no partial extension remains. */
	if (error != 0)
		(void)tmpfs_reg_resize(vp, oldsize, TRUE);

out:
	MPASS(IMPLIES(error == 0, uio->uio_resid == 0));
	MPASS(IMPLIES(error != 0, oldsize == node->tn_size));

	vn_rlimit_fsizex_res(uio, r);
	return (error);
}

/* VOP_DEALLOCATE: punch a hole in the given range of the file. */
static int
tmpfs_deallocate(struct vop_deallocate_args *v)
{
	return (tmpfs_reg_punch_hole(v->a_vp, v->a_offset, v->a_len));
}

/* VOP_FSYNC: no backing store to flush; reconcile and update times. */
static int
tmpfs_fsync(struct vop_fsync_args *v)
{
	struct vnode *vp = v->a_vp;

	MPASS(VOP_ISLOCKED(vp));

	tmpfs_check_mtime(vp);
	tmpfs_update(vp);

	return (0);
}

/* VOP_REMOVE: unlink a non-directory entry from its parent directory. */
static int
tmpfs_remove(struct vop_remove_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode *vp = v->a_vp;

	int error;
	struct tmpfs_dirent *de;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *dnode;
	struct tmpfs_node *node;

	MPASS(VOP_ISLOCKED(dvp));
	MPASS(VOP_ISLOCKED(vp));

	if (vp->v_type == VDIR) {
		error = EISDIR;
		goto out;
	}

	dnode = VP_TO_TMPFS_DIR(dvp);
	node = VP_TO_TMPFS_NODE(vp);
	tmp = VFS_TO_TMPFS(vp->v_mount);
	de = tmpfs_dir_lookup(dnode, node, v->a_cnp);
	MPASS(de != NULL);

	/* Files marked as immutable or append-only cannot be deleted. */
	if ((node->tn_flags & (IMMUTABLE | APPEND | NOUNLINK)) ||
	    (dnode->tn_flags & APPEND)) {
		error = EPERM;
		goto out;
	}

	/* Remove the entry from the directory; as it is a file, we do not
	 * have to change the number of hard links of the directory. */
	tmpfs_dir_detach(dvp, de);
	if (v->a_cnp->cn_flags & DOWHITEOUT)
		tmpfs_dir_whiteout_add(dvp, v->a_cnp);

	/* Free the directory entry we just deleted.  Note that the node
	 * referred by it will not be removed until the vnode is really
	 * reclaimed. */
	tmpfs_free_dirent(tmp, de);

	node->tn_status |= TMPFS_NODE_CHANGED;
	node->tn_accessed = true;
	error = 0;

out:
	return (error);
}

/* VOP_LINK: create a new hard link for vp inside directory tdvp. */
static int
tmpfs_link(struct vop_link_args *v)
{
	struct vnode *dvp = v->a_tdvp;
	struct vnode *vp = v->a_vp;
	struct componentname *cnp = v->a_cnp;

	int error;
	struct tmpfs_dirent *de;
	struct tmpfs_node *node;

	MPASS(VOP_ISLOCKED(dvp));
	MPASS(dvp != vp); /* XXX When can this be false?
	 */
	node = VP_TO_TMPFS_NODE(vp);

	/* Ensure that we do not overflow the maximum number of links imposed
	 * by the system. */
	MPASS(node->tn_links <= TMPFS_LINK_MAX);
	if (node->tn_links == TMPFS_LINK_MAX) {
		error = EMLINK;
		goto out;
	}

	/* We cannot create links of files marked immutable or append-only. */
	if (node->tn_flags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto out;
	}

	/* Allocate a new directory entry to represent the node. */
	error = tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), node,
	    cnp->cn_nameptr, cnp->cn_namelen, &de);
	if (error != 0)
		goto out;

	/* Insert the new directory entry into the appropriate directory. */
	if (cnp->cn_flags & ISWHITEOUT)
		tmpfs_dir_whiteout_remove(dvp, cnp);
	tmpfs_dir_attach(dvp, de);

	/* vp link count has changed, so update node times. */
	node->tn_status |= TMPFS_NODE_CHANGED;
	tmpfs_update(vp);

	error = 0;

out:
	return (error);
}

/*
 * We acquire all but fdvp locks using non-blocking acquisitions.  If we
 * fail to acquire any lock in the path we will drop all held locks,
 * acquire the new lock in a blocking fashion, and then release it and
 * restart the rename.  This acquire/release step ensures that we do not
 * spin on a lock waiting for release.  On error release all vnode locks
 * and decrement references the way tmpfs_rename() would do.
 */
static int
tmpfs_rename_relock(struct vnode *fdvp, struct vnode **fvpp,
    struct vnode *tdvp, struct vnode **tvpp,
    struct componentname *fcnp, struct componentname *tcnp)
{
	struct vnode *nvp;
	struct mount *mp;
	struct tmpfs_dirent *de;
	int error, restarts = 0;

	VOP_UNLOCK(tdvp);
	if (*tvpp != NULL && *tvpp != tdvp)
		VOP_UNLOCK(*tvpp);
	mp = fdvp->v_mount;

relock:
	restarts += 1;
	/* fdvp is the only lock taken in a blocking fashion up front. */
	error = vn_lock(fdvp, LK_EXCLUSIVE);
	if (error)
		goto releout;
	if (vn_lock(tdvp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
		/* Contended: wait for tdvp, then drop both and retry. */
		VOP_UNLOCK(fdvp);
		error = vn_lock(tdvp, LK_EXCLUSIVE);
		if (error)
			goto releout;
		VOP_UNLOCK(tdvp);
		goto relock;
	}
	/*
	 * Re-resolve fvp to be certain it still exists and fetch the
	 * correct vnode.
	 */
	de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(fdvp), NULL, fcnp);
	if (de == NULL) {
		VOP_UNLOCK(fdvp);
		VOP_UNLOCK(tdvp);
		if ((fcnp->cn_flags & ISDOTDOT) != 0 ||
		    (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.'))
			error = EINVAL;
		else
			error = ENOENT;
		goto releout;
	}
	error = tmpfs_alloc_vp(mp, de->td_node, LK_EXCLUSIVE | LK_NOWAIT, &nvp);
	if (error != 0) {
		VOP_UNLOCK(fdvp);
		VOP_UNLOCK(tdvp);
		if (error != EBUSY)
			goto releout;
		/* Blocking acquire/release, then restart the whole dance. */
		error = tmpfs_alloc_vp(mp, de->td_node, LK_EXCLUSIVE, &nvp);
		if (error != 0)
			goto releout;
		VOP_UNLOCK(nvp);
		/*
		 * Concurrent rename race.
		 */
		if (nvp == tdvp) {
			vrele(nvp);
			error = EINVAL;
			goto releout;
		}
		vrele(*fvpp);
		*fvpp = nvp;
		goto relock;
	}
	vrele(*fvpp);
	*fvpp = nvp;
	VOP_UNLOCK(*fvpp);
	/*
	 * Re-resolve tvp and acquire the vnode lock if present.
	 */
	de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(tdvp), NULL, tcnp);
	/*
	 * If tvp disappeared we just carry on.
	 */
	if (de == NULL && *tvpp != NULL) {
		vrele(*tvpp);
		*tvpp = NULL;
	}
	/*
	 * Get the tvp ino if the lookup succeeded.  We may have to restart
	 * if the non-blocking acquire fails.
	 */
	if (de != NULL) {
		nvp = NULL;
		error = tmpfs_alloc_vp(mp, de->td_node,
		    LK_EXCLUSIVE | LK_NOWAIT, &nvp);
		if (*tvpp != NULL)
			vrele(*tvpp);
		*tvpp = nvp;
		if (error != 0) {
			VOP_UNLOCK(fdvp);
			VOP_UNLOCK(tdvp);
			if (error != EBUSY)
				goto releout;
			error = tmpfs_alloc_vp(mp, de->td_node, LK_EXCLUSIVE,
			    &nvp);
			if (error != 0)
				goto releout;
			VOP_UNLOCK(nvp);
			/*
			 * fdvp contains fvp, thus tvp (=fdvp) is not empty.
			 */
			if (nvp == fdvp) {
				error = ENOTEMPTY;
				goto releout;
			}
			goto relock;
		}
	}
	tmpfs_rename_restarts += restarts;

	return (0);

releout:
	vrele(fdvp);
	vrele(*fvpp);
	vrele(tdvp);
	if (*tvpp != NULL)
		vrele(*tvpp);
	tmpfs_rename_restarts += restarts;

	return (error);
}

/* VOP_RENAME: move and/or rename fdvp/fcnp (fvp) to tdvp/tcnp (tvp). */
static int
tmpfs_rename(struct vop_rename_args *v)
{
	struct vnode *fdvp = v->a_fdvp;
	struct vnode *fvp = v->a_fvp;
	struct componentname *fcnp = v->a_fcnp;
	struct vnode *tdvp = v->a_tdvp;
	struct vnode *tvp = v->a_tvp;
	struct componentname *tcnp = v->a_tcnp;
	char *newname;
	struct tmpfs_dirent *de;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *fdnode;
	struct tmpfs_node *fnode;
	struct tmpfs_node *tnode;
	struct tmpfs_node *tdnode;
	int error;
	bool want_seqc_end;

	MPASS(VOP_ISLOCKED(tdvp));
	MPASS(IMPLIES(tvp != NULL, VOP_ISLOCKED(tvp)));

	want_seqc_end = false;

	/*
	 * Disallow cross-device renames.
	 * XXX Why isn't this done by the caller?
	 */
	if (fvp->v_mount != tdvp->v_mount ||
	    (tvp != NULL && fvp->v_mount != tvp->v_mount)) {
		error = EXDEV;
		goto out;
	}

	/* If source and target are the same file, there is nothing to do.
	 */
	if (fvp == tvp) {
		error = 0;
		goto out;
	}

	/*
	 * If we need to move the directory between entries, lock the
	 * source so that we can safely operate on it.
	 */
	if (fdvp != tdvp && fdvp != tvp) {
		if (vn_lock(fdvp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
			/* Contended; relock everything in a safe order. */
			error = tmpfs_rename_relock(fdvp, &fvp, tdvp, &tvp,
			    fcnp, tcnp);
			if (error != 0)
				return (error);
			ASSERT_VOP_ELOCKED(fdvp,
			    "tmpfs_rename: fdvp not locked");
			ASSERT_VOP_ELOCKED(tdvp,
			    "tmpfs_rename: tdvp not locked");
			if (tvp != NULL)
				ASSERT_VOP_ELOCKED(tvp,
				    "tmpfs_rename: tvp not locked");
			/* The relock may have re-resolved fvp/tvp. */
			if (fvp == tvp) {
				error = 0;
				goto out_locked;
			}
		}
	}

	if (tvp != NULL)
		vn_seqc_write_begin(tvp);
	vn_seqc_write_begin(tdvp);
	vn_seqc_write_begin(fvp);
	vn_seqc_write_begin(fdvp);
	want_seqc_end = true;

	tmp = VFS_TO_TMPFS(tdvp->v_mount);
	tdnode = VP_TO_TMPFS_DIR(tdvp);
	tnode = (tvp == NULL) ? NULL : VP_TO_TMPFS_NODE(tvp);
	fdnode = VP_TO_TMPFS_DIR(fdvp);
	fnode = VP_TO_TMPFS_NODE(fvp);
	de = tmpfs_dir_lookup(fdnode, fnode, fcnp);

	/*
	 * Entry can disappear before we lock fdvp,
	 * also avoid manipulating '.' and '..' entries.
	 */
	if (de == NULL) {
		if ((fcnp->cn_flags & ISDOTDOT) != 0 ||
		    (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.'))
			error = EINVAL;
		else
			error = ENOENT;
		goto out_locked;
	}
	MPASS(de->td_node == fnode);

	/*
	 * If re-naming a directory to another preexisting directory
	 * ensure that the target directory is empty so that its
	 * removal causes no side effects.
	 * Kern_rename guarantees the destination to be a directory
	 * if the source is one.
	 */
	if (tvp != NULL) {
		MPASS(tnode != NULL);

		if ((tnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
		    (tdnode->tn_flags & (APPEND | IMMUTABLE))) {
			error = EPERM;
			goto out_locked;
		}

		if (fnode->tn_type == VDIR && tnode->tn_type == VDIR) {
			if (tnode->tn_size > 0) {
				error = ENOTEMPTY;
				goto out_locked;
			}
		} else if (fnode->tn_type == VDIR && tnode->tn_type != VDIR) {
			error = ENOTDIR;
			goto out_locked;
		} else if (fnode->tn_type != VDIR && tnode->tn_type == VDIR) {
			error = EISDIR;
			goto out_locked;
		} else {
			MPASS(fnode->tn_type != VDIR &&
			    tnode->tn_type != VDIR);
		}
	}

	if ((fnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))
	    || (fdnode->tn_flags & (APPEND | IMMUTABLE))) {
		error = EPERM;
		goto out_locked;
	}

	/*
	 * Ensure that we have enough memory to hold the new name, if it
	 * has to be changed.
	 */
	if (fcnp->cn_namelen != tcnp->cn_namelen ||
	    bcmp(fcnp->cn_nameptr, tcnp->cn_nameptr, fcnp->cn_namelen) != 0) {
		newname = malloc(tcnp->cn_namelen, M_TMPFSNAME, M_WAITOK);
	} else
		newname = NULL;

	/*
	 * If the node is being moved to another directory, we have to do
	 * the move.
	 */
	if (fdnode != tdnode) {
		/*
		 * In case we are moving a directory, we have to adjust its
		 * parent to point to the new parent.
		 */
		if (de->td_node->tn_type == VDIR) {
			struct tmpfs_node *n;

			/*
			 * Ensure the target directory is not a child of the
			 * directory being moved.  Otherwise, we'd end up
			 * with stale nodes.
			 */
			n = tdnode;
			/*
			 * TMPFS_LOCK guarantees that no nodes are freed while
			 * traversing the list.  Nodes can only be marked as
			 * removed: tn_parent == NULL.
			 */
			TMPFS_LOCK(tmp);
			TMPFS_NODE_LOCK(n);
			/* Walk from tdnode up to the root (parent == self). */
			while (n != n->tn_dir.tn_parent) {
				struct tmpfs_node *parent;

				if (n == fnode) {
					/* tdnode is a descendant of fnode. */
					TMPFS_NODE_UNLOCK(n);
					TMPFS_UNLOCK(tmp);
					error = EINVAL;
					if (newname != NULL)
						free(newname, M_TMPFSNAME);
					goto out_locked;
				}
				parent = n->tn_dir.tn_parent;
				TMPFS_NODE_UNLOCK(n);
				if (parent == NULL) {
					n = NULL;
					break;
				}
				TMPFS_NODE_LOCK(parent);
				if (parent->tn_dir.tn_parent == NULL) {
					TMPFS_NODE_UNLOCK(parent);
					n = NULL;
					break;
				}
				n = parent;
			}
			TMPFS_UNLOCK(tmp);
			if (n == NULL) {
				/* Hit a removed node on the way up. */
				error = EINVAL;
				if (newname != NULL)
					free(newname, M_TMPFSNAME);
				goto out_locked;
			}
			TMPFS_NODE_UNLOCK(n);

			/* Adjust the parent pointer. */
			TMPFS_VALIDATE_DIR(fnode);
			TMPFS_NODE_LOCK(de->td_node);
			de->td_node->tn_dir.tn_parent = tdnode;
			TMPFS_NODE_UNLOCK(de->td_node);

			/*
			 * As a result of changing the target of the '..'
			 * entry, the link count of the source and target
			 * directories has to be adjusted.
			 */
			TMPFS_NODE_LOCK(tdnode);
			TMPFS_ASSERT_LOCKED(tdnode);
			tdnode->tn_links++;
			TMPFS_NODE_UNLOCK(tdnode);

			TMPFS_NODE_LOCK(fdnode);
			TMPFS_ASSERT_LOCKED(fdnode);
			fdnode->tn_links--;
			TMPFS_NODE_UNLOCK(fdnode);
		}
	}

	/*
	 * Do the move: just remove the entry from the source directory
	 * and insert it into the target one.
	 */
	tmpfs_dir_detach(fdvp, de);

	if (fcnp->cn_flags & DOWHITEOUT)
		tmpfs_dir_whiteout_add(fdvp, fcnp);
	if (tcnp->cn_flags & ISWHITEOUT)
		tmpfs_dir_whiteout_remove(tdvp, tcnp);

	/*
	 * If the name has changed, we need to make it effective by changing
	 * it in the directory entry.
	 */
	if (newname != NULL) {
		MPASS(tcnp->cn_namelen <= MAXNAMLEN);

		free(de->ud.td_name, M_TMPFSNAME);
		de->ud.td_name = newname;
		tmpfs_dirent_init(de, tcnp->cn_nameptr, tcnp->cn_namelen);

		fnode->tn_status |= TMPFS_NODE_CHANGED;
		tdnode->tn_status |= TMPFS_NODE_MODIFIED;
	}

	/*
	 * If we are overwriting an entry, we have to remove the old one
	 * from the target directory.
	 */
	if (tvp != NULL) {
		struct tmpfs_dirent *tde;

		/* Remove the old entry from the target directory. */
		tde = tmpfs_dir_lookup(tdnode, tnode, tcnp);
		tmpfs_dir_detach(tdvp, tde);

		/*
		 * Free the directory entry we just deleted.  Note that the
		 * node referred by it will not be removed until the vnode is
		 * really reclaimed.
		 */
		tmpfs_free_dirent(VFS_TO_TMPFS(tvp->v_mount), tde);
	}

	tmpfs_dir_attach(tdvp, de);

	if (tmpfs_use_nc(fvp)) {
		cache_vop_rename(fdvp, fvp, tdvp, tvp, fcnp, tcnp);
	}

	error = 0;

out_locked:
	if (fdvp != tdvp && fdvp != tvp)
		VOP_UNLOCK(fdvp);

out:
	if (want_seqc_end) {
		if (tvp != NULL)
			vn_seqc_write_end(tvp);
		vn_seqc_write_end(tdvp);
		vn_seqc_write_end(fvp);
		vn_seqc_write_end(fdvp);
	}

	/*
	 * Release target nodes.
	 * XXX: I don't understand when tdvp can be the same as tvp, but
	 * other code takes care of this...
	 */
	if (tdvp == tvp)
		vrele(tdvp);
	else
		vput(tdvp);
	if (tvp != NULL)
		vput(tvp);

	/* Release source nodes.
*/ 1266 vrele(fdvp); 1267 vrele(fvp); 1268 1269 return (error); 1270 } 1271 1272 static int 1273 tmpfs_mkdir(struct vop_mkdir_args *v) 1274 { 1275 struct vnode *dvp = v->a_dvp; 1276 struct vnode **vpp = v->a_vpp; 1277 struct componentname *cnp = v->a_cnp; 1278 struct vattr *vap = v->a_vap; 1279 1280 MPASS(vap->va_type == VDIR); 1281 1282 return (tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL)); 1283 } 1284 1285 static int 1286 tmpfs_rmdir(struct vop_rmdir_args *v) 1287 { 1288 struct vnode *dvp = v->a_dvp; 1289 struct vnode *vp = v->a_vp; 1290 1291 int error; 1292 struct tmpfs_dirent *de; 1293 struct tmpfs_mount *tmp; 1294 struct tmpfs_node *dnode; 1295 struct tmpfs_node *node; 1296 1297 MPASS(VOP_ISLOCKED(dvp)); 1298 MPASS(VOP_ISLOCKED(vp)); 1299 1300 tmp = VFS_TO_TMPFS(dvp->v_mount); 1301 dnode = VP_TO_TMPFS_DIR(dvp); 1302 node = VP_TO_TMPFS_DIR(vp); 1303 1304 /* Directories with more than two entries ('.' and '..') cannot be 1305 * removed. */ 1306 if (node->tn_size > 0) { 1307 error = ENOTEMPTY; 1308 goto out; 1309 } 1310 1311 if ((dnode->tn_flags & APPEND) 1312 || (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))) { 1313 error = EPERM; 1314 goto out; 1315 } 1316 1317 /* This invariant holds only if we are not trying to remove "..". 1318 * We checked for that above so this is safe now. */ 1319 MPASS(node->tn_dir.tn_parent == dnode); 1320 1321 /* Get the directory entry associated with node (vp). This was 1322 * filled by tmpfs_lookup while looking up the entry. */ 1323 de = tmpfs_dir_lookup(dnode, node, v->a_cnp); 1324 MPASS(TMPFS_DIRENT_MATCHES(de, 1325 v->a_cnp->cn_nameptr, 1326 v->a_cnp->cn_namelen)); 1327 1328 /* Check flags to see if we are allowed to remove the directory. */ 1329 if ((dnode->tn_flags & APPEND) != 0 || 1330 (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) != 0) { 1331 error = EPERM; 1332 goto out; 1333 } 1334 1335 /* Detach the directory entry from the directory (dnode). 
*/ 1336 tmpfs_dir_detach(dvp, de); 1337 if (v->a_cnp->cn_flags & DOWHITEOUT) 1338 tmpfs_dir_whiteout_add(dvp, v->a_cnp); 1339 1340 /* No vnode should be allocated for this entry from this point */ 1341 TMPFS_NODE_LOCK(node); 1342 node->tn_links--; 1343 node->tn_dir.tn_parent = NULL; 1344 node->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED; 1345 node->tn_accessed = true; 1346 1347 TMPFS_NODE_UNLOCK(node); 1348 1349 TMPFS_NODE_LOCK(dnode); 1350 dnode->tn_links--; 1351 dnode->tn_status |= TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED; 1352 dnode->tn_accessed = true; 1353 TMPFS_NODE_UNLOCK(dnode); 1354 1355 if (tmpfs_use_nc(dvp)) { 1356 cache_vop_rmdir(dvp, vp); 1357 } 1358 1359 /* Free the directory entry we just deleted. Note that the node 1360 * referred by it will not be removed until the vnode is really 1361 * reclaimed. */ 1362 tmpfs_free_dirent(tmp, de); 1363 1364 /* Release the deleted vnode (will destroy the node, notify 1365 * interested parties and clean it from the cache). */ 1366 1367 dnode->tn_status |= TMPFS_NODE_CHANGED; 1368 tmpfs_update(dvp); 1369 1370 error = 0; 1371 1372 out: 1373 return (error); 1374 } 1375 1376 static int 1377 tmpfs_symlink(struct vop_symlink_args *v) 1378 { 1379 struct vnode *dvp = v->a_dvp; 1380 struct vnode **vpp = v->a_vpp; 1381 struct componentname *cnp = v->a_cnp; 1382 struct vattr *vap = v->a_vap; 1383 const char *target = v->a_target; 1384 1385 #ifdef notyet /* XXX FreeBSD BUG: kern_symlink is not setting VLNK */ 1386 MPASS(vap->va_type == VLNK); 1387 #else 1388 vap->va_type = VLNK; 1389 #endif 1390 1391 return (tmpfs_alloc_file(dvp, vpp, vap, cnp, target)); 1392 } 1393 1394 static int 1395 tmpfs_readdir(struct vop_readdir_args *va) 1396 { 1397 struct vnode *vp; 1398 struct uio *uio; 1399 struct tmpfs_mount *tm; 1400 struct tmpfs_node *node; 1401 uint64_t **cookies; 1402 int *eofflag, *ncookies; 1403 ssize_t startresid; 1404 int error, maxcookies; 1405 1406 vp = va->a_vp; 1407 uio = va->a_uio; 1408 eofflag = 
va->a_eofflag; 1409 cookies = va->a_cookies; 1410 ncookies = va->a_ncookies; 1411 1412 /* This operation only makes sense on directory nodes. */ 1413 if (vp->v_type != VDIR) 1414 return (ENOTDIR); 1415 1416 maxcookies = 0; 1417 node = VP_TO_TMPFS_DIR(vp); 1418 tm = VFS_TO_TMPFS(vp->v_mount); 1419 1420 startresid = uio->uio_resid; 1421 1422 /* Allocate cookies for NFS and compat modules. */ 1423 if (cookies != NULL && ncookies != NULL) { 1424 maxcookies = howmany(node->tn_size, 1425 sizeof(struct tmpfs_dirent)) + 2; 1426 *cookies = malloc(maxcookies * sizeof(**cookies), M_TEMP, 1427 M_WAITOK); 1428 *ncookies = 0; 1429 } 1430 1431 if (cookies == NULL) 1432 error = tmpfs_dir_getdents(tm, node, uio, 0, NULL, NULL); 1433 else 1434 error = tmpfs_dir_getdents(tm, node, uio, maxcookies, *cookies, 1435 ncookies); 1436 1437 /* Buffer was filled without hitting EOF. */ 1438 if (error == EJUSTRETURN) 1439 error = (uio->uio_resid != startresid) ? 0 : EINVAL; 1440 1441 if (error != 0 && cookies != NULL && ncookies != NULL) { 1442 free(*cookies, M_TEMP); 1443 *cookies = NULL; 1444 *ncookies = 0; 1445 } 1446 1447 if (eofflag != NULL) 1448 *eofflag = 1449 (error == 0 && uio->uio_offset == TMPFS_DIRCOOKIE_EOF); 1450 1451 return (error); 1452 } 1453 1454 static int 1455 tmpfs_readlink(struct vop_readlink_args *v) 1456 { 1457 struct vnode *vp = v->a_vp; 1458 struct uio *uio = v->a_uio; 1459 1460 int error; 1461 struct tmpfs_node *node; 1462 1463 MPASS(uio->uio_offset == 0); 1464 MPASS(vp->v_type == VLNK); 1465 1466 node = VP_TO_TMPFS_NODE(vp); 1467 1468 error = uiomove(node->tn_link_target, MIN(node->tn_size, uio->uio_resid), 1469 uio); 1470 tmpfs_set_accessed(VFS_TO_TMPFS(vp->v_mount), node); 1471 1472 return (error); 1473 } 1474 1475 /* 1476 * VOP_FPLOOKUP_SYMLINK routines are subject to special circumstances, see 1477 * the comment above cache_fplookup for details. 1478 * 1479 * Check tmpfs_alloc_node for tmpfs-specific synchronisation notes. 
 */
static int
tmpfs_fplookup_symlink(struct vop_fplookup_symlink_args *v)
{
	struct vnode *vp;
	struct tmpfs_node *node;
	char *symlink;

	vp = v->a_vp;
	/* SMR-protected lookup: any NULL/unset state means "fall back". */
	node = VP_TO_TMPFS_NODE_SMR(vp);
	if (__predict_false(node == NULL))
		return (EAGAIN);
	if (!atomic_load_char(&node->tn_link_smr))
		return (EAGAIN);
	symlink = atomic_load_ptr(&node->tn_link_target);
	if (symlink == NULL)
		return (EAGAIN);

	return (cache_symlink_resolve(v->a_fpl, symlink, node->tn_size));
}

/*
 * Last reference to the vnode is being dropped: recycle it immediately
 * if the node was unlinked, otherwise just sync the mtime.
 */
static int
tmpfs_inactive(struct vop_inactive_args *v)
{
	struct vnode *vp;
	struct tmpfs_node *node;

	vp = v->a_vp;
	node = VP_TO_TMPFS_NODE(vp);
	if (node->tn_links == 0)
		vrecycle(vp);
	else
		tmpfs_check_mtime(vp);
	return (0);
}

/*
 * Report whether tmpfs_inactive() has work to do: either the node is
 * unlinked, or a VREG vnode's object has been dirtied since the last
 * clean generation.
 */
static int
tmpfs_need_inactive(struct vop_need_inactive_args *ap)
{
	struct vnode *vp;
	struct tmpfs_node *node;
	struct vm_object *obj;

	vp = ap->a_vp;
	node = VP_TO_TMPFS_NODE(vp);
	if (node->tn_links == 0)
		goto need;
	if (vp->v_type == VREG) {
		obj = vp->v_object;
		if (obj->generation != obj->cleangeneration)
			goto need;
	}
	return (0);
need:
	return (1);
}

/*
 * Detach the tmpfs node from the vnode being reclaimed, and free the
 * node itself if it was already unlinked.
 */
int
tmpfs_reclaim(struct vop_reclaim_args *v)
{
	struct vnode *vp;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	bool unlock;

	vp = v->a_vp;
	node = VP_TO_TMPFS_NODE(vp);
	tmp = VFS_TO_TMPFS(vp->v_mount);

	if (vp->v_type == VREG)
		tmpfs_destroy_vobject(vp, node->tn_reg.tn_aobj);
	vp->v_object = NULL;

	TMPFS_LOCK(tmp);
	TMPFS_NODE_LOCK(node);
	tmpfs_free_vp(vp);

	/*
	 * If the node referenced by this vnode was deleted by the user,
	 * we must free its associated data structures (now that the vnode
	 * is being reclaimed).
	 */
	unlock = true;
	if (node->tn_links == 0 &&
	    (node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0) {
		node->tn_vpstate = TMPFS_VNODE_DOOMED;
		/*
		 * tmpfs_free_node_locked() returning true means it
		 * consumed (dropped) both locks itself.
		 */
		unlock = !tmpfs_free_node_locked(tmp, node, true);
	}

	if (unlock) {
		TMPFS_NODE_UNLOCK(node);
		TMPFS_UNLOCK(tmp);
	}

	MPASS(vp->v_data == NULL);
	return (0);
}

/*
 * Print node details for ddb / vn_printf().
 */
int
tmpfs_print(struct vop_print_args *v)
{
	struct vnode *vp = v->a_vp;
	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);

	printf("tag VT_TMPFS, tmpfs_node %p, flags 0x%lx, links %jd\n",
	    node, node->tn_flags, (uintmax_t)node->tn_links);
	printf("\tmode 0%o, owner %d, group %d, size %jd, status 0x%x\n",
	    node->tn_mode, node->tn_uid, node->tn_gid,
	    (intmax_t)node->tn_size, node->tn_status);

	if (vp->v_type == VFIFO)
		fifo_printinfo(vp);

	printf("\n");

	return (0);
}

/*
 * Report filesystem limits and options for pathconf(2); anything not
 * handled here falls through to vop_stdpathconf().
 */
int
tmpfs_pathconf(struct vop_pathconf_args *v)
{
	struct vnode *vp = v->a_vp;
	int name = v->a_name;
	long *retval = v->a_retval;
	int error;

	error = 0;

	switch (name) {
	case _PC_LINK_MAX:
		*retval = TMPFS_LINK_MAX;
		break;

	case _PC_SYMLINK_MAX:
		*retval = MAXPATHLEN;
		break;

	case _PC_NAME_MAX:
		*retval = NAME_MAX;
		break;

	case _PC_PIPE_BUF:
		if (vp->v_type == VDIR || vp->v_type == VFIFO)
			*retval = PIPE_BUF;
		else
			error = EINVAL;
		break;

	case _PC_CHOWN_RESTRICTED:
		*retval = 1;
		break;

	case _PC_NO_TRUNC:
		*retval = 1;
		break;

	case _PC_SYNC_IO:
		*retval = 1;
		break;

	case _PC_FILESIZEBITS:
		*retval = 64;
		break;

	case _PC_MIN_HOLE_SIZE:
		*retval = PAGE_SIZE;
		break;

	default:
		error = vop_stdpathconf(v);
	}

	return (error);
}

/*
 * Build an NFS-style file handle (id + generation) for the vnode.
 */
static int
tmpfs_vptofh(struct vop_vptofh_args *ap)
/*
vop_vptofh {
	IN struct vnode *a_vp;
	IN struct fid *a_fhp;
};
*/
{
	struct tmpfs_fid_data tfd;
	struct tmpfs_node *node;
	struct fid *fhp;

	node = VP_TO_TMPFS_NODE(ap->a_vp);
	fhp = ap->a_fhp;
	fhp->fid_len = sizeof(tfd);

	/*
	 * Copy into fid_data from the stack to avoid unaligned pointer use.
	 * See the comment in sys/mount.h on struct fid for details.
	 */
	tfd.tfd_id = node->tn_id;
	tfd.tfd_gen = node->tn_gen;
	memcpy(fhp->fid_data, &tfd, fhp->fid_len);

	return (0);
}

/*
 * Manage whiteout entries (used by union mounts) in the directory dvp.
 */
static int
tmpfs_whiteout(struct vop_whiteout_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct componentname *cnp = ap->a_cnp;
	struct tmpfs_dirent *de;

	switch (ap->a_flags) {
	case LOOKUP:
		return (0);
	case CREATE:
		de = tmpfs_dir_lookup(VP_TO_TMPFS_DIR(dvp), NULL, cnp);
		if (de != NULL)
			/* An existing whiteout (NULL node) is fine. */
			return (de->td_node == NULL ? 0 : EEXIST);
		return (tmpfs_dir_whiteout_add(dvp, cnp));
	case DELETE:
		tmpfs_dir_whiteout_remove(dvp, cnp);
		return (0);
	default:
		panic("tmpfs_whiteout: unknown op");
	}
}

/*
 * Linear scan of directory tnp for the entry pointing at node tn.
 * Returns 0 and the entry in *pde, or ENOENT.
 */
static int
tmpfs_vptocnp_dir(struct tmpfs_node *tn, struct tmpfs_node *tnp,
    struct tmpfs_dirent **pde)
{
	struct tmpfs_dir_cursor dc;
	struct tmpfs_dirent *de;

	for (de = tmpfs_dir_first(tnp, &dc); de != NULL;
	    de = tmpfs_dir_next(tnp, &dc)) {
		if (de->td_node == tn) {
			*pde = de;
			return (0);
		}
	}
	return (ENOENT);
}

/*
 * Instantiate a vnode for candidate parent tnp and, if it really
 * contains tn, copy tn's name into the tail of buf.
 */
static int
tmpfs_vptocnp_fill(struct vnode *vp, struct tmpfs_node *tn,
    struct tmpfs_node *tnp, char *buf, size_t *buflen, struct vnode **dvp)
{
	struct tmpfs_dirent *de;
	int error, i;

	error = vn_vget_ino_gen(vp, tmpfs_vn_get_ino_alloc, tnp, LK_SHARED,
	    dvp);
	if (error != 0)
		return (error);
	error = tmpfs_vptocnp_dir(tn, tnp, &de);
1739 if (error == 0) { 1740 i = *buflen; 1741 i -= de->td_namelen; 1742 if (i < 0) { 1743 error = ENOMEM; 1744 } else { 1745 bcopy(de->ud.td_name, buf + i, de->td_namelen); 1746 *buflen = i; 1747 } 1748 } 1749 if (error == 0) { 1750 if (vp != *dvp) 1751 VOP_UNLOCK(*dvp); 1752 } else { 1753 if (vp != *dvp) 1754 vput(*dvp); 1755 else 1756 vrele(vp); 1757 } 1758 return (error); 1759 } 1760 1761 static int 1762 tmpfs_vptocnp(struct vop_vptocnp_args *ap) 1763 { 1764 struct vnode *vp, **dvp; 1765 struct tmpfs_node *tn, *tnp, *tnp1; 1766 struct tmpfs_dirent *de; 1767 struct tmpfs_mount *tm; 1768 char *buf; 1769 size_t *buflen; 1770 int error; 1771 1772 vp = ap->a_vp; 1773 dvp = ap->a_vpp; 1774 buf = ap->a_buf; 1775 buflen = ap->a_buflen; 1776 1777 tm = VFS_TO_TMPFS(vp->v_mount); 1778 tn = VP_TO_TMPFS_NODE(vp); 1779 if (tn->tn_type == VDIR) { 1780 tnp = tn->tn_dir.tn_parent; 1781 if (tnp == NULL) 1782 return (ENOENT); 1783 tmpfs_ref_node(tnp); 1784 error = tmpfs_vptocnp_fill(vp, tn, tn->tn_dir.tn_parent, buf, 1785 buflen, dvp); 1786 tmpfs_free_node(tm, tnp); 1787 return (error); 1788 } 1789 restart: 1790 TMPFS_LOCK(tm); 1791 restart_locked: 1792 LIST_FOREACH_SAFE(tnp, &tm->tm_nodes_used, tn_entries, tnp1) { 1793 if (tnp->tn_type != VDIR) 1794 continue; 1795 TMPFS_NODE_LOCK(tnp); 1796 tmpfs_ref_node(tnp); 1797 1798 /* 1799 * tn_vnode cannot be instantiated while we hold the 1800 * node lock, so the directory cannot be changed while 1801 * we iterate over it. Do this to avoid instantiating 1802 * vnode for directories which cannot point to our 1803 * node. 1804 */ 1805 error = tnp->tn_vnode == NULL ? 
tmpfs_vptocnp_dir(tn, tnp, 1806 &de) : 0; 1807 1808 if (error == 0) { 1809 TMPFS_NODE_UNLOCK(tnp); 1810 TMPFS_UNLOCK(tm); 1811 error = tmpfs_vptocnp_fill(vp, tn, tnp, buf, buflen, 1812 dvp); 1813 if (error == 0) { 1814 tmpfs_free_node(tm, tnp); 1815 return (0); 1816 } 1817 if (VN_IS_DOOMED(vp)) { 1818 tmpfs_free_node(tm, tnp); 1819 return (ENOENT); 1820 } 1821 TMPFS_LOCK(tm); 1822 TMPFS_NODE_LOCK(tnp); 1823 } 1824 if (tmpfs_free_node_locked(tm, tnp, false)) { 1825 goto restart; 1826 } else { 1827 KASSERT(tnp->tn_refcount > 0, 1828 ("node %p refcount zero", tnp)); 1829 if (tnp->tn_attached) { 1830 tnp1 = LIST_NEXT(tnp, tn_entries); 1831 TMPFS_NODE_UNLOCK(tnp); 1832 } else { 1833 TMPFS_NODE_UNLOCK(tnp); 1834 goto restart_locked; 1835 } 1836 } 1837 } 1838 TMPFS_UNLOCK(tm); 1839 return (ENOENT); 1840 } 1841 1842 static off_t 1843 tmpfs_seek_data_locked(vm_object_t obj, off_t noff) 1844 { 1845 vm_page_t m; 1846 vm_pindex_t p, p_m, p_swp; 1847 1848 p = OFF_TO_IDX(noff); 1849 m = vm_page_find_least(obj, p); 1850 1851 /* 1852 * Microoptimize the most common case for SEEK_DATA, where 1853 * there is no hole and the page is resident. 1854 */ 1855 if (m != NULL && vm_page_any_valid(m) && m->pindex == p) 1856 return (noff); 1857 1858 p_swp = swap_pager_find_least(obj, p); 1859 if (p_swp == p) 1860 return (noff); 1861 1862 p_m = m == NULL ? 
	    obj->size : m->pindex;
	return (IDX_TO_OFF(MIN(p_m, p_swp)));
}

/* Round noff up to the start of the next page. */
static off_t
tmpfs_seek_next(off_t noff)
{
	return (noff + PAGE_SIZE - (noff & PAGE_MASK));
}

/*
 * Clamp an offset beyond EOF: SEEK_DATA past EOF is ENXIO, SEEK_HOLE
 * past EOF resolves to the file size.
 */
static int
tmpfs_seek_clamp(struct tmpfs_node *tn, off_t *noff, bool seekdata)
{
	if (*noff < tn->tn_size)
		return (0);
	if (seekdata)
		return (ENXIO);
	*noff = tn->tn_size;
	return (0);
}

/*
 * SEEK_HOLE helper: find the first offset at or after noff backed by
 * neither a valid resident page nor swap.
 */
static off_t
tmpfs_seek_hole_locked(vm_object_t obj, off_t noff)
{
	vm_page_t m;
	vm_pindex_t p, p_swp;

	for (;; noff = tmpfs_seek_next(noff)) {
		/*
		 * Walk over the largest sequential run of the valid pages.
		 */
		for (m = vm_page_lookup(obj, OFF_TO_IDX(noff));
		    m != NULL && vm_page_any_valid(m);
		    m = vm_page_next(m), noff = tmpfs_seek_next(noff))
			;

		/*
		 * Found a hole in the object's page queue.  Check if
		 * there is a hole in the swap at the same place.
		 */
		p = OFF_TO_IDX(noff);
		p_swp = swap_pager_find_least(obj, p);
		if (p_swp != p) {
			noff = IDX_TO_OFF(p);
			break;
		}
	}
	return (noff);
}

/*
 * Common implementation of FIOSEEKDATA/FIOSEEKHOLE: validate and clamp
 * *off, then scan the node's VM object under its read lock.
 */
static int
tmpfs_seek_datahole(struct vnode *vp, off_t *off, bool seekdata)
{
	struct tmpfs_node *tn;
	vm_object_t obj;
	off_t noff;
	int error;

	if (vp->v_type != VREG)
		return (ENOTTY);
	tn = VP_TO_TMPFS_NODE(vp);
	noff = *off;
	if (noff < 0)
		return (ENXIO);
	error = tmpfs_seek_clamp(tn, &noff, seekdata);
	if (error != 0)
		return (error);
	obj = tn->tn_reg.tn_aobj;

	VM_OBJECT_RLOCK(obj);
	noff = seekdata ? tmpfs_seek_data_locked(obj, noff) :
	    tmpfs_seek_hole_locked(obj, noff);
	VM_OBJECT_RUNLOCK(obj);

	/* The scan may have run past EOF; clamp again. */
	error = tmpfs_seek_clamp(tn, &noff, seekdata);
	if (error == 0)
		*off = noff;
	return (error);
}

/*
 * ioctl(2) entry point; only the seek-data/seek-hole commands are
 * supported.
 */
static int
tmpfs_ioctl(struct vop_ioctl_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int error = 0;

	switch (ap->a_command) {
	case FIOSEEKDATA:
	case FIOSEEKHOLE:
		error = vn_lock(vp, LK_SHARED);
		if (error != 0) {
			error = EBADF;
			break;
		}
		error = tmpfs_seek_datahole(vp, (off_t *)ap->a_data,
		    ap->a_command == FIOSEEKDATA);
		VOP_UNLOCK(vp);
		break;
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}

/*
 * Vnode operations vector used for files stored in a tmpfs file system.
 */
struct vop_vector tmpfs_vnodeop_entries = {
	.vop_default =			&default_vnodeops,
	.vop_lookup =			vfs_cache_lookup,
	.vop_cachedlookup =		tmpfs_cached_lookup,
	.vop_create =			tmpfs_create,
	.vop_mknod =			tmpfs_mknod,
	.vop_open =			tmpfs_open,
	.vop_close =			tmpfs_close,
	.vop_fplookup_vexec =		tmpfs_fplookup_vexec,
	.vop_fplookup_symlink =		tmpfs_fplookup_symlink,
	.vop_access =			tmpfs_access,
	.vop_stat =			tmpfs_stat,
	.vop_getattr =			tmpfs_getattr,
	.vop_setattr =			tmpfs_setattr,
	.vop_read =			tmpfs_read,
	.vop_read_pgcache =		tmpfs_read_pgcache,
	.vop_write =			tmpfs_write,
	.vop_deallocate =		tmpfs_deallocate,
	.vop_fsync =			tmpfs_fsync,
	.vop_remove =			tmpfs_remove,
	.vop_link =			tmpfs_link,
	.vop_rename =			tmpfs_rename,
	.vop_mkdir =			tmpfs_mkdir,
	.vop_rmdir =			tmpfs_rmdir,
	.vop_symlink =			tmpfs_symlink,
	.vop_readdir =			tmpfs_readdir,
	.vop_readlink =			tmpfs_readlink,
	.vop_inactive =			tmpfs_inactive,
	.vop_need_inactive =		tmpfs_need_inactive,
	.vop_reclaim =			tmpfs_reclaim,
	.vop_print =			tmpfs_print,
	.vop_pathconf =			tmpfs_pathconf,
	.vop_vptofh =			tmpfs_vptofh,
	.vop_whiteout =			tmpfs_whiteout,
	.vop_bmap =			VOP_EOPNOTSUPP,
	.vop_vptocnp =			tmpfs_vptocnp,
	.vop_lock1 =			vop_lock,
	.vop_unlock =			vop_unlock,
	.vop_islocked =			vop_islocked,
	.vop_add_writecount =		vop_stdadd_writecount_nomsync,
	.vop_ioctl =			tmpfs_ioctl,
};
VFS_VOP_VECTOR_REGISTER(tmpfs_vnodeop_entries);

/*
 * Same vector for mounts which do not use namecache.
 */
struct vop_vector tmpfs_vnodeop_nonc_entries = {
	.vop_default =			&tmpfs_vnodeop_entries,
	.vop_lookup =			tmpfs_lookup,
};
VFS_VOP_VECTOR_REGISTER(tmpfs_vnodeop_nonc_entries);