/*
 * Copyright (c) 1992, 1993, 1994, 1995 Jan-Simon Pendry.
 * Copyright (c) 1992, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)union_vnops.c	8.32 (Berkeley) 6/23/95
 * $Id: union_vnops.c,v 1.60 1999/01/27 22:42:08 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/stat.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/lock.h>
#include <miscfs/union/union.h>

#define FIXUP(un, p) { \
	if (((un)->un_flags & UN_ULOCK) == 0) { \
		union_fixup(un, p); \
	} \
}

static int	union_abortop __P((struct vop_abortop_args *ap));
static int	union_access __P((struct vop_access_args *ap));
static int	union_advlock __P((struct vop_advlock_args *ap));
static int	union_bmap __P((struct vop_bmap_args *ap));
static int	union_close __P((struct vop_close_args *ap));
static int	union_create __P((struct vop_create_args *ap));
static void	union_fixup __P((struct union_node *un, struct proc *p));
static int	union_fsync __P((struct vop_fsync_args *ap));
static int	union_getattr __P((struct vop_getattr_args *ap));
static int	union_inactive __P((struct vop_inactive_args *ap));
static int	union_ioctl __P((struct vop_ioctl_args *ap));
static int	union_islocked __P((struct vop_islocked_args *ap));
static int	union_lease __P((struct vop_lease_args *ap));
static int	union_link __P((struct vop_link_args *ap));
static int	union_lock __P((struct vop_lock_args *ap));
static int	union_lookup __P((struct vop_lookup_args *ap));
static int	union_lookup1 __P((struct vnode *udvp, struct vnode **dvpp,
				   struct vnode **vpp,
				   struct componentname *cnp));
static int	union_mkdir __P((struct vop_mkdir_args *ap));
static int	union_mknod __P((struct vop_mknod_args *ap));
static int	union_mmap __P((struct vop_mmap_args *ap));
static int	union_open __P((struct vop_open_args *ap));
static int	union_pathconf __P((struct vop_pathconf_args *ap));
static int	union_print __P((struct vop_print_args *ap));
static int	union_read __P((struct vop_read_args *ap));
static int	union_readdir __P((struct vop_readdir_args *ap));
static int	union_readlink __P((struct vop_readlink_args *ap));
static int	union_reclaim __P((struct vop_reclaim_args *ap));
static int	union_remove __P((struct vop_remove_args *ap));
static int	union_rename __P((struct vop_rename_args *ap));
static int	union_revoke __P((struct vop_revoke_args *ap));
static int	union_rmdir __P((struct vop_rmdir_args *ap));
static int	union_poll __P((struct vop_poll_args *ap));
static int	union_setattr __P((struct vop_setattr_args *ap));
static int	union_strategy __P((struct vop_strategy_args *ap));
static int	union_symlink __P((struct vop_symlink_args *ap));
static int	union_unlock __P((struct vop_unlock_args *ap));
static int	union_whiteout __P((struct vop_whiteout_args *ap));
static int	union_write __P((struct vop_read_args *ap));

static void
union_fixup(un, p)
	struct union_node *un;
	struct proc *p;
{

	vn_lock(un->un_uppervp, LK_EXCLUSIVE | LK_RETRY, p);
	un->un_flags |= UN_ULOCK;
}

static int
union_lookup1(udvp, dvpp, vpp, cnp)
	struct vnode *udvp;
	struct vnode **dvpp;
	struct vnode **vpp;
	struct componentname *cnp;
{
	int error;
	struct proc *p = cnp->cn_proc;
	struct vnode *tdvp;
	struct vnode *dvp;
	struct mount *mp;

	dvp = *dvpp;

	/*
	 * If stepping up the directory tree, check for going
	 * back across the mount point, in which case do what
	 * lookup would do by stepping back down the mount
	 * hierarchy.
	 */
	if (cnp->cn_flags & ISDOTDOT) {
		while ((dvp != udvp) && (dvp->v_flag & VROOT)) {
			/*
			 * Don't do the NOCROSSMOUNT check
			 * at this level.  By definition,
			 * union fs deals with namespaces, not
			 * filesystems.
			 */
			tdvp = dvp;
			*dvpp = dvp = dvp->v_mount->mnt_vnodecovered;
			vput(tdvp);
			VREF(dvp);
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
		}
	}

	error = VOP_LOOKUP(dvp, &tdvp, cnp);
	if (error)
		return (error);

	/*
	 * The parent directory will have been unlocked, unless lookup
	 * found the last component, in which case we re-lock the node
	 * here to allow it to be unlocked again (phew) in union_lookup.
	 */
	if (dvp != tdvp && !(cnp->cn_flags & ISLASTCN))
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);

	dvp = tdvp;

	/*
	 * Lastly check if the current node is a mount point in
	 * which case walk up the mount hierarchy making sure not to
	 * bump into the root of the mount tree (ie. dvp != udvp).
	 */
	while (dvp != udvp && (dvp->v_type == VDIR) &&
	       (mp = dvp->v_mountedhere)) {

		if (vfs_busy(mp, 0, 0, p))
			continue;

		error = VFS_ROOT(mp, &tdvp);
		vfs_unbusy(mp, p);
		if (error) {
			vput(dvp);
			return (error);
		}

		vput(dvp);
		dvp = tdvp;
	}

	*vpp = dvp;
	return (0);
}

static int
union_lookup(ap)
	struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	int error;
	int uerror, lerror;
	struct vnode *uppervp, *lowervp;
	struct vnode *upperdvp, *lowerdvp;
	struct vnode *dvp = ap->a_dvp;
	struct union_node *dun = VTOUNION(dvp);
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;
	int lockparent = cnp->cn_flags & LOCKPARENT;
	struct union_mount *um = MOUNTTOUNIONMOUNT(dvp->v_mount);
	struct ucred *saved_cred = NULL;
	int iswhiteout;
	struct vattr va;

	/*
	 * Disallow write attempts to the filesystem mounted read-only.
	 */
	if ((cnp->cn_flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

#ifdef notyet
	if (cnp->cn_namelen == 3 &&
	    cnp->cn_nameptr[2] == '.' &&
	    cnp->cn_nameptr[1] == '.' &&
	    cnp->cn_nameptr[0] == '.') {
		dvp = *ap->a_vpp = LOWERVP(ap->a_dvp);
		if (dvp == NULLVP)
			return (ENOENT);
		VREF(dvp);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
		if (!lockparent || !(cnp->cn_flags & ISLASTCN))
			VOP_UNLOCK(ap->a_dvp, 0, p);
		return (0);
	}
#endif

	cnp->cn_flags |= LOCKPARENT;

	upperdvp = dun->un_uppervp;
	lowerdvp = dun->un_lowervp;
	uppervp = NULLVP;
	lowervp = NULLVP;
	iswhiteout = 0;

	if (cnp->cn_flags & ISDOTDOT) {
		if (upperdvp != NULL)
			VREF(upperdvp);
		if (lowerdvp != NULL)
			VREF(lowerdvp);
	}

	/*
	 * do the lookup in the upper level.
	 * if that level consumes additional pathnames,
	 * then assume that something special is going
	 * on and just return that vnode.
	 */
	if (upperdvp != NULLVP) {
		FIXUP(dun, p);
		/*
		 * If we're doing `..' in the underlying filesystem,
		 * we must drop our lock on the union node before
		 * going up the tree in the lower file system--if we block
		 * on the lowervp lock, and that's held by someone else
		 * coming down the tree and who's waiting for our lock,
		 * we would be hosed.
		 */
		if (cnp->cn_flags & ISDOTDOT) {
			/* retain lock on underlying VP: */
			dun->un_flags |= UN_KLOCK;
			VOP_UNLOCK(dvp, 0, p);
		}
		uerror = union_lookup1(um->um_uppervp, &upperdvp,
					&uppervp, cnp);
		/*
		 * Disallow write attempts to the filesystem mounted read-only.
		 */
		if (uerror == EJUSTRETURN && (cnp->cn_flags & ISLASTCN) &&
		    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
		    (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME)) {
			if (!lockparent)
				cnp->cn_flags &= ~LOCKPARENT;
			return (EROFS);
		}

		if (cnp->cn_flags & ISDOTDOT) {
			if (dun->un_uppervp == upperdvp) {
				/*
				 * We got the underlying bugger back locked...
				 * now take back the union node lock.  Since we
				 * hold the uppervp lock, we can diddle union
				 * locking flags at will. :)
				 */
				dun->un_flags |= UN_ULOCK;
			}
			/*
			 * If upperdvp got swapped out, it means we did
			 * some mount point magic, and we do not have
			 * dun->un_uppervp locked currently--so we get it
			 * locked here (don't set the UN_ULOCK flag).
			 */
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
		}

		/*if (uppervp == upperdvp)
			dun->un_flags |= UN_KLOCK;*/

		if (cnp->cn_consume != 0) {
			*ap->a_vpp = uppervp;
			if (!lockparent)
				cnp->cn_flags &= ~LOCKPARENT;
			error = uerror;
			goto out;
		}
		if (uerror == ENOENT || uerror == EJUSTRETURN) {
			if (cnp->cn_flags & ISWHITEOUT) {
				iswhiteout = 1;
			} else if (lowerdvp != NULLVP) {
				lerror = VOP_GETATTR(upperdvp, &va,
					cnp->cn_cred, cnp->cn_proc);
				if (lerror == 0 && (va.va_flags & OPAQUE))
					iswhiteout = 1;
			}
		}
	} else {
		uerror = ENOENT;
	}

	/*
	 * in a similar way to the upper layer, do the lookup
	 * in the lower layer.  this time, if there is some
	 * component magic going on, then vput whatever we got
	 * back from the upper layer and return the lower vnode
	 * instead.
	 */
	if (lowerdvp != NULLVP && !iswhiteout) {
		int nameiop;

		vn_lock(lowerdvp, LK_EXCLUSIVE | LK_RETRY, p);

		/*
		 * Only do a LOOKUP on the bottom node, since
		 * we won't be making changes to it anyway.
		 */
		nameiop = cnp->cn_nameiop;
		cnp->cn_nameiop = LOOKUP;
		if (um->um_op == UNMNT_BELOW) {
			saved_cred = cnp->cn_cred;
			cnp->cn_cred = um->um_cred;
		}
		/*
		 * We shouldn't have to worry about locking interactions
		 * between the lower layer and our union layer (w.r.t.
		 * `..' processing) because we don't futz with lowervp
		 * locks in the union-node instantiation code path.
		 */
		lerror = union_lookup1(um->um_lowervp, &lowerdvp,
				&lowervp, cnp);
		if (um->um_op == UNMNT_BELOW)
			cnp->cn_cred = saved_cred;
		cnp->cn_nameiop = nameiop;

		if (lowervp != lowerdvp)
			VOP_UNLOCK(lowerdvp, 0, p);

		if (cnp->cn_consume != 0 || lerror == EACCES) {
			if (lerror == EACCES)
				lowervp = NULLVP;
			if (uppervp != NULLVP) {
				if (uppervp == upperdvp)
					vrele(uppervp);
				else
					vput(uppervp);
				uppervp = NULLVP;
			}
			*ap->a_vpp = lowervp;
			if (!lockparent)
				cnp->cn_flags &= ~LOCKPARENT;
			error = lerror;
			goto out;
		}
	} else {
		lerror = ENOENT;
		if ((cnp->cn_flags & ISDOTDOT) && dun->un_pvp != NULLVP) {
			lowervp = LOWERVP(dun->un_pvp);
			if (lowervp != NULLVP) {
				VREF(lowervp);
				vn_lock(lowervp, LK_EXCLUSIVE | LK_RETRY, p);
				lerror = 0;
			}
		}
	}

	if (!lockparent)
		cnp->cn_flags &= ~LOCKPARENT;

	/*
	 * at this point, we have uerror and lerror indicating
	 * possible errors with the lookups in the upper and lower
	 * layers.  additionally, uppervp and lowervp are (locked)
	 * references to existing vnodes in the upper and lower layers.
	 *
	 * there are now three cases to consider.
	 * 1. if both layers returned an error, then return whatever
	 *    error the upper layer generated.
	 *
	 * 2. if the top layer failed and the bottom layer succeeded
	 *    then two subcases occur.
	 *    a. the bottom vnode is not a directory, in which
	 *	 case just return a new union vnode referencing
	 *	 an empty top layer and the existing bottom layer.
	 *    b. the bottom vnode is a directory, in which case
	 *	 create a new directory in the top-level and
	 *	 continue as in case 3.
	 *
	 * 3. if the top layer succeeded then return a new union
	 *    vnode referencing whatever the new top layer and
	 *    whatever the bottom layer returned.
	 */

	*ap->a_vpp = NULLVP;

	/* case 1. */
	if ((uerror != 0) && (lerror != 0)) {
		error = uerror;
		goto out;
	}

	/* case 2. */
	if (uerror != 0 /* && (lerror == 0) */ ) {
		if (lowervp->v_type == VDIR) { /* case 2b. */
			dun->un_flags &= ~UN_ULOCK;
			VOP_UNLOCK(upperdvp, 0, p);
			uerror = union_mkshadow(um, upperdvp, cnp, &uppervp);
			vn_lock(upperdvp, LK_EXCLUSIVE | LK_RETRY, p);
			dun->un_flags |= UN_ULOCK;

			if (uerror) {
				if (lowervp != NULLVP) {
					vput(lowervp);
					lowervp = NULLVP;
				}
				error = uerror;
				goto out;
			}
		}
	}

	if (lowervp != NULLVP)
		VOP_UNLOCK(lowervp, 0, p);

	error = union_allocvp(ap->a_vpp, dvp->v_mount, dvp, upperdvp, cnp,
			      uppervp, lowervp, 1);

	if (error) {
		if (uppervp != NULLVP)
			vput(uppervp);
		if (lowervp != NULLVP)
			vrele(lowervp);
	} else {
		if (*ap->a_vpp != dvp)
			if (!lockparent || !(cnp->cn_flags & ISLASTCN))
				VOP_UNLOCK(dvp, 0, p);
#ifdef DIAGNOSTIC
		if (cnp->cn_namelen == 1 &&
		    cnp->cn_nameptr[0] == '.' &&
		    *ap->a_vpp != dvp) {
			panic("union_lookup returning . (%p) not same as startdir (%p)",
			    ap->a_vpp, dvp);
		}
#endif
	}

out:
	if (cnp->cn_flags & ISDOTDOT) {
		if (upperdvp != NULL)
			vrele(upperdvp);
		if (lowerdvp != NULL)
			vrele(lowerdvp);
	}

	return (error);
}

static int
union_create(ap)
	struct vop_create_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct vnode *dvp = dun->un_uppervp;
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;

	if (dvp != NULLVP) {
		struct vnode *vp;
		struct mount *mp;
		int error;

		FIXUP(dun, p);

		dun->un_flags |= UN_KLOCK;
		VOP_UNLOCK(ap->a_dvp, 0, p);
		error = VOP_CREATE(dvp, &vp, cnp, ap->a_vap);
		if (error) {
			dun->un_flags |= UN_ULOCK;
			return (error);
		}

		mp = ap->a_dvp->v_mount;
		VOP_UNLOCK(dvp, 0, p);
		error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP, cnp, vp,
				      NULLVP, 1);
		if (error)
			vput(vp);
		vn_lock(ap->a_dvp, LK_EXCLUSIVE | LK_RETRY, p);
		return (error);
	}

	return (EROFS);
}

static int
union_whiteout(ap)
	struct vop_whiteout_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
		int a_flags;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_dvp);
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;

	if (un->un_uppervp == NULLVP)
		return (EOPNOTSUPP);

	FIXUP(un, p);
	return (VOP_WHITEOUT(un->un_uppervp, cnp, ap->a_flags));
}

static int
union_mknod(ap)
	struct vop_mknod_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct vnode *dvp = dun->un_uppervp;
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;

	if (dvp != NULLVP) {
		struct vnode *vp;
		struct mount *mp;
		int error;

		FIXUP(dun, p);

		dun->un_flags |= UN_KLOCK;
		VOP_UNLOCK(ap->a_dvp, 0, p);
		error = VOP_MKNOD(dvp, &vp, cnp, ap->a_vap);
		if (error) {
			dun->un_flags |= UN_ULOCK;
			return (error);
		}

		if (vp != NULLVP) {
			mp = ap->a_dvp->v_mount;
			VOP_UNLOCK(dvp, 0, p);
			error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP,
					      cnp, vp, NULLVP, 1);
			if (error)
				vput(vp);
			vn_lock(ap->a_dvp, LK_EXCLUSIVE | LK_RETRY, p);
		} else {
			dun->un_flags |= UN_ULOCK;
		}
		return (error);
	}

	return (EROFS);
}

static int
union_open(ap)
	struct vop_open_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *tvp;
	int mode = ap->a_mode;
	struct ucred *cred = ap->a_cred;
	struct proc *p = ap->a_p;
	int error;

	/*
	 * If there is an existing upper vp then simply open that.
	 */
	tvp = un->un_uppervp;
	if (tvp == NULLVP) {
		/*
		 * If the lower vnode is being opened for writing, then
		 * copy the file contents to the upper vnode and open that,
		 * otherwise we can simply open the lower vnode.
		 */
		tvp = un->un_lowervp;
		if ((ap->a_mode & FWRITE) && (tvp->v_type == VREG)) {
			error = union_copyup(un, (mode & O_TRUNC) == 0, cred, p);
			if (error == 0)
				error = VOP_OPEN(un->un_uppervp, mode, cred, p);
			return (error);
		}

		/*
		 * Just open the lower vnode
		 */
		un->un_openl++;
		vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, p);
		error = VOP_OPEN(tvp, mode, cred, p);
		VOP_UNLOCK(tvp, 0, p);

		return (error);
	}

	FIXUP(un, p);

	error = VOP_OPEN(tvp, mode, cred, p);

	return (error);
}

static int
union_close(ap)
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp;

	if ((vp = un->un_uppervp) == NULLVP) {
#ifdef UNION_DIAGNOSTIC
		if (un->un_openl <= 0)
			panic("union: un_openl cnt");
#endif
		--un->un_openl;
		vp = un->un_lowervp;
	}

	ap->a_vp = vp;
	return (VCALL(vp, VOFFSET(vop_close), ap));
}

/*
 * Check access permission on the union vnode.
 * The access check being enforced is to check
 * against both the underlying vnode, and any
 * copied vnode.  This ensures that no additional
 * file permissions are given away simply because
 * the user caused an implicit file copy.
 */
static int
union_access(ap)
	struct vop_access_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct proc *p = ap->a_p;
	int error = EACCES;
	struct vnode *vp;
	struct vnode *savedvp;

	/*
	 * Disallow write attempts on filesystems mounted read-only.
	 */
	if (ap->a_mode & VWRITE && (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)) {
		switch (ap->a_vp->v_type) {
		case VREG:
		case VDIR:
		case VLNK:
			return (EROFS);
		default:
			break;
		}
	}
	if ((vp = un->un_uppervp) != NULLVP) {
		FIXUP(un, p);
		ap->a_vp = vp;
		return (VCALL(vp, VOFFSET(vop_access), ap));
	}

	if ((vp = un->un_lowervp) != NULLVP) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		savedvp = ap->a_vp;
		ap->a_vp = vp;
		error = VCALL(vp, VOFFSET(vop_access), ap);
		if (error == 0) {
			struct union_mount *um = MOUNTTOUNIONMOUNT(savedvp->v_mount);

			if (um->um_op == UNMNT_BELOW) {
				ap->a_cred = um->um_cred;
				error = VCALL(vp, VOFFSET(vop_access), ap);
			}
		}
		VOP_UNLOCK(vp, 0, p);
		if (error)
			return (error);
	}

	return (error);
}

/*
 * We handle getattr only to change the fsid and
 * track object sizes
 */
static int
union_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	int error;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *vp = un->un_uppervp;
	struct proc *p = ap->a_p;
	struct vattr *vap;
	struct vattr va;

	/*
	 * Some programs walk the filesystem hierarchy by counting
	 * links to directories to avoid stat'ing all the time.
	 * This means the link count on directories needs to be "correct".
	 * The only way to do that is to call getattr on both layers
	 * and fix up the link count.  The link count will not necessarily
	 * be accurate but will be large enough to defeat the tree walkers.
	 */

	vap = ap->a_vap;

	vp = un->un_uppervp;
	if (vp != NULLVP) {
		/*
		 * It's not clear whether VOP_GETATTR is to be
		 * called with the vnode locked or not.  stat() calls
		 * it with (vp) locked, and fstat calls it with
		 * (vp) unlocked.
		 * In the meantime, compensate here by checking
		 * the union_node's lock flag.
		 */
		if (un->un_flags & UN_LOCKED)
			FIXUP(un, p);

		error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_p);
		if (error)
			return (error);
		union_newsize(ap->a_vp, vap->va_size, VNOVAL);
	}

	if (vp == NULLVP) {
		vp = un->un_lowervp;
	} else if (vp->v_type == VDIR && un->un_lowervp != NULLVP) {
		vp = un->un_lowervp;
		vap = &va;
	} else {
		vp = NULLVP;
	}

	if (vp != NULLVP) {
		error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_p);
		if (error)
			return (error);
		union_newsize(ap->a_vp, VNOVAL, vap->va_size);
	}

	if ((vap != ap->a_vap) && (vap->va_type == VDIR))
		ap->a_vap->va_nlink += vap->va_nlink;

	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
	return (0);
}

static int
union_setattr(ap)
	struct vop_setattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct proc *p = ap->a_p;
	struct vattr *vap = ap->a_vap;
	int error;

	/*
	 * Disallow write attempts on filesystems mounted read-only.
	 */
	if ((ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
	     vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
	     vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL))
		return (EROFS);

	/*
	 * Handle case of truncating lower object to zero size,
	 * by creating a zero length upper object.  This is to
	 * handle the case of open with O_TRUNC and O_CREAT.
	 */
	if ((un->un_uppervp == NULLVP) &&
	    /* assert(un->un_lowervp != NULLVP) */
	    (un->un_lowervp->v_type == VREG)) {
		error = union_copyup(un, (ap->a_vap->va_size != 0),
				     ap->a_cred, ap->a_p);
		if (error)
			return (error);
	}

	/*
	 * Try to set attributes in upper layer,
	 * otherwise return read-only filesystem error.
	 */
	if (un->un_uppervp != NULLVP) {
		FIXUP(un, p);
		error = VOP_SETATTR(un->un_uppervp, ap->a_vap,
				    ap->a_cred, ap->a_p);
		if ((error == 0) && (ap->a_vap->va_size != VNOVAL))
			union_newsize(ap->a_vp, ap->a_vap->va_size, VNOVAL);
	} else {
		error = EROFS;
	}

	return (error);
}

static int
union_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	int error;
	struct proc *p = ap->a_uio->uio_procp;
	struct vnode *vp = OTHERVP(ap->a_vp);
	int dolock = (vp == LOWERVP(ap->a_vp));

	if (dolock)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	else
		FIXUP(VTOUNION(ap->a_vp), p);
	error = VOP_READ(vp, ap->a_uio, ap->a_ioflag, ap->a_cred);
	if (dolock)
		VOP_UNLOCK(vp, 0, p);

	/*
	 * XXX
	 * perhaps the size of the underlying object has changed under
	 * our feet.  take advantage of the offset information present
	 * in the uio structure.
	 */
	if (error == 0) {
		struct union_node *un = VTOUNION(ap->a_vp);
		off_t cur = ap->a_uio->uio_offset;

		if (vp == un->un_uppervp) {
			if (cur > un->un_uppersz)
				union_newsize(ap->a_vp, cur, VNOVAL);
		} else {
			if (cur > un->un_lowersz)
				union_newsize(ap->a_vp, VNOVAL, cur);
		}
	}

	return (error);
}

static int
union_write(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	int error;
	struct vnode *vp;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct proc *p = ap->a_uio->uio_procp;

	vp = UPPERVP(ap->a_vp);
	if (vp == NULLVP)
		panic("union: missing upper layer in write");

	FIXUP(un, p);
	error = VOP_WRITE(vp, ap->a_uio, ap->a_ioflag, ap->a_cred);

	/*
	 * the size of the underlying object may be changed by the
	 * write.
	 */
	if (error == 0) {
		off_t cur = ap->a_uio->uio_offset;

		if (cur > un->un_uppersz)
			union_newsize(ap->a_vp, cur, VNOVAL);
	}

	return (error);
}

static int
union_lease(ap)
	struct vop_lease_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
		struct ucred *a_cred;
		int a_flag;
	} */ *ap;
{
	register struct vnode *ovp = OTHERVP(ap->a_vp);

	ap->a_vp = ovp;
	return (VCALL(ovp, VOFFSET(vop_lease), ap));
}

static int
union_ioctl(ap)
	struct vop_ioctl_args /* {
		struct vnode *a_vp;
		int a_command;
		caddr_t a_data;
		int a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	register struct vnode *ovp = OTHERVP(ap->a_vp);

	ap->a_vp = ovp;
	return (VCALL(ovp, VOFFSET(vop_ioctl), ap));
}

static int
union_poll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	register struct vnode *ovp = OTHERVP(ap->a_vp);

	ap->a_vp = ovp;
	return (VCALL(ovp, VOFFSET(vop_poll), ap));
}

static int
union_revoke(ap)
	struct vop_revoke_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	if (UPPERVP(vp))
		VOP_REVOKE(UPPERVP(vp), ap->a_flags);
	if (LOWERVP(vp))
		VOP_REVOKE(LOWERVP(vp), ap->a_flags);
	vgone(vp);
	return (0);
}

static int
union_mmap(ap)
	struct vop_mmap_args /* {
		struct vnode *a_vp;
		int a_fflags;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	register struct vnode *ovp = OTHERVP(ap->a_vp);

	ap->a_vp = ovp;
	return (VCALL(ovp, VOFFSET(vop_mmap), ap));
}

static int
union_fsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct proc *a_p;
	} */ *ap;
{
	int error = 0;
	struct proc *p = ap->a_p;
	struct vnode *targetvp = OTHERVP(ap->a_vp);
	struct union_node *un;

	if (targetvp != NULLVP) {
		int dolock = (targetvp == LOWERVP(ap->a_vp));

		un = VTOUNION(ap->a_vp);
		if (dolock)
			vn_lock(targetvp, LK_EXCLUSIVE | LK_RETRY, p);
		else {
			un = VTOUNION(ap->a_vp);
			if ((un->un_flags & UN_ULOCK) == 0 &&
			    targetvp->v_data != NULL &&
			    ((struct lock *)targetvp->v_data)->lk_lockholder
			     == curproc->p_pid &&
			    VOP_ISLOCKED(targetvp) != 0)
				return 0;	/* XXX */

			FIXUP(un, p);
		}

		error = VOP_FSYNC(targetvp, ap->a_cred, ap->a_waitfor, p);
		if (dolock)
			VOP_UNLOCK(targetvp, 0, p);
	}

	return (error);
}

static int
union_remove(ap)
	struct vop_remove_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;
	int error;

	if (dun->un_uppervp == NULLVP)
		panic("union remove: null upper vnode");

	if (un->un_uppervp != NULLVP) {
		struct vnode *dvp = dun->un_uppervp;
		struct vnode *vp = un->un_uppervp;

		FIXUP(dun, p);
		dun->un_flags |= UN_KLOCK;
		VOP_UNLOCK(ap->a_dvp, 0, p);
		FIXUP(un, p);
		un->un_flags |= UN_KLOCK;
		VOP_UNLOCK(ap->a_vp, 0, p);

		if (union_dowhiteout(un, cnp->cn_cred, p))
			cnp->cn_flags |= DOWHITEOUT;
		error = VOP_REMOVE(dvp, vp, cnp);
#if 0
		/* XXX */
		if (!error)
			union_removed_upper(un);
#endif
		dun->un_flags |= UN_ULOCK;
		un->un_flags |= UN_ULOCK;
	} else {
		FIXUP(dun, p);
		error = union_mkwhiteout(
			MOUNTTOUNIONMOUNT(UNIONTOV(dun)->v_mount),
			dun->un_uppervp, ap->a_cnp, un->un_path);
	}

	return (error);
}

static int
union_link(ap)
	struct vop_link_args /* {
		struct vnode *a_tdvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;
	struct union_node *dun = VTOUNION(ap->a_tdvp);
	struct vnode *vp;
	struct vnode *tdvp;
	int error = 0;

	if (ap->a_tdvp->v_op != ap->a_vp->v_op) {
		vp = ap->a_vp;
	} else {
		struct union_node *tun = VTOUNION(ap->a_vp);
		if (tun->un_uppervp == NULLVP) {
			vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY, p);
			if (dun->un_uppervp == tun->un_dirvp) {
				dun->un_flags &= ~UN_ULOCK;
				VOP_UNLOCK(dun->un_uppervp, 0, p);
			}
			error = union_copyup(tun, 1, cnp->cn_cred, p);
			if (dun->un_uppervp == tun->un_dirvp) {
				vn_lock(dun->un_uppervp,
					LK_EXCLUSIVE | LK_RETRY, p);
				dun->un_flags |= UN_ULOCK;
			}
			VOP_UNLOCK(ap->a_vp, 0, p);
		}
		vp = tun->un_uppervp;
	}

	tdvp = dun->un_uppervp;
	if (tdvp == NULLVP)
		error = EROFS;

	if (error)
		return (error);

	FIXUP(dun, p);
	dun->un_flags |= UN_KLOCK;
	VOP_UNLOCK(ap->a_tdvp, 0, p);

	error = VOP_LINK(tdvp, vp, cnp);

	dun->un_flags |= UN_ULOCK;

	return (error);
}

static int
union_rename(ap)
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap;
{
	int error;

	struct vnode *fdvp = ap->a_fdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *tvp = ap->a_tvp;

	if (fdvp->v_op == union_vnodeop_p) {	/* always true */
		struct union_node *un = VTOUNION(fdvp);
		if (un->un_uppervp == NULLVP) {
			/*
			 * this should never happen in normal
			 * operation but might if there was
			 * a problem creating the top-level shadow
			 * directory.
			 */
			error = EXDEV;
			goto bad;
		}

		fdvp = un->un_uppervp;
		VREF(fdvp);
		vrele(ap->a_fdvp);
	}

	if (fvp->v_op == union_vnodeop_p) {	/* always true */
		struct union_node *un = VTOUNION(fvp);
		if (un->un_uppervp == NULLVP) {
			/* XXX: should do a copyup */
			error = EXDEV;
			goto bad;
		}

		if (un->un_lowervp != NULLVP)
			ap->a_fcnp->cn_flags |= DOWHITEOUT;

		fvp = un->un_uppervp;
		VREF(fvp);
		vrele(ap->a_fvp);
	}

	if (tdvp->v_op == union_vnodeop_p) {
		struct union_node *un = VTOUNION(tdvp);
		if (un->un_uppervp == NULLVP) {
			/*
			 * this should never happen in normal
			 * operation but might if there was
			 * a problem creating the top-level shadow
			 * directory.
			 */
			error = EXDEV;
			goto bad;
		}

		tdvp = un->un_uppervp;
		VREF(tdvp);
		un->un_flags |= UN_KLOCK;
		vput(ap->a_tdvp);
	}

	if (tvp != NULLVP && tvp->v_op == union_vnodeop_p) {
		struct union_node *un = VTOUNION(tvp);

		tvp = un->un_uppervp;
		if (tvp != NULLVP) {
			VREF(tvp);
			un->un_flags |= UN_KLOCK;
		}
		vput(ap->a_tvp);
	}

	return (VOP_RENAME(fdvp, fvp, ap->a_fcnp, tdvp, tvp, ap->a_tcnp));

bad:
	vrele(fdvp);
	vrele(fvp);
	vput(tdvp);
	if (tvp != NULLVP)
		vput(tvp);

	return (error);
}

static int
union_mkdir(ap)
	struct vop_mkdir_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct vnode *dvp = dun->un_uppervp;
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;

	if (dvp != NULLVP) {
		struct vnode *vp;
		int error;

		FIXUP(dun, p);
		dun->un_flags |= UN_KLOCK;
		VOP_UNLOCK(ap->a_dvp, 0, p);
		error = VOP_MKDIR(dvp, &vp, cnp, ap->a_vap);
		if (error) {
			dun->un_flags |= UN_ULOCK;
			return (error);
		}

		VOP_UNLOCK(dvp, 0, p);
		error = union_allocvp(ap->a_vpp, ap->a_dvp->v_mount, ap->a_dvp,
				      NULLVP, cnp, vp, NULLVP, 1);
		if (error)
			vput(vp);
		vn_lock(ap->a_dvp, LK_EXCLUSIVE | LK_RETRY, p);

		return (error);
	}

	return (EROFS);
}

static int
union_rmdir(ap)
	struct vop_rmdir_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_vp);
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;
	int error;

	if (dun->un_uppervp == NULLVP)
		panic("union rmdir: null upper vnode");

	if (un->un_uppervp != NULLVP) {
		struct vnode *dvp = dun->un_uppervp;
		struct vnode *vp = un->un_uppervp;

		FIXUP(dun, p);
		dun->un_flags |= UN_KLOCK;
		VOP_UNLOCK(ap->a_dvp, 0, p);
		FIXUP(un, p);
		un->un_flags |= UN_KLOCK;
		VOP_UNLOCK(ap->a_vp, 0, p);

		if (union_dowhiteout(un, cnp->cn_cred, p))
			cnp->cn_flags |= DOWHITEOUT;
		error = VOP_RMDIR(dvp, vp, ap->a_cnp);
#if 0
		/* XXX */
		if (!error)
			union_removed_upper(un);
#endif
		dun->un_flags |= UN_ULOCK;
		un->un_flags |= UN_ULOCK;
	} else {
		FIXUP(dun, p);
		error = union_mkwhiteout(
			MOUNTTOUNIONMOUNT(UNIONTOV(dun)->v_mount),
			dun->un_uppervp, ap->a_cnp, un->un_path);
	}

	return (error);
}

static int
union_symlink(ap)
	struct vop_symlink_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
		char *a_target;
	} */ *ap;
{
	struct union_node *dun = VTOUNION(ap->a_dvp);
	struct vnode *dvp = dun->un_uppervp;
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;

	if (dvp != NULLVP) {
		struct vnode *vp;
		int error;

		FIXUP(dun, p);
		dun->un_flags |= UN_KLOCK;
		VOP_UNLOCK(ap->a_dvp, 0, p);
		error = VOP_SYMLINK(dvp, &vp, cnp, ap->a_vap, ap->a_target);
		dun->un_flags |= UN_ULOCK;
		*ap->a_vpp = NULLVP;
		return (error);
	}

	return (EROFS);
}

/*
 * union_readdir works in concert with getdirentries and
 * readdir(3) to provide a list of entries in the unioned
 * directories.  getdirentries is responsible for walking
 * down the union stack.  readdir(3) is responsible for
 * eliminating duplicate names from the returned data stream.
 */
static int
union_readdir(ap)
	struct vop_readdir_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
		int *a_eofflag;
		u_long *a_cookies;
		int a_ncookies;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *uvp = un->un_uppervp;
	struct proc *p = ap->a_uio->uio_procp;

	if (uvp == NULLVP)
		return (0);

	FIXUP(un, p);
	ap->a_vp = uvp;
	return (VCALL(uvp, VOFFSET(vop_readdir), ap));
}

static int
union_readlink(ap)
	struct vop_readlink_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
	} */ *ap;
{
	int error;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct vnode *vp = OTHERVP(ap->a_vp);
	int dolock = (vp == LOWERVP(ap->a_vp));

	if (dolock)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	else
		FIXUP(VTOUNION(ap->a_vp), p);
	ap->a_vp = vp;
	error = VCALL(vp, VOFFSET(vop_readlink), ap);
	if (dolock)
		VOP_UNLOCK(vp, 0, p);

	return (error);
}

static int
union_abortop(ap)
	struct vop_abortop_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
	} */ *ap;
{
	int error;
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;
	struct vnode *vp = OTHERVP(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_dvp);
	int islocked = un->un_flags & UN_LOCKED;
	int dolock = (vp == LOWERVP(ap->a_dvp));

	if (islocked) {
		if (dolock)
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		else
			FIXUP(VTOUNION(ap->a_dvp), p);
	}
	ap->a_dvp = vp;
	error = VCALL(vp, VOFFSET(vop_abortop), ap);
	if (islocked && dolock)
		VOP_UNLOCK(vp, 0, p);

	return (error);
}

static int
union_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct proc *p = ap->a_p;
	struct union_node *un = VTOUNION(vp);
	struct vnode **vpp;

	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our union_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */

	if (un->un_dircache != 0) {
		for (vpp = un->un_dircache; *vpp != NULLVP; vpp++)
			vrele(*vpp);
		free(un->un_dircache, M_TEMP);
		un->un_dircache = 0;
	}

	VOP_UNLOCK(vp, 0, p);

	if ((un->un_flags & UN_CACHED) == 0)
		vgone(vp);

	return (0);
}

static int
union_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	union_freevp(ap->a_vp);

	return (0);
}

static int
union_lock(ap)
	struct vop_lock_args *ap;
{
	struct vnode *vp = ap->a_vp;
	struct proc *p = ap->a_p;
	int flags = ap->a_flags;
	struct union_node *un;
	int error;

	vop_nolock(ap);
	/*
	 * Need to do real lockmgr-style locking here.
	 * In the meantime, draining won't work quite right,
	 * which could lead to a few race conditions.
	 * The following test was here, but is not quite right;
	 * we still need to take the lock:
	if ((flags & LK_TYPE_MASK) == LK_DRAIN)
		return (0);
	 */
	flags &= ~LK_INTERLOCK;

start:
	un = VTOUNION(vp);

	if (un->un_uppervp != NULLVP) {
		if (((un->un_flags & UN_ULOCK) == 0) &&
		    (vp->v_usecount != 0)) {
			error = vn_lock(un->un_uppervp, flags, p);
			if (error)
				return (error);
			un->un_flags |= UN_ULOCK;
		}
#ifdef DIAGNOSTIC
		if (un->un_flags & UN_KLOCK) {
			vprint("dangling upper lock", vp);
			panic("union: dangling upper lock");
		}
#endif
	}

	if (un->un_flags & UN_LOCKED) {
#ifdef DIAGNOSTIC
		if (curproc && un->un_pid == curproc->p_pid &&
		    un->un_pid > -1 && curproc->p_pid > -1)
			panic("union: locking against myself");
#endif
		un->un_flags |= UN_WANT;
		tsleep((caddr_t)&un->un_flags, PINOD, "unionlk2", 0);
		goto start;
	}

#ifdef DIAGNOSTIC
	if (curproc)
		un->un_pid = curproc->p_pid;
	else
		un->un_pid = -1;
#endif

	un->un_flags |= UN_LOCKED;
	return (0);
}

/*
 * When operations want to vput() a union node yet retain a lock on
 * the upper vnode (say, to do some further operations like link(),
 * mkdir(), ...), they set UN_KLOCK on the union node, then call
 * vput() which calls VOP_UNLOCK() and comes here.  union_unlock()
 * unlocks the union node (leaving the upper vnode alone), clears the
 * KLOCK flag, and then returns to vput().  The caller then does whatever
 * is left to do with the upper vnode, and ensures that it gets unlocked.
 *
 * If UN_KLOCK isn't set, then the upper vnode is unlocked here.
 */
static int
union_unlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct proc *p = ap->a_p;

#ifdef DIAGNOSTIC
	if ((un->un_flags & UN_LOCKED) == 0)
		panic("union: unlock unlocked node");
	if (curproc && un->un_pid != curproc->p_pid &&
	    curproc->p_pid > -1 && un->un_pid > -1)
		panic("union: unlocking other process's union node");
#endif

	un->un_flags &= ~UN_LOCKED;

	if ((un->un_flags & (UN_ULOCK|UN_KLOCK)) == UN_ULOCK)
		VOP_UNLOCK(un->un_uppervp, 0, p);

	un->un_flags &= ~(UN_ULOCK|UN_KLOCK);

	if (un->un_flags & UN_WANT) {
		un->un_flags &= ~UN_WANT;
		wakeup((caddr_t) &un->un_flags);
	}

#ifdef DIAGNOSTIC
	un->un_pid = 0;
#endif
	vop_nounlock(ap);

	return (0);
}

static int
union_bmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{
	int error;
	struct proc *p = curproc;		/* XXX */
	struct vnode *vp = OTHERVP(ap->a_vp);
	int dolock = (vp == LOWERVP(ap->a_vp));

	if (dolock)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	else
		FIXUP(VTOUNION(ap->a_vp), p);
	ap->a_vp = vp;
	error = VCALL(vp, VOFFSET(vop_bmap), ap);
	if (dolock)
		VOP_UNLOCK(vp, 0, p);

	return (error);
}

static int
union_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	printf("\ttag VT_UNION, vp=%p, uppervp=%p, lowervp=%p\n",
	       vp, UPPERVP(vp), LOWERVP(vp));
	if (UPPERVP(vp) != NULLVP)
		vprint("union: upper", UPPERVP(vp));
	if (LOWERVP(vp) != NULLVP)
		vprint("union: lower", LOWERVP(vp));

	return (0);
}

static int
union_islocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	return ((VTOUNION(ap->a_vp)->un_flags & UN_LOCKED) ? 1 : 0);
}

static int
union_pathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{
	int error;
	struct proc *p = curproc;		/* XXX */
	struct vnode *vp = OTHERVP(ap->a_vp);
	int dolock = (vp == LOWERVP(ap->a_vp));

	if (dolock)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	else
		FIXUP(VTOUNION(ap->a_vp), p);
	ap->a_vp = vp;
	error = VCALL(vp, VOFFSET(vop_pathconf), ap);
	if (dolock)
		VOP_UNLOCK(vp, 0, p);

	return (error);
}

static int
union_advlock(ap)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap;
{
	register struct vnode *ovp = OTHERVP(ap->a_vp);

	ap->a_vp = ovp;
	return (VCALL(ovp, VOFFSET(vop_advlock), ap));
}

/*
 * XXX - vop_strategy must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
 */
static int
union_strategy(ap)
	struct vop_strategy_args /* {
		struct vnode *a_vp;
		struct buf *a_bp;
	} */ *ap;
{
	struct buf *bp = ap->a_bp;
	struct vnode *othervp = OTHERVP(bp->b_vp);

#ifdef DIAGNOSTIC
	if (othervp == NULLVP)
		panic("union_strategy: nil vp");
	if (((bp->b_flags & B_READ) == 0) &&
	    (othervp == LOWERVP(bp->b_vp)))
		panic("union_strategy: writing to lowervp");
#endif

	return (VOP_STRATEGY(othervp, bp));
}

/*
 * Global vfs data structures
 */
vop_t **union_vnodeop_p;
static struct vnodeopv_entry_desc union_vnodeop_entries[] = {
	{ &vop_default_desc,		(vop_t *) vop_defaultop },
	{ &vop_abortop_desc,		(vop_t *) union_abortop },
	{ &vop_access_desc,		(vop_t *) union_access },
	{ &vop_advlock_desc,		(vop_t *) union_advlock },
	{ &vop_bmap_desc,		(vop_t *) union_bmap },
	{ &vop_close_desc,		(vop_t *) union_close },
	{ &vop_create_desc,		(vop_t *) union_create },
	{ &vop_fsync_desc,		(vop_t *) union_fsync },
	{ &vop_getattr_desc,		(vop_t *) union_getattr },
	{ &vop_inactive_desc,		(vop_t *) union_inactive },
	{ &vop_ioctl_desc,		(vop_t *) union_ioctl },
	{ &vop_islocked_desc,		(vop_t *) union_islocked },
	{ &vop_lease_desc,		(vop_t *) union_lease },
	{ &vop_link_desc,		(vop_t *) union_link },
	{ &vop_lock_desc,		(vop_t *) union_lock },
	{ &vop_lookup_desc,		(vop_t *) union_lookup },
	{ &vop_mkdir_desc,		(vop_t *) union_mkdir },
	{ &vop_mknod_desc,		(vop_t *) union_mknod },
	{ &vop_mmap_desc,		(vop_t *) union_mmap },
	{ &vop_open_desc,		(vop_t *) union_open },
	{ &vop_pathconf_desc,		(vop_t *) union_pathconf },
	{ &vop_poll_desc,		(vop_t *) union_poll },
	{ &vop_print_desc,		(vop_t *) union_print },
	{ &vop_read_desc,		(vop_t *) union_read },
	{ &vop_readdir_desc,		(vop_t *) union_readdir },
	{ &vop_readlink_desc,		(vop_t *) union_readlink },
	{ &vop_reclaim_desc,		(vop_t *) union_reclaim },
	{ &vop_remove_desc,		(vop_t *) union_remove },
	{ &vop_rename_desc,		(vop_t *) union_rename },
	{ &vop_revoke_desc,		(vop_t *) union_revoke },
	{ &vop_rmdir_desc,		(vop_t *) union_rmdir },
	{ &vop_setattr_desc,		(vop_t *) union_setattr },
	{ &vop_strategy_desc,		(vop_t *) union_strategy },
	{ &vop_symlink_desc,		(vop_t *) union_symlink },
	{ &vop_unlock_desc,		(vop_t *) union_unlock },
	{ &vop_whiteout_desc,		(vop_t *) union_whiteout },
	{ &vop_write_desc,		(vop_t *) union_write },
	{ NULL, NULL }
};
static struct vnodeopv_desc union_vnodeop_opv_desc =
	{ &union_vnodeop_p, union_vnodeop_entries };

VNODEOP_SET(union_vnodeop_opv_desc);