/*
 * Copyright (c) 1994 Jan-Simon Pendry
 * Copyright (c) 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)union_subr.c	8.20 (Berkeley) 5/20/95
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>	/* for vnode_pager_setsize */
#include <vm/vm_zone.h>
#include <vm/vm_object.h>	/* for vm cache coherency */
#include <miscfs/union/union.h>

#include <sys/proc.h>

extern int	union_init __P((void));

/* must be power of two, otherwise change UNION_HASH() */
#define NHASH 32

/* unsigned int ... */
#define UNION_HASH(u, l) \
	(((((uintptr_t) (u)) + ((uintptr_t) l)) >> 8) & (NHASH-1))
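
/*
 * Illustrative worked example (not part of the original source): with
 * NHASH == 32, an upper/lower vnode pair whose addresses sum to 0x1234500
 * hashes to bucket
 *
 *	(0x1234500 >> 8) & (NHASH - 1)  ==  0x12345 & 31  ==  5
 *
 * The right shift discards the low-order address bits before masking.
 */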

static LIST_HEAD(unhead, union_node) unhead[NHASH];
static int unvplock[NHASH];

static void	union_dircache_r __P((struct vnode *vp, struct vnode ***vppp,
					int *cntp));
static int	union_list_lock __P((int ix));
static void	union_list_unlock __P((int ix));
static int	union_relookup __P((struct union_mount *um, struct vnode *dvp,
				    struct vnode **vpp,
				    struct componentname *cnp,
				    struct componentname *cn, char *path,
				    int pathlen));
static void	union_updatevp __P((struct union_node *un,
				    struct vnode *uppervp,
				    struct vnode *lowervp));
static void union_newlower __P((struct union_node *, struct vnode *));
static void union_newupper __P((struct union_node *, struct vnode *));
static int union_copyfile __P((struct vnode *, struct vnode *,
				struct ucred *, struct proc *));
static int union_vn_create __P((struct vnode **, struct union_node *,
				struct proc *));
static int union_vn_close __P((struct vnode *, int, struct ucred *,
				struct proc *));

/*
 * Initialize the union_node hash chains and the per-chain lock flags.
 */
int
union_init()
{
	int i;

	for (i = 0; i < NHASH; i++)
		LIST_INIT(&unhead[i]);
	bzero((caddr_t)unvplock, sizeof(unvplock));
	return (0);
}

/*
 * Lock hash chain (ix).  Returns non-zero if the caller had to sleep
 * waiting for the lock, in which case the chain may have changed and
 * the caller must retry.
 */
static int
union_list_lock(ix)
	int ix;
{
	if (unvplock[ix] & UNVP_LOCKED) {
		unvplock[ix] |= UNVP_WANT;
		(void) tsleep((caddr_t) &unvplock[ix], PINOD, "unllck", 0);
		return (1);
	}
	unvplock[ix] |= UNVP_LOCKED;
	return (0);
}

/*
 * Unlock hash chain (ix) and wake up any waiters.
 */
static void
union_list_unlock(ix)
	int ix;
{
	unvplock[ix] &= ~UNVP_LOCKED;

	if (unvplock[ix] & UNVP_WANT) {
		unvplock[ix] &= ~UNVP_WANT;
		wakeup((caddr_t) &unvplock[ix]);
	}
}
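
#if 0
	/*
	 * Illustrative only (not part of the original source): the canonical
	 * use of the two routines above, as seen later in this file.  Since
	 * union_list_lock() returns non-zero when it had to sleep, the
	 * caller loops until the lock is acquired without sleeping, works on
	 * the chain, and then releases it.
	 */
	while (union_list_lock(hash))
		continue;
	/* ... examine or modify unhead[hash] ... */
	union_list_unlock(hash);
#endif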

/*
 * union_updatevp:
 *
 *	The uppervp, if not NULL, must be referenced and not locked by us
 *	The lowervp, if not NULL, must be referenced.
 *
 *	if uppervp and lowervp match pointers already installed, nothing
 *	happens.  The passed vp's (when matching) are not adjusted.  This
 *	routine may only be called by union_newupper() and union_newlower().
 */

static void
union_updatevp(un, uppervp, lowervp)
	struct union_node *un;
	struct vnode *uppervp;
	struct vnode *lowervp;
{
	int ohash = UNION_HASH(un->un_uppervp, un->un_lowervp);
	int nhash = UNION_HASH(uppervp, lowervp);
	int docache = (lowervp != NULLVP || uppervp != NULLVP);
	int lhash, uhash;

	/*
	 * Ensure locking is ordered from lower to higher
	 * to avoid deadlocks.
	 */
	if (nhash < ohash) {
		lhash = nhash;
		uhash = ohash;
	} else {
		lhash = ohash;
		uhash = nhash;
	}

	if (lhash != uhash) {
		while (union_list_lock(lhash))
			continue;
	}

	while (union_list_lock(uhash))
		continue;

	if (ohash != nhash || !docache) {
		if (un->un_flags & UN_CACHED) {
			un->un_flags &= ~UN_CACHED;
			LIST_REMOVE(un, un_cache);
		}
	}

	if (ohash != nhash)
		union_list_unlock(ohash);

	if (un->un_lowervp != lowervp) {
		if (un->un_lowervp) {
			vrele(un->un_lowervp);
			if (un->un_path) {
				free(un->un_path, M_TEMP);
				un->un_path = 0;
			}
		}
		un->un_lowervp = lowervp;
		un->un_lowersz = VNOVAL;
	}

	if (un->un_uppervp != uppervp) {
		if (un->un_uppervp)
			vrele(un->un_uppervp);
		un->un_uppervp = uppervp;
		un->un_uppersz = VNOVAL;
	}

	if (docache && (ohash != nhash)) {
		LIST_INSERT_HEAD(&unhead[nhash], un, un_cache);
		un->un_flags |= UN_CACHED;
	}

	union_list_unlock(nhash);
}

/*
 * Set a new lowervp.  The passed lowervp must be referenced and will be
 * stored in the vp in a referenced state.
 */

static void
union_newlower(un, lowervp)
	struct union_node *un;
	struct vnode *lowervp;
{
	union_updatevp(un, un->un_uppervp, lowervp);
}

/*
 * Set a new uppervp.  The passed uppervp must be locked and will be
 * stored in the vp in a locked state.  The caller should not unlock
 * uppervp.
 */

static void
union_newupper(un, uppervp)
	struct union_node *un;
	struct vnode *uppervp;
{
	union_updatevp(un, uppervp, un->un_lowervp);
}

/*
 * Keep track of size changes in the underlying vnodes.
 * If the size changes, then callback to the vm layer
 * giving priority to the upper layer size.
 */
void
union_newsize(vp, uppersz, lowersz)
	struct vnode *vp;
	off_t uppersz, lowersz;
{
	struct union_node *un;
	off_t sz;

	/* only interested in regular files */
	if (vp->v_type != VREG)
		return;

	un = VTOUNION(vp);
	sz = VNOVAL;

	if ((uppersz != VNOVAL) && (un->un_uppersz != uppersz)) {
		un->un_uppersz = uppersz;
		if (sz == VNOVAL)
			sz = un->un_uppersz;
	}

	if ((lowersz != VNOVAL) && (un->un_lowersz != lowersz)) {
		un->un_lowersz = lowersz;
		if (sz == VNOVAL)
			sz = un->un_lowersz;
	}

	if (sz != VNOVAL) {
		UDEBUG(("union: %s size now %ld\n",
			(uppersz != VNOVAL ? "upper" : "lower"), (long)sz));
		vnode_pager_setsize(vp, sz);
	}
}

/*
 * union_allocvp:	allocate a union_node and associate it with a
 *			parent union_node and one or two vnodes.
 *
 *	vpp	Holds the returned vnode locked and referenced if no
 *		error occurs.
 *
 *	mp	Holds the mount point.  mp may or may not be busied.
 *		allocvp makes no changes to mp.
 *
 *	dvp	Holds the parent union_node to the one we wish to create.
 *		XXX may only be used to traverse an uncopied lowervp-based
 *		tree? XXX
 *
 *		dvp may or may not be locked.  allocvp makes no changes
 *		to dvp.
 *
 *	upperdvp Holds the parent vnode to uppervp, generally used along
 *		with path component information to create a shadow of
 *		lowervp when uppervp does not exist.
 *
 *		upperdvp is referenced but unlocked on entry, and will be
 *		dereferenced on return.
298 * 299 * uppervp Holds the new uppervp vnode to be stored in the 300 * union_node we are allocating. uppervp is referenced but 301 * not locked, and will be dereferenced on return. 302 * 303 * lowervp Holds the new lowervp vnode to be stored in the 304 * union_node we are allocating. uppervp is referenced but 305 * not locked, and will be dereferenced on return. 306 * 307 * cnp Holds path component information to be coupled with 308 * lowervp and upperdvp to allow unionfs to create an uppervp 309 * later on. Only used if lowervp is valid. The conents 310 * of cnp is only valid for the duration of the call. 311 * 312 * docache Determine whether this node should be entered in the 313 * cache or whether it should be destroyed as soon as possible. 314 * 315 * all union_nodes are maintained on a singly-linked 316 * list. new nodes are only allocated when they cannot 317 * be found on this list. entries on the list are 318 * removed when the vfs reclaim entry is called. 319 * 320 * a single lock is kept for the entire list. this is 321 * needed because the getnewvnode() function can block 322 * waiting for a vnode to become free, in which case there 323 * may be more than one process trying to get the same 324 * vnode. this lock is only taken if we are going to 325 * call getnewvnode, since the kernel itself is single-threaded. 326 * 327 * if an entry is found on the list, then call vget() to 328 * take a reference. this is done because there may be 329 * zero references to it and so it needs to removed from 330 * the vnode free list. 331 */ 332 333 int 334 union_allocvp(vpp, mp, dvp, upperdvp, cnp, uppervp, lowervp, docache) 335 struct vnode **vpp; 336 struct mount *mp; 337 struct vnode *dvp; /* parent union vnode */ 338 struct vnode *upperdvp; /* parent vnode of uppervp */ 339 struct componentname *cnp; /* may be null */ 340 struct vnode *uppervp; /* may be null */ 341 struct vnode *lowervp; /* may be null */ 342 int docache; 343 { 344 int error; 345 struct union_node *un = 0; 346 struct vnode *xlowervp = NULLVP; 347 struct union_mount *um = MOUNTTOUNIONMOUNT(mp); 348 struct proc *p = (cnp) ? cnp->cn_proc : curproc; 349 int hash = 0; 350 int vflag; 351 int try; 352 353 if (uppervp == NULLVP && lowervp == NULLVP) 354 panic("union: unidentifiable allocation"); 355 356 if (uppervp && lowervp && (uppervp->v_type != lowervp->v_type)) { 357 xlowervp = lowervp; 358 lowervp = NULLVP; 359 } 360 361 /* detect the root vnode (and aliases) */ 362 vflag = 0; 363 if ((uppervp == um->um_uppervp) && 364 ((lowervp == NULLVP) || lowervp == um->um_lowervp)) { 365 if (lowervp == NULLVP) { 366 lowervp = um->um_lowervp; 367 if (lowervp != NULLVP) 368 VREF(lowervp); 369 } 370 vflag = VROOT; 371 } 372 373 loop: 374 if (!docache) { 375 un = 0; 376 } else for (try = 0; try < 3; try++) { 377 switch (try) { 378 case 0: 379 if (lowervp == NULLVP) 380 continue; 381 hash = UNION_HASH(uppervp, lowervp); 382 break; 383 384 case 1: 385 if (uppervp == NULLVP) 386 continue; 387 hash = UNION_HASH(uppervp, NULLVP); 388 break; 389 390 case 2: 391 if (lowervp == NULLVP) 392 continue; 393 hash = UNION_HASH(NULLVP, lowervp); 394 break; 395 } 396 397 while (union_list_lock(hash)) 398 continue; 399 400 for (un = unhead[hash].lh_first; un != 0; 401 un = un->un_cache.le_next) { 402 if ((un->un_lowervp == lowervp || 403 un->un_lowervp == NULLVP) && 404 (un->un_uppervp == uppervp || 405 un->un_uppervp == NULLVP) && 406 (UNIONTOV(un)->v_mount == mp)) { 407 if (vget(UNIONTOV(un), 0, 408 cnp ? 
					union_list_unlock(hash);
					goto loop;
				}
				break;
			}
		}

		union_list_unlock(hash);

		if (un)
			break;
	}

	if (un) {
		/*
		 * Obtain a lock on the union_node.  Everything is unlocked
		 * except for dvp, so check that case.  If they match, our
		 * new un is already locked.  Otherwise we have to lock our
		 * new un.
		 *
		 * A potential deadlock situation occurs when we are holding
		 * one lock while trying to get another.  We must follow
		 * strict ordering rules to avoid it.  We try to locate dvp
		 * by scanning up from un_vnode, since the most likely
		 * scenario is un being under dvp.
		 */

		if (dvp && un->un_vnode != dvp) {
			struct vnode *scan = un->un_vnode;

			do {
				scan = VTOUNION(scan)->un_pvp;
			} while (scan && scan->v_tag == VT_UNION && scan != dvp);
			if (scan != dvp) {
				/*
				 * our new un is above dvp (we never saw dvp
				 * while moving up the tree).
				 */
				VREF(dvp);
				VOP_UNLOCK(dvp, 0, p);
				error = vn_lock(un->un_vnode, LK_EXCLUSIVE, p);
				vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
				vrele(dvp);
			} else {
				/*
				 * our new un is under dvp
				 */
				error = vn_lock(un->un_vnode, LK_EXCLUSIVE, p);
			}
		} else if (dvp == NULLVP) {
			/*
			 * dvp is NULL, we need to lock un.
			 */
			error = vn_lock(un->un_vnode, LK_EXCLUSIVE, p);
		} else {
			/*
			 * dvp == un->un_vnode, we are already locked.
			 */
			error = 0;
		}

		if (error)
			goto loop;

		/*
		 * At this point, the union_node is locked and referenced.
		 *
		 * uppervp is locked and referenced or NULL, lowervp is
		 * referenced or NULL.
		 */
		UDEBUG(("Modify existing un %p vn %p upper %p(refs %d) -> %p(refs %d)\n",
			un, un->un_vnode, un->un_uppervp,
			(un->un_uppervp ? un->un_uppervp->v_usecount : -99),
			uppervp,
			(uppervp ? uppervp->v_usecount : -99)
		));

		if (uppervp != un->un_uppervp) {
			KASSERT(uppervp == NULL || uppervp->v_usecount > 0, ("union_allocvp: too few refs %d (at least 1 required) on uppervp", uppervp->v_usecount));
			union_newupper(un, uppervp);
		} else if (uppervp) {
			KASSERT(uppervp->v_usecount > 1, ("union_allocvp: too few refs %d (at least 2 required) on uppervp", uppervp->v_usecount));
			vrele(uppervp);
		}

		/*
		 * Save information about the lower layer.
		 * This needs to keep track of pathname
		 * and directory information which union_vn_create
		 * might need.
		 */
		if (lowervp != un->un_lowervp) {
			union_newlower(un, lowervp);
			if (cnp && (lowervp != NULLVP)) {
				un->un_path = malloc(cnp->cn_namelen+1,
						M_TEMP, M_WAITOK);
				bcopy(cnp->cn_nameptr, un->un_path,
						cnp->cn_namelen);
				un->un_path[cnp->cn_namelen] = '\0';
			}
		} else if (lowervp) {
			vrele(lowervp);
		}

		/*
		 * and upperdvp
		 */
		if (upperdvp != un->un_dirvp) {
			if (un->un_dirvp)
				vrele(un->un_dirvp);
			un->un_dirvp = upperdvp;
		} else if (upperdvp) {
			vrele(upperdvp);
		}

		*vpp = UNIONTOV(un);
		return (0);
	}

	if (docache) {
		/*
		 * otherwise lock the vp list while we call getnewvnode
		 * since that can block.
		 */
		hash = UNION_HASH(uppervp, lowervp);

		if (union_list_lock(hash))
			goto loop;
	}

	/*
	 * Create new node rather than replace old node.
	 */

	error = getnewvnode(VT_UNION, mp, union_vnodeop_p, vpp);
	if (error) {
		/*
		 * If an error occurs clear out vnodes.
		 */
		if (lowervp)
			vrele(lowervp);
		if (uppervp)
			vrele(uppervp);
		if (upperdvp)
			vrele(upperdvp);
		*vpp = NULL;
		goto out;
	}

	MALLOC((*vpp)->v_data, void *, sizeof(struct union_node),
		M_TEMP, M_WAITOK);

	(*vpp)->v_flag |= vflag;
	if (uppervp)
		(*vpp)->v_type = uppervp->v_type;
	else
		(*vpp)->v_type = lowervp->v_type;

	un = VTOUNION(*vpp);
	bzero(un, sizeof(*un));

	lockinit(&un->un_lock, PVFS, "unlock", 0, 0);
	vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, p);

	un->un_vnode = *vpp;
	un->un_uppervp = uppervp;
	un->un_uppersz = VNOVAL;
	un->un_lowervp = lowervp;
	un->un_lowersz = VNOVAL;
	un->un_dirvp = upperdvp;
	un->un_pvp = dvp;		/* only parent dir in new allocation */
	if (dvp != NULLVP)
		VREF(dvp);
	un->un_dircache = 0;
	un->un_openl = 0;

	if (cnp && (lowervp != NULLVP)) {
		un->un_path = malloc(cnp->cn_namelen+1, M_TEMP, M_WAITOK);
		bcopy(cnp->cn_nameptr, un->un_path, cnp->cn_namelen);
		un->un_path[cnp->cn_namelen] = '\0';
	} else {
		un->un_path = 0;
		un->un_dirvp = NULL;
	}

	if (docache) {
		LIST_INSERT_HEAD(&unhead[hash], un, un_cache);
		un->un_flags |= UN_CACHED;
	}

out:
	if (xlowervp)
		vrele(xlowervp);

	if (docache)
		union_list_unlock(hash);

	return (error);
}

/*
 * union_freevp:  discard a union_node, taking it off the cache list and
 * releasing every vnode reference and pathname buffer it holds.  Called
 * from the vnode reclaim path.
 */
int
union_freevp(vp)
	struct vnode *vp;
{
	struct union_node *un = VTOUNION(vp);

	if (un->un_flags & UN_CACHED) {
		un->un_flags &= ~UN_CACHED;
		LIST_REMOVE(un, un_cache);
	}

	if (un->un_pvp != NULLVP) {
		vrele(un->un_pvp);
		un->un_pvp = NULL;
	}
	if (un->un_uppervp != NULLVP) {
		vrele(un->un_uppervp);
		un->un_uppervp = NULL;
	}
	if (un->un_lowervp != NULLVP) {
		vrele(un->un_lowervp);
		un->un_lowervp = NULL;
	}
	if (un->un_dirvp != NULLVP) {
		vrele(un->un_dirvp);
		un->un_dirvp = NULL;
	}
	if (un->un_path) {
		free(un->un_path, M_TEMP);
		un->un_path = NULL;
	}

	FREE(vp->v_data, M_TEMP);
	vp->v_data = 0;

	return (0);
}

/*
 * copyfile.  copy the vnode (fvp) to the vnode (tvp)
 * using a sequence of reads and writes.  both (fvp)
 * and (tvp) are locked on entry and exit.
 *
 * fvp and tvp are both exclusive locked on call, but their refcounts
 * haven't been bumped at all.
 */
static int
union_copyfile(fvp, tvp, cred, p)
	struct vnode *fvp;
	struct vnode *tvp;
	struct ucred *cred;
	struct proc *p;
{
	char *buf;
	struct uio uio;
	struct iovec iov;
	int error = 0;

	/*
	 * strategy:
	 * allocate a buffer of size MAXBSIZE.
	 * loop doing reads and writes, keeping track
	 * of the current uio offset.
	 * give up at the first sign of trouble.
	 */

	bzero(&uio, sizeof(uio));

	uio.uio_procp = p;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_offset = 0;

	VOP_LEASE(fvp, p, cred, LEASE_READ);
	VOP_LEASE(tvp, p, cred, LEASE_WRITE);

	buf = malloc(MAXBSIZE, M_TEMP, M_WAITOK);

	/* ugly loop follows... */
	do {
		off_t offset = uio.uio_offset;
		int count;
		int bufoffset;

		/*
		 * Setup for big read
		 */
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		iov.iov_base = buf;
		iov.iov_len = MAXBSIZE;
		uio.uio_resid = iov.iov_len;
		uio.uio_rw = UIO_READ;

		if ((error = VOP_READ(fvp, &uio, 0, cred)) != 0)
			break;

		/*
		 * Get bytes read, handle read eof case and setup for
		 * write loop
		 */
		if ((count = MAXBSIZE - uio.uio_resid) == 0)
			break;
		bufoffset = 0;

		/*
		 * Write until an error occurs or our buffer has been
		 * exhausted, then update the offset for the next read.
		 */
		while (bufoffset < count) {
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			iov.iov_base = buf + bufoffset;
			iov.iov_len = count - bufoffset;
			uio.uio_offset = offset + bufoffset;
			uio.uio_rw = UIO_WRITE;
			uio.uio_resid = iov.iov_len;

			if ((error = VOP_WRITE(tvp, &uio, 0, cred)) != 0)
				break;
			bufoffset += (count - bufoffset) - uio.uio_resid;
		}
		uio.uio_offset = offset + bufoffset;
	} while (error == 0);

	free(buf, M_TEMP);
	return (error);
}
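
#if 0
	/*
	 * Illustrative sketch (not part of the original source): the shape
	 * of the copy loop above with the uio bookkeeping stripped away.
	 * Each outer pass reads up to MAXBSIZE bytes; the inner loop keeps
	 * writing because a single VOP_WRITE may consume only part of the
	 * request (the remainder is reported through uio_resid).  The
	 * helpers read_chunk() and write_chunk() are hypothetical stand-ins
	 * for the VOP_READ/VOP_WRITE calls above.
	 */
	for (;;) {
		count = read_chunk(fvp, buf, MAXBSIZE);
		if (count <= 0)
			break;
		for (bufoffset = 0; bufoffset < count; )
			bufoffset += write_chunk(tvp, buf + bufoffset,
			    count - bufoffset);
	}
#endif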
823 * 824 * If an error is returned, *vpp will be invalid, otherwise it 825 * will hold a locked, referenced vnode. If *vpp == dvp then 826 * remember that only one exclusive lock is held. 827 */ 828 829 static int 830 union_relookup(um, dvp, vpp, cnp, cn, path, pathlen) 831 struct union_mount *um; 832 struct vnode *dvp; 833 struct vnode **vpp; 834 struct componentname *cnp; 835 struct componentname *cn; 836 char *path; 837 int pathlen; 838 { 839 int error; 840 841 /* 842 * A new componentname structure must be faked up because 843 * there is no way to know where the upper level cnp came 844 * from or what it is being used for. This must duplicate 845 * some of the work done by NDINIT, some of the work done 846 * by namei, some of the work done by lookup and some of 847 * the work done by VOP_LOOKUP when given a CREATE flag. 848 * Conclusion: Horrible. 849 */ 850 cn->cn_namelen = pathlen; 851 cn->cn_pnbuf = zalloc(namei_zone); 852 bcopy(path, cn->cn_pnbuf, cn->cn_namelen); 853 cn->cn_pnbuf[cn->cn_namelen] = '\0'; 854 855 cn->cn_nameiop = CREATE; 856 cn->cn_flags = (LOCKPARENT|LOCKLEAF|HASBUF|SAVENAME|ISLASTCN); 857 cn->cn_proc = cnp->cn_proc; 858 if (um->um_op == UNMNT_ABOVE) 859 cn->cn_cred = cnp->cn_cred; 860 else 861 cn->cn_cred = um->um_cred; 862 cn->cn_nameptr = cn->cn_pnbuf; 863 cn->cn_consume = cnp->cn_consume; 864 865 VREF(dvp); 866 VOP_UNLOCK(dvp, 0, cnp->cn_proc); 867 868 /* 869 * Pass dvp unlocked and referenced on call to relookup(). 870 * 871 * If an error occurs, dvp will be returned unlocked and dereferenced. 872 */ 873 874 if ((error = relookup(dvp, vpp, cn)) != 0) { 875 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, cnp->cn_proc); 876 return(error); 877 } 878 879 /* 880 * If no error occurs, dvp will be returned locked with the reference 881 * left as before, and vpp will be returned referenced and locked. 882 * 883 * We want to return with dvp as it was passed to us, so we get 884 * rid of our reference. 885 */ 886 vrele(dvp); 887 return (0); 888 } 889 890 /* 891 * Create a shadow directory in the upper layer. 892 * The new vnode is returned locked. 893 * 894 * (um) points to the union mount structure for access to the 895 * the mounting process's credentials. 896 * (dvp) is the directory in which to create the shadow directory, 897 * it is locked (but not ref'd) on entry and return. 898 * (cnp) is the componentname to be created. 899 * (vpp) is the returned newly created shadow directory, which 900 * is returned locked and ref'd 901 */ 902 int 903 union_mkshadow(um, dvp, cnp, vpp) 904 struct union_mount *um; 905 struct vnode *dvp; 906 struct componentname *cnp; 907 struct vnode **vpp; 908 { 909 int error; 910 struct vattr va; 911 struct proc *p = cnp->cn_proc; 912 struct componentname cn; 913 914 error = union_relookup(um, dvp, vpp, cnp, &cn, 915 cnp->cn_nameptr, cnp->cn_namelen); 916 if (error) 917 return (error); 918 919 if (*vpp) { 920 if (cn.cn_flags & HASBUF) { 921 zfree(namei_zone, cn.cn_pnbuf); 922 cn.cn_flags &= ~HASBUF; 923 } 924 if (dvp == *vpp) 925 vrele(*vpp); 926 else 927 vput(*vpp); 928 *vpp = NULLVP; 929 return (EEXIST); 930 } 931 932 /* 933 * policy: when creating the shadow directory in the 934 * upper layer, create it owned by the user who did 935 * the mount, group from parent directory, and mode 936 * 777 modified by umask (ie mostly identical to the 937 * mkdir syscall). 
	 */

	VATTR_NULL(&va);
	va.va_type = VDIR;
	va.va_mode = um->um_cmode;

	/* VOP_LEASE: dvp is locked */
	VOP_LEASE(dvp, p, cn.cn_cred, LEASE_WRITE);

	error = VOP_MKDIR(dvp, vpp, &cn, &va);
	if (cn.cn_flags & HASBUF) {
		zfree(namei_zone, cn.cn_pnbuf);
		cn.cn_flags &= ~HASBUF;
	}
	/*vput(dvp);*/
	return (error);
}

/*
 * Create a whiteout entry in the upper layer.
 *
 * (um) points to the union mount structure for access to the
 * mounting process's credentials.
 * (dvp) is the directory in which to create the whiteout.
 * it is locked on entry and return.
 * (cnp) is the componentname to be created.
 */
int
union_mkwhiteout(um, dvp, cnp, path)
	struct union_mount *um;
	struct vnode *dvp;
	struct componentname *cnp;
	char *path;
{
	int error;
	struct proc *p = cnp->cn_proc;
	struct vnode *wvp;
	struct componentname cn;

	error = union_relookup(um, dvp, &wvp, cnp, &cn, path, strlen(path));
	if (error)
		return (error);

	if (wvp) {
		if (cn.cn_flags & HASBUF) {
			zfree(namei_zone, cn.cn_pnbuf);
			cn.cn_flags &= ~HASBUF;
		}
		if (wvp == dvp)
			vrele(wvp);
		else
			vput(wvp);
		return (EEXIST);
	}

	/* VOP_LEASE: dvp is locked */
	VOP_LEASE(dvp, p, p->p_ucred, LEASE_WRITE);

	error = VOP_WHITEOUT(dvp, &cn, CREATE);
	if (cn.cn_flags & HASBUF) {
		zfree(namei_zone, cn.cn_pnbuf);
		cn.cn_flags &= ~HASBUF;
	}
	return (error);
}

/*
 * union_vn_create: creates and opens a new shadow file
 * on the upper union layer.  this function is similar
 * in spirit to calling vn_open but it avoids calling namei().
 * the problem with calling namei is that a) it locks too many
 * things, and b) it doesn't start at the "right" directory,
 * whereas relookup is told where to start.
 *
 * On entry, the vnode associated with un is locked.  It remains locked
 * on return.
 *
 * If no error occurs, *vpp contains a locked referenced vnode for your
 * use.  If an error occurs *vpp is undefined.
 */
static int
union_vn_create(vpp, un, p)
	struct vnode **vpp;
	struct union_node *un;
	struct proc *p;
{
	struct vnode *vp;
	struct ucred *cred = p->p_ucred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int fmode = FFLAGS(O_WRONLY|O_CREAT|O_TRUNC|O_EXCL);
	int error;
	int cmode = UN_FILEMODE & ~p->p_fd->fd_cmask;
	struct componentname cn;

	*vpp = NULLVP;

	/*
	 * Build a new componentname structure (for the same
	 * reasons outlined in union_mkshadow).
	 * The difference here is that the file is owned by
	 * the current user, rather than by the person who
	 * did the mount, since the current user needs to be
	 * able to write the file (that's why it is being
	 * copied in the first place).
	 */
	cn.cn_namelen = strlen(un->un_path);
	cn.cn_pnbuf = zalloc(namei_zone);
	bcopy(un->un_path, cn.cn_pnbuf, cn.cn_namelen+1);
	cn.cn_nameiop = CREATE;
	cn.cn_flags = (LOCKPARENT|LOCKLEAF|HASBUF|SAVENAME|ISLASTCN);
	cn.cn_proc = p;
	cn.cn_cred = p->p_ucred;
	cn.cn_nameptr = cn.cn_pnbuf;
	cn.cn_consume = 0;

	/*
	 * Pass dvp unlocked and referenced on call to relookup().
	 *
	 * If an error occurs, dvp will be returned unlocked and dereferenced.
	 */
	VREF(un->un_dirvp);
	error = relookup(un->un_dirvp, &vp, &cn);
	if (error)
		return (error);

	/*
	 * If no error occurs, dvp will be returned locked with the reference
	 * left as before, and vpp will be returned referenced and locked.
	 */
	if (vp) {
		vput(un->un_dirvp);
		if (cn.cn_flags & HASBUF) {
			zfree(namei_zone, cn.cn_pnbuf);
			cn.cn_flags &= ~HASBUF;
		}
		if (vp == un->un_dirvp)
			vrele(vp);
		else
			vput(vp);
		return (EEXIST);
	}

	/*
	 * Good - there was no race to create the file
	 * so go ahead and create it.  The permissions
	 * on the file will be 0666 modified by the
	 * current user's umask.  Access to the file, while
	 * it is unioned, will require access to the top *and*
	 * bottom files.  Access when not unioned will simply
	 * require access to the top-level file.
	 * TODO: confirm choice of access permissions.
	 */
	VATTR_NULL(vap);
	vap->va_type = VREG;
	vap->va_mode = cmode;
	VOP_LEASE(un->un_dirvp, p, cred, LEASE_WRITE);
	error = VOP_CREATE(un->un_dirvp, &vp, &cn, vap);
	if (cn.cn_flags & HASBUF) {
		zfree(namei_zone, cn.cn_pnbuf);
		cn.cn_flags &= ~HASBUF;
	}
	vput(un->un_dirvp);
	if (error)
		return (error);

	error = VOP_OPEN(vp, fmode, cred, p);
	if (error == 0 && vn_canvmio(vp) == TRUE)
		error = vfs_object_create(vp, p, cred);
	if (error) {
		vput(vp);
		return (error);
	}
	vp->v_writecount++;
	*vpp = vp;
	return (0);
}

/*
 * Close a vnode opened by union_vn_create(), dropping the write count
 * that was taken at open time.
 */
static int
union_vn_close(vp, fmode, cred, p)
	struct vnode *vp;
	int fmode;
	struct ucred *cred;
	struct proc *p;
{

	if (fmode & FWRITE)
		--vp->v_writecount;
	return (VOP_CLOSE(vp, fmode, cred, p));
}

#if 0

/*
 * union_removed_upper:
 *
 *	called with union_node unlocked. XXX
 */

void
union_removed_upper(un)
	struct union_node *un;
{
	struct proc *p = curproc;	/* XXX */
	struct vnode **vpp;

	/*
	 * Do not set the uppervp to NULLVP.  If lowervp is NULLVP, the
	 * union node will have neither uppervp nor lowervp.  We remove
	 * the union node from cache, so that it will not be referenced.
	 */
	union_newupper(un, NULLVP);
	if (un->un_dircache != 0) {
		for (vpp = un->un_dircache; *vpp != NULLVP; vpp++)
			vrele(*vpp);
		free(un->un_dircache, M_TEMP);
		un->un_dircache = 0;
	}

	if (un->un_flags & UN_CACHED) {
		un->un_flags &= ~UN_CACHED;
		LIST_REMOVE(un, un_cache);
	}
}

#endif

/*
 * determine whether a whiteout is needed
 * during a remove/rmdir operation.
 */
int
union_dowhiteout(un, cred, p)
	struct union_node *un;
	struct ucred *cred;
	struct proc *p;
{
	struct vattr va;

	if (un->un_lowervp != NULLVP)
		return (1);

	if (VOP_GETATTR(un->un_uppervp, &va, cred, p) == 0 &&
	    (va.va_flags & OPAQUE))
		return (1);

	return (0);
}

/*
 * union_dircache_r:
 *
 *	Recursively collect the constituent (non-union) vnodes making up
 *	the directory stack under (vp).  With a NULL table pointer this
 *	only counts them; otherwise each vnode is referenced and appended
 *	to the table.
 */
static void
union_dircache_r(vp, vppp, cntp)
	struct vnode *vp;
	struct vnode ***vppp;
	int *cntp;
{
	struct union_node *un;

	if (vp->v_op != union_vnodeop_p) {
		if (vppp) {
			VREF(vp);
			*(*vppp)++ = vp;
			if (--(*cntp) == 0)
				panic("union: dircache table too small");
		} else {
			(*cntp)++;
		}

		return;
	}

	un = VTOUNION(vp);
	if (un->un_uppervp != NULLVP)
		union_dircache_r(un->un_uppervp, vppp, cntp);
	if (un->un_lowervp != NULLVP)
		union_dircache_r(un->un_lowervp, vppp, cntp);
}

/*
 * Build (on the first call) or advance (on later calls) the table of
 * constituent directory vnodes used to read a union directory one layer
 * at a time.  Returns a union vnode for the next layer, or NULLVP when
 * the layers are exhausted.
 */
struct vnode *
union_dircache(vp, p)
	struct vnode *vp;
	struct proc *p;
{
	int cnt;
	struct vnode *nvp;
	struct vnode **vpp;
	struct vnode **dircache;
	struct union_node *un;
	int error;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	dircache = VTOUNION(vp)->un_dircache;

	nvp = NULLVP;

	if (dircache == NULL) {
		cnt = 0;
		union_dircache_r(vp, 0, &cnt);
		cnt++;
		dircache = malloc(cnt * sizeof(struct vnode *),
				M_TEMP, M_WAITOK);
		vpp = dircache;
		union_dircache_r(vp, &vpp, &cnt);
		*vpp = NULLVP;
		vpp = dircache + 1;
	} else {
		vpp = dircache;
		do {
			if (*vpp++ == VTOUNION(vp)->un_uppervp)
				break;
		} while (*vpp != NULLVP);
	}

	if (*vpp == NULLVP)
		goto out;

	/*vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, p);*/
	UDEBUG(("ALLOCVP-3 %p ref %d\n", *vpp, (*vpp ? (*vpp)->v_usecount : -99)));
	VREF(*vpp);
	error = union_allocvp(&nvp, vp->v_mount, NULLVP, NULLVP, NULL, *vpp, NULLVP, 0);
	UDEBUG(("ALLOCVP-3B %p ref %d\n", nvp, (*vpp ? (*vpp)->v_usecount : -99)));
	if (error)
		goto out;

	VTOUNION(vp)->un_dircache = 0;
	un = VTOUNION(nvp);
	un->un_dircache = dircache;

out:
	VOP_UNLOCK(vp, 0, p);
	return (nvp);
}
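
/*
 * Illustrative worked example (not part of the original source): the page
 * range computed by union_vm_coherency() below, assuming 4K pages.  A
 * request of 8448 bytes at offset 6145 touches bytes 6145..14592, which is
 * pages 1 through 3:
 *
 *	pgoff  = 6145 & 4095			   = 2049
 *	pstart = 6145 / 4096			   = 1
 *	pend   = 1 + (8448 + 2049 + 4095) / 4096   = 4	(exclusive bound)
 */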

/*
 * Guarantee coherency with the VM cache by invalidating any clean VM pages
 * associated with this write and updating any dirty VM pages.  Since our
 * vnode is locked, other processes will not be able to read the pages in
 * again until after our write completes.
 *
 * We also have to be coherent with reads, by flushing any pending dirty
 * pages prior to issuing the read.
 *
 * XXX this is somewhat of a hack at the moment.  To support this properly
 * we would have to be able to run VOP_READ and VOP_WRITE through the VM
 * cache.  Then we wouldn't need to worry about coherency.
 */

void
union_vm_coherency(struct vnode *vp, struct uio *uio, int cleanfls)
{
	vm_object_t object;
	vm_pindex_t pstart;
	vm_pindex_t pend;
	int pgoff;

	if ((object = vp->v_object) == NULL)
		return;

	pgoff = uio->uio_offset & PAGE_MASK;
	pstart = uio->uio_offset / PAGE_SIZE;
	pend = pstart + (uio->uio_resid + pgoff + PAGE_MASK) / PAGE_SIZE;

	vm_object_page_clean(object, pstart, pend, OBJPC_SYNC);
	if (cleanfls)
		vm_object_page_remove(object, pstart, pend, TRUE);
}

/*
 * Module glue to remove #ifdef UNION from vfs_syscalls.c
 */
static int
union_dircheck(struct proc *p, struct vnode **vp, struct file *fp)
{
	int error = 0;

	if ((*vp)->v_op == union_vnodeop_p) {
		struct vnode *lvp;

		lvp = union_dircache(*vp, p);
		if (lvp != NULLVP) {
			struct vattr va;

			/*
			 * If the directory is opaque,
			 * then don't show lower entries
			 */
			error = VOP_GETATTR(*vp, &va, fp->f_cred, p);
			if (va.va_flags & OPAQUE) {
				vput(lvp);
				lvp = NULL;
			}
		}

		if (lvp != NULLVP) {
			error = VOP_OPEN(lvp, FREAD, fp->f_cred, p);
			if (error == 0 && vn_canvmio(lvp) == TRUE)
				error = vfs_object_create(lvp, p, fp->f_cred);
			if (error) {
				vput(lvp);
				return (error);
			}
			VOP_UNLOCK(lvp, 0, p);
			fp->f_data = (caddr_t) lvp;
			fp->f_offset = 0;
			error = vn_close(*vp, FREAD, fp->f_cred, p);
			if (error)
				return (error);
			*vp = lvp;
			return (-1);	/* goto unionread */
		}
	}
	return (error);
}

static int
union_modevent(module_t mod, int type, void *data)
{
	switch (type) {
	case MOD_LOAD:
		union_dircheckp = union_dircheck;
		break;
	case MOD_UNLOAD:
		union_dircheckp = NULL;
		break;
	default:
		break;
	}
	return (0);
}

static moduledata_t union_mod = {
	"union_dircheck",
	union_modevent,
	NULL
};

DECLARE_MODULE(union_dircheck, union_mod, SI_SUB_VFS, SI_ORDER_ANY);