/*-
 * Copyright (c) 1994 Jan-Simon Pendry
 * Copyright (c) 1994
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2005, 2006 Masanori Ozawa <ozawa@ongs.co.jp>, ONGS Inc.
 * Copyright (c) 2006 Daichi Goto <daichi@freebsd.org>
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)union_subr.c	8.20 (Berkeley) 5/20/95
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/stat.h>
#include <sys/resourcevar.h>

#ifdef MAC
#include <sys/mac.h>
#endif

#include <vm/uma.h>

#include <fs/unionfs/union.h>

#define NUNIONFSNODECACHE 16

static MALLOC_DEFINE(M_UNIONFSHASH, "UNIONFS hash", "UNIONFS hash table");
MALLOC_DEFINE(M_UNIONFSNODE, "UNIONFS node", "UNIONFS vnode private part");
MALLOC_DEFINE(M_UNIONFSPATH, "UNIONFS path", "UNIONFS path private part");

/*
 * Initialize
 */
int
unionfs_init(struct vfsconf *vfsp)
{
        UNIONFSDEBUG("unionfs_init\n");	/* printed during system boot */
        return (0);
}

/*
 * Uninitialize
 */
int
unionfs_uninit(struct vfsconf *vfsp)
{
        return (0);
}

static struct unionfs_node_hashhead *
unionfs_get_hashhead(struct vnode *dvp, char *path)
{
        int count;
        char hash;
        struct unionfs_node *unp;

        hash = 0;
        unp = VTOUNIONFS(dvp);
        if (path != NULL) {
                for (count = 0; path[count]; count++)
                        hash += path[count];
        }

        return (&(unp->un_hashtbl[hash & (unp->un_hashmask)]));
}
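
/*
 * Each unionfs directory vnode keeps a small private hash table
 * (un_hashtbl, sized NUNIONFSNODECACHE in unionfs_nodeget()) of its
 * cached child unionfs nodes.  unionfs_get_hashhead() above picks a
 * bucket with a simple additive hash of the name bytes; e.g. a child
 * named "src" lands roughly in bucket ('s' + 'r' + 'c') & un_hashmask
 * (the sum is accumulated in a char).  The lookup loops below
 * disambiguate collisions by comparing un_path.  The next three
 * functions look up, insert into and remove from that cache.
 */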

/*
 * Get the cached vnode (VDIR/VSOCK only).
 */
static struct vnode *
unionfs_get_cached_vdir(struct vnode *uvp, struct vnode *lvp,
    struct vnode *dvp, char *path)
{
        struct unionfs_node_hashhead *hd;
        struct unionfs_node *unp;
        struct vnode *vp;

        KASSERT((uvp == NULLVP || uvp->v_type == VDIR || uvp->v_type == VSOCK),
            ("unionfs_get_cached_vdir: v_type != VDIR/VSOCK"));
        KASSERT((lvp == NULLVP || lvp->v_type == VDIR || lvp->v_type == VSOCK),
            ("unionfs_get_cached_vdir: v_type != VDIR/VSOCK"));

        VI_LOCK(dvp);
        hd = unionfs_get_hashhead(dvp, path);
        LIST_FOREACH(unp, hd, un_hash) {
                if (!strcmp(unp->un_path, path)) {
                        vp = UNIONFSTOV(unp);
                        VI_LOCK_FLAGS(vp, MTX_DUPOK);
                        VI_UNLOCK(dvp);
                        vp->v_iflag &= ~VI_OWEINACT;
                        if ((vp->v_iflag & (VI_DOOMED | VI_DOINGINACT)) != 0) {
                                VI_UNLOCK(vp);
                                vp = NULLVP;
                        } else
                                VI_UNLOCK(vp);
                        return (vp);
                }
        }
        VI_UNLOCK(dvp);

        return (NULLVP);
}

/*
 * Add the new vnode into the cache (VDIR/VSOCK only).
 */
static struct vnode *
unionfs_ins_cached_vdir(struct unionfs_node *uncp,
    struct vnode *dvp, char *path)
{
        struct unionfs_node_hashhead *hd;
        struct unionfs_node *unp;
        struct vnode *vp;

        KASSERT((uncp->un_uppervp==NULLVP || uncp->un_uppervp->v_type==VDIR ||
            uncp->un_uppervp->v_type==VSOCK),
            ("unionfs_ins_cached_vdir: v_type != VDIR/VSOCK"));
        KASSERT((uncp->un_lowervp==NULLVP || uncp->un_lowervp->v_type==VDIR ||
            uncp->un_lowervp->v_type==VSOCK),
            ("unionfs_ins_cached_vdir: v_type != VDIR/VSOCK"));

        VI_LOCK(dvp);
        hd = unionfs_get_hashhead(dvp, path);
        LIST_FOREACH(unp, hd, un_hash) {
                if (!strcmp(unp->un_path, path)) {
                        vp = UNIONFSTOV(unp);
                        VI_LOCK_FLAGS(vp, MTX_DUPOK);
                        vp->v_iflag &= ~VI_OWEINACT;
                        if ((vp->v_iflag & (VI_DOOMED | VI_DOINGINACT)) != 0) {
                                LIST_INSERT_HEAD(hd, uncp, un_hash);
                                VI_UNLOCK(vp);
                                vp = NULLVP;
                        } else
                                VI_UNLOCK(vp);
                        VI_UNLOCK(dvp);
                        return (vp);
                }
        }

        LIST_INSERT_HEAD(hd, uncp, un_hash);
        VI_UNLOCK(dvp);

        return (NULLVP);
}

/*
 * Remove the vnode from the cache (VDIR/VSOCK only).
 */
static void
unionfs_rem_cached_vdir(struct unionfs_node *unp, struct vnode *dvp)
{
        KASSERT((unp != NULL), ("unionfs_rem_cached_vdir: null node"));
        KASSERT((dvp != NULLVP),
            ("unionfs_rem_cached_vdir: null parent vnode"));
        KASSERT((unp->un_hash.le_prev != NULL),
            ("unionfs_rem_cached_vdir: null hash"));

        VI_LOCK(dvp);
        LIST_REMOVE(unp, un_hash);
        VI_UNLOCK(dvp);
}
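
/*
 * Sketch of a typical use of the constructor below (illustrative only;
 * the real callers are the lookup and mount paths):
 *
 *	error = unionfs_nodeget(mp, uvp, lvp, dvp, &vp, cnp, td);
 *	if (error != 0)
 *		return (error);
 *	(vp is now the referenced unionfs vnode covering uvp/lvp, and is
 *	 locked if cnp->cn_lkflags asked for a lock type)
 *
 * uvp and lvp are passed in unlocked; unionfs_nodeget() takes its own
 * references on them.
 */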

/*
 * Make a new unionfs node or return an existing one.
 *
 * uppervp and lowervp should be unlocked: locking the new unionfs vnode
 * also locks uppervp or lowervp, so holding several of these locks at the
 * same time can deadlock.
 */
int
unionfs_nodeget(struct mount *mp, struct vnode *uppervp,
    struct vnode *lowervp, struct vnode *dvp,
    struct vnode **vpp, struct componentname *cnp,
    struct thread *td)
{
        struct unionfs_mount *ump;
        struct unionfs_node *unp;
        struct vnode *vp;
        int error;
        int lkflags;
        enum vtype vt;
        char *path;

        ump = MOUNTTOUNIONFSMOUNT(mp);
        lkflags = (cnp ? cnp->cn_lkflags : 0);
        path = (cnp ? cnp->cn_nameptr : NULL);
        *vpp = NULLVP;

        if (uppervp == NULLVP && lowervp == NULLVP)
                panic("unionfs_nodeget: upper and lower is null");

        vt = (uppervp != NULLVP ? uppervp->v_type : lowervp->v_type);

        /* If cnp does not have the ISLASTCN flag, the path check is skipped. */
        if (cnp && !(cnp->cn_flags & ISLASTCN))
                path = NULL;

        /* check the vdir cache */
        if (path != NULL && dvp != NULLVP && (vt == VDIR || vt == VSOCK)) {
                vp = unionfs_get_cached_vdir(uppervp, lowervp, dvp, path);
                if (vp != NULLVP) {
                        vref(vp);
                        *vpp = vp;
                        goto unionfs_nodeget_out;
                }
        }

        if ((uppervp == NULLVP || ump->um_uppervp != uppervp) ||
            (lowervp == NULLVP || ump->um_lowervp != lowervp)) {
                /* dvp will be NULLVP only in case of root vnode. */
                if (dvp == NULLVP)
                        return (EINVAL);
        }

        /*
         * Do the MALLOC before the getnewvnode since doing so afterward
         * might cause a bogus v_data pointer to get dereferenced elsewhere
         * if MALLOC should block.
         */
        MALLOC(unp, struct unionfs_node *, sizeof(struct unionfs_node),
            M_UNIONFSNODE, M_WAITOK | M_ZERO);

        error = getnewvnode("unionfs", mp, &unionfs_vnodeops, &vp);
        if (error != 0) {
                FREE(unp, M_UNIONFSNODE);
                return (error);
        }
        error = insmntque(vp, mp);	/* XXX: Too early for mpsafe fs */
        if (error != 0) {
                FREE(unp, M_UNIONFSNODE);
                return (error);
        }
        if (dvp != NULLVP)
                vref(dvp);
        if (uppervp != NULLVP)
                vref(uppervp);
        if (lowervp != NULLVP)
                vref(lowervp);

        switch (vt) {
        case VDIR:
                unp->un_hashtbl = hashinit(NUNIONFSNODECACHE, M_UNIONFSHASH,
                    &(unp->un_hashmask));
                break;
        case VSOCK:
                if (uppervp != NULLVP)
                        vp->v_socket = uppervp->v_socket;
                else
                        vp->v_socket = lowervp->v_socket;
                break;
        default:
                break;
        }

        unp->un_vnode = vp;
        unp->un_uppervp = uppervp;
        unp->un_lowervp = lowervp;
        unp->un_dvp = dvp;
        if (uppervp != NULLVP)
                vp->v_vnlock = uppervp->v_vnlock;
        else
                vp->v_vnlock = lowervp->v_vnlock;

        if (path != NULL) {
                unp->un_path = (char *)
                    malloc(cnp->cn_namelen + 1, M_UNIONFSPATH, M_WAITOK | M_ZERO);
                bcopy(cnp->cn_nameptr, unp->un_path, cnp->cn_namelen);
                unp->un_path[cnp->cn_namelen] = '\0';
        }
        vp->v_type = vt;
        vp->v_data = unp;

        if ((uppervp != NULLVP && ump->um_uppervp == uppervp) &&
            (lowervp != NULLVP && ump->um_lowervp == lowervp))
                vp->v_vflag |= VV_ROOT;

        if (path != NULL && dvp != NULLVP && (vt == VDIR || vt == VSOCK))
                *vpp = unionfs_ins_cached_vdir(unp, dvp, path);
        if ((*vpp) != NULLVP) {
                if (dvp != NULLVP)
                        vrele(dvp);
                if (uppervp != NULLVP)
                        vrele(uppervp);
                if (lowervp != NULLVP)
                        vrele(lowervp);

                unp->un_uppervp = NULLVP;
                unp->un_lowervp = NULLVP;
                unp->un_dvp = NULLVP;
                vrele(vp);
                vp = *vpp;
                vref(vp);
        } else
                *vpp = vp;

unionfs_nodeget_out:
        if (lkflags & LK_TYPE_MASK)
                vn_lock(vp, lkflags | LK_RETRY);

        return (0);
}
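
/*
 * Note that the unionfs vnode created above shares its lock with the
 * upper (or, failing that, the lower) vnode through v_vnlock, so locking
 * the unionfs vnode locks the covered vnode as well.  unionfs_noderem()
 * below has to undo this aliasing: it switches v_vnlock back to the
 * vnode's own v_lock and clears v_data under the vnode interlock before
 * it drops its references on the covered vnodes.
 */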

/*
 * Clean up the unionfs node.
 */
void
unionfs_noderem(struct vnode *vp, struct thread *td)
{
        int vfslocked;
        struct unionfs_node *unp;
        struct unionfs_node_status *unsp, *unsp_tmp;
        struct vnode *lvp;
        struct vnode *uvp;
        struct vnode *dvp;

        /*
         * Use the interlock to protect the clearing of v_data to
         * prevent faults in unionfs_lock().
         */
        VI_LOCK(vp);
        unp = VTOUNIONFS(vp);
        lvp = unp->un_lowervp;
        uvp = unp->un_uppervp;
        dvp = unp->un_dvp;
        unp->un_lowervp = unp->un_uppervp = NULLVP;

        vp->v_vnlock = &(vp->v_lock);
        vp->v_data = NULL;
        lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_INTERLOCK, VI_MTX(vp));
        if (lvp != NULLVP)
                VOP_UNLOCK(lvp, 0);
        if (uvp != NULLVP)
                VOP_UNLOCK(uvp, 0);
        vp->v_object = NULL;

        if (unp->un_path != NULL && dvp != NULLVP &&
            (vp->v_type == VDIR || vp->v_type == VSOCK))
                unionfs_rem_cached_vdir(unp, dvp);

        if (lvp != NULLVP) {
                vfslocked = VFS_LOCK_GIANT(lvp->v_mount);
                vrele(lvp);
                VFS_UNLOCK_GIANT(vfslocked);
        }
        if (uvp != NULLVP) {
                vfslocked = VFS_LOCK_GIANT(uvp->v_mount);
                vrele(uvp);
                VFS_UNLOCK_GIANT(vfslocked);
        }
        if (dvp != NULLVP) {
                vfslocked = VFS_LOCK_GIANT(dvp->v_mount);
                vrele(dvp);
                VFS_UNLOCK_GIANT(vfslocked);
                unp->un_dvp = NULLVP;
        }
        if (unp->un_path != NULL) {
                free(unp->un_path, M_UNIONFSPATH);
                unp->un_path = NULL;
        }

        if (unp->un_hashtbl != NULL)
                hashdestroy(unp->un_hashtbl, M_UNIONFSHASH, unp->un_hashmask);

        LIST_FOREACH_SAFE(unsp, &(unp->un_unshead), uns_list, unsp_tmp) {
                LIST_REMOVE(unsp, uns_list);
                free(unsp, M_TEMP);
        }
        FREE(unp, M_UNIONFSNODE);
}

/*
 * Get the unionfs node status.
 * The vnode must be exclusively locked.
 */
void
unionfs_get_node_status(struct unionfs_node *unp, struct thread *td,
    struct unionfs_node_status **unspp)
{
        struct unionfs_node_status *unsp;
        pid_t pid = td->td_proc->p_pid;

        KASSERT(NULL != unspp, ("null pointer"));
        ASSERT_VOP_ELOCKED(UNIONFSTOV(unp), "unionfs_get_node_status");

        LIST_FOREACH(unsp, &(unp->un_unshead), uns_list) {
                if (unsp->uns_pid == pid) {
                        *unspp = unsp;
                        return;
                }
        }

        /* create a new unionfs node status */
        MALLOC(unsp, struct unionfs_node_status *,
            sizeof(struct unionfs_node_status), M_TEMP, M_WAITOK | M_ZERO);

        unsp->uns_pid = pid;
        LIST_INSERT_HEAD(&(unp->un_unshead), unsp, uns_list);

        *unspp = unsp;
}

/*
 * Remove the unionfs node status, if possible.
 * The vnode must be exclusively locked.
 */
void
unionfs_tryrem_node_status(struct unionfs_node *unp,
    struct unionfs_node_status *unsp)
{
        KASSERT(NULL != unsp, ("null pointer"));
        ASSERT_VOP_ELOCKED(UNIONFSTOV(unp), "unionfs_tryrem_node_status");

        if (0 < unsp->uns_lower_opencnt || 0 < unsp->uns_upper_opencnt)
                return;

        LIST_REMOVE(unsp, uns_list);
        free(unsp, M_TEMP);
}
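
/*
 * The attributes of a newly created shadow (upper) node depend on the
 * mount's copy mode, handled by the function below:
 *
 *	UNIONFS_TRANSPARENT	copy the mode, owner and group of the
 *				lower node unchanged.
 *	UNIONFS_MASQUERADE	if um_uid owns the lower node, keep the
 *				lower mode except for the owner bits,
 *				which come from um_udir/um_ufile, and keep
 *				the lower owner and group; otherwise use
 *				um_udir/um_ufile, um_uid and um_gid.
 *	UNIONFS_TRADITIONAL	mode is 0777 masked by the process umask;
 *				owner and group are um_uid and um_gid.
 */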

/*
 * Create the upper node's attributes from the lower node's, according to
 * the mount's copy mode.
 */
void
unionfs_create_uppervattr_core(struct unionfs_mount *ump,
    struct vattr *lva,
    struct vattr *uva,
    struct thread *td)
{
        VATTR_NULL(uva);
        uva->va_type = lva->va_type;
        uva->va_atime = lva->va_atime;
        uva->va_mtime = lva->va_mtime;
        uva->va_ctime = lva->va_ctime;

        switch (ump->um_copymode) {
        case UNIONFS_TRANSPARENT:
                uva->va_mode = lva->va_mode;
                uva->va_uid = lva->va_uid;
                uva->va_gid = lva->va_gid;
                break;
        case UNIONFS_MASQUERADE:
                if (ump->um_uid == lva->va_uid) {
                        uva->va_mode = lva->va_mode & 077077;
                        uva->va_mode |= (lva->va_type == VDIR ?
                            ump->um_udir : ump->um_ufile) & 0700;
                        uva->va_uid = lva->va_uid;
                        uva->va_gid = lva->va_gid;
                } else {
                        uva->va_mode = (lva->va_type == VDIR ?
                            ump->um_udir : ump->um_ufile);
                        uva->va_uid = ump->um_uid;
                        uva->va_gid = ump->um_gid;
                }
                break;
        default:		/* UNIONFS_TRADITIONAL */
                FILEDESC_SLOCK(td->td_proc->p_fd);
                uva->va_mode = 0777 & ~td->td_proc->p_fd->fd_cmask;
                FILEDESC_SUNLOCK(td->td_proc->p_fd);
                uva->va_uid = ump->um_uid;
                uva->va_gid = ump->um_gid;
                break;
        }
}

/*
 * Create the upper node's attributes; as above, but fetches the lower
 * node's attributes itself.
 */
int
unionfs_create_uppervattr(struct unionfs_mount *ump,
    struct vnode *lvp,
    struct vattr *uva,
    struct ucred *cred,
    struct thread *td)
{
        int error;
        struct vattr lva;

        if ((error = VOP_GETATTR(lvp, &lva, cred, td)))
                return (error);

        unionfs_create_uppervattr_core(ump, &lva, uva, td);

        return (error);
}

/*
 * relookup
 *
 * dvp should be locked on entry and will be locked on return.
 *
 * If an error is returned, *vpp will be invalid, otherwise it will hold a
 * locked, referenced vnode.  If *vpp == dvp then remember that only one
 * LK_EXCLUSIVE lock is held.
 */
static int
unionfs_relookup(struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp, struct componentname *cn,
    struct thread *td, char *path, int pathlen, u_long nameiop)
{
        int error;

        cn->cn_namelen = pathlen;
        cn->cn_pnbuf = uma_zalloc(namei_zone, M_WAITOK);
        bcopy(path, cn->cn_pnbuf, pathlen);
        cn->cn_pnbuf[pathlen] = '\0';

        cn->cn_nameiop = nameiop;
        cn->cn_flags = (LOCKPARENT | LOCKLEAF | HASBUF | SAVENAME | ISLASTCN);
        cn->cn_lkflags = LK_EXCLUSIVE;
        cn->cn_thread = td;
        cn->cn_cred = cnp->cn_cred;

        cn->cn_nameptr = cn->cn_pnbuf;
        cn->cn_consume = cnp->cn_consume;

        if (nameiop == DELETE)
                cn->cn_flags |= (cnp->cn_flags & (DOWHITEOUT | SAVESTART));
        else if (RENAME == nameiop)
                cn->cn_flags |= (cnp->cn_flags & SAVESTART);

        vref(dvp);
        VOP_UNLOCK(dvp, 0);

        if ((error = relookup(dvp, vpp, cn))) {
                uma_zfree(namei_zone, cn->cn_pnbuf);
                cn->cn_flags &= ~HASBUF;
                vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
        } else
                vrele(dvp);

        return (error);
}
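
/*
 * The three unionfs_relookup_for_*() helpers below share one pattern:
 * re-run the lookup of cnp->cn_nameptr on the upper directory with the
 * wanted nameiop, release whatever vnode that turned up (only its
 * presence or absence matters), free the temporary pathname buffer and,
 * on success, copy the resulting componentname flags back into cnp so
 * the caller can go on to create, delete or rename on the upper layer.
 */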

/*
 * relookup for CREATE namei operation.
 *
 * dvp is the unionfs vnode and should be locked.
 *
 * When unionfs_link() and friends call unionfs_copyfile(), the state
 * saved by VOP_LOOKUP() is invalidated, so the name has to be looked up
 * again before the link (or other create-type operation) can be made.
 */
int
unionfs_relookup_for_create(struct vnode *dvp, struct componentname *cnp,
    struct thread *td)
{
        int error;
        struct vnode *udvp;
        struct vnode *vp;
        struct componentname cn;

        udvp = UNIONFSVPTOUPPERVP(dvp);
        vp = NULLVP;

        error = unionfs_relookup(udvp, &vp, cnp, &cn, td, cnp->cn_nameptr,
            strlen(cnp->cn_nameptr), CREATE);
        if (error)
                return (error);

        if (vp != NULLVP) {
                if (udvp == vp)
                        vrele(vp);
                else
                        vput(vp);

                error = EEXIST;
        }

        if (cn.cn_flags & HASBUF) {
                uma_zfree(namei_zone, cn.cn_pnbuf);
                cn.cn_flags &= ~HASBUF;
        }

        if (!error) {
                cn.cn_flags |= (cnp->cn_flags & HASBUF);
                cnp->cn_flags = cn.cn_flags;
        }

        return (error);
}

/*
 * relookup for DELETE namei operation.
 *
 * dvp is the unionfs vnode and should be locked.
 */
int
unionfs_relookup_for_delete(struct vnode *dvp, struct componentname *cnp,
    struct thread *td)
{
        int error;
        struct vnode *udvp;
        struct vnode *vp;
        struct componentname cn;

        udvp = UNIONFSVPTOUPPERVP(dvp);
        vp = NULLVP;

        error = unionfs_relookup(udvp, &vp, cnp, &cn, td, cnp->cn_nameptr,
            strlen(cnp->cn_nameptr), DELETE);
        if (error)
                return (error);

        if (vp == NULLVP)
                error = ENOENT;
        else {
                if (udvp == vp)
                        vrele(vp);
                else
                        vput(vp);
        }

        if (cn.cn_flags & HASBUF) {
                uma_zfree(namei_zone, cn.cn_pnbuf);
                cn.cn_flags &= ~HASBUF;
        }

        if (!error) {
                cn.cn_flags |= (cnp->cn_flags & HASBUF);
                cnp->cn_flags = cn.cn_flags;
        }

        return (error);
}

/*
 * relookup for RENAME namei operation.
 *
 * dvp is the unionfs vnode and should be locked.
 */
int
unionfs_relookup_for_rename(struct vnode *dvp, struct componentname *cnp,
    struct thread *td)
{
        int error;
        struct vnode *udvp;
        struct vnode *vp;
        struct componentname cn;

        udvp = UNIONFSVPTOUPPERVP(dvp);
        vp = NULLVP;

        error = unionfs_relookup(udvp, &vp, cnp, &cn, td, cnp->cn_nameptr,
            strlen(cnp->cn_nameptr), RENAME);
        if (error)
                return (error);

        if (vp != NULLVP) {
                if (udvp == vp)
                        vrele(vp);
                else
                        vput(vp);
        }

        if (cn.cn_flags & HASBUF) {
                uma_zfree(namei_zone, cn.cn_pnbuf);
                cn.cn_flags &= ~HASBUF;
        }

        if (!error) {
                cn.cn_flags |= (cnp->cn_flags & HASBUF);
                cnp->cn_flags = cn.cn_flags;
        }

        return (error);
}
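
/*
 * The remaining helpers implement "copy-up": when a node exists only in
 * the lower layer but has to be modified, a shadow directory
 * (unionfs_mkshadowdir()) or shadow file (unionfs_copyfile()) is created
 * on the upper layer, and unionfs_node_update() switches the unionfs
 * node over to the freshly created upper vnode.
 */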

/*
 * Update the unionfs_node.
 *
 * uvp is the new, locked upper vnode.  The unionfs vnode's lock is
 * exchanged for uvp's lock, and the lower vnode's lock is released.
 */
static void
unionfs_node_update(struct unionfs_node *unp, struct vnode *uvp,
    struct thread *td)
{
        unsigned count, lockrec;
        struct vnode *vp;
        struct vnode *lvp;
        struct vnode *dvp;

        vp = UNIONFSTOV(unp);
        lvp = unp->un_lowervp;
        ASSERT_VOP_ELOCKED(lvp, "unionfs_node_update");
        dvp = unp->un_dvp;

        /*
         * lock update
         */
        VI_LOCK(vp);
        unp->un_uppervp = uvp;
        vp->v_vnlock = uvp->v_vnlock;
        VI_UNLOCK(vp);

        /* Replay the lower lock's recursion count on the upper lock. */
        lockrec = lvp->v_vnlock->lk_recurse;
        for (count = 0; count < lockrec; count++)
                vn_lock(uvp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY);

        /*
         * cache update
         */
        if (unp->un_path != NULL && dvp != NULLVP &&
            (vp->v_type == VDIR || vp->v_type == VSOCK)) {
                struct unionfs_node_hashhead *hd;

                VI_LOCK(dvp);
                hd = unionfs_get_hashhead(dvp, unp->un_path);
                LIST_REMOVE(unp, un_hash);
                LIST_INSERT_HEAD(hd, unp, un_hash);
                VI_UNLOCK(dvp);
        }
}

/*
 * Create a new shadow dir.
 *
 * udvp should be locked on entry and will be locked on return.
 *
 * If no error is returned, unp will be updated.
 */
int
unionfs_mkshadowdir(struct unionfs_mount *ump, struct vnode *udvp,
    struct unionfs_node *unp, struct componentname *cnp,
    struct thread *td)
{
        int error;
        struct vnode *lvp;
        struct vnode *uvp;
        struct vattr va;
        struct vattr lva;
        struct componentname cn;
        struct mount *mp;
        struct ucred *cred;
        struct ucred *credbk;
        struct uidinfo *rootinfo;

        if (unp->un_uppervp != NULLVP)
                return (EEXIST);

        lvp = unp->un_lowervp;
        uvp = NULLVP;
        credbk = cnp->cn_cred;

        /* Switch the credentials to root. */
        rootinfo = uifind((uid_t)0);
        cred = crdup(cnp->cn_cred);
        chgproccnt(cred->cr_ruidinfo, 1, 0);
        change_euid(cred, rootinfo);
        change_ruid(cred, rootinfo);
        change_svuid(cred, (uid_t)0);
        uifree(rootinfo);
        cnp->cn_cred = cred;

        memset(&cn, 0, sizeof(cn));

        if ((error = VOP_GETATTR(lvp, &lva, cnp->cn_cred, td)))
                goto unionfs_mkshadowdir_abort;

        if ((error = unionfs_relookup(udvp, &uvp, cnp, &cn, td,
            cnp->cn_nameptr, cnp->cn_namelen, CREATE)))
                goto unionfs_mkshadowdir_abort;
        if (uvp != NULLVP) {
                if (udvp == uvp)
                        vrele(uvp);
                else
                        vput(uvp);

                error = EEXIST;
                goto unionfs_mkshadowdir_free_out;
        }

        if ((error = vn_start_write(udvp, &mp, V_WAIT | PCATCH)))
                goto unionfs_mkshadowdir_free_out;
        if ((error = VOP_LEASE(udvp, td, cn.cn_cred, LEASE_WRITE))) {
                vn_finished_write(mp);
                goto unionfs_mkshadowdir_free_out;
        }
        unionfs_create_uppervattr_core(ump, &lva, &va, td);

        error = VOP_MKDIR(udvp, &uvp, &cn, &va);

        if (!error) {
                unionfs_node_update(unp, uvp, td);

                /*
                 * XXX The bug that prevented the uid/gid from being set
                 * has been corrected.  Errors from VOP_SETATTR() are
                 * ignored.
                 */
                va.va_type = VNON;
                VOP_SETATTR(uvp, &va, cn.cn_cred, td);
        }
        vn_finished_write(mp);

unionfs_mkshadowdir_free_out:
        if (cn.cn_flags & HASBUF) {
                uma_zfree(namei_zone, cn.cn_pnbuf);
                cn.cn_flags &= ~HASBUF;
        }

unionfs_mkshadowdir_abort:
        cnp->cn_cred = credbk;
        chgproccnt(cred->cr_ruidinfo, -1, 0);
        crfree(cred);

        return (error);
}
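
/*
 * A whiteout is an upper-layer directory entry that masks a name in the
 * lower layer, so that removing or renaming a lower-layer file can be
 * expressed without ever writing to the (possibly read-only) lower file
 * system.  The helper below creates one via VOP_WHITEOUT() after
 * confirming that the name does not already exist on the upper layer.
 */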

/*
 * Create a new whiteout.
 *
 * dvp should be locked on entry and will be locked on return.
 */
int
unionfs_mkwhiteout(struct vnode *dvp, struct componentname *cnp,
    struct thread *td, char *path)
{
        int error;
        struct vnode *wvp;
        struct componentname cn;
        struct mount *mp;

        if (path == NULL)
                path = cnp->cn_nameptr;

        wvp = NULLVP;
        if ((error = unionfs_relookup(dvp, &wvp, cnp, &cn, td, path,
            strlen(path), CREATE)))
                return (error);
        if (wvp != NULLVP) {
                if (cn.cn_flags & HASBUF) {
                        uma_zfree(namei_zone, cn.cn_pnbuf);
                        cn.cn_flags &= ~HASBUF;
                }
                if (dvp == wvp)
                        vrele(wvp);
                else
                        vput(wvp);

                return (EEXIST);
        }

        if ((error = vn_start_write(dvp, &mp, V_WAIT | PCATCH)))
                goto unionfs_mkwhiteout_free_out;
        if (!(error = VOP_LEASE(dvp, td, td->td_ucred, LEASE_WRITE)))
                error = VOP_WHITEOUT(dvp, &cn, CREATE);

        vn_finished_write(mp);

unionfs_mkwhiteout_free_out:
        if (cn.cn_flags & HASBUF) {
                uma_zfree(namei_zone, cn.cn_pnbuf);
                cn.cn_flags &= ~HASBUF;
        }

        return (error);
}

/*
 * Create and open a new vnode for a new shadow file.
 *
 * If an error is returned, *vpp will be invalid, otherwise it will hold a
 * locked, referenced and opened vnode.
 *
 * unp is never updated.
 */
static int
unionfs_vn_create_on_upper(struct vnode **vpp, struct vnode *udvp,
    struct unionfs_node *unp, struct vattr *uvap,
    struct thread *td)
{
        struct unionfs_mount *ump;
        struct vnode *vp;
        struct vnode *lvp;
        struct ucred *cred;
        struct vattr lva;
        int fmode;
        int error;
        struct componentname cn;

        ump = MOUNTTOUNIONFSMOUNT(UNIONFSTOV(unp)->v_mount);
        vp = NULLVP;
        lvp = unp->un_lowervp;
        cred = td->td_ucred;
        fmode = FFLAGS(O_WRONLY | O_CREAT | O_TRUNC | O_EXCL);
        error = 0;

        if ((error = VOP_GETATTR(lvp, &lva, cred, td)) != 0)
                return (error);
        unionfs_create_uppervattr_core(ump, &lva, uvap, td);

        if (unp->un_path == NULL)
                panic("unionfs: un_path is null");

        cn.cn_namelen = strlen(unp->un_path);
        cn.cn_pnbuf = uma_zalloc(namei_zone, M_WAITOK);
        bcopy(unp->un_path, cn.cn_pnbuf, cn.cn_namelen + 1);
        cn.cn_nameiop = CREATE;
        cn.cn_flags = (LOCKPARENT | LOCKLEAF | HASBUF | SAVENAME | ISLASTCN);
        cn.cn_lkflags = LK_EXCLUSIVE;
        cn.cn_thread = td;
        cn.cn_cred = cred;
        cn.cn_nameptr = cn.cn_pnbuf;
        cn.cn_consume = 0;

        vref(udvp);
        if ((error = relookup(udvp, &vp, &cn)) != 0)
                goto unionfs_vn_create_on_upper_free_out2;
        vrele(udvp);

        if (vp != NULLVP) {
                if (vp == udvp)
                        vrele(vp);
                else
                        vput(vp);
                error = EEXIST;
                goto unionfs_vn_create_on_upper_free_out1;
        }

        if ((error = VOP_LEASE(udvp, td, cred, LEASE_WRITE)) != 0)
                goto unionfs_vn_create_on_upper_free_out1;

        if ((error = VOP_CREATE(udvp, &vp, &cn, uvap)) != 0)
                goto unionfs_vn_create_on_upper_free_out1;

        if ((error = VOP_OPEN(vp, fmode, cred, td, NULL)) != 0) {
                vput(vp);
                goto unionfs_vn_create_on_upper_free_out1;
        }
        vp->v_writecount++;
        *vpp = vp;

unionfs_vn_create_on_upper_free_out1:
        VOP_UNLOCK(udvp, 0);

unionfs_vn_create_on_upper_free_out2:
        if (cn.cn_flags & HASBUF) {
                uma_zfree(namei_zone, cn.cn_pnbuf);
                cn.cn_flags &= ~HASBUF;
        }

        return (error);
}
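
/*
 * The copy loop below moves data in MAXBSIZE chunks: each pass reads one
 * chunk from the lower vnode, then writes it to the upper vnode with an
 * inner loop that restarts after short writes, keeping the read offset
 * and the write offset in step through a single struct uio.
 */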

/*
 * Copy from lvp to uvp.
 *
 * lvp and uvp should be locked and opened on entry and will be locked and
 * opened on return.
 */
static int
unionfs_copyfile_core(struct vnode *lvp, struct vnode *uvp,
    struct ucred *cred, struct thread *td)
{
        int error;
        off_t offset;
        int count;
        int bufoffset;
        char *buf;
        struct uio uio;
        struct iovec iov;

        error = 0;
        memset(&uio, 0, sizeof(uio));

        uio.uio_td = td;
        uio.uio_segflg = UIO_SYSSPACE;
        uio.uio_offset = 0;

        if ((error = VOP_LEASE(lvp, td, cred, LEASE_READ)) != 0)
                return (error);
        if ((error = VOP_LEASE(uvp, td, cred, LEASE_WRITE)) != 0)
                return (error);
        buf = malloc(MAXBSIZE, M_TEMP, M_WAITOK);

        while (error == 0) {
                offset = uio.uio_offset;

                uio.uio_iov = &iov;
                uio.uio_iovcnt = 1;
                iov.iov_base = buf;
                iov.iov_len = MAXBSIZE;
                uio.uio_resid = iov.iov_len;
                uio.uio_rw = UIO_READ;

                if ((error = VOP_READ(lvp, &uio, 0, cred)) != 0)
                        break;
                if ((count = MAXBSIZE - uio.uio_resid) == 0)
                        break;

                bufoffset = 0;
                while (bufoffset < count) {
                        uio.uio_iov = &iov;
                        uio.uio_iovcnt = 1;
                        iov.iov_base = buf + bufoffset;
                        iov.iov_len = count - bufoffset;
                        uio.uio_offset = offset + bufoffset;
                        uio.uio_resid = iov.iov_len;
                        uio.uio_rw = UIO_WRITE;

                        if ((error = VOP_WRITE(uvp, &uio, 0, cred)) != 0)
                                break;

                        bufoffset += (count - bufoffset) - uio.uio_resid;
                }

                uio.uio_offset = offset + bufoffset;
        }

        free(buf, M_TEMP);

        return (error);
}

/*
 * Copy a file from the lower to the upper layer.
 *
 * Set docopy to 1 to copy the file contents as well; set it to 0 to
 * create the upper file without copying any data.
 *
 * If no error is returned, unp will be updated.
 */
int
unionfs_copyfile(struct unionfs_node *unp, int docopy, struct ucred *cred,
    struct thread *td)
{
        int error;
        struct mount *mp;
        struct vnode *udvp;
        struct vnode *lvp;
        struct vnode *uvp;
        struct vattr uva;

        lvp = unp->un_lowervp;
        uvp = NULLVP;

        if ((UNIONFSTOV(unp)->v_mount->mnt_flag & MNT_RDONLY))
                return (EROFS);
        if (unp->un_dvp == NULLVP)
                return (EINVAL);
        if (unp->un_uppervp != NULLVP)
                return (EEXIST);
        udvp = VTOUNIONFS(unp->un_dvp)->un_uppervp;
        if (udvp == NULLVP)
                return (EROFS);
        if ((udvp->v_mount->mnt_flag & MNT_RDONLY))
                return (EROFS);

        error = VOP_ACCESS(lvp, VREAD, cred, td);
        if (error != 0)
                return (error);

        if ((error = vn_start_write(udvp, &mp, V_WAIT | PCATCH)) != 0)
                return (error);
        error = unionfs_vn_create_on_upper(&uvp, udvp, unp, &uva, td);
        if (error != 0) {
                vn_finished_write(mp);
                return (error);
        }

        if (docopy != 0) {
                error = VOP_OPEN(lvp, FREAD, cred, td, NULL);
                if (error == 0) {
                        error = unionfs_copyfile_core(lvp, uvp, cred, td);
                        VOP_CLOSE(lvp, FREAD, cred, td);
                }
        }
        VOP_CLOSE(uvp, FWRITE, cred, td);
        uvp->v_writecount--;

        vn_finished_write(mp);

        if (error == 0) {
                /* Reset the attributes.  Ignore errors. */
                uva.va_type = VNON;
                VOP_SETATTR(uvp, &uva, cred, td);
        }

        unionfs_node_update(unp, uvp, td);

        return (error);
}
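
/*
 * Removing a unionfs directory only removes its upper counterpart, so
 * before allowing rmdir we must make sure nothing from the lower layer
 * would reappear: the check below accepts the directory if the upper
 * directory is opaque, or if every lower-layer entry is either shadowed
 * by an upper entry or covered by a whiteout; any other lower entry
 * makes the directory non-empty (ENOTEMPTY).
 */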

/*
 * Check whether the directory vp can be removed with rmdir, i.e. whether
 * it is empty from the union's point of view.
 *
 * vp is the unionfs vnode and should be locked.
 */
int
unionfs_check_rmdir(struct vnode *vp, struct ucred *cred, struct thread *td)
{
        int error;
        int eofflag;
        int lookuperr;
        struct vnode *uvp;
        struct vnode *lvp;
        struct vnode *tvp;
        struct vattr va;
        struct componentname cn;
        /*
         * The size of buf needs to be larger than DIRBLKSIZ.
         */
        char buf[256 * 6];
        struct dirent *dp;
        struct dirent *edp;
        struct uio uio;
        struct iovec iov;

        ASSERT_VOP_ELOCKED(vp, "unionfs_check_rmdir");

        eofflag = 0;
        uvp = UNIONFSVPTOUPPERVP(vp);
        lvp = UNIONFSVPTOLOWERVP(vp);

        /* check opaque */
        if ((error = VOP_GETATTR(uvp, &va, cred, td)) != 0)
                return (error);
        if (va.va_flags & OPAQUE)
                return (0);

        /* open vnode */
#ifdef MAC
        if ((error = mac_vnode_check_open(cred, vp, VEXEC|VREAD)) != 0)
                return (error);
#endif
        if ((error = VOP_ACCESS(vp, VEXEC|VREAD, cred, td)) != 0)
                return (error);
        if ((error = VOP_OPEN(vp, FREAD, cred, td, NULL)) != 0)
                return (error);

        uio.uio_rw = UIO_READ;
        uio.uio_segflg = UIO_SYSSPACE;
        uio.uio_td = td;
        uio.uio_offset = 0;

#ifdef MAC
        error = mac_vnode_check_readdir(td->td_ucred, lvp);
#endif
        while (!error && !eofflag) {
                iov.iov_base = buf;
                iov.iov_len = sizeof(buf);
                uio.uio_iov = &iov;
                uio.uio_iovcnt = 1;
                uio.uio_resid = iov.iov_len;

                error = VOP_READDIR(lvp, &uio, cred, &eofflag, NULL, NULL);
                if (error != 0)
                        break;
                if (eofflag == 0 && uio.uio_resid == sizeof(buf)) {
#ifdef DIAGNOSTIC
                        panic("bad readdir response from lower FS.");
#endif
                        break;
                }

                edp = (struct dirent*)&buf[sizeof(buf) - uio.uio_resid];
                for (dp = (struct dirent*)buf; !error && dp < edp;
                    dp = (struct dirent*)((caddr_t)dp + dp->d_reclen)) {
                        if (dp->d_type == DT_WHT ||
                            (dp->d_namlen == 1 && dp->d_name[0] == '.') ||
                            (dp->d_namlen == 2 && !bcmp(dp->d_name, "..", 2)))
                                continue;

                        cn.cn_namelen = dp->d_namlen;
                        cn.cn_pnbuf = NULL;
                        cn.cn_nameptr = dp->d_name;
                        cn.cn_nameiop = LOOKUP;
                        cn.cn_flags = (LOCKPARENT | LOCKLEAF | SAVENAME |
                            RDONLY | ISLASTCN);
                        cn.cn_lkflags = LK_EXCLUSIVE;
                        cn.cn_thread = td;
                        cn.cn_cred = cred;
                        cn.cn_consume = 0;

                        /*
                         * Check that the entry still exists in the lower
                         * layer; readdir sometimes returns stale entries.
                         */
                        lookuperr = VOP_LOOKUP(lvp, &tvp, &cn);

                        if (!lookuperr)
                                vput(tvp);
                        else
                                continue;	/* skip entry */

                        /*
                         * Check the upper layer: if the entry neither
                         * exists there nor is covered by a whiteout, the
                         * directory is not empty.
                         */
                        cn.cn_flags = (LOCKPARENT | LOCKLEAF | SAVENAME |
                            RDONLY | ISLASTCN);
                        lookuperr = VOP_LOOKUP(uvp, &tvp, &cn);

                        if (!lookuperr)
                                vput(tvp);

                        /* ignore existing or whiteouted entries */
                        if (!lookuperr ||
                            (lookuperr == ENOENT && (cn.cn_flags & ISWHITEOUT)))
                                continue;

                        error = ENOTEMPTY;
                }
        }

        /* close vnode */
        VOP_CLOSE(vp, FREAD, cred, td);

        return (error);
}
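
/*
 * The two DIAGNOSTIC-only accessors below return the upper and lower
 * vnodes of a unionfs node; the fil/lno arguments suggest they are meant
 * to be reached through wrapper macros that pass __FILE__ and __LINE__
 * (presumably UNIONFSVPTOUPPERVP()/UNIONFSVPTOLOWERVP() in union.h), so
 * that the sanity check under "notyet" could report its caller.
 */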

#ifdef DIAGNOSTIC

struct vnode *
unionfs_checkuppervp(struct vnode *vp, char *fil, int lno)
{
        struct unionfs_node *unp;

        unp = VTOUNIONFS(vp);

#ifdef notyet
        if (vp->v_op != unionfs_vnodeop_p) {
                printf("unionfs_checkuppervp: on non-unionfs-node.\n");
#ifdef KDB
                kdb_enter(KDB_WHY_UNIONFS,
                    "unionfs_checkuppervp: on non-unionfs-node.\n");
#endif
                panic("unionfs_checkuppervp");
        }
#endif
        return (unp->un_uppervp);
}

struct vnode *
unionfs_checklowervp(struct vnode *vp, char *fil, int lno)
{
        struct unionfs_node *unp;

        unp = VTOUNIONFS(vp);

#ifdef notyet
        if (vp->v_op != unionfs_vnodeop_p) {
                printf("unionfs_checklowervp: on non-unionfs-node.\n");
#ifdef KDB
                kdb_enter(KDB_WHY_UNIONFS,
                    "unionfs_checklowervp: on non-unionfs-node.\n");
#endif
                panic("unionfs_checklowervp");
        }
#endif
        return (unp->un_lowervp);
}
#endif