/*
 * Copyright (c) 1994, 1995 The Regents of the University of California.
 * Copyright (c) 1994, 1995 Jan-Simon Pendry.
 * All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)union_vfsops.c	8.20 (Berkeley) 5/20/95
 * $Id: union_vfsops.c,v 1.19 1997/08/16 19:15:22 wollman Exp $
 */

/*
 * Union Layer
 */
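
/*
 * A union mount stacks the tree named by the mount argument (the upper
 * layer) on top of the tree at the covered mount point (the lower
 * layer); with the _BELOW option the two roles are swapped.  Lookups
 * see the merged namespace, while modifications go to the upper layer
 * only.  See mount_union(8) for the user interface.
 */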

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/filedesc.h>
#include <miscfs/union/union.h>

extern int	union_init __P((struct vfsconf *));

extern int	union_fhtovp __P((struct mount *mp, struct fid *fidp,
				  struct mbuf *nam, struct vnode **vpp,
				  int *exflagsp, struct ucred **credanonp));
extern int	union_mount __P((struct mount *mp, char *path, caddr_t data,
				 struct nameidata *ndp, struct proc *p));
extern int	union_quotactl __P((struct mount *mp, int cmd, uid_t uid,
				    caddr_t arg, struct proc *p));
extern int	union_root __P((struct mount *mp, struct vnode **vpp));
extern int	union_start __P((struct mount *mp, int flags, struct proc *p));
extern int	union_statfs __P((struct mount *mp, struct statfs *sbp,
				  struct proc *p));
extern int	union_sync __P((struct mount *mp, int waitfor,
				struct ucred *cred, struct proc *p));
extern int	union_unmount __P((struct mount *mp, int mntflags,
				   struct proc *p));
extern int	union_vget __P((struct mount *mp, ino_t ino,
				struct vnode **vpp));
extern int	union_vptofh __P((struct vnode *vp, struct fid *fhp));

/*
 * Mount union filesystem
 */
int
union_mount(mp, path, data, ndp, p)
	struct mount *mp;
	char *path;
	caddr_t data;
	struct nameidata *ndp;
	struct proc *p;
{
	int error = 0;
	struct union_args args;
	struct vnode *lowerrootvp = NULLVP;
	struct vnode *upperrootvp = NULLVP;
	struct union_mount *um = 0;
	struct ucred *cred = 0;
	struct ucred *scred;
	struct vattr va;
	char *cp = 0;
	int len;
	u_int size;
	int islowerunlocked = 0;

#ifdef UNION_DIAGNOSTIC
	printf("union_mount(mp = %x)\n", mp);
#endif

	/*
	 * Disable clustered writes, otherwise the system becomes unstable.
	 */
	mp->mnt_flag |= MNT_NOCLUSTERW;

	/*
	 * Update is a no-op
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * Need to provide:
		 * 1. a way to convert between rdonly and rdwr mounts.
		 * 2. support for nfs exports.
		 */
		error = EOPNOTSUPP;
		goto bad;
	}

	/*
	 * Get argument
	 */
	error = copyin(data, (caddr_t)&args, sizeof(struct union_args));
	if (error)
		goto bad;

	lowerrootvp = mp->mnt_vnodecovered;
	VREF(lowerrootvp);

	/*
	 * Unlock lower node to avoid deadlock.
	 * (XXX) is VOP_ISLOCKED needed?
	 */
	if ((lowerrootvp->v_op == union_vnodeop_p) && VOP_ISLOCKED(lowerrootvp)) {
		VOP_UNLOCK(lowerrootvp, 0, p);
		islowerunlocked = 1;
	}

	/*
	 * Find upper node.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW|WANTPARENT,
	       UIO_USERSPACE, args.target, p);

	error = namei(ndp);
	/*
	 * Re-lock vnode.
	 * (XXX) is VOP_ISLOCKED needed?
	 */
	if (islowerunlocked && !VOP_ISLOCKED(lowerrootvp))
		vn_lock(lowerrootvp, LK_EXCLUSIVE | LK_RETRY, p);
	if (error)
		goto bad;

	upperrootvp = ndp->ni_vp;
	vrele(ndp->ni_dvp);
	ndp->ni_dvp = NULL;

	/*
	 * Check multi union mount to avoid `lock myself again' panic.
	 */
	if (upperrootvp == VTOUNION(lowerrootvp)->un_uppervp) {
#ifdef DIAGNOSTIC
		printf("union_mount: multi union mount?\n");
#endif
		error = EDEADLK;
		goto bad;
	}

	if (upperrootvp->v_type != VDIR) {
		error = EINVAL;
		goto bad;
	}

	um = (struct union_mount *) malloc(sizeof(struct union_mount),
				M_UFSMNT, M_WAITOK);	/* XXX */

	/*
	 * Keep a held reference to the target vnodes.
	 * They are vrele'd in union_unmount.
	 *
	 * Depending on the _BELOW flag, the filesystems are
	 * viewed in a different order.  In effect, this is the
	 * same as providing a mount under option to the mount syscall.
	 */

	um->um_op = args.mntflags & UNMNT_OPMASK;
	switch (um->um_op) {
	case UNMNT_ABOVE:
		um->um_lowervp = lowerrootvp;
		um->um_uppervp = upperrootvp;
		break;

	case UNMNT_BELOW:
		um->um_lowervp = upperrootvp;
		um->um_uppervp = lowerrootvp;
		break;

	case UNMNT_REPLACE:
		vrele(lowerrootvp);
		lowerrootvp = NULLVP;
		um->um_uppervp = upperrootvp;
		um->um_lowervp = lowerrootvp;
		break;

	default:
		error = EINVAL;
		goto bad;
	}

	/*
	 * Unless the mount is readonly, ensure that the top layer
	 * supports whiteout operations
	 */
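	/*
	 * (XXX The VOP_WHITEOUT call below, with a null componentname and
	 * the LOOKUP flag, is only a probe: it is expected to succeed
	 * only if the upper layer can create whiteout entries.)
	 */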
	if ((mp->mnt_flag & MNT_RDONLY) == 0) {
		error = VOP_WHITEOUT(um->um_uppervp, (struct componentname *) 0, LOOKUP);
		if (error)
			goto bad;
	}

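	/*
	 * Save the mounting process's credentials and a directory create
	 * mode (UN_DIRMODE filtered through its umask); these are
	 * presumably used later when the union layer builds shadow
	 * directories and copies files up to the upper layer.
	 */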
	um->um_cred = p->p_ucred;
	crhold(um->um_cred);
	um->um_cmode = UN_DIRMODE &~ p->p_fd->fd_cmask;

	/*
	 * Depending on what you think the MNT_LOCAL flag might mean,
	 * you may want the && to be || on the conditional below.
	 * At the moment it has been defined that the filesystem is
	 * only local if it is all local, ie the MNT_LOCAL flag implies
	 * that the entire namespace is local.  If you think the MNT_LOCAL
	 * flag implies that some of the files might be stored locally
	 * then you will want to change the conditional.
	 */
	if (um->um_op == UNMNT_ABOVE) {
		if (((um->um_lowervp == NULLVP) ||
		     (um->um_lowervp->v_mount->mnt_flag & MNT_LOCAL)) &&
		    (um->um_uppervp->v_mount->mnt_flag & MNT_LOCAL))
			mp->mnt_flag |= MNT_LOCAL;
	}

	/*
	 * Copy in the upper layer's RDONLY flag.  This is for the benefit
	 * of lookup() which explicitly checks the flag, rather than asking
	 * the filesystem for its own opinion.  This means that an update
	 * mount of the underlying filesystem to go from rdonly to rdwr
	 * will leave the unioned view as read-only.
	 */
	mp->mnt_flag |= (um->um_uppervp->v_mount->mnt_flag & MNT_RDONLY);

	mp->mnt_data = (qaddr_t) um;
	vfs_getnewfsid(mp);

	(void) copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size);
	bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size);

	switch (um->um_op) {
	case UNMNT_ABOVE:
		cp = "<above>:";
		break;
	case UNMNT_BELOW:
		cp = "<below>:";
		break;
	case UNMNT_REPLACE:
		cp = "";
		break;
	}
	len = strlen(cp);
	bcopy(cp, mp->mnt_stat.f_mntfromname, len);

	cp = mp->mnt_stat.f_mntfromname + len;
	len = MNAMELEN - len;

	(void) copyinstr(args.target, cp, len - 1, &size);
	bzero(cp + size, len - size);

	(void)union_statfs(mp, &mp->mnt_stat, p);

#ifdef UNION_DIAGNOSTIC
	printf("union_mount: from %s, on %s\n",
	    mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname);
#endif
	return (0);

bad:
	if (um)
		free(um, M_UFSMNT);
	if (cred)
		crfree(cred);
	if (upperrootvp)
		vrele(upperrootvp);
	if (lowerrootvp)
		vrele(lowerrootvp);
	return (error);
}

/*
 * VFS start.  Nothing needed here - the start routine
 * on the underlying filesystem(s) will have been called
 * when that filesystem was mounted.
 */
int
union_start(mp, flags, p)
	struct mount *mp;
	int flags;
	struct proc *p;
{

	return (0);
}

/*
 * Free reference to union layer
 */
int
union_unmount(mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
	struct vnode *um_rootvp;
	int error;
	int freeing;
	int flags = 0;

#ifdef UNION_DIAGNOSTIC
	printf("union_unmount(mp = %x)\n", mp);
#endif

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	if (error = union_root(mp, &um_rootvp))
		return (error);

	/*
	 * Keep flushing vnodes from the mount list.
	 * This is needed because of the un_pvp held
	 * reference to the parent vnode.
	 * If more vnodes have been freed on a given pass,
	 * then try again.  The loop will iterate at most
	 * (d) times, where (d) is the maximum tree depth
	 * in the filesystem.
	 */
	for (freeing = 0; vflush(mp, um_rootvp, flags) != 0;) {
		struct vnode *vp;
		int n;

		/* count #vnodes held on mount list */
		for (n = 0, vp = mp->mnt_vnodelist.lh_first;
		     vp != NULLVP;
		     vp = vp->v_mntvnodes.le_next)
			n++;

		/* if this is unchanged then stop */
		if (n == freeing)
			break;

		/* otherwise try one more time */
		freeing = n;
	}

	/* At this point the root vnode should have a single reference */
	if (um_rootvp->v_usecount > 1) {
		vput(um_rootvp);
		return (EBUSY);
	}

#ifdef UNION_DIAGNOSTIC
	vprint("union root", um_rootvp);
#endif
	/*
	 * Discard references to upper and lower target vnodes.
	 */
	if (um->um_lowervp)
		vrele(um->um_lowervp);
	vrele(um->um_uppervp);
	crfree(um->um_cred);
	/*
	 * Release reference on underlying root vnode
	 */
	vput(um_rootvp);
	/*
	 * And blow it away for future re-use
	 */
	vgone(um_rootvp);
	/*
	 * Finally, throw away the union_mount structure
	 */
	free(mp->mnt_data, M_UFSMNT);	/* XXX */
	mp->mnt_data = 0;
	return (0);
}

int
union_root(mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{
	struct proc *p = curproc;	/* XXX */
	struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
	int error;
	int loselock;

	/*
	 * Return locked reference to root.
	 */
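	/*
	 * If this is a mount below, the upper vnode is the covered vnode
	 * and may already be locked by whoever is crossing the mount
	 * point.  In that case it must not be locked (or later unlocked)
	 * here; loselock records that the lock is not ours so that the
	 * UN_ULOCK flag can be cleared on the union node below.
	 */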
	VREF(um->um_uppervp);
	if ((um->um_op == UNMNT_BELOW) &&
	     VOP_ISLOCKED(um->um_uppervp)) {
		loselock = 1;
	} else {
		if (VOP_ISLOCKED(um->um_uppervp)) {
			/*
			 * XXX
			 * Should we check type of node?
			 */
#ifdef DIAGNOSTIC
			printf("union_root: multi union mount?\n");
#endif
			vrele(um->um_uppervp);
			return EDEADLK;
		} else
			vn_lock(um->um_uppervp, LK_EXCLUSIVE | LK_RETRY, p);
		loselock = 0;
	}
	if (um->um_lowervp)
		VREF(um->um_lowervp);
	error = union_allocvp(vpp, mp,
			      (struct vnode *) 0,
			      (struct vnode *) 0,
			      (struct componentname *) 0,
			      um->um_uppervp,
			      um->um_lowervp,
			      1);

	if (error) {
		if (loselock)
			vrele(um->um_uppervp);
		else
			vput(um->um_uppervp);
		if (um->um_lowervp)
			vrele(um->um_lowervp);
	} else {
		if (loselock)
			VTOUNION(*vpp)->un_flags &= ~UN_ULOCK;
	}

	return (error);
}

int
union_statfs(mp, sbp, p)
	struct mount *mp;
	struct statfs *sbp;
	struct proc *p;
{
	int error;
	struct union_mount *um = MOUNTTOUNIONMOUNT(mp);
	struct statfs mstat;
	int lbsize;

#ifdef UNION_DIAGNOSTIC
	printf("union_statfs(mp = %x, lvp = %x, uvp = %x)\n", mp,
	    um->um_lowervp,
	    um->um_uppervp);
#endif

	bzero(&mstat, sizeof(mstat));

	if (um->um_lowervp) {
		error = VFS_STATFS(um->um_lowervp->v_mount, &mstat, p);
		if (error)
			return (error);
	}

	/* now copy across the "interesting" information and fake the rest */
#if 0
	sbp->f_type = mstat.f_type;
	sbp->f_flags = mstat.f_flags;
	sbp->f_bsize = mstat.f_bsize;
	sbp->f_iosize = mstat.f_iosize;
#endif
	lbsize = mstat.f_bsize;
	sbp->f_blocks = mstat.f_blocks;
	sbp->f_bfree = mstat.f_bfree;
	sbp->f_bavail = mstat.f_bavail;
	sbp->f_files = mstat.f_files;
	sbp->f_ffree = mstat.f_ffree;

	error = VFS_STATFS(um->um_uppervp->v_mount, &mstat, p);
	if (error)
		return (error);

	sbp->f_flags = mstat.f_flags;
	sbp->f_bsize = mstat.f_bsize;
	sbp->f_iosize = mstat.f_iosize;

	/*
	 * If the lower and upper blocksizes differ, then frig the
	 * block counts so that the sizes reported by df make some
	 * kind of sense.  None of this makes sense though.
	 */

	if (mstat.f_bsize != lbsize)
		sbp->f_blocks = ((off_t) sbp->f_blocks * lbsize) / mstat.f_bsize;

	/*
	 * The "total" fields count total resources in all layers,
	 * the "free" fields count only those resources which are
	 * free in the upper layer (since only the upper layer
	 * is writeable).
	 */
	sbp->f_blocks += mstat.f_blocks;
	sbp->f_bfree = mstat.f_bfree;
	sbp->f_bavail = mstat.f_bavail;
	sbp->f_files += mstat.f_files;
	sbp->f_ffree = mstat.f_ffree;

	if (sbp != &mp->mnt_stat) {
		sbp->f_type = mp->mnt_vfc->vfc_typenum;
		bcopy(&mp->mnt_stat.f_fsid, &sbp->f_fsid, sizeof(sbp->f_fsid));
		bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN);
		bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN);
	}
	return (0);
}

/*
 * XXX - Assumes no data cached at union layer.
 */
#define union_sync ((int (*) __P((struct mount *, int, struct ucred *, \
	struct proc *)))nullop)

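/*
 * The remaining operations are not supported by the union layer and
 * are stubbed out with eopnotsupp.
 */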
#define union_fhtovp ((int (*) __P((struct mount *, struct fid *, \
	struct sockaddr *, struct vnode **, int *, struct ucred **)))eopnotsupp)
#define union_quotactl ((int (*) __P((struct mount *, int, uid_t, caddr_t, \
	struct proc *)))eopnotsupp)
#define union_sysctl ((int (*) __P((int *, u_int, void *, size_t *, void *, \
	size_t, struct proc *)))eopnotsupp)
#define union_vget ((int (*) __P((struct mount *, ino_t, struct vnode **))) \
	eopnotsupp)
#define union_vptofh ((int (*) __P((struct vnode *, struct fid *)))eopnotsupp)

struct vfsops union_vfsops = {
	union_mount,
	union_start,
	union_unmount,
	union_root,
	union_quotactl,
	union_statfs,
	union_sync,
	union_vget,
	union_fhtovp,
	union_vptofh,
	union_init,
};

VFS_SET(union_vfsops, union, MOUNT_UNION, VFCF_LOOPBACK);