/*-
 * Copyright (c) 1999-2004 Poul-Henning Kamp
 * Copyright (c) 1999 Michael Smith
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vfs_allow_nonmpsafe.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/filedesc.h>
#include <sys/reboot.h>
#include <sys/sbuf.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <vm/uma.h>

#include <geom/geom.h>

#include <machine/stdarg.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#define	VFS_MOUNTARG_SIZE_MAX	(1024 * 64)

static int	vfs_domount(struct thread *td, const char *fstype,
		    char *fspath, int fsflags, struct vfsoptlist **optlist);
static void	free_mntarg(struct mntarg *ma);

static int	usermount = 0;
SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0,
    "Unprivileged users may mount and unmount file systems");

MALLOC_DEFINE(M_MOUNT, "mount", "vfs mount structure");
static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker");
static uma_zone_t mount_zone;

/* List of mounted filesystems. */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);

/* For any iteration/modification of mountlist */
struct mtx mountlist_mtx;
MTX_SYSINIT(mountlist, &mountlist_mtx, "mountlist", MTX_DEF);

/*
 * Global opts, taken by all filesystems
 */
static const char *global_opts[] = {
	"errmsg",
	"fstype",
	"fspath",
	"ro",
	"rw",
	"nosuid",
	"noexec",
	NULL
};

static int
mount_init(void *mem, int size, int flags)
{
	struct mount *mp;

	mp = (struct mount *)mem;
	mtx_init(&mp->mnt_mtx, "struct mount mtx", NULL, MTX_DEF);
	lockinit(&mp->mnt_explock, PVFS, "explock", 0, 0);
	return (0);
}

static void
mount_fini(void *mem, int size)
{
	struct mount *mp;

	mp = (struct mount *)mem;
	lockdestroy(&mp->mnt_explock);
	mtx_destroy(&mp->mnt_mtx);
}

static void
vfs_mount_init(void *dummy __unused)
{

	mount_zone = uma_zcreate("Mountpoints", sizeof(struct mount), NULL,
	    NULL, mount_init, mount_fini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}
SYSINIT(vfs_mount, SI_SUB_VFS, SI_ORDER_ANY, vfs_mount_init, NULL);

/*
 * ---------------------------------------------------------------------
 * Functions for building and sanitizing the mount options
 */

/* Remove one mount option. */
static void
vfs_freeopt(struct vfsoptlist *opts, struct vfsopt *opt)
{

	TAILQ_REMOVE(opts, opt, link);
	free(opt->name, M_MOUNT);
	if (opt->value != NULL)
		free(opt->value, M_MOUNT);
	free(opt, M_MOUNT);
}

/* Release all resources related to the mount options. */
void
vfs_freeopts(struct vfsoptlist *opts)
{
	struct vfsopt *opt;

	while (!TAILQ_EMPTY(opts)) {
		opt = TAILQ_FIRST(opts);
		vfs_freeopt(opts, opt);
	}
	free(opts, M_MOUNT);
}

void
vfs_deleteopt(struct vfsoptlist *opts, const char *name)
{
	struct vfsopt *opt, *temp;

	if (opts == NULL)
		return;
	TAILQ_FOREACH_SAFE(opt, opts, link, temp) {
		if (strcmp(opt->name, name) == 0)
			vfs_freeopt(opts, opt);
	}
}

static int
vfs_isopt_ro(const char *opt)
{

	if (strcmp(opt, "ro") == 0 || strcmp(opt, "rdonly") == 0 ||
	    strcmp(opt, "norw") == 0)
		return (1);
	return (0);
}

static int
vfs_isopt_rw(const char *opt)
{

	if (strcmp(opt, "rw") == 0 || strcmp(opt, "noro") == 0)
		return (1);
	return (0);
}

/*
 * Check if options are equal (with or without the "no" prefix).
 */
static int
vfs_equalopts(const char *opt1, const char *opt2)
{
	char *p;

	/* "opt" vs. "opt" or "noopt" vs. "noopt" */
	if (strcmp(opt1, opt2) == 0)
		return (1);
	/* "noopt" vs. "opt" */
	if (strncmp(opt1, "no", 2) == 0 && strcmp(opt1 + 2, opt2) == 0)
		return (1);
	/* "opt" vs. "noopt" */
	if (strncmp(opt2, "no", 2) == 0 && strcmp(opt1, opt2 + 2) == 0)
		return (1);
	while ((p = strchr(opt1, '.')) != NULL &&
	    !strncmp(opt1, opt2, ++p - opt1)) {
		opt2 += p - opt1;
		opt1 = p;
		/* "foo.noopt" vs. "foo.opt" */
		if (strncmp(opt1, "no", 2) == 0 && strcmp(opt1 + 2, opt2) == 0)
			return (1);
		/* "foo.opt" vs. "foo.noopt" */
		if (strncmp(opt2, "no", 2) == 0 && strcmp(opt1, opt2 + 2) == 0)
			return (1);
	}
	/* "ro" / "rdonly" / "norw" / "rw" / "noro" */
	if ((vfs_isopt_ro(opt1) || vfs_isopt_rw(opt1)) &&
	    (vfs_isopt_ro(opt2) || vfs_isopt_rw(opt2)))
		return (1);
	return (0);
}

/*
 * If a mount option is specified several times,
 * (with or without the "no" prefix) only keep
 * the last occurrence of it.
 */
static void
vfs_sanitizeopts(struct vfsoptlist *opts)
{
	struct vfsopt *opt, *opt2, *tmp;

	TAILQ_FOREACH_REVERSE(opt, opts, vfsoptlist, link) {
		opt2 = TAILQ_PREV(opt, vfsoptlist, link);
		while (opt2 != NULL) {
			if (vfs_equalopts(opt->name, opt2->name)) {
				tmp = TAILQ_PREV(opt2, vfsoptlist, link);
				vfs_freeopt(opts, opt2);
				opt2 = tmp;
			} else {
				opt2 = TAILQ_PREV(opt2, vfsoptlist, link);
			}
		}
	}
}

/*
 * Build a linked list of mount options from a struct uio.
 */
int
vfs_buildopts(struct uio *auio, struct vfsoptlist **options)
{
	struct vfsoptlist *opts;
	struct vfsopt *opt;
	size_t memused, namelen, optlen;
	unsigned int i, iovcnt;
	int error;

	opts = malloc(sizeof(struct vfsoptlist), M_MOUNT, M_WAITOK);
	TAILQ_INIT(opts);
	memused = 0;
	iovcnt = auio->uio_iovcnt;
	for (i = 0; i < iovcnt; i += 2) {
		namelen = auio->uio_iov[i].iov_len;
		optlen = auio->uio_iov[i + 1].iov_len;
		memused += sizeof(struct vfsopt) + optlen + namelen;
		/*
		 * Avoid consuming too much memory, and attempts to overflow
		 * memused.
		 */
		if (memused > VFS_MOUNTARG_SIZE_MAX ||
		    optlen > VFS_MOUNTARG_SIZE_MAX ||
		    namelen > VFS_MOUNTARG_SIZE_MAX) {
			error = EINVAL;
			goto bad;
		}

		opt = malloc(sizeof(struct vfsopt), M_MOUNT, M_WAITOK);
		opt->name = malloc(namelen, M_MOUNT, M_WAITOK);
		opt->value = NULL;
		opt->len = 0;
		opt->pos = i / 2;
		opt->seen = 0;

		/*
		 * Do this early, so jumps to "bad" will free the current
		 * option.
		 */
		TAILQ_INSERT_TAIL(opts, opt, link);

		if (auio->uio_segflg == UIO_SYSSPACE) {
			bcopy(auio->uio_iov[i].iov_base, opt->name, namelen);
		} else {
			error = copyin(auio->uio_iov[i].iov_base, opt->name,
			    namelen);
			if (error)
				goto bad;
		}
		/* Ensure names are null-terminated strings. */
		if (namelen == 0 || opt->name[namelen - 1] != '\0') {
			error = EINVAL;
			goto bad;
		}
		if (optlen != 0) {
			opt->len = optlen;
			opt->value = malloc(optlen, M_MOUNT, M_WAITOK);
			if (auio->uio_segflg == UIO_SYSSPACE) {
				bcopy(auio->uio_iov[i + 1].iov_base, opt->value,
				    optlen);
			} else {
				error = copyin(auio->uio_iov[i + 1].iov_base,
				    opt->value, optlen);
				if (error)
					goto bad;
			}
		}
	}
	vfs_sanitizeopts(opts);
	*options = opts;
	return (0);
bad:
	vfs_freeopts(opts);
	return (error);
}

/*
 * Merge the old mount options with the new ones passed
 * in the MNT_UPDATE case.
 *
 * XXX: This function will keep a "nofoo" option in the new
 * options.  E.g., if the option's canonical name is "foo",
 * "nofoo" ends up in the mount point's active options.
 */
static void
vfs_mergeopts(struct vfsoptlist *toopts, struct vfsoptlist *oldopts)
{
	struct vfsopt *opt, *new;

	TAILQ_FOREACH(opt, oldopts, link) {
		new = malloc(sizeof(struct vfsopt), M_MOUNT, M_WAITOK);
		new->name = strdup(opt->name, M_MOUNT);
		if (opt->len != 0) {
			new->value = malloc(opt->len, M_MOUNT, M_WAITOK);
			bcopy(opt->value, new->value, opt->len);
		} else
			new->value = NULL;
		new->len = opt->len;
		new->seen = opt->seen;
		TAILQ_INSERT_HEAD(toopts, new, link);
	}
	vfs_sanitizeopts(toopts);
}

/*
 * Mount a filesystem.
 */
int
sys_nmount(td, uap)
	struct thread *td;
	struct nmount_args /* {
		struct iovec *iovp;
		unsigned int iovcnt;
		int flags;
	} */ *uap;
{
	struct uio *auio;
	int error;
	u_int iovcnt;

	AUDIT_ARG_FFLAGS(uap->flags);
	CTR4(KTR_VFS, "%s: iovp %p with iovcnt %d and flags %d", __func__,
	    uap->iovp, uap->iovcnt, uap->flags);

	/*
	 * Filter out MNT_ROOTFS.  We do not want clients of nmount() in
	 * userspace to set this flag, but we must filter it out if we want
	 * MNT_UPDATE on the root file system to work.
	 * MNT_ROOTFS should only be set by the kernel when mounting its
	 * root file system.
	 */
	uap->flags &= ~MNT_ROOTFS;

	iovcnt = uap->iovcnt;
	/*
	 * Check that we have an even number of iovecs
	 * and that we have at least two options.
	 */
	if ((iovcnt & 1) || (iovcnt < 4)) {
		CTR2(KTR_VFS, "%s: failed for invalid iovcnt %d", __func__,
		    uap->iovcnt);
		return (EINVAL);
	}

	error = copyinuio(uap->iovp, iovcnt, &auio);
	if (error) {
		CTR2(KTR_VFS, "%s: failed for invalid uio op with %d errno",
		    __func__, error);
		return (error);
	}
	error = vfs_donmount(td, uap->flags, auio);

	free(auio, M_IOV);
	return (error);
}

/*
 * ---------------------------------------------------------------------
 * Various utility functions
 */

void
vfs_ref(struct mount *mp)
{

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	MNT_ILOCK(mp);
	MNT_REF(mp);
	MNT_IUNLOCK(mp);
}

void
vfs_rel(struct mount *mp)
{

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	MNT_ILOCK(mp);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
}

/*
 * Allocate and initialize the mount point struct.
 */
struct mount *
vfs_mount_alloc(struct vnode *vp, struct vfsconf *vfsp, const char *fspath,
    struct ucred *cred)
{
	struct mount *mp;

	mp = uma_zalloc(mount_zone, M_WAITOK);
	bzero(&mp->mnt_startzero,
	    __rangeof(struct mount, mnt_startzero, mnt_endzero));
	TAILQ_INIT(&mp->mnt_nvnodelist);
	mp->mnt_nvnodelistsize = 0;
	mp->mnt_ref = 0;
	(void) vfs_busy(mp, MBF_NOWAIT);
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_vfc = vfsp;
	vfsp->vfc_refcount++;	/* XXX Unlocked */
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_gen++;
	strlcpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_vnodecovered = vp;
	mp->mnt_cred = crdup(cred);
	mp->mnt_stat.f_owner = cred->cr_uid;
	strlcpy(mp->mnt_stat.f_mntonname, fspath, MNAMELEN);
	mp->mnt_iosize_max = DFLTPHYS;
#ifdef MAC
	mac_mount_init(mp);
	mac_mount_create(cred, mp);
#endif
	arc4rand(&mp->mnt_hashseed, sizeof mp->mnt_hashseed, 0);
	return (mp);
}
/*
 * Destroy the mount struct previously allocated by vfs_mount_alloc().
 */
void
vfs_mount_destroy(struct mount *mp)
{

	MNT_ILOCK(mp);
	mp->mnt_kern_flag |= MNTK_REFEXPIRE;
	if (mp->mnt_kern_flag & MNTK_MWAIT) {
		mp->mnt_kern_flag &= ~MNTK_MWAIT;
		wakeup(mp);
	}
	while (mp->mnt_ref)
		msleep(mp, MNT_MTX(mp), PVFS, "mntref", 0);
	KASSERT(mp->mnt_ref == 0,
	    ("%s: invalid refcount in the drain path @ %s:%d", __func__,
	    __FILE__, __LINE__));
	if (mp->mnt_writeopcount != 0)
		panic("vfs_mount_destroy: nonzero writeopcount");
	if (mp->mnt_secondary_writes != 0)
		panic("vfs_mount_destroy: nonzero secondary_writes");
	mp->mnt_vfc->vfc_refcount--;
	if (!TAILQ_EMPTY(&mp->mnt_nvnodelist)) {
		struct vnode *vp;

		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes)
			vprint("", vp);
		panic("unmount: dangling vnode");
	}
	if (mp->mnt_nvnodelistsize != 0)
		panic("vfs_mount_destroy: nonzero nvnodelistsize");
	if (mp->mnt_lockref != 0)
		panic("vfs_mount_destroy: nonzero lock refcount");
	MNT_IUNLOCK(mp);
#ifdef MAC
	mac_mount_destroy(mp);
#endif
	if (mp->mnt_opt != NULL)
		vfs_freeopts(mp->mnt_opt);
	crfree(mp->mnt_cred);
	uma_zfree(mount_zone, mp);
}

int
vfs_donmount(struct thread *td, int fsflags, struct uio *fsoptions)
{
	struct vfsoptlist *optlist;
	struct vfsopt *opt, *tmp_opt;
	char *fstype, *fspath, *errmsg;
	int error, fstypelen, fspathlen, errmsg_len, errmsg_pos;

	errmsg = fspath = NULL;
	errmsg_len = fspathlen = 0;
	errmsg_pos = -1;

	error = vfs_buildopts(fsoptions, &optlist);
	if (error)
		return (error);

	if (vfs_getopt(optlist, "errmsg", (void **)&errmsg, &errmsg_len) == 0)
		errmsg_pos = vfs_getopt_pos(optlist, "errmsg");

	/*
	 * We need these two options before the others,
	 * and they are mandatory for any filesystem.
	 * Ensure they are NUL terminated as well.
	 */
	fstypelen = 0;
	error = vfs_getopt(optlist, "fstype", (void **)&fstype, &fstypelen);
	if (error || fstype[fstypelen - 1] != '\0') {
		error = EINVAL;
		if (errmsg != NULL)
			strncpy(errmsg, "Invalid fstype", errmsg_len);
		goto bail;
	}
	fspathlen = 0;
	error = vfs_getopt(optlist, "fspath", (void **)&fspath, &fspathlen);
	if (error || fspath[fspathlen - 1] != '\0') {
		error = EINVAL;
		if (errmsg != NULL)
			strncpy(errmsg, "Invalid fspath", errmsg_len);
		goto bail;
	}

	/*
	 * We need to see if we have the "update" option
	 * before we call vfs_domount(), since vfs_domount() has special
	 * logic based on MNT_UPDATE.  This is very important
	 * when we want to update the root filesystem.
	 */
	TAILQ_FOREACH_SAFE(opt, optlist, link, tmp_opt) {
		if (strcmp(opt->name, "update") == 0) {
			fsflags |= MNT_UPDATE;
			vfs_freeopt(optlist, opt);
		}
		else if (strcmp(opt->name, "async") == 0)
			fsflags |= MNT_ASYNC;
		else if (strcmp(opt->name, "force") == 0) {
			fsflags |= MNT_FORCE;
			vfs_freeopt(optlist, opt);
		}
		else if (strcmp(opt->name, "reload") == 0) {
			fsflags |= MNT_RELOAD;
			vfs_freeopt(optlist, opt);
		}
		else if (strcmp(opt->name, "multilabel") == 0)
			fsflags |= MNT_MULTILABEL;
		else if (strcmp(opt->name, "noasync") == 0)
			fsflags &= ~MNT_ASYNC;
		else if (strcmp(opt->name, "noatime") == 0)
			fsflags |= MNT_NOATIME;
		else if (strcmp(opt->name, "atime") == 0) {
			free(opt->name, M_MOUNT);
			opt->name = strdup("nonoatime", M_MOUNT);
		}
		else if (strcmp(opt->name, "noclusterr") == 0)
			fsflags |= MNT_NOCLUSTERR;
		else if (strcmp(opt->name, "clusterr") == 0) {
			free(opt->name, M_MOUNT);
			opt->name = strdup("nonoclusterr", M_MOUNT);
		}
		else if (strcmp(opt->name, "noclusterw") == 0)
			fsflags |= MNT_NOCLUSTERW;
		else if (strcmp(opt->name, "clusterw") == 0) {
			free(opt->name, M_MOUNT);
			opt->name = strdup("nonoclusterw", M_MOUNT);
		}
		else if (strcmp(opt->name, "noexec") == 0)
			fsflags |= MNT_NOEXEC;
		else if (strcmp(opt->name, "exec") == 0) {
			free(opt->name, M_MOUNT);
			opt->name = strdup("nonoexec", M_MOUNT);
		}
		else if (strcmp(opt->name, "nosuid") == 0)
			fsflags |= MNT_NOSUID;
		else if (strcmp(opt->name, "suid") == 0) {
			free(opt->name, M_MOUNT);
			opt->name = strdup("nonosuid", M_MOUNT);
		}
		else if (strcmp(opt->name, "nosymfollow") == 0)
			fsflags |= MNT_NOSYMFOLLOW;
		else if (strcmp(opt->name, "symfollow") == 0) {
			free(opt->name, M_MOUNT);
			opt->name = strdup("nonosymfollow", M_MOUNT);
		}
		else if (strcmp(opt->name, "noro") == 0)
			fsflags &= ~MNT_RDONLY;
		else if (strcmp(opt->name, "rw") == 0)
			fsflags &= ~MNT_RDONLY;
		else if (strcmp(opt->name, "ro") == 0)
			fsflags |= MNT_RDONLY;
		else if (strcmp(opt->name, "rdonly") == 0) {
			free(opt->name, M_MOUNT);
			opt->name = strdup("ro", M_MOUNT);
			fsflags |= MNT_RDONLY;
		}
		else if (strcmp(opt->name, "suiddir") == 0)
			fsflags |= MNT_SUIDDIR;
		else if (strcmp(opt->name, "sync") == 0)
			fsflags |= MNT_SYNCHRONOUS;
		else if (strcmp(opt->name, "union") == 0)
			fsflags |= MNT_UNION;
	}

	/*
	 * Be ultra-paranoid about making sure the type and fspath
	 * variables will fit in our mp buffers, including the
	 * terminating NUL.
	 */
	if (fstypelen >= MFSNAMELEN - 1 || fspathlen >= MNAMELEN - 1) {
		error = ENAMETOOLONG;
		goto bail;
	}

	error = vfs_domount(td, fstype, fspath, fsflags, &optlist);
bail:
	/* copyout the errmsg */
	if (errmsg_pos != -1 && ((2 * errmsg_pos + 1) < fsoptions->uio_iovcnt)
	    && errmsg_len > 0 && errmsg != NULL) {
		if (fsoptions->uio_segflg == UIO_SYSSPACE) {
			bcopy(errmsg,
			    fsoptions->uio_iov[2 * errmsg_pos + 1].iov_base,
			    fsoptions->uio_iov[2 * errmsg_pos + 1].iov_len);
		} else {
			copyout(errmsg,
			    fsoptions->uio_iov[2 * errmsg_pos + 1].iov_base,
			    fsoptions->uio_iov[2 * errmsg_pos + 1].iov_len);
		}
	}

	if (optlist != NULL)
		vfs_freeopts(optlist);
	return (error);
}
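
/*
 * For reference, a hedged sketch of the userland side of the protocol
 * consumed above: nmount(2) takes consecutive (name, value) iovec pairs,
 * with names NUL-terminated and "fstype"/"fspath" mandatory.  An "errmsg"
 * pair naming a writable buffer lets the copyout at the "bail" label hand
 * back any text set via vfs_mount_error().  The filesystem, device and
 * mount point below are purely illustrative:
 *
 *	char errmsg[255] = "";
 *	struct iovec iov[] = {
 *		{ "fstype", sizeof("fstype") }, { "ufs",  sizeof("ufs") },
 *		{ "fspath", sizeof("fspath") }, { "/mnt", sizeof("/mnt") },
 *		{ "errmsg", sizeof("errmsg") }, { errmsg, sizeof(errmsg) },
 *	};
 *
 *	if (nmount(iov, sizeof(iov) / sizeof(iov[0]), MNT_RDONLY) == -1)
 *		warnx("%s", errmsg[0] != '\0' ? errmsg : "mount failed");
 */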

/*
 * Old mount API.
 */
#ifndef _SYS_SYSPROTO_H_
struct mount_args {
	char	*type;
	char	*path;
	int	flags;
	caddr_t	data;
};
#endif
/* ARGSUSED */
int
sys_mount(td, uap)
	struct thread *td;
	struct mount_args /* {
		char *type;
		char *path;
		int flags;
		caddr_t data;
	} */ *uap;
{
	char *fstype;
	struct vfsconf *vfsp = NULL;
	struct mntarg *ma = NULL;
	int error;

	AUDIT_ARG_FFLAGS(uap->flags);

	/*
	 * Filter out MNT_ROOTFS.  We do not want clients of mount() in
	 * userspace to set this flag, but we must filter it out if we want
	 * MNT_UPDATE on the root file system to work.
	 * MNT_ROOTFS should only be set by the kernel when mounting its
	 * root file system.
	 */
	uap->flags &= ~MNT_ROOTFS;

	fstype = malloc(MFSNAMELEN, M_TEMP, M_WAITOK);
	error = copyinstr(uap->type, fstype, MFSNAMELEN, NULL);
	if (error) {
		free(fstype, M_TEMP);
		return (error);
	}

	AUDIT_ARG_TEXT(fstype);
	mtx_lock(&Giant);
	vfsp = vfs_byname_kld(fstype, td, &error);
	free(fstype, M_TEMP);
	if (vfsp == NULL) {
		mtx_unlock(&Giant);
		return (ENOENT);
	}
	if (vfsp->vfc_vfsops->vfs_cmount == NULL) {
		mtx_unlock(&Giant);
		return (EOPNOTSUPP);
	}

	ma = mount_argsu(ma, "fstype", uap->type, MNAMELEN);
	ma = mount_argsu(ma, "fspath", uap->path, MNAMELEN);
	ma = mount_argb(ma, uap->flags & MNT_RDONLY, "noro");
	ma = mount_argb(ma, !(uap->flags & MNT_NOSUID), "nosuid");
	ma = mount_argb(ma, !(uap->flags & MNT_NOEXEC), "noexec");

	error = vfsp->vfc_vfsops->vfs_cmount(ma, uap->data, uap->flags);
	mtx_unlock(&Giant);
	return (error);
}

/*
 * vfs_domount_first(): first file system mount (not update)
 */
static int
vfs_domount_first(
	struct thread *td,		/* Calling thread. */
	struct vfsconf *vfsp,		/* File system type. */
	char *fspath,			/* Mount path. */
	struct vnode *vp,		/* Vnode to be covered. */
	int fsflags,			/* Flags common to all filesystems. */
	struct vfsoptlist **optlist	/* Options local to the filesystem. */
	)
{
	struct vattr va;
	struct mount *mp;
	struct vnode *newdp;
	int error;

	mtx_assert(&Giant, MA_OWNED);
	ASSERT_VOP_ELOCKED(vp, __func__);
	KASSERT((fsflags & MNT_UPDATE) == 0, ("MNT_UPDATE shouldn't be here"));

	/*
	 * If the user is not root, ensure that they own the directory
	 * onto which we are attempting to mount.
	 */
	error = VOP_GETATTR(vp, &va, td->td_ucred);
	if (error == 0 && va.va_uid != td->td_ucred->cr_uid)
		error = priv_check_cred(td->td_ucred, PRIV_VFS_ADMIN, 0);
	if (error == 0)
		error = vinvalbuf(vp, V_SAVE, 0, 0);
	if (error == 0 && vp->v_type != VDIR)
		error = ENOTDIR;
	if (error == 0) {
		VI_LOCK(vp);
		if ((vp->v_iflag & VI_MOUNT) == 0 && vp->v_mountedhere == NULL)
			vp->v_iflag |= VI_MOUNT;
		else
			error = EBUSY;
		VI_UNLOCK(vp);
	}
	if (error != 0) {
		vput(vp);
		return (error);
	}
	VOP_UNLOCK(vp, 0);

	/* Allocate and initialize the filesystem. */
	mp = vfs_mount_alloc(vp, vfsp, fspath, td->td_ucred);
	/* XXXMAC: pass to vfs_mount_alloc? */
	mp->mnt_optnew = *optlist;
	/* Set the mount level flags. */
	mp->mnt_flag = (fsflags & (MNT_UPDATEMASK | MNT_ROOTFS | MNT_RDONLY));

	/*
	 * Mount the filesystem.
	 * XXX The final recipients of VFS_MOUNT just overwrite the ndp they
	 * get.  No freeing of cn_pnbuf.
	 */
	error = VFS_MOUNT(mp);
#ifndef VFS_ALLOW_NONMPSAFE
	if (error == 0 && VFS_NEEDSGIANT(mp)) {
		(void)VFS_UNMOUNT(mp, fsflags);
		error = ENXIO;
		printf("%s: Mounting non-MPSAFE fs (%s) is disabled\n",
		    __func__, mp->mnt_vfc->vfc_name);
	}
#endif
	if (error != 0) {
		vfs_unbusy(mp);
		vfs_mount_destroy(mp);
		VI_LOCK(vp);
		vp->v_iflag &= ~VI_MOUNT;
		VI_UNLOCK(vp);
		vrele(vp);
		return (error);
	}
#ifdef VFS_ALLOW_NONMPSAFE
	if (VFS_NEEDSGIANT(mp))
		printf("%s: Mounting non-MPSAFE fs (%s) is deprecated\n",
		    __func__, mp->mnt_vfc->vfc_name);
#endif

	if (mp->mnt_opt != NULL)
		vfs_freeopts(mp->mnt_opt);
	mp->mnt_opt = mp->mnt_optnew;
	*optlist = NULL;
	(void)VFS_STATFS(mp, &mp->mnt_stat);

	/*
	 * Prevent external consumers of mount options from reading mnt_optnew.
	 */
	mp->mnt_optnew = NULL;

	MNT_ILOCK(mp);
	if ((mp->mnt_flag & MNT_ASYNC) != 0 && mp->mnt_noasync == 0)
		mp->mnt_kern_flag |= MNTK_ASYNC;
	else
		mp->mnt_kern_flag &= ~MNTK_ASYNC;
	MNT_IUNLOCK(mp);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	cache_purge(vp);
	VI_LOCK(vp);
	vp->v_iflag &= ~VI_MOUNT;
	VI_UNLOCK(vp);
	vp->v_mountedhere = mp;
	/* Place the new filesystem at the end of the mount list. */
	mtx_lock(&mountlist_mtx);
	TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	mtx_unlock(&mountlist_mtx);
	vfs_event_signal(NULL, VQ_MOUNT, 0);
	if (VFS_ROOT(mp, LK_EXCLUSIVE, &newdp))
		panic("mount: lost mount");
	VOP_UNLOCK(newdp, 0);
	VOP_UNLOCK(vp, 0);
	mountcheckdirs(vp, newdp);
	vrele(newdp);
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		vfs_allocate_syncvnode(mp);
	vfs_unbusy(mp);
	return (0);
}

/*
 * vfs_domount_update(): update of mounted file system
 */
static int
vfs_domount_update(
	struct thread *td,		/* Calling thread. */
	struct vnode *vp,		/* Mount point vnode. */
	int fsflags,			/* Flags common to all filesystems. */
	struct vfsoptlist **optlist	/* Options local to the filesystem. */
	)
{
	struct oexport_args oexport;
	struct export_args export;
	struct mount *mp;
	int error, export_error, flag;

	mtx_assert(&Giant, MA_OWNED);
	ASSERT_VOP_ELOCKED(vp, __func__);
	KASSERT((fsflags & MNT_UPDATE) != 0, ("MNT_UPDATE should be here"));

	if ((vp->v_vflag & VV_ROOT) == 0) {
		vput(vp);
		return (EINVAL);
	}
	mp = vp->v_mount;
	/*
	 * We only allow the filesystem to be reloaded if it
	 * is currently mounted read-only.
	 */
	flag = mp->mnt_flag;
	if ((fsflags & MNT_RELOAD) != 0 && (flag & MNT_RDONLY) == 0) {
		vput(vp);
		return (EOPNOTSUPP);	/* Needs translation */
	}
	/*
	 * Only privileged root, or (if MNT_USER is set) the user that
	 * did the original mount is permitted to update it.
	 */
	error = vfs_suser(mp, td);
	if (error != 0) {
		vput(vp);
		return (error);
	}
	if (vfs_busy(mp, MBF_NOWAIT)) {
		vput(vp);
		return (EBUSY);
	}
	VI_LOCK(vp);
	if ((vp->v_iflag & VI_MOUNT) != 0 || vp->v_mountedhere != NULL) {
		VI_UNLOCK(vp);
		vfs_unbusy(mp);
		vput(vp);
		return (EBUSY);
	}
	vp->v_iflag |= VI_MOUNT;
	VI_UNLOCK(vp);
	VOP_UNLOCK(vp, 0);

	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_UPDATEMASK;
	mp->mnt_flag |= fsflags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE |
	    MNT_SNAPSHOT | MNT_ROOTFS | MNT_UPDATEMASK | MNT_RDONLY);
	if ((mp->mnt_flag & MNT_ASYNC) == 0)
		mp->mnt_kern_flag &= ~MNTK_ASYNC;
	MNT_IUNLOCK(mp);
	mp->mnt_optnew = *optlist;
	vfs_mergeopts(mp->mnt_optnew, mp->mnt_opt);

	/*
	 * Mount the filesystem.
	 * XXX The final recipients of VFS_MOUNT just overwrite the ndp they
	 * get.  No freeing of cn_pnbuf.
	 */
	error = VFS_MOUNT(mp);

	export_error = 0;
	if (error == 0) {
		/* Process the export option. */
		if (vfs_copyopt(mp->mnt_optnew, "export", &export,
		    sizeof(export)) == 0) {
			export_error = vfs_export(mp, &export);
		} else if (vfs_copyopt(mp->mnt_optnew, "export", &oexport,
		    sizeof(oexport)) == 0) {
			export.ex_flags = oexport.ex_flags;
			export.ex_root = oexport.ex_root;
			export.ex_anon = oexport.ex_anon;
			export.ex_addr = oexport.ex_addr;
			export.ex_addrlen = oexport.ex_addrlen;
			export.ex_mask = oexport.ex_mask;
			export.ex_masklen = oexport.ex_masklen;
			export.ex_indexfile = oexport.ex_indexfile;
			export.ex_numsecflavors = 0;
			export_error = vfs_export(mp, &export);
		}
	}

	MNT_ILOCK(mp);
	if (error == 0) {
		mp->mnt_flag &= ~(MNT_UPDATE | MNT_RELOAD | MNT_FORCE |
		    MNT_SNAPSHOT);
	} else {
		/*
		 * If we fail, restore old mount flags. MNT_QUOTA is special,
		 * because it is not part of MNT_UPDATEMASK, but it could have
		 * changed in the meantime if quotactl(2) was called.
		 * All in all we want current value of MNT_QUOTA, not the old
		 * one.
		 */
		mp->mnt_flag = (mp->mnt_flag & MNT_QUOTA) | (flag & ~MNT_QUOTA);
	}
	if ((mp->mnt_flag & MNT_ASYNC) != 0 && mp->mnt_noasync == 0)
		mp->mnt_kern_flag |= MNTK_ASYNC;
	else
		mp->mnt_kern_flag &= ~MNTK_ASYNC;
	MNT_IUNLOCK(mp);

	if (error != 0)
		goto end;

	if (mp->mnt_opt != NULL)
		vfs_freeopts(mp->mnt_opt);
	mp->mnt_opt = mp->mnt_optnew;
	*optlist = NULL;
	(void)VFS_STATFS(mp, &mp->mnt_stat);
	/*
	 * Prevent external consumers of mount options from reading
	 * mnt_optnew.
	 */
	mp->mnt_optnew = NULL;

	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		vfs_allocate_syncvnode(mp);
	else
		vfs_deallocate_syncvnode(mp);
end:
	vfs_unbusy(mp);
	VI_LOCK(vp);
	vp->v_iflag &= ~VI_MOUNT;
	VI_UNLOCK(vp);
	vrele(vp);
	return (error != 0 ? error : export_error);
}

/*
 * vfs_domount(): actually attempt a filesystem mount.
 */
static int
vfs_domount(
	struct thread *td,		/* Calling thread. */
	const char *fstype,		/* Filesystem type. */
	char *fspath,			/* Mount path. */
	int fsflags,			/* Flags common to all filesystems. */
	struct vfsoptlist **optlist	/* Options local to the filesystem. */
	)
{
	struct vfsconf *vfsp;
	struct nameidata nd;
	struct vnode *vp;
	int error;

	/*
	 * Be ultra-paranoid about making sure the type and fspath
	 * variables will fit in our mp buffers, including the
	 * terminating NUL.
	 */
	if (strlen(fstype) >= MFSNAMELEN || strlen(fspath) >= MNAMELEN)
		return (ENAMETOOLONG);

	if (jailed(td->td_ucred) || usermount == 0) {
		if ((error = priv_check(td, PRIV_VFS_MOUNT)) != 0)
			return (error);
	}

	/*
	 * Do not allow NFS export or MNT_SUIDDIR by unprivileged users.
	 */
	if (fsflags & MNT_EXPORTED) {
		error = priv_check(td, PRIV_VFS_MOUNT_EXPORTED);
		if (error)
			return (error);
	}
	if (fsflags & MNT_SUIDDIR) {
		error = priv_check(td, PRIV_VFS_MOUNT_SUIDDIR);
		if (error)
			return (error);
	}
	/*
	 * Silently enforce MNT_NOSUID and MNT_USER for unprivileged users.
	 */
	if ((fsflags & (MNT_NOSUID | MNT_USER)) != (MNT_NOSUID | MNT_USER)) {
		if (priv_check(td, PRIV_VFS_MOUNT_NONUSER) != 0)
			fsflags |= MNT_NOSUID | MNT_USER;
	}

	/* Load KLDs before we lock the covered vnode to avoid reversals. */
	vfsp = NULL;
	if ((fsflags & MNT_UPDATE) == 0) {
		/* Don't try to load KLDs if we're mounting the root. */
		if (fsflags & MNT_ROOTFS)
			vfsp = vfs_byname(fstype);
		else
			vfsp = vfs_byname_kld(fstype, td, &error);
		if (vfsp == NULL)
			return (ENODEV);
		if (jailed(td->td_ucred) && !(vfsp->vfc_flags & VFCF_JAIL))
			return (EPERM);
	}

	/*
	 * Get vnode to be covered or mount point's vnode in case of MNT_UPDATE.
	 */
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | MPSAFE | AUDITVNODE1,
	    UIO_SYSSPACE, fspath, td);
	error = namei(&nd);
	if (error != 0)
		return (error);
	if (!NDHASGIANT(&nd))
		mtx_lock(&Giant);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vp = nd.ni_vp;
	if ((fsflags & MNT_UPDATE) == 0) {
		error = vn_path_to_global_path(td, vp, fspath, MNAMELEN);
		/* debug.disablefullpath == 1 results in ENODEV */
		if (error == 0 || error == ENODEV) {
			error = vfs_domount_first(td, vfsp, fspath, vp,
			    fsflags, optlist);
		}
	} else
		error = vfs_domount_update(td, vp, fsflags, optlist);
	mtx_unlock(&Giant);

	ASSERT_VI_UNLOCKED(vp, __func__);
	ASSERT_VOP_UNLOCKED(vp, __func__);

	return (error);
}

/*
 * Unmount a filesystem.
 *
 * Note: unmount takes a path to the vnode mounted on as argument, not
 * the special file (as before).
 */
#ifndef _SYS_SYSPROTO_H_
struct unmount_args {
	char	*path;
	int	flags;
};
#endif
/* ARGSUSED */
int
sys_unmount(td, uap)
	struct thread *td;
	register struct unmount_args /* {
		char *path;
		int flags;
	} */ *uap;
{
	struct nameidata nd;
	struct mount *mp;
	char *pathbuf;
	int error, id0, id1, vfslocked;

	AUDIT_ARG_VALUE(uap->flags);
	if (jailed(td->td_ucred) || usermount == 0) {
		error = priv_check(td, PRIV_VFS_UNMOUNT);
		if (error)
			return (error);
	}

	pathbuf = malloc(MNAMELEN, M_TEMP, M_WAITOK);
	error = copyinstr(uap->path, pathbuf, MNAMELEN, NULL);
	if (error) {
		free(pathbuf, M_TEMP);
		return (error);
	}
	mtx_lock(&Giant);
	if (uap->flags & MNT_BYFSID) {
		AUDIT_ARG_TEXT(pathbuf);
		/* Decode the filesystem ID. */
		if (sscanf(pathbuf, "FSID:%d:%d", &id0, &id1) != 2) {
			mtx_unlock(&Giant);
			free(pathbuf, M_TEMP);
			return (EINVAL);
		}

		mtx_lock(&mountlist_mtx);
		TAILQ_FOREACH_REVERSE(mp, &mountlist, mntlist, mnt_list) {
			if (mp->mnt_stat.f_fsid.val[0] == id0 &&
			    mp->mnt_stat.f_fsid.val[1] == id1)
				break;
		}
		mtx_unlock(&mountlist_mtx);
	} else {
		AUDIT_ARG_UPATH1(td, pathbuf);
		/*
		 * Try to find global path for path argument.
		 */
		NDINIT(&nd, LOOKUP,
		    FOLLOW | LOCKLEAF | MPSAFE | AUDITVNODE1,
		    UIO_SYSSPACE, pathbuf, td);
		if (namei(&nd) == 0) {
			vfslocked = NDHASGIANT(&nd);
			NDFREE(&nd, NDF_ONLY_PNBUF);
			error = vn_path_to_global_path(td, nd.ni_vp, pathbuf,
			    MNAMELEN);
			if (error == 0 || error == ENODEV)
				vput(nd.ni_vp);
			VFS_UNLOCK_GIANT(vfslocked);
		}
		mtx_lock(&mountlist_mtx);
		TAILQ_FOREACH_REVERSE(mp, &mountlist, mntlist, mnt_list) {
			if (strcmp(mp->mnt_stat.f_mntonname, pathbuf) == 0)
				break;
		}
		mtx_unlock(&mountlist_mtx);
	}
	free(pathbuf, M_TEMP);
	if (mp == NULL) {
		/*
		 * Previously we returned ENOENT for a nonexistent path and
		 * EINVAL for a non-mountpoint.  We cannot tell these apart
		 * now, so in the !MNT_BYFSID case return the more likely
		 * EINVAL for compatibility.
		 */
		mtx_unlock(&Giant);
		return ((uap->flags & MNT_BYFSID) ? ENOENT : EINVAL);
	}

	/*
	 * Don't allow unmounting the root filesystem.
	 */
	if (mp->mnt_flag & MNT_ROOTFS) {
		mtx_unlock(&Giant);
		return (EINVAL);
	}
	error = dounmount(mp, uap->flags, td);
	mtx_unlock(&Giant);
	return (error);
}

/*
 * Do the actual filesystem unmount.
 */
int
dounmount(mp, flags, td)
	struct mount *mp;
	int flags;
	struct thread *td;
{
	struct vnode *coveredvp, *fsrootvp;
	int error;
	int async_flag;
	int mnt_gen_r;

	mtx_assert(&Giant, MA_OWNED);

	if ((coveredvp = mp->mnt_vnodecovered) != NULL) {
		mnt_gen_r = mp->mnt_gen;
		VI_LOCK(coveredvp);
		vholdl(coveredvp);
		vn_lock(coveredvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY);
		vdrop(coveredvp);
		/*
		 * Check for mp being unmounted while waiting for the
		 * covered vnode lock.
		 */
		if (coveredvp->v_mountedhere != mp ||
		    coveredvp->v_mountedhere->mnt_gen != mnt_gen_r) {
			VOP_UNLOCK(coveredvp, 0);
			return (EBUSY);
		}
	}
	/*
	 * Only privileged root, or (if MNT_USER is set) the user that did the
	 * original mount is permitted to unmount this filesystem.
	 */
	error = vfs_suser(mp, td);
	if (error) {
		if (coveredvp)
			VOP_UNLOCK(coveredvp, 0);
		return (error);
	}

	MNT_ILOCK(mp);
	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		MNT_IUNLOCK(mp);
		if (coveredvp)
			VOP_UNLOCK(coveredvp, 0);
		return (EBUSY);
	}
	mp->mnt_kern_flag |= MNTK_UNMOUNT | MNTK_NOINSMNTQ;
	/* Allow filesystems to detect that a forced unmount is in progress. */
	if (flags & MNT_FORCE)
		mp->mnt_kern_flag |= MNTK_UNMOUNTF;
	error = 0;
	if (mp->mnt_lockref) {
		mp->mnt_kern_flag |= MNTK_DRAINING;
		error = msleep(&mp->mnt_lockref, MNT_MTX(mp), PVFS,
		    "mount drain", 0);
	}
	MNT_IUNLOCK(mp);
	KASSERT(mp->mnt_lockref == 0,
	    ("%s: invalid lock refcount in the drain path @ %s:%d",
	    __func__, __FILE__, __LINE__));
	KASSERT(error == 0,
	    ("%s: invalid return value for msleep in the drain path @ %s:%d",
	    __func__, __FILE__, __LINE__));
	vn_start_write(NULL, &mp, V_WAIT);

	if (mp->mnt_flag & MNT_EXPUBLIC)
		vfs_setpublicfs(NULL, NULL, NULL);

	vfs_msync(mp, MNT_WAIT);
	MNT_ILOCK(mp);
	async_flag = mp->mnt_flag & MNT_ASYNC;
	mp->mnt_flag &= ~MNT_ASYNC;
	mp->mnt_kern_flag &= ~MNTK_ASYNC;
	MNT_IUNLOCK(mp);
	cache_purgevfs(mp);	/* remove cache entries for this file sys */
	vfs_deallocate_syncvnode(mp);
	/*
	 * For forced unmounts, move process cdir/rdir refs on the fs root
	 * vnode to the covered vnode.  For non-forced unmounts we want
	 * such references to cause an EBUSY error.
	 */
	if ((flags & MNT_FORCE) &&
	    VFS_ROOT(mp, LK_EXCLUSIVE, &fsrootvp) == 0) {
		if (mp->mnt_vnodecovered != NULL)
			mountcheckdirs(fsrootvp, mp->mnt_vnodecovered);
		if (fsrootvp == rootvnode) {
			vrele(rootvnode);
			rootvnode = NULL;
		}
		vput(fsrootvp);
	}
	if (((mp->mnt_flag & MNT_RDONLY) ||
	    (error = VFS_SYNC(mp, MNT_WAIT)) == 0) || (flags & MNT_FORCE) != 0)
		error = VFS_UNMOUNT(mp, flags);
	vn_finished_write(mp);
	/*
	 * If we failed to flush the dirty blocks for this mount point,
	 * undo all the cdir/rdir and rootvnode changes we made above.
	 * Unless we failed to do so because the device is reporting that
	 * it doesn't exist anymore.
	 */
	if (error && error != ENXIO) {
		if ((flags & MNT_FORCE) &&
		    VFS_ROOT(mp, LK_EXCLUSIVE, &fsrootvp) == 0) {
			if (mp->mnt_vnodecovered != NULL)
				mountcheckdirs(mp->mnt_vnodecovered, fsrootvp);
			if (rootvnode == NULL) {
				rootvnode = fsrootvp;
				vref(rootvnode);
			}
			vput(fsrootvp);
		}
		MNT_ILOCK(mp);
		mp->mnt_kern_flag &= ~MNTK_NOINSMNTQ;
		if ((mp->mnt_flag & MNT_RDONLY) == 0) {
			MNT_IUNLOCK(mp);
			vfs_allocate_syncvnode(mp);
			MNT_ILOCK(mp);
		}
		mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
		mp->mnt_flag |= async_flag;
		if ((mp->mnt_flag & MNT_ASYNC) != 0 && mp->mnt_noasync == 0)
			mp->mnt_kern_flag |= MNTK_ASYNC;
		if (mp->mnt_kern_flag & MNTK_MWAIT) {
			mp->mnt_kern_flag &= ~MNTK_MWAIT;
			wakeup(mp);
		}
		MNT_IUNLOCK(mp);
		if (coveredvp)
			VOP_UNLOCK(coveredvp, 0);
		return (error);
	}
	mtx_lock(&mountlist_mtx);
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	mtx_unlock(&mountlist_mtx);
	if (coveredvp != NULL) {
		coveredvp->v_mountedhere = NULL;
		vput(coveredvp);
	}
	vfs_event_signal(NULL, VQ_UNMOUNT, 0);
	vfs_mount_destroy(mp);
	return (0);
}

/*
 * Report errors during filesystem mounting.
 */
void
vfs_mount_error(struct mount *mp, const char *fmt, ...)
{
	struct vfsoptlist *moptlist = mp->mnt_optnew;
	va_list ap;
	int error, len;
	char *errmsg;

	error = vfs_getopt(moptlist, "errmsg", (void **)&errmsg, &len);
	if (error || errmsg == NULL || len <= 0)
		return;

	va_start(ap, fmt);
	vsnprintf(errmsg, (size_t)len, fmt, ap);
	va_end(ap);
}

void
vfs_opterror(struct vfsoptlist *opts, const char *fmt, ...)
{
	va_list ap;
	int error, len;
	char *errmsg;

	error = vfs_getopt(opts, "errmsg", (void **)&errmsg, &len);
	if (error || errmsg == NULL || len <= 0)
		return;

	va_start(ap, fmt);
	vsnprintf(errmsg, (size_t)len, fmt, ap);
	va_end(ap);
}

/*
 * ---------------------------------------------------------------------
 * Functions for querying mount options/arguments from filesystems.
 */

/*
 * Check that no unknown options are given
 */
int
vfs_filteropt(struct vfsoptlist *opts, const char **legal)
{
	struct vfsopt *opt;
	char errmsg[255];
	const char **t, *p, *q;
	int ret = 0;

	TAILQ_FOREACH(opt, opts, link) {
		p = opt->name;
		q = NULL;
		if (p[0] == 'n' && p[1] == 'o')
			q = p + 2;
		for (t = global_opts; *t != NULL; t++) {
			if (strcmp(*t, p) == 0)
				break;
			if (q != NULL) {
				if (strcmp(*t, q) == 0)
					break;
			}
		}
		if (*t != NULL)
			continue;
		for (t = legal; *t != NULL; t++) {
			if (strcmp(*t, p) == 0)
				break;
			if (q != NULL) {
				if (strcmp(*t, q) == 0)
					break;
			}
		}
		if (*t != NULL)
			continue;
		snprintf(errmsg, sizeof(errmsg),
		    "mount option <%s> is unknown", p);
		ret = EINVAL;
	}
	if (ret != 0) {
		TAILQ_FOREACH(opt, opts, link) {
			if (strcmp(opt->name, "errmsg") == 0) {
				strncpy((char *)opt->value, errmsg, opt->len);
				break;
			}
		}
		if (opt == NULL)
			printf("%s\n", errmsg);
	}
	return (ret);
}
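
/*
 * Typical usage of vfs_filteropt() from a filesystem's VFS_MOUNT() routine;
 * the option table and function name below are illustrative only, not tied
 * to any particular filesystem:
 *
 *	static const char *example_opts[] = {
 *		"from", "export", "noatime", NULL
 *	};
 *
 *	static int
 *	example_mount(struct mount *mp)
 *	{
 *		int error;
 *
 *		error = vfs_filteropt(mp->mnt_optnew, example_opts);
 *		if (error)
 *			return (error);
 *		...
 *	}
 *
 * Options from global_opts are always accepted, so only per-filesystem
 * names need to be listed, and each is matched with or without a leading
 * "no" prefix.
 */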

/*
 * Get a mount option by its name.
 *
 * Return 0 if the option was found, ENOENT otherwise.
 * If len is non-NULL it will be filled with the length
 * of the option.  If buf is non-NULL, it will be filled
 * with the address of the option.
 */
int
vfs_getopt(opts, name, buf, len)
	struct vfsoptlist *opts;
	const char *name;
	void **buf;
	int *len;
{
	struct vfsopt *opt;

	KASSERT(opts != NULL, ("vfs_getopt: caller passed 'opts' as NULL"));

	TAILQ_FOREACH(opt, opts, link) {
		if (strcmp(name, opt->name) == 0) {
			opt->seen = 1;
			if (len != NULL)
				*len = opt->len;
			if (buf != NULL)
				*buf = opt->value;
			return (0);
		}
	}
	return (ENOENT);
}

int
vfs_getopt_pos(struct vfsoptlist *opts, const char *name)
{
	struct vfsopt *opt;

	if (opts == NULL)
		return (-1);

	TAILQ_FOREACH(opt, opts, link) {
		if (strcmp(name, opt->name) == 0) {
			opt->seen = 1;
			return (opt->pos);
		}
	}
	return (-1);
}

char *
vfs_getopts(struct vfsoptlist *opts, const char *name, int *error)
{
	struct vfsopt *opt;

	*error = 0;
	TAILQ_FOREACH(opt, opts, link) {
		if (strcmp(name, opt->name) != 0)
			continue;
		opt->seen = 1;
		if (opt->len == 0 ||
		    ((char *)opt->value)[opt->len - 1] != '\0') {
			*error = EINVAL;
			return (NULL);
		}
		return (opt->value);
	}
	*error = ENOENT;
	return (NULL);
}

int
vfs_flagopt(struct vfsoptlist *opts, const char *name, uint64_t *w,
    uint64_t val)
{
	struct vfsopt *opt;

	TAILQ_FOREACH(opt, opts, link) {
		if (strcmp(name, opt->name) == 0) {
			opt->seen = 1;
			if (w != NULL)
				*w |= val;
			return (1);
		}
	}
	if (w != NULL)
		*w &= ~val;
	return (0);
}

int
vfs_scanopt(struct vfsoptlist *opts, const char *name, const char *fmt, ...)
{
	va_list ap;
	struct vfsopt *opt;
	int ret;

	KASSERT(opts != NULL, ("vfs_scanopt: caller passed 'opts' as NULL"));

	TAILQ_FOREACH(opt, opts, link) {
		if (strcmp(name, opt->name) != 0)
			continue;
		opt->seen = 1;
		if (opt->len == 0 || opt->value == NULL)
			return (0);
		if (((char *)opt->value)[opt->len - 1] != '\0')
			return (0);
		va_start(ap, fmt);
		ret = vsscanf(opt->value, fmt, ap);
		va_end(ap);
		return (ret);
	}
	return (0);
}
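
/*
 * Taken together, the query functions above are typically used like this
 * in a filesystem's mount routine (the option names and variables here are
 * illustrative only):
 *
 *	char *from;
 *	uint64_t flags;
 *	int error, speed;
 *
 *	from = vfs_getopts(mp->mnt_optnew, "from", &error);
 *	if (error != 0)
 *		return (error);
 *	flags = 0;
 *	vfs_flagopt(mp->mnt_optnew, "union", &flags, MNT_UNION);
 *	if (vfs_scanopt(mp->mnt_optnew, "speed", "%d", &speed) != 1)
 *		speed = 0;
 *
 * vfs_getopts() guarantees a NUL-terminated string, vfs_flagopt() sets or
 * clears the given bit depending on the option's presence, and vfs_scanopt()
 * returns the vsscanf() conversion count.
 */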

int
vfs_setopt(struct vfsoptlist *opts, const char *name, void *value, int len)
{
	struct vfsopt *opt;

	TAILQ_FOREACH(opt, opts, link) {
		if (strcmp(name, opt->name) != 0)
			continue;
		opt->seen = 1;
		if (opt->value == NULL)
			opt->len = len;
		else {
			if (opt->len != len)
				return (EINVAL);
			bcopy(value, opt->value, len);
		}
		return (0);
	}
	return (ENOENT);
}

int
vfs_setopt_part(struct vfsoptlist *opts, const char *name, void *value, int len)
{
	struct vfsopt *opt;

	TAILQ_FOREACH(opt, opts, link) {
		if (strcmp(name, opt->name) != 0)
			continue;
		opt->seen = 1;
		if (opt->value == NULL)
			opt->len = len;
		else {
			if (opt->len < len)
				return (EINVAL);
			opt->len = len;
			bcopy(value, opt->value, len);
		}
		return (0);
	}
	return (ENOENT);
}

int
vfs_setopts(struct vfsoptlist *opts, const char *name, const char *value)
{
	struct vfsopt *opt;

	TAILQ_FOREACH(opt, opts, link) {
		if (strcmp(name, opt->name) != 0)
			continue;
		opt->seen = 1;
		if (opt->value == NULL)
			opt->len = strlen(value) + 1;
		else if (strlcpy(opt->value, value, opt->len) >= opt->len)
			return (EINVAL);
		return (0);
	}
	return (ENOENT);
}

/*
 * Find and copy a mount option.
 *
 * The size of the buffer has to be specified in len; if it is not the
 * same length as the mount option, EINVAL is returned.
 * Returns ENOENT if the option is not found.
 */
int
vfs_copyopt(opts, name, dest, len)
	struct vfsoptlist *opts;
	const char *name;
	void *dest;
	int len;
{
	struct vfsopt *opt;

	KASSERT(opts != NULL, ("vfs_copyopt: caller passed 'opts' as NULL"));

	TAILQ_FOREACH(opt, opts, link) {
		if (strcmp(name, opt->name) == 0) {
			opt->seen = 1;
			if (len != opt->len)
				return (EINVAL);
			bcopy(opt->value, dest, opt->len);
			return (0);
		}
	}
	return (ENOENT);
}

/*
 * This is a helper function for filesystems to traverse their
 * vnodes.  See MNT_VNODE_FOREACH() in sys/mount.h
 */

struct vnode *
__mnt_vnode_next(struct vnode **mvp, struct mount *mp)
{
	struct vnode *vp;

	mtx_assert(MNT_MTX(mp), MA_OWNED);

	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	if (should_yield()) {
		MNT_IUNLOCK(mp);
		kern_yield(PRI_UNCHANGED);
		MNT_ILOCK(mp);
	}
	vp = TAILQ_NEXT(*mvp, v_nmntvnodes);
	while (vp != NULL && vp->v_type == VMARKER)
		vp = TAILQ_NEXT(vp, v_nmntvnodes);

	/* Check if we are done */
	if (vp == NULL) {
		__mnt_vnode_markerfree(mvp, mp);
		return (NULL);
	}
	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
	return (vp);
}

struct vnode *
__mnt_vnode_first(struct vnode **mvp, struct mount *mp)
{
	struct vnode *vp;

	mtx_assert(MNT_MTX(mp), MA_OWNED);

	vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
	while (vp != NULL && vp->v_type == VMARKER)
		vp = TAILQ_NEXT(vp, v_nmntvnodes);

	/* Check if we are done */
	if (vp == NULL) {
		*mvp = NULL;
		return (NULL);
	}
	MNT_REF(mp);
	MNT_IUNLOCK(mp);
	*mvp = (struct vnode *)malloc(sizeof(struct vnode),
	    M_VNODE_MARKER, M_WAITOK | M_ZERO);
	MNT_ILOCK(mp);
	(*mvp)->v_type = VMARKER;

	vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
	while (vp != NULL && vp->v_type == VMARKER)
		vp = TAILQ_NEXT(vp, v_nmntvnodes);

	/* Check if we are done */
	if (vp == NULL) {
		MNT_IUNLOCK(mp);
		free(*mvp, M_VNODE_MARKER);
		MNT_ILOCK(mp);
		*mvp = NULL;
		MNT_REL(mp);
		return (NULL);
	}
	(*mvp)->v_mount = mp;
	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
	return (vp);
}

void
__mnt_vnode_markerfree(struct vnode **mvp, struct mount *mp)
{

	if (*mvp == NULL)
		return;

	mtx_assert(MNT_MTX(mp), MA_OWNED);

	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
	MNT_IUNLOCK(mp);
	free(*mvp, M_VNODE_MARKER);
	MNT_ILOCK(mp);
	*mvp = NULL;
	MNT_REL(mp);
}
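
/*
 * A sketch of the traversal these helpers support, via the
 * MNT_VNODE_FOREACH() macro from sys/mount.h; the per-vnode work is
 * illustrative only.  The mount interlock is held around the loop and is
 * dropped/reacquired by the helpers themselves when they need to sleep:
 *
 *	struct vnode *vp, *mvp;
 *
 *	MNT_ILOCK(mp);
 *	MNT_VNODE_FOREACH(vp, mp, mvp) {
 *		VI_LOCK(vp);
 *		... examine or flush vp ...
 *		VI_UNLOCK(vp);
 *	}
 *	MNT_IUNLOCK(mp);
 *
 * The marker vnode (mvp) keeps the iterator's place while the list is
 * unlocked, which is why __mnt_vnode_next() skips VMARKER entries.
 */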

int
__vfs_statfs(struct mount *mp, struct statfs *sbp)
{
	int error;

	error = mp->mnt_op->vfs_statfs(mp, &mp->mnt_stat);
	if (sbp != &mp->mnt_stat)
		*sbp = mp->mnt_stat;
	return (error);
}

void
vfs_mountedfrom(struct mount *mp, const char *from)
{

	bzero(mp->mnt_stat.f_mntfromname, sizeof mp->mnt_stat.f_mntfromname);
	strlcpy(mp->mnt_stat.f_mntfromname, from,
	    sizeof mp->mnt_stat.f_mntfromname);
}

/*
 * ---------------------------------------------------------------------
 * This is the api for building mount args and mounting filesystems from
 * inside the kernel.
 *
 * The API works by accumulation of individual args.  First error is
 * latched.
 *
 * XXX: should be documented in new manpage kernel_mount(9)
 */
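
/*
 * A hedged sketch of how kernel code typically consumes this API (the
 * filesystem, device and mount point names below are illustrative only):
 *
 *	struct mntarg *ma;
 *	int error;
 *
 *	ma = mount_arg(NULL, "fstype", "ufs", -1);
 *	ma = mount_arg(ma, "fspath", "/mnt", -1);
 *	ma = mount_argf(ma, "from", "/dev/%s", "ada0p2");
 *	ma = mount_argb(ma, 1, "noro");
 *	error = kernel_mount(ma, MNT_RDONLY);
 *
 * or, equivalently, in a single call:
 *
 *	error = kernel_vmount(MNT_RDONLY, "fstype", "ufs", "fspath", "/mnt",
 *	    "from", "/dev/ada0p2", NULL);
 *
 * Each mount_arg*() call appends a (name, value) iovec pair; the first
 * error is latched in ma->error, and kernel_mount() both reports it and
 * frees the mntarg structure.
 */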

/* A memory allocation which must be freed when we are done */
struct mntaarg {
	SLIST_ENTRY(mntaarg)	next;
};

/* The header for the mount arguments */
struct mntarg {
	struct iovec *v;
	int len;
	int error;
	SLIST_HEAD(, mntaarg)	list;
};

/*
 * Add a boolean argument.
 *
 * flag is the boolean value.
 * name must start with "no".
 */
struct mntarg *
mount_argb(struct mntarg *ma, int flag, const char *name)
{

	KASSERT(name[0] == 'n' && name[1] == 'o',
	    ("mount_argb(...,%s): name must start with 'no'", name));

	return (mount_arg(ma, name + (flag ? 2 : 0), NULL, 0));
}

/*
 * Add an argument printf style
 */
struct mntarg *
mount_argf(struct mntarg *ma, const char *name, const char *fmt, ...)
{
	va_list ap;
	struct mntaarg *maa;
	struct sbuf *sb;
	int len;

	if (ma == NULL) {
		ma = malloc(sizeof *ma, M_MOUNT, M_WAITOK | M_ZERO);
		SLIST_INIT(&ma->list);
	}
	if (ma->error)
		return (ma);

	ma->v = realloc(ma->v, sizeof *ma->v * (ma->len + 2),
	    M_MOUNT, M_WAITOK);
	ma->v[ma->len].iov_base = (void *)(uintptr_t)name;
	ma->v[ma->len].iov_len = strlen(name) + 1;
	ma->len++;

	sb = sbuf_new_auto();
	va_start(ap, fmt);
	sbuf_vprintf(sb, fmt, ap);
	va_end(ap);
	sbuf_finish(sb);
	len = sbuf_len(sb) + 1;
	maa = malloc(sizeof *maa + len, M_MOUNT, M_WAITOK | M_ZERO);
	SLIST_INSERT_HEAD(&ma->list, maa, next);
	bcopy(sbuf_data(sb), maa + 1, len);
	sbuf_delete(sb);

	ma->v[ma->len].iov_base = maa + 1;
	ma->v[ma->len].iov_len = len;
	ma->len++;

	return (ma);
}

/*
 * Add an argument which is a userland string.
 */
struct mntarg *
mount_argsu(struct mntarg *ma, const char *name, const void *val, int len)
{
	struct mntaarg *maa;
	char *tbuf;

	if (val == NULL)
		return (ma);
	if (ma == NULL) {
		ma = malloc(sizeof *ma, M_MOUNT, M_WAITOK | M_ZERO);
		SLIST_INIT(&ma->list);
	}
	if (ma->error)
		return (ma);
	maa = malloc(sizeof *maa + len, M_MOUNT, M_WAITOK | M_ZERO);
	SLIST_INSERT_HEAD(&ma->list, maa, next);
	tbuf = (void *)(maa + 1);
	ma->error = copyinstr(val, tbuf, len, NULL);
	return (mount_arg(ma, name, tbuf, -1));
}

/*
 * Plain argument.
 *
 * If length is -1, treat value as a C string.
 */
struct mntarg *
mount_arg(struct mntarg *ma, const char *name, const void *val, int len)
{

	if (ma == NULL) {
		ma = malloc(sizeof *ma, M_MOUNT, M_WAITOK | M_ZERO);
		SLIST_INIT(&ma->list);
	}
	if (ma->error)
		return (ma);

	ma->v = realloc(ma->v, sizeof *ma->v * (ma->len + 2),
	    M_MOUNT, M_WAITOK);
	ma->v[ma->len].iov_base = (void *)(uintptr_t)name;
	ma->v[ma->len].iov_len = strlen(name) + 1;
	ma->len++;

	ma->v[ma->len].iov_base = (void *)(uintptr_t)val;
	if (len < 0)
		ma->v[ma->len].iov_len = strlen(val) + 1;
	else
		ma->v[ma->len].iov_len = len;
	ma->len++;
	return (ma);
}

/*
 * Free a mntarg structure
 */
static void
free_mntarg(struct mntarg *ma)
{
	struct mntaarg *maa;

	while (!SLIST_EMPTY(&ma->list)) {
		maa = SLIST_FIRST(&ma->list);
		SLIST_REMOVE_HEAD(&ma->list, next);
		free(maa, M_MOUNT);
	}
	free(ma->v, M_MOUNT);
	free(ma, M_MOUNT);
}

/*
 * Mount a filesystem
 */
int
kernel_mount(struct mntarg *ma, int flags)
{
	struct uio auio;
	int error;

	KASSERT(ma != NULL, ("kernel_mount NULL ma"));
	KASSERT(ma->v != NULL, ("kernel_mount NULL ma->v"));
	KASSERT(!(ma->len & 1), ("kernel_mount odd ma->len (%d)", ma->len));

	auio.uio_iov = ma->v;
	auio.uio_iovcnt = ma->len;
	auio.uio_segflg = UIO_SYSSPACE;

	error = ma->error;
	if (!error)
		error = vfs_donmount(curthread, flags, &auio);
	free_mntarg(ma);
	return (error);
}

/*
 * A printflike function to mount a filesystem.
 */
int
kernel_vmount(int flags, ...)
{
	struct mntarg *ma = NULL;
	va_list ap;
	const char *cp;
	const void *vp;
	int error;

	va_start(ap, flags);
	for (;;) {
		cp = va_arg(ap, const char *);
		if (cp == NULL)
			break;
		vp = va_arg(ap, const void *);
		ma = mount_arg(ma, cp, vp, (vp != NULL ? -1 : 0));
	}
	va_end(ap);

	error = kernel_mount(ma, flags);
	return (error);
}

void
vfs_oexport_conv(const struct oexport_args *oexp, struct export_args *exp)
{

	bcopy(oexp, exp, sizeof(*oexp));
	exp->ex_numsecflavors = 0;
}