/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1999-2004 Poul-Henning Kamp
 * Copyright (c) 1999 Michael Smith
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/filedesc.h>
#include <sys/reboot.h>
#include <sys/sbuf.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <vm/uma.h>

#include <geom/geom.h>

#include <machine/stdarg.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#define	VFS_MOUNTARG_SIZE_MAX	(1024 * 64)

static int	vfs_domount(struct thread *td, const char *fstype, char *fspath,
		    uint64_t fsflags, struct vfsoptlist **optlist);
static void	free_mntarg(struct mntarg *ma);

static int	usermount = 0;
SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0,
    "Unprivileged users may mount and unmount file systems");

MALLOC_DEFINE(M_MOUNT, "mount", "vfs mount structure");
MALLOC_DEFINE(M_STATFS, "statfs", "statfs structure");
static uma_zone_t mount_zone;

/* List of mounted filesystems. */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);

/* For any iteration/modification of mountlist */
struct mtx mountlist_mtx;
MTX_SYSINIT(mountlist, &mountlist_mtx, "mountlist", MTX_DEF);

/*
 * Global opts, taken by all filesystems
 */
static const char *global_opts[] = {
	"errmsg",
	"fstype",
	"fspath",
	"ro",
	"rw",
	"nosuid",
	"noexec",
	NULL
};

static int
mount_init(void *mem, int size, int flags)
{
	struct mount *mp;

	mp = (struct mount *)mem;
	mtx_init(&mp->mnt_mtx, "struct mount mtx", NULL, MTX_DEF);
	mtx_init(&mp->mnt_listmtx, "struct mount vlist mtx", NULL, MTX_DEF);
	lockinit(&mp->mnt_explock, PVFS, "explock", 0, 0);
	return (0);
}

static void
mount_fini(void *mem, int size)
{
	struct mount *mp;

	mp = (struct mount *)mem;
	lockdestroy(&mp->mnt_explock);
	mtx_destroy(&mp->mnt_listmtx);
	mtx_destroy(&mp->mnt_mtx);
}

static void
vfs_mount_init(void *dummy __unused)
{

	mount_zone = uma_zcreate("Mountpoints", sizeof(struct mount), NULL,
	    NULL, mount_init, mount_fini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}
SYSINIT(vfs_mount, SI_SUB_VFS, SI_ORDER_ANY, vfs_mount_init, NULL);

/*
 * ---------------------------------------------------------------------
 * Functions for building and sanitizing the mount options
 */

/* Remove one mount option. */
static void
vfs_freeopt(struct vfsoptlist *opts, struct vfsopt *opt)
{

	TAILQ_REMOVE(opts, opt, link);
	free(opt->name, M_MOUNT);
	if (opt->value != NULL)
		free(opt->value, M_MOUNT);
	free(opt, M_MOUNT);
}

/* Release all resources related to the mount options. */
void
vfs_freeopts(struct vfsoptlist *opts)
{
	struct vfsopt *opt;

	while (!TAILQ_EMPTY(opts)) {
		opt = TAILQ_FIRST(opts);
		vfs_freeopt(opts, opt);
	}
	free(opts, M_MOUNT);
}

void
vfs_deleteopt(struct vfsoptlist *opts, const char *name)
{
	struct vfsopt *opt, *temp;

	if (opts == NULL)
		return;
	TAILQ_FOREACH_SAFE(opt, opts, link, temp) {
		if (strcmp(opt->name, name) == 0)
			vfs_freeopt(opts, opt);
	}
}

static int
vfs_isopt_ro(const char *opt)
{

	if (strcmp(opt, "ro") == 0 || strcmp(opt, "rdonly") == 0 ||
	    strcmp(opt, "norw") == 0)
		return (1);
	return (0);
}

static int
vfs_isopt_rw(const char *opt)
{

	if (strcmp(opt, "rw") == 0 || strcmp(opt, "noro") == 0)
		return (1);
	return (0);
}

/*
 * Check if options are equal (with or without the "no" prefix).
 */
static int
vfs_equalopts(const char *opt1, const char *opt2)
{
	char *p;

	/* "opt" vs. "opt" or "noopt" vs. "noopt" */
	if (strcmp(opt1, opt2) == 0)
		return (1);
	/* "noopt" vs. "opt" */
	if (strncmp(opt1, "no", 2) == 0 && strcmp(opt1 + 2, opt2) == 0)
		return (1);
	/* "opt" vs. "noopt" */
	if (strncmp(opt2, "no", 2) == 0 && strcmp(opt1, opt2 + 2) == 0)
		return (1);
	while ((p = strchr(opt1, '.')) != NULL &&
	    !strncmp(opt1, opt2, ++p - opt1)) {
		opt2 += p - opt1;
		opt1 = p;
		/* "foo.noopt" vs. "foo.opt" */
		if (strncmp(opt1, "no", 2) == 0 && strcmp(opt1 + 2, opt2) == 0)
			return (1);
		/* "foo.opt" vs. "foo.noopt" */
		if (strncmp(opt2, "no", 2) == 0 && strcmp(opt1, opt2 + 2) == 0)
			return (1);
	}
	/* "ro" / "rdonly" / "norw" / "rw" / "noro" */
	if ((vfs_isopt_ro(opt1) || vfs_isopt_rw(opt1)) &&
	    (vfs_isopt_ro(opt2) || vfs_isopt_rw(opt2)))
		return (1);
	return (0);
}

/*
 * If a mount option is specified several times,
 * (with or without the "no" prefix) only keep
 * the last occurrence of it.
 */
static void
vfs_sanitizeopts(struct vfsoptlist *opts)
{
	struct vfsopt *opt, *opt2, *tmp;

	TAILQ_FOREACH_REVERSE(opt, opts, vfsoptlist, link) {
		opt2 = TAILQ_PREV(opt, vfsoptlist, link);
		while (opt2 != NULL) {
			if (vfs_equalopts(opt->name, opt2->name)) {
				tmp = TAILQ_PREV(opt2, vfsoptlist, link);
				vfs_freeopt(opts, opt2);
				opt2 = tmp;
			} else {
				opt2 = TAILQ_PREV(opt2, vfsoptlist, link);
			}
		}
	}
}

/*
 * Build a linked list of mount options from a struct uio.
 */
int
vfs_buildopts(struct uio *auio, struct vfsoptlist **options)
{
	struct vfsoptlist *opts;
	struct vfsopt *opt;
	size_t memused, namelen, optlen;
	unsigned int i, iovcnt;
	int error;

	opts = malloc(sizeof(struct vfsoptlist), M_MOUNT, M_WAITOK);
	TAILQ_INIT(opts);
	memused = 0;
	iovcnt = auio->uio_iovcnt;
	for (i = 0; i < iovcnt; i += 2) {
		namelen = auio->uio_iov[i].iov_len;
		optlen = auio->uio_iov[i + 1].iov_len;
		memused += sizeof(struct vfsopt) + optlen + namelen;
		/*
		 * Avoid consuming too much memory, and attempts to overflow
		 * memused.
		 */
		if (memused > VFS_MOUNTARG_SIZE_MAX ||
		    optlen > VFS_MOUNTARG_SIZE_MAX ||
		    namelen > VFS_MOUNTARG_SIZE_MAX) {
			error = EINVAL;
			goto bad;
		}

		opt = malloc(sizeof(struct vfsopt), M_MOUNT, M_WAITOK);
		opt->name = malloc(namelen, M_MOUNT, M_WAITOK);
		opt->value = NULL;
		opt->len = 0;
		opt->pos = i / 2;
		opt->seen = 0;

		/*
		 * Do this early, so jumps to "bad" will free the current
		 * option.
		 */
		TAILQ_INSERT_TAIL(opts, opt, link);

		if (auio->uio_segflg == UIO_SYSSPACE) {
			bcopy(auio->uio_iov[i].iov_base, opt->name, namelen);
		} else {
			error = copyin(auio->uio_iov[i].iov_base, opt->name,
			    namelen);
			if (error)
				goto bad;
		}
		/* Ensure names are null-terminated strings. */
		if (namelen == 0 || opt->name[namelen - 1] != '\0') {
			error = EINVAL;
			goto bad;
		}
		if (optlen != 0) {
			opt->len = optlen;
			opt->value = malloc(optlen, M_MOUNT, M_WAITOK);
			if (auio->uio_segflg == UIO_SYSSPACE) {
				bcopy(auio->uio_iov[i + 1].iov_base, opt->value,
				    optlen);
			} else {
				error = copyin(auio->uio_iov[i + 1].iov_base,
				    opt->value, optlen);
				if (error)
					goto bad;
			}
		}
	}
	vfs_sanitizeopts(opts);
	*options = opts;
	return (0);
bad:
	vfs_freeopts(opts);
	return (error);
}

/*
 * Merge the old mount options with the new ones passed
 * in the MNT_UPDATE case.
 *
 * XXX: This function will keep a "nofoo" option in the new
 * options.  E.g., if the option's canonical name is "foo",
 * "nofoo" ends up in the mount point's active options.
 */
static void
vfs_mergeopts(struct vfsoptlist *toopts, struct vfsoptlist *oldopts)
{
	struct vfsopt *opt, *new;

	TAILQ_FOREACH(opt, oldopts, link) {
		new = malloc(sizeof(struct vfsopt), M_MOUNT, M_WAITOK);
		new->name = strdup(opt->name, M_MOUNT);
		if (opt->len != 0) {
			new->value = malloc(opt->len, M_MOUNT, M_WAITOK);
			bcopy(opt->value, new->value, opt->len);
		} else
			new->value = NULL;
		new->len = opt->len;
		new->seen = opt->seen;
		TAILQ_INSERT_HEAD(toopts, new, link);
	}
	vfs_sanitizeopts(toopts);
}

/*
 * Mount a filesystem.
 */
#ifndef _SYS_SYSPROTO_H_
struct nmount_args {
	struct iovec	*iovp;
	unsigned int	iovcnt;
	int		flags;
};
#endif
int
sys_nmount(struct thread *td, struct nmount_args *uap)
{
	struct uio *auio;
	int error;
	u_int iovcnt;
	uint64_t flags;

	/*
	 * Mount flags are now 64-bits. On 32-bit architectures only
	 * 32-bits are passed in, but from here on everything handles
	 * 64-bit flags correctly.
	 */
	flags = uap->flags;

	AUDIT_ARG_FFLAGS(flags);
	CTR4(KTR_VFS, "%s: iovp %p with iovcnt %d and flags %d", __func__,
	    uap->iovp, uap->iovcnt, flags);

	/*
	 * Filter out MNT_ROOTFS.  We do not want clients of nmount() in
	 * userspace to set this flag, but we must filter it out if we want
	 * MNT_UPDATE on the root file system to work.
	 * MNT_ROOTFS should only be set by the kernel when mounting its
	 * root file system.
	 */
	flags &= ~MNT_ROOTFS;

	iovcnt = uap->iovcnt;
	/*
	 * Check that we have an even number of iovec's
	 * and that we have at least two options.
	 */
	if ((iovcnt & 1) || (iovcnt < 4)) {
		CTR2(KTR_VFS, "%s: failed for invalid iovcnt %d", __func__,
		    uap->iovcnt);
		return (EINVAL);
	}

	error = copyinuio(uap->iovp, iovcnt, &auio);
	if (error) {
		CTR2(KTR_VFS, "%s: failed for invalid uio op with %d errno",
		    __func__, error);
		return (error);
	}
	error = vfs_donmount(td, flags, auio);

	free(auio, M_IOV);
	return (error);
}

/*
 * ---------------------------------------------------------------------
 * Various utility functions
 */

void
vfs_ref(struct mount *mp)
{

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	MNT_ILOCK(mp);
	MNT_REF(mp);
	MNT_IUNLOCK(mp);
}

void
vfs_rel(struct mount *mp)
{

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	MNT_ILOCK(mp);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
}

/*
 * Allocate and initialize the mount point struct.
 */
struct mount *
vfs_mount_alloc(struct vnode *vp, struct vfsconf *vfsp, const char *fspath,
    struct ucred *cred)
{
	struct mount *mp;

	mp = uma_zalloc(mount_zone, M_WAITOK);
	bzero(&mp->mnt_startzero,
	    __rangeof(struct mount, mnt_startzero, mnt_endzero));
	TAILQ_INIT(&mp->mnt_nvnodelist);
	mp->mnt_nvnodelistsize = 0;
	TAILQ_INIT(&mp->mnt_activevnodelist);
	mp->mnt_activevnodelistsize = 0;
	TAILQ_INIT(&mp->mnt_tmpfreevnodelist);
	mp->mnt_tmpfreevnodelistsize = 0;
	mp->mnt_ref = 0;
	(void) vfs_busy(mp, MBF_NOWAIT);
	atomic_add_acq_int(&vfsp->vfc_refcount, 1);
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_vfc = vfsp;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_gen++;
	strlcpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_vnodecovered = vp;
	mp->mnt_cred = crdup(cred);
	mp->mnt_stat.f_owner = cred->cr_uid;
	strlcpy(mp->mnt_stat.f_mntonname, fspath, MNAMELEN);
	mp->mnt_iosize_max = DFLTPHYS;
#ifdef MAC
	mac_mount_init(mp);
	mac_mount_create(cred, mp);
#endif
	arc4rand(&mp->mnt_hashseed, sizeof mp->mnt_hashseed, 0);
	TAILQ_INIT(&mp->mnt_uppers);
	return (mp);
}

/*
 * Destroy the mount struct previously allocated by vfs_mount_alloc().
 */
void
vfs_mount_destroy(struct mount *mp)
{

	MNT_ILOCK(mp);
	mp->mnt_kern_flag |= MNTK_REFEXPIRE;
	if (mp->mnt_kern_flag & MNTK_MWAIT) {
		mp->mnt_kern_flag &= ~MNTK_MWAIT;
		wakeup(mp);
	}
	while (mp->mnt_ref)
		msleep(mp, MNT_MTX(mp), PVFS, "mntref", 0);
	KASSERT(mp->mnt_ref == 0,
	    ("%s: invalid refcount in the drain path @ %s:%d", __func__,
	    __FILE__, __LINE__));
	if (mp->mnt_writeopcount != 0)
		panic("vfs_mount_destroy: nonzero writeopcount");
	if (mp->mnt_secondary_writes != 0)
		panic("vfs_mount_destroy: nonzero secondary_writes");
	atomic_subtract_rel_int(&mp->mnt_vfc->vfc_refcount, 1);
	if (!TAILQ_EMPTY(&mp->mnt_nvnodelist)) {
		struct vnode *vp;

		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes)
			vn_printf(vp, "dangling vnode ");
		panic("unmount: dangling vnode");
	}
	KASSERT(TAILQ_EMPTY(&mp->mnt_uppers), ("mnt_uppers"));
	if (mp->mnt_nvnodelistsize != 0)
		panic("vfs_mount_destroy: nonzero nvnodelistsize");
	if (mp->mnt_activevnodelistsize != 0)
		panic("vfs_mount_destroy: nonzero activevnodelistsize");
	if (mp->mnt_tmpfreevnodelistsize != 0)
		panic("vfs_mount_destroy: nonzero tmpfreevnodelistsize");
	if (mp->mnt_lockref != 0)
		panic("vfs_mount_destroy: nonzero lock refcount");
	MNT_IUNLOCK(mp);
	if (mp->mnt_vnodecovered != NULL)
		vrele(mp->mnt_vnodecovered);
#ifdef MAC
	mac_mount_destroy(mp);
#endif
	if (mp->mnt_opt != NULL)
		vfs_freeopts(mp->mnt_opt);
	crfree(mp->mnt_cred);
	uma_zfree(mount_zone, mp);
}

int
vfs_donmount(struct thread *td, uint64_t fsflags, struct uio *fsoptions)
{
	struct vfsoptlist *optlist;
	struct vfsopt *opt, *tmp_opt;
	char *fstype, *fspath, *errmsg;
	int error, fstypelen, fspathlen, errmsg_len, errmsg_pos;

	errmsg = fspath = NULL;
	errmsg_len = fspathlen = 0;
	errmsg_pos = -1;

	error = vfs_buildopts(fsoptions, &optlist);
	if (error)
		return (error);

	if (vfs_getopt(optlist, "errmsg", (void **)&errmsg, &errmsg_len) == 0)
		errmsg_pos = vfs_getopt_pos(optlist, "errmsg");

	/*
	 * We need these two options before the others,
	 * and they are mandatory for any filesystem.
	 * Ensure they are NUL terminated as well.
	 */
	fstypelen = 0;
	error = vfs_getopt(optlist, "fstype", (void **)&fstype, &fstypelen);
	if (error || fstype[fstypelen - 1] != '\0') {
		error = EINVAL;
		if (errmsg != NULL)
			strncpy(errmsg, "Invalid fstype", errmsg_len);
		goto bail;
	}
	fspathlen = 0;
	error = vfs_getopt(optlist, "fspath", (void **)&fspath, &fspathlen);
	if (error || fspath[fspathlen - 1] != '\0') {
		error = EINVAL;
		if (errmsg != NULL)
			strncpy(errmsg, "Invalid fspath", errmsg_len);
		goto bail;
	}

	/*
	 * We need to see if we have the "update" option
	 * before we call vfs_domount(), since vfs_domount() has special
	 * logic based on MNT_UPDATE.  This is very important
	 * when we want to update the root filesystem.
	 */
	TAILQ_FOREACH_SAFE(opt, optlist, link, tmp_opt) {
		if (strcmp(opt->name, "update") == 0) {
			fsflags |= MNT_UPDATE;
			vfs_freeopt(optlist, opt);
		}
		else if (strcmp(opt->name, "async") == 0)
			fsflags |= MNT_ASYNC;
		else if (strcmp(opt->name, "force") == 0) {
			fsflags |= MNT_FORCE;
			vfs_freeopt(optlist, opt);
		}
		else if (strcmp(opt->name, "reload") == 0) {
			fsflags |= MNT_RELOAD;
			vfs_freeopt(optlist, opt);
		}
		else if (strcmp(opt->name, "multilabel") == 0)
			fsflags |= MNT_MULTILABEL;
		else if (strcmp(opt->name, "noasync") == 0)
			fsflags &= ~MNT_ASYNC;
		else if (strcmp(opt->name, "noatime") == 0)
			fsflags |= MNT_NOATIME;
		else if (strcmp(opt->name, "atime") == 0) {
			free(opt->name, M_MOUNT);
			opt->name = strdup("nonoatime", M_MOUNT);
		}
		else if (strcmp(opt->name, "noclusterr") == 0)
			fsflags |= MNT_NOCLUSTERR;
		else if (strcmp(opt->name, "clusterr") == 0) {
			free(opt->name, M_MOUNT);
			opt->name = strdup("nonoclusterr", M_MOUNT);
		}
		else if (strcmp(opt->name, "noclusterw") == 0)
			fsflags |= MNT_NOCLUSTERW;
		else if (strcmp(opt->name, "clusterw") == 0) {
			free(opt->name, M_MOUNT);
			opt->name = strdup("nonoclusterw", M_MOUNT);
		}
		else if (strcmp(opt->name, "noexec") == 0)
			fsflags |= MNT_NOEXEC;
		else if (strcmp(opt->name, "exec") == 0) {
			free(opt->name, M_MOUNT);
			opt->name = strdup("nonoexec", M_MOUNT);
		}
		else if (strcmp(opt->name, "nosuid") == 0)
			fsflags |= MNT_NOSUID;
		else if (strcmp(opt->name, "suid") == 0) {
			free(opt->name, M_MOUNT);
			opt->name = strdup("nonosuid", M_MOUNT);
		}
		else if (strcmp(opt->name, "nosymfollow") == 0)
			fsflags |= MNT_NOSYMFOLLOW;
		else if (strcmp(opt->name, "symfollow") == 0) {
			free(opt->name, M_MOUNT);
			opt->name = strdup("nonosymfollow", M_MOUNT);
		}
		else if (strcmp(opt->name, "noro") == 0)
			fsflags &= ~MNT_RDONLY;
		else if (strcmp(opt->name, "rw") == 0)
			fsflags &= ~MNT_RDONLY;
		else if (strcmp(opt->name, "ro") == 0)
			fsflags |= MNT_RDONLY;
		else if (strcmp(opt->name, "rdonly") == 0) {
			free(opt->name, M_MOUNT);
			opt->name = strdup("ro", M_MOUNT);
			fsflags |= MNT_RDONLY;
		}
		else if (strcmp(opt->name, "suiddir") == 0)
			fsflags |= MNT_SUIDDIR;
		else if (strcmp(opt->name, "sync") == 0)
			fsflags |= MNT_SYNCHRONOUS;
		else if (strcmp(opt->name, "union") == 0)
			fsflags |= MNT_UNION;
		else if (strcmp(opt->name, "automounted") == 0) {
			fsflags |= MNT_AUTOMOUNTED;
			vfs_freeopt(optlist, opt);
		}
	}

	/*
	 * Be ultra-paranoid about making sure the type and fspath
	 * variables will fit in our mp buffers, including the
	 * terminating NUL.
	 */
	if (fstypelen > MFSNAMELEN || fspathlen > MNAMELEN) {
		error = ENAMETOOLONG;
		goto bail;
	}

	error = vfs_domount(td, fstype, fspath, fsflags, &optlist);
bail:
	/* copyout the errmsg */
	if (errmsg_pos != -1 && ((2 * errmsg_pos + 1) < fsoptions->uio_iovcnt)
	    && errmsg_len > 0 && errmsg != NULL) {
		if (fsoptions->uio_segflg == UIO_SYSSPACE) {
			bcopy(errmsg,
			    fsoptions->uio_iov[2 * errmsg_pos + 1].iov_base,
			    fsoptions->uio_iov[2 * errmsg_pos + 1].iov_len);
		} else {
			copyout(errmsg,
			    fsoptions->uio_iov[2 * errmsg_pos + 1].iov_base,
			    fsoptions->uio_iov[2 * errmsg_pos + 1].iov_len);
		}
	}

	if (optlist != NULL)
		vfs_freeopts(optlist);
	return (error);
}
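
/*
 * Sketch (not part of the original source): a caller that wants a textual
 * error back from vfs_donmount() passes a writable "errmsg" option; the
 * buffer is filled via strncpy()/vfs_mount_error() and copied back out
 * above on failure.  From userland this might look like:
 *
 *	char errmsg[255] = "";
 *
 *	build_iovec(&iov, &iovlen, "errmsg", errmsg, sizeof(errmsg));
 *	if (nmount(iov, iovlen, 0) == -1 && errmsg[0] != '\0')
 *		errx(1, "mount failed: %s", errmsg);
 *
 * build_iovec() is the libutil helper used by mount(8); it is shown here
 * only for illustration.
 */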

/*
 * Old mount API.
 */
#ifndef _SYS_SYSPROTO_H_
struct mount_args {
	char	*type;
	char	*path;
	int	flags;
	caddr_t	data;
};
#endif
/* ARGSUSED */
int
sys_mount(struct thread *td, struct mount_args *uap)
{
	char *fstype;
	struct vfsconf *vfsp = NULL;
	struct mntarg *ma = NULL;
	uint64_t flags;
	int error;

	/*
	 * Mount flags are now 64-bits. On 32-bit architectures only
	 * 32-bits are passed in, but from here on everything handles
	 * 64-bit flags correctly.
	 */
	flags = uap->flags;

	AUDIT_ARG_FFLAGS(flags);

	/*
	 * Filter out MNT_ROOTFS.  We do not want clients of mount() in
	 * userspace to set this flag, but we must filter it out if we want
	 * MNT_UPDATE on the root file system to work.
	 * MNT_ROOTFS should only be set by the kernel when mounting its
	 * root file system.
	 */
	flags &= ~MNT_ROOTFS;

	fstype = malloc(MFSNAMELEN, M_TEMP, M_WAITOK);
	error = copyinstr(uap->type, fstype, MFSNAMELEN, NULL);
	if (error) {
		free(fstype, M_TEMP);
		return (error);
	}

	AUDIT_ARG_TEXT(fstype);
	vfsp = vfs_byname_kld(fstype, td, &error);
	free(fstype, M_TEMP);
	if (vfsp == NULL)
		return (ENOENT);
	if (vfsp->vfc_vfsops->vfs_cmount == NULL)
		return (EOPNOTSUPP);

	ma = mount_argsu(ma, "fstype", uap->type, MFSNAMELEN);
	ma = mount_argsu(ma, "fspath", uap->path, MNAMELEN);
	ma = mount_argb(ma, flags & MNT_RDONLY, "noro");
	ma = mount_argb(ma, !(flags & MNT_NOSUID), "nosuid");
	ma = mount_argb(ma, !(flags & MNT_NOEXEC), "noexec");

	error = vfsp->vfc_vfsops->vfs_cmount(ma, uap->data, flags);
	return (error);
}
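
/*
 * Sketch (not part of the original source): a filesystem's vfs_cmount
 * routine receives the mntarg list built above, translates its legacy
 * mount(2) argument structure into additional name/value options and
 * hands the result to kernel_mount().  A minimal hypothetical example:
 *
 *	static int
 *	examplefs_cmount(struct mntarg *ma, void *data, uint64_t flags)
 *	{
 *		struct examplefs_args args;
 *		int error;
 *
 *		error = copyin(data, &args, sizeof(args));
 *		if (error)
 *			return (error);
 *		ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN);
 *		return (kernel_mount(ma, flags));
 *	}
 */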

/*
 * vfs_domount_first(): first file system mount (not update)
 */
static int
vfs_domount_first(
	struct thread *td,		/* Calling thread. */
	struct vfsconf *vfsp,		/* File system type. */
	char *fspath,			/* Mount path. */
	struct vnode *vp,		/* Vnode to be covered. */
	uint64_t fsflags,		/* Flags common to all filesystems. */
	struct vfsoptlist **optlist	/* Options local to the filesystem. */
	)
{
	struct vattr va;
	struct mount *mp;
	struct vnode *newdp;
	int error;

	ASSERT_VOP_ELOCKED(vp, __func__);
	KASSERT((fsflags & MNT_UPDATE) == 0, ("MNT_UPDATE shouldn't be here"));

	/*
	 * If the user is not root, ensure that they own the directory
	 * onto which we are attempting to mount.
	 */
	error = VOP_GETATTR(vp, &va, td->td_ucred);
	if (error == 0 && va.va_uid != td->td_ucred->cr_uid)
		error = priv_check_cred(td->td_ucred, PRIV_VFS_ADMIN, 0);
	if (error == 0)
		error = vinvalbuf(vp, V_SAVE, 0, 0);
	if (error == 0 && vp->v_type != VDIR)
		error = ENOTDIR;
	if (error == 0) {
		VI_LOCK(vp);
		if ((vp->v_iflag & VI_MOUNT) == 0 && vp->v_mountedhere == NULL)
			vp->v_iflag |= VI_MOUNT;
		else
			error = EBUSY;
		VI_UNLOCK(vp);
	}
	if (error != 0) {
		vput(vp);
		return (error);
	}
	VOP_UNLOCK(vp, 0);

	/* Allocate and initialize the filesystem. */
	mp = vfs_mount_alloc(vp, vfsp, fspath, td->td_ucred);
	/* XXXMAC: pass to vfs_mount_alloc? */
	mp->mnt_optnew = *optlist;
	/* Set the mount level flags. */
	mp->mnt_flag = (fsflags & (MNT_UPDATEMASK | MNT_ROOTFS | MNT_RDONLY));

	/*
	 * Mount the filesystem.
	 * XXX The final recipients of VFS_MOUNT just overwrite the ndp they
	 * get.  No freeing of cn_pnbuf.
	 */
	error = VFS_MOUNT(mp);
	if (error != 0) {
		vfs_unbusy(mp);
		mp->mnt_vnodecovered = NULL;
		vfs_mount_destroy(mp);
		VI_LOCK(vp);
		vp->v_iflag &= ~VI_MOUNT;
		VI_UNLOCK(vp);
		vrele(vp);
		return (error);
	}

	if (mp->mnt_opt != NULL)
		vfs_freeopts(mp->mnt_opt);
	mp->mnt_opt = mp->mnt_optnew;
	*optlist = NULL;
	(void)VFS_STATFS(mp, &mp->mnt_stat);

	/*
	 * Prevent external consumers of mount options from reading mnt_optnew.
	 */
	mp->mnt_optnew = NULL;

	MNT_ILOCK(mp);
	if ((mp->mnt_flag & MNT_ASYNC) != 0 &&
	    (mp->mnt_kern_flag & MNTK_NOASYNC) == 0)
		mp->mnt_kern_flag |= MNTK_ASYNC;
	else
		mp->mnt_kern_flag &= ~MNTK_ASYNC;
	MNT_IUNLOCK(mp);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	cache_purge(vp);
	VI_LOCK(vp);
	vp->v_iflag &= ~VI_MOUNT;
	VI_UNLOCK(vp);
	vp->v_mountedhere = mp;
	/* Place the new filesystem at the end of the mount list. */
	mtx_lock(&mountlist_mtx);
	TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	mtx_unlock(&mountlist_mtx);
	vfs_event_signal(NULL, VQ_MOUNT, 0);
	if (VFS_ROOT(mp, LK_EXCLUSIVE, &newdp))
		panic("mount: lost mount");
	VOP_UNLOCK(vp, 0);
	EVENTHANDLER_INVOKE(vfs_mounted, mp, newdp, td);
	VOP_UNLOCK(newdp, 0);
	mountcheckdirs(vp, newdp);
	vrele(newdp);
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		vfs_allocate_syncvnode(mp);
	vfs_unbusy(mp);
	return (0);
}

/*
 * vfs_domount_update(): update of mounted file system
 */
static int
vfs_domount_update(
	struct thread *td,		/* Calling thread. */
	struct vnode *vp,		/* Mount point vnode. */
	uint64_t fsflags,		/* Flags common to all filesystems. */
	struct vfsoptlist **optlist	/* Options local to the filesystem. */
	)
{
	struct export_args export;
	void *bufp;
	struct mount *mp;
	int error, export_error, len;
	uint64_t flag;

	ASSERT_VOP_ELOCKED(vp, __func__);
	KASSERT((fsflags & MNT_UPDATE) != 0, ("MNT_UPDATE should be here"));
	mp = vp->v_mount;

	if ((vp->v_vflag & VV_ROOT) == 0) {
		if (vfs_copyopt(*optlist, "export", &export, sizeof(export))
		    == 0)
			error = EXDEV;
		else
			error = EINVAL;
		vput(vp);
		return (error);
	}

	/*
	 * We only allow the filesystem to be reloaded if it
	 * is currently mounted read-only.
	 */
	flag = mp->mnt_flag;
	if ((fsflags & MNT_RELOAD) != 0 && (flag & MNT_RDONLY) == 0) {
		vput(vp);
		return (EOPNOTSUPP);	/* Needs translation */
	}
	/*
	 * Only privileged root, or (if MNT_USER is set) the user that
	 * did the original mount is permitted to update it.
	 */
	error = vfs_suser(mp, td);
	if (error != 0) {
		vput(vp);
		return (error);
	}
	if (vfs_busy(mp, MBF_NOWAIT)) {
		vput(vp);
		return (EBUSY);
	}
	VI_LOCK(vp);
	if ((vp->v_iflag & VI_MOUNT) != 0 || vp->v_mountedhere != NULL) {
		VI_UNLOCK(vp);
		vfs_unbusy(mp);
		vput(vp);
		return (EBUSY);
	}
	vp->v_iflag |= VI_MOUNT;
	VI_UNLOCK(vp);
	VOP_UNLOCK(vp, 0);

	MNT_ILOCK(mp);
	if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) {
		MNT_IUNLOCK(mp);
		error = EBUSY;
		goto end;
	}
	mp->mnt_flag &= ~MNT_UPDATEMASK;
	mp->mnt_flag |= fsflags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE |
	    MNT_SNAPSHOT | MNT_ROOTFS | MNT_UPDATEMASK | MNT_RDONLY);
	if ((mp->mnt_flag & MNT_ASYNC) == 0)
		mp->mnt_kern_flag &= ~MNTK_ASYNC;
	MNT_IUNLOCK(mp);
	mp->mnt_optnew = *optlist;
	vfs_mergeopts(mp->mnt_optnew, mp->mnt_opt);

	/*
	 * Mount the filesystem.
	 * XXX The final recipients of VFS_MOUNT just overwrite the ndp they
	 * get.  No freeing of cn_pnbuf.
	 */
	error = VFS_MOUNT(mp);

	export_error = 0;
	/* Process the export option. */
	if (error == 0 && vfs_getopt(mp->mnt_optnew, "export", &bufp,
	    &len) == 0) {
		/* Assume that there is only 1 ABI for each length. */
		switch (len) {
		case (sizeof(struct oexport_args)):
			bzero(&export, sizeof(export));
			/* FALLTHROUGH */
		case (sizeof(export)):
			bcopy(bufp, &export, len);
			export_error = vfs_export(mp, &export);
			break;
		default:
			export_error = EINVAL;
			break;
		}
	}

	MNT_ILOCK(mp);
	if (error == 0) {
		mp->mnt_flag &= ~(MNT_UPDATE | MNT_RELOAD | MNT_FORCE |
		    MNT_SNAPSHOT);
	} else {
		/*
		 * If we fail, restore old mount flags. MNT_QUOTA is special,
		 * because it is not part of MNT_UPDATEMASK, but it could have
		 * changed in the meantime if quotactl(2) was called.
		 * All in all we want current value of MNT_QUOTA, not the old
		 * one.
		 */
		mp->mnt_flag = (mp->mnt_flag & MNT_QUOTA) | (flag & ~MNT_QUOTA);
	}
	if ((mp->mnt_flag & MNT_ASYNC) != 0 &&
	    (mp->mnt_kern_flag & MNTK_NOASYNC) == 0)
		mp->mnt_kern_flag |= MNTK_ASYNC;
	else
		mp->mnt_kern_flag &= ~MNTK_ASYNC;
	MNT_IUNLOCK(mp);

	if (error != 0)
		goto end;

	if (mp->mnt_opt != NULL)
		vfs_freeopts(mp->mnt_opt);
	mp->mnt_opt = mp->mnt_optnew;
	*optlist = NULL;
	(void)VFS_STATFS(mp, &mp->mnt_stat);
	/*
	 * Prevent external consumers of mount options from reading
	 * mnt_optnew.
	 */
	mp->mnt_optnew = NULL;

	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		vfs_allocate_syncvnode(mp);
	else
		vfs_deallocate_syncvnode(mp);
end:
	vfs_unbusy(mp);
	VI_LOCK(vp);
	vp->v_iflag &= ~VI_MOUNT;
	VI_UNLOCK(vp);
	vrele(vp);
	return (error != 0 ? error : export_error);
}
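
/*
 * Illustrative note (not part of the original source): an update mount
 * reaches vfs_domount_update() when the caller includes the "update"
 * option, e.g. remounting the root filesystem read/write during boot:
 *
 *	iov[] = { "fstype", "ufs", "fspath", "/", "update", "", "rw", "" }
 *
 * which is roughly what "mount -u -o rw /" issues via nmount(2).
 */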

/*
 * vfs_domount(): actually attempt a filesystem mount.
 */
static int
vfs_domount(
	struct thread *td,		/* Calling thread. */
	const char *fstype,		/* Filesystem type. */
	char *fspath,			/* Mount path. */
	uint64_t fsflags,		/* Flags common to all filesystems. */
	struct vfsoptlist **optlist	/* Options local to the filesystem. */
	)
{
	struct vfsconf *vfsp;
	struct nameidata nd;
	struct vnode *vp;
	char *pathbuf;
	int error;

	/*
	 * Be ultra-paranoid about making sure the type and fspath
	 * variables will fit in our mp buffers, including the
	 * terminating NUL.
	 */
	if (strlen(fstype) >= MFSNAMELEN || strlen(fspath) >= MNAMELEN)
		return (ENAMETOOLONG);

	if (jailed(td->td_ucred) || usermount == 0) {
		if ((error = priv_check(td, PRIV_VFS_MOUNT)) != 0)
			return (error);
	}

	/*
	 * Do not allow NFS export or MNT_SUIDDIR by unprivileged users.
	 */
	if (fsflags & MNT_EXPORTED) {
		error = priv_check(td, PRIV_VFS_MOUNT_EXPORTED);
		if (error)
			return (error);
	}
	if (fsflags & MNT_SUIDDIR) {
		error = priv_check(td, PRIV_VFS_MOUNT_SUIDDIR);
		if (error)
			return (error);
	}
	/*
	 * Silently enforce MNT_NOSUID and MNT_USER for unprivileged users.
	 */
	if ((fsflags & (MNT_NOSUID | MNT_USER)) != (MNT_NOSUID | MNT_USER)) {
		if (priv_check(td, PRIV_VFS_MOUNT_NONUSER) != 0)
			fsflags |= MNT_NOSUID | MNT_USER;
	}

	/* Load KLDs before we lock the covered vnode to avoid reversals. */
	vfsp = NULL;
	if ((fsflags & MNT_UPDATE) == 0) {
		/* Don't try to load KLDs if we're mounting the root. */
		if (fsflags & MNT_ROOTFS)
			vfsp = vfs_byname(fstype);
		else
			vfsp = vfs_byname_kld(fstype, td, &error);
		if (vfsp == NULL)
			return (ENODEV);
		if (jailed(td->td_ucred) && !(vfsp->vfc_flags & VFCF_JAIL))
			return (EPERM);
	}

	/*
	 * Get vnode to be covered or mount point's vnode in case of MNT_UPDATE.
	 */
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1,
	    UIO_SYSSPACE, fspath, td);
	error = namei(&nd);
	if (error != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vp = nd.ni_vp;
	if ((fsflags & MNT_UPDATE) == 0) {
		pathbuf = malloc(MNAMELEN, M_TEMP, M_WAITOK);
		strcpy(pathbuf, fspath);
		error = vn_path_to_global_path(td, vp, pathbuf, MNAMELEN);
		/* debug.disablefullpath == 1 results in ENODEV */
		if (error == 0 || error == ENODEV) {
			error = vfs_domount_first(td, vfsp, pathbuf, vp,
			    fsflags, optlist);
		}
		free(pathbuf, M_TEMP);
	} else
		error = vfs_domount_update(td, vp, fsflags, optlist);

	return (error);
}

/*
 * Unmount a filesystem.
 *
 * Note: unmount takes a path to the vnode mounted on as argument, not
 * special file (as before).
 */
#ifndef _SYS_SYSPROTO_H_
struct unmount_args {
	char	*path;
	int	flags;
};
#endif
/* ARGSUSED */
int
sys_unmount(struct thread *td, struct unmount_args *uap)
{
	struct nameidata nd;
	struct mount *mp;
	char *pathbuf;
	int error, id0, id1;

	AUDIT_ARG_VALUE(uap->flags);
	if (jailed(td->td_ucred) || usermount == 0) {
		error = priv_check(td, PRIV_VFS_UNMOUNT);
		if (error)
			return (error);
	}

	pathbuf = malloc(MNAMELEN, M_TEMP, M_WAITOK);
	error = copyinstr(uap->path, pathbuf, MNAMELEN, NULL);
	if (error) {
		free(pathbuf, M_TEMP);
		return (error);
	}
	if (uap->flags & MNT_BYFSID) {
		AUDIT_ARG_TEXT(pathbuf);
		/* Decode the filesystem ID. */
		if (sscanf(pathbuf, "FSID:%d:%d", &id0, &id1) != 2) {
			free(pathbuf, M_TEMP);
			return (EINVAL);
		}

		mtx_lock(&mountlist_mtx);
		TAILQ_FOREACH_REVERSE(mp, &mountlist, mntlist, mnt_list) {
			if (mp->mnt_stat.f_fsid.val[0] == id0 &&
			    mp->mnt_stat.f_fsid.val[1] == id1) {
				vfs_ref(mp);
				break;
			}
		}
		mtx_unlock(&mountlist_mtx);
	} else {
		/*
		 * Try to find global path for path argument.
		 */
		NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1,
		    UIO_SYSSPACE, pathbuf, td);
		if (namei(&nd) == 0) {
			NDFREE(&nd, NDF_ONLY_PNBUF);
			error = vn_path_to_global_path(td, nd.ni_vp, pathbuf,
			    MNAMELEN);
			if (error == 0 || error == ENODEV)
				vput(nd.ni_vp);
		}
		mtx_lock(&mountlist_mtx);
		TAILQ_FOREACH_REVERSE(mp, &mountlist, mntlist, mnt_list) {
			if (strcmp(mp->mnt_stat.f_mntonname, pathbuf) == 0) {
				vfs_ref(mp);
				break;
			}
		}
		mtx_unlock(&mountlist_mtx);
	}
	free(pathbuf, M_TEMP);
	if (mp == NULL) {
		/*
		 * Previously we returned ENOENT for a nonexistent path and
		 * EINVAL for a non-mountpoint.  We cannot tell these apart
		 * now, so in the !MNT_BYFSID case return the more likely
		 * EINVAL for compatibility.
		 */
		return ((uap->flags & MNT_BYFSID) ? ENOENT : EINVAL);
	}

	/*
	 * Don't allow unmounting the root filesystem.
	 */
	if (mp->mnt_flag & MNT_ROOTFS) {
		vfs_rel(mp);
		return (EINVAL);
	}
	error = dounmount(mp, uap->flags, td);
	return (error);
}
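
/*
 * Illustrative note (not part of the original source): with MNT_BYFSID
 * the "path" argument is not a path at all but the string form of the
 * filesystem ID, as produced by umount(8), e.g.
 *
 *	unmount("FSID:1369251223:839268532", MNT_BYFSID | MNT_FORCE);
 *
 * where the two (example) numbers are f_fsid.val[0] and f_fsid.val[1]
 * from statfs(2).
 */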

/*
 * Return error if any of the vnodes, ignoring the root vnode
 * and the syncer vnode, have non-zero usecount.
 *
 * This function is purely advisory - it can return false positives
 * and negatives.
 */
static int
vfs_check_usecounts(struct mount *mp)
{
	struct vnode *vp, *mvp;

	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if ((vp->v_vflag & VV_ROOT) == 0 && vp->v_type != VNON &&
		    vp->v_usecount != 0) {
			VI_UNLOCK(vp);
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			return (EBUSY);
		}
		VI_UNLOCK(vp);
	}

	return (0);
}

static void
dounmount_cleanup(struct mount *mp, struct vnode *coveredvp, int mntkflags)
{

	mtx_assert(MNT_MTX(mp), MA_OWNED);
	mp->mnt_kern_flag &= ~mntkflags;
	if ((mp->mnt_kern_flag & MNTK_MWAIT) != 0) {
		mp->mnt_kern_flag &= ~MNTK_MWAIT;
		wakeup(mp);
	}
	MNT_IUNLOCK(mp);
	if (coveredvp != NULL) {
		VOP_UNLOCK(coveredvp, 0);
		vdrop(coveredvp);
	}
	vn_finished_write(mp);
}

/*
 * Do the actual filesystem unmount.
 */
int
dounmount(struct mount *mp, int flags, struct thread *td)
{
	struct vnode *coveredvp;
	int error;
	uint64_t async_flag;
	int mnt_gen_r;

	if ((coveredvp = mp->mnt_vnodecovered) != NULL) {
		mnt_gen_r = mp->mnt_gen;
		VI_LOCK(coveredvp);
		vholdl(coveredvp);
		vn_lock(coveredvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY);
		/*
		 * Check for mp being unmounted while waiting for the
		 * covered vnode lock.
		 */
		if (coveredvp->v_mountedhere != mp ||
		    coveredvp->v_mountedhere->mnt_gen != mnt_gen_r) {
			VOP_UNLOCK(coveredvp, 0);
			vdrop(coveredvp);
			vfs_rel(mp);
			return (EBUSY);
		}
	}

	/*
	 * Only privileged root, or (if MNT_USER is set) the user that did the
	 * original mount is permitted to unmount this filesystem.
	 */
	error = vfs_suser(mp, td);
	if (error != 0) {
		if (coveredvp != NULL) {
			VOP_UNLOCK(coveredvp, 0);
			vdrop(coveredvp);
		}
		vfs_rel(mp);
		return (error);
	}

	vn_start_write(NULL, &mp, V_WAIT | V_MNTREF);
	MNT_ILOCK(mp);
	if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 ||
	    (mp->mnt_flag & MNT_UPDATE) != 0 ||
	    !TAILQ_EMPTY(&mp->mnt_uppers)) {
		dounmount_cleanup(mp, coveredvp, 0);
		return (EBUSY);
	}
	mp->mnt_kern_flag |= MNTK_UNMOUNT | MNTK_NOINSMNTQ;
	if (flags & MNT_NONBUSY) {
		MNT_IUNLOCK(mp);
		error = vfs_check_usecounts(mp);
		MNT_ILOCK(mp);
		if (error != 0) {
			dounmount_cleanup(mp, coveredvp, MNTK_UNMOUNT |
			    MNTK_NOINSMNTQ);
			return (error);
		}
	}
	/* Allow filesystems to detect that a forced unmount is in progress. */
	if (flags & MNT_FORCE) {
		mp->mnt_kern_flag |= MNTK_UNMOUNTF;
		MNT_IUNLOCK(mp);
		/*
		 * Must be done after setting MNTK_UNMOUNTF and before
		 * waiting for mnt_lockref to become 0.
		 */
		VFS_PURGE(mp);
		MNT_ILOCK(mp);
	}
	error = 0;
	if (mp->mnt_lockref) {
		mp->mnt_kern_flag |= MNTK_DRAINING;
		error = msleep(&mp->mnt_lockref, MNT_MTX(mp), PVFS,
		    "mount drain", 0);
	}
	MNT_IUNLOCK(mp);
	KASSERT(mp->mnt_lockref == 0,
	    ("%s: invalid lock refcount in the drain path @ %s:%d",
	    __func__, __FILE__, __LINE__));
	KASSERT(error == 0,
	    ("%s: invalid return value for msleep in the drain path @ %s:%d",
	    __func__, __FILE__, __LINE__));

	if (mp->mnt_flag & MNT_EXPUBLIC)
		vfs_setpublicfs(NULL, NULL, NULL);

	/*
	 * From now, we can claim that the use reference on the
	 * coveredvp is ours, and the ref can be released only by
	 * successful unmount by us, or left for later unmount
	 * attempt.  The previously acquired hold reference is no
	 * longer needed to protect the vnode from reuse.
	 */
	if (coveredvp != NULL)
		vdrop(coveredvp);

	vfs_msync(mp, MNT_WAIT);
	MNT_ILOCK(mp);
	async_flag = mp->mnt_flag & MNT_ASYNC;
	mp->mnt_flag &= ~MNT_ASYNC;
	mp->mnt_kern_flag &= ~MNTK_ASYNC;
	MNT_IUNLOCK(mp);
	cache_purgevfs(mp, false); /* remove cache entries for this file sys */
	vfs_deallocate_syncvnode(mp);
	if ((mp->mnt_flag & MNT_RDONLY) != 0 || (flags & MNT_FORCE) != 0 ||
	    (error = VFS_SYNC(mp, MNT_WAIT)) == 0)
		error = VFS_UNMOUNT(mp, flags);
	vn_finished_write(mp);
	/*
	 * If we failed to flush the dirty blocks for this mount point,
	 * undo all the cdir/rdir and rootvnode changes we made above.
	 * Unless we failed to do so because the device is reporting that
	 * it doesn't exist anymore.
	 */
	if (error && error != ENXIO) {
		MNT_ILOCK(mp);
		mp->mnt_kern_flag &= ~MNTK_NOINSMNTQ;
		if ((mp->mnt_flag & MNT_RDONLY) == 0) {
			MNT_IUNLOCK(mp);
			vfs_allocate_syncvnode(mp);
			MNT_ILOCK(mp);
		}
		mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
		mp->mnt_flag |= async_flag;
		if ((mp->mnt_flag & MNT_ASYNC) != 0 &&
		    (mp->mnt_kern_flag & MNTK_NOASYNC) == 0)
			mp->mnt_kern_flag |= MNTK_ASYNC;
		if (mp->mnt_kern_flag & MNTK_MWAIT) {
			mp->mnt_kern_flag &= ~MNTK_MWAIT;
			wakeup(mp);
		}
		MNT_IUNLOCK(mp);
		if (coveredvp)
			VOP_UNLOCK(coveredvp, 0);
		return (error);
	}
	mtx_lock(&mountlist_mtx);
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	mtx_unlock(&mountlist_mtx);
	EVENTHANDLER_INVOKE(vfs_unmounted, mp, td);
	if (coveredvp != NULL) {
		coveredvp->v_mountedhere = NULL;
		VOP_UNLOCK(coveredvp, 0);
	}
	vfs_event_signal(NULL, VQ_UNMOUNT, 0);
	if (rootvnode != NULL && mp == rootvnode->v_mount) {
		vrele(rootvnode);
		rootvnode = NULL;
	}
	if (mp == rootdevmp)
		rootdevmp = NULL;
	vfs_mount_destroy(mp);
	return (0);
}
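
/*
 * Sketch (not part of the original source): besides sys_unmount(),
 * in-kernel callers invoke dounmount() directly.  For instance, the
 * shutdown path tears down each mounted filesystem with roughly:
 *
 *	vfs_ref(mp);
 *	error = dounmount(mp, MNT_FORCE, td);
 *
 * The caller supplies a reference on mp, which dounmount() consumes on
 * both the success and the failure paths shown above.
 */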

/*
 * Report errors during filesystem mounting.
 */
void
vfs_mount_error(struct mount *mp, const char *fmt, ...)
{
	struct vfsoptlist *moptlist = mp->mnt_optnew;
	va_list ap;
	int error, len;
	char *errmsg;

	error = vfs_getopt(moptlist, "errmsg", (void **)&errmsg, &len);
	if (error || errmsg == NULL || len <= 0)
		return;

	va_start(ap, fmt);
	vsnprintf(errmsg, (size_t)len, fmt, ap);
	va_end(ap);
}

void
vfs_opterror(struct vfsoptlist *opts, const char *fmt, ...)
{
	va_list ap;
	int error, len;
	char *errmsg;

	error = vfs_getopt(opts, "errmsg", (void **)&errmsg, &len);
	if (error || errmsg == NULL || len <= 0)
		return;

	va_start(ap, fmt);
	vsnprintf(errmsg, (size_t)len, fmt, ap);
	va_end(ap);
}

/*
 * ---------------------------------------------------------------------
 * Functions for querying mount options/arguments from filesystems.
 */

/*
 * Check that no unknown options are given
 */
int
vfs_filteropt(struct vfsoptlist *opts, const char **legal)
{
	struct vfsopt *opt;
	char errmsg[255];
	const char **t, *p, *q;
	int ret = 0;

	TAILQ_FOREACH(opt, opts, link) {
		p = opt->name;
		q = NULL;
		if (p[0] == 'n' && p[1] == 'o')
			q = p + 2;
		for(t = global_opts; *t != NULL; t++) {
			if (strcmp(*t, p) == 0)
				break;
			if (q != NULL) {
				if (strcmp(*t, q) == 0)
					break;
			}
		}
		if (*t != NULL)
			continue;
		for(t = legal; *t != NULL; t++) {
			if (strcmp(*t, p) == 0)
				break;
			if (q != NULL) {
				if (strcmp(*t, q) == 0)
					break;
			}
		}
		if (*t != NULL)
			continue;
		snprintf(errmsg, sizeof(errmsg),
		    "mount option <%s> is unknown", p);
		ret = EINVAL;
	}
	if (ret != 0) {
		TAILQ_FOREACH(opt, opts, link) {
			if (strcmp(opt->name, "errmsg") == 0) {
				strncpy((char *)opt->value, errmsg, opt->len);
				break;
			}
		}
		if (opt == NULL)
			printf("%s\n", errmsg);
	}
	return (ret);
}

/*
 * Get a mount option by its name.
 *
 * Return 0 if the option was found, ENOENT otherwise.
 * If len is non-NULL it will be filled with the length
 * of the option.  If buf is non-NULL, it will be filled
 * with the address of the option.
 */
int
vfs_getopt(struct vfsoptlist *opts, const char *name, void **buf, int *len)
{
	struct vfsopt *opt;

	KASSERT(opts != NULL, ("vfs_getopt: caller passed 'opts' as NULL"));

	TAILQ_FOREACH(opt, opts, link) {
		if (strcmp(name, opt->name) == 0) {
			opt->seen = 1;
			if (len != NULL)
				*len = opt->len;
			if (buf != NULL)
				*buf = opt->value;
			return (0);
		}
	}
	return (ENOENT);
}
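
/*
 * Sketch (not part of the original source): a filesystem's vfs_mount
 * routine typically validates and fetches its options with the helpers
 * above, e.g. (names here are hypothetical):
 *
 *	static const char *examplefs_opts[] = { "from", "export", NULL };
 *	...
 *	if (vfs_filteropt(mp->mnt_optnew, examplefs_opts))
 *		return (EINVAL);
 *	if (vfs_getopt(mp->mnt_optnew, "from", (void **)&from, NULL) != 0)
 *		from = NULL;
 */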

int
vfs_getopt_pos(struct vfsoptlist *opts, const char *name)
{
	struct vfsopt *opt;

	if (opts == NULL)
		return (-1);

	TAILQ_FOREACH(opt, opts, link) {
		if (strcmp(name, opt->name) == 0) {
			opt->seen = 1;
			return (opt->pos);
		}
	}
	return (-1);
}

int
vfs_getopt_size(struct vfsoptlist *opts, const char *name, off_t *value)
{
	char *opt_value, *vtp;
	quad_t iv;
	int error, opt_len;

	error = vfs_getopt(opts, name, (void **)&opt_value, &opt_len);
	if (error != 0)
		return (error);
	if (opt_len == 0 || opt_value == NULL)
		return (EINVAL);
	if (opt_value[0] == '\0' || opt_value[opt_len - 1] != '\0')
		return (EINVAL);
	iv = strtoq(opt_value, &vtp, 0);
	if (vtp == opt_value || (vtp[0] != '\0' && vtp[1] != '\0'))
		return (EINVAL);
	if (iv < 0)
		return (EINVAL);
	switch (vtp[0]) {
	case 't':
	case 'T':
		iv *= 1024;
	case 'g':
	case 'G':
		iv *= 1024;
	case 'm':
	case 'M':
		iv *= 1024;
	case 'k':
	case 'K':
		iv *= 1024;
	case '\0':
		break;
	default:
		return (EINVAL);
	}
	*value = iv;

	return (0);
}
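
/*
 * Illustrative note (not part of the original source): vfs_getopt_size()
 * accepts at most one size suffix, and the deliberately cascading cases
 * above multiply through the remaining units, so for a hypothetical
 * "size" option:
 *
 *	value "64m"  -> *value = 64 * 1024 * 1024 = 67108864
 *	value "1G"   -> *value = 1073741824
 *	value "4096" -> *value = 4096
 */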

char *
vfs_getopts(struct vfsoptlist *opts, const char *name, int *error)
{
	struct vfsopt *opt;

	*error = 0;
	TAILQ_FOREACH(opt, opts, link) {
		if (strcmp(name, opt->name) != 0)
			continue;
		opt->seen = 1;
		if (opt->len == 0 ||
		    ((char *)opt->value)[opt->len - 1] != '\0') {
			*error = EINVAL;
			return (NULL);
		}
		return (opt->value);
	}
	*error = ENOENT;
	return (NULL);
}

int
vfs_flagopt(struct vfsoptlist *opts, const char *name, uint64_t *w,
    uint64_t val)
{
	struct vfsopt *opt;

	TAILQ_FOREACH(opt, opts, link) {
		if (strcmp(name, opt->name) == 0) {
			opt->seen = 1;
			if (w != NULL)
				*w |= val;
			return (1);
		}
	}
	if (w != NULL)
		*w &= ~val;
	return (0);
}

int
vfs_scanopt(struct vfsoptlist *opts, const char *name, const char *fmt, ...)
{
	va_list ap;
	struct vfsopt *opt;
	int ret;

	KASSERT(opts != NULL, ("vfs_scanopt: caller passed 'opts' as NULL"));

	TAILQ_FOREACH(opt, opts, link) {
		if (strcmp(name, opt->name) != 0)
			continue;
		opt->seen = 1;
		if (opt->len == 0 || opt->value == NULL)
			return (0);
		if (((char *)opt->value)[opt->len - 1] != '\0')
			return (0);
		va_start(ap, fmt);
		ret = vsscanf(opt->value, fmt, ap);
		va_end(ap);
		return (ret);
	}
	return (0);
}

int
vfs_setopt(struct vfsoptlist *opts, const char *name, void *value, int len)
{
	struct vfsopt *opt;

	TAILQ_FOREACH(opt, opts, link) {
		if (strcmp(name, opt->name) != 0)
			continue;
		opt->seen = 1;
		if (opt->value == NULL)
			opt->len = len;
		else {
			if (opt->len != len)
				return (EINVAL);
			bcopy(value, opt->value, len);
		}
		return (0);
	}
	return (ENOENT);
}

int
vfs_setopt_part(struct vfsoptlist *opts, const char *name, void *value, int len)
{
	struct vfsopt *opt;

	TAILQ_FOREACH(opt, opts, link) {
		if (strcmp(name, opt->name) != 0)
			continue;
		opt->seen = 1;
		if (opt->value == NULL)
			opt->len = len;
		else {
			if (opt->len < len)
				return (EINVAL);
			opt->len = len;
			bcopy(value, opt->value, len);
		}
		return (0);
	}
	return (ENOENT);
}

int
vfs_setopts(struct vfsoptlist *opts, const char *name, const char *value)
{
	struct vfsopt *opt;

	TAILQ_FOREACH(opt, opts, link) {
		if (strcmp(name, opt->name) != 0)
			continue;
		opt->seen = 1;
		if (opt->value == NULL)
			opt->len = strlen(value) + 1;
		else if (strlcpy(opt->value, value, opt->len) >= opt->len)
			return (EINVAL);
		return (0);
	}
	return (ENOENT);
}

/*
 * Find and copy a mount option.
 *
 * The size of the buffer has to be specified
 * in len; if it is not the same length as the
 * mount option, EINVAL is returned.
 * Returns ENOENT if the option is not found.
 */
int
vfs_copyopt(struct vfsoptlist *opts, const char *name, void *dest, int len)
{
	struct vfsopt *opt;

	KASSERT(opts != NULL, ("vfs_copyopt: caller passed 'opts' as NULL"));

	TAILQ_FOREACH(opt, opts, link) {
		if (strcmp(name, opt->name) == 0) {
			opt->seen = 1;
			if (len != opt->len)
				return (EINVAL);
			bcopy(opt->value, dest, opt->len);
			return (0);
		}
	}
	return (ENOENT);
}

int
__vfs_statfs(struct mount *mp, struct statfs *sbp)
{
	int error;

	error = mp->mnt_op->vfs_statfs(mp, &mp->mnt_stat);
	if (sbp != &mp->mnt_stat)
		*sbp = mp->mnt_stat;
	return (error);
}

void
vfs_mountedfrom(struct mount *mp, const char *from)
{

	bzero(mp->mnt_stat.f_mntfromname, sizeof mp->mnt_stat.f_mntfromname);
	strlcpy(mp->mnt_stat.f_mntfromname, from,
	    sizeof mp->mnt_stat.f_mntfromname);
}
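
/*
 * Sketch (not part of the original source): typical uses of the query
 * helpers above inside a filesystem's mount routine (the option names,
 * flag word and default are hypothetical):
 *
 *	vfs_flagopt(mp->mnt_optnew, "shortnames", &fsflags,
 *	    EXAMPLEFS_SHORTNAMES);
 *	if (vfs_scanopt(mp->mnt_optnew, "timeout", "%d", &timeout) != 1)
 *		timeout = 30;
 */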

/*
 * ---------------------------------------------------------------------
 * This is the api for building mount args and mounting filesystems from
 * inside the kernel.
 *
 * The API works by accumulation of individual args.  First error is
 * latched.
 *
 * XXX: should be documented in new manpage kernel_mount(9)
 */

/* A memory allocation which must be freed when we are done */
struct mntaarg {
	SLIST_ENTRY(mntaarg)	next;
};

/* The header for the mount arguments */
struct mntarg {
	struct iovec *v;
	int len;
	int error;
	SLIST_HEAD(, mntaarg)	list;
};

/*
 * Add a boolean argument.
 *
 * flag is the boolean value.
 * name must start with "no".
 */
struct mntarg *
mount_argb(struct mntarg *ma, int flag, const char *name)
{

	KASSERT(name[0] == 'n' && name[1] == 'o',
	    ("mount_argb(...,%s): name must start with 'no'", name));

	return (mount_arg(ma, name + (flag ? 2 : 0), NULL, 0));
}

/*
 * Add an argument printf style
 */
struct mntarg *
mount_argf(struct mntarg *ma, const char *name, const char *fmt, ...)
{
	va_list ap;
	struct mntaarg *maa;
	struct sbuf *sb;
	int len;

	if (ma == NULL) {
		ma = malloc(sizeof *ma, M_MOUNT, M_WAITOK | M_ZERO);
		SLIST_INIT(&ma->list);
	}
	if (ma->error)
		return (ma);

	ma->v = realloc(ma->v, sizeof *ma->v * (ma->len + 2),
	    M_MOUNT, M_WAITOK);
	ma->v[ma->len].iov_base = (void *)(uintptr_t)name;
	ma->v[ma->len].iov_len = strlen(name) + 1;
	ma->len++;

	sb = sbuf_new_auto();
	va_start(ap, fmt);
	sbuf_vprintf(sb, fmt, ap);
	va_end(ap);
	sbuf_finish(sb);
	len = sbuf_len(sb) + 1;
	maa = malloc(sizeof *maa + len, M_MOUNT, M_WAITOK | M_ZERO);
	SLIST_INSERT_HEAD(&ma->list, maa, next);
	bcopy(sbuf_data(sb), maa + 1, len);
	sbuf_delete(sb);

	ma->v[ma->len].iov_base = maa + 1;
	ma->v[ma->len].iov_len = len;
	ma->len++;

	return (ma);
}

/*
 * Add an argument which is a userland string.
 */
struct mntarg *
mount_argsu(struct mntarg *ma, const char *name, const void *val, int len)
{
	struct mntaarg *maa;
	char *tbuf;

	if (val == NULL)
		return (ma);
	if (ma == NULL) {
		ma = malloc(sizeof *ma, M_MOUNT, M_WAITOK | M_ZERO);
		SLIST_INIT(&ma->list);
	}
	if (ma->error)
		return (ma);
	maa = malloc(sizeof *maa + len, M_MOUNT, M_WAITOK | M_ZERO);
	SLIST_INSERT_HEAD(&ma->list, maa, next);
	tbuf = (void *)(maa + 1);
	ma->error = copyinstr(val, tbuf, len, NULL);
	return (mount_arg(ma, name, tbuf, -1));
}
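
/*
 * Illustrative note (not part of the original source): because the name
 * passed to mount_argb() must carry the "no" prefix, the boolean decides
 * whether the prefix is kept or stripped.  For example:
 *
 *	mount_argb(ma, flags & MNT_RDONLY, "noro");
 *
 * adds the option "ro" when MNT_RDONLY is set and "noro" otherwise, as
 * done by sys_mount() above.
 */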

/*
 * Plain argument.
 *
 * If length is -1, treat value as a C string.
 */
struct mntarg *
mount_arg(struct mntarg *ma, const char *name, const void *val, int len)
{

	if (ma == NULL) {
		ma = malloc(sizeof *ma, M_MOUNT, M_WAITOK | M_ZERO);
		SLIST_INIT(&ma->list);
	}
	if (ma->error)
		return (ma);

	ma->v = realloc(ma->v, sizeof *ma->v * (ma->len + 2),
	    M_MOUNT, M_WAITOK);
	ma->v[ma->len].iov_base = (void *)(uintptr_t)name;
	ma->v[ma->len].iov_len = strlen(name) + 1;
	ma->len++;

	ma->v[ma->len].iov_base = (void *)(uintptr_t)val;
	if (len < 0)
		ma->v[ma->len].iov_len = strlen(val) + 1;
	else
		ma->v[ma->len].iov_len = len;
	ma->len++;
	return (ma);
}

/*
 * Free a mntarg structure
 */
static void
free_mntarg(struct mntarg *ma)
{
	struct mntaarg *maa;

	while (!SLIST_EMPTY(&ma->list)) {
		maa = SLIST_FIRST(&ma->list);
		SLIST_REMOVE_HEAD(&ma->list, next);
		free(maa, M_MOUNT);
	}
	free(ma->v, M_MOUNT);
	free(ma, M_MOUNT);
}

/*
 * Mount a filesystem
 */
int
kernel_mount(struct mntarg *ma, uint64_t flags)
{
	struct uio auio;
	int error;

	KASSERT(ma != NULL, ("kernel_mount NULL ma"));
	KASSERT(ma->v != NULL, ("kernel_mount NULL ma->v"));
	KASSERT(!(ma->len & 1), ("kernel_mount odd ma->len (%d)", ma->len));

	auio.uio_iov = ma->v;
	auio.uio_iovcnt = ma->len;
	auio.uio_segflg = UIO_SYSSPACE;

	error = ma->error;
	if (!error)
		error = vfs_donmount(curthread, flags, &auio);
	free_mntarg(ma);
	return (error);
}

/*
 * A printflike function to mount a filesystem.
 */
int
kernel_vmount(int flags, ...)
{
	struct mntarg *ma = NULL;
	va_list ap;
	const char *cp;
	const void *vp;
	int error;

	va_start(ap, flags);
	for (;;) {
		cp = va_arg(ap, const char *);
		if (cp == NULL)
			break;
		vp = va_arg(ap, const void *);
		ma = mount_arg(ma, cp, vp, (vp != NULL ? -1 : 0));
	}
	va_end(ap);

	error = kernel_mount(ma, flags);
	return (error);
}

void
vfs_oexport_conv(const struct oexport_args *oexp, struct export_args *exp)
{

	bcopy(oexp, exp, sizeof(*oexp));
	exp->ex_numsecflavors = 0;
}
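
/*
 * Sketch (not part of the original source): a typical in-kernel consumer
 * mounts a filesystem by listing NUL-terminated name/value string pairs
 * and ending the list with NULL, e.g. (device and path are hypothetical):
 *
 *	error = kernel_vmount(MNT_RDONLY,
 *	    "fstype", "cd9660",
 *	    "fspath", "/mnt",
 *	    "from", "/dev/cd0",
 *	    NULL);
 *
 * kernel_mount() then feeds the accumulated iovec list to vfs_donmount()
 * exactly as if it had arrived via nmount(2).
 */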