/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright 2017 RackTop Systems.
 * Copyright 2016 Nexenta Systems, Inc.
 */

/* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/user.h>
#include <sys/fstyp.h>
#include <sys/kmem.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/fem.h>
#include <sys/mntent.h>
#include <sys/stat.h>
#include <sys/statvfs.h>
#include <sys/statfs.h>
#include <sys/cred.h>
#include <sys/vnode.h>
#include <sys/rwstlock.h>
#include <sys/dnlc.h>
#include <sys/file.h>
#include <sys/time.h>
#include <sys/atomic.h>
#include <sys/cmn_err.h>
#include <sys/buf.h>
#include <sys/swap.h>
#include <sys/debug.h>
#include <sys/vnode.h>
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/pathname.h>
#include <sys/bootconf.h>
#include <sys/dumphdr.h>
#include <sys/dc_ki.h>
#include <sys/poll.h>
#include <sys/sunddi.h>
#include <sys/sysmacros.h>
#include <sys/zone.h>
#include <sys/policy.h>
#include <sys/ctfs.h>
#include <sys/objfs.h>
#include <sys/console.h>
#include <sys/reboot.h>
#include <sys/attr.h>
#include <sys/zio.h>
#include <sys/spa.h>
#include <sys/lofi.h>
#include <sys/bootprops.h>
#include <sys/avl.h>

#include <vm/page.h>

#include <fs/fs_subr.h>
/* Private interfaces to create vopstats-related data structures */
extern void initialize_vopstats(vopstats_t *);
extern vopstats_t *get_fstype_vopstats(struct vfs *, struct vfssw *);
extern vsk_anchor_t *get_vskstat_anchor(struct vfs *);

static void vfs_clearmntopt_nolock(mntopts_t *, const char *, int);
static void vfs_setmntopt_nolock(mntopts_t *, const char *,
    const char *, int, int);
static int vfs_optionisset_nolock(const mntopts_t *, const char *, char **);
static void vfs_freemnttab(struct vfs *);
static void vfs_freeopt(mntopt_t *);
static void vfs_swapopttbl_nolock(mntopts_t *, mntopts_t *);
static void
vfs_swapopttbl(mntopts_t *, mntopts_t *);
static void vfs_copyopttbl_extend(const mntopts_t *, mntopts_t *, int);
static void vfs_createopttbl_extend(mntopts_t *, const char *,
    const mntopts_t *);
static char **vfs_copycancelopt_extend(char **const, int);
static void vfs_freecancelopt(char **);
static void getrootfs(char **, char **);
static int getmacpath(dev_info_t *, void *);
static void vfs_mnttabvp_setup(void);

struct ipmnt {
	struct ipmnt	*mip_next;
	dev_t		mip_dev;
	struct vfs	*mip_vfsp;
};

static kmutex_t		vfs_miplist_mutex;
static struct ipmnt	*vfs_miplist = NULL;
static struct ipmnt	*vfs_miplist_end = NULL;

static kmem_cache_t *vfs_cache;	/* Pointer to VFS kmem cache */

/*
 * VFS global data.
 */
vnode_t *rootdir;		/* pointer to root inode vnode. */
vnode_t *devicesdir;		/* pointer to inode of devices root */
vnode_t *devdir;		/* pointer to inode of dev root */

char *server_rootpath;		/* root path for diskless clients */
char *server_hostname;		/* hostname of diskless server */

static struct vfs root;
static struct vfs devices;
static struct vfs dev;
struct vfs *rootvfs = &root;	/* pointer to root vfs; head of VFS list. */
avl_tree_t vfs_by_dev;		/* avl tree to index mounted VFSs by dev */
avl_tree_t vfs_by_mntpnt;	/* avl tree to index mounted VFSs by mntpnt */
uint64_t vfs_curr_mntix;	/* counter to provide a unique mntix for
				 * entries in the above avl trees.
				 * protected by vfslist lock */
rvfs_t *rvfs_list;		/* array of vfs ptrs for vfs hash list */
int vfshsz = 512;		/* # of heads/locks in vfs hash arrays */
				/* must be power of 2! */
timespec_t vfs_mnttab_ctime;	/* mnttab created time */
timespec_t vfs_mnttab_mtime;	/* mnttab last modified time */
char *vfs_dummyfstype = "\0";
struct pollhead vfs_pollhd;	/* for mnttab pollers */
struct vnode *vfs_mntdummyvp;	/* to fake mnttab read/write for file events */
int mntfstype;			/* will be set once mnt fs is mounted */

/*
 * Table for generic options recognized in the VFS layer and acted
 * on at this level before parsing file system specific options.
 * The nosuid option is stronger than any of the devices and setuid
 * options, so those are canceled when nosuid is seen.
 *
 * All options which are added here need to be added to the
 * list of standard options in usr/src/cmd/fs.d/fslib.c as well.
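 *
 * Each entry's cancel list drives vfs_setmntopt(): setting an option also
 * clears every option named in its cancel list. For example, an option
 * string of "devices,setuid,nosuid" leaves only nosuid in effect, because
 * nosuid_cancel (below) names both of the earlier options and their
 * negations.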
 */
/*
 * VFS Mount options table
 */
static char *ro_cancel[] = { MNTOPT_RW, NULL };
static char *rw_cancel[] = { MNTOPT_RO, NULL };
static char *suid_cancel[] = { MNTOPT_NOSUID, NULL };
static char *nosuid_cancel[] = { MNTOPT_SUID, MNTOPT_DEVICES, MNTOPT_NODEVICES,
    MNTOPT_NOSETUID, MNTOPT_SETUID, NULL };
static char *devices_cancel[] = { MNTOPT_NODEVICES, NULL };
static char *nodevices_cancel[] = { MNTOPT_DEVICES, NULL };
static char *setuid_cancel[] = { MNTOPT_NOSETUID, NULL };
static char *nosetuid_cancel[] = { MNTOPT_SETUID, NULL };
static char *nbmand_cancel[] = { MNTOPT_NONBMAND, NULL };
static char *nonbmand_cancel[] = { MNTOPT_NBMAND, NULL };
static char *exec_cancel[] = { MNTOPT_NOEXEC, NULL };
static char *noexec_cancel[] = { MNTOPT_EXEC, NULL };

static const mntopt_t mntopts[] = {
/*
 *	option name		cancel options		default arg	flags
 */
	{ MNTOPT_REMOUNT,	NULL,			NULL,
		MO_NODISPLAY, (void *)0 },
	{ MNTOPT_RO,		ro_cancel,		NULL,		0,
		(void *)0 },
	{ MNTOPT_RW,		rw_cancel,		NULL,		0,
		(void *)0 },
	{ MNTOPT_SUID,		suid_cancel,		NULL,		0,
		(void *)0 },
	{ MNTOPT_NOSUID,	nosuid_cancel,		NULL,		0,
		(void *)0 },
	{ MNTOPT_DEVICES,	devices_cancel,		NULL,		0,
		(void *)0 },
	{ MNTOPT_NODEVICES,	nodevices_cancel,	NULL,		0,
		(void *)0 },
	{ MNTOPT_SETUID,	setuid_cancel,		NULL,		0,
		(void *)0 },
	{ MNTOPT_NOSETUID,	nosetuid_cancel,	NULL,		0,
		(void *)0 },
	{ MNTOPT_NBMAND,	nbmand_cancel,		NULL,		0,
		(void *)0 },
	{ MNTOPT_NONBMAND,	nonbmand_cancel,	NULL,		0,
		(void *)0 },
	{ MNTOPT_EXEC,		exec_cancel,		NULL,		0,
		(void *)0 },
	{ MNTOPT_NOEXEC,	noexec_cancel,		NULL,		0,
		(void *)0 },
};

const mntopts_t vfs_mntopts = {
	sizeof (mntopts) / sizeof (mntopt_t),
	(mntopt_t *)&mntopts[0]
};

/*
 * File system operation dispatch functions.
 */

int
fsop_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
{
	return (*(vfsp)->vfs_op->vfs_mount)(vfsp, mvp, uap, cr);
}

int
fsop_unmount(vfs_t *vfsp, int flag, cred_t *cr)
{
	return (*(vfsp)->vfs_op->vfs_unmount)(vfsp, flag, cr);
}

int
fsop_root(vfs_t *vfsp, vnode_t **vpp)
{
	refstr_t *mntpt;
	int ret = (*(vfsp)->vfs_op->vfs_root)(vfsp, vpp);
	/*
	 * Make sure this root has a path.  With lofs, it is possible to have
	 * a NULL mountpoint.
	 */
	if (ret == 0 && vfsp->vfs_mntpt != NULL && (*vpp)->v_path == NULL) {
		mntpt = vfs_getmntpoint(vfsp);
		vn_setpath_str(*vpp, refstr_value(mntpt),
		    strlen(refstr_value(mntpt)));
		refstr_rele(mntpt);
	}

	return (ret);
}

int
fsop_statfs(vfs_t *vfsp, statvfs64_t *sp)
{
	return (*(vfsp)->vfs_op->vfs_statvfs)(vfsp, sp);
}

int
fsop_sync(vfs_t *vfsp, short flag, cred_t *cr)
{
	return (*(vfsp)->vfs_op->vfs_sync)(vfsp, flag, cr);
}

int
fsop_vget(vfs_t *vfsp, vnode_t **vpp, fid_t *fidp)
{
	/*
	 * In order to handle system attribute fids in a manner
	 * transparent to the underlying fs, we embed the fid for
	 * the sysattr parent object in the sysattr fid and tack on
	 * some extra bytes that only the sysattr layer knows about.
	 *
	 * This guarantees that sysattr fids are larger than other fids
	 * for this vfs. If the vfs supports the sysattr view interface
	 * (as indicated by VFSFT_SYSATTR_VIEWS), we cannot have a size
	 * collision with XATTR_FIDSZ.
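	 *
	 * A fid of exactly XATTR_FIDSZ is therefore routed to
	 * xattr_dir_vget(), which recovers the embedded parent fid; all
	 * other fids fall through to the filesystem's own vfs_vget below.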
	 */
	if (vfs_has_feature(vfsp, VFSFT_SYSATTR_VIEWS) &&
	    fidp->fid_len == XATTR_FIDSZ)
		return (xattr_dir_vget(vfsp, vpp, fidp));

	return (*(vfsp)->vfs_op->vfs_vget)(vfsp, vpp, fidp);
}

int
fsop_mountroot(vfs_t *vfsp, enum whymountroot reason)
{
	return (*(vfsp)->vfs_op->vfs_mountroot)(vfsp, reason);
}

void
fsop_freefs(vfs_t *vfsp)
{
	(*(vfsp)->vfs_op->vfs_freevfs)(vfsp);
}

int
fsop_vnstate(vfs_t *vfsp, vnode_t *vp, vntrans_t nstate)
{
	return ((*(vfsp)->vfs_op->vfs_vnstate)(vfsp, vp, nstate));
}

int
fsop_sync_by_kind(int fstype, short flag, cred_t *cr)
{
	ASSERT((fstype >= 0) && (fstype < nfstype));

	if (ALLOCATED_VFSSW(&vfssw[fstype]) && VFS_INSTALLED(&vfssw[fstype]))
		return (*vfssw[fstype].vsw_vfsops.vfs_sync) (NULL, flag, cr);
	else
		return (ENOTSUP);
}

/*
 * File system initialization.  vfs_setfsops() must be called from a file
 * system's init routine.
 */

static int
fs_copyfsops(const fs_operation_def_t *template, vfsops_t *actual,
    int *unused_ops)
{
	static const fs_operation_trans_def_t vfs_ops_table[] = {
		VFSNAME_MOUNT, offsetof(vfsops_t, vfs_mount),
			fs_nosys, fs_nosys,

		VFSNAME_UNMOUNT, offsetof(vfsops_t, vfs_unmount),
			fs_nosys, fs_nosys,

		VFSNAME_ROOT, offsetof(vfsops_t, vfs_root),
			fs_nosys, fs_nosys,

		VFSNAME_STATVFS, offsetof(vfsops_t, vfs_statvfs),
			fs_nosys, fs_nosys,

		VFSNAME_SYNC, offsetof(vfsops_t, vfs_sync),
			(fs_generic_func_p) fs_sync,
			(fs_generic_func_p) fs_sync,	/* No errors allowed */

		VFSNAME_VGET, offsetof(vfsops_t, vfs_vget),
			fs_nosys, fs_nosys,

		VFSNAME_MOUNTROOT, offsetof(vfsops_t, vfs_mountroot),
			fs_nosys, fs_nosys,

		VFSNAME_FREEVFS, offsetof(vfsops_t, vfs_freevfs),
			(fs_generic_func_p)fs_freevfs,
			(fs_generic_func_p)fs_freevfs,	/* Shouldn't fail */

		VFSNAME_VNSTATE, offsetof(vfsops_t, vfs_vnstate),
			(fs_generic_func_p)fs_nosys,
			(fs_generic_func_p)fs_nosys,

		NULL, 0, NULL, NULL
	};

	return (fs_build_vector(actual, unused_ops, vfs_ops_table, template));
}

void
zfs_boot_init() {

	if (strcmp(rootfs.bo_fstype, MNTTYPE_ZFS) == 0)
		spa_boot_init();
}

int
vfs_setfsops(int fstype, const fs_operation_def_t *template, vfsops_t **actual)
{
	int error;
	int unused_ops;

	/*
	 * Verify that fstype refers to a valid fs.  Note that
	 * 0 is valid since it's used to set "stray" ops.
	 */
	if ((fstype < 0) || (fstype >= nfstype))
		return (EINVAL);

	if (!ALLOCATED_VFSSW(&vfssw[fstype]))
		return (EINVAL);

	/* Set up the operations vector.
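	 * fs_copyfsops() copies the caller's template into the vfssw slot,
	 * substituting the defaults listed in vfs_ops_table above (typically
	 * fs_nosys) for any operation the template omits, and reports how
	 * many supplied entries went unused.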
	 */

	error = fs_copyfsops(template, &vfssw[fstype].vsw_vfsops, &unused_ops);

	if (error != 0)
		return (error);

	vfssw[fstype].vsw_flag |= VSW_INSTALLED;

	if (actual != NULL)
		*actual = &vfssw[fstype].vsw_vfsops;

#ifdef DEBUG
	if (unused_ops != 0)
		cmn_err(CE_WARN, "vfs_setfsops: %s: %d operations supplied "
		    "but not used", vfssw[fstype].vsw_name, unused_ops);
#endif

	return (0);
}

int
vfs_makefsops(const fs_operation_def_t *template, vfsops_t **actual)
{
	int error;
	int unused_ops;

	*actual = (vfsops_t *)kmem_alloc(sizeof (vfsops_t), KM_SLEEP);

	error = fs_copyfsops(template, *actual, &unused_ops);
	if (error != 0) {
		kmem_free(*actual, sizeof (vfsops_t));
		*actual = NULL;
		return (error);
	}

	return (0);
}

/*
 * Free a vfsops structure created as a result of vfs_makefsops().
 * NOTE: For a vfsops structure initialized by vfs_setfsops(), use
 * vfs_freevfsops_by_type().
 */
void
vfs_freevfsops(vfsops_t *vfsops)
{
	kmem_free(vfsops, sizeof (vfsops_t));
}

/*
 * Since the vfsops structure is part of the vfssw table and wasn't
 * really allocated, we're not really freeing anything.  We keep
 * the name for consistency with vfs_freevfsops().  We do, however,
 * need to take care of a little bookkeeping.
 * NOTE: For a vfsops structure created by vfs_makefsops(), use
 * vfs_freevfsops().
 */
int
vfs_freevfsops_by_type(int fstype)
{

	/* Verify that fstype refers to a loaded fs (and not fsid 0). */
	if ((fstype <= 0) || (fstype >= nfstype))
		return (EINVAL);

	WLOCK_VFSSW();
	if ((vfssw[fstype].vsw_flag & VSW_INSTALLED) == 0) {
		WUNLOCK_VFSSW();
		return (EINVAL);
	}

	vfssw[fstype].vsw_flag &= ~VSW_INSTALLED;
	WUNLOCK_VFSSW();

	return (0);
}

/* Support routines used to reference vfs_op */

/* Set the operations vector for a vfs */
void
vfs_setops(vfs_t *vfsp, vfsops_t *vfsops)
{
	vfsops_t	*op;

	ASSERT(vfsp != NULL);
	ASSERT(vfsops != NULL);

	op = vfsp->vfs_op;
	membar_consumer();
	if (vfsp->vfs_femhead == NULL &&
	    atomic_cas_ptr(&vfsp->vfs_op, op, vfsops) == op) {
		return;
	}
	fsem_setvfsops(vfsp, vfsops);
}

/* Retrieve the operations vector for a vfs */
vfsops_t *
vfs_getops(vfs_t *vfsp)
{
	vfsops_t	*op;

	ASSERT(vfsp != NULL);

	op = vfsp->vfs_op;
	membar_consumer();
	if (vfsp->vfs_femhead == NULL && op == vfsp->vfs_op) {
		return (op);
	} else {
		return (fsem_getvfsops(vfsp));
	}
}

/*
 * Returns non-zero (1) if the vfsops matches that of the vfs.
 * Returns zero (0) if not.
 */
int
vfs_matchops(vfs_t *vfsp, vfsops_t *vfsops)
{
	return (vfs_getops(vfsp) == vfsops);
}

/*
 * Returns non-zero (1) if the file system has installed a non-default,
 * non-error vfs_sync routine.  Returns zero (0) otherwise.
 */
int
vfs_can_sync(vfs_t *vfsp)
{
	/* vfs_sync() routine is not the default/error function */
	return (vfs_getops(vfsp)->vfs_sync != fs_sync);
}

/*
 * Initialize a vfs structure.
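 * (Callers in this file reach it through the VFS_INIT() macro, as in
 * vfs_mountdevices(), vfs_mountdev1() and domount() below.)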
 */
void
vfs_init(vfs_t *vfsp, vfsops_t *op, void *data)
{
	/* Other initialization has been moved to vfs_alloc() */
	vfsp->vfs_count = 0;
	vfsp->vfs_next = vfsp;
	vfsp->vfs_prev = vfsp;
	vfsp->vfs_zone_next = vfsp;
	vfsp->vfs_zone_prev = vfsp;
	vfsp->vfs_lofi_minor = 0;
	sema_init(&vfsp->vfs_reflock, 1, NULL, SEMA_DEFAULT, NULL);
	vfsimpl_setup(vfsp);
	vfsp->vfs_data = (data);
	vfs_setops((vfsp), (op));
}

/*
 * Allocate and initialize the vfs implementation private data
 * structure, vfs_impl_t.
 */
void
vfsimpl_setup(vfs_t *vfsp)
{
	int i;

	if (vfsp->vfs_implp != NULL) {
		return;
	}

	vfsp->vfs_implp = kmem_alloc(sizeof (vfs_impl_t), KM_SLEEP);
	/* Note that these are #define'd in vfs.h */
	vfsp->vfs_vskap = NULL;
	vfsp->vfs_fstypevsp = NULL;

	/* Set size of counted array, then zero the array */
	vfsp->vfs_featureset[0] = VFS_FEATURE_MAXSZ - 1;
	for (i = 1; i < VFS_FEATURE_MAXSZ; i++) {
		vfsp->vfs_featureset[i] = 0;
	}
}

/*
 * Release the vfs_impl_t structure, if it exists. Some unbundled
 * filesystems may not use the newer version of vfs and thus
 * would not contain this implementation private data structure.
 */
void
vfsimpl_teardown(vfs_t *vfsp)
{
	vfs_impl_t	*vip = vfsp->vfs_implp;

	if (vip == NULL)
		return;

	kmem_free(vfsp->vfs_implp, sizeof (vfs_impl_t));
	vfsp->vfs_implp = NULL;
}

/*
 * VFS system calls: mount, umount, syssync, statfs, fstatfs, statvfs,
 * fstatvfs, and sysfs moved to common/syscall.
 */

/*
 * Update every mounted file system.  We call the vfs_sync operation of
 * each file system type, passing it a NULL vfsp to indicate that all
 * mounted file systems of that type should be updated.
 */
void
vfs_sync(int flag)
{
	struct vfssw *vswp;
	RLOCK_VFSSW();
	for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++) {
		if (ALLOCATED_VFSSW(vswp) && VFS_INSTALLED(vswp)) {
			vfs_refvfssw(vswp);
			RUNLOCK_VFSSW();
			(void) (*vswp->vsw_vfsops.vfs_sync)(NULL, flag,
			    CRED());
			vfs_unrefvfssw(vswp);
			RLOCK_VFSSW();
		}
	}
	RUNLOCK_VFSSW();
}

void
sync(void)
{
	vfs_sync(0);
}

/*
 * compare function for vfs_by_dev avl tree. compare dev first, then mntix
 */
static int
vfs_cmp_dev(const void *aa, const void *bb)
{
	const vfs_t *a = aa;
	const vfs_t *b = bb;

	if (a->vfs_dev < b->vfs_dev)
		return (-1);
	if (a->vfs_dev > b->vfs_dev)
		return (1);
	if (a->vfs_mntix < b->vfs_mntix)
		return (-1);
	if (a->vfs_mntix > b->vfs_mntix)
		return (1);
	return (0);
}

/*
 * compare function for vfs_by_mntpnt avl tree. compare mntpnt first, then
 * mntix
 */
static int
vfs_cmp_mntpnt(const void *aa, const void *bb)
{
	const vfs_t *a = aa;
	const vfs_t *b = bb;
	int ret;

	ret = strcmp(refstr_value(a->vfs_mntpt), refstr_value(b->vfs_mntpt));
	if (ret < 0)
		return (-1);
	if (ret > 0)
		return (1);
	if (a->vfs_mntix < b->vfs_mntix)
		return (-1);
	if (a->vfs_mntix > b->vfs_mntix)
		return (1);
	return (0);
}

/*
 * External routines.
 */

krwlock_t vfssw_lock;	/* lock accesses to vfssw */

/*
 * Lock for accessing the vfs linked list.
 * Initialized in vfs_mountroot(),
 * but otherwise should be accessed only via vfs_list_lock() and
 * vfs_list_unlock().  Also used to protect the timestamp for mods to the list.
 */
static krwlock_t vfslist;

/*
 * Mount devfs on /devices. This is done right after root is mounted
 * to provide device access support for the system
 */
static void
vfs_mountdevices(void)
{
	struct vfssw *vsw;
	struct vnode *mvp;
	struct mounta mounta = {	/* fake mounta for devfs_mount() */
		NULL,
		NULL,
		MS_SYSSPACE,
		NULL,
		NULL,
		0,
		NULL,
		0
	};

	/*
	 * _init devfs module to fill in the vfssw
	 */
	if (modload("fs", "devfs") == -1)
		panic("Cannot _init devfs module");

	/*
	 * Hold vfs
	 */
	RLOCK_VFSSW();
	vsw = vfs_getvfsswbyname("devfs");
	VFS_INIT(&devices, &vsw->vsw_vfsops, NULL);
	VFS_HOLD(&devices);

	/*
	 * Locate mount point
	 */
	if (lookupname("/devices", UIO_SYSSPACE, FOLLOW, NULLVPP, &mvp))
		panic("Cannot find /devices");

	/*
	 * Perform the mount of /devices
	 */
	if (VFS_MOUNT(&devices, mvp, &mounta, CRED()))
		panic("Cannot mount /devices");

	RUNLOCK_VFSSW();

	/*
	 * Set appropriate members and add to vfs list for mnttab display
	 */
	vfs_setresource(&devices, "/devices", 0);
	vfs_setmntpoint(&devices, "/devices", 0);

	/*
	 * Hold the root of /devices so it won't go away
	 */
	if (VFS_ROOT(&devices, &devicesdir))
		panic("vfs_mountdevices: not devices root");

	if (vfs_lock(&devices) != 0) {
		VN_RELE(devicesdir);
		cmn_err(CE_NOTE, "Cannot acquire vfs_lock of /devices");
		return;
	}

	if (vn_vfswlock(mvp) != 0) {
		vfs_unlock(&devices);
		VN_RELE(devicesdir);
		cmn_err(CE_NOTE, "Cannot acquire vfswlock of /devices");
		return;
	}

	vfs_add(mvp, &devices, 0);
	vn_vfsunlock(mvp);
	vfs_unlock(&devices);
	VN_RELE(devicesdir);
}

/*
 * mount the first instance of /dev to root and remain mounted
 */
static void
vfs_mountdev1(void)
{
	struct vfssw *vsw;
	struct vnode *mvp;
	struct mounta mounta = {	/* fake mounta for sdev_mount() */
		NULL,
		NULL,
		MS_SYSSPACE | MS_OVERLAY,
		NULL,
		NULL,
		0,
		NULL,
		0
	};

	/*
	 * _init dev module to fill in the vfssw
	 */
	if (modload("fs", "dev") == -1)
		cmn_err(CE_PANIC, "Cannot _init dev module\n");

	/*
	 * Hold vfs
	 */
	RLOCK_VFSSW();
	vsw = vfs_getvfsswbyname("dev");
	VFS_INIT(&dev, &vsw->vsw_vfsops, NULL);
	VFS_HOLD(&dev);

	/*
	 * Locate mount point
	 */
	if (lookupname("/dev", UIO_SYSSPACE, FOLLOW, NULLVPP, &mvp))
		cmn_err(CE_PANIC, "Cannot find /dev\n");

	/*
	 * Perform the mount of /dev
	 */
	if (VFS_MOUNT(&dev, mvp, &mounta, CRED()))
		cmn_err(CE_PANIC, "Cannot mount /dev 1\n");

	RUNLOCK_VFSSW();

	/*
	 * Set appropriate members and add to vfs list for mnttab display
	 */
	vfs_setresource(&dev, "/dev", 0);
	vfs_setmntpoint(&dev, "/dev", 0);

	/*
	 * Hold the root of /dev so it won't go away
	 */
	if (VFS_ROOT(&dev, &devdir))
		cmn_err(CE_PANIC, "vfs_mountdev1: not dev root");

	if (vfs_lock(&dev) != 0) {
		VN_RELE(devdir);
		cmn_err(CE_NOTE, "Cannot acquire vfs_lock of /dev");
		return;
	}

	if (vn_vfswlock(mvp) != 0) {
		vfs_unlock(&dev);
		VN_RELE(devdir);
		cmn_err(CE_NOTE, "Cannot acquire vfswlock of /dev");
		return;
	}

	vfs_add(mvp, &dev, 0);
	vn_vfsunlock(mvp);
	vfs_unlock(&dev);
	VN_RELE(devdir);
}

/*
 * Mount required filesystem. This is done right after root is mounted.
 */
static void
vfs_mountfs(char *module, char *spec, char *path)
{
	struct vnode *mvp;
	struct mounta mounta;
	vfs_t *vfsp;

	bzero(&mounta, sizeof (mounta));
	mounta.flags = MS_SYSSPACE | MS_DATA;
	mounta.fstype = module;
	mounta.spec = spec;
	mounta.dir = path;
	if (lookupname(path, UIO_SYSSPACE, FOLLOW, NULLVPP, &mvp)) {
		cmn_err(CE_WARN, "Cannot find %s", path);
		return;
	}
	if (domount(NULL, &mounta, mvp, CRED(), &vfsp))
		cmn_err(CE_WARN, "Cannot mount %s", path);
	else
		VFS_RELE(vfsp);
	VN_RELE(mvp);
}

/*
 * vfs_mountroot is called by main() to mount the root filesystem.
 */
void
vfs_mountroot(void)
{
	struct vnode	*rvp = NULL;
	char		*path;
	size_t		plen;
	struct vfssw	*vswp;
	proc_t		*p;

	rw_init(&vfssw_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&vfslist, NULL, RW_DEFAULT, NULL);

	/*
	 * Alloc the avl trees for quick indexing via dev and mountpoint
	 */
	avl_create(&vfs_by_dev, vfs_cmp_dev, sizeof (vfs_t),
	    offsetof(vfs_t, vfs_avldev));
	avl_create(&vfs_by_mntpnt, vfs_cmp_mntpnt, sizeof (vfs_t),
	    offsetof(vfs_t, vfs_avlmntpnt));

	/*
	 * Alloc the vfs hash bucket array and locks
	 */
	rvfs_list = kmem_zalloc(vfshsz * sizeof (rvfs_t), KM_SLEEP);

	/*
	 * Call machine-dependent routine "rootconf" to choose a root
	 * file system type.
	 */
	if (rootconf())
		panic("vfs_mountroot: cannot mount root");
	/*
	 * Get vnode for '/'.  Set up rootdir, u.u_rdir and u.u_cdir
	 * to point to it.  These are used by lookuppn() so that it
	 * knows where to start from ('/' or '.').
	 */
	vfs_setmntpoint(rootvfs, "/", 0);
	if (VFS_ROOT(rootvfs, &rootdir))
		panic("vfs_mountroot: no root vnode");

	/*
	 * At this point, the process tree consists of p0 and possibly some
	 * direct children of p0.  (i.e. there are no grandchildren)
	 *
	 * Walk through them all, setting their current directory.
	 */
	mutex_enter(&pidlock);
	for (p = practive; p != NULL; p = p->p_next) {
		ASSERT(p == &p0 || p->p_parent == &p0);

		PTOU(p)->u_cdir = rootdir;
		VN_HOLD(PTOU(p)->u_cdir);
		PTOU(p)->u_rdir = NULL;
	}
	mutex_exit(&pidlock);

	/*
	 * Setup the global zone's rootvp, now that it exists.
	 */
	global_zone->zone_rootvp = rootdir;
	VN_HOLD(global_zone->zone_rootvp);

	/*
	 * Notify the module code that it can begin using the
	 * root filesystem instead of the boot program's services.
	 */
	modrootloaded = 1;

	/*
	 * Special handling for a ZFS root file system.
	 */
	zfs_boot_init();

	/*
	 * Set up mnttab information for root
	 */
	vfs_setresource(rootvfs, rootfs.bo_name, 0);

	/*
	 * Notify cluster software that the root filesystem is available.
	 */
	clboot_mountroot();

	/* Now that we're all done with the root FS, set up its vopstats */
	if ((vswp = vfs_getvfsswbyvfsops(vfs_getops(rootvfs))) != NULL) {
		/* Set flag for statistics collection */
		if (vswp->vsw_flag & VSW_STATS) {
			initialize_vopstats(&rootvfs->vfs_vopstats);
			rootvfs->vfs_flag |= VFS_STATS;
			rootvfs->vfs_fstypevsp =
			    get_fstype_vopstats(rootvfs, vswp);
			rootvfs->vfs_vskap = get_vskstat_anchor(rootvfs);
		}
		vfs_unrefvfssw(vswp);
	}

	/*
	 * Mount /devices, /dev instance 1, /system/contract, /etc/mnttab,
	 * /etc/svc/volatile, /etc/dfs/sharetab, /system/object, and /proc.
	 */
	vfs_mountdevices();
	vfs_mountdev1();

	vfs_mountfs("ctfs", "ctfs", CTFS_ROOT);
	vfs_mountfs("proc", "/proc", "/proc");
	vfs_mountfs("mntfs", "/etc/mnttab", "/etc/mnttab");
	vfs_mountfs("tmpfs", "/etc/svc/volatile", "/etc/svc/volatile");
	vfs_mountfs("objfs", "objfs", OBJFS_ROOT);
	vfs_mountfs("bootfs", "bootfs", "/system/boot");

	if (getzoneid() == GLOBAL_ZONEID) {
		vfs_mountfs("sharefs", "sharefs", "/etc/dfs/sharetab");
	}

	if (strcmp(rootfs.bo_fstype, "zfs") != 0) {
		/*
		 * Look up the root device via devfs so that a dv_node is
		 * created for it. The vnode is never VN_RELE()ed.
		 * We allocate more than MAXPATHLEN so that the
		 * buffer passed to i_ddi_prompath_to_devfspath() is
		 * exactly MAXPATHLEN (the function expects a buffer
		 * of that length).
		 */
		plen = strlen("/devices");
		path = kmem_alloc(plen + MAXPATHLEN, KM_SLEEP);
		(void) strcpy(path, "/devices");

		if (i_ddi_prompath_to_devfspath(rootfs.bo_name, path + plen)
		    != DDI_SUCCESS ||
		    lookupname(path, UIO_SYSSPACE, FOLLOW, NULLVPP, &rvp)) {

			/* NUL terminate in case "path" has garbage */
			path[plen + MAXPATHLEN - 1] = '\0';
#ifdef DEBUG
			cmn_err(CE_WARN, "!Cannot lookup root device: %s",
			    path);
#endif
		}
		kmem_free(path, plen + MAXPATHLEN);
	}

	vfs_mnttabvp_setup();
}

/*
 * Check to see if our "block device" is actually a file.  If so,
 * automatically add a lofi device, and keep track of this fact.
 */
static int
lofi_add(const char *fsname, struct vfs *vfsp,
    mntopts_t *mntopts, struct mounta *uap)
{
	int fromspace = (uap->flags & MS_SYSSPACE) ?
	    UIO_SYSSPACE : UIO_USERSPACE;
	struct lofi_ioctl *li = NULL;
	struct vnode *vp = NULL;
	struct pathname	pn = { NULL };
	ldi_ident_t ldi_id;
	ldi_handle_t ldi_hdl;
	vfssw_t *vfssw;
	int minor;
	int err = 0;

	if ((vfssw = vfs_getvfssw(fsname)) == NULL)
		return (0);

	if (!(vfssw->vsw_flag & VSW_CANLOFI)) {
		vfs_unrefvfssw(vfssw);
		return (0);
	}

	vfs_unrefvfssw(vfssw);
	vfssw = NULL;

	if (pn_get(uap->spec, fromspace, &pn) != 0)
		return (0);

	if (lookupname(uap->spec, fromspace, FOLLOW, NULL, &vp) != 0)
		goto out;

	if (vp->v_type != VREG)
		goto out;

	/* OK, this is a lofi mount.
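	 * For illustration (assuming a filesystem whose vfssw entry sets
	 * VSW_CANLOFI, as checked above): a request such as
	 *
	 *	mount -F ufs /var/tmp/ufs.img /mnt
	 *
	 * names a regular file rather than a block device, so the file is
	 * mapped through /dev/lofictl and the resulting lofi minor is
	 * remembered in vfs_lofi_minor; lofi_remove() uses it to tear the
	 * mapping down again.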
	 */

	if ((uap->flags & (MS_REMOUNT|MS_GLOBAL)) ||
	    vfs_optionisset_nolock(mntopts, MNTOPT_SUID, NULL) ||
	    vfs_optionisset_nolock(mntopts, MNTOPT_SETUID, NULL) ||
	    vfs_optionisset_nolock(mntopts, MNTOPT_DEVICES, NULL)) {
		err = EINVAL;
		goto out;
	}

	ldi_id = ldi_ident_from_anon();
	li = kmem_zalloc(sizeof (*li), KM_SLEEP);
	(void) strlcpy(li->li_filename, pn.pn_path, MAXPATHLEN);

	err = ldi_open_by_name("/dev/lofictl", FREAD | FWRITE, kcred,
	    &ldi_hdl, ldi_id);

	if (err)
		goto out2;

	err = ldi_ioctl(ldi_hdl, LOFI_MAP_FILE, (intptr_t)li,
	    FREAD | FWRITE | FKIOCTL, kcred, &minor);

	(void) ldi_close(ldi_hdl, FREAD | FWRITE, kcred);

	if (!err)
		vfsp->vfs_lofi_minor = minor;

out2:
	ldi_ident_release(ldi_id);
out:
	if (li != NULL)
		kmem_free(li, sizeof (*li));
	if (vp != NULL)
		VN_RELE(vp);
	pn_free(&pn);
	return (err);
}

static void
lofi_remove(struct vfs *vfsp)
{
	struct lofi_ioctl *li = NULL;
	ldi_ident_t ldi_id;
	ldi_handle_t ldi_hdl;
	int err;

	if (vfsp->vfs_lofi_minor == 0)
		return;

	ldi_id = ldi_ident_from_anon();

	li = kmem_zalloc(sizeof (*li), KM_SLEEP);
	li->li_minor = vfsp->vfs_lofi_minor;
	li->li_cleanup = B_TRUE;

	err = ldi_open_by_name("/dev/lofictl", FREAD | FWRITE, kcred,
	    &ldi_hdl, ldi_id);

	if (err)
		goto out;

	err = ldi_ioctl(ldi_hdl, LOFI_UNMAP_FILE_MINOR, (intptr_t)li,
	    FREAD | FWRITE | FKIOCTL, kcred, NULL);

	(void) ldi_close(ldi_hdl, FREAD | FWRITE, kcred);

	if (!err)
		vfsp->vfs_lofi_minor = 0;

out:
	ldi_ident_release(ldi_id);
	if (li != NULL)
		kmem_free(li, sizeof (*li));
}

/*
 * Common mount code.  Called from the system call entry point, from autofs,
 * nfsv4 trigger mounts, and from pxfs.
 *
 * Takes the effective file system type, mount arguments, the mount point
 * vnode, flags specifying whether the mount is a remount and whether it
 * should be entered into the vfs list, and credentials.  Fills in its vfspp
 * parameter with the mounted file system instance's vfs.
 *
 * Note that the effective file system type is specified as a string.  It may
 * be null, in which case it's determined from the mount arguments, and may
 * differ from the type specified in the mount arguments; this is a hook to
 * allow interposition when instantiating file system instances.
 *
 * The caller is responsible for releasing its own hold on the mount point
 * vp (this routine does its own hold when necessary).
 * Also note that for remounts, the mount point vp should be the vnode for
 * the root of the file system rather than the vnode that the file system
 * is mounted on top of.
 */
int
domount(char *fsname, struct mounta *uap, vnode_t *vp, struct cred *credp,
    struct vfs **vfspp)
{
	struct vfssw	*vswp;
	vfsops_t	*vfsops;
	struct vfs	*vfsp;
	struct vnode	*bvp;
	dev_t		bdev = 0;
	mntopts_t	mnt_mntopts;
	int		error = 0;
	int		copyout_error = 0;
	int		ovflags;
	char		*opts = uap->optptr;
	char		*inargs = opts;
	int		optlen = uap->optlen;
	int		remount;
	int		rdonly;
	int		nbmand = 0;
	int		delmip = 0;
	int		addmip = 0;
	int		splice = ((uap->flags & MS_NOSPLICE) == 0);
	int		fromspace = (uap->flags & MS_SYSSPACE) ?
	    UIO_SYSSPACE : UIO_USERSPACE;
	char		*resource = NULL, *mountpt = NULL;
	refstr_t	*oldresource, *oldmntpt;
	struct pathname	pn, rpn;
	vsk_anchor_t	*vskap;
	char fstname[FSTYPSZ];
	zone_t		*zone;

	/*
	 * The v_flag value for the mount point vp is permanently set
	 * to VVFSLOCK so that no one bypasses the vn_vfs*locks routine
	 * for mount point locking.
	 */
	mutex_enter(&vp->v_lock);
	vp->v_flag |= VVFSLOCK;
	mutex_exit(&vp->v_lock);

	mnt_mntopts.mo_count = 0;
	/*
	 * Find the ops vector to use to invoke the file system-specific mount
	 * method.  If the fsname argument is non-NULL, use it directly.
	 * Otherwise, dig the file system type information out of the mount
	 * arguments.
	 *
	 * A side effect is to hold the vfssw entry.
	 *
	 * Mount arguments can be specified in several ways, which are
	 * distinguished by flag bit settings.  The preferred way is to set
	 * MS_OPTIONSTR, indicating an 8 argument mount with the file system
	 * type supplied as a character string and the last two arguments
	 * being a pointer to a character buffer and the size of the buffer.
	 * On entry, the buffer holds a null terminated list of options; on
	 * return, the string is the list of options the file system
	 * recognized.  If MS_DATA is set arguments five and six point to a
	 * block of binary data which the file system interprets.
	 * A further wrinkle is that some callers don't set MS_FSS and MS_DATA
	 * consistently with these conventions.  To handle them, we check to
	 * see whether the pointer to the file system name has a numeric value
	 * less than 256.  If so, we treat it as an index.
	 */
	if (fsname != NULL) {
		if ((vswp = vfs_getvfssw(fsname)) == NULL) {
			return (EINVAL);
		}
	} else if (uap->flags & (MS_OPTIONSTR | MS_DATA | MS_FSS)) {
		size_t n;
		uint_t fstype;

		fsname = fstname;

		if ((fstype = (uintptr_t)uap->fstype) < 256) {
			RLOCK_VFSSW();
			if (fstype == 0 || fstype >= nfstype ||
			    !ALLOCATED_VFSSW(&vfssw[fstype])) {
				RUNLOCK_VFSSW();
				return (EINVAL);
			}
			(void) strcpy(fsname, vfssw[fstype].vsw_name);
			RUNLOCK_VFSSW();
			if ((vswp = vfs_getvfssw(fsname)) == NULL)
				return (EINVAL);
		} else {
			/*
			 * Handle either kernel or user address space.
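			 * (copystr() serves MS_SYSSPACE callers such as
			 * vfs_mountfs() above; copyinstr() serves mount(2)
			 * callers that pass a user-space fstype string.)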
1226 */ 1227 if (uap->flags & MS_SYSSPACE) { 1228 error = copystr(uap->fstype, fsname, 1229 FSTYPSZ, &n); 1230 } else { 1231 error = copyinstr(uap->fstype, fsname, 1232 FSTYPSZ, &n); 1233 } 1234 if (error) { 1235 if (error == ENAMETOOLONG) 1236 return (EINVAL); 1237 return (error); 1238 } 1239 if ((vswp = vfs_getvfssw(fsname)) == NULL) 1240 return (EINVAL); 1241 } 1242 } else { 1243 if ((vswp = vfs_getvfsswbyvfsops(vfs_getops(rootvfs))) == NULL) 1244 return (EINVAL); 1245 fsname = vswp->vsw_name; 1246 } 1247 if (!VFS_INSTALLED(vswp)) 1248 return (EINVAL); 1249 1250 if ((error = secpolicy_fs_allowed_mount(fsname)) != 0) { 1251 vfs_unrefvfssw(vswp); 1252 return (error); 1253 } 1254 1255 vfsops = &vswp->vsw_vfsops; 1256 1257 vfs_copyopttbl(&vswp->vsw_optproto, &mnt_mntopts); 1258 /* 1259 * Fetch mount options and parse them for generic vfs options 1260 */ 1261 if (uap->flags & MS_OPTIONSTR) { 1262 /* 1263 * Limit the buffer size 1264 */ 1265 if (optlen < 0 || optlen > MAX_MNTOPT_STR) { 1266 error = EINVAL; 1267 goto errout; 1268 } 1269 if ((uap->flags & MS_SYSSPACE) == 0) { 1270 inargs = kmem_alloc(MAX_MNTOPT_STR, KM_SLEEP); 1271 inargs[0] = '\0'; 1272 if (optlen) { 1273 error = copyinstr(opts, inargs, (size_t)optlen, 1274 NULL); 1275 if (error) { 1276 goto errout; 1277 } 1278 } 1279 } 1280 vfs_parsemntopts(&mnt_mntopts, inargs, 0); 1281 } 1282 /* 1283 * Flag bits override the options string. 1284 */ 1285 if (uap->flags & MS_REMOUNT) 1286 vfs_setmntopt_nolock(&mnt_mntopts, MNTOPT_REMOUNT, NULL, 0, 0); 1287 if (uap->flags & MS_RDONLY) 1288 vfs_setmntopt_nolock(&mnt_mntopts, MNTOPT_RO, NULL, 0, 0); 1289 if (uap->flags & MS_NOSUID) 1290 vfs_setmntopt_nolock(&mnt_mntopts, MNTOPT_NOSUID, NULL, 0, 0); 1291 1292 /* 1293 * Check if this is a remount; must be set in the option string and 1294 * the file system must support a remount option. 1295 */ 1296 if (remount = vfs_optionisset_nolock(&mnt_mntopts, 1297 MNTOPT_REMOUNT, NULL)) { 1298 if (!(vswp->vsw_flag & VSW_CANREMOUNT)) { 1299 error = ENOTSUP; 1300 goto errout; 1301 } 1302 uap->flags |= MS_REMOUNT; 1303 } 1304 1305 /* 1306 * uap->flags and vfs_optionisset() should agree. 1307 */ 1308 if (rdonly = vfs_optionisset_nolock(&mnt_mntopts, MNTOPT_RO, NULL)) { 1309 uap->flags |= MS_RDONLY; 1310 } 1311 if (vfs_optionisset_nolock(&mnt_mntopts, MNTOPT_NOSUID, NULL)) { 1312 uap->flags |= MS_NOSUID; 1313 } 1314 nbmand = vfs_optionisset_nolock(&mnt_mntopts, MNTOPT_NBMAND, NULL); 1315 ASSERT(splice || !remount); 1316 /* 1317 * If we are splicing the fs into the namespace, 1318 * perform mount point checks. 1319 * 1320 * We want to resolve the path for the mount point to eliminate 1321 * '.' and ".." and symlinks in mount points; we can't do the 1322 * same for the resource string, since it would turn 1323 * "/dev/dsk/c0t0d0s0" into "/devices/pci@...". We need to do 1324 * this before grabbing vn_vfswlock(), because otherwise we 1325 * would deadlock with lookuppn(). 1326 */ 1327 if (splice) { 1328 ASSERT(vp->v_count > 0); 1329 1330 /* 1331 * Pick up mount point and device from appropriate space. 1332 */ 1333 if (pn_get(uap->spec, fromspace, &pn) == 0) { 1334 resource = kmem_alloc(pn.pn_pathlen + 1, 1335 KM_SLEEP); 1336 (void) strcpy(resource, pn.pn_path); 1337 pn_free(&pn); 1338 } 1339 /* 1340 * Do a lookupname prior to taking the 1341 * writelock. Mark this as completed if 1342 * successful for later cleanup and addition to 1343 * the mount in progress table. 
		 */
		if ((uap->flags & MS_GLOBAL) == 0 &&
		    lookupname(uap->spec, fromspace,
		    FOLLOW, NULL, &bvp) == 0) {
			addmip = 1;
		}

		if ((error = pn_get(uap->dir, fromspace, &pn)) == 0) {
			pathname_t *pnp;

			if (*pn.pn_path != '/') {
				error = EINVAL;
				pn_free(&pn);
				goto errout;
			}
			pn_alloc(&rpn);
			/*
			 * Kludge to prevent autofs from deadlocking with
			 * itself when it calls domount().
			 *
			 * If autofs is calling, it is because it is doing
			 * (autofs) mounts in the process of an NFS mount.  A
			 * lookuppn() here would cause us to block waiting for
			 * said NFS mount to complete, which can't since this
			 * is the thread that was supposed to be doing it.
			 */
			if (fromspace == UIO_USERSPACE) {
				if ((error = lookuppn(&pn, &rpn, FOLLOW, NULL,
				    NULL)) == 0) {
					pnp = &rpn;
				} else {
					/*
					 * The file disappeared or otherwise
					 * became inaccessible since we opened
					 * it; might as well fail the mount
					 * since the mount point is no longer
					 * accessible.
					 */
					pn_free(&rpn);
					pn_free(&pn);
					goto errout;
				}
			} else {
				pnp = &pn;
			}
			mountpt = kmem_alloc(pnp->pn_pathlen + 1, KM_SLEEP);
			(void) strcpy(mountpt, pnp->pn_path);

			/*
			 * If the addition of the zone's rootpath
			 * would push us over a total path length
			 * of MAXPATHLEN, we fail the mount with
			 * ENAMETOOLONG, which is what we would have
			 * gotten if we were trying to perform the same
			 * mount in the global zone.
			 *
			 * strlen() doesn't count the trailing
			 * '\0', but zone_rootpathlen counts both a
			 * trailing '/' and the terminating '\0'.
			 */
			if ((curproc->p_zone->zone_rootpathlen - 1 +
			    strlen(mountpt)) > MAXPATHLEN ||
			    (resource != NULL &&
			    (curproc->p_zone->zone_rootpathlen - 1 +
			    strlen(resource)) > MAXPATHLEN)) {
				error = ENAMETOOLONG;
			}

			pn_free(&rpn);
			pn_free(&pn);
		}

		if (error)
			goto errout;

		/*
		 * Prevent path name resolution from proceeding past
		 * the mount point.
		 */
		if (vn_vfswlock(vp) != 0) {
			error = EBUSY;
			goto errout;
		}

		/*
		 * Verify that it's legitimate to establish a mount on
		 * the prospective mount point.
		 */
		if (vn_mountedvfs(vp) != NULL) {
			/*
			 * The mount point lock was obtained after some
			 * other thread raced through and established a mount.
			 */
			vn_vfsunlock(vp);
			error = EBUSY;
			goto errout;
		}
		if (vp->v_flag & VNOMOUNT) {
			vn_vfsunlock(vp);
			error = EINVAL;
			goto errout;
		}
	}
	if ((uap->flags & (MS_DATA | MS_OPTIONSTR)) == 0) {
		uap->dataptr = NULL;
		uap->datalen = 0;
	}

	/*
	 * If this is a remount, we don't want to create a new VFS.
	 * Instead, we pass the existing one with a remount flag.
	 */
	if (remount) {
		/*
		 * Confirm that the mount point is the root vnode of the
		 * file system that is being remounted.
		 * This can happen if the user specifies a different
		 * mount point directory pathname in the (re)mount command.
		 *
		 * Code below can only be reached if splice is true, so it's
		 * safe to do vn_vfsunlock() here.
		 */
		if ((vp->v_flag & VROOT) == 0) {
			vn_vfsunlock(vp);
			error = ENOENT;
			goto errout;
		}
		/*
		 * Disallow making file systems read-only unless file system
		 * explicitly allows it in its vfssw.
		 * Ignore other flags.
		 */
		if (rdonly && vn_is_readonly(vp) == 0 &&
		    (vswp->vsw_flag & VSW_CANRWRO) == 0) {
			vn_vfsunlock(vp);
			error = EINVAL;
			goto errout;
		}
		/*
		 * Disallow changing the NBMAND disposition of the file
		 * system on remounts.
		 */
		if ((nbmand && ((vp->v_vfsp->vfs_flag & VFS_NBMAND) == 0)) ||
		    (!nbmand && (vp->v_vfsp->vfs_flag & VFS_NBMAND))) {
			vn_vfsunlock(vp);
			error = EINVAL;
			goto errout;
		}
		vfsp = vp->v_vfsp;
		ovflags = vfsp->vfs_flag;
		vfsp->vfs_flag |= VFS_REMOUNT;
		vfsp->vfs_flag &= ~VFS_RDONLY;
	} else {
		vfsp = vfs_alloc(KM_SLEEP);
		VFS_INIT(vfsp, vfsops, NULL);
	}

	VFS_HOLD(vfsp);

	if ((error = lofi_add(fsname, vfsp, &mnt_mntopts, uap)) != 0) {
		if (!remount) {
			if (splice)
				vn_vfsunlock(vp);
			vfs_free(vfsp);
		} else {
			vn_vfsunlock(vp);
			VFS_RELE(vfsp);
		}
		goto errout;
	}

	/*
	 * PRIV_SYS_MOUNT doesn't mean you can become root.
	 */
	if (vfsp->vfs_lofi_minor != 0) {
		uap->flags |= MS_NOSUID;
		vfs_setmntopt_nolock(&mnt_mntopts, MNTOPT_NOSUID, NULL, 0, 0);
	}

	/*
	 * The vfs_reflock is not used anymore; the code below explicitly
	 * holds it, preventing others from accessing it directly.
	 */
	if ((sema_tryp(&vfsp->vfs_reflock) == 0) &&
	    !(vfsp->vfs_flag & VFS_REMOUNT))
		cmn_err(CE_WARN,
		    "mount type %s couldn't get vfs_reflock", vswp->vsw_name);

	/*
	 * Lock the vfs. If this is a remount we want to avoid spurious umount
	 * failures that happen as a side-effect of fsflush() and other mount
	 * and unmount operations that might be going on simultaneously and
	 * may have locked the vfs currently. To not return EBUSY immediately
	 * here we use vfs_lock_wait() instead of vfs_lock() for the remount
	 * case.
	 */
	if (!remount) {
		if (error = vfs_lock(vfsp)) {
			vfsp->vfs_flag = ovflags;

			lofi_remove(vfsp);

			if (splice)
				vn_vfsunlock(vp);
			vfs_free(vfsp);
			goto errout;
		}
	} else {
		vfs_lock_wait(vfsp);
	}

	/*
	 * Add device to mount in progress table; global mounts require
	 * special handling. It is possible that we have already done the
	 * lookupname on a spliced, non-global fs. If so, we don't want to
	 * do it again since we cannot do a lookupname after taking the
	 * wlock above. This case is for a non-spliced, non-global filesystem.
	 */
	if (!addmip) {
		if ((uap->flags & MS_GLOBAL) == 0 &&
		    lookupname(uap->spec, fromspace, FOLLOW, NULL, &bvp) == 0) {
			addmip = 1;
		}
	}

	if (addmip) {
		vnode_t *lvp = NULL;

		error = vfs_get_lofi(vfsp, &lvp);
		if (error > 0) {
			lofi_remove(vfsp);

			if (splice)
				vn_vfsunlock(vp);
			vfs_unlock(vfsp);

			if (remount) {
				VFS_RELE(vfsp);
			} else {
				vfs_free(vfsp);
			}

			goto errout;
		} else if (error == -1) {
			bdev = bvp->v_rdev;
			VN_RELE(bvp);
		} else {
			bdev = lvp->v_rdev;
			VN_RELE(lvp);
			VN_RELE(bvp);
		}

		vfs_addmip(bdev, vfsp);
		addmip = 0;
		delmip = 1;
	}
	/*
	 * Invalidate cached entry for the mount point.
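	 * (dnlc_purge_vp() below drops any directory name cache entries that
	 * still reference vp, so subsequent lookups do not see stale entries
	 * for the now-covered directory.)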
	 */
	if (splice)
		dnlc_purge_vp(vp);

	/*
	 * If we have an option string but the filesystem doesn't supply a
	 * prototype options table, create a table with the global
	 * options and sufficient room to accept all the options in the
	 * string.  Then parse the passed in option string
	 * accepting all the options in the string.  This gives us an
	 * option table with all the proper cancel properties for the
	 * global options.
	 *
	 * Filesystems that supply a prototype options table are handled
	 * earlier in this function.
	 */
	if (uap->flags & MS_OPTIONSTR) {
		if (!(vswp->vsw_flag & VSW_HASPROTO)) {
			mntopts_t tmp_mntopts;

			tmp_mntopts.mo_count = 0;
			vfs_createopttbl_extend(&tmp_mntopts, inargs,
			    &mnt_mntopts);
			vfs_parsemntopts(&tmp_mntopts, inargs, 1);
			vfs_swapopttbl_nolock(&mnt_mntopts, &tmp_mntopts);
			vfs_freeopttbl(&tmp_mntopts);
		}
	}

	/*
	 * Serialize with zone state transitions.
	 * See vfs_list_add; zone mounted into is:
	 * zone_find_by_path(refstr_value(vfsp->vfs_mntpt))
	 * not the zone doing the mount (curproc->p_zone), but if we're already
	 * inside a NGZ, then we know what zone we are.
	 */
	if (INGLOBALZONE(curproc)) {
		zone = zone_find_by_path(mountpt);
		ASSERT(zone != NULL);
	} else {
		zone = curproc->p_zone;
		/*
		 * zone_find_by_path does a hold, so do one here too so that
		 * we can do a zone_rele after mount_completed.
		 */
		zone_hold(zone);
	}
	mount_in_progress(zone);
	/*
	 * Instantiate (or reinstantiate) the file system.  If appropriate,
	 * splice it into the file system name space.
	 *
	 * We want VFS_MOUNT() to be able to override the vfs_resource
	 * string if necessary (ie, mntfs), and also for a remount to
	 * change the same (necessary when remounting '/' during boot).
	 * So we set up vfs_mntpt and vfs_resource to what we think they
	 * should be, then hand off control to VFS_MOUNT() which can
	 * override this.
	 *
	 * For safety's sake, when changing vfs_resource or vfs_mntpt of
	 * a vfs which is on the vfs list (i.e. during a remount), we must
	 * never set those fields to NULL. Several bits of code make
	 * assumptions that the fields are always valid.
	 */
	vfs_swapopttbl(&mnt_mntopts, &vfsp->vfs_mntopts);
	if (remount) {
		if ((oldresource = vfsp->vfs_resource) != NULL)
			refstr_hold(oldresource);
		if ((oldmntpt = vfsp->vfs_mntpt) != NULL)
			refstr_hold(oldmntpt);
	}
	vfs_setresource(vfsp, resource, 0);
	vfs_setmntpoint(vfsp, mountpt, 0);

	/*
	 * going to mount on this vnode, so notify.
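	 * (vnevent_mountedover() lets any file-event watchers of vp know
	 * that the directory is about to be covered by a mount.)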
	 */
	vnevent_mountedover(vp, NULL);
	error = VFS_MOUNT(vfsp, vp, uap, credp);

	if (uap->flags & MS_RDONLY)
		vfs_setmntopt(vfsp, MNTOPT_RO, NULL, 0);
	if (uap->flags & MS_NOSUID)
		vfs_setmntopt(vfsp, MNTOPT_NOSUID, NULL, 0);
	if (uap->flags & MS_GLOBAL)
		vfs_setmntopt(vfsp, MNTOPT_GLOBAL, NULL, 0);

	if (error) {
		lofi_remove(vfsp);

		if (remount) {
			/* put back pre-remount options */
			vfs_swapopttbl(&mnt_mntopts, &vfsp->vfs_mntopts);
			vfs_setmntpoint(vfsp, refstr_value(oldmntpt),
			    VFSSP_VERBATIM);
			if (oldmntpt)
				refstr_rele(oldmntpt);
			vfs_setresource(vfsp, refstr_value(oldresource),
			    VFSSP_VERBATIM);
			if (oldresource)
				refstr_rele(oldresource);
			vfsp->vfs_flag = ovflags;
			vfs_unlock(vfsp);
			VFS_RELE(vfsp);
		} else {
			vfs_unlock(vfsp);
			vfs_freemnttab(vfsp);
			vfs_free(vfsp);
		}
	} else {
		/*
		 * Set the mount time to now
		 */
		vfsp->vfs_mtime = ddi_get_time();
		if (remount) {
			vfsp->vfs_flag &= ~VFS_REMOUNT;
			if (oldresource)
				refstr_rele(oldresource);
			if (oldmntpt)
				refstr_rele(oldmntpt);
		} else if (splice) {
			/*
			 * Link vfsp into the name space at the mount
			 * point.  Vfs_add() is responsible for
			 * holding the mount point which will be
			 * released when vfs_remove() is called.
			 */
			vfs_add(vp, vfsp, uap->flags);
		} else {
			/*
			 * Hold the reference to file system which is
			 * not linked into the name space.
			 */
			vfsp->vfs_zone = NULL;
			VFS_HOLD(vfsp);
			vfsp->vfs_vnodecovered = NULL;
		}
		/*
		 * Set flags for global options encountered
		 */
		if (vfs_optionisset(vfsp, MNTOPT_RO, NULL))
			vfsp->vfs_flag |= VFS_RDONLY;
		else
			vfsp->vfs_flag &= ~VFS_RDONLY;
		if (vfs_optionisset(vfsp, MNTOPT_NOSUID, NULL)) {
			vfsp->vfs_flag |= (VFS_NOSETUID|VFS_NODEVICES);
		} else {
			if (vfs_optionisset(vfsp, MNTOPT_NODEVICES, NULL))
				vfsp->vfs_flag |= VFS_NODEVICES;
			else
				vfsp->vfs_flag &= ~VFS_NODEVICES;
			if (vfs_optionisset(vfsp, MNTOPT_NOSETUID, NULL))
				vfsp->vfs_flag |= VFS_NOSETUID;
			else
				vfsp->vfs_flag &= ~VFS_NOSETUID;
		}
		if (vfs_optionisset(vfsp, MNTOPT_NBMAND, NULL))
			vfsp->vfs_flag |= VFS_NBMAND;
		else
			vfsp->vfs_flag &= ~VFS_NBMAND;

		if (vfs_optionisset(vfsp, MNTOPT_XATTR, NULL))
			vfsp->vfs_flag |= VFS_XATTR;
		else
			vfsp->vfs_flag &= ~VFS_XATTR;

		if (vfs_optionisset(vfsp, MNTOPT_NOEXEC, NULL))
			vfsp->vfs_flag |= VFS_NOEXEC;
		else
			vfsp->vfs_flag &= ~VFS_NOEXEC;

		/*
		 * Now construct the output option string of options
		 * we recognized.
		 */
		if (uap->flags & MS_OPTIONSTR) {
			vfs_list_read_lock();
			copyout_error = vfs_buildoptionstr(
			    &vfsp->vfs_mntopts, inargs, optlen);
			vfs_list_unlock();
			if (copyout_error == 0 &&
			    (uap->flags & MS_SYSSPACE) == 0) {
				copyout_error = copyoutstr(inargs, opts,
				    optlen, NULL);
			}
		}

		/*
		 * If this isn't a remount, set up the vopstats before
		 * anyone can touch this. We only allow spliced file
		 * systems (file systems which are in the namespace) to
		 * have the VFS_STATS flag set.
		 * NOTE: PxFS mounts the underlying file system with
		 * MS_NOSPLICE set and copies those vfs_flags to its private
		 * vfs structure.
		 * As a result, PxFS should never have
		 * the VFS_STATS flag or else we might access the vfs
		 * statistics-related fields prior to them being
		 * properly initialized.
		 */
		if (!remount && (vswp->vsw_flag & VSW_STATS) && splice) {
			initialize_vopstats(&vfsp->vfs_vopstats);
			/*
			 * We need to set vfs_vskap to NULL because there's
			 * a chance it won't be set below.  This is checked
			 * in teardown_vopstats() so we can't have garbage.
			 */
			vfsp->vfs_vskap = NULL;
			vfsp->vfs_flag |= VFS_STATS;
			vfsp->vfs_fstypevsp = get_fstype_vopstats(vfsp, vswp);
		}

		if (vswp->vsw_flag & VSW_XID)
			vfsp->vfs_flag |= VFS_XID;

		vfs_unlock(vfsp);
	}
	mount_completed(zone);
	zone_rele(zone);
	if (splice)
		vn_vfsunlock(vp);

	if ((error == 0) && (copyout_error == 0)) {
		if (!remount) {
			/*
			 * Don't call get_vskstat_anchor() while holding
			 * locks since it allocates memory and calls
			 * VFS_STATVFS().  For NFS, the latter can generate
			 * an over-the-wire call.
			 */
			vskap = get_vskstat_anchor(vfsp);
			/* Only take the lock if we have something to do */
			if (vskap != NULL) {
				vfs_lock_wait(vfsp);
				if (vfsp->vfs_flag & VFS_STATS) {
					vfsp->vfs_vskap = vskap;
				}
				vfs_unlock(vfsp);
			}
		}
		/* Return vfsp to caller. */
		*vfspp = vfsp;
	}
errout:
	vfs_freeopttbl(&mnt_mntopts);
	if (resource != NULL)
		kmem_free(resource, strlen(resource) + 1);
	if (mountpt != NULL)
		kmem_free(mountpt, strlen(mountpt) + 1);
	/*
	 * It is possible we errored prior to adding to mount in progress
	 * table. Must free vnode we acquired with successful lookupname.
	 */
	if (addmip)
		VN_RELE(bvp);
	if (delmip)
		vfs_delmip(vfsp);
	ASSERT(vswp != NULL);
	vfs_unrefvfssw(vswp);
	if (inargs != opts)
		kmem_free(inargs, MAX_MNTOPT_STR);
	if (copyout_error) {
		lofi_remove(vfsp);
		VFS_RELE(vfsp);
		error = copyout_error;
	}
	return (error);
}

static void
vfs_setpath(
    struct vfs *vfsp,		/* vfs being updated */
    refstr_t **refp,		/* Ref-count string to contain the new path */
    const char *newpath,	/* Path to add to refp (above) */
    uint32_t flag)		/* flag */
{
	size_t len;
	refstr_t *ref;
	zone_t *zone = curproc->p_zone;
	char *sp;
	int have_list_lock = 0;

	ASSERT(!VFS_ON_LIST(vfsp) || vfs_lock_held(vfsp));

	/*
	 * New path must be less than MAXPATHLEN because mntfs
	 * will only display up to MAXPATHLEN bytes. This is currently
	 * safe, because domount() uses pn_get(), and other callers
	 * similarly cap the size to fewer than MAXPATHLEN bytes.
	 */

	ASSERT(strlen(newpath) < MAXPATHLEN);

	/* mntfs requires consistency while vfs list lock is held */

	if (VFS_ON_LIST(vfsp)) {
		have_list_lock = 1;
		vfs_list_lock();
	}

	if (*refp != NULL)
		refstr_rele(*refp);

	/*
	 * If we are in a non-global zone then we prefix the supplied path,
	 * newpath, with the zone's root path, with two exceptions. The first
	 * is where we have been explicitly directed to avoid doing so; this
	 * will be the case following a failed remount, where the path supplied
	 * will be a saved version which must now be restored. The second
	 * exception is where newpath is not a pathname but a descriptive name,
	 * e.g. "procfs".
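	 *
	 * As an illustration (hypothetical zone name): with a zone_rootpath
	 * of "/zones/web/root/" and a newpath of "/export/home", the string
	 * stored in *refp becomes "/zones/web/root/export/home".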
	 */
	if (zone == global_zone || (flag & VFSSP_VERBATIM) || *newpath != '/') {
		ref = refstr_alloc(newpath);
		goto out;
	}

	/*
	 * Truncate the trailing '/' in the zoneroot, and merge
	 * in the zone's rootpath with the "newpath" (resource
	 * or mountpoint) passed in.
	 *
	 * The size of the required buffer is thus the size of
	 * the buffer required for the passed-in newpath
	 * (strlen(newpath) + 1), plus the size of the buffer
	 * required to hold zone_rootpath (zone_rootpathlen)
	 * minus one for one of the now-superfluous NUL
	 * terminations, minus one for the trailing '/'.
	 *
	 * That gives us:
	 *
	 * (strlen(newpath) + 1) + zone_rootpathlen - 1 - 1
	 *
	 * Which is what we have below.
	 */

	len = strlen(newpath) + zone->zone_rootpathlen - 1;
	sp = kmem_alloc(len, KM_SLEEP);

	/*
	 * Copy everything including the trailing slash, which
	 * we then overwrite with the NUL character.
	 */

	(void) strcpy(sp, zone->zone_rootpath);
	sp[zone->zone_rootpathlen - 2] = '\0';
	(void) strcat(sp, newpath);

	ref = refstr_alloc(sp);
	kmem_free(sp, len);
out:
	*refp = ref;

	if (have_list_lock) {
		vfs_mnttab_modtimeupd();
		vfs_list_unlock();
	}
}

/*
 * Record a mounted resource name in a vfs structure.
 * If vfsp is already mounted, caller must hold the vfs lock.
 */
void
vfs_setresource(struct vfs *vfsp, const char *resource, uint32_t flag)
{
	if (resource == NULL || resource[0] == '\0')
		resource = VFS_NORESOURCE;
	vfs_setpath(vfsp, &vfsp->vfs_resource, resource, flag);
}

/*
 * Record a mount point name in a vfs structure.
 * If vfsp is already mounted, caller must hold the vfs lock.
 */
void
vfs_setmntpoint(struct vfs *vfsp, const char *mntpt, uint32_t flag)
{
	if (mntpt == NULL || mntpt[0] == '\0')
		mntpt = VFS_NOMNTPT;
	vfs_setpath(vfsp, &vfsp->vfs_mntpt, mntpt, flag);
}

/* Returns the vfs_resource. Caller must call refstr_rele() when finished. */

refstr_t *
vfs_getresource(const struct vfs *vfsp)
{
	refstr_t *resource;

	vfs_list_read_lock();
	resource = vfsp->vfs_resource;
	refstr_hold(resource);
	vfs_list_unlock();

	return (resource);
}

/* Returns the vfs_mntpt. Caller must call refstr_rele() when finished. */

refstr_t *
vfs_getmntpoint(const struct vfs *vfsp)
{
	refstr_t *mntpt;

	vfs_list_read_lock();
	mntpt = vfsp->vfs_mntpt;
	refstr_hold(mntpt);
	vfs_list_unlock();

	return (mntpt);
}

/*
 * Create an empty options table with enough empty slots to hold all
 * the options in the options string passed as an argument.
 * Potentially prepend another options table.
 *
 * Note: caller is responsible for locking the vfs list, if needed,
 * to protect mops.
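 *
 * For example, an option string of "rw,nosuid,xattr" yields three extra
 * empty slots (one per comma-separated option) in addition to whatever
 * the template mtmpl already supplies.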
2019 */ 2020 static void 2021 vfs_createopttbl_extend(mntopts_t *mops, const char *opts, 2022 const mntopts_t *mtmpl) 2023 { 2024 const char *s = opts; 2025 uint_t count; 2026 2027 if (opts == NULL || *opts == '\0') { 2028 count = 0; 2029 } else { 2030 count = 1; 2031 2032 /* 2033 * Count number of options in the string 2034 */ 2035 for (s = strchr(s, ','); s != NULL; s = strchr(s, ',')) { 2036 count++; 2037 s++; 2038 } 2039 } 2040 vfs_copyopttbl_extend(mtmpl, mops, count); 2041 } 2042 2043 /* 2044 * Create an empty options table with enough empty slots to hold all 2045 * The options in the options string passed as an argument. 2046 * 2047 * This function is *not* for general use by filesystems. 2048 * 2049 * Note: caller is responsible for locking the vfs list, if needed, 2050 * to protect mops. 2051 */ 2052 void 2053 vfs_createopttbl(mntopts_t *mops, const char *opts) 2054 { 2055 vfs_createopttbl_extend(mops, opts, NULL); 2056 } 2057 2058 2059 /* 2060 * Swap two mount options tables 2061 */ 2062 static void 2063 vfs_swapopttbl_nolock(mntopts_t *optbl1, mntopts_t *optbl2) 2064 { 2065 uint_t tmpcnt; 2066 mntopt_t *tmplist; 2067 2068 tmpcnt = optbl2->mo_count; 2069 tmplist = optbl2->mo_list; 2070 optbl2->mo_count = optbl1->mo_count; 2071 optbl2->mo_list = optbl1->mo_list; 2072 optbl1->mo_count = tmpcnt; 2073 optbl1->mo_list = tmplist; 2074 } 2075 2076 static void 2077 vfs_swapopttbl(mntopts_t *optbl1, mntopts_t *optbl2) 2078 { 2079 vfs_list_lock(); 2080 vfs_swapopttbl_nolock(optbl1, optbl2); 2081 vfs_mnttab_modtimeupd(); 2082 vfs_list_unlock(); 2083 } 2084 2085 static char ** 2086 vfs_copycancelopt_extend(char **const moc, int extend) 2087 { 2088 int i = 0; 2089 int j; 2090 char **result; 2091 2092 if (moc != NULL) { 2093 for (; moc[i] != NULL; i++) 2094 /* count number of options to cancel */; 2095 } 2096 2097 if (i + extend == 0) 2098 return (NULL); 2099 2100 result = kmem_alloc((i + extend + 1) * sizeof (char *), KM_SLEEP); 2101 2102 for (j = 0; j < i; j++) { 2103 result[j] = kmem_alloc(strlen(moc[j]) + 1, KM_SLEEP); 2104 (void) strcpy(result[j], moc[j]); 2105 } 2106 for (; j <= i + extend; j++) 2107 result[j] = NULL; 2108 2109 return (result); 2110 } 2111 2112 static void 2113 vfs_copyopt(const mntopt_t *s, mntopt_t *d) 2114 { 2115 char *sp, *dp; 2116 2117 d->mo_flags = s->mo_flags; 2118 d->mo_data = s->mo_data; 2119 sp = s->mo_name; 2120 if (sp != NULL) { 2121 dp = kmem_alloc(strlen(sp) + 1, KM_SLEEP); 2122 (void) strcpy(dp, sp); 2123 d->mo_name = dp; 2124 } else { 2125 d->mo_name = NULL; /* should never happen */ 2126 } 2127 2128 d->mo_cancel = vfs_copycancelopt_extend(s->mo_cancel, 0); 2129 2130 sp = s->mo_arg; 2131 if (sp != NULL) { 2132 dp = kmem_alloc(strlen(sp) + 1, KM_SLEEP); 2133 (void) strcpy(dp, sp); 2134 d->mo_arg = dp; 2135 } else { 2136 d->mo_arg = NULL; 2137 } 2138 } 2139 2140 /* 2141 * Copy a mount options table, possibly allocating some spare 2142 * slots at the end. It is permissible to copy_extend the NULL table. 2143 */ 2144 static void 2145 vfs_copyopttbl_extend(const mntopts_t *smo, mntopts_t *dmo, int extra) 2146 { 2147 uint_t i, count; 2148 mntopt_t *motbl; 2149 2150 /* 2151 * Clear out any existing stuff in the options table being initialized 2152 */ 2153 vfs_freeopttbl(dmo); 2154 count = (smo == NULL) ? 
0 : smo->mo_count; 2155 if ((count + extra) == 0) /* nothing to do */ 2156 return; 2157 dmo->mo_count = count + extra; 2158 motbl = kmem_zalloc((count + extra) * sizeof (mntopt_t), KM_SLEEP); 2159 dmo->mo_list = motbl; 2160 for (i = 0; i < count; i++) { 2161 vfs_copyopt(&smo->mo_list[i], &motbl[i]); 2162 } 2163 for (i = count; i < count + extra; i++) { 2164 motbl[i].mo_flags = MO_EMPTY; 2165 } 2166 } 2167 2168 /* 2169 * Copy a mount options table. 2170 * 2171 * This function is *not* for general use by filesystems. 2172 * 2173 * Note: caller is responsible for locking the vfs list, if needed, 2174 * to protect smo and dmo. 2175 */ 2176 void 2177 vfs_copyopttbl(const mntopts_t *smo, mntopts_t *dmo) 2178 { 2179 vfs_copyopttbl_extend(smo, dmo, 0); 2180 } 2181 2182 static char ** 2183 vfs_mergecancelopts(const mntopt_t *mop1, const mntopt_t *mop2) 2184 { 2185 int c1 = 0; 2186 int c2 = 0; 2187 char **result; 2188 char **sp1, **sp2, **dp; 2189 2190 /* 2191 * First we count both lists of cancel options. 2192 * If either is NULL or has no elements, we return a copy of 2193 * the other. 2194 */ 2195 if (mop1->mo_cancel != NULL) { 2196 for (; mop1->mo_cancel[c1] != NULL; c1++) 2197 /* count cancel options in mop1 */; 2198 } 2199 2200 if (c1 == 0) 2201 return (vfs_copycancelopt_extend(mop2->mo_cancel, 0)); 2202 2203 if (mop2->mo_cancel != NULL) { 2204 for (; mop2->mo_cancel[c2] != NULL; c2++) 2205 /* count cancel options in mop2 */; 2206 } 2207 2208 result = vfs_copycancelopt_extend(mop1->mo_cancel, c2); 2209 2210 if (c2 == 0) 2211 return (result); 2212 2213 /* 2214 * When we get here, we've got two sets of cancel options; 2215 * we need to merge the two sets. We know that the result 2216 * array has "c1+c2+1" entries and in the end we might shrink 2217 * it. 2218 * Result now has a copy of the c1 entries from mop1; we'll 2219 * now lookup all the entries of mop2 in mop1 and copy it if 2220 * it is unique. 2221 * This operation is O(n^2) but it's only called once per 2222 * filesystem per duplicate option. This is a situation 2223 * which doesn't arise with the filesystems in ON and 2224 * n is generally 1. 2225 */ 2226 2227 dp = &result[c1]; 2228 for (sp2 = mop2->mo_cancel; *sp2 != NULL; sp2++) { 2229 for (sp1 = mop1->mo_cancel; *sp1 != NULL; sp1++) { 2230 if (strcmp(*sp1, *sp2) == 0) 2231 break; 2232 } 2233 if (*sp1 == NULL) { 2234 /* 2235 * Option *sp2 not found in mop1, so copy it. 2236 * The calls to vfs_copycancelopt_extend() 2237 * guarantee that there's enough room. 2238 */ 2239 *dp = kmem_alloc(strlen(*sp2) + 1, KM_SLEEP); 2240 (void) strcpy(*dp++, *sp2); 2241 } 2242 } 2243 if (dp != &result[c1+c2]) { 2244 size_t bytes = (dp - result + 1) * sizeof (char *); 2245 char **nres = kmem_alloc(bytes, KM_SLEEP); 2246 2247 bcopy(result, nres, bytes); 2248 kmem_free(result, (c1 + c2 + 1) * sizeof (char *)); 2249 result = nres; 2250 } 2251 return (result); 2252 } 2253 2254 /* 2255 * Merge two mount option tables (outer and inner) into one. This is very 2256 * similar to "merging" global variables and automatic variables in C. 2257 * 2258 * This isn't (and doesn't have to be) fast. 2259 * 2260 * This function is *not* for general use by filesystems. 2261 * 2262 * Note: caller is responsible for locking the vfs list, if needed, 2263 * to protect omo, imo & dmo. 
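 *
 * For illustration (hypothetical tables): if omo holds the generic
 * options { "ro", "rw", "suid" } and imo holds a file-system-specific
 * { "rw", "logging" }, the merged dmo holds { "ro", "rw", "suid",
 * "logging" }, where the "rw" slot is taken from imo (the inner
 * definition shadows the outer one, with the two cancellation lists
 * merged), just as an automatic variable shadows a global.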
2264 */ 2265 void 2266 vfs_mergeopttbl(const mntopts_t *omo, const mntopts_t *imo, mntopts_t *dmo) 2267 { 2268 uint_t i, count; 2269 mntopt_t *mop, *motbl; 2270 uint_t freeidx; 2271 2272 /* 2273 * First determine how much space we need to allocate. 2274 */ 2275 count = omo->mo_count; 2276 for (i = 0; i < imo->mo_count; i++) { 2277 if (imo->mo_list[i].mo_flags & MO_EMPTY) 2278 continue; 2279 if (vfs_hasopt(omo, imo->mo_list[i].mo_name) == NULL) 2280 count++; 2281 } 2282 ASSERT(count >= omo->mo_count && 2283 count <= omo->mo_count + imo->mo_count); 2284 motbl = kmem_alloc(count * sizeof (mntopt_t), KM_SLEEP); 2285 for (i = 0; i < omo->mo_count; i++) 2286 vfs_copyopt(&omo->mo_list[i], &motbl[i]); 2287 freeidx = omo->mo_count; 2288 for (i = 0; i < imo->mo_count; i++) { 2289 if (imo->mo_list[i].mo_flags & MO_EMPTY) 2290 continue; 2291 if ((mop = vfs_hasopt(omo, imo->mo_list[i].mo_name)) != NULL) { 2292 char **newcanp; 2293 uint_t index = mop - omo->mo_list; 2294 2295 newcanp = vfs_mergecancelopts(mop, &motbl[index]); 2296 2297 vfs_freeopt(&motbl[index]); 2298 vfs_copyopt(&imo->mo_list[i], &motbl[index]); 2299 2300 vfs_freecancelopt(motbl[index].mo_cancel); 2301 motbl[index].mo_cancel = newcanp; 2302 } else { 2303 /* 2304 * If it's a new option, just copy it over to the first 2305 * free location. 2306 */ 2307 vfs_copyopt(&imo->mo_list[i], &motbl[freeidx++]); 2308 } 2309 } 2310 dmo->mo_count = count; 2311 dmo->mo_list = motbl; 2312 } 2313 2314 /* 2315 * Functions to set and clear mount options in a mount options table. 2316 */ 2317 2318 /* 2319 * Clear a mount option, if it exists. 2320 * 2321 * The update_mnttab arg indicates whether mops is part of a vfs that is on 2322 * the vfs list. 2323 */ 2324 static void 2325 vfs_clearmntopt_nolock(mntopts_t *mops, const char *opt, int update_mnttab) 2326 { 2327 struct mntopt *mop; 2328 uint_t i, count; 2329 2330 ASSERT(!update_mnttab || RW_WRITE_HELD(&vfslist)); 2331 2332 count = mops->mo_count; 2333 for (i = 0; i < count; i++) { 2334 mop = &mops->mo_list[i]; 2335 2336 if (mop->mo_flags & MO_EMPTY) 2337 continue; 2338 if (strcmp(opt, mop->mo_name)) 2339 continue; 2340 mop->mo_flags &= ~MO_SET; 2341 if (mop->mo_arg != NULL) { 2342 kmem_free(mop->mo_arg, strlen(mop->mo_arg) + 1); 2343 } 2344 mop->mo_arg = NULL; 2345 if (update_mnttab) 2346 vfs_mnttab_modtimeupd(); 2347 break; 2348 } 2349 } 2350 2351 void 2352 vfs_clearmntopt(struct vfs *vfsp, const char *opt) 2353 { 2354 int gotlock = 0; 2355 2356 if (VFS_ON_LIST(vfsp)) { 2357 gotlock = 1; 2358 vfs_list_lock(); 2359 } 2360 vfs_clearmntopt_nolock(&vfsp->vfs_mntopts, opt, gotlock); 2361 if (gotlock) 2362 vfs_list_unlock(); 2363 } 2364 2365 2366 /* 2367 * Set a mount option on. If it's not found in the table, it's silently 2368 * ignored. If the option has MO_IGNORE set, it is still set unless the 2369 * VFS_NOFORCEOPT bit is set in the flags. Also, VFS_DISPLAY/VFS_NODISPLAY flag 2370 * bits can be used to toggle the MO_NODISPLAY bit for the option. 2371 * If the VFS_CREATEOPT flag bit is set then the first option slot with 2372 * MO_EMPTY set is created as the option passed in. 2373 * 2374 * The update_mnttab arg indicates whether mops is part of a vfs that is on 2375 * the vfs list. 
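 *
 * For illustration (hypothetical call): a file system that wants the
 * mnttab entry for an already-mounted vfs to show "logging" could use
 * the locked wrapper vfs_setmntopt() below:
 *
 *	vfs_setmntopt(vfsp, "logging", NULL, 0);
 *
 * This finds the "logging" slot, marks it MO_SET, and clears any
 * options named in that slot's mo_cancel list (e.g. "nologging").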
2376 */ 2377 static void 2378 vfs_setmntopt_nolock(mntopts_t *mops, const char *opt, 2379 const char *arg, int flags, int update_mnttab) 2380 { 2381 mntopt_t *mop; 2382 uint_t i, count; 2383 char *sp; 2384 2385 ASSERT(!update_mnttab || RW_WRITE_HELD(&vfslist)); 2386 2387 if (flags & VFS_CREATEOPT) { 2388 if (vfs_hasopt(mops, opt) != NULL) { 2389 flags &= ~VFS_CREATEOPT; 2390 } 2391 } 2392 count = mops->mo_count; 2393 for (i = 0; i < count; i++) { 2394 mop = &mops->mo_list[i]; 2395 2396 if (mop->mo_flags & MO_EMPTY) { 2397 if ((flags & VFS_CREATEOPT) == 0) 2398 continue; 2399 sp = kmem_alloc(strlen(opt) + 1, KM_SLEEP); 2400 (void) strcpy(sp, opt); 2401 mop->mo_name = sp; 2402 if (arg != NULL) 2403 mop->mo_flags = MO_HASVALUE; 2404 else 2405 mop->mo_flags = 0; 2406 } else if (strcmp(opt, mop->mo_name)) { 2407 continue; 2408 } 2409 if ((mop->mo_flags & MO_IGNORE) && (flags & VFS_NOFORCEOPT)) 2410 break; 2411 if (arg != NULL && (mop->mo_flags & MO_HASVALUE) != 0) { 2412 sp = kmem_alloc(strlen(arg) + 1, KM_SLEEP); 2413 (void) strcpy(sp, arg); 2414 } else { 2415 sp = NULL; 2416 } 2417 if (mop->mo_arg != NULL) 2418 kmem_free(mop->mo_arg, strlen(mop->mo_arg) + 1); 2419 mop->mo_arg = sp; 2420 if (flags & VFS_DISPLAY) 2421 mop->mo_flags &= ~MO_NODISPLAY; 2422 if (flags & VFS_NODISPLAY) 2423 mop->mo_flags |= MO_NODISPLAY; 2424 mop->mo_flags |= MO_SET; 2425 if (mop->mo_cancel != NULL) { 2426 char **cp; 2427 2428 for (cp = mop->mo_cancel; *cp != NULL; cp++) 2429 vfs_clearmntopt_nolock(mops, *cp, 0); 2430 } 2431 if (update_mnttab) 2432 vfs_mnttab_modtimeupd(); 2433 break; 2434 } 2435 } 2436 2437 void 2438 vfs_setmntopt(struct vfs *vfsp, const char *opt, const char *arg, int flags) 2439 { 2440 int gotlock = 0; 2441 2442 if (VFS_ON_LIST(vfsp)) { 2443 gotlock = 1; 2444 vfs_list_lock(); 2445 } 2446 vfs_setmntopt_nolock(&vfsp->vfs_mntopts, opt, arg, flags, gotlock); 2447 if (gotlock) 2448 vfs_list_unlock(); 2449 } 2450 2451 2452 /* 2453 * Add a "tag" option to a mounted file system's options list. 2454 * 2455 * Note: caller is responsible for locking the vfs list, if needed, 2456 * to protect mops. 2457 */ 2458 static mntopt_t * 2459 vfs_addtag(mntopts_t *mops, const char *tag) 2460 { 2461 uint_t count; 2462 mntopt_t *mop, *motbl; 2463 2464 count = mops->mo_count + 1; 2465 motbl = kmem_zalloc(count * sizeof (mntopt_t), KM_SLEEP); 2466 if (mops->mo_count) { 2467 size_t len = (count - 1) * sizeof (mntopt_t); 2468 2469 bcopy(mops->mo_list, motbl, len); 2470 kmem_free(mops->mo_list, len); 2471 } 2472 mops->mo_count = count; 2473 mops->mo_list = motbl; 2474 mop = &motbl[count - 1]; 2475 mop->mo_flags = MO_TAG; 2476 mop->mo_name = kmem_alloc(strlen(tag) + 1, KM_SLEEP); 2477 (void) strcpy(mop->mo_name, tag); 2478 return (mop); 2479 } 2480 2481 /* 2482 * Allow users to set arbitrary "tags" in a vfs's mount options. 2483 * Broader use within the kernel is discouraged. 
2484 */ 2485 int 2486 vfs_settag(uint_t major, uint_t minor, const char *mntpt, const char *tag, 2487 cred_t *cr) 2488 { 2489 vfs_t *vfsp; 2490 mntopts_t *mops; 2491 mntopt_t *mop; 2492 int found = 0; 2493 dev_t dev = makedevice(major, minor); 2494 int err = 0; 2495 char *buf = kmem_alloc(MAX_MNTOPT_STR, KM_SLEEP); 2496 2497 /* 2498 * Find the desired mounted file system 2499 */ 2500 vfs_list_lock(); 2501 vfsp = rootvfs; 2502 do { 2503 if (vfsp->vfs_dev == dev && 2504 strcmp(mntpt, refstr_value(vfsp->vfs_mntpt)) == 0) { 2505 found = 1; 2506 break; 2507 } 2508 vfsp = vfsp->vfs_next; 2509 } while (vfsp != rootvfs); 2510 2511 if (!found) { 2512 err = EINVAL; 2513 goto out; 2514 } 2515 err = secpolicy_fs_config(cr, vfsp); 2516 if (err != 0) 2517 goto out; 2518 2519 mops = &vfsp->vfs_mntopts; 2520 /* 2521 * Add tag if it doesn't already exist 2522 */ 2523 if ((mop = vfs_hasopt(mops, tag)) == NULL) { 2524 int len; 2525 2526 (void) vfs_buildoptionstr(mops, buf, MAX_MNTOPT_STR); 2527 len = strlen(buf); 2528 if (len + strlen(tag) + 2 > MAX_MNTOPT_STR) { 2529 err = ENAMETOOLONG; 2530 goto out; 2531 } 2532 mop = vfs_addtag(mops, tag); 2533 } 2534 if ((mop->mo_flags & MO_TAG) == 0) { 2535 err = EINVAL; 2536 goto out; 2537 } 2538 vfs_setmntopt_nolock(mops, tag, NULL, 0, 1); 2539 out: 2540 vfs_list_unlock(); 2541 kmem_free(buf, MAX_MNTOPT_STR); 2542 return (err); 2543 } 2544 2545 /* 2546 * Allow users to remove arbitrary "tags" in a vfs's mount options. 2547 * Broader use within the kernel is discouraged. 2548 */ 2549 int 2550 vfs_clrtag(uint_t major, uint_t minor, const char *mntpt, const char *tag, 2551 cred_t *cr) 2552 { 2553 vfs_t *vfsp; 2554 mntopt_t *mop; 2555 int found = 0; 2556 dev_t dev = makedevice(major, minor); 2557 int err = 0; 2558 2559 /* 2560 * Find the desired mounted file system 2561 */ 2562 vfs_list_lock(); 2563 vfsp = rootvfs; 2564 do { 2565 if (vfsp->vfs_dev == dev && 2566 strcmp(mntpt, refstr_value(vfsp->vfs_mntpt)) == 0) { 2567 found = 1; 2568 break; 2569 } 2570 vfsp = vfsp->vfs_next; 2571 } while (vfsp != rootvfs); 2572 2573 if (!found) { 2574 err = EINVAL; 2575 goto out; 2576 } 2577 err = secpolicy_fs_config(cr, vfsp); 2578 if (err != 0) 2579 goto out; 2580 2581 if ((mop = vfs_hasopt(&vfsp->vfs_mntopts, tag)) == NULL) { 2582 err = EINVAL; 2583 goto out; 2584 } 2585 if ((mop->mo_flags & MO_TAG) == 0) { 2586 err = EINVAL; 2587 goto out; 2588 } 2589 vfs_clearmntopt_nolock(&vfsp->vfs_mntopts, tag, 1); 2590 out: 2591 vfs_list_unlock(); 2592 return (err); 2593 } 2594 2595 /* 2596 * Function to parse an option string and fill in a mount options table. 2597 * Unknown options are silently ignored. The input option string is modified 2598 * by replacing separators with nulls. If the create flag is set, options 2599 * not found in the table are just added on the fly. The table must have 2600 * an option slot marked MO_EMPTY to add an option on the fly. 2601 * 2602 * This function is *not* for general use by filesystems. 2603 * 2604 * Note: caller is responsible for locking the vfs list, if needed, 2605 * to protect mops.. 
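 *
 * For illustration (hypothetical buffer): with a table mops prepared by
 * vfs_createopttbl() and a writable string such as
 *
 *	char osp[] = "rw,quota=on,nosuid";
 *
 * a call to
 *
 *	vfs_parsemntopts(&mops, osp, 0);
 *
 * isolates each option in turn by overwriting the ',' (and the '=' in
 * "quota=on") with a NUL, sets it in mops ("quota" receives the value
 * "on"), and restores the separators before returning, leaving osp
 * intact.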
2606 */ 2607 void 2608 vfs_parsemntopts(mntopts_t *mops, char *osp, int create) 2609 { 2610 char *s = osp, *p, *nextop, *valp, *cp, *ep; 2611 int setflg = VFS_NOFORCEOPT; 2612 2613 if (osp == NULL) 2614 return; 2615 while (*s != '\0') { 2616 p = strchr(s, ','); /* find next option */ 2617 if (p == NULL) { 2618 cp = NULL; 2619 p = s + strlen(s); 2620 } else { 2621 cp = p; /* save location of comma */ 2622 *p++ = '\0'; /* mark end and point to next option */ 2623 } 2624 nextop = p; 2625 p = strchr(s, '='); /* look for value */ 2626 if (p == NULL) { 2627 valp = NULL; /* no value supplied */ 2628 } else { 2629 ep = p; /* save location of equals */ 2630 *p++ = '\0'; /* end option and point to value */ 2631 valp = p; 2632 } 2633 /* 2634 * set option into options table 2635 */ 2636 if (create) 2637 setflg |= VFS_CREATEOPT; 2638 vfs_setmntopt_nolock(mops, s, valp, setflg, 0); 2639 if (cp != NULL) 2640 *cp = ','; /* restore the comma */ 2641 if (valp != NULL) 2642 *ep = '='; /* restore the equals */ 2643 s = nextop; 2644 } 2645 } 2646 2647 /* 2648 * Function to inquire if an option exists in a mount options table. 2649 * Returns a pointer to the option if it exists, else NULL. 2650 * 2651 * This function is *not* for general use by filesystems. 2652 * 2653 * Note: caller is responsible for locking the vfs list, if needed, 2654 * to protect mops. 2655 */ 2656 struct mntopt * 2657 vfs_hasopt(const mntopts_t *mops, const char *opt) 2658 { 2659 struct mntopt *mop; 2660 uint_t i, count; 2661 2662 count = mops->mo_count; 2663 for (i = 0; i < count; i++) { 2664 mop = &mops->mo_list[i]; 2665 2666 if (mop->mo_flags & MO_EMPTY) 2667 continue; 2668 if (strcmp(opt, mop->mo_name) == 0) 2669 return (mop); 2670 } 2671 return (NULL); 2672 } 2673 2674 /* 2675 * Function to inquire if an option is set in a mount options table. 2676 * Returns non-zero if set and fills in the arg pointer with a pointer to 2677 * the argument string or NULL if there is no argument string. 2678 */ 2679 static int 2680 vfs_optionisset_nolock(const mntopts_t *mops, const char *opt, char **argp) 2681 { 2682 struct mntopt *mop; 2683 uint_t i, count; 2684 2685 count = mops->mo_count; 2686 for (i = 0; i < count; i++) { 2687 mop = &mops->mo_list[i]; 2688 2689 if (mop->mo_flags & MO_EMPTY) 2690 continue; 2691 if (strcmp(opt, mop->mo_name)) 2692 continue; 2693 if ((mop->mo_flags & MO_SET) == 0) 2694 return (0); 2695 if (argp != NULL && (mop->mo_flags & MO_HASVALUE) != 0) 2696 *argp = mop->mo_arg; 2697 return (1); 2698 } 2699 return (0); 2700 } 2701 2702 2703 int 2704 vfs_optionisset(const struct vfs *vfsp, const char *opt, char **argp) 2705 { 2706 int ret; 2707 2708 vfs_list_read_lock(); 2709 ret = vfs_optionisset_nolock(&vfsp->vfs_mntopts, opt, argp); 2710 vfs_list_unlock(); 2711 return (ret); 2712 } 2713 2714 2715 /* 2716 * Construct a comma separated string of the options set in the given 2717 * mount table, return the string in the given buffer. Return non-zero if 2718 * the buffer would overflow. 2719 * 2720 * This function is *not* for general use by filesystems. 2721 * 2722 * Note: caller is responsible for locking the vfs list, if needed, 2723 * to protect mp. 
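 *
 * For illustration (hypothetical call): to log the options of a vfs
 * whose option table is already protected by the caller,
 *
 *	char buf[MAX_MNTOPT_STR];
 *
 *	if (vfs_buildoptionstr(&vfsp->vfs_mntopts, buf, sizeof (buf)) == 0)
 *		cmn_err(CE_CONT, "options: %s\n", buf);
 *
 * A non-zero return (EOVERFLOW) means buf was too small to hold the
 * comma-separated list.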
2724 */ 2725 int 2726 vfs_buildoptionstr(const mntopts_t *mp, char *buf, int len) 2727 { 2728 char *cp; 2729 uint_t i; 2730 2731 buf[0] = '\0'; 2732 cp = buf; 2733 for (i = 0; i < mp->mo_count; i++) { 2734 struct mntopt *mop; 2735 2736 mop = &mp->mo_list[i]; 2737 if (mop->mo_flags & MO_SET) { 2738 int optlen, comma = 0; 2739 2740 if (buf[0] != '\0') 2741 comma = 1; 2742 optlen = strlen(mop->mo_name); 2743 if (strlen(buf) + comma + optlen + 1 > len) 2744 goto err; 2745 if (comma) 2746 *cp++ = ','; 2747 (void) strcpy(cp, mop->mo_name); 2748 cp += optlen; 2749 /* 2750 * Append option value if there is one 2751 */ 2752 if (mop->mo_arg != NULL) { 2753 int arglen; 2754 2755 arglen = strlen(mop->mo_arg); 2756 if (strlen(buf) + arglen + 2 > len) 2757 goto err; 2758 *cp++ = '='; 2759 (void) strcpy(cp, mop->mo_arg); 2760 cp += arglen; 2761 } 2762 } 2763 } 2764 return (0); 2765 err: 2766 return (EOVERFLOW); 2767 } 2768 2769 static void 2770 vfs_freecancelopt(char **moc) 2771 { 2772 if (moc != NULL) { 2773 int ccnt = 0; 2774 char **cp; 2775 2776 for (cp = moc; *cp != NULL; cp++) { 2777 kmem_free(*cp, strlen(*cp) + 1); 2778 ccnt++; 2779 } 2780 kmem_free(moc, (ccnt + 1) * sizeof (char *)); 2781 } 2782 } 2783 2784 static void 2785 vfs_freeopt(mntopt_t *mop) 2786 { 2787 if (mop->mo_name != NULL) 2788 kmem_free(mop->mo_name, strlen(mop->mo_name) + 1); 2789 2790 vfs_freecancelopt(mop->mo_cancel); 2791 2792 if (mop->mo_arg != NULL) 2793 kmem_free(mop->mo_arg, strlen(mop->mo_arg) + 1); 2794 } 2795 2796 /* 2797 * Free a mount options table 2798 * 2799 * This function is *not* for general use by filesystems. 2800 * 2801 * Note: caller is responsible for locking the vfs list, if needed, 2802 * to protect mp. 2803 */ 2804 void 2805 vfs_freeopttbl(mntopts_t *mp) 2806 { 2807 uint_t i, count; 2808 2809 count = mp->mo_count; 2810 for (i = 0; i < count; i++) { 2811 vfs_freeopt(&mp->mo_list[i]); 2812 } 2813 if (count) { 2814 kmem_free(mp->mo_list, sizeof (mntopt_t) * count); 2815 mp->mo_count = 0; 2816 mp->mo_list = NULL; 2817 } 2818 } 2819 2820 2821 /* ARGSUSED */ 2822 static int 2823 vfs_mntdummyread(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cred, 2824 caller_context_t *ct) 2825 { 2826 return (0); 2827 } 2828 2829 /* ARGSUSED */ 2830 static int 2831 vfs_mntdummywrite(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cred, 2832 caller_context_t *ct) 2833 { 2834 return (0); 2835 } 2836 2837 /* 2838 * The dummy vnode is currently used only by file events notification 2839 * module which is just interested in the timestamps. 2840 */ 2841 /* ARGSUSED */ 2842 static int 2843 vfs_mntdummygetattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr, 2844 caller_context_t *ct) 2845 { 2846 bzero(vap, sizeof (vattr_t)); 2847 vap->va_type = VREG; 2848 vap->va_nlink = 1; 2849 vap->va_ctime = vfs_mnttab_ctime; 2850 /* 2851 * it is ok to just copy mtime as the time will be monotonically 2852 * increasing. 
2853 */ 2854 vap->va_mtime = vfs_mnttab_mtime; 2855 vap->va_atime = vap->va_mtime; 2856 return (0); 2857 } 2858 2859 static void 2860 vfs_mnttabvp_setup(void) 2861 { 2862 vnode_t *tvp; 2863 vnodeops_t *vfs_mntdummyvnops; 2864 const fs_operation_def_t mnt_dummyvnodeops_template[] = { 2865 VOPNAME_READ, { .vop_read = vfs_mntdummyread }, 2866 VOPNAME_WRITE, { .vop_write = vfs_mntdummywrite }, 2867 VOPNAME_GETATTR, { .vop_getattr = vfs_mntdummygetattr }, 2868 VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support }, 2869 NULL, NULL 2870 }; 2871 2872 if (vn_make_ops("mnttab", mnt_dummyvnodeops_template, 2873 &vfs_mntdummyvnops) != 0) { 2874 cmn_err(CE_WARN, "vfs_mnttabvp_setup: vn_make_ops failed"); 2875 /* Shouldn't happen, but not bad enough to panic */ 2876 return; 2877 } 2878 2879 /* 2880 * A global dummy vnode is allocated to represent mntfs files. 2881 * The mntfs file (/etc/mnttab) can be monitored for file events 2882 * and receive an event when mnttab changes. Dummy VOP calls 2883 * will be made on this vnode. The file events notification module 2884 * intercepts this vnode and delivers relevant events. 2885 */ 2886 tvp = vn_alloc(KM_SLEEP); 2887 tvp->v_flag = VNOMOUNT|VNOMAP|VNOSWAP|VNOCACHE; 2888 vn_setops(tvp, vfs_mntdummyvnops); 2889 tvp->v_type = VREG; 2890 /* 2891 * The mnt dummy ops do not reference v_data. 2892 * No other module intercepting this vnode should either. 2893 * Just set it to point to itself. 2894 */ 2895 tvp->v_data = (caddr_t)tvp; 2896 tvp->v_vfsp = rootvfs; 2897 vfs_mntdummyvp = tvp; 2898 } 2899 2900 /* 2901 * performs fake read/write ops 2902 */ 2903 static void 2904 vfs_mnttab_rwop(int rw) 2905 { 2906 struct uio uio; 2907 struct iovec iov; 2908 char buf[1]; 2909 2910 if (vfs_mntdummyvp == NULL) 2911 return; 2912 2913 bzero(&uio, sizeof (uio)); 2914 bzero(&iov, sizeof (iov)); 2915 iov.iov_base = buf; 2916 iov.iov_len = 0; 2917 uio.uio_iov = &iov; 2918 uio.uio_iovcnt = 1; 2919 uio.uio_loffset = 0; 2920 uio.uio_segflg = UIO_SYSSPACE; 2921 uio.uio_resid = 0; 2922 if (rw) { 2923 (void) VOP_WRITE(vfs_mntdummyvp, &uio, 0, kcred, NULL); 2924 } else { 2925 (void) VOP_READ(vfs_mntdummyvp, &uio, 0, kcred, NULL); 2926 } 2927 } 2928 2929 /* 2930 * Generate a write operation. 2931 */ 2932 void 2933 vfs_mnttab_writeop(void) 2934 { 2935 vfs_mnttab_rwop(1); 2936 } 2937 2938 /* 2939 * Generate a read operation. 2940 */ 2941 void 2942 vfs_mnttab_readop(void) 2943 { 2944 vfs_mnttab_rwop(0); 2945 } 2946 2947 /* 2948 * Free any mnttab information recorded in the vfs struct. 2949 * The vfs must not be on the vfs list. 2950 */ 2951 static void 2952 vfs_freemnttab(struct vfs *vfsp) 2953 { 2954 ASSERT(!VFS_ON_LIST(vfsp)); 2955 2956 /* 2957 * Free device and mount point information 2958 */ 2959 if (vfsp->vfs_mntpt != NULL) { 2960 refstr_rele(vfsp->vfs_mntpt); 2961 vfsp->vfs_mntpt = NULL; 2962 } 2963 if (vfsp->vfs_resource != NULL) { 2964 refstr_rele(vfsp->vfs_resource); 2965 vfsp->vfs_resource = NULL; 2966 } 2967 /* 2968 * Now free mount options information 2969 */ 2970 vfs_freeopttbl(&vfsp->vfs_mntopts); 2971 } 2972 2973 /* 2974 * Return the last mnttab modification time 2975 */ 2976 void 2977 vfs_mnttab_modtime(timespec_t *ts) 2978 { 2979 ASSERT(RW_LOCK_HELD(&vfslist)); 2980 *ts = vfs_mnttab_mtime; 2981 } 2982 2983 /* 2984 * See if mnttab is changed 2985 */ 2986 void 2987 vfs_mnttab_poll(timespec_t *old, struct pollhead **phpp) 2988 { 2989 int changed; 2990 2991 *phpp = (struct pollhead *)NULL; 2992 2993 /* 2994 * Note: don't grab vfs list lock before accessing vfs_mnttab_mtime. 
2995 * Can lead to deadlock against vfs_mnttab_modtimeupd(). It is safe 2996 * to not grab the vfs list lock because tv_sec is monotonically 2997 * increasing. 2998 */ 2999 3000 changed = (old->tv_nsec != vfs_mnttab_mtime.tv_nsec) || 3001 (old->tv_sec != vfs_mnttab_mtime.tv_sec); 3002 if (!changed) { 3003 *phpp = &vfs_pollhd; 3004 } 3005 } 3006 3007 /* Provide a unique and monotonically-increasing timestamp. */ 3008 void 3009 vfs_mono_time(timespec_t *ts) 3010 { 3011 static volatile hrtime_t hrt; /* The saved time. */ 3012 hrtime_t newhrt, oldhrt; /* For effecting the CAS. */ 3013 timespec_t newts; 3014 3015 /* 3016 * Try gethrestime() first, but be prepared to fabricate a sensible 3017 * answer at the first sign of any trouble. 3018 */ 3019 gethrestime(&newts); 3020 newhrt = ts2hrt(&newts); 3021 for (;;) { 3022 oldhrt = hrt; 3023 if (newhrt <= hrt) 3024 newhrt = hrt + 1; 3025 if (atomic_cas_64((uint64_t *)&hrt, oldhrt, newhrt) == oldhrt) 3026 break; 3027 } 3028 hrt2ts(newhrt, ts); 3029 } 3030 3031 /* 3032 * Update the mnttab modification time and wake up any waiters for 3033 * mnttab changes 3034 */ 3035 void 3036 vfs_mnttab_modtimeupd() 3037 { 3038 hrtime_t oldhrt, newhrt; 3039 3040 ASSERT(RW_WRITE_HELD(&vfslist)); 3041 oldhrt = ts2hrt(&vfs_mnttab_mtime); 3042 gethrestime(&vfs_mnttab_mtime); 3043 newhrt = ts2hrt(&vfs_mnttab_mtime); 3044 if (oldhrt == (hrtime_t)0) 3045 vfs_mnttab_ctime = vfs_mnttab_mtime; 3046 /* 3047 * Attempt to provide unique mtime (like uniqtime but not). 3048 */ 3049 if (newhrt == oldhrt) { 3050 newhrt++; 3051 hrt2ts(newhrt, &vfs_mnttab_mtime); 3052 } 3053 pollwakeup(&vfs_pollhd, (short)POLLRDBAND); 3054 vfs_mnttab_writeop(); 3055 } 3056 3057 int 3058 dounmount(struct vfs *vfsp, int flag, cred_t *cr) 3059 { 3060 vnode_t *coveredvp; 3061 int error; 3062 extern void teardown_vopstats(vfs_t *); 3063 3064 /* 3065 * Get covered vnode. This will be NULL if the vfs is not linked 3066 * into the file system name space (i.e., domount() with MNT_NOSPICE). 3067 */ 3068 coveredvp = vfsp->vfs_vnodecovered; 3069 ASSERT(coveredvp == NULL || vn_vfswlock_held(coveredvp)); 3070 3071 /* 3072 * Purge all dnlc entries for this vfs. 3073 */ 3074 (void) dnlc_purge_vfsp(vfsp, 0); 3075 3076 /* For forcible umount, skip VFS_SYNC() since it may hang */ 3077 if ((flag & MS_FORCE) == 0) 3078 (void) VFS_SYNC(vfsp, 0, cr); 3079 3080 /* 3081 * Lock the vfs to maintain fs status quo during unmount. This 3082 * has to be done after the sync because ufs_update tries to acquire 3083 * the vfs_reflock. 3084 */ 3085 vfs_lock_wait(vfsp); 3086 3087 if (error = VFS_UNMOUNT(vfsp, flag, cr)) { 3088 vfs_unlock(vfsp); 3089 if (coveredvp != NULL) 3090 vn_vfsunlock(coveredvp); 3091 } else if (coveredvp != NULL) { 3092 teardown_vopstats(vfsp); 3093 /* 3094 * vfs_remove() will do a VN_RELE(vfsp->vfs_vnodecovered) 3095 * when it frees vfsp so we do a VN_HOLD() so we can 3096 * continue to use coveredvp afterwards. 3097 */ 3098 VN_HOLD(coveredvp); 3099 vfs_remove(vfsp); 3100 vn_vfsunlock(coveredvp); 3101 VN_RELE(coveredvp); 3102 } else { 3103 teardown_vopstats(vfsp); 3104 /* 3105 * Release the reference to vfs that is not linked 3106 * into the name space. 3107 */ 3108 vfs_unlock(vfsp); 3109 VFS_RELE(vfsp); 3110 } 3111 return (error); 3112 } 3113 3114 3115 /* 3116 * Vfs_unmountall() is called by uadmin() to unmount all 3117 * mounted file systems (except the root file system) during shutdown. 3118 * It follows the existing locking protocol when traversing the vfs list 3119 * to sync and unmount vfses. 
Even though there should be no 3120 * other thread running while the system is shutting down, it is prudent 3121 * to still follow the locking protocol. 3122 */ 3123 void 3124 vfs_unmountall(void) 3125 { 3126 struct vfs *vfsp; 3127 struct vfs *prev_vfsp = NULL; 3128 int error; 3129 3130 /* 3131 * Toss all dnlc entries now so that the per-vfs sync 3132 * and unmount operations don't have to slog through 3133 * a bunch of uninteresting vnodes over and over again. 3134 */ 3135 dnlc_purge(); 3136 3137 vfs_list_lock(); 3138 for (vfsp = rootvfs->vfs_prev; vfsp != rootvfs; vfsp = prev_vfsp) { 3139 prev_vfsp = vfsp->vfs_prev; 3140 3141 if (vfs_lock(vfsp) != 0) 3142 continue; 3143 error = vn_vfswlock(vfsp->vfs_vnodecovered); 3144 vfs_unlock(vfsp); 3145 if (error) 3146 continue; 3147 3148 vfs_list_unlock(); 3149 3150 (void) VFS_SYNC(vfsp, SYNC_CLOSE, CRED()); 3151 (void) dounmount(vfsp, 0, CRED()); 3152 3153 /* 3154 * Since we dropped the vfslist lock above we must 3155 * verify that next_vfsp still exists, else start over. 3156 */ 3157 vfs_list_lock(); 3158 for (vfsp = rootvfs->vfs_prev; 3159 vfsp != rootvfs; vfsp = vfsp->vfs_prev) 3160 if (vfsp == prev_vfsp) 3161 break; 3162 if (vfsp == rootvfs && prev_vfsp != rootvfs) 3163 prev_vfsp = rootvfs->vfs_prev; 3164 } 3165 vfs_list_unlock(); 3166 } 3167 3168 /* 3169 * Called to add an entry to the end of the vfs mount in progress list 3170 */ 3171 void 3172 vfs_addmip(dev_t dev, struct vfs *vfsp) 3173 { 3174 struct ipmnt *mipp; 3175 3176 mipp = (struct ipmnt *)kmem_alloc(sizeof (struct ipmnt), KM_SLEEP); 3177 mipp->mip_next = NULL; 3178 mipp->mip_dev = dev; 3179 mipp->mip_vfsp = vfsp; 3180 mutex_enter(&vfs_miplist_mutex); 3181 if (vfs_miplist_end != NULL) 3182 vfs_miplist_end->mip_next = mipp; 3183 else 3184 vfs_miplist = mipp; 3185 vfs_miplist_end = mipp; 3186 mutex_exit(&vfs_miplist_mutex); 3187 } 3188 3189 /* 3190 * Called to remove an entry from the mount in progress list 3191 * Either because the mount completed or it failed. 3192 */ 3193 void 3194 vfs_delmip(struct vfs *vfsp) 3195 { 3196 struct ipmnt *mipp, *mipprev; 3197 3198 mutex_enter(&vfs_miplist_mutex); 3199 mipprev = NULL; 3200 for (mipp = vfs_miplist; 3201 mipp && mipp->mip_vfsp != vfsp; mipp = mipp->mip_next) { 3202 mipprev = mipp; 3203 } 3204 if (mipp == NULL) 3205 return; /* shouldn't happen */ 3206 if (mipp == vfs_miplist_end) 3207 vfs_miplist_end = mipprev; 3208 if (mipprev == NULL) 3209 vfs_miplist = mipp->mip_next; 3210 else 3211 mipprev->mip_next = mipp->mip_next; 3212 mutex_exit(&vfs_miplist_mutex); 3213 kmem_free(mipp, sizeof (struct ipmnt)); 3214 } 3215 3216 /* 3217 * vfs_add is called by a specific filesystem's mount routine to add 3218 * the new vfs into the vfs list/hash and to cover the mounted-on vnode. 3219 * The vfs should already have been locked by the caller. 3220 * 3221 * coveredvp is NULL if this is the root. 
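 *
 * For illustration (hypothetical caller): a mount path that has already
 * write-locked the covered vnode with vn_vfswlock() and locked the vfs
 * with vfs_lock_wait() splices the new file system in with
 *
 *	vfs_add(mvp, vfsp, mflag);
 *
 * where mflag carries the MS_RDONLY, MS_NOSUID and MS_NOMNTTAB bits to
 * be reflected in vfsp->vfs_flag.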
3222 */ 3223 void 3224 vfs_add(vnode_t *coveredvp, struct vfs *vfsp, int mflag) 3225 { 3226 int newflag; 3227 3228 ASSERT(vfs_lock_held(vfsp)); 3229 VFS_HOLD(vfsp); 3230 newflag = vfsp->vfs_flag; 3231 if (mflag & MS_RDONLY) 3232 newflag |= VFS_RDONLY; 3233 else 3234 newflag &= ~VFS_RDONLY; 3235 if (mflag & MS_NOSUID) 3236 newflag |= (VFS_NOSETUID|VFS_NODEVICES); 3237 else 3238 newflag &= ~(VFS_NOSETUID|VFS_NODEVICES); 3239 if (mflag & MS_NOMNTTAB) 3240 newflag |= VFS_NOMNTTAB; 3241 else 3242 newflag &= ~VFS_NOMNTTAB; 3243 3244 if (coveredvp != NULL) { 3245 ASSERT(vn_vfswlock_held(coveredvp)); 3246 coveredvp->v_vfsmountedhere = vfsp; 3247 VN_HOLD(coveredvp); 3248 } 3249 vfsp->vfs_vnodecovered = coveredvp; 3250 vfsp->vfs_flag = newflag; 3251 3252 vfs_list_add(vfsp); 3253 } 3254 3255 /* 3256 * Remove a vfs from the vfs list, null out the pointer from the 3257 * covered vnode to the vfs (v_vfsmountedhere), and null out the pointer 3258 * from the vfs to the covered vnode (vfs_vnodecovered). Release the 3259 * reference to the vfs and to the covered vnode. 3260 * 3261 * Called from dounmount after it's confirmed with the file system 3262 * that the unmount is legal. 3263 */ 3264 void 3265 vfs_remove(struct vfs *vfsp) 3266 { 3267 vnode_t *vp; 3268 3269 ASSERT(vfs_lock_held(vfsp)); 3270 3271 /* 3272 * Can't unmount root. Should never happen because fs will 3273 * be busy. 3274 */ 3275 if (vfsp == rootvfs) 3276 panic("vfs_remove: unmounting root"); 3277 3278 vfs_list_remove(vfsp); 3279 3280 /* 3281 * Unhook from the file system name space. 3282 */ 3283 vp = vfsp->vfs_vnodecovered; 3284 ASSERT(vn_vfswlock_held(vp)); 3285 vp->v_vfsmountedhere = NULL; 3286 vfsp->vfs_vnodecovered = NULL; 3287 VN_RELE(vp); 3288 3289 /* 3290 * Release lock and wakeup anybody waiting. 3291 */ 3292 vfs_unlock(vfsp); 3293 VFS_RELE(vfsp); 3294 } 3295 3296 /* 3297 * Lock a filesystem to prevent access to it while mounting, 3298 * unmounting and syncing. Return EBUSY immediately if lock 3299 * can't be acquired. 3300 */ 3301 int 3302 vfs_lock(vfs_t *vfsp) 3303 { 3304 vn_vfslocks_entry_t *vpvfsentry; 3305 3306 vpvfsentry = vn_vfslocks_getlock(vfsp); 3307 if (rwst_tryenter(&vpvfsentry->ve_lock, RW_WRITER)) 3308 return (0); 3309 3310 vn_vfslocks_rele(vpvfsentry); 3311 return (EBUSY); 3312 } 3313 3314 int 3315 vfs_rlock(vfs_t *vfsp) 3316 { 3317 vn_vfslocks_entry_t *vpvfsentry; 3318 3319 vpvfsentry = vn_vfslocks_getlock(vfsp); 3320 3321 if (rwst_tryenter(&vpvfsentry->ve_lock, RW_READER)) 3322 return (0); 3323 3324 vn_vfslocks_rele(vpvfsentry); 3325 return (EBUSY); 3326 } 3327 3328 void 3329 vfs_lock_wait(vfs_t *vfsp) 3330 { 3331 vn_vfslocks_entry_t *vpvfsentry; 3332 3333 vpvfsentry = vn_vfslocks_getlock(vfsp); 3334 rwst_enter(&vpvfsentry->ve_lock, RW_WRITER); 3335 } 3336 3337 void 3338 vfs_rlock_wait(vfs_t *vfsp) 3339 { 3340 vn_vfslocks_entry_t *vpvfsentry; 3341 3342 vpvfsentry = vn_vfslocks_getlock(vfsp); 3343 rwst_enter(&vpvfsentry->ve_lock, RW_READER); 3344 } 3345 3346 /* 3347 * Unlock a locked filesystem. 3348 */ 3349 void 3350 vfs_unlock(vfs_t *vfsp) 3351 { 3352 vn_vfslocks_entry_t *vpvfsentry; 3353 3354 /* 3355 * vfs_unlock will mimic sema_v behaviour to fix 4748018. 3356 * And these changes should remain for the patch changes as it is. 3357 */ 3358 if (panicstr) 3359 return; 3360 3361 /* 3362 * ve_refcount needs to be dropped twice here. 3363 * 1. To release refernce after a call to vfs_locks_getlock() 3364 * 2. To release the reference from the locking routines like 3365 * vfs_rlock_wait/vfs_wlock_wait/vfs_wlock etc,. 
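 *
 * For illustration (hypothetical sequence): a caller pairing
 *
 *	vfs_lock_wait(vfsp);
 *	...
 *	vfs_unlock(vfsp);
 *
 * leaves the entry's ve_refcount unchanged overall: each routine takes
 * a reference through vn_vfslocks_getlock(), and vfs_unlock() drops
 * both its own reference and the one taken by vfs_lock_wait().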
3366 */ 3367 3368 vpvfsentry = vn_vfslocks_getlock(vfsp); 3369 vn_vfslocks_rele(vpvfsentry); 3370 3371 rwst_exit(&vpvfsentry->ve_lock); 3372 vn_vfslocks_rele(vpvfsentry); 3373 } 3374 3375 /* 3376 * Utility routine that allows a filesystem to construct its 3377 * fsid in "the usual way" - by munging some underlying dev_t and 3378 * the filesystem type number into the 64-bit fsid. Note that 3379 * this implicitly relies on dev_t persistence to make filesystem 3380 * id's persistent. 3381 * 3382 * There's nothing to prevent an individual fs from constructing its 3383 * fsid in a different way, and indeed they should. 3384 * 3385 * Since we want fsids to be 32-bit quantities (so that they can be 3386 * exported identically by either 32-bit or 64-bit APIs, as well as 3387 * the fact that fsid's are "known" to NFS), we compress the device 3388 * number given down to 32-bits, and panic if that isn't possible. 3389 */ 3390 void 3391 vfs_make_fsid(fsid_t *fsi, dev_t dev, int val) 3392 { 3393 if (!cmpldev((dev32_t *)&fsi->val[0], dev)) 3394 panic("device number too big for fsid!"); 3395 fsi->val[1] = val; 3396 } 3397 3398 int 3399 vfs_lock_held(vfs_t *vfsp) 3400 { 3401 int held; 3402 vn_vfslocks_entry_t *vpvfsentry; 3403 3404 /* 3405 * vfs_lock_held will mimic sema_held behaviour 3406 * if panicstr is set. And these changes should remain 3407 * for the patch changes as it is. 3408 */ 3409 if (panicstr) 3410 return (1); 3411 3412 vpvfsentry = vn_vfslocks_getlock(vfsp); 3413 held = rwst_lock_held(&vpvfsentry->ve_lock, RW_WRITER); 3414 3415 vn_vfslocks_rele(vpvfsentry); 3416 return (held); 3417 } 3418 3419 struct _kthread * 3420 vfs_lock_owner(vfs_t *vfsp) 3421 { 3422 struct _kthread *owner; 3423 vn_vfslocks_entry_t *vpvfsentry; 3424 3425 /* 3426 * vfs_wlock_held will mimic sema_held behaviour 3427 * if panicstr is set. And these changes should remain 3428 * for the patch changes as it is. 3429 */ 3430 if (panicstr) 3431 return (NULL); 3432 3433 vpvfsentry = vn_vfslocks_getlock(vfsp); 3434 owner = rwst_owner(&vpvfsentry->ve_lock); 3435 3436 vn_vfslocks_rele(vpvfsentry); 3437 return (owner); 3438 } 3439 3440 /* 3441 * vfs list locking. 3442 * 3443 * Rather than manipulate the vfslist lock directly, we abstract into lock 3444 * and unlock routines to allow the locking implementation to be changed for 3445 * clustering. 3446 * 3447 * Whenever the vfs list is modified through its hash links, the overall list 3448 * lock must be obtained before locking the relevant hash bucket. But to see 3449 * whether a given vfs is on the list, it suffices to obtain the lock for the 3450 * hash bucket without getting the overall list lock. (See getvfs() below.) 3451 */ 3452 3453 void 3454 vfs_list_lock() 3455 { 3456 rw_enter(&vfslist, RW_WRITER); 3457 } 3458 3459 void 3460 vfs_list_read_lock() 3461 { 3462 rw_enter(&vfslist, RW_READER); 3463 } 3464 3465 void 3466 vfs_list_unlock() 3467 { 3468 rw_exit(&vfslist); 3469 } 3470 3471 /* 3472 * Low level worker routines for adding entries to and removing entries from 3473 * the vfs list. 
3474 */ 3475 3476 static void 3477 vfs_hash_add(struct vfs *vfsp, int insert_at_head) 3478 { 3479 int vhno; 3480 struct vfs **hp; 3481 dev_t dev; 3482 3483 ASSERT(RW_WRITE_HELD(&vfslist)); 3484 3485 dev = expldev(vfsp->vfs_fsid.val[0]); 3486 vhno = VFSHASH(getmajor(dev), getminor(dev)); 3487 3488 mutex_enter(&rvfs_list[vhno].rvfs_lock); 3489 3490 /* 3491 * Link into the hash table, inserting it at the end, so that LOFS 3492 * with the same fsid as UFS (or other) file systems will not hide the 3493 * UFS. 3494 */ 3495 if (insert_at_head) { 3496 vfsp->vfs_hash = rvfs_list[vhno].rvfs_head; 3497 rvfs_list[vhno].rvfs_head = vfsp; 3498 } else { 3499 for (hp = &rvfs_list[vhno].rvfs_head; *hp != NULL; 3500 hp = &(*hp)->vfs_hash) 3501 continue; 3502 /* 3503 * hp now contains the address of the pointer to update 3504 * to effect the insertion. 3505 */ 3506 vfsp->vfs_hash = NULL; 3507 *hp = vfsp; 3508 } 3509 3510 rvfs_list[vhno].rvfs_len++; 3511 mutex_exit(&rvfs_list[vhno].rvfs_lock); 3512 } 3513 3514 3515 static void 3516 vfs_hash_remove(struct vfs *vfsp) 3517 { 3518 int vhno; 3519 struct vfs *tvfsp; 3520 dev_t dev; 3521 3522 ASSERT(RW_WRITE_HELD(&vfslist)); 3523 3524 dev = expldev(vfsp->vfs_fsid.val[0]); 3525 vhno = VFSHASH(getmajor(dev), getminor(dev)); 3526 3527 mutex_enter(&rvfs_list[vhno].rvfs_lock); 3528 3529 /* 3530 * Remove from hash. 3531 */ 3532 if (rvfs_list[vhno].rvfs_head == vfsp) { 3533 rvfs_list[vhno].rvfs_head = vfsp->vfs_hash; 3534 rvfs_list[vhno].rvfs_len--; 3535 goto foundit; 3536 } 3537 for (tvfsp = rvfs_list[vhno].rvfs_head; tvfsp != NULL; 3538 tvfsp = tvfsp->vfs_hash) { 3539 if (tvfsp->vfs_hash == vfsp) { 3540 tvfsp->vfs_hash = vfsp->vfs_hash; 3541 rvfs_list[vhno].rvfs_len--; 3542 goto foundit; 3543 } 3544 } 3545 cmn_err(CE_WARN, "vfs_list_remove: vfs not found in hash"); 3546 3547 foundit: 3548 3549 mutex_exit(&rvfs_list[vhno].rvfs_lock); 3550 } 3551 3552 3553 void 3554 vfs_list_add(struct vfs *vfsp) 3555 { 3556 zone_t *zone; 3557 3558 /* 3559 * Typically, the vfs_t will have been created on behalf of the file 3560 * system in vfs_init, where it will have been provided with a 3561 * vfs_impl_t. This, however, might be lacking if the vfs_t was created 3562 * by an unbundled file system. We therefore check for such an example 3563 * before stamping the vfs_t with its creation time for the benefit of 3564 * mntfs. 3565 */ 3566 if (vfsp->vfs_implp == NULL) 3567 vfsimpl_setup(vfsp); 3568 vfs_mono_time(&vfsp->vfs_hrctime); 3569 3570 /* 3571 * The zone that owns the mount is the one that performed the mount. 3572 * Note that this isn't necessarily the same as the zone mounted into. 3573 * The corresponding zone_rele_ref() will be done when the vfs_t 3574 * is being free'd. 3575 */ 3576 vfsp->vfs_zone = curproc->p_zone; 3577 zone_init_ref(&vfsp->vfs_implp->vi_zone_ref); 3578 zone_hold_ref(vfsp->vfs_zone, &vfsp->vfs_implp->vi_zone_ref, 3579 ZONE_REF_VFS); 3580 3581 /* 3582 * Find the zone mounted into, and put this mount on its vfs list. 3583 */ 3584 zone = zone_find_by_path(refstr_value(vfsp->vfs_mntpt)); 3585 ASSERT(zone != NULL); 3586 /* 3587 * Special casing for the root vfs. This structure is allocated 3588 * statically and hooked onto rootvfs at link time. During the 3589 * vfs_mountroot call at system startup time, the root file system's 3590 * VFS_MOUNTROOT routine will call vfs_add with this root vfs struct 3591 * as argument. The code below must detect and handle this special 3592 * case. 
The only apparent justification for this special casing is 3593 * to ensure that the root file system appears at the head of the 3594 * list. 3595 * 3596 * XXX: I'm assuming that it's ok to do normal list locking when 3597 * adding the entry for the root file system (this used to be 3598 * done with no locks held). 3599 */ 3600 vfs_list_lock(); 3601 /* 3602 * Link into the vfs list proper. 3603 */ 3604 if (vfsp == &root) { 3605 /* 3606 * Assert: This vfs is already on the list as its first entry. 3607 * Thus, there's nothing to do. 3608 */ 3609 ASSERT(rootvfs == vfsp); 3610 /* 3611 * Add it to the head of the global zone's vfslist. 3612 */ 3613 ASSERT(zone == global_zone); 3614 ASSERT(zone->zone_vfslist == NULL); 3615 zone->zone_vfslist = vfsp; 3616 } else { 3617 /* 3618 * Link to end of list using vfs_prev (as rootvfs is now a 3619 * doubly linked circular list) so list is in mount order for 3620 * mnttab use. 3621 */ 3622 rootvfs->vfs_prev->vfs_next = vfsp; 3623 vfsp->vfs_prev = rootvfs->vfs_prev; 3624 rootvfs->vfs_prev = vfsp; 3625 vfsp->vfs_next = rootvfs; 3626 3627 /* 3628 * Do it again for the zone-private list (which may be NULL). 3629 */ 3630 if (zone->zone_vfslist == NULL) { 3631 ASSERT(zone != global_zone); 3632 zone->zone_vfslist = vfsp; 3633 } else { 3634 zone->zone_vfslist->vfs_zone_prev->vfs_zone_next = vfsp; 3635 vfsp->vfs_zone_prev = zone->zone_vfslist->vfs_zone_prev; 3636 zone->zone_vfslist->vfs_zone_prev = vfsp; 3637 vfsp->vfs_zone_next = zone->zone_vfslist; 3638 } 3639 } 3640 3641 /* 3642 * Link into the hash table, inserting it at the end, so that LOFS 3643 * with the same fsid as UFS (or other) file systems will not hide 3644 * the UFS. 3645 */ 3646 vfs_hash_add(vfsp, 0); 3647 3648 /* 3649 * Link into tree indexed by dev, for vfs_devismounted; 3650 * mntix discerns entries with the same key 3651 */ 3652 vfsp->vfs_mntix = ++vfs_curr_mntix; 3653 avl_add(&vfs_by_dev, vfsp); 3654 3655 /* 3656 * Link into tree indexed by mntpoint, for vfs_mntpoint2vfsp 3657 */ 3658 avl_add(&vfs_by_mntpnt, vfsp); 3659 3660 /* 3661 * update the mnttab modification time 3662 */ 3663 vfs_mnttab_modtimeupd(); 3664 vfs_list_unlock(); 3665 zone_rele(zone); 3666 } 3667 3668 void 3669 vfs_list_remove(struct vfs *vfsp) 3670 { 3671 zone_t *zone; 3672 3673 zone = zone_find_by_path(refstr_value(vfsp->vfs_mntpt)); 3674 ASSERT(zone != NULL); 3675 /* 3676 * Callers are responsible for preventing attempts to unmount the 3677 * root. 3678 */ 3679 ASSERT(vfsp != rootvfs); 3680 3681 vfs_list_lock(); 3682 3683 /* 3684 * Remove from avl trees 3685 */ 3686 avl_remove(&vfs_by_mntpnt, vfsp); 3687 avl_remove(&vfs_by_dev, vfsp); 3688 3689 /* 3690 * Remove from hash. 3691 */ 3692 vfs_hash_remove(vfsp); 3693 3694 /* 3695 * Remove from vfs list. 3696 */ 3697 vfsp->vfs_prev->vfs_next = vfsp->vfs_next; 3698 vfsp->vfs_next->vfs_prev = vfsp->vfs_prev; 3699 vfsp->vfs_next = vfsp->vfs_prev = NULL; 3700 3701 /* 3702 * Remove from zone-specific vfs list.
3703 */ 3704 if (zone->zone_vfslist == vfsp) 3705 zone->zone_vfslist = vfsp->vfs_zone_next; 3706 3707 if (vfsp->vfs_zone_next == vfsp) { 3708 ASSERT(vfsp->vfs_zone_prev == vfsp); 3709 ASSERT(zone->zone_vfslist == vfsp); 3710 zone->zone_vfslist = NULL; 3711 } 3712 3713 vfsp->vfs_zone_prev->vfs_zone_next = vfsp->vfs_zone_next; 3714 vfsp->vfs_zone_next->vfs_zone_prev = vfsp->vfs_zone_prev; 3715 vfsp->vfs_zone_next = vfsp->vfs_zone_prev = NULL; 3716 3717 /* 3718 * update the mnttab modification time 3719 */ 3720 vfs_mnttab_modtimeupd(); 3721 vfs_list_unlock(); 3722 zone_rele(zone); 3723 } 3724 3725 struct vfs * 3726 getvfs(fsid_t *fsid) 3727 { 3728 struct vfs *vfsp; 3729 int val0 = fsid->val[0]; 3730 int val1 = fsid->val[1]; 3731 dev_t dev = expldev(val0); 3732 int vhno = VFSHASH(getmajor(dev), getminor(dev)); 3733 kmutex_t *hmp = &rvfs_list[vhno].rvfs_lock; 3734 3735 mutex_enter(hmp); 3736 for (vfsp = rvfs_list[vhno].rvfs_head; vfsp; vfsp = vfsp->vfs_hash) { 3737 if (vfsp->vfs_fsid.val[0] == val0 && 3738 vfsp->vfs_fsid.val[1] == val1) { 3739 VFS_HOLD(vfsp); 3740 mutex_exit(hmp); 3741 return (vfsp); 3742 } 3743 } 3744 mutex_exit(hmp); 3745 return (NULL); 3746 } 3747 3748 /* 3749 * Search the vfs mount in progress list for a specified device/vfs entry. 3750 * Returns 0 if the first entry in the list that the device matches has the 3751 * given vfs pointer as well. If the device matches but a different vfs 3752 * pointer is encountered in the list before the given vfs pointer then 3753 * a 1 is returned. 3754 */ 3755 3756 int 3757 vfs_devmounting(dev_t dev, struct vfs *vfsp) 3758 { 3759 int retval = 0; 3760 struct ipmnt *mipp; 3761 3762 mutex_enter(&vfs_miplist_mutex); 3763 for (mipp = vfs_miplist; mipp != NULL; mipp = mipp->mip_next) { 3764 if (mipp->mip_dev == dev) { 3765 if (mipp->mip_vfsp != vfsp) 3766 retval = 1; 3767 break; 3768 } 3769 } 3770 mutex_exit(&vfs_miplist_mutex); 3771 return (retval); 3772 } 3773 3774 /* 3775 * Search the vfs list for a specified device. Returns 1, if entry is found 3776 * or 0 if no suitable entry is found. 3777 */ 3778 3779 int 3780 vfs_devismounted(dev_t dev) 3781 { 3782 struct vfs *vfsp; 3783 int found = 0; 3784 struct vfs search; 3785 avl_index_t index; 3786 3787 search.vfs_dev = dev; 3788 search.vfs_mntix = 0; 3789 3790 vfs_list_read_lock(); 3791 3792 /* 3793 * there might be several entries with the same dev in the tree, 3794 * only discerned by mntix. To find the first, we start with a mntix 3795 * of 0. The search will fail. The following avl_nearest will give 3796 * us the actual first entry. 3797 */ 3798 VERIFY(avl_find(&vfs_by_dev, &search, &index) == NULL); 3799 vfsp = avl_nearest(&vfs_by_dev, index, AVL_AFTER); 3800 3801 if (vfsp != NULL && vfsp->vfs_dev == dev) 3802 found = 1; 3803 3804 vfs_list_unlock(); 3805 return (found); 3806 } 3807 3808 /* 3809 * Search the vfs list for a specified device. Returns a pointer to it 3810 * or NULL if no suitable entry is found. The caller of this routine 3811 * is responsible for releasing the returned vfs pointer. 3812 */ 3813 struct vfs * 3814 vfs_dev2vfsp(dev_t dev) 3815 { 3816 struct vfs *vfsp; 3817 int found; 3818 struct vfs search; 3819 avl_index_t index; 3820 3821 search.vfs_dev = dev; 3822 search.vfs_mntix = 0; 3823 3824 vfs_list_read_lock(); 3825 3826 /* 3827 * there might be several entries with the same dev in the tree, 3828 * only discerned by mntix. To find the first, we start with a mntix 3829 * of 0. The search will fail. The following avl_nearest will give 3830 * us the actual first entry. 
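 *
 * For illustration (hypothetical tree contents): if the tree holds
 * entries keyed { dev = D, mntix = 7 } and { dev = D, mntix = 12 },
 * a lookup with the key { dev = D, mntix = 0 } cannot match, but
 * avl_find() records the insertion point just before the mntix = 7
 * entry, so
 *
 *	vfsp = avl_nearest(&vfs_by_dev, index, AVL_AFTER);
 *
 * returns that entry, i.e. the earliest remaining mount of the device.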
3831 */ 3832 VERIFY(avl_find(&vfs_by_dev, &search, &index) == NULL); 3833 vfsp = avl_nearest(&vfs_by_dev, index, AVL_AFTER); 3834 3835 found = 0; 3836 while (vfsp != NULL && vfsp->vfs_dev == dev) { 3837 /* 3838 * The following could be made more efficient by making 3839 * the entire loop use vfs_zone_next if the call is from 3840 * a zone. The only callers, however, ustat(2) and 3841 * umount2(2), don't seem to justify the added 3842 * complexity at present. 3843 */ 3844 if (ZONE_PATH_VISIBLE(refstr_value(vfsp->vfs_mntpt), 3845 curproc->p_zone)) { 3846 VFS_HOLD(vfsp); 3847 found = 1; 3848 break; 3849 } 3850 vfsp = AVL_NEXT(&vfs_by_dev, vfsp); 3851 } 3852 vfs_list_unlock(); 3853 return (found ? vfsp : NULL); 3854 } 3855 3856 /* 3857 * Search the vfs list for a specified mntpoint. Returns a pointer to it 3858 * or NULL if no suitable entry is found. The caller of this routine 3859 * is responsible for releasing the returned vfs pointer. 3860 * 3861 * Note that if multiple mntpoints match, the last one matching is 3862 * returned in an attempt to return the "top" mount when overlay 3863 * mounts are covering the same mount point. This is accomplished by starting 3864 * at the end of the list and working our way backwards, stopping at the first 3865 * matching mount. 3866 */ 3867 struct vfs * 3868 vfs_mntpoint2vfsp(const char *mp) 3869 { 3870 struct vfs *vfsp; 3871 struct vfs *retvfsp = NULL; 3872 zone_t *zone = curproc->p_zone; 3873 struct vfs *list; 3874 3875 vfs_list_read_lock(); 3876 if (getzoneid() == GLOBAL_ZONEID) { 3877 /* 3878 * The global zone may see filesystems in any zone. 3879 */ 3880 struct vfs search; 3881 search.vfs_mntpt = refstr_alloc(mp); 3882 search.vfs_mntix = UINT64_MAX; 3883 avl_index_t index; 3884 3885 /* 3886 * there might be several entries with the same mntpnt in the 3887 * tree, only discerned by mntix. To find the last, we start 3888 * with a mntix of UINT64_MAX. The search will fail. The 3889 * following avl_nearest will give us the actual last entry 3890 * matching the mntpnt. 3891 */ 3892 VERIFY(avl_find(&vfs_by_mntpnt, &search, &index) == 0); 3893 vfsp = avl_nearest(&vfs_by_mntpnt, index, AVL_BEFORE); 3894 3895 refstr_rele(search.vfs_mntpt); 3896 3897 if (vfsp != NULL && 3898 strcmp(refstr_value(vfsp->vfs_mntpt), mp) == 0) 3899 retvfsp = vfsp; 3900 } else if ((list = zone->zone_vfslist) != NULL) { 3901 const char *mntpt; 3902 3903 vfsp = list->vfs_zone_prev; 3904 do { 3905 mntpt = refstr_value(vfsp->vfs_mntpt); 3906 mntpt = ZONE_PATH_TRANSLATE(mntpt, zone); 3907 if (strcmp(mntpt, mp) == 0) { 3908 retvfsp = vfsp; 3909 break; 3910 } 3911 vfsp = vfsp->vfs_zone_prev; 3912 } while (vfsp != list->vfs_zone_prev); 3913 } 3914 if (retvfsp) 3915 VFS_HOLD(retvfsp); 3916 vfs_list_unlock(); 3917 return (retvfsp); 3918 } 3919 3920 /* 3921 * Search the vfs list for a specified vfsops. 3922 * if vfs entry is found then return 1, else 0. 
3923 */ 3924 int 3925 vfs_opsinuse(vfsops_t *ops) 3926 { 3927 struct vfs *vfsp; 3928 int found; 3929 3930 vfs_list_read_lock(); 3931 vfsp = rootvfs; 3932 found = 0; 3933 do { 3934 if (vfs_getops(vfsp) == ops) { 3935 found = 1; 3936 break; 3937 } 3938 vfsp = vfsp->vfs_next; 3939 } while (vfsp != rootvfs); 3940 vfs_list_unlock(); 3941 return (found); 3942 } 3943 3944 /* 3945 * Allocate an entry in vfssw for a file system type 3946 */ 3947 struct vfssw * 3948 allocate_vfssw(const char *type) 3949 { 3950 struct vfssw *vswp; 3951 3952 if (type[0] == '\0' || strlen(type) + 1 > _ST_FSTYPSZ) { 3953 /* 3954 * The vfssw table uses the empty string to identify an 3955 * available entry; we cannot add any type which has 3956 * a leading NUL. The string length is limited to 3957 * the size of the st_fstype array in struct stat. 3958 */ 3959 return (NULL); 3960 } 3961 3962 ASSERT(VFSSW_WRITE_LOCKED()); 3963 for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++) 3964 if (!ALLOCATED_VFSSW(vswp)) { 3965 vswp->vsw_name = kmem_alloc(strlen(type) + 1, KM_SLEEP); 3966 (void) strcpy(vswp->vsw_name, type); 3967 ASSERT(vswp->vsw_count == 0); 3968 vswp->vsw_count = 1; 3969 mutex_init(&vswp->vsw_lock, NULL, MUTEX_DEFAULT, NULL); 3970 return (vswp); 3971 } 3972 return (NULL); 3973 } 3974 3975 /* 3976 * Impose additional layer of translation between vfstype names 3977 * and module names in the filesystem. 3978 */ 3979 static const char * 3980 vfs_to_modname(const char *vfstype) 3981 { 3982 if (strcmp(vfstype, "proc") == 0) { 3983 vfstype = "procfs"; 3984 } else if (strcmp(vfstype, "fd") == 0) { 3985 vfstype = "fdfs"; 3986 } else if (strncmp(vfstype, "nfs", 3) == 0) { 3987 vfstype = "nfs"; 3988 } 3989 3990 return (vfstype); 3991 } 3992 3993 /* 3994 * Find a vfssw entry given a file system type name. 3995 * Try to autoload the filesystem if it's not found. 3996 * If it's installed, return the vfssw locked to prevent unloading. 3997 */ 3998 struct vfssw * 3999 vfs_getvfssw(const char *type) 4000 { 4001 struct vfssw *vswp; 4002 const char *modname; 4003 4004 RLOCK_VFSSW(); 4005 vswp = vfs_getvfsswbyname(type); 4006 modname = vfs_to_modname(type); 4007 4008 if (rootdir == NULL) { 4009 /* 4010 * If we haven't yet loaded the root file system, then our 4011 * _init won't be called until later. Allocate vfssw entry, 4012 * because mod_installfs won't be called. 4013 */ 4014 if (vswp == NULL) { 4015 RUNLOCK_VFSSW(); 4016 WLOCK_VFSSW(); 4017 if ((vswp = vfs_getvfsswbyname(type)) == NULL) { 4018 if ((vswp = allocate_vfssw(type)) == NULL) { 4019 WUNLOCK_VFSSW(); 4020 return (NULL); 4021 } 4022 } 4023 WUNLOCK_VFSSW(); 4024 RLOCK_VFSSW(); 4025 } 4026 if (!VFS_INSTALLED(vswp)) { 4027 RUNLOCK_VFSSW(); 4028 (void) modloadonly("fs", modname); 4029 } else 4030 RUNLOCK_VFSSW(); 4031 return (vswp); 4032 } 4033 4034 /* 4035 * Try to load the filesystem. Before calling modload(), we drop 4036 * our lock on the VFS switch table, and pick it up after the 4037 * module is loaded. However, there is a potential race: the 4038 * module could be unloaded after the call to modload() completes 4039 * but before we pick up the lock and drive on. Therefore, 4040 * we keep reloading the module until we've loaded the module 4041 * _and_ we have the lock on the VFS switch table. 
4042 */ 4043 while (vswp == NULL || !VFS_INSTALLED(vswp)) { 4044 RUNLOCK_VFSSW(); 4045 if (modload("fs", modname) == -1) 4046 return (NULL); 4047 RLOCK_VFSSW(); 4048 if (vswp == NULL) 4049 if ((vswp = vfs_getvfsswbyname(type)) == NULL) 4050 break; 4051 } 4052 RUNLOCK_VFSSW(); 4053 4054 return (vswp); 4055 } 4056 4057 /* 4058 * Find a vfssw entry given a file system type name. 4059 */ 4060 struct vfssw * 4061 vfs_getvfsswbyname(const char *type) 4062 { 4063 struct vfssw *vswp; 4064 4065 ASSERT(VFSSW_LOCKED()); 4066 if (type == NULL || *type == '\0') 4067 return (NULL); 4068 4069 for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++) { 4070 if (strcmp(type, vswp->vsw_name) == 0) { 4071 vfs_refvfssw(vswp); 4072 return (vswp); 4073 } 4074 } 4075 4076 return (NULL); 4077 } 4078 4079 /* 4080 * Find a vfssw entry given a set of vfsops. 4081 */ 4082 struct vfssw * 4083 vfs_getvfsswbyvfsops(vfsops_t *vfsops) 4084 { 4085 struct vfssw *vswp; 4086 4087 RLOCK_VFSSW(); 4088 for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++) { 4089 if (ALLOCATED_VFSSW(vswp) && &vswp->vsw_vfsops == vfsops) { 4090 vfs_refvfssw(vswp); 4091 RUNLOCK_VFSSW(); 4092 return (vswp); 4093 } 4094 } 4095 RUNLOCK_VFSSW(); 4096 4097 return (NULL); 4098 } 4099 4100 /* 4101 * Reference a vfssw entry. 4102 */ 4103 void 4104 vfs_refvfssw(struct vfssw *vswp) 4105 { 4106 4107 mutex_enter(&vswp->vsw_lock); 4108 vswp->vsw_count++; 4109 mutex_exit(&vswp->vsw_lock); 4110 } 4111 4112 /* 4113 * Unreference a vfssw entry. 4114 */ 4115 void 4116 vfs_unrefvfssw(struct vfssw *vswp) 4117 { 4118 4119 mutex_enter(&vswp->vsw_lock); 4120 vswp->vsw_count--; 4121 mutex_exit(&vswp->vsw_lock); 4122 } 4123 4124 int sync_timeout = 30; /* timeout for syncing a page during panic */ 4125 int sync_timeleft; /* portion of sync_timeout remaining */ 4126 4127 static int sync_retries = 20; /* number of retries when not making progress */ 4128 static int sync_triesleft; /* portion of sync_retries remaining */ 4129 4130 static pgcnt_t old_pgcnt, new_pgcnt; 4131 static int new_bufcnt, old_bufcnt; 4132 4133 /* 4134 * Sync all of the mounted filesystems, and then wait for the actual i/o to 4135 * complete. We wait by counting the number of dirty pages and buffers, 4136 * pushing them out using bio_busy() and page_busy(), and then counting again. 4137 * This routine is used during both the uadmin A_SHUTDOWN code as well as 4138 * the SYNC phase of the panic code (see comments in panic.c). It should only 4139 * be used after some higher-level mechanism has quiesced the system so that 4140 * new writes are not being initiated while we are waiting for completion. 4141 * 4142 * To ensure finite running time, our algorithm uses two timeout mechanisms: 4143 * sync_timeleft (a timer implemented by the omnipresent deadman() cyclic), and 4144 * sync_triesleft (a progress counter used by the vfs_syncall() loop below). 4145 * Together these ensure that syncing completes if our i/o paths are stuck. 4146 * The counters are declared above so they can be found easily in the debugger. 4147 * 4148 * The sync_timeleft counter is reset by bio_busy() and page_busy() using the 4149 * vfs_syncprogress() subroutine whenever we make progress through the lists of 4150 * pages and buffers. It is decremented and expired by the deadman() cyclic. 4151 * When vfs_syncall() decides it is done, we disable the deadman() counter by 4152 * setting sync_timeleft to zero. This timer guards against vfs_syncall() 4153 * deadlocking or hanging inside of a broken filesystem or driver routine. 
int sync_timeout = 30;		/* timeout for syncing a page during panic */
int sync_timeleft;		/* portion of sync_timeout remaining */

static int sync_retries = 20;	/* number of retries when not making progress */
static int sync_triesleft;	/* portion of sync_retries remaining */

static pgcnt_t old_pgcnt, new_pgcnt;
static int new_bufcnt, old_bufcnt;

/*
 * Sync all of the mounted filesystems, and then wait for the actual i/o to
 * complete.  We wait by counting the number of dirty pages and buffers,
 * pushing them out using bio_busy() and page_busy(), and then counting again.
 * This routine is used during both the uadmin A_SHUTDOWN code and the SYNC
 * phase of the panic code (see comments in panic.c).  It should only be used
 * after some higher-level mechanism has quiesced the system so that new
 * writes are not being initiated while we are waiting for completion.
 *
 * To ensure finite running time, our algorithm uses two timeout mechanisms:
 * sync_timeleft (a timer implemented by the omnipresent deadman() cyclic), and
 * sync_triesleft (a progress counter used by the vfs_syncall() loop below).
 * Together these ensure that syncing completes if our i/o paths are stuck.
 * The counters are declared above so they can be found easily in the debugger.
 *
 * The sync_timeleft counter is reset by bio_busy() and page_busy() using the
 * vfs_syncprogress() subroutine whenever we make progress through the lists of
 * pages and buffers.  It is decremented and expired by the deadman() cyclic.
 * When vfs_syncall() decides it is done, we disable the deadman() counter by
 * setting sync_timeleft to zero.  This timer guards against vfs_syncall()
 * deadlocking or hanging inside of a broken filesystem or driver routine.
 *
 * The sync_triesleft counter is updated by vfs_syncall() itself.  If we make
 * sync_retries consecutive calls to bio_busy() and page_busy() without
 * decreasing either the number of dirty buffers or dirty pages below the
 * lowest count we have seen so far, we give up and return from vfs_syncall().
 *
 * Each loop iteration ends with a call to delay() one second to allow time for
 * i/o completion and to permit the user time to read our progress messages.
 */
void
vfs_syncall(void)
{
	if (rootdir == NULL && !modrootloaded)
		return;	/* panic during boot - no filesystems yet */

	printf("syncing file systems...");
	vfs_syncprogress();
	sync();

	vfs_syncprogress();
	sync_triesleft = sync_retries;

	old_bufcnt = new_bufcnt = INT_MAX;
	old_pgcnt = new_pgcnt = ULONG_MAX;

	while (sync_triesleft > 0) {
		old_bufcnt = MIN(old_bufcnt, new_bufcnt);
		old_pgcnt = MIN(old_pgcnt, new_pgcnt);

		new_bufcnt = bio_busy(B_TRUE);
		new_pgcnt = page_busy(B_TRUE);
		vfs_syncprogress();

		if (new_bufcnt == 0 && new_pgcnt == 0)
			break;

		if (new_bufcnt < old_bufcnt || new_pgcnt < old_pgcnt)
			sync_triesleft = sync_retries;
		else
			sync_triesleft--;

		if (new_bufcnt)
			printf(" [%d]", new_bufcnt);
		if (new_pgcnt)
			printf(" %lu", new_pgcnt);

		delay(hz);
	}

	if (new_bufcnt != 0 || new_pgcnt != 0)
		printf(" done (not all i/o completed)\n");
	else
		printf(" done\n");

	sync_timeleft = 0;
	delay(hz);
}

/*
 * If we are in the middle of the sync phase of panic, reset sync_timeleft to
 * sync_timeout to indicate that we are making progress and the deadman()
 * omnipresent cyclic should not yet time us out.  Note that it is safe to
 * store to sync_timeleft here since the deadman() is firing at high-level
 * on top of us.  If we are racing with the deadman(), either the deadman()
 * will decrement the old value and then we will reset it, or we will
 * reset it and then the deadman() will immediately decrement it.  In either
 * case, correct behavior results.
 */
void
vfs_syncprogress(void)
{
	if (panicstr)
		sync_timeleft = sync_timeout;
}

/*
 * Map VFS flags to statvfs flags.  These shouldn't really be separate
 * flags at all.
 */
uint_t
vf_to_stf(uint_t vf)
{
	uint_t stf = 0;

	if (vf & VFS_RDONLY)
		stf |= ST_RDONLY;
	if (vf & VFS_NOSETUID)
		stf |= ST_NOSUID;
	if (vf & VFS_NOTRUNC)
		stf |= ST_NOTRUNC;

	return (stf);
}

/*
 * Entries for (illegal) fstype 0.
 */
/* ARGSUSED */
int
vfsstray_sync(struct vfs *vfsp, short arg, struct cred *cr)
{
	cmn_err(CE_PANIC, "stray vfs operation");
	return (0);
}

/*
 * Entries for (illegal) fstype 0.
 */
int
vfsstray(void)
{
	cmn_err(CE_PANIC, "stray vfs operation");
	return (0);
}

/*
 * Support for dealing with forced UFS unmount and its interaction with
 * LOFS.  Could be used by any filesystem.
 * See bug 1203132.
 */
int
vfs_EIO(void)
{
	return (EIO);
}

/*
 * We've gotta define the op for sync separately, since the compiler gets
 * confused if we mix and match ANSI and normal style prototypes when
 * a "short" argument is present and spits out a warning.
 */
/*ARGSUSED*/
int
vfs_EIO_sync(struct vfs *vfsp, short arg, struct cred *cr)
{
	return (EIO);
}

vfs_t EIO_vfs;
vfsops_t *EIO_vfsops;

/*
 * Called from startup() to initialize all loaded vfs's
 */
void
vfsinit(void)
{
	struct vfssw *vswp;
	int error;
	extern int vopstats_enabled;
	extern void vopstats_startup();

	static const fs_operation_def_t EIO_vfsops_template[] = {
		VFSNAME_MOUNT,		{ .error = vfs_EIO },
		VFSNAME_UNMOUNT,	{ .error = vfs_EIO },
		VFSNAME_ROOT,		{ .error = vfs_EIO },
		VFSNAME_STATVFS,	{ .error = vfs_EIO },
		VFSNAME_SYNC,		{ .vfs_sync = vfs_EIO_sync },
		VFSNAME_VGET,		{ .error = vfs_EIO },
		VFSNAME_MOUNTROOT,	{ .error = vfs_EIO },
		VFSNAME_FREEVFS,	{ .error = vfs_EIO },
		VFSNAME_VNSTATE,	{ .error = vfs_EIO },
		NULL,			NULL
	};

	static const fs_operation_def_t stray_vfsops_template[] = {
		VFSNAME_MOUNT,		{ .error = vfsstray },
		VFSNAME_UNMOUNT,	{ .error = vfsstray },
		VFSNAME_ROOT,		{ .error = vfsstray },
		VFSNAME_STATVFS,	{ .error = vfsstray },
		VFSNAME_SYNC,		{ .vfs_sync = vfsstray_sync },
		VFSNAME_VGET,		{ .error = vfsstray },
		VFSNAME_MOUNTROOT,	{ .error = vfsstray },
		VFSNAME_FREEVFS,	{ .error = vfsstray },
		VFSNAME_VNSTATE,	{ .error = vfsstray },
		NULL,			NULL
	};

	/* Create vfs cache */
	vfs_cache = kmem_cache_create("vfs_cache", sizeof (struct vfs),
	    sizeof (uintptr_t), NULL, NULL, NULL, NULL, NULL, 0);

	/* Initialize the vnode cache (file systems may use it during init). */
	vn_create_cache();

	/* Setup event monitor framework */
	fem_init();

	/* Initialize the dummy stray file system type. */
	error = vfs_setfsops(0, stray_vfsops_template, NULL);

	/* Initialize the dummy EIO file system. */
	error = vfs_makefsops(EIO_vfsops_template, &EIO_vfsops);
	if (error != 0) {
		cmn_err(CE_WARN, "vfsinit: bad EIO vfs ops template");
		/* Shouldn't happen, but not bad enough to panic */
	}

	VFS_INIT(&EIO_vfs, EIO_vfsops, (caddr_t)NULL);

	/*
	 * Default EIO_vfs.vfs_flag to VFS_UNMOUNTED so a lookup
	 * on this vfs can immediately notice it's invalid.
	 */
	EIO_vfs.vfs_flag |= VFS_UNMOUNTED;

	/*
	 * Call the init routines of non-loadable filesystems only.
	 * Filesystems which are loaded as separate modules will be
	 * initialized by the module loading code instead.
	 */

	for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++) {
		RLOCK_VFSSW();
		if (vswp->vsw_init != NULL)
			(*vswp->vsw_init)(vswp - vfssw, vswp->vsw_name);
		RUNLOCK_VFSSW();
	}

	vopstats_startup();

	if (vopstats_enabled) {
		/* EIO_vfs can collect stats, but we don't retrieve them */
		initialize_vopstats(&EIO_vfs.vfs_vopstats);
		EIO_vfs.vfs_fstypevsp = NULL;
		EIO_vfs.vfs_vskap = NULL;
		EIO_vfs.vfs_flag |= VFS_STATS;
	}

	xattr_init();

	reparse_point_init();
}

vfs_t *
vfs_alloc(int kmflag)
{
	vfs_t *vfsp;

	vfsp = kmem_cache_alloc(vfs_cache, kmflag);

	/*
	 * Do the simplest initialization here.
	 * Everything else gets done in vfs_init()
	 */
	bzero(vfsp, sizeof (vfs_t));
	return (vfsp);
}

void
vfs_free(vfs_t *vfsp)
{
	/*
	 * One would be tempted to assert that "vfsp->vfs_count == 0".
	 * The problem is that this gets called out of domount() with
	 * a partially initialized vfs and a vfs_count of 1.  This is
	 * also called from vfs_rele() with a vfs_count of 0.  We can't
	 * call VFS_RELE() from domount() if VFS_MOUNT() hasn't successfully
	 * returned.  This is because VFS_MOUNT() fully initializes the
	 * vfs structure and its associated data.  VFS_RELE() will call
	 * VFS_FREEVFS() which may panic the system if the data structures
	 * aren't fully initialized from a successful VFS_MOUNT().
	 */

	/* If FEM was in use, make sure everything gets cleaned up */
	if (vfsp->vfs_femhead) {
		ASSERT(vfsp->vfs_femhead->femh_list == NULL);
		mutex_destroy(&vfsp->vfs_femhead->femh_lock);
		kmem_free(vfsp->vfs_femhead, sizeof (*(vfsp->vfs_femhead)));
		vfsp->vfs_femhead = NULL;
	}

	if (vfsp->vfs_implp)
		vfsimpl_teardown(vfsp);
	sema_destroy(&vfsp->vfs_reflock);
	kmem_cache_free(vfs_cache, vfsp);
}

/*
 * Increments the vfs reference count by one atomically.
 */
void
vfs_hold(vfs_t *vfsp)
{
	atomic_inc_32(&vfsp->vfs_count);
	ASSERT(vfsp->vfs_count != 0);
}

/*
 * Decrements the vfs reference count by one atomically.  When
 * the vfs reference count becomes zero, it calls the file system
 * specific vfs_freevfs() to free up the resources.
 */
void
vfs_rele(vfs_t *vfsp)
{
	ASSERT(vfsp->vfs_count != 0);
	if (atomic_dec_32_nv(&vfsp->vfs_count) == 0) {
		VFS_FREEVFS(vfsp);
		lofi_remove(vfsp);
		if (vfsp->vfs_zone)
			zone_rele_ref(&vfsp->vfs_implp->vi_zone_ref,
			    ZONE_REF_VFS);
		vfs_freemnttab(vfsp);
		vfs_free(vfsp);
	}
}
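
/*
 * Illustrative sketch (not part of the build): the expected pairing of
 * vfs_hold() and vfs_rele() around any use of a vfs_t that may outlive
 * the lock under which it was found.  The surrounding code is hypothetical;
 * only the hold/release pattern is the point.
 *
 *	vfs_hold(vfsp);
 *	... operate on *vfsp; it cannot be freed while the hold is held ...
 *	vfs_rele(vfsp);
 *
 * The final vfs_rele() may end up in vfs_free() above, so the caller must
 * not touch *vfsp after dropping its reference.
 */
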
/*
 * Generic operations vector support.
 *
 * This is used to build operations vectors for both the vfs and vnode.
 * It's normally called only when a file system is loaded.
 *
 * There are many possible algorithms for this, including the following:
 *
 * (1) scan the list of known operations; for each, see if the file system
 *	includes an entry for it, and fill it in as appropriate.
 *
 * (2) set up defaults for all known operations.  scan the list of ops
 *	supplied by the file system; for each which is both supplied and
 *	known, fill it in.
 *
 * (3) sort the lists of known ops & supplied ops; scan the list, filling
 *	in entries as we go.
 *
 * we choose (1) for simplicity, and because performance isn't critical here.
 * note that (2) could be sped up using a precomputed hash table on known ops.
 * (3) could be faster than either, but only if the lists were very large or
 * supplied in sorted order.
 */

int
fs_build_vector(void *vector, int *unused_ops,
    const fs_operation_trans_def_t *translation,
    const fs_operation_def_t *operations)
{
	int i, num_trans, num_ops, used;

	/*
	 * Count the number of translations and the number of supplied
	 * operations.
	 */

	{
		const fs_operation_trans_def_t *p;

		for (num_trans = 0, p = translation;
		    p->name != NULL;
		    num_trans++, p++)
			;
	}

	{
		const fs_operation_def_t *p;

		for (num_ops = 0, p = operations;
		    p->name != NULL;
		    num_ops++, p++)
			;
	}

	/* Walk through each operation known to our caller.  There will be */
	/* one entry in the supplied "translation table" for each. */

	used = 0;

	for (i = 0; i < num_trans; i++) {
		int j, found;
		char *curname;
		fs_generic_func_p result;
		fs_generic_func_p *location;

		curname = translation[i].name;

		/* Look for a matching operation in the list supplied by the */
		/* file system. */

		found = 0;

		for (j = 0; j < num_ops; j++) {
			if (strcmp(operations[j].name, curname) == 0) {
				used++;
				found = 1;
				break;
			}
		}

		/*
		 * If the file system is using a "placeholder" for default
		 * or error functions, grab the appropriate function out of
		 * the translation table.  If the file system didn't supply
		 * this operation at all, use the default function.
		 */

		if (found) {
			result = operations[j].func.fs_generic;
			if (result == fs_default) {
				result = translation[i].defaultFunc;
			} else if (result == fs_error) {
				result = translation[i].errorFunc;
			} else if (result == NULL) {
				/* Null values are PROHIBITED */
				return (EINVAL);
			}
		} else {
			result = translation[i].defaultFunc;
		}

		/* Now store the function into the operations vector. */

		location = (fs_generic_func_p *)
		    (((char *)vector) + translation[i].offset);

		*location = result;
	}

	*unused_ops = num_ops - used;

	return (0);
}

/* Placeholder functions, should never be called. */

int
fs_error(void)
{
	cmn_err(CE_PANIC, "fs_error called");
	return (0);
}

int
fs_default(void)
{
	cmn_err(CE_PANIC, "fs_default called");
	return (0);
}
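
/*
 * Illustrative sketch (not part of the build): how a filesystem typically
 * feeds fs_build_vector(), by way of vfs_setfsops(), from the vsw_init
 * routine that vfsinit() invokes above (loadable modules reach the same
 * routine through the module loading code).  The myfs_* names are
 * hypothetical; the template style mirrors the EIO and stray templates in
 * vfsinit(), and the typed union members (.vfs_mount and friends) are
 * assumed to be the usual ones from fs_func_p.
 *
 *	static const fs_operation_def_t myfs_vfsops_template[] = {
 *		VFSNAME_MOUNT,		{ .vfs_mount = myfs_mount },
 *		VFSNAME_UNMOUNT,	{ .vfs_unmount = myfs_unmount },
 *		VFSNAME_ROOT,		{ .vfs_root = myfs_root },
 *		VFSNAME_STATVFS,	{ .vfs_statvfs = myfs_statvfs },
 *		NULL,			NULL
 *	};
 *
 *	static int
 *	myfs_init(int fstype, char *name)
 *	{
 *		int error;
 *
 *		error = vfs_setfsops(fstype, myfs_vfsops_template, NULL);
 *		if (error != 0)
 *			cmn_err(CE_WARN, "myfs_init: bad vfs ops template");
 *		return (error);
 *	}
 *
 * Operations left out of the template get the translation table's default
 * function; entries supplied as fs_error get the error function instead.
 */
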
#ifdef __sparc

/*
 * Part of the implementation of booting off a mirrored root
 * involves a change of dev_t for the root device.  To
 * accomplish this, first remove the existing hash table
 * entry for the root device, convert to the new dev_t,
 * then re-insert in the hash table at the head of the list.
 */
void
vfs_root_redev(vfs_t *vfsp, dev_t ndev, int fstype)
{
	vfs_list_lock();

	vfs_hash_remove(vfsp);

	vfsp->vfs_dev = ndev;
	vfs_make_fsid(&vfsp->vfs_fsid, ndev, fstype);

	vfs_hash_add(vfsp, 1);

	vfs_list_unlock();
}

#else /* x86 NEWBOOT */

#if defined(__x86)
extern int hvmboot_rootconf();
#endif /* __x86 */

extern ib_boot_prop_t *iscsiboot_prop;

int
rootconf()
{
	int error;
	struct vfssw *vsw;
	extern void pm_init();
	char *fstyp, *fsmod;
	int ret = -1;

	getrootfs(&fstyp, &fsmod);

#if defined(__x86)
	/*
	 * hvmboot_rootconf() is defined in the hvm_bootstrap misc module,
	 * which lives in /platform/i86hvm, and hence is only available when
	 * booted in an x86 hvm environment.  If the hvm_bootstrap misc module
	 * is not available then the modstub for this function will return 0.
	 * If the hvm_bootstrap misc module is available it will be loaded
	 * and hvmboot_rootconf() will be invoked.
	 */
	if (error = hvmboot_rootconf())
		return (error);
#endif /* __x86 */

	if (error = clboot_rootconf())
		return (error);

	if (modload("fs", fsmod) == -1)
		panic("Cannot _init %s module", fsmod);

	RLOCK_VFSSW();
	vsw = vfs_getvfsswbyname(fstyp);
	RUNLOCK_VFSSW();
	if (vsw == NULL) {
		cmn_err(CE_CONT, "Cannot find %s filesystem\n", fstyp);
		return (ENXIO);
	}
	VFS_INIT(rootvfs, &vsw->vsw_vfsops, 0);
	VFS_HOLD(rootvfs);

	/* always mount readonly first */
	rootvfs->vfs_flag |= VFS_RDONLY;

	pm_init();

	if (netboot && iscsiboot_prop) {
		cmn_err(CE_WARN, "NFS boot and iSCSI boot"
		    " shouldn't happen at the same time");
		return (EINVAL);
	}

	if (netboot || iscsiboot_prop) {
		ret = strplumb();
		if (ret != 0) {
			cmn_err(CE_WARN, "Cannot plumb network device %d", ret);
			return (EFAULT);
		}
	}

	if ((ret == 0) && iscsiboot_prop) {
		ret = modload("drv", "iscsi");
		/* -1 indicates fail */
		if (ret == -1) {
			cmn_err(CE_WARN, "Failed to load iscsi module");
			iscsi_boot_prop_free();
			return (EINVAL);
		} else {
			if (!i_ddi_attach_pseudo_node("iscsi")) {
				cmn_err(CE_WARN,
				    "Failed to attach iscsi driver");
				iscsi_boot_prop_free();
				return (ENODEV);
			}
		}
	}

	error = VFS_MOUNTROOT(rootvfs, ROOT_INIT);
	vfs_unrefvfssw(vsw);
	rootdev = rootvfs->vfs_dev;

	if (error)
		cmn_err(CE_CONT, "Cannot mount root on %s fstype %s\n",
		    rootfs.bo_name, fstyp);
	else
		cmn_err(CE_CONT, "?root on %s fstype %s\n",
		    rootfs.bo_name, fstyp);
	return (error);
}

/*
 * XXX this is called by nfs only and should probably be removed
 * If booted with ASKNAME, prompt on the console for a filesystem
 * name and return it.
 */
void
getfsname(char *askfor, char *name, size_t namelen)
{
	if (boothowto & RB_ASKNAME) {
		printf("%s name: ", askfor);
		console_gets(name, namelen);
	}
}

/*
 * Init the root filesystem type (rootfs.bo_fstype) from the "fstype"
 * property.
 *
 * Filesystem types starting with the prefix "nfs" are diskless clients;
 * init the root filename (rootfs.bo_name), too.
 *
 * If we are booting via NFS we currently have these options:
 *	nfs -	dynamically choose NFS V2, V3, or V4 (default)
 *	nfs2 -	force NFS V2
 *	nfs3 -	force NFS V3
 *	nfs4 -	force NFS V4
 * Because we need to maintain backward compatibility with the naming
 * convention that the NFS V2 filesystem name is "nfs" (see vfs_conf.c)
 * we need to map "nfs" => "nfsdyn" and "nfs2" => "nfs".  The dynamic
 * nfs module will map the type back to either "nfs", "nfs3", or "nfs4".
 * This is only for root filesystems; all other uses will expect
 * that "nfs" == NFS V2.
 */
static void
getrootfs(char **fstypp, char **fsmodp)
{
	extern char *strplumb_get_netdev_path(void);
	char *propstr = NULL;

	/*
	 * Check fstype property; for diskless it should be one of "nfs",
	 * "nfs2", "nfs3" or "nfs4".
	 */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
	    DDI_PROP_DONTPASS, "fstype", &propstr)
	    == DDI_SUCCESS) {
		(void) strncpy(rootfs.bo_fstype, propstr, BO_MAXFSNAME);
		ddi_prop_free(propstr);

	/*
	 * If the boot property 'fstype' is not set, but 'zfs-bootfs' is set,
	 * assume the type of this root filesystem is 'zfs'.
	 */
	} else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
	    DDI_PROP_DONTPASS, "zfs-bootfs", &propstr)
	    == DDI_SUCCESS) {
		(void) strncpy(rootfs.bo_fstype, "zfs", BO_MAXFSNAME);
		ddi_prop_free(propstr);
	}

	if (strncmp(rootfs.bo_fstype, "nfs", 3) != 0) {
		*fstypp = *fsmodp = rootfs.bo_fstype;
		return;
	}

	++netboot;

	if (strcmp(rootfs.bo_fstype, "nfs2") == 0)
		(void) strcpy(rootfs.bo_fstype, "nfs");
	else if (strcmp(rootfs.bo_fstype, "nfs") == 0)
		(void) strcpy(rootfs.bo_fstype, "nfsdyn");

	/*
	 * check if path to network interface is specified in bootpath
	 * or by a hypervisor domain configuration file.
	 * XXPV - enable strplumb_get_netdev_path()
	 */
	if (ddi_prop_exists(DDI_DEV_T_ANY, ddi_root_node(), DDI_PROP_DONTPASS,
	    "xpv-nfsroot")) {
		(void) strcpy(rootfs.bo_name, "/xpvd/xnf@0");
	} else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
	    DDI_PROP_DONTPASS, "bootpath", &propstr)
	    == DDI_SUCCESS) {
		(void) strncpy(rootfs.bo_name, propstr, BO_MAXOBJNAME);
		ddi_prop_free(propstr);
	} else {
		/* attempt to determine netdev_path via boot_mac address */
		netdev_path = strplumb_get_netdev_path();
		if (netdev_path == NULL)
			panic("cannot find boot network interface");
		(void) strncpy(rootfs.bo_name, netdev_path, BO_MAXOBJNAME);
	}
	*fstypp = rootfs.bo_fstype;
	*fsmodp = "nfs";
}
#endif

/*
 * VFS feature routines
 */

#define	VFTINDEX(feature)	(((feature) >> 32) & 0xFFFFFFFF)
#define	VFTBITS(feature)	((feature) & 0xFFFFFFFFLL)

/* Register a feature in the vfs */
void
vfs_set_feature(vfs_t *vfsp, vfs_feature_t feature)
{
	/* Note that vfs_featureset[] is found in *vfsp->vfs_implp */
	if (vfsp->vfs_implp == NULL)
		return;

	vfsp->vfs_featureset[VFTINDEX(feature)] |= VFTBITS(feature);
}

void
vfs_clear_feature(vfs_t *vfsp, vfs_feature_t feature)
{
	/* Note that vfs_featureset[] is found in *vfsp->vfs_implp */
	if (vfsp->vfs_implp == NULL)
		return;
	vfsp->vfs_featureset[VFTINDEX(feature)] &= VFTBITS(~feature);
}

/*
 * Query a vfs for a feature.
 * Returns 1 if feature is present, 0 if not
 */
int
vfs_has_feature(vfs_t *vfsp, vfs_feature_t feature)
{
	int ret = 0;

	/* Note that vfs_featureset[] is found in *vfsp->vfs_implp */
	if (vfsp->vfs_implp == NULL)
		return (ret);

	if (vfsp->vfs_featureset[VFTINDEX(feature)] & VFTBITS(feature))
		ret = 1;

	return (ret);
}

/*
 * Propagate feature set from one vfs to another
 */
void
vfs_propagate_features(vfs_t *from, vfs_t *to)
{
	int i;

	if (to->vfs_implp == NULL || from->vfs_implp == NULL)
		return;

	for (i = 1; i <= to->vfs_featureset[0]; i++) {
		to->vfs_featureset[i] = from->vfs_featureset[i];
	}
}

#define	LOFINODE_PATH	"/dev/lofi/%d"

/*
 * Return the vnode for the lofi node if there's a lofi mount in place.
 * Returns -1 when there's no lofi node, 0 on success, and > 0 on
 * failure.
 */
int
vfs_get_lofi(vfs_t *vfsp, vnode_t **vpp)
{
	char *path = NULL;
	int strsize;
	int err;

	if (vfsp->vfs_lofi_minor == 0) {
		*vpp = NULL;
		return (-1);
	}

	strsize = snprintf(NULL, 0, LOFINODE_PATH, vfsp->vfs_lofi_minor);
	path = kmem_alloc(strsize + 1, KM_SLEEP);
	(void) snprintf(path, strsize + 1, LOFINODE_PATH, vfsp->vfs_lofi_minor);

	/*
	 * We may be inside a zone, so we need to use the /dev path, but
	 * it's created asynchronously, so we wait here.
	 */
	for (;;) {
		err = lookupname(path, UIO_SYSSPACE, FOLLOW, NULLVPP, vpp);

		if (err != ENOENT)
			break;

		if ((err = delay_sig(hz / 8)) == EINTR)
			break;
	}

	if (err)
		*vpp = NULL;

	kmem_free(path, strsize + 1);
	return (err);
}
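
/*
 * Illustrative sketch (not part of the build): how a filesystem would
 * advertise and test an optional capability with the VFS feature routines
 * above.  A vfs_feature_t constant packs an index into vfs_featureset[] in
 * its upper 32 bits (extracted by VFTINDEX) and a bit mask in its lower 32
 * bits (extracted by VFTBITS).  VFSFT_XVATTR is used here on the assumption
 * that it is one of the VFSFT_* constants from sys/vfs.h; any other feature
 * constant works the same way.
 *
 *	In the filesystem's mount path, once the vfs_t is set up:
 *
 *		vfs_set_feature(vfsp, VFSFT_XVATTR);
 *
 *	Elsewhere, before relying on the capability:
 *
 *		if (vfs_has_feature(vfsp, VFSFT_XVATTR)) {
 *			... take the extended-attribute-aware path ...
 *		}
 *
 * Both routines are no-ops when vfs_implp has not been allocated, since
 * vfs_featureset[] lives in the vfs_impl_t.
 */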