1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright (c) 2013, Joyent, Inc. All rights reserved. 25 * Copyright 2017 RackTop Systems. 26 * Copyright 2016 Nexenta Systems, Inc. 27 */ 28 29 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */ 30 /* All Rights Reserved */ 31 32 /* 33 * University Copyright- Copyright (c) 1982, 1986, 1988 34 * The Regents of the University of California 35 * All Rights Reserved 36 * 37 * University Acknowledgment- Portions of this document are derived from 38 * software developed by the University of California, Berkeley, and its 39 * contributors. 40 */ 41 42 #include <sys/types.h> 43 #include <sys/t_lock.h> 44 #include <sys/param.h> 45 #include <sys/errno.h> 46 #include <sys/user.h> 47 #include <sys/fstyp.h> 48 #include <sys/kmem.h> 49 #include <sys/systm.h> 50 #include <sys/proc.h> 51 #include <sys/mount.h> 52 #include <sys/vfs.h> 53 #include <sys/vfs_opreg.h> 54 #include <sys/fem.h> 55 #include <sys/mntent.h> 56 #include <sys/stat.h> 57 #include <sys/statvfs.h> 58 #include <sys/statfs.h> 59 #include <sys/cred.h> 60 #include <sys/vnode.h> 61 #include <sys/rwstlock.h> 62 #include <sys/dnlc.h> 63 #include <sys/file.h> 64 #include <sys/time.h> 65 #include <sys/atomic.h> 66 #include <sys/cmn_err.h> 67 #include <sys/buf.h> 68 #include <sys/swap.h> 69 #include <sys/debug.h> 70 #include <sys/vnode.h> 71 #include <sys/modctl.h> 72 #include <sys/ddi.h> 73 #include <sys/pathname.h> 74 #include <sys/bootconf.h> 75 #include <sys/dumphdr.h> 76 #include <sys/dc_ki.h> 77 #include <sys/poll.h> 78 #include <sys/sunddi.h> 79 #include <sys/sysmacros.h> 80 #include <sys/zone.h> 81 #include <sys/policy.h> 82 #include <sys/ctfs.h> 83 #include <sys/objfs.h> 84 #include <sys/console.h> 85 #include <sys/reboot.h> 86 #include <sys/attr.h> 87 #include <sys/zio.h> 88 #include <sys/spa.h> 89 #include <sys/lofi.h> 90 #include <sys/bootprops.h> 91 #include <sys/avl.h> 92 93 #include <vm/page.h> 94 95 #include <fs/fs_subr.h> 96 /* Private interfaces to create vopstats-related data structures */ 97 extern void initialize_vopstats(vopstats_t *); 98 extern vopstats_t *get_fstype_vopstats(struct vfs *, struct vfssw *); 99 extern vsk_anchor_t *get_vskstat_anchor(struct vfs *); 100 101 static void vfs_clearmntopt_nolock(mntopts_t *, const char *, int); 102 static void vfs_setmntopt_nolock(mntopts_t *, const char *, 103 const char *, int, int); 104 static int vfs_optionisset_nolock(const mntopts_t *, const char *, char **); 105 static void vfs_freemnttab(struct vfs *); 106 static void vfs_freeopt(mntopt_t *); 107 static void vfs_swapopttbl_nolock(mntopts_t *, mntopts_t *); 108 static void 
vfs_swapopttbl(mntopts_t *, mntopts_t *); 109 static void vfs_copyopttbl_extend(const mntopts_t *, mntopts_t *, int); 110 static void vfs_createopttbl_extend(mntopts_t *, const char *, 111 const mntopts_t *); 112 static char **vfs_copycancelopt_extend(char **const, int); 113 static void vfs_freecancelopt(char **); 114 static void getrootfs(char **, char **); 115 static int getmacpath(dev_info_t *, void *); 116 static void vfs_mnttabvp_setup(void); 117 118 struct ipmnt { 119 struct ipmnt *mip_next; 120 dev_t mip_dev; 121 struct vfs *mip_vfsp; 122 }; 123 124 static kmutex_t vfs_miplist_mutex; 125 static struct ipmnt *vfs_miplist = NULL; 126 static struct ipmnt *vfs_miplist_end = NULL; 127 128 static kmem_cache_t *vfs_cache; /* Pointer to VFS kmem cache */ 129 130 /* 131 * VFS global data. 132 */ 133 vnode_t *rootdir; /* pointer to root inode vnode. */ 134 vnode_t *devicesdir; /* pointer to inode of devices root */ 135 vnode_t *devdir; /* pointer to inode of dev root */ 136 137 char *server_rootpath; /* root path for diskless clients */ 138 char *server_hostname; /* hostname of diskless server */ 139 140 static struct vfs root; 141 static struct vfs devices; 142 static struct vfs dev; 143 struct vfs *rootvfs = &root; /* pointer to root vfs; head of VFS list. */ 144 avl_tree_t vfs_by_dev; /* avl tree to index mounted VFSs by dev */ 145 avl_tree_t vfs_by_mntpnt; /* avl tree to index mounted VFSs by mntpnt */ 146 uint64_t vfs_curr_mntix; /* counter to provide a unique mntix for 147 * entries in the above avl trees. 148 * protected by vfslist lock */ 149 rvfs_t *rvfs_list; /* array of vfs ptrs for vfs hash list */ 150 int vfshsz = 512; /* # of heads/locks in vfs hash arrays */ 151 /* must be power of 2! */ 152 timespec_t vfs_mnttab_ctime; /* mnttab created time */ 153 timespec_t vfs_mnttab_mtime; /* mnttab last modified time */ 154 char *vfs_dummyfstype = "\0"; 155 struct pollhead vfs_pollhd; /* for mnttab pollers */ 156 struct vnode *vfs_mntdummyvp; /* to fake mnttab read/write for file events */ 157 int mntfstype; /* will be set once mnt fs is mounted */ 158 159 /* 160 * Table for generic options recognized in the VFS layer and acted 161 * on at this level before parsing file system specific options. 162 * The nosuid option is stronger than any of the devices and setuid 163 * options, so those are canceled when nosuid is seen. 164 * 165 * All options which are added here need to be added to the 166 * list of standard options in usr/src/cmd/fs.d/fslib.c as well. 
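 *
 * For reference, a file system that wants its own prototype table (so that
 * fs-specific options get proper cancellation too) declares the same kind of
 * structures and hands them to the VFS through its vfsdef/vfssw entry (the
 * VSW_HASPROTO case handled in domount()). A minimal sketch, using a
 * hypothetical "examplefs" with a log/nolog option pair; the names here are
 * illustrative only:
 *
 *	static char *examplefs_log_cancel[] = { "nolog", NULL };
 *	static char *examplefs_nolog_cancel[] = { "log", NULL };
 *
 *	static mntopt_t examplefs_options[] = {
 *		{ "log",	examplefs_log_cancel,	NULL, 0, NULL },
 *		{ "nolog",	examplefs_nolog_cancel,	NULL, 0, NULL },
 *	};
 *
 *	static mntopts_t examplefs_proto = {
 *		sizeof (examplefs_options) / sizeof (mntopt_t),
 *		examplefs_options
 *	};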
167 */ 168 /* 169 * VFS Mount options table 170 */ 171 static char *ro_cancel[] = { MNTOPT_RW, NULL }; 172 static char *rw_cancel[] = { MNTOPT_RO, NULL }; 173 static char *suid_cancel[] = { MNTOPT_NOSUID, NULL }; 174 static char *nosuid_cancel[] = { MNTOPT_SUID, MNTOPT_DEVICES, MNTOPT_NODEVICES, 175 MNTOPT_NOSETUID, MNTOPT_SETUID, NULL }; 176 static char *devices_cancel[] = { MNTOPT_NODEVICES, NULL }; 177 static char *nodevices_cancel[] = { MNTOPT_DEVICES, NULL }; 178 static char *setuid_cancel[] = { MNTOPT_NOSETUID, NULL }; 179 static char *nosetuid_cancel[] = { MNTOPT_SETUID, NULL }; 180 static char *nbmand_cancel[] = { MNTOPT_NONBMAND, NULL }; 181 static char *nonbmand_cancel[] = { MNTOPT_NBMAND, NULL }; 182 static char *exec_cancel[] = { MNTOPT_NOEXEC, NULL }; 183 static char *noexec_cancel[] = { MNTOPT_EXEC, NULL }; 184 185 static const mntopt_t mntopts[] = { 186 /* 187 * option name cancel options default arg flags 188 */ 189 { MNTOPT_REMOUNT, NULL, NULL, 190 MO_NODISPLAY, (void *)0 }, 191 { MNTOPT_RO, ro_cancel, NULL, 0, 192 (void *)0 }, 193 { MNTOPT_RW, rw_cancel, NULL, 0, 194 (void *)0 }, 195 { MNTOPT_SUID, suid_cancel, NULL, 0, 196 (void *)0 }, 197 { MNTOPT_NOSUID, nosuid_cancel, NULL, 0, 198 (void *)0 }, 199 { MNTOPT_DEVICES, devices_cancel, NULL, 0, 200 (void *)0 }, 201 { MNTOPT_NODEVICES, nodevices_cancel, NULL, 0, 202 (void *)0 }, 203 { MNTOPT_SETUID, setuid_cancel, NULL, 0, 204 (void *)0 }, 205 { MNTOPT_NOSETUID, nosetuid_cancel, NULL, 0, 206 (void *)0 }, 207 { MNTOPT_NBMAND, nbmand_cancel, NULL, 0, 208 (void *)0 }, 209 { MNTOPT_NONBMAND, nonbmand_cancel, NULL, 0, 210 (void *)0 }, 211 { MNTOPT_EXEC, exec_cancel, NULL, 0, 212 (void *)0 }, 213 { MNTOPT_NOEXEC, noexec_cancel, NULL, 0, 214 (void *)0 }, 215 }; 216 217 const mntopts_t vfs_mntopts = { 218 sizeof (mntopts) / sizeof (mntopt_t), 219 (mntopt_t *)&mntopts[0] 220 }; 221 222 /* 223 * File system operation dispatch functions. 224 */ 225 226 int 227 fsop_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr) 228 { 229 return (*(vfsp)->vfs_op->vfs_mount)(vfsp, mvp, uap, cr); 230 } 231 232 int 233 fsop_unmount(vfs_t *vfsp, int flag, cred_t *cr) 234 { 235 return (*(vfsp)->vfs_op->vfs_unmount)(vfsp, flag, cr); 236 } 237 238 int 239 fsop_root(vfs_t *vfsp, vnode_t **vpp) 240 { 241 refstr_t *mntpt; 242 int ret = (*(vfsp)->vfs_op->vfs_root)(vfsp, vpp); 243 /* 244 * Make sure this root has a path. With lofs, it is possible to have 245 * a NULL mountpoint. 246 */ 247 if (ret == 0 && vfsp->vfs_mntpt != NULL && (*vpp)->v_path == NULL) { 248 mntpt = vfs_getmntpoint(vfsp); 249 vn_setpath_str(*vpp, refstr_value(mntpt), 250 strlen(refstr_value(mntpt))); 251 refstr_rele(mntpt); 252 } 253 254 return (ret); 255 } 256 257 int 258 fsop_statfs(vfs_t *vfsp, statvfs64_t *sp) 259 { 260 return (*(vfsp)->vfs_op->vfs_statvfs)(vfsp, sp); 261 } 262 263 int 264 fsop_sync(vfs_t *vfsp, short flag, cred_t *cr) 265 { 266 return (*(vfsp)->vfs_op->vfs_sync)(vfsp, flag, cr); 267 } 268 269 int 270 fsop_vget(vfs_t *vfsp, vnode_t **vpp, fid_t *fidp) 271 { 272 /* 273 * In order to handle system attribute fids in a manner 274 * transparent to the underlying fs, we embed the fid for 275 * the sysattr parent object in the sysattr fid and tack on 276 * some extra bytes that only the sysattr layer knows about. 277 * 278 * This guarantees that sysattr fids are larger than other fids 279 * for this vfs. If the vfs supports the sysattr view interface 280 * (as indicated by VFSFT_SYSATTR_VIEWS), we cannot have a size 281 * collision with XATTR_FIDSZ. 
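	 *
	 * For context, a rough sketch of the typical caller, which turns a
	 * fid previously generated by VOP_FID() back into a held vnode
	 * (names and error handling here are illustrative only):
	 *
	 *	fid_t fid;
	 *	vnode_t *vp;
	 *
	 *	... fid was saved earlier, e.g. in a file handle ...
	 *	if (VFS_VGET(vfsp, &vp, &fid) == 0) {
	 *		... use vp ...
	 *		VN_RELE(vp);
	 *	}
	 *
	 * A system-attribute fid is simply a longer fid (XATTR_FIDSZ bytes)
	 * wrapping the parent's fid, which is how the length test below
	 * recognizes it and reroutes the request to xattr_dir_vget().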
282 */ 283 if (vfs_has_feature(vfsp, VFSFT_SYSATTR_VIEWS) && 284 fidp->fid_len == XATTR_FIDSZ) 285 return (xattr_dir_vget(vfsp, vpp, fidp)); 286 287 return (*(vfsp)->vfs_op->vfs_vget)(vfsp, vpp, fidp); 288 } 289 290 int 291 fsop_mountroot(vfs_t *vfsp, enum whymountroot reason) 292 { 293 return (*(vfsp)->vfs_op->vfs_mountroot)(vfsp, reason); 294 } 295 296 void 297 fsop_freefs(vfs_t *vfsp) 298 { 299 (*(vfsp)->vfs_op->vfs_freevfs)(vfsp); 300 } 301 302 int 303 fsop_vnstate(vfs_t *vfsp, vnode_t *vp, vntrans_t nstate) 304 { 305 return ((*(vfsp)->vfs_op->vfs_vnstate)(vfsp, vp, nstate)); 306 } 307 308 int 309 fsop_sync_by_kind(int fstype, short flag, cred_t *cr) 310 { 311 ASSERT((fstype >= 0) && (fstype < nfstype)); 312 313 if (ALLOCATED_VFSSW(&vfssw[fstype]) && VFS_INSTALLED(&vfssw[fstype])) 314 return (*vfssw[fstype].vsw_vfsops.vfs_sync) (NULL, flag, cr); 315 else 316 return (ENOTSUP); 317 } 318 319 /* 320 * File system initialization. vfs_setfsops() must be called from a file 321 * system's init routine. 322 */ 323 324 static int 325 fs_copyfsops(const fs_operation_def_t *template, vfsops_t *actual, 326 int *unused_ops) 327 { 328 static const fs_operation_trans_def_t vfs_ops_table[] = { 329 VFSNAME_MOUNT, offsetof(vfsops_t, vfs_mount), 330 fs_nosys, fs_nosys, 331 332 VFSNAME_UNMOUNT, offsetof(vfsops_t, vfs_unmount), 333 fs_nosys, fs_nosys, 334 335 VFSNAME_ROOT, offsetof(vfsops_t, vfs_root), 336 fs_nosys, fs_nosys, 337 338 VFSNAME_STATVFS, offsetof(vfsops_t, vfs_statvfs), 339 fs_nosys, fs_nosys, 340 341 VFSNAME_SYNC, offsetof(vfsops_t, vfs_sync), 342 (fs_generic_func_p) fs_sync, 343 (fs_generic_func_p) fs_sync, /* No errors allowed */ 344 345 VFSNAME_VGET, offsetof(vfsops_t, vfs_vget), 346 fs_nosys, fs_nosys, 347 348 VFSNAME_MOUNTROOT, offsetof(vfsops_t, vfs_mountroot), 349 fs_nosys, fs_nosys, 350 351 VFSNAME_FREEVFS, offsetof(vfsops_t, vfs_freevfs), 352 (fs_generic_func_p)fs_freevfs, 353 (fs_generic_func_p)fs_freevfs, /* Shouldn't fail */ 354 355 VFSNAME_VNSTATE, offsetof(vfsops_t, vfs_vnstate), 356 (fs_generic_func_p)fs_nosys, 357 (fs_generic_func_p)fs_nosys, 358 359 NULL, 0, NULL, NULL 360 }; 361 362 return (fs_build_vector(actual, unused_ops, vfs_ops_table, template)); 363 } 364 365 void 366 zfs_boot_init() { 367 368 if (strcmp(rootfs.bo_fstype, MNTTYPE_ZFS) == 0) 369 spa_boot_init(); 370 } 371 372 int 373 vfs_setfsops(int fstype, const fs_operation_def_t *template, vfsops_t **actual) 374 { 375 int error; 376 int unused_ops; 377 378 /* 379 * Verify that fstype refers to a valid fs. Note that 380 * 0 is valid since it's used to set "stray" ops. 381 */ 382 if ((fstype < 0) || (fstype >= nfstype)) 383 return (EINVAL); 384 385 if (!ALLOCATED_VFSSW(&vfssw[fstype])) 386 return (EINVAL); 387 388 /* Set up the operations vector. 
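	 *
	 * The template comes from the file system's init routine, typically
	 * as a fs_operation_def_t array registered through its vfsdef_t. A
	 * minimal sketch of a common registration pattern (hypothetical
	 * "examplefs"; the function names are illustrative only):
	 *
	 *	static const fs_operation_def_t examplefs_vfsops_template[] = {
	 *		VFSNAME_MOUNT,		{ .vfs_mount = examplefs_mount },
	 *		VFSNAME_UNMOUNT,	{ .vfs_unmount = examplefs_unmount },
	 *		VFSNAME_ROOT,		{ .vfs_root = examplefs_root },
	 *		VFSNAME_STATVFS,	{ .vfs_statvfs = examplefs_statvfs },
	 *		NULL,			NULL
	 *	};
	 *
	 *	static int
	 *	examplefs_init(int fstype, char *name)
	 *	{
	 *		return (vfs_setfsops(fstype, examplefs_vfsops_template,
	 *		    NULL));
	 *	}
	 *
	 * Operations left out of the template fall back to the defaults in
	 * vfs_ops_table above (generally fs_nosys).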
*/ 389 390 error = fs_copyfsops(template, &vfssw[fstype].vsw_vfsops, &unused_ops); 391 392 if (error != 0) 393 return (error); 394 395 vfssw[fstype].vsw_flag |= VSW_INSTALLED; 396 397 if (actual != NULL) 398 *actual = &vfssw[fstype].vsw_vfsops; 399 400 #if DEBUG 401 if (unused_ops != 0) 402 cmn_err(CE_WARN, "vfs_setfsops: %s: %d operations supplied " 403 "but not used", vfssw[fstype].vsw_name, unused_ops); 404 #endif 405 406 return (0); 407 } 408 409 int 410 vfs_makefsops(const fs_operation_def_t *template, vfsops_t **actual) 411 { 412 int error; 413 int unused_ops; 414 415 *actual = (vfsops_t *)kmem_alloc(sizeof (vfsops_t), KM_SLEEP); 416 417 error = fs_copyfsops(template, *actual, &unused_ops); 418 if (error != 0) { 419 kmem_free(*actual, sizeof (vfsops_t)); 420 *actual = NULL; 421 return (error); 422 } 423 424 return (0); 425 } 426 427 /* 428 * Free a vfsops structure created as a result of vfs_makefsops(). 429 * NOTE: For a vfsops structure initialized by vfs_setfsops(), use 430 * vfs_freevfsops_by_type(). 431 */ 432 void 433 vfs_freevfsops(vfsops_t *vfsops) 434 { 435 kmem_free(vfsops, sizeof (vfsops_t)); 436 } 437 438 /* 439 * Since the vfsops structure is part of the vfssw table and wasn't 440 * really allocated, we're not really freeing anything. We keep 441 * the name for consistency with vfs_freevfsops(). We do, however, 442 * need to take care of a little bookkeeping. 443 * NOTE: For a vfsops structure created by vfs_setfsops(), use 444 * vfs_freevfsops_by_type(). 445 */ 446 int 447 vfs_freevfsops_by_type(int fstype) 448 { 449 450 /* Verify that fstype refers to a loaded fs (and not fsid 0). */ 451 if ((fstype <= 0) || (fstype >= nfstype)) 452 return (EINVAL); 453 454 WLOCK_VFSSW(); 455 if ((vfssw[fstype].vsw_flag & VSW_INSTALLED) == 0) { 456 WUNLOCK_VFSSW(); 457 return (EINVAL); 458 } 459 460 vfssw[fstype].vsw_flag &= ~VSW_INSTALLED; 461 WUNLOCK_VFSSW(); 462 463 return (0); 464 } 465 466 /* Support routines used to reference vfs_op */ 467 468 /* Set the operations vector for a vfs */ 469 void 470 vfs_setops(vfs_t *vfsp, vfsops_t *vfsops) 471 { 472 vfsops_t *op; 473 474 ASSERT(vfsp != NULL); 475 ASSERT(vfsops != NULL); 476 477 op = vfsp->vfs_op; 478 membar_consumer(); 479 if (vfsp->vfs_femhead == NULL && 480 atomic_cas_ptr(&vfsp->vfs_op, op, vfsops) == op) { 481 return; 482 } 483 fsem_setvfsops(vfsp, vfsops); 484 } 485 486 /* Retrieve the operations vector for a vfs */ 487 vfsops_t * 488 vfs_getops(vfs_t *vfsp) 489 { 490 vfsops_t *op; 491 492 ASSERT(vfsp != NULL); 493 494 op = vfsp->vfs_op; 495 membar_consumer(); 496 if (vfsp->vfs_femhead == NULL && op == vfsp->vfs_op) { 497 return (op); 498 } else { 499 return (fsem_getvfsops(vfsp)); 500 } 501 } 502 503 /* 504 * Returns non-zero (1) if the vfsops matches that of the vfs. 505 * Returns zero (0) if not. 506 */ 507 int 508 vfs_matchops(vfs_t *vfsp, vfsops_t *vfsops) 509 { 510 return (vfs_getops(vfsp) == vfsops); 511 } 512 513 /* 514 * Returns non-zero (1) if the file system has installed a non-default, 515 * non-error vfs_sync routine. Returns zero (0) otherwise. 516 */ 517 int 518 vfs_can_sync(vfs_t *vfsp) 519 { 520 /* vfs_sync() routine is not the default/error function */ 521 return (vfs_getops(vfsp)->vfs_sync != fs_sync); 522 } 523 524 /* 525 * Initialize a vfs structure. 
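 *
 * Callers either embed a vfs_t (as the static root/devices/dev structures
 * above do) or obtain one from vfs_alloc(). A minimal sketch of the dynamic
 * case, mirroring what domount() does later in this file:
 *
 *	vfsp = vfs_alloc(KM_SLEEP);
 *	VFS_INIT(vfsp, vfsops, NULL);
 *	VFS_HOLD(vfsp);
 *	... mount the file system, add it to the vfs list ...
 *	VFS_RELE(vfsp);
 *
 * On an early error path the unreferenced vfs is released with vfs_free()
 * instead, as the error handling in domount() shows.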
526 */ 527 void 528 vfs_init(vfs_t *vfsp, vfsops_t *op, void *data) 529 { 530 /* Other initialization has been moved to vfs_alloc() */ 531 vfsp->vfs_count = 0; 532 vfsp->vfs_next = vfsp; 533 vfsp->vfs_prev = vfsp; 534 vfsp->vfs_zone_next = vfsp; 535 vfsp->vfs_zone_prev = vfsp; 536 vfsp->vfs_lofi_minor = 0; 537 sema_init(&vfsp->vfs_reflock, 1, NULL, SEMA_DEFAULT, NULL); 538 vfsimpl_setup(vfsp); 539 vfsp->vfs_data = (data); 540 vfs_setops((vfsp), (op)); 541 } 542 543 /* 544 * Allocate and initialize the vfs implementation private data 545 * structure, vfs_impl_t. 546 */ 547 void 548 vfsimpl_setup(vfs_t *vfsp) 549 { 550 int i; 551 552 if (vfsp->vfs_implp != NULL) { 553 return; 554 } 555 556 vfsp->vfs_implp = kmem_alloc(sizeof (vfs_impl_t), KM_SLEEP); 557 /* Note that these are #define'd in vfs.h */ 558 vfsp->vfs_vskap = NULL; 559 vfsp->vfs_fstypevsp = NULL; 560 561 /* Set size of counted array, then zero the array */ 562 vfsp->vfs_featureset[0] = VFS_FEATURE_MAXSZ - 1; 563 for (i = 1; i < VFS_FEATURE_MAXSZ; i++) { 564 vfsp->vfs_featureset[i] = 0; 565 } 566 } 567 568 /* 569 * Release the vfs_impl_t structure, if it exists. Some unbundled 570 * filesystems may not use the newer version of vfs and thus 571 * would not contain this implementation private data structure. 572 */ 573 void 574 vfsimpl_teardown(vfs_t *vfsp) 575 { 576 vfs_impl_t *vip = vfsp->vfs_implp; 577 578 if (vip == NULL) 579 return; 580 581 kmem_free(vfsp->vfs_implp, sizeof (vfs_impl_t)); 582 vfsp->vfs_implp = NULL; 583 } 584 585 /* 586 * VFS system calls: mount, umount, syssync, statfs, fstatfs, statvfs, 587 * fstatvfs, and sysfs moved to common/syscall. 588 */ 589 590 /* 591 * Update every mounted file system. We call the vfs_sync operation of 592 * each file system type, passing it a NULL vfsp to indicate that all 593 * mounted file systems of that type should be updated. 594 */ 595 void 596 vfs_sync(int flag) 597 { 598 struct vfssw *vswp; 599 RLOCK_VFSSW(); 600 for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++) { 601 if (ALLOCATED_VFSSW(vswp) && VFS_INSTALLED(vswp)) { 602 vfs_refvfssw(vswp); 603 RUNLOCK_VFSSW(); 604 (void) (*vswp->vsw_vfsops.vfs_sync)(NULL, flag, 605 CRED()); 606 vfs_unrefvfssw(vswp); 607 RLOCK_VFSSW(); 608 } 609 } 610 RUNLOCK_VFSSW(); 611 } 612 613 void 614 sync(void) 615 { 616 vfs_sync(0); 617 } 618 619 /* 620 * compare function for vfs_by_dev avl tree. compare dev first, then mntix 621 */ 622 static int 623 vfs_cmp_dev(const void *aa, const void *bb) 624 { 625 const vfs_t *a = aa; 626 const vfs_t *b = bb; 627 628 if (a->vfs_dev < b->vfs_dev) 629 return (-1); 630 if (a->vfs_dev > b->vfs_dev) 631 return (1); 632 if (a->vfs_mntix < b->vfs_mntix) 633 return (-1); 634 if (a->vfs_mntix > b->vfs_mntix) 635 return (1); 636 return (0); 637 } 638 639 /* 640 * compare function for vfs_by_mntpnt avl tree. compare mntpnt first, then mntix 641 */ 642 static int 643 vfs_cmp_mntpnt(const void *aa, const void *bb) 644 { 645 const vfs_t *a = aa; 646 const vfs_t *b = bb; 647 int ret; 648 649 ret = strcmp(refstr_value(a->vfs_mntpt), refstr_value(b->vfs_mntpt)); 650 if (ret < 0) 651 return (-1); 652 if (ret > 0) 653 return (1); 654 if (a->vfs_mntix < b->vfs_mntix) 655 return (-1); 656 if (a->vfs_mntix > b->vfs_mntix) 657 return (1); 658 return (0); 659 } 660 661 /* 662 * External routines. 663 */ 664 665 krwlock_t vfssw_lock; /* lock accesses to vfssw */ 666 667 /* 668 * Lock for accessing the vfs linked list. 
Initialized in vfs_mountroot(), 669 * but otherwise should be accessed only via vfs_list_lock() and 670 * vfs_list_unlock(). Also used to protect the timestamp for mods to the list. 671 */ 672 static krwlock_t vfslist; 673 674 /* 675 * Mount devfs on /devices. This is done right after root is mounted 676 * to provide device access support for the system 677 */ 678 static void 679 vfs_mountdevices(void) 680 { 681 struct vfssw *vsw; 682 struct vnode *mvp; 683 struct mounta mounta = { /* fake mounta for devfs_mount() */ 684 NULL, 685 NULL, 686 MS_SYSSPACE, 687 NULL, 688 NULL, 689 0, 690 NULL, 691 0 692 }; 693 694 /* 695 * _init devfs module to fill in the vfssw 696 */ 697 if (modload("fs", "devfs") == -1) 698 panic("Cannot _init devfs module"); 699 700 /* 701 * Hold vfs 702 */ 703 RLOCK_VFSSW(); 704 vsw = vfs_getvfsswbyname("devfs"); 705 VFS_INIT(&devices, &vsw->vsw_vfsops, NULL); 706 VFS_HOLD(&devices); 707 708 /* 709 * Locate mount point 710 */ 711 if (lookupname("/devices", UIO_SYSSPACE, FOLLOW, NULLVPP, &mvp)) 712 panic("Cannot find /devices"); 713 714 /* 715 * Perform the mount of /devices 716 */ 717 if (VFS_MOUNT(&devices, mvp, &mounta, CRED())) 718 panic("Cannot mount /devices"); 719 720 RUNLOCK_VFSSW(); 721 722 /* 723 * Set appropriate members and add to vfs list for mnttab display 724 */ 725 vfs_setresource(&devices, "/devices", 0); 726 vfs_setmntpoint(&devices, "/devices", 0); 727 728 /* 729 * Hold the root of /devices so it won't go away 730 */ 731 if (VFS_ROOT(&devices, &devicesdir)) 732 panic("vfs_mountdevices: not devices root"); 733 734 if (vfs_lock(&devices) != 0) { 735 VN_RELE(devicesdir); 736 cmn_err(CE_NOTE, "Cannot acquire vfs_lock of /devices"); 737 return; 738 } 739 740 if (vn_vfswlock(mvp) != 0) { 741 vfs_unlock(&devices); 742 VN_RELE(devicesdir); 743 cmn_err(CE_NOTE, "Cannot acquire vfswlock of /devices"); 744 return; 745 } 746 747 vfs_add(mvp, &devices, 0); 748 vn_vfsunlock(mvp); 749 vfs_unlock(&devices); 750 VN_RELE(devicesdir); 751 } 752 753 /* 754 * mount the first instance of /dev to root and remain mounted 755 */ 756 static void 757 vfs_mountdev1(void) 758 { 759 struct vfssw *vsw; 760 struct vnode *mvp; 761 struct mounta mounta = { /* fake mounta for sdev_mount() */ 762 NULL, 763 NULL, 764 MS_SYSSPACE | MS_OVERLAY, 765 NULL, 766 NULL, 767 0, 768 NULL, 769 0 770 }; 771 772 /* 773 * _init dev module to fill in the vfssw 774 */ 775 if (modload("fs", "dev") == -1) 776 cmn_err(CE_PANIC, "Cannot _init dev module\n"); 777 778 /* 779 * Hold vfs 780 */ 781 RLOCK_VFSSW(); 782 vsw = vfs_getvfsswbyname("dev"); 783 VFS_INIT(&dev, &vsw->vsw_vfsops, NULL); 784 VFS_HOLD(&dev); 785 786 /* 787 * Locate mount point 788 */ 789 if (lookupname("/dev", UIO_SYSSPACE, FOLLOW, NULLVPP, &mvp)) 790 cmn_err(CE_PANIC, "Cannot find /dev\n"); 791 792 /* 793 * Perform the mount of /dev 794 */ 795 if (VFS_MOUNT(&dev, mvp, &mounta, CRED())) 796 cmn_err(CE_PANIC, "Cannot mount /dev 1\n"); 797 798 RUNLOCK_VFSSW(); 799 800 /* 801 * Set appropriate members and add to vfs list for mnttab display 802 */ 803 vfs_setresource(&dev, "/dev", 0); 804 vfs_setmntpoint(&dev, "/dev", 0); 805 806 /* 807 * Hold the root of /dev so it won't go away 808 */ 809 if (VFS_ROOT(&dev, &devdir)) 810 cmn_err(CE_PANIC, "vfs_mountdev1: not dev root"); 811 812 if (vfs_lock(&dev) != 0) { 813 VN_RELE(devdir); 814 cmn_err(CE_NOTE, "Cannot acquire vfs_lock of /dev"); 815 return; 816 } 817 818 if (vn_vfswlock(mvp) != 0) { 819 vfs_unlock(&dev); 820 VN_RELE(devdir); 821 cmn_err(CE_NOTE, "Cannot acquire vfswlock of /dev"); 822 return; 
823 } 824 825 vfs_add(mvp, &dev, 0); 826 vn_vfsunlock(mvp); 827 vfs_unlock(&dev); 828 VN_RELE(devdir); 829 } 830 831 /* 832 * Mount required filesystem. This is done right after root is mounted. 833 */ 834 static void 835 vfs_mountfs(char *module, char *spec, char *path) 836 { 837 struct vnode *mvp; 838 struct mounta mounta; 839 vfs_t *vfsp; 840 841 bzero(&mounta, sizeof (mounta)); 842 mounta.flags = MS_SYSSPACE | MS_DATA; 843 mounta.fstype = module; 844 mounta.spec = spec; 845 mounta.dir = path; 846 if (lookupname(path, UIO_SYSSPACE, FOLLOW, NULLVPP, &mvp)) { 847 cmn_err(CE_WARN, "Cannot find %s", path); 848 return; 849 } 850 if (domount(NULL, &mounta, mvp, CRED(), &vfsp)) 851 cmn_err(CE_WARN, "Cannot mount %s", path); 852 else 853 VFS_RELE(vfsp); 854 VN_RELE(mvp); 855 } 856 857 /* 858 * vfs_mountroot is called by main() to mount the root filesystem. 859 */ 860 void 861 vfs_mountroot(void) 862 { 863 struct vnode *rvp = NULL; 864 char *path; 865 size_t plen; 866 struct vfssw *vswp; 867 proc_t *p; 868 869 rw_init(&vfssw_lock, NULL, RW_DEFAULT, NULL); 870 rw_init(&vfslist, NULL, RW_DEFAULT, NULL); 871 872 /* 873 * Alloc the avl trees for quick indexing via dev and mountpoint 874 */ 875 avl_create(&vfs_by_dev, vfs_cmp_dev, sizeof(vfs_t), 876 offsetof(vfs_t, vfs_avldev)); 877 avl_create(&vfs_by_mntpnt, vfs_cmp_mntpnt, sizeof(vfs_t), 878 offsetof(vfs_t, vfs_avlmntpnt)); 879 880 /* 881 * Alloc the vfs hash bucket array and locks 882 */ 883 rvfs_list = kmem_zalloc(vfshsz * sizeof (rvfs_t), KM_SLEEP); 884 885 /* 886 * Call machine-dependent routine "rootconf" to choose a root 887 * file system type. 888 */ 889 if (rootconf()) 890 panic("vfs_mountroot: cannot mount root"); 891 /* 892 * Get vnode for '/'. Set up rootdir, u.u_rdir and u.u_cdir 893 * to point to it. These are used by lookuppn() so that it 894 * knows where to start from ('/' or '.'). 895 */ 896 vfs_setmntpoint(rootvfs, "/", 0); 897 if (VFS_ROOT(rootvfs, &rootdir)) 898 panic("vfs_mountroot: no root vnode"); 899 900 /* 901 * At this point, the process tree consists of p0 and possibly some 902 * direct children of p0. (i.e. there are no grandchildren) 903 * 904 * Walk through them all, setting their current directory. 905 */ 906 mutex_enter(&pidlock); 907 for (p = practive; p != NULL; p = p->p_next) { 908 ASSERT(p == &p0 || p->p_parent == &p0); 909 910 PTOU(p)->u_cdir = rootdir; 911 VN_HOLD(PTOU(p)->u_cdir); 912 PTOU(p)->u_rdir = NULL; 913 } 914 mutex_exit(&pidlock); 915 916 /* 917 * Setup the global zone's rootvp, now that it exists. 918 */ 919 global_zone->zone_rootvp = rootdir; 920 VN_HOLD(global_zone->zone_rootvp); 921 922 /* 923 * Notify the module code that it can begin using the 924 * root filesystem instead of the boot program's services. 925 */ 926 modrootloaded = 1; 927 928 /* 929 * Special handling for a ZFS root file system. 930 */ 931 zfs_boot_init(); 932 933 /* 934 * Set up mnttab information for root 935 */ 936 vfs_setresource(rootvfs, rootfs.bo_name, 0); 937 938 /* 939 * Notify cluster software that the root filesystem is available. 
940 */ 941 clboot_mountroot(); 942 943 /* Now that we're all done with the root FS, set up its vopstats */ 944 if ((vswp = vfs_getvfsswbyvfsops(vfs_getops(rootvfs))) != NULL) { 945 /* Set flag for statistics collection */ 946 if (vswp->vsw_flag & VSW_STATS) { 947 initialize_vopstats(&rootvfs->vfs_vopstats); 948 rootvfs->vfs_flag |= VFS_STATS; 949 rootvfs->vfs_fstypevsp = 950 get_fstype_vopstats(rootvfs, vswp); 951 rootvfs->vfs_vskap = get_vskstat_anchor(rootvfs); 952 } 953 vfs_unrefvfssw(vswp); 954 } 955 956 /* 957 * Mount /devices, /dev instance 1, /system/contract, /etc/mnttab, 958 * /etc/svc/volatile, /etc/dfs/sharetab, /system/object, and /proc. 959 */ 960 vfs_mountdevices(); 961 vfs_mountdev1(); 962 963 vfs_mountfs("ctfs", "ctfs", CTFS_ROOT); 964 vfs_mountfs("proc", "/proc", "/proc"); 965 vfs_mountfs("mntfs", "/etc/mnttab", "/etc/mnttab"); 966 vfs_mountfs("tmpfs", "/etc/svc/volatile", "/etc/svc/volatile"); 967 vfs_mountfs("objfs", "objfs", OBJFS_ROOT); 968 969 if (getzoneid() == GLOBAL_ZONEID) { 970 vfs_mountfs("sharefs", "sharefs", "/etc/dfs/sharetab"); 971 } 972 973 if (strcmp(rootfs.bo_fstype, "zfs") != 0) { 974 /* 975 * Look up the root device via devfs so that a dv_node is 976 * created for it. The vnode is never VN_RELE()ed. 977 * We allocate more than MAXPATHLEN so that the 978 * buffer passed to i_ddi_prompath_to_devfspath() is 979 * exactly MAXPATHLEN (the function expects a buffer 980 * of that length). 981 */ 982 plen = strlen("/devices"); 983 path = kmem_alloc(plen + MAXPATHLEN, KM_SLEEP); 984 (void) strcpy(path, "/devices"); 985 986 if (i_ddi_prompath_to_devfspath(rootfs.bo_name, path + plen) 987 != DDI_SUCCESS || 988 lookupname(path, UIO_SYSSPACE, FOLLOW, NULLVPP, &rvp)) { 989 990 /* NUL terminate in case "path" has garbage */ 991 path[plen + MAXPATHLEN - 1] = '\0'; 992 #ifdef DEBUG 993 cmn_err(CE_WARN, "!Cannot lookup root device: %s", 994 path); 995 #endif 996 } 997 kmem_free(path, plen + MAXPATHLEN); 998 } 999 1000 vfs_mnttabvp_setup(); 1001 } 1002 1003 /* 1004 * Check to see if our "block device" is actually a file. If so, 1005 * automatically add a lofi device, and keep track of this fact. 1006 */ 1007 static int 1008 lofi_add(const char *fsname, struct vfs *vfsp, 1009 mntopts_t *mntopts, struct mounta *uap) 1010 { 1011 int fromspace = (uap->flags & MS_SYSSPACE) ? 1012 UIO_SYSSPACE : UIO_USERSPACE; 1013 struct lofi_ioctl *li = NULL; 1014 struct vnode *vp = NULL; 1015 struct pathname pn = { NULL }; 1016 ldi_ident_t ldi_id; 1017 ldi_handle_t ldi_hdl; 1018 vfssw_t *vfssw; 1019 int minor; 1020 int err = 0; 1021 1022 if ((vfssw = vfs_getvfssw(fsname)) == NULL) 1023 return (0); 1024 1025 if (!(vfssw->vsw_flag & VSW_CANLOFI)) { 1026 vfs_unrefvfssw(vfssw); 1027 return (0); 1028 } 1029 1030 vfs_unrefvfssw(vfssw); 1031 vfssw = NULL; 1032 1033 if (pn_get(uap->spec, fromspace, &pn) != 0) 1034 return (0); 1035 1036 if (lookupname(uap->spec, fromspace, FOLLOW, NULL, &vp) != 0) 1037 goto out; 1038 1039 if (vp->v_type != VREG) 1040 goto out; 1041 1042 /* OK, this is a lofi mount. 
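	 *
	 * In practice this is the path taken when a regular file is given
	 * as the mount resource, e.g. (illustrative command line):
	 *
	 *	mount -F ufs /export/images/disk.img /mnt
	 *
	 * The file is mapped through /dev/lofictl below (the in-kernel
	 * counterpart of what lofiadm does), and the resulting minor is
	 * remembered in vfs_lofi_minor so that lofi_remove() can undo the
	 * mapping when the mount fails or the file system is unmounted.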
*/ 1043 1044 if ((uap->flags & (MS_REMOUNT|MS_GLOBAL)) || 1045 vfs_optionisset_nolock(mntopts, MNTOPT_SUID, NULL) || 1046 vfs_optionisset_nolock(mntopts, MNTOPT_SETUID, NULL) || 1047 vfs_optionisset_nolock(mntopts, MNTOPT_DEVICES, NULL)) { 1048 err = EINVAL; 1049 goto out; 1050 } 1051 1052 ldi_id = ldi_ident_from_anon(); 1053 li = kmem_zalloc(sizeof (*li), KM_SLEEP); 1054 (void) strlcpy(li->li_filename, pn.pn_path, MAXPATHLEN); 1055 1056 err = ldi_open_by_name("/dev/lofictl", FREAD | FWRITE, kcred, 1057 &ldi_hdl, ldi_id); 1058 1059 if (err) 1060 goto out2; 1061 1062 err = ldi_ioctl(ldi_hdl, LOFI_MAP_FILE, (intptr_t)li, 1063 FREAD | FWRITE | FKIOCTL, kcred, &minor); 1064 1065 (void) ldi_close(ldi_hdl, FREAD | FWRITE, kcred); 1066 1067 if (!err) 1068 vfsp->vfs_lofi_minor = minor; 1069 1070 out2: 1071 ldi_ident_release(ldi_id); 1072 out: 1073 if (li != NULL) 1074 kmem_free(li, sizeof (*li)); 1075 if (vp != NULL) 1076 VN_RELE(vp); 1077 pn_free(&pn); 1078 return (err); 1079 } 1080 1081 static void 1082 lofi_remove(struct vfs *vfsp) 1083 { 1084 struct lofi_ioctl *li = NULL; 1085 ldi_ident_t ldi_id; 1086 ldi_handle_t ldi_hdl; 1087 int err; 1088 1089 if (vfsp->vfs_lofi_minor == 0) 1090 return; 1091 1092 ldi_id = ldi_ident_from_anon(); 1093 1094 li = kmem_zalloc(sizeof (*li), KM_SLEEP); 1095 li->li_minor = vfsp->vfs_lofi_minor; 1096 li->li_cleanup = B_TRUE; 1097 1098 err = ldi_open_by_name("/dev/lofictl", FREAD | FWRITE, kcred, 1099 &ldi_hdl, ldi_id); 1100 1101 if (err) 1102 goto out; 1103 1104 err = ldi_ioctl(ldi_hdl, LOFI_UNMAP_FILE_MINOR, (intptr_t)li, 1105 FREAD | FWRITE | FKIOCTL, kcred, NULL); 1106 1107 (void) ldi_close(ldi_hdl, FREAD | FWRITE, kcred); 1108 1109 if (!err) 1110 vfsp->vfs_lofi_minor = 0; 1111 1112 out: 1113 ldi_ident_release(ldi_id); 1114 if (li != NULL) 1115 kmem_free(li, sizeof (*li)); 1116 } 1117 1118 /* 1119 * Common mount code. Called from the system call entry point, from autofs, 1120 * nfsv4 trigger mounts, and from pxfs. 1121 * 1122 * Takes the effective file system type, mount arguments, the mount point 1123 * vnode, flags specifying whether the mount is a remount and whether it 1124 * should be entered into the vfs list, and credentials. Fills in its vfspp 1125 * parameter with the mounted file system instance's vfs. 1126 * 1127 * Note that the effective file system type is specified as a string. It may 1128 * be null, in which case it's determined from the mount arguments, and may 1129 * differ from the type specified in the mount arguments; this is a hook to 1130 * allow interposition when instantiating file system instances. 1131 * 1132 * The caller is responsible for releasing its own hold on the mount point 1133 * vp (this routine does its own hold when necessary). 1134 * Also note that for remounts, the mount point vp should be the vnode for 1135 * the root of the file system rather than the vnode that the file system 1136 * is mounted on top of. 1137 */ 1138 int 1139 domount(char *fsname, struct mounta *uap, vnode_t *vp, struct cred *credp, 1140 struct vfs **vfspp) 1141 { 1142 struct vfssw *vswp; 1143 vfsops_t *vfsops; 1144 struct vfs *vfsp; 1145 struct vnode *bvp; 1146 dev_t bdev = 0; 1147 mntopts_t mnt_mntopts; 1148 int error = 0; 1149 int copyout_error = 0; 1150 int ovflags; 1151 char *opts = uap->optptr; 1152 char *inargs = opts; 1153 int optlen = uap->optlen; 1154 int remount; 1155 int rdonly; 1156 int nbmand = 0; 1157 int delmip = 0; 1158 int addmip = 0; 1159 int splice = ((uap->flags & MS_NOSPLICE) == 0); 1160 int fromspace = (uap->flags & MS_SYSSPACE) ? 
1161 UIO_SYSSPACE : UIO_USERSPACE; 1162 char *resource = NULL, *mountpt = NULL; 1163 refstr_t *oldresource, *oldmntpt; 1164 struct pathname pn, rpn; 1165 vsk_anchor_t *vskap; 1166 char fstname[FSTYPSZ]; 1167 zone_t *zone; 1168 1169 /* 1170 * The v_flag value for the mount point vp is permanently set 1171 * to VVFSLOCK so that no one bypasses the vn_vfs*locks routine 1172 * for mount point locking. 1173 */ 1174 mutex_enter(&vp->v_lock); 1175 vp->v_flag |= VVFSLOCK; 1176 mutex_exit(&vp->v_lock); 1177 1178 mnt_mntopts.mo_count = 0; 1179 /* 1180 * Find the ops vector to use to invoke the file system-specific mount 1181 * method. If the fsname argument is non-NULL, use it directly. 1182 * Otherwise, dig the file system type information out of the mount 1183 * arguments. 1184 * 1185 * A side effect is to hold the vfssw entry. 1186 * 1187 * Mount arguments can be specified in several ways, which are 1188 * distinguished by flag bit settings. The preferred way is to set 1189 * MS_OPTIONSTR, indicating an 8 argument mount with the file system 1190 * type supplied as a character string and the last two arguments 1191 * being a pointer to a character buffer and the size of the buffer. 1192 * On entry, the buffer holds a null terminated list of options; on 1193 * return, the string is the list of options the file system 1194 * recognized. If MS_DATA is set arguments five and six point to a 1195 * block of binary data which the file system interprets. 1196 * A further wrinkle is that some callers don't set MS_FSS and MS_DATA 1197 * consistently with these conventions. To handle them, we check to 1198 * see whether the pointer to the file system name has a numeric value 1199 * less than 256. If so, we treat it as an index. 1200 */ 1201 if (fsname != NULL) { 1202 if ((vswp = vfs_getvfssw(fsname)) == NULL) { 1203 return (EINVAL); 1204 } 1205 } else if (uap->flags & (MS_OPTIONSTR | MS_DATA | MS_FSS)) { 1206 size_t n; 1207 uint_t fstype; 1208 1209 fsname = fstname; 1210 1211 if ((fstype = (uintptr_t)uap->fstype) < 256) { 1212 RLOCK_VFSSW(); 1213 if (fstype == 0 || fstype >= nfstype || 1214 !ALLOCATED_VFSSW(&vfssw[fstype])) { 1215 RUNLOCK_VFSSW(); 1216 return (EINVAL); 1217 } 1218 (void) strcpy(fsname, vfssw[fstype].vsw_name); 1219 RUNLOCK_VFSSW(); 1220 if ((vswp = vfs_getvfssw(fsname)) == NULL) 1221 return (EINVAL); 1222 } else { 1223 /* 1224 * Handle either kernel or user address space. 
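			 *
			 * MS_SYSSPACE means every pointer in the mounta was
			 * built in the kernel, so copystr() is used; without
			 * it the strings live in the calling process and go
			 * through copyinstr(). vfs_mountfs() earlier in this
			 * file is the in-kernel case, e.g. (error handling
			 * omitted):
			 *
			 *	mounta.flags = MS_SYSSPACE | MS_DATA;
			 *	mounta.fstype = "mntfs";
			 *	mounta.spec = "/etc/mnttab";
			 *	mounta.dir = "/etc/mnttab";
			 *	(void) domount(NULL, &mounta, mvp, CRED(), &vfsp);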
1225 */ 1226 if (uap->flags & MS_SYSSPACE) { 1227 error = copystr(uap->fstype, fsname, 1228 FSTYPSZ, &n); 1229 } else { 1230 error = copyinstr(uap->fstype, fsname, 1231 FSTYPSZ, &n); 1232 } 1233 if (error) { 1234 if (error == ENAMETOOLONG) 1235 return (EINVAL); 1236 return (error); 1237 } 1238 if ((vswp = vfs_getvfssw(fsname)) == NULL) 1239 return (EINVAL); 1240 } 1241 } else { 1242 if ((vswp = vfs_getvfsswbyvfsops(vfs_getops(rootvfs))) == NULL) 1243 return (EINVAL); 1244 fsname = vswp->vsw_name; 1245 } 1246 if (!VFS_INSTALLED(vswp)) 1247 return (EINVAL); 1248 1249 if ((error = secpolicy_fs_allowed_mount(fsname)) != 0) { 1250 vfs_unrefvfssw(vswp); 1251 return (error); 1252 } 1253 1254 vfsops = &vswp->vsw_vfsops; 1255 1256 vfs_copyopttbl(&vswp->vsw_optproto, &mnt_mntopts); 1257 /* 1258 * Fetch mount options and parse them for generic vfs options 1259 */ 1260 if (uap->flags & MS_OPTIONSTR) { 1261 /* 1262 * Limit the buffer size 1263 */ 1264 if (optlen < 0 || optlen > MAX_MNTOPT_STR) { 1265 error = EINVAL; 1266 goto errout; 1267 } 1268 if ((uap->flags & MS_SYSSPACE) == 0) { 1269 inargs = kmem_alloc(MAX_MNTOPT_STR, KM_SLEEP); 1270 inargs[0] = '\0'; 1271 if (optlen) { 1272 error = copyinstr(opts, inargs, (size_t)optlen, 1273 NULL); 1274 if (error) { 1275 goto errout; 1276 } 1277 } 1278 } 1279 vfs_parsemntopts(&mnt_mntopts, inargs, 0); 1280 } 1281 /* 1282 * Flag bits override the options string. 1283 */ 1284 if (uap->flags & MS_REMOUNT) 1285 vfs_setmntopt_nolock(&mnt_mntopts, MNTOPT_REMOUNT, NULL, 0, 0); 1286 if (uap->flags & MS_RDONLY) 1287 vfs_setmntopt_nolock(&mnt_mntopts, MNTOPT_RO, NULL, 0, 0); 1288 if (uap->flags & MS_NOSUID) 1289 vfs_setmntopt_nolock(&mnt_mntopts, MNTOPT_NOSUID, NULL, 0, 0); 1290 1291 /* 1292 * Check if this is a remount; must be set in the option string and 1293 * the file system must support a remount option. 1294 */ 1295 if (remount = vfs_optionisset_nolock(&mnt_mntopts, 1296 MNTOPT_REMOUNT, NULL)) { 1297 if (!(vswp->vsw_flag & VSW_CANREMOUNT)) { 1298 error = ENOTSUP; 1299 goto errout; 1300 } 1301 uap->flags |= MS_REMOUNT; 1302 } 1303 1304 /* 1305 * uap->flags and vfs_optionisset() should agree. 1306 */ 1307 if (rdonly = vfs_optionisset_nolock(&mnt_mntopts, MNTOPT_RO, NULL)) { 1308 uap->flags |= MS_RDONLY; 1309 } 1310 if (vfs_optionisset_nolock(&mnt_mntopts, MNTOPT_NOSUID, NULL)) { 1311 uap->flags |= MS_NOSUID; 1312 } 1313 nbmand = vfs_optionisset_nolock(&mnt_mntopts, MNTOPT_NBMAND, NULL); 1314 ASSERT(splice || !remount); 1315 /* 1316 * If we are splicing the fs into the namespace, 1317 * perform mount point checks. 1318 * 1319 * We want to resolve the path for the mount point to eliminate 1320 * '.' and ".." and symlinks in mount points; we can't do the 1321 * same for the resource string, since it would turn 1322 * "/dev/dsk/c0t0d0s0" into "/devices/pci@...". We need to do 1323 * this before grabbing vn_vfswlock(), because otherwise we 1324 * would deadlock with lookuppn(). 1325 */ 1326 if (splice) { 1327 ASSERT(vp->v_count > 0); 1328 1329 /* 1330 * Pick up mount point and device from appropriate space. 1331 */ 1332 if (pn_get(uap->spec, fromspace, &pn) == 0) { 1333 resource = kmem_alloc(pn.pn_pathlen + 1, 1334 KM_SLEEP); 1335 (void) strcpy(resource, pn.pn_path); 1336 pn_free(&pn); 1337 } 1338 /* 1339 * Do a lookupname prior to taking the 1340 * writelock. Mark this as completed if 1341 * successful for later cleanup and addition to 1342 * the mount in progress table. 
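		 *
		 * The mount-in-progress table is the vfs_miplist of struct
		 * ipmnt entries declared near the top of this file; each
		 * entry pairs the backing device with the vfs being mounted
		 * so other code can tell the device is involved in a mount
		 * that has not yet appeared on the vfs list. Roughly,
		 * vfs_addmip() (defined later in vfs.c) amounts to:
		 *
		 *	mipp = kmem_alloc(sizeof (struct ipmnt), KM_SLEEP);
		 *	mipp->mip_dev = bdev;
		 *	mipp->mip_vfsp = vfsp;
		 *	mutex_enter(&vfs_miplist_mutex);
		 *	... link mipp onto vfs_miplist ...
		 *	mutex_exit(&vfs_miplist_mutex);
		 *
		 * and vfs_delmip() removes the entry again once the mount has
		 * completed (see the delmip handling at the end of domount()).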
1343 */ 1344 if ((uap->flags & MS_GLOBAL) == 0 && 1345 lookupname(uap->spec, fromspace, 1346 FOLLOW, NULL, &bvp) == 0) { 1347 addmip = 1; 1348 } 1349 1350 if ((error = pn_get(uap->dir, fromspace, &pn)) == 0) { 1351 pathname_t *pnp; 1352 1353 if (*pn.pn_path != '/') { 1354 error = EINVAL; 1355 pn_free(&pn); 1356 goto errout; 1357 } 1358 pn_alloc(&rpn); 1359 /* 1360 * Kludge to prevent autofs from deadlocking with 1361 * itself when it calls domount(). 1362 * 1363 * If autofs is calling, it is because it is doing 1364 * (autofs) mounts in the process of an NFS mount. A 1365 * lookuppn() here would cause us to block waiting for 1366 * said NFS mount to complete, which can't since this 1367 * is the thread that was supposed to doing it. 1368 */ 1369 if (fromspace == UIO_USERSPACE) { 1370 if ((error = lookuppn(&pn, &rpn, FOLLOW, NULL, 1371 NULL)) == 0) { 1372 pnp = &rpn; 1373 } else { 1374 /* 1375 * The file disappeared or otherwise 1376 * became inaccessible since we opened 1377 * it; might as well fail the mount 1378 * since the mount point is no longer 1379 * accessible. 1380 */ 1381 pn_free(&rpn); 1382 pn_free(&pn); 1383 goto errout; 1384 } 1385 } else { 1386 pnp = &pn; 1387 } 1388 mountpt = kmem_alloc(pnp->pn_pathlen + 1, KM_SLEEP); 1389 (void) strcpy(mountpt, pnp->pn_path); 1390 1391 /* 1392 * If the addition of the zone's rootpath 1393 * would push us over a total path length 1394 * of MAXPATHLEN, we fail the mount with 1395 * ENAMETOOLONG, which is what we would have 1396 * gotten if we were trying to perform the same 1397 * mount in the global zone. 1398 * 1399 * strlen() doesn't count the trailing 1400 * '\0', but zone_rootpathlen counts both a 1401 * trailing '/' and the terminating '\0'. 1402 */ 1403 if ((curproc->p_zone->zone_rootpathlen - 1 + 1404 strlen(mountpt)) > MAXPATHLEN || 1405 (resource != NULL && 1406 (curproc->p_zone->zone_rootpathlen - 1 + 1407 strlen(resource)) > MAXPATHLEN)) { 1408 error = ENAMETOOLONG; 1409 } 1410 1411 pn_free(&rpn); 1412 pn_free(&pn); 1413 } 1414 1415 if (error) 1416 goto errout; 1417 1418 /* 1419 * Prevent path name resolution from proceeding past 1420 * the mount point. 1421 */ 1422 if (vn_vfswlock(vp) != 0) { 1423 error = EBUSY; 1424 goto errout; 1425 } 1426 1427 /* 1428 * Verify that it's legitimate to establish a mount on 1429 * the prospective mount point. 1430 */ 1431 if (vn_mountedvfs(vp) != NULL) { 1432 /* 1433 * The mount point lock was obtained after some 1434 * other thread raced through and established a mount. 1435 */ 1436 vn_vfsunlock(vp); 1437 error = EBUSY; 1438 goto errout; 1439 } 1440 if (vp->v_flag & VNOMOUNT) { 1441 vn_vfsunlock(vp); 1442 error = EINVAL; 1443 goto errout; 1444 } 1445 } 1446 if ((uap->flags & (MS_DATA | MS_OPTIONSTR)) == 0) { 1447 uap->dataptr = NULL; 1448 uap->datalen = 0; 1449 } 1450 1451 /* 1452 * If this is a remount, we don't want to create a new VFS. 1453 * Instead, we pass the existing one with a remount flag. 1454 */ 1455 if (remount) { 1456 /* 1457 * Confirm that the mount point is the root vnode of the 1458 * file system that is being remounted. 1459 * This can happen if the user specifies a different 1460 * mount point directory pathname in the (re)mount command. 1461 * 1462 * Code below can only be reached if splice is true, so it's 1463 * safe to do vn_vfsunlock() here. 1464 */ 1465 if ((vp->v_flag & VROOT) == 0) { 1466 vn_vfsunlock(vp); 1467 error = ENOENT; 1468 goto errout; 1469 } 1470 /* 1471 * Disallow making file systems read-only unless file system 1472 * explicitly allows it in its vfssw. 
		 * Ignore other flags.
		 */
		if (rdonly && vn_is_readonly(vp) == 0 &&
		    (vswp->vsw_flag & VSW_CANRWRO) == 0) {
			vn_vfsunlock(vp);
			error = EINVAL;
			goto errout;
		}
		/*
		 * Disallow changing the NBMAND disposition of the file
		 * system on remounts.
		 */
		if ((nbmand && ((vp->v_vfsp->vfs_flag & VFS_NBMAND) == 0)) ||
		    (!nbmand && (vp->v_vfsp->vfs_flag & VFS_NBMAND))) {
			vn_vfsunlock(vp);
			error = EINVAL;
			goto errout;
		}
		vfsp = vp->v_vfsp;
		ovflags = vfsp->vfs_flag;
		vfsp->vfs_flag |= VFS_REMOUNT;
		vfsp->vfs_flag &= ~VFS_RDONLY;
	} else {
		vfsp = vfs_alloc(KM_SLEEP);
		VFS_INIT(vfsp, vfsops, NULL);
	}

	VFS_HOLD(vfsp);

	if ((error = lofi_add(fsname, vfsp, &mnt_mntopts, uap)) != 0) {
		if (!remount) {
			if (splice)
				vn_vfsunlock(vp);
			vfs_free(vfsp);
		} else {
			vn_vfsunlock(vp);
			VFS_RELE(vfsp);
		}
		goto errout;
	}

	/*
	 * PRIV_SYS_MOUNT doesn't mean you can become root.
	 */
	if (vfsp->vfs_lofi_minor != 0) {
		uap->flags |= MS_NOSUID;
		vfs_setmntopt_nolock(&mnt_mntopts, MNTOPT_NOSUID, NULL, 0, 0);
	}

	/*
	 * The vfs_reflock is not used anymore; the code below explicitly
	 * holds it to prevent others from accessing it directly.
	 */
	if ((sema_tryp(&vfsp->vfs_reflock) == 0) &&
	    !(vfsp->vfs_flag & VFS_REMOUNT))
		cmn_err(CE_WARN,
		    "mount type %s couldn't get vfs_reflock", vswp->vsw_name);

	/*
	 * Lock the vfs. If this is a remount we want to avoid spurious umount
	 * failures that happen as a side-effect of fsflush() and other mount
	 * and unmount operations that might be going on simultaneously and
	 * may have locked the vfs currently. To not return EBUSY immediately
	 * here we use vfs_lock_wait() instead of vfs_lock() for the remount
	 * case.
	 */
	if (!remount) {
		if (error = vfs_lock(vfsp)) {
			vfsp->vfs_flag = ovflags;

			lofi_remove(vfsp);

			if (splice)
				vn_vfsunlock(vp);
			vfs_free(vfsp);
			goto errout;
		}
	} else {
		vfs_lock_wait(vfsp);
	}

	/*
	 * Add device to mount in progress table; global mounts require
	 * special handling. It is possible that we have already done the
	 * lookupname on a spliced, non-global fs. If so, we don't want to
	 * do it again since we cannot do a lookupname after taking the
	 * wlock above. This case is for a non-spliced, non-global filesystem.
	 */
	if (!addmip) {
		if ((uap->flags & MS_GLOBAL) == 0 &&
		    lookupname(uap->spec, fromspace, FOLLOW, NULL, &bvp) == 0) {
			addmip = 1;
		}
	}

	if (addmip) {
		vnode_t *lvp = NULL;

		error = vfs_get_lofi(vfsp, &lvp);
		if (error > 0) {
			lofi_remove(vfsp);

			if (splice)
				vn_vfsunlock(vp);
			vfs_unlock(vfsp);

			if (remount) {
				VFS_RELE(vfsp);
			} else {
				vfs_free(vfsp);
			}

			goto errout;
		} else if (error == -1) {
			bdev = bvp->v_rdev;
			VN_RELE(bvp);
		} else {
			bdev = lvp->v_rdev;
			VN_RELE(lvp);
			VN_RELE(bvp);
		}

		vfs_addmip(bdev, vfsp);
		addmip = 0;
		delmip = 1;
	}
	/*
	 * Invalidate cached entry for the mount point.
	 */
	if (splice)
		dnlc_purge_vp(vp);

	/*
	 * If we have an option string but the filesystem doesn't supply a
	 * prototype options table, create a table with the global
	 * options and sufficient room to accept all the options in the
	 * string. Then parse the passed in option string
	 * accepting all the options in the string. This gives us an
	 * option table with all the proper cancel properties for the
	 * global options.
	 *
	 * Filesystems that supply a prototype options table are handled
	 * earlier in this function.
	 */
	if (uap->flags & MS_OPTIONSTR) {
		if (!(vswp->vsw_flag & VSW_HASPROTO)) {
			mntopts_t tmp_mntopts;

			tmp_mntopts.mo_count = 0;
			vfs_createopttbl_extend(&tmp_mntopts, inargs,
			    &mnt_mntopts);
			vfs_parsemntopts(&tmp_mntopts, inargs, 1);
			vfs_swapopttbl_nolock(&mnt_mntopts, &tmp_mntopts);
			vfs_freeopttbl(&tmp_mntopts);
		}
	}

	/*
	 * Serialize with zone state transitions.
	 * See vfs_list_add; the zone mounted into is:
	 *	zone_find_by_path(refstr_value(vfsp->vfs_mntpt))
	 * not the zone doing the mount (curproc->p_zone), but if we're
	 * already inside a NGZ, then we know what zone we are.
	 */
	if (INGLOBALZONE(curproc)) {
		zone = zone_find_by_path(mountpt);
		ASSERT(zone != NULL);
	} else {
		zone = curproc->p_zone;
		/*
		 * zone_find_by_path does a hold, so do one here too so that
		 * we can do a zone_rele after mount_completed.
		 */
		zone_hold(zone);
	}
	mount_in_progress(zone);
	/*
	 * Instantiate (or reinstantiate) the file system. If appropriate,
	 * splice it into the file system name space.
	 *
	 * We want VFS_MOUNT() to be able to override the vfs_resource
	 * string if necessary (ie, mntfs), and also for a remount to
	 * change the same (necessary when remounting '/' during boot).
	 * So we set up vfs_mntpt and vfs_resource to what we think they
	 * should be, then hand off control to VFS_MOUNT() which can
	 * override this.
	 *
	 * For safety's sake, when changing vfs_resource or vfs_mntpt of
	 * a vfs which is on the vfs list (i.e. during a remount), we must
	 * never set those fields to NULL. Several bits of code make
	 * assumptions that the fields are always valid.
	 */
	vfs_swapopttbl(&mnt_mntopts, &vfsp->vfs_mntopts);
	if (remount) {
		if ((oldresource = vfsp->vfs_resource) != NULL)
			refstr_hold(oldresource);
		if ((oldmntpt = vfsp->vfs_mntpt) != NULL)
			refstr_hold(oldmntpt);
	}
	vfs_setresource(vfsp, resource, 0);
	vfs_setmntpoint(vfsp, mountpt, 0);

	/*
	 * going to mount on this vnode, so notify.
1675 */ 1676 vnevent_mountedover(vp, NULL); 1677 error = VFS_MOUNT(vfsp, vp, uap, credp); 1678 1679 if (uap->flags & MS_RDONLY) 1680 vfs_setmntopt(vfsp, MNTOPT_RO, NULL, 0); 1681 if (uap->flags & MS_NOSUID) 1682 vfs_setmntopt(vfsp, MNTOPT_NOSUID, NULL, 0); 1683 if (uap->flags & MS_GLOBAL) 1684 vfs_setmntopt(vfsp, MNTOPT_GLOBAL, NULL, 0); 1685 1686 if (error) { 1687 lofi_remove(vfsp); 1688 1689 if (remount) { 1690 /* put back pre-remount options */ 1691 vfs_swapopttbl(&mnt_mntopts, &vfsp->vfs_mntopts); 1692 vfs_setmntpoint(vfsp, refstr_value(oldmntpt), 1693 VFSSP_VERBATIM); 1694 if (oldmntpt) 1695 refstr_rele(oldmntpt); 1696 vfs_setresource(vfsp, refstr_value(oldresource), 1697 VFSSP_VERBATIM); 1698 if (oldresource) 1699 refstr_rele(oldresource); 1700 vfsp->vfs_flag = ovflags; 1701 vfs_unlock(vfsp); 1702 VFS_RELE(vfsp); 1703 } else { 1704 vfs_unlock(vfsp); 1705 vfs_freemnttab(vfsp); 1706 vfs_free(vfsp); 1707 } 1708 } else { 1709 /* 1710 * Set the mount time to now 1711 */ 1712 vfsp->vfs_mtime = ddi_get_time(); 1713 if (remount) { 1714 vfsp->vfs_flag &= ~VFS_REMOUNT; 1715 if (oldresource) 1716 refstr_rele(oldresource); 1717 if (oldmntpt) 1718 refstr_rele(oldmntpt); 1719 } else if (splice) { 1720 /* 1721 * Link vfsp into the name space at the mount 1722 * point. Vfs_add() is responsible for 1723 * holding the mount point which will be 1724 * released when vfs_remove() is called. 1725 */ 1726 vfs_add(vp, vfsp, uap->flags); 1727 } else { 1728 /* 1729 * Hold the reference to file system which is 1730 * not linked into the name space. 1731 */ 1732 vfsp->vfs_zone = NULL; 1733 VFS_HOLD(vfsp); 1734 vfsp->vfs_vnodecovered = NULL; 1735 } 1736 /* 1737 * Set flags for global options encountered 1738 */ 1739 if (vfs_optionisset(vfsp, MNTOPT_RO, NULL)) 1740 vfsp->vfs_flag |= VFS_RDONLY; 1741 else 1742 vfsp->vfs_flag &= ~VFS_RDONLY; 1743 if (vfs_optionisset(vfsp, MNTOPT_NOSUID, NULL)) { 1744 vfsp->vfs_flag |= (VFS_NOSETUID|VFS_NODEVICES); 1745 } else { 1746 if (vfs_optionisset(vfsp, MNTOPT_NODEVICES, NULL)) 1747 vfsp->vfs_flag |= VFS_NODEVICES; 1748 else 1749 vfsp->vfs_flag &= ~VFS_NODEVICES; 1750 if (vfs_optionisset(vfsp, MNTOPT_NOSETUID, NULL)) 1751 vfsp->vfs_flag |= VFS_NOSETUID; 1752 else 1753 vfsp->vfs_flag &= ~VFS_NOSETUID; 1754 } 1755 if (vfs_optionisset(vfsp, MNTOPT_NBMAND, NULL)) 1756 vfsp->vfs_flag |= VFS_NBMAND; 1757 else 1758 vfsp->vfs_flag &= ~VFS_NBMAND; 1759 1760 if (vfs_optionisset(vfsp, MNTOPT_XATTR, NULL)) 1761 vfsp->vfs_flag |= VFS_XATTR; 1762 else 1763 vfsp->vfs_flag &= ~VFS_XATTR; 1764 1765 if (vfs_optionisset(vfsp, MNTOPT_NOEXEC, NULL)) 1766 vfsp->vfs_flag |= VFS_NOEXEC; 1767 else 1768 vfsp->vfs_flag &= ~VFS_NOEXEC; 1769 1770 /* 1771 * Now construct the output option string of options 1772 * we recognized. 1773 */ 1774 if (uap->flags & MS_OPTIONSTR) { 1775 vfs_list_read_lock(); 1776 copyout_error = vfs_buildoptionstr( 1777 &vfsp->vfs_mntopts, inargs, optlen); 1778 vfs_list_unlock(); 1779 if (copyout_error == 0 && 1780 (uap->flags & MS_SYSSPACE) == 0) { 1781 copyout_error = copyoutstr(inargs, opts, 1782 optlen, NULL); 1783 } 1784 } 1785 1786 /* 1787 * If this isn't a remount, set up the vopstats before 1788 * anyone can touch this. We only allow spliced file 1789 * systems (file systems which are in the namespace) to 1790 * have the VFS_STATS flag set. 1791 * NOTE: PxFS mounts the underlying file system with 1792 * MS_NOSPLICE set and copies those vfs_flags to its private 1793 * vfs structure. 
As a result, PxFS should never have 1794 * the VFS_STATS flag or else we might access the vfs 1795 * statistics-related fields prior to them being 1796 * properly initialized. 1797 */ 1798 if (!remount && (vswp->vsw_flag & VSW_STATS) && splice) { 1799 initialize_vopstats(&vfsp->vfs_vopstats); 1800 /* 1801 * We need to set vfs_vskap to NULL because there's 1802 * a chance it won't be set below. This is checked 1803 * in teardown_vopstats() so we can't have garbage. 1804 */ 1805 vfsp->vfs_vskap = NULL; 1806 vfsp->vfs_flag |= VFS_STATS; 1807 vfsp->vfs_fstypevsp = get_fstype_vopstats(vfsp, vswp); 1808 } 1809 1810 if (vswp->vsw_flag & VSW_XID) 1811 vfsp->vfs_flag |= VFS_XID; 1812 1813 vfs_unlock(vfsp); 1814 } 1815 mount_completed(zone); 1816 zone_rele(zone); 1817 if (splice) 1818 vn_vfsunlock(vp); 1819 1820 if ((error == 0) && (copyout_error == 0)) { 1821 if (!remount) { 1822 /* 1823 * Don't call get_vskstat_anchor() while holding 1824 * locks since it allocates memory and calls 1825 * VFS_STATVFS(). For NFS, the latter can generate 1826 * an over-the-wire call. 1827 */ 1828 vskap = get_vskstat_anchor(vfsp); 1829 /* Only take the lock if we have something to do */ 1830 if (vskap != NULL) { 1831 vfs_lock_wait(vfsp); 1832 if (vfsp->vfs_flag & VFS_STATS) { 1833 vfsp->vfs_vskap = vskap; 1834 } 1835 vfs_unlock(vfsp); 1836 } 1837 } 1838 /* Return vfsp to caller. */ 1839 *vfspp = vfsp; 1840 } 1841 errout: 1842 vfs_freeopttbl(&mnt_mntopts); 1843 if (resource != NULL) 1844 kmem_free(resource, strlen(resource) + 1); 1845 if (mountpt != NULL) 1846 kmem_free(mountpt, strlen(mountpt) + 1); 1847 /* 1848 * It is possible we errored prior to adding to mount in progress 1849 * table. Must free vnode we acquired with successful lookupname. 1850 */ 1851 if (addmip) 1852 VN_RELE(bvp); 1853 if (delmip) 1854 vfs_delmip(vfsp); 1855 ASSERT(vswp != NULL); 1856 vfs_unrefvfssw(vswp); 1857 if (inargs != opts) 1858 kmem_free(inargs, MAX_MNTOPT_STR); 1859 if (copyout_error) { 1860 lofi_remove(vfsp); 1861 VFS_RELE(vfsp); 1862 error = copyout_error; 1863 } 1864 return (error); 1865 } 1866 1867 static void 1868 vfs_setpath( 1869 struct vfs *vfsp, /* vfs being updated */ 1870 refstr_t **refp, /* Ref-count string to contain the new path */ 1871 const char *newpath, /* Path to add to refp (above) */ 1872 uint32_t flag) /* flag */ 1873 { 1874 size_t len; 1875 refstr_t *ref; 1876 zone_t *zone = curproc->p_zone; 1877 char *sp; 1878 int have_list_lock = 0; 1879 1880 ASSERT(!VFS_ON_LIST(vfsp) || vfs_lock_held(vfsp)); 1881 1882 /* 1883 * New path must be less than MAXPATHLEN because mntfs 1884 * will only display up to MAXPATHLEN bytes. This is currently 1885 * safe, because domount() uses pn_get(), and other callers 1886 * similarly cap the size to fewer than MAXPATHLEN bytes. 1887 */ 1888 1889 ASSERT(strlen(newpath) < MAXPATHLEN); 1890 1891 /* mntfs requires consistency while vfs list lock is held */ 1892 1893 if (VFS_ON_LIST(vfsp)) { 1894 have_list_lock = 1; 1895 vfs_list_lock(); 1896 } 1897 1898 if (*refp != NULL) 1899 refstr_rele(*refp); 1900 1901 /* 1902 * If we are in a non-global zone then we prefix the supplied path, 1903 * newpath, with the zone's root path, with two exceptions. The first 1904 * is where we have been explicitly directed to avoid doing so; this 1905 * will be the case following a failed remount, where the path supplied 1906 * will be a saved version which must now be restored. The second 1907 * exception is where newpath is not a pathname but a descriptive name, 1908 * e.g. "procfs". 
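	 *
	 * Worked example with illustrative values: for a zone rooted at
	 * "/zone/a/root/" (zone_rootpathlen == 14, counting the trailing
	 * slash and the terminating NUL) and newpath "/export", the buffer
	 * allocated below is strlen("/export") + 14 - 1 == 20 bytes, and the
	 * stored path becomes "/zone/a/root/export" (19 characters plus the
	 * NUL).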
1909 */ 1910 if (zone == global_zone || (flag & VFSSP_VERBATIM) || *newpath != '/') { 1911 ref = refstr_alloc(newpath); 1912 goto out; 1913 } 1914 1915 /* 1916 * Truncate the trailing '/' in the zoneroot, and merge 1917 * in the zone's rootpath with the "newpath" (resource 1918 * or mountpoint) passed in. 1919 * 1920 * The size of the required buffer is thus the size of 1921 * the buffer required for the passed-in newpath 1922 * (strlen(newpath) + 1), plus the size of the buffer 1923 * required to hold zone_rootpath (zone_rootpathlen) 1924 * minus one for one of the now-superfluous NUL 1925 * terminations, minus one for the trailing '/'. 1926 * 1927 * That gives us: 1928 * 1929 * (strlen(newpath) + 1) + zone_rootpathlen - 1 - 1 1930 * 1931 * Which is what we have below. 1932 */ 1933 1934 len = strlen(newpath) + zone->zone_rootpathlen - 1; 1935 sp = kmem_alloc(len, KM_SLEEP); 1936 1937 /* 1938 * Copy everything including the trailing slash, which 1939 * we then overwrite with the NUL character. 1940 */ 1941 1942 (void) strcpy(sp, zone->zone_rootpath); 1943 sp[zone->zone_rootpathlen - 2] = '\0'; 1944 (void) strcat(sp, newpath); 1945 1946 ref = refstr_alloc(sp); 1947 kmem_free(sp, len); 1948 out: 1949 *refp = ref; 1950 1951 if (have_list_lock) { 1952 vfs_mnttab_modtimeupd(); 1953 vfs_list_unlock(); 1954 } 1955 } 1956 1957 /* 1958 * Record a mounted resource name in a vfs structure. 1959 * If vfsp is already mounted, caller must hold the vfs lock. 1960 */ 1961 void 1962 vfs_setresource(struct vfs *vfsp, const char *resource, uint32_t flag) 1963 { 1964 if (resource == NULL || resource[0] == '\0') 1965 resource = VFS_NORESOURCE; 1966 vfs_setpath(vfsp, &vfsp->vfs_resource, resource, flag); 1967 } 1968 1969 /* 1970 * Record a mount point name in a vfs structure. 1971 * If vfsp is already mounted, caller must hold the vfs lock. 1972 */ 1973 void 1974 vfs_setmntpoint(struct vfs *vfsp, const char *mntpt, uint32_t flag) 1975 { 1976 if (mntpt == NULL || mntpt[0] == '\0') 1977 mntpt = VFS_NOMNTPT; 1978 vfs_setpath(vfsp, &vfsp->vfs_mntpt, mntpt, flag); 1979 } 1980 1981 /* Returns the vfs_resource. Caller must call refstr_rele() when finished. */ 1982 1983 refstr_t * 1984 vfs_getresource(const struct vfs *vfsp) 1985 { 1986 refstr_t *resource; 1987 1988 vfs_list_read_lock(); 1989 resource = vfsp->vfs_resource; 1990 refstr_hold(resource); 1991 vfs_list_unlock(); 1992 1993 return (resource); 1994 } 1995 1996 /* Returns the vfs_mntpt. Caller must call refstr_rele() when finished. */ 1997 1998 refstr_t * 1999 vfs_getmntpoint(const struct vfs *vfsp) 2000 { 2001 refstr_t *mntpt; 2002 2003 vfs_list_read_lock(); 2004 mntpt = vfsp->vfs_mntpt; 2005 refstr_hold(mntpt); 2006 vfs_list_unlock(); 2007 2008 return (mntpt); 2009 } 2010 2011 /* 2012 * Create an empty options table with enough empty slots to hold all 2013 * The options in the options string passed as an argument. 2014 * Potentially prepend another options table. 2015 * 2016 * Note: caller is responsible for locking the vfs list, if needed, 2017 * to protect mops. 
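 *
 * For example, an option string of "ro,nosuid,xattr" counts as three
 * options (one more than the number of commas), so three empty slots are
 * allocated beyond the copied template entries; a NULL or empty string
 * adds none. A caller without a prototype table can therefore do, where
 * opts is the option string (illustrative sketch):
 *
 *	mntopts_t tbl;
 *
 *	tbl.mo_count = 0;
 *	vfs_createopttbl_extend(&tbl, opts, &vfs_mntopts);
 *	vfs_parsemntopts(&tbl, opts, 1);
 *	...
 *	vfs_freeopttbl(&tbl);
 *
 * which is essentially what domount() does for file systems lacking a
 * VSW_HASPROTO prototype table.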
2018 */ 2019 static void 2020 vfs_createopttbl_extend(mntopts_t *mops, const char *opts, 2021 const mntopts_t *mtmpl) 2022 { 2023 const char *s = opts; 2024 uint_t count; 2025 2026 if (opts == NULL || *opts == '\0') { 2027 count = 0; 2028 } else { 2029 count = 1; 2030 2031 /* 2032 * Count number of options in the string 2033 */ 2034 for (s = strchr(s, ','); s != NULL; s = strchr(s, ',')) { 2035 count++; 2036 s++; 2037 } 2038 } 2039 vfs_copyopttbl_extend(mtmpl, mops, count); 2040 } 2041 2042 /* 2043 * Create an empty options table with enough empty slots to hold all 2044 * The options in the options string passed as an argument. 2045 * 2046 * This function is *not* for general use by filesystems. 2047 * 2048 * Note: caller is responsible for locking the vfs list, if needed, 2049 * to protect mops. 2050 */ 2051 void 2052 vfs_createopttbl(mntopts_t *mops, const char *opts) 2053 { 2054 vfs_createopttbl_extend(mops, opts, NULL); 2055 } 2056 2057 2058 /* 2059 * Swap two mount options tables 2060 */ 2061 static void 2062 vfs_swapopttbl_nolock(mntopts_t *optbl1, mntopts_t *optbl2) 2063 { 2064 uint_t tmpcnt; 2065 mntopt_t *tmplist; 2066 2067 tmpcnt = optbl2->mo_count; 2068 tmplist = optbl2->mo_list; 2069 optbl2->mo_count = optbl1->mo_count; 2070 optbl2->mo_list = optbl1->mo_list; 2071 optbl1->mo_count = tmpcnt; 2072 optbl1->mo_list = tmplist; 2073 } 2074 2075 static void 2076 vfs_swapopttbl(mntopts_t *optbl1, mntopts_t *optbl2) 2077 { 2078 vfs_list_lock(); 2079 vfs_swapopttbl_nolock(optbl1, optbl2); 2080 vfs_mnttab_modtimeupd(); 2081 vfs_list_unlock(); 2082 } 2083 2084 static char ** 2085 vfs_copycancelopt_extend(char **const moc, int extend) 2086 { 2087 int i = 0; 2088 int j; 2089 char **result; 2090 2091 if (moc != NULL) { 2092 for (; moc[i] != NULL; i++) 2093 /* count number of options to cancel */; 2094 } 2095 2096 if (i + extend == 0) 2097 return (NULL); 2098 2099 result = kmem_alloc((i + extend + 1) * sizeof (char *), KM_SLEEP); 2100 2101 for (j = 0; j < i; j++) { 2102 result[j] = kmem_alloc(strlen(moc[j]) + 1, KM_SLEEP); 2103 (void) strcpy(result[j], moc[j]); 2104 } 2105 for (; j <= i + extend; j++) 2106 result[j] = NULL; 2107 2108 return (result); 2109 } 2110 2111 static void 2112 vfs_copyopt(const mntopt_t *s, mntopt_t *d) 2113 { 2114 char *sp, *dp; 2115 2116 d->mo_flags = s->mo_flags; 2117 d->mo_data = s->mo_data; 2118 sp = s->mo_name; 2119 if (sp != NULL) { 2120 dp = kmem_alloc(strlen(sp) + 1, KM_SLEEP); 2121 (void) strcpy(dp, sp); 2122 d->mo_name = dp; 2123 } else { 2124 d->mo_name = NULL; /* should never happen */ 2125 } 2126 2127 d->mo_cancel = vfs_copycancelopt_extend(s->mo_cancel, 0); 2128 2129 sp = s->mo_arg; 2130 if (sp != NULL) { 2131 dp = kmem_alloc(strlen(sp) + 1, KM_SLEEP); 2132 (void) strcpy(dp, sp); 2133 d->mo_arg = dp; 2134 } else { 2135 d->mo_arg = NULL; 2136 } 2137 } 2138 2139 /* 2140 * Copy a mount options table, possibly allocating some spare 2141 * slots at the end. It is permissible to copy_extend the NULL table. 2142 */ 2143 static void 2144 vfs_copyopttbl_extend(const mntopts_t *smo, mntopts_t *dmo, int extra) 2145 { 2146 uint_t i, count; 2147 mntopt_t *motbl; 2148 2149 /* 2150 * Clear out any existing stuff in the options table being initialized 2151 */ 2152 vfs_freeopttbl(dmo); 2153 count = (smo == NULL) ? 
0 : smo->mo_count; 2154 if ((count + extra) == 0) /* nothing to do */ 2155 return; 2156 dmo->mo_count = count + extra; 2157 motbl = kmem_zalloc((count + extra) * sizeof (mntopt_t), KM_SLEEP); 2158 dmo->mo_list = motbl; 2159 for (i = 0; i < count; i++) { 2160 vfs_copyopt(&smo->mo_list[i], &motbl[i]); 2161 } 2162 for (i = count; i < count + extra; i++) { 2163 motbl[i].mo_flags = MO_EMPTY; 2164 } 2165 } 2166 2167 /* 2168 * Copy a mount options table. 2169 * 2170 * This function is *not* for general use by filesystems. 2171 * 2172 * Note: caller is responsible for locking the vfs list, if needed, 2173 * to protect smo and dmo. 2174 */ 2175 void 2176 vfs_copyopttbl(const mntopts_t *smo, mntopts_t *dmo) 2177 { 2178 vfs_copyopttbl_extend(smo, dmo, 0); 2179 } 2180 2181 static char ** 2182 vfs_mergecancelopts(const mntopt_t *mop1, const mntopt_t *mop2) 2183 { 2184 int c1 = 0; 2185 int c2 = 0; 2186 char **result; 2187 char **sp1, **sp2, **dp; 2188 2189 /* 2190 * First we count both lists of cancel options. 2191 * If either is NULL or has no elements, we return a copy of 2192 * the other. 2193 */ 2194 if (mop1->mo_cancel != NULL) { 2195 for (; mop1->mo_cancel[c1] != NULL; c1++) 2196 /* count cancel options in mop1 */; 2197 } 2198 2199 if (c1 == 0) 2200 return (vfs_copycancelopt_extend(mop2->mo_cancel, 0)); 2201 2202 if (mop2->mo_cancel != NULL) { 2203 for (; mop2->mo_cancel[c2] != NULL; c2++) 2204 /* count cancel options in mop2 */; 2205 } 2206 2207 result = vfs_copycancelopt_extend(mop1->mo_cancel, c2); 2208 2209 if (c2 == 0) 2210 return (result); 2211 2212 /* 2213 * When we get here, we've got two sets of cancel options; 2214 * we need to merge the two sets. We know that the result 2215 * array has "c1+c2+1" entries and in the end we might shrink 2216 * it. 2217 * Result now has a copy of the c1 entries from mop1; we'll 2218 * now lookup all the entries of mop2 in mop1 and copy it if 2219 * it is unique. 2220 * This operation is O(n^2) but it's only called once per 2221 * filesystem per duplicate option. This is a situation 2222 * which doesn't arise with the filesystems in ON and 2223 * n is generally 1. 2224 */ 2225 2226 dp = &result[c1]; 2227 for (sp2 = mop2->mo_cancel; *sp2 != NULL; sp2++) { 2228 for (sp1 = mop1->mo_cancel; *sp1 != NULL; sp1++) { 2229 if (strcmp(*sp1, *sp2) == 0) 2230 break; 2231 } 2232 if (*sp1 == NULL) { 2233 /* 2234 * Option *sp2 not found in mop1, so copy it. 2235 * The calls to vfs_copycancelopt_extend() 2236 * guarantee that there's enough room. 2237 */ 2238 *dp = kmem_alloc(strlen(*sp2) + 1, KM_SLEEP); 2239 (void) strcpy(*dp++, *sp2); 2240 } 2241 } 2242 if (dp != &result[c1+c2]) { 2243 size_t bytes = (dp - result + 1) * sizeof (char *); 2244 char **nres = kmem_alloc(bytes, KM_SLEEP); 2245 2246 bcopy(result, nres, bytes); 2247 kmem_free(result, (c1 + c2 + 1) * sizeof (char *)); 2248 result = nres; 2249 } 2250 return (result); 2251 } 2252 2253 /* 2254 * Merge two mount option tables (outer and inner) into one. This is very 2255 * similar to "merging" global variables and automatic variables in C. 2256 * 2257 * This isn't (and doesn't have to be) fast. 2258 * 2259 * This function is *not* for general use by filesystems. 2260 * 2261 * Note: caller is responsible for locking the vfs list, if needed, 2262 * to protect omo, imo & dmo. 
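 *
 * For example (illustrative options): merging an outer table holding
 * "rw" and "nosuid" with an inner table holding "nosuid" and "xattr"
 * produces a table with "rw", "nosuid" (the inner definition, with the
 * two cancellation lists merged) and "xattr" appended at the end.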
2263 */ 2264 void 2265 vfs_mergeopttbl(const mntopts_t *omo, const mntopts_t *imo, mntopts_t *dmo) 2266 { 2267 uint_t i, count; 2268 mntopt_t *mop, *motbl; 2269 uint_t freeidx; 2270 2271 /* 2272 * First determine how much space we need to allocate. 2273 */ 2274 count = omo->mo_count; 2275 for (i = 0; i < imo->mo_count; i++) { 2276 if (imo->mo_list[i].mo_flags & MO_EMPTY) 2277 continue; 2278 if (vfs_hasopt(omo, imo->mo_list[i].mo_name) == NULL) 2279 count++; 2280 } 2281 ASSERT(count >= omo->mo_count && 2282 count <= omo->mo_count + imo->mo_count); 2283 motbl = kmem_alloc(count * sizeof (mntopt_t), KM_SLEEP); 2284 for (i = 0; i < omo->mo_count; i++) 2285 vfs_copyopt(&omo->mo_list[i], &motbl[i]); 2286 freeidx = omo->mo_count; 2287 for (i = 0; i < imo->mo_count; i++) { 2288 if (imo->mo_list[i].mo_flags & MO_EMPTY) 2289 continue; 2290 if ((mop = vfs_hasopt(omo, imo->mo_list[i].mo_name)) != NULL) { 2291 char **newcanp; 2292 uint_t index = mop - omo->mo_list; 2293 2294 newcanp = vfs_mergecancelopts(mop, &motbl[index]); 2295 2296 vfs_freeopt(&motbl[index]); 2297 vfs_copyopt(&imo->mo_list[i], &motbl[index]); 2298 2299 vfs_freecancelopt(motbl[index].mo_cancel); 2300 motbl[index].mo_cancel = newcanp; 2301 } else { 2302 /* 2303 * If it's a new option, just copy it over to the first 2304 * free location. 2305 */ 2306 vfs_copyopt(&imo->mo_list[i], &motbl[freeidx++]); 2307 } 2308 } 2309 dmo->mo_count = count; 2310 dmo->mo_list = motbl; 2311 } 2312 2313 /* 2314 * Functions to set and clear mount options in a mount options table. 2315 */ 2316 2317 /* 2318 * Clear a mount option, if it exists. 2319 * 2320 * The update_mnttab arg indicates whether mops is part of a vfs that is on 2321 * the vfs list. 2322 */ 2323 static void 2324 vfs_clearmntopt_nolock(mntopts_t *mops, const char *opt, int update_mnttab) 2325 { 2326 struct mntopt *mop; 2327 uint_t i, count; 2328 2329 ASSERT(!update_mnttab || RW_WRITE_HELD(&vfslist)); 2330 2331 count = mops->mo_count; 2332 for (i = 0; i < count; i++) { 2333 mop = &mops->mo_list[i]; 2334 2335 if (mop->mo_flags & MO_EMPTY) 2336 continue; 2337 if (strcmp(opt, mop->mo_name)) 2338 continue; 2339 mop->mo_flags &= ~MO_SET; 2340 if (mop->mo_arg != NULL) { 2341 kmem_free(mop->mo_arg, strlen(mop->mo_arg) + 1); 2342 } 2343 mop->mo_arg = NULL; 2344 if (update_mnttab) 2345 vfs_mnttab_modtimeupd(); 2346 break; 2347 } 2348 } 2349 2350 void 2351 vfs_clearmntopt(struct vfs *vfsp, const char *opt) 2352 { 2353 int gotlock = 0; 2354 2355 if (VFS_ON_LIST(vfsp)) { 2356 gotlock = 1; 2357 vfs_list_lock(); 2358 } 2359 vfs_clearmntopt_nolock(&vfsp->vfs_mntopts, opt, gotlock); 2360 if (gotlock) 2361 vfs_list_unlock(); 2362 } 2363 2364 2365 /* 2366 * Set a mount option on. If it's not found in the table, it's silently 2367 * ignored. If the option has MO_IGNORE set, it is still set unless the 2368 * VFS_NOFORCEOPT bit is set in the flags. Also, VFS_DISPLAY/VFS_NODISPLAY flag 2369 * bits can be used to toggle the MO_NODISPLAY bit for the option. 2370 * If the VFS_CREATEOPT flag bit is set then the first option slot with 2371 * MO_EMPTY set is created as the option passed in. 2372 * 2373 * The update_mnttab arg indicates whether mops is part of a vfs that is on 2374 * the vfs list. 
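 *
 * For example (illustrative options): setting "ro" turns that option
 * on and, through its cancellation list (if the table defines one),
 * clears a previously set "rw"; with VFS_CREATEOPT, an option that is
 * not in the table at all is created in the first MO_EMPTY slot.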
2375 */ 2376 static void 2377 vfs_setmntopt_nolock(mntopts_t *mops, const char *opt, 2378 const char *arg, int flags, int update_mnttab) 2379 { 2380 mntopt_t *mop; 2381 uint_t i, count; 2382 char *sp; 2383 2384 ASSERT(!update_mnttab || RW_WRITE_HELD(&vfslist)); 2385 2386 if (flags & VFS_CREATEOPT) { 2387 if (vfs_hasopt(mops, opt) != NULL) { 2388 flags &= ~VFS_CREATEOPT; 2389 } 2390 } 2391 count = mops->mo_count; 2392 for (i = 0; i < count; i++) { 2393 mop = &mops->mo_list[i]; 2394 2395 if (mop->mo_flags & MO_EMPTY) { 2396 if ((flags & VFS_CREATEOPT) == 0) 2397 continue; 2398 sp = kmem_alloc(strlen(opt) + 1, KM_SLEEP); 2399 (void) strcpy(sp, opt); 2400 mop->mo_name = sp; 2401 if (arg != NULL) 2402 mop->mo_flags = MO_HASVALUE; 2403 else 2404 mop->mo_flags = 0; 2405 } else if (strcmp(opt, mop->mo_name)) { 2406 continue; 2407 } 2408 if ((mop->mo_flags & MO_IGNORE) && (flags & VFS_NOFORCEOPT)) 2409 break; 2410 if (arg != NULL && (mop->mo_flags & MO_HASVALUE) != 0) { 2411 sp = kmem_alloc(strlen(arg) + 1, KM_SLEEP); 2412 (void) strcpy(sp, arg); 2413 } else { 2414 sp = NULL; 2415 } 2416 if (mop->mo_arg != NULL) 2417 kmem_free(mop->mo_arg, strlen(mop->mo_arg) + 1); 2418 mop->mo_arg = sp; 2419 if (flags & VFS_DISPLAY) 2420 mop->mo_flags &= ~MO_NODISPLAY; 2421 if (flags & VFS_NODISPLAY) 2422 mop->mo_flags |= MO_NODISPLAY; 2423 mop->mo_flags |= MO_SET; 2424 if (mop->mo_cancel != NULL) { 2425 char **cp; 2426 2427 for (cp = mop->mo_cancel; *cp != NULL; cp++) 2428 vfs_clearmntopt_nolock(mops, *cp, 0); 2429 } 2430 if (update_mnttab) 2431 vfs_mnttab_modtimeupd(); 2432 break; 2433 } 2434 } 2435 2436 void 2437 vfs_setmntopt(struct vfs *vfsp, const char *opt, const char *arg, int flags) 2438 { 2439 int gotlock = 0; 2440 2441 if (VFS_ON_LIST(vfsp)) { 2442 gotlock = 1; 2443 vfs_list_lock(); 2444 } 2445 vfs_setmntopt_nolock(&vfsp->vfs_mntopts, opt, arg, flags, gotlock); 2446 if (gotlock) 2447 vfs_list_unlock(); 2448 } 2449 2450 2451 /* 2452 * Add a "tag" option to a mounted file system's options list. 2453 * 2454 * Note: caller is responsible for locking the vfs list, if needed, 2455 * to protect mops. 2456 */ 2457 static mntopt_t * 2458 vfs_addtag(mntopts_t *mops, const char *tag) 2459 { 2460 uint_t count; 2461 mntopt_t *mop, *motbl; 2462 2463 count = mops->mo_count + 1; 2464 motbl = kmem_zalloc(count * sizeof (mntopt_t), KM_SLEEP); 2465 if (mops->mo_count) { 2466 size_t len = (count - 1) * sizeof (mntopt_t); 2467 2468 bcopy(mops->mo_list, motbl, len); 2469 kmem_free(mops->mo_list, len); 2470 } 2471 mops->mo_count = count; 2472 mops->mo_list = motbl; 2473 mop = &motbl[count - 1]; 2474 mop->mo_flags = MO_TAG; 2475 mop->mo_name = kmem_alloc(strlen(tag) + 1, KM_SLEEP); 2476 (void) strcpy(mop->mo_name, tag); 2477 return (mop); 2478 } 2479 2480 /* 2481 * Allow users to set arbitrary "tags" in a vfs's mount options. 2482 * Broader use within the kernel is discouraged. 
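 *
 * Illustrative call (device, path and tag name hypothetical):
 *
 *	err = vfs_settag(getmajor(dev), getminor(dev), "/export/home",
 *	    "mytag", CRED());
 *
 * The tag appears as an additional mount option on the matching mount
 * and can later be removed with vfs_clrtag().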
2483 */ 2484 int 2485 vfs_settag(uint_t major, uint_t minor, const char *mntpt, const char *tag, 2486 cred_t *cr) 2487 { 2488 vfs_t *vfsp; 2489 mntopts_t *mops; 2490 mntopt_t *mop; 2491 int found = 0; 2492 dev_t dev = makedevice(major, minor); 2493 int err = 0; 2494 char *buf = kmem_alloc(MAX_MNTOPT_STR, KM_SLEEP); 2495 2496 /* 2497 * Find the desired mounted file system 2498 */ 2499 vfs_list_lock(); 2500 vfsp = rootvfs; 2501 do { 2502 if (vfsp->vfs_dev == dev && 2503 strcmp(mntpt, refstr_value(vfsp->vfs_mntpt)) == 0) { 2504 found = 1; 2505 break; 2506 } 2507 vfsp = vfsp->vfs_next; 2508 } while (vfsp != rootvfs); 2509 2510 if (!found) { 2511 err = EINVAL; 2512 goto out; 2513 } 2514 err = secpolicy_fs_config(cr, vfsp); 2515 if (err != 0) 2516 goto out; 2517 2518 mops = &vfsp->vfs_mntopts; 2519 /* 2520 * Add tag if it doesn't already exist 2521 */ 2522 if ((mop = vfs_hasopt(mops, tag)) == NULL) { 2523 int len; 2524 2525 (void) vfs_buildoptionstr(mops, buf, MAX_MNTOPT_STR); 2526 len = strlen(buf); 2527 if (len + strlen(tag) + 2 > MAX_MNTOPT_STR) { 2528 err = ENAMETOOLONG; 2529 goto out; 2530 } 2531 mop = vfs_addtag(mops, tag); 2532 } 2533 if ((mop->mo_flags & MO_TAG) == 0) { 2534 err = EINVAL; 2535 goto out; 2536 } 2537 vfs_setmntopt_nolock(mops, tag, NULL, 0, 1); 2538 out: 2539 vfs_list_unlock(); 2540 kmem_free(buf, MAX_MNTOPT_STR); 2541 return (err); 2542 } 2543 2544 /* 2545 * Allow users to remove arbitrary "tags" in a vfs's mount options. 2546 * Broader use within the kernel is discouraged. 2547 */ 2548 int 2549 vfs_clrtag(uint_t major, uint_t minor, const char *mntpt, const char *tag, 2550 cred_t *cr) 2551 { 2552 vfs_t *vfsp; 2553 mntopt_t *mop; 2554 int found = 0; 2555 dev_t dev = makedevice(major, minor); 2556 int err = 0; 2557 2558 /* 2559 * Find the desired mounted file system 2560 */ 2561 vfs_list_lock(); 2562 vfsp = rootvfs; 2563 do { 2564 if (vfsp->vfs_dev == dev && 2565 strcmp(mntpt, refstr_value(vfsp->vfs_mntpt)) == 0) { 2566 found = 1; 2567 break; 2568 } 2569 vfsp = vfsp->vfs_next; 2570 } while (vfsp != rootvfs); 2571 2572 if (!found) { 2573 err = EINVAL; 2574 goto out; 2575 } 2576 err = secpolicy_fs_config(cr, vfsp); 2577 if (err != 0) 2578 goto out; 2579 2580 if ((mop = vfs_hasopt(&vfsp->vfs_mntopts, tag)) == NULL) { 2581 err = EINVAL; 2582 goto out; 2583 } 2584 if ((mop->mo_flags & MO_TAG) == 0) { 2585 err = EINVAL; 2586 goto out; 2587 } 2588 vfs_clearmntopt_nolock(&vfsp->vfs_mntopts, tag, 1); 2589 out: 2590 vfs_list_unlock(); 2591 return (err); 2592 } 2593 2594 /* 2595 * Function to parse an option string and fill in a mount options table. 2596 * Unknown options are silently ignored. The input option string is modified 2597 * by replacing separators with nulls. If the create flag is set, options 2598 * not found in the table are just added on the fly. The table must have 2599 * an option slot marked MO_EMPTY to add an option on the fly. 2600 * 2601 * This function is *not* for general use by filesystems. 2602 * 2603 * Note: caller is responsible for locking the vfs list, if needed, 2604 * to protect mops.. 
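 *
 * For example (illustrative string): parsing "rw,intr,retry=5" sets
 * "rw" and "intr" with no argument and "retry" with the argument "5".
 * Each ',' and '=' is temporarily overwritten with a NUL while the
 * option is being processed and is restored before the next option is
 * parsed, so the caller's string is intact on return.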
2605 */ 2606 void 2607 vfs_parsemntopts(mntopts_t *mops, char *osp, int create) 2608 { 2609 char *s = osp, *p, *nextop, *valp, *cp, *ep; 2610 int setflg = VFS_NOFORCEOPT; 2611 2612 if (osp == NULL) 2613 return; 2614 while (*s != '\0') { 2615 p = strchr(s, ','); /* find next option */ 2616 if (p == NULL) { 2617 cp = NULL; 2618 p = s + strlen(s); 2619 } else { 2620 cp = p; /* save location of comma */ 2621 *p++ = '\0'; /* mark end and point to next option */ 2622 } 2623 nextop = p; 2624 p = strchr(s, '='); /* look for value */ 2625 if (p == NULL) { 2626 valp = NULL; /* no value supplied */ 2627 } else { 2628 ep = p; /* save location of equals */ 2629 *p++ = '\0'; /* end option and point to value */ 2630 valp = p; 2631 } 2632 /* 2633 * set option into options table 2634 */ 2635 if (create) 2636 setflg |= VFS_CREATEOPT; 2637 vfs_setmntopt_nolock(mops, s, valp, setflg, 0); 2638 if (cp != NULL) 2639 *cp = ','; /* restore the comma */ 2640 if (valp != NULL) 2641 *ep = '='; /* restore the equals */ 2642 s = nextop; 2643 } 2644 } 2645 2646 /* 2647 * Function to inquire if an option exists in a mount options table. 2648 * Returns a pointer to the option if it exists, else NULL. 2649 * 2650 * This function is *not* for general use by filesystems. 2651 * 2652 * Note: caller is responsible for locking the vfs list, if needed, 2653 * to protect mops. 2654 */ 2655 struct mntopt * 2656 vfs_hasopt(const mntopts_t *mops, const char *opt) 2657 { 2658 struct mntopt *mop; 2659 uint_t i, count; 2660 2661 count = mops->mo_count; 2662 for (i = 0; i < count; i++) { 2663 mop = &mops->mo_list[i]; 2664 2665 if (mop->mo_flags & MO_EMPTY) 2666 continue; 2667 if (strcmp(opt, mop->mo_name) == 0) 2668 return (mop); 2669 } 2670 return (NULL); 2671 } 2672 2673 /* 2674 * Function to inquire if an option is set in a mount options table. 2675 * Returns non-zero if set and fills in the arg pointer with a pointer to 2676 * the argument string or NULL if there is no argument string. 2677 */ 2678 static int 2679 vfs_optionisset_nolock(const mntopts_t *mops, const char *opt, char **argp) 2680 { 2681 struct mntopt *mop; 2682 uint_t i, count; 2683 2684 count = mops->mo_count; 2685 for (i = 0; i < count; i++) { 2686 mop = &mops->mo_list[i]; 2687 2688 if (mop->mo_flags & MO_EMPTY) 2689 continue; 2690 if (strcmp(opt, mop->mo_name)) 2691 continue; 2692 if ((mop->mo_flags & MO_SET) == 0) 2693 return (0); 2694 if (argp != NULL && (mop->mo_flags & MO_HASVALUE) != 0) 2695 *argp = mop->mo_arg; 2696 return (1); 2697 } 2698 return (0); 2699 } 2700 2701 2702 int 2703 vfs_optionisset(const struct vfs *vfsp, const char *opt, char **argp) 2704 { 2705 int ret; 2706 2707 vfs_list_read_lock(); 2708 ret = vfs_optionisset_nolock(&vfsp->vfs_mntopts, opt, argp); 2709 vfs_list_unlock(); 2710 return (ret); 2711 } 2712 2713 2714 /* 2715 * Construct a comma separated string of the options set in the given 2716 * mount table, return the string in the given buffer. Return non-zero if 2717 * the buffer would overflow. 2718 * 2719 * This function is *not* for general use by filesystems. 2720 * 2721 * Note: caller is responsible for locking the vfs list, if needed, 2722 * to protect mp. 
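 *
 * For example (illustrative contents): a table in which "rw" is set
 * and "retry" is set with the argument "5" produces "rw,retry=5".
 * On overflow the function returns EOVERFLOW and the partially built
 * contents of buf should not be used.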
2723 */ 2724 int 2725 vfs_buildoptionstr(const mntopts_t *mp, char *buf, int len) 2726 { 2727 char *cp; 2728 uint_t i; 2729 2730 buf[0] = '\0'; 2731 cp = buf; 2732 for (i = 0; i < mp->mo_count; i++) { 2733 struct mntopt *mop; 2734 2735 mop = &mp->mo_list[i]; 2736 if (mop->mo_flags & MO_SET) { 2737 int optlen, comma = 0; 2738 2739 if (buf[0] != '\0') 2740 comma = 1; 2741 optlen = strlen(mop->mo_name); 2742 if (strlen(buf) + comma + optlen + 1 > len) 2743 goto err; 2744 if (comma) 2745 *cp++ = ','; 2746 (void) strcpy(cp, mop->mo_name); 2747 cp += optlen; 2748 /* 2749 * Append option value if there is one 2750 */ 2751 if (mop->mo_arg != NULL) { 2752 int arglen; 2753 2754 arglen = strlen(mop->mo_arg); 2755 if (strlen(buf) + arglen + 2 > len) 2756 goto err; 2757 *cp++ = '='; 2758 (void) strcpy(cp, mop->mo_arg); 2759 cp += arglen; 2760 } 2761 } 2762 } 2763 return (0); 2764 err: 2765 return (EOVERFLOW); 2766 } 2767 2768 static void 2769 vfs_freecancelopt(char **moc) 2770 { 2771 if (moc != NULL) { 2772 int ccnt = 0; 2773 char **cp; 2774 2775 for (cp = moc; *cp != NULL; cp++) { 2776 kmem_free(*cp, strlen(*cp) + 1); 2777 ccnt++; 2778 } 2779 kmem_free(moc, (ccnt + 1) * sizeof (char *)); 2780 } 2781 } 2782 2783 static void 2784 vfs_freeopt(mntopt_t *mop) 2785 { 2786 if (mop->mo_name != NULL) 2787 kmem_free(mop->mo_name, strlen(mop->mo_name) + 1); 2788 2789 vfs_freecancelopt(mop->mo_cancel); 2790 2791 if (mop->mo_arg != NULL) 2792 kmem_free(mop->mo_arg, strlen(mop->mo_arg) + 1); 2793 } 2794 2795 /* 2796 * Free a mount options table 2797 * 2798 * This function is *not* for general use by filesystems. 2799 * 2800 * Note: caller is responsible for locking the vfs list, if needed, 2801 * to protect mp. 2802 */ 2803 void 2804 vfs_freeopttbl(mntopts_t *mp) 2805 { 2806 uint_t i, count; 2807 2808 count = mp->mo_count; 2809 for (i = 0; i < count; i++) { 2810 vfs_freeopt(&mp->mo_list[i]); 2811 } 2812 if (count) { 2813 kmem_free(mp->mo_list, sizeof (mntopt_t) * count); 2814 mp->mo_count = 0; 2815 mp->mo_list = NULL; 2816 } 2817 } 2818 2819 2820 /* ARGSUSED */ 2821 static int 2822 vfs_mntdummyread(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cred, 2823 caller_context_t *ct) 2824 { 2825 return (0); 2826 } 2827 2828 /* ARGSUSED */ 2829 static int 2830 vfs_mntdummywrite(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cred, 2831 caller_context_t *ct) 2832 { 2833 return (0); 2834 } 2835 2836 /* 2837 * The dummy vnode is currently used only by file events notification 2838 * module which is just interested in the timestamps. 2839 */ 2840 /* ARGSUSED */ 2841 static int 2842 vfs_mntdummygetattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr, 2843 caller_context_t *ct) 2844 { 2845 bzero(vap, sizeof (vattr_t)); 2846 vap->va_type = VREG; 2847 vap->va_nlink = 1; 2848 vap->va_ctime = vfs_mnttab_ctime; 2849 /* 2850 * it is ok to just copy mtime as the time will be monotonically 2851 * increasing. 
2852 */ 2853 vap->va_mtime = vfs_mnttab_mtime; 2854 vap->va_atime = vap->va_mtime; 2855 return (0); 2856 } 2857 2858 static void 2859 vfs_mnttabvp_setup(void) 2860 { 2861 vnode_t *tvp; 2862 vnodeops_t *vfs_mntdummyvnops; 2863 const fs_operation_def_t mnt_dummyvnodeops_template[] = { 2864 VOPNAME_READ, { .vop_read = vfs_mntdummyread }, 2865 VOPNAME_WRITE, { .vop_write = vfs_mntdummywrite }, 2866 VOPNAME_GETATTR, { .vop_getattr = vfs_mntdummygetattr }, 2867 VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support }, 2868 NULL, NULL 2869 }; 2870 2871 if (vn_make_ops("mnttab", mnt_dummyvnodeops_template, 2872 &vfs_mntdummyvnops) != 0) { 2873 cmn_err(CE_WARN, "vfs_mnttabvp_setup: vn_make_ops failed"); 2874 /* Shouldn't happen, but not bad enough to panic */ 2875 return; 2876 } 2877 2878 /* 2879 * A global dummy vnode is allocated to represent mntfs files. 2880 * The mntfs file (/etc/mnttab) can be monitored for file events 2881 * and receive an event when mnttab changes. Dummy VOP calls 2882 * will be made on this vnode. The file events notification module 2883 * intercepts this vnode and delivers relevant events. 2884 */ 2885 tvp = vn_alloc(KM_SLEEP); 2886 tvp->v_flag = VNOMOUNT|VNOMAP|VNOSWAP|VNOCACHE; 2887 vn_setops(tvp, vfs_mntdummyvnops); 2888 tvp->v_type = VREG; 2889 /* 2890 * The mnt dummy ops do not reference v_data. 2891 * No other module intercepting this vnode should either. 2892 * Just set it to point to itself. 2893 */ 2894 tvp->v_data = (caddr_t)tvp; 2895 tvp->v_vfsp = rootvfs; 2896 vfs_mntdummyvp = tvp; 2897 } 2898 2899 /* 2900 * performs fake read/write ops 2901 */ 2902 static void 2903 vfs_mnttab_rwop(int rw) 2904 { 2905 struct uio uio; 2906 struct iovec iov; 2907 char buf[1]; 2908 2909 if (vfs_mntdummyvp == NULL) 2910 return; 2911 2912 bzero(&uio, sizeof (uio)); 2913 bzero(&iov, sizeof (iov)); 2914 iov.iov_base = buf; 2915 iov.iov_len = 0; 2916 uio.uio_iov = &iov; 2917 uio.uio_iovcnt = 1; 2918 uio.uio_loffset = 0; 2919 uio.uio_segflg = UIO_SYSSPACE; 2920 uio.uio_resid = 0; 2921 if (rw) { 2922 (void) VOP_WRITE(vfs_mntdummyvp, &uio, 0, kcred, NULL); 2923 } else { 2924 (void) VOP_READ(vfs_mntdummyvp, &uio, 0, kcred, NULL); 2925 } 2926 } 2927 2928 /* 2929 * Generate a write operation. 2930 */ 2931 void 2932 vfs_mnttab_writeop(void) 2933 { 2934 vfs_mnttab_rwop(1); 2935 } 2936 2937 /* 2938 * Generate a read operation. 2939 */ 2940 void 2941 vfs_mnttab_readop(void) 2942 { 2943 vfs_mnttab_rwop(0); 2944 } 2945 2946 /* 2947 * Free any mnttab information recorded in the vfs struct. 2948 * The vfs must not be on the vfs list. 2949 */ 2950 static void 2951 vfs_freemnttab(struct vfs *vfsp) 2952 { 2953 ASSERT(!VFS_ON_LIST(vfsp)); 2954 2955 /* 2956 * Free device and mount point information 2957 */ 2958 if (vfsp->vfs_mntpt != NULL) { 2959 refstr_rele(vfsp->vfs_mntpt); 2960 vfsp->vfs_mntpt = NULL; 2961 } 2962 if (vfsp->vfs_resource != NULL) { 2963 refstr_rele(vfsp->vfs_resource); 2964 vfsp->vfs_resource = NULL; 2965 } 2966 /* 2967 * Now free mount options information 2968 */ 2969 vfs_freeopttbl(&vfsp->vfs_mntopts); 2970 } 2971 2972 /* 2973 * Return the last mnttab modification time 2974 */ 2975 void 2976 vfs_mnttab_modtime(timespec_t *ts) 2977 { 2978 ASSERT(RW_LOCK_HELD(&vfslist)); 2979 *ts = vfs_mnttab_mtime; 2980 } 2981 2982 /* 2983 * See if mnttab is changed 2984 */ 2985 void 2986 vfs_mnttab_poll(timespec_t *old, struct pollhead **phpp) 2987 { 2988 int changed; 2989 2990 *phpp = (struct pollhead *)NULL; 2991 2992 /* 2993 * Note: don't grab vfs list lock before accessing vfs_mnttab_mtime. 
 * Can lead to deadlock against vfs_mnttab_modtimeupd(). It is safe
 * to not grab the vfs list lock because tv_sec is monotonically
 * increasing.
 */

	changed = (old->tv_nsec != vfs_mnttab_mtime.tv_nsec) ||
	    (old->tv_sec != vfs_mnttab_mtime.tv_sec);
	if (!changed) {
		*phpp = &vfs_pollhd;
	}
}

/* Provide a unique and monotonically-increasing timestamp. */
void
vfs_mono_time(timespec_t *ts)
{
	static volatile hrtime_t hrt;	/* The saved time. */
	hrtime_t newhrt, oldhrt;	/* For effecting the CAS. */
	timespec_t newts;

	/*
	 * Try gethrestime() first, but be prepared to fabricate a sensible
	 * answer at the first sign of any trouble.
	 */
	gethrestime(&newts);
	newhrt = ts2hrt(&newts);
	for (;;) {
		oldhrt = hrt;
		if (newhrt <= hrt)
			newhrt = hrt + 1;
		if (atomic_cas_64((uint64_t *)&hrt, oldhrt, newhrt) == oldhrt)
			break;
	}
	hrt2ts(newhrt, ts);
}

/*
 * Update the mnttab modification time and wake up any waiters for
 * mnttab changes.
 */
void
vfs_mnttab_modtimeupd()
{
	hrtime_t oldhrt, newhrt;

	ASSERT(RW_WRITE_HELD(&vfslist));
	oldhrt = ts2hrt(&vfs_mnttab_mtime);
	gethrestime(&vfs_mnttab_mtime);
	newhrt = ts2hrt(&vfs_mnttab_mtime);
	if (oldhrt == (hrtime_t)0)
		vfs_mnttab_ctime = vfs_mnttab_mtime;
	/*
	 * Attempt to provide unique mtime (like uniqtime but not).
	 */
	if (newhrt == oldhrt) {
		newhrt++;
		hrt2ts(newhrt, &vfs_mnttab_mtime);
	}
	pollwakeup(&vfs_pollhd, (short)POLLRDBAND);
	vfs_mnttab_writeop();
}

int
dounmount(struct vfs *vfsp, int flag, cred_t *cr)
{
	vnode_t *coveredvp;
	int error;
	extern void teardown_vopstats(vfs_t *);

	/*
	 * Get covered vnode. This will be NULL if the vfs is not linked
	 * into the file system name space (i.e., domount() with MS_NOSPLICE).
	 */
	coveredvp = vfsp->vfs_vnodecovered;
	ASSERT(coveredvp == NULL || vn_vfswlock_held(coveredvp));

	/*
	 * Purge all dnlc entries for this vfs.
	 */
	(void) dnlc_purge_vfsp(vfsp, 0);

	/* For forcible umount, skip VFS_SYNC() since it may hang */
	if ((flag & MS_FORCE) == 0)
		(void) VFS_SYNC(vfsp, 0, cr);

	/*
	 * Lock the vfs to maintain fs status quo during unmount. This
	 * has to be done after the sync because ufs_update tries to acquire
	 * the vfs_reflock.
	 */
	vfs_lock_wait(vfsp);

	if (error = VFS_UNMOUNT(vfsp, flag, cr)) {
		vfs_unlock(vfsp);
		if (coveredvp != NULL)
			vn_vfsunlock(coveredvp);
	} else if (coveredvp != NULL) {
		teardown_vopstats(vfsp);
		/*
		 * vfs_remove() will do a VN_RELE(vfsp->vfs_vnodecovered)
		 * when it frees vfsp so we do a VN_HOLD() so we can
		 * continue to use coveredvp afterwards.
		 */
		VN_HOLD(coveredvp);
		vfs_remove(vfsp);
		vn_vfsunlock(coveredvp);
		VN_RELE(coveredvp);
	} else {
		teardown_vopstats(vfsp);
		/*
		 * Release the reference to vfs that is not linked
		 * into the name space.
		 */
		vfs_unlock(vfsp);
		VFS_RELE(vfsp);
	}
	return (error);
}


/*
 * vfs_unmountall() is called by uadmin() to unmount all
 * mounted file systems (except the root file system) during shutdown.
 * It follows the existing locking protocol when traversing the vfs list
 * to sync and unmount vfses.
Even though there should be no 3119 * other thread running while the system is shutting down, it is prudent 3120 * to still follow the locking protocol. 3121 */ 3122 void 3123 vfs_unmountall(void) 3124 { 3125 struct vfs *vfsp; 3126 struct vfs *prev_vfsp = NULL; 3127 int error; 3128 3129 /* 3130 * Toss all dnlc entries now so that the per-vfs sync 3131 * and unmount operations don't have to slog through 3132 * a bunch of uninteresting vnodes over and over again. 3133 */ 3134 dnlc_purge(); 3135 3136 vfs_list_lock(); 3137 for (vfsp = rootvfs->vfs_prev; vfsp != rootvfs; vfsp = prev_vfsp) { 3138 prev_vfsp = vfsp->vfs_prev; 3139 3140 if (vfs_lock(vfsp) != 0) 3141 continue; 3142 error = vn_vfswlock(vfsp->vfs_vnodecovered); 3143 vfs_unlock(vfsp); 3144 if (error) 3145 continue; 3146 3147 vfs_list_unlock(); 3148 3149 (void) VFS_SYNC(vfsp, SYNC_CLOSE, CRED()); 3150 (void) dounmount(vfsp, 0, CRED()); 3151 3152 /* 3153 * Since we dropped the vfslist lock above we must 3154 * verify that next_vfsp still exists, else start over. 3155 */ 3156 vfs_list_lock(); 3157 for (vfsp = rootvfs->vfs_prev; 3158 vfsp != rootvfs; vfsp = vfsp->vfs_prev) 3159 if (vfsp == prev_vfsp) 3160 break; 3161 if (vfsp == rootvfs && prev_vfsp != rootvfs) 3162 prev_vfsp = rootvfs->vfs_prev; 3163 } 3164 vfs_list_unlock(); 3165 } 3166 3167 /* 3168 * Called to add an entry to the end of the vfs mount in progress list 3169 */ 3170 void 3171 vfs_addmip(dev_t dev, struct vfs *vfsp) 3172 { 3173 struct ipmnt *mipp; 3174 3175 mipp = (struct ipmnt *)kmem_alloc(sizeof (struct ipmnt), KM_SLEEP); 3176 mipp->mip_next = NULL; 3177 mipp->mip_dev = dev; 3178 mipp->mip_vfsp = vfsp; 3179 mutex_enter(&vfs_miplist_mutex); 3180 if (vfs_miplist_end != NULL) 3181 vfs_miplist_end->mip_next = mipp; 3182 else 3183 vfs_miplist = mipp; 3184 vfs_miplist_end = mipp; 3185 mutex_exit(&vfs_miplist_mutex); 3186 } 3187 3188 /* 3189 * Called to remove an entry from the mount in progress list 3190 * Either because the mount completed or it failed. 3191 */ 3192 void 3193 vfs_delmip(struct vfs *vfsp) 3194 { 3195 struct ipmnt *mipp, *mipprev; 3196 3197 mutex_enter(&vfs_miplist_mutex); 3198 mipprev = NULL; 3199 for (mipp = vfs_miplist; 3200 mipp && mipp->mip_vfsp != vfsp; mipp = mipp->mip_next) { 3201 mipprev = mipp; 3202 } 3203 if (mipp == NULL) 3204 return; /* shouldn't happen */ 3205 if (mipp == vfs_miplist_end) 3206 vfs_miplist_end = mipprev; 3207 if (mipprev == NULL) 3208 vfs_miplist = mipp->mip_next; 3209 else 3210 mipprev->mip_next = mipp->mip_next; 3211 mutex_exit(&vfs_miplist_mutex); 3212 kmem_free(mipp, sizeof (struct ipmnt)); 3213 } 3214 3215 /* 3216 * vfs_add is called by a specific filesystem's mount routine to add 3217 * the new vfs into the vfs list/hash and to cover the mounted-on vnode. 3218 * The vfs should already have been locked by the caller. 3219 * 3220 * coveredvp is NULL if this is the root. 
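 *
 * Illustrative sketch (simplified, not from any particular file system)
 * of the state the caller is expected to have set up:
 *
 *	if (vn_vfswlock(coveredvp) != 0)	write-lock the mount point
 *		return (EBUSY);
 *	vfs_lock_wait(vfsp);			lock the new vfs
 *	vfs_add(coveredvp, vfsp, mflag);	cover coveredvp, join lists
 *	vfs_unlock(vfsp);
 *	vn_vfsunlock(coveredvp);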
 */
void
vfs_add(vnode_t *coveredvp, struct vfs *vfsp, int mflag)
{
	int newflag;

	ASSERT(vfs_lock_held(vfsp));
	VFS_HOLD(vfsp);
	newflag = vfsp->vfs_flag;
	if (mflag & MS_RDONLY)
		newflag |= VFS_RDONLY;
	else
		newflag &= ~VFS_RDONLY;
	if (mflag & MS_NOSUID)
		newflag |= (VFS_NOSETUID|VFS_NODEVICES);
	else
		newflag &= ~(VFS_NOSETUID|VFS_NODEVICES);
	if (mflag & MS_NOMNTTAB)
		newflag |= VFS_NOMNTTAB;
	else
		newflag &= ~VFS_NOMNTTAB;

	if (coveredvp != NULL) {
		ASSERT(vn_vfswlock_held(coveredvp));
		coveredvp->v_vfsmountedhere = vfsp;
		VN_HOLD(coveredvp);
	}
	vfsp->vfs_vnodecovered = coveredvp;
	vfsp->vfs_flag = newflag;

	vfs_list_add(vfsp);
}

/*
 * Remove a vfs from the vfs list, null out the pointer from the
 * covered vnode to the vfs (v_vfsmountedhere), and null out the pointer
 * from the vfs to the covered vnode (vfs_vnodecovered). Release the
 * reference to the vfs and to the covered vnode.
 *
 * Called from dounmount after it's confirmed with the file system
 * that the unmount is legal.
 */
void
vfs_remove(struct vfs *vfsp)
{
	vnode_t *vp;

	ASSERT(vfs_lock_held(vfsp));

	/*
	 * Can't unmount root. Should never happen because fs will
	 * be busy.
	 */
	if (vfsp == rootvfs)
		panic("vfs_remove: unmounting root");

	vfs_list_remove(vfsp);

	/*
	 * Unhook from the file system name space.
	 */
	vp = vfsp->vfs_vnodecovered;
	ASSERT(vn_vfswlock_held(vp));
	vp->v_vfsmountedhere = NULL;
	vfsp->vfs_vnodecovered = NULL;
	VN_RELE(vp);

	/*
	 * Release the lock and wake up anybody waiting.
	 */
	vfs_unlock(vfsp);
	VFS_RELE(vfsp);
}

/*
 * Lock a filesystem to prevent access to it while mounting,
 * unmounting and syncing. Return EBUSY immediately if the lock
 * can't be acquired.
 */
int
vfs_lock(vfs_t *vfsp)
{
	vn_vfslocks_entry_t *vpvfsentry;

	vpvfsentry = vn_vfslocks_getlock(vfsp);
	if (rwst_tryenter(&vpvfsentry->ve_lock, RW_WRITER))
		return (0);

	vn_vfslocks_rele(vpvfsentry);
	return (EBUSY);
}

int
vfs_rlock(vfs_t *vfsp)
{
	vn_vfslocks_entry_t *vpvfsentry;

	vpvfsentry = vn_vfslocks_getlock(vfsp);

	if (rwst_tryenter(&vpvfsentry->ve_lock, RW_READER))
		return (0);

	vn_vfslocks_rele(vpvfsentry);
	return (EBUSY);
}

void
vfs_lock_wait(vfs_t *vfsp)
{
	vn_vfslocks_entry_t *vpvfsentry;

	vpvfsentry = vn_vfslocks_getlock(vfsp);
	rwst_enter(&vpvfsentry->ve_lock, RW_WRITER);
}

void
vfs_rlock_wait(vfs_t *vfsp)
{
	vn_vfslocks_entry_t *vpvfsentry;

	vpvfsentry = vn_vfslocks_getlock(vfsp);
	rwst_enter(&vpvfsentry->ve_lock, RW_READER);
}

/*
 * Unlock a locked filesystem.
 */
void
vfs_unlock(vfs_t *vfsp)
{
	vn_vfslocks_entry_t *vpvfsentry;

	/*
	 * vfs_unlock will mimic sema_v behaviour to fix 4748018.
	 * These changes should remain in place for patch releases as well.
	 */
	if (panicstr)
		return;

	/*
	 * ve_refcount needs to be dropped twice here.
	 * 1. To release the reference taken by the call to
	 *    vn_vfslocks_getlock() below.
	 * 2. To release the reference taken by the locking routines
	 *    (vfs_lock_wait/vfs_rlock_wait/vfs_lock/vfs_rlock, etc.).
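 *
 * A typical caller therefore simply pairs the two, e.g. (illustrative):
 *
 *	vfs_lock_wait(vfsp);
 *	...modify or examine vfsp...
 *	vfs_unlock(vfsp);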
3365 */ 3366 3367 vpvfsentry = vn_vfslocks_getlock(vfsp); 3368 vn_vfslocks_rele(vpvfsentry); 3369 3370 rwst_exit(&vpvfsentry->ve_lock); 3371 vn_vfslocks_rele(vpvfsentry); 3372 } 3373 3374 /* 3375 * Utility routine that allows a filesystem to construct its 3376 * fsid in "the usual way" - by munging some underlying dev_t and 3377 * the filesystem type number into the 64-bit fsid. Note that 3378 * this implicitly relies on dev_t persistence to make filesystem 3379 * id's persistent. 3380 * 3381 * There's nothing to prevent an individual fs from constructing its 3382 * fsid in a different way, and indeed they should. 3383 * 3384 * Since we want fsids to be 32-bit quantities (so that they can be 3385 * exported identically by either 32-bit or 64-bit APIs, as well as 3386 * the fact that fsid's are "known" to NFS), we compress the device 3387 * number given down to 32-bits, and panic if that isn't possible. 3388 */ 3389 void 3390 vfs_make_fsid(fsid_t *fsi, dev_t dev, int val) 3391 { 3392 if (!cmpldev((dev32_t *)&fsi->val[0], dev)) 3393 panic("device number too big for fsid!"); 3394 fsi->val[1] = val; 3395 } 3396 3397 int 3398 vfs_lock_held(vfs_t *vfsp) 3399 { 3400 int held; 3401 vn_vfslocks_entry_t *vpvfsentry; 3402 3403 /* 3404 * vfs_lock_held will mimic sema_held behaviour 3405 * if panicstr is set. And these changes should remain 3406 * for the patch changes as it is. 3407 */ 3408 if (panicstr) 3409 return (1); 3410 3411 vpvfsentry = vn_vfslocks_getlock(vfsp); 3412 held = rwst_lock_held(&vpvfsentry->ve_lock, RW_WRITER); 3413 3414 vn_vfslocks_rele(vpvfsentry); 3415 return (held); 3416 } 3417 3418 struct _kthread * 3419 vfs_lock_owner(vfs_t *vfsp) 3420 { 3421 struct _kthread *owner; 3422 vn_vfslocks_entry_t *vpvfsentry; 3423 3424 /* 3425 * vfs_wlock_held will mimic sema_held behaviour 3426 * if panicstr is set. And these changes should remain 3427 * for the patch changes as it is. 3428 */ 3429 if (panicstr) 3430 return (NULL); 3431 3432 vpvfsentry = vn_vfslocks_getlock(vfsp); 3433 owner = rwst_owner(&vpvfsentry->ve_lock); 3434 3435 vn_vfslocks_rele(vpvfsentry); 3436 return (owner); 3437 } 3438 3439 /* 3440 * vfs list locking. 3441 * 3442 * Rather than manipulate the vfslist lock directly, we abstract into lock 3443 * and unlock routines to allow the locking implementation to be changed for 3444 * clustering. 3445 * 3446 * Whenever the vfs list is modified through its hash links, the overall list 3447 * lock must be obtained before locking the relevant hash bucket. But to see 3448 * whether a given vfs is on the list, it suffices to obtain the lock for the 3449 * hash bucket without getting the overall list lock. (See getvfs() below.) 3450 */ 3451 3452 void 3453 vfs_list_lock() 3454 { 3455 rw_enter(&vfslist, RW_WRITER); 3456 } 3457 3458 void 3459 vfs_list_read_lock() 3460 { 3461 rw_enter(&vfslist, RW_READER); 3462 } 3463 3464 void 3465 vfs_list_unlock() 3466 { 3467 rw_exit(&vfslist); 3468 } 3469 3470 /* 3471 * Low level worker routines for adding entries to and removing entries from 3472 * the vfs list. 
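 *
 * Illustrative ordering for a writer (both locks, list lock first):
 *
 *	vfs_list_lock();		rw_enter(&vfslist, RW_WRITER)
 *	vfs_hash_add(vfsp, 0);		also takes the bucket's rvfs_lock
 *	vfs_list_unlock();
 *
 * A reader such as getvfs() below takes only the bucket's rvfs_lock.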
3473 */ 3474 3475 static void 3476 vfs_hash_add(struct vfs *vfsp, int insert_at_head) 3477 { 3478 int vhno; 3479 struct vfs **hp; 3480 dev_t dev; 3481 3482 ASSERT(RW_WRITE_HELD(&vfslist)); 3483 3484 dev = expldev(vfsp->vfs_fsid.val[0]); 3485 vhno = VFSHASH(getmajor(dev), getminor(dev)); 3486 3487 mutex_enter(&rvfs_list[vhno].rvfs_lock); 3488 3489 /* 3490 * Link into the hash table, inserting it at the end, so that LOFS 3491 * with the same fsid as UFS (or other) file systems will not hide the 3492 * UFS. 3493 */ 3494 if (insert_at_head) { 3495 vfsp->vfs_hash = rvfs_list[vhno].rvfs_head; 3496 rvfs_list[vhno].rvfs_head = vfsp; 3497 } else { 3498 for (hp = &rvfs_list[vhno].rvfs_head; *hp != NULL; 3499 hp = &(*hp)->vfs_hash) 3500 continue; 3501 /* 3502 * hp now contains the address of the pointer to update 3503 * to effect the insertion. 3504 */ 3505 vfsp->vfs_hash = NULL; 3506 *hp = vfsp; 3507 } 3508 3509 rvfs_list[vhno].rvfs_len++; 3510 mutex_exit(&rvfs_list[vhno].rvfs_lock); 3511 } 3512 3513 3514 static void 3515 vfs_hash_remove(struct vfs *vfsp) 3516 { 3517 int vhno; 3518 struct vfs *tvfsp; 3519 dev_t dev; 3520 3521 ASSERT(RW_WRITE_HELD(&vfslist)); 3522 3523 dev = expldev(vfsp->vfs_fsid.val[0]); 3524 vhno = VFSHASH(getmajor(dev), getminor(dev)); 3525 3526 mutex_enter(&rvfs_list[vhno].rvfs_lock); 3527 3528 /* 3529 * Remove from hash. 3530 */ 3531 if (rvfs_list[vhno].rvfs_head == vfsp) { 3532 rvfs_list[vhno].rvfs_head = vfsp->vfs_hash; 3533 rvfs_list[vhno].rvfs_len--; 3534 goto foundit; 3535 } 3536 for (tvfsp = rvfs_list[vhno].rvfs_head; tvfsp != NULL; 3537 tvfsp = tvfsp->vfs_hash) { 3538 if (tvfsp->vfs_hash == vfsp) { 3539 tvfsp->vfs_hash = vfsp->vfs_hash; 3540 rvfs_list[vhno].rvfs_len--; 3541 goto foundit; 3542 } 3543 } 3544 cmn_err(CE_WARN, "vfs_list_remove: vfs not found in hash"); 3545 3546 foundit: 3547 3548 mutex_exit(&rvfs_list[vhno].rvfs_lock); 3549 } 3550 3551 3552 void 3553 vfs_list_add(struct vfs *vfsp) 3554 { 3555 zone_t *zone; 3556 3557 /* 3558 * Typically, the vfs_t will have been created on behalf of the file 3559 * system in vfs_init, where it will have been provided with a 3560 * vfs_impl_t. This, however, might be lacking if the vfs_t was created 3561 * by an unbundled file system. We therefore check for such an example 3562 * before stamping the vfs_t with its creation time for the benefit of 3563 * mntfs. 3564 */ 3565 if (vfsp->vfs_implp == NULL) 3566 vfsimpl_setup(vfsp); 3567 vfs_mono_time(&vfsp->vfs_hrctime); 3568 3569 /* 3570 * The zone that owns the mount is the one that performed the mount. 3571 * Note that this isn't necessarily the same as the zone mounted into. 3572 * The corresponding zone_rele_ref() will be done when the vfs_t 3573 * is being free'd. 3574 */ 3575 vfsp->vfs_zone = curproc->p_zone; 3576 zone_init_ref(&vfsp->vfs_implp->vi_zone_ref); 3577 zone_hold_ref(vfsp->vfs_zone, &vfsp->vfs_implp->vi_zone_ref, 3578 ZONE_REF_VFS); 3579 3580 /* 3581 * Find the zone mounted into, and put this mount on its vfs list. 3582 */ 3583 zone = zone_find_by_path(refstr_value(vfsp->vfs_mntpt)); 3584 ASSERT(zone != NULL); 3585 /* 3586 * Special casing for the root vfs. This structure is allocated 3587 * statically and hooked onto rootvfs at link time. During the 3588 * vfs_mountroot call at system startup time, the root file system's 3589 * VFS_MOUNTROOT routine will call vfs_add with this root vfs struct 3590 * as argument. The code below must detect and handle this special 3591 * case. 
The only apparent justification for this special casing is 3592 * to ensure that the root file system appears at the head of the 3593 * list. 3594 * 3595 * XXX: I'm assuming that it's ok to do normal list locking when 3596 * adding the entry for the root file system (this used to be 3597 * done with no locks held). 3598 */ 3599 vfs_list_lock(); 3600 /* 3601 * Link into the vfs list proper. 3602 */ 3603 if (vfsp == &root) { 3604 /* 3605 * Assert: This vfs is already on the list as its first entry. 3606 * Thus, there's nothing to do. 3607 */ 3608 ASSERT(rootvfs == vfsp); 3609 /* 3610 * Add it to the head of the global zone's vfslist. 3611 */ 3612 ASSERT(zone == global_zone); 3613 ASSERT(zone->zone_vfslist == NULL); 3614 zone->zone_vfslist = vfsp; 3615 } else { 3616 /* 3617 * Link to end of list using vfs_prev (as rootvfs is now a 3618 * doubly linked circular list) so list is in mount order for 3619 * mnttab use. 3620 */ 3621 rootvfs->vfs_prev->vfs_next = vfsp; 3622 vfsp->vfs_prev = rootvfs->vfs_prev; 3623 rootvfs->vfs_prev = vfsp; 3624 vfsp->vfs_next = rootvfs; 3625 3626 /* 3627 * Do it again for the zone-private list (which may be NULL). 3628 */ 3629 if (zone->zone_vfslist == NULL) { 3630 ASSERT(zone != global_zone); 3631 zone->zone_vfslist = vfsp; 3632 } else { 3633 zone->zone_vfslist->vfs_zone_prev->vfs_zone_next = vfsp; 3634 vfsp->vfs_zone_prev = zone->zone_vfslist->vfs_zone_prev; 3635 zone->zone_vfslist->vfs_zone_prev = vfsp; 3636 vfsp->vfs_zone_next = zone->zone_vfslist; 3637 } 3638 } 3639 3640 /* 3641 * Link into the hash table, inserting it at the end, so that LOFS 3642 * with the same fsid as UFS (or other) file systems will not hide 3643 * the UFS. 3644 */ 3645 vfs_hash_add(vfsp, 0); 3646 3647 /* 3648 * Link into tree indexed by mntpoint, for vfs_mntpoint2vfsp 3649 * mntix discerns entries with the same key 3650 */ 3651 vfsp->vfs_mntix = ++vfs_curr_mntix; 3652 avl_add(&vfs_by_dev, vfsp); 3653 3654 /* 3655 * Link into tree indexed by dev, for vfs_devismounted 3656 */ 3657 avl_add(&vfs_by_mntpnt, vfsp); 3658 3659 /* 3660 * update the mnttab modification time 3661 */ 3662 vfs_mnttab_modtimeupd(); 3663 vfs_list_unlock(); 3664 zone_rele(zone); 3665 } 3666 3667 void 3668 vfs_list_remove(struct vfs *vfsp) 3669 { 3670 zone_t *zone; 3671 3672 zone = zone_find_by_path(refstr_value(vfsp->vfs_mntpt)); 3673 ASSERT(zone != NULL); 3674 /* 3675 * Callers are responsible for preventing attempts to unmount the 3676 * root. 3677 */ 3678 ASSERT(vfsp != rootvfs); 3679 3680 vfs_list_lock(); 3681 3682 /* 3683 * Remove from avl trees 3684 */ 3685 avl_remove(&vfs_by_mntpnt, vfsp); 3686 avl_remove(&vfs_by_dev, vfsp); 3687 3688 /* 3689 * Remove from hash. 3690 */ 3691 vfs_hash_remove(vfsp); 3692 3693 /* 3694 * Remove from vfs list. 3695 */ 3696 vfsp->vfs_prev->vfs_next = vfsp->vfs_next; 3697 vfsp->vfs_next->vfs_prev = vfsp->vfs_prev; 3698 vfsp->vfs_next = vfsp->vfs_prev = NULL; 3699 3700 /* 3701 * Remove from zone-specific vfs list. 
3702 */ 3703 if (zone->zone_vfslist == vfsp) 3704 zone->zone_vfslist = vfsp->vfs_zone_next; 3705 3706 if (vfsp->vfs_zone_next == vfsp) { 3707 ASSERT(vfsp->vfs_zone_prev == vfsp); 3708 ASSERT(zone->zone_vfslist == vfsp); 3709 zone->zone_vfslist = NULL; 3710 } 3711 3712 vfsp->vfs_zone_prev->vfs_zone_next = vfsp->vfs_zone_next; 3713 vfsp->vfs_zone_next->vfs_zone_prev = vfsp->vfs_zone_prev; 3714 vfsp->vfs_zone_next = vfsp->vfs_zone_prev = NULL; 3715 3716 /* 3717 * update the mnttab modification time 3718 */ 3719 vfs_mnttab_modtimeupd(); 3720 vfs_list_unlock(); 3721 zone_rele(zone); 3722 } 3723 3724 struct vfs * 3725 getvfs(fsid_t *fsid) 3726 { 3727 struct vfs *vfsp; 3728 int val0 = fsid->val[0]; 3729 int val1 = fsid->val[1]; 3730 dev_t dev = expldev(val0); 3731 int vhno = VFSHASH(getmajor(dev), getminor(dev)); 3732 kmutex_t *hmp = &rvfs_list[vhno].rvfs_lock; 3733 3734 mutex_enter(hmp); 3735 for (vfsp = rvfs_list[vhno].rvfs_head; vfsp; vfsp = vfsp->vfs_hash) { 3736 if (vfsp->vfs_fsid.val[0] == val0 && 3737 vfsp->vfs_fsid.val[1] == val1) { 3738 VFS_HOLD(vfsp); 3739 mutex_exit(hmp); 3740 return (vfsp); 3741 } 3742 } 3743 mutex_exit(hmp); 3744 return (NULL); 3745 } 3746 3747 /* 3748 * Search the vfs mount in progress list for a specified device/vfs entry. 3749 * Returns 0 if the first entry in the list that the device matches has the 3750 * given vfs pointer as well. If the device matches but a different vfs 3751 * pointer is encountered in the list before the given vfs pointer then 3752 * a 1 is returned. 3753 */ 3754 3755 int 3756 vfs_devmounting(dev_t dev, struct vfs *vfsp) 3757 { 3758 int retval = 0; 3759 struct ipmnt *mipp; 3760 3761 mutex_enter(&vfs_miplist_mutex); 3762 for (mipp = vfs_miplist; mipp != NULL; mipp = mipp->mip_next) { 3763 if (mipp->mip_dev == dev) { 3764 if (mipp->mip_vfsp != vfsp) 3765 retval = 1; 3766 break; 3767 } 3768 } 3769 mutex_exit(&vfs_miplist_mutex); 3770 return (retval); 3771 } 3772 3773 /* 3774 * Search the vfs list for a specified device. Returns 1, if entry is found 3775 * or 0 if no suitable entry is found. 3776 */ 3777 3778 int 3779 vfs_devismounted(dev_t dev) 3780 { 3781 struct vfs *vfsp; 3782 int found = 0; 3783 struct vfs search; 3784 avl_index_t index; 3785 3786 search.vfs_dev = dev; 3787 search.vfs_mntix = 0; 3788 3789 vfs_list_read_lock(); 3790 3791 /* 3792 * there might be several entries with the same dev in the tree, 3793 * only discerned by mntix. To find the first, we start with a mntix 3794 * of 0. The search will fail. The following avl_nearest will give 3795 * us the actual first entry. 3796 */ 3797 VERIFY(avl_find(&vfs_by_dev, &search, &index) == NULL); 3798 vfsp = avl_nearest(&vfs_by_dev, index, AVL_AFTER); 3799 3800 if (vfsp != NULL && vfsp->vfs_dev == dev) 3801 found = 1; 3802 3803 vfs_list_unlock(); 3804 return (found); 3805 } 3806 3807 /* 3808 * Search the vfs list for a specified device. Returns a pointer to it 3809 * or NULL if no suitable entry is found. The caller of this routine 3810 * is responsible for releasing the returned vfs pointer. 3811 */ 3812 struct vfs * 3813 vfs_dev2vfsp(dev_t dev) 3814 { 3815 struct vfs *vfsp; 3816 int found; 3817 struct vfs search; 3818 avl_index_t index; 3819 3820 search.vfs_dev = dev; 3821 search.vfs_mntix = 0; 3822 3823 vfs_list_read_lock(); 3824 3825 /* 3826 * there might be several entries with the same dev in the tree, 3827 * only discerned by mntix. To find the first, we start with a mntix 3828 * of 0. The search will fail. The following avl_nearest will give 3829 * us the actual first entry. 
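 *
 * For example (illustrative values): if the tree holds entries with
 * (dev 5, mntix 17) and (dev 5, mntix 42), the search key
 * (dev 5, mntix 0) is not present, and avl_nearest(..., AVL_AFTER)
 * from the returned insertion point yields the (dev 5, mntix 17)
 * entry, i.e. the oldest mount of that device.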
3830 */ 3831 VERIFY(avl_find(&vfs_by_dev, &search, &index) == NULL); 3832 vfsp = avl_nearest(&vfs_by_dev, index, AVL_AFTER); 3833 3834 found = 0; 3835 while (vfsp != NULL && vfsp->vfs_dev == dev) { 3836 /* 3837 * The following could be made more efficient by making 3838 * the entire loop use vfs_zone_next if the call is from 3839 * a zone. The only callers, however, ustat(2) and 3840 * umount2(2), don't seem to justify the added 3841 * complexity at present. 3842 */ 3843 if (ZONE_PATH_VISIBLE(refstr_value(vfsp->vfs_mntpt), 3844 curproc->p_zone)) { 3845 VFS_HOLD(vfsp); 3846 found = 1; 3847 break; 3848 } 3849 vfsp = AVL_NEXT(&vfs_by_dev, vfsp); 3850 } 3851 vfs_list_unlock(); 3852 return (found ? vfsp : NULL); 3853 } 3854 3855 /* 3856 * Search the vfs list for a specified mntpoint. Returns a pointer to it 3857 * or NULL if no suitable entry is found. The caller of this routine 3858 * is responsible for releasing the returned vfs pointer. 3859 * 3860 * Note that if multiple mntpoints match, the last one matching is 3861 * returned in an attempt to return the "top" mount when overlay 3862 * mounts are covering the same mount point. This is accomplished by starting 3863 * at the end of the list and working our way backwards, stopping at the first 3864 * matching mount. 3865 */ 3866 struct vfs * 3867 vfs_mntpoint2vfsp(const char *mp) 3868 { 3869 struct vfs *vfsp; 3870 struct vfs *retvfsp = NULL; 3871 zone_t *zone = curproc->p_zone; 3872 struct vfs *list; 3873 3874 vfs_list_read_lock(); 3875 if (getzoneid() == GLOBAL_ZONEID) { 3876 /* 3877 * The global zone may see filesystems in any zone. 3878 */ 3879 struct vfs search; 3880 search.vfs_mntpt = refstr_alloc(mp); 3881 search.vfs_mntix = UINT64_MAX; 3882 avl_index_t index; 3883 3884 /* 3885 * there might be several entries with the same mntpnt in the 3886 * tree, only discerned by mntix. To find the last, we start 3887 * with a mntix of UINT64_MAX. The search will fail. The 3888 * following avl_nearest will give us the actual last entry 3889 * matching the mntpnt. 3890 */ 3891 VERIFY(avl_find(&vfs_by_mntpnt, &search, &index) == 0); 3892 vfsp = avl_nearest(&vfs_by_mntpnt, index, AVL_BEFORE); 3893 3894 refstr_rele(search.vfs_mntpt); 3895 3896 if (vfsp != NULL && 3897 strcmp(refstr_value(vfsp->vfs_mntpt), mp) == 0) 3898 retvfsp = vfsp; 3899 } else if ((list = zone->zone_vfslist) != NULL) { 3900 const char *mntpt; 3901 3902 vfsp = list->vfs_zone_prev; 3903 do { 3904 mntpt = refstr_value(vfsp->vfs_mntpt); 3905 mntpt = ZONE_PATH_TRANSLATE(mntpt, zone); 3906 if (strcmp(mntpt, mp) == 0) { 3907 retvfsp = vfsp; 3908 break; 3909 } 3910 vfsp = vfsp->vfs_zone_prev; 3911 } while (vfsp != list->vfs_zone_prev); 3912 } 3913 if (retvfsp) 3914 VFS_HOLD(retvfsp); 3915 vfs_list_unlock(); 3916 return (retvfsp); 3917 } 3918 3919 /* 3920 * Search the vfs list for a specified vfsops. 3921 * if vfs entry is found then return 1, else 0. 
3922 */ 3923 int 3924 vfs_opsinuse(vfsops_t *ops) 3925 { 3926 struct vfs *vfsp; 3927 int found; 3928 3929 vfs_list_read_lock(); 3930 vfsp = rootvfs; 3931 found = 0; 3932 do { 3933 if (vfs_getops(vfsp) == ops) { 3934 found = 1; 3935 break; 3936 } 3937 vfsp = vfsp->vfs_next; 3938 } while (vfsp != rootvfs); 3939 vfs_list_unlock(); 3940 return (found); 3941 } 3942 3943 /* 3944 * Allocate an entry in vfssw for a file system type 3945 */ 3946 struct vfssw * 3947 allocate_vfssw(const char *type) 3948 { 3949 struct vfssw *vswp; 3950 3951 if (type[0] == '\0' || strlen(type) + 1 > _ST_FSTYPSZ) { 3952 /* 3953 * The vfssw table uses the empty string to identify an 3954 * available entry; we cannot add any type which has 3955 * a leading NUL. The string length is limited to 3956 * the size of the st_fstype array in struct stat. 3957 */ 3958 return (NULL); 3959 } 3960 3961 ASSERT(VFSSW_WRITE_LOCKED()); 3962 for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++) 3963 if (!ALLOCATED_VFSSW(vswp)) { 3964 vswp->vsw_name = kmem_alloc(strlen(type) + 1, KM_SLEEP); 3965 (void) strcpy(vswp->vsw_name, type); 3966 ASSERT(vswp->vsw_count == 0); 3967 vswp->vsw_count = 1; 3968 mutex_init(&vswp->vsw_lock, NULL, MUTEX_DEFAULT, NULL); 3969 return (vswp); 3970 } 3971 return (NULL); 3972 } 3973 3974 /* 3975 * Impose additional layer of translation between vfstype names 3976 * and module names in the filesystem. 3977 */ 3978 static const char * 3979 vfs_to_modname(const char *vfstype) 3980 { 3981 if (strcmp(vfstype, "proc") == 0) { 3982 vfstype = "procfs"; 3983 } else if (strcmp(vfstype, "fd") == 0) { 3984 vfstype = "fdfs"; 3985 } else if (strncmp(vfstype, "nfs", 3) == 0) { 3986 vfstype = "nfs"; 3987 } 3988 3989 return (vfstype); 3990 } 3991 3992 /* 3993 * Find a vfssw entry given a file system type name. 3994 * Try to autoload the filesystem if it's not found. 3995 * If it's installed, return the vfssw locked to prevent unloading. 3996 */ 3997 struct vfssw * 3998 vfs_getvfssw(const char *type) 3999 { 4000 struct vfssw *vswp; 4001 const char *modname; 4002 4003 RLOCK_VFSSW(); 4004 vswp = vfs_getvfsswbyname(type); 4005 modname = vfs_to_modname(type); 4006 4007 if (rootdir == NULL) { 4008 /* 4009 * If we haven't yet loaded the root file system, then our 4010 * _init won't be called until later. Allocate vfssw entry, 4011 * because mod_installfs won't be called. 4012 */ 4013 if (vswp == NULL) { 4014 RUNLOCK_VFSSW(); 4015 WLOCK_VFSSW(); 4016 if ((vswp = vfs_getvfsswbyname(type)) == NULL) { 4017 if ((vswp = allocate_vfssw(type)) == NULL) { 4018 WUNLOCK_VFSSW(); 4019 return (NULL); 4020 } 4021 } 4022 WUNLOCK_VFSSW(); 4023 RLOCK_VFSSW(); 4024 } 4025 if (!VFS_INSTALLED(vswp)) { 4026 RUNLOCK_VFSSW(); 4027 (void) modloadonly("fs", modname); 4028 } else 4029 RUNLOCK_VFSSW(); 4030 return (vswp); 4031 } 4032 4033 /* 4034 * Try to load the filesystem. Before calling modload(), we drop 4035 * our lock on the VFS switch table, and pick it up after the 4036 * module is loaded. However, there is a potential race: the 4037 * module could be unloaded after the call to modload() completes 4038 * but before we pick up the lock and drive on. Therefore, 4039 * we keep reloading the module until we've loaded the module 4040 * _and_ we have the lock on the VFS switch table. 
4041 */ 4042 while (vswp == NULL || !VFS_INSTALLED(vswp)) { 4043 RUNLOCK_VFSSW(); 4044 if (modload("fs", modname) == -1) 4045 return (NULL); 4046 RLOCK_VFSSW(); 4047 if (vswp == NULL) 4048 if ((vswp = vfs_getvfsswbyname(type)) == NULL) 4049 break; 4050 } 4051 RUNLOCK_VFSSW(); 4052 4053 return (vswp); 4054 } 4055 4056 /* 4057 * Find a vfssw entry given a file system type name. 4058 */ 4059 struct vfssw * 4060 vfs_getvfsswbyname(const char *type) 4061 { 4062 struct vfssw *vswp; 4063 4064 ASSERT(VFSSW_LOCKED()); 4065 if (type == NULL || *type == '\0') 4066 return (NULL); 4067 4068 for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++) { 4069 if (strcmp(type, vswp->vsw_name) == 0) { 4070 vfs_refvfssw(vswp); 4071 return (vswp); 4072 } 4073 } 4074 4075 return (NULL); 4076 } 4077 4078 /* 4079 * Find a vfssw entry given a set of vfsops. 4080 */ 4081 struct vfssw * 4082 vfs_getvfsswbyvfsops(vfsops_t *vfsops) 4083 { 4084 struct vfssw *vswp; 4085 4086 RLOCK_VFSSW(); 4087 for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++) { 4088 if (ALLOCATED_VFSSW(vswp) && &vswp->vsw_vfsops == vfsops) { 4089 vfs_refvfssw(vswp); 4090 RUNLOCK_VFSSW(); 4091 return (vswp); 4092 } 4093 } 4094 RUNLOCK_VFSSW(); 4095 4096 return (NULL); 4097 } 4098 4099 /* 4100 * Reference a vfssw entry. 4101 */ 4102 void 4103 vfs_refvfssw(struct vfssw *vswp) 4104 { 4105 4106 mutex_enter(&vswp->vsw_lock); 4107 vswp->vsw_count++; 4108 mutex_exit(&vswp->vsw_lock); 4109 } 4110 4111 /* 4112 * Unreference a vfssw entry. 4113 */ 4114 void 4115 vfs_unrefvfssw(struct vfssw *vswp) 4116 { 4117 4118 mutex_enter(&vswp->vsw_lock); 4119 vswp->vsw_count--; 4120 mutex_exit(&vswp->vsw_lock); 4121 } 4122 4123 int sync_timeout = 30; /* timeout for syncing a page during panic */ 4124 int sync_timeleft; /* portion of sync_timeout remaining */ 4125 4126 static int sync_retries = 20; /* number of retries when not making progress */ 4127 static int sync_triesleft; /* portion of sync_retries remaining */ 4128 4129 static pgcnt_t old_pgcnt, new_pgcnt; 4130 static int new_bufcnt, old_bufcnt; 4131 4132 /* 4133 * Sync all of the mounted filesystems, and then wait for the actual i/o to 4134 * complete. We wait by counting the number of dirty pages and buffers, 4135 * pushing them out using bio_busy() and page_busy(), and then counting again. 4136 * This routine is used during both the uadmin A_SHUTDOWN code as well as 4137 * the SYNC phase of the panic code (see comments in panic.c). It should only 4138 * be used after some higher-level mechanism has quiesced the system so that 4139 * new writes are not being initiated while we are waiting for completion. 4140 * 4141 * To ensure finite running time, our algorithm uses two timeout mechanisms: 4142 * sync_timeleft (a timer implemented by the omnipresent deadman() cyclic), and 4143 * sync_triesleft (a progress counter used by the vfs_syncall() loop below). 4144 * Together these ensure that syncing completes if our i/o paths are stuck. 4145 * The counters are declared above so they can be found easily in the debugger. 4146 * 4147 * The sync_timeleft counter is reset by bio_busy() and page_busy() using the 4148 * vfs_syncprogress() subroutine whenever we make progress through the lists of 4149 * pages and buffers. It is decremented and expired by the deadman() cyclic. 4150 * When vfs_syncall() decides it is done, we disable the deadman() counter by 4151 * setting sync_timeleft to zero. This timer guards against vfs_syncall() 4152 * deadlocking or hanging inside of a broken filesystem or driver routine. 
int sync_timeout = 30;  /* timeout for syncing a page during panic */
int sync_timeleft;      /* portion of sync_timeout remaining */

static int sync_retries = 20;   /* number of retries when not making progress */
static int sync_triesleft;      /* portion of sync_retries remaining */

static pgcnt_t old_pgcnt, new_pgcnt;
static int new_bufcnt, old_bufcnt;

/*
 * Sync all of the mounted filesystems, and then wait for the actual i/o to
 * complete. We wait by counting the number of dirty pages and buffers,
 * pushing them out using bio_busy() and page_busy(), and then counting again.
 * This routine is used during both the uadmin A_SHUTDOWN code as well as
 * the SYNC phase of the panic code (see comments in panic.c). It should only
 * be used after some higher-level mechanism has quiesced the system so that
 * new writes are not being initiated while we are waiting for completion.
 *
 * To ensure finite running time, our algorithm uses two timeout mechanisms:
 * sync_timeleft (a timer implemented by the omnipresent deadman() cyclic), and
 * sync_triesleft (a progress counter used by the vfs_syncall() loop below).
 * Together these ensure that syncing completes if our i/o paths are stuck.
 * The counters are declared above so they can be found easily in the debugger.
 *
 * The sync_timeleft counter is reset by bio_busy() and page_busy() using the
 * vfs_syncprogress() subroutine whenever we make progress through the lists of
 * pages and buffers. It is decremented and expired by the deadman() cyclic.
 * When vfs_syncall() decides it is done, we disable the deadman() counter by
 * setting sync_timeleft to zero. This timer guards against vfs_syncall()
 * deadlocking or hanging inside of a broken filesystem or driver routine.
 *
 * The sync_triesleft counter is updated by vfs_syncall() itself. If we make
 * sync_retries consecutive calls to bio_busy() and page_busy() without
 * decreasing either the number of dirty buffers or dirty pages below the
 * lowest count we have seen so far, we give up and return from vfs_syncall().
 *
 * Each loop iteration ends with a one-second call to delay() to allow time
 * for i/o completion and to permit the user time to read our progress
 * messages.
 */
void
vfs_syncall(void)
{
        if (rootdir == NULL && !modrootloaded)
                return; /* panic during boot - no filesystems yet */

        printf("syncing file systems...");
        vfs_syncprogress();
        sync();

        vfs_syncprogress();
        sync_triesleft = sync_retries;

        old_bufcnt = new_bufcnt = INT_MAX;
        old_pgcnt = new_pgcnt = ULONG_MAX;

        while (sync_triesleft > 0) {
                old_bufcnt = MIN(old_bufcnt, new_bufcnt);
                old_pgcnt = MIN(old_pgcnt, new_pgcnt);

                new_bufcnt = bio_busy(B_TRUE);
                new_pgcnt = page_busy(B_TRUE);
                vfs_syncprogress();

                if (new_bufcnt == 0 && new_pgcnt == 0)
                        break;

                if (new_bufcnt < old_bufcnt || new_pgcnt < old_pgcnt)
                        sync_triesleft = sync_retries;
                else
                        sync_triesleft--;

                if (new_bufcnt)
                        printf(" [%d]", new_bufcnt);
                if (new_pgcnt)
                        printf(" %lu", new_pgcnt);

                delay(hz);
        }

        if (new_bufcnt != 0 || new_pgcnt != 0)
                printf(" done (not all i/o completed)\n");
        else
                printf(" done\n");

        sync_timeleft = 0;
        delay(hz);
}

/*
 * If we are in the middle of the sync phase of panic, reset sync_timeleft to
 * sync_timeout to indicate that we are making progress and the deadman()
 * omnipresent cyclic should not yet time us out. Note that it is safe to
 * store to sync_timeleft here since the deadman() is firing at high-level
 * on top of us. If we are racing with the deadman(), either the deadman()
 * will decrement the old value and then we will reset it, or we will
 * reset it and then the deadman() will immediately decrement it. In either
 * case, correct behavior results.
 */
void
vfs_syncprogress(void)
{
        if (panicstr)
                sync_timeleft = sync_timeout;
}

/*
 * Map VFS flags to statvfs flags. These shouldn't really be separate
 * flags at all.
 */
uint_t
vf_to_stf(uint_t vf)
{
        uint_t stf = 0;

        if (vf & VFS_RDONLY)
                stf |= ST_RDONLY;
        if (vf & VFS_NOSETUID)
                stf |= ST_NOSUID;
        if (vf & VFS_NOTRUNC)
                stf |= ST_NOTRUNC;

        return (stf);
}

/*
 * Entries for (illegal) fstype 0.
 */
/* ARGSUSED */
int
vfsstray_sync(struct vfs *vfsp, short arg, struct cred *cr)
{
        cmn_err(CE_PANIC, "stray vfs operation");
        return (0);
}

/*
 * Entries for (illegal) fstype 0.
 */
int
vfsstray(void)
{
        cmn_err(CE_PANIC, "stray vfs operation");
        return (0);
}

/*
 * Support for dealing with forced UFS unmount and its interaction with
 * LOFS. Could be used by any filesystem.
 * See bug 1203132.
 */
int
vfs_EIO(void)
{
        return (EIO);
}

/*
 * We have to define the op for sync separately, since the compiler gets
 * confused if we mix and match ANSI and normal style prototypes when
 * a "short" argument is present and spits out a warning.
 */
/*ARGSUSED*/
int
vfs_EIO_sync(struct vfs *vfsp, short arg, struct cred *cr)
{
        return (EIO);
}

vfs_t EIO_vfs;
vfsops_t *EIO_vfsops;

/*
 * Called from startup() to initialize all loaded vfs's
 */
void
vfsinit(void)
{
        struct vfssw *vswp;
        int error;
        extern int vopstats_enabled;
        extern void vopstats_startup();

        static const fs_operation_def_t EIO_vfsops_template[] = {
                VFSNAME_MOUNT,          { .error = vfs_EIO },
                VFSNAME_UNMOUNT,        { .error = vfs_EIO },
                VFSNAME_ROOT,           { .error = vfs_EIO },
                VFSNAME_STATVFS,        { .error = vfs_EIO },
                VFSNAME_SYNC,           { .vfs_sync = vfs_EIO_sync },
                VFSNAME_VGET,           { .error = vfs_EIO },
                VFSNAME_MOUNTROOT,      { .error = vfs_EIO },
                VFSNAME_FREEVFS,        { .error = vfs_EIO },
                VFSNAME_VNSTATE,        { .error = vfs_EIO },
                NULL, NULL
        };

        static const fs_operation_def_t stray_vfsops_template[] = {
                VFSNAME_MOUNT,          { .error = vfsstray },
                VFSNAME_UNMOUNT,        { .error = vfsstray },
                VFSNAME_ROOT,           { .error = vfsstray },
                VFSNAME_STATVFS,        { .error = vfsstray },
                VFSNAME_SYNC,           { .vfs_sync = vfsstray_sync },
                VFSNAME_VGET,           { .error = vfsstray },
                VFSNAME_MOUNTROOT,      { .error = vfsstray },
                VFSNAME_FREEVFS,        { .error = vfsstray },
                VFSNAME_VNSTATE,        { .error = vfsstray },
                NULL, NULL
        };

        /* Create vfs cache */
        vfs_cache = kmem_cache_create("vfs_cache", sizeof (struct vfs),
            sizeof (uintptr_t), NULL, NULL, NULL, NULL, NULL, 0);

        /* Initialize the vnode cache (file systems may use it during init). */
        vn_create_cache();

        /* Setup event monitor framework */
        fem_init();

        /* Initialize the dummy stray file system type. */
        error = vfs_setfsops(0, stray_vfsops_template, NULL);

        /* Initialize the dummy EIO file system. */
        error = vfs_makefsops(EIO_vfsops_template, &EIO_vfsops);
        if (error != 0) {
                cmn_err(CE_WARN, "vfsinit: bad EIO vfs ops template");
                /* Shouldn't happen, but not bad enough to panic */
        }

        VFS_INIT(&EIO_vfs, EIO_vfsops, (caddr_t)NULL);

        /*
         * Default EIO_vfs.vfs_flag to VFS_UNMOUNTED so a lookup
         * on this vfs can immediately notice it's invalid.
         */
        EIO_vfs.vfs_flag |= VFS_UNMOUNTED;

        /*
         * Call the init routines of non-loadable filesystems only.
         * Filesystems which are loaded as separate modules will be
         * initialized by the module loading code instead.
         */

        for (vswp = &vfssw[1]; vswp < &vfssw[nfstype]; vswp++) {
                RLOCK_VFSSW();
                if (vswp->vsw_init != NULL)
                        (*vswp->vsw_init)(vswp - vfssw, vswp->vsw_name);
                RUNLOCK_VFSSW();
        }

        vopstats_startup();

        if (vopstats_enabled) {
                /* EIO_vfs can collect stats, but we don't retrieve them */
                initialize_vopstats(&EIO_vfs.vfs_vopstats);
                EIO_vfs.vfs_fstypevsp = NULL;
                EIO_vfs.vfs_vskap = NULL;
                EIO_vfs.vfs_flag |= VFS_STATS;
        }

        xattr_init();

        reparse_point_init();
}

vfs_t *
vfs_alloc(int kmflag)
{
        vfs_t *vfsp;

        vfsp = kmem_cache_alloc(vfs_cache, kmflag);

        /*
         * Do the simplest initialization here.
         * Everything else gets done in vfs_init()
         */
        bzero(vfsp, sizeof (vfs_t));
        return (vfsp);
}

void
vfs_free(vfs_t *vfsp)
{
        /*
         * One would be tempted to assert that "vfsp->vfs_count == 0".
         * The problem is that this gets called out of domount() with
         * a partially initialized vfs and a vfs_count of 1. This is
         * also called from vfs_rele() with a vfs_count of 0. We can't
         * call VFS_RELE() from domount() if VFS_MOUNT() hasn't successfully
         * returned. This is because VFS_MOUNT() fully initializes the
         * vfs structure and its associated data. VFS_RELE() will call
         * VFS_FREEVFS() which may panic the system if the data structures
         * aren't fully initialized from a successful VFS_MOUNT().
         */

        /* If FEM was in use, make sure everything gets cleaned up */
        if (vfsp->vfs_femhead) {
                ASSERT(vfsp->vfs_femhead->femh_list == NULL);
                mutex_destroy(&vfsp->vfs_femhead->femh_lock);
                kmem_free(vfsp->vfs_femhead, sizeof (*(vfsp->vfs_femhead)));
                vfsp->vfs_femhead = NULL;
        }

        if (vfsp->vfs_implp)
                vfsimpl_teardown(vfsp);
        sema_destroy(&vfsp->vfs_reflock);
        kmem_cache_free(vfs_cache, vfsp);
}

/*
 * Increments the vfs reference count by one atomically.
 */
void
vfs_hold(vfs_t *vfsp)
{
        atomic_inc_32(&vfsp->vfs_count);
        ASSERT(vfsp->vfs_count != 0);
}

/*
 * Decrements the vfs reference count by one atomically. When
 * vfs reference count becomes zero, it calls the file system
 * specific vfs_freevfs() to free up the resources.
 */
void
vfs_rele(vfs_t *vfsp)
{
        ASSERT(vfsp->vfs_count != 0);
        if (atomic_dec_32_nv(&vfsp->vfs_count) == 0) {
                VFS_FREEVFS(vfsp);
                lofi_remove(vfsp);
                if (vfsp->vfs_zone)
                        zone_rele_ref(&vfsp->vfs_implp->vi_zone_ref,
                            ZONE_REF_VFS);
                vfs_freemnttab(vfsp);
                vfs_free(vfsp);
        }
}
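/*
 * Illustrative sketch of the hold/release pairing (hypothetical caller;
 * the function below is a placeholder): code that keeps a vfs pointer
 * beyond the scope of the mounted-list lock takes a hold first and drops
 * it when done; the final vfs_rele() is what drives VFS_FREEVFS() and the
 * teardown performed by vfs_free() above.
 */
#if 0   /* example only, not compiled */
static void
example_use_vfs(vfs_t *vfsp)
{
        vfs_hold(vfsp);         /* keep vfsp from being freed under us */

        /* ... operate on the held vfs ... */

        vfs_rele(vfsp);         /* frees vfsp if this was the last hold */
}
#endif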
/*
 * Generic operations vector support.
 *
 * This is used to build operations vectors for both the vfs and vnode.
 * It's normally called only when a file system is loaded.
 *
 * There are many possible algorithms for this, including the following:
 *
 * (1) scan the list of known operations; for each, see if the file system
 *     includes an entry for it, and fill it in as appropriate.
 *
 * (2) set up defaults for all known operations. scan the list of ops
 *     supplied by the file system; for each which is both supplied and
 *     known, fill it in.
 *
 * (3) sort the lists of known ops & supplied ops; scan the list, filling
 *     in entries as we go.
 *
 * we choose (1) for simplicity, and because performance isn't critical here.
 * note that (2) could be sped up using a precomputed hash table on known ops.
 * (3) could be faster than either, but only if the lists were very large or
 * supplied in sorted order.
 *
 */

int
fs_build_vector(void *vector, int *unused_ops,
    const fs_operation_trans_def_t *translation,
    const fs_operation_def_t *operations)
{
        int i, num_trans, num_ops, used;

        /*
         * Count the number of translations and the number of supplied
         * operations.
         */

        {
                const fs_operation_trans_def_t *p;

                for (num_trans = 0, p = translation;
                    p->name != NULL;
                    num_trans++, p++)
                        ;
        }

        {
                const fs_operation_def_t *p;

                for (num_ops = 0, p = operations;
                    p->name != NULL;
                    num_ops++, p++)
                        ;
        }

        /* Walk through each operation known to our caller. There will be */
        /* one entry in the supplied "translation table" for each. */

        used = 0;

        for (i = 0; i < num_trans; i++) {
                int j, found;
                char *curname;
                fs_generic_func_p result;
                fs_generic_func_p *location;

                curname = translation[i].name;

                /* Look for a matching operation in the list supplied by the */
                /* file system. */

                found = 0;

                for (j = 0; j < num_ops; j++) {
                        if (strcmp(operations[j].name, curname) == 0) {
                                used++;
                                found = 1;
                                break;
                        }
                }

                /*
                 * If the file system is using a "placeholder" for default
                 * or error functions, grab the appropriate function out of
                 * the translation table. If the file system didn't supply
                 * this operation at all, use the default function.
                 */

                if (found) {
                        result = operations[j].func.fs_generic;
                        if (result == fs_default) {
                                result = translation[i].defaultFunc;
                        } else if (result == fs_error) {
                                result = translation[i].errorFunc;
                        } else if (result == NULL) {
                                /* Null values are PROHIBITED */
                                return (EINVAL);
                        }
                } else {
                        result = translation[i].defaultFunc;
                }

                /* Now store the function into the operations vector. */

                location = (fs_generic_func_p *)
                    (((char *)vector) + translation[i].offset);

                *location = result;
        }

        *unused_ops = num_ops - used;

        return (0);
}

/* Placeholder functions, should never be called. */

int
fs_error(void)
{
        cmn_err(CE_PANIC, "fs_error called");
        return (0);
}

int
fs_default(void)
{
        cmn_err(CE_PANIC, "fs_default called");
        return (0);
}
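/*
 * Illustrative sketch of how a filesystem feeds fs_build_vector()
 * (hypothetical filesystem; the examplefs_* names are placeholders):
 * a loadable filesystem normally supplies an fs_operation_def_t template
 * to vfs_setfsops() from its init routine, just as vfsinit() does above
 * for the stray and EIO templates, and fs_build_vector() fills the
 * resulting vfsops vector from it.
 */
#if 0   /* example only, not compiled */
static const fs_operation_def_t examplefs_vfsops_template[] = {
        VFSNAME_MOUNT,          { .vfs_mount = examplefs_mount },
        VFSNAME_UNMOUNT,        { .vfs_unmount = examplefs_unmount },
        VFSNAME_ROOT,           { .vfs_root = examplefs_root },
        VFSNAME_STATVFS,        { .vfs_statvfs = examplefs_statvfs },
        VFSNAME_VGET,           { .error = fs_error },  /* unsupported */
        NULL, NULL
};

static int
examplefs_init(int fstype, char *name)
{
        return (vfs_setfsops(fstype, examplefs_vfsops_template, NULL));
}
#endif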
#ifdef __sparc

/*
 * Part of the implementation of booting off a mirrored root
 * involves a change of dev_t for the root device. To
 * accomplish this, first remove the existing hash table
 * entry for the root device, convert to the new dev_t,
 * then re-insert in the hash table at the head of the list.
 */
void
vfs_root_redev(vfs_t *vfsp, dev_t ndev, int fstype)
{
        vfs_list_lock();

        vfs_hash_remove(vfsp);

        vfsp->vfs_dev = ndev;
        vfs_make_fsid(&vfsp->vfs_fsid, ndev, fstype);

        vfs_hash_add(vfsp, 1);

        vfs_list_unlock();
}

#else /* x86 NEWBOOT */

#if defined(__x86)
extern int hvmboot_rootconf();
#endif /* __x86 */

extern ib_boot_prop_t *iscsiboot_prop;

int
rootconf()
{
        int error;
        struct vfssw *vsw;
        extern void pm_init();
        char *fstyp, *fsmod;
        int ret = -1;

        getrootfs(&fstyp, &fsmod);

#if defined(__x86)
        /*
         * hvmboot_rootconf() is defined in the hvm_bootstrap misc module,
         * which lives in /platform/i86hvm, and hence is only available when
         * booted in an x86 hvm environment. If the hvm_bootstrap misc module
         * is not available then the modstub for this function will return 0.
         * If the hvm_bootstrap misc module is available it will be loaded
         * and hvmboot_rootconf() will be invoked.
         */
        if (error = hvmboot_rootconf())
                return (error);
#endif /* __x86 */

        if (error = clboot_rootconf())
                return (error);

        if (modload("fs", fsmod) == -1)
                panic("Cannot _init %s module", fsmod);

        RLOCK_VFSSW();
        vsw = vfs_getvfsswbyname(fstyp);
        RUNLOCK_VFSSW();
        if (vsw == NULL) {
                cmn_err(CE_CONT, "Cannot find %s filesystem\n", fstyp);
                return (ENXIO);
        }
        VFS_INIT(rootvfs, &vsw->vsw_vfsops, 0);
        VFS_HOLD(rootvfs);

        /* always mount readonly first */
        rootvfs->vfs_flag |= VFS_RDONLY;

        pm_init();

        if (netboot && iscsiboot_prop) {
                cmn_err(CE_WARN, "NFS boot and iSCSI boot"
                    " shouldn't happen at the same time");
                return (EINVAL);
        }

        if (netboot || iscsiboot_prop) {
                ret = strplumb();
                if (ret != 0) {
                        cmn_err(CE_WARN, "Cannot plumb network device %d", ret);
                        return (EFAULT);
                }
        }

        if ((ret == 0) && iscsiboot_prop) {
                ret = modload("drv", "iscsi");
                /* -1 indicates fail */
                if (ret == -1) {
                        cmn_err(CE_WARN, "Failed to load iscsi module");
                        iscsi_boot_prop_free();
                        return (EINVAL);
                } else {
                        if (!i_ddi_attach_pseudo_node("iscsi")) {
                                cmn_err(CE_WARN,
                                    "Failed to attach iscsi driver");
                                iscsi_boot_prop_free();
                                return (ENODEV);
                        }
                }
        }

        error = VFS_MOUNTROOT(rootvfs, ROOT_INIT);
        vfs_unrefvfssw(vsw);
        rootdev = rootvfs->vfs_dev;

        if (error)
                cmn_err(CE_CONT, "Cannot mount root on %s fstype %s\n",
                    rootfs.bo_name, fstyp);
        else
                cmn_err(CE_CONT, "?root on %s fstype %s\n",
                    rootfs.bo_name, fstyp);
        return (error);
}

/*
 * XXX this is called by nfs only and should probably be removed
 * If booted with ASKNAME, prompt on the console for a filesystem
 * name and return it.
 */
void
getfsname(char *askfor, char *name, size_t namelen)
{
        if (boothowto & RB_ASKNAME) {
                printf("%s name: ", askfor);
                console_gets(name, namelen);
        }
}

/*
 * Init the root filesystem type (rootfs.bo_fstype) from the "fstype"
 * property.
 *
 * Filesystem types starting with the prefix "nfs" are diskless clients;
 * init the root filename (rootfs.bo_name), too.
 *
 * If we are booting via NFS we currently have these options:
 *      nfs  -  dynamically choose NFS V2, V3, or V4 (default)
 *      nfs2 -  force NFS V2
 *      nfs3 -  force NFS V3
 *      nfs4 -  force NFS V4
 * Because we need to maintain backward compatibility with the naming
 * convention that the NFS V2 filesystem name is "nfs" (see vfs_conf.c)
 * we need to map "nfs" => "nfsdyn" and "nfs2" => "nfs". The dynamic
 * nfs module will map the type back to either "nfs", "nfs3", or "nfs4".
 * This is only for root filesystems, all other uses such as cachefs
 * will expect that "nfs" == NFS V2.
 */
static void
getrootfs(char **fstypp, char **fsmodp)
{
        extern char *strplumb_get_netdev_path(void);
        char *propstr = NULL;

        /*
         * Check fstype property; for diskless it should be one of "nfs",
         * "nfs2", "nfs3" or "nfs4".
         */
        if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
            DDI_PROP_DONTPASS, "fstype", &propstr) == DDI_SUCCESS) {
                (void) strncpy(rootfs.bo_fstype, propstr, BO_MAXFSNAME);
                ddi_prop_free(propstr);

        /*
         * if the boot property 'fstype' is not set, but 'zfs-bootfs' is set,
         * assume the type of this root filesystem is 'zfs'.
         */
        } else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
            DDI_PROP_DONTPASS, "zfs-bootfs", &propstr) == DDI_SUCCESS) {
                (void) strncpy(rootfs.bo_fstype, "zfs", BO_MAXFSNAME);
                ddi_prop_free(propstr);
        }

        if (strncmp(rootfs.bo_fstype, "nfs", 3) != 0) {
                *fstypp = *fsmodp = rootfs.bo_fstype;
                return;
        }

        ++netboot;

        if (strcmp(rootfs.bo_fstype, "nfs2") == 0)
                (void) strcpy(rootfs.bo_fstype, "nfs");
        else if (strcmp(rootfs.bo_fstype, "nfs") == 0)
                (void) strcpy(rootfs.bo_fstype, "nfsdyn");

        /*
         * check if path to network interface is specified in bootpath
         * or by a hypervisor domain configuration file.
         * XXPV - enable strplumb_get_netdev_path()
         */
        if (ddi_prop_exists(DDI_DEV_T_ANY, ddi_root_node(), DDI_PROP_DONTPASS,
            "xpv-nfsroot")) {
                (void) strcpy(rootfs.bo_name, "/xpvd/xnf@0");
        } else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
            DDI_PROP_DONTPASS, "bootpath", &propstr) == DDI_SUCCESS) {
                (void) strncpy(rootfs.bo_name, propstr, BO_MAXOBJNAME);
                ddi_prop_free(propstr);
        } else {
                /* attempt to determine netdev_path via boot_mac address */
                netdev_path = strplumb_get_netdev_path();
                if (netdev_path == NULL)
                        panic("cannot find boot network interface");
                (void) strncpy(rootfs.bo_name, netdev_path, BO_MAXOBJNAME);
        }
        *fstypp = rootfs.bo_fstype;
        *fsmodp = "nfs";
}
#endif

/*
 * VFS feature routines
 */

#define VFTINDEX(feature)       (((feature) >> 32) & 0xFFFFFFFF)
#define VFTBITS(feature)        ((feature) & 0xFFFFFFFFLL)

/* Register a feature in the vfs */
void
vfs_set_feature(vfs_t *vfsp, vfs_feature_t feature)
{
        /* Note that vfs_featureset[] is found in *vfsp->vfs_implp */
        if (vfsp->vfs_implp == NULL)
                return;

        vfsp->vfs_featureset[VFTINDEX(feature)] |= VFTBITS(feature);
}

void
vfs_clear_feature(vfs_t *vfsp, vfs_feature_t feature)
{
        /* Note that vfs_featureset[] is found in *vfsp->vfs_implp */
        if (vfsp->vfs_implp == NULL)
                return;
        vfsp->vfs_featureset[VFTINDEX(feature)] &= VFTBITS(~feature);
}

/*
 * Query a vfs for a feature.
 * Returns 1 if feature is present, 0 if not
 */
int
vfs_has_feature(vfs_t *vfsp, vfs_feature_t feature)
{
        int ret = 0;

        /* Note that vfs_featureset[] is found in *vfsp->vfs_implp */
        if (vfsp->vfs_implp == NULL)
                return (ret);

        if (vfsp->vfs_featureset[VFTINDEX(feature)] & VFTBITS(feature))
                ret = 1;

        return (ret);
}

/*
 * Propagate feature set from one vfs to another
 */
void
vfs_propagate_features(vfs_t *from, vfs_t *to)
{
        int i;

        if (to->vfs_implp == NULL || from->vfs_implp == NULL)
                return;

        for (i = 1; i <= to->vfs_featureset[0]; i++) {
                to->vfs_featureset[i] = from->vfs_featureset[i];
        }
}

#define LOFINODE_PATH   "/dev/lofi/%d"

/*
 * Return the vnode for the lofi node if there's a lofi mount in place.
 * Returns -1 when there's no lofi node, 0 on success, and > 0 on
 * failure.
 */
int
vfs_get_lofi(vfs_t *vfsp, vnode_t **vpp)
{
        char *path = NULL;
        int strsize;
        int err;

        if (vfsp->vfs_lofi_minor == 0) {
                *vpp = NULL;
                return (-1);
        }

        strsize = snprintf(NULL, 0, LOFINODE_PATH, vfsp->vfs_lofi_minor);
        path = kmem_alloc(strsize + 1, KM_SLEEP);
        (void) snprintf(path, strsize + 1, LOFINODE_PATH, vfsp->vfs_lofi_minor);

        /*
         * We may be inside a zone, so we need to use the /dev path, but
         * it's created asynchronously, so we wait here.
         */
        for (;;) {
                err = lookupname(path, UIO_SYSSPACE, FOLLOW, NULLVPP, vpp);

                if (err != ENOENT)
                        break;

                if ((err = delay_sig(hz / 8)) == EINTR)
                        break;
        }

        if (err)
                *vpp = NULL;

        kmem_free(path, strsize + 1);
        return (err);
}
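/*
 * Worked example for the VFTINDEX()/VFTBITS() macros above (the feature
 * value is hypothetical): a vfs_feature_t packs a word index into its
 * upper 32 bits and a bit mask into its lower 32 bits. For a feature
 * defined as ((1ULL << 32) | 0x4), VFTINDEX() yields 1 and VFTBITS()
 * yields 0x4, so vfs_set_feature() sets bit 2 of vfs_featureset[1],
 * vfs_has_feature() tests that same bit, and vfs_clear_feature()
 * clears it.
 */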