/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * Support for ephemeral mounts, e.g. mirror-mounts. These mounts are
 * triggered from a "stub" rnode via a special set of vnodeops.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cred.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/mman.h>
#include <sys/pathname.h>
#include <sys/dirent.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/swap.h>
#include <sys/errno.h>
#include <sys/strsubr.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/mount.h>
#include <sys/cmn_err.h>
#include <sys/pathconf.h>
#include <sys/utsname.h>
#include <sys/dnlc.h>
#include <sys/acl.h>
#include <sys/systeminfo.h>
#include <sys/policy.h>
#include <sys/sdt.h>
#include <sys/list.h>
#include <sys/stat.h>
#include <sys/mntent.h>

#include <rpc/types.h>
#include <rpc/auth.h>
#include <rpc/clnt.h>

#include <nfs/nfs.h>
#include <nfs/nfs_clnt.h>
#include <nfs/nfs_acl.h>
#include <nfs/lm.h>
#include <nfs/nfs4.h>
#include <nfs/nfs4_kprot.h>
#include <nfs/rnode4.h>
#include <nfs/nfs4_clnt.h>

#include <vm/hat.h>
#include <vm/as.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_kpm.h>
#include <vm/seg_vn.h>

#include <fs/fs_subr.h>

#include <sys/ddi.h>
#include <sys/int_fmtio.h>

#include <sys/sunddi.h>

/*
 * The automatic unmounter thread stuff!
 */
static int nfs4_trigger_thread_timer = 20;	/* in seconds */

/*
 * Just a default....
 */
static uint_t nfs4_trigger_mount_to = 240;

typedef struct nfs4_trigger_globals {
	kmutex_t		ntg_forest_lock;
	uint_t			ntg_mount_to;
	int			ntg_thread_started;
	nfs4_ephemeral_tree_t	*ntg_forest;
} nfs4_trigger_globals_t;

kmutex_t	nfs4_ephemeral_thread_lock;

zone_key_t	nfs4_ephemeral_key = ZONE_KEY_UNINITIALIZED;

static void	nfs4_ephemeral_start_harvester(nfs4_trigger_globals_t *);

/*
 * Used for ephemeral mounts; contains data either duplicated from
 * servinfo4_t, or hand-crafted, depending on type of ephemeral mount.
 *
 * It's intended that this structure is used solely for ephemeral
 * mount-type specific data, for passing this data to
 * nfs4_trigger_nargs_create().
 */
typedef struct ephemeral_servinfo {
	char			*esi_hostname;
	char			*esi_netname;
	char			*esi_path;
	int			esi_path_len;
	int			esi_mount_flags;
	struct netbuf		*esi_addr;
	struct netbuf		*esi_syncaddr;
	struct knetconfig	*esi_knconf;
} ephemeral_servinfo_t;

/*
 * Collect together the mount-type specific and generic data args.
 */
typedef struct domount_args {
	ephemeral_servinfo_t	*dma_esi;
	char			*dma_hostlist;	/* comma-sep. for RO failover */
	struct nfs_args		*dma_nargs;
} domount_args_t;


/*
 * The vnode ops functions for a trigger stub vnode
 */
static int nfs4_trigger_open(vnode_t **, int, cred_t *, caller_context_t *);
static int nfs4_trigger_getattr(vnode_t *, struct vattr *, int, cred_t *,
    caller_context_t *);
static int nfs4_trigger_setattr(vnode_t *, struct vattr *, int, cred_t *,
    caller_context_t *);
static int nfs4_trigger_access(vnode_t *, int, int, cred_t *,
    caller_context_t *);
static int nfs4_trigger_readlink(vnode_t *, struct uio *, cred_t *,
    caller_context_t *);
static int nfs4_trigger_lookup(vnode_t *, char *, vnode_t **,
    struct pathname *, int, vnode_t *, cred_t *, caller_context_t *,
    int *, pathname_t *);
static int nfs4_trigger_create(vnode_t *, char *, struct vattr *,
    enum vcexcl, int, vnode_t **, cred_t *, int, caller_context_t *,
    vsecattr_t *);
static int nfs4_trigger_remove(vnode_t *, char *, cred_t *, caller_context_t *,
    int);
static int nfs4_trigger_link(vnode_t *, vnode_t *, char *, cred_t *,
    caller_context_t *, int);
static int nfs4_trigger_rename(vnode_t *, char *, vnode_t *, char *,
    cred_t *, caller_context_t *, int);
static int nfs4_trigger_mkdir(vnode_t *, char *, struct vattr *,
    vnode_t **, cred_t *, caller_context_t *, int, vsecattr_t *vsecp);
static int nfs4_trigger_rmdir(vnode_t *, char *, vnode_t *, cred_t *,
    caller_context_t *, int);
static int nfs4_trigger_symlink(vnode_t *, char *, struct vattr *, char *,
    cred_t *, caller_context_t *, int);
static int nfs4_trigger_cmp(vnode_t *, vnode_t *, caller_context_t *);

/*
 * Regular NFSv4 vnodeops that we need to reference directly
 */
extern int	nfs4_getattr(vnode_t *, struct vattr *, int, cred_t *,
    caller_context_t *);
extern void	nfs4_inactive(vnode_t *, cred_t *, caller_context_t *);
extern int	nfs4_rwlock(vnode_t *, int, caller_context_t *);
extern void	nfs4_rwunlock(vnode_t *, int, caller_context_t *);
extern int	nfs4_lookup(vnode_t *, char *, vnode_t **,
    struct pathname *, int, vnode_t *, cred_t *,
    caller_context_t *, int *, pathname_t *);
extern int	nfs4_pathconf(vnode_t *, int, ulong_t *, cred_t *,
    caller_context_t *);
extern int	nfs4_getsecattr(vnode_t *, vsecattr_t *, int, cred_t *,
    caller_context_t *);
extern int	nfs4_fid(vnode_t *, fid_t *, caller_context_t *);
extern int	nfs4_realvp(vnode_t *, vnode_t **, caller_context_t *);

static int	nfs4_trigger_mount(vnode_t *, vnode_t **);
static int	nfs4_trigger_domount(vnode_t *, domount_args_t *, vfs_t **,
    cred_t *);
static domount_args_t	*nfs4_trigger_domount_args_create(vnode_t *);
static void	nfs4_trigger_domount_args_destroy(domount_args_t *dma,
    vnode_t *vp);
static ephemeral_servinfo_t *nfs4_trigger_esi_create(vnode_t *, servinfo4_t *);
static void	nfs4_trigger_esi_destroy(ephemeral_servinfo_t *, vnode_t *);
static ephemeral_servinfo_t *nfs4_trigger_esi_create_mirrormount(vnode_t *,
    servinfo4_t *);
static struct nfs_args	*nfs4_trigger_nargs_create(mntinfo4_t *, servinfo4_t *,
    ephemeral_servinfo_t *);
static void	nfs4_trigger_nargs_destroy(struct nfs_args *);
static char	*nfs4_trigger_create_mntopts(vfs_t *);
static void	nfs4_trigger_destroy_mntopts(char *);
static int	nfs4_trigger_add_mntopt(char *, char *, vfs_t *);
static enum clnt_stat	nfs4_trigger_ping_server(servinfo4_t *, int);

extern int	umount2_engine(vfs_t *, int, cred_t *, int);


vnodeops_t *nfs4_trigger_vnodeops;

/*
 * These are the vnodeops that we must define for stub vnodes.
 *
 *
 * Many of the VOPs defined for NFSv4 do not need to be defined here,
 * for various reasons. This will result in the VFS default function being
 * used:
 *
 * - These VOPs require a previous VOP_OPEN to have occurred. That will have
 *   lost the reference to the stub vnode, meaning these should not be called:
 *	close, read, write, ioctl, readdir, seek.
 *
 * - These VOPs are meaningless for vnodes without data pages. Since the
 *   stub vnode is of type VDIR, these should not be called:
 *	space, getpage, putpage, map, addmap, delmap, pageio, fsync.
 *
 * - These VOPs are otherwise not applicable, and should not be called:
 *	dump, setsecattr.
 *
 *
 * These VOPs we do not want to define, but nor do we want the VFS default
 * action. Instead, we specify the VFS error function, with fs_error(), but
 * note that fs_error() is not actually called. Instead it results in the
 * use of the error function defined for the particular VOP, in vn_ops_table[]:
 *
 * - frlock, dispose, shrlock.
 *
 *
 * These VOPs we define to use the corresponding regular NFSv4 vnodeop.
 * NOTE: if any of these ops involve an OTW call with the stub FH, then
 * that call must be wrapped with save_mnt_secinfo()/check_mnt_secinfo()
 * to protect the security data in the servinfo4_t for the "parent"
 * filesystem that contains the stub.
 *
 * - These VOPs should not trigger a mount, so that "ls -l" does not:
 *	pathconf, getsecattr.
 *
 * - These VOPs would not make sense to trigger:
 *	inactive, rwlock, rwunlock, fid, realvp.
 */
const fs_operation_def_t nfs4_trigger_vnodeops_template[] = {
	VOPNAME_OPEN,		{ .vop_open = nfs4_trigger_open },
	VOPNAME_GETATTR,	{ .vop_getattr = nfs4_trigger_getattr },
	VOPNAME_SETATTR,	{ .vop_setattr = nfs4_trigger_setattr },
	VOPNAME_ACCESS,		{ .vop_access = nfs4_trigger_access },
	VOPNAME_LOOKUP,		{ .vop_lookup = nfs4_trigger_lookup },
	VOPNAME_CREATE,		{ .vop_create = nfs4_trigger_create },
	VOPNAME_REMOVE,		{ .vop_remove = nfs4_trigger_remove },
	VOPNAME_LINK,		{ .vop_link = nfs4_trigger_link },
	VOPNAME_RENAME,		{ .vop_rename = nfs4_trigger_rename },
	VOPNAME_MKDIR,		{ .vop_mkdir = nfs4_trigger_mkdir },
	VOPNAME_RMDIR,		{ .vop_rmdir = nfs4_trigger_rmdir },
	VOPNAME_SYMLINK,	{ .vop_symlink = nfs4_trigger_symlink },
	VOPNAME_READLINK,	{ .vop_readlink = nfs4_trigger_readlink },
	VOPNAME_INACTIVE,	{ .vop_inactive = nfs4_inactive },
	VOPNAME_FID,		{ .vop_fid = nfs4_fid },
	VOPNAME_RWLOCK,		{ .vop_rwlock = nfs4_rwlock },
	VOPNAME_RWUNLOCK,	{ .vop_rwunlock = nfs4_rwunlock },
	VOPNAME_REALVP,		{ .vop_realvp = nfs4_realvp },
	VOPNAME_GETSECATTR,	{ .vop_getsecattr = nfs4_getsecattr },
	VOPNAME_PATHCONF,	{ .vop_pathconf = nfs4_pathconf },
	VOPNAME_FRLOCK,		{ .error = fs_error },
	VOPNAME_DISPOSE,	{ .error = fs_error },
	VOPNAME_SHRLOCK,	{ .error = fs_error },
	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
	NULL, NULL
};

/*
 * Trigger ops for stub vnodes; for mirror mounts, etc.
 *
 * The general idea is that a "triggering" op will first call
 * nfs4_trigger_mount(), which will find out whether a mount has already
 * been triggered.
 *
 * If it has, then nfs4_trigger_mount() sets newvp to the root vnode
 * of the covering vfs.
 *
 * If a mount has not yet been triggered, nfs4_trigger_mount() will do so,
 * and again set newvp, as above.
 *
 * The triggering op may then re-issue the VOP by calling it on newvp.
 *
 * Note that some ops may perform custom action, and may or may not need
 * to trigger a mount.
 *
 * Some ops need to call the regular NFSv4 vnodeop for a stub vnode. We
 * obviously can't do this with VOP_<whatever>, since it's a stub vnode
 * and that would just recurse. Instead, we call the v4 op directly,
 * by name. This is OK, since we know that the vnode is for NFSv4,
 * otherwise it couldn't be a stub.
 *
 */

static int
nfs4_trigger_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
{
	int error;
	vnode_t *newvp;

	error = nfs4_trigger_mount(*vpp, &newvp);
	if (error)
		return (error);

	/* Release the stub vnode, as we're losing the reference to it */
	VN_RELE(*vpp);

	/* Give the caller the root vnode of the newly-mounted fs */
	*vpp = newvp;

	/* return with VN_HELD(newvp) */
	return (VOP_OPEN(vpp, flag, cr, ct));
}

/*
 * For the majority of cases, nfs4_trigger_getattr() will not trigger
 * a mount. However, if ATTR_TRIGGER is set, we are being informed
 * that we need to force the mount before we attempt to determine
 * the attributes. The intent is an atomic operation for security
 * testing.
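 *
 * Otherwise we call nfs4_getattr() directly, so that a plain stat(2)
 * of the stub (e.g. via "ls -l") does not trigger the mount.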
 */
static int
nfs4_trigger_getattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr,
    caller_context_t *ct)
{
	int error;

	if (flags & ATTR_TRIGGER) {
		vnode_t *newvp;

		error = nfs4_trigger_mount(vp, &newvp);
		if (error)
			return (error);

		error = VOP_GETATTR(newvp, vap, flags, cr, ct);
		VN_RELE(newvp);
	} else {
		error = nfs4_getattr(vp, vap, flags, cr, ct);
	}

	return (error);
}

static int
nfs4_trigger_setattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr,
    caller_context_t *ct)
{
	int error;
	vnode_t *newvp;

	error = nfs4_trigger_mount(vp, &newvp);
	if (error)
		return (error);

	error = VOP_SETATTR(newvp, vap, flags, cr, ct);
	VN_RELE(newvp);

	return (error);
}

static int
nfs4_trigger_access(vnode_t *vp, int mode, int flags, cred_t *cr,
    caller_context_t *ct)
{
	int error;
	vnode_t *newvp;

	error = nfs4_trigger_mount(vp, &newvp);
	if (error)
		return (error);

	error = VOP_ACCESS(newvp, mode, flags, cr, ct);
	VN_RELE(newvp);

	return (error);
}

static int
nfs4_trigger_lookup(vnode_t *dvp, char *nm, vnode_t **vpp,
    struct pathname *pnp, int flags, vnode_t *rdir, cred_t *cr,
    caller_context_t *ct, int *deflags, pathname_t *rpnp)
{
	int error;
	vnode_t *newdvp;
	rnode4_t *drp = VTOR4(dvp);

	ASSERT(RP_ISSTUB(drp));

	/* for now, we only support mirror-mounts */
	ASSERT(RP_ISSTUB_MIRRORMOUNT(drp));

	/*
	 * It's not legal to lookup ".." for an fs root, so we mustn't pass
	 * that up. Instead, pass onto the regular op, regardless of whether
	 * we've triggered a mount.
	 */
	if (strcmp(nm, "..") == 0)
		return (nfs4_lookup(dvp, nm, vpp, pnp, flags, rdir, cr,
		    ct, deflags, rpnp));

	error = nfs4_trigger_mount(dvp, &newdvp);
	if (error)
		return (error);

	error = VOP_LOOKUP(newdvp, nm, vpp, pnp, flags, rdir, cr, ct,
	    deflags, rpnp);
	VN_RELE(newdvp);

	return (error);
}

static int
nfs4_trigger_create(vnode_t *dvp, char *nm, struct vattr *va,
    enum vcexcl exclusive, int mode, vnode_t **vpp, cred_t *cr,
    int flags, caller_context_t *ct, vsecattr_t *vsecp)
{
	int error;
	vnode_t *newdvp;

	error = nfs4_trigger_mount(dvp, &newdvp);
	if (error)
		return (error);

	error = VOP_CREATE(newdvp, nm, va, exclusive, mode, vpp, cr,
	    flags, ct, vsecp);
	VN_RELE(newdvp);

	return (error);
}

static int
nfs4_trigger_remove(vnode_t *dvp, char *nm, cred_t *cr, caller_context_t *ct,
    int flags)
{
	int error;
	vnode_t *newdvp;

	error = nfs4_trigger_mount(dvp, &newdvp);
	if (error)
		return (error);

	error = VOP_REMOVE(newdvp, nm, cr, ct, flags);
	VN_RELE(newdvp);

	return (error);
}

static int
nfs4_trigger_link(vnode_t *tdvp, vnode_t *svp, char *tnm, cred_t *cr,
    caller_context_t *ct, int flags)
{
	int error;
	vnode_t *newtdvp;

	error = nfs4_trigger_mount(tdvp, &newtdvp);
	if (error)
		return (error);

	/*
	 * We don't check whether svp is a stub. Let the NFSv4 code
	 * detect that error, and return accordingly.
	 */
	error = VOP_LINK(newtdvp, svp, tnm, cr, ct, flags);
	VN_RELE(newtdvp);

	return (error);
}

static int
nfs4_trigger_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm,
    cred_t *cr, caller_context_t *ct, int flags)
{
	int error;
	vnode_t *newsdvp;
	rnode4_t *tdrp = VTOR4(tdvp);

	/*
	 * We know that sdvp is a stub, otherwise we would not be here.
	 *
	 * If tdvp is also a stub, there are two possibilities: it
	 * is either the same stub as sdvp [i.e. VN_CMP(sdvp, tdvp)]
	 * or it is a different stub [!VN_CMP(sdvp, tdvp)].
	 *
	 * In the former case, just trigger sdvp, and treat tdvp as
	 * though it were not a stub.
	 *
	 * In the latter case, it might be a different stub for the
	 * same server fs as sdvp, or for a different server fs.
	 * Regardless, from the client perspective this would still
	 * be a cross-filesystem rename, and should not be allowed,
	 * so return EXDEV, without triggering either mount.
	 */
	if (RP_ISSTUB(tdrp) && !VN_CMP(sdvp, tdvp))
		return (EXDEV);

	error = nfs4_trigger_mount(sdvp, &newsdvp);
	if (error)
		return (error);

	error = VOP_RENAME(newsdvp, snm, tdvp, tnm, cr, ct, flags);

	VN_RELE(newsdvp);

	return (error);
}

/* ARGSUSED */
static int
nfs4_trigger_mkdir(vnode_t *dvp, char *nm, struct vattr *va, vnode_t **vpp,
    cred_t *cr, caller_context_t *ct, int flags, vsecattr_t *vsecp)
{
	int error;
	vnode_t *newdvp;

	error = nfs4_trigger_mount(dvp, &newdvp);
	if (error)
		return (error);

	error = VOP_MKDIR(newdvp, nm, va, vpp, cr, ct, flags, vsecp);
	VN_RELE(newdvp);

	return (error);
}

static int
nfs4_trigger_rmdir(vnode_t *dvp, char *nm, vnode_t *cdir, cred_t *cr,
    caller_context_t *ct, int flags)
{
	int error;
	vnode_t *newdvp;

	error = nfs4_trigger_mount(dvp, &newdvp);
	if (error)
		return (error);

	error = VOP_RMDIR(newdvp, nm, cdir, cr, ct, flags);
	VN_RELE(newdvp);

	return (error);
}

static int
nfs4_trigger_symlink(vnode_t *dvp, char *lnm, struct vattr *tva, char *tnm,
    cred_t *cr, caller_context_t *ct, int flags)
{
	int error;
	vnode_t *newdvp;

	error = nfs4_trigger_mount(dvp, &newdvp);
	if (error)
		return (error);

	error = VOP_SYMLINK(newdvp, lnm, tva, tnm, cr, ct, flags);
	VN_RELE(newdvp);

	return (error);
}

static int
nfs4_trigger_readlink(vnode_t *vp, struct uio *uiop, cred_t *cr,
    caller_context_t *ct)
{
	int error;
	vnode_t *newvp;

	error = nfs4_trigger_mount(vp, &newvp);
	if (error)
		return (error);

	error = VOP_READLINK(newvp, uiop, cr, ct);
	VN_RELE(newvp);

	return (error);
}

/* end of trigger vnode ops */


/*
 * Mount upon a trigger vnode; for mirror-mounts, etc.
 *
 * The mount may have already occurred, via another thread. If not,
 * assemble the location information - which may require fetching - and
 * perform the mount.
 *
 * Sets newvp to be the root of the fs that is now covering vp. Note
 * that we return with VN_HELD(*newvp).
 *
 * The caller is responsible for passing the VOP onto the covering fs.
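 *
 * A typical triggering VOP therefore follows this pattern (a sketch
 * of the convention used by the ops above):
 *
 *	error = nfs4_trigger_mount(vp, &newvp);
 *	if (error)
 *		return (error);
 *	error = VOP_<whatever>(newvp, ...);
 *	VN_RELE(newvp);
 *	return (error);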
 */
static int
nfs4_trigger_mount(vnode_t *vp, vnode_t **newvpp)
{
	int			error;
	vfs_t			*vfsp;
	rnode4_t		*rp = VTOR4(vp);
	mntinfo4_t		*mi = VTOMI4(vp);
	domount_args_t		*dma;

	nfs4_ephemeral_tree_t	*net;

	bool_t			must_unlock = FALSE;
	bool_t			is_building = FALSE;

	cred_t			*zcred;

	nfs4_trigger_globals_t	*ntg;

	zone_t			*zone = curproc->p_zone;

	ASSERT(RP_ISSTUB(rp));

	/* for now, we only support mirror-mounts */
	ASSERT(RP_ISSTUB_MIRRORMOUNT(rp));

	*newvpp = NULL;

	/*
	 * Has the mount already occurred?
	 */
	error = vn_vfsrlock_wait(vp);
	if (error)
		goto done;
	vfsp = vn_mountedvfs(vp);
	if (vfsp != NULL) {
		/* the mount has already occurred */
		error = VFS_ROOT(vfsp, newvpp);
		if (!error) {
			/* need to update the reference time */
			mutex_enter(&mi->mi_lock);
			if (mi->mi_ephemeral)
				mi->mi_ephemeral->ne_ref_time =
				    gethrestime_sec();
			mutex_exit(&mi->mi_lock);
		}

		vn_vfsunlock(vp);
		goto done;
	}
	vn_vfsunlock(vp);

	ntg = zone_getspecific(nfs4_ephemeral_key, zone);
	ASSERT(ntg != NULL);

	mutex_enter(&mi->mi_lock);

	/*
	 * We need to lock down the ephemeral tree.
	 */
	if (mi->mi_ephemeral_tree == NULL) {
		net = kmem_zalloc(sizeof (*net), KM_SLEEP);
		mutex_init(&net->net_tree_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&net->net_cnt_lock, NULL, MUTEX_DEFAULT, NULL);
		net->net_refcnt = 1;
		net->net_status = NFS4_EPHEMERAL_TREE_BUILDING;
		is_building = TRUE;

		/*
		 * We need to add it to the zone specific list for
		 * automatic unmounting and harvesting of deadwood.
		 */
		mutex_enter(&ntg->ntg_forest_lock);
		if (ntg->ntg_forest != NULL)
			net->net_next = ntg->ntg_forest;
		ntg->ntg_forest = net;
		mutex_exit(&ntg->ntg_forest_lock);

		/*
		 * No lock order confusion with mi_lock because no
		 * other node could have grabbed net_tree_lock.
		 */
		mutex_enter(&net->net_tree_lock);
		mi->mi_ephemeral_tree = net;
		net->net_mount = mi;
		mutex_exit(&mi->mi_lock);
	} else {
		net = mi->mi_ephemeral_tree;
		mutex_exit(&mi->mi_lock);

		mutex_enter(&net->net_cnt_lock);
		net->net_refcnt++;
		mutex_exit(&net->net_cnt_lock);

		/*
		 * Note that we do not do any checks to
		 * see if the parent has been nuked.
		 * We count on the vfs layer having protected
		 * us from feet shooters.
		 */
		mutex_enter(&net->net_tree_lock);
	}

	mutex_enter(&net->net_cnt_lock);
	net->net_status |= NFS4_EPHEMERAL_TREE_MOUNTING;
	mutex_exit(&net->net_cnt_lock);

	must_unlock = TRUE;

	dma = nfs4_trigger_domount_args_create(vp);
	if (dma == NULL) {
		error = EINVAL;
		goto done;
	}

	/*
	 * Need to be root for this call to make mount work.
	 * Note that since we define mirror mounts to work
	 * for any user, we allow the mount to proceed. And
	 * we realize that the server will perform security
	 * checks to make sure that the client is allowed
	 * access. Finally, once the mount takes place,
	 * directory permissions will ensure that the
	 * content is secure.
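	 *
	 * zone_get_kcred() hands us a credential for the zone's
	 * kernel identity; it is our reference, so we crfree() it
	 * once the mount attempt is done (see below).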
	 */
	zcred = zone_get_kcred(getzoneid());
	ASSERT(zcred != NULL);

	error = nfs4_trigger_domount(vp, dma, &vfsp, zcred);
	nfs4_trigger_domount_args_destroy(dma, vp);

	crfree(zcred);

	if (!error)
		error = VFS_ROOT(vfsp, newvpp);
done:
	if (must_unlock) {
		mutex_enter(&net->net_cnt_lock);
		net->net_status &= ~NFS4_EPHEMERAL_TREE_MOUNTING;
		if (is_building)
			net->net_status &= ~NFS4_EPHEMERAL_TREE_BUILDING;
		net->net_refcnt--;
		mutex_exit(&net->net_cnt_lock);

		mutex_exit(&net->net_tree_lock);
	}

	if (!error && (newvpp == NULL || *newvpp == NULL))
		error = ENOSYS;

	return (error);
}

/*
 * Collect together both the generic & mount-type specific args.
 */
static domount_args_t *
nfs4_trigger_domount_args_create(vnode_t *vp)
{
	int nointr;
	char *hostlist;
	servinfo4_t *svp;
	struct nfs_args *nargs, *nargs_head;
	enum clnt_stat status;
	ephemeral_servinfo_t *esi, *esi_first;
	domount_args_t *dma;
	mntinfo4_t *mi = VTOMI4(vp);

	nointr = !(mi->mi_flags & MI4_INT);
	hostlist = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

	svp = mi->mi_curr_serv;
	/* check if the current server is responding */
	status = nfs4_trigger_ping_server(svp, nointr);
	if (status == RPC_SUCCESS) {
		esi_first = nfs4_trigger_esi_create(vp, svp);
		if (esi_first == NULL) {
			kmem_free(hostlist, MAXPATHLEN);
			return (NULL);
		}

		(void) strlcpy(hostlist, esi_first->esi_hostname, MAXPATHLEN);

		nargs_head = nfs4_trigger_nargs_create(mi, svp, esi_first);
	} else {
		/* current server did not respond */
		esi_first = NULL;
		nargs_head = NULL;
	}
	nargs = nargs_head;

	/*
	 * NFS RO failover.
	 *
	 * If we have multiple servinfo4 structures, linked via sv_next,
	 * we must create one nfs_args for each, linking the nfs_args via
	 * nfs_ext_u.nfs_extB.next.
	 *
	 * We need to build a corresponding esi for each, too, but that is
	 * used solely for building nfs_args, and may be immediately
	 * discarded, as domount() requires the info from just one esi,
	 * but all the nfs_args.
	 *
	 * Currently, the NFS mount code will hang if not all servers
	 * requested are available. To avoid that, we need to ping each
	 * server, here, and remove it from the list if it is not
	 * responding. This has the side-effect of that server then
	 * being permanently unavailable for this failover mount, even if
	 * it recovers. That's unfortunate, but the best we can do until
	 * the mount code path is fixed.
	 */

	/*
	 * If the current server was down, loop indefinitely until we find
	 * at least one responsive server.
	 */
	do {
		/* no locking needed for sv_next; it is only set at fs mount */
		for (svp = mi->mi_servers; svp != NULL; svp = svp->sv_next) {
			struct nfs_args *next;

			/*
			 * nargs_head: the head of the nfs_args list
			 * nargs: the current tail of the list
			 * next: the newly-created element to be added
			 */

			/*
			 * We've already tried the current server, above;
			 * if it was responding, we have already included it
			 * and it may now be ignored.
			 *
			 * Otherwise, try it again, since it may now have
			 * recovered.
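			 *
			 * As each responsive server is added below, its
			 * hostname is also appended to 'hostlist',
			 * yielding the comma-separated failover list for
			 * the mount spec (e.g. "serverA,serverB" - an
			 * illustrative pair of names).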
			 */
			if (svp == mi->mi_curr_serv && esi_first != NULL)
				continue;

			(void) nfs_rw_enter_sig(&svp->sv_lock, RW_READER, 0);
			if (svp->sv_flags & SV4_NOTINUSE) {
				nfs_rw_exit(&svp->sv_lock);
				continue;
			}
			nfs_rw_exit(&svp->sv_lock);

			/* check if the server is responding */
			status = nfs4_trigger_ping_server(svp, nointr);
			/* if the server did not respond, ignore it */
			if (status != RPC_SUCCESS)
				continue;

			esi = nfs4_trigger_esi_create(vp, svp);
			if (esi == NULL)
				continue;

			/*
			 * If the original current server (mi_curr_serv)
			 * was down when we first tried it,
			 * (i.e. esi_first == NULL),
			 * we select this new server (svp) to be the server
			 * that we will actually contact (esi_first).
			 *
			 * Note that it's possible that mi_curr_serv == svp,
			 * if that mi_curr_serv was down but has now recovered.
			 */
			next = nfs4_trigger_nargs_create(mi, svp, esi);
			if (esi_first == NULL) {
				ASSERT(nargs == NULL);
				ASSERT(nargs_head == NULL);
				nargs_head = next;
				esi_first = esi;
				(void) strlcpy(hostlist,
				    esi_first->esi_hostname, MAXPATHLEN);
			} else {
				ASSERT(nargs_head != NULL);
				nargs->nfs_ext_u.nfs_extB.next = next;
				(void) strlcat(hostlist, ",", MAXPATHLEN);
				(void) strlcat(hostlist, esi->esi_hostname,
				    MAXPATHLEN);
				/* esi was only needed for hostname & nargs */
				nfs4_trigger_esi_destroy(esi, vp);
			}

			nargs = next;
		}

		/* if we've had no response at all, wait a second */
		if (esi_first == NULL)
			delay(drv_usectohz(1000000));

	} while (esi_first == NULL);
	ASSERT(nargs_head != NULL);

	dma = kmem_zalloc(sizeof (domount_args_t), KM_SLEEP);
	dma->dma_esi = esi_first;
	dma->dma_hostlist = hostlist;
	dma->dma_nargs = nargs_head;

	return (dma);
}

static void
nfs4_trigger_domount_args_destroy(domount_args_t *dma, vnode_t *vp)
{
	if (dma != NULL) {
		if (dma->dma_esi != NULL && vp != NULL)
			nfs4_trigger_esi_destroy(dma->dma_esi, vp);

		if (dma->dma_hostlist != NULL)
			kmem_free(dma->dma_hostlist, MAXPATHLEN);

		if (dma->dma_nargs != NULL) {
			struct nfs_args *nargs = dma->dma_nargs;

			do {
				struct nfs_args *next =
				    nargs->nfs_ext_u.nfs_extB.next;

				nfs4_trigger_nargs_destroy(nargs);
				nargs = next;
			} while (nargs != NULL);
		}

		kmem_free(dma, sizeof (domount_args_t));
	}
}

/*
 * The ephemeral_servinfo_t struct contains basic information we will need to
 * perform the mount. Whilst the structure is generic across different
 * types of ephemeral mount, the way we gather its contents differs.
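 * (For now only the mirror-mount variant exists; its esi is built
 * entirely from the parent mount's servinfo4_t plus the stub's path.)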
 */
static ephemeral_servinfo_t *
nfs4_trigger_esi_create(vnode_t *vp, servinfo4_t *svp)
{
	ephemeral_servinfo_t *esi;
	rnode4_t *rp = VTOR4(vp);

	ASSERT(RP_ISSTUB(rp));

	/* Call the ephemeral type-specific routine */
	if (RP_ISSTUB_MIRRORMOUNT(rp))
		esi = nfs4_trigger_esi_create_mirrormount(vp, svp);
	else
		esi = NULL;

	/* for now, we only support mirror-mounts */
	ASSERT(esi != NULL);

	return (esi);
}

static void
nfs4_trigger_esi_destroy(ephemeral_servinfo_t *esi, vnode_t *vp)
{
	rnode4_t *rp = VTOR4(vp);

	ASSERT(RP_ISSTUB(rp));

	/* for now, we only support mirror-mounts */
	ASSERT(RP_ISSTUB_MIRRORMOUNT(rp));

	/* Currently, no need for an ephemeral type-specific routine */

	/*
	 * The contents of ephemeral_servinfo_t goes into nfs_args,
	 * and will be handled by nfs4_trigger_nargs_destroy().
	 * We need only free the structure itself.
	 */
	if (esi != NULL)
		kmem_free(esi, sizeof (ephemeral_servinfo_t));
}

/*
 * Some of this may turn out to be common with other ephemeral types,
 * in which case it should be moved to nfs4_trigger_esi_create(), or a
 * common function called.
 */
static ephemeral_servinfo_t *
nfs4_trigger_esi_create_mirrormount(vnode_t *vp, servinfo4_t *svp)
{
	char			*stubpath;
	struct knetconfig	*sikncp, *svkncp;
	struct netbuf		*bufp;
	ephemeral_servinfo_t	*esi;

	esi = kmem_zalloc(sizeof (ephemeral_servinfo_t), KM_SLEEP);

	/* initially set to be our type of ephemeral mount; may be added to */
	esi->esi_mount_flags = NFSMNT_MIRRORMOUNT;

	/*
	 * We're copying info from the stub rnode's servinfo4, but
	 * we must create new copies, not pointers, since this information
	 * is to be associated with the new mount, which will be
	 * unmounted (and its structures freed) separately
	 */

	/*
	 * Sizes passed to kmem_[z]alloc here must match those freed
	 * in nfs4_free_args()
	 */

	/*
	 * We hold sv_lock across kmem_zalloc() calls that may sleep, but this
	 * is difficult to avoid: as we need to read svp to calculate the
	 * sizes to be allocated.
	 */
	(void) nfs_rw_enter_sig(&svp->sv_lock, RW_READER, 0);

	esi->esi_hostname = kmem_zalloc(strlen(svp->sv_hostname) + 1, KM_SLEEP);
	(void) strcat(esi->esi_hostname, svp->sv_hostname);

	esi->esi_addr = kmem_zalloc(sizeof (struct netbuf), KM_SLEEP);
	bufp = esi->esi_addr;
	bufp->len = svp->sv_addr.len;
	bufp->maxlen = svp->sv_addr.maxlen;
	bufp->buf = kmem_zalloc(bufp->len, KM_SLEEP);
	bcopy(svp->sv_addr.buf, bufp->buf, bufp->len);

	esi->esi_knconf = kmem_zalloc(sizeof (*esi->esi_knconf), KM_SLEEP);
	sikncp = esi->esi_knconf;
	svkncp = svp->sv_knconf;
	sikncp->knc_semantics = svkncp->knc_semantics;
	sikncp->knc_protofmly = (caddr_t)kmem_zalloc(KNC_STRSIZE, KM_SLEEP);
	(void) strcat((char *)sikncp->knc_protofmly,
	    (char *)svkncp->knc_protofmly);
	sikncp->knc_proto = (caddr_t)kmem_zalloc(KNC_STRSIZE, KM_SLEEP);
	(void) strcat((char *)sikncp->knc_proto, (char *)svkncp->knc_proto);
	sikncp->knc_rdev = svkncp->knc_rdev;

	/*
	 * Used when AUTH_DH is negotiated.
	 *
	 * This is ephemeral mount-type specific, since it contains the
	 * server's time-sync syncaddr.
	 */
	if (svp->sv_dhsec) {
		struct netbuf		*bufp;
		sec_data_t		*sdata;
		dh_k4_clntdata_t	*data;

		sdata = svp->sv_dhsec;
		data = (dh_k4_clntdata_t *)sdata->data;
		ASSERT(sdata->rpcflavor == AUTH_DH);

		bufp = kmem_zalloc(sizeof (struct netbuf), KM_SLEEP);
		bufp->len = data->syncaddr.len;
		bufp->maxlen = data->syncaddr.maxlen;
		bufp->buf = kmem_zalloc(bufp->len, KM_SLEEP);
		bcopy(data->syncaddr.buf, bufp->buf, bufp->len);
		esi->esi_syncaddr = bufp;

		if (data->netname != NULL) {
			int nmlen = data->netnamelen;

			/*
			 * We need to copy from a dh_k4_clntdata_t
			 * netname/netnamelen pair to a NUL-terminated
			 * netname string suitable for putting in nfs_args,
			 * where the latter has no netnamelen field.
			 */
			esi->esi_netname = kmem_zalloc(nmlen + 1, KM_SLEEP);
			bcopy(data->netname, esi->esi_netname, nmlen);
		}
	} else {
		esi->esi_syncaddr = NULL;
		esi->esi_netname = NULL;
	}

	stubpath = fn_path(VTOSV(vp)->sv_name);
	/* step over initial '.', to avoid e.g. sv_path: "/tank./ws" */
	ASSERT(*stubpath == '.');
	stubpath += 1;

	/* for nfs_args->fh */
	esi->esi_path_len = strlen(svp->sv_path) + strlen(stubpath) + 1;
	esi->esi_path = kmem_zalloc(esi->esi_path_len, KM_SLEEP);
	(void) strcat(esi->esi_path, svp->sv_path);
	(void) strcat(esi->esi_path, stubpath);

	stubpath -= 1;
	/* stubpath allocated by fn_path() */
	kmem_free(stubpath, strlen(stubpath) + 1);

	nfs_rw_exit(&svp->sv_lock);

	return (esi);
}

/*
 * Assemble the args, and call the generic VFS mount function to
 * finally perform the ephemeral mount.
 */
static int
nfs4_trigger_domount(vnode_t *stubvp, domount_args_t *dma, vfs_t **vfsp,
    cred_t *cr)
{
	struct mounta	*uap;
	char		*mntpt, *orig_path, *path;
	const char	*orig_mntpt;
	int		retval;
	int		mntpt_len;
	int		spec_len;
	zone_t		*zone = curproc->p_zone;
	bool_t		has_leading_slash;

	vfs_t			*stubvfsp = stubvp->v_vfsp;
	ephemeral_servinfo_t	*esi = dma->dma_esi;
	struct nfs_args		*nargs = dma->dma_nargs;

	/* first, construct the mount point for the ephemeral mount */
	orig_path = path = fn_path(VTOSV(stubvp)->sv_name);
	orig_mntpt = (char *)refstr_value(stubvfsp->vfs_mntpt);

	if (*orig_path == '.')
		orig_path++;

	/*
	 * Get rid of zone's root path
	 */
	if (zone != global_zone) {
		/*
		 * -1 for trailing '/' and -1 for EOS.
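		 *
		 * E.g. (illustrative paths): with zone_rootpath
		 * "/zones/z1/root/" and orig_mntpt
		 * "/zones/z1/root/tank", stepping forward by
		 * zone_rootpathlen - 2 leaves orig_mntpt at "/tank",
		 * re-using the rootpath's trailing '/' as the leading
		 * '/' of the zone-relative mount point.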
		 */
		if (strncmp(zone->zone_rootpath, orig_mntpt,
		    zone->zone_rootpathlen - 1) == 0) {
			orig_mntpt += (zone->zone_rootpathlen - 2);
		}
	}

	mntpt_len = strlen(orig_mntpt) + strlen(orig_path);
	mntpt = kmem_zalloc(mntpt_len + 1, KM_SLEEP);
	(void) strcat(mntpt, orig_mntpt);
	(void) strcat(mntpt, orig_path);

	kmem_free(path, strlen(path) + 1);
	path = esi->esi_path;
	if (*path == '.')
		path++;
	if (path[0] == '/' && path[1] == '/')
		path++;
	has_leading_slash = (*path == '/');

	spec_len = strlen(dma->dma_hostlist);
	spec_len += strlen(path);

	/* We are going to have to add this in */
	if (!has_leading_slash)
		spec_len++;

	/* We need to get the ':' for dma_hostlist:esi_path */
	spec_len++;

	uap = kmem_zalloc(sizeof (struct mounta), KM_SLEEP);
	uap->spec = kmem_zalloc(spec_len + 1, KM_SLEEP);
	(void) snprintf(uap->spec, spec_len + 1, "%s:%s%s", dma->dma_hostlist,
	    has_leading_slash ? "" : "/", path);

	uap->dir = mntpt;

	uap->flags = MS_SYSSPACE | MS_DATA;
	/* fstype-independent mount options not covered elsewhere */
	/* copy parent's mount(1M) "-m" flag */
	if (stubvfsp->vfs_flag & VFS_NOMNTTAB)
		uap->flags |= MS_NOMNTTAB;

	uap->fstype = MNTTYPE_NFS4;
	uap->dataptr = (char *)nargs;
	/* not needed for MS_SYSSPACE */
	uap->datalen = 0;

	/* use optptr to pass in extra mount options */
	uap->flags |= MS_OPTIONSTR;
	uap->optptr = nfs4_trigger_create_mntopts(stubvfsp);
	if (uap->optptr == NULL) {
		retval = EINVAL;
		goto done;
	}
	/* domount() expects us to count the trailing NUL */
	uap->optlen = strlen(uap->optptr) + 1;

	retval = domount(NULL, uap, stubvp, cr, vfsp);
	if (retval == 0)
		VFS_RELE(*vfsp);
done:
	if (uap->optptr)
		nfs4_trigger_destroy_mntopts(uap->optptr);

	kmem_free(uap->spec, spec_len + 1);
	kmem_free(uap, sizeof (struct mounta));
	kmem_free(mntpt, mntpt_len + 1);

	return (retval);
}

/*
 * Build an nfs_args structure for passing to domount().
 *
 * Ephemeral mount-type specific data comes from the ephemeral_servinfo_t;
 * generic data - common to all ephemeral mount types - is read directly
 * from the parent mount's servinfo4_t and mntinfo4_t, via the stub vnode.
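 *
 * Note that the esi's pointers (esi_addr, esi_knconf, esi_hostname,
 * esi_path, etc.) are placed directly into the nfs_args, not copied;
 * they are ultimately freed via nfs4_trigger_nargs_destroy() and
 * nfs4_free_args(), not by nfs4_trigger_esi_destroy().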
 */
static struct nfs_args *
nfs4_trigger_nargs_create(mntinfo4_t *mi, servinfo4_t *svp,
    ephemeral_servinfo_t *esi)
{
	sec_data_t *secdata;
	struct nfs_args *nargs;

	/* setup the nfs args */
	nargs = kmem_zalloc(sizeof (struct nfs_args), KM_SLEEP);

	(void) nfs_rw_enter_sig(&svp->sv_lock, RW_READER, 0);

	nargs->addr = esi->esi_addr;

	/* for AUTH_DH by negotiation */
	if (esi->esi_syncaddr || esi->esi_netname) {
		nargs->flags |= NFSMNT_SECURE;
		nargs->syncaddr = esi->esi_syncaddr;
		nargs->netname = esi->esi_netname;
	}

	nargs->flags |= NFSMNT_KNCONF;
	nargs->knconf = esi->esi_knconf;
	nargs->flags |= NFSMNT_HOSTNAME;
	nargs->hostname = esi->esi_hostname;
	nargs->fh = esi->esi_path;

	/* general mount settings, all copied from parent mount */
	mutex_enter(&mi->mi_lock);

	if (!(mi->mi_flags & MI4_HARD))
		nargs->flags |= NFSMNT_SOFT;

	nargs->flags |= NFSMNT_WSIZE | NFSMNT_RSIZE | NFSMNT_TIMEO |
	    NFSMNT_RETRANS;
	nargs->wsize = mi->mi_stsize;
	nargs->rsize = mi->mi_tsize;
	nargs->timeo = mi->mi_timeo;
	nargs->retrans = mi->mi_retrans;

	if (mi->mi_flags & MI4_INT)
		nargs->flags |= NFSMNT_INT;
	if (mi->mi_flags & MI4_NOAC)
		nargs->flags |= NFSMNT_NOAC;

	nargs->flags |= NFSMNT_ACREGMIN | NFSMNT_ACREGMAX | NFSMNT_ACDIRMIN |
	    NFSMNT_ACDIRMAX;
	nargs->acregmin = HR2SEC(mi->mi_acregmin);
	nargs->acregmax = HR2SEC(mi->mi_acregmax);
	nargs->acdirmin = HR2SEC(mi->mi_acdirmin);
	nargs->acdirmax = HR2SEC(mi->mi_acdirmax);

	if (mi->mi_flags & MI4_NOCTO)
		nargs->flags |= NFSMNT_NOCTO;
	if (mi->mi_flags & MI4_GRPID)
		nargs->flags |= NFSMNT_GRPID;
	if (mi->mi_flags & MI4_LLOCK)
		nargs->flags |= NFSMNT_LLOCK;
	if (mi->mi_flags & MI4_NOPRINT)
		nargs->flags |= NFSMNT_NOPRINT;
	if (mi->mi_flags & MI4_DIRECTIO)
		nargs->flags |= NFSMNT_DIRECTIO;
	if (mi->mi_flags & MI4_PUBLIC)
		nargs->flags |= NFSMNT_PUBLIC;

	mutex_exit(&mi->mi_lock);

	/* add any specific flags for this type of ephemeral mount */
	nargs->flags |= esi->esi_mount_flags;

	/*
	 * Security data & negotiation policy.
	 *
	 * We need to preserve the parent mount's preference for security
	 * negotiation, translating SV4_TRYSECDEFAULT -> NFSMNT_SECDEFAULT.
	 *
	 * If SV4_TRYSECDEFAULT is not set, that indicates that a specific
	 * security flavour was requested, with data in sv_secdata, and that
	 * no negotiation should occur. If this specified flavour fails, that's
	 * it. We will copy sv_secdata, and not set NFSMNT_SECDEFAULT.
	 *
	 * If SV4_TRYSECDEFAULT is set, then we start with a passed-in
	 * default flavour, in sv_secdata, but then negotiate a new flavour.
	 * Possible flavours are recorded in an array in sv_secinfo, with
	 * currently in-use flavour pointed to by sv_currsec.
	 *
	 * If sv_currsec is set, i.e. if negotiation has already occurred,
	 * we will copy sv_currsec. Otherwise, copy sv_secdata. Regardless,
	 * we will set NFSMNT_SECDEFAULT, to enable negotiation.
	 */
	if (svp->sv_flags & SV4_TRYSECDEFAULT) {
		/* enable negotiation for ephemeral mount */
		nargs->flags |= NFSMNT_SECDEFAULT;

		/*
		 * As a starting point for negotiation, copy parent
		 * mount's negotiated flavour (sv_currsec) if available,
		 * or its passed-in flavour (sv_secdata) if not.
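		 *
		 * I.e. (sketch):
		 *	sv_currsec set:		secdata = copy of sv_currsec
		 *	else sv_secdata set:	secdata = copy of sv_secdata
		 *	else:			secdata = NULL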
		 */
		if (svp->sv_currsec != NULL)
			secdata = copy_sec_data(svp->sv_currsec);
		else if (svp->sv_secdata != NULL)
			secdata = copy_sec_data(svp->sv_secdata);
		else
			secdata = NULL;
	} else {
		/* do not enable negotiation; copy parent's passed-in flavour */
		if (svp->sv_secdata != NULL)
			secdata = copy_sec_data(svp->sv_secdata);
		else
			secdata = NULL;
	}

	nfs_rw_exit(&svp->sv_lock);

	nargs->flags |= NFSMNT_NEWARGS;
	nargs->nfs_args_ext = NFS_ARGS_EXTB;
	nargs->nfs_ext_u.nfs_extB.secdata = secdata;

	/* for NFS RO failover; caller will set if necessary */
	nargs->nfs_ext_u.nfs_extB.next = NULL;

	return (nargs);
}

static void
nfs4_trigger_nargs_destroy(struct nfs_args *nargs)
{
	/*
	 * Either the mount failed, in which case the data is not needed, or
	 * nfs4_mount() has either taken copies of what it needs or,
	 * where it has merely copied the ptr, it has set *our* ptr to NULL,
	 * whereby nfs4_free_args() will ignore it.
	 */
	nfs4_free_args(nargs);
	kmem_free(nargs, sizeof (struct nfs_args));
}

/*
 * When we finally get into the mounting, we need to add this
 * node to the ephemeral tree.
 *
 * This is called from nfs4_mount().
 */
void
nfs4_record_ephemeral_mount(mntinfo4_t *mi, vnode_t *mvp)
{
	mntinfo4_t		*mi_parent;
	nfs4_ephemeral_t	*eph;
	nfs4_ephemeral_tree_t	*net;

	nfs4_ephemeral_t	*prior;
	nfs4_ephemeral_t	*child;

	nfs4_ephemeral_t	*peer;

	nfs4_trigger_globals_t	*ntg;
	zone_t			*zone = curproc->p_zone;

	mi_parent = VTOMI4(mvp);

	/*
	 * Get this before grabbing anything else!
	 */
	ntg = zone_getspecific(nfs4_ephemeral_key, zone);
	if (!ntg->ntg_thread_started) {
		nfs4_ephemeral_start_harvester(ntg);
	}

	mutex_enter(&mi_parent->mi_lock);
	mutex_enter(&mi->mi_lock);

	/*
	 * We need to tack together the ephemeral mount
	 * with this new mntinfo.
	 */
	eph = kmem_zalloc(sizeof (*eph), KM_SLEEP);
	eph->ne_mount = mi;
	eph->ne_ref_time = gethrestime_sec();

	/*
	 * We need to tell the ephemeral mount when
	 * to time out.
	 */
	eph->ne_mount_to = ntg->ntg_mount_to;

	mi->mi_flags |= MI4_EPHEMERAL;
	mi->mi_ephemeral = eph;

	net = mi->mi_ephemeral_tree =
	    mi_parent->mi_ephemeral_tree;
	ASSERT(net != NULL);

	/*
	 * If the enclosing mntinfo4 is also ephemeral,
	 * then we need to point to its enclosing parent.
	 * Else the enclosing mntinfo4 is the enclosing parent.
	 *
	 * We also need to weave this ephemeral node
	 * into the tree.
	 */
	if (mi_parent->mi_flags & MI4_EPHEMERAL) {
		/*
		 * We need to decide if we are
		 * the root node of this branch
		 * or if we are a sibling of this
		 * branch.
		 */
		prior = mi_parent->mi_ephemeral;
		ASSERT(prior != NULL);
		if (prior->ne_child == NULL) {
			prior->ne_child = eph;
		} else {
			child = prior->ne_child;

			prior->ne_child = eph;
			eph->ne_peer = child;

			child->ne_prior = eph;
		}

		eph->ne_prior = prior;
	} else {
		/*
		 * The parent mntinfo4 is the non-ephemeral
		 * root of the ephemeral tree. We
		 * need to decide if we are the root
		 * node of that tree or if we are a
		 * sibling of the root node.
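		 *
		 * In the sibling case, the new node is linked in at
		 * the head of the peer list (sketch):
		 *
		 *	net_root -> eph --ne_peer--> old root --> ...
		 *
		 * with the old root's ne_prior pointing back at eph.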
		 *
		 * We are the root if there is no
		 * other node.
		 */
		if (net->net_root == NULL) {
			net->net_root = eph;
		} else {
			eph->ne_peer = peer = net->net_root;
			ASSERT(peer != NULL);
			net->net_root = eph;

			peer->ne_prior = eph;
		}

		eph->ne_prior = NULL;
	}

	mutex_exit(&mi->mi_lock);
	mutex_exit(&mi_parent->mi_lock);
}

/*
 * Commit the changes to the ephemeral tree for removing this node.
 */
static void
nfs4_ephemeral_umount_cleanup(nfs4_ephemeral_t *eph)
{
	nfs4_ephemeral_t	*e = eph;
	nfs4_ephemeral_t	*peer;
	nfs4_ephemeral_t	*prior;

	peer = eph->ne_peer;
	prior = e->ne_prior;

	/*
	 * If this branch root was not the
	 * tree root, then we need to fix back pointers.
	 */
	if (prior) {
		if (prior->ne_child == e) {
			prior->ne_child = peer;
		} else {
			prior->ne_peer = peer;
		}

		if (peer)
			peer->ne_prior = prior;
	} else if (peer) {
		peer->ne_mount->mi_ephemeral_tree->net_root = peer;
		peer->ne_prior = NULL;
	} else {
		e->ne_mount->mi_ephemeral_tree->net_root = NULL;
	}
}

/*
 * We want to avoid recursion at all costs. So we need to
 * unroll the tree. We do this by a depth first traversal to
 * leaf nodes. We blast away the leaf and work our way back
 * up and down the tree.
 */
static int
nfs4_ephemeral_unmount_engine(nfs4_ephemeral_t *eph,
    int isTreeRoot, int flag, cred_t *cr)
{
	nfs4_ephemeral_t	*e = eph;
	nfs4_ephemeral_t	*prior;
	mntinfo4_t		*mi;
	vfs_t			*vfsp;
	int			error;

	/*
	 * We use the loop while unrolling the ephemeral tree.
	 */
	for (;;) {
		/*
		 * First we walk down the child.
		 */
		if (e->ne_child) {
			prior = e;
			e = e->ne_child;
			continue;
		}

		/*
		 * If we are the root of the branch we are removing,
		 * we end it here. But if the branch is the root of
		 * the tree, we have to forge on. We do not consider
		 * the peer list for the root because while it may
		 * be okay to remove, it is both extra work and a
		 * potential for a false-positive error to stall the
		 * unmount attempt.
		 */
		if (e == eph && isTreeRoot == FALSE)
			return (0);

		/*
		 * Next we walk down the peer list.
		 */
		if (e->ne_peer) {
			prior = e;
			e = e->ne_peer;
			continue;
		}

		/*
		 * We can only remove the node passed in by the
		 * caller if it is the root of the ephemeral tree.
		 * Otherwise, the caller will remove it.
		 */
		if (e == eph && isTreeRoot == FALSE)
			return (0);

		/*
		 * Okay, we have a leaf node, time
		 * to prune it!
		 *
		 * Note that prior can only be NULL if
		 * and only if it is the root of the
		 * ephemeral tree.
		 */
		prior = e->ne_prior;

		mi = e->ne_mount;
		mutex_enter(&mi->mi_lock);
		vfsp = mi->mi_vfsp;

		/*
		 * Cleared by umount2_engine.
		 */
		VFS_HOLD(vfsp);

		/*
		 * Inform nfs4_unmount to not recursively
		 * descend into this node's children when it
		 * gets processed.
		 */
		mi->mi_flags |= MI4_EPHEMERAL_RECURSED;
		mutex_exit(&mi->mi_lock);

		error = umount2_engine(vfsp, flag, cr, FALSE);
		if (error) {
			/*
			 * We need to reenable nfs4_unmount's ability
			 * to recursively descend on this node.
			 */
			mutex_enter(&mi->mi_lock);
			mi->mi_flags &= ~MI4_EPHEMERAL_RECURSED;
			mutex_exit(&mi->mi_lock);

			return (error);
		}

		/*
		 * If we are the current node, we do not want to
		 * touch anything else. At this point, the only
		 * way the current node can have survived to here
		 * is if it is the root of the ephemeral tree and
		 * we are unmounting the enclosing mntinfo4.
		 */
		if (e == eph) {
			ASSERT(prior == NULL);
			return (0);
		}

		/*
		 * Stitch up the prior node. Note that since
		 * we have handled the root of the tree, prior
		 * must be non-NULL.
		 */
		ASSERT(prior != NULL);
		if (prior->ne_child == e) {
			prior->ne_child = NULL;
		} else {
			ASSERT(prior->ne_peer == e);

			prior->ne_peer = NULL;
		}

		e = prior;
	}

	/* NOTREACHED */
}

/*
 * Common code to safely release net_cnt_lock and net_tree_lock
 */
void
nfs4_ephemeral_umount_unlock(bool_t *pmust_unlock,
    nfs4_ephemeral_tree_t **pnet)
{
	nfs4_ephemeral_tree_t	*net = *pnet;

	if (*pmust_unlock) {
		mutex_enter(&net->net_cnt_lock);
		net->net_refcnt--;
		net->net_status &= ~NFS4_EPHEMERAL_TREE_UMOUNTING;
		mutex_exit(&net->net_cnt_lock);

		mutex_exit(&net->net_tree_lock);

		*pmust_unlock = FALSE;
	}
}

/*
 * While we may have removed any child or sibling nodes of this
 * ephemeral node, we can not nuke it until we know that there
 * were no active vnodes on it. This will do that final
 * work once we know it is not busy.
 */
void
nfs4_ephemeral_umount_activate(mntinfo4_t *mi, bool_t *pmust_unlock,
    nfs4_ephemeral_tree_t **pnet)
{
	/*
	 * Now we need to get rid of the ephemeral data if it exists.
	 */
	mutex_enter(&mi->mi_lock);
	if (mi->mi_ephemeral) {
		/*
		 * If we are the root node of an ephemeral branch
		 * which is being removed, then we need to fixup
		 * pointers into and out of the node.
		 */
		if (!(mi->mi_flags & MI4_EPHEMERAL_RECURSED))
			nfs4_ephemeral_umount_cleanup(mi->mi_ephemeral);

		ASSERT(mi->mi_ephemeral != NULL);

		kmem_free(mi->mi_ephemeral, sizeof (*mi->mi_ephemeral));
		mi->mi_ephemeral = NULL;
	}
	mutex_exit(&mi->mi_lock);

	nfs4_ephemeral_umount_unlock(pmust_unlock, pnet);
}

/*
 * Unmount an ephemeral node.
 */
int
nfs4_ephemeral_umount(mntinfo4_t *mi, int flag, cred_t *cr,
    bool_t *pmust_unlock, nfs4_ephemeral_tree_t **pnet)
{
	int			error = 0;
	nfs4_ephemeral_t	*eph;
	nfs4_ephemeral_tree_t	*net;
	int			is_derooting = FALSE;
	int			is_recursed = FALSE;
	int			was_locked = FALSE;

	/*
	 * The active vnodes on this file system may be ephemeral
	 * children. We need to check for and try to unmount them
	 * here. If any can not be unmounted, we are going
	 * to return EBUSY.
	 */
	mutex_enter(&mi->mi_lock);

	/*
	 * If an ephemeral tree, we need to check to see if
	 * the lock is already held. If it is, then we need
	 * to see if we are being called as a result of
	 * the recursive removal of some node of the tree or
	 * if we are another attempt to remove the tree.
	 *
	 * mi_flags & MI4_EPHEMERAL indicates an ephemeral
	 * node. mi_ephemeral being non-NULL also does this.
	 *
	 * mi_ephemeral_tree being non-NULL is sufficient
	 * to also indicate either it is an ephemeral node
	 * or the enclosing mntinfo4.
	 *
	 * Do we need MI4_EPHEMERAL? Yes, it is useful for
	 * when we delete the ephemeral node and need to
	 * differentiate from an ephemeral node and the
	 * enclosing root node.
	 */
	*pnet = net = mi->mi_ephemeral_tree;
	eph = mi->mi_ephemeral;
	if (net) {
		is_recursed = mi->mi_flags & MI4_EPHEMERAL_RECURSED;
		is_derooting = (eph == NULL);
		mutex_exit(&mi->mi_lock);

		/*
		 * If this is not recursion, then we need to
		 * grab a ref count.
		 *
		 * But wait, we also do not want to do that
		 * if a harvester thread has already grabbed
		 * the lock.
		 */
		if (!is_recursed) {
			mutex_enter(&net->net_cnt_lock);
			if (net->net_status &
			    NFS4_EPHEMERAL_TREE_LOCKED)
				was_locked = TRUE;
			else
				net->net_refcnt++;
			mutex_exit(&net->net_cnt_lock);
		}

		/*
		 * If we grab the lock, it means that no other
		 * operation is working on the tree. If we don't
		 * grab it, we need to decide if this is because
		 * we are a recursive call or a new operation.
		 *
		 * If we are a recursive call, we proceed without
		 * the lock.
		 *
		 * Else we have to wait until the lock becomes free.
		 */
		if (was_locked == FALSE &&
		    !mutex_tryenter(&net->net_tree_lock)) {
			if (!is_recursed) {
				mutex_enter(&net->net_cnt_lock);
				if (net->net_status &
				    (NFS4_EPHEMERAL_TREE_DEROOTING
				    | NFS4_EPHEMERAL_TREE_INVALID)) {
					net->net_refcnt--;
					mutex_exit(&net->net_cnt_lock);
					goto is_busy;
				}
				mutex_exit(&net->net_cnt_lock);

				/*
				 * We can't hold any other locks whilst
				 * we wait on this to free up.
				 */
				mutex_enter(&net->net_tree_lock);

				/*
				 * Note that while mi->mi_ephemeral
				 * may change and thus we have to
				 * update eph, it is the case that
				 * we have tied down net and
				 * do not care if mi->mi_ephemeral_tree
				 * has changed.
				 */
				mutex_enter(&mi->mi_lock);
				eph = mi->mi_ephemeral;
				mutex_exit(&mi->mi_lock);

				/*
				 * Okay, we need to see if either the
				 * tree got nuked or the current node
				 * got nuked. Both of which will cause
				 * an error.
				 *
				 * Note that a subsequent retry of the
				 * umount shall work.
				 */
				mutex_enter(&net->net_cnt_lock);
				if (net->net_status &
				    NFS4_EPHEMERAL_TREE_INVALID ||
				    (!is_derooting && eph == NULL)) {
					net->net_refcnt--;
					mutex_exit(&net->net_cnt_lock);
					mutex_exit(&net->net_tree_lock);
					goto is_busy;
				}
				mutex_exit(&net->net_cnt_lock);
				*pmust_unlock = TRUE;
			}
		} else if (was_locked == FALSE) {
			/*
			 * If we grab it right away, everything must
			 * be great!
			 */
			*pmust_unlock = TRUE;
		}

		/*
		 * Only once we have grabbed the lock can we mark what we
		 * are planning on doing to the ephemeral tree.
		 */
		if (*pmust_unlock) {
			mutex_enter(&net->net_cnt_lock);
			net->net_status |= NFS4_EPHEMERAL_TREE_UMOUNTING;

			/*
			 * Check to see if we are nuking the root.
			 */
			if (is_derooting)
				net->net_status |=
				    NFS4_EPHEMERAL_TREE_DEROOTING;
			mutex_exit(&net->net_cnt_lock);
		}

		if (!is_derooting) {
			/*
			 * Only work on children if the caller has not already
			 * done so.
			 */
			if (!is_recursed) {
				ASSERT(eph != NULL);

				error = nfs4_ephemeral_unmount_engine(eph,
				    FALSE, flag, cr);
				if (error)
					goto is_busy;
			}
		} else {
			eph = net->net_root;

			/*
			 * Only work if there is something there.
			 */
			if (eph) {
				error = nfs4_ephemeral_unmount_engine(eph, TRUE,
				    flag, cr);
				if (error) {
					mutex_enter(&net->net_cnt_lock);
					net->net_status &=
					    ~NFS4_EPHEMERAL_TREE_DEROOTING;
					mutex_exit(&net->net_cnt_lock);
					goto is_busy;
				}

				/*
				 * Nothing else which goes wrong will
				 * invalidate the blowing away of the
				 * ephemeral tree.
				 */
				net->net_root = NULL;
			}

			/*
			 * We have derooted and we have caused the tree to be
			 * invalid.
			 */
			mutex_enter(&net->net_cnt_lock);
			net->net_status &= ~NFS4_EPHEMERAL_TREE_DEROOTING;
			net->net_status |= NFS4_EPHEMERAL_TREE_INVALID;
			net->net_refcnt--;
			mutex_exit(&net->net_cnt_lock);

			/*
			 * At this point, the tree should no
			 * longer be associated with the
			 * mntinfo4. We need to pull it off
			 * there and let the harvester take
			 * care of it once the refcnt drops.
			 */
			mutex_enter(&mi->mi_lock);
			mi->mi_ephemeral_tree = NULL;
			mutex_exit(&mi->mi_lock);
		}
	} else {
		mutex_exit(&mi->mi_lock);
	}

	return (0);

is_busy:

	nfs4_ephemeral_umount_unlock(pmust_unlock, pnet);

	return (error);
}

/*
 * Do the umount and record any error in the parent.
 */
static void
nfs4_ephemeral_record_umount(vfs_t *vfsp, int flag,
    nfs4_ephemeral_t *e, nfs4_ephemeral_t *prior)
{
	int	error;

	error = umount2_engine(vfsp, flag, kcred, FALSE);
	if (error) {
		if (prior) {
			if (prior->ne_child == e)
				prior->ne_state |=
				    NFS4_EPHEMERAL_CHILD_ERROR;
			else
				prior->ne_state |=
				    NFS4_EPHEMERAL_PEER_ERROR;
		}
	}
}

/*
 * For each tree in the forest (where the forest is in
 * effect all of the ephemeral trees for this zone),
 * scan to see if a node can be unmounted. Note that
 * unlike nfs4_ephemeral_unmount_engine(), we do
 * not process the current node before children or
 * siblings. I.e., if a node can be unmounted, we
 * do not recursively check to see if the nodes
 * hanging off of it can also be unmounted.
 *
 * Instead, we delve down deep to try and remove the
 * children first. Then, because we share code with
 * nfs4_ephemeral_unmount_engine(), we will try
 * them again. This could be a performance issue in
 * the future.
 *
 * Also note that unlike nfs4_ephemeral_unmount_engine(),
 * we do not halt on an error. We will not remove the
 * current node, but we will keep on trying to remove
 * the others.
 *
 * force indicates that we want the unmount to occur
 * even if there is something blocking it.
 *
 * time_check indicates that we want to see if the
 * mount has expired past mount_to or not. Typically
 * we want to do this and only on a shutdown of the
 * zone would we want to ignore the check.
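 *
 * The traversal below is iterative, driven by a per-node state
 * (a sketch of the states used):
 *
 *	VISIT_CHILD	descend to ne_child, if any
 *	VISIT_SIBLING	move on to ne_peer, if any
 *	PROCESS_ME	attempt the unmount, then back up via ne_prior
 *	CHILD_ERROR /
 *	PEER_ERROR	propagate the failure so parents are not attempted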
/*
 * For each tree in the forest (where the forest is in
 * effect all of the ephemeral trees for this zone),
 * scan to see if a node can be unmounted. Note that
 * unlike nfs4_ephemeral_unmount_engine(), we do
 * not process the current node before children or
 * siblings. I.e., if a node can be unmounted, we
 * do not recursively check to see if the nodes
 * hanging off of it can also be unmounted.
 *
 * Instead, we delve down deep to try and remove the
 * children first. Then, because we share code with
 * nfs4_ephemeral_unmount_engine(), we will end up
 * trying them again. This could be a performance
 * issue in the future.
 *
 * Also note that unlike nfs4_ephemeral_unmount_engine(),
 * we do not halt on an error. We will not remove the
 * current node, but we will keep on trying to remove
 * the others.
 *
 * force indicates that we want the unmount to occur
 * even if there is something blocking it.
 *
 * time_check indicates that we want to see if the
 * mount has expired past mount_to or not. Typically
 * we want to do this check; only on a shutdown of
 * the zone would we want to ignore it.
 */
static void
nfs4_ephemeral_harvest_forest(nfs4_trigger_globals_t *ntg,
    bool_t force, bool_t time_check)
{
    nfs4_ephemeral_tree_t *net;
    nfs4_ephemeral_tree_t *prev = NULL;
    nfs4_ephemeral_tree_t *next;
    nfs4_ephemeral_t *e;
    nfs4_ephemeral_t *prior;
    time_t now = gethrestime_sec();

    nfs4_ephemeral_tree_t *harvest = NULL;

    int flag;

    mntinfo4_t *mi;
    vfs_t *vfsp;

    if (force)
        flag = MS_FORCE;
    else
        flag = 0;

    mutex_enter(&ntg->ntg_forest_lock);
    for (net = ntg->ntg_forest; net != NULL; net = next) {
        next = net->net_next;

        mutex_enter(&net->net_cnt_lock);
        net->net_refcnt++;
        mutex_exit(&net->net_cnt_lock);

        mutex_enter(&net->net_tree_lock);

        /*
         * Let the unmount code know that the
         * tree is already locked!
         */
        mutex_enter(&net->net_cnt_lock);
        net->net_status |= NFS4_EPHEMERAL_TREE_LOCKED;
        mutex_exit(&net->net_cnt_lock);

        /*
         * If the intent is to force all ephemeral nodes to
         * be unmounted in this zone, we can short-circuit a
         * lot of tree traversal and simply zap the root node.
         */
        if (force) {
            if (net->net_root) {
                mi = net->net_root->ne_mount;
                vfsp = mi->mi_vfsp;

                /*
                 * Cleared by umount2_engine.
                 */
                VFS_HOLD(vfsp);

                (void) umount2_engine(vfsp, flag,
                    kcred, FALSE);

                goto check_done;
            }
        }

        e = net->net_root;
        if (e)
            e->ne_state = NFS4_EPHEMERAL_VISIT_CHILD;

        while (e) {
            if (e->ne_state == NFS4_EPHEMERAL_VISIT_CHILD) {
                e->ne_state = NFS4_EPHEMERAL_VISIT_SIBLING;
                if (e->ne_child) {
                    e = e->ne_child;
                    e->ne_state =
                        NFS4_EPHEMERAL_VISIT_CHILD;
                }

                continue;
            } else if (e->ne_state ==
                NFS4_EPHEMERAL_VISIT_SIBLING) {
                e->ne_state = NFS4_EPHEMERAL_PROCESS_ME;
                if (e->ne_peer) {
                    e = e->ne_peer;
                    e->ne_state =
                        NFS4_EPHEMERAL_VISIT_CHILD;
                }

                continue;
            } else if (e->ne_state ==
                NFS4_EPHEMERAL_CHILD_ERROR) {
                prior = e->ne_prior;

                /*
                 * If a child reported an error, do
                 * not bother trying to unmount.
                 *
                 * If your prior node is a parent,
                 * pass the error up such that they
                 * also do not try to unmount.
                 *
                 * However, if your prior is a sibling,
                 * let them try to unmount if they can.
                 */
                if (prior) {
                    if (prior->ne_child == e)
                        prior->ne_state |=
                            NFS4_EPHEMERAL_CHILD_ERROR;
                    else
                        prior->ne_state |=
                            NFS4_EPHEMERAL_PEER_ERROR;
                }

                /*
                 * Clear the error and, if needed,
                 * process peers.
                 *
                 * Once we mask out the error, we know
                 * whether or not we have to process
                 * another node.
                 */
                e->ne_state &= ~NFS4_EPHEMERAL_CHILD_ERROR;
                if (e->ne_state == NFS4_EPHEMERAL_PROCESS_ME)
                    e = prior;

                continue;
            } else if (e->ne_state ==
                NFS4_EPHEMERAL_PEER_ERROR) {
                prior = e->ne_prior;

                if (prior) {
                    if (prior->ne_child == e)
                        prior->ne_state =
                            NFS4_EPHEMERAL_CHILD_ERROR;
                    else
                        prior->ne_state =
                            NFS4_EPHEMERAL_PEER_ERROR;
                }

                /*
                 * Clear the error from this node and do the
                 * correct processing.
                 */
                e->ne_state &= ~NFS4_EPHEMERAL_PEER_ERROR;
                continue;
            }

            prior = e->ne_prior;
            e->ne_state = NFS4_EPHEMERAL_OK;

            /*
             * It must be the case that we need to process
             * this node.
             */
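            /*
             * For example (illustrative numbers only): with the
             * zone default ntg_mount_to of 240 seconds copied
             * into ne_mount_to at mount time, a node with
             * ne_ref_time == 1000 becomes eligible once
             * now > 1240:
             *
             *	1300 - 1000 = 300 > 240		attempt unmount
             *	1100 - 1000 = 100 <= 240	leave mounted
             */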
            if (!time_check ||
                now - e->ne_ref_time > e->ne_mount_to) {
                mi = e->ne_mount;
                vfsp = mi->mi_vfsp;

                /*
                 * Cleared by umount2_engine.
                 */
                VFS_HOLD(vfsp);

                /*
                 * Note that we effectively work down to the
                 * leaf nodes first, try to unmount them,
                 * then work our way back up toward the root.
                 *
                 * Also note that we deal with a lot of
                 * complexity by sharing the work with
                 * the manual unmount code.
                 */
                nfs4_ephemeral_record_umount(vfsp, flag,
                    e, prior);
            }

            e = prior;
        }

check_done:

        /*
         * Are we done with this tree?
         */
        mutex_enter(&net->net_cnt_lock);
        if (net->net_refcnt == 1 &&
            net->net_status & NFS4_EPHEMERAL_TREE_INVALID) {
            net->net_refcnt--;
            net->net_status &= ~NFS4_EPHEMERAL_TREE_LOCKED;
            mutex_exit(&net->net_cnt_lock);
            mutex_exit(&net->net_tree_lock);

            if (prev)
                prev->net_next = net->net_next;
            else
                ntg->ntg_forest = net->net_next;

            net->net_next = harvest;
            harvest = net;
            continue;
        }

        net->net_refcnt--;
        net->net_status &= ~NFS4_EPHEMERAL_TREE_LOCKED;
        mutex_exit(&net->net_cnt_lock);
        mutex_exit(&net->net_tree_lock);

        prev = net;
    }
    mutex_exit(&ntg->ntg_forest_lock);

    for (net = harvest; net != NULL; net = next) {
        next = net->net_next;

        mutex_destroy(&net->net_tree_lock);
        mutex_destroy(&net->net_cnt_lock);
        kmem_free(net, sizeof (*net));
    }
}

/*
 * This is the thread which decides when the harvesting
 * can proceed and when to kill it off for this zone.
 */
static void
nfs4_ephemeral_harvester(nfs4_trigger_globals_t *ntg)
{
    clock_t timeleft;
    zone_t *zone = curproc->p_zone;

    for (;;) {
        timeleft = zone_status_timedwait(zone, lbolt +
            nfs4_trigger_thread_timer * hz, ZONE_IS_SHUTTING_DOWN);

        /*
         * The zone is exiting...
         */
        if (timeleft != -1) {
            ASSERT(zone_status_get(zone) >= ZONE_IS_SHUTTING_DOWN);
            zthread_exit();
            /* NOTREACHED */
        }

        /*
         * Only bother scanning if there is potential
         * work to be done.
         */
        if (ntg->ntg_forest == NULL)
            continue;

        /*
         * Now scan the list and get rid of everything which
         * is old.
         */
        nfs4_ephemeral_harvest_forest(ntg, FALSE, TRUE);
    }

    /* NOTREACHED */
}
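/*
 * Timing sketch (illustrative arithmetic, not a guarantee): the
 * harvester wakes every nfs4_trigger_thread_timer (20) seconds,
 * and a node is eligible only after ne_mount_to (default 240)
 * seconds of disuse, so an idle ephemeral mount is reaped at
 * most one timer period late (ignoring scan time):
 *
 *	worst case ~= ne_mount_to + nfs4_trigger_thread_timer
 *		    = 240 + 20 = 260 seconds
 */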
/*
 * The zone specific glue needed to start the unmount harvester.
 *
 * Note that we want to avoid taking the mutex unnecessarily,
 * hence the double-checked ntg_thread_started test.
 *
 * The caller should avoid us getting down here in the first
 * place.
 */
static void
nfs4_ephemeral_start_harvester(nfs4_trigger_globals_t *ntg)
{
    /*
     * It got started before we got here...
     */
    if (ntg->ntg_thread_started)
        return;

    mutex_enter(&nfs4_ephemeral_thread_lock);

    if (ntg->ntg_thread_started) {
        mutex_exit(&nfs4_ephemeral_thread_lock);
        return;
    }

    /*
     * Start the unmount harvester thread for this zone.
     */
    (void) zthread_create(NULL, 0, nfs4_ephemeral_harvester,
        ntg, 0, minclsyspri);

    ntg->ntg_thread_started = TRUE;
    mutex_exit(&nfs4_ephemeral_thread_lock);
}

/*ARGSUSED*/
static void *
nfs4_ephemeral_zsd_create(zoneid_t zoneid)
{
    nfs4_trigger_globals_t *ntg;

    ntg = kmem_zalloc(sizeof (*ntg), KM_SLEEP);
    ntg->ntg_thread_started = FALSE;

    /*
     * This is the default mount timeout....
     */
    ntg->ntg_mount_to = nfs4_trigger_mount_to;

    mutex_init(&ntg->ntg_forest_lock, NULL,
        MUTEX_DEFAULT, NULL);

    return (ntg);
}

/*
 * Try a nice gentle walk down the forest and convince
 * all of the trees to gracefully give it up.
 */
/*ARGSUSED*/
static void
nfs4_ephemeral_zsd_shutdown(zoneid_t zoneid, void *arg)
{
    nfs4_trigger_globals_t *ntg = arg;

    if (!ntg)
        return;

    nfs4_ephemeral_harvest_forest(ntg, FALSE, FALSE);
}

/*
 * Race along the forest and rip all of the trees out by
 * their rootballs!
 */
/*ARGSUSED*/
static void
nfs4_ephemeral_zsd_destroy(zoneid_t zoneid, void *arg)
{
    nfs4_trigger_globals_t *ntg = arg;

    if (!ntg)
        return;

    nfs4_ephemeral_harvest_forest(ntg, TRUE, FALSE);

    mutex_destroy(&ntg->ntg_forest_lock);
    kmem_free(ntg, sizeof (*ntg));
}

/*
 * This is the zone independent cleanup needed for
 * ephemeral mount processing.
 */
void
nfs4_ephemeral_fini(void)
{
    (void) zone_key_delete(nfs4_ephemeral_key);
    mutex_destroy(&nfs4_ephemeral_thread_lock);
}

/*
 * This is the zone independent initialization needed for
 * ephemeral mount processing.
 */
void
nfs4_ephemeral_init(void)
{
    mutex_init(&nfs4_ephemeral_thread_lock, NULL, MUTEX_DEFAULT,
        NULL);

    zone_key_create(&nfs4_ephemeral_key, nfs4_ephemeral_zsd_create,
        nfs4_ephemeral_zsd_shutdown, nfs4_ephemeral_zsd_destroy);
}

/*
 * nfssys() calls this function to set the per-zone
 * value of mount_to to drive when an ephemeral mount is
 * timed out. Each mount will grab a copy of this value
 * when mounted.
 */
void
nfs4_ephemeral_set_mount_to(uint_t mount_to)
{
    nfs4_trigger_globals_t *ntg;
    zone_t *zone = curproc->p_zone;

    ntg = zone_getspecific(nfs4_ephemeral_key, zone);

    ntg->ntg_mount_to = mount_to;
}
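/*
 * Usage sketch (hypothetical value): an nfssys() caller that
 * wants ephemeral mounts in its zone to expire after five
 * minutes would reduce to:
 *
 *	nfs4_ephemeral_set_mount_to(300);
 *
 * Only mounts established after the call see the new timeout,
 * since each mount copies ntg_mount_to at mount time.
 */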
/*
 * Walk the list of v4 mount options; if they are currently set in vfsp,
 * append them to a new comma-separated mount option string, and return it.
 *
 * Caller should free by calling nfs4_trigger_destroy_mntopts().
 */
static char *
nfs4_trigger_create_mntopts(vfs_t *vfsp)
{
    uint_t i;
    char *mntopts;
    struct vfssw *vswp;
    mntopts_t *optproto;

    mntopts = kmem_zalloc(MAX_MNTOPT_STR, KM_SLEEP);

    /* get the list of applicable mount options for v4; locks *vswp */
    vswp = vfs_getvfssw(MNTTYPE_NFS4);
    optproto = &vswp->vsw_optproto;

    for (i = 0; i < optproto->mo_count; i++) {
        struct mntopt *mop = &optproto->mo_list[i];

        if (mop->mo_flags & MO_EMPTY)
            continue;

        if (nfs4_trigger_add_mntopt(mntopts, mop->mo_name, vfsp)) {
            kmem_free(mntopts, MAX_MNTOPT_STR);
            vfs_unrefvfssw(vswp);
            return (NULL);
        }
    }

    vfs_unrefvfssw(vswp);

    /*
     * MNTOPT_XATTR is not in the v4 mount opt proto list,
     * and it may only be passed via MS_OPTIONSTR, so we
     * must handle it here.
     *
     * Ideally, it would be in the list, but NFS does not specify
     * its own opt proto list; it uses the default one instead.
     * Since not all filesystems support extended attrs, it would
     * not be appropriate to add it there.
     */
    if (nfs4_trigger_add_mntopt(mntopts, MNTOPT_XATTR, vfsp) ||
        nfs4_trigger_add_mntopt(mntopts, MNTOPT_NOXATTR, vfsp)) {
        kmem_free(mntopts, MAX_MNTOPT_STR);
        return (NULL);
    }

    return (mntopts);
}

static void
nfs4_trigger_destroy_mntopts(char *mntopts)
{
    if (mntopts)
        kmem_free(mntopts, MAX_MNTOPT_STR);
}

/*
 * Check a single mount option (optname). Add to mntopts if it is set in VFS.
 */
static int
nfs4_trigger_add_mntopt(char *mntopts, char *optname, vfs_t *vfsp)
{
    if (mntopts == NULL || optname == NULL || vfsp == NULL)
        return (EINVAL);

    if (vfs_optionisset(vfsp, optname, NULL)) {
        size_t mntoptslen = strlen(mntopts);
        size_t optnamelen = strlen(optname);

        /* +1 for ',', +1 for NUL */
        if (mntoptslen + optnamelen + 2 > MAX_MNTOPT_STR)
            return (EOVERFLOW);

        /* first or subsequent mount option? */
        if (*mntopts != '\0')
            (void) strcat(mntopts, ",");

        (void) strcat(mntopts, optname);
    }

    return (0);
}

static enum clnt_stat
nfs4_trigger_ping_server(servinfo4_t *svp, int nointr)
{
    int retries, error;
    uint_t max_msgsize;
    enum clnt_stat status;
    CLIENT *cl;
    struct timeval timeout;

    /* as per recov_newserver() */
    max_msgsize = 0;
    retries = 1;
    timeout.tv_sec = 2;
    timeout.tv_usec = 0;

    error = clnt_tli_kcreate(svp->sv_knconf, &svp->sv_addr, NFS_PROGRAM,
        NFS_V4, max_msgsize, retries, CRED(), &cl);
    if (error)
        return (RPC_FAILED);

    if (nointr)
        cl->cl_nosignal = TRUE;
    status = CLNT_CALL(cl, RFS_NULL, xdr_void, NULL, xdr_void, NULL,
        timeout);
    if (nointr)
        cl->cl_nosignal = FALSE;

    AUTH_DESTROY(cl->cl_auth);
    CLNT_DESTROY(cl);

    return (status);
}
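/*
 * A sketch of how a caller might gate a mount on the ping
 * result (hypothetical caller; the error choice is illustrative):
 *
 *	if (nfs4_trigger_ping_server(svp, nointr) != RPC_SUCCESS)
 *		return (ETIMEDOUT);
 *
 * Only a successful NULL procedure round trip to the server
 * lets the ephemeral mount proceed.
 */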