/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 1983,1984,1985,1986,1987,1988,1989  AT&T.
 * All Rights Reserved
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/thread.h>
#include <sys/t_lock.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/errno.h>
#include <sys/buf.h>
#include <sys/stat.h>
#include <sys/cred.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/dnlc.h>
#include <sys/vmsystm.h>
#include <sys/flock.h>
#include <sys/share.h>
#include <sys/cmn_err.h>
#include <sys/tiuser.h>
#include <sys/sysmacros.h>
#include <sys/callb.h>
#include <sys/acl.h>
#include <sys/kstat.h>
#include <sys/signal.h>
#include <sys/disp.h>
#include <sys/atomic.h>
#include <sys/list.h>
#include <sys/sdt.h>

#include <rpc/types.h>
#include <rpc/xdr.h>
#include <rpc/auth.h>
#include <rpc/clnt.h>

#include <nfs/nfs.h>
#include <nfs/nfs_clnt.h>
#include <nfs/nfs_acl.h>

#include <nfs/nfs4.h>
#include <nfs/rnode4.h>
#include <nfs/nfs4_clnt.h>

#include <vm/hat.h>
#include <vm/as.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_vn.h>

#include <sys/ddi.h>

/*
 * Arguments to page-flush thread.
 */
typedef struct {
    vnode_t *vp;
    cred_t *cr;
} pgflush_t;

#ifdef DEBUG
int nfs4_client_lease_debug;
int nfs4_sharedfh_debug;
int nfs4_fname_debug;

/* temporary: panic if v_type is inconsistent with r_attr va_type */
int nfs4_vtype_debug;

uint_t nfs4_tsd_key;
#endif

static time_t nfs4_client_resumed = 0;
static callb_id_t cid = 0;

static int nfs4renew(nfs4_server_t *);
static void nfs4_attrcache_va(vnode_t *, nfs4_ga_res_t *, int);
static void nfs4_pgflush_thread(pgflush_t *);
static void flush_pages(vnode_t *, cred_t *);

static boolean_t nfs4_client_cpr_callb(void *, int);

struct mi4_globals {
    kmutex_t	mig_lock;	/* lock protecting mig_list */
    list_t	mig_list;	/* list of NFS v4 mounts in zone */
    boolean_t	mig_destructor_called;
};

static zone_key_t mi4_list_key;
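
/*
 * In broad terms, this part of the client covers two areas: attribute
 * caching for rnodes (nfs4_attr_cache() and friends) and the per-mount
 * asynchronous I/O machinery (readahead, putpage, pageio, readdir,
 * commit, and inactive handling), driven by a manager thread and a
 * pool of worker threads. Details appear before each routine below.
 */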

/*
 * Attributes caching:
 *
 * Attributes are cached in the rnode in struct vattr form.
 * There is a time associated with the cached attributes (r_time_attr_inval)
 * which tells whether the attributes are valid. The time is initialized
 * to the difference between current time and the modify time of the vnode
 * when new attributes are cached. This allows the attributes for
 * files that have changed recently to be timed out sooner than for files
 * that have not changed for a long time. There are minimum and maximum
 * timeout values that can be set per mount point.
 */

/*
 * If a cache purge is in progress, wait for it to finish.
 *
 * The current thread must not be in the middle of an
 * nfs4_start_op/nfs4_end_op region. Otherwise, there could be a deadlock
 * between this thread, a recovery thread, and the page flush thread.
 */
int
nfs4_waitfor_purge_complete(vnode_t *vp)
{
    rnode4_t *rp;
    k_sigset_t smask;

    rp = VTOR4(vp);
    if ((rp->r_serial != NULL && rp->r_serial != curthread) ||
        ((rp->r_flags & R4PGFLUSH) && rp->r_pgflush != curthread)) {
        mutex_enter(&rp->r_statelock);
        sigintr(&smask, VTOMI4(vp)->mi_flags & MI4_INT);
        while ((rp->r_serial != NULL && rp->r_serial != curthread) ||
            ((rp->r_flags & R4PGFLUSH) &&
            rp->r_pgflush != curthread)) {
            if (!cv_wait_sig(&rp->r_cv, &rp->r_statelock)) {
                sigunintr(&smask);
                mutex_exit(&rp->r_statelock);
                return (EINTR);
            }
        }
        sigunintr(&smask);
        mutex_exit(&rp->r_statelock);
    }
    return (0);
}

/*
 * Validate caches by checking cached attributes. If they have timed out,
 * then get new attributes from the server. As a side effect, cache
 * invalidation is done if the attributes have changed.
 *
 * If the attributes have not timed out and if there is a cache
 * invalidation being done by some other thread, then wait until that
 * thread has completed the cache invalidation.
 */
int
nfs4_validate_caches(vnode_t *vp, cred_t *cr)
{
    int error;
    nfs4_ga_res_t gar;

    if (ATTRCACHE4_VALID(vp)) {
        error = nfs4_waitfor_purge_complete(vp);
        if (error)
            return (error);
        return (0);
    }

    gar.n4g_va.va_mask = AT_ALL;
    return (nfs4_getattr_otw(vp, &gar, cr, 0));
}

/*
 * Fill in attributes from the cache.
 * If valid, return 0 to indicate that no error occurred,
 * otherwise return 1 to indicate that an error occurred.
 */
static int
nfs4_getattr_cache(vnode_t *vp, struct vattr *vap)
{
    rnode4_t *rp;

    rp = VTOR4(vp);
    mutex_enter(&rp->r_statelock);
    mutex_enter(&rp->r_statev4_lock);
    if (ATTRCACHE4_VALID(vp)) {
        mutex_exit(&rp->r_statev4_lock);
        /*
         * Cached attributes are valid
         */
        *vap = rp->r_attr;
        mutex_exit(&rp->r_statelock);
        return (0);
    }
    mutex_exit(&rp->r_statev4_lock);
    mutex_exit(&rp->r_statelock);
    return (1);
}
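
/*
 * For illustration, a hypothetical caller would pair the two routines
 * above roughly like this (sketch only; "va" is a local struct vattr):
 *
 *	struct vattr va;
 *
 *	if (nfs4_validate_caches(vp, cr) == 0 &&
 *	    nfs4_getattr_cache(vp, &va) == 0) {
 *		... use the cached attributes in va ...
 *	}
 *
 * nfs4_validate_caches() refreshes the cache over the wire on a miss,
 * so the subsequent cache lookup normally succeeds.
 */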

/*
 * If the returned error is ESTALE, flush all caches. The nfs4_purge_caches()
 * call is synchronous because all the pages were invalidated by the
 * nfs4_invalidate_pages() call.
 */
void
nfs4_purge_stale_fh(int errno, vnode_t *vp, cred_t *cr)
{
    struct rnode4 *rp = VTOR4(vp);

    /* Ensure that the ..._end_op() call has been done */
    ASSERT(tsd_get(nfs4_tsd_key) == NULL);

    if (errno != ESTALE)
        return;

    mutex_enter(&rp->r_statelock);
    rp->r_flags |= R4STALE;
    if (!rp->r_error)
        rp->r_error = errno;
    mutex_exit(&rp->r_statelock);
    if (nfs4_has_pages(vp))
        nfs4_invalidate_pages(vp, (u_offset_t)0, cr);
    nfs4_purge_caches(vp, NFS4_PURGE_DNLC, cr, FALSE);
}

/*
 * Purge all of the various NFS `data' caches. If "asyncpg" is TRUE, the
 * page purge is done asynchronously.
 */
void
nfs4_purge_caches(vnode_t *vp, int purge_dnlc, cred_t *cr, int asyncpg)
{
    rnode4_t *rp;
    char *contents;
    vnode_t *xattr;
    int size;
    int pgflush;		/* are we the page flush thread? */

    /*
     * Purge the DNLC for any entries which refer to this file.
     */
    if (vp->v_count > 1 &&
        (vp->v_type == VDIR || purge_dnlc == NFS4_PURGE_DNLC))
        dnlc_purge_vp(vp);

    /*
     * Clear any readdir state bits and purge the readlink response cache.
     */
    rp = VTOR4(vp);
    mutex_enter(&rp->r_statelock);
    rp->r_flags &= ~R4LOOKUP;
    contents = rp->r_symlink.contents;
    size = rp->r_symlink.size;
    rp->r_symlink.contents = NULL;

    xattr = rp->r_xattr_dir;
    rp->r_xattr_dir = NULL;

    /*
     * Purge pathconf cache too.
     */
    rp->r_pathconf.pc4_xattr_valid = 0;
    rp->r_pathconf.pc4_cache_valid = 0;

    pgflush = (curthread == rp->r_pgflush);
    mutex_exit(&rp->r_statelock);

    if (contents != NULL)
        kmem_free((void *)contents, size);

    if (xattr != NULL)
        VN_RELE(xattr);

    /*
     * Flush the page cache. If the current thread is the page flush
     * thread, don't initiate a new page flush. There's no need for
     * it, and doing it correctly is hard.
     */
    if (nfs4_has_pages(vp) && !pgflush) {
        if (!asyncpg) {
            (void) nfs4_waitfor_purge_complete(vp);
            flush_pages(vp, cr);
        } else {
            pgflush_t *args;

            /*
             * We don't hold r_statelock while creating the
             * thread, in case the call blocks. So we use a
             * flag to indicate that a page flush thread is
             * active.
             */
            mutex_enter(&rp->r_statelock);
            if (rp->r_flags & R4PGFLUSH) {
                mutex_exit(&rp->r_statelock);
            } else {
                rp->r_flags |= R4PGFLUSH;
                mutex_exit(&rp->r_statelock);

                args = kmem_alloc(sizeof (pgflush_t),
                    KM_SLEEP);
                args->vp = vp;
                VN_HOLD(args->vp);
                args->cr = cr;
                crhold(args->cr);
                (void) zthread_create(NULL, 0,
                    nfs4_pgflush_thread, args, 0,
                    minclsyspri);
            }
        }
    }

    /*
     * Flush the readdir response cache.
     */
    nfs4_purge_rddir_cache(vp);
}

/*
 * Invalidate all pages for the given file, after writing back the dirty
 * ones.
 */

static void
flush_pages(vnode_t *vp, cred_t *cr)
{
    int error;
    rnode4_t *rp = VTOR4(vp);

    error = VOP_PUTPAGE(vp, (u_offset_t)0, 0, B_INVAL, cr);
    if (error == ENOSPC || error == EDQUOT) {
        mutex_enter(&rp->r_statelock);
        if (!rp->r_error)
            rp->r_error = error;
        mutex_exit(&rp->r_statelock);
    }
}
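
/*
 * flush_pages() latches ENOSPC/EDQUOT into r_error (above) so that a
 * write error detected during a flush is not lost; the saved error can
 * then be reported back to the application on a later operation against
 * the rnode.
 */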

/*
 * Page flush thread.
 */

static void
nfs4_pgflush_thread(pgflush_t *args)
{
    rnode4_t *rp = VTOR4(args->vp);

    /* remember which thread we are, so we don't deadlock ourselves */
    mutex_enter(&rp->r_statelock);
    ASSERT(rp->r_pgflush == NULL);
    rp->r_pgflush = curthread;
    mutex_exit(&rp->r_statelock);

    flush_pages(args->vp, args->cr);

    mutex_enter(&rp->r_statelock);
    rp->r_pgflush = NULL;
    rp->r_flags &= ~R4PGFLUSH;
    cv_broadcast(&rp->r_cv);
    mutex_exit(&rp->r_statelock);

    VN_RELE(args->vp);
    crfree(args->cr);
    kmem_free(args, sizeof (pgflush_t));
    zthread_exit();
}

/*
 * Purge the readdir cache of all entries which are not currently
 * being filled.
 */
void
nfs4_purge_rddir_cache(vnode_t *vp)
{
    rnode4_t *rp;

    rp = VTOR4(vp);

    mutex_enter(&rp->r_statelock);
    rp->r_direof = NULL;
    rp->r_flags &= ~R4LOOKUP;
    rp->r_flags |= R4READDIRWATTR;
    rddir4_cache_purge(rp);
    mutex_exit(&rp->r_statelock);
}

/*
 * Set attributes cache for given vnode using virtual attributes. There is
 * no cache validation, but if the attributes are deemed to be stale, they
 * are ignored. This corresponds to nfs3_attrcache().
 *
 * Set the timeout value on the attribute cache and fill it
 * with the passed in attributes.
 */
void
nfs4_attrcache_noinval(vnode_t *vp, nfs4_ga_res_t *garp, hrtime_t t)
{
    rnode4_t *rp = VTOR4(vp);

    mutex_enter(&rp->r_statelock);
    if (rp->r_time_attr_saved <= t)
        nfs4_attrcache_va(vp, garp, FALSE);
    mutex_exit(&rp->r_statelock);
}
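
/*
 * The r_time_attr_saved <= t test above is the staleness check: "t" is
 * the time at which the passed-in attributes were fetched, so attributes
 * older than the ones already cached are silently ignored.
 */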

/*
 * Use the passed in virtual attributes to check to see whether the
 * data and metadata caches are valid, cache the new attributes, and
 * then do the cache invalidation if required.
 *
 * The cache validation and caching of the new attributes is done
 * atomically via the use of the mutex, r_statelock. If required,
 * the cache invalidation is done atomically w.r.t. the cache
 * validation and caching of the attributes via the pseudo lock,
 * r_serial.
 *
 * This routine is used to do cache validation and attributes caching
 * for operations with a single set of post operation attributes.
 */

void
nfs4_attr_cache(vnode_t *vp, nfs4_ga_res_t *garp,
    hrtime_t t, cred_t *cr, int async,
    change_info4 *cinfo)
{
    rnode4_t *rp;
    int mtime_changed;
    int ctime_changed;
    vsecattr_t *vsp;
    int was_serial, set_time_cache_inval, recov;
    vattr_t *vap = &garp->n4g_va;
    mntinfo4_t *mi = VTOMI4(vp);

    ASSERT(mi->mi_vfsp->vfs_dev == garp->n4g_va.va_fsid);

    /* Is curthread the recovery thread? */
    mutex_enter(&mi->mi_lock);
    recov = (VTOMI4(vp)->mi_recovthread == curthread);
    mutex_exit(&mi->mi_lock);

    rp = VTOR4(vp);
    mutex_enter(&rp->r_statelock);
    was_serial = (rp->r_serial == curthread);
    if (rp->r_serial && !was_serial) {
        klwp_t *lwp = ttolwp(curthread);

        /*
         * If we're the recovery thread, then purge current attrs
         * and bail out to avoid potential deadlock between another
         * thread caching attrs (r_serial thread), recov thread,
         * and an async writer thread.
         */
        if (recov) {
            PURGE_ATTRCACHE4_LOCKED(rp);
            mutex_exit(&rp->r_statelock);
            return;
        }

        if (lwp != NULL)
            lwp->lwp_nostop++;
        while (rp->r_serial != NULL) {
            if (!cv_wait_sig(&rp->r_cv, &rp->r_statelock)) {
                mutex_exit(&rp->r_statelock);
                if (lwp != NULL)
                    lwp->lwp_nostop--;
                return;
            }
        }
        if (lwp != NULL)
            lwp->lwp_nostop--;
    }

    /*
     * If there is a page flush thread, the current thread needs to
     * bail out, to prevent a possible deadlock between the current
     * thread (which might be in a start_op/end_op region), the
     * recovery thread, and the page flush thread. Expire the
     * attribute cache, so that any attributes the current thread was
     * going to set are not lost.
     */
    if ((rp->r_flags & R4PGFLUSH) && rp->r_pgflush != curthread) {
        PURGE_ATTRCACHE4_LOCKED(rp);
        mutex_exit(&rp->r_statelock);
        return;
    }

    if (rp->r_time_attr_saved > t) {
        /*
         * Attributes have been cached since these attributes were
         * made, so don't act on them.
         */
        mutex_exit(&rp->r_statelock);
        return;
    }
    set_time_cache_inval = 0;
    if (cinfo) {
        /*
         * Only directory modifying callers pass non-NULL cinfo.
         */
        ASSERT(vp->v_type == VDIR);
        /*
         * If the cache timeout either doesn't exist or hasn't expired,
         * and the dir didn't change on the server before the dirmod op,
         * and the dir didn't change after the dirmod op but before the
         * getattr, then there's a chance that the client's cached data
         * for this object is current (not stale). No immediate cache
         * flush is required.
         */
        if ((! rp->r_time_cache_inval || t < rp->r_time_cache_inval) &&
            cinfo->before == rp->r_change &&
            (garp->n4g_change_valid &&
            cinfo->after == garp->n4g_change)) {

            /*
             * If atomic isn't set, then the before/after info
             * cannot be blindly trusted. For this case, we tell
             * nfs4_attrcache_va to cache the attrs but also
             * establish an absolute maximum cache timeout. When
             * the timeout is reached, caches will be flushed.
             */
            if (! cinfo->atomic)
                set_time_cache_inval = 1;

            mtime_changed = 0;
            ctime_changed = 0;
        } else {

            /*
             * We're not sure exactly what changed, but we know
             * what to do: flush all caches for the dir and remove
             * the attr timeout.
             *
             * a) timeout expired. flush all caches.
             * b) r_change != cinfo.before. flush all caches.
             * c) r_change == cinfo.before, but cinfo.after !=
             *    post-op getattr(change). flush all caches.
             * d) post-op getattr(change) not provided by server.
             *    flush all caches.
             */
            mtime_changed = 1;
            ctime_changed = 1;
            rp->r_time_cache_inval = 0;
        }
    } else {
        if (!(rp->r_flags & R4WRITEMODIFIED)) {
            if (!CACHE4_VALID(rp, vap->va_mtime, vap->va_size))
                mtime_changed = 1;
            else
                mtime_changed = 0;
            if (rp->r_attr.va_ctime.tv_sec !=
                vap->va_ctime.tv_sec ||
                rp->r_attr.va_ctime.tv_nsec !=
                vap->va_ctime.tv_nsec)
                ctime_changed = 1;
            else
                ctime_changed = 0;
        } else {
            mtime_changed = 0;
            ctime_changed = 0;
        }
    }

    nfs4_attrcache_va(vp, garp, set_time_cache_inval);

    if (!mtime_changed && !ctime_changed) {
        mutex_exit(&rp->r_statelock);
        return;
    }

    rp->r_serial = curthread;

    mutex_exit(&rp->r_statelock);

    /*
     * If we're the recov thread, then force async nfs4_purge_caches
     * to avoid potential deadlock.
     */
    if (mtime_changed)
        nfs4_purge_caches(vp, NFS4_NOPURGE_DNLC, cr, recov ? 1 : async);

    if (ctime_changed) {
        (void) nfs4_access_purge_rp(rp);
        if (rp->r_secattr != NULL) {
            mutex_enter(&rp->r_statelock);
            vsp = rp->r_secattr;
            rp->r_secattr = NULL;
            mutex_exit(&rp->r_statelock);
            if (vsp != NULL)
                nfs4_acl_free_cache(vsp);
        }
    }

    if (!was_serial) {
        mutex_enter(&rp->r_statelock);
        rp->r_serial = NULL;
        cv_broadcast(&rp->r_cv);
        mutex_exit(&rp->r_statelock);
    }
}

/*
 * Set attributes cache for given vnode using virtual attributes.
 *
 * Set the timeout value on the attribute cache and fill it
 * with the passed in attributes.
 *
 * The caller must be holding r_statelock.
 */
static void
nfs4_attrcache_va(vnode_t *vp, nfs4_ga_res_t *garp, int set_cache_timeout)
{
    rnode4_t *rp;
    mntinfo4_t *mi;
    hrtime_t delta;
    hrtime_t now;
    vattr_t *vap = &garp->n4g_va;

    rp = VTOR4(vp);

    ASSERT(MUTEX_HELD(&rp->r_statelock));
    ASSERT(vap->va_mask == AT_ALL);

    /* Switch to master before checking v_flag */
    if (IS_SHADOW(vp, rp))
        vp = RTOV4(rp);

    now = gethrtime();

    mi = VTOMI4(vp);

    /*
     * Only establish a new cache timeout (if requested). Never
     * extend a timeout. Never clear a timeout. Clearing a timeout
     * is done by nfs4_update_dircaches (ancestor in our call chain)
     */
    if (set_cache_timeout && ! rp->r_time_cache_inval)
        rp->r_time_cache_inval = now + mi->mi_acdirmax;

    /*
     * Delta is the number of nanoseconds that we will
     * cache the attributes of the file. It is based on
     * the number of nanoseconds since the last time that
     * we detected a change. The assumption is that files
     * that changed recently are likely to change again.
     * There are enforced minimum and maximum values for
     * regular files and for directories.
     *
     * Using the time since the last change was detected
     * eliminates direct comparison or calculation
     * using mixed client and server times. NFS does
     * not make any assumptions regarding the client
     * and server clocks being synchronized.
     */
    if (vap->va_mtime.tv_sec != rp->r_attr.va_mtime.tv_sec ||
        vap->va_mtime.tv_nsec != rp->r_attr.va_mtime.tv_nsec ||
        vap->va_size != rp->r_attr.va_size) {
        rp->r_time_attr_saved = now;
    }

    if ((mi->mi_flags & MI4_NOAC) || (vp->v_flag & VNOCACHE))
        delta = 0;
    else {
        delta = now - rp->r_time_attr_saved;
        if (vp->v_type == VDIR) {
            if (delta < mi->mi_acdirmin)
                delta = mi->mi_acdirmin;
            else if (delta > mi->mi_acdirmax)
                delta = mi->mi_acdirmax;
        } else {
            if (delta < mi->mi_acregmin)
                delta = mi->mi_acregmin;
            else if (delta > mi->mi_acregmax)
                delta = mi->mi_acregmax;
        }
    }
    rp->r_time_attr_inval = now + delta;
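
    /*
     * Worked example (illustrative numbers only): with acregmin=3s and
     * acregmax=60s, a regular file last seen changing 1s ago is cached
     * for the 3s minimum, one that changed 10s ago is cached for 10s,
     * and one that changed an hour ago is clamped to the 60s maximum.
     */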

    rp->r_attr = *vap;
    if (garp->n4g_change_valid)
        rp->r_change = garp->n4g_change;

    /*
     * The attributes that were returned may be valid and can
     * be used, but they may not be allowed to be cached.
     * Reset the timers to cause immediate invalidation and
     * clear r_change so no VERIFY operations will succeed.
     */
    if (garp->n4g_attrwhy == NFS4_GETATTR_NOCACHE_OK) {
        rp->r_time_attr_inval = now;
        rp->r_time_attr_saved = now;
        rp->r_change = 0;
    }

    /*
     * If mounted_on_fileid was returned AND the object is a stub,
     * then set the object's va_nodeid to the mounted-over fid
     * returned by the server.
     *
     * If mounted_on_fileid is not provided/supported, then
     * just set it to 0 for now. Eventually it would be
     * better to set it to a hashed version of FH. This
     * would probably be good enough to provide a unique
     * fid/d_ino within a dir.
     *
     * We don't need to carry mounted_on_fileid in the
     * rnode as long as the client never requests fileid
     * without also requesting mounted_on_fileid. For
     * now, it stays.
     */
    if (garp->n4g_mon_fid_valid) {
        rp->r_mntd_fid = garp->n4g_mon_fid;

        if (rp->r_flags & R4SRVSTUB)
            rp->r_attr.va_nodeid = rp->r_mntd_fid;
    }

    /*
     * Check to see if there are valid pathconf bits to
     * cache in the rnode.
     */
    if (garp->n4g_ext_res) {
        if (garp->n4g_ext_res->n4g_pc4.pc4_cache_valid) {
            rp->r_pathconf = garp->n4g_ext_res->n4g_pc4;
        } else {
            if (garp->n4g_ext_res->n4g_pc4.pc4_xattr_valid) {
                rp->r_pathconf.pc4_xattr_valid = TRUE;
                rp->r_pathconf.pc4_xattr_exists =
                    garp->n4g_ext_res->n4g_pc4.pc4_xattr_exists;
            }
        }
    }
    /*
     * Update the size of the file if there is no cached data or if
     * the cached data is clean and there is no data being written
     * out.
     */
    if (rp->r_size != vap->va_size &&
        (!vn_has_cached_data(vp) ||
        (!(rp->r_flags & R4DIRTY) && rp->r_count == 0))) {
        rp->r_size = vap->va_size;
    }
    nfs_setswaplike(vp, vap);
    rp->r_flags &= ~R4WRITEMODIFIED;
}

/*
 * Get attributes over-the-wire and update the attributes cache
 * if no error occurred in the over-the-wire operation.
 * Return 0 if successful, otherwise error.
 */
int
nfs4_getattr_otw(vnode_t *vp, nfs4_ga_res_t *garp, cred_t *cr, int get_acl)
{
    mntinfo4_t *mi = VTOMI4(vp);
    hrtime_t t;
    nfs4_recov_state_t recov_state;
    nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS };

    recov_state.rs_flags = 0;
    recov_state.rs_num_retry_despite_err = 0;

    /* Save the original mount point security flavor */
    (void) save_mnt_secinfo(mi->mi_curr_serv);

recov_retry:
    if ((e.error = nfs4_start_fop(mi, vp, NULL, OH_GETATTR,
        &recov_state, NULL))) {
        (void) check_mnt_secinfo(mi->mi_curr_serv, vp);
        return (e.error);
    }

    t = gethrtime();

    nfs4_getattr_otw_norecovery(vp, garp, &e, cr, get_acl);

    if (nfs4_needs_recovery(&e, FALSE, vp->v_vfsp)) {
        if (nfs4_start_recovery(&e, VTOMI4(vp), vp, NULL, NULL,
            NULL, OP_GETATTR, NULL) == FALSE) {
            nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_GETATTR,
                &recov_state, 1);
            goto recov_retry;
        }
    }

    nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_GETATTR, &recov_state, 0);

    if (!e.error) {
        if (e.stat == NFS4_OK) {
            nfs4_attr_cache(vp, garp, t, cr, FALSE, NULL);
        } else {
            e.error = geterrno4(e.stat);

            nfs4_purge_stale_fh(e.error, vp, cr);
        }
    }

    /*
     * If we did a getattr on a node that is a stub for a crossed
     * mount point, keep the original secinfo flavor for
     * the current file system, not the crossed one.
     */
    (void) check_mnt_secinfo(mi->mi_curr_serv, vp);

    return (e.error);
}

/*
 * Generate a compound to get attributes over-the-wire.
 */
void
nfs4_getattr_otw_norecovery(vnode_t *vp, nfs4_ga_res_t *garp,
    nfs4_error_t *ep, cred_t *cr, int get_acl)
{
    COMPOUND4args_clnt args;
    COMPOUND4res_clnt res;
    int doqueue;
    rnode4_t *rp = VTOR4(vp);
    nfs_argop4 argop[2];

    args.ctag = TAG_GETATTR;

    args.array_len = 2;
    args.array = argop;

    /* putfh */
    argop[0].argop = OP_CPUTFH;
    argop[0].nfs_argop4_u.opcputfh.sfh = rp->r_fh;

    /* getattr */
    /*
     * Unlike nfs version 2 and 3, where getattr returns all the
     * attributes, nfs version 4 returns only the ones explicitly
     * asked for. This creates problems, as some system functions
     * (e.g. cache check) require certain attributes and if the
     * cached node lacks some attributes such as uid/gid, it can
     * affect system utilities (e.g. "ls") that rely on the information
     * to be there. This can lead to anything from system crashes to
     * corrupted information processed by user apps.
     * So to ensure that all bases are covered, request at least
     * the AT_ALL attribute mask.
     */
    argop[1].argop = OP_GETATTR;
    argop[1].nfs_argop4_u.opgetattr.attr_request = NFS4_VATTR_MASK;
    if (get_acl)
        argop[1].nfs_argop4_u.opgetattr.attr_request |= FATTR4_ACL_MASK;
    argop[1].nfs_argop4_u.opgetattr.mi = VTOMI4(vp);

    doqueue = 1;

    rfs4call(VTOMI4(vp), &args, &res, cr, &doqueue, 0, ep);

    if (ep->error)
        return;

    if (res.status != NFS4_OK) {
        (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
        return;
    }

    *garp = res.array[1].nfs_resop4_u.opgetattr.ga_res;

    (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
}
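
/*
 * For illustration, the typical way callers in this file fetch
 * attributes over the wire mirrors nfs4_validate_caches() above
 * (sketch only):
 *
 *	nfs4_ga_res_t gar;
 *
 *	gar.n4g_va.va_mask = AT_ALL;
 *	error = nfs4_getattr_otw(vp, &gar, cr, 0);
 *
 * which issues the two-op PUTFH+GETATTR compound built by
 * nfs4_getattr_otw_norecovery(), with recovery handling wrapped
 * around it.
 */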

/*
 * Return either cached or remote attributes. If we get the remote
 * attributes, use them to check and invalidate the caches, then cache
 * the new attributes.
 */
int
nfs4getattr(vnode_t *vp, vattr_t *vap, cred_t *cr)
{
    int error;
    rnode4_t *rp;
    nfs4_ga_res_t gar;

    ASSERT(nfs4_consistent_type(vp));

    /*
     * If we've got cached attributes, we're done, otherwise go
     * to the server to get attributes, which will update the cache
     * in the process.
     */
    rp = VTOR4(vp);
    mutex_enter(&rp->r_statelock);
    mutex_enter(&rp->r_statev4_lock);
    if (ATTRCACHE4_VALID(vp)) {
        mutex_exit(&rp->r_statev4_lock);
        /*
         * Cached attributes are valid
         * Return the client's view of file size
         */
        *vap = rp->r_attr;
        vap->va_size = rp->r_size;
        mutex_exit(&rp->r_statelock);

        ASSERT(nfs4_consistent_type(vp));

        return (0);
    }
    mutex_exit(&rp->r_statev4_lock);
    mutex_exit(&rp->r_statelock);

    error = nfs4_getattr_otw(vp, &gar, cr, 0);
    if (!error)
        *vap = gar.n4g_va;

    /* Return the client's view of file size */
    mutex_enter(&rp->r_statelock);
    vap->va_size = rp->r_size;
    mutex_exit(&rp->r_statelock);

    ASSERT(nfs4_consistent_type(vp));

    return (error);
}

int
nfs4_attr_otw(vnode_t *vp, nfs4_tag_type_t tag_type,
    nfs4_ga_res_t *garp, bitmap4 reqbitmap, cred_t *cr)
{
    COMPOUND4args_clnt args;
    COMPOUND4res_clnt res;
    int doqueue;
    nfs_argop4 argop[2];
    mntinfo4_t *mi = VTOMI4(vp);
    bool_t needrecov = FALSE;
    nfs4_recov_state_t recov_state;
    nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS };
    nfs4_ga_ext_res_t *gerp;

    recov_state.rs_flags = 0;
    recov_state.rs_num_retry_despite_err = 0;

recov_retry:
    args.ctag = tag_type;

    args.array_len = 2;
    args.array = argop;

    e.error = nfs4_start_fop(mi, vp, NULL, OH_GETATTR, &recov_state, NULL);
    if (e.error)
        return (e.error);

    /* putfh */
    argop[0].argop = OP_CPUTFH;
    argop[0].nfs_argop4_u.opcputfh.sfh = VTOR4(vp)->r_fh;

    /* getattr */
    argop[1].argop = OP_GETATTR;
    argop[1].nfs_argop4_u.opgetattr.attr_request = reqbitmap;
    argop[1].nfs_argop4_u.opgetattr.mi = mi;

    doqueue = 1;

    NFS4_DEBUG(nfs4_client_call_debug, (CE_NOTE,
        "nfs4_attr_otw: %s call, rp %s", needrecov ? "recov" : "first",
        rnode4info(VTOR4(vp))));

    rfs4call(mi, &args, &res, cr, &doqueue, 0, &e);

    needrecov = nfs4_needs_recovery(&e, FALSE, vp->v_vfsp);
    if (!needrecov && e.error) {
        nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_GETATTR, &recov_state,
            needrecov);
        return (e.error);
    }

    if (needrecov) {
        bool_t abort;

        NFS4_DEBUG(nfs4_client_recov_debug, (CE_NOTE,
            "nfs4_attr_otw: initiating recovery\n"));

        abort = nfs4_start_recovery(&e, VTOMI4(vp), vp, NULL, NULL,
            NULL, OP_GETATTR, NULL);
        nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_GETATTR, &recov_state,
            needrecov);
        if (!e.error) {
            (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
            e.error = geterrno4(res.status);
        }
        if (abort == FALSE)
            goto recov_retry;
        return (e.error);
    }

    if (res.status) {
        e.error = geterrno4(res.status);
    } else {
        gerp = garp->n4g_ext_res;
        bcopy(&res.array[1].nfs_resop4_u.opgetattr.ga_res,
            garp, sizeof (nfs4_ga_res_t));
        garp->n4g_ext_res = gerp;
        if (garp->n4g_ext_res &&
            res.array[1].nfs_resop4_u.opgetattr.ga_res.n4g_ext_res)
            bcopy(res.array[1].nfs_resop4_u.opgetattr.
                ga_res.n4g_ext_res,
                garp->n4g_ext_res, sizeof (nfs4_ga_ext_res_t));
    }
    (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
    nfs4_end_fop(VTOMI4(vp), vp, NULL, OH_GETATTR, &recov_state,
        needrecov);
    return (e.error);
}
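
/*
 * Note on the bcopy() dance in nfs4_attr_otw() above: the caller owns
 * garp->n4g_ext_res, so the pointer is saved in "gerp" before the
 * structure copy would overwrite it, restored afterwards, and the
 * pointed-to extended results are then copied separately. This keeps
 * the caller's buffer from being replaced by one that xdr_free() is
 * about to reclaim.
 */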

/*
 * Asynchronous I/O parameters. nfs_async_threads is the high-water mark
 * for the demand-based allocation of async threads per-mount. The
 * nfs_async_timeout is the amount of time a thread will live after it
 * becomes idle, unless new I/O requests are received before the thread
 * dies. See nfs4_async_putpage and nfs4_async_start.
 */

static void	nfs4_async_start(struct vfs *);

static void
free_async_args4(struct nfs4_async_reqs *args)
{
    rnode4_t *rp;

    if (args->a_io != NFS4_INACTIVE) {
        rp = VTOR4(args->a_vp);
        mutex_enter(&rp->r_statelock);
        rp->r_count--;
        if (args->a_io == NFS4_PUTAPAGE ||
            args->a_io == NFS4_PAGEIO)
            rp->r_awcount--;
        cv_broadcast(&rp->r_cv);
        mutex_exit(&rp->r_statelock);
        VN_RELE(args->a_vp);
    }
    crfree(args->a_cred);
    kmem_free(args, sizeof (*args));
}

/*
 * Cross-zone thread creation and NFS access are disallowed, yet fsflush()
 * and pageout(), running in the global zone, have legitimate reasons to do
 * VOP_PUTPAGE(B_ASYNC) on other zones' NFS mounts. We avoid the problem by
 * use of a per-mount "asynchronous requests manager thread" which is
 * signaled by the various asynchronous work routines when there is
 * asynchronous work to be done. It is responsible for creating new
 * worker threads if necessary, and notifying existing worker threads
 * that there is work to be done.
 *
 * In other words, it will "take the specifications from the customers and
 * give them to the engineers."
 *
 * Worker threads die off of their own accord if they are no longer
 * needed.
 *
 * This thread is killed when the zone is going away or the filesystem
 * is being unmounted.
 */
void
nfs4_async_manager(vfs_t *vfsp)
{
    callb_cpr_t cprinfo;
    mntinfo4_t *mi;
    uint_t max_threads;

    mi = VFTOMI4(vfsp);

    CALLB_CPR_INIT(&cprinfo, &mi->mi_async_lock, callb_generic_cpr,
        "nfs4_async_manager");

    mutex_enter(&mi->mi_async_lock);
    /*
     * We want to stash the max number of threads that this mount was
     * allowed so we can use it later when the variable is set to zero as
     * part of the zone/mount going away.
     *
     * We want to be able to create at least one thread to handle
     * asynchronous inactive calls.
     */
    max_threads = MAX(mi->mi_max_threads, 1);
    mutex_enter(&mi->mi_lock);
    /*
     * We don't want to wait for mi_max_threads to go to zero, since that
     * happens as part of a failed unmount, but this thread should only
     * exit when the mount is really going away.
     *
     * Once MI4_ASYNC_MGR_STOP is set, no more async operations will be
     * attempted: the various _async_*() functions know to do things
     * inline if mi_max_threads == 0. Henceforth we just drain out the
     * outstanding requests.
     *
     * Note that we still create zthreads even if we notice the zone is
     * shutting down (MI4_ASYNC_MGR_STOP is set); this may cause the zone
     * shutdown sequence to take slightly longer in some cases, but
     * doesn't violate the protocol, as all threads will exit as soon as
     * they're done processing the remaining requests.
     */
    while (!(mi->mi_flags & MI4_ASYNC_MGR_STOP) ||
        mi->mi_async_req_count > 0) {
        mutex_exit(&mi->mi_lock);
        CALLB_CPR_SAFE_BEGIN(&cprinfo);
        cv_wait(&mi->mi_async_reqs_cv, &mi->mi_async_lock);
        CALLB_CPR_SAFE_END(&cprinfo, &mi->mi_async_lock);
        while (mi->mi_async_req_count > 0) {
            /*
             * Paranoia: If the mount started out having
             * (mi->mi_max_threads == 0), and the value was
             * later changed (via a debugger or somesuch),
             * we could be confused since we will think we
             * can't create any threads, and the calling
             * code (which looks at the current value of
             * mi->mi_max_threads, now non-zero) thinks we
             * can.
             *
             * So, because we're paranoid, we create threads
             * up to the maximum of the original and the
             * current value. This means that future
             * (debugger-induced) alterations of
             * mi->mi_max_threads are ignored for our
             * purposes, but who told them they could change
             * random values on a live kernel anyhow?
             */
            if (mi->mi_threads <
                MAX(mi->mi_max_threads, max_threads)) {
                mi->mi_threads++;
                mutex_exit(&mi->mi_async_lock);
                MI4_HOLD(mi);
                VFS_HOLD(vfsp);	/* hold for new thread */
                (void) zthread_create(NULL, 0, nfs4_async_start,
                    vfsp, 0, minclsyspri);
                mutex_enter(&mi->mi_async_lock);
            }
            cv_signal(&mi->mi_async_work_cv);
            ASSERT(mi->mi_async_req_count != 0);
            mi->mi_async_req_count--;
        }
        mutex_enter(&mi->mi_lock);
    }
    mutex_exit(&mi->mi_lock);

    NFS4_DEBUG(nfs4_client_zone_debug, (CE_NOTE,
        "nfs4_async_manager exiting for vfs %p\n", (void *)mi->mi_vfsp));
    /*
     * Let everyone know we're done.
     */
    mi->mi_manager_thread = NULL;
    /*
     * Wake up the inactive thread.
     */
    cv_broadcast(&mi->mi_inact_req_cv);
    /*
     * Wake up anyone sitting in nfs4_async_manager_stop()
     */
    cv_broadcast(&mi->mi_async_cv);
    /*
     * There is no explicit call to mutex_exit(&mi->mi_async_lock)
     * since CALLB_CPR_EXIT is actually responsible for releasing
     * 'mi_async_lock'.
     */
    CALLB_CPR_EXIT(&cprinfo);
    VFS_RELE(vfsp);	/* release thread's hold */
    MI4_RELE(mi);
    zthread_exit();
}

/*
 * Signal (and wait for) the async manager thread to clean up and go away.
 */
void
nfs4_async_manager_stop(vfs_t *vfsp)
{
    mntinfo4_t *mi = VFTOMI4(vfsp);

    mutex_enter(&mi->mi_async_lock);
    mutex_enter(&mi->mi_lock);
    mi->mi_flags |= MI4_ASYNC_MGR_STOP;
    mutex_exit(&mi->mi_lock);
    cv_broadcast(&mi->mi_async_reqs_cv);
    /*
     * Wait for the async manager thread to die.
     */
    while (mi->mi_manager_thread != NULL)
        cv_wait(&mi->mi_async_cv, &mi->mi_async_lock);
    mutex_exit(&mi->mi_async_lock);
}
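
/*
 * With minor variations, the nfs4_async_* routines below follow one
 * queuing pattern: allocate a request structure (usually KM_NOSLEEP,
 * falling back to a synchronous or inline path on failure), take holds
 * on the vnode and credentials, link the request onto the per-type
 * queue under mi_async_lock, bump mi_async_req_count, and signal
 * mi_async_reqs_cv so the manager thread dispatches a worker.
 */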

int
nfs4_async_readahead(vnode_t *vp, u_offset_t blkoff, caddr_t addr,
    struct seg *seg, cred_t *cr, void (*readahead)(vnode_t *,
    u_offset_t, caddr_t, struct seg *, cred_t *))
{
    rnode4_t *rp;
    mntinfo4_t *mi;
    struct nfs4_async_reqs *args;

    rp = VTOR4(vp);
    ASSERT(rp->r_freef == NULL);

    mi = VTOMI4(vp);

    /*
     * If addr falls in a different segment, don't bother doing readahead.
     */
    if (addr >= seg->s_base + seg->s_size)
        return (-1);

    /*
     * If we can't allocate a request structure, punt on the readahead.
     */
    if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)
        return (-1);

    /*
     * If a lock operation is pending, don't initiate any new
     * readaheads. Otherwise, bump r_count to indicate the new
     * asynchronous I/O.
     */
    if (!nfs_rw_tryenter(&rp->r_lkserlock, RW_READER)) {
        kmem_free(args, sizeof (*args));
        return (-1);
    }
    mutex_enter(&rp->r_statelock);
    rp->r_count++;
    mutex_exit(&rp->r_statelock);
    nfs_rw_exit(&rp->r_lkserlock);

    args->a_next = NULL;
#ifdef DEBUG
    args->a_queuer = curthread;
#endif
    VN_HOLD(vp);
    args->a_vp = vp;
    ASSERT(cr != NULL);
    crhold(cr);
    args->a_cred = cr;
    args->a_io = NFS4_READ_AHEAD;
    args->a_nfs4_readahead = readahead;
    args->a_nfs4_blkoff = blkoff;
    args->a_nfs4_seg = seg;
    args->a_nfs4_addr = addr;

    mutex_enter(&mi->mi_async_lock);

    /*
     * If asyncio has been disabled, don't bother with the readahead.
     */
    if (mi->mi_max_threads == 0) {
        mutex_exit(&mi->mi_async_lock);
        goto noasync;
    }

    /*
     * Link request structure into the async list and
     * wakeup async thread to do the i/o.
     */
    if (mi->mi_async_reqs[NFS4_READ_AHEAD] == NULL) {
        mi->mi_async_reqs[NFS4_READ_AHEAD] = args;
        mi->mi_async_tail[NFS4_READ_AHEAD] = args;
    } else {
        mi->mi_async_tail[NFS4_READ_AHEAD]->a_next = args;
        mi->mi_async_tail[NFS4_READ_AHEAD] = args;
    }

    if (mi->mi_io_kstats) {
        mutex_enter(&mi->mi_lock);
        kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
        mutex_exit(&mi->mi_lock);
    }

    mi->mi_async_req_count++;
    ASSERT(mi->mi_async_req_count != 0);
    cv_signal(&mi->mi_async_reqs_cv);
    mutex_exit(&mi->mi_async_lock);
    return (0);

noasync:
    mutex_enter(&rp->r_statelock);
    rp->r_count--;
    cv_broadcast(&rp->r_cv);
    mutex_exit(&rp->r_statelock);
    VN_RELE(vp);
    crfree(cr);
    kmem_free(args, sizeof (*args));
    return (-1);
}

/*
 * The async queues for each mounted file system are arranged as a
 * set of queues, one for each async i/o type. Requests are taken
 * from the queues in a round-robin fashion. A number of consecutive
 * requests are taken from each queue before moving on to the next
 * queue. This functionality may allow the NFS Version 2 server to do
 * write clustering, even if the client is mixing writes and reads
 * because it will take multiple write requests from the queue
 * before processing any of the other async i/o types.
 *
 * XXX The nfs4_async_start thread is unsafe in the light of the present
 * model defined by cpr to suspend the system. Specifically over the
 * wire calls are cpr-unsafe. The thread should be reevaluated in
 * case of future updates to the cpr model.
 */
static void
nfs4_async_start(struct vfs *vfsp)
{
    struct nfs4_async_reqs *args;
    mntinfo4_t *mi = VFTOMI4(vfsp);
    clock_t time_left = 1;
    callb_cpr_t cprinfo;
    int i;
    extern int nfs_async_timeout;

    /*
     * Dynamic initialization of nfs_async_timeout to allow nfs to be
     * built in an implementation independent manner.
     */
    if (nfs_async_timeout == -1)
        nfs_async_timeout = NFS_ASYNC_TIMEOUT;

    CALLB_CPR_INIT(&cprinfo, &mi->mi_async_lock, callb_generic_cpr, "nas");

    mutex_enter(&mi->mi_async_lock);
    for (;;) {
        /*
         * Find the next queue containing an entry. We start
         * at the current queue pointer and then round robin
         * through all of them until we either find a non-empty
         * queue or have looked through all of them.
         */
        for (i = 0; i < NFS4_ASYNC_TYPES; i++) {
            args = *mi->mi_async_curr;
            if (args != NULL)
                break;
            mi->mi_async_curr++;
            if (mi->mi_async_curr ==
                &mi->mi_async_reqs[NFS4_ASYNC_TYPES])
                mi->mi_async_curr = &mi->mi_async_reqs[0];
        }
        /*
         * If we didn't find an entry, then block until woken up
         * again and then look through the queues again.
         */
        if (args == NULL) {
            /*
             * Exiting is considered to be safe for CPR as well
             */
            CALLB_CPR_SAFE_BEGIN(&cprinfo);

            /*
             * Wakeup thread waiting to unmount the file
             * system only if all async threads are inactive.
             *
             * If we've timed-out and there's nothing to do,
             * then get rid of this thread.
             */
            if (mi->mi_max_threads == 0 || time_left <= 0) {
                if (--mi->mi_threads == 0)
                    cv_signal(&mi->mi_async_cv);
                CALLB_CPR_EXIT(&cprinfo);
                VFS_RELE(vfsp);	/* release thread's hold */
                MI4_RELE(mi);
                zthread_exit();
                /* NOTREACHED */
            }
            time_left = cv_timedwait(&mi->mi_async_work_cv,
                &mi->mi_async_lock, nfs_async_timeout + lbolt);

            CALLB_CPR_SAFE_END(&cprinfo, &mi->mi_async_lock);

            continue;
        } else {
            time_left = 1;
        }

        /*
         * Remove the request from the async queue and then
         * update the current async request queue pointer. If
         * the current queue is empty or we have removed enough
         * consecutive entries from it, then reset the counter
         * for this queue and then move the current pointer to
         * the next queue.
         */
        *mi->mi_async_curr = args->a_next;
        if (*mi->mi_async_curr == NULL ||
            --mi->mi_async_clusters[args->a_io] == 0) {
            mi->mi_async_clusters[args->a_io] =
                mi->mi_async_init_clusters;
            mi->mi_async_curr++;
            if (mi->mi_async_curr ==
                &mi->mi_async_reqs[NFS4_ASYNC_TYPES])
                mi->mi_async_curr = &mi->mi_async_reqs[0];
        }

        if (args->a_io != NFS4_INACTIVE && mi->mi_io_kstats) {
            mutex_enter(&mi->mi_lock);
            kstat_waitq_exit(KSTAT_IO_PTR(mi->mi_io_kstats));
            mutex_exit(&mi->mi_lock);
        }

        mutex_exit(&mi->mi_async_lock);
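
        /*
         * mi_async_lock is dropped for the dispatch below, so an
         * over-the-wire call in this worker never blocks queue
         * manipulation by other threads; the lock is reacquired
         * once the request has been processed.
         */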

        /*
         * Obtain arguments from the async request structure.
         */
        if (args->a_io == NFS4_READ_AHEAD && mi->mi_max_threads > 0) {
            (*args->a_nfs4_readahead)(args->a_vp,
                args->a_nfs4_blkoff,
                args->a_nfs4_addr, args->a_nfs4_seg,
                args->a_cred);
        } else if (args->a_io == NFS4_PUTAPAGE) {
            (void) (*args->a_nfs4_putapage)(args->a_vp,
                args->a_nfs4_pp, args->a_nfs4_off,
                args->a_nfs4_len, args->a_nfs4_flags,
                args->a_cred);
        } else if (args->a_io == NFS4_PAGEIO) {
            (void) (*args->a_nfs4_pageio)(args->a_vp,
                args->a_nfs4_pp, args->a_nfs4_off,
                args->a_nfs4_len, args->a_nfs4_flags,
                args->a_cred);
        } else if (args->a_io == NFS4_READDIR) {
            (void) ((*args->a_nfs4_readdir)(args->a_vp,
                args->a_nfs4_rdc, args->a_cred));
        } else if (args->a_io == NFS4_COMMIT) {
            (*args->a_nfs4_commit)(args->a_vp, args->a_nfs4_plist,
                args->a_nfs4_offset, args->a_nfs4_count,
                args->a_cred);
        } else if (args->a_io == NFS4_INACTIVE) {
            nfs4_inactive_otw(args->a_vp, args->a_cred);
        }

        /*
         * Now, release the vnode and free the credentials
         * structure.
         */
        free_async_args4(args);
        /*
         * Reacquire the mutex because it will be needed above.
         */
        mutex_enter(&mi->mi_async_lock);
    }
}

/*
 * nfs4_inactive_thread - look for vnodes that need over-the-wire calls as
 * part of VOP_INACTIVE.
 */

void
nfs4_inactive_thread(mntinfo4_t *mi)
{
    struct nfs4_async_reqs *args;
    callb_cpr_t cprinfo;
    vfs_t *vfsp = mi->mi_vfsp;

    CALLB_CPR_INIT(&cprinfo, &mi->mi_async_lock, callb_generic_cpr,
        "nfs4_inactive_thread");

    for (;;) {
        mutex_enter(&mi->mi_async_lock);
        args = mi->mi_async_reqs[NFS4_INACTIVE];
        if (args == NULL) {
            mutex_enter(&mi->mi_lock);
            /*
             * We don't want to exit until the async manager is done
             * with its work; hence the check for mi_manager_thread
             * being NULL.
             *
             * The async manager thread will cv_broadcast() on
             * mi_inact_req_cv when it's done, at which point we'll
             * wake up and exit.
             */
            if (mi->mi_manager_thread == NULL)
                goto die;
            mi->mi_flags |= MI4_INACTIVE_IDLE;
            mutex_exit(&mi->mi_lock);
            cv_signal(&mi->mi_async_cv);
            CALLB_CPR_SAFE_BEGIN(&cprinfo);
            cv_wait(&mi->mi_inact_req_cv, &mi->mi_async_lock);
            CALLB_CPR_SAFE_END(&cprinfo, &mi->mi_async_lock);
            mutex_exit(&mi->mi_async_lock);
        } else {
            mutex_enter(&mi->mi_lock);
            mi->mi_flags &= ~MI4_INACTIVE_IDLE;
            mutex_exit(&mi->mi_lock);
            mi->mi_async_reqs[NFS4_INACTIVE] = args->a_next;
            mutex_exit(&mi->mi_async_lock);
            nfs4_inactive_otw(args->a_vp, args->a_cred);
            crfree(args->a_cred);
            kmem_free(args, sizeof (*args));
        }
    }
die:
    mutex_exit(&mi->mi_lock);
    mi->mi_inactive_thread = NULL;
    cv_signal(&mi->mi_async_cv);

    /*
     * There is no explicit call to mutex_exit(&mi->mi_async_lock) since
     * CALLB_CPR_EXIT is actually responsible for releasing 'mi_async_lock'.
     */
    CALLB_CPR_EXIT(&cprinfo);

    NFS4_DEBUG(nfs4_client_zone_debug, (CE_NOTE,
        "nfs4_inactive_thread exiting for vfs %p\n", (void *)vfsp));

    MI4_RELE(mi);
    zthread_exit();
    /* NOTREACHED */
}

/*
 * nfs4_async_stop:
 * Wait for all outstanding putpage operations and the inactive thread to
 * complete; nfs4_async_stop_sig() without interruptibility.
 */
void
nfs4_async_stop(struct vfs *vfsp)
{
    mntinfo4_t *mi = VFTOMI4(vfsp);

    /*
     * Wait for all outstanding async operations to complete and for
     * worker threads to exit.
     */
    mutex_enter(&mi->mi_async_lock);
    mi->mi_max_threads = 0;
    cv_broadcast(&mi->mi_async_work_cv);
    while (mi->mi_threads != 0)
        cv_wait(&mi->mi_async_cv, &mi->mi_async_lock);

    /*
     * Wait for the inactive thread to finish doing what it's doing. It
     * won't exit until the last reference to the vfs_t goes away.
     */
    if (mi->mi_inactive_thread != NULL) {
        mutex_enter(&mi->mi_lock);
        while (!(mi->mi_flags & MI4_INACTIVE_IDLE) ||
            (mi->mi_async_reqs[NFS4_INACTIVE] != NULL)) {
            mutex_exit(&mi->mi_lock);
            cv_wait(&mi->mi_async_cv, &mi->mi_async_lock);
            mutex_enter(&mi->mi_lock);
        }
        mutex_exit(&mi->mi_lock);
    }
    mutex_exit(&mi->mi_async_lock);
}

/*
 * nfs4_async_stop_sig:
 * Wait for all outstanding putpage operations and the inactive thread to
 * complete. If a signal is delivered we will abort and return non-zero;
 * otherwise return 0. Since this routine is called from nfs4_unmount, we
 * need to make it interruptible.
 */
int
nfs4_async_stop_sig(struct vfs *vfsp)
{
    mntinfo4_t *mi = VFTOMI4(vfsp);
    ushort_t omax;
    bool_t intr = FALSE;

    /*
     * Wait for all outstanding putpage operations to complete and for
     * worker threads to exit.
     */
    mutex_enter(&mi->mi_async_lock);
    omax = mi->mi_max_threads;
    mi->mi_max_threads = 0;
    cv_broadcast(&mi->mi_async_work_cv);
    while (mi->mi_threads != 0) {
        if (!cv_wait_sig(&mi->mi_async_cv, &mi->mi_async_lock)) {
            intr = TRUE;
            goto interrupted;
        }
    }

    /*
     * Wait for the inactive thread to finish doing what it's doing. It
     * won't exit until the last reference to the vfs_t goes away.
     */
    if (mi->mi_inactive_thread != NULL) {
        mutex_enter(&mi->mi_lock);
        while (!(mi->mi_flags & MI4_INACTIVE_IDLE) ||
            (mi->mi_async_reqs[NFS4_INACTIVE] != NULL)) {
            mutex_exit(&mi->mi_lock);
            if (!cv_wait_sig(&mi->mi_async_cv,
                &mi->mi_async_lock)) {
                intr = TRUE;
                goto interrupted;
            }
            mutex_enter(&mi->mi_lock);
        }
        mutex_exit(&mi->mi_lock);
    }
interrupted:
    if (intr)
        mi->mi_max_threads = omax;
    mutex_exit(&mi->mi_async_lock);

    return (intr);
}

int
nfs4_async_putapage(vnode_t *vp, page_t *pp, u_offset_t off, size_t len,
    int flags, cred_t *cr, int (*putapage)(vnode_t *, page_t *,
    u_offset_t, size_t, int, cred_t *))
{
    rnode4_t *rp;
    mntinfo4_t *mi;
    struct nfs4_async_reqs *args;

    ASSERT(flags & B_ASYNC);
    ASSERT(vp->v_vfsp != NULL);

    rp = VTOR4(vp);
    ASSERT(rp->r_count > 0);

    mi = VTOMI4(vp);

    /*
     * If we can't allocate a request structure, do the putpage
     * operation synchronously in this thread's context.
     */
    if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)
        goto noasync;

    args->a_next = NULL;
#ifdef DEBUG
    args->a_queuer = curthread;
#endif
    VN_HOLD(vp);
    args->a_vp = vp;
    ASSERT(cr != NULL);
    crhold(cr);
    args->a_cred = cr;
    args->a_io = NFS4_PUTAPAGE;
    args->a_nfs4_putapage = putapage;
    args->a_nfs4_pp = pp;
    args->a_nfs4_off = off;
    args->a_nfs4_len = (uint_t)len;
    args->a_nfs4_flags = flags;

    mutex_enter(&mi->mi_async_lock);

    /*
     * If asyncio has been disabled, then make a synchronous request.
     * This check is done a second time in case async io was disabled
     * while this thread was blocked waiting for memory pressure to
     * reduce or for the queue to drain.
     */
    if (mi->mi_max_threads == 0) {
        mutex_exit(&mi->mi_async_lock);

        VN_RELE(vp);
        crfree(cr);
        kmem_free(args, sizeof (*args));
        goto noasync;
    }

    /*
     * Link request structure into the async list and
     * wakeup async thread to do the i/o.
     */
    if (mi->mi_async_reqs[NFS4_PUTAPAGE] == NULL) {
        mi->mi_async_reqs[NFS4_PUTAPAGE] = args;
        mi->mi_async_tail[NFS4_PUTAPAGE] = args;
    } else {
        mi->mi_async_tail[NFS4_PUTAPAGE]->a_next = args;
        mi->mi_async_tail[NFS4_PUTAPAGE] = args;
    }

    mutex_enter(&rp->r_statelock);
    rp->r_count++;
    rp->r_awcount++;
    mutex_exit(&rp->r_statelock);

    if (mi->mi_io_kstats) {
        mutex_enter(&mi->mi_lock);
        kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
        mutex_exit(&mi->mi_lock);
    }

    mi->mi_async_req_count++;
    ASSERT(mi->mi_async_req_count != 0);
    cv_signal(&mi->mi_async_reqs_cv);
    mutex_exit(&mi->mi_async_lock);
    return (0);

noasync:

    if (curproc == proc_pageout || curproc == proc_fsflush ||
        nfs_zone() == mi->mi_zone) {
        /*
         * If we get here in the context of the pageout/fsflush,
         * or we have run out of memory, or we're attempting to
         * unmount, we refuse to do a sync write, because this may
         * hang pageout/fsflush and the machine. In this case,
         * we just re-mark the page as dirty and punt on the page.
         *
         * Make sure B_FORCE isn't set. We can re-mark the
         * pages as dirty and unlock the pages in one swoop by
         * passing in B_ERROR to pvn_write_done(). However,
         * we should make sure B_FORCE isn't set - we don't
         * want the page tossed before it gets written out.
         */
        if (flags & B_FORCE)
            flags &= ~(B_INVAL | B_FORCE);
        pvn_write_done(pp, flags | B_ERROR);
        return (0);
    }

    /*
     * We'll get here only if (nfs_zone() != mi->mi_zone)
     * which means that this was a cross-zone sync putpage.
     *
     * We pass in B_ERROR to pvn_write_done() to re-mark the pages
     * as dirty and unlock them.
     *
     * We don't want to clear B_FORCE here as the caller presumably
     * knows what they're doing if they set it.
     */
    pvn_write_done(pp, flags | B_ERROR);
    return (EPERM);
}

int
nfs4_async_pageio(vnode_t *vp, page_t *pp, u_offset_t io_off, size_t io_len,
    int flags, cred_t *cr, int (*pageio)(vnode_t *, page_t *, u_offset_t,
    size_t, int, cred_t *))
{
    rnode4_t *rp;
    mntinfo4_t *mi;
    struct nfs4_async_reqs *args;

    ASSERT(flags & B_ASYNC);
    ASSERT(vp->v_vfsp != NULL);

    rp = VTOR4(vp);
    ASSERT(rp->r_count > 0);

    mi = VTOMI4(vp);

    /*
     * If we can't allocate a request structure, do the pageio
     * request synchronously in this thread's context.
     */
    if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)
        goto noasync;

    args->a_next = NULL;
#ifdef DEBUG
    args->a_queuer = curthread;
#endif
    VN_HOLD(vp);
    args->a_vp = vp;
    ASSERT(cr != NULL);
    crhold(cr);
    args->a_cred = cr;
    args->a_io = NFS4_PAGEIO;
    args->a_nfs4_pageio = pageio;
    args->a_nfs4_pp = pp;
    args->a_nfs4_off = io_off;
    args->a_nfs4_len = (uint_t)io_len;
    args->a_nfs4_flags = flags;

    mutex_enter(&mi->mi_async_lock);

    /*
     * If asyncio has been disabled, then make a synchronous request.
     * This check is done a second time in case async io was disabled
     * while this thread was blocked waiting for memory pressure to
     * reduce or for the queue to drain.
     */
    if (mi->mi_max_threads == 0) {
        mutex_exit(&mi->mi_async_lock);

        VN_RELE(vp);
        crfree(cr);
        kmem_free(args, sizeof (*args));
        goto noasync;
    }

    /*
     * Link request structure into the async list and
     * wakeup async thread to do the i/o.
     */
    if (mi->mi_async_reqs[NFS4_PAGEIO] == NULL) {
        mi->mi_async_reqs[NFS4_PAGEIO] = args;
        mi->mi_async_tail[NFS4_PAGEIO] = args;
    } else {
        mi->mi_async_tail[NFS4_PAGEIO]->a_next = args;
        mi->mi_async_tail[NFS4_PAGEIO] = args;
    }

    mutex_enter(&rp->r_statelock);
    rp->r_count++;
    rp->r_awcount++;
    mutex_exit(&rp->r_statelock);

    if (mi->mi_io_kstats) {
        mutex_enter(&mi->mi_lock);
        kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
        mutex_exit(&mi->mi_lock);
    }

    mi->mi_async_req_count++;
    ASSERT(mi->mi_async_req_count != 0);
    cv_signal(&mi->mi_async_reqs_cv);
    mutex_exit(&mi->mi_async_lock);
    return (0);

noasync:
    /*
     * If we can't do it ASYNC, for reads we do nothing (but cleanup
     * the page list), for writes we do it synchronously, except for
     * proc_pageout/proc_fsflush as described below.
     */
    if (flags & B_READ) {
        pvn_read_done(pp, flags | B_ERROR);
        return (0);
    }
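
    /*
     * Dropping a read this way should be safe because these
     * asynchronous page-ins are effectively advisory: the pages are
     * marked with B_ERROR and released, and any page that is actually
     * needed will simply be faulted in again later through the normal,
     * synchronous path.
     */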

    if (curproc == proc_pageout || curproc == proc_fsflush) {
        /*
         * If we get here in the context of the pageout/fsflush,
         * we refuse to do a sync write, because this may hang
         * pageout/fsflush (and the machine). In this case, we just
         * re-mark the page as dirty and punt on the page.
         *
         * Make sure B_FORCE isn't set. We can re-mark the
         * pages as dirty and unlock the pages in one swoop by
         * passing in B_ERROR to pvn_write_done(). However,
         * we should make sure B_FORCE isn't set - we don't
         * want the page tossed before it gets written out.
         */
        if (flags & B_FORCE)
            flags &= ~(B_INVAL | B_FORCE);
        pvn_write_done(pp, flags | B_ERROR);
        return (0);
    }

    if (nfs_zone() != mi->mi_zone) {
        /*
         * So this was a cross-zone sync pageio. We pass in B_ERROR
         * to pvn_write_done() to re-mark the pages as dirty and unlock
         * them.
         *
         * We don't want to clear B_FORCE here as the caller presumably
         * knows what they're doing if they set it.
         */
        pvn_write_done(pp, flags | B_ERROR);
        return (EPERM);
    }
    return ((*pageio)(vp, pp, io_off, io_len, flags, cr));
}

void
nfs4_async_readdir(vnode_t *vp, rddir4_cache *rdc, cred_t *cr,
    int (*readdir)(vnode_t *, rddir4_cache *, cred_t *))
{
    rnode4_t *rp;
    mntinfo4_t *mi;
    struct nfs4_async_reqs *args;

    rp = VTOR4(vp);
    ASSERT(rp->r_freef == NULL);

    mi = VTOMI4(vp);

    /*
     * If we can't allocate a request structure, skip the readdir.
     */
    if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL)
        goto noasync;

    args->a_next = NULL;
#ifdef DEBUG
    args->a_queuer = curthread;
#endif
    VN_HOLD(vp);
    args->a_vp = vp;
    ASSERT(cr != NULL);
    crhold(cr);
    args->a_cred = cr;
    args->a_io = NFS4_READDIR;
    args->a_nfs4_readdir = readdir;
    args->a_nfs4_rdc = rdc;

    mutex_enter(&mi->mi_async_lock);

    /*
     * If asyncio has been disabled, then skip this request
     */
    if (mi->mi_max_threads == 0) {
        mutex_exit(&mi->mi_async_lock);

        VN_RELE(vp);
        crfree(cr);
        kmem_free(args, sizeof (*args));
        goto noasync;
    }

    /*
     * Link request structure into the async list and
     * wakeup async thread to do the i/o.
     */
    if (mi->mi_async_reqs[NFS4_READDIR] == NULL) {
        mi->mi_async_reqs[NFS4_READDIR] = args;
        mi->mi_async_tail[NFS4_READDIR] = args;
    } else {
        mi->mi_async_tail[NFS4_READDIR]->a_next = args;
        mi->mi_async_tail[NFS4_READDIR] = args;
    }

    mutex_enter(&rp->r_statelock);
    rp->r_count++;
    mutex_exit(&rp->r_statelock);

    if (mi->mi_io_kstats) {
        mutex_enter(&mi->mi_lock);
        kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats));
        mutex_exit(&mi->mi_lock);
    }

    mi->mi_async_req_count++;
    ASSERT(mi->mi_async_req_count != 0);
    cv_signal(&mi->mi_async_reqs_cv);
    mutex_exit(&mi->mi_async_lock);
    return;

noasync:
    mutex_enter(&rp->r_statelock);
    rdc->entries = NULL;
    /*
     * Indicate that no one is trying to fill this entry and
     * it still needs to be filled.
     */
    rdc->flags &= ~RDDIR;
    rdc->flags |= RDDIRREQ;
    rddir4_cache_rele(rp, rdc);
    mutex_exit(&rp->r_statelock);
}
1980 */ 1981 if ((args = kmem_alloc(sizeof (*args), KM_NOSLEEP)) == NULL) 1982 goto noasync; 1983 1984 args->a_next = NULL; 1985 #ifdef DEBUG 1986 args->a_queuer = curthread; 1987 #endif 1988 VN_HOLD(vp); 1989 args->a_vp = vp; 1990 ASSERT(cr != NULL); 1991 crhold(cr); 1992 args->a_cred = cr; 1993 args->a_io = NFS4_COMMIT; 1994 args->a_nfs4_commit = commit; 1995 args->a_nfs4_plist = plist; 1996 args->a_nfs4_offset = offset; 1997 args->a_nfs4_count = count; 1998 1999 mutex_enter(&mi->mi_async_lock); 2000 2001 /* 2002 * If asyncio has been disabled, then make a synchronous request. 2003 * This check is done a second time in case async io was disabled 2004 * while this thread was blocked waiting for memory pressure to 2005 * reduce or for the queue to drain. 2006 */ 2007 if (mi->mi_max_threads == 0) { 2008 mutex_exit(&mi->mi_async_lock); 2009 2010 VN_RELE(vp); 2011 crfree(cr); 2012 kmem_free(args, sizeof (*args)); 2013 goto noasync; 2014 } 2015 2016 /* 2017 * Link request structure into the async list and 2018 * wakeup async thread to do the i/o. 2019 */ 2020 if (mi->mi_async_reqs[NFS4_COMMIT] == NULL) { 2021 mi->mi_async_reqs[NFS4_COMMIT] = args; 2022 mi->mi_async_tail[NFS4_COMMIT] = args; 2023 } else { 2024 mi->mi_async_tail[NFS4_COMMIT]->a_next = args; 2025 mi->mi_async_tail[NFS4_COMMIT] = args; 2026 } 2027 2028 mutex_enter(&rp->r_statelock); 2029 rp->r_count++; 2030 mutex_exit(&rp->r_statelock); 2031 2032 if (mi->mi_io_kstats) { 2033 mutex_enter(&mi->mi_lock); 2034 kstat_waitq_enter(KSTAT_IO_PTR(mi->mi_io_kstats)); 2035 mutex_exit(&mi->mi_lock); 2036 } 2037 2038 mi->mi_async_req_count++; 2039 ASSERT(mi->mi_async_req_count != 0); 2040 cv_signal(&mi->mi_async_reqs_cv); 2041 mutex_exit(&mi->mi_async_lock); 2042 return; 2043 2044 noasync: 2045 if (curproc == proc_pageout || curproc == proc_fsflush || 2046 nfs_zone() != mi->mi_zone) { 2047 while (plist != NULL) { 2048 pp = plist; 2049 page_sub(&plist, pp); 2050 pp->p_fsdata = C_COMMIT; 2051 page_unlock(pp); 2052 } 2053 return; 2054 } 2055 (*commit)(vp, plist, offset, count, cr); 2056 } 2057 2058 /* 2059 * nfs4_async_inactive - hand off a VOP_INACTIVE call to a thread. The 2060 * reference to the vnode is handed over to the thread; the caller should 2061 * no longer refer to the vnode. 2062 * 2063 * Unlike most of the async routines, this handoff is needed for 2064 * correctness reasons, not just performance. So doing operations in the 2065 * context of the current thread is not an option. 2066 */ 2067 void 2068 nfs4_async_inactive(vnode_t *vp, cred_t *cr) 2069 { 2070 mntinfo4_t *mi; 2071 struct nfs4_async_reqs *args; 2072 boolean_t signal_inactive_thread = B_FALSE; 2073 2074 mi = VTOMI4(vp); 2075 2076 args = kmem_alloc(sizeof (*args), KM_SLEEP); 2077 args->a_next = NULL; 2078 #ifdef DEBUG 2079 args->a_queuer = curthread; 2080 #endif 2081 args->a_vp = vp; 2082 ASSERT(cr != NULL); 2083 crhold(cr); 2084 args->a_cred = cr; 2085 args->a_io = NFS4_INACTIVE; 2086 2087 /* 2088 * Note that we don't check mi->mi_max_threads here, since we 2089 * *need* to get rid of this vnode regardless of whether someone 2090 * set nfs4_max_threads to zero in /etc/system. 2091 * 2092 * The manager thread knows about this and is willing to create 2093 * at least one thread to accommodate us.
2094 */ 2095 mutex_enter(&mi->mi_async_lock); 2096 if (mi->mi_inactive_thread == NULL) { 2097 rnode4_t *rp; 2098 vnode_t *unldvp = NULL; 2099 char *unlname; 2100 cred_t *unlcred; 2101 2102 mutex_exit(&mi->mi_async_lock); 2103 /* 2104 * We just need to free up the memory associated with the 2105 * vnode, which can be safely done from within the current 2106 * context. 2107 */ 2108 crfree(cr); /* drop our reference */ 2109 kmem_free(args, sizeof (*args)); 2110 rp = VTOR4(vp); 2111 mutex_enter(&rp->r_statelock); 2112 if (rp->r_unldvp != NULL) { 2113 unldvp = rp->r_unldvp; 2114 rp->r_unldvp = NULL; 2115 unlname = rp->r_unlname; 2116 rp->r_unlname = NULL; 2117 unlcred = rp->r_unlcred; 2118 rp->r_unlcred = NULL; 2119 } 2120 mutex_exit(&rp->r_statelock); 2121 /* 2122 * No need to explicitly throw away any cached pages. The 2123 * eventual r4inactive() will attempt a synchronous 2124 * VOP_PUTPAGE() which will immediately fail since the request 2125 * is coming from the wrong zone, and then will proceed to call 2126 * nfs4_invalidate_pages() which will clean things up for us. 2127 * 2128 * Throw away the delegation here so rp4_addfree()'s attempt to 2129 * return any existing delegations becomes a no-op. 2130 */ 2131 if (rp->r_deleg_type != OPEN_DELEGATE_NONE) { 2132 (void) nfs_rw_enter_sig(&mi->mi_recovlock, RW_READER, 2133 FALSE); 2134 (void) nfs4delegreturn(rp, NFS4_DR_DISCARD); 2135 nfs_rw_exit(&mi->mi_recovlock); 2136 } 2137 nfs4_clear_open_streams(rp); 2138 2139 rp4_addfree(rp, cr); 2140 if (unldvp != NULL) { 2141 kmem_free(unlname, MAXNAMELEN); 2142 VN_RELE(unldvp); 2143 crfree(unlcred); 2144 } 2145 return; 2146 } 2147 2148 if (mi->mi_manager_thread == NULL) { 2149 /* 2150 * We want to talk to the inactive thread. 2151 */ 2152 signal_inactive_thread = B_TRUE; 2153 } 2154 2155 /* 2156 * Enqueue the vnode and wake up either the special thread (empty 2157 * list) or an async thread. 2158 */ 2159 if (mi->mi_async_reqs[NFS4_INACTIVE] == NULL) { 2160 mi->mi_async_reqs[NFS4_INACTIVE] = args; 2161 mi->mi_async_tail[NFS4_INACTIVE] = args; 2162 signal_inactive_thread = B_TRUE; 2163 } else { 2164 mi->mi_async_tail[NFS4_INACTIVE]->a_next = args; 2165 mi->mi_async_tail[NFS4_INACTIVE] = args; 2166 } 2167 if (signal_inactive_thread) { 2168 cv_signal(&mi->mi_inact_req_cv); 2169 } else { 2170 mi->mi_async_req_count++; 2171 ASSERT(mi->mi_async_req_count != 0); 2172 cv_signal(&mi->mi_async_reqs_cv); 2173 } 2174 2175 mutex_exit(&mi->mi_async_lock); 2176 } 2177 2178 int 2179 writerp4(rnode4_t *rp, caddr_t base, int tcount, struct uio *uio, int pgcreated) 2180 { 2181 int pagecreate; 2182 int n; 2183 int saved_n; 2184 caddr_t saved_base; 2185 u_offset_t offset; 2186 int error; 2187 int sm_error; 2188 vnode_t *vp = RTOV(rp); 2189 2190 ASSERT(tcount <= MAXBSIZE && tcount <= uio->uio_resid); 2191 ASSERT(nfs_rw_lock_held(&rp->r_rwlock, RW_WRITER)); 2192 if (!vpm_enable) { 2193 ASSERT(((uintptr_t)base & MAXBOFFSET) + tcount <= MAXBSIZE); 2194 } 2195 2196 /* 2197 * Move bytes in at most PAGESIZE chunks. We must avoid 2198 * spanning pages in uiomove() because page faults may cause 2199 * the cache to be invalidated out from under us. The r_size is not 2200 * updated until after the uiomove. If we push the last page of a 2201 * file before r_size is correct, we will lose the data written past 2202 * the current (and invalid) r_size. 
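 *
 * For example (illustrative, assuming a 4 Kbyte PAGESIZE): a write
 * of tcount == 6000 bytes starting at offset 100 is moved as two
 * chunks, n == 3996 and then n == 2004, so no single uiomove()
 * ever spans a page boundary.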
2203 */ 2204 do { 2205 offset = uio->uio_loffset; 2206 pagecreate = 0; 2207 2208 /* 2209 * n is the number of bytes required to satisfy the request 2210 * or the number of bytes to fill out the page. 2211 */ 2212 n = (int)MIN((PAGESIZE - (offset & PAGEOFFSET)), tcount); 2213 2214 /* 2215 * Check to see if we can skip reading in the page 2216 * and just allocate the memory. We can do this 2217 * if we are going to rewrite the entire mapping 2218 * or if we are going to write to or beyond the current 2219 * end of file from the beginning of the mapping. 2220 * 2221 * The read of r_size is now protected by r_statelock. 2222 */ 2223 mutex_enter(&rp->r_statelock); 2224 /* 2225 * When pgcreated is nonzero the caller has already done 2226 * a segmap_getmapflt with forcefault 0 and S_WRITE. With 2227 * segkpm this means we already have at least one page 2228 * created and mapped at base. 2229 */ 2230 pagecreate = pgcreated || 2231 ((offset & PAGEOFFSET) == 0 && 2232 (n == PAGESIZE || ((offset + n) >= rp->r_size))); 2233 2234 mutex_exit(&rp->r_statelock); 2235 2236 if (!vpm_enable && pagecreate) { 2237 /* 2238 * The last argument tells segmap_pagecreate() to 2239 * always lock the page, as opposed to sometimes 2240 * returning with the page locked. This way we avoid a 2241 * fault on the ensuing uiomove(), but also 2242 * more importantly (to fix bug 1094402) we can 2243 * call segmap_fault() to unlock the page in all 2244 * cases. An alternative would be to modify 2245 * segmap_pagecreate() to tell us when it is 2246 * locking a page, but that's a fairly major 2247 * interface change. 2248 */ 2249 if (pgcreated == 0) 2250 (void) segmap_pagecreate(segkmap, base, 2251 (uint_t)n, 1); 2252 saved_base = base; 2253 saved_n = n; 2254 } 2255 2256 /* 2257 * The number of bytes of data in the last page cannot 2258 * be accurately determined while the page is being 2259 * uiomove'd to and the size of the file is being updated. 2260 * Thus, inform threads which need to know accurately 2261 * how much data is in the last page of the file. They 2262 * will not do the i/o immediately, but will arrange for 2263 * the i/o to happen later when this modify operation 2264 * has finished. 2265 */ 2266 ASSERT(!(rp->r_flags & R4MODINPROGRESS)); 2267 mutex_enter(&rp->r_statelock); 2268 rp->r_flags |= R4MODINPROGRESS; 2269 rp->r_modaddr = (offset & MAXBMASK); 2270 mutex_exit(&rp->r_statelock); 2271 2272 if (vpm_enable) { 2273 /* 2274 * Copy data. If new pages are created, part of 2275 * the page that is not written will be initialized 2276 * with zeros. 2277 */ 2278 error = vpm_data_copy(vp, offset, n, uio, 2279 !pagecreate, NULL, 0, S_WRITE); 2280 } else { 2281 error = uiomove(base, n, UIO_WRITE, uio); 2282 } 2283 2284 /* 2285 * r_size is the maximum number of 2286 * bytes known to be in the file. 2287 * Make sure it is at least as high as the 2288 * first unwritten byte pointed to by uio_loffset. 2289 */ 2290 mutex_enter(&rp->r_statelock); 2291 if (rp->r_size < uio->uio_loffset) 2292 rp->r_size = uio->uio_loffset; 2293 rp->r_flags &= ~R4MODINPROGRESS; 2294 rp->r_flags |= R4DIRTY; 2295 mutex_exit(&rp->r_statelock); 2296 2297 /* n = # of bytes written */ 2298 n = (int)(uio->uio_loffset - offset); 2299 2300 if (!vpm_enable) { 2301 base += n; 2302 } 2303 2304 tcount -= n; 2305 /* 2306 * If we created pages w/o initializing them completely, 2307 * we need to zero the part that wasn't set up. 2308 * This happens in most EOF write cases and if 2309 * we had some sort of error during the uiomove.
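 *
 * For example (illustrative): if a newly created page received only
 * n == 100 bytes from uiomove(), the kzero() below clears the
 * remaining PAGESIZE - 100 bytes, since base has already been
 * advanced past the bytes actually written.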
2310 */ 2311 if (!vpm_enable && pagecreate) { 2312 if ((uio->uio_loffset & PAGEOFFSET) || n == 0) 2313 (void) kzero(base, PAGESIZE - n); 2314 2315 if (pgcreated) { 2316 /* 2317 * Caller is responsible for this page, 2318 * it was not created in this loop. 2319 */ 2320 pgcreated = 0; 2321 } else { 2322 /* 2323 * For bug 1094402: segmap_pagecreate locks 2324 * page. Unlock it. This also unlocks the 2325 * pages allocated by page_create_va() in 2326 * segmap_pagecreate(). 2327 */ 2328 sm_error = segmap_fault(kas.a_hat, segkmap, 2329 saved_base, saved_n, 2330 F_SOFTUNLOCK, S_WRITE); 2331 if (error == 0) 2332 error = sm_error; 2333 } 2334 } 2335 } while (tcount > 0 && error == 0); 2336 2337 return (error); 2338 } 2339 2340 int 2341 nfs4_putpages(vnode_t *vp, u_offset_t off, size_t len, int flags, cred_t *cr) 2342 { 2343 rnode4_t *rp; 2344 page_t *pp; 2345 u_offset_t eoff; 2346 u_offset_t io_off; 2347 size_t io_len; 2348 int error; 2349 int rdirty; 2350 int err; 2351 2352 rp = VTOR4(vp); 2353 ASSERT(rp->r_count > 0); 2354 2355 if (!nfs4_has_pages(vp)) 2356 return (0); 2357 2358 ASSERT(vp->v_type != VCHR); 2359 2360 /* 2361 * If R4OUTOFSPACE is set, then all writes turn into B_INVAL 2362 * writes. B_FORCE is set to force the VM system to actually 2363 * invalidate the pages, even if the i/o failed. The pages 2364 * need to get invalidated because they can't be written out 2365 * because there isn't any space left on either the server's 2366 * file system or in the user's disk quota. The B_FREE bit 2367 * is cleared to avoid confusion as to whether this is a 2368 * request to place the page on the freelist or to destroy 2369 * it. 2370 */ 2371 if ((rp->r_flags & R4OUTOFSPACE) || 2372 (vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)) 2373 flags = (flags & ~B_FREE) | B_INVAL | B_FORCE; 2374 2375 if (len == 0) { 2376 /* 2377 * If doing a full file synchronous operation, then clear 2378 * the R4DIRTY bit. If a page gets dirtied while the flush 2379 * is happening, then R4DIRTY will get set again. The 2380 * R4DIRTY bit must get cleared before the flush so that 2381 * we don't lose this information. 2382 */ 2383 if (off == (u_offset_t)0 && 2384 !(flags & B_ASYNC) && 2385 (rp->r_flags & R4DIRTY)) { 2386 mutex_enter(&rp->r_statelock); 2387 rdirty = (rp->r_flags & R4DIRTY); 2388 rp->r_flags &= ~R4DIRTY; 2389 mutex_exit(&rp->r_statelock); 2390 } else 2391 rdirty = 0; 2392 2393 /* 2394 * Search the entire vp list for pages >= off, and flush 2395 * the dirty pages. 2396 */ 2397 error = pvn_vplist_dirty(vp, off, rp->r_putapage, 2398 flags, cr); 2399 2400 /* 2401 * If an error occurred and the file was marked as dirty 2402 * before and we aren't forcibly invalidating pages, then 2403 * reset the R4DIRTY flag. 2404 */ 2405 if (error && rdirty && 2406 (flags & (B_INVAL | B_FORCE)) != (B_INVAL | B_FORCE)) { 2407 mutex_enter(&rp->r_statelock); 2408 rp->r_flags |= R4DIRTY; 2409 mutex_exit(&rp->r_statelock); 2410 } 2411 } else { 2412 /* 2413 * Do a range from [off...off + len) looking for pages 2414 * to deal with. 2415 */ 2416 error = 0; 2417 io_len = 0; 2418 eoff = off + len; 2419 mutex_enter(&rp->r_statelock); 2420 for (io_off = off; io_off < eoff && io_off < rp->r_size; 2421 io_off += io_len) { 2422 mutex_exit(&rp->r_statelock); 2423 /* 2424 * If we are not invalidating pages and not 2425 * synchronously freeing or writing them, use 2426 * page_lookup_nowait() to prevent them from being 2427 * reclaimed from the free list.
2428 */ 2429 if ((flags & B_INVAL) || !(flags & B_ASYNC)) { 2430 pp = page_lookup(vp, io_off, 2431 (flags & (B_INVAL | B_FREE)) ? 2432 SE_EXCL : SE_SHARED); 2433 } else { 2434 pp = page_lookup_nowait(vp, io_off, 2435 (flags & B_FREE) ? SE_EXCL : SE_SHARED); 2436 } 2437 2438 if (pp == NULL || !pvn_getdirty(pp, flags)) 2439 io_len = PAGESIZE; 2440 else { 2441 err = (*rp->r_putapage)(vp, pp, &io_off, 2442 &io_len, flags, cr); 2443 if (!error) 2444 error = err; 2445 /* 2446 * "io_off" and "io_len" are returned as 2447 * the range of pages we actually wrote. 2448 * This allows us to skip ahead more quickly 2449 * since several pages may've been dealt 2450 * with by this iteration of the loop. 2451 */ 2452 } 2453 mutex_enter(&rp->r_statelock); 2454 } 2455 mutex_exit(&rp->r_statelock); 2456 } 2457 2458 return (error); 2459 } 2460 2461 void 2462 nfs4_invalidate_pages(vnode_t *vp, u_offset_t off, cred_t *cr) 2463 { 2464 rnode4_t *rp; 2465 2466 rp = VTOR4(vp); 2467 if (IS_SHADOW(vp, rp)) 2468 vp = RTOV4(rp); 2469 mutex_enter(&rp->r_statelock); 2470 while (rp->r_flags & R4TRUNCATE) 2471 cv_wait(&rp->r_cv, &rp->r_statelock); 2472 rp->r_flags |= R4TRUNCATE; 2473 if (off == (u_offset_t)0) { 2474 rp->r_flags &= ~R4DIRTY; 2475 if (!(rp->r_flags & R4STALE)) 2476 rp->r_error = 0; 2477 } 2478 rp->r_truncaddr = off; 2479 mutex_exit(&rp->r_statelock); 2480 (void) pvn_vplist_dirty(vp, off, rp->r_putapage, 2481 B_INVAL | B_TRUNC, cr); 2482 mutex_enter(&rp->r_statelock); 2483 rp->r_flags &= ~R4TRUNCATE; 2484 cv_broadcast(&rp->r_cv); 2485 mutex_exit(&rp->r_statelock); 2486 } 2487 2488 static int 2489 nfs4_mnt_kstat_update(kstat_t *ksp, int rw) 2490 { 2491 mntinfo4_t *mi; 2492 struct mntinfo_kstat *mik; 2493 vfs_t *vfsp; 2494 2495 /* this is a read-only kstat. Bail out on a write */ 2496 if (rw == KSTAT_WRITE) 2497 return (EACCES); 2498 2499 2500 /* 2501 * We don't want to wait here as kstat_chain_lock could be held by 2502 * dounmount(). dounmount() takes vfs_reflock before the chain lock 2503 * and thus could lead to a deadlock. 2504 */ 2505 vfsp = (struct vfs *)ksp->ks_private; 2506 2507 mi = VFTOMI4(vfsp); 2508 mik = (struct mntinfo_kstat *)ksp->ks_data; 2509 2510 (void) strcpy(mik->mik_proto, mi->mi_curr_serv->sv_knconf->knc_proto); 2511 2512 mik->mik_vers = (uint32_t)mi->mi_vers; 2513 mik->mik_flags = mi->mi_flags; 2514 /* 2515 * The sv_secdata holds the flavor the client specifies. 2516 * If the client uses default and a security negotiation 2517 * occurs, sv_currsec will point to the current flavor 2518 * selected from the server flavor list. 2519 * sv_currsec is NULL if no security negotiation takes place. 2520 */ 2521 mik->mik_secmod = mi->mi_curr_serv->sv_currsec ? 
2522 mi->mi_curr_serv->sv_currsec->secmod : 2523 mi->mi_curr_serv->sv_secdata->secmod; 2524 mik->mik_curread = (uint32_t)mi->mi_curread; 2525 mik->mik_curwrite = (uint32_t)mi->mi_curwrite; 2526 mik->mik_retrans = mi->mi_retrans; 2527 mik->mik_timeo = mi->mi_timeo; 2528 mik->mik_acregmin = HR2SEC(mi->mi_acregmin); 2529 mik->mik_acregmax = HR2SEC(mi->mi_acregmax); 2530 mik->mik_acdirmin = HR2SEC(mi->mi_acdirmin); 2531 mik->mik_acdirmax = HR2SEC(mi->mi_acdirmax); 2532 mik->mik_noresponse = (uint32_t)mi->mi_noresponse; 2533 mik->mik_failover = (uint32_t)mi->mi_failover; 2534 mik->mik_remap = (uint32_t)mi->mi_remap; 2535 2536 (void) strcpy(mik->mik_curserver, mi->mi_curr_serv->sv_hostname); 2537 2538 return (0); 2539 } 2540 2541 void 2542 nfs4_mnt_kstat_init(struct vfs *vfsp) 2543 { 2544 mntinfo4_t *mi = VFTOMI4(vfsp); 2545 2546 /* 2547 * PSARC 2001/697 Contract Private Interface 2548 * All nfs kstats are under SunMC contract 2549 * Please refer to the PSARC listed above and contact 2550 * SunMC before making any changes! 2551 * 2552 * Changes must be reviewed by Solaris File Sharing 2553 * Changes must be communicated to contract-2001-697@sun.com 2554 * 2555 */ 2556 2557 mi->mi_io_kstats = kstat_create_zone("nfs", getminor(vfsp->vfs_dev), 2558 NULL, "nfs", KSTAT_TYPE_IO, 1, 0, mi->mi_zone->zone_id); 2559 if (mi->mi_io_kstats) { 2560 if (mi->mi_zone->zone_id != GLOBAL_ZONEID) 2561 kstat_zone_add(mi->mi_io_kstats, GLOBAL_ZONEID); 2562 mi->mi_io_kstats->ks_lock = &mi->mi_lock; 2563 kstat_install(mi->mi_io_kstats); 2564 } 2565 2566 if ((mi->mi_ro_kstats = kstat_create_zone("nfs", 2567 getminor(vfsp->vfs_dev), "mntinfo", "misc", KSTAT_TYPE_RAW, 2568 sizeof (struct mntinfo_kstat), 0, mi->mi_zone->zone_id)) != NULL) { 2569 if (mi->mi_zone->zone_id != GLOBAL_ZONEID) 2570 kstat_zone_add(mi->mi_ro_kstats, GLOBAL_ZONEID); 2571 mi->mi_ro_kstats->ks_update = nfs4_mnt_kstat_update; 2572 mi->mi_ro_kstats->ks_private = (void *)vfsp; 2573 kstat_install(mi->mi_ro_kstats); 2574 } 2575 2576 nfs4_mnt_recov_kstat_init(vfsp); 2577 } 2578 2579 void 2580 nfs4_write_error(vnode_t *vp, int error, cred_t *cr) 2581 { 2582 mntinfo4_t *mi; 2583 2584 mi = VTOMI4(vp); 2585 /* 2586 * In case of forced unmount, do not print any messages 2587 * since they can flood the console with error messages. 2588 */ 2589 if (mi->mi_vfsp->vfs_flag & VFS_UNMOUNTED) 2590 return; 2591 2592 /* 2593 * If the mount point is dead and not recoverable, do not 2594 * print error messages that can flood the console. 2595 */ 2596 if (mi->mi_flags & MI4_RECOV_FAIL) 2597 return; 2598 2599 /* 2600 * No use in flooding the console with ENOSPC 2601 * messages from the same file system.
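 * The code below throttles them: once an ENOSPC/EDQUOT message has
 * been printed, mi_printftime is pushed nfs_write_error_interval
 * seconds into the future and further such messages are suppressed
 * until that time arrives.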
2602 */ 2603 if ((error != ENOSPC && error != EDQUOT) || 2604 lbolt - mi->mi_printftime > 0) { 2605 zoneid_t zoneid = mi->mi_zone->zone_id; 2606 2607 #ifdef DEBUG 2608 nfs_perror(error, "NFS%ld write error on host %s: %m.\n", 2609 mi->mi_vers, VTOR4(vp)->r_server->sv_hostname, NULL); 2610 #else 2611 nfs_perror(error, "NFS write error on host %s: %m.\n", 2612 VTOR4(vp)->r_server->sv_hostname, NULL); 2613 #endif 2614 if (error == ENOSPC || error == EDQUOT) { 2615 zcmn_err(zoneid, CE_CONT, 2616 "^File: userid=%d, groupid=%d\n", 2617 crgetuid(cr), crgetgid(cr)); 2618 if (crgetuid(curthread->t_cred) != crgetuid(cr) || 2619 crgetgid(curthread->t_cred) != crgetgid(cr)) { 2620 zcmn_err(zoneid, CE_CONT, 2621 "^User: userid=%d, groupid=%d\n", 2622 crgetuid(curthread->t_cred), 2623 crgetgid(curthread->t_cred)); 2624 } 2625 mi->mi_printftime = lbolt + 2626 nfs_write_error_interval * hz; 2627 } 2628 sfh4_printfhandle(VTOR4(vp)->r_fh); 2629 #ifdef DEBUG 2630 if (error == EACCES) { 2631 zcmn_err(zoneid, CE_CONT, 2632 "nfs_bio: cred is%s kcred\n", 2633 cr == kcred ? "" : " not"); 2634 } 2635 #endif 2636 } 2637 } 2638 2639 /* 2640 * Return non-zero if the given file can be safely memory mapped. Locks 2641 * are safe if whole-file (length and offset are both zero). 2642 */ 2643 2644 #define SAFE_LOCK(flk) ((flk).l_start == 0 && (flk).l_len == 0) 2645 2646 static int 2647 nfs4_safemap(const vnode_t *vp) 2648 { 2649 locklist_t *llp, *next_llp; 2650 int safe = 1; 2651 rnode4_t *rp = VTOR4(vp); 2652 2653 ASSERT(nfs_rw_lock_held(&rp->r_lkserlock, RW_WRITER)); 2654 2655 NFS4_DEBUG(nfs4_client_map_debug, (CE_NOTE, "nfs4_safemap: " 2656 "vp = %p", (void *)vp)); 2657 2658 /* 2659 * Review all the locks for the vnode, both ones that have been 2660 * acquired and ones that are pending. We assume that 2661 * flk_active_locks_for_vp() has merged any locks that can be 2662 * merged (so that if a process has the entire file locked, it is 2663 * represented as a single lock). 2664 * 2665 * Note that we can't bail out of the loop if we find a non-safe 2666 * lock, because we have to free all the elements in the llp list. 2667 * We might be able to speed up this code slightly by not looking 2668 * at each lock's l_start and l_len fields once we've found a 2669 * non-safe lock. 2670 */ 2671 2672 llp = flk_active_locks_for_vp(vp); 2673 while (llp) { 2674 NFS4_DEBUG(nfs4_client_map_debug, (CE_NOTE, 2675 "nfs4_safemap: active lock (%" PRId64 ", %" PRId64 ")", 2676 llp->ll_flock.l_start, llp->ll_flock.l_len)); 2677 if (!SAFE_LOCK(llp->ll_flock)) { 2678 safe = 0; 2679 NFS4_DEBUG(nfs4_client_map_debug, (CE_NOTE, 2680 "nfs4_safemap: unsafe active lock (%" PRId64 2681 ", %" PRId64 ")", llp->ll_flock.l_start, 2682 llp->ll_flock.l_len)); 2683 } 2684 next_llp = llp->ll_next; 2685 VN_RELE(llp->ll_vp); 2686 kmem_free(llp, sizeof (*llp)); 2687 llp = next_llp; 2688 } 2689 2690 NFS4_DEBUG(nfs4_client_map_debug, (CE_NOTE, "nfs4_safemap: %s", 2691 safe ? "safe" : "unsafe")); 2692 return (safe); 2693 } 2694 2695 /* 2696 * Return whether there is a lost LOCK or LOCKU queued up for the given 2697 * file that would make an mmap request unsafe. cf. nfs4_safemap(). 
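 *
 * As in nfs4_safemap(), only a whole-file lock is considered safe:
 * e.g. (illustrative) a lost request whose lock has l_start == 0 and
 * l_len == 0 passes SAFE_LOCK(), while one covering l_start == 0,
 * l_len == 100 does not, and is reported as a conflict.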
2698 */ 2699 2700 bool_t 2701 nfs4_map_lost_lock_conflict(vnode_t *vp) 2702 { 2703 bool_t conflict = FALSE; 2704 nfs4_lost_rqst_t *lrp; 2705 mntinfo4_t *mi = VTOMI4(vp); 2706 2707 mutex_enter(&mi->mi_lock); 2708 for (lrp = list_head(&mi->mi_lost_state); lrp != NULL; 2709 lrp = list_next(&mi->mi_lost_state, lrp)) { 2710 if (lrp->lr_op != OP_LOCK && lrp->lr_op != OP_LOCKU) 2711 continue; 2712 ASSERT(lrp->lr_vp != NULL); 2713 if (!VOP_CMP(lrp->lr_vp, vp)) 2714 continue; /* different file */ 2715 if (!SAFE_LOCK(*lrp->lr_flk)) { 2716 conflict = TRUE; 2717 break; 2718 } 2719 } 2720 2721 mutex_exit(&mi->mi_lock); 2722 return (conflict); 2723 } 2724 2725 /* 2726 * nfs4_lockcompletion: 2727 * 2728 * If the vnode has a lock that makes it unsafe to cache the file, mark it 2729 * as non-cacheable (set the VNOCACHE bit). 2730 */ 2731 2732 void 2733 nfs4_lockcompletion(vnode_t *vp, int cmd) 2734 { 2735 rnode4_t *rp = VTOR4(vp); 2736 2737 ASSERT(nfs_rw_lock_held(&rp->r_lkserlock, RW_WRITER)); 2738 ASSERT(!IS_SHADOW(vp, rp)); 2739 2740 if (cmd == F_SETLK || cmd == F_SETLKW) { 2741 2742 if (!nfs4_safemap(vp)) { 2743 mutex_enter(&vp->v_lock); 2744 vp->v_flag |= VNOCACHE; 2745 mutex_exit(&vp->v_lock); 2746 } else { 2747 mutex_enter(&vp->v_lock); 2748 vp->v_flag &= ~VNOCACHE; 2749 mutex_exit(&vp->v_lock); 2750 } 2751 } 2752 /* 2753 * The cached attributes of the file are stale after acquiring 2754 * the lock on the file. They were updated when the file was 2755 * opened, but not updated when the lock was acquired. Therefore the 2756 * cached attributes are invalidated after the lock is obtained. 2757 */ 2758 PURGE_ATTRCACHE4(vp); 2759 } 2760 2761 /* ARGSUSED */ 2762 static void * 2763 nfs4_mi_init(zoneid_t zoneid) 2764 { 2765 struct mi4_globals *mig; 2766 2767 mig = kmem_alloc(sizeof (*mig), KM_SLEEP); 2768 mutex_init(&mig->mig_lock, NULL, MUTEX_DEFAULT, NULL); 2769 list_create(&mig->mig_list, sizeof (mntinfo4_t), 2770 offsetof(mntinfo4_t, mi_zone_node)); 2771 mig->mig_destructor_called = B_FALSE; 2772 return (mig); 2773 } 2774 2775 /* 2776 * Callback routine to tell all NFSv4 mounts in the zone to start tearing down 2777 * state and killing off threads. 2778 */ 2779 /* ARGSUSED */ 2780 static void 2781 nfs4_mi_shutdown(zoneid_t zoneid, void *data) 2782 { 2783 struct mi4_globals *mig = data; 2784 mntinfo4_t *mi; 2785 nfs4_server_t *np; 2786 2787 NFS4_DEBUG(nfs4_client_zone_debug, (CE_NOTE, 2788 "nfs4_mi_shutdown zone %d\n", zoneid)); 2789 ASSERT(mig != NULL); 2790 for (;;) { 2791 mutex_enter(&mig->mig_lock); 2792 mi = list_head(&mig->mig_list); 2793 if (mi == NULL) { 2794 mutex_exit(&mig->mig_lock); 2795 break; 2796 } 2797 2798 NFS4_DEBUG(nfs4_client_zone_debug, (CE_NOTE, 2799 "nfs4_mi_shutdown stopping vfs %p\n", (void *)mi->mi_vfsp)); 2800 /* 2801 * purge the DNLC for this filesystem 2802 */ 2803 (void) dnlc_purge_vfsp(mi->mi_vfsp, 0); 2804 /* 2805 * Tell existing async worker threads to exit. 2806 */ 2807 mutex_enter(&mi->mi_async_lock); 2808 mi->mi_max_threads = 0; 2809 cv_broadcast(&mi->mi_async_work_cv); 2810 /* 2811 * Set the appropriate flags, signal and wait for both the 2812 * async manager and the inactive thread to exit when they're 2813 * done with their current work.
2814 */ 2815 mutex_enter(&mi->mi_lock); 2816 mi->mi_flags |= (MI4_ASYNC_MGR_STOP|MI4_DEAD); 2817 mutex_exit(&mi->mi_lock); 2818 mutex_exit(&mi->mi_async_lock); 2819 if (mi->mi_manager_thread) { 2820 nfs4_async_manager_stop(mi->mi_vfsp); 2821 } 2822 if (mi->mi_inactive_thread) { 2823 mutex_enter(&mi->mi_async_lock); 2824 cv_signal(&mi->mi_inact_req_cv); 2825 /* 2826 * Wait for the inactive thread to exit. 2827 */ 2828 while (mi->mi_inactive_thread != NULL) { 2829 cv_wait(&mi->mi_async_cv, &mi->mi_async_lock); 2830 } 2831 mutex_exit(&mi->mi_async_lock); 2832 } 2833 /* 2834 * Wait for the recovery thread to complete; it will signal when 2835 * it is done using the "mi" structure and is about 2836 * to exit. 2837 */ 2838 mutex_enter(&mi->mi_lock); 2839 while (mi->mi_in_recovery > 0) 2840 cv_wait(&mi->mi_cv_in_recov, &mi->mi_lock); 2841 mutex_exit(&mi->mi_lock); 2842 /* 2843 * We're done when every mi has been processed, i.e. the list is 2844 * empty. This one is done, so remove it from the list. 2845 */ 2846 list_remove(&mig->mig_list, mi); 2847 mutex_exit(&mig->mig_lock); 2848 zone_rele(mi->mi_zone); 2849 /* 2850 * Release the holds on the vfs and mi taken to prevent a race 2851 * with zone shutdown. This releases the holds taken in nfs4_mi_zonelist_add. 2852 */ 2853 VFS_RELE(mi->mi_vfsp); 2854 MI4_RELE(mi); 2855 } 2856 /* 2857 * Tell each renew thread in the zone to exit 2858 */ 2859 mutex_enter(&nfs4_server_lst_lock); 2860 for (np = nfs4_server_lst.forw; np != &nfs4_server_lst; np = np->forw) { 2861 mutex_enter(&np->s_lock); 2862 if (np->zoneid == zoneid) { 2863 /* 2864 * We add another hold onto the nfs4_server_t 2865 * because this will make sure that the nfs4_server_t 2866 * stays around until nfs4_callback_fini_zone destroys 2867 * the zone. This way, the renew thread can 2868 * unconditionally release its holds on the 2869 * nfs4_server_t. 2870 */ 2871 np->s_refcnt++; 2872 nfs4_mark_srv_dead(np); 2873 } 2874 mutex_exit(&np->s_lock); 2875 } 2876 mutex_exit(&nfs4_server_lst_lock); 2877 } 2878 2879 static void 2880 nfs4_mi_free_globals(struct mi4_globals *mig) 2881 { 2882 list_destroy(&mig->mig_list); /* makes sure the list is empty */ 2883 mutex_destroy(&mig->mig_lock); 2884 kmem_free(mig, sizeof (*mig)); 2885 } 2886 2887 /* ARGSUSED */ 2888 static void 2889 nfs4_mi_destroy(zoneid_t zoneid, void *data) 2890 { 2891 struct mi4_globals *mig = data; 2892 2893 NFS4_DEBUG(nfs4_client_zone_debug, (CE_NOTE, 2894 "nfs4_mi_destroy zone %d\n", zoneid)); 2895 ASSERT(mig != NULL); 2896 mutex_enter(&mig->mig_lock); 2897 if (list_head(&mig->mig_list) != NULL) { 2898 /* Still waiting for VFS_FREEVFS() */ 2899 mig->mig_destructor_called = B_TRUE; 2900 mutex_exit(&mig->mig_lock); 2901 return; 2902 } 2903 nfs4_mi_free_globals(mig); 2904 } 2905 2906 /* 2907 * Add an NFS mount to the per-zone list of NFS mounts. 2908 */ 2909 void 2910 nfs4_mi_zonelist_add(mntinfo4_t *mi) 2911 { 2912 struct mi4_globals *mig; 2913 2914 mig = zone_getspecific(mi4_list_key, mi->mi_zone); 2915 mutex_enter(&mig->mig_lock); 2916 list_insert_head(&mig->mig_list, mi); 2917 /* 2918 * holds added to eliminate a race with zone shutdown; these will be 2919 * released in nfs4_mi_shutdown 2920 */ 2921 MI4_HOLD(mi); 2922 VFS_HOLD(mi->mi_vfsp); 2923 mutex_exit(&mig->mig_lock); 2924 } 2925 2926 /* 2927 * Remove an NFS mount from the per-zone list of NFS mounts.
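 *
 * Returns non-zero if this call removed the mount from the list,
 * zero if it had already been removed (e.g. marked MI4_DEAD by the
 * zone shutdown callback, which already released the holds).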
2928 */ 2929 int 2930 nfs4_mi_zonelist_remove(mntinfo4_t *mi) 2931 { 2932 struct mi4_globals *mig; 2933 int ret = 0; 2934 2935 mig = zone_getspecific(mi4_list_key, mi->mi_zone); 2936 mutex_enter(&mig->mig_lock); 2937 mutex_enter(&mi->mi_lock); 2938 /* if this mi is marked dead, then the zone already released it */ 2939 if (!(mi->mi_flags & MI4_DEAD)) { 2940 list_remove(&mig->mig_list, mi); 2941 2942 /* release the holds put on in zonelist_add(). */ 2943 VFS_RELE(mi->mi_vfsp); 2944 MI4_RELE(mi); 2945 ret = 1; 2946 } 2947 mutex_exit(&mi->mi_lock); 2948 2949 /* 2950 * We can be called asynchronously by VFS_FREEVFS() after the zone 2951 * shutdown/destroy callbacks have executed; if so, clean up the zone's 2952 * mi globals. 2953 */ 2954 if (list_head(&mig->mig_list) == NULL && 2955 mig->mig_destructor_called == B_TRUE) { 2956 nfs4_mi_free_globals(mig); 2957 return (ret); 2958 } 2959 mutex_exit(&mig->mig_lock); 2960 return (ret); 2961 } 2962 2963 void 2964 nfs_free_mi4(mntinfo4_t *mi) 2965 { 2966 nfs4_open_owner_t *foop; 2967 nfs4_oo_hash_bucket_t *bucketp; 2968 nfs4_debug_msg_t *msgp; 2969 int i; 2970 servinfo4_t *svp; 2971 2972 mutex_enter(&mi->mi_lock); 2973 ASSERT(mi->mi_recovthread == NULL); 2974 ASSERT(mi->mi_flags & MI4_ASYNC_MGR_STOP); 2975 mutex_exit(&mi->mi_lock); 2976 mutex_enter(&mi->mi_async_lock); 2977 ASSERT(mi->mi_threads == 0); 2978 ASSERT(mi->mi_manager_thread == NULL); 2979 mutex_exit(&mi->mi_async_lock); 2980 svp = mi->mi_servers; 2981 sv4_free(svp); 2982 if (mi->mi_io_kstats) { 2983 kstat_delete(mi->mi_io_kstats); 2984 mi->mi_io_kstats = NULL; 2985 } 2986 if (mi->mi_ro_kstats) { 2987 kstat_delete(mi->mi_ro_kstats); 2988 mi->mi_ro_kstats = NULL; 2989 } 2990 if (mi->mi_recov_ksp) { 2991 kstat_delete(mi->mi_recov_ksp); 2992 mi->mi_recov_ksp = NULL; 2993 } 2994 mutex_enter(&mi->mi_msg_list_lock); 2995 while (msgp = list_head(&mi->mi_msg_list)) { 2996 list_remove(&mi->mi_msg_list, msgp); 2997 nfs4_free_msg(msgp); 2998 } 2999 mutex_exit(&mi->mi_msg_list_lock); 3000 list_destroy(&mi->mi_msg_list); 3001 if (mi->mi_rootfh != NULL) 3002 sfh4_rele(&mi->mi_rootfh); 3003 if (mi->mi_srvparentfh != NULL) 3004 sfh4_rele(&mi->mi_srvparentfh); 3005 mutex_destroy(&mi->mi_lock); 3006 mutex_destroy(&mi->mi_async_lock); 3007 mutex_destroy(&mi->mi_msg_list_lock); 3008 nfs_rw_destroy(&mi->mi_recovlock); 3009 nfs_rw_destroy(&mi->mi_rename_lock); 3010 nfs_rw_destroy(&mi->mi_fh_lock); 3011 cv_destroy(&mi->mi_failover_cv); 3012 cv_destroy(&mi->mi_async_reqs_cv); 3013 cv_destroy(&mi->mi_async_work_cv); 3014 cv_destroy(&mi->mi_async_cv); 3015 cv_destroy(&mi->mi_inact_req_cv); 3016 /* 3017 * Destroy the oo hash lists and mutexes for the cred hash table. 3018 */ 3019 for (i = 0; i < NFS4_NUM_OO_BUCKETS; i++) { 3020 bucketp = &(mi->mi_oo_list[i]); 3021 /* Destroy any remaining open owners on the list */ 3022 foop = list_head(&bucketp->b_oo_hash_list); 3023 while (foop != NULL) { 3024 list_remove(&bucketp->b_oo_hash_list, foop); 3025 nfs4_destroy_open_owner(foop); 3026 foop = list_head(&bucketp->b_oo_hash_list); 3027 } 3028 list_destroy(&bucketp->b_oo_hash_list); 3029 mutex_destroy(&bucketp->b_lock); 3030 } 3031 /* 3032 * Empty and destroy the freed open owner list. 
3033 */ 3034 foop = list_head(&mi->mi_foo_list); 3035 while (foop != NULL) { 3036 list_remove(&mi->mi_foo_list, foop); 3037 nfs4_destroy_open_owner(foop); 3038 foop = list_head(&mi->mi_foo_list); 3039 } 3040 list_destroy(&mi->mi_foo_list); 3041 list_destroy(&mi->mi_bseqid_list); 3042 list_destroy(&mi->mi_lost_state); 3043 avl_destroy(&mi->mi_filehandles); 3044 fn_rele(&mi->mi_fname); 3045 kmem_free(mi, sizeof (*mi)); 3046 } 3047 void 3048 mi_hold(mntinfo4_t *mi) 3049 { 3050 atomic_add_32(&mi->mi_count, 1); 3051 ASSERT(mi->mi_count != 0); 3052 } 3053 3054 void 3055 mi_rele(mntinfo4_t *mi) 3056 { 3057 ASSERT(mi->mi_count != 0); 3058 if (atomic_add_32_nv(&mi->mi_count, -1) == 0) { 3059 nfs_free_mi4(mi); 3060 } 3061 } 3062 3063 vnode_t nfs4_xattr_notsupp_vnode; 3064 3065 void 3066 nfs4_clnt_init(void) 3067 { 3068 nfs4_vnops_init(); 3069 (void) nfs4_rnode_init(); 3070 (void) nfs4_shadow_init(); 3071 (void) nfs4_acache_init(); 3072 (void) nfs4_subr_init(); 3073 nfs4_acl_init(); 3074 nfs_idmap_init(); 3075 nfs4_callback_init(); 3076 nfs4_secinfo_init(); 3077 #ifdef DEBUG 3078 tsd_create(&nfs4_tsd_key, NULL); 3079 #endif 3080 3081 /* 3082 * Add a CPR callback so that we can update client 3083 * lease after a suspend and resume. 3084 */ 3085 cid = callb_add(nfs4_client_cpr_callb, 0, CB_CL_CPR_RPC, "nfs4"); 3086 3087 zone_key_create(&mi4_list_key, nfs4_mi_init, nfs4_mi_shutdown, 3088 nfs4_mi_destroy); 3089 3090 /* 3091 * Initialise the reference count of the notsupp xattr cache vnode to 1 3092 * so that it never goes away (VOP_INACTIVE isn't called on it). 3093 */ 3094 nfs4_xattr_notsupp_vnode.v_count = 1; 3095 } 3096 3097 void 3098 nfs4_clnt_fini(void) 3099 { 3100 (void) zone_key_delete(mi4_list_key); 3101 nfs4_vnops_fini(); 3102 (void) nfs4_rnode_fini(); 3103 (void) nfs4_shadow_fini(); 3104 (void) nfs4_acache_fini(); 3105 (void) nfs4_subr_fini(); 3106 nfs_idmap_fini(); 3107 nfs4_callback_fini(); 3108 nfs4_secinfo_fini(); 3109 #ifdef DEBUG 3110 tsd_destroy(&nfs4_tsd_key); 3111 #endif 3112 if (cid) 3113 (void) callb_delete(cid); 3114 } 3115 3116 /*ARGSUSED*/ 3117 static boolean_t 3118 nfs4_client_cpr_callb(void *arg, int code) 3119 { 3120 /* 3121 * We get called for Suspend and Resume events. 3122 * For the suspend case we simply don't care! 3123 */ 3124 if (code == CB_CODE_CPR_CHKPT) { 3125 return (B_TRUE); 3126 } 3127 3128 /* 3129 * When we get to here we are in the process of 3130 * resuming the system from a previous suspend. 
3131 */ 3132 nfs4_client_resumed = gethrestime_sec(); 3133 return (B_TRUE); 3134 } 3135 3136 void 3137 nfs4_renew_lease_thread(nfs4_server_t *sp) 3138 { 3139 int error = 0; 3140 time_t tmp_last_renewal_time, tmp_time, tmp_now_time, kip_secs; 3141 clock_t tick_delay = 0; 3142 clock_t time_left = 0; 3143 callb_cpr_t cpr_info; 3144 kmutex_t cpr_lock; 3145 3146 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE, 3147 "nfs4_renew_lease_thread: acting on sp 0x%p", (void*)sp)); 3148 mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL); 3149 CALLB_CPR_INIT(&cpr_info, &cpr_lock, callb_generic_cpr, "nfsv4Lease"); 3150 3151 mutex_enter(&sp->s_lock); 3152 /* sp->s_lease_time is set via a GETATTR */ 3153 sp->last_renewal_time = gethrestime_sec(); 3154 sp->lease_valid = NFS4_LEASE_UNINITIALIZED; 3155 ASSERT(sp->s_refcnt >= 1); 3156 3157 for (;;) { 3158 if (!sp->state_ref_count || 3159 sp->lease_valid != NFS4_LEASE_VALID) { 3160 3161 kip_secs = MAX((sp->s_lease_time >> 1) - 3162 (3 * sp->propagation_delay.tv_sec), 1); 3163 3164 tick_delay = SEC_TO_TICK(kip_secs); 3165 3166 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE, 3167 "nfs4_renew_lease_thread: no renew : thread " 3168 "wait %ld secs", kip_secs)); 3169 3170 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE, 3171 "nfs4_renew_lease_thread: no renew : " 3172 "state_ref_count %d, lease_valid %d", 3173 sp->state_ref_count, sp->lease_valid)); 3174 3175 mutex_enter(&cpr_lock); 3176 CALLB_CPR_SAFE_BEGIN(&cpr_info); 3177 mutex_exit(&cpr_lock); 3178 time_left = cv_timedwait(&sp->cv_thread_exit, 3179 &sp->s_lock, tick_delay + lbolt); 3180 mutex_enter(&cpr_lock); 3181 CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock); 3182 mutex_exit(&cpr_lock); 3183 3184 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE, 3185 "nfs4_renew_lease_thread: no renew: " 3186 "time left %ld", time_left)); 3187 3188 if (sp->s_thread_exit == NFS4_THREAD_EXIT) 3189 goto die; 3190 continue; 3191 } 3192 3193 tmp_last_renewal_time = sp->last_renewal_time; 3194 3195 tmp_time = gethrestime_sec() - sp->last_renewal_time + 3196 (3 * sp->propagation_delay.tv_sec); 3197 3198 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE, 3199 "nfs4_renew_lease_thread: tmp_time %ld, " 3200 "sp->last_renewal_time %ld", tmp_time, 3201 sp->last_renewal_time)); 3202 3203 kip_secs = MAX((sp->s_lease_time >> 1) - tmp_time, 1); 3204 3205 tick_delay = SEC_TO_TICK(kip_secs); 3206 3207 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE, 3208 "nfs4_renew_lease_thread: valid lease: sleep for %ld " 3209 "secs", kip_secs)); 3210 3211 mutex_enter(&cpr_lock); 3212 CALLB_CPR_SAFE_BEGIN(&cpr_info); 3213 mutex_exit(&cpr_lock); 3214 time_left = cv_timedwait(&sp->cv_thread_exit, &sp->s_lock, 3215 tick_delay + lbolt); 3216 mutex_enter(&cpr_lock); 3217 CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock); 3218 mutex_exit(&cpr_lock); 3219 3220 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE, 3221 "nfs4_renew_lease_thread: valid lease: time left %ld :" 3222 "sp last_renewal_time %ld, nfs4_client_resumed %ld, " 3223 "tmp_last_renewal_time %ld", time_left, 3224 sp->last_renewal_time, nfs4_client_resumed, 3225 tmp_last_renewal_time)); 3226 3227 if (sp->s_thread_exit == NFS4_THREAD_EXIT) 3228 goto die; 3229 3230 if (tmp_last_renewal_time == sp->last_renewal_time || 3231 (nfs4_client_resumed != 0 && 3232 nfs4_client_resumed > sp->last_renewal_time)) { 3233 /* 3234 * Issue RENEW op since we haven't renewed the lease 3235 * since we slept. 3236 */ 3237 tmp_now_time = gethrestime_sec(); 3238 error = nfs4renew(sp); 3239 /* 3240 * Need to re-acquire sp's lock; nfs4renew() 3241 * relinquishes it.
3242 */ 3243 mutex_enter(&sp->s_lock); 3244 3245 /* 3246 * See if someone changed s_thread_exit while we gave 3247 * up s_lock. 3248 */ 3249 if (sp->s_thread_exit == NFS4_THREAD_EXIT) 3250 goto die; 3251 3252 if (!error) { 3253 /* 3254 * check to see if we implicitly renewed while 3255 * we waited for a reply for our RENEW call. 3256 */ 3257 if (tmp_last_renewal_time == 3258 sp->last_renewal_time) { 3259 /* no implicit renew came */ 3260 sp->last_renewal_time = tmp_now_time; 3261 } else { 3262 NFS4_DEBUG(nfs4_client_lease_debug, 3263 (CE_NOTE, "renew_thread: did " 3264 "implicit renewal before reply " 3265 "from server for RENEW")); 3266 } 3267 } else { 3268 /* figure out error */ 3269 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE, 3270 "renew_thread: nfs4renew returned error" 3271 " %d", error)); 3272 } 3273 3274 } 3275 } 3276 3277 die: 3278 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE, 3279 "nfs4_renew_lease_thread: thread exiting")); 3280 3281 while (sp->s_otw_call_count != 0) { 3282 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE, 3283 "nfs4_renew_lease_thread: waiting for outstanding " 3284 "otw calls to finish for sp 0x%p, current " 3285 "s_otw_call_count %d", (void *)sp, 3286 sp->s_otw_call_count)); 3287 mutex_enter(&cpr_lock); 3288 CALLB_CPR_SAFE_BEGIN(&cpr_info); 3289 mutex_exit(&cpr_lock); 3290 cv_wait(&sp->s_cv_otw_count, &sp->s_lock); 3291 mutex_enter(&cpr_lock); 3292 CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock); 3293 mutex_exit(&cpr_lock); 3294 } 3295 mutex_exit(&sp->s_lock); 3296 3297 nfs4_server_rele(sp); /* free the thread's reference */ 3298 nfs4_server_rele(sp); /* free the list's reference */ 3299 sp = NULL; 3300 3301 done: 3302 mutex_enter(&cpr_lock); 3303 CALLB_CPR_EXIT(&cpr_info); /* drops cpr_lock */ 3304 mutex_destroy(&cpr_lock); 3305 3306 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE, 3307 "nfs4_renew_lease_thread: renew thread exit officially")); 3308 3309 zthread_exit(); 3310 /* NOT REACHED */ 3311 } 3312 3313 /* 3314 * Send out a RENEW op to the server. 3315 * Assumes sp is locked down. 
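 *
 * A note on the locking contract (illustrative): sp->s_lock must be
 * held on entry; nfs4renew() drops it for the over-the-wire call and
 * returns with it released, so the renew thread re-acquires it:
 *
 *	mutex_enter(&sp->s_lock);
 *	error = nfs4renew(sp);		(returns with s_lock dropped)
 *	mutex_enter(&sp->s_lock);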
3316 */ 3317 static int 3318 nfs4renew(nfs4_server_t *sp) 3319 { 3320 COMPOUND4args_clnt args; 3321 COMPOUND4res_clnt res; 3322 nfs_argop4 argop[1]; 3323 int doqueue = 1; 3324 int rpc_error; 3325 cred_t *cr; 3326 mntinfo4_t *mi; 3327 timespec_t prop_time, after_time; 3328 int needrecov = FALSE; 3329 nfs4_recov_state_t recov_state; 3330 nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS }; 3331 3332 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE, "nfs4renew")); 3333 3334 recov_state.rs_flags = 0; 3335 recov_state.rs_num_retry_despite_err = 0; 3336 3337 recov_retry: 3338 mi = sp->mntinfo4_list; 3339 VFS_HOLD(mi->mi_vfsp); 3340 mutex_exit(&sp->s_lock); 3341 ASSERT(mi != NULL); 3342 3343 e.error = nfs4_start_op(mi, NULL, NULL, &recov_state); 3344 if (e.error) { 3345 VFS_RELE(mi->mi_vfsp); 3346 return (e.error); 3347 } 3348 3349 /* Check to see if we're dealing with a marked-dead sp */ 3350 mutex_enter(&sp->s_lock); 3351 if (sp->s_thread_exit == NFS4_THREAD_EXIT) { 3352 mutex_exit(&sp->s_lock); 3353 nfs4_end_op(mi, NULL, NULL, &recov_state, needrecov); 3354 VFS_RELE(mi->mi_vfsp); 3355 return (0); 3356 } 3357 3358 /* Make sure mi hasn't changed on us */ 3359 if (mi != sp->mntinfo4_list) { 3360 /* Must drop sp's lock to avoid a recursive mutex enter */ 3361 mutex_exit(&sp->s_lock); 3362 nfs4_end_op(mi, NULL, NULL, &recov_state, needrecov); 3363 VFS_RELE(mi->mi_vfsp); 3364 mutex_enter(&sp->s_lock); 3365 goto recov_retry; 3366 } 3367 mutex_exit(&sp->s_lock); 3368 3369 args.ctag = TAG_RENEW; 3370 3371 args.array_len = 1; 3372 args.array = argop; 3373 3374 argop[0].argop = OP_RENEW; 3375 3376 mutex_enter(&sp->s_lock); 3377 argop[0].nfs_argop4_u.oprenew.clientid = sp->clientid; 3378 cr = sp->s_cred; 3379 crhold(cr); 3380 mutex_exit(&sp->s_lock); 3381 3382 ASSERT(cr != NULL); 3383 3384 /* used to figure out RTT for sp */ 3385 gethrestime(&prop_time); 3386 3387 NFS4_DEBUG(nfs4_client_call_debug, (CE_NOTE, 3388 "nfs4renew: %s call, sp 0x%p", needrecov ? "recov" : "first", 3389 (void*)sp)); 3390 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE, "before: %ld s %ld ns ", 3391 prop_time.tv_sec, prop_time.tv_nsec)); 3392 3393 DTRACE_PROBE2(nfs4__renew__start, nfs4_server_t *, sp, 3394 mntinfo4_t *, mi); 3395 3396 rfs4call(mi, &args, &res, cr, &doqueue, 0, &e); 3397 crfree(cr); 3398 3399 DTRACE_PROBE2(nfs4__renew__end, nfs4_server_t *, sp, 3400 mntinfo4_t *, mi); 3401 3402 gethrestime(&after_time); 3403 3404 mutex_enter(&sp->s_lock); 3405 sp->propagation_delay.tv_sec = 3406 MAX(1, after_time.tv_sec - prop_time.tv_sec); 3407 mutex_exit(&sp->s_lock); 3408 3409 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE, "after : %ld s %ld ns ", 3410 after_time.tv_sec, after_time.tv_nsec)); 3411 3412 if (e.error == 0 && res.status == NFS4ERR_CB_PATH_DOWN) { 3413 (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res); 3414 nfs4_delegreturn_all(sp); 3415 nfs4_end_op(mi, NULL, NULL, &recov_state, needrecov); 3416 VFS_RELE(mi->mi_vfsp); 3417 /* 3418 * If the server returns CB_PATH_DOWN, it has renewed 3419 * the lease and informed us that the callback path is 3420 * down. Since the lease is renewed, just return 0 and 3421 * let the renew thread proceed as normal. 
3422 */ 3423 return (0); 3424 } 3425 3426 needrecov = nfs4_needs_recovery(&e, FALSE, mi->mi_vfsp); 3427 if (!needrecov && e.error) { 3428 nfs4_end_op(mi, NULL, NULL, &recov_state, needrecov); 3429 VFS_RELE(mi->mi_vfsp); 3430 return (e.error); 3431 } 3432 3433 rpc_error = e.error; 3434 3435 if (needrecov) { 3436 NFS4_DEBUG(nfs4_client_recov_debug, (CE_NOTE, 3437 "nfs4renew: initiating recovery\n")); 3438 3439 if (nfs4_start_recovery(&e, mi, NULL, NULL, NULL, NULL, 3440 OP_RENEW, NULL) == FALSE) { 3441 nfs4_end_op(mi, NULL, NULL, &recov_state, needrecov); 3442 VFS_RELE(mi->mi_vfsp); 3443 if (!e.error) 3444 (void) xdr_free(xdr_COMPOUND4res_clnt, 3445 (caddr_t)&res); 3446 mutex_enter(&sp->s_lock); 3447 goto recov_retry; 3448 } 3449 /* fall through for res.status case */ 3450 } 3451 3452 if (res.status) { 3453 if (res.status == NFS4ERR_LEASE_MOVED) { 3454 /*EMPTY*/ 3455 /* 3456 * XXX need to try every mntinfo4 in sp->mntinfo4_list 3457 * to renew the lease on that server 3458 */ 3459 } 3460 e.error = geterrno4(res.status); 3461 } 3462 3463 if (!rpc_error) 3464 (void) xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res); 3465 3466 nfs4_end_op(mi, NULL, NULL, &recov_state, needrecov); 3467 3468 VFS_RELE(mi->mi_vfsp); 3469 3470 return (e.error); 3471 } 3472 3473 void 3474 nfs4_inc_state_ref_count(mntinfo4_t *mi) 3475 { 3476 nfs4_server_t *sp; 3477 3478 /* this locks down sp if it is found */ 3479 sp = find_nfs4_server(mi); 3480 3481 if (sp != NULL) { 3482 nfs4_inc_state_ref_count_nolock(sp, mi); 3483 mutex_exit(&sp->s_lock); 3484 nfs4_server_rele(sp); 3485 } 3486 } 3487 3488 /* 3489 * Bump the number of OPEN files (ie: those with state) so we know if this 3490 * nfs4_server has any state to maintain a lease for or not. 3491 * 3492 * Also, marks the nfs4_server's lease valid if it hasn't been done so already. 3493 */ 3494 void 3495 nfs4_inc_state_ref_count_nolock(nfs4_server_t *sp, mntinfo4_t *mi) 3496 { 3497 ASSERT(mutex_owned(&sp->s_lock)); 3498 3499 sp->state_ref_count++; 3500 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE, 3501 "nfs4_inc_state_ref_count: state_ref_count now %d", 3502 sp->state_ref_count)); 3503 3504 if (sp->lease_valid == NFS4_LEASE_UNINITIALIZED) 3505 sp->lease_valid = NFS4_LEASE_VALID; 3506 3507 /* 3508 * If this call caused the lease to be marked valid and/or 3509 * took the state_ref_count from 0 to 1, then start the time 3510 * on lease renewal. 3511 */ 3512 if (sp->lease_valid == NFS4_LEASE_VALID && sp->state_ref_count == 1) 3513 sp->last_renewal_time = gethrestime_sec(); 3514 3515 /* update the number of open files for mi */ 3516 mi->mi_open_files++; 3517 } 3518 3519 void 3520 nfs4_dec_state_ref_count(mntinfo4_t *mi) 3521 { 3522 nfs4_server_t *sp; 3523 3524 /* this locks down sp if it is found */ 3525 sp = find_nfs4_server_all(mi, 1); 3526 3527 if (sp != NULL) { 3528 nfs4_dec_state_ref_count_nolock(sp, mi); 3529 mutex_exit(&sp->s_lock); 3530 nfs4_server_rele(sp); 3531 } 3532 } 3533 3534 /* 3535 * Decrement the number of OPEN files (ie: those with state) so we know if 3536 * this nfs4_server has any state to maintain a lease for or not. 
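 *
 * If this drops mi's open file count to zero and the mount has
 * MI4_REMOVE_ON_LAST_CLOSE set, the mntinfo4 is also removed from
 * the server's list via nfs4_remove_mi_from_server().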
3537 */ 3538 void 3539 nfs4_dec_state_ref_count_nolock(nfs4_server_t *sp, mntinfo4_t *mi) 3540 { 3541 ASSERT(mutex_owned(&sp->s_lock)); 3542 ASSERT(sp->state_ref_count != 0); 3543 sp->state_ref_count--; 3544 3545 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE, 3546 "nfs4_dec_state_ref_count: state ref count now %d", 3547 sp->state_ref_count)); 3548 3549 mi->mi_open_files--; 3550 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE, 3551 "nfs4_dec_state_ref_count: mi open files %d, v4 flags 0x%x", 3552 mi->mi_open_files, mi->mi_flags)); 3553 3554 /* We don't have to hold the mi_lock to test mi_flags */ 3555 if (mi->mi_open_files == 0 && 3556 (mi->mi_flags & MI4_REMOVE_ON_LAST_CLOSE)) { 3557 NFS4_DEBUG(nfs4_client_lease_debug, (CE_NOTE, 3558 "nfs4_dec_state_ref_count: remove mntinfo4 %p since " 3559 "we have closed the last open file", (void*)mi)); 3560 nfs4_remove_mi_from_server(mi, sp); 3561 } 3562 } 3563 3564 bool_t 3565 inlease(nfs4_server_t *sp) 3566 { 3567 bool_t result; 3568 3569 ASSERT(mutex_owned(&sp->s_lock)); 3570 3571 if (sp->lease_valid == NFS4_LEASE_VALID && 3572 gethrestime_sec() < sp->last_renewal_time + sp->s_lease_time) 3573 result = TRUE; 3574 else 3575 result = FALSE; 3576 3577 return (result); 3578 } 3579 3580 3581 /* 3582 * Return non-zero if the given nfs4_server_t is going through recovery. 3583 */ 3584 3585 int 3586 nfs4_server_in_recovery(nfs4_server_t *sp) 3587 { 3588 return (nfs_rw_lock_held(&sp->s_recovlock, RW_WRITER)); 3589 } 3590 3591 /* 3592 * Compare two shared filehandle objects. Returns -1, 0, or +1, if the 3593 * first is less than, equal to, or greater than the second. 3594 */ 3595 3596 int 3597 sfh4cmp(const void *p1, const void *p2) 3598 { 3599 const nfs4_sharedfh_t *sfh1 = (const nfs4_sharedfh_t *)p1; 3600 const nfs4_sharedfh_t *sfh2 = (const nfs4_sharedfh_t *)p2; 3601 3602 return (nfs4cmpfh(&sfh1->sfh_fh, &sfh2->sfh_fh)); 3603 } 3604 3605 /* 3606 * Create a table for shared filehandle objects. 3607 */ 3608 3609 void 3610 sfh4_createtab(avl_tree_t *tab) 3611 { 3612 avl_create(tab, sfh4cmp, sizeof (nfs4_sharedfh_t), 3613 offsetof(nfs4_sharedfh_t, sfh_tree)); 3614 } 3615 3616 /* 3617 * Return a shared filehandle object for the given filehandle. The caller 3618 * is responsible for eventually calling sfh4_rele(). 3619 */ 3620 3621 nfs4_sharedfh_t * 3622 sfh4_put(const nfs_fh4 *fh, mntinfo4_t *mi, nfs4_sharedfh_t *key) 3623 { 3624 nfs4_sharedfh_t *sfh, *nsfh; 3625 avl_index_t where; 3626 nfs4_sharedfh_t skey; 3627 3628 if (!key) { 3629 skey.sfh_fh = *fh; 3630 key = &skey; 3631 } 3632 3633 nsfh = kmem_alloc(sizeof (nfs4_sharedfh_t), KM_SLEEP); 3634 nsfh->sfh_fh.nfs_fh4_len = fh->nfs_fh4_len; 3635 /* 3636 * We allocate the largest possible filehandle size because it's 3637 * not that big, and it saves us from possibly having to resize the 3638 * buffer later. 
3639 */ 3640 nsfh->sfh_fh.nfs_fh4_val = kmem_alloc(NFS4_FHSIZE, KM_SLEEP); 3641 bcopy(fh->nfs_fh4_val, nsfh->sfh_fh.nfs_fh4_val, fh->nfs_fh4_len); 3642 mutex_init(&nsfh->sfh_lock, NULL, MUTEX_DEFAULT, NULL); 3643 nsfh->sfh_refcnt = 1; 3644 nsfh->sfh_flags = SFH4_IN_TREE; 3645 nsfh->sfh_mi = mi; 3646 NFS4_DEBUG(nfs4_sharedfh_debug, (CE_NOTE, "sfh4_get: new object (%p)", 3647 (void *)nsfh)); 3648 3649 (void) nfs_rw_enter_sig(&mi->mi_fh_lock, RW_WRITER, 0); 3650 sfh = avl_find(&mi->mi_filehandles, key, &where); 3651 if (sfh != NULL) { 3652 mutex_enter(&sfh->sfh_lock); 3653 sfh->sfh_refcnt++; 3654 mutex_exit(&sfh->sfh_lock); 3655 nfs_rw_exit(&mi->mi_fh_lock); 3656 /* free our speculative allocs */ 3657 kmem_free(nsfh->sfh_fh.nfs_fh4_val, NFS4_FHSIZE); 3658 kmem_free(nsfh, sizeof (nfs4_sharedfh_t)); 3659 return (sfh); 3660 } 3661 3662 avl_insert(&mi->mi_filehandles, nsfh, where); 3663 nfs_rw_exit(&mi->mi_fh_lock); 3664 3665 return (nsfh); 3666 } 3667 3668 /* 3669 * Return a shared filehandle object for the given filehandle. The caller 3670 * is responsible for eventually calling sfh4_rele(). 3671 */ 3672 3673 nfs4_sharedfh_t * 3674 sfh4_get(const nfs_fh4 *fh, mntinfo4_t *mi) 3675 { 3676 nfs4_sharedfh_t *sfh; 3677 nfs4_sharedfh_t key; 3678 3679 ASSERT(fh->nfs_fh4_len <= NFS4_FHSIZE); 3680 3681 #ifdef DEBUG 3682 if (nfs4_sharedfh_debug) { 3683 nfs4_fhandle_t fhandle; 3684 3685 fhandle.fh_len = fh->nfs_fh4_len; 3686 bcopy(fh->nfs_fh4_val, fhandle.fh_buf, fhandle.fh_len); 3687 zcmn_err(mi->mi_zone->zone_id, CE_NOTE, "sfh4_get:"); 3688 nfs4_printfhandle(&fhandle); 3689 } 3690 #endif 3691 3692 /* 3693 * If there's already an object for the given filehandle, bump the 3694 * reference count and return it. Otherwise, create a new object 3695 * and add it to the AVL tree. 3696 */ 3697 3698 key.sfh_fh = *fh; 3699 3700 (void) nfs_rw_enter_sig(&mi->mi_fh_lock, RW_READER, 0); 3701 sfh = avl_find(&mi->mi_filehandles, &key, NULL); 3702 if (sfh != NULL) { 3703 mutex_enter(&sfh->sfh_lock); 3704 sfh->sfh_refcnt++; 3705 NFS4_DEBUG(nfs4_sharedfh_debug, (CE_NOTE, 3706 "sfh4_get: found existing %p, new refcnt=%d", 3707 (void *)sfh, sfh->sfh_refcnt)); 3708 mutex_exit(&sfh->sfh_lock); 3709 nfs_rw_exit(&mi->mi_fh_lock); 3710 return (sfh); 3711 } 3712 nfs_rw_exit(&mi->mi_fh_lock); 3713 3714 return (sfh4_put(fh, mi, &key)); 3715 } 3716 3717 /* 3718 * Get a reference to the given shared filehandle object. 3719 */ 3720 3721 void 3722 sfh4_hold(nfs4_sharedfh_t *sfh) 3723 { 3724 ASSERT(sfh->sfh_refcnt > 0); 3725 3726 mutex_enter(&sfh->sfh_lock); 3727 sfh->sfh_refcnt++; 3728 NFS4_DEBUG(nfs4_sharedfh_debug, 3729 (CE_NOTE, "sfh4_hold %p, new refcnt=%d", 3730 (void *)sfh, sfh->sfh_refcnt)); 3731 mutex_exit(&sfh->sfh_lock); 3732 } 3733 3734 /* 3735 * Release a reference to the given shared filehandle object and null out 3736 * the given pointer. 3737 */ 3738 3739 void 3740 sfh4_rele(nfs4_sharedfh_t **sfhpp) 3741 { 3742 mntinfo4_t *mi; 3743 nfs4_sharedfh_t *sfh = *sfhpp; 3744 3745 ASSERT(sfh->sfh_refcnt > 0); 3746 3747 mutex_enter(&sfh->sfh_lock); 3748 if (sfh->sfh_refcnt > 1) { 3749 sfh->sfh_refcnt--; 3750 NFS4_DEBUG(nfs4_sharedfh_debug, (CE_NOTE, 3751 "sfh4_rele %p, new refcnt=%d", 3752 (void *)sfh, sfh->sfh_refcnt)); 3753 mutex_exit(&sfh->sfh_lock); 3754 goto finish; 3755 } 3756 mutex_exit(&sfh->sfh_lock); 3757 3758 /* 3759 * Possibly the last reference, so get the lock for the table in 3760 * case it's time to remove the object from the table. 
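 * The lock ordering here matches sfh4_get() and sfh4_put():
 * mi_fh_lock is taken before sfh_lock, and the reference count is
 * re-checked under both locks in case another thread gained a
 * reference in the meantime.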
3761 */ 3762 mi = sfh->sfh_mi; 3763 (void) nfs_rw_enter_sig(&mi->mi_fh_lock, RW_WRITER, 0); 3764 mutex_enter(&sfh->sfh_lock); 3765 sfh->sfh_refcnt--; 3766 if (sfh->sfh_refcnt > 0) { 3767 NFS4_DEBUG(nfs4_sharedfh_debug, (CE_NOTE, 3768 "sfh4_rele %p, new refcnt=%d", 3769 (void *)sfh, sfh->sfh_refcnt)); 3770 mutex_exit(&sfh->sfh_lock); 3771 nfs_rw_exit(&mi->mi_fh_lock); 3772 goto finish; 3773 } 3774 3775 NFS4_DEBUG(nfs4_sharedfh_debug, (CE_NOTE, 3776 "sfh4_rele %p, last ref", (void *)sfh)); 3777 if (sfh->sfh_flags & SFH4_IN_TREE) { 3778 avl_remove(&mi->mi_filehandles, sfh); 3779 sfh->sfh_flags &= ~SFH4_IN_TREE; 3780 } 3781 mutex_exit(&sfh->sfh_lock); 3782 nfs_rw_exit(&mi->mi_fh_lock); 3783 mutex_destroy(&sfh->sfh_lock); 3784 kmem_free(sfh->sfh_fh.nfs_fh4_val, NFS4_FHSIZE); 3785 kmem_free(sfh, sizeof (nfs4_sharedfh_t)); 3786 3787 finish: 3788 *sfhpp = NULL; 3789 } 3790 3791 /* 3792 * Update the filehandle for the given shared filehandle object. 3793 */ 3794 3795 int nfs4_warn_dupfh = 0; /* if set, always warn about dup fhs below */ 3796 3797 void 3798 sfh4_update(nfs4_sharedfh_t *sfh, const nfs_fh4 *newfh) 3799 { 3800 mntinfo4_t *mi = sfh->sfh_mi; 3801 nfs4_sharedfh_t *dupsfh; 3802 avl_index_t where; 3803 nfs4_sharedfh_t key; 3804 3805 #ifdef DEBUG 3806 mutex_enter(&sfh->sfh_lock); 3807 ASSERT(sfh->sfh_refcnt > 0); 3808 mutex_exit(&sfh->sfh_lock); 3809 #endif 3810 ASSERT(newfh->nfs_fh4_len <= NFS4_FHSIZE); 3811 3812 /* 3813 * The basic plan is to remove the shared filehandle object from 3814 * the table, update it to have the new filehandle, then reinsert 3815 * it. 3816 */ 3817 3818 (void) nfs_rw_enter_sig(&mi->mi_fh_lock, RW_WRITER, 0); 3819 mutex_enter(&sfh->sfh_lock); 3820 if (sfh->sfh_flags & SFH4_IN_TREE) { 3821 avl_remove(&mi->mi_filehandles, sfh); 3822 sfh->sfh_flags &= ~SFH4_IN_TREE; 3823 } 3824 mutex_exit(&sfh->sfh_lock); 3825 sfh->sfh_fh.nfs_fh4_len = newfh->nfs_fh4_len; 3826 bcopy(newfh->nfs_fh4_val, sfh->sfh_fh.nfs_fh4_val, 3827 sfh->sfh_fh.nfs_fh4_len); 3828 3829 /* 3830 * XXX If there is already a shared filehandle object with the new 3831 * filehandle, we're in trouble, because the rnode code assumes 3832 * that there is only one shared filehandle object for a given 3833 * filehandle. So issue a warning (for read-write mounts only) 3834 * and don't try to re-insert the given object into the table. 3835 * Hopefully the given object will quickly go away and everyone 3836 * will use the new object. 3837 */ 3838 key.sfh_fh = *newfh; 3839 dupsfh = avl_find(&mi->mi_filehandles, &key, &where); 3840 if (dupsfh != NULL) { 3841 if (!(mi->mi_vfsp->vfs_flag & VFS_RDONLY) || nfs4_warn_dupfh) { 3842 zcmn_err(mi->mi_zone->zone_id, CE_WARN, "sfh4_update: " 3843 "duplicate filehandle detected"); 3844 sfh4_printfhandle(dupsfh); 3845 } 3846 } else { 3847 avl_insert(&mi->mi_filehandles, sfh, where); 3848 mutex_enter(&sfh->sfh_lock); 3849 sfh->sfh_flags |= SFH4_IN_TREE; 3850 mutex_exit(&sfh->sfh_lock); 3851 } 3852 nfs_rw_exit(&mi->mi_fh_lock); 3853 } 3854 3855 /* 3856 * Copy out the current filehandle for the given shared filehandle object. 
3857 */ 3858 3859 void 3860 sfh4_copyval(const nfs4_sharedfh_t *sfh, nfs4_fhandle_t *fhp) 3861 { 3862 mntinfo4_t *mi = sfh->sfh_mi; 3863 3864 ASSERT(sfh->sfh_refcnt > 0); 3865 3866 (void) nfs_rw_enter_sig(&mi->mi_fh_lock, RW_READER, 0); 3867 fhp->fh_len = sfh->sfh_fh.nfs_fh4_len; 3868 ASSERT(fhp->fh_len <= NFS4_FHSIZE); 3869 bcopy(sfh->sfh_fh.nfs_fh4_val, fhp->fh_buf, fhp->fh_len); 3870 nfs_rw_exit(&mi->mi_fh_lock); 3871 } 3872 3873 /* 3874 * Print out the filehandle for the given shared filehandle object. 3875 */ 3876 3877 void 3878 sfh4_printfhandle(const nfs4_sharedfh_t *sfh) 3879 { 3880 nfs4_fhandle_t fhandle; 3881 3882 sfh4_copyval(sfh, &fhandle); 3883 nfs4_printfhandle(&fhandle); 3884 } 3885 3886 /* 3887 * Compare 2 fnames. Returns -1 if the first is "less" than the second, 0 3888 * if they're the same, +1 if the first is "greater" than the second. The 3889 * caller (or whoever's calling the AVL package) is responsible for 3890 * handling locking issues. 3891 */ 3892 3893 static int 3894 fncmp(const void *p1, const void *p2) 3895 { 3896 const nfs4_fname_t *f1 = p1; 3897 const nfs4_fname_t *f2 = p2; 3898 int res; 3899 3900 res = strcmp(f1->fn_name, f2->fn_name); 3901 /* 3902 * The AVL package wants +/-1, not arbitrary positive or negative 3903 * integers. 3904 */ 3905 if (res > 0) 3906 res = 1; 3907 else if (res < 0) 3908 res = -1; 3909 return (res); 3910 } 3911 3912 /* 3913 * Get or create an fname with the given name, as a child of the given 3914 * fname. The caller is responsible for eventually releasing the reference 3915 * (fn_rele()). parent may be NULL. 3916 */ 3917 3918 nfs4_fname_t * 3919 fn_get(nfs4_fname_t *parent, char *name) 3920 { 3921 nfs4_fname_t key; 3922 nfs4_fname_t *fnp; 3923 avl_index_t where; 3924 3925 key.fn_name = name; 3926 3927 /* 3928 * If there's already an fname registered with the given name, bump 3929 * its reference count and return it. Otherwise, create a new one 3930 * and add it to the parent's AVL tree. 3931 */ 3932 3933 if (parent != NULL) { 3934 mutex_enter(&parent->fn_lock); 3935 fnp = avl_find(&parent->fn_children, &key, &where); 3936 if (fnp != NULL) { 3937 fn_hold(fnp); 3938 mutex_exit(&parent->fn_lock); 3939 return (fnp); 3940 } 3941 } 3942 3943 fnp = kmem_alloc(sizeof (nfs4_fname_t), KM_SLEEP); 3944 mutex_init(&fnp->fn_lock, NULL, MUTEX_DEFAULT, NULL); 3945 fnp->fn_parent = parent; 3946 if (parent != NULL) 3947 fn_hold(parent); 3948 fnp->fn_len = strlen(name); 3949 ASSERT(fnp->fn_len < MAXNAMELEN); 3950 fnp->fn_name = kmem_alloc(fnp->fn_len + 1, KM_SLEEP); 3951 (void) strcpy(fnp->fn_name, name); 3952 fnp->fn_refcnt = 1; 3953 avl_create(&fnp->fn_children, fncmp, sizeof (nfs4_fname_t), 3954 offsetof(nfs4_fname_t, fn_tree)); 3955 NFS4_DEBUG(nfs4_fname_debug, (CE_NOTE, 3956 "fn_get %p:%s, a new nfs4_fname_t!", 3957 (void *)fnp, fnp->fn_name)); 3958 if (parent != NULL) { 3959 avl_insert(&parent->fn_children, fnp, where); 3960 mutex_exit(&parent->fn_lock); 3961 } 3962 3963 return (fnp); 3964 } 3965 3966 void 3967 fn_hold(nfs4_fname_t *fnp) 3968 { 3969 atomic_add_32(&fnp->fn_refcnt, 1); 3970 NFS4_DEBUG(nfs4_fname_debug, (CE_NOTE, 3971 "fn_hold %p:%s, new refcnt=%d", 3972 (void *)fnp, fnp->fn_name, fnp->fn_refcnt)); 3973 } 3974 3975 /* 3976 * Decrement the reference count of the given fname, and destroy it if its 3977 * reference count goes to zero. Nulls out the given pointer. 
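 *
 * Typical usage (illustrative):
 *
 *	nfs4_fname_t *fnp = fn_get(parent, name);
 *	... use fnp ...
 *	fn_rele(&fnp);		(fnp is NULL afterwards)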
/*
 * Decrement the reference count of the given fname, and destroy it if its
 * reference count goes to zero.  Nulls out the given pointer.
 */

void
fn_rele(nfs4_fname_t **fnpp)
{
	nfs4_fname_t *parent;
	uint32_t newref;
	nfs4_fname_t *fnp;

recur:
	fnp = *fnpp;
	*fnpp = NULL;

	mutex_enter(&fnp->fn_lock);
	parent = fnp->fn_parent;
	if (parent != NULL)
		mutex_enter(&parent->fn_lock);	/* prevent new references */
	newref = atomic_add_32_nv(&fnp->fn_refcnt, -1);
	if (newref > 0) {
		NFS4_DEBUG(nfs4_fname_debug, (CE_NOTE,
		    "fn_rele %p:%s, new refcnt=%d",
		    (void *)fnp, fnp->fn_name, newref));
		if (parent != NULL)
			mutex_exit(&parent->fn_lock);
		mutex_exit(&fnp->fn_lock);
		return;
	}

	NFS4_DEBUG(nfs4_fname_debug, (CE_NOTE,
	    "fn_rele %p:%s, last reference, deleting...",
	    (void *)fnp, fnp->fn_name));
	if (parent != NULL) {
		avl_remove(&parent->fn_children, fnp);
		mutex_exit(&parent->fn_lock);
	}
	kmem_free(fnp->fn_name, fnp->fn_len + 1);
	mutex_destroy(&fnp->fn_lock);
	avl_destroy(&fnp->fn_children);
	kmem_free(fnp, sizeof (nfs4_fname_t));
	/*
	 * Recursively fn_rele the parent.
	 * Use goto instead of a recursive call to avoid stack overflow.
	 */
	if (parent != NULL) {
		fnpp = &parent;
		goto recur;
	}
}

/*
 * Returns the single component name of the given fname, in a MAXNAMELEN
 * string buffer, which the caller is responsible for freeing.  Note that
 * the name may become invalid as a result of fn_move().
 */

char *
fn_name(nfs4_fname_t *fnp)
{
	char *name;

	ASSERT(fnp->fn_len < MAXNAMELEN);
	name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
	mutex_enter(&fnp->fn_lock);
	(void) strcpy(name, fnp->fn_name);
	mutex_exit(&fnp->fn_lock);

	return (name);
}

/*
 * fn_path_realloc
 *
 * This function, used only by fn_path, constructs a new string which
 * looks like "prepend" + "/" + "current" by allocating a new string
 * and freeing the old one.
 */
static void
fn_path_realloc(char **curses, char *prepend)
{
	int len, curlen = 0;
	char *news;

	if (*curses == NULL) {
		/*
		 * Prime the pump, allocate just the
		 * space for prepend and return that.
		 */
		len = strlen(prepend) + 1;
		news = kmem_alloc(len, KM_SLEEP);
		(void) strncpy(news, prepend, len);
	} else {
		/*
		 * Allocate the space for the new string; the "+ 1 + 1"
		 * covers the "/" separator and the terminating NUL byte.
		 */
		curlen = strlen(*curses);
		len = curlen + strlen(prepend) + 1 + 1;
		news = kmem_alloc(len, KM_SLEEP);
		(void) strncpy(news, prepend, len);
		(void) strcat(news, "/");
		(void) strcat(news, *curses);
		kmem_free(*curses, curlen + 1);
	}
	*curses = news;
}
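/*
 * Example (illustrative only): fn_path() below calls fn_path_realloc()
 * once per tree level, innermost component first, so the string grows
 * from the leaf outward:
 *
 *	char *path = NULL;
 *
 *	fn_path_realloc(&path, "file");		path is now "file"
 *	fn_path_realloc(&path, "dir");		path is now "dir/file"
 *	fn_path_realloc(&path, ".");		path is now "./dir/file"
 *
 * The final string is exactly strlen(path) + 1 bytes, so it must be
 * freed with kmem_free(path, strlen(path) + 1).
 */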
/*
 * Returns the path name (starting from the fs root) for the given fname.
 * The caller is responsible for freeing.  Note that the path may be or
 * become invalid as a result of fn_move().
 */

char *
fn_path(nfs4_fname_t *fnp)
{
	char *path;
	nfs4_fname_t *nextfnp;

	if (fnp == NULL)
		return (NULL);

	path = NULL;

	/*
	 * Walk up the tree, constructing the path name.
	 */

	fn_hold(fnp);			/* adjust for later rele */
	do {
		mutex_enter(&fnp->fn_lock);
		/*
		 * Add fn_name in front of the current path
		 */
		fn_path_realloc(&path, fnp->fn_name);
		nextfnp = fnp->fn_parent;
		if (nextfnp != NULL)
			fn_hold(nextfnp);
		mutex_exit(&fnp->fn_lock);
		fn_rele(&fnp);
		fnp = nextfnp;
	} while (fnp != NULL);

	return (path);
}

/*
 * Return a reference to the parent of the given fname, which the caller is
 * responsible for eventually releasing.
 */

nfs4_fname_t *
fn_parent(nfs4_fname_t *fnp)
{
	nfs4_fname_t *parent;

	mutex_enter(&fnp->fn_lock);
	parent = fnp->fn_parent;
	if (parent != NULL)
		fn_hold(parent);
	mutex_exit(&fnp->fn_lock);

	return (parent);
}

/*
 * Update fnp so that its parent is newparent and its name is newname.
 */

void
fn_move(nfs4_fname_t *fnp, nfs4_fname_t *newparent, char *newname)
{
	nfs4_fname_t *parent, *tmpfnp;
	ssize_t newlen;
	nfs4_fname_t key;
	avl_index_t where;

	/*
	 * This assert exists to catch the client trying to rename
	 * a dir to be a child of itself.  This happened at a recent
	 * bakeoff against a 3rd party (broken) server which allowed
	 * the rename to succeed.  If it trips it means that:
	 *	a) the code in nfs4rename that detects this case is broken
	 *	b) the server is broken (since it allowed the bogus rename)
	 *
	 * For non-DEBUG kernels, prepare for a recursive mutex_enter
	 * panic below from:  mutex_enter(&newparent->fn_lock);
	 */
	ASSERT(fnp != newparent);

	/*
	 * Remove fnp from its current parent, change its name, then add it
	 * to newparent.
	 */
	mutex_enter(&fnp->fn_lock);
	parent = fnp->fn_parent;
	mutex_enter(&parent->fn_lock);
	avl_remove(&parent->fn_children, fnp);
	mutex_exit(&parent->fn_lock);
	fn_rele(&fnp->fn_parent);

	newlen = strlen(newname);
	if (newlen != fnp->fn_len) {
		ASSERT(newlen < MAXNAMELEN);
		kmem_free(fnp->fn_name, fnp->fn_len + 1);
		fnp->fn_name = kmem_alloc(newlen + 1, KM_SLEEP);
		fnp->fn_len = newlen;
	}
	(void) strcpy(fnp->fn_name, newname);

again:
	mutex_enter(&newparent->fn_lock);
	key.fn_name = fnp->fn_name;
	tmpfnp = avl_find(&newparent->fn_children, &key, &where);
	if (tmpfnp != NULL) {
		/*
		 * This could be due to a file that was unlinked while
		 * open, or perhaps the rnode is in the free list.  Remove
		 * it from newparent and let it go away on its own.  The
		 * contorted code is to deal with lock order issues and
		 * race conditions.
		 */
		fn_hold(tmpfnp);
		mutex_exit(&newparent->fn_lock);
		mutex_enter(&tmpfnp->fn_lock);
		if (tmpfnp->fn_parent == newparent) {
			mutex_enter(&newparent->fn_lock);
			avl_remove(&newparent->fn_children, tmpfnp);
			mutex_exit(&newparent->fn_lock);
			fn_rele(&tmpfnp->fn_parent);
		}
		mutex_exit(&tmpfnp->fn_lock);
		fn_rele(&tmpfnp);
		goto again;
	}
	fnp->fn_parent = newparent;
	fn_hold(newparent);
	avl_insert(&newparent->fn_children, fnp, where);
	mutex_exit(&newparent->fn_lock);
	mutex_exit(&fnp->fn_lock);
}
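/*
 * Usage sketch (illustrative only): a rename path would update the cached
 * name tree once the over-the-wire RENAME has succeeded, roughly:
 *
 *	nfs4_fname_t *fn = ...;		fname for the renamed object
 *	nfs4_fname_t *ndirfn = ...;	fname for the destination directory
 *
 *	fn_move(fn, ndirfn, "newname");
 *
 * After this, fn_path(fn) reflects the new location; any path strings
 * obtained before the move are stale, as noted above fn_path().
 */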
#ifdef DEBUG
/*
 * Return non-zero if the type information makes sense for the given vnode.
 * Otherwise panic.
 */
int
nfs4_consistent_type(vnode_t *vp)
{
	rnode4_t *rp = VTOR4(vp);

	if (nfs4_vtype_debug && vp->v_type != VNON &&
	    rp->r_attr.va_type != VNON && vp->v_type != rp->r_attr.va_type) {
		cmn_err(CE_PANIC, "vnode %p type mismatch; v_type=%d, "
		    "rnode attr type=%d", (void *)vp, vp->v_type,
		    rp->r_attr.va_type);
	}

	return (1);
}
#endif /* DEBUG */
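/*
 * Usage note (illustrative only): since nfs4_consistent_type() returns 1
 * whenever it does not panic, callers can wrap it in an ASSERT so the
 * check compiles away entirely in non-DEBUG kernels:
 *
 *	ASSERT(nfs4_consistent_type(vp));
 */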