/*
 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
 * Authors: Doug Rabson <dfr@rabson.org>
 * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc.  All rights reserved.
 */

/*
 * Client-side support for (NFS) VOP_FRLOCK, VOP_SHRLOCK.
 * (called via klmops.c: lm_frlock, lm4_frlock)
 *
 * Source code derived from FreeBSD nlm_advlock.c
 */

#include <sys/param.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/flock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/share.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/queue.h>
#include <sys/sdt.h>
#include <netinet/in.h>

#include <fs/fs_subr.h>
#include <rpcsvc/nlm_prot.h>

#include <nfs/nfs.h>
#include <nfs/nfs_clnt.h>
#include <nfs/export.h>
#include <nfs/rnode.h>
#include <nfs/lm.h>

#include "nlm_impl.h"

/* Extra flags for nlm_call_lock() - xflags */
#define	NLM_X_RECLAIM	1
#define	NLM_X_BLOCKING	2

/*
 * Maximum number of retries nlm_call_cancel() does
 * when the NLM server is in its grace period or
 * doesn't respond correctly.
 */
#define	NLM_CANCEL_NRETRS	5

/*
 * Determines whether the given lock "flp" is safe.
 * The lock is considered safe when it covers the
 * whole file (i.e. both its start and len are zero).
 */
#define	NLM_FLOCK_IS_SAFE(flp) \
	((flp)->l_start == 0 && (flp)->l_len == 0)
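
/*
 * For illustration (editorial example, not used by the code): a whole-file
 * lock with l_start == 0 and l_len == 0 is "safe" per the macro above,
 * while any byte-range lock, e.g. l_start == 0, l_len == 100, is not.
 * Unsafe locks may force caching to be disabled on the vnode; see
 * nlm_safemap() and nlm_call_lock() below.
 */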

static volatile uint32_t nlm_xid = 1;

static int nlm_init_fh_by_vp(vnode_t *, struct netobj *, rpcvers_t *);
static int nlm_map_status(nlm4_stats);
static int nlm_map_clnt_stat(enum clnt_stat);
static void nlm_send_siglost(pid_t);

static int nlm_frlock_getlk(struct nlm_host *, vnode_t *,
    struct flock64 *, int, u_offset_t, struct netobj *, int);

static int nlm_frlock_setlk(struct nlm_host *, vnode_t *,
    struct flock64 *, int, u_offset_t, struct netobj *,
    struct flk_callback *, int, bool_t);

static int nlm_reclaim_lock(struct nlm_host *, vnode_t *,
    struct flock64 *, int32_t);

static void nlm_init_lock(struct nlm4_lock *,
    const struct flock64 *, struct netobj *,
    struct nlm_owner_handle *);

static int nlm_call_lock(vnode_t *, struct flock64 *,
    struct nlm_host *, struct netobj *,
    struct flk_callback *, int, int);
static int nlm_call_unlock(struct flock64 *, struct nlm_host *,
    struct netobj *, int);
static int nlm_call_test(struct flock64 *, struct nlm_host *,
    struct netobj *, int);
static int nlm_call_cancel(struct nlm4_lockargs *,
    struct nlm_host *, int);

static int nlm_local_getlk(vnode_t *, struct flock64 *, int);
static int nlm_local_setlk(vnode_t *, struct flock64 *, int);
static void nlm_local_cancelk(vnode_t *, struct flock64 *);

static void nlm_init_share(struct nlm4_share *,
    const struct shrlock *, struct netobj *);

static int nlm_call_share(struct shrlock *, struct nlm_host *,
    struct netobj *, int, int);
static int nlm_call_unshare(struct shrlock *, struct nlm_host *,
    struct netobj *, int);
static int nlm_reclaim_share(struct nlm_host *, vnode_t *,
    struct shrlock *, uint32_t);
static int nlm_local_shrlock(vnode_t *, struct shrlock *, int, int);
static void nlm_local_shrcancel(vnode_t *, struct shrlock *);

/*
 * Reclaim locks/shares acquired by the client side
 * on the given server represented by hostp.
 * The function is called from a dedicated thread
 * when the server reports that it has entered its
 * grace period.
 */
void
nlm_reclaim_client(struct nlm_globals *g, struct nlm_host *hostp)
{
	int32_t state;
	int error, sysid;
	struct locklist *llp_head, *llp;
	struct nlm_shres *nsp_head, *nsp;
	bool_t restart;

	sysid = hostp->nh_sysid | LM_SYSID_CLIENT;
	do {
		error = 0;
		restart = FALSE;
		state = nlm_host_get_state(hostp);

		DTRACE_PROBE3(reclaim__iter, struct nlm_globals *, g,
		    struct nlm_host *, hostp, int, state);

		/*
		 * We cancel all sleeping locks that were
		 * done by the host, because we don't allow
		 * reclamation of sleeping locks.  The reason
		 * we do this is that allowing reclamation of
		 * sleeping locks can potentially break the
		 * lock recovery order.
		 *
		 * Imagine that we have two client machines A and B
		 * and an NLM server machine.  A adds a non-sleeping
		 * lock to the file F and acquires this file.  Machine
		 * B in its turn adds a sleeping lock to the file
		 * F and blocks because F is already acquired by
		 * machine A.  Then the server crashes and after the
		 * reboot it notifies its clients about the crash.
		 * If we allowed reclamation of sleeping locks, it
		 * would be possible that machine B recovers its
		 * lock faster than machine A (for some reason),
		 * so that B acquires the file F after the server crash
		 * and machine A (that for some reason recovers slower)
		 * fails to recover its non-sleeping lock.  Thus the
		 * original lock order becomes broken.
		 */
		nlm_host_cancel_slocks(g, hostp);

		/*
		 * Try to reclaim all active locks we have
		 */
		llp_head = llp = flk_get_active_locks(sysid, NOPID);
		while (llp != NULL) {
			error = nlm_reclaim_lock(hostp, llp->ll_vp,
			    &llp->ll_flock, state);

			if (error == 0) {
				llp = llp->ll_next;
				continue;
			} else if (error == ERESTART) {
				restart = TRUE;
				break;
			} else {
				/*
				 * Critical error occurred, the lock
				 * can not be recovered, just take it away.
				 */
				nlm_local_cancelk(llp->ll_vp, &llp->ll_flock);
			}

			llp = llp->ll_next;
		}

		flk_free_locklist(llp_head);
		if (restart) {
			/*
			 * The lock reclamation function reported that
			 * the server state was changed (again), so
			 * try to repeat the whole reclamation process.
			 */
			continue;
		}

		nsp_head = nsp = nlm_get_active_shres(hostp);
		while (nsp != NULL) {
			error = nlm_reclaim_share(hostp, nsp->ns_vp,
			    nsp->ns_shr, state);

			if (error == 0) {
				nsp = nsp->ns_next;
				continue;
			} else if (error == ERESTART) {
				break;
			} else {
				/* Failed to reclaim share */
				nlm_shres_untrack(hostp, nsp->ns_vp,
				    nsp->ns_shr);
				nlm_local_shrcancel(nsp->ns_vp,
				    nsp->ns_shr);
			}

			nsp = nsp->ns_next;
		}

		nlm_free_shrlist(nsp_head);
	} while (state != nlm_host_get_state(hostp));
}
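
/*
 * For reference, a summary of the error contract the loops above rely on
 * (not new behavior): nlm_reclaim_lock()/nlm_reclaim_share() return 0 when
 * a lock/share was re-established on the server, ERESTART when the server
 * state changed yet again mid-recovery (forcing another pass of the outer
 * do/while loop), and any other error means the lock/share is lost, in
 * which case it is cancelled locally and its owner gets SIGLOST.
 */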

/*
 * nlm_frlock --
 *	NFS advisory byte-range locks.
 *	Called in klmops.c
 *
 * Note that the local locking code (os/flock.c) is used to
 * keep track of remote locks granted by some server, so we
 * can reclaim those locks after a server restarts.  We can
 * also sometimes use this as a cache of lock information.
 *
 * Was: nlm_advlock()
 */
/* ARGSUSED */
int
nlm_frlock(struct vnode *vp, int cmd, struct flock64 *flkp,
	int flags, u_offset_t offset, struct cred *crp,
	struct netobj *fhp, struct flk_callback *flcb, int vers)
{
	mntinfo_t *mi;
	servinfo_t *sv;
	const char *netid;
	struct nlm_host *hostp;
	int error;
	struct nlm_globals *g;

	mi = VTOMI(vp);
	sv = mi->mi_curr_serv;

	netid = nlm_knc_to_netid(sv->sv_knconf);
	if (netid == NULL) {
		NLM_ERR("nlm_frlock: unknown NFS netid\n");
		return (ENOSYS);
	}

	g = zone_getspecific(nlm_zone_key, curzone);
	hostp = nlm_host_findcreate(g, sv->sv_hostname, netid, &sv->sv_addr);
	if (hostp == NULL)
		return (ENOSYS);

	/*
	 * Purge cached attributes in order to make sure that
	 * future calls of convoff()/VOP_GETATTR() will get the
	 * latest data.
	 */
	if (flkp->l_whence == SEEK_END)
		PURGE_ATTRCACHE(vp);

	switch (cmd) {
	case F_GETLK:
		error = nlm_frlock_getlk(hostp, vp, flkp, flags,
		    offset, fhp, vers);
		break;

	case F_SETLK:
	case F_SETLKW:
		error = nlm_frlock_setlk(hostp, vp, flkp, flags,
		    offset, fhp, flcb, vers, (cmd == F_SETLKW));
		if (error == 0)
			nlm_host_monitor(g, hostp, 0);
		break;

	default:
		error = EINVAL;
		break;
	}

	nlm_host_release(g, hostp);
	return (error);
}
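
/*
 * For illustration (a sketch, not part of the interface contract): a
 * blocking whole-file write lock arriving from klmops.c would look like
 *
 *	flk.l_type = F_WRLCK;
 *	flk.l_whence = SEEK_SET;
 *	flk.l_start = flk.l_len = 0;
 *	error = nlm_frlock(vp, F_SETLKW, &flk, flags, 0, cr, &fh,
 *	    &flcb, NLM4_VERS);
 *
 * and is handled by nlm_frlock_setlk() below with do_block == TRUE.
 */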

static int
nlm_frlock_getlk(struct nlm_host *hostp, vnode_t *vp,
	struct flock64 *flkp, int flags, u_offset_t offset,
	struct netobj *fhp, int vers)
{
	struct flock64 flk0;
	int error;

	/*
	 * Check local (cached) locks first.
	 * If we find one, no need for RPC.
	 */
	flk0 = *flkp;
	flk0.l_pid = curproc->p_pid;
	error = nlm_local_getlk(vp, &flk0, flags);
	if (error != 0)
		return (error);
	if (flk0.l_type != F_UNLCK) {
		*flkp = flk0;
		return (0);
	}

	/* Not found locally.  Try remote. */
	flk0 = *flkp;
	flk0.l_pid = curproc->p_pid;
	error = convoff(vp, &flk0, 0, (offset_t)offset);
	if (error != 0)
		return (error);

	/* Now flk0 is the zero-based lock request. */
	error = nlm_call_test(&flk0, hostp, fhp, vers);
	if (error != 0)
		return (error);

	if (flk0.l_type == F_UNLCK) {
		/*
		 * Update the caller's *flkp with information
		 * on the conflicting lock (or lack thereof).
		 */
		flkp->l_type = F_UNLCK;
	} else {
		/*
		 * Found a conflicting lock.  Set the
		 * caller's *flkp with the info, first
		 * converting to the caller's whence.
		 */
		(void) convoff(vp, &flk0, flkp->l_whence, (offset_t)offset);
		*flkp = flk0;
	}

	return (0);
}

static int
nlm_frlock_setlk(struct nlm_host *hostp, vnode_t *vp,
	struct flock64 *flkp, int flags, u_offset_t offset,
	struct netobj *fhp, struct flk_callback *flcb,
	int vers, bool_t do_block)
{
	int error, xflags;

	error = convoff(vp, flkp, 0, (offset_t)offset);
	if (error != 0)
		return (error);

	/*
	 * NFS v2 clients should not request locks where any part
	 * of the lock range is beyond 0xffffffff.  The NFS code
	 * checks that (see nfs_frlock, flk_check_lock_data), but
	 * as that's outside this module, let's check here too.
	 * This check ensures that we will be able to convert this
	 * lock request into 32-bit form without change, and that
	 * (more importantly) when the granted call back arrives,
	 * it's unchanged when converted back into 64-bit form.
	 * If this lock range were to change in any way during
	 * either of those conversions, the "granted" call back
	 * from the NLM server would not find our sleeping lock.
	 */
	if (vers < NLM4_VERS) {
		if (flkp->l_start > MAX_UOFF32 ||
		    flkp->l_start + flkp->l_len > MAX_UOFF32 + 1)
			return (EINVAL);
	}

	/*
	 * Fill in l_sysid for the local locking calls.
	 * Also, let's not trust the caller's l_pid.
	 */
	flkp->l_sysid = hostp->nh_sysid | LM_SYSID_CLIENT;
	flkp->l_pid = curproc->p_pid;

	if (flkp->l_type == F_UNLCK) {
		/*
		 * Purge local (cached) lock information first,
		 * then clear the remote lock.
		 */
		(void) nlm_local_setlk(vp, flkp, flags);
		error = nlm_call_unlock(flkp, hostp, fhp, vers);

		return (error);
	}

	if (!do_block) {
		/*
		 * This is a non-blocking "set" request,
		 * so we can check locally first, and
		 * sometimes avoid an RPC call.
		 */
		struct flock64 flk0;

		flk0 = *flkp;
		error = nlm_local_getlk(vp, &flk0, flags);
		if (error == 0 && flk0.l_type != F_UNLCK) {
			/* Found a conflicting lock. */
			return (EAGAIN);
		}

		xflags = 0;
	} else {
		xflags = NLM_X_BLOCKING;
	}

	nfs_add_locking_id(vp, curproc->p_pid, RLMPL_PID,
	    (char *)&curproc->p_pid, sizeof (pid_t));

	error = nlm_call_lock(vp, flkp, hostp, fhp, flcb, vers, xflags);
	if (error != 0)
		return (error);

	/*
	 * Save the lock locally.  This should not fail,
	 * because the server is authoritative about locks
	 * and it just told us we have the lock!
	 */
	error = nlm_local_setlk(vp, flkp, flags);
	if (error != 0) {
		/*
		 * That's an unexpected situation.  Just ignore the error.
		 */
		NLM_WARN("nlm_frlock_setlk: Failed to set local lock. "
		    "[err=%d]\n", error);
		error = 0;
	}

	return (error);
}
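
/*
 * A worked example of the vers < NLM4_VERS range check above (for
 * illustration): with MAX_UOFF32 == 0xffffffff, a request with
 * l_start == 0xfffffff0 and l_len == 0x10 ends exactly at offset
 * 0xffffffff and passes (l_start + l_len == MAX_UOFF32 + 1), while
 * l_len == 0x11 pushes the range past what the 32-bit NLM protocol
 * can carry and is rejected with EINVAL.
 */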

/*
 * Cancel all client-side remote locks/shares on the
 * given host.  Report to the processes that own the
 * cancelled locks that they were removed by force
 * by sending them SIGLOST.
 */
void
nlm_client_cancel_all(struct nlm_globals *g, struct nlm_host *hostp)
{
	struct locklist *llp_head, *llp;
	struct nlm_shres *nsp_head, *nsp;
	struct netobj lm_fh;
	rpcvers_t vers;
	int error, sysid;

	sysid = hostp->nh_sysid | LM_SYSID_CLIENT;
	nlm_host_cancel_slocks(g, hostp);

	/*
	 * Destroy all active locks
	 */
	llp_head = llp = flk_get_active_locks(sysid, NOPID);
	while (llp != NULL) {
		llp->ll_flock.l_type = F_UNLCK;

		error = nlm_init_fh_by_vp(llp->ll_vp, &lm_fh, &vers);
		if (error == 0)
			(void) nlm_call_unlock(&llp->ll_flock, hostp,
			    &lm_fh, vers);

		nlm_local_cancelk(llp->ll_vp, &llp->ll_flock);
		llp = llp->ll_next;
	}

	flk_free_locklist(llp_head);

	/*
	 * Destroy all active share reservations
	 */
	nsp_head = nsp = nlm_get_active_shres(hostp);
	while (nsp != NULL) {
		error = nlm_init_fh_by_vp(nsp->ns_vp, &lm_fh, &vers);
		if (error == 0)
			(void) nlm_call_unshare(nsp->ns_shr, hostp,
			    &lm_fh, vers);

		nlm_local_shrcancel(nsp->ns_vp, nsp->ns_shr);
		nlm_shres_untrack(hostp, nsp->ns_vp, nsp->ns_shr);
		nsp = nsp->ns_next;
	}

	nlm_free_shrlist(nsp_head);
}

/*
 * The function determines whether the lock "fl" can
 * be safely applied to the file the vnode "vp" corresponds to.
 * The lock is considered "safe" when all of the following hold:
 *  - It's not a mandatory lock
 *  - If the vnode is mapped by anyone, the lock covers
 *    the whole file (so it cannot conflict with any
 *    mapped region).
 */
int
nlm_safelock(vnode_t *vp, const struct flock64 *fl, cred_t *cr)
{
	rnode_t *rp = VTOR(vp);
	struct vattr va;
	int err;

	if ((rp->r_mapcnt > 0) && (fl->l_start != 0 || fl->l_len != 0))
		return (0);

	va.va_mask = AT_MODE;
	err = VOP_GETATTR(vp, &va, 0, cr, NULL);
	if (err != 0)
		return (0);

	/* NLM4 doesn't allow mandatory file locking */
	if (MANDLOCK(vp, va.va_mode))
		return (0);

	return (1);
}
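
/*
 * For illustration: nlm_safelock() returns 0 (unsafe) for a byte-range
 * lock, e.g. l_start == 10, l_len == 5, on a file that is currently
 * mmap()ed (r_mapcnt > 0), and likewise for any lock on a file whose
 * mode makes MANDLOCK() true; a whole-file lock on an unmapped,
 * advisory-only file returns 1.
 */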

/*
 * The function determines whether it's safe to map
 * the file corresponding to vnode vp.
 * The mapping is considered "safe" if the file either
 * has no locks on it or all the locks it has cover
 * the whole file.
 */
int
nlm_safemap(const vnode_t *vp)
{
	struct locklist *llp, *llp_next;
	struct nlm_slock *nslp;
	struct nlm_globals *g;
	int safe = 1;

	/* Check active locks first */
	llp = flk_active_locks_for_vp(vp);
	while (llp != NULL) {
		if ((llp->ll_vp == vp) &&
		    !NLM_FLOCK_IS_SAFE(&llp->ll_flock))
			safe = 0;

		llp_next = llp->ll_next;
		VN_RELE(llp->ll_vp);
		kmem_free(llp, sizeof (*llp));
		llp = llp_next;
	}
	if (!safe)
		return (safe);

	/* Then check sleeping locks, if any */
	g = zone_getspecific(nlm_zone_key, curzone);
	mutex_enter(&g->lock);
	TAILQ_FOREACH(nslp, &g->nlm_slocks, nsl_link) {
		if (nslp->nsl_state == NLM_SL_BLOCKED &&
		    nslp->nsl_vp == vp &&
		    (nslp->nsl_lock.l_offset != 0 ||
		    nslp->nsl_lock.l_len != 0)) {
			safe = 0;
			break;
		}
	}

	mutex_exit(&g->lock);
	return (safe);
}

int
nlm_has_sleep(const vnode_t *vp)
{
	struct nlm_globals *g;
	struct nlm_slock *nslp;
	int has_slocks = FALSE;

	g = zone_getspecific(nlm_zone_key, curzone);
	mutex_enter(&g->lock);
	TAILQ_FOREACH(nslp, &g->nlm_slocks, nsl_link) {
		if (nslp->nsl_state == NLM_SL_BLOCKED &&
		    nslp->nsl_vp == vp) {
			has_slocks = TRUE;
			break;
		}
	}

	mutex_exit(&g->lock);
	return (has_slocks);
}

void
nlm_register_lock_locally(struct vnode *vp, struct nlm_host *hostp,
    struct flock64 *flk, int flags, u_offset_t offset)
{
	int sysid = 0;

	if (hostp == NULL) {
		mntinfo_t *mi;
		servinfo_t *sv;
		const char *netid;
		struct nlm_globals *g;

		mi = VTOMI(vp);
		sv = mi->mi_curr_serv;
		netid = nlm_knc_to_netid(sv->sv_knconf);

		if (netid != NULL) {
			g = zone_getspecific(nlm_zone_key, curzone);
			hostp = nlm_host_findcreate(g, sv->sv_hostname,
			    netid, &sv->sv_addr);
		}
	}

	if (hostp != NULL) {
		sysid = hostp->nh_sysid | LM_SYSID_CLIENT;
	}

	flk->l_sysid = sysid;
	(void) convoff(vp, flk, 0, (offset_t)offset);
	(void) nlm_local_setlk(vp, flk, flags);
}


/*
 * The BSD code had functions here to "reclaim" (destroy)
 * remote locks when a vnode is being forcibly destroyed.
 * We just keep vnodes around until statd tells us the
 * client has gone away.
 */

static int
nlm_reclaim_lock(struct nlm_host *hostp, vnode_t *vp,
	struct flock64 *flp, int32_t orig_state)
{
	struct netobj lm_fh;
	int error, state;
	rpcvers_t vers;

	/*
	 * If the remote NSM state changes during recovery, the host
	 * must have rebooted a second time.  In that case, we must
	 * restart the recovery.
	 */
	state = nlm_host_get_state(hostp);
	if (state != orig_state)
		return (ERESTART);

	error = nlm_init_fh_by_vp(vp, &lm_fh, &vers);
	if (error != 0)
		return (error);

	return (nlm_call_lock(vp, flp, hostp, &lm_fh,
	    NULL, vers, NLM_X_RECLAIM));
}

/*
 * Get local lock information for some NFS server.
 *
 * This gets (checks for) a local conflicting lock.
 * Note: Modifies passed flock, if a conflict is found,
 * but the caller expects that.
 */
static int
nlm_local_getlk(vnode_t *vp, struct flock64 *fl, int flags)
{
	VERIFY(fl->l_whence == SEEK_SET);
	return (reclock(vp, fl, 0, flags, 0, NULL));
}
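
/*
 * A note on sysids (for reference): the local lock manager identifies
 * owners by (l_sysid, l_pid).  Everything this file records locally on
 * behalf of the client carries
 *
 *	l_sysid = hostp->nh_sysid | LM_SYSID_CLIENT;
 *
 * so client-side records can never collide with the locks our own NLM
 * server keeps for remote clients under the bare nh_sysid (see also the
 * comment at nlm_local_setlk() below).
 */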

/*
 * Set local lock information for some NFS server.
 *
 * Called after a lock request (set or clear) succeeded.  We record the
 * details in the local lock manager.  Note that since the remote
 * server has granted the lock, we can be sure that it doesn't
 * conflict with any other locks we have in the local lock manager.
 *
 * Since it is possible that host may also make NLM client requests to
 * our NLM server, we use a different sysid value to record our own
 * client locks.
 *
 * Note that since it is possible for us to receive replies from the
 * server in a different order than the locks were granted (e.g. if
 * many local threads are contending for the same lock), we must use a
 * blocking operation when registering with the local lock manager.
 * We expect that any actual wait will be rare and short, hence we
 * ignore signals for this.
 */
static int
nlm_local_setlk(vnode_t *vp, struct flock64 *fl, int flags)
{
	VERIFY(fl->l_whence == SEEK_SET);
	return (reclock(vp, fl, SETFLCK, flags, 0, NULL));
}

/*
 * Cancel the local lock and send SIGLOST
 * to the lock owner.
 *
 * NOTE: modifies flp
 */
static void
nlm_local_cancelk(vnode_t *vp, struct flock64 *flp)
{
	flp->l_type = F_UNLCK;
	(void) nlm_local_setlk(vp, flp, FREAD | FWRITE);
	nlm_send_siglost(flp->l_pid);
}

/*
 * Do NLM_LOCK call.
 * Was: nlm_setlock()
 *
 * NOTE: nlm_call_lock() takes care of the locking/unlocking of
 * rnode->r_lkserlock: it must be released before nlm_call_lock()
 * goes to sleep waiting for a lock and re-acquired when it wakes up.
 */
static int
nlm_call_lock(vnode_t *vp, struct flock64 *flp,
	struct nlm_host *hostp, struct netobj *fhp,
	struct flk_callback *flcb, int vers, int xflags)
{
	struct nlm4_lockargs args;
	struct nlm_owner_handle oh;
	struct nlm_globals *g;
	rnode_t *rnp = VTOR(vp);
	struct nlm_slock *nslp = NULL;
	uint32_t xid;
	int error = 0;

	bzero(&args, sizeof (args));
	g = zone_getspecific(nlm_zone_key, curzone);
	nlm_init_lock(&args.alock, flp, fhp, &oh);

	args.exclusive = (flp->l_type == F_WRLCK);
	args.reclaim = xflags & NLM_X_RECLAIM;
	args.state = g->nsm_state;
	args.cookie.n_len = sizeof (xid);
	args.cookie.n_bytes = (char *)&xid;

	oh.oh_sysid = hostp->nh_sysid;
	xid = atomic_inc_32_nv(&nlm_xid);

	if (xflags & NLM_X_BLOCKING) {
		args.block = TRUE;
		nslp = nlm_slock_register(g, hostp, &args.alock, vp);
	}
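
	/*
	 * A note on the cookie (for reference): every call below carries a
	 * unique 32-bit transaction id taken from nlm_xid, so a reply can
	 * be matched to its request; for a blocking lock, the nlm_slock
	 * registered above is additionally what lets the server's GRANTED
	 * callback find this sleeping request later.
	 */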

	for (;;) {
		nlm_rpc_t *rpcp;
		enum clnt_stat stat;
		struct nlm4_res res;
		enum nlm4_stats nlm_err;

		error = nlm_host_get_rpc(hostp, vers, &rpcp);
		if (error != 0) {
			error = ENOLCK;
			goto out;
		}

		bzero(&res, sizeof (res));
		stat = nlm_lock_rpc(&args, &res, rpcp->nr_handle, vers);
		nlm_host_rele_rpc(hostp, rpcp);

		error = nlm_map_clnt_stat(stat);
		if (error != 0) {
			if (error == EAGAIN)
				continue;

			goto out;
		}

		DTRACE_PROBE1(lock__res, enum nlm4_stats, res.stat.stat);
		nlm_err = res.stat.stat;
		xdr_free((xdrproc_t)xdr_nlm4_res, (void *)&res);
		if (nlm_err == nlm4_denied_grace_period) {
			if (args.reclaim) {
				error = ENOLCK;
				goto out;
			}

			error = nlm_host_wait_grace(hostp);
			if (error != 0)
				goto out;

			continue;
		}

		switch (nlm_err) {
		case nlm4_granted:
		case nlm4_blocked:
			error = 0;
			break;

		case nlm4_denied:
			if (nslp != NULL) {
				NLM_WARN("nlm_call_lock: got nlm4_denied for "
				    "blocking lock\n");
			}

			error = EAGAIN;
			break;

		default:
			error = nlm_map_status(nlm_err);
		}

		/*
		 * If we are dealing with a non-blocking lock, or
		 * with a blocking lock that wasn't actually blocked
		 * on the server side (for some reason), our work
		 * is done.
		 */
		if (nslp == NULL ||
		    nlm_err != nlm4_blocked ||
		    error != 0)
			goto out;

		/*
		 * Before releasing the rnode's r_lkserlock, we should
		 * check whether the new lock is "safe".  If it's not
		 * safe, disable caching for the given vnode.  That is
		 * done only for sleeping locks that are waiting for a
		 * GRANT reply from the NLM server.
		 *
		 * NOTE: the vnode cache can be enabled again later if
		 * the unsafe lock gets merged with existing locks so
		 * that it becomes safe.  This condition is checked in
		 * the NFSv3 code (see nfs_lockcompletion).
		 */
		if (!NLM_FLOCK_IS_SAFE(flp)) {
			mutex_enter(&vp->v_lock);
			vp->v_flag |= VNOCACHE;
			mutex_exit(&vp->v_lock);
		}

		/*
		 * The server should call us back with a
		 * granted message when the lock succeeds.
		 * In order to deal with broken servers,
		 * lost granted messages, or server reboots,
		 * we will also re-try every few seconds.
		 *
		 * Note: We're supposed to call these
		 * flk_invoke_callbacks when blocking.
		 * Take care with rnode->r_lkserlock, we should
		 * release it before going to sleep.
		 */
		(void) flk_invoke_callbacks(flcb, FLK_BEFORE_SLEEP);
		nfs_rw_exit(&rnp->r_lkserlock);

		error = nlm_slock_wait(g, nslp, g->retrans_tmo);

		/*
		 * NFS expects that we return with rnode->r_lkserlock
		 * locked on write, so lock it back.
		 *
		 * NOTE: nfs_rw_enter_sig() can be either interruptible
		 * or not.  It depends on the options of the NFS mount.
		 * Here we're _always_ uninterruptible (independently of
		 * mount options), because nfs_frlock/nfs3_frlock expect
		 * that we return with rnode->r_lkserlock acquired.  So
		 * we don't want our lock attempt to be interrupted by a
		 * signal.
		 */
		(void) nfs_rw_enter_sig(&rnp->r_lkserlock, RW_WRITER, 0);
		(void) flk_invoke_callbacks(flcb, FLK_AFTER_SLEEP);

		if (error == 0) {
			break;
		} else if (error == EINTR) {
			/*
			 * We need to call the server to cancel our
			 * lock request.
			 */
			DTRACE_PROBE1(cancel__lock, int, error);
			(void) nlm_call_cancel(&args, hostp, vers);
			break;
		} else {
			/*
			 * Timeout happened, so resend the lock request
			 * to the server.  Well, we're a bit paranoid
			 * here, but keep in mind the previous request
			 * could have been lost (especially with a
			 * connectionless transport).
			 */

			ASSERT(error == ETIMEDOUT);
			continue;
		}
	}
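
	/*
	 * Summary of the outcomes above (for reference): error == 0 means
	 * the GRANTED callback arrived and the sleeping lock was satisfied;
	 * EINTR means we asked the server to cancel the pending request;
	 * ETIMEDOUT never escapes the loop, it only retransmits the
	 * NLM_LOCK request.
	 */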

	/*
	 * We may have disabled the vnode cache for this _sleeping_
	 * (condition: nslp != NULL) lock if it was unsafe.  Normally,
	 * nfs_lockcompletion() can enable the vnode cache again if the
	 * lock becomes safe after activation.  But that will not happen
	 * if any error occurs on the locking path.
	 *
	 * Here we enable the vnode cache again if an error occurred and
	 * there aren't any unsafe locks on the given vnode.  Note that
	 * if an error happened, the sleeping lock was deregistered.
	 */
	if (error != 0 && nslp != NULL && nlm_safemap(vp)) {
		mutex_enter(&vp->v_lock);
		vp->v_flag &= ~VNOCACHE;
		mutex_exit(&vp->v_lock);
	}

out:
	if (nslp != NULL)
		nlm_slock_unregister(g, nslp);

	return (error);
}

/*
 * Do NLM_CANCEL call.
 * Helper for nlm_call_lock() error recovery.
 */
static int
nlm_call_cancel(struct nlm4_lockargs *largs,
	struct nlm_host *hostp, int vers)
{
	nlm4_cancargs cargs;
	uint32_t xid;
	int error, retries;

	bzero(&cargs, sizeof (cargs));

	xid = atomic_inc_32_nv(&nlm_xid);
	cargs.cookie.n_len = sizeof (xid);
	cargs.cookie.n_bytes = (char *)&xid;
	cargs.block = largs->block;
	cargs.exclusive = largs->exclusive;
	cargs.alock = largs->alock;

	/*
	 * Unlike all other nlm_call_* functions, nlm_call_cancel
	 * doesn't spin forever until it gets a reasonable response
	 * from the NLM server.  It makes a limited number of retries
	 * and if the server doesn't send a reasonable reply, it
	 * returns an error.  It behaves like that because it's called
	 * from nlm_call_lock with blocked signals and thus cannot be
	 * interrupted from user space.
	 */
	for (retries = 0; retries < NLM_CANCEL_NRETRS; retries++) {
		nlm_rpc_t *rpcp;
		enum clnt_stat stat;
		struct nlm4_res res;

		error = nlm_host_get_rpc(hostp, vers, &rpcp);
		if (error != 0)
			return (ENOLCK);

		bzero(&res, sizeof (res));
		stat = nlm_cancel_rpc(&cargs, &res, rpcp->nr_handle, vers);
		nlm_host_rele_rpc(hostp, rpcp);

		DTRACE_PROBE1(cancel__rloop_end, enum clnt_stat, stat);
		error = nlm_map_clnt_stat(stat);
		if (error != 0) {
			if (error == EAGAIN)
				continue;

			return (error);
		}

		DTRACE_PROBE1(cancel__res, enum nlm4_stats, res.stat.stat);
		switch (res.stat.stat) {
			/*
			 * There was nothing to cancel.  We are going to
			 * go ahead and assume we got the lock.
			 */
		case nlm4_denied:
			/*
			 * The server has recently rebooted.  Treat this
			 * as a successful cancellation.
			 */
		case nlm4_denied_grace_period:
			/*
			 * We managed to cancel.
			 */
		case nlm4_granted:
			error = 0;
			break;

		default:
			/*
			 * Broken server implementation.  Can't really do
			 * anything here.
			 */
			error = EIO;
			break;
		}

		xdr_free((xdrproc_t)xdr_nlm4_res, (void *)&res);
		break;
	}

	return (error);
}

/*
 * Do NLM_UNLOCK call.
 * Was: nlm_clearlock
 */
static int
nlm_call_unlock(struct flock64 *flp, struct nlm_host *hostp,
	struct netobj *fhp, int vers)
{
	struct nlm4_unlockargs args;
	struct nlm_owner_handle oh;
	enum nlm4_stats nlm_err;
	uint32_t xid;
	int error;

	bzero(&args, sizeof (args));
	nlm_init_lock(&args.alock, flp, fhp, &oh);

	oh.oh_sysid = hostp->nh_sysid;
	xid = atomic_inc_32_nv(&nlm_xid);
	args.cookie.n_len = sizeof (xid);
	args.cookie.n_bytes = (char *)&xid;

	for (;;) {
		nlm_rpc_t *rpcp;
		struct nlm4_res res;
		enum clnt_stat stat;

		error = nlm_host_get_rpc(hostp, vers, &rpcp);
		if (error != 0)
			return (ENOLCK);

		bzero(&res, sizeof (res));
		stat = nlm_unlock_rpc(&args, &res, rpcp->nr_handle, vers);
		nlm_host_rele_rpc(hostp, rpcp);

		error = nlm_map_clnt_stat(stat);
		if (error != 0) {
			if (error == EAGAIN)
				continue;

			return (error);
		}

		DTRACE_PROBE1(unlock__res, enum nlm4_stats, res.stat.stat);
		nlm_err = res.stat.stat;
		xdr_free((xdrproc_t)xdr_nlm4_res, (void *)&res);
		if (nlm_err == nlm4_denied_grace_period) {
			error = nlm_host_wait_grace(hostp);
			if (error != 0)
				return (error);

			continue;
		}

		break;
	}

	/* special cases */
	switch (nlm_err) {
	case nlm4_denied:
		error = EINVAL;
		break;
	default:
		error = nlm_map_status(nlm_err);
		break;
	}

	return (error);
}
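
/*
 * A note on the nlm4_denied special case above (for reference): an
 * UNLOCK request has nothing to conflict with, so a server answering
 * nlm4_denied is out of spec; it is therefore mapped to EINVAL rather
 * than to EAGAIN, which is what nlm_map_status() would return.
 */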

/*
 * Do NLM_TEST call.
 * Was: nlm_getlock()
 */
static int
nlm_call_test(struct flock64 *flp, struct nlm_host *hostp,
	struct netobj *fhp, int vers)
{
	struct nlm4_testargs args;
	struct nlm4_holder h;
	struct nlm_owner_handle oh;
	enum nlm4_stats nlm_err;
	uint32_t xid;
	int error;

	bzero(&args, sizeof (args));
	nlm_init_lock(&args.alock, flp, fhp, &oh);

	args.exclusive = (flp->l_type == F_WRLCK);
	oh.oh_sysid = hostp->nh_sysid;
	xid = atomic_inc_32_nv(&nlm_xid);
	args.cookie.n_len = sizeof (xid);
	args.cookie.n_bytes = (char *)&xid;

	for (;;) {
		nlm_rpc_t *rpcp;
		struct nlm4_testres res;
		enum clnt_stat stat;

		error = nlm_host_get_rpc(hostp, vers, &rpcp);
		if (error != 0)
			return (ENOLCK);

		bzero(&res, sizeof (res));
		stat = nlm_test_rpc(&args, &res, rpcp->nr_handle, vers);
		nlm_host_rele_rpc(hostp, rpcp);

		error = nlm_map_clnt_stat(stat);
		if (error != 0) {
			if (error == EAGAIN)
				continue;

			return (error);
		}

		DTRACE_PROBE1(test__res, enum nlm4_stats, res.stat.stat);
		nlm_err = res.stat.stat;
		bcopy(&res.stat.nlm4_testrply_u.holder, &h, sizeof (h));
		xdr_free((xdrproc_t)xdr_nlm4_testres, (void *)&res);
		if (nlm_err == nlm4_denied_grace_period) {
			error = nlm_host_wait_grace(hostp);
			if (error != 0)
				return (error);

			continue;
		}

		break;
	}

	switch (nlm_err) {
	case nlm4_granted:
		flp->l_type = F_UNLCK;
		error = 0;
		break;

	case nlm4_denied:
		flp->l_start = h.l_offset;
		flp->l_len = h.l_len;
		flp->l_pid = h.svid;
		flp->l_type = (h.exclusive) ? F_WRLCK : F_RDLCK;
		flp->l_whence = SEEK_SET;
		flp->l_sysid = 0;
		error = 0;
		break;

	default:
		error = nlm_map_status(nlm_err);
		break;
	}

	return (error);
}


static void
nlm_init_lock(struct nlm4_lock *lock,
	const struct flock64 *fl, struct netobj *fh,
	struct nlm_owner_handle *oh)
{

	/* Caller converts to zero-base. */
	VERIFY(fl->l_whence == SEEK_SET);
	bzero(lock, sizeof (*lock));
	bzero(oh, sizeof (*oh));

	lock->caller_name = uts_nodename();
	lock->fh.n_len = fh->n_len;
	lock->fh.n_bytes = fh->n_bytes;
	lock->oh.n_len = sizeof (*oh);
	lock->oh.n_bytes = (void *)oh;
	lock->svid = fl->l_pid;
	lock->l_offset = fl->l_start;
	lock->l_len = fl->l_len;
}

/* ************************************************************** */

int
nlm_shrlock(struct vnode *vp, int cmd, struct shrlock *shr,
	int flags, struct netobj *fh, int vers)
{
	struct shrlock shlk;
	mntinfo_t *mi;
	servinfo_t *sv;
	const char *netid;
	struct nlm_host *host = NULL;
	int error;
	struct nlm_globals *g;

	mi = VTOMI(vp);
	sv = mi->mi_curr_serv;

	netid = nlm_knc_to_netid(sv->sv_knconf);
	if (netid == NULL) {
		NLM_ERR("nlm_shrlock: unknown NFS netid\n");
		return (ENOSYS);
	}

	g = zone_getspecific(nlm_zone_key, curzone);
	host = nlm_host_findcreate(g, sv->sv_hostname, netid, &sv->sv_addr);
	if (host == NULL)
		return (ENOSYS);

	/*
	 * Fill in s_sysid for the local locking calls.
	 * Also, let's not trust the caller's s_pid.
	 */
	shlk = *shr;
	shlk.s_sysid = host->nh_sysid | LM_SYSID_CLIENT;
	shlk.s_pid = curproc->p_pid;

	if (cmd == F_UNSHARE) {
		/*
		 * Purge local (cached) share information first,
		 * then clear the remote share.
		 */
		(void) nlm_local_shrlock(vp, &shlk, cmd, flags);
		nlm_shres_untrack(host, vp, &shlk);
		error = nlm_call_unshare(&shlk, host, fh, vers);
		goto out;
	}

	nfs_add_locking_id(vp, curproc->p_pid, RLMPL_OWNER,
	    shr->s_owner, shr->s_own_len);

	error = nlm_call_share(&shlk, host, fh, vers, FALSE);
	if (error != 0)
		goto out;

	/*
	 * Save the share locally.  This should not fail,
	 * because the server is authoritative about shares
	 * and it just told us we have the share reservation!
	 */
	error = nlm_local_shrlock(vp, &shlk, cmd, flags);
	if (error != 0) {
		/*
		 * Oh oh, we really don't expect an error here.
		 */
		NLM_WARN("nlm_shrlock: set locally, err %d\n", error);
		error = 0;
	}

	nlm_shres_track(host, vp, &shlk);
	nlm_host_monitor(g, host, 0);

out:
	nlm_host_release(g, host);

	return (error);
}

static int
nlm_reclaim_share(struct nlm_host *hostp, vnode_t *vp,
	struct shrlock *shr, uint32_t orig_state)
{
	struct netobj lm_fh;
	int error, state;
	rpcvers_t vers;

	state = nlm_host_get_state(hostp);
	if (state != orig_state) {
		/*
		 * It seems that the NLM server rebooted while
		 * we were busy with recovery.
		 */
		return (ERESTART);
	}

	error = nlm_init_fh_by_vp(vp, &lm_fh, &vers);
	if (error != 0)
		return (error);

	return (nlm_call_share(shr, hostp, &lm_fh, vers, 1));
}
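
/*
 * Note (for reference): nlm_reclaim_share() passes reclaim == 1 so the
 * server accepts the SHARE request while it is still in its grace
 * period; a non-reclaim request arriving during grace would only get
 * nlm4_denied_grace_period back (see nlm_call_share() below).
 */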

/*
 * Set local share information for some NFS server.
 *
 * Called after a share request (set or clear) succeeded.  We record
 * the details in the local lock manager.  Note that since the remote
 * server has granted the share, we can be sure that it doesn't
 * conflict with any other shares we have in the local lock manager.
 *
 * Since it is possible that host may also make NLM client requests to
 * our NLM server, we use a different sysid value to record our own
 * client shares.
 */
int
nlm_local_shrlock(vnode_t *vp, struct shrlock *shr, int cmd, int flags)
{
	return (fs_shrlock(vp, cmd, shr, flags, CRED(), NULL));
}

static void
nlm_local_shrcancel(vnode_t *vp, struct shrlock *shr)
{
	(void) nlm_local_shrlock(vp, shr, F_UNSHARE, FREAD | FWRITE);
	nlm_send_siglost(shr->s_pid);
}

/*
 * Do NLM_SHARE call.
 * Was: nlm_setshare()
 */
static int
nlm_call_share(struct shrlock *shr, struct nlm_host *host,
	struct netobj *fh, int vers, int reclaim)
{
	struct nlm4_shareargs args;
	enum nlm4_stats nlm_err;
	uint32_t xid;
	int error;

	bzero(&args, sizeof (args));
	nlm_init_share(&args.share, shr, fh);

	args.reclaim = reclaim;
	xid = atomic_inc_32_nv(&nlm_xid);
	args.cookie.n_len = sizeof (xid);
	args.cookie.n_bytes = (char *)&xid;

	for (;;) {
		nlm_rpc_t *rpcp;
		struct nlm4_shareres res;
		enum clnt_stat stat;

		error = nlm_host_get_rpc(host, vers, &rpcp);
		if (error != 0)
			return (ENOLCK);

		bzero(&res, sizeof (res));
		stat = nlm_share_rpc(&args, &res, rpcp->nr_handle, vers);
		nlm_host_rele_rpc(host, rpcp);

		error = nlm_map_clnt_stat(stat);
		if (error != 0) {
			if (error == EAGAIN)
				continue;

			return (error);
		}

		DTRACE_PROBE1(share__res, enum nlm4_stats, res.stat);
		nlm_err = res.stat;
		xdr_free((xdrproc_t)xdr_nlm4_shareres, (void *)&res);
		if (nlm_err == nlm4_denied_grace_period) {
			if (args.reclaim)
				return (ENOLCK);

			error = nlm_host_wait_grace(host);
			if (error != 0)
				return (error);

			continue;
		}

		break;
	}

	switch (nlm_err) {
	case nlm4_granted:
		error = 0;
		break;
	case nlm4_blocked:
	case nlm4_denied:
		error = EAGAIN;
		break;
	case nlm4_denied_nolocks:
	case nlm4_deadlck:
		error = ENOLCK;
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
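
/*
 * Note (for reference): NLM_SHARE has no blocking variant and, unlike
 * byte-range locks, no GRANTED callback, which is why an nlm4_blocked
 * reply is treated above exactly like nlm4_denied (EAGAIN).
 */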

/*
 * Do NLM_UNSHARE call.
 */
static int
nlm_call_unshare(struct shrlock *shr, struct nlm_host *host,
	struct netobj *fh, int vers)
{
	struct nlm4_shareargs args;
	enum nlm4_stats nlm_err;
	uint32_t xid;
	int error;

	bzero(&args, sizeof (args));
	nlm_init_share(&args.share, shr, fh);

	xid = atomic_inc_32_nv(&nlm_xid);
	args.cookie.n_len = sizeof (xid);
	args.cookie.n_bytes = (char *)&xid;

	for (;;) {
		nlm_rpc_t *rpcp;
		struct nlm4_shareres res;
		enum clnt_stat stat;

		error = nlm_host_get_rpc(host, vers, &rpcp);
		if (error != 0)
			return (ENOLCK);

		bzero(&res, sizeof (res));
		stat = nlm_unshare_rpc(&args, &res, rpcp->nr_handle, vers);
		nlm_host_rele_rpc(host, rpcp);

		error = nlm_map_clnt_stat(stat);
		if (error != 0) {
			if (error == EAGAIN)
				continue;

			return (error);
		}

		DTRACE_PROBE1(unshare__res, enum nlm4_stats, res.stat);
		nlm_err = res.stat;
		xdr_free((xdrproc_t)xdr_nlm4_shareres, (void *)&res);
		if (nlm_err == nlm4_denied_grace_period) {
			error = nlm_host_wait_grace(host);
			if (error != 0)
				return (error);

			continue;
		}

		break;
	}

	switch (nlm_err) {
	case nlm4_granted:
		error = 0;
		break;
	case nlm4_denied:
		error = EAGAIN;
		break;
	case nlm4_denied_nolocks:
		error = ENOLCK;
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

static void
nlm_init_share(struct nlm4_share *args,
	const struct shrlock *shr, struct netobj *fh)
{

	bzero(args, sizeof (*args));

	args->caller_name = uts_nodename();
	args->fh.n_len = fh->n_len;
	args->fh.n_bytes = fh->n_bytes;
	args->oh.n_len = shr->s_own_len;
	args->oh.n_bytes = (void *)shr->s_owner;

	switch (shr->s_deny) {
	default:
	case F_NODNY:
		args->mode = fsm_DN;
		break;
	case F_RDDNY:
		args->mode = fsm_DR;
		break;
	case F_WRDNY:
		args->mode = fsm_DW;
		break;
	case F_RWDNY:
		args->mode = fsm_DRW;
		break;
	}

	switch (shr->s_access) {
	default:
	case 0:	/* seen with F_UNSHARE */
		args->access = fsa_NONE;
		break;
	case F_RDACC:
		args->access = fsa_R;
		break;
	case F_WRACC:
		args->access = fsa_W;
		break;
	case F_RWACC:
		args->access = fsa_RW;
		break;
	}
}

/*
 * Initialize the file handle according to the NFS version
 * the vnode was created on.  The NLM version that can be
 * used with the given NFS version is saved to lm_vers.
 */
static int
nlm_init_fh_by_vp(vnode_t *vp, struct netobj *fh, rpcvers_t *lm_vers)
{
	mntinfo_t *mi = VTOMI(vp);

	/*
	 * Too bad the NFS code doesn't just carry the FH
	 * in a netobj or a netbuf.
	 */
	switch (mi->mi_vers) {
	case NFS_V3:
		/* See nfs3_frlock() */
		*lm_vers = NLM4_VERS;
		fh->n_len = VTOFH3(vp)->fh3_length;
		fh->n_bytes = (char *)&(VTOFH3(vp)->fh3_u.data);
		break;

	case NFS_VERSION:
		/* See nfs_frlock() */
		*lm_vers = NLM_VERS;
		fh->n_len = sizeof (fhandle_t);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		fh->n_bytes = (char *)VTOFH(vp);
		break;
	default:
		return (ENOSYS);
	}

	return (0);
}
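
/*
 * Version pairing used above (for reference): NFSv3 file handles go with
 * NLM4_VERS (64-bit offsets), while an NFSv2 fhandle_t goes with NLM_VERS
 * (32-bit offsets).  The latter is what makes the MAX_UOFF32 check in
 * nlm_frlock_setlk() necessary for vers < NLM4_VERS.
 */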

/*
 * Send SIGLOST to the process identified by pid.
 * NOTE: called when NLM decides to remove a lock
 * or a share reservation owned by the process
 * by force.
 */
static void
nlm_send_siglost(pid_t pid)
{
	proc_t *p;

	mutex_enter(&pidlock);
	p = prfind(pid);
	if (p != NULL)
		psignal(p, SIGLOST);

	mutex_exit(&pidlock);
}

static int
nlm_map_clnt_stat(enum clnt_stat stat)
{
	switch (stat) {
	case RPC_SUCCESS:
		return (0);

	case RPC_TIMEDOUT:
	case RPC_PROGUNAVAIL:
		return (EAGAIN);

	case RPC_INTR:
		return (EINTR);

	default:
		return (EINVAL);
	}
}

static int
nlm_map_status(enum nlm4_stats stat)
{
	switch (stat) {
	case nlm4_granted:
		return (0);

	case nlm4_denied:
		return (EAGAIN);

	case nlm4_denied_nolocks:
		return (ENOLCK);

	case nlm4_blocked:
		return (EAGAIN);

	case nlm4_denied_grace_period:
		return (EAGAIN);

	case nlm4_deadlck:
		return (EDEADLK);

	case nlm4_rofs:
		return (EROFS);

	case nlm4_stale_fh:
		return (ESTALE);

	case nlm4_fbig:
		return (EFBIG);

	case nlm4_failed:
		return (EACCES);

	default:
		return (EINVAL);
	}
}