/*	$NetBSD: lockd_lock.c,v 1.5 2000/11/21 03:47:41 enami Exp $	*/

/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 2001 Andrew P. Lentvorski, Jr.
 * Copyright (c) 2000 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define LOCKD_DEBUG

#include <stdio.h>
#ifdef LOCKD_DEBUG
#include <stdarg.h>
#endif
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <syslog.h>
#include <errno.h>
#include <string.h>
#include <signal.h>
#include <rpc/rpc.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/wait.h>
#include <rpcsvc/sm_inter.h>
#include <rpcsvc/nlm_prot.h>
#include "lockd_lock.h"
#include "lockd.h"

#define MAXOBJECTSIZE 64
#define MAXBUFFERSIZE 1024

/*
 * A set of utilities for managing file locking
 *
 * XXX: All locks are in a linked list, a better structure should be used
 * to improve search/access efficiency.
 */

/* struct describing a lock */
struct file_lock {
	LIST_ENTRY(file_lock) nfslocklist;
	fhandle_t filehandle; /* NFS filehandle */
	struct sockaddr *addr;
	struct nlm4_holder client; /* lock holder */
	/* XXX: client_cookie used *only* in send_granted */
	netobj client_cookie; /* cookie sent by the client */
	int nsm_status; /* status from the remote lock manager */
	int status; /* lock status, see below */
	int flags; /* lock flags, see lockd_lock.h */
	int blocking; /* blocking lock or not */
	char client_name[SM_MAXSTRLEN]; /* client_name is really variable
					   length and must be last! */
};
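
/*
 * Because client_name is logically variable length and must be the final
 * member, allocate_file_lock() below sizes its allocation as the struct
 * size minus the declared array plus the actual name length.  A minimal
 * sketch of that idiom (n is the caller name length):
 *
 *	struct file_lock *fl;
 *
 *	fl = malloc(sizeof(*fl) - sizeof(fl->client_name) + n + 1);
 */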

LIST_HEAD(nfslocklist_head, file_lock);
struct nfslocklist_head nfslocklist_head =
    LIST_HEAD_INITIALIZER(nfslocklist_head);

LIST_HEAD(blockedlocklist_head, file_lock);
struct blockedlocklist_head blockedlocklist_head =
    LIST_HEAD_INITIALIZER(blockedlocklist_head);

/* lock status */
#define LKST_LOCKED	1 /* lock is locked */
/* XXX: Is this flag file specific or lock specific? */
#define LKST_WAITING	2 /* file is already locked by another host */
#define LKST_PROCESSING	3 /* child is trying to acquire the lock */
#define LKST_DYING	4 /* must die when we get news from the child */

/* struct describing a monitored host */
struct host {
	LIST_ENTRY(host) hostlst;
	int refcnt;
	char name[SM_MAXSTRLEN]; /* name is really variable length and
				    must be last! */
};
/* list of hosts we monitor */
LIST_HEAD(hostlst_head, host);
struct hostlst_head hostlst_head = LIST_HEAD_INITIALIZER(hostlst_head);

/*
 * File monitoring handlers
 * XXX: These might be able to be removed when kevent support
 * is placed into the hardware lock/unlock routines.  (i.e.
 * let the kernel do all the file monitoring)
 */

/* Struct describing a monitored file */
struct monfile {
	LIST_ENTRY(monfile) monfilelist;
	fhandle_t filehandle; /* Local access filehandle */
	int fd; /* file descriptor: remains open until unlock! */
	int refcount;
	int exclusive;
};

/* List of files we monitor */
LIST_HEAD(monfilelist_head, monfile);
struct monfilelist_head monfilelist_head =
    LIST_HEAD_INITIALIZER(monfilelist_head);

static int debugdelay = 0;

enum nfslock_status { NFS_GRANTED = 0, NFS_GRANTED_DUPLICATE,
		      NFS_DENIED, NFS_DENIED_NOLOCK,
		      NFS_RESERR };

enum hwlock_status { HW_GRANTED = 0, HW_GRANTED_DUPLICATE,
		     HW_DENIED, HW_DENIED_NOLOCK,
		     HW_STALEFH, HW_READONLY, HW_RESERR };

enum partialfilelock_status { PFL_GRANTED=0, PFL_GRANTED_DUPLICATE, PFL_DENIED,
	PFL_NFSDENIED, PFL_NFSBLOCKED, PFL_NFSDENIED_NOLOCK, PFL_NFSRESERR,
	PFL_HWDENIED, PFL_HWBLOCKED, PFL_HWDENIED_NOLOCK, PFL_HWRESERR };

enum LFLAGS {LEDGE_LEFT, LEDGE_LBOUNDARY, LEDGE_INSIDE, LEDGE_RBOUNDARY, LEDGE_RIGHT};
enum RFLAGS {REDGE_LEFT, REDGE_LBOUNDARY, REDGE_INSIDE, REDGE_RBOUNDARY, REDGE_RIGHT};
/* XXX: WARNING! I HAVE OVERLOADED THIS STATUS ENUM!  SPLIT IT APART INTO TWO */
enum split_status {SPL_DISJOINT=0, SPL_LOCK1=1, SPL_LOCK2=2,
		   SPL_CONTAINED=4, SPL_RESERR=8};

enum partialfilelock_status lock_partialfilelock(struct file_lock *fl);

void send_granted(struct file_lock *fl, int opcode);
void siglock(void);
void sigunlock(void);
void monitor_lock_host(const char *hostname);
void unmonitor_lock_host(char *hostname);

void copy_nlm4_lock_to_nlm4_holder(const struct nlm4_lock *src,
    const bool_t exclusive, struct nlm4_holder *dest);
struct file_lock *allocate_file_lock(const netobj *lockowner,
    const netobj *matchcookie,
    const struct sockaddr *addr,
    const char *caller_name);
void deallocate_file_lock(struct file_lock *fl);
void fill_file_lock(struct file_lock *fl, const fhandle_t *fh,
    const bool_t exclusive, const int32_t svid,
    const u_int64_t offset, const u_int64_t len,
    const int state, const int status, const int flags, const int blocking);
int regions_overlap(const u_int64_t start1, const u_int64_t len1,
    const u_int64_t start2, const u_int64_t len2);
enum split_status region_compare(const u_int64_t starte, const u_int64_t lene,
    const u_int64_t startu, const u_int64_t lenu,
    u_int64_t *start1, u_int64_t *len1, u_int64_t *start2, u_int64_t *len2);
int same_netobj(const netobj *n0, const netobj *n1);
int same_filelock_identity(const struct file_lock *fl0,
    const struct file_lock *fl1);

static void debuglog(char const *fmt, ...);
void dump_static_object(const unsigned char *object, const int sizeof_object,
    unsigned char *hbuff, const int sizeof_hbuff,
    unsigned char *cbuff, const int sizeof_cbuff);
void dump_netobj(const struct netobj *nobj);
void dump_filelock(const struct file_lock *fl);
struct file_lock *get_lock_matching_unlock(const struct file_lock *fl);
enum nfslock_status test_nfslock(const struct file_lock *fl,
    struct file_lock **conflicting_fl);
enum nfslock_status lock_nfslock(struct file_lock *fl);
enum nfslock_status delete_nfslock(struct file_lock *fl);
enum nfslock_status unlock_nfslock(const struct file_lock *fl,
    struct file_lock **released_lock, struct file_lock **left_lock,
    struct file_lock **right_lock);
enum hwlock_status lock_hwlock(struct file_lock *fl);
enum split_status split_nfslock(const struct file_lock *exist_lock,
    const struct file_lock *unlock_lock, struct file_lock **left_lock,
    struct file_lock **right_lock);
int duplicate_block(struct file_lock *fl);
void add_blockingfilelock(struct file_lock *fl);
enum hwlock_status unlock_hwlock(const struct file_lock *fl);
enum hwlock_status test_hwlock(const struct file_lock *fl,
    struct file_lock **conflicting_fl);
void remove_blockingfilelock(struct file_lock *fl);
void clear_blockingfilelock(const char *hostname);
void retry_blockingfilelocklist(void);
enum partialfilelock_status unlock_partialfilelock(
    const struct file_lock *fl);
void clear_partialfilelock(const char *hostname);
enum partialfilelock_status test_partialfilelock(
    const struct file_lock *fl, struct file_lock **conflicting_fl);
enum nlm_stats do_test(struct file_lock *fl,
    struct file_lock **conflicting_fl);
enum nlm_stats do_unlock(struct file_lock *fl);
enum nlm_stats do_lock(struct file_lock *fl);
void do_clear(const char *hostname);
size_t strnlen(const char *, size_t);

static void
debuglog(char const *fmt, ...)
{
	va_list ap;

	if (debug_level < 1) {
		return;
	}

	sleep(debugdelay);

	va_start(ap, fmt);
	vsyslog(LOG_DEBUG, fmt, ap);
	va_end(ap);
}

void
dump_static_object(const unsigned char *object, const int size_object,
    unsigned char *hbuff, const int size_hbuff,
    unsigned char *cbuff, const int size_cbuff)
{
	int i, objectsize;

	if (debug_level < 2) {
		return;
	}

	objectsize = size_object;

	if (objectsize == 0) {
		debuglog("object is size 0\n");
	} else {
		if (objectsize > MAXOBJECTSIZE) {
			debuglog("Object of size %d being clamped"
			    " to size %d\n", objectsize, MAXOBJECTSIZE);
			objectsize = MAXOBJECTSIZE;
		}

		if (hbuff != NULL) {
			if (size_hbuff < objectsize*2+1) {
				debuglog("Hbuff not large enough."
				    " Increase size\n");
			} else {
				for (i = 0; i < objectsize; i++) {
					sprintf(hbuff+i*2, "%02x", *(object+i));
				}
				*(hbuff+i*2) = '\0';
			}
		}

		if (cbuff != NULL) {
			if (size_cbuff < objectsize+1) {
				debuglog("Cbuff not large enough."
				    " Increase size\n");
			} else {
				for (i = 0; i < objectsize; i++) {
					if (*(object+i) >= 32 &&
					    *(object+i) <= 127) {
						*(cbuff+i) = *(object+i);
					} else {
						*(cbuff+i) = '.';
					}
				}
				*(cbuff+i) = '\0';
			}
		}
	}
}

void
dump_netobj(const struct netobj *nobj)
{
	char hbuff[MAXBUFFERSIZE*2];
	char cbuff[MAXBUFFERSIZE];

	if (debug_level < 2) {
		return;
	}

	if (nobj == NULL) {
		debuglog("Null netobj pointer\n");
	} else if (nobj->n_len == 0) {
		debuglog("Size zero netobj\n");
	} else {
		dump_static_object(nobj->n_bytes, nobj->n_len,
		    hbuff, sizeof(hbuff), cbuff, sizeof(cbuff));
		debuglog("netobj: len: %d data: %s ::: %s\n",
		    nobj->n_len, hbuff, cbuff);
	}
}

/* #define DUMP_FILELOCK_VERBOSE */
void
dump_filelock(const struct file_lock *fl)
{
#ifdef DUMP_FILELOCK_VERBOSE
	char hbuff[MAXBUFFERSIZE*2];
	char cbuff[MAXBUFFERSIZE];
#endif

	if (debug_level < 2) {
		return;
	}

	if (fl != NULL) {
		debuglog("Dumping file lock structure @ %p\n", fl);

#ifdef DUMP_FILELOCK_VERBOSE
		dump_static_object((unsigned char *)&fl->filehandle,
		    sizeof(fl->filehandle), hbuff, sizeof(hbuff),
		    cbuff, sizeof(cbuff));
		debuglog("Filehandle: %8s ::: %8s\n", hbuff, cbuff);
#endif

		debuglog("Dumping nlm4_holder:\n"
		    "exc: %x svid: %x offset:len %llx:%llx\n",
		    fl->client.exclusive, fl->client.svid,
		    fl->client.l_offset, fl->client.l_len);

#ifdef DUMP_FILELOCK_VERBOSE
		debuglog("Dumping client identity:\n");
		dump_netobj(&fl->client.oh);

		debuglog("Dumping client cookie:\n");
		dump_netobj(&fl->client_cookie);

		debuglog("nsm: %d status: %d flags: %d svid: %x"
		    " client_name: %s\n", fl->nsm_status, fl->status,
		    fl->flags, fl->client.svid, fl->client_name);
#endif
	} else {
		debuglog("NULL file lock structure\n");
	}
}

void
copy_nlm4_lock_to_nlm4_holder(const struct nlm4_lock *src,
    const bool_t exclusive, struct nlm4_holder *dest)
{

	dest->exclusive = exclusive;
	dest->oh.n_len = src->oh.n_len;
	dest->oh.n_bytes = src->oh.n_bytes;
	dest->svid = src->svid;
	dest->l_offset = src->l_offset;
	dest->l_len = src->l_len;
}

size_t
strnlen(const char *s, size_t len)
{
	size_t n;

	/* Check the bound before dereferencing s[n] */
	for (n = 0; n < len && s[n] != 0; n++)
		;
	return n;
}

/*
 * allocate_file_lock: Create a lock with the given parameters
 */

struct file_lock *
allocate_file_lock(const netobj *lockowner, const netobj *matchcookie,
    const struct sockaddr *addr, const char *caller_name)
{
	struct file_lock *newfl;
	size_t n;

	/* Beware of rubbish input! */
	n = strnlen(caller_name, SM_MAXSTRLEN);
	if (n == SM_MAXSTRLEN) {
		return NULL;
	}

	newfl = malloc(sizeof(*newfl) - sizeof(newfl->client_name) + n + 1);
	if (newfl == NULL) {
		return NULL;
	}
	bzero(newfl, sizeof(*newfl) - sizeof(newfl->client_name));
	memcpy(newfl->client_name, caller_name, n);
	newfl->client_name[n] = 0;

	newfl->client.oh.n_bytes = malloc(lockowner->n_len);
	if (newfl->client.oh.n_bytes == NULL) {
		free(newfl);
		return NULL;
	}
	newfl->client.oh.n_len = lockowner->n_len;
	bcopy(lockowner->n_bytes, newfl->client.oh.n_bytes, lockowner->n_len);

	newfl->client_cookie.n_bytes = malloc(matchcookie->n_len);
	if (newfl->client_cookie.n_bytes == NULL) {
		free(newfl->client.oh.n_bytes);
		free(newfl);
		return NULL;
	}
	newfl->client_cookie.n_len = matchcookie->n_len;
	bcopy(matchcookie->n_bytes, newfl->client_cookie.n_bytes,
	    matchcookie->n_len);

	newfl->addr = malloc(addr->sa_len);
	if (newfl->addr == NULL) {
		free(newfl->client_cookie.n_bytes);
		free(newfl->client.oh.n_bytes);
		free(newfl);
		return NULL;
	}
	memcpy(newfl->addr, addr, addr->sa_len);

	return newfl;
}

/*
 * fill_file_lock: Force creation of a valid file lock
 */
void
fill_file_lock(struct file_lock *fl, const fhandle_t *fh,
    const bool_t exclusive, const int32_t svid,
    const u_int64_t offset, const u_int64_t len,
    const int state, const int status, const int flags, const int blocking)
{
	bcopy(fh, &fl->filehandle, sizeof(fhandle_t));

	fl->client.exclusive = exclusive;
	fl->client.svid = svid;
	fl->client.l_offset = offset;
	fl->client.l_len = len;

	fl->nsm_status = state;
	fl->status = status;
	fl->flags = flags;
	fl->blocking = blocking;
}

/*
 * deallocate_file_lock: Free all storage associated with a file lock
 */
void
deallocate_file_lock(struct file_lock *fl)
{
	free(fl->addr);
	free(fl->client.oh.n_bytes);
	free(fl->client_cookie.n_bytes);
	free(fl);
}

/*
 * regions_overlap(): This function examines the two provided regions for
 * overlap.
 */
int
regions_overlap(const u_int64_t start1, const u_int64_t len1,
    const u_int64_t start2, const u_int64_t len2)
{
	u_int64_t d1, d2, d3, d4;
	enum split_status result;

	debuglog("Entering region overlap with vals: %llu:%llu--%llu:%llu\n",
	    start1, len1, start2, len2);

	result = region_compare(start1, len1, start2, len2,
	    &d1, &d2, &d3, &d4);

	debuglog("Exiting region overlap with val: %d\n", result);

	if (result == SPL_DISJOINT) {
		return 0;
	} else {
		return 1;
	}
}

/*
 * region_compare(): Examine lock regions and split appropriately
 *
 * XXX: Fix 64 bit overflow problems
 * XXX: Check to make sure I got *ALL* the cases.
 * XXX: This DESPERATELY needs a regression test.
 */
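/*
 * Worked example of the split logic (values are hypothetical, for
 * illustration only): an established lock at starte=10, lene=10 covers
 * bytes 10..19.  Unlocking startu=13, lenu=4 (bytes 13..16) leaves the
 * left edge LEDGE_INSIDE and the right edge REDGE_INSIDE, so the call
 *
 *	region_compare(10, 10, 13, 4, &s1, &l1, &s2, &l2);
 *
 * returns SPL_LOCK1|SPL_LOCK2 with s1=10, l1=3 (bytes 10..12) and
 * s2=17, l2=3 (bytes 17..19): the two fragments that remain locked.
 */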
enum split_status
region_compare(const u_int64_t starte, const u_int64_t lene,
    const u_int64_t startu, const u_int64_t lenu,
    u_int64_t *start1, u_int64_t *len1, u_int64_t *start2, u_int64_t *len2)
{
	/*
	 * Please pay attention to the sequential exclusions
	 * of the if statements!!!
	 */
	enum LFLAGS lflags;
	enum RFLAGS rflags;
	enum split_status retval;

	retval = SPL_DISJOINT;

	if (lene == 0 && lenu == 0) {
		/* Examine left edge of unlocker */
		lflags = LEDGE_INSIDE;
		if (startu < starte) {
			lflags = LEDGE_LEFT;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
		}

		rflags = REDGE_RBOUNDARY; /* Both are infinite */

		if (lflags == LEDGE_INSIDE) {
			*start1 = starte;
			*len1 = startu - starte;
		}

		if (lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) {
			retval = SPL_CONTAINED;
		} else {
			retval = SPL_LOCK1;
		}
	} else if (lene == 0 && lenu != 0) {
		/* Established lock is infinite */
		/* Examine left edge of unlocker */
		lflags = LEDGE_INSIDE;
		if (startu < starte) {
			lflags = LEDGE_LEFT;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
		}

		/* Examine right edge of unlocker */
		if (startu + lenu < starte) {
			/* Right edge of unlocker left of established lock */
			rflags = REDGE_LEFT;
			return SPL_DISJOINT;
		} else if (startu + lenu == starte) {
			/* Right edge of unlocker on start of established lock */
			rflags = REDGE_LBOUNDARY;
			return SPL_DISJOINT;
		} else { /* Infinity is right of finity */
			/* Right edge of unlocker inside established lock */
			rflags = REDGE_INSIDE;
		}

		if (lflags == LEDGE_INSIDE) {
			*start1 = starte;
			*len1 = startu - starte;
			retval |= SPL_LOCK1;
		}

		if (rflags == REDGE_INSIDE) {
			/* Create right lock */
			*start2 = startu+lenu;
			*len2 = 0;
			retval |= SPL_LOCK2;
		}
	} else if (lene != 0 && lenu == 0) {
		/* Unlocker is infinite */
		/* Examine left edge of unlocker */
		lflags = LEDGE_RIGHT;
		if (startu < starte) {
			lflags = LEDGE_LEFT;
			retval = SPL_CONTAINED;
			return retval;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
			retval = SPL_CONTAINED;
			return retval;
		} else if ((startu > starte) && (startu < starte + lene - 1)) {
			lflags = LEDGE_INSIDE;
		} else if (startu == starte + lene - 1) {
			lflags = LEDGE_RBOUNDARY;
		} else { /* startu > starte + lene - 1 */
			lflags = LEDGE_RIGHT;
			return SPL_DISJOINT;
		}

		rflags = REDGE_RIGHT; /* Infinity is right of finity */

		if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) {
			*start1 = starte;
			*len1 = startu - starte;
			retval |= SPL_LOCK1;
			return retval;
		}
	} else {
		/* Both locks are finite */

		/* Examine left edge of unlocker */
		lflags = LEDGE_RIGHT;
		if (startu < starte) {
			lflags = LEDGE_LEFT;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
		} else if ((startu > starte) && (startu < starte + lene - 1)) {
			lflags = LEDGE_INSIDE;
		} else if (startu == starte + lene - 1) {
			lflags = LEDGE_RBOUNDARY;
		} else { /* startu > starte + lene - 1 */
			lflags = LEDGE_RIGHT;
			return SPL_DISJOINT;
		}

		/* Examine right edge of unlocker */
		if (startu + lenu < starte) {
			/* Right edge of unlocker left of established lock */
			rflags = REDGE_LEFT;
			return SPL_DISJOINT;
		} else if (startu + lenu == starte) {
			/* Right edge of unlocker on start of established lock */
			rflags = REDGE_LBOUNDARY;
			return SPL_DISJOINT;
		} else if (startu + lenu < starte + lene) {
			/* Right edge of unlocker inside established lock */
			rflags = REDGE_INSIDE;
		} else if (startu + lenu == starte + lene) {
			/* Right edge of unlocker on right edge of established lock */
			rflags = REDGE_RBOUNDARY;
		} else { /* startu + lenu > starte + lene */
			/* Right edge of unlocker is right of established lock */
			rflags = REDGE_RIGHT;
		}

		if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) {
			/* Create left lock */
			*start1 = starte;
			*len1 = (startu - starte);
			retval |= SPL_LOCK1;
		}

		if (rflags == REDGE_INSIDE) {
			/* Create right lock */
			*start2 = startu+lenu;
			*len2 = starte+lene-(startu+lenu);
			retval |= SPL_LOCK2;
		}

		if ((lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) &&
		    (rflags == REDGE_RBOUNDARY || rflags == REDGE_RIGHT)) {
			retval = SPL_CONTAINED;
		}
	}
	return retval;
}

/*
 * same_netobj: Compares the appropriate bits of a netobj for identity
 */
int
same_netobj(const netobj *n0, const netobj *n1)
{
	int retval;

	retval = 0;

	debuglog("Entering netobj identity check\n");

	if (n0->n_len == n1->n_len) {
		debuglog("Preliminary length check passed\n");
		retval = !bcmp(n0->n_bytes, n1->n_bytes, n0->n_len);
		debuglog("netobj %smatch\n", retval ? "" : "mis");
	}

	return (retval);
}

/*
 * same_filelock_identity: Compares the appropriate bits of a file_lock
 */
int
same_filelock_identity(const struct file_lock *fl0,
    const struct file_lock *fl1)
{
	int retval;

	retval = 0;

	debuglog("Checking filelock identity\n");

	/*
	 * Check process ids and host information.
	 */
	retval = (fl0->client.svid == fl1->client.svid &&
	    same_netobj(&(fl0->client.oh), &(fl1->client.oh)));

	debuglog("Exiting checking filelock identity: retval: %d\n", retval);

	return (retval);
}

/*
 * Below here are routines associated with manipulating the NFS
 * lock list.
 */

/*
 * get_lock_matching_unlock: Return a lock which matches the given unlock lock
 *                           or NULL otherwise
 * XXX: It is a shame that this duplicates so much code from test_nfslock.
 */
struct file_lock *
get_lock_matching_unlock(const struct file_lock *fl)
{
	struct file_lock *ifl; /* Iterator */

	debuglog("Entering get_lock_matching_unlock\n");
	debuglog("********Dump of fl*****************\n");
	dump_filelock(fl);

	LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
		debuglog("Pointer to file lock: %p\n", ifl);

		debuglog("****Dump of ifl****\n");
		dump_filelock(ifl);
		debuglog("*******************\n");

		/*
		 * XXX: It is conceivable that someone could use the NLM RPC
		 * system to directly access filehandles.  This may be a
		 * security hazard as the filehandle code may bypass normal
		 * file access controls
		 */
		if (bcmp(&fl->filehandle, &ifl->filehandle, sizeof(fhandle_t)))
			continue;

		debuglog("get_lock_matching_unlock: Filehandles match, "
		    "checking regions\n");

		/* Filehandles match, check for region overlap */
		if (!regions_overlap(fl->client.l_offset, fl->client.l_len,
		    ifl->client.l_offset, ifl->client.l_len))
			continue;

		debuglog("get_lock_matching_unlock: Region overlap"
		    " found %llu : %llu -- %llu : %llu\n",
		    fl->client.l_offset, fl->client.l_len,
		    ifl->client.l_offset, ifl->client.l_len);

		/* Regions overlap, check the identity */
		if (!same_filelock_identity(fl, ifl))
			continue;

		debuglog("get_lock_matching_unlock: Duplicate lock id.  Granting\n");
		return (ifl);
	}

	debuglog("Exiting get_lock_matching_unlock\n");

	return (NULL);
}

/*
 * test_nfslock: check for NFS lock in lock list
 *
 * This routine makes the following assumptions:
 *    1) Nothing will adjust the lock list during a lookup
 *
 * This routine has an interesting quirk which bit me hard.
 * The conflicting_fl is the pointer to the conflicting lock.
 * However, to modify the "*pointer* to the conflicting lock" rather
 * than the "conflicting lock itself" one must pass in a "pointer to
 * the pointer of the conflicting lock".  Gross.
 */
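/*
 * A minimal usage sketch of that calling convention (illustrative only):
 *
 *	struct file_lock *conflict = NULL;
 *
 *	if (test_nfslock(fl, &conflict) == NFS_DENIED)
 *		dump_filelock(conflict);	(conflict names the blocker)
 */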

enum nfslock_status
test_nfslock(const struct file_lock *fl, struct file_lock **conflicting_fl)
{
	struct file_lock *ifl; /* Iterator */
	enum nfslock_status retval;

	debuglog("Entering test_nfslock\n");

	retval = NFS_GRANTED;
	(*conflicting_fl) = NULL;

	debuglog("Entering lock search loop\n");

	debuglog("***********************************\n");
	debuglog("Dumping match filelock\n");
	debuglog("***********************************\n");
	dump_filelock(fl);
	debuglog("***********************************\n");

	LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
		if (retval == NFS_DENIED)
			break;

		debuglog("Top of lock loop\n");
		debuglog("Pointer to file lock: %p\n", ifl);

		debuglog("***********************************\n");
		debuglog("Dumping test filelock\n");
		debuglog("***********************************\n");
		dump_filelock(ifl);
		debuglog("***********************************\n");

		/*
		 * XXX: It is conceivable that someone could use the NLM RPC
		 * system to directly access filehandles.  This may be a
		 * security hazard as the filehandle code may bypass normal
		 * file access controls
		 */
		if (bcmp(&fl->filehandle, &ifl->filehandle, sizeof(fhandle_t)))
			continue;

		debuglog("test_nfslock: filehandle match found\n");

		/* Filehandles match, check for region overlap */
		if (!regions_overlap(fl->client.l_offset, fl->client.l_len,
		    ifl->client.l_offset, ifl->client.l_len))
			continue;

		debuglog("test_nfslock: Region overlap found"
		    " %llu : %llu -- %llu : %llu\n",
		    fl->client.l_offset, fl->client.l_len,
		    ifl->client.l_offset, ifl->client.l_len);

		/* Regions overlap, check the exclusivity */
		if (!(fl->client.exclusive || ifl->client.exclusive))
			continue;

		debuglog("test_nfslock: Exclusivity failure: %d %d\n",
		    fl->client.exclusive,
		    ifl->client.exclusive);

		if (same_filelock_identity(fl, ifl)) {
			debuglog("test_nfslock: Duplicate id.  Granting\n");
			(*conflicting_fl) = ifl;
			retval = NFS_GRANTED_DUPLICATE;
		} else {
			/* locking attempt fails */
			debuglog("test_nfslock: Lock attempt failed\n");
			debuglog("Desired lock\n");
			dump_filelock(fl);
			debuglog("Conflicting lock\n");
			dump_filelock(ifl);
			(*conflicting_fl) = ifl;
			retval = NFS_DENIED;
		}
	}

	debuglog("Dumping file locks\n");
	debuglog("Exiting test_nfslock\n");

	return (retval);
}

/*
 * lock_nfslock: attempt to create a lock in the NFS lock list
 *
 * This routine tests whether the lock will be granted and then adds
 * the entry to the lock list if so.
 *
 * Argument fl gets modified as its list housekeeping entries get modified
 * upon insertion into the NFS lock list
 *
 * This routine makes several assumptions:
 *    1) It is perfectly happy to grant a duplicate lock from the same pid.
 *       While this seems to be intuitively wrong, it is required for proper
 *       Posix semantics during unlock.  It is absolutely imperative to not
 *       unlock the main lock before the two child locks are established.
 *       Thus, one has to be able to create duplicate locks over an
 *       existing lock
 *    2) It currently accepts duplicate locks from the same id,pid
 */

enum nfslock_status
lock_nfslock(struct file_lock *fl)
{
	enum nfslock_status retval;
	struct file_lock *dummy_fl;

	dummy_fl = NULL;

	debuglog("Entering lock_nfslock...\n");

	retval = test_nfslock(fl, &dummy_fl);

	if (retval == NFS_GRANTED || retval == NFS_GRANTED_DUPLICATE) {
		debuglog("Inserting lock...\n");
		dump_filelock(fl);
		LIST_INSERT_HEAD(&nfslocklist_head, fl, nfslocklist);
	}

	debuglog("Exiting lock_nfslock...\n");

	return (retval);
}

/*
 * delete_nfslock: delete an NFS lock list entry
 *
 * This routine is used to delete a lock out of the NFS lock list
 * without regard to status, underlying locks, regions or anything else
 *
 * Note that this routine *does not deallocate memory* of the lock.
 * It just disconnects it from the list.  The lock can then be used
 * by other routines without fear of trashing the list.
 */

enum nfslock_status
delete_nfslock(struct file_lock *fl)
{

	LIST_REMOVE(fl, nfslocklist);

	return (NFS_GRANTED);
}

enum split_status
split_nfslock(const struct file_lock *exist_lock,
    const struct file_lock *unlock_lock,
    struct file_lock **left_lock, struct file_lock **right_lock)
{
	u_int64_t start1, len1, start2, len2;
	enum split_status spstatus;

	spstatus = region_compare(exist_lock->client.l_offset,
	    exist_lock->client.l_len,
	    unlock_lock->client.l_offset, unlock_lock->client.l_len,
	    &start1, &len1, &start2, &len2);

	if ((spstatus & SPL_LOCK1) != 0) {
		*left_lock = allocate_file_lock(&exist_lock->client.oh,
		    &exist_lock->client_cookie,
		    exist_lock->addr, exist_lock->client_name);
		if (*left_lock == NULL) {
			debuglog("Unable to allocate resource for split 1\n");
			return SPL_RESERR;
		}

		fill_file_lock(*left_lock, &exist_lock->filehandle,
		    exist_lock->client.exclusive, exist_lock->client.svid,
		    start1, len1,
		    exist_lock->nsm_status,
		    exist_lock->status, exist_lock->flags,
		    exist_lock->blocking);
	}

	if ((spstatus & SPL_LOCK2) != 0) {
		*right_lock = allocate_file_lock(&exist_lock->client.oh,
		    &exist_lock->client_cookie,
		    exist_lock->addr, exist_lock->client_name);
		if (*right_lock == NULL) {
			debuglog("Unable to allocate resource for split 2\n");
			if (*left_lock != NULL) {
				deallocate_file_lock(*left_lock);
			}
			return SPL_RESERR;
		}

		fill_file_lock(*right_lock, &exist_lock->filehandle,
		    exist_lock->client.exclusive, exist_lock->client.svid,
		    start2, len2,
		    exist_lock->nsm_status,
		    exist_lock->status, exist_lock->flags,
		    exist_lock->blocking);
	}

	return spstatus;
}

enum nfslock_status
unlock_nfslock(const struct file_lock *fl, struct file_lock **released_lock,
    struct file_lock **left_lock, struct file_lock **right_lock)
{
	struct file_lock *mfl; /* Matching file lock */
	enum nfslock_status retval;
	enum split_status spstatus;

	debuglog("Entering unlock_nfslock\n");

	*released_lock = NULL;
	*left_lock = NULL;
	*right_lock = NULL;

	retval = NFS_DENIED_NOLOCK;

	debuglog("Attempting to match lock...\n");
	mfl = get_lock_matching_unlock(fl);

	if (mfl != NULL) {
		debuglog("Unlock matched.  Querying for split\n");

		spstatus = split_nfslock(mfl, fl, left_lock, right_lock);

		debuglog("Split returned %d %p %p %p %p\n", spstatus,
		    mfl, fl, *left_lock, *right_lock);
		debuglog("********Split dumps********\n");
		dump_filelock(mfl);
		dump_filelock(fl);
		dump_filelock(*left_lock);
		dump_filelock(*right_lock);
		debuglog("********End Split dumps********\n");

		if (spstatus == SPL_RESERR) {
			if (*left_lock != NULL) {
				deallocate_file_lock(*left_lock);
				*left_lock = NULL;
			}

			if (*right_lock != NULL) {
				deallocate_file_lock(*right_lock);
				*right_lock = NULL;
			}

			return NFS_RESERR;
		}

		/* Insert new locks from split if required */
		if (*left_lock != NULL) {
			debuglog("Split left activated\n");
			LIST_INSERT_HEAD(&nfslocklist_head, *left_lock,
			    nfslocklist);
		}

		if (*right_lock != NULL) {
			debuglog("Split right activated\n");
			LIST_INSERT_HEAD(&nfslocklist_head, *right_lock,
			    nfslocklist);
		}

		/* Unlock the lock since it matches identity */
		LIST_REMOVE(mfl, nfslocklist);
		*released_lock = mfl;
		retval = NFS_GRANTED;
	}

	debuglog("Exiting unlock_nfslock\n");

	return retval;
}

/*
 * Below here are the routines for manipulating the file lock directly
 * on the disk hardware itself
 */
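/*
 * The hardware layer maps each NFS filehandle onto a whole-file flock(2)
 * taken via fhopen(2).  The core of lock_hwlock() below reduces to this
 * sketch (error handling elided):
 *
 *	fd = fhopen(&fl->filehandle, O_RDWR);
 *	flock(fd, (exclusive ? LOCK_EX : LOCK_SH) | LOCK_NB);
 *
 * The descriptor must stay open until the matching unlock, since closing
 * it would release the flock.
 */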
enum hwlock_status
lock_hwlock(struct file_lock *fl)
{
	struct monfile *imf, *nmf;
	int lflags, flerror;

	/* Scan to see if filehandle already present */
	LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
		if (bcmp(&fl->filehandle, &imf->filehandle,
		    sizeof(fl->filehandle)) == 0) {
			/* imf is the correct filehandle */
			break;
		}
	}

	/*
	 * Filehandle already exists (we control the file)
	 * *AND* NFS has already cleared the lock for availability
	 * Grant it and bump the refcount.
	 */
	if (imf != NULL) {
		++(imf->refcount);
		return (HW_GRANTED);
	}

	/* No filehandle found, create and go */
	nmf = malloc(sizeof(struct monfile));
	if (nmf == NULL) {
		debuglog("hwlock resource allocation failure\n");
		return (HW_RESERR);
	}

	/* XXX: Is O_RDWR always the correct mode? */
	nmf->fd = fhopen(&fl->filehandle, O_RDWR);
	if (nmf->fd < 0) {
		debuglog("fhopen failed (from %16s): %32s\n",
		    fl->client_name, strerror(errno));
		free(nmf);
		switch (errno) {
		case ESTALE:
			return (HW_STALEFH);
		case EROFS:
			return (HW_READONLY);
		default:
			return (HW_RESERR);
		}
	}

	/* File opened correctly, fill the monitor struct */
	bcopy(&fl->filehandle, &nmf->filehandle, sizeof(fl->filehandle));
	nmf->refcount = 1;
	nmf->exclusive = fl->client.exclusive;

	lflags = (nmf->exclusive == 1) ?
	    (LOCK_EX | LOCK_NB) : (LOCK_SH | LOCK_NB);

	flerror = flock(nmf->fd, lflags);

	if (flerror != 0) {
		debuglog("flock failed (from %16s): %32s\n",
		    fl->client_name, strerror(errno));
		close(nmf->fd);
		free(nmf);
		switch (errno) {
		case EAGAIN:
			return (HW_DENIED);
		case ESTALE:
			return (HW_STALEFH);
		case EROFS:
			return (HW_READONLY);
		default:
			return (HW_RESERR);
		}
	}

	/* File opened and locked */
	LIST_INSERT_HEAD(&monfilelist_head, nmf, monfilelist);

	debuglog("flock succeeded (from %16s)\n", fl->client_name);
	return (HW_GRANTED);
}

enum hwlock_status
unlock_hwlock(const struct file_lock *fl)
{
	struct monfile *imf;

	debuglog("Entering unlock_hwlock\n");
	debuglog("Entering loop iteration\n");

	/* Scan to see if filehandle already present */
	LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
		if (bcmp(&fl->filehandle, &imf->filehandle,
		    sizeof(fl->filehandle)) == 0) {
			/* imf is the correct filehandle */
			break;
		}
	}

	debuglog("Completed iteration.  Proceeding\n");

	if (imf == NULL) {
		/* No lock found */
		debuglog("Exiting unlock_hwlock (HW_DENIED_NOLOCK)\n");
		return (HW_DENIED_NOLOCK);
	}

	/* Lock found */
	--imf->refcount;

	if (imf->refcount < 0) {
		debuglog("Negative hardware reference count\n");
	}

	if (imf->refcount <= 0) {
		close(imf->fd);
		LIST_REMOVE(imf, monfilelist);
		free(imf);
	}
	debuglog("Exiting unlock_hwlock (HW_GRANTED)\n");
	return (HW_GRANTED);
}

enum hwlock_status
test_hwlock(const struct file_lock *fl __unused,
    struct file_lock **conflicting_fl __unused)
{

	/*
	 * XXX: lock tests on hardware are not required until
	 * true partial file testing is done on the underlying file
	 */
	return (HW_RESERR);
}



/*
 * Below here are routines for manipulating blocked lock requests
 * They should only be called from the XXX_partialfilelock routines
 * if at all possible
 */

int
duplicate_block(struct file_lock *fl)
{
	struct file_lock *ifl;
	int retval = 0;

	debuglog("Entering duplicate_block\n");

	/*
	 * Is this lock request already on the blocking list?
	 * Consider it a dupe if the file handles, offset, length,
	 * exclusivity and client match.
	 */
	LIST_FOREACH(ifl, &blockedlocklist_head, nfslocklist) {
		if (!bcmp(&fl->filehandle, &ifl->filehandle,
		    sizeof(fhandle_t)) &&
		    fl->client.exclusive == ifl->client.exclusive &&
		    fl->client.l_offset == ifl->client.l_offset &&
		    fl->client.l_len == ifl->client.l_len &&
		    same_filelock_identity(fl, ifl)) {
			retval = 1;
			break;
		}
	}

	debuglog("Exiting duplicate_block: %s\n", retval ? "already blocked"
	    : "not already blocked");
	return retval;
}

void
add_blockingfilelock(struct file_lock *fl)
{
	debuglog("Entering add_blockingfilelock\n");

	/*
	 * A blocking lock request _should_ never be duplicated as a client
	 * that is already blocked shouldn't be able to request another
	 * lock.  Alas, there are some buggy clients that do request the same
	 * lock repeatedly.  Make sure only unique locks are on the blocked
	 * lock list.
	 */
	if (duplicate_block(fl)) {
		debuglog("Exiting add_blockingfilelock: already blocked\n");
		return;
	}

	/*
	 * Clear the blocking flag so that it can be reused without
	 * adding it to the blocking queue a second time
	 */

	fl->blocking = 0;
	LIST_INSERT_HEAD(&blockedlocklist_head, fl, nfslocklist);

	debuglog("Exiting add_blockingfilelock: added blocked lock\n");
}

void
remove_blockingfilelock(struct file_lock *fl)
{

	debuglog("Entering remove_blockingfilelock\n");

	LIST_REMOVE(fl, nfslocklist);

	debuglog("Exiting remove_blockingfilelock\n");
}

void
clear_blockingfilelock(const char *hostname)
{
	struct file_lock *ifl, *nfl;

	/*
	 * Normally, LIST_FOREACH is called for, but since
	 * the current element *is* the iterator, deleting it
	 * would mess up the iteration.  Thus, a next element
	 * must be used explicitly
	 */

	ifl = LIST_FIRST(&blockedlocklist_head);

	while (ifl != NULL) {
		nfl = LIST_NEXT(ifl, nfslocklist);

		if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
			remove_blockingfilelock(ifl);
			deallocate_file_lock(ifl);
		}

		ifl = nfl;
	}
}

void
retry_blockingfilelocklist(void)
{
	/* Retry all locks in the blocked list */
	struct file_lock *ifl, *nfl; /* Iterator */
	enum partialfilelock_status pflstatus;

	debuglog("Entering retry_blockingfilelocklist\n");

	LIST_FOREACH_SAFE(ifl, &blockedlocklist_head, nfslocklist, nfl) {
		debuglog("Iterator choice %p\n", ifl);
		debuglog("Next iterator choice %p\n", nfl);

		/*
		 * SUBTLE BUG: The file_lock must be removed from the
		 * old list so that its list pointers get disconnected
		 * before being allowed to participate in the new list
		 * which will automatically add it in if necessary.
		 */

		LIST_REMOVE(ifl, nfslocklist);
		pflstatus = lock_partialfilelock(ifl);

		if (pflstatus == PFL_GRANTED ||
		    pflstatus == PFL_GRANTED_DUPLICATE) {
			debuglog("Granted blocked lock\n");
			/* lock granted and is now being used */
			send_granted(ifl, 0);
		} else {
			/* Reinsert lock back into blocked list */
			debuglog("Replacing blocked lock\n");
			LIST_INSERT_HEAD(&blockedlocklist_head, ifl,
			    nfslocklist);
		}
	}

	debuglog("Exiting retry_blockingfilelocklist\n");
}

/*
 * Below here are routines associated with manipulating all
 * aspects of the partial file locking system (list, hardware, etc.)
 */

/*
 * Please note that lock monitoring must be done at this level which
 * keeps track of *individual* lock requests on lock and unlock
 *
 * XXX: Split unlocking is going to make the unlock code miserable
 */

/*
 * lock_partialfilelock:
 *
 * Argument fl gets modified as its list housekeeping entries get modified
 * upon insertion into the NFS lock list
 *
 * This routine makes several assumptions:
 * 1) It (will) pass locks through to flock to lock the entire underlying
 *    file and then parcel out NFS locks if it gets control of the file.
 *    This matches the old rpc.lockd file semantics (except where it
 *    is now more correct).  It is the safe solution, but will cause
 *    overly restrictive blocking if someone is trying to use the
 *    underlying files without using NFS.  This appears to be an
 *    acceptable tradeoff since most people use standalone NFS servers.
 *    XXX: The right solution is probably kevent combined with fcntl
 *
 * 2) Nothing modifies the lock lists between testing and granting
 *    I have no idea whether this is a useful assumption or not
 */
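/*
 * The two-phase ordering below matters; a sketch of the grant path
 * (for illustration only):
 *
 *	if (lock_nfslock(fl) was granted) {
 *		if (lock_hwlock(fl) was not granted)
 *			delete_nfslock(fl);	(roll the NFS lock back out)
 *	}
 *
 * The NFS list lock is taken first because it is far cheaper to undo
 * than the flock on the underlying file.
 */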

enum partialfilelock_status
lock_partialfilelock(struct file_lock *fl)
{
	enum partialfilelock_status retval;
	enum nfslock_status lnlstatus;
	enum hwlock_status hwstatus;

	debuglog("Entering lock_partialfilelock\n");

	retval = PFL_DENIED;

	/*
	 * Execute the NFS lock first, if possible, as it is significantly
	 * easier and less expensive to undo than the filesystem lock
	 */

	lnlstatus = lock_nfslock(fl);

	switch (lnlstatus) {
	case NFS_GRANTED:
	case NFS_GRANTED_DUPLICATE:
		/*
		 * At this point, the NFS lock is allocated and active.
		 * Remember to clean it up if the hardware lock fails
		 */
		hwstatus = lock_hwlock(fl);

		switch (hwstatus) {
		case HW_GRANTED:
		case HW_GRANTED_DUPLICATE:
			debuglog("HW GRANTED\n");
			/*
			 * XXX: Fixme: Check hwstatus for duplicate when
			 * true partial file locking and accounting is
			 * done on the hardware.
			 */
			if (lnlstatus == NFS_GRANTED_DUPLICATE) {
				retval = PFL_GRANTED_DUPLICATE;
			} else {
				retval = PFL_GRANTED;
			}
			monitor_lock_host(fl->client_name);
			break;
		case HW_RESERR:
			debuglog("HW RESERR\n");
			retval = PFL_HWRESERR;
			break;
		case HW_DENIED:
			debuglog("HW DENIED\n");
			retval = PFL_HWDENIED;
			break;
		default:
			debuglog("Unmatched hwstatus %d\n", hwstatus);
			break;
		}

		if (retval != PFL_GRANTED &&
		    retval != PFL_GRANTED_DUPLICATE) {
			/* Clean up the NFS lock */
			debuglog("Deleting trial NFS lock\n");
			delete_nfslock(fl);
		}
		break;
	case NFS_DENIED:
		retval = PFL_NFSDENIED;
		break;
	case NFS_RESERR:
		retval = PFL_NFSRESERR;
		break;
	default:
		debuglog("Unmatched lnlstatus %d\n", lnlstatus);
		retval = PFL_NFSDENIED_NOLOCK;
		break;
	}

	/*
	 * By the time fl reaches here, it is completely free again on
	 * failure.  The NFS lock done before attempting the
	 * hardware lock has been backed out
	 */

	if (retval == PFL_NFSDENIED || retval == PFL_HWDENIED) {
		/* One last chance to check the lock */
		if (fl->blocking == 1) {
			if (retval == PFL_NFSDENIED) {
				/* Queue the lock */
				debuglog("BLOCKING LOCK RECEIVED\n");
				retval = PFL_NFSBLOCKED;
				add_blockingfilelock(fl);
				dump_filelock(fl);
			} else {
				/* retval is okay as PFL_HWDENIED */
				debuglog("BLOCKING LOCK DENIED IN HARDWARE\n");
				dump_filelock(fl);
			}
		} else {
			/* Leave retval alone, it's already correct */
			debuglog("Lock denied.  Non-blocking failure\n");
			dump_filelock(fl);
		}
	}

	debuglog("Exiting lock_partialfilelock\n");

	return retval;
}

/*
 * unlock_partialfilelock:
 *
 * Given a file_lock, unlock all locks which match.
 *
 * Note that a given lock might have to unlock ITSELF!  See
 * clear_partialfilelock for example.
 */

enum partialfilelock_status
unlock_partialfilelock(const struct file_lock *fl)
{
	struct file_lock *lfl, *rfl, *releasedfl, *selffl;
	enum partialfilelock_status retval;
	enum nfslock_status unlstatus;
	enum hwlock_status unlhwstatus, lhwstatus;

	debuglog("Entering unlock_partialfilelock\n");

	selffl = NULL;
	lfl = NULL;
	rfl = NULL;
	releasedfl = NULL;
	retval = PFL_DENIED;

	/*
	 * There are significant overlap and atomicity issues
	 * with partially releasing a lock.  For example, releasing
	 * part of an NFS shared lock does *not* always release the
	 * corresponding part of the file since there is only one
	 * rpc.lockd UID but multiple users could be requesting it
	 * from NFS.  Also, an unlock request should never allow
	 * another process to gain a lock on the remaining parts.
	 * i.e. Always apply the new locks before releasing the
	 * old one
	 */

	/*
	 * Loop is required since multiple little locks
	 * can be allocated and then deallocated with one
	 * big unlock.
	 *
	 * The loop is required to be here so that the nfs &
	 * hw subsystems do not need to communicate with
	 * one another
	 */

	do {
		debuglog("Value of releasedfl: %p\n", releasedfl);
		/* lfl & rfl are created *AND* placed into the NFS lock
		   list if required */
		unlstatus = unlock_nfslock(fl, &releasedfl, &lfl, &rfl);
		debuglog("Value of releasedfl: %p\n", releasedfl);

		/* XXX: This is grungy.  It should be refactored to be
		   cleaner */
		if (lfl != NULL) {
			lhwstatus = lock_hwlock(lfl);
			if (lhwstatus != HW_GRANTED &&
			    lhwstatus != HW_GRANTED_DUPLICATE) {
				debuglog("HW duplicate lock failure for left"
				    " split\n");
			}
			monitor_lock_host(lfl->client_name);
		}

		if (rfl != NULL) {
			lhwstatus = lock_hwlock(rfl);
			if (lhwstatus != HW_GRANTED &&
			    lhwstatus != HW_GRANTED_DUPLICATE) {
				debuglog("HW duplicate lock failure for right"
				    " split\n");
			}
			monitor_lock_host(rfl->client_name);
		}

		switch (unlstatus) {
		case NFS_GRANTED:
			/* Attempt to unlock on the hardware */
			debuglog("NFS unlock granted.  Attempting hardware"
			    " unlock\n");

			/* This call *MUST NOT* unlock the two newly
			   allocated locks */
			unlhwstatus = unlock_hwlock(fl);
			debuglog("HW unlock returned with code %d\n",
			    unlhwstatus);

			switch (unlhwstatus) {
			case HW_GRANTED:
				debuglog("HW unlock granted\n");
				unmonitor_lock_host(releasedfl->client_name);
				retval = PFL_GRANTED;
				break;
			case HW_DENIED_NOLOCK:
				/* Huh?!?!  This shouldn't happen */
				debuglog("HW unlock denied no lock\n");
				retval = PFL_HWRESERR;
				/* Break out of do-while */
				unlstatus = NFS_RESERR;
				break;
			default:
				debuglog("HW unlock failed\n");
				retval = PFL_HWRESERR;
				/* Break out of do-while */
				unlstatus = NFS_RESERR;
				break;
			}

			debuglog("Exiting with status retval: %d\n", retval);

			retry_blockingfilelocklist();
			break;
		case NFS_DENIED_NOLOCK:
			retval = PFL_GRANTED;
			debuglog("All locks cleaned out\n");
			break;
		default:
			retval = PFL_NFSRESERR;
			debuglog("NFS unlock failure\n");
			dump_filelock(fl);
			break;
		}

		if (releasedfl != NULL) {
			if (fl == releasedfl) {
				/*
				 * XXX: YECHHH!!!  Attempt to unlock self
				 * succeeded but we can't deallocate the
				 * space yet.  This is what happens when
				 * you don't write malloc and free together
				 */
				debuglog("Attempt to unlock self\n");
				selffl = releasedfl;
			} else {
				/*
				 * XXX: this deallocation *still* needs to
				 * migrate closer to the allocation code
				 * way up in get_lock or the allocation
				 * code needs to migrate down (violation
				 * of "When you write malloc you must
				 * write free")
				 */
				deallocate_file_lock(releasedfl);
				releasedfl = NULL;
			}
		}

	} while (unlstatus == NFS_GRANTED);

	if (selffl != NULL) {
		/*
		 * This statement wipes out the incoming file lock (fl)
		 * in spite of the fact that it is declared const
		 */
		debuglog("WARNING!  Destroying incoming lock pointer\n");
		deallocate_file_lock(selffl);
	}

	debuglog("Exiting unlock_partialfilelock\n");

	return retval;
}

/*
 * clear_partialfilelock
 *
 * Normally called in response to statd state number change.
 * Wipe out all locks held by a host.  As a bonus, the act of
 * doing so should automatically clear their statd entries and
 * unmonitor the host.
 */

void
clear_partialfilelock(const char *hostname)
{
	struct file_lock *ifl, *nfl;

	/* Clear blocking file lock list */
	clear_blockingfilelock(hostname);

	/* do all required unlocks */
	/* Note that unlock can smash the current pointer to a lock */

	/*
	 * Normally, LIST_FOREACH is called for, but since
	 * the current element *is* the iterator, deleting it
	 * would mess up the iteration.  Thus, a next element
	 * must be used explicitly
	 */

	ifl = LIST_FIRST(&nfslocklist_head);

	while (ifl != NULL) {
		nfl = LIST_NEXT(ifl, nfslocklist);

		if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
			/* Unlock destroys ifl out from underneath */
			unlock_partialfilelock(ifl);
			/* ifl is NO LONGER VALID AT THIS POINT */
		}
		ifl = nfl;
	}
}

/*
 * test_partialfilelock:
 */
enum partialfilelock_status
test_partialfilelock(const struct file_lock *fl,
    struct file_lock **conflicting_fl)
{
	enum partialfilelock_status retval;
	enum nfslock_status teststatus;

	debuglog("Entering testpartialfilelock...\n");

	retval = PFL_DENIED;

	teststatus = test_nfslock(fl, conflicting_fl);
	debuglog("test_partialfilelock: teststatus %d\n", teststatus);

	if (teststatus == NFS_GRANTED || teststatus == NFS_GRANTED_DUPLICATE) {
		/* XXX: Add the underlying filesystem locking code */
		retval = (teststatus == NFS_GRANTED) ?
		    PFL_GRANTED : PFL_GRANTED_DUPLICATE;
		debuglog("Dumping locks...\n");
		dump_filelock(fl);
		dump_filelock(*conflicting_fl);
		debuglog("Done dumping locks...\n");
	} else {
		retval = PFL_NFSDENIED;
		debuglog("NFS test denied.\n");
		dump_filelock(fl);
		debuglog("Conflicting.\n");
		dump_filelock(*conflicting_fl);
	}

	debuglog("Exiting testpartialfilelock...\n");

	return retval;
}

/*
 * Below here are routines associated with translating the partial file
 * locking codes into useful codes to send back to the NFS RPC messaging
 * system
 */

/*
 * These routines translate the (relatively) useful return codes back onto
 * the few return codes which the nlm subsystem wishes to transmit
 */
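/*
 * Summary of the mapping applied by do_test/do_lock/do_unlock below
 * (NLMv4 codes shown; the v1 equivalents are chosen when LOCK_V4 is
 * not set in fl->flags):
 *
 *	PFL_GRANTED, PFL_GRANTED_DUPLICATE	-> nlm4_granted
 *	PFL_NFSDENIED, PFL_HWDENIED		-> nlm4_denied
 *	PFL_NFSBLOCKED, PFL_HWBLOCKED		-> nlm4_blocked (do_lock)
 *	PFL_NFSRESERR, PFL_HWRESERR		-> nlm4_denied_nolocks
 *	anything else				-> nlm4_failed
 *
 * do_unlock additionally treats *_DENIED_NOLOCK as granted, since the
 * lock is already gone.
 */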

enum nlm_stats
do_test(struct file_lock *fl, struct file_lock **conflicting_fl)
{
	enum partialfilelock_status pfsret;
	enum nlm_stats retval;

	debuglog("Entering do_test...\n");

	pfsret = test_partialfilelock(fl, conflicting_fl);

	switch (pfsret) {
	case PFL_GRANTED:
		debuglog("PFL test lock granted\n");
		dump_filelock(fl);
		dump_filelock(*conflicting_fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
		break;
	case PFL_GRANTED_DUPLICATE:
		debuglog("PFL test lock granted--duplicate id detected\n");
		dump_filelock(fl);
		dump_filelock(*conflicting_fl);
		debuglog("Clearing conflicting_fl for call semantics\n");
		*conflicting_fl = NULL;
		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
		break;
	case PFL_NFSDENIED:
	case PFL_HWDENIED:
		debuglog("PFL test lock denied\n");
		dump_filelock(fl);
		dump_filelock(*conflicting_fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
		break;
	case PFL_NFSRESERR:
	case PFL_HWRESERR:
		debuglog("PFL test lock resource fail\n");
		dump_filelock(fl);
		dump_filelock(*conflicting_fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks :
		    nlm_denied_nolocks;
		break;
	default:
		debuglog("PFL test lock *FAILED*\n");
		dump_filelock(fl);
		dump_filelock(*conflicting_fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
		break;
	}

	debuglog("Exiting do_test...\n");

	return retval;
}

/*
 * do_lock: Try to acquire a lock
 *
 * This routine makes a distinction between NLM versions.  I am pretty
 * convinced that this should be abstracted out and bounced up a level
 */

enum nlm_stats
do_lock(struct file_lock *fl)
{
	enum partialfilelock_status pfsret;
	enum nlm_stats retval;

	debuglog("Entering do_lock...\n");

	pfsret = lock_partialfilelock(fl);

	switch (pfsret) {
	case PFL_GRANTED:
		debuglog("PFL lock granted\n");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
		break;
	case PFL_GRANTED_DUPLICATE:
		debuglog("PFL lock granted--duplicate id detected\n");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
		break;
	case PFL_NFSDENIED:
	case PFL_HWDENIED:
		debuglog("PFL_NFS lock denied\n");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
		break;
	case PFL_NFSBLOCKED:
	case PFL_HWBLOCKED:
		debuglog("PFL_NFS blocking lock denied.  Queued.\n");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_blocked : nlm_blocked;
		break;
	case PFL_NFSRESERR:
	case PFL_HWRESERR:
		debuglog("PFL lock resource allocation fail\n");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks :
		    nlm_denied_nolocks;
		break;
	default:
		debuglog("PFL lock *FAILED*\n");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
		break;
	}

	debuglog("Exiting do_lock...\n");

	return retval;
}

enum nlm_stats
do_unlock(struct file_lock *fl)
{
	enum partialfilelock_status pfsret;
	enum nlm_stats retval;

	debuglog("Entering do_unlock...\n");
	pfsret = unlock_partialfilelock(fl);

	switch (pfsret) {
	case PFL_GRANTED:
		debuglog("PFL unlock granted\n");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
		break;
	case PFL_NFSDENIED:
	case PFL_HWDENIED:
		debuglog("PFL_NFS unlock denied\n");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
		break;
	case PFL_NFSDENIED_NOLOCK:
	case PFL_HWDENIED_NOLOCK:
		debuglog("PFL_NFS no lock found\n");
		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
		break;
	case PFL_NFSRESERR:
	case PFL_HWRESERR:
		debuglog("PFL unlock resource failure\n");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks :
		    nlm_denied_nolocks;
		break;
	default:
		debuglog("PFL unlock *FAILED*\n");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
		break;
	}

	debuglog("Exiting do_unlock...\n");

	return retval;
}

/*
 * do_clear
 *
 * This routine is non-existent because it doesn't have a return code.
 * It is here for completeness in case someone *does* need to do return
 * codes later.  A decent compiler should optimize this away.
 */

void
do_clear(const char *hostname)
{

	clear_partialfilelock(hostname);
}

/*
 * The following routines are all called from the code which the
 * RPC layer invokes
 */

/*
 * testlock(): inform the caller if the requested lock would be granted
 *
 * returns NULL if lock would be granted
 * returns pointer to a conflicting nlm4_holder if not
 */
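/*
 * Illustrative call from an NLM_TEST service routine (a sketch, not
 * code from the RPC layer):
 *
 *	struct nlm4_holder *holder;
 *
 *	holder = testlock(&args->alock, args->exclusive, LOCK_V4);
 *
 * A NULL holder means the lock would be granted; otherwise *holder
 * describes the conflicting lock to report back to the client.
 */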

struct nlm4_holder *
testlock(struct nlm4_lock *lock, bool_t exclusive, int flags __unused)
{
	struct file_lock test_fl, *conflicting_fl;

	bzero(&test_fl, sizeof(test_fl));

	bcopy(lock->fh.n_bytes, &(test_fl.filehandle), sizeof(fhandle_t));
	copy_nlm4_lock_to_nlm4_holder(lock, exclusive, &test_fl.client);

	siglock();
	do_test(&test_fl, &conflicting_fl);

	if (conflicting_fl == NULL) {
		debuglog("No conflicting lock found\n");
		sigunlock();
		return NULL;
	} else {
		debuglog("Found conflicting lock\n");
		dump_filelock(conflicting_fl);
		sigunlock();
		return (&conflicting_fl->client);
	}
}

/*
 * getlock: try to acquire the lock.
 * If file is already locked and we can sleep, put the lock in the list with
 * status LKST_WAITING; it'll be processed later.
 * Otherwise try to lock.  If we're allowed to block, fork a child which
 * will do the blocking lock.
 */
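/*
 * Ownership note describing the code below: on nlm4_granted the new
 * file_lock lives on in the NFS lock list, on nlm4_blocked it lives on
 * in the blocked lock list, and on any other result getlock() frees it
 * before returning.
 */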
/*
 * getlock: try to acquire the lock.
 * If the file is already locked and we can sleep, put the lock in the list
 * with status LKST_WAITING; it'll be processed later.
 * Otherwise try to lock.  If we're allowed to block, fork a child which
 * will do the blocking lock.
 */

enum nlm_stats
getlock(nlm4_lockargs *lckarg, struct svc_req *rqstp, const int flags)
{
	struct file_lock *newfl;
	enum nlm_stats retval;

	debuglog("Entering getlock...\n");

	if (grace_expired == 0 && lckarg->reclaim == 0)
		return (flags & LOCK_V4) ?
		    nlm4_denied_grace_period : nlm_denied_grace_period;

	/* allocate new file_lock for this request */
	newfl = allocate_file_lock(&lckarg->alock.oh, &lckarg->cookie,
	    (struct sockaddr *)svc_getrpccaller(rqstp->rq_xprt)->buf,
	    lckarg->alock.caller_name);
	if (newfl == NULL) {
		syslog(LOG_NOTICE, "lock allocate failed: %s", strerror(errno));
		/* failed */
		return (flags & LOCK_V4) ?
		    nlm4_denied_nolocks : nlm_denied_nolocks;
	}

	if (lckarg->alock.fh.n_len != sizeof(fhandle_t)) {
		debuglog("received fhandle size %d, local size %d",
		    lckarg->alock.fh.n_len, (int)sizeof(fhandle_t));
	}

	fill_file_lock(newfl, (fhandle_t *)lckarg->alock.fh.n_bytes,
	    lckarg->exclusive, lckarg->alock.svid, lckarg->alock.l_offset,
	    lckarg->alock.l_len,
	    lckarg->state, 0, flags, lckarg->block);

	/*
	 * newfl is now fully constructed and deallocate_file_lock
	 * can now be used to delete it.
	 */

	siglock();
	debuglog("Pointer to new lock is %p\n", newfl);

	retval = do_lock(newfl);

	debuglog("Pointer to new lock is %p\n", newfl);
	sigunlock();

	switch (retval) {
	case nlm4_granted:
		/* case nlm_granted: is the same as nlm4_granted */
		/* do_mon(lckarg->alock.caller_name); */
		break;
	case nlm4_blocked:
		/* case nlm_blocked: is the same as nlm4_blocked */
		/* do_mon(lckarg->alock.caller_name); */
		break;
	default:
		deallocate_file_lock(newfl);
		break;
	}

	debuglog("Exiting getlock...\n");

	return retval;
}


/* unlock a filehandle */
enum nlm_stats
unlock(nlm4_lock *lock, const int flags __unused)
{
	struct file_lock fl;
	enum nlm_stats err;

	siglock();

	debuglog("Entering unlock...\n");

	bzero(&fl, sizeof(struct file_lock));
	bcopy(lock->fh.n_bytes, &fl.filehandle, sizeof(fhandle_t));

	copy_nlm4_lock_to_nlm4_holder(lock, 0, &fl.client);

	err = do_unlock(&fl);

	sigunlock();

	debuglog("Exiting unlock...\n");

	return err;
}
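/*
 * Note that unlock() above deliberately builds its file_lock on the
 * stack: do_unlock() only needs the filehandle and the client holder
 * (oh/svid/offset/length) to match existing locks, so the remaining
 * fields may stay zeroed and nothing has to be heap-allocated or
 * deallocated on this path.
 */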
/*
 * XXX: The following monitor/unmonitor routines have not been
 * extensively tested (i.e. no regression script exists like for
 * the locking sections).
 */

/*
 * monitor_lock_host: monitor lock hosts locally with a ref count and
 * inform statd
 */
void
monitor_lock_host(const char *hostname)
{
	struct host *ihp, *nhp;
	struct mon smon;
	struct sm_stat_res sres;
	int rpcret, statflag;
	size_t n;

	rpcret = 0;
	statflag = 0;

	LIST_FOREACH(ihp, &hostlst_head, hostlst) {
		if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) {
			/* Host is already monitored, bump refcount */
			++ihp->refcnt;
			/* Host should only be in the monitor list once */
			return;
		}
	}

	/* Host is not yet monitored, add it */
	n = strnlen(hostname, SM_MAXSTRLEN);
	if (n == SM_MAXSTRLEN) {
		return;
	}
	nhp = malloc(sizeof(*nhp) - sizeof(nhp->name) + n + 1);
	if (nhp == NULL) {
		debuglog("Unable to allocate entry for statd mon\n");
		return;
	}

	/* Allocated new host entry, now fill the fields */
	memcpy(nhp->name, hostname, n);
	nhp->name[n] = 0;
	nhp->refcnt = 1;
	debuglog("Locally monitoring host %16s\n", hostname);

	debuglog("Attempting to tell statd\n");

	bzero(&smon, sizeof(smon));

	smon.mon_id.mon_name = nhp->name;
	smon.mon_id.my_id.my_name = "localhost";
	smon.mon_id.my_id.my_prog = NLM_PROG;
	smon.mon_id.my_id.my_vers = NLM_SM;
	smon.mon_id.my_id.my_proc = NLM_SM_NOTIFY;

	rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_MON,
	    (xdrproc_t)xdr_mon, &smon,
	    (xdrproc_t)xdr_sm_stat_res, &sres);

	if (rpcret == 0) {
		if (sres.res_stat == stat_fail) {
			debuglog("Statd call failed\n");
			statflag = 0;
		} else {
			statflag = 1;
		}
	} else {
		debuglog("Rpc call to statd failed with return value: %d\n",
		    rpcret);
		statflag = 0;
	}

	if (statflag == 1) {
		LIST_INSERT_HEAD(&hostlst_head, nhp, hostlst);
	} else {
		free(nhp);
	}
}

/*
 * unmonitor_lock_host: clear monitor ref counts and inform statd when gone
 */
void
unmonitor_lock_host(char *hostname)
{
	struct host *ihp;
	struct mon_id smon_id;
	struct sm_stat smstat;
	int rpcret;

	rpcret = 0;

	LIST_FOREACH(ihp, &hostlst_head, hostlst) {
		if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) {
			/* Host is monitored; drop one reference */
			--ihp->refcnt;
			/* Host should only be in the monitor list once */
			break;
		}
	}

	if (ihp == NULL) {
		debuglog("Could not find host %16s in mon list\n", hostname);
		return;
	}

	if (ihp->refcnt > 0)
		return;

	if (ihp->refcnt < 0) {
		debuglog("Negative refcount!: %d\n", ihp->refcnt);
	}

	debuglog("Attempting to unmonitor host %16s\n", hostname);

	bzero(&smon_id, sizeof(smon_id));

	smon_id.mon_name = hostname;
	smon_id.my_id.my_name = "localhost";
	smon_id.my_id.my_prog = NLM_PROG;
	smon_id.my_id.my_vers = NLM_SM;
	smon_id.my_id.my_proc = NLM_SM_NOTIFY;

	rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_UNMON,
	    (xdrproc_t)xdr_mon_id, &smon_id,
	    (xdrproc_t)xdr_sm_stat, &smstat);

	if (rpcret != 0) {
		debuglog("Rpc call to unmonitor statd failed with "
		    "return value: %d\n", rpcret);
	}

	LIST_REMOVE(ihp, hostlst);
	free(ihp);
}
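/*
 * Illustrative only (hypothetical host name, disabled sketch): the two
 * routines above are reference counted, so statd only hears about the
 * first monitor request and the last unmonitor request for a host:
 */
#if 0
static void
example_monitor_pairing(void)
{
	char host[] = "client.example.com";	/* hypothetical host */

	monitor_lock_host(host);	/* new entry, SM_MON sent to statd */
	monitor_lock_host(host);	/* refcnt 1 -> 2, no RPC */
	unmonitor_lock_host(host);	/* refcnt 2 -> 1, no RPC */
	unmonitor_lock_host(host);	/* refcnt 1 -> 0, SM_UNMON sent */
}
#endif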
/*
 * notify: Clear all locks from a host if statd complains
 *
 * XXX: This routine has not been thoroughly tested.  However, neither
 * had the old one been.  It used to compare the statd crash state counter
 * to the current lock state.  The upshot of this was that it basically
 * cleared all locks from the specified host 99% of the time (with the
 * other 1% being a bug).  Consequently, the assumption is that clearing
 * all locks from a host when notified by statd is acceptable.
 *
 * Please note that this routine skips the usual level of redirection
 * through a do_* type routine.  This introduces a possible level of
 * error and might better be rewritten as a do_notify routine with this
 * one layered on top of it.
 */

void
notify(const char *hostname, const int state)
{
	debuglog("notify from %s, new state %d", hostname, state);

	siglock();
	do_clear(hostname);
	sigunlock();

	debuglog("Leaving notify\n");
}
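/*
 * send_granted: inform a client that its previously blocked lock request
 * has been granted.  For LOCK_ASYNC clients the NLM*_GRANTED_MSG
 * procedure is used as a fire-and-forget call (void reply, zero timeout);
 * otherwise NLM*_GRANTED is called and we wait up to half a second for
 * the client's nlm_res/nlm4_res.  Failure here is tolerable: the client
 * will time out, retry, and be granted the lock at that point.
 */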
void
send_granted(struct file_lock *fl, int opcode __unused)
{
	CLIENT *cli;
	static char dummy;
	struct timeval timeo;
	int success;
	static struct nlm_res retval;
	static struct nlm4_res retval4;

	debuglog("About to send granted on blocked lock\n");

	cli = get_client(fl->addr,
	    (fl->flags & LOCK_V4) ? NLM_VERS4 : NLM_VERS);
	if (cli == NULL) {
		syslog(LOG_NOTICE, "failed to get CLIENT for %s",
		    fl->client_name);
		/*
		 * We failed to notify the remote host that the lock has
		 * been granted.  The client will time out and retry, and
		 * the lock will be granted at that time.
		 */
		return;
	}
	timeo.tv_sec = 0;
	timeo.tv_usec = (fl->flags & LOCK_ASYNC) ? 0 : 500000; /* 0.5s */

	if (fl->flags & LOCK_V4) {
		static nlm4_testargs res;

		res.cookie = fl->client_cookie;
		res.exclusive = fl->client.exclusive;
		res.alock.caller_name = fl->client_name;
		res.alock.fh.n_len = sizeof(fhandle_t);
		res.alock.fh.n_bytes = (char *)&fl->filehandle;
		res.alock.oh = fl->client.oh;
		res.alock.svid = fl->client.svid;
		res.alock.l_offset = fl->client.l_offset;
		res.alock.l_len = fl->client.l_len;
		debuglog("sending v4 reply%s",
		    (fl->flags & LOCK_ASYNC) ? " (async)" : "");
		if (fl->flags & LOCK_ASYNC) {
			success = clnt_call(cli, NLM4_GRANTED_MSG,
			    (xdrproc_t)xdr_nlm4_testargs, &res,
			    (xdrproc_t)xdr_void, &dummy, timeo);
		} else {
			success = clnt_call(cli, NLM4_GRANTED,
			    (xdrproc_t)xdr_nlm4_testargs, &res,
			    (xdrproc_t)xdr_nlm4_res, &retval4, timeo);
		}
	} else {
		static nlm_testargs res;

		res.cookie = fl->client_cookie;
		res.exclusive = fl->client.exclusive;
		res.alock.caller_name = fl->client_name;
		res.alock.fh.n_len = sizeof(fhandle_t);
		res.alock.fh.n_bytes = (char *)&fl->filehandle;
		res.alock.oh = fl->client.oh;
		res.alock.svid = fl->client.svid;
		res.alock.l_offset = fl->client.l_offset;
		res.alock.l_len = fl->client.l_len;
		debuglog("sending v1 reply%s",
		    (fl->flags & LOCK_ASYNC) ? " (async)" : "");
		if (fl->flags & LOCK_ASYNC) {
			success = clnt_call(cli, NLM_GRANTED_MSG,
			    (xdrproc_t)xdr_nlm_testargs, &res,
			    (xdrproc_t)xdr_void, &dummy, timeo);
		} else {
			success = clnt_call(cli, NLM_GRANTED,
			    (xdrproc_t)xdr_nlm_testargs, &res,
			    (xdrproc_t)xdr_nlm_res, &retval, timeo);
		}
	}
	if (debug_level > 2)
		debuglog("clnt_call returns %d(%s) for granted",
		    success, clnt_sperrno(success));
}

/*
 * Routines below here have not been modified in the overhaul.
 */

/*
 * Are these two routines still required since lockd is not spawning off
 * children to service locks anymore?  Presumably they were originally
 * put in place to prevent one child from changing the lock list out
 * from under another one.
 */

void
siglock(void)
{
	sigset_t block;

	sigemptyset(&block);
	sigaddset(&block, SIGCHLD);

	if (sigprocmask(SIG_BLOCK, &block, NULL) < 0) {
		syslog(LOG_WARNING, "siglock failed: %s", strerror(errno));
	}
}

void
sigunlock(void)
{
	sigset_t block;

	sigemptyset(&block);
	sigaddset(&block, SIGCHLD);

	if (sigprocmask(SIG_UNBLOCK, &block, NULL) < 0) {
		syslog(LOG_WARNING, "sigunlock failed: %s", strerror(errno));
	}
}
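/*
 * Illustrative only (disabled sketch): how the routines in this file
 * bracket lock-list manipulation with the SIGCHLD guard above, as
 * getlock(), unlock(), testlock() and notify() all do:
 */
#if 0
static void
example_critical_section(void)
{
	siglock();	/* block SIGCHLD while the lists are inconsistent */
	/* ... examine or modify nfslocklist_head / blockedlocklist_head ... */
	sigunlock();	/* restore delivery of SIGCHLD */
}
#endif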