/*	$NetBSD: lockd_lock.c,v 1.5 2000/11/21 03:47:41 enami Exp $	*/
/* $FreeBSD$ */

/*
 * Copyright (c) 2001 Andrew P. Lentvorski, Jr.
 * Copyright (c) 2000 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#define LOCKD_DEBUG

#include <stdio.h>
#ifdef LOCKD_DEBUG
#include <stdarg.h>
#endif
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <syslog.h>
#include <errno.h>
#include <string.h>
#include <signal.h>
#include <rpc/rpc.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/mount.h>
#include <sys/wait.h>
#include <rpcsvc/sm_inter.h>
#include <rpcsvc/nlm_prot.h>
#include "lockd_lock.h"
#include "lockd.h"

#define MAXOBJECTSIZE 64
#define MAXBUFFERSIZE 1024

/*
 * A set of utilities for managing file locking
 *
 * XXX: All locks are in a linked list, a better structure should be used
 * to improve search/access efficiency.
 */

/* struct describing a lock */
struct file_lock {
	LIST_ENTRY(file_lock) nfslocklist;
	fhandle_t filehandle; /* NFS filehandle */
	struct sockaddr *addr;
	struct nlm4_holder client; /* lock holder */
	/* XXX: client_cookie used *only* in send_granted */
	netobj client_cookie; /* cookie sent by the client */
	int nsm_status; /* status from the remote lock manager */
	int status; /* lock status, see below */
	int flags; /* lock flags, see lockd_lock.h */
	int blocking; /* blocking lock or not */
	char client_name[SM_MAXSTRLEN];	/* client_name is really variable
					   length and must be last! */
};
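/*
 * client_name is declared with SM_MAXSTRLEN for the compiler's benefit
 * only: allocate_file_lock() below allocates just enough trailing space
 * for the actual name (plus NUL), so the string is stored inline at the
 * end of the structure and any new member must be added *before* it.
 */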
LIST_HEAD(nfslocklist_head, file_lock);
struct nfslocklist_head nfslocklist_head = LIST_HEAD_INITIALIZER(nfslocklist_head);

LIST_HEAD(blockedlocklist_head, file_lock);
struct blockedlocklist_head blockedlocklist_head = LIST_HEAD_INITIALIZER(blockedlocklist_head);

/* lock status */
#define LKST_LOCKED	1 /* lock is locked */
/* XXX: Is this flag file specific or lock specific? */
#define LKST_WAITING	2 /* file is already locked by another host */
#define LKST_PROCESSING	3 /* child is trying to acquire the lock */
#define LKST_DYING	4 /* must die when we get news from the child */

/* struct describing a monitored host */
struct host {
	LIST_ENTRY(host) hostlst;
	int refcnt;
	char name[SM_MAXSTRLEN]; /* name is really variable length and
				    must be last! */
};
/* list of hosts we monitor */
LIST_HEAD(hostlst_head, host);
struct hostlst_head hostlst_head = LIST_HEAD_INITIALIZER(hostlst_head);

/*
 * File monitoring handlers
 * XXX: These might be able to be removed when kevent support
 * is placed into the hardware lock/unlock routines.  (ie.
 * let the kernel do all the file monitoring)
 */

/* Struct describing a monitored file */
struct monfile {
	LIST_ENTRY(monfile) monfilelist;
	fhandle_t filehandle; /* Local access filehandle */
	int fd; /* file descriptor: remains open until unlock! */
	int refcount;
	int exclusive;
};

/* List of files we monitor */
LIST_HEAD(monfilelist_head, monfile);
struct monfilelist_head monfilelist_head = LIST_HEAD_INITIALIZER(monfilelist_head);

static int debugdelay = 0;

enum nfslock_status { NFS_GRANTED = 0, NFS_GRANTED_DUPLICATE,
		      NFS_DENIED, NFS_DENIED_NOLOCK,
		      NFS_RESERR };

enum hwlock_status { HW_GRANTED = 0, HW_GRANTED_DUPLICATE,
		     HW_DENIED, HW_DENIED_NOLOCK,
		     HW_STALEFH, HW_READONLY, HW_RESERR };

enum partialfilelock_status { PFL_GRANTED=0, PFL_GRANTED_DUPLICATE, PFL_DENIED,
			      PFL_NFSDENIED, PFL_NFSBLOCKED, PFL_NFSDENIED_NOLOCK, PFL_NFSRESERR,
			      PFL_HWDENIED,  PFL_HWBLOCKED,  PFL_HWDENIED_NOLOCK, PFL_HWRESERR};

enum LFLAGS {LEDGE_LEFT, LEDGE_LBOUNDARY, LEDGE_INSIDE, LEDGE_RBOUNDARY, LEDGE_RIGHT};
enum RFLAGS {REDGE_LEFT, REDGE_LBOUNDARY, REDGE_INSIDE, REDGE_RBOUNDARY, REDGE_RIGHT};
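/*
 * LFLAGS and RFLAGS classify where the left and right edges of an
 * unlock request fall relative to an established lock: strictly left
 * of it, on its left boundary, strictly inside it, on its right
 * boundary, or strictly right of it.  region_compare() combines the
 * two classifications to decide how an established lock must be split.
 */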
/* XXX: WARNING!  I HAVE OVERLOADED THIS STATUS ENUM!  SPLIT IT APART INTO TWO */
enum split_status {SPL_DISJOINT=0, SPL_LOCK1=1, SPL_LOCK2=2, SPL_CONTAINED=4, SPL_RESERR=8};

enum partialfilelock_status lock_partialfilelock(struct file_lock *fl);

void send_granted(struct file_lock *fl, int opcode);
void siglock(void);
void sigunlock(void);
void monitor_lock_host(const char *hostname);
void unmonitor_lock_host(char *hostname);

void	copy_nlm4_lock_to_nlm4_holder(const struct nlm4_lock *src,
    const bool_t exclusive, struct nlm4_holder *dest);
struct file_lock *	allocate_file_lock(const netobj *lockowner,
    const netobj *matchcookie,
    const struct sockaddr *addr,
    const char *caller_name);
void	deallocate_file_lock(struct file_lock *fl);
void	fill_file_lock(struct file_lock *fl, const fhandle_t *fh,
    const bool_t exclusive, const int32_t svid,
    const u_int64_t offset, const u_int64_t len,
    const int state, const int status, const int flags, const int blocking);
int	regions_overlap(const u_int64_t start1, const u_int64_t len1,
    const u_int64_t start2, const u_int64_t len2);
enum split_status region_compare(const u_int64_t starte, const u_int64_t lene,
    const u_int64_t startu, const u_int64_t lenu,
    u_int64_t *start1, u_int64_t *len1, u_int64_t *start2, u_int64_t *len2);
int	same_netobj(const netobj *n0, const netobj *n1);
int	same_filelock_identity(const struct file_lock *fl0,
    const struct file_lock *fl1);

static void debuglog(char const *fmt, ...);
void dump_static_object(const unsigned char *object, const int sizeof_object,
    unsigned char *hbuff, const int sizeof_hbuff,
    unsigned char *cbuff, const int sizeof_cbuff);
void dump_netobj(const struct netobj *nobj);
void dump_filelock(const struct file_lock *fl);
struct file_lock *	get_lock_matching_unlock(const struct file_lock *fl);
enum nfslock_status	test_nfslock(const struct file_lock *fl,
    struct file_lock **conflicting_fl);
enum nfslock_status	lock_nfslock(struct file_lock *fl);
enum nfslock_status	delete_nfslock(struct file_lock *fl);
enum nfslock_status	unlock_nfslock(const struct file_lock *fl,
    struct file_lock **released_lock, struct file_lock **left_lock,
    struct file_lock **right_lock);
enum hwlock_status lock_hwlock(struct file_lock *fl);
enum split_status split_nfslock(const struct file_lock *exist_lock,
    const struct file_lock *unlock_lock, struct file_lock **left_lock,
    struct file_lock **right_lock);
void	add_blockingfilelock(struct file_lock *fl);
enum hwlock_status unlock_hwlock(const struct file_lock *fl);
enum hwlock_status test_hwlock(const struct file_lock *fl,
    struct file_lock **conflicting_fl);
void	remove_blockingfilelock(struct file_lock *fl);
void	clear_blockingfilelock(const char *hostname);
void	retry_blockingfilelocklist(void);
enum partialfilelock_status	unlock_partialfilelock(
    const struct file_lock *fl);
void	clear_partialfilelock(const char *hostname);
enum partialfilelock_status	test_partialfilelock(
    const struct file_lock *fl, struct file_lock **conflicting_fl);
enum nlm_stats	do_test(struct file_lock *fl,
    struct file_lock **conflicting_fl);
enum nlm_stats	do_unlock(struct file_lock *fl);
enum nlm_stats	do_lock(struct file_lock *fl);
void	do_clear(const char *hostname);

static void
debuglog(char const *fmt, ...)
{
	va_list ap;

	if (debug_level < 1) {
		return;
	}

	sleep(debugdelay);

	va_start(ap, fmt);
	vsyslog(LOG_DEBUG, fmt, ap);
	va_end(ap);
}

void
dump_static_object(object, size_object, hbuff, size_hbuff, cbuff, size_cbuff)
	const unsigned char *object;
	const int size_object;
	unsigned char *hbuff;
	const int size_hbuff;
	unsigned char *cbuff;
	const int size_cbuff;
{
	int i, objectsize;

	if (debug_level < 2) {
		return;
	}

	objectsize = size_object;

	if (objectsize == 0) {
		debuglog("object is size 0\n");
	} else {
		if (objectsize > MAXOBJECTSIZE) {
			debuglog("Object of size %d being clamped"
			    " to size %d\n", objectsize, MAXOBJECTSIZE);
			objectsize = MAXOBJECTSIZE;
		}

		if (hbuff != NULL) {
			if (size_hbuff < objectsize*2+1) {
				debuglog("Hbuff not large enough."
				    "  Increase size\n");
			} else {
				for (i = 0; i < objectsize; i++) {
					sprintf(hbuff+i*2, "%02x", *(object+i));
				}
				*(hbuff+i*2) = '\0';
			}
		}

		if (cbuff != NULL) {
			if (size_cbuff < objectsize+1) {
				debuglog("Cbuff not large enough."
				    "  Increase Size\n");
			} else {
				for (i = 0; i < objectsize; i++) {
					if (*(object+i) >= 32 && *(object+i) <= 126) {
						*(cbuff+i) = *(object+i);
					} else {
						*(cbuff+i) = '.';
					}
				}
				*(cbuff+i) = '\0';
			}
		}
	}
}
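/*
 * Example of the resulting dump format: for the four bytes
 * { 0x6c, 0x6f, 0x63, 0x6b }, hbuff receives the hex string "6c6f636b"
 * and cbuff the printable rendition "lock"; bytes outside the printable
 * ASCII range show up as '.' in cbuff.
 */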
void
dump_netobj(const struct netobj *nobj)
{
	char hbuff[MAXBUFFERSIZE*2];
	char cbuff[MAXBUFFERSIZE];

	if (debug_level < 2) {
		return;
	}

	if (nobj == NULL) {
		debuglog("Null netobj pointer\n");
	}
	else if (nobj->n_len == 0) {
		debuglog("Size zero netobj\n");
	} else {
		dump_static_object(nobj->n_bytes, nobj->n_len,
		    hbuff, sizeof(hbuff), cbuff, sizeof(cbuff));
		debuglog("netobj: len: %d  data: %s ::: %s\n",
		    nobj->n_len, hbuff, cbuff);
	}
}

/* #define DUMP_FILELOCK_VERBOSE */
void
dump_filelock(const struct file_lock *fl)
{
#ifdef DUMP_FILELOCK_VERBOSE
	char hbuff[MAXBUFFERSIZE*2];
	char cbuff[MAXBUFFERSIZE];
#endif

	if (debug_level < 2) {
		return;
	}

	if (fl != NULL) {
		debuglog("Dumping file lock structure @ %p\n", fl);

#ifdef DUMP_FILELOCK_VERBOSE
		dump_static_object((unsigned char *)&fl->filehandle,
		    sizeof(fl->filehandle), hbuff, sizeof(hbuff),
		    cbuff, sizeof(cbuff));
		debuglog("Filehandle: %8s ::: %8s\n", hbuff, cbuff);
#endif

		debuglog("Dumping nlm4_holder:\n"
		    "exc: %x  svid: %x  offset:len %llx:%llx\n",
		    fl->client.exclusive, fl->client.svid,
		    fl->client.l_offset, fl->client.l_len);

#ifdef DUMP_FILELOCK_VERBOSE
		debuglog("Dumping client identity:\n");
		dump_netobj(&fl->client.oh);

		debuglog("Dumping client cookie:\n");
		dump_netobj(&fl->client_cookie);

		debuglog("nsm: %d  status: %d  flags: %d  blocking: %d\n",
		    fl->nsm_status, fl->status, fl->flags, fl->blocking);
#endif
	} else {
		debuglog("NULL file lock structure\n");
	}
}

void
copy_nlm4_lock_to_nlm4_holder(src, exclusive, dest)
	const struct nlm4_lock *src;
	const bool_t exclusive;
	struct nlm4_holder *dest;
{

	dest->exclusive = exclusive;
	dest->oh.n_len = src->oh.n_len;
	dest->oh.n_bytes = src->oh.n_bytes;
	dest->svid = src->svid;
	dest->l_offset = src->l_offset;
	dest->l_len = src->l_len;
}


size_t
strnlen(const char *s, size_t len)
{
	size_t n;

	for (n = 0; s[n] != 0 && n < len; n++)
		;
	return n;
}

/*
 * allocate_file_lock: Create a lock with the given parameters
 */

struct file_lock *
allocate_file_lock(const netobj *lockowner, const netobj *matchcookie,
    const struct sockaddr *addr, const char *caller_name)
{
	struct file_lock *newfl;
	size_t n;

	/* Beware of rubbish input! */
	n = strnlen(caller_name, SM_MAXSTRLEN);
	if (n == SM_MAXSTRLEN) {
		return NULL;
	}

	newfl = malloc(sizeof(*newfl) - sizeof(newfl->client_name) + n + 1);
	if (newfl == NULL) {
		return NULL;
	}
	bzero(newfl, sizeof(*newfl) - sizeof(newfl->client_name));
	memcpy(newfl->client_name, caller_name, n);
	newfl->client_name[n] = 0;

	newfl->client.oh.n_bytes = malloc(lockowner->n_len);
	if (newfl->client.oh.n_bytes == NULL) {
		free(newfl);
		return NULL;
	}
	newfl->client.oh.n_len = lockowner->n_len;
	bcopy(lockowner->n_bytes, newfl->client.oh.n_bytes, lockowner->n_len);

	newfl->client_cookie.n_bytes = malloc(matchcookie->n_len);
	if (newfl->client_cookie.n_bytes == NULL) {
		free(newfl->client.oh.n_bytes);
		free(newfl);
		return NULL;
	}
	newfl->client_cookie.n_len = matchcookie->n_len;
	bcopy(matchcookie->n_bytes, newfl->client_cookie.n_bytes, matchcookie->n_len);

	newfl->addr = malloc(addr->sa_len);
	if (newfl->addr == NULL) {
		free(newfl->client_cookie.n_bytes);
		free(newfl->client.oh.n_bytes);
		free(newfl);
		return NULL;
	}
	memcpy(newfl->addr, addr, addr->sa_len);

	return newfl;
}

/*
 * fill_file_lock: Force creation of a valid file lock
 */
void
fill_file_lock(struct file_lock *fl, const fhandle_t *fh,
    const bool_t exclusive, const int32_t svid,
    const u_int64_t offset, const u_int64_t len,
    const int state, const int status, const int flags, const int blocking)
{
	bcopy(fh, &fl->filehandle, sizeof(fhandle_t));

	fl->client.exclusive = exclusive;
	fl->client.svid = svid;
	fl->client.l_offset = offset;
	fl->client.l_len = len;

	fl->nsm_status = state;
	fl->status = status;
	fl->flags = flags;
	fl->blocking = blocking;
}

/*
 * deallocate_file_lock: Free all storage associated with a file lock
 */
void
deallocate_file_lock(struct file_lock *fl)
{
	free(fl->addr);
	free(fl->client.oh.n_bytes);
	free(fl->client_cookie.n_bytes);
	free(fl);
}

/*
 * regions_overlap(): This function examines the two provided regions for
 * overlap.
 */
int
regions_overlap(start1, len1, start2, len2)
	const u_int64_t start1, len1, start2, len2;
{
	u_int64_t d1, d2, d3, d4;
	enum split_status result;

	debuglog("Entering region overlap with vals: %llu:%llu--%llu:%llu\n",
	    start1, len1, start2, len2);

	result = region_compare(start1, len1, start2, len2,
	    &d1, &d2, &d3, &d4);

	debuglog("Exiting region overlap with val: %d\n", result);

	return (result == SPL_DISJOINT) ? 0 : 1;
}

/*
 * region_compare(): Examine lock regions and split appropriately
 *
 * XXX: Fix 64 bit overflow problems
 * XXX: Check to make sure I got *ALL* the cases.
 * XXX: This DESPERATELY needs a regression test.
 */
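/*
 * A worked example (a length of 0 means "to infinity" throughout):
 * given an established lock starte=10, lene=10 (bytes 10-19) and an
 * unlock request startu=14, lenu=2 (bytes 14-15), both edges of the
 * unlock fall inside the established lock, so the result is
 * SPL_LOCK1|SPL_LOCK2 with the left remainder start1=10, len1=4
 * (bytes 10-13) and the right remainder start2=16, len2=4 (bytes 16-19).
 */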
enum split_status
region_compare(starte, lene, startu, lenu,
    start1, len1, start2, len2)
	const u_int64_t starte, lene, startu, lenu;
	u_int64_t *start1, *len1, *start2, *len2;
{
	/*
	 * Please pay attention to the sequential exclusions
	 * of the if statements!!!
	 */
	enum LFLAGS lflags;
	enum RFLAGS rflags;
	enum split_status retval;

	retval = SPL_DISJOINT;

	if (lene == 0 && lenu == 0) {
		/* Examine left edge of unlocker */
		if (startu < starte) {
			lflags = LEDGE_LEFT;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
		} else {
			lflags = LEDGE_INSIDE;
		}

		rflags = REDGE_RBOUNDARY; /* Both are infinite */

		if (lflags == LEDGE_INSIDE) {
			*start1 = starte;
			*len1 = startu - starte;
		}

		if (lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) {
			retval = SPL_CONTAINED;
		} else {
			retval = SPL_LOCK1;
		}
	} else if (lene == 0 && lenu != 0) {
		/* Established lock is infinite */
		/* Examine left edge of unlocker */
		if (startu < starte) {
			lflags = LEDGE_LEFT;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
		} else if (startu > starte) {
			lflags = LEDGE_INSIDE;
		}

		/* Examine right edge of unlocker */
		if (startu + lenu < starte) {
			/* Right edge of unlocker left of established lock */
			rflags = REDGE_LEFT;
			return SPL_DISJOINT;
		} else if (startu + lenu == starte) {
			/* Right edge of unlocker on start of established lock */
			rflags = REDGE_LBOUNDARY;
			return SPL_DISJOINT;
		} else { /* Infinity is right of finity */
			/* Right edge of unlocker inside established lock */
			rflags = REDGE_INSIDE;
		}

		if (lflags == LEDGE_INSIDE) {
			*start1 = starte;
			*len1 = startu - starte;
			retval |= SPL_LOCK1;
		}

		if (rflags == REDGE_INSIDE) {
			/* Create right lock */
			*start2 = startu+lenu;
			*len2 = 0;
			retval |= SPL_LOCK2;
		}
	} else if (lene != 0 && lenu == 0) {
		/* Unlocker is infinite */
		/* Examine left edge of unlocker */
		if (startu < starte) {
			lflags = LEDGE_LEFT;
			retval = SPL_CONTAINED;
			return retval;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
			retval = SPL_CONTAINED;
			return retval;
		} else if ((startu > starte) && (startu < starte + lene - 1)) {
			lflags = LEDGE_INSIDE;
		} else if (startu == starte + lene - 1) {
			lflags = LEDGE_RBOUNDARY;
		} else { /* startu > starte + lene - 1 */
			lflags = LEDGE_RIGHT;
			return SPL_DISJOINT;
		}

		rflags = REDGE_RIGHT; /* Infinity is right of finity */

		if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) {
			*start1 = starte;
			*len1 = startu - starte;
			retval |= SPL_LOCK1;
			return retval;
		}

	} else {
		/* Both locks are finite */

		/* Examine left edge of unlocker */
		if (startu < starte) {
			lflags = LEDGE_LEFT;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
		} else if ((startu > starte) && (startu < starte + lene - 1)) {
			lflags = LEDGE_INSIDE;
		} else if (startu == starte + lene - 1) {
			lflags = LEDGE_RBOUNDARY;
		} else { /* startu > starte + lene - 1 */
			lflags = LEDGE_RIGHT;
			return SPL_DISJOINT;
		}

		/* Examine right edge of unlocker */
		if (startu + lenu < starte) {
			/* Right edge of unlocker left of established lock */
			rflags = REDGE_LEFT;
			return SPL_DISJOINT;
		} else if (startu + lenu == starte) {
			/* Right edge of unlocker on start of established lock */
			rflags = REDGE_LBOUNDARY;
			return SPL_DISJOINT;
		} else if (startu + lenu < starte + lene) {
			/* Right edge of unlocker inside established lock */
			rflags = REDGE_INSIDE;
		} else if (startu + lenu == starte + lene) {
			/* Right edge of unlocker on right edge of established lock */
			rflags = REDGE_RBOUNDARY;
		} else { /* startu + lenu > starte + lene */
			/* Right edge of unlocker is right of established lock */
			rflags = REDGE_RIGHT;
		}

		if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) {
			/* Create left lock */
			*start1 = starte;
			*len1 = (startu - starte);
			retval |= SPL_LOCK1;
		}

		if (rflags == REDGE_INSIDE) {
			/* Create right lock */
			*start2 = startu+lenu;
			*len2 = starte+lene-(startu+lenu);
			retval |= SPL_LOCK2;
		}

		if ((lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) &&
		    (rflags == REDGE_RBOUNDARY || rflags == REDGE_RIGHT)) {
			retval = SPL_CONTAINED;
		}
	}

	return retval;
}

/*
 * same_netobj: Compares the appropriate bits of a netobj for identity
 */
int
same_netobj(const netobj *n0, const netobj *n1)
{
	int retval;

	retval = 0;

	debuglog("Entering netobj identity check\n");

	if (n0->n_len == n1->n_len) {
		debuglog("Preliminary length check passed\n");
		retval = !bcmp(n0->n_bytes, n1->n_bytes, n0->n_len);
		debuglog("netobj %smatch\n", retval ? "" : "mis");
	}

	return (retval);
}

/*
 * same_filelock_identity: Compares the appropriate bits of a file_lock
 */
int
same_filelock_identity(fl0, fl1)
	const struct file_lock *fl0, *fl1;
{
	int retval;

	retval = 0;

	debuglog("Checking filelock identity\n");

	/*
	 * Check process ids and host information.
	 */
	retval = (fl0->client.svid == fl1->client.svid &&
	    same_netobj(&(fl0->client.oh), &(fl1->client.oh)));

	debuglog("Exiting checking filelock identity: retval: %d\n", retval);

	return (retval);
}
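/*
 * In other words, two file locks are considered to come from the same
 * owner when they carry the same svid (the client process id) and a
 * byte-identical owner handle.  Region and exclusivity are deliberately
 * ignored here so that a lock can be matched against an unlock request
 * that covers it.
 */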
/*
 * Below here are routines associated with manipulating the NFS
 * lock list.
 */

/*
 * get_lock_matching_unlock: Return a lock which matches the given unlock lock
 * or NULL otherwise
 * XXX: It is a shame that this duplicates so much code from test_nfslock.
 */
struct file_lock *
get_lock_matching_unlock(const struct file_lock *fl)
{
	struct file_lock *ifl; /* Iterator */

	debuglog("Entering lock_matching_unlock\n");
	debuglog("********Dump of fl*****************\n");
	dump_filelock(fl);

	LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
		debuglog("Pointer to file lock: %p\n", ifl);

		debuglog("****Dump of ifl****\n");
		dump_filelock(ifl);
		debuglog("*******************\n");

		/*
		 * XXX: It is conceivable that someone could use the NLM RPC
		 * system to directly access filehandles.  This may be a
		 * security hazard as the filehandle code may bypass normal
		 * file access controls
		 */
		if (bcmp(&fl->filehandle, &ifl->filehandle, sizeof(fhandle_t)))
			continue;

		debuglog("matching_unlock: Filehandles match, "
		    "checking regions\n");

		/* Filehandles match, check for region overlap */
		if (!regions_overlap(fl->client.l_offset, fl->client.l_len,
			ifl->client.l_offset, ifl->client.l_len))
			continue;

		debuglog("matching_unlock: Region overlap"
		    " found %llu : %llu -- %llu : %llu\n",
		    fl->client.l_offset, fl->client.l_len,
		    ifl->client.l_offset, ifl->client.l_len);

		/* Regions overlap, check the identity */
		if (!same_filelock_identity(fl, ifl))
			continue;

		debuglog("matching_unlock: Duplicate lock id.  Granting\n");
		return (ifl);
	}

	debuglog("Exiting lock_matching_unlock\n");

	return (NULL);
}

/*
 * test_nfslock: check for NFS lock in lock list
 *
 * This routine makes the following assumptions:
 *    1) Nothing will adjust the lock list during a lookup
 *
 * This routine has an interesting quirk which bit me hard.
 * The conflicting_fl is the pointer to the conflicting lock.
 * However, to modify the "*pointer* to the conflicting lock" rather
 * than the "conflicting lock itself" one must pass in a "pointer to
 * the pointer of the conflicting lock".  Gross.
 */

enum nfslock_status
test_nfslock(const struct file_lock *fl, struct file_lock **conflicting_fl)
{
	struct file_lock *ifl; /* Iterator */
	enum nfslock_status retval;

	debuglog("Entering test_nfslock\n");

	retval = NFS_GRANTED;
	(*conflicting_fl) = NULL;

	debuglog("Entering lock search loop\n");

	debuglog("***********************************\n");
	debuglog("Dumping match filelock\n");
	debuglog("***********************************\n");
	dump_filelock(fl);
	debuglog("***********************************\n");

	LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
		if (retval == NFS_DENIED)
			break;

		debuglog("Top of lock loop\n");
		debuglog("Pointer to file lock: %p\n", ifl);

		debuglog("***********************************\n");
		debuglog("Dumping test filelock\n");
		debuglog("***********************************\n");
		dump_filelock(ifl);
		debuglog("***********************************\n");

		/*
		 * XXX: It is conceivable that someone could use the NLM RPC
		 * system to directly access filehandles.  This may be a
		 * security hazard as the filehandle code may bypass normal
		 * file access controls
		 */
		if (bcmp(&fl->filehandle, &ifl->filehandle, sizeof(fhandle_t)))
			continue;

		debuglog("test_nfslock: filehandle match found\n");

		/* Filehandles match, check for region overlap */
		if (!regions_overlap(fl->client.l_offset, fl->client.l_len,
			ifl->client.l_offset, ifl->client.l_len))
			continue;

		debuglog("test_nfslock: Region overlap found"
		    " %llu : %llu -- %llu : %llu\n",
		    fl->client.l_offset, fl->client.l_len,
		    ifl->client.l_offset, ifl->client.l_len);

		/* Regions overlap, check the exclusivity */
		if (!(fl->client.exclusive || ifl->client.exclusive))
			continue;

		debuglog("test_nfslock: Exclusivity failure: %d %d\n",
		    fl->client.exclusive,
		    ifl->client.exclusive);

		if (same_filelock_identity(fl, ifl)) {
			debuglog("test_nfslock: Duplicate id.  Granting\n");
			(*conflicting_fl) = ifl;
			retval = NFS_GRANTED_DUPLICATE;
		} else {
			/* locking attempt fails */
			debuglog("test_nfslock: Lock attempt failed\n");
			debuglog("Desired lock\n");
			dump_filelock(fl);
			debuglog("Conflicting lock\n");
			dump_filelock(ifl);
			(*conflicting_fl) = ifl;
			retval = NFS_DENIED;
		}
	}

	debuglog("Dumping file locks\n");
	debuglog("Exiting test_nfslock\n");

	return (retval);
}

/*
 * lock_nfslock: attempt to create a lock in the NFS lock list
 *
 * This routine tests whether the lock will be granted and then adds
 * the entry to the lock list if so.
 *
 * Argument fl gets modified as its list housekeeping entries get modified
 * upon insertion into the NFS lock list
 *
 * This routine makes several assumptions:
 *    1) It is perfectly happy to grant a duplicate lock from the same pid.
 *       While this seems to be intuitively wrong, it is required for proper
 *       Posix semantics during unlock.  It is absolutely imperative to not
 *       unlock the main lock before the two child locks are established.  Thus,
 *       one has to be able to create duplicate locks over an existing lock
 *    2) It currently accepts duplicate locks from the same id,pid
 */

enum nfslock_status
lock_nfslock(struct file_lock *fl)
{
	enum nfslock_status retval;
	struct file_lock *dummy_fl;

	dummy_fl = NULL;

	debuglog("Entering lock_nfslock...\n");

	retval = test_nfslock(fl, &dummy_fl);

	if (retval == NFS_GRANTED || retval == NFS_GRANTED_DUPLICATE) {
		debuglog("Inserting lock...\n");
		dump_filelock(fl);
		LIST_INSERT_HEAD(&nfslocklist_head, fl, nfslocklist);
	}

	debuglog("Exiting lock_nfslock...\n");

	return (retval);
}
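/*
 * Note that the test-then-insert sequence above is not atomic by itself;
 * it relies on the assumption documented for test_nfslock() that nothing
 * adjusts the lock list during the lookup.  The RPC entry points below
 * bracket their calls with siglock()/sigunlock(), which is presumably
 * what keeps the lists from being modified underneath us.
 */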
/*
 * delete_nfslock: delete an NFS lock list entry
 *
 * This routine is used to delete a lock out of the NFS lock list
 * without regard to status, underlying locks, regions or anything else
 *
 * Note that this routine *does not deallocate memory* of the lock.
 * It just disconnects it from the list.  The lock can then be used
 * by other routines without fear of trashing the list.
 */

enum nfslock_status
delete_nfslock(struct file_lock *fl)
{

	LIST_REMOVE(fl, nfslocklist);

	return (NFS_GRANTED);
}

enum split_status
split_nfslock(exist_lock, unlock_lock, left_lock, right_lock)
	const struct file_lock *exist_lock, *unlock_lock;
	struct file_lock **left_lock, **right_lock;
{
	u_int64_t start1, len1, start2, len2;
	enum split_status spstatus;

	spstatus = region_compare(exist_lock->client.l_offset, exist_lock->client.l_len,
	    unlock_lock->client.l_offset, unlock_lock->client.l_len,
	    &start1, &len1, &start2, &len2);

	if ((spstatus & SPL_LOCK1) != 0) {
		*left_lock = allocate_file_lock(&exist_lock->client.oh,
		    &exist_lock->client_cookie, exist_lock->addr,
		    exist_lock->client_name);
		if (*left_lock == NULL) {
			debuglog("Unable to allocate resource for split 1\n");
			return SPL_RESERR;
		}

		fill_file_lock(*left_lock, &exist_lock->filehandle,
		    exist_lock->client.exclusive, exist_lock->client.svid,
		    start1, len1,
		    exist_lock->nsm_status,
		    exist_lock->status, exist_lock->flags, exist_lock->blocking);
	}

	if ((spstatus & SPL_LOCK2) != 0) {
		*right_lock = allocate_file_lock(&exist_lock->client.oh,
		    &exist_lock->client_cookie, exist_lock->addr,
		    exist_lock->client_name);
		if (*right_lock == NULL) {
			debuglog("Unable to allocate resource for split 2\n");
			if (*left_lock != NULL) {
				deallocate_file_lock(*left_lock);
				*left_lock = NULL;
			}
			return SPL_RESERR;
		}

		fill_file_lock(*right_lock, &exist_lock->filehandle,
		    exist_lock->client.exclusive, exist_lock->client.svid,
		    start2, len2,
		    exist_lock->nsm_status,
		    exist_lock->status, exist_lock->flags, exist_lock->blocking);
	}

	return spstatus;
}
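/*
 * Ownership convention: split_nfslock() only allocates the remainder
 * locks.  Its caller, unlock_nfslock(), is responsible for inserting
 * them into the NFS lock list, or for freeing them again when the
 * split reports SPL_RESERR.
 */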
enum nfslock_status
unlock_nfslock(fl, released_lock, left_lock, right_lock)
	const struct file_lock *fl;
	struct file_lock **released_lock;
	struct file_lock **left_lock;
	struct file_lock **right_lock;
{
	struct file_lock *mfl; /* Matching file lock */
	enum nfslock_status retval;
	enum split_status spstatus;

	debuglog("Entering unlock_nfslock\n");

	*released_lock = NULL;
	*left_lock = NULL;
	*right_lock = NULL;

	retval = NFS_DENIED_NOLOCK;

	debuglog("Attempting to match lock...\n");
	mfl = get_lock_matching_unlock(fl);

	if (mfl != NULL) {
		debuglog("Unlock matched.  Querying for split\n");

		spstatus = split_nfslock(mfl, fl, left_lock, right_lock);

		debuglog("Split returned %d %p %p %p %p\n", spstatus,
		    mfl, fl, *left_lock, *right_lock);
		debuglog("********Split dumps********");
		dump_filelock(mfl);
		dump_filelock(fl);
		dump_filelock(*left_lock);
		dump_filelock(*right_lock);
		debuglog("********End Split dumps********");

		if (spstatus == SPL_RESERR) {
			if (*left_lock != NULL) {
				deallocate_file_lock(*left_lock);
				*left_lock = NULL;
			}

			if (*right_lock != NULL) {
				deallocate_file_lock(*right_lock);
				*right_lock = NULL;
			}

			return NFS_RESERR;
		}

		/* Insert new locks from split if required */
		if (*left_lock != NULL) {
			debuglog("Split left activated\n");
			LIST_INSERT_HEAD(&nfslocklist_head, *left_lock, nfslocklist);
		}

		if (*right_lock != NULL) {
			debuglog("Split right activated\n");
			LIST_INSERT_HEAD(&nfslocklist_head, *right_lock, nfslocklist);
		}

		/* Unlock the lock since it matches identity */
		LIST_REMOVE(mfl, nfslocklist);
		*released_lock = mfl;
		retval = NFS_GRANTED;
	}

	debuglog("Exiting unlock_nfslock\n");

	return retval;
}

/*
 * Below here are the routines for manipulating the file lock directly
 * on the disk hardware itself
 */
enum hwlock_status
lock_hwlock(struct file_lock *fl)
{
	struct monfile *imf, *nmf;
	int lflags, flerror;

	/* Scan to see if filehandle already present */
	LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
		if (bcmp(&fl->filehandle, &imf->filehandle,
			sizeof(fl->filehandle)) == 0) {
			/* imf is the correct filehandle */
			break;
		}
	}

	/*
	 * Filehandle already exists (we control the file)
	 * *AND* NFS has already cleared the lock for availability
	 * Grant it and bump the refcount.
	 */
	if (imf != NULL) {
		++(imf->refcount);
		return (HW_GRANTED);
	}

	/* No filehandle found, create and go */
	nmf = malloc(sizeof(struct monfile));
	if (nmf == NULL) {
		debuglog("hwlock resource allocation failure\n");
		return (HW_RESERR);
	}

	/* XXX: Is O_RDWR always the correct mode? */
	nmf->fd = fhopen(&fl->filehandle, O_RDWR);
	if (nmf->fd < 0) {
		debuglog("fhopen failed (from %16s): %32s\n",
		    fl->client_name, strerror(errno));
		free(nmf);
		switch (errno) {
		case ESTALE:
			return (HW_STALEFH);
		case EROFS:
			return (HW_READONLY);
		default:
			return (HW_RESERR);
		}
	}

	/* File opened correctly, fill the monitor struct */
	bcopy(&fl->filehandle, &nmf->filehandle, sizeof(fl->filehandle));
	nmf->refcount = 1;
	nmf->exclusive = fl->client.exclusive;

	lflags = (nmf->exclusive == 1) ?
	    (LOCK_EX | LOCK_NB) : (LOCK_SH | LOCK_NB);

	flerror = flock(nmf->fd, lflags);

	if (flerror != 0) {
		debuglog("flock failed (from %16s): %32s\n",
		    fl->client_name, strerror(errno));
		close(nmf->fd);
		free(nmf);
		switch (errno) {
		case EAGAIN:
			return (HW_DENIED);
		case ESTALE:
			return (HW_STALEFH);
		case EROFS:
			return (HW_READONLY);
		default:
			return (HW_RESERR);
		}
	}

	/* File opened and locked */
	LIST_INSERT_HEAD(&monfilelist_head, nmf, monfilelist);

	debuglog("flock succeeded (from %16s)\n", fl->client_name);
	return (HW_GRANTED);
}

enum hwlock_status
unlock_hwlock(const struct file_lock *fl)
{
	struct monfile *imf;

	debuglog("Entering unlock_hwlock\n");
	debuglog("Entering loop iteration\n");

	/* Scan to see if filehandle already present */
	LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
		if (bcmp(&fl->filehandle, &imf->filehandle,
			sizeof(fl->filehandle)) == 0) {
			/* imf is the correct filehandle */
			break;
		}
	}

	debuglog("Completed iteration.  Proceeding\n");

	if (imf == NULL) {
		/* No lock found */
		debuglog("Exiting unlock_hwlock (HW_DENIED_NOLOCK)\n");
		return (HW_DENIED_NOLOCK);
	}

	/* Lock found */
	--imf->refcount;

	if (imf->refcount < 0) {
		debuglog("Negative hardware reference count\n");
	}

	if (imf->refcount <= 0) {
		close(imf->fd);
		LIST_REMOVE(imf, monfilelist);
		free(imf);
	}
	debuglog("Exiting unlock_hwlock (HW_GRANTED)\n");
	return (HW_GRANTED);
}

enum hwlock_status
test_hwlock(fl, conflicting_fl)
	const struct file_lock *fl __unused;
	struct file_lock **conflicting_fl __unused;
{

	/*
	 * XXX: lock tests on hardware are not required until
	 * true partial file testing is done on the underlying file
	 */
	return (HW_RESERR);
}



/*
 * Below here are routines for manipulating blocked lock requests
 * They should only be called from the XXX_partialfilelock routines
 * if at all possible
 */

void
add_blockingfilelock(struct file_lock *fl)
{

	debuglog("Entering add_blockingfilelock\n");

	/*
	 * Clear the blocking flag so that it can be reused without
	 * adding it to the blocking queue a second time
	 */

	fl->blocking = 0;
	LIST_INSERT_HEAD(&blockedlocklist_head, fl, nfslocklist);

	debuglog("Exiting add_blockingfilelock\n");
}

void
remove_blockingfilelock(struct file_lock *fl)
{

	debuglog("Entering remove_blockingfilelock\n");

	LIST_REMOVE(fl, nfslocklist);

	debuglog("Exiting remove_blockingfilelock\n");
}
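/*
 * Blocked locks are queued on the same nfslocklist linkage field used
 * by the active NFS lock list, so a file_lock can be on only one of the
 * two lists at any moment.  retry_blockingfilelocklist() depends on
 * this when it unlinks an entry before retrying it.
 */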
void
clear_blockingfilelock(const char *hostname)
{
	struct file_lock *ifl, *nfl;

	/*
	 * Normally, LIST_FOREACH is called for, but since
	 * the current element *is* the iterator, deleting it
	 * would mess up the iteration.  Thus, a next element
	 * must be used explicitly
	 */

	ifl = LIST_FIRST(&blockedlocklist_head);

	while (ifl != NULL) {
		nfl = LIST_NEXT(ifl, nfslocklist);

		if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
			remove_blockingfilelock(ifl);
			deallocate_file_lock(ifl);
		}

		ifl = nfl;
	}
}

void
retry_blockingfilelocklist(void)
{
	/* Retry all locks in the blocked list */
	struct file_lock *ifl, *nfl; /* Iterator */
	enum partialfilelock_status pflstatus;

	debuglog("Entering retry_blockingfilelocklist\n");

	LIST_FOREACH_SAFE(ifl, &blockedlocklist_head, nfslocklist, nfl) {
		debuglog("Iterator choice %p\n", ifl);
		debuglog("Next iterator choice %p\n", nfl);

		/*
		 * SUBTLE BUG: The file_lock must be removed from the
		 * old list so that its list pointers get disconnected
		 * before being allowed to participate in the new list
		 * which will automatically add it in if necessary.
		 */

		LIST_REMOVE(ifl, nfslocklist);
		pflstatus = lock_partialfilelock(ifl);

		if (pflstatus == PFL_GRANTED || pflstatus == PFL_GRANTED_DUPLICATE) {
			debuglog("Granted blocked lock\n");
			/* lock granted and is now being used */
			send_granted(ifl, 0);
		} else {
			/* Reinsert lock back into blocked list */
			debuglog("Replacing blocked lock\n");
			LIST_INSERT_HEAD(&blockedlocklist_head, ifl, nfslocklist);
		}
	}

	debuglog("Exiting retry_blockingfilelocklist\n");
}
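/*
 * retry_blockingfilelocklist() is invoked from unlock_partialfilelock()
 * after a successful hardware unlock: releasing a region may allow one
 * of the queued blocking locks to be granted, so the whole queue gets
 * retried at that point.
 */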
/*
 * Below here are routines associated with manipulating all
 * aspects of the partial file locking system (list, hardware, etc.)
 */

/*
 * Please note that lock monitoring must be done at this level which
 * keeps track of *individual* lock requests on lock and unlock
 *
 * XXX: Split unlocking is going to make the unlock code miserable
 */

/*
 * lock_partialfilelock:
 *
 * Argument fl gets modified as its list housekeeping entries get modified
 * upon insertion into the NFS lock list
 *
 * This routine makes several assumptions:
 * 1) It (will) pass locks through to flock to lock the entire underlying file
 *    and then parcel out NFS locks if it gets control of the file.
 *    This matches the old rpc.lockd file semantics (except where it
 *    is now more correct).  It is the safe solution, but will cause
 *    overly restrictive blocking if someone is trying to use the
 *    underlying files without using NFS.  This appears to be an
 *    acceptable tradeoff since most people use standalone NFS servers.
 *    XXX: The right solution is probably kevent combined with fcntl
 *
 * 2) Nothing modifies the lock lists between testing and granting
 *    I have no idea whether this is a useful assumption or not
 */

enum partialfilelock_status
lock_partialfilelock(struct file_lock *fl)
{
	enum partialfilelock_status retval;
	enum nfslock_status lnlstatus;
	enum hwlock_status hwstatus;

	debuglog("Entering lock_partialfilelock\n");

	retval = PFL_DENIED;

	/*
	 * Execute the NFS lock first, if possible, as it is significantly
	 * easier and less expensive to undo than the filesystem lock
	 */

	lnlstatus = lock_nfslock(fl);

	switch (lnlstatus) {
	case NFS_GRANTED:
	case NFS_GRANTED_DUPLICATE:
		/*
		 * At this point, the NFS lock is allocated and active.
		 * Remember to clean it up if the hardware lock fails
		 */
		hwstatus = lock_hwlock(fl);

		switch (hwstatus) {
		case HW_GRANTED:
		case HW_GRANTED_DUPLICATE:
			debuglog("HW GRANTED\n");
			/*
			 * XXX: Fixme: Check hwstatus for duplicate when
			 * true partial file locking and accounting is
			 * done on the hardware
			 */
			if (lnlstatus == NFS_GRANTED_DUPLICATE) {
				retval = PFL_GRANTED_DUPLICATE;
			} else {
				retval = PFL_GRANTED;
			}
			monitor_lock_host(fl->client_name);
			break;
		case HW_RESERR:
			debuglog("HW RESERR\n");
			retval = PFL_HWRESERR;
			break;
		case HW_DENIED:
			debuglog("HW DENIED\n");
			retval = PFL_HWDENIED;
			break;
		default:
			debuglog("Unmatched hwstatus %d\n", hwstatus);
			break;
		}

		if (retval != PFL_GRANTED &&
		    retval != PFL_GRANTED_DUPLICATE) {
			/* Clean up the NFS lock */
			debuglog("Deleting trial NFS lock\n");
			delete_nfslock(fl);
		}
		break;
	case NFS_DENIED:
		retval = PFL_NFSDENIED;
		break;
	case NFS_RESERR:
		retval = PFL_NFSRESERR;
		break;
	default:
		debuglog("Unmatched lnlstatus %d\n", lnlstatus);
		retval = PFL_NFSDENIED_NOLOCK;
		break;
	}

	/*
	 * By the time fl reaches here, it is completely free again on
	 * failure.  The NFS lock done before attempting the
	 * hardware lock has been backed out
	 */

	if (retval == PFL_NFSDENIED || retval == PFL_HWDENIED) {
		/* One last chance to check the lock */
		if (fl->blocking == 1) {
			if (retval == PFL_NFSDENIED) {
				/* Queue the lock */
				debuglog("BLOCKING LOCK RECEIVED\n");
				retval = PFL_NFSBLOCKED;
				add_blockingfilelock(fl);
				dump_filelock(fl);
			} else {
				/* retval is okay as PFL_HWDENIED */
				debuglog("BLOCKING LOCK DENIED IN HARDWARE\n");
				dump_filelock(fl);
			}
		} else {
			/* Leave retval alone, it's already correct */
			debuglog("Lock denied.  Non-blocking failure\n");
			dump_filelock(fl);
		}
	}

	debuglog("Exiting lock_partialfilelock\n");

	return retval;
}

/*
 * unlock_partialfilelock:
 *
 * Given a file_lock, unlock all locks which match.
 *
 * Note that a given lock might have to unlock ITSELF!  See
 * clear_partialfilelock for example.
 */

enum partialfilelock_status
unlock_partialfilelock(const struct file_lock *fl)
{
	struct file_lock *lfl, *rfl, *releasedfl, *selffl;
	enum partialfilelock_status retval;
	enum nfslock_status unlstatus;
	enum hwlock_status unlhwstatus, lhwstatus;

	debuglog("Entering unlock_partialfilelock\n");

	selffl = NULL;
	lfl = NULL;
	rfl = NULL;
	releasedfl = NULL;
	retval = PFL_DENIED;

	/*
	 * There are significant overlap and atomicity issues
	 * with partially releasing a lock.  For example, releasing
	 * part of an NFS shared lock does *not* always release the
	 * corresponding part of the file since there is only one
	 * rpc.lockd UID but multiple users could be requesting it
	 * from NFS.  Also, an unlock request should never allow
	 * another process to gain a lock on the remaining parts.
	 * ie. Always apply the new locks before releasing the
	 * old one
	 */

	/*
	 * Loop is required since multiple little locks
	 * can be allocated and then deallocated with one
	 * big unlock.
	 *
	 * The loop is required to be here so that the nfs &
	 * hw subsystems do not need to communicate with
	 * one another
	 */

	do {
		debuglog("Value of releasedfl: %p\n", releasedfl);
		/* lfl&rfl are created *AND* placed into the NFS lock list if required */
		unlstatus = unlock_nfslock(fl, &releasedfl, &lfl, &rfl);
		debuglog("Value of releasedfl: %p\n", releasedfl);


		/* XXX: This is grungy.  It should be refactored to be cleaner */
		if (lfl != NULL) {
			lhwstatus = lock_hwlock(lfl);
			if (lhwstatus != HW_GRANTED &&
			    lhwstatus != HW_GRANTED_DUPLICATE) {
				debuglog("HW duplicate lock failure for left split\n");
			}
			monitor_lock_host(lfl->client_name);
		}

		if (rfl != NULL) {
			lhwstatus = lock_hwlock(rfl);
			if (lhwstatus != HW_GRANTED &&
			    lhwstatus != HW_GRANTED_DUPLICATE) {
				debuglog("HW duplicate lock failure for right split\n");
			}
			monitor_lock_host(rfl->client_name);
		}

		switch (unlstatus) {
		case NFS_GRANTED:
			/* Attempt to unlock on the hardware */
			debuglog("NFS unlock granted.  Attempting hardware unlock\n");

			/* This call *MUST NOT* unlock the two newly allocated locks */
			unlhwstatus = unlock_hwlock(fl);
			debuglog("HW unlock returned with code %d\n", unlhwstatus);

			switch (unlhwstatus) {
			case HW_GRANTED:
				debuglog("HW unlock granted\n");
				unmonitor_lock_host(releasedfl->client_name);
				retval = PFL_GRANTED;
				break;
			case HW_DENIED_NOLOCK:
				/* Huh?!?!  This shouldn't happen */
				debuglog("HW unlock denied no lock\n");
				retval = PFL_HWRESERR;
				/* Break out of do-while */
				unlstatus = NFS_RESERR;
				break;
			default:
				debuglog("HW unlock failed\n");
				retval = PFL_HWRESERR;
				/* Break out of do-while */
				unlstatus = NFS_RESERR;
				break;
			}

			debuglog("Exiting with status retval: %d\n", retval);

			retry_blockingfilelocklist();
			break;
		case NFS_DENIED_NOLOCK:
			retval = PFL_GRANTED;
			debuglog("All locks cleaned out\n");
			break;
		default:
			retval = PFL_NFSRESERR;
			debuglog("NFS unlock failure\n");
			dump_filelock(fl);
			break;
		}

		if (releasedfl != NULL) {
			if (fl == releasedfl) {
				/*
				 * XXX: YECHHH!!!  Attempt to unlock self succeeded
				 * but we can't deallocate the space yet.  This is what
				 * happens when you don't write malloc and free together
				 */
				debuglog("Attempt to unlock self\n");
				selffl = releasedfl;
			} else {
				/*
				 * XXX: this deallocation *still* needs to migrate closer
				 * to the allocation code way up in get_lock or the allocation
				 * code needs to migrate down (violation of "When you write
				 * malloc you must write free")
				 */

				deallocate_file_lock(releasedfl);
			}
		}

	} while (unlstatus == NFS_GRANTED);

	if (selffl != NULL) {
		/*
		 * This statement wipes out the incoming file lock (fl)
		 * in spite of the fact that it is declared const
		 */
		debuglog("WARNING!  Destroying incoming lock pointer\n");
		deallocate_file_lock(selffl);
	}

	debuglog("Exiting unlock_partialfilelock\n");

	return retval;
}

/*
 * clear_partialfilelock
 *
 * Normally called in response to statd state number change.
 * Wipe out all locks held by a host.  As a bonus, the act of
 * doing so should automatically clear their statd entries and
 * unmonitor the host.
 */

void
clear_partialfilelock(const char *hostname)
{
	struct file_lock *ifl, *nfl;

	/* Clear blocking file lock list */
	clear_blockingfilelock(hostname);

	/* do all required unlocks */
	/* Note that unlock can smash the current pointer to a lock */

	/*
	 * Normally, LIST_FOREACH is called for, but since
	 * the current element *is* the iterator, deleting it
	 * would mess up the iteration.  Thus, a next element
	 * must be used explicitly
	 */

	ifl = LIST_FIRST(&nfslocklist_head);

	while (ifl != NULL) {
		nfl = LIST_NEXT(ifl, nfslocklist);

		if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
			/* Unlock destroys ifl out from underneath */
			unlock_partialfilelock(ifl);
			/* ifl is NO LONGER VALID AT THIS POINT */
		}
		ifl = nfl;
	}
}

/*
 * test_partialfilelock:
 */
enum partialfilelock_status
test_partialfilelock(const struct file_lock *fl,
    struct file_lock **conflicting_fl)
{
	enum partialfilelock_status retval;
	enum nfslock_status teststatus;

	debuglog("Entering testpartialfilelock...\n");

	retval = PFL_DENIED;

	teststatus = test_nfslock(fl, conflicting_fl);
	debuglog("test_partialfilelock: teststatus %d\n", teststatus);

	if (teststatus == NFS_GRANTED || teststatus == NFS_GRANTED_DUPLICATE) {
		/* XXX: Add the underlying filesystem locking code */
		retval = (teststatus == NFS_GRANTED) ?
		    PFL_GRANTED : PFL_GRANTED_DUPLICATE;
		debuglog("Dumping locks...\n");
		dump_filelock(fl);
		dump_filelock(*conflicting_fl);
		debuglog("Done dumping locks...\n");
	} else {
		retval = PFL_NFSDENIED;
		debuglog("NFS test denied.\n");
		dump_filelock(fl);
		debuglog("Conflicting.\n");
		dump_filelock(*conflicting_fl);
	}

	debuglog("Exiting testpartialfilelock...\n");

	return retval;
}
/*
 * Below here are routines associated with translating the partial file locking
 * codes into useful codes to send back to the NFS RPC messaging system
 */

/*
 * These routines translate the (relatively) useful return codes back onto
 * the few return codes which the nlm subsystem wishes to transmit
 */

enum nlm_stats
do_test(struct file_lock *fl, struct file_lock **conflicting_fl)
{
	enum partialfilelock_status pfsret;
	enum nlm_stats retval;

	debuglog("Entering do_test...\n");

	pfsret = test_partialfilelock(fl, conflicting_fl);

	switch (pfsret) {
	case PFL_GRANTED:
		debuglog("PFL test lock granted\n");
		dump_filelock(fl);
		dump_filelock(*conflicting_fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
		break;
	case PFL_GRANTED_DUPLICATE:
		debuglog("PFL test lock granted--duplicate id detected\n");
		dump_filelock(fl);
		dump_filelock(*conflicting_fl);
		debuglog("Clearing conflicting_fl for call semantics\n");
		*conflicting_fl = NULL;
		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
		break;
	case PFL_NFSDENIED:
	case PFL_HWDENIED:
		debuglog("PFL test lock denied\n");
		dump_filelock(fl);
		dump_filelock(*conflicting_fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
		break;
	case PFL_NFSRESERR:
	case PFL_HWRESERR:
		debuglog("PFL test lock resource fail\n");
		dump_filelock(fl);
		dump_filelock(*conflicting_fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
		break;
	default:
		debuglog("PFL test lock *FAILED*\n");
		dump_filelock(fl);
		dump_filelock(*conflicting_fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
		break;
	}

	debuglog("Exiting do_test...\n");

	return retval;
}
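/*
 * The LOCK_V4 flag selects between the two parallel NLM status code
 * sets: nlm4_* codes for protocol version 4 clients and nlm_* codes for
 * older clients.  The same translation pattern repeats in do_lock() and
 * do_unlock() below.
 */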
/*
 * do_lock: Try to acquire a lock
 *
 * This routine makes a distinction between NLM versions.  I am pretty
 * convinced that this should be abstracted out and bounced up a level
 */

enum nlm_stats
do_lock(struct file_lock *fl)
{
	enum partialfilelock_status pfsret;
	enum nlm_stats retval;

	debuglog("Entering do_lock...\n");

	pfsret = lock_partialfilelock(fl);

	switch (pfsret) {
	case PFL_GRANTED:
		debuglog("PFL lock granted");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
		break;
	case PFL_GRANTED_DUPLICATE:
		debuglog("PFL lock granted--duplicate id detected");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
		break;
	case PFL_NFSDENIED:
	case PFL_HWDENIED:
		debuglog("PFL_NFS lock denied");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
		break;
	case PFL_NFSBLOCKED:
	case PFL_HWBLOCKED:
		debuglog("PFL_NFS blocking lock denied.  Queued.\n");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_blocked : nlm_blocked;
		break;
	case PFL_NFSRESERR:
	case PFL_HWRESERR:
		debuglog("PFL lock resource allocation fail\n");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
		break;
	default:
		debuglog("PFL lock *FAILED*");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
		break;
	}

	debuglog("Exiting do_lock...\n");

	return retval;
}

enum nlm_stats
do_unlock(struct file_lock *fl)
{
	enum partialfilelock_status pfsret;
	enum nlm_stats retval;

	debuglog("Entering do_unlock...\n");
	pfsret = unlock_partialfilelock(fl);

	switch (pfsret) {
	case PFL_GRANTED:
		debuglog("PFL unlock granted");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
		break;
	case PFL_NFSDENIED:
	case PFL_HWDENIED:
		debuglog("PFL_NFS unlock denied");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
		break;
	case PFL_NFSDENIED_NOLOCK:
	case PFL_HWDENIED_NOLOCK:
		debuglog("PFL_NFS no lock found\n");
		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
		break;
	case PFL_NFSRESERR:
	case PFL_HWRESERR:
		debuglog("PFL unlock resource failure");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
		break;
	default:
		debuglog("PFL unlock *FAILED*");
		dump_filelock(fl);
		retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
		break;
	}

	debuglog("Exiting do_unlock...\n");

	return retval;
}

/*
 * do_clear
 *
 * This routine is non-existent because it doesn't have a return code.
 * It is here for completeness in case someone *does* need to do return
 * codes later.  A decent compiler should optimize this away.
 */

void
do_clear(const char *hostname)
{

	clear_partialfilelock(hostname);
}

/*
 * The following routines are all called from the code which the
 * RPC layer invokes
 */

/*
 * testlock(): inform the caller if the requested lock would be granted
 *
 * returns NULL if lock would be granted
 * returns pointer to a conflicting nlm4_holder if not
 */

struct nlm4_holder *
testlock(struct nlm4_lock *lock, bool_t exclusive, int flags __unused)
{
	struct file_lock test_fl, *conflicting_fl;

	bzero(&test_fl, sizeof(test_fl));

	bcopy(lock->fh.n_bytes, &(test_fl.filehandle), sizeof(fhandle_t));
	copy_nlm4_lock_to_nlm4_holder(lock, exclusive, &test_fl.client);

	siglock();
	do_test(&test_fl, &conflicting_fl);

	if (conflicting_fl == NULL) {
		debuglog("No conflicting lock found\n");
		sigunlock();
		return NULL;
	} else {
		debuglog("Found conflicting lock\n");
		dump_filelock(conflicting_fl);
		sigunlock();
		return (&conflicting_fl->client);
	}
}

/*
 * getlock: try to acquire the lock.
 * If file is already locked and we can sleep, put the lock in the list with
 * status LKST_WAITING; it'll be processed later.
 * Otherwise try to lock.  If we're allowed to block, fork a child which
 * will do the blocking lock.
 */
enum nlm_stats
getlock(nlm4_lockargs *lckarg, struct svc_req *rqstp, const int flags)
{
	struct file_lock *newfl;
	enum nlm_stats retval;

	debuglog("Entering getlock...\n");

	if (grace_expired == 0 && lckarg->reclaim == 0)
		return (flags & LOCK_V4) ?
		    nlm4_denied_grace_period : nlm_denied_grace_period;

	/* allocate new file_lock for this request */
	newfl = allocate_file_lock(&lckarg->alock.oh, &lckarg->cookie,
	    (struct sockaddr *)svc_getrpccaller(rqstp->rq_xprt)->buf,
	    lckarg->alock.caller_name);
	if (newfl == NULL) {
		syslog(LOG_NOTICE, "lock allocate failed: %s", strerror(errno));
		/* failed */
		return (flags & LOCK_V4) ?
		    nlm4_denied_nolocks : nlm_denied_nolocks;
	}

	if (lckarg->alock.fh.n_len != sizeof(fhandle_t)) {
		debuglog("received fhandle size %d, local size %d",
		    lckarg->alock.fh.n_len, (int)sizeof(fhandle_t));
	}

	fill_file_lock(newfl, (fhandle_t *)lckarg->alock.fh.n_bytes,
	    lckarg->exclusive, lckarg->alock.svid, lckarg->alock.l_offset,
	    lckarg->alock.l_len,
	    lckarg->state, 0, flags, lckarg->block);

	/*
	 * newfl is now fully constructed and deallocate_file_lock
	 * can now be used to delete it
	 */

	siglock();
	debuglog("Pointer to new lock is %p\n", newfl);

	retval = do_lock(newfl);

	debuglog("Pointer to new lock is %p\n", newfl);
	sigunlock();

	switch (retval) {
	case nlm4_granted:
		/* case nlm_granted: is the same as nlm4_granted */
		/* do_mon(lckarg->alock.caller_name); */
		break;
	case nlm4_blocked:
		/* case nlm_blocked: is the same as nlm4_blocked */
		/* do_mon(lckarg->alock.caller_name); */
		break;
	default:
		deallocate_file_lock(newfl);
		break;
	}

	debuglog("Exiting getlock...\n");

	return retval;
}


/* unlock a filehandle */
enum nlm_stats
unlock(nlm4_lock *lock, const int flags __unused)
{
	struct file_lock fl;
	enum nlm_stats err;

	siglock();

	debuglog("Entering unlock...\n");

	bzero(&fl, sizeof(struct file_lock));
	bcopy(lock->fh.n_bytes, &fl.filehandle, sizeof(fhandle_t));

	copy_nlm4_lock_to_nlm4_holder(lock, 0, &fl.client);

	err = do_unlock(&fl);

	sigunlock();

	debuglog("Exiting unlock...\n");

	return err;
}
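/*
 * Ownership note for getlock(): on nlm4_granted the new file_lock has
 * been inserted into the NFS lock list, and on nlm4_blocked it has been
 * queued on the blocked list, so in both cases the lists now own it.
 * Every other outcome leaves newfl orphaned, which is why the default
 * case above frees it.
 */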
/*
 * XXX: The following monitor/unmonitor routines have not been
 * extensively tested (i.e., no regression script exists the way it
 * does for the locking sections).
 */

/*
 * monitor_lock_host: monitor lock hosts locally with a ref count and
 * inform statd
 */
void
monitor_lock_host(const char *hostname)
{
	struct host *ihp, *nhp;
	struct mon smon;
	struct sm_stat_res sres;
	int rpcret, statflag;
	size_t n;

	rpcret = 0;
	statflag = 0;

	LIST_FOREACH(ihp, &hostlst_head, hostlst) {
		if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) {
			/* Host is already monitored, bump refcount */
			++ihp->refcnt;
			/* Host should only be in the monitor list once */
			return;
		}
	}

	/* Host is not yet monitored, add it */
	n = strnlen(hostname, SM_MAXSTRLEN);
	if (n == SM_MAXSTRLEN) {
		/* Name is unterminated within the protocol limit; give up. */
		return;
	}
	/*
	 * struct host ends in a variable-length name, so allocate only
	 * the bytes this particular name needs (plus the terminator).
	 */
	nhp = malloc(sizeof(*nhp) - sizeof(nhp->name) + n + 1);
	if (nhp == NULL) {
		debuglog("Unable to allocate entry for statd mon\n");
		return;
	}

	/* Allocated new host entry, now fill the fields */
	memcpy(nhp->name, hostname, n);
	nhp->name[n] = 0;
	nhp->refcnt = 1;
	debuglog("Locally monitoring host %.16s\n", hostname);

	debuglog("Attempting to tell statd\n");

	bzero(&smon, sizeof(smon));

	smon.mon_id.mon_name = nhp->name;
	smon.mon_id.my_id.my_name = "localhost";

	smon.mon_id.my_id.my_prog = NLM_PROG;
	smon.mon_id.my_id.my_vers = NLM_SM;
	smon.mon_id.my_id.my_proc = NLM_SM_NOTIFY;

	rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_MON,
	    (xdrproc_t)xdr_mon, &smon,
	    (xdrproc_t)xdr_sm_stat_res, &sres);

	if (rpcret == 0) {
		if (sres.res_stat == stat_fail) {
			debuglog("Statd call failed\n");
			statflag = 0;
		} else {
			statflag = 1;
		}
	} else {
		debuglog("RPC call to statd failed with return value: %d\n",
		    rpcret);
		statflag = 0;
	}

	/* Only track the host locally if statd agreed to monitor it. */
	if (statflag == 1) {
		LIST_INSERT_HEAD(&hostlst_head, nhp, hostlst);
	} else {
		free(nhp);
	}
}

/*
 * unmonitor_lock_host: clear monitor ref counts and inform statd when gone
 */
void
unmonitor_lock_host(char *hostname)
{
	struct host *ihp;
	struct mon_id smon_id;
	struct sm_stat smstat;
	int rpcret;

	rpcret = 0;

	LIST_FOREACH(ihp, &hostlst_head, hostlst) {
		if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) {
			/* Host is monitored, drop one reference */
			--ihp->refcnt;
			/* Host should only be in the monitor list once */
			break;
		}
	}

	if (ihp == NULL) {
		debuglog("Could not find host %.16s in mon list\n", hostname);
		return;
	}

	if (ihp->refcnt > 0)
		return;

	if (ihp->refcnt < 0) {
		debuglog("Negative refcount!: %d\n", ihp->refcnt);
	}

	debuglog("Attempting to unmonitor host %.16s\n", hostname);

	bzero(&smon_id, sizeof(smon_id));

	smon_id.mon_name = hostname;
	smon_id.my_id.my_name = "localhost";
	smon_id.my_id.my_prog = NLM_PROG;
	smon_id.my_id.my_vers = NLM_SM;
	smon_id.my_id.my_proc = NLM_SM_NOTIFY;

	rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_UNMON,
	    (xdrproc_t)xdr_mon_id, &smon_id,
	    (xdrproc_t)xdr_sm_stat, &smstat);

	if (rpcret != 0) {
		debuglog("RPC call to unmonitor statd failed with "
		    "return value: %d\n", rpcret);
	}

	LIST_REMOVE(ihp, hostlst);
	free(ihp);
}
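/*
 * Illustrative sketch (not compiled in) of the refcount pairing the
 * two routines above implement; the hostname is hypothetical.
 */
#if 0
static void
example_monitor_refcount(void)
{
	static char host[] = "client.example.com";

	monitor_lock_host(host);	/* refcnt 0 -> 1, SM_MON sent to statd */
	monitor_lock_host(host);	/* refcnt 1 -> 2, no RPC traffic */
	unmonitor_lock_host(host);	/* refcnt 2 -> 1, no RPC traffic */
	unmonitor_lock_host(host);	/* refcnt 1 -> 0, SM_UNMON sent, entry freed */
}
#endif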
/*
 * notify: Clear all locks from a host if statd complains
 *
 * XXX: This routine has not been thoroughly tested.  However, neither
 * had the old one been.  It used to compare the statd crash state counter
 * to the current lock state.  The upshot of this was that it basically
 * cleared all locks from the specified host 99% of the time (with the
 * other 1% being a bug).  Consequently, the assumption is that clearing
 * all locks from a host when notified by statd is acceptable.
 *
 * Please note that this routine skips the usual level of redirection
 * through a do_* type routine.  This introduces a possible level of
 * error and might better be written as a do_notify, taking this one out.
 */

void
notify(const char *hostname, const int state)
{
	debuglog("notify from %s, new state %d\n", hostname, state);

	siglock();
	do_clear(hostname);
	sigunlock();

	debuglog("Leaving notify\n");
}
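/*
 * Sketch (not compiled in) of the do_notify wrapper suggested above;
 * it would restore the usual RPC-layer -> do_* indirection and leave
 * room for a return code later.
 */
#if 0
static void
do_notify(const char *hostname)
{
	clear_partialfilelock(hostname);
}
#endif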
" (async)":""); 2212 if (fl->flags & LOCK_ASYNC) { 2213 success = clnt_call(cli, NLM_GRANTED_MSG, 2214 (xdrproc_t)xdr_nlm_testargs, &res, 2215 (xdrproc_t)xdr_void, &dummy, timeo); 2216 } else { 2217 success = clnt_call(cli, NLM_GRANTED, 2218 (xdrproc_t)xdr_nlm_testargs, &res, 2219 (xdrproc_t)xdr_nlm_res, &retval, timeo); 2220 } 2221 } 2222 if (debug_level > 2) 2223 debuglog("clnt_call returns %d(%s) for granted", 2224 success, clnt_sperrno(success)); 2225 2226 } 2227 2228 /* 2229 * Routines below here have not been modified in the overhaul 2230 */ 2231 2232 /* 2233 * Are these two routines still required since lockd is not spawning off 2234 * children to service locks anymore? Presumably they were originally 2235 * put in place to prevent a one child from changing the lock list out 2236 * from under another one. 2237 */ 2238 2239 void 2240 siglock(void) 2241 { 2242 sigset_t block; 2243 2244 sigemptyset(&block); 2245 sigaddset(&block, SIGCHLD); 2246 2247 if (sigprocmask(SIG_BLOCK, &block, NULL) < 0) { 2248 syslog(LOG_WARNING, "siglock failed: %s", strerror(errno)); 2249 } 2250 } 2251 2252 void 2253 sigunlock(void) 2254 { 2255 sigset_t block; 2256 2257 sigemptyset(&block); 2258 sigaddset(&block, SIGCHLD); 2259 2260 if (sigprocmask(SIG_UNBLOCK, &block, NULL) < 0) { 2261 syslog(LOG_WARNING, "sigunlock failed: %s", strerror(errno)); 2262 } 2263 } 2264 2265 2266