/*	$NetBSD: lockd_lock.c,v 1.5 2000/11/21 03:47:41 enami Exp $	*/

/*
 * Copyright (c) 2001 Andrew P. Lentvorski, Jr.
 * Copyright (c) 2000 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define LOCKD_DEBUG

#include <stdio.h>
#ifdef LOCKD_DEBUG
#include <stdarg.h>
#endif
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <syslog.h>
#include <errno.h>
#include <string.h>
#include <signal.h>
#include <rpc/rpc.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/wait.h>
#include <rpcsvc/sm_inter.h>
#include <rpcsvc/nlm_prot.h>
#include "lockd_lock.h"
#include "lockd.h"

#define MAXOBJECTSIZE 64
#define MAXBUFFERSIZE 1024

/*
 * A set of utilities for managing file locking
 *
 * XXX: All locks are in a linked list; a better structure should be used
 * to improve search/access efficiency.
 */

/* struct describing a lock */
struct file_lock {
    LIST_ENTRY(file_lock) nfslocklist;
    fhandle_t filehandle;      /* NFS filehandle */
    struct sockaddr *addr;
    struct nlm4_holder client; /* lock holder */
    /* XXX: client_cookie used *only* in send_granted */
    netobj client_cookie;      /* cookie sent by the client */
    int nsm_status;            /* status from the remote lock manager */
    int status;                /* lock status, see below */
    int flags;                 /* lock flags, see lockd_lock.h */
    int blocking;              /* blocking lock or not */
    char client_name[SM_MAXSTRLEN]; /* client_name is really variable
                                       length and must be last! */
};
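/*
 * Note on the trailing client_name member: allocate_file_lock() below
 * sizes the allocation as
 *
 *     malloc(sizeof(*newfl) - sizeof(newfl->client_name) + n + 1)
 *
 * so only n + 1 bytes (the NUL-terminated caller name) are actually
 * reserved for client_name.  Any member added after client_name would
 * land past the end of the allocation, which is why it must stay last.
 */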
LIST_HEAD(nfslocklist_head, file_lock);
struct nfslocklist_head nfslocklist_head = LIST_HEAD_INITIALIZER(nfslocklist_head);

LIST_HEAD(blockedlocklist_head, file_lock);
struct blockedlocklist_head blockedlocklist_head = LIST_HEAD_INITIALIZER(blockedlocklist_head);

/* lock status */
#define LKST_LOCKED     1 /* lock is locked */
/* XXX: Is this flag file-specific or lock-specific? */
#define LKST_WAITING    2 /* file is already locked by another host */
#define LKST_PROCESSING 3 /* child is trying to acquire the lock */
#define LKST_DYING      4 /* must die when we get news from the child */

/* struct describing a monitored host */
struct host {
    LIST_ENTRY(host) hostlst;
    int refcnt;
    char name[SM_MAXSTRLEN]; /* name is really variable length and
                                must be last! */
};
/* list of hosts we monitor */
LIST_HEAD(hostlst_head, host);
struct hostlst_head hostlst_head = LIST_HEAD_INITIALIZER(hostlst_head);

/*
 * File monitoring handlers
 * XXX: These might be able to be removed when kevent support
 * is placed into the hardware lock/unlock routines.  (ie.
 * let the kernel do all the file monitoring)
 */

/* Struct describing a monitored file */
struct monfile {
    LIST_ENTRY(monfile) monfilelist;
    fhandle_t filehandle; /* Local access filehandle */
    int fd;               /* file descriptor: remains open until unlock! */
    int refcount;
    int exclusive;
};

/* List of files we monitor */
LIST_HEAD(monfilelist_head, monfile);
struct monfilelist_head monfilelist_head = LIST_HEAD_INITIALIZER(monfilelist_head);

static int debugdelay = 0;

enum nfslock_status { NFS_GRANTED = 0, NFS_GRANTED_DUPLICATE,
                      NFS_DENIED, NFS_DENIED_NOLOCK,
                      NFS_RESERR };

enum hwlock_status { HW_GRANTED = 0, HW_GRANTED_DUPLICATE,
                     HW_DENIED, HW_DENIED_NOLOCK,
                     HW_STALEFH, HW_READONLY, HW_RESERR };

enum partialfilelock_status { PFL_GRANTED=0, PFL_GRANTED_DUPLICATE, PFL_DENIED,
    PFL_NFSDENIED, PFL_NFSBLOCKED, PFL_NFSDENIED_NOLOCK, PFL_NFSRESERR,
    PFL_HWDENIED, PFL_HWBLOCKED, PFL_HWDENIED_NOLOCK, PFL_HWRESERR };

enum LFLAGS {LEDGE_LEFT, LEDGE_LBOUNDARY, LEDGE_INSIDE, LEDGE_RBOUNDARY, LEDGE_RIGHT};
enum RFLAGS {REDGE_LEFT, REDGE_LBOUNDARY, REDGE_INSIDE, REDGE_RBOUNDARY, REDGE_RIGHT};
/* XXX: WARNING! I HAVE OVERLOADED THIS STATUS ENUM!  SPLIT IT APART INTO TWO */
enum split_status {SPL_DISJOINT=0, SPL_LOCK1=1, SPL_LOCK2=2, SPL_CONTAINED=4, SPL_RESERR=8};

enum partialfilelock_status lock_partialfilelock(struct file_lock *fl);

void send_granted(struct file_lock *fl, int opcode);
void siglock(void);
void sigunlock(void);
void monitor_lock_host(const char *hostname);
void unmonitor_lock_host(char *hostname);

void copy_nlm4_lock_to_nlm4_holder(const struct nlm4_lock *src,
    const bool_t exclusive, struct nlm4_holder *dest);
struct file_lock *allocate_file_lock(const netobj *lockowner,
    const netobj *matchcookie, const struct sockaddr *addr,
    const char *caller_name);
void deallocate_file_lock(struct file_lock *fl);
void fill_file_lock(struct file_lock *fl, const fhandle_t *fh,
    const bool_t exclusive, const int32_t svid,
    const u_int64_t offset, const u_int64_t len,
    const int state, const int status, const int flags, const int blocking);
int regions_overlap(const u_int64_t start1, const u_int64_t len1,
    const u_int64_t start2, const u_int64_t len2);
enum split_status region_compare(const u_int64_t starte, const u_int64_t lene,
    const u_int64_t startu, const u_int64_t lenu,
    u_int64_t *start1, u_int64_t *len1, u_int64_t *start2, u_int64_t *len2);
int same_netobj(const netobj *n0, const netobj *n1);
int same_filelock_identity(const struct file_lock *fl0,
    const struct file_lock *fl1);

static void debuglog(char const *fmt, ...);
void dump_static_object(const unsigned char *object, const int sizeof_object,
    unsigned char *hbuff, const int sizeof_hbuff,
    unsigned char *cbuff, const int sizeof_cbuff);
void dump_netobj(const struct netobj *nobj);
void dump_filelock(const struct file_lock *fl);
struct file_lock *get_lock_matching_unlock(const struct file_lock *fl);
enum nfslock_status test_nfslock(const struct file_lock *fl,
    struct file_lock **conflicting_fl);
enum nfslock_status lock_nfslock(struct file_lock *fl);
enum nfslock_status delete_nfslock(struct file_lock *fl);
enum nfslock_status unlock_nfslock(const struct file_lock *fl,
    struct file_lock **released_lock, struct file_lock **left_lock,
    struct file_lock **right_lock);
enum hwlock_status lock_hwlock(struct file_lock *fl);
enum split_status split_nfslock(const struct file_lock *exist_lock,
    const struct file_lock *unlock_lock, struct file_lock **left_lock,
    struct file_lock **right_lock);
int duplicate_block(struct file_lock *fl);
void add_blockingfilelock(struct file_lock *fl);
enum hwlock_status unlock_hwlock(const struct file_lock *fl);
enum hwlock_status test_hwlock(const struct file_lock *fl,
    struct file_lock **conflicting_fl);
void remove_blockingfilelock(struct file_lock *fl);
void clear_blockingfilelock(const char *hostname);
void retry_blockingfilelocklist(void);
enum partialfilelock_status unlock_partialfilelock(
    const struct file_lock *fl);
void clear_partialfilelock(const char *hostname);
enum partialfilelock_status test_partialfilelock(
    const struct file_lock *fl, struct file_lock **conflicting_fl);
enum nlm_stats do_test(struct file_lock *fl,
    struct file_lock **conflicting_fl);
enum nlm_stats do_unlock(struct file_lock *fl);
enum nlm_stats do_lock(struct file_lock *fl);
void do_clear(const char *hostname);
size_t strnlen(const char *, size_t);
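/*
 * Debug conventions used below (derived from the checks in the code):
 * debug_level 0 is silent, level 1 and up enables debuglog(), and
 * level 2 and up additionally enables the object/netobj/filelock
 * dumpers.  debugdelay can be set nonzero to slow the daemon down
 * when chasing races.
 */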
static void
debuglog(char const *fmt, ...)
{
    va_list ap;

    if (debug_level < 1) {
        return;
    }

    sleep(debugdelay);

    va_start(ap, fmt);
    vsyslog(LOG_DEBUG, fmt, ap);
    va_end(ap);
}

void
dump_static_object(const unsigned char *object, const int size_object,
    unsigned char *hbuff, const int size_hbuff,
    unsigned char *cbuff, const int size_cbuff)
{
    int i, objectsize;

    if (debug_level < 2) {
        return;
    }

    objectsize = size_object;

    if (objectsize == 0) {
        debuglog("object is size 0\n");
    } else {
        if (objectsize > MAXOBJECTSIZE) {
            debuglog("Object of size %d being clamped"
                " to size %d\n", objectsize, MAXOBJECTSIZE);
            objectsize = MAXOBJECTSIZE;
        }

        if (hbuff != NULL) {
            if (size_hbuff < objectsize*2+1) {
                debuglog("Hbuff not large enough."
                    " Increase size\n");
            } else {
                for (i = 0; i < objectsize; i++) {
                    sprintf(hbuff+i*2, "%02x", *(object+i));
                }
                *(hbuff+i*2) = '\0';
            }
        }

        if (cbuff != NULL) {
            if (size_cbuff < objectsize+1) {
                debuglog("Cbuff not large enough."
                    " Increase size\n");
            } else {
                for (i = 0; i < objectsize; i++) {
                    if (*(object+i) >= 32 && *(object+i) <= 127) {
                        *(cbuff+i) = *(object+i);
                    } else {
                        *(cbuff+i) = '.';
                    }
                }
                *(cbuff+i) = '\0';
            }
        }
    }
}

void
dump_netobj(const struct netobj *nobj)
{
    char hbuff[MAXBUFFERSIZE*2];
    char cbuff[MAXBUFFERSIZE];

    if (debug_level < 2) {
        return;
    }

    if (nobj == NULL) {
        debuglog("Null netobj pointer\n");
    } else if (nobj->n_len == 0) {
        debuglog("Size zero netobj\n");
    } else {
        dump_static_object(nobj->n_bytes, nobj->n_len,
            hbuff, sizeof(hbuff), cbuff, sizeof(cbuff));
        debuglog("netobj: len: %d data: %s ::: %s\n",
            nobj->n_len, hbuff, cbuff);
    }
}

/* #define DUMP_FILELOCK_VERBOSE */
void
dump_filelock(const struct file_lock *fl)
{
#ifdef DUMP_FILELOCK_VERBOSE
    char hbuff[MAXBUFFERSIZE*2];
    char cbuff[MAXBUFFERSIZE];
#endif

    if (debug_level < 2) {
        return;
    }

    if (fl != NULL) {
        debuglog("Dumping file lock structure @ %p\n", fl);

#ifdef DUMP_FILELOCK_VERBOSE
        dump_static_object((unsigned char *)&fl->filehandle,
            sizeof(fl->filehandle), hbuff, sizeof(hbuff),
            cbuff, sizeof(cbuff));
        debuglog("Filehandle: %8s ::: %8s\n", hbuff, cbuff);
#endif

        debuglog("Dumping nlm4_holder:\n"
            "exc: %x svid: %x offset:len %llx:%llx\n",
            fl->client.exclusive, fl->client.svid,
            fl->client.l_offset, fl->client.l_len);

#ifdef DUMP_FILELOCK_VERBOSE
        debuglog("Dumping client identity:\n");
        dump_netobj(&fl->client.oh);

        debuglog("Dumping client cookie:\n");
        dump_netobj(&fl->client_cookie);

        debuglog("nsm: %d status: %d flags: %d svid: %x"
            " client_name: %s\n", fl->nsm_status, fl->status,
            fl->flags, fl->client.svid, fl->client_name);
#endif
    } else {
        debuglog("NULL file lock structure\n");
    }
}

void
copy_nlm4_lock_to_nlm4_holder(const struct nlm4_lock *src,
    const bool_t exclusive, struct nlm4_holder *dest)
{

    dest->exclusive = exclusive;
    dest->oh.n_len = src->oh.n_len;
    dest->oh.n_bytes = src->oh.n_bytes;
    dest->svid = src->svid;
    dest->l_offset = src->l_offset;
    dest->l_len = src->l_len;
}
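/*
 * Note that copy_nlm4_lock_to_nlm4_holder() is a shallow copy: the
 * owner handle (oh.n_bytes) still points into the source nlm4_lock
 * rather than into freshly allocated storage.  That is fine for the
 * stack-local file_locks built in testlock() and unlock() below, but
 * such a holder must not outlive the RPC arguments it was copied from.
 */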
size_t
strnlen(const char *s, size_t len)
{
    size_t n;

    for (n = 0; n < len && s[n] != 0; n++)
        ;
    return n;
}

/*
 * allocate_file_lock: Create a lock with the given parameters
 */

struct file_lock *
allocate_file_lock(const netobj *lockowner, const netobj *matchcookie,
    const struct sockaddr *addr, const char *caller_name)
{
    struct file_lock *newfl;
    size_t n;

    /* Beware of rubbish input! */
    n = strnlen(caller_name, SM_MAXSTRLEN);
    if (n == SM_MAXSTRLEN) {
        return NULL;
    }

    newfl = malloc(sizeof(*newfl) - sizeof(newfl->client_name) + n + 1);
    if (newfl == NULL) {
        return NULL;
    }
    bzero(newfl, sizeof(*newfl) - sizeof(newfl->client_name));
    memcpy(newfl->client_name, caller_name, n);
    newfl->client_name[n] = 0;

    newfl->client.oh.n_bytes = malloc(lockowner->n_len);
    if (newfl->client.oh.n_bytes == NULL) {
        free(newfl);
        return NULL;
    }
    newfl->client.oh.n_len = lockowner->n_len;
    bcopy(lockowner->n_bytes, newfl->client.oh.n_bytes, lockowner->n_len);

    newfl->client_cookie.n_bytes = malloc(matchcookie->n_len);
    if (newfl->client_cookie.n_bytes == NULL) {
        free(newfl->client.oh.n_bytes);
        free(newfl);
        return NULL;
    }
    newfl->client_cookie.n_len = matchcookie->n_len;
    bcopy(matchcookie->n_bytes, newfl->client_cookie.n_bytes, matchcookie->n_len);

    newfl->addr = malloc(addr->sa_len);
    if (newfl->addr == NULL) {
        free(newfl->client_cookie.n_bytes);
        free(newfl->client.oh.n_bytes);
        free(newfl);
        return NULL;
    }
    memcpy(newfl->addr, addr, addr->sa_len);

    return newfl;
}

/*
 * fill_file_lock: Force creation of a valid file lock
 */
void
fill_file_lock(struct file_lock *fl, const fhandle_t *fh,
    const bool_t exclusive, const int32_t svid,
    const u_int64_t offset, const u_int64_t len,
    const int state, const int status, const int flags, const int blocking)
{
    bcopy(fh, &fl->filehandle, sizeof(fhandle_t));

    fl->client.exclusive = exclusive;
    fl->client.svid = svid;
    fl->client.l_offset = offset;
    fl->client.l_len = len;

    fl->nsm_status = state;
    fl->status = status;
    fl->flags = flags;
    fl->blocking = blocking;
}

/*
 * deallocate_file_lock: Free all storage associated with a file lock
 */
void
deallocate_file_lock(struct file_lock *fl)
{
    free(fl->addr);
    free(fl->client.oh.n_bytes);
    free(fl->client_cookie.n_bytes);
    free(fl);
}
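/*
 * The intended lifecycle of a file_lock, as used by getlock() below
 * (a minimal sketch; error handling omitted):
 *
 *     fl = allocate_file_lock(&args->alock.oh, &args->cookie,
 *         addr, args->alock.caller_name);
 *     fill_file_lock(fl, fh, exclusive, svid, offset, len,
 *         state, 0, flags, blocking);
 *     ... hand fl to do_lock(), which either links it into the
 *         lock lists or leaves it free ...
 *     deallocate_file_lock(fl);   // only if do_lock did not keep it
 *
 * allocate_file_lock() deep-copies the owner handle, cookie, and
 * address precisely so that deallocate_file_lock() can free them
 * unconditionally.
 */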
/*
 * regions_overlap(): This function examines the two provided regions for
 * overlap.
 */
int
regions_overlap(const u_int64_t start1, const u_int64_t len1,
    const u_int64_t start2, const u_int64_t len2)
{
    u_int64_t d1, d2, d3, d4;
    enum split_status result;

    debuglog("Entering region overlap with vals: %llu:%llu--%llu:%llu\n",
        start1, len1, start2, len2);

    result = region_compare(start1, len1, start2, len2,
        &d1, &d2, &d3, &d4);

    debuglog("Exiting region overlap with val: %d\n", result);

    if (result == SPL_DISJOINT) {
        return 0;
    } else {
        return 1;
    }
}

/*
 * region_compare(): Examine lock regions and split appropriately
 *
 * XXX: Fix 64 bit overflow problems
 * XXX: Check to make sure I got *ALL* the cases.
 * XXX: This DESPERATELY needs a regression test.
 */
enum split_status
region_compare(const u_int64_t starte, const u_int64_t lene,
    const u_int64_t startu, const u_int64_t lenu,
    u_int64_t *start1, u_int64_t *len1, u_int64_t *start2, u_int64_t *len2)
{
    /*
     * Please pay attention to the sequential exclusions
     * of the if statements!!!
     */
    enum LFLAGS lflags;
    enum RFLAGS rflags;
    enum split_status retval;

    retval = SPL_DISJOINT;

    if (lene == 0 && lenu == 0) {
        /* Both locks are infinite */
        /* Examine left edge of unlocker */
        lflags = LEDGE_INSIDE;
        if (startu < starte) {
            lflags = LEDGE_LEFT;
        } else if (startu == starte) {
            lflags = LEDGE_LBOUNDARY;
        }

        rflags = REDGE_RBOUNDARY; /* Both are infinite */

        if (lflags == LEDGE_INSIDE) {
            *start1 = starte;
            *len1 = startu - starte;
        }

        if (lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) {
            retval = SPL_CONTAINED;
        } else {
            retval = SPL_LOCK1;
        }
    } else if (lene == 0 && lenu != 0) {
        /* Established lock is infinite */
        /* Examine left edge of unlocker */
        lflags = LEDGE_INSIDE;
        if (startu < starte) {
            lflags = LEDGE_LEFT;
        } else if (startu == starte) {
            lflags = LEDGE_LBOUNDARY;
        }

        /* Examine right edge of unlocker */
        if (startu + lenu < starte) {
            /* Right edge of unlocker left of established lock */
            rflags = REDGE_LEFT;
            return SPL_DISJOINT;
        } else if (startu + lenu == starte) {
            /* Right edge of unlocker on start of established lock */
            rflags = REDGE_LBOUNDARY;
            return SPL_DISJOINT;
        } else { /* Infinity is right of finity */
            /* Right edge of unlocker inside established lock */
            rflags = REDGE_INSIDE;
        }

        if (lflags == LEDGE_INSIDE) {
            *start1 = starte;
            *len1 = startu - starte;
            retval |= SPL_LOCK1;
        }

        if (rflags == REDGE_INSIDE) {
            /* Create right lock */
            *start2 = startu + lenu;
            *len2 = 0;
            retval |= SPL_LOCK2;
        }
    } else if (lene != 0 && lenu == 0) {
        /* Unlocker is infinite */
        /* Examine left edge of unlocker */
        lflags = LEDGE_RIGHT;
        if (startu < starte) {
            lflags = LEDGE_LEFT;
            retval = SPL_CONTAINED;
            return retval;
        } else if (startu == starte) {
            lflags = LEDGE_LBOUNDARY;
            retval = SPL_CONTAINED;
            return retval;
        } else if ((startu > starte) && (startu < starte + lene - 1)) {
            lflags = LEDGE_INSIDE;
        } else if (startu == starte + lene - 1) {
            lflags = LEDGE_RBOUNDARY;
        } else { /* startu > starte + lene - 1 */
            lflags = LEDGE_RIGHT;
            return SPL_DISJOINT;
        }

        rflags = REDGE_RIGHT; /* Infinity is right of finity */

        if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) {
            *start1 = starte;
            *len1 = startu - starte;
            retval |= SPL_LOCK1;
            return retval;
        }
    } else {
        /* Both locks are finite */

        /* Examine left edge of unlocker */
        lflags = LEDGE_RIGHT;
        if (startu < starte) {
            lflags = LEDGE_LEFT;
        } else if (startu == starte) {
            lflags = LEDGE_LBOUNDARY;
        } else if ((startu > starte) && (startu < starte + lene - 1)) {
            lflags = LEDGE_INSIDE;
        } else if (startu == starte + lene - 1) {
            lflags = LEDGE_RBOUNDARY;
        } else { /* startu > starte + lene - 1 */
            lflags = LEDGE_RIGHT;
            return SPL_DISJOINT;
        }

        /* Examine right edge of unlocker */
        if (startu + lenu < starte) {
            /* Right edge of unlocker left of established lock */
            rflags = REDGE_LEFT;
            return SPL_DISJOINT;
        } else if (startu + lenu == starte) {
            /* Right edge of unlocker on start of established lock */
            rflags = REDGE_LBOUNDARY;
            return SPL_DISJOINT;
        } else if (startu + lenu < starte + lene) {
            /* Right edge of unlocker inside established lock */
            rflags = REDGE_INSIDE;
        } else if (startu + lenu == starte + lene) {
            /* Right edge of unlocker on right edge of established lock */
            rflags = REDGE_RBOUNDARY;
        } else { /* startu + lenu > starte + lene */
            /* Right edge of unlocker is right of established lock */
            rflags = REDGE_RIGHT;
        }

        if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) {
            /* Create left lock */
            *start1 = starte;
            *len1 = (startu - starte);
            retval |= SPL_LOCK1;
        }

        if (rflags == REDGE_INSIDE) {
            /* Create right lock */
            *start2 = startu + lenu;
            *len2 = starte + lene - (startu + lenu);
            retval |= SPL_LOCK2;
        }

        if ((lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) &&
            (rflags == REDGE_RBOUNDARY || rflags == REDGE_RIGHT)) {
            retval = SPL_CONTAINED;
        }
    }
    return retval;
}
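/*
 * A worked region_compare() example (both regions finite):
 *
 *     established lock:  starte = 100, lene = 50   -> bytes [100, 149]
 *     unlock request:    startu = 120, lenu = 10   -> bytes [120, 129]
 *
 * The unlocker's left edge falls at LEDGE_INSIDE and its right edge at
 * REDGE_INSIDE, so both split outputs are produced:
 *
 *     *start1 = 100, *len1 = 20   -> surviving left piece [100, 119]
 *     *start2 = 130, *len2 = 20   -> surviving right piece [130, 149]
 *
 * and the return value is SPL_LOCK1|SPL_LOCK2.  A length of 0 means
 * "to end of file", which is why the infinite cases are handled
 * separately above.
 */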
/*
 * same_netobj: Compares the appropriate bits of a netobj for identity
 */
int
same_netobj(const netobj *n0, const netobj *n1)
{
    int retval;

    retval = 0;

    debuglog("Entering netobj identity check\n");

    if (n0->n_len == n1->n_len) {
        debuglog("Preliminary length check passed\n");
        retval = !bcmp(n0->n_bytes, n1->n_bytes, n0->n_len);
        debuglog("netobj %smatch\n", retval ? "" : "mis");
    }

    return (retval);
}

/*
 * same_filelock_identity: Compares the appropriate bits of a file_lock
 */
int
same_filelock_identity(const struct file_lock *fl0, const struct file_lock *fl1)
{
    int retval;

    retval = 0;

    debuglog("Checking filelock identity\n");

    /*
     * Check process ids and host information.
     */
    retval = (fl0->client.svid == fl1->client.svid &&
        same_netobj(&(fl0->client.oh), &(fl1->client.oh)));

    debuglog("Exiting checking filelock identity: retval: %d\n", retval);

    return (retval);
}

/*
 * Below here are routines associated with manipulating the NFS
 * lock list.
 */

/*
 * get_lock_matching_unlock: Return a lock which matches the given unlock lock
 * or NULL otherwise
 * XXX: It is a shame that this duplicates so much code from test_nfslock.
 */
struct file_lock *
get_lock_matching_unlock(const struct file_lock *fl)
{
    struct file_lock *ifl; /* Iterator */

    debuglog("Entering get_lock_matching_unlock\n");
    debuglog("********Dump of fl*****************\n");
    dump_filelock(fl);

    LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
        debuglog("Pointer to file lock: %p\n", ifl);

        debuglog("****Dump of ifl****\n");
        dump_filelock(ifl);
        debuglog("*******************\n");

        /*
         * XXX: It is conceivable that someone could use the NLM RPC
         * system to directly access filehandles.  This may be a
         * security hazard as the filehandle code may bypass normal
         * file access controls.
         */
        if (bcmp(&fl->filehandle, &ifl->filehandle, sizeof(fhandle_t)))
            continue;

        debuglog("get_lock_matching_unlock: Filehandles match, "
            "checking regions\n");

        /* Filehandles match, check for region overlap */
        if (!regions_overlap(fl->client.l_offset, fl->client.l_len,
            ifl->client.l_offset, ifl->client.l_len))
            continue;

        debuglog("get_lock_matching_unlock: Region overlap"
            " found %llu : %llu -- %llu : %llu\n",
            fl->client.l_offset, fl->client.l_len,
            ifl->client.l_offset, ifl->client.l_len);

        /* Regions overlap, check the identity */
        if (!same_filelock_identity(fl, ifl))
            continue;

        debuglog("get_lock_matching_unlock: Duplicate lock id.  Granting\n");
        return (ifl);
    }

    debuglog("Exiting get_lock_matching_unlock\n");

    return (NULL);
}
/*
 * test_nfslock: check for NFS lock in lock list
 *
 * This routine makes the following assumptions:
 *    1) Nothing will adjust the lock list during a lookup
 *
 * This routine has an interesting quirk which bit me hard.
 * The conflicting_fl is the pointer to the conflicting lock.
 * However, to modify the "*pointer* to the conflicting lock" rather
 * than the "conflicting lock itself" one must pass in a "pointer to
 * the pointer of the conflicting lock".  Gross.
 */

enum nfslock_status
test_nfslock(const struct file_lock *fl, struct file_lock **conflicting_fl)
{
    struct file_lock *ifl; /* Iterator */
    enum nfslock_status retval;

    debuglog("Entering test_nfslock\n");

    retval = NFS_GRANTED;
    (*conflicting_fl) = NULL;

    debuglog("Entering lock search loop\n");

    debuglog("***********************************\n");
    debuglog("Dumping match filelock\n");
    debuglog("***********************************\n");
    dump_filelock(fl);
    debuglog("***********************************\n");

    LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
        if (retval == NFS_DENIED)
            break;

        debuglog("Top of lock loop\n");
        debuglog("Pointer to file lock: %p\n", ifl);

        debuglog("***********************************\n");
        debuglog("Dumping test filelock\n");
        debuglog("***********************************\n");
        dump_filelock(ifl);
        debuglog("***********************************\n");

        /*
         * XXX: It is conceivable that someone could use the NLM RPC
         * system to directly access filehandles.  This may be a
         * security hazard as the filehandle code may bypass normal
         * file access controls.
         */
        if (bcmp(&fl->filehandle, &ifl->filehandle, sizeof(fhandle_t)))
            continue;

        debuglog("test_nfslock: filehandle match found\n");

        /* Filehandles match, check for region overlap */
        if (!regions_overlap(fl->client.l_offset, fl->client.l_len,
            ifl->client.l_offset, ifl->client.l_len))
            continue;

        debuglog("test_nfslock: Region overlap found"
            " %llu : %llu -- %llu : %llu\n",
            fl->client.l_offset, fl->client.l_len,
            ifl->client.l_offset, ifl->client.l_len);

        /* Regions overlap, check the exclusivity */
        if (!(fl->client.exclusive || ifl->client.exclusive))
            continue;

        debuglog("test_nfslock: Exclusivity failure: %d %d\n",
            fl->client.exclusive,
            ifl->client.exclusive);

        if (same_filelock_identity(fl, ifl)) {
            debuglog("test_nfslock: Duplicate id.  Granting\n");
            (*conflicting_fl) = ifl;
            retval = NFS_GRANTED_DUPLICATE;
        } else {
            /* locking attempt fails */
            debuglog("test_nfslock: Lock attempt failed\n");
            debuglog("Desired lock\n");
            dump_filelock(fl);
            debuglog("Conflicting lock\n");
            dump_filelock(ifl);
            (*conflicting_fl) = ifl;
            retval = NFS_DENIED;
        }
    }

    debuglog("Dumping file locks\n");
    debuglog("Exiting test_nfslock\n");

    return (retval);
}
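/*
 * Conflict rules implemented by the loop above, in table form:
 *
 *     requested \ established    shared      exclusive
 *     shared                     grant       deny*
 *     exclusive                  deny*       deny*
 *
 * (*) unless the two locks have the same owner identity (svid plus
 * owner handle), in which case the result is NFS_GRANTED_DUPLICATE.
 * Two overlapping shared locks never conflict, which is why the loop
 * skips ahead when neither lock is exclusive.
 */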
/*
 * lock_nfslock: attempt to create a lock in the NFS lock list
 *
 * This routine tests whether the lock will be granted and then adds
 * the entry to the lock list if so.
 *
 * Argument fl gets modified as its list housekeeping entries get modified
 * upon insertion into the NFS lock list
 *
 * This routine makes several assumptions:
 *    1) It is perfectly happy to grant a duplicate lock from the same pid.
 *       While this seems to be intuitively wrong, it is required for proper
 *       POSIX semantics during unlock.  It is absolutely imperative to not
 *       unlock the main lock before the two child locks are established.
 *       Thus, one has to be able to create duplicate locks over an
 *       existing lock.
 *    2) It currently accepts duplicate locks from the same id,pid
 */

enum nfslock_status
lock_nfslock(struct file_lock *fl)
{
    enum nfslock_status retval;
    struct file_lock *dummy_fl;

    dummy_fl = NULL;

    debuglog("Entering lock_nfslock...\n");

    retval = test_nfslock(fl, &dummy_fl);

    if (retval == NFS_GRANTED || retval == NFS_GRANTED_DUPLICATE) {
        debuglog("Inserting lock...\n");
        dump_filelock(fl);
        LIST_INSERT_HEAD(&nfslocklist_head, fl, nfslocklist);
    }

    debuglog("Exiting lock_nfslock...\n");

    return (retval);
}
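/*
 * Why duplicate locks must be grantable, by example.  Suppose a client
 * holds [0, 99] and unlocks [40, 49].  POSIX semantics demand that
 * [0, 39] and [50, 99] remain locked throughout.  The unlock path
 * below therefore first lock_nfslock()s the two surviving pieces
 * (which overlap the still-present original lock and so must be
 * granted as duplicates) and only then removes the original.  At no
 * instant is any byte of [0, 39] or [50, 99] unprotected.
 */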
/*
 * delete_nfslock: delete an NFS lock list entry
 *
 * This routine is used to delete a lock out of the NFS lock list
 * without regard to status, underlying locks, regions or anything else
 *
 * Note that this routine *does not deallocate memory* of the lock.
 * It just disconnects it from the list.  The lock can then be used
 * by other routines without fear of trashing the list.
 */

enum nfslock_status
delete_nfslock(struct file_lock *fl)
{

    LIST_REMOVE(fl, nfslocklist);

    return (NFS_GRANTED);
}

enum split_status
split_nfslock(const struct file_lock *exist_lock,
    const struct file_lock *unlock_lock,
    struct file_lock **left_lock, struct file_lock **right_lock)
{
    u_int64_t start1, len1, start2, len2;
    enum split_status spstatus;

    spstatus = region_compare(exist_lock->client.l_offset,
        exist_lock->client.l_len,
        unlock_lock->client.l_offset, unlock_lock->client.l_len,
        &start1, &len1, &start2, &len2);

    if ((spstatus & SPL_LOCK1) != 0) {
        *left_lock = allocate_file_lock(&exist_lock->client.oh,
            &exist_lock->client_cookie,
            exist_lock->addr, exist_lock->client_name);
        if (*left_lock == NULL) {
            debuglog("Unable to allocate resource for split 1\n");
            return SPL_RESERR;
        }

        fill_file_lock(*left_lock, &exist_lock->filehandle,
            exist_lock->client.exclusive, exist_lock->client.svid,
            start1, len1,
            exist_lock->nsm_status,
            exist_lock->status, exist_lock->flags, exist_lock->blocking);
    }

    if ((spstatus & SPL_LOCK2) != 0) {
        *right_lock = allocate_file_lock(&exist_lock->client.oh,
            &exist_lock->client_cookie,
            exist_lock->addr, exist_lock->client_name);
        if (*right_lock == NULL) {
            debuglog("Unable to allocate resource for split 2\n");
            if (*left_lock != NULL) {
                deallocate_file_lock(*left_lock);
            }
            return SPL_RESERR;
        }

        fill_file_lock(*right_lock, &exist_lock->filehandle,
            exist_lock->client.exclusive, exist_lock->client.svid,
            start2, len2,
            exist_lock->nsm_status,
            exist_lock->status, exist_lock->flags, exist_lock->blocking);
    }

    return spstatus;
}
enum nfslock_status
unlock_nfslock(const struct file_lock *fl, struct file_lock **released_lock,
    struct file_lock **left_lock, struct file_lock **right_lock)
{
    struct file_lock *mfl; /* Matching file lock */
    enum nfslock_status retval;
    enum split_status spstatus;

    debuglog("Entering unlock_nfslock\n");

    *released_lock = NULL;
    *left_lock = NULL;
    *right_lock = NULL;

    retval = NFS_DENIED_NOLOCK;

    debuglog("Attempting to match lock...\n");
    mfl = get_lock_matching_unlock(fl);

    if (mfl != NULL) {
        debuglog("Unlock matched.  Querying for split\n");

        spstatus = split_nfslock(mfl, fl, left_lock, right_lock);

        debuglog("Split returned %d %p %p %p %p\n", spstatus,
            mfl, fl, *left_lock, *right_lock);
        debuglog("********Split dumps********\n");
        dump_filelock(mfl);
        dump_filelock(fl);
        dump_filelock(*left_lock);
        dump_filelock(*right_lock);
        debuglog("********End Split dumps********\n");

        if (spstatus == SPL_RESERR) {
            if (*left_lock != NULL) {
                deallocate_file_lock(*left_lock);
                *left_lock = NULL;
            }

            if (*right_lock != NULL) {
                deallocate_file_lock(*right_lock);
                *right_lock = NULL;
            }

            return NFS_RESERR;
        }

        /* Insert new locks from split if required */
        if (*left_lock != NULL) {
            debuglog("Split left activated\n");
            LIST_INSERT_HEAD(&nfslocklist_head, *left_lock, nfslocklist);
        }

        if (*right_lock != NULL) {
            debuglog("Split right activated\n");
            LIST_INSERT_HEAD(&nfslocklist_head, *right_lock, nfslocklist);
        }

        /* Unlock the lock since it matches identity */
        LIST_REMOVE(mfl, nfslocklist);
        *released_lock = mfl;
        retval = NFS_GRANTED;
    }

    debuglog("Exiting unlock_nfslock\n");

    return retval;
}
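/*
 * Ordering note for unlock_nfslock(): the split pieces are inserted
 * into the lock list *before* the matched lock is removed, so the
 * surviving region stays continuously covered (see the POSIX example
 * above).  Also note that one unlock request can overlap several
 * established locks; unlock_nfslock() releases only the first match,
 * which is why unlock_partialfilelock() below calls it in a loop
 * until it returns NFS_DENIED_NOLOCK.
 */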
/*
 * Below here are the routines for manipulating the file lock directly
 * on the disk hardware itself
 */
enum hwlock_status
lock_hwlock(struct file_lock *fl)
{
    struct monfile *imf, *nmf;
    int lflags, flerror;

    /* Scan to see if filehandle already present */
    LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
        if (bcmp(&fl->filehandle, &imf->filehandle,
            sizeof(fl->filehandle)) == 0) {
            /* imf is the correct filehandle */
            break;
        }
    }

    /*
     * Filehandle already exists (we control the file)
     * *AND* NFS has already cleared the lock for availability
     * Grant it and bump the refcount.
     */
    if (imf != NULL) {
        ++(imf->refcount);
        return (HW_GRANTED);
    }

    /* No filehandle found, create and go */
    nmf = malloc(sizeof(struct monfile));
    if (nmf == NULL) {
        debuglog("hwlock resource allocation failure\n");
        return (HW_RESERR);
    }

    /* XXX: Is O_RDWR always the correct mode? */
    nmf->fd = fhopen(&fl->filehandle, O_RDWR);
    if (nmf->fd < 0) {
        debuglog("fhopen failed (from %16s): %32s\n",
            fl->client_name, strerror(errno));
        free(nmf);
        switch (errno) {
        case ESTALE:
            return (HW_STALEFH);
        case EROFS:
            return (HW_READONLY);
        default:
            return (HW_RESERR);
        }
    }

    /* File opened correctly, fill the monitor struct */
    bcopy(&fl->filehandle, &nmf->filehandle, sizeof(fl->filehandle));
    nmf->refcount = 1;
    nmf->exclusive = fl->client.exclusive;

    lflags = (nmf->exclusive == 1) ?
        (LOCK_EX | LOCK_NB) : (LOCK_SH | LOCK_NB);

    flerror = flock(nmf->fd, lflags);

    if (flerror != 0) {
        debuglog("flock failed (from %16s): %32s\n",
            fl->client_name, strerror(errno));
        close(nmf->fd);
        free(nmf);
        switch (errno) {
        case EAGAIN:
            return (HW_DENIED);
        case ESTALE:
            return (HW_STALEFH);
        case EROFS:
            return (HW_READONLY);
        default:
            return (HW_RESERR);
        }
    }

    /* File opened and locked */
    LIST_INSERT_HEAD(&monfilelist_head, nmf, monfilelist);

    debuglog("flock succeeded (from %16s)\n", fl->client_name);
    return (HW_GRANTED);
}
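/*
 * Granularity note: this "hardware" layer locks the *whole* underlying
 * file with fhopen(2)/flock(2) and reference-counts it per filehandle;
 * byte-range accounting lives only in the NFS lock list above.  A
 * rough sketch of the effect for two NLM byte-range locks on one file:
 *
 *     NLM lock [0, 9]     -> fhopen + flock(fd, LOCK_EX|LOCK_NB),
 *                            refcount = 1
 *     NLM lock [50, 59]   -> already monitored, refcount = 2
 *     NLM unlock [0, 9]   -> refcount = 1 (flock retained)
 *     NLM unlock [50, 59] -> refcount = 0, close(fd) drops the flock
 *
 * This is why local (non-NFS) processes can see overly restrictive
 * blocking, as the comment before lock_partialfilelock() explains.
 */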
enum hwlock_status
unlock_hwlock(const struct file_lock *fl)
{
    struct monfile *imf;

    debuglog("Entering unlock_hwlock\n");
    debuglog("Entering loop iteration\n");

    /* Scan to see if filehandle already present */
    LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
        if (bcmp(&fl->filehandle, &imf->filehandle,
            sizeof(fl->filehandle)) == 0) {
            /* imf is the correct filehandle */
            break;
        }
    }

    debuglog("Completed iteration.  Proceeding\n");

    if (imf == NULL) {
        /* No lock found */
        debuglog("Exiting unlock_hwlock (HW_DENIED_NOLOCK)\n");
        return (HW_DENIED_NOLOCK);
    }

    /* Lock found */
    --imf->refcount;

    if (imf->refcount < 0) {
        debuglog("Negative hardware reference count\n");
    }

    if (imf->refcount <= 0) {
        close(imf->fd);
        LIST_REMOVE(imf, monfilelist);
        free(imf);
    }
    debuglog("Exiting unlock_hwlock (HW_GRANTED)\n");
    return (HW_GRANTED);
}

enum hwlock_status
test_hwlock(const struct file_lock *fl __unused,
    struct file_lock **conflicting_fl __unused)
{

    /*
     * XXX: lock tests on hardware are not required until
     * true partial file testing is done on the underlying file
     */
    return (HW_RESERR);
}



/*
 * Below here are routines for manipulating blocked lock requests
 * They should only be called from the XXX_partialfilelock routines
 * if at all possible
 */

int
duplicate_block(struct file_lock *fl)
{
    struct file_lock *ifl;
    int retval = 0;

    debuglog("Entering duplicate_block\n");

    /*
     * Is this lock request already on the blocking list?
     * Consider it a dupe if the file handles, offset, length,
     * exclusivity and client match.
     */
    LIST_FOREACH(ifl, &blockedlocklist_head, nfslocklist) {
        if (!bcmp(&fl->filehandle, &ifl->filehandle,
                sizeof(fhandle_t)) &&
            fl->client.exclusive == ifl->client.exclusive &&
            fl->client.l_offset == ifl->client.l_offset &&
            fl->client.l_len == ifl->client.l_len &&
            same_filelock_identity(fl, ifl)) {
            retval = 1;
            break;
        }
    }

    debuglog("Exiting duplicate_block: %s\n", retval ? "already blocked"
        : "not already blocked");
    return retval;
}

void
add_blockingfilelock(struct file_lock *fl)
{
    debuglog("Entering add_blockingfilelock\n");

    /*
     * A blocking lock request _should_ never be duplicated as a client
     * that is already blocked shouldn't be able to request another
     * lock.  Alas, there are some buggy clients that do request the same
     * lock repeatedly.  Make sure only unique locks are on the blocked
     * lock list.
     */
    if (duplicate_block(fl)) {
        debuglog("Exiting add_blockingfilelock: already blocked\n");
        return;
    }

    /*
     * Clear the blocking flag so that it can be reused without
     * adding it to the blocking queue a second time
     */

    fl->blocking = 0;
    LIST_INSERT_HEAD(&blockedlocklist_head, fl, nfslocklist);

    debuglog("Exiting add_blockingfilelock: added blocked lock\n");
}

void
remove_blockingfilelock(struct file_lock *fl)
{

    debuglog("Entering remove_blockingfilelock\n");

    LIST_REMOVE(fl, nfslocklist);

    debuglog("Exiting remove_blockingfilelock\n");
}

void
clear_blockingfilelock(const char *hostname)
{
    struct file_lock *ifl, *nfl;

    /*
     * Normally, LIST_FOREACH is called for, but since
     * the current element *is* the iterator, deleting it
     * would mess up the iteration.  Thus, a next element
     * must be used explicitly
     */

    ifl = LIST_FIRST(&blockedlocklist_head);

    while (ifl != NULL) {
        nfl = LIST_NEXT(ifl, nfslocklist);

        if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
            remove_blockingfilelock(ifl);
            deallocate_file_lock(ifl);
        }

        ifl = nfl;
    }
}

void
retry_blockingfilelocklist(void)
{
    /* Retry all locks in the blocked list */
    struct file_lock *ifl, *nfl; /* Iterator */
    enum partialfilelock_status pflstatus;

    debuglog("Entering retry_blockingfilelocklist\n");

    LIST_FOREACH_SAFE(ifl, &blockedlocklist_head, nfslocklist, nfl) {
        debuglog("Iterator choice %p\n", ifl);
        debuglog("Next iterator choice %p\n", nfl);

        /*
         * SUBTLE BUG: The file_lock must be removed from the
         * old list so that its list pointers get disconnected
         * before being allowed to participate in the new list
         * which will automatically add it in if necessary.
         */

        LIST_REMOVE(ifl, nfslocklist);
        pflstatus = lock_partialfilelock(ifl);

        if (pflstatus == PFL_GRANTED || pflstatus == PFL_GRANTED_DUPLICATE) {
            debuglog("Granted blocked lock\n");
            /* lock granted and is now being used */
            send_granted(ifl, 0);
        } else {
            /* Reinsert lock back into blocked list */
            debuglog("Replacing blocked lock\n");
            LIST_INSERT_HEAD(&blockedlocklist_head, ifl, nfslocklist);
        }
    }

    debuglog("Exiting retry_blockingfilelocklist\n");
}
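/*
 * Note on retry_blockingfilelocklist(): both the blocked list and the
 * NFS lock list thread through the same nfslocklist LIST_ENTRY in
 * struct file_lock, so a lock can live on only one list at a time.
 * That is the point of the "SUBTLE BUG" comment above: the entry must
 * be LIST_REMOVE()d from the blocked list before lock_partialfilelock()
 * may LIST_INSERT_HEAD() it into the NFS lock list.
 */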
/*
 * Below here are routines associated with manipulating all
 * aspects of the partial file locking system (list, hardware, etc.)
 */

/*
 * Please note that lock monitoring must be done at this level which
 * keeps track of *individual* lock requests on lock and unlock
 *
 * XXX: Split unlocking is going to make the unlock code miserable
 */

/*
 * lock_partialfilelock:
 *
 * Argument fl gets modified as its list housekeeping entries get modified
 * upon insertion into the NFS lock list
 *
 * This routine makes several assumptions:
 * 1) It (will) pass locks through to flock to lock the entire underlying
 *    file and then parcel out NFS locks if it gets control of the file.
 *    This matches the old rpc.lockd file semantics (except where it
 *    is now more correct).  It is the safe solution, but will cause
 *    overly restrictive blocking if someone is trying to use the
 *    underlying files without using NFS.  This appears to be an
 *    acceptable tradeoff since most people use standalone NFS servers.
 *    XXX: The right solution is probably kevent combined with fcntl
 * 2) Nothing modifies the lock lists between testing and granting
 *    I have no idea whether this is a useful assumption or not
 */

enum partialfilelock_status
lock_partialfilelock(struct file_lock *fl)
{
    enum partialfilelock_status retval;
    enum nfslock_status lnlstatus;
    enum hwlock_status hwstatus;

    debuglog("Entering lock_partialfilelock\n");

    retval = PFL_DENIED;

    /*
     * Execute the NFS lock first, if possible, as it is significantly
     * easier and less expensive to undo than the filesystem lock
     */

    lnlstatus = lock_nfslock(fl);

    switch (lnlstatus) {
    case NFS_GRANTED:
    case NFS_GRANTED_DUPLICATE:
        /*
         * At this point, the NFS lock is allocated and active.
         * Remember to clean it up if the hardware lock fails
         */
        hwstatus = lock_hwlock(fl);

        switch (hwstatus) {
        case HW_GRANTED:
        case HW_GRANTED_DUPLICATE:
            debuglog("HW GRANTED\n");
            /*
             * XXX: Fixme: Check hwstatus for duplicate when
             * true partial file locking and accounting is
             * done on the hardware.
             */
            if (lnlstatus == NFS_GRANTED_DUPLICATE) {
                retval = PFL_GRANTED_DUPLICATE;
            } else {
                retval = PFL_GRANTED;
            }
            monitor_lock_host(fl->client_name);
            break;
        case HW_RESERR:
            debuglog("HW RESERR\n");
            retval = PFL_HWRESERR;
            break;
        case HW_DENIED:
            debuglog("HW DENIED\n");
            retval = PFL_HWDENIED;
            break;
        default:
            debuglog("Unmatched hwstatus %d\n", hwstatus);
            break;
        }

        if (retval != PFL_GRANTED &&
            retval != PFL_GRANTED_DUPLICATE) {
            /* Clean up the NFS lock */
            debuglog("Deleting trial NFS lock\n");
            delete_nfslock(fl);
        }
        break;
    case NFS_DENIED:
        retval = PFL_NFSDENIED;
        break;
    case NFS_RESERR:
        retval = PFL_NFSRESERR;
        break;
    default:
        debuglog("Unmatched lnlstatus %d\n", lnlstatus);
        retval = PFL_NFSDENIED_NOLOCK;
        break;
    }

    /*
     * By the time fl reaches here, it is completely free again on
     * failure.  The NFS lock done before attempting the
     * hardware lock has been backed out
     */

    if (retval == PFL_NFSDENIED || retval == PFL_HWDENIED) {
        /* One last chance to check the lock */
        if (fl->blocking == 1) {
            if (retval == PFL_NFSDENIED) {
                /* Queue the lock */
                debuglog("BLOCKING LOCK RECEIVED\n");
                retval = PFL_NFSBLOCKED;
                add_blockingfilelock(fl);
                dump_filelock(fl);
            } else {
                /* retval is okay as PFL_HWDENIED */
                debuglog("BLOCKING LOCK DENIED IN HARDWARE\n");
                dump_filelock(fl);
            }
        } else {
            /* Leave retval alone, it's already correct */
            debuglog("Lock denied.  Non-blocking failure\n");
            dump_filelock(fl);
        }
    }

    debuglog("Exiting lock_partialfilelock\n");

    return retval;
}
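/*
 * Two-phase structure of lock_partialfilelock(), in brief: take the
 * cheap, easily reversible NFS list lock first, then attempt the
 * flock-based hardware lock, and back the NFS lock out with
 * delete_nfslock() if the hardware says no.  A denied blocking
 * request parks the very same file_lock on the blocked list, to be
 * re-driven through this function by retry_blockingfilelocklist()
 * after every successful unlock.
 */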
/*
 * unlock_partialfilelock:
 *
 * Given a file_lock, unlock all locks which match.
 *
 * Note that a given lock might have to unlock ITSELF!  See
 * clear_partialfilelock for example.
 */

enum partialfilelock_status
unlock_partialfilelock(const struct file_lock *fl)
{
    struct file_lock *lfl, *rfl, *releasedfl, *selffl;
    enum partialfilelock_status retval;
    enum nfslock_status unlstatus;
    enum hwlock_status unlhwstatus, lhwstatus;

    debuglog("Entering unlock_partialfilelock\n");

    selffl = NULL;
    lfl = NULL;
    rfl = NULL;
    releasedfl = NULL;
    retval = PFL_DENIED;

    /*
     * There are significant overlap and atomicity issues
     * with partially releasing a lock.  For example, releasing
     * part of an NFS shared lock does *not* always release the
     * corresponding part of the file since there is only one
     * rpc.lockd UID but multiple users could be requesting it
     * from NFS.  Also, an unlock request should never allow
     * another process to gain a lock on the remaining parts.
     * ie. Always apply the new locks before releasing the
     * old one
     */

    /*
     * Loop is required since multiple little locks
     * can be allocated and then deallocated with one
     * big unlock.
     *
     * The loop is required to be here so that the nfs &
     * hw subsystems do not need to communicate with
     * one another
     */

    do {
        debuglog("Value of releasedfl: %p\n", releasedfl);
        /* lfl&rfl are created *AND* placed into the NFS lock list if required */
        unlstatus = unlock_nfslock(fl, &releasedfl, &lfl, &rfl);
        debuglog("Value of releasedfl: %p\n", releasedfl);

        /* XXX: This is grungy.  It should be refactored to be cleaner */
        if (lfl != NULL) {
            lhwstatus = lock_hwlock(lfl);
            if (lhwstatus != HW_GRANTED &&
                lhwstatus != HW_GRANTED_DUPLICATE) {
                debuglog("HW duplicate lock failure for left split\n");
            }
            monitor_lock_host(lfl->client_name);
        }

        if (rfl != NULL) {
            lhwstatus = lock_hwlock(rfl);
            if (lhwstatus != HW_GRANTED &&
                lhwstatus != HW_GRANTED_DUPLICATE) {
                debuglog("HW duplicate lock failure for right split\n");
            }
            monitor_lock_host(rfl->client_name);
        }

        switch (unlstatus) {
        case NFS_GRANTED:
            /* Attempt to unlock on the hardware */
            debuglog("NFS unlock granted.  Attempting hardware unlock\n");

            /* This call *MUST NOT* unlock the two newly allocated locks */
            unlhwstatus = unlock_hwlock(fl);
            debuglog("HW unlock returned with code %d\n", unlhwstatus);

            switch (unlhwstatus) {
            case HW_GRANTED:
                debuglog("HW unlock granted\n");
                unmonitor_lock_host(releasedfl->client_name);
                retval = PFL_GRANTED;
                break;
            case HW_DENIED_NOLOCK:
                /* Huh?!?!  This shouldn't happen */
                debuglog("HW unlock denied no lock\n");
                retval = PFL_HWRESERR;
                /* Break out of do-while */
                unlstatus = NFS_RESERR;
                break;
            default:
                debuglog("HW unlock failed\n");
                retval = PFL_HWRESERR;
                /* Break out of do-while */
                unlstatus = NFS_RESERR;
                break;
            }

            debuglog("Exiting with status retval: %d\n", retval);

            retry_blockingfilelocklist();
            break;
        case NFS_DENIED_NOLOCK:
            retval = PFL_GRANTED;
            debuglog("All locks cleaned out\n");
            break;
        default:
            retval = PFL_NFSRESERR;
            debuglog("NFS unlock failure\n");
            dump_filelock(fl);
            break;
        }

        if (releasedfl != NULL) {
            if (fl == releasedfl) {
                /*
                 * XXX: YECHHH!!!  Attempt to unlock self succeeded
                 * but we can't deallocate the space yet.  This is what
                 * happens when you don't write malloc and free together
                 */
                debuglog("Attempt to unlock self\n");
                selffl = releasedfl;
            } else {
                /*
                 * XXX: this deallocation *still* needs to migrate closer
                 * to the allocation code way up in get_lock or the allocation
                 * code needs to migrate down (violation of "When you write
                 * malloc you must write free")
                 */

                deallocate_file_lock(releasedfl);
            }
        }

    } while (unlstatus == NFS_GRANTED);

    if (selffl != NULL) {
        /*
         * This statement wipes out the incoming file lock (fl)
         * in spite of the fact that it is declared const
         */
        debuglog("WARNING!  Destroying incoming lock pointer\n");
        deallocate_file_lock(selffl);
    }

    debuglog("Exiting unlock_partialfilelock\n");

    return retval;
}
/*
 * clear_partialfilelock
 *
 * Normally called in response to statd state number change.
 * Wipe out all locks held by a host.  As a bonus, the act of
 * doing so should automatically clear their statd entries and
 * unmonitor the host.
 */

void
clear_partialfilelock(const char *hostname)
{
    struct file_lock *ifl, *nfl;

    /* Clear blocking file lock list */
    clear_blockingfilelock(hostname);

    /* do all required unlocks */
    /* Note that unlock can smash the current pointer to a lock */

    /*
     * Normally, LIST_FOREACH is called for, but since
     * the current element *is* the iterator, deleting it
     * would mess up the iteration.  Thus, a next element
     * must be used explicitly
     */

    ifl = LIST_FIRST(&nfslocklist_head);

    while (ifl != NULL) {
        nfl = LIST_NEXT(ifl, nfslocklist);

        if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
            /* Unlock destroys ifl out from underneath */
            unlock_partialfilelock(ifl);
            /* ifl is NO LONGER VALID AT THIS POINT */
        }
        ifl = nfl;
    }
}

/*
 * test_partialfilelock:
 */
enum partialfilelock_status
test_partialfilelock(const struct file_lock *fl,
    struct file_lock **conflicting_fl)
{
    enum partialfilelock_status retval;
    enum nfslock_status teststatus;

    debuglog("Entering testpartialfilelock...\n");

    retval = PFL_DENIED;

    teststatus = test_nfslock(fl, conflicting_fl);
    debuglog("test_partialfilelock: teststatus %d\n", teststatus);

    if (teststatus == NFS_GRANTED || teststatus == NFS_GRANTED_DUPLICATE) {
        /* XXX: Add the underlying filesystem locking code */
        retval = (teststatus == NFS_GRANTED) ?
            PFL_GRANTED : PFL_GRANTED_DUPLICATE;
        debuglog("Dumping locks...\n");
        dump_filelock(fl);
        dump_filelock(*conflicting_fl);
        debuglog("Done dumping locks...\n");
    } else {
        retval = PFL_NFSDENIED;
        debuglog("NFS test denied.\n");
        dump_filelock(fl);
        debuglog("Conflicting.\n");
        dump_filelock(*conflicting_fl);
    }

    debuglog("Exiting testpartialfilelock...\n");

    return retval;
}
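/*
 * How the partial-file results map onto the wire statuses returned by
 * do_test(), do_lock(), and do_unlock() below (nlm4_* for v4 callers,
 * the nlm_* equivalents otherwise):
 *
 *     PFL_GRANTED, PFL_GRANTED_DUPLICATE  -> nlm4_granted
 *     PFL_NFSDENIED, PFL_HWDENIED         -> nlm4_denied
 *     PFL_NFSBLOCKED, PFL_HWBLOCKED       -> nlm4_blocked (lock only)
 *     PFL_*DENIED_NOLOCK                  -> nlm4_granted (unlock only)
 *     PFL_*RESERR                         -> nlm4_denied_nolocks
 *     anything else                       -> nlm4_failed
 *
 * (nlm4_failed has no NLM v1/v3 equivalent, so the old protocol gets
 * nlm_denied in that case.)
 */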
/*
 * Below here are routines associated with translating the partial file locking
 * codes into useful codes to send back to the NFS RPC messaging system
 */

/*
 * These routines translate the (relatively) useful return codes back onto
 * the few return codes which the nlm subsystem wishes to transmit
 */

enum nlm_stats
do_test(struct file_lock *fl, struct file_lock **conflicting_fl)
{
    enum partialfilelock_status pfsret;
    enum nlm_stats retval;

    debuglog("Entering do_test...\n");

    pfsret = test_partialfilelock(fl, conflicting_fl);

    switch (pfsret) {
    case PFL_GRANTED:
        debuglog("PFL test lock granted\n");
        dump_filelock(fl);
        dump_filelock(*conflicting_fl);
        retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
        break;
    case PFL_GRANTED_DUPLICATE:
        debuglog("PFL test lock granted--duplicate id detected\n");
        dump_filelock(fl);
        dump_filelock(*conflicting_fl);
        debuglog("Clearing conflicting_fl for call semantics\n");
        *conflicting_fl = NULL;
        retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
        break;
    case PFL_NFSDENIED:
    case PFL_HWDENIED:
        debuglog("PFL test lock denied\n");
        dump_filelock(fl);
        dump_filelock(*conflicting_fl);
        retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
        break;
    case PFL_NFSRESERR:
    case PFL_HWRESERR:
        debuglog("PFL test lock resource fail\n");
        dump_filelock(fl);
        dump_filelock(*conflicting_fl);
        retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
        break;
    default:
        debuglog("PFL test lock *FAILED*\n");
        dump_filelock(fl);
        dump_filelock(*conflicting_fl);
        retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
        break;
    }

    debuglog("Exiting do_test...\n");

    return retval;
}

/*
 * do_lock: Try to acquire a lock
 *
 * This routine makes a distinction between NLM versions.  I am pretty
 * convinced that this should be abstracted out and bounced up a level
 */

enum nlm_stats
do_lock(struct file_lock *fl)
{
    enum partialfilelock_status pfsret;
    enum nlm_stats retval;

    debuglog("Entering do_lock...\n");

    pfsret = lock_partialfilelock(fl);

    switch (pfsret) {
    case PFL_GRANTED:
        debuglog("PFL lock granted\n");
        dump_filelock(fl);
        retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
        break;
    case PFL_GRANTED_DUPLICATE:
        debuglog("PFL lock granted--duplicate id detected\n");
        dump_filelock(fl);
        retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
        break;
    case PFL_NFSDENIED:
    case PFL_HWDENIED:
        debuglog("PFL_NFS lock denied\n");
        dump_filelock(fl);
        retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
        break;
    case PFL_NFSBLOCKED:
    case PFL_HWBLOCKED:
        debuglog("PFL_NFS blocking lock denied.  Queued.\n");
        dump_filelock(fl);
        retval = (fl->flags & LOCK_V4) ? nlm4_blocked : nlm_blocked;
        break;
    case PFL_NFSRESERR:
    case PFL_HWRESERR:
        debuglog("PFL lock resource allocation fail\n");
        dump_filelock(fl);
        retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
        break;
    default:
        debuglog("PFL lock *FAILED*\n");
        dump_filelock(fl);
        retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
        break;
    }

    debuglog("Exiting do_lock...\n");

    return retval;
}
enum nlm_stats
do_unlock(struct file_lock *fl)
{
    enum partialfilelock_status pfsret;
    enum nlm_stats retval;

    debuglog("Entering do_unlock...\n");
    pfsret = unlock_partialfilelock(fl);

    switch (pfsret) {
    case PFL_GRANTED:
        debuglog("PFL unlock granted\n");
        dump_filelock(fl);
        retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
        break;
    case PFL_NFSDENIED:
    case PFL_HWDENIED:
        debuglog("PFL_NFS unlock denied\n");
        dump_filelock(fl);
        retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
        break;
    case PFL_NFSDENIED_NOLOCK:
    case PFL_HWDENIED_NOLOCK:
        debuglog("PFL_NFS no lock found\n");
        retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
        break;
    case PFL_NFSRESERR:
    case PFL_HWRESERR:
        debuglog("PFL unlock resource failure\n");
        dump_filelock(fl);
        retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
        break;
    default:
        debuglog("PFL unlock *FAILED*\n");
        dump_filelock(fl);
        retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
        break;
    }

    debuglog("Exiting do_unlock...\n");

    return retval;
}

/*
 * do_clear
 *
 * This routine is a trivial wrapper because clearing has no return
 * code to translate.  It is here for completeness in case someone
 * *does* need to do return codes later.  A decent compiler should
 * optimize this away.
 */

void
do_clear(const char *hostname)
{

    clear_partialfilelock(hostname);
}

/*
 * The following routines are all called from the code which the
 * RPC layer invokes
 */

/*
 * testlock(): inform the caller if the requested lock would be granted
 *
 * returns NULL if lock would be granted
 * returns pointer to a conflicting nlm4_holder if not
 */

struct nlm4_holder *
testlock(struct nlm4_lock *lock, bool_t exclusive, int flags __unused)
{
    struct file_lock test_fl, *conflicting_fl;

    bzero(&test_fl, sizeof(test_fl));

    bcopy(lock->fh.n_bytes, &(test_fl.filehandle), sizeof(fhandle_t));
    copy_nlm4_lock_to_nlm4_holder(lock, exclusive, &test_fl.client);

    siglock();
    do_test(&test_fl, &conflicting_fl);

    if (conflicting_fl == NULL) {
        debuglog("No conflicting lock found\n");
        sigunlock();
        return NULL;
    } else {
        debuglog("Found conflicting lock\n");
        dump_filelock(conflicting_fl);
        sigunlock();
        return (&conflicting_fl->client);
    }
}
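/*
 * Lifetime caveat for testlock(): on conflict it returns a pointer to
 * the nlm4_holder embedded in the conflicting entry of the live lock
 * list, not a copy.  The caller must marshal its reply before anything
 * can unlock or clear that entry; test_fl itself is a throwaway on the
 * stack and is never inserted into any list.
 */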
/*
 * getlock: try to acquire the lock.
 * If the file is already locked and we can sleep, put the lock in the list
 * with status LKST_WAITING; it'll be processed later.
 * Otherwise try to lock.  If we're allowed to block, fork a child which
 * will do the blocking lock.
 */

enum nlm_stats
getlock(nlm4_lockargs *lckarg, struct svc_req *rqstp, const int flags)
{
	struct file_lock *newfl;
	enum nlm_stats retval;

	debuglog("Entering getlock...\n");

	if (grace_expired == 0 && lckarg->reclaim == 0)
		return (flags & LOCK_V4) ?
		    nlm4_denied_grace_period : nlm_denied_grace_period;

	/* allocate new file_lock for this request */
	newfl = allocate_file_lock(&lckarg->alock.oh, &lckarg->cookie,
	    (struct sockaddr *)svc_getrpccaller(rqstp->rq_xprt)->buf,
	    lckarg->alock.caller_name);
	if (newfl == NULL) {
		syslog(LOG_NOTICE, "lock allocate failed: %s",
		    strerror(errno));
		/* failed */
		return (flags & LOCK_V4) ?
		    nlm4_denied_nolocks : nlm_denied_nolocks;
	}

	if (lckarg->alock.fh.n_len != sizeof(fhandle_t)) {
		debuglog("received fhandle size %d, local size %d\n",
		    lckarg->alock.fh.n_len, (int)sizeof(fhandle_t));
	}

	fill_file_lock(newfl, (fhandle_t *)lckarg->alock.fh.n_bytes,
	    lckarg->exclusive, lckarg->alock.svid, lckarg->alock.l_offset,
	    lckarg->alock.l_len,
	    lckarg->state, 0, flags, lckarg->block);

	/*
	 * newfl is now fully constructed and deallocate_file_lock
	 * can now be used to delete it
	 */

	siglock();
	debuglog("Pointer to new lock is %p\n", newfl);

	retval = do_lock(newfl);

	debuglog("Pointer to new lock is %p\n", newfl);
	sigunlock();

	switch (retval) {
	case nlm4_granted:
		/* case nlm_granted: is the same as nlm4_granted */
		/* do_mon(lckarg->alock.caller_name); */
		break;
	case nlm4_blocked:
		/* case nlm_blocked: is the same as nlm4_blocked */
		/* do_mon(lckarg->alock.caller_name); */
		break;
	default:
		deallocate_file_lock(newfl);
		break;
	}

	debuglog("Exiting getlock...\n");

	return retval;
}


/* unlock a filehandle */
enum nlm_stats
unlock(nlm4_lock *lock, const int flags __unused)
{
	struct file_lock fl;
	enum nlm_stats err;

	siglock();

	debuglog("Entering unlock...\n");

	bzero(&fl, sizeof(struct file_lock));
	bcopy(lock->fh.n_bytes, &fl.filehandle, sizeof(fhandle_t));

	copy_nlm4_lock_to_nlm4_holder(lock, 0, &fl.client);

	err = do_unlock(&fl);

	sigunlock();

	debuglog("Exiting unlock...\n");

	return err;
}
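/*
 * Illustrative sketch only (not part of lockd): how a caller typically
 * interprets the nlm_stats values that getlock() returns.  The handler
 * name is hypothetical.
 */
#if 0
static enum nlm_stats
example_lock_rpc(nlm4_lockargs *arg, struct svc_req *rqstp)
{
	enum nlm_stats st;

	st = getlock(arg, rqstp, LOCK_V4);
	/*
	 * nlm4_granted: the lock is held and can be reported granted.
	 * nlm4_blocked: the request was queued; send_granted() later
	 *   issues the GRANTED callback once the region frees up.
	 * Any other status: getlock() already deallocated the file_lock.
	 */
	return st;
}
#endif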
/*
 * XXX: The following monitor/unmonitor routines have not been
 * extensively tested (i.e. no regression script exists for them as it
 * does for the locking sections)
 */

/*
 * monitor_lock_host: monitor lock hosts locally with a ref count and
 * inform statd
 */
void
monitor_lock_host(const char *hostname)
{
	struct host *ihp, *nhp;
	struct mon smon;
	struct sm_stat_res sres;
	int rpcret, statflag;
	size_t n;

	rpcret = 0;
	statflag = 0;

	LIST_FOREACH(ihp, &hostlst_head, hostlst) {
		if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) {
			/* Host is already monitored, bump refcount */
			++ihp->refcnt;
			/* Host should only be in the monitor list once */
			return;
		}
	}

	/* Host is not yet monitored, add it */
	n = strnlen(hostname, SM_MAXSTRLEN);
	if (n == SM_MAXSTRLEN) {
		return;
	}
	nhp = malloc(sizeof(*nhp) - sizeof(nhp->name) + n + 1);
	if (nhp == NULL) {
		debuglog("Unable to allocate entry for statd mon\n");
		return;
	}

	/* Allocated new host entry, now fill the fields */
	memcpy(nhp->name, hostname, n);
	nhp->name[n] = 0;
	nhp->refcnt = 1;
	debuglog("Locally monitoring host %16s\n", hostname);

	debuglog("Attempting to tell statd\n");

	bzero(&smon, sizeof(smon));

	smon.mon_id.mon_name = nhp->name;
	smon.mon_id.my_id.my_name = "localhost";
	smon.mon_id.my_id.my_prog = NLM_PROG;
	smon.mon_id.my_id.my_vers = NLM_SM;
	smon.mon_id.my_id.my_proc = NLM_SM_NOTIFY;

	rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_MON,
	    (xdrproc_t)xdr_mon, &smon,
	    (xdrproc_t)xdr_sm_stat_res, &sres);

	if (rpcret == 0) {
		if (sres.res_stat == stat_fail) {
			debuglog("Statd call failed\n");
			statflag = 0;
		} else {
			statflag = 1;
		}
	} else {
		debuglog("Rpc call to statd failed with return value: %d\n",
		    rpcret);
		statflag = 0;
	}

	if (statflag == 1) {
		LIST_INSERT_HEAD(&hostlst_head, nhp, hostlst);
	} else {
		free(nhp);
	}
}

/*
 * unmonitor_lock_host: clear monitor ref counts and inform statd when gone
 */
void
unmonitor_lock_host(char *hostname)
{
	struct host *ihp;
	struct mon_id smon_id;
	struct sm_stat smstat;
	int rpcret;

	rpcret = 0;

	for (ihp = LIST_FIRST(&hostlst_head); ihp != NULL;
	    ihp = LIST_NEXT(ihp, hostlst)) {
		if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) {
			/* Host is monitored, drop refcount */
			--ihp->refcnt;
			/* Host should only be in the monitor list once */
			break;
		}
	}

	if (ihp == NULL) {
		debuglog("Could not find host %16s in mon list\n", hostname);
		return;
	}

	if (ihp->refcnt > 0)
		return;

	if (ihp->refcnt < 0) {
		debuglog("Negative refcount!: %d\n", ihp->refcnt);
	}

	debuglog("Attempting to unmonitor host %16s\n", hostname);

	bzero(&smon_id, sizeof(smon_id));

	smon_id.mon_name = hostname;
	smon_id.my_id.my_name = "localhost";
	smon_id.my_id.my_prog = NLM_PROG;
	smon_id.my_id.my_vers = NLM_SM;
	smon_id.my_id.my_proc = NLM_SM_NOTIFY;

	rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_UNMON,
	    (xdrproc_t)xdr_mon_id, &smon_id,
	    (xdrproc_t)xdr_sm_stat, &smstat);

	if (rpcret != 0) {
		debuglog("Rpc call to unmonitor statd failed with "
		    "return value: %d\n", rpcret);
	}

	LIST_REMOVE(ihp, hostlst);
	free(ihp);
}

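/*
 * Illustrative sketch only (not part of lockd): the two routines above
 * are meant to be called in matched pairs, so that a host's refcnt
 * mirrors the number of locks it holds and statd is asked to watch a
 * host only while it holds at least one lock.  The function name is
 * hypothetical.
 */
#if 0
static void
example_monitor_pairing(struct file_lock *fl)
{
	monitor_lock_host(fl->client_name);	/* when a lock is granted */
	/* lock held by the client in between */
	unmonitor_lock_host(fl->client_name);	/* when the lock is freed */
}
#endif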
/*
 * notify: Clear all locks from a host if statd complains
 *
 * XXX: This routine has not been thoroughly tested.  However, neither
 * had the old one been.  It used to compare the statd crash state counter
 * to the current lock state.  The upshot of this was that it basically
 * cleared all locks from the specified host 99% of the time (with the
 * other 1% being a bug).  Consequently, the assumption is that clearing
 * all locks from a host when notified by statd is acceptable.
 *
 * Please note that this routine skips the usual level of redirection
 * through a do_* type routine.  This introduces a possible source of
 * error; it might be better to write a do_notify routine and fold this
 * one into it.
 */

void
notify(const char *hostname, const int state)
{
	debuglog("notify from %s, new state %d\n", hostname, state);

	siglock();
	do_clear(hostname);
	sigunlock();

	debuglog("Leaving notify\n");
}
" (async)":""); 2252 if (fl->flags & LOCK_ASYNC) { 2253 success = clnt_call(cli, NLM_GRANTED_MSG, 2254 (xdrproc_t)xdr_nlm_testargs, &res, 2255 (xdrproc_t)xdr_void, &dummy, timeo); 2256 } else { 2257 success = clnt_call(cli, NLM_GRANTED, 2258 (xdrproc_t)xdr_nlm_testargs, &res, 2259 (xdrproc_t)xdr_nlm_res, &retval, timeo); 2260 } 2261 } 2262 if (debug_level > 2) 2263 debuglog("clnt_call returns %d(%s) for granted", 2264 success, clnt_sperrno(success)); 2265 2266 } 2267 2268 /* 2269 * Routines below here have not been modified in the overhaul 2270 */ 2271 2272 /* 2273 * Are these two routines still required since lockd is not spawning off 2274 * children to service locks anymore? Presumably they were originally 2275 * put in place to prevent a one child from changing the lock list out 2276 * from under another one. 2277 */ 2278 2279 void 2280 siglock(void) 2281 { 2282 sigset_t block; 2283 2284 sigemptyset(&block); 2285 sigaddset(&block, SIGCHLD); 2286 2287 if (sigprocmask(SIG_BLOCK, &block, NULL) < 0) { 2288 syslog(LOG_WARNING, "siglock failed: %s", strerror(errno)); 2289 } 2290 } 2291 2292 void 2293 sigunlock(void) 2294 { 2295 sigset_t block; 2296 2297 sigemptyset(&block); 2298 sigaddset(&block, SIGCHLD); 2299 2300 if (sigprocmask(SIG_UNBLOCK, &block, NULL) < 0) { 2301 syslog(LOG_WARNING, "sigunlock failed: %s", strerror(errno)); 2302 } 2303 } 2304