1 /* $NetBSD: lockd_lock.c,v 1.5 2000/11/21 03:47:41 enami Exp $ */ 2 3 /* 4 * Copyright (c) 2001 Andrew P. Lentvorski, Jr. 5 * Copyright (c) 2000 Manuel Bouyer. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by the University of 18 * California, Berkeley and its contributors. 19 * 4. Neither the name of the University nor the names of its contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 * 35 */ 36 37 #include <sys/cdefs.h> 38 __FBSDID("$FreeBSD$"); 39 40 #define LOCKD_DEBUG 41 42 #include <stdio.h> 43 #ifdef LOCKD_DEBUG 44 #include <stdarg.h> 45 #endif 46 #include <stdlib.h> 47 #include <unistd.h> 48 #include <fcntl.h> 49 #include <syslog.h> 50 #include <errno.h> 51 #include <string.h> 52 #include <signal.h> 53 #include <rpc/rpc.h> 54 #include <sys/types.h> 55 #include <sys/stat.h> 56 #include <sys/socket.h> 57 #include <sys/param.h> 58 #include <sys/mount.h> 59 #include <sys/wait.h> 60 #include <rpcsvc/sm_inter.h> 61 #include <rpcsvc/nlm_prot.h> 62 #include "lockd_lock.h" 63 #include "lockd.h" 64 65 #define MAXOBJECTSIZE 64 66 #define MAXBUFFERSIZE 1024 67 68 /* 69 * A set of utilities for managing file locking 70 * 71 * XXX: All locks are in a linked list, a better structure should be used 72 * to improve search/access efficiency. 73 */ 74 75 /* struct describing a lock */ 76 struct file_lock { 77 LIST_ENTRY(file_lock) nfslocklist; 78 fhandle_t filehandle; /* NFS filehandle */ 79 struct sockaddr *addr; 80 struct nlm4_holder client; /* lock holder */ 81 /* XXX: client_cookie used *only* in send_granted */ 82 netobj client_cookie; /* cookie sent by the client */ 83 int nsm_status; /* status from the remote lock manager */ 84 int status; /* lock status, see below */ 85 int flags; /* lock flags, see lockd_lock.h */ 86 int blocking; /* blocking lock or not */ 87 char client_name[SM_MAXSTRLEN]; /* client_name is really variable 88 length and must be last! */ 89 }; 90 91 LIST_HEAD(nfslocklist_head, file_lock); 92 struct nfslocklist_head nfslocklist_head = LIST_HEAD_INITIALIZER(nfslocklist_head); 93 94 LIST_HEAD(blockedlocklist_head, file_lock); 95 struct blockedlocklist_head blockedlocklist_head = LIST_HEAD_INITIALIZER(blockedlocklist_head); 96 97 /* lock status */ 98 #define LKST_LOCKED 1 /* lock is locked */ 99 /* XXX: Is this flag file specific or lock specific? */ 100 #define LKST_WAITING 2 /* file is already locked by another host */ 101 #define LKST_PROCESSING 3 /* child is trying to acquire the lock */ 102 #define LKST_DYING 4 /* must dies when we get news from the child */ 103 104 /* struct describing a monitored host */ 105 struct host { 106 LIST_ENTRY(host) hostlst; 107 int refcnt; 108 char name[SM_MAXSTRLEN]; /* name is really variable length and 109 must be last! */ 110 }; 111 /* list of hosts we monitor */ 112 LIST_HEAD(hostlst_head, host); 113 struct hostlst_head hostlst_head = LIST_HEAD_INITIALIZER(hostlst_head); 114 115 /* 116 * File monitoring handlers 117 * XXX: These might be able to be removed when kevent support 118 * is placed into the hardware lock/unlock routines. (ie. 119 * let the kernel do all the file monitoring) 120 */ 121 122 /* Struct describing a monitored file */ 123 struct monfile { 124 LIST_ENTRY(monfile) monfilelist; 125 fhandle_t filehandle; /* Local access filehandle */ 126 int fd; /* file descriptor: remains open until unlock! */ 127 int refcount; 128 int exclusive; 129 }; 130 131 /* List of files we monitor */ 132 LIST_HEAD(monfilelist_head, monfile); 133 struct monfilelist_head monfilelist_head = LIST_HEAD_INITIALIZER(monfilelist_head); 134 135 static int debugdelay = 0; 136 137 enum nfslock_status { NFS_GRANTED = 0, NFS_GRANTED_DUPLICATE, 138 NFS_DENIED, NFS_DENIED_NOLOCK, 139 NFS_RESERR }; 140 141 enum hwlock_status { HW_GRANTED = 0, HW_GRANTED_DUPLICATE, 142 HW_DENIED, HW_DENIED_NOLOCK, 143 HW_STALEFH, HW_READONLY, HW_RESERR }; 144 145 enum partialfilelock_status { PFL_GRANTED=0, PFL_GRANTED_DUPLICATE, PFL_DENIED, 146 PFL_NFSDENIED, PFL_NFSBLOCKED, PFL_NFSDENIED_NOLOCK, PFL_NFSRESERR, 147 PFL_HWDENIED, PFL_HWBLOCKED, PFL_HWDENIED_NOLOCK, PFL_HWRESERR}; 148 149 enum LFLAGS {LEDGE_LEFT, LEDGE_LBOUNDARY, LEDGE_INSIDE, LEDGE_RBOUNDARY, LEDGE_RIGHT}; 150 enum RFLAGS {REDGE_LEFT, REDGE_LBOUNDARY, REDGE_INSIDE, REDGE_RBOUNDARY, REDGE_RIGHT}; 151 /* XXX: WARNING! I HAVE OVERLOADED THIS STATUS ENUM! SPLIT IT APART INTO TWO */ 152 enum split_status {SPL_DISJOINT=0, SPL_LOCK1=1, SPL_LOCK2=2, SPL_CONTAINED=4, SPL_RESERR=8}; 153 154 enum partialfilelock_status lock_partialfilelock(struct file_lock *fl); 155 156 void send_granted(struct file_lock *fl, int opcode); 157 void siglock(void); 158 void sigunlock(void); 159 void monitor_lock_host(const char *hostname); 160 void unmonitor_lock_host(char *hostname); 161 162 void copy_nlm4_lock_to_nlm4_holder(const struct nlm4_lock *src, 163 const bool_t exclusive, struct nlm4_holder *dest); 164 struct file_lock * allocate_file_lock(const netobj *lockowner, 165 const netobj *matchcookie, 166 const struct sockaddr *addr, 167 const char *caller_name); 168 void deallocate_file_lock(struct file_lock *fl); 169 void fill_file_lock(struct file_lock *fl, const fhandle_t *fh, 170 const bool_t exclusive, const int32_t svid, 171 const u_int64_t offset, const u_int64_t len, 172 const int state, const int status, const int flags, const int blocking); 173 int regions_overlap(const u_int64_t start1, const u_int64_t len1, 174 const u_int64_t start2, const u_int64_t len2); 175 enum split_status region_compare(const u_int64_t starte, const u_int64_t lene, 176 const u_int64_t startu, const u_int64_t lenu, 177 u_int64_t *start1, u_int64_t *len1, u_int64_t *start2, u_int64_t *len2); 178 int same_netobj(const netobj *n0, const netobj *n1); 179 int same_filelock_identity(const struct file_lock *fl0, 180 const struct file_lock *fl2); 181 182 static void debuglog(char const *fmt, ...); 183 void dump_static_object(const unsigned char* object, const int sizeof_object, 184 unsigned char* hbuff, const int sizeof_hbuff, 185 unsigned char* cbuff, const int sizeof_cbuff); 186 void dump_netobj(const struct netobj *nobj); 187 void dump_filelock(const struct file_lock *fl); 188 struct file_lock * get_lock_matching_unlock(const struct file_lock *fl); 189 enum nfslock_status test_nfslock(const struct file_lock *fl, 190 struct file_lock **conflicting_fl); 191 enum nfslock_status lock_nfslock(struct file_lock *fl); 192 enum nfslock_status delete_nfslock(struct file_lock *fl); 193 enum nfslock_status unlock_nfslock(const struct file_lock *fl, 194 struct file_lock **released_lock, struct file_lock **left_lock, 195 struct file_lock **right_lock); 196 enum hwlock_status lock_hwlock(struct file_lock *fl); 197 enum split_status split_nfslock(const struct file_lock *exist_lock, 198 const struct file_lock *unlock_lock, struct file_lock **left_lock, 199 struct file_lock **right_lock); 200 int duplicate_block(struct file_lock *fl); 201 void add_blockingfilelock(struct file_lock *fl); 202 enum hwlock_status unlock_hwlock(const struct file_lock *fl); 203 enum hwlock_status test_hwlock(const struct file_lock *fl, 204 struct file_lock **conflicting_fl); 205 void remove_blockingfilelock(struct file_lock *fl); 206 void clear_blockingfilelock(const char *hostname); 207 void retry_blockingfilelocklist(void); 208 enum partialfilelock_status unlock_partialfilelock( 209 const struct file_lock *fl); 210 void clear_partialfilelock(const char *hostname); 211 enum partialfilelock_status test_partialfilelock( 212 const struct file_lock *fl, struct file_lock **conflicting_fl); 213 enum nlm_stats do_test(struct file_lock *fl, 214 struct file_lock **conflicting_fl); 215 enum nlm_stats do_unlock(struct file_lock *fl); 216 enum nlm_stats do_lock(struct file_lock *fl); 217 void do_clear(const char *hostname); 218 size_t strnlen(const char *, size_t); 219 220 void 221 debuglog(char const *fmt, ...) 222 { 223 va_list ap; 224 225 if (debug_level < 1) { 226 return; 227 } 228 229 sleep(debugdelay); 230 231 va_start(ap, fmt); 232 vsyslog(LOG_DEBUG, fmt, ap); 233 va_end(ap); 234 } 235 236 void 237 dump_static_object(object, size_object, hbuff, size_hbuff, cbuff, size_cbuff) 238 const unsigned char *object; 239 const int size_object; 240 unsigned char *hbuff; 241 const int size_hbuff; 242 unsigned char *cbuff; 243 const int size_cbuff; 244 { 245 int i, objectsize; 246 247 if (debug_level < 2) { 248 return; 249 } 250 251 objectsize = size_object; 252 253 if (objectsize == 0) { 254 debuglog("object is size 0\n"); 255 } else { 256 if (objectsize > MAXOBJECTSIZE) { 257 debuglog("Object of size %d being clamped" 258 "to size %d\n", objectsize, MAXOBJECTSIZE); 259 objectsize = MAXOBJECTSIZE; 260 } 261 262 if (hbuff != NULL) { 263 if (size_hbuff < objectsize*2+1) { 264 debuglog("Hbuff not large enough." 265 " Increase size\n"); 266 } else { 267 for(i=0;i<objectsize;i++) { 268 sprintf(hbuff+i*2,"%02x",*(object+i)); 269 } 270 *(hbuff+i*2) = '\0'; 271 } 272 } 273 274 if (cbuff != NULL) { 275 if (size_cbuff < objectsize+1) { 276 debuglog("Cbuff not large enough." 277 " Increase Size\n"); 278 } 279 280 for(i=0;i<objectsize;i++) { 281 if (*(object+i) >= 32 && *(object+i) <= 127) { 282 *(cbuff+i) = *(object+i); 283 } else { 284 *(cbuff+i) = '.'; 285 } 286 } 287 *(cbuff+i) = '\0'; 288 } 289 } 290 } 291 292 void 293 dump_netobj(const struct netobj *nobj) 294 { 295 char hbuff[MAXBUFFERSIZE*2]; 296 char cbuff[MAXBUFFERSIZE]; 297 298 if (debug_level < 2) { 299 return; 300 } 301 302 if (nobj == NULL) { 303 debuglog("Null netobj pointer\n"); 304 } 305 else if (nobj->n_len == 0) { 306 debuglog("Size zero netobj\n"); 307 } else { 308 dump_static_object(nobj->n_bytes, nobj->n_len, 309 hbuff, sizeof(hbuff), cbuff, sizeof(cbuff)); 310 debuglog("netobj: len: %d data: %s ::: %s\n", 311 nobj->n_len, hbuff, cbuff); 312 } 313 } 314 315 /* #define DUMP_FILELOCK_VERBOSE */ 316 void 317 dump_filelock(const struct file_lock *fl) 318 { 319 #ifdef DUMP_FILELOCK_VERBOSE 320 char hbuff[MAXBUFFERSIZE*2]; 321 char cbuff[MAXBUFFERSIZE]; 322 #endif 323 324 if (debug_level < 2) { 325 return; 326 } 327 328 if (fl != NULL) { 329 debuglog("Dumping file lock structure @ %p\n", fl); 330 331 #ifdef DUMP_FILELOCK_VERBOSE 332 dump_static_object((unsigned char *)&fl->filehandle, 333 sizeof(fl->filehandle), hbuff, sizeof(hbuff), 334 cbuff, sizeof(cbuff)); 335 debuglog("Filehandle: %8s ::: %8s\n", hbuff, cbuff); 336 #endif 337 338 debuglog("Dumping nlm4_holder:\n" 339 "exc: %x svid: %x offset:len %llx:%llx\n", 340 fl->client.exclusive, fl->client.svid, 341 fl->client.l_offset, fl->client.l_len); 342 343 #ifdef DUMP_FILELOCK_VERBOSE 344 debuglog("Dumping client identity:\n"); 345 dump_netobj(&fl->client.oh); 346 347 debuglog("Dumping client cookie:\n"); 348 dump_netobj(&fl->client_cookie); 349 350 debuglog("nsm: %d status: %d flags: %d svid: %x" 351 " client_name: %s\n", fl->nsm_status, fl->status, 352 fl->flags, fl->client.svid, fl->client_name); 353 #endif 354 } else { 355 debuglog("NULL file lock structure\n"); 356 } 357 } 358 359 void 360 copy_nlm4_lock_to_nlm4_holder(src, exclusive, dest) 361 const struct nlm4_lock *src; 362 const bool_t exclusive; 363 struct nlm4_holder *dest; 364 { 365 366 dest->exclusive = exclusive; 367 dest->oh.n_len = src->oh.n_len; 368 dest->oh.n_bytes = src->oh.n_bytes; 369 dest->svid = src->svid; 370 dest->l_offset = src->l_offset; 371 dest->l_len = src->l_len; 372 } 373 374 375 size_t 376 strnlen(const char *s, size_t len) 377 { 378 size_t n; 379 380 for (n = 0; s[n] != 0 && n < len; n++) 381 ; 382 return n; 383 } 384 385 /* 386 * allocate_file_lock: Create a lock with the given parameters 387 */ 388 389 struct file_lock * 390 allocate_file_lock(const netobj *lockowner, const netobj *matchcookie, 391 const struct sockaddr *addr, const char *caller_name) 392 { 393 struct file_lock *newfl; 394 size_t n; 395 396 /* Beware of rubbish input! */ 397 n = strnlen(caller_name, SM_MAXSTRLEN); 398 if (n == SM_MAXSTRLEN) { 399 return NULL; 400 } 401 402 newfl = malloc(sizeof(*newfl) - sizeof(newfl->client_name) + n + 1); 403 if (newfl == NULL) { 404 return NULL; 405 } 406 bzero(newfl, sizeof(*newfl) - sizeof(newfl->client_name)); 407 memcpy(newfl->client_name, caller_name, n); 408 newfl->client_name[n] = 0; 409 410 newfl->client.oh.n_bytes = malloc(lockowner->n_len); 411 if (newfl->client.oh.n_bytes == NULL) { 412 free(newfl); 413 return NULL; 414 } 415 newfl->client.oh.n_len = lockowner->n_len; 416 bcopy(lockowner->n_bytes, newfl->client.oh.n_bytes, lockowner->n_len); 417 418 newfl->client_cookie.n_bytes = malloc(matchcookie->n_len); 419 if (newfl->client_cookie.n_bytes == NULL) { 420 free(newfl->client.oh.n_bytes); 421 free(newfl); 422 return NULL; 423 } 424 newfl->client_cookie.n_len = matchcookie->n_len; 425 bcopy(matchcookie->n_bytes, newfl->client_cookie.n_bytes, matchcookie->n_len); 426 427 newfl->addr = malloc(addr->sa_len); 428 if (newfl->addr == NULL) { 429 free(newfl->client_cookie.n_bytes); 430 free(newfl->client.oh.n_bytes); 431 free(newfl); 432 return NULL; 433 } 434 memcpy(newfl->addr, addr, addr->sa_len); 435 436 return newfl; 437 } 438 439 /* 440 * file_file_lock: Force creation of a valid file lock 441 */ 442 void 443 fill_file_lock(struct file_lock *fl, const fhandle_t *fh, 444 const bool_t exclusive, const int32_t svid, 445 const u_int64_t offset, const u_int64_t len, 446 const int state, const int status, const int flags, const int blocking) 447 { 448 bcopy(fh, &fl->filehandle, sizeof(fhandle_t)); 449 450 fl->client.exclusive = exclusive; 451 fl->client.svid = svid; 452 fl->client.l_offset = offset; 453 fl->client.l_len = len; 454 455 fl->nsm_status = state; 456 fl->status = status; 457 fl->flags = flags; 458 fl->blocking = blocking; 459 } 460 461 /* 462 * deallocate_file_lock: Free all storage associated with a file lock 463 */ 464 void 465 deallocate_file_lock(struct file_lock *fl) 466 { 467 free(fl->addr); 468 free(fl->client.oh.n_bytes); 469 free(fl->client_cookie.n_bytes); 470 free(fl); 471 } 472 473 /* 474 * regions_overlap(): This function examines the two provided regions for 475 * overlap. 476 */ 477 int 478 regions_overlap(start1, len1, start2, len2) 479 const u_int64_t start1, len1, start2, len2; 480 { 481 u_int64_t d1,d2,d3,d4; 482 enum split_status result; 483 484 debuglog("Entering region overlap with vals: %llu:%llu--%llu:%llu\n", 485 start1, len1, start2, len2); 486 487 result = region_compare(start1, len1, start2, len2, 488 &d1, &d2, &d3, &d4); 489 490 debuglog("Exiting region overlap with val: %d\n",result); 491 492 if (result == SPL_DISJOINT) { 493 return 0; 494 } else { 495 return 1; 496 } 497 } 498 499 /* 500 * region_compare(): Examine lock regions and split appropriately 501 * 502 * XXX: Fix 64 bit overflow problems 503 * XXX: Check to make sure I got *ALL* the cases. 504 * XXX: This DESPERATELY needs a regression test. 505 */ 506 enum split_status 507 region_compare(starte, lene, startu, lenu, 508 start1, len1, start2, len2) 509 const u_int64_t starte, lene, startu, lenu; 510 u_int64_t *start1, *len1, *start2, *len2; 511 { 512 /* 513 * Please pay attention to the sequential exclusions 514 * of the if statements!!! 515 */ 516 enum LFLAGS lflags; 517 enum RFLAGS rflags; 518 enum split_status retval; 519 520 retval = SPL_DISJOINT; 521 522 if (lene == 0 && lenu == 0) { 523 /* Examine left edge of locker */ 524 lflags = LEDGE_INSIDE; 525 if (startu < starte) { 526 lflags = LEDGE_LEFT; 527 } else if (startu == starte) { 528 lflags = LEDGE_LBOUNDARY; 529 } 530 531 rflags = REDGE_RBOUNDARY; /* Both are infiinite */ 532 533 if (lflags == LEDGE_INSIDE) { 534 *start1 = starte; 535 *len1 = startu - starte; 536 } 537 538 if (lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) { 539 retval = SPL_CONTAINED; 540 } else { 541 retval = SPL_LOCK1; 542 } 543 } else if (lene == 0 && lenu != 0) { 544 /* Established lock is infinite */ 545 /* Examine left edge of unlocker */ 546 lflags = LEDGE_INSIDE; 547 if (startu < starte) { 548 lflags = LEDGE_LEFT; 549 } else if (startu == starte) { 550 lflags = LEDGE_LBOUNDARY; 551 } 552 553 /* Examine right edge of unlocker */ 554 if (startu + lenu < starte) { 555 /* Right edge of unlocker left of established lock */ 556 rflags = REDGE_LEFT; 557 return SPL_DISJOINT; 558 } else if (startu + lenu == starte) { 559 /* Right edge of unlocker on start of established lock */ 560 rflags = REDGE_LBOUNDARY; 561 return SPL_DISJOINT; 562 } else { /* Infinifty is right of finity */ 563 /* Right edge of unlocker inside established lock */ 564 rflags = REDGE_INSIDE; 565 } 566 567 if (lflags == LEDGE_INSIDE) { 568 *start1 = starte; 569 *len1 = startu - starte; 570 retval |= SPL_LOCK1; 571 } 572 573 if (rflags == REDGE_INSIDE) { 574 /* Create right lock */ 575 *start2 = startu+lenu; 576 *len2 = 0; 577 retval |= SPL_LOCK2; 578 } 579 } else if (lene != 0 && lenu == 0) { 580 /* Unlocker is infinite */ 581 /* Examine left edge of unlocker */ 582 lflags = LEDGE_RIGHT; 583 if (startu < starte) { 584 lflags = LEDGE_LEFT; 585 retval = SPL_CONTAINED; 586 return retval; 587 } else if (startu == starte) { 588 lflags = LEDGE_LBOUNDARY; 589 retval = SPL_CONTAINED; 590 return retval; 591 } else if ((startu > starte) && (startu < starte + lene - 1)) { 592 lflags = LEDGE_INSIDE; 593 } else if (startu == starte + lene - 1) { 594 lflags = LEDGE_RBOUNDARY; 595 } else { /* startu > starte + lene -1 */ 596 lflags = LEDGE_RIGHT; 597 return SPL_DISJOINT; 598 } 599 600 rflags = REDGE_RIGHT; /* Infinity is right of finity */ 601 602 if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) { 603 *start1 = starte; 604 *len1 = startu - starte; 605 retval |= SPL_LOCK1; 606 return retval; 607 } 608 } else { 609 /* Both locks are finite */ 610 611 /* Examine left edge of unlocker */ 612 lflags = LEDGE_RIGHT; 613 if (startu < starte) { 614 lflags = LEDGE_LEFT; 615 } else if (startu == starte) { 616 lflags = LEDGE_LBOUNDARY; 617 } else if ((startu > starte) && (startu < starte + lene - 1)) { 618 lflags = LEDGE_INSIDE; 619 } else if (startu == starte + lene - 1) { 620 lflags = LEDGE_RBOUNDARY; 621 } else { /* startu > starte + lene -1 */ 622 lflags = LEDGE_RIGHT; 623 return SPL_DISJOINT; 624 } 625 626 /* Examine right edge of unlocker */ 627 if (startu + lenu < starte) { 628 /* Right edge of unlocker left of established lock */ 629 rflags = REDGE_LEFT; 630 return SPL_DISJOINT; 631 } else if (startu + lenu == starte) { 632 /* Right edge of unlocker on start of established lock */ 633 rflags = REDGE_LBOUNDARY; 634 return SPL_DISJOINT; 635 } else if (startu + lenu < starte + lene) { 636 /* Right edge of unlocker inside established lock */ 637 rflags = REDGE_INSIDE; 638 } else if (startu + lenu == starte + lene) { 639 /* Right edge of unlocker on right edge of established lock */ 640 rflags = REDGE_RBOUNDARY; 641 } else { /* startu + lenu > starte + lene */ 642 /* Right edge of unlocker is right of established lock */ 643 rflags = REDGE_RIGHT; 644 } 645 646 if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) { 647 /* Create left lock */ 648 *start1 = starte; 649 *len1 = (startu - starte); 650 retval |= SPL_LOCK1; 651 } 652 653 if (rflags == REDGE_INSIDE) { 654 /* Create right lock */ 655 *start2 = startu+lenu; 656 *len2 = starte+lene-(startu+lenu); 657 retval |= SPL_LOCK2; 658 } 659 660 if ((lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) && 661 (rflags == REDGE_RBOUNDARY || rflags == REDGE_RIGHT)) { 662 retval = SPL_CONTAINED; 663 } 664 } 665 return retval; 666 } 667 668 /* 669 * same_netobj: Compares the apprpriate bits of a netobj for identity 670 */ 671 int 672 same_netobj(const netobj *n0, const netobj *n1) 673 { 674 int retval; 675 676 retval = 0; 677 678 debuglog("Entering netobj identity check\n"); 679 680 if (n0->n_len == n1->n_len) { 681 debuglog("Preliminary length check passed\n"); 682 retval = !bcmp(n0->n_bytes, n1->n_bytes, n0->n_len); 683 debuglog("netobj %smatch\n", retval ? "" : "mis"); 684 } 685 686 return (retval); 687 } 688 689 /* 690 * same_filelock_identity: Compares the appropriate bits of a file_lock 691 */ 692 int 693 same_filelock_identity(fl0, fl1) 694 const struct file_lock *fl0, *fl1; 695 { 696 int retval; 697 698 retval = 0; 699 700 debuglog("Checking filelock identity\n"); 701 702 /* 703 * Check process ids and host information. 704 */ 705 retval = (fl0->client.svid == fl1->client.svid && 706 same_netobj(&(fl0->client.oh), &(fl1->client.oh))); 707 708 debuglog("Exiting checking filelock identity: retval: %d\n",retval); 709 710 return (retval); 711 } 712 713 /* 714 * Below here are routines associated with manipulating the NFS 715 * lock list. 716 */ 717 718 /* 719 * get_lock_matching_unlock: Return a lock which matches the given unlock lock 720 * or NULL otehrwise 721 * XXX: It is a shame that this duplicates so much code from test_nfslock. 722 */ 723 struct file_lock * 724 get_lock_matching_unlock(const struct file_lock *fl) 725 { 726 struct file_lock *ifl; /* Iterator */ 727 728 debuglog("Entering get_lock_matching_unlock\n"); 729 debuglog("********Dump of fl*****************\n"); 730 dump_filelock(fl); 731 732 LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) { 733 debuglog("Pointer to file lock: %p\n",ifl); 734 735 debuglog("****Dump of ifl****\n"); 736 dump_filelock(ifl); 737 debuglog("*******************\n"); 738 739 /* 740 * XXX: It is conceivable that someone could use the NLM RPC 741 * system to directly access filehandles. This may be a 742 * security hazard as the filehandle code may bypass normal 743 * file access controls 744 */ 745 if (bcmp(&fl->filehandle, &ifl->filehandle, sizeof(fhandle_t))) 746 continue; 747 748 debuglog("get_lock_matching_unlock: Filehandles match, " 749 "checking regions\n"); 750 751 /* Filehandles match, check for region overlap */ 752 if (!regions_overlap(fl->client.l_offset, fl->client.l_len, 753 ifl->client.l_offset, ifl->client.l_len)) 754 continue; 755 756 debuglog("get_lock_matching_unlock: Region overlap" 757 " found %llu : %llu -- %llu : %llu\n", 758 fl->client.l_offset,fl->client.l_len, 759 ifl->client.l_offset,ifl->client.l_len); 760 761 /* Regions overlap, check the identity */ 762 if (!same_filelock_identity(fl,ifl)) 763 continue; 764 765 debuglog("get_lock_matching_unlock: Duplicate lock id. Granting\n"); 766 return (ifl); 767 } 768 769 debuglog("Exiting bet_lock_matching_unlock\n"); 770 771 return (NULL); 772 } 773 774 /* 775 * test_nfslock: check for NFS lock in lock list 776 * 777 * This routine makes the following assumptions: 778 * 1) Nothing will adjust the lock list during a lookup 779 * 780 * This routine has an intersting quirk which bit me hard. 781 * The conflicting_fl is the pointer to the conflicting lock. 782 * However, to modify the "*pointer* to the conflicting lock" rather 783 * that the "conflicting lock itself" one must pass in a "pointer to 784 * the pointer of the conflicting lock". Gross. 785 */ 786 787 enum nfslock_status 788 test_nfslock(const struct file_lock *fl, struct file_lock **conflicting_fl) 789 { 790 struct file_lock *ifl; /* Iterator */ 791 enum nfslock_status retval; 792 793 debuglog("Entering test_nfslock\n"); 794 795 retval = NFS_GRANTED; 796 (*conflicting_fl) = NULL; 797 798 debuglog("Entering lock search loop\n"); 799 800 debuglog("***********************************\n"); 801 debuglog("Dumping match filelock\n"); 802 debuglog("***********************************\n"); 803 dump_filelock(fl); 804 debuglog("***********************************\n"); 805 806 LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) { 807 if (retval == NFS_DENIED) 808 break; 809 810 debuglog("Top of lock loop\n"); 811 debuglog("Pointer to file lock: %p\n",ifl); 812 813 debuglog("***********************************\n"); 814 debuglog("Dumping test filelock\n"); 815 debuglog("***********************************\n"); 816 dump_filelock(ifl); 817 debuglog("***********************************\n"); 818 819 /* 820 * XXX: It is conceivable that someone could use the NLM RPC 821 * system to directly access filehandles. This may be a 822 * security hazard as the filehandle code may bypass normal 823 * file access controls 824 */ 825 if (bcmp(&fl->filehandle, &ifl->filehandle, sizeof(fhandle_t))) 826 continue; 827 828 debuglog("test_nfslock: filehandle match found\n"); 829 830 /* Filehandles match, check for region overlap */ 831 if (!regions_overlap(fl->client.l_offset, fl->client.l_len, 832 ifl->client.l_offset, ifl->client.l_len)) 833 continue; 834 835 debuglog("test_nfslock: Region overlap found" 836 " %llu : %llu -- %llu : %llu\n", 837 fl->client.l_offset,fl->client.l_len, 838 ifl->client.l_offset,ifl->client.l_len); 839 840 /* Regions overlap, check the exclusivity */ 841 if (!(fl->client.exclusive || ifl->client.exclusive)) 842 continue; 843 844 debuglog("test_nfslock: Exclusivity failure: %d %d\n", 845 fl->client.exclusive, 846 ifl->client.exclusive); 847 848 if (same_filelock_identity(fl,ifl)) { 849 debuglog("test_nfslock: Duplicate id. Granting\n"); 850 (*conflicting_fl) = ifl; 851 retval = NFS_GRANTED_DUPLICATE; 852 } else { 853 /* locking attempt fails */ 854 debuglog("test_nfslock: Lock attempt failed\n"); 855 debuglog("Desired lock\n"); 856 dump_filelock(fl); 857 debuglog("Conflicting lock\n"); 858 dump_filelock(ifl); 859 (*conflicting_fl) = ifl; 860 retval = NFS_DENIED; 861 } 862 } 863 864 debuglog("Dumping file locks\n"); 865 debuglog("Exiting test_nfslock\n"); 866 867 return (retval); 868 } 869 870 /* 871 * lock_nfslock: attempt to create a lock in the NFS lock list 872 * 873 * This routine tests whether the lock will be granted and then adds 874 * the entry to the lock list if so. 875 * 876 * Argument fl gets modified as its list housekeeping entries get modified 877 * upon insertion into the NFS lock list 878 * 879 * This routine makes several assumptions: 880 * 1) It is perfectly happy to grant a duplicate lock from the same pid. 881 * While this seems to be intuitively wrong, it is required for proper 882 * Posix semantics during unlock. It is absolutely imperative to not 883 * unlock the main lock before the two child locks are established. Thus, 884 * one has be be able to create duplicate locks over an existing lock 885 * 2) It currently accepts duplicate locks from the same id,pid 886 */ 887 888 enum nfslock_status 889 lock_nfslock(struct file_lock *fl) 890 { 891 enum nfslock_status retval; 892 struct file_lock *dummy_fl; 893 894 dummy_fl = NULL; 895 896 debuglog("Entering lock_nfslock...\n"); 897 898 retval = test_nfslock(fl,&dummy_fl); 899 900 if (retval == NFS_GRANTED || retval == NFS_GRANTED_DUPLICATE) { 901 debuglog("Inserting lock...\n"); 902 dump_filelock(fl); 903 LIST_INSERT_HEAD(&nfslocklist_head, fl, nfslocklist); 904 } 905 906 debuglog("Exiting lock_nfslock...\n"); 907 908 return (retval); 909 } 910 911 /* 912 * delete_nfslock: delete an NFS lock list entry 913 * 914 * This routine is used to delete a lock out of the NFS lock list 915 * without regard to status, underlying locks, regions or anything else 916 * 917 * Note that this routine *does not deallocate memory* of the lock. 918 * It just disconnects it from the list. The lock can then be used 919 * by other routines without fear of trashing the list. 920 */ 921 922 enum nfslock_status 923 delete_nfslock(struct file_lock *fl) 924 { 925 926 LIST_REMOVE(fl, nfslocklist); 927 928 return (NFS_GRANTED); 929 } 930 931 enum split_status 932 split_nfslock(exist_lock, unlock_lock, left_lock, right_lock) 933 const struct file_lock *exist_lock, *unlock_lock; 934 struct file_lock **left_lock, **right_lock; 935 { 936 u_int64_t start1, len1, start2, len2; 937 enum split_status spstatus; 938 939 spstatus = region_compare(exist_lock->client.l_offset, exist_lock->client.l_len, 940 unlock_lock->client.l_offset, unlock_lock->client.l_len, 941 &start1, &len1, &start2, &len2); 942 943 if ((spstatus & SPL_LOCK1) != 0) { 944 *left_lock = allocate_file_lock(&exist_lock->client.oh, &exist_lock->client_cookie, exist_lock->addr, exist_lock->client_name); 945 if (*left_lock == NULL) { 946 debuglog("Unable to allocate resource for split 1\n"); 947 return SPL_RESERR; 948 } 949 950 fill_file_lock(*left_lock, &exist_lock->filehandle, 951 exist_lock->client.exclusive, exist_lock->client.svid, 952 start1, len1, 953 exist_lock->nsm_status, 954 exist_lock->status, exist_lock->flags, exist_lock->blocking); 955 } 956 957 if ((spstatus & SPL_LOCK2) != 0) { 958 *right_lock = allocate_file_lock(&exist_lock->client.oh, &exist_lock->client_cookie, exist_lock->addr, exist_lock->client_name); 959 if (*right_lock == NULL) { 960 debuglog("Unable to allocate resource for split 1\n"); 961 if (*left_lock != NULL) { 962 deallocate_file_lock(*left_lock); 963 } 964 return SPL_RESERR; 965 } 966 967 fill_file_lock(*right_lock, &exist_lock->filehandle, 968 exist_lock->client.exclusive, exist_lock->client.svid, 969 start2, len2, 970 exist_lock->nsm_status, 971 exist_lock->status, exist_lock->flags, exist_lock->blocking); 972 } 973 974 return spstatus; 975 } 976 977 enum nfslock_status 978 unlock_nfslock(fl, released_lock, left_lock, right_lock) 979 const struct file_lock *fl; 980 struct file_lock **released_lock; 981 struct file_lock **left_lock; 982 struct file_lock **right_lock; 983 { 984 struct file_lock *mfl; /* Matching file lock */ 985 enum nfslock_status retval; 986 enum split_status spstatus; 987 988 debuglog("Entering unlock_nfslock\n"); 989 990 *released_lock = NULL; 991 *left_lock = NULL; 992 *right_lock = NULL; 993 994 retval = NFS_DENIED_NOLOCK; 995 996 debuglog("Attempting to match lock...\n"); 997 mfl = get_lock_matching_unlock(fl); 998 999 if (mfl != NULL) { 1000 debuglog("Unlock matched. Querying for split\n"); 1001 1002 spstatus = split_nfslock(mfl, fl, left_lock, right_lock); 1003 1004 debuglog("Split returned %d %p %p %p %p\n",spstatus,mfl,fl,*left_lock,*right_lock); 1005 debuglog("********Split dumps********"); 1006 dump_filelock(mfl); 1007 dump_filelock(fl); 1008 dump_filelock(*left_lock); 1009 dump_filelock(*right_lock); 1010 debuglog("********End Split dumps********"); 1011 1012 if (spstatus == SPL_RESERR) { 1013 if (*left_lock != NULL) { 1014 deallocate_file_lock(*left_lock); 1015 *left_lock = NULL; 1016 } 1017 1018 if (*right_lock != NULL) { 1019 deallocate_file_lock(*right_lock); 1020 *right_lock = NULL; 1021 } 1022 1023 return NFS_RESERR; 1024 } 1025 1026 /* Insert new locks from split if required */ 1027 if (*left_lock != NULL) { 1028 debuglog("Split left activated\n"); 1029 LIST_INSERT_HEAD(&nfslocklist_head, *left_lock, nfslocklist); 1030 } 1031 1032 if (*right_lock != NULL) { 1033 debuglog("Split right activated\n"); 1034 LIST_INSERT_HEAD(&nfslocklist_head, *right_lock, nfslocklist); 1035 } 1036 1037 /* Unlock the lock since it matches identity */ 1038 LIST_REMOVE(mfl, nfslocklist); 1039 *released_lock = mfl; 1040 retval = NFS_GRANTED; 1041 } 1042 1043 debuglog("Exiting unlock_nfslock\n"); 1044 1045 return retval; 1046 } 1047 1048 /* 1049 * Below here are the routines for manipulating the file lock directly 1050 * on the disk hardware itself 1051 */ 1052 enum hwlock_status 1053 lock_hwlock(struct file_lock *fl) 1054 { 1055 struct monfile *imf,*nmf; 1056 int lflags, flerror; 1057 1058 /* Scan to see if filehandle already present */ 1059 LIST_FOREACH(imf, &monfilelist_head, monfilelist) { 1060 if (bcmp(&fl->filehandle, &imf->filehandle, 1061 sizeof(fl->filehandle)) == 0) { 1062 /* imf is the correct filehandle */ 1063 break; 1064 } 1065 } 1066 1067 /* 1068 * Filehandle already exists (we control the file) 1069 * *AND* NFS has already cleared the lock for availability 1070 * Grant it and bump the refcount. 1071 */ 1072 if (imf != NULL) { 1073 ++(imf->refcount); 1074 return (HW_GRANTED); 1075 } 1076 1077 /* No filehandle found, create and go */ 1078 nmf = malloc(sizeof(struct monfile)); 1079 if (nmf == NULL) { 1080 debuglog("hwlock resource allocation failure\n"); 1081 return (HW_RESERR); 1082 } 1083 1084 /* XXX: Is O_RDWR always the correct mode? */ 1085 nmf->fd = fhopen(&fl->filehandle, O_RDWR); 1086 if (nmf->fd < 0) { 1087 debuglog("fhopen failed (from %16s): %32s\n", 1088 fl->client_name, strerror(errno)); 1089 free(nmf); 1090 switch (errno) { 1091 case ESTALE: 1092 return (HW_STALEFH); 1093 case EROFS: 1094 return (HW_READONLY); 1095 default: 1096 return (HW_RESERR); 1097 } 1098 } 1099 1100 /* File opened correctly, fill the monitor struct */ 1101 bcopy(&fl->filehandle, &nmf->filehandle, sizeof(fl->filehandle)); 1102 nmf->refcount = 1; 1103 nmf->exclusive = fl->client.exclusive; 1104 1105 lflags = (nmf->exclusive == 1) ? 1106 (LOCK_EX | LOCK_NB) : (LOCK_SH | LOCK_NB); 1107 1108 flerror = flock(nmf->fd, lflags); 1109 1110 if (flerror != 0) { 1111 debuglog("flock failed (from %16s): %32s\n", 1112 fl->client_name, strerror(errno)); 1113 close(nmf->fd); 1114 free(nmf); 1115 switch (errno) { 1116 case EAGAIN: 1117 return (HW_DENIED); 1118 case ESTALE: 1119 return (HW_STALEFH); 1120 case EROFS: 1121 return (HW_READONLY); 1122 default: 1123 return (HW_RESERR); 1124 break; 1125 } 1126 } 1127 1128 /* File opened and locked */ 1129 LIST_INSERT_HEAD(&monfilelist_head, nmf, monfilelist); 1130 1131 debuglog("flock succeeded (from %16s)\n", fl->client_name); 1132 return (HW_GRANTED); 1133 } 1134 1135 enum hwlock_status 1136 unlock_hwlock(const struct file_lock *fl) 1137 { 1138 struct monfile *imf; 1139 1140 debuglog("Entering unlock_hwlock\n"); 1141 debuglog("Entering loop interation\n"); 1142 1143 /* Scan to see if filehandle already present */ 1144 LIST_FOREACH(imf, &monfilelist_head, monfilelist) { 1145 if (bcmp(&fl->filehandle, &imf->filehandle, 1146 sizeof(fl->filehandle)) == 0) { 1147 /* imf is the correct filehandle */ 1148 break; 1149 } 1150 } 1151 1152 debuglog("Completed iteration. Proceeding\n"); 1153 1154 if (imf == NULL) { 1155 /* No lock found */ 1156 debuglog("Exiting unlock_hwlock (HW_DENIED_NOLOCK)\n"); 1157 return (HW_DENIED_NOLOCK); 1158 } 1159 1160 /* Lock found */ 1161 --imf->refcount; 1162 1163 if (imf->refcount < 0) { 1164 debuglog("Negative hardware reference count\n"); 1165 } 1166 1167 if (imf->refcount <= 0) { 1168 close(imf->fd); 1169 LIST_REMOVE(imf, monfilelist); 1170 free(imf); 1171 } 1172 debuglog("Exiting unlock_hwlock (HW_GRANTED)\n"); 1173 return (HW_GRANTED); 1174 } 1175 1176 enum hwlock_status 1177 test_hwlock(fl, conflicting_fl) 1178 const struct file_lock *fl __unused; 1179 struct file_lock **conflicting_fl __unused; 1180 { 1181 1182 /* 1183 * XXX: lock tests on hardware are not required until 1184 * true partial file testing is done on the underlying file 1185 */ 1186 return (HW_RESERR); 1187 } 1188 1189 1190 1191 /* 1192 * Below here are routines for manipulating blocked lock requests 1193 * They should only be called from the XXX_partialfilelock routines 1194 * if at all possible 1195 */ 1196 1197 int 1198 duplicate_block(struct file_lock *fl) 1199 { 1200 struct file_lock *ifl; 1201 int retval = 0; 1202 1203 debuglog("Entering duplicate_block"); 1204 1205 /* 1206 * Is this lock request already on the blocking list? 1207 * Consider it a dupe if the file handles, offset, length, 1208 * exclusivity and client match. 1209 */ 1210 LIST_FOREACH(ifl, &blockedlocklist_head, nfslocklist) { 1211 if (!bcmp(&fl->filehandle, &ifl->filehandle, 1212 sizeof(fhandle_t)) && 1213 fl->client.exclusive == ifl->client.exclusive && 1214 fl->client.l_offset == ifl->client.l_offset && 1215 fl->client.l_len == ifl->client.l_len && 1216 same_filelock_identity(fl, ifl)) { 1217 retval = 1; 1218 break; 1219 } 1220 } 1221 1222 debuglog("Exiting duplicate_block: %s\n", retval ? "already blocked" 1223 : "not already blocked"); 1224 return retval; 1225 } 1226 1227 void 1228 add_blockingfilelock(struct file_lock *fl) 1229 { 1230 debuglog("Entering add_blockingfilelock\n"); 1231 1232 /* 1233 * A blocking lock request _should_ never be duplicated as a client 1234 * that is already blocked shouldn't be able to request another 1235 * lock. Alas, there are some buggy clients that do request the same 1236 * lock repeatedly. Make sure only unique locks are on the blocked 1237 * lock list. 1238 */ 1239 if (duplicate_block(fl)) { 1240 debuglog("Exiting add_blockingfilelock: already blocked\n"); 1241 return; 1242 } 1243 1244 /* 1245 * Clear the blocking flag so that it can be reused without 1246 * adding it to the blocking queue a second time 1247 */ 1248 1249 fl->blocking = 0; 1250 LIST_INSERT_HEAD(&blockedlocklist_head, fl, nfslocklist); 1251 1252 debuglog("Exiting add_blockingfilelock: added blocked lock\n"); 1253 } 1254 1255 void 1256 remove_blockingfilelock(struct file_lock *fl) 1257 { 1258 1259 debuglog("Entering remove_blockingfilelock\n"); 1260 1261 LIST_REMOVE(fl, nfslocklist); 1262 1263 debuglog("Exiting remove_blockingfilelock\n"); 1264 } 1265 1266 void 1267 clear_blockingfilelock(const char *hostname) 1268 { 1269 struct file_lock *ifl,*nfl; 1270 1271 /* 1272 * Normally, LIST_FOREACH is called for, but since 1273 * the current element *is* the iterator, deleting it 1274 * would mess up the iteration. Thus, a next element 1275 * must be used explicitly 1276 */ 1277 1278 ifl = LIST_FIRST(&blockedlocklist_head); 1279 1280 while (ifl != NULL) { 1281 nfl = LIST_NEXT(ifl, nfslocklist); 1282 1283 if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) { 1284 remove_blockingfilelock(ifl); 1285 deallocate_file_lock(ifl); 1286 } 1287 1288 ifl = nfl; 1289 } 1290 } 1291 1292 void 1293 retry_blockingfilelocklist(void) 1294 { 1295 /* Retry all locks in the blocked list */ 1296 struct file_lock *ifl, *nfl; /* Iterator */ 1297 enum partialfilelock_status pflstatus; 1298 1299 debuglog("Entering retry_blockingfilelocklist\n"); 1300 1301 LIST_FOREACH_SAFE(ifl, &blockedlocklist_head, nfslocklist, nfl) { 1302 debuglog("Iterator choice %p\n",ifl); 1303 debuglog("Next iterator choice %p\n",nfl); 1304 1305 /* 1306 * SUBTLE BUG: The file_lock must be removed from the 1307 * old list so that it's list pointers get disconnected 1308 * before being allowed to participate in the new list 1309 * which will automatically add it in if necessary. 1310 */ 1311 1312 LIST_REMOVE(ifl, nfslocklist); 1313 pflstatus = lock_partialfilelock(ifl); 1314 1315 if (pflstatus == PFL_GRANTED || pflstatus == PFL_GRANTED_DUPLICATE) { 1316 debuglog("Granted blocked lock\n"); 1317 /* lock granted and is now being used */ 1318 send_granted(ifl,0); 1319 } else { 1320 /* Reinsert lock back into blocked list */ 1321 debuglog("Replacing blocked lock\n"); 1322 LIST_INSERT_HEAD(&blockedlocklist_head, ifl, nfslocklist); 1323 } 1324 } 1325 1326 debuglog("Exiting retry_blockingfilelocklist\n"); 1327 } 1328 1329 /* 1330 * Below here are routines associated with manipulating all 1331 * aspects of the partial file locking system (list, hardware, etc.) 1332 */ 1333 1334 /* 1335 * Please note that lock monitoring must be done at this level which 1336 * keeps track of *individual* lock requests on lock and unlock 1337 * 1338 * XXX: Split unlocking is going to make the unlock code miserable 1339 */ 1340 1341 /* 1342 * lock_partialfilelock: 1343 * 1344 * Argument fl gets modified as its list housekeeping entries get modified 1345 * upon insertion into the NFS lock list 1346 * 1347 * This routine makes several assumptions: 1348 * 1) It (will) pass locks through to flock to lock the entire underlying file 1349 * and then parcel out NFS locks if it gets control of the file. 1350 * This matches the old rpc.lockd file semantics (except where it 1351 * is now more correct). It is the safe solution, but will cause 1352 * overly restrictive blocking if someone is trying to use the 1353 * underlying files without using NFS. This appears to be an 1354 * acceptable tradeoff since most people use standalone NFS servers. 1355 * XXX: The right solution is probably kevent combined with fcntl 1356 * 1357 * 2) Nothing modifies the lock lists between testing and granting 1358 * I have no idea whether this is a useful assumption or not 1359 */ 1360 1361 enum partialfilelock_status 1362 lock_partialfilelock(struct file_lock *fl) 1363 { 1364 enum partialfilelock_status retval; 1365 enum nfslock_status lnlstatus; 1366 enum hwlock_status hwstatus; 1367 1368 debuglog("Entering lock_partialfilelock\n"); 1369 1370 retval = PFL_DENIED; 1371 1372 /* 1373 * Execute the NFS lock first, if possible, as it is significantly 1374 * easier and less expensive to undo than the filesystem lock 1375 */ 1376 1377 lnlstatus = lock_nfslock(fl); 1378 1379 switch (lnlstatus) { 1380 case NFS_GRANTED: 1381 case NFS_GRANTED_DUPLICATE: 1382 /* 1383 * At this point, the NFS lock is allocated and active. 1384 * Remember to clean it up if the hardware lock fails 1385 */ 1386 hwstatus = lock_hwlock(fl); 1387 1388 switch (hwstatus) { 1389 case HW_GRANTED: 1390 case HW_GRANTED_DUPLICATE: 1391 debuglog("HW GRANTED\n"); 1392 /* 1393 * XXX: Fixme: Check hwstatus for duplicate when 1394 * true partial file locking and accounting is 1395 * done on the hardware. 1396 */ 1397 if (lnlstatus == NFS_GRANTED_DUPLICATE) { 1398 retval = PFL_GRANTED_DUPLICATE; 1399 } else { 1400 retval = PFL_GRANTED; 1401 } 1402 monitor_lock_host(fl->client_name); 1403 break; 1404 case HW_RESERR: 1405 debuglog("HW RESERR\n"); 1406 retval = PFL_HWRESERR; 1407 break; 1408 case HW_DENIED: 1409 debuglog("HW DENIED\n"); 1410 retval = PFL_HWDENIED; 1411 break; 1412 default: 1413 debuglog("Unmatched hwstatus %d\n",hwstatus); 1414 break; 1415 } 1416 1417 if (retval != PFL_GRANTED && 1418 retval != PFL_GRANTED_DUPLICATE) { 1419 /* Clean up the NFS lock */ 1420 debuglog("Deleting trial NFS lock\n"); 1421 delete_nfslock(fl); 1422 } 1423 break; 1424 case NFS_DENIED: 1425 retval = PFL_NFSDENIED; 1426 break; 1427 case NFS_RESERR: 1428 retval = PFL_NFSRESERR; 1429 break; 1430 default: 1431 debuglog("Unmatched lnlstatus %d\n"); 1432 retval = PFL_NFSDENIED_NOLOCK; 1433 break; 1434 } 1435 1436 /* 1437 * By the time fl reaches here, it is completely free again on 1438 * failure. The NFS lock done before attempting the 1439 * hardware lock has been backed out 1440 */ 1441 1442 if (retval == PFL_NFSDENIED || retval == PFL_HWDENIED) { 1443 /* Once last chance to check the lock */ 1444 if (fl->blocking == 1) { 1445 if (retval == PFL_NFSDENIED) { 1446 /* Queue the lock */ 1447 debuglog("BLOCKING LOCK RECEIVED\n"); 1448 retval = PFL_NFSBLOCKED; 1449 add_blockingfilelock(fl); 1450 dump_filelock(fl); 1451 } else { 1452 /* retval is okay as PFL_HWDENIED */ 1453 debuglog("BLOCKING LOCK DENIED IN HARDWARE\n"); 1454 dump_filelock(fl); 1455 } 1456 } else { 1457 /* Leave retval alone, it's already correct */ 1458 debuglog("Lock denied. Non-blocking failure\n"); 1459 dump_filelock(fl); 1460 } 1461 } 1462 1463 debuglog("Exiting lock_partialfilelock\n"); 1464 1465 return retval; 1466 } 1467 1468 /* 1469 * unlock_partialfilelock: 1470 * 1471 * Given a file_lock, unlock all locks which match. 1472 * 1473 * Note that a given lock might have to unlock ITSELF! See 1474 * clear_partialfilelock for example. 1475 */ 1476 1477 enum partialfilelock_status 1478 unlock_partialfilelock(const struct file_lock *fl) 1479 { 1480 struct file_lock *lfl,*rfl,*releasedfl,*selffl; 1481 enum partialfilelock_status retval; 1482 enum nfslock_status unlstatus; 1483 enum hwlock_status unlhwstatus, lhwstatus; 1484 1485 debuglog("Entering unlock_partialfilelock\n"); 1486 1487 selffl = NULL; 1488 lfl = NULL; 1489 rfl = NULL; 1490 releasedfl = NULL; 1491 retval = PFL_DENIED; 1492 1493 /* 1494 * There are significant overlap and atomicity issues 1495 * with partially releasing a lock. For example, releasing 1496 * part of an NFS shared lock does *not* always release the 1497 * corresponding part of the file since there is only one 1498 * rpc.lockd UID but multiple users could be requesting it 1499 * from NFS. Also, an unlock request should never allow 1500 * another process to gain a lock on the remaining parts. 1501 * ie. Always apply the new locks before releasing the 1502 * old one 1503 */ 1504 1505 /* 1506 * Loop is required since multiple little locks 1507 * can be allocated and then deallocated with one 1508 * big unlock. 1509 * 1510 * The loop is required to be here so that the nfs & 1511 * hw subsystems do not need to communicate with one 1512 * one another 1513 */ 1514 1515 do { 1516 debuglog("Value of releasedfl: %p\n",releasedfl); 1517 /* lfl&rfl are created *AND* placed into the NFS lock list if required */ 1518 unlstatus = unlock_nfslock(fl, &releasedfl, &lfl, &rfl); 1519 debuglog("Value of releasedfl: %p\n",releasedfl); 1520 1521 1522 /* XXX: This is grungy. It should be refactored to be cleaner */ 1523 if (lfl != NULL) { 1524 lhwstatus = lock_hwlock(lfl); 1525 if (lhwstatus != HW_GRANTED && 1526 lhwstatus != HW_GRANTED_DUPLICATE) { 1527 debuglog("HW duplicate lock failure for left split\n"); 1528 } 1529 monitor_lock_host(lfl->client_name); 1530 } 1531 1532 if (rfl != NULL) { 1533 lhwstatus = lock_hwlock(rfl); 1534 if (lhwstatus != HW_GRANTED && 1535 lhwstatus != HW_GRANTED_DUPLICATE) { 1536 debuglog("HW duplicate lock failure for right split\n"); 1537 } 1538 monitor_lock_host(rfl->client_name); 1539 } 1540 1541 switch (unlstatus) { 1542 case NFS_GRANTED: 1543 /* Attempt to unlock on the hardware */ 1544 debuglog("NFS unlock granted. Attempting hardware unlock\n"); 1545 1546 /* This call *MUST NOT* unlock the two newly allocated locks */ 1547 unlhwstatus = unlock_hwlock(fl); 1548 debuglog("HW unlock returned with code %d\n",unlhwstatus); 1549 1550 switch (unlhwstatus) { 1551 case HW_GRANTED: 1552 debuglog("HW unlock granted\n"); 1553 unmonitor_lock_host(releasedfl->client_name); 1554 retval = PFL_GRANTED; 1555 break; 1556 case HW_DENIED_NOLOCK: 1557 /* Huh?!?! This shouldn't happen */ 1558 debuglog("HW unlock denied no lock\n"); 1559 retval = PFL_HWRESERR; 1560 /* Break out of do-while */ 1561 unlstatus = NFS_RESERR; 1562 break; 1563 default: 1564 debuglog("HW unlock failed\n"); 1565 retval = PFL_HWRESERR; 1566 /* Break out of do-while */ 1567 unlstatus = NFS_RESERR; 1568 break; 1569 } 1570 1571 debuglog("Exiting with status retval: %d\n",retval); 1572 1573 retry_blockingfilelocklist(); 1574 break; 1575 case NFS_DENIED_NOLOCK: 1576 retval = PFL_GRANTED; 1577 debuglog("All locks cleaned out\n"); 1578 break; 1579 default: 1580 retval = PFL_NFSRESERR; 1581 debuglog("NFS unlock failure\n"); 1582 dump_filelock(fl); 1583 break; 1584 } 1585 1586 if (releasedfl != NULL) { 1587 if (fl == releasedfl) { 1588 /* 1589 * XXX: YECHHH!!! Attempt to unlock self succeeded 1590 * but we can't deallocate the space yet. This is what 1591 * happens when you don't write malloc and free together 1592 */ 1593 debuglog("Attempt to unlock self\n"); 1594 selffl = releasedfl; 1595 } else { 1596 /* 1597 * XXX: this deallocation *still* needs to migrate closer 1598 * to the allocation code way up in get_lock or the allocation 1599 * code needs to migrate down (violation of "When you write 1600 * malloc you must write free") 1601 */ 1602 1603 deallocate_file_lock(releasedfl); 1604 releasedfl = NULL; 1605 } 1606 } 1607 1608 } while (unlstatus == NFS_GRANTED); 1609 1610 if (selffl != NULL) { 1611 /* 1612 * This statement wipes out the incoming file lock (fl) 1613 * in spite of the fact that it is declared const 1614 */ 1615 debuglog("WARNING! Destroying incoming lock pointer\n"); 1616 deallocate_file_lock(selffl); 1617 } 1618 1619 debuglog("Exiting unlock_partialfilelock\n"); 1620 1621 return retval; 1622 } 1623 1624 /* 1625 * clear_partialfilelock 1626 * 1627 * Normally called in response to statd state number change. 1628 * Wipe out all locks held by a host. As a bonus, the act of 1629 * doing so should automatically clear their statd entries and 1630 * unmonitor the host. 1631 */ 1632 1633 void 1634 clear_partialfilelock(const char *hostname) 1635 { 1636 struct file_lock *ifl, *nfl; 1637 1638 /* Clear blocking file lock list */ 1639 clear_blockingfilelock(hostname); 1640 1641 /* do all required unlocks */ 1642 /* Note that unlock can smash the current pointer to a lock */ 1643 1644 /* 1645 * Normally, LIST_FOREACH is called for, but since 1646 * the current element *is* the iterator, deleting it 1647 * would mess up the iteration. Thus, a next element 1648 * must be used explicitly 1649 */ 1650 1651 ifl = LIST_FIRST(&nfslocklist_head); 1652 1653 while (ifl != NULL) { 1654 nfl = LIST_NEXT(ifl, nfslocklist); 1655 1656 if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) { 1657 /* Unlock destroys ifl out from underneath */ 1658 unlock_partialfilelock(ifl); 1659 /* ifl is NO LONGER VALID AT THIS POINT */ 1660 } 1661 ifl = nfl; 1662 } 1663 } 1664 1665 /* 1666 * test_partialfilelock: 1667 */ 1668 enum partialfilelock_status 1669 test_partialfilelock(const struct file_lock *fl, 1670 struct file_lock **conflicting_fl) 1671 { 1672 enum partialfilelock_status retval; 1673 enum nfslock_status teststatus; 1674 1675 debuglog("Entering testpartialfilelock...\n"); 1676 1677 retval = PFL_DENIED; 1678 1679 teststatus = test_nfslock(fl, conflicting_fl); 1680 debuglog("test_partialfilelock: teststatus %d\n",teststatus); 1681 1682 if (teststatus == NFS_GRANTED || teststatus == NFS_GRANTED_DUPLICATE) { 1683 /* XXX: Add the underlying filesystem locking code */ 1684 retval = (teststatus == NFS_GRANTED) ? 1685 PFL_GRANTED : PFL_GRANTED_DUPLICATE; 1686 debuglog("Dumping locks...\n"); 1687 dump_filelock(fl); 1688 dump_filelock(*conflicting_fl); 1689 debuglog("Done dumping locks...\n"); 1690 } else { 1691 retval = PFL_NFSDENIED; 1692 debuglog("NFS test denied.\n"); 1693 dump_filelock(fl); 1694 debuglog("Conflicting.\n"); 1695 dump_filelock(*conflicting_fl); 1696 } 1697 1698 debuglog("Exiting testpartialfilelock...\n"); 1699 1700 return retval; 1701 } 1702 1703 /* 1704 * Below here are routines associated with translating the partial file locking 1705 * codes into useful codes to send back to the NFS RPC messaging system 1706 */ 1707 1708 /* 1709 * These routines translate the (relatively) useful return codes back onto 1710 * the few return codes which the nlm subsystems wishes to trasmit 1711 */ 1712 1713 enum nlm_stats 1714 do_test(struct file_lock *fl, struct file_lock **conflicting_fl) 1715 { 1716 enum partialfilelock_status pfsret; 1717 enum nlm_stats retval; 1718 1719 debuglog("Entering do_test...\n"); 1720 1721 pfsret = test_partialfilelock(fl,conflicting_fl); 1722 1723 switch (pfsret) { 1724 case PFL_GRANTED: 1725 debuglog("PFL test lock granted\n"); 1726 dump_filelock(fl); 1727 dump_filelock(*conflicting_fl); 1728 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted; 1729 break; 1730 case PFL_GRANTED_DUPLICATE: 1731 debuglog("PFL test lock granted--duplicate id detected\n"); 1732 dump_filelock(fl); 1733 dump_filelock(*conflicting_fl); 1734 debuglog("Clearing conflicting_fl for call semantics\n"); 1735 *conflicting_fl = NULL; 1736 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted; 1737 break; 1738 case PFL_NFSDENIED: 1739 case PFL_HWDENIED: 1740 debuglog("PFL test lock denied\n"); 1741 dump_filelock(fl); 1742 dump_filelock(*conflicting_fl); 1743 retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied; 1744 break; 1745 case PFL_NFSRESERR: 1746 case PFL_HWRESERR: 1747 debuglog("PFL test lock resource fail\n"); 1748 dump_filelock(fl); 1749 dump_filelock(*conflicting_fl); 1750 retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks; 1751 break; 1752 default: 1753 debuglog("PFL test lock *FAILED*\n"); 1754 dump_filelock(fl); 1755 dump_filelock(*conflicting_fl); 1756 retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied; 1757 break; 1758 } 1759 1760 debuglog("Exiting do_test...\n"); 1761 1762 return retval; 1763 } 1764 1765 /* 1766 * do_lock: Try to acquire a lock 1767 * 1768 * This routine makes a distinction between NLM versions. I am pretty 1769 * convinced that this should be abstracted out and bounced up a level 1770 */ 1771 1772 enum nlm_stats 1773 do_lock(struct file_lock *fl) 1774 { 1775 enum partialfilelock_status pfsret; 1776 enum nlm_stats retval; 1777 1778 debuglog("Entering do_lock...\n"); 1779 1780 pfsret = lock_partialfilelock(fl); 1781 1782 switch (pfsret) { 1783 case PFL_GRANTED: 1784 debuglog("PFL lock granted"); 1785 dump_filelock(fl); 1786 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted; 1787 break; 1788 case PFL_GRANTED_DUPLICATE: 1789 debuglog("PFL lock granted--duplicate id detected"); 1790 dump_filelock(fl); 1791 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted; 1792 break; 1793 case PFL_NFSDENIED: 1794 case PFL_HWDENIED: 1795 debuglog("PFL_NFS lock denied"); 1796 dump_filelock(fl); 1797 retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied; 1798 break; 1799 case PFL_NFSBLOCKED: 1800 case PFL_HWBLOCKED: 1801 debuglog("PFL_NFS blocking lock denied. Queued.\n"); 1802 dump_filelock(fl); 1803 retval = (fl->flags & LOCK_V4) ? nlm4_blocked : nlm_blocked; 1804 break; 1805 case PFL_NFSRESERR: 1806 case PFL_HWRESERR: 1807 debuglog("PFL lock resource alocation fail\n"); 1808 dump_filelock(fl); 1809 retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks; 1810 break; 1811 default: 1812 debuglog("PFL lock *FAILED*"); 1813 dump_filelock(fl); 1814 retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied; 1815 break; 1816 } 1817 1818 debuglog("Exiting do_lock...\n"); 1819 1820 return retval; 1821 } 1822 1823 enum nlm_stats 1824 do_unlock(struct file_lock *fl) 1825 { 1826 enum partialfilelock_status pfsret; 1827 enum nlm_stats retval; 1828 1829 debuglog("Entering do_unlock...\n"); 1830 pfsret = unlock_partialfilelock(fl); 1831 1832 switch (pfsret) { 1833 case PFL_GRANTED: 1834 debuglog("PFL unlock granted"); 1835 dump_filelock(fl); 1836 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted; 1837 break; 1838 case PFL_NFSDENIED: 1839 case PFL_HWDENIED: 1840 debuglog("PFL_NFS unlock denied"); 1841 dump_filelock(fl); 1842 retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied; 1843 break; 1844 case PFL_NFSDENIED_NOLOCK: 1845 case PFL_HWDENIED_NOLOCK: 1846 debuglog("PFL_NFS no lock found\n"); 1847 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted; 1848 break; 1849 case PFL_NFSRESERR: 1850 case PFL_HWRESERR: 1851 debuglog("PFL unlock resource failure"); 1852 dump_filelock(fl); 1853 retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks; 1854 break; 1855 default: 1856 debuglog("PFL unlock *FAILED*"); 1857 dump_filelock(fl); 1858 retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied; 1859 break; 1860 } 1861 1862 debuglog("Exiting do_unlock...\n"); 1863 1864 return retval; 1865 } 1866 1867 /* 1868 * do_clear 1869 * 1870 * This routine is non-existent because it doesn't have a return code. 1871 * It is here for completeness in case someone *does* need to do return 1872 * codes later. A decent compiler should optimize this away. 1873 */ 1874 1875 void 1876 do_clear(const char *hostname) 1877 { 1878 1879 clear_partialfilelock(hostname); 1880 } 1881 1882 /* 1883 * The following routines are all called from the code which the 1884 * RPC layer invokes 1885 */ 1886 1887 /* 1888 * testlock(): inform the caller if the requested lock would be granted 1889 * 1890 * returns NULL if lock would granted 1891 * returns pointer to a conflicting nlm4_holder if not 1892 */ 1893 1894 struct nlm4_holder * 1895 testlock(struct nlm4_lock *lock, bool_t exclusive, int flags __unused) 1896 { 1897 struct file_lock test_fl, *conflicting_fl; 1898 1899 bzero(&test_fl, sizeof(test_fl)); 1900 1901 bcopy(lock->fh.n_bytes, &(test_fl.filehandle), sizeof(fhandle_t)); 1902 copy_nlm4_lock_to_nlm4_holder(lock, exclusive, &test_fl.client); 1903 1904 siglock(); 1905 do_test(&test_fl, &conflicting_fl); 1906 1907 if (conflicting_fl == NULL) { 1908 debuglog("No conflicting lock found\n"); 1909 sigunlock(); 1910 return NULL; 1911 } else { 1912 debuglog("Found conflicting lock\n"); 1913 dump_filelock(conflicting_fl); 1914 sigunlock(); 1915 return (&conflicting_fl->client); 1916 } 1917 } 1918 1919 /* 1920 * getlock: try to acquire the lock. 1921 * If file is already locked and we can sleep, put the lock in the list with 1922 * status LKST_WAITING; it'll be processed later. 1923 * Otherwise try to lock. If we're allowed to block, fork a child which 1924 * will do the blocking lock. 1925 */ 1926 1927 enum nlm_stats 1928 getlock(nlm4_lockargs *lckarg, struct svc_req *rqstp, const int flags) 1929 { 1930 struct file_lock *newfl; 1931 enum nlm_stats retval; 1932 1933 debuglog("Entering getlock...\n"); 1934 1935 if (grace_expired == 0 && lckarg->reclaim == 0) 1936 return (flags & LOCK_V4) ? 1937 nlm4_denied_grace_period : nlm_denied_grace_period; 1938 1939 /* allocate new file_lock for this request */ 1940 newfl = allocate_file_lock(&lckarg->alock.oh, &lckarg->cookie, 1941 (struct sockaddr *)svc_getrpccaller(rqstp->rq_xprt)->buf, lckarg->alock.caller_name); 1942 if (newfl == NULL) { 1943 syslog(LOG_NOTICE, "lock allocate failed: %s", strerror(errno)); 1944 /* failed */ 1945 return (flags & LOCK_V4) ? 1946 nlm4_denied_nolocks : nlm_denied_nolocks; 1947 } 1948 1949 if (lckarg->alock.fh.n_len != sizeof(fhandle_t)) { 1950 debuglog("received fhandle size %d, local size %d", 1951 lckarg->alock.fh.n_len, (int)sizeof(fhandle_t)); 1952 } 1953 1954 fill_file_lock(newfl, (fhandle_t *)lckarg->alock.fh.n_bytes, 1955 lckarg->exclusive, lckarg->alock.svid, lckarg->alock.l_offset, 1956 lckarg->alock.l_len, 1957 lckarg->state, 0, flags, lckarg->block); 1958 1959 /* 1960 * newfl is now fully constructed and deallocate_file_lock 1961 * can now be used to delete it 1962 */ 1963 1964 siglock(); 1965 debuglog("Pointer to new lock is %p\n",newfl); 1966 1967 retval = do_lock(newfl); 1968 1969 debuglog("Pointer to new lock is %p\n",newfl); 1970 sigunlock(); 1971 1972 switch (retval) 1973 { 1974 case nlm4_granted: 1975 /* case nlm_granted: is the same as nlm4_granted */ 1976 /* do_mon(lckarg->alock.caller_name); */ 1977 break; 1978 case nlm4_blocked: 1979 /* case nlm_blocked: is the same as nlm4_blocked */ 1980 /* do_mon(lckarg->alock.caller_name); */ 1981 break; 1982 default: 1983 deallocate_file_lock(newfl); 1984 break; 1985 } 1986 1987 debuglog("Exiting getlock...\n"); 1988 1989 return retval; 1990 } 1991 1992 1993 /* unlock a filehandle */ 1994 enum nlm_stats 1995 unlock(nlm4_lock *lock, const int flags __unused) 1996 { 1997 struct file_lock fl; 1998 enum nlm_stats err; 1999 2000 siglock(); 2001 2002 debuglog("Entering unlock...\n"); 2003 2004 bzero(&fl,sizeof(struct file_lock)); 2005 bcopy(lock->fh.n_bytes, &fl.filehandle, sizeof(fhandle_t)); 2006 2007 copy_nlm4_lock_to_nlm4_holder(lock, 0, &fl.client); 2008 2009 err = do_unlock(&fl); 2010 2011 sigunlock(); 2012 2013 debuglog("Exiting unlock...\n"); 2014 2015 return err; 2016 } 2017 2018 /* 2019 * XXX: The following monitor/unmonitor routines 2020 * have not been extensively tested (ie. no regression 2021 * script exists like for the locking sections 2022 */ 2023 2024 /* 2025 * monitor_lock_host: monitor lock hosts locally with a ref count and 2026 * inform statd 2027 */ 2028 void 2029 monitor_lock_host(const char *hostname) 2030 { 2031 struct host *ihp, *nhp; 2032 struct mon smon; 2033 struct sm_stat_res sres; 2034 int rpcret, statflag; 2035 size_t n; 2036 2037 rpcret = 0; 2038 statflag = 0; 2039 2040 LIST_FOREACH(ihp, &hostlst_head, hostlst) { 2041 if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) { 2042 /* Host is already monitored, bump refcount */ 2043 ++ihp->refcnt; 2044 /* Host should only be in the monitor list once */ 2045 return; 2046 } 2047 } 2048 2049 /* Host is not yet monitored, add it */ 2050 n = strnlen(hostname, SM_MAXSTRLEN); 2051 if (n == SM_MAXSTRLEN) { 2052 return; 2053 } 2054 nhp = malloc(sizeof(*nhp) - sizeof(nhp->name) + n + 1); 2055 if (nhp == NULL) { 2056 debuglog("Unable to allocate entry for statd mon\n"); 2057 return; 2058 } 2059 2060 /* Allocated new host entry, now fill the fields */ 2061 memcpy(nhp->name, hostname, n); 2062 nhp->name[n] = 0; 2063 nhp->refcnt = 1; 2064 debuglog("Locally Monitoring host %16s\n",hostname); 2065 2066 debuglog("Attempting to tell statd\n"); 2067 2068 bzero(&smon,sizeof(smon)); 2069 2070 smon.mon_id.mon_name = nhp->name; 2071 smon.mon_id.my_id.my_name = "localhost"; 2072 smon.mon_id.my_id.my_prog = NLM_PROG; 2073 smon.mon_id.my_id.my_vers = NLM_SM; 2074 smon.mon_id.my_id.my_proc = NLM_SM_NOTIFY; 2075 2076 rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_MON, 2077 (xdrproc_t)xdr_mon, &smon, 2078 (xdrproc_t)xdr_sm_stat_res, &sres); 2079 2080 if (rpcret == 0) { 2081 if (sres.res_stat == stat_fail) { 2082 debuglog("Statd call failed\n"); 2083 statflag = 0; 2084 } else { 2085 statflag = 1; 2086 } 2087 } else { 2088 debuglog("Rpc call to statd failed with return value: %d\n", 2089 rpcret); 2090 statflag = 0; 2091 } 2092 2093 if (statflag == 1) { 2094 LIST_INSERT_HEAD(&hostlst_head, nhp, hostlst); 2095 } else { 2096 free(nhp); 2097 } 2098 2099 } 2100 2101 /* 2102 * unmonitor_lock_host: clear monitor ref counts and inform statd when gone 2103 */ 2104 void 2105 unmonitor_lock_host(char *hostname) 2106 { 2107 struct host *ihp; 2108 struct mon_id smon_id; 2109 struct sm_stat smstat; 2110 int rpcret; 2111 2112 rpcret = 0; 2113 2114 for( ihp=LIST_FIRST(&hostlst_head); ihp != NULL; 2115 ihp=LIST_NEXT(ihp, hostlst)) { 2116 if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) { 2117 /* Host is monitored, bump refcount */ 2118 --ihp->refcnt; 2119 /* Host should only be in the monitor list once */ 2120 break; 2121 } 2122 } 2123 2124 if (ihp == NULL) { 2125 debuglog("Could not find host %16s in mon list\n", hostname); 2126 return; 2127 } 2128 2129 if (ihp->refcnt > 0) 2130 return; 2131 2132 if (ihp->refcnt < 0) { 2133 debuglog("Negative refcount!: %d\n", 2134 ihp->refcnt); 2135 } 2136 2137 debuglog("Attempting to unmonitor host %16s\n", hostname); 2138 2139 bzero(&smon_id,sizeof(smon_id)); 2140 2141 smon_id.mon_name = hostname; 2142 smon_id.my_id.my_name = "localhost"; 2143 smon_id.my_id.my_prog = NLM_PROG; 2144 smon_id.my_id.my_vers = NLM_SM; 2145 smon_id.my_id.my_proc = NLM_SM_NOTIFY; 2146 2147 rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_UNMON, 2148 (xdrproc_t)xdr_mon_id, &smon_id, 2149 (xdrproc_t)xdr_sm_stat, &smstat); 2150 2151 if (rpcret != 0) { 2152 debuglog("Rpc call to unmonitor statd failed with " 2153 " return value: %d\n", rpcret); 2154 } 2155 2156 LIST_REMOVE(ihp, hostlst); 2157 free(ihp); 2158 } 2159 2160 /* 2161 * notify: Clear all locks from a host if statd complains 2162 * 2163 * XXX: This routine has not been thoroughly tested. However, neither 2164 * had the old one been. It used to compare the statd crash state counter 2165 * to the current lock state. The upshot of this was that it basically 2166 * cleared all locks from the specified host 99% of the time (with the 2167 * other 1% being a bug). Consequently, the assumption is that clearing 2168 * all locks from a host when notified by statd is acceptable. 2169 * 2170 * Please note that this routine skips the usual level of redirection 2171 * through a do_* type routine. This introduces a possible level of 2172 * error and might better be written as do_notify and take this one out. 2173 2174 */ 2175 2176 void 2177 notify(const char *hostname, const int state) 2178 { 2179 debuglog("notify from %s, new state %d", hostname, state); 2180 2181 siglock(); 2182 do_clear(hostname); 2183 sigunlock(); 2184 2185 debuglog("Leaving notify\n"); 2186 } 2187 2188 void 2189 send_granted(fl, opcode) 2190 struct file_lock *fl; 2191 int opcode __unused; 2192 { 2193 CLIENT *cli; 2194 static char dummy; 2195 struct timeval timeo; 2196 int success; 2197 static struct nlm_res retval; 2198 static struct nlm4_res retval4; 2199 2200 debuglog("About to send granted on blocked lock\n"); 2201 2202 cli = get_client(fl->addr, 2203 (fl->flags & LOCK_V4) ? NLM_VERS4 : NLM_VERS); 2204 if (cli == NULL) { 2205 syslog(LOG_NOTICE, "failed to get CLIENT for %s", 2206 fl->client_name); 2207 /* 2208 * We fail to notify remote that the lock has been granted. 2209 * The client will timeout and retry, the lock will be 2210 * granted at this time. 2211 */ 2212 return; 2213 } 2214 timeo.tv_sec = 0; 2215 timeo.tv_usec = (fl->flags & LOCK_ASYNC) ? 0 : 500000; /* 0.5s */ 2216 2217 if (fl->flags & LOCK_V4) { 2218 static nlm4_testargs res; 2219 res.cookie = fl->client_cookie; 2220 res.exclusive = fl->client.exclusive; 2221 res.alock.caller_name = fl->client_name; 2222 res.alock.fh.n_len = sizeof(fhandle_t); 2223 res.alock.fh.n_bytes = (char*)&fl->filehandle; 2224 res.alock.oh = fl->client.oh; 2225 res.alock.svid = fl->client.svid; 2226 res.alock.l_offset = fl->client.l_offset; 2227 res.alock.l_len = fl->client.l_len; 2228 debuglog("sending v4 reply%s", 2229 (fl->flags & LOCK_ASYNC) ? " (async)":""); 2230 if (fl->flags & LOCK_ASYNC) { 2231 success = clnt_call(cli, NLM4_GRANTED_MSG, 2232 (xdrproc_t)xdr_nlm4_testargs, &res, 2233 (xdrproc_t)xdr_void, &dummy, timeo); 2234 } else { 2235 success = clnt_call(cli, NLM4_GRANTED, 2236 (xdrproc_t)xdr_nlm4_testargs, &res, 2237 (xdrproc_t)xdr_nlm4_res, &retval4, timeo); 2238 } 2239 } else { 2240 static nlm_testargs res; 2241 2242 res.cookie = fl->client_cookie; 2243 res.exclusive = fl->client.exclusive; 2244 res.alock.caller_name = fl->client_name; 2245 res.alock.fh.n_len = sizeof(fhandle_t); 2246 res.alock.fh.n_bytes = (char*)&fl->filehandle; 2247 res.alock.oh = fl->client.oh; 2248 res.alock.svid = fl->client.svid; 2249 res.alock.l_offset = fl->client.l_offset; 2250 res.alock.l_len = fl->client.l_len; 2251 debuglog("sending v1 reply%s", 2252 (fl->flags & LOCK_ASYNC) ? " (async)":""); 2253 if (fl->flags & LOCK_ASYNC) { 2254 success = clnt_call(cli, NLM_GRANTED_MSG, 2255 (xdrproc_t)xdr_nlm_testargs, &res, 2256 (xdrproc_t)xdr_void, &dummy, timeo); 2257 } else { 2258 success = clnt_call(cli, NLM_GRANTED, 2259 (xdrproc_t)xdr_nlm_testargs, &res, 2260 (xdrproc_t)xdr_nlm_res, &retval, timeo); 2261 } 2262 } 2263 if (debug_level > 2) 2264 debuglog("clnt_call returns %d(%s) for granted", 2265 success, clnt_sperrno(success)); 2266 2267 } 2268 2269 /* 2270 * Routines below here have not been modified in the overhaul 2271 */ 2272 2273 /* 2274 * Are these two routines still required since lockd is not spawning off 2275 * children to service locks anymore? Presumably they were originally 2276 * put in place to prevent a one child from changing the lock list out 2277 * from under another one. 2278 */ 2279 2280 void 2281 siglock(void) 2282 { 2283 sigset_t block; 2284 2285 sigemptyset(&block); 2286 sigaddset(&block, SIGCHLD); 2287 2288 if (sigprocmask(SIG_BLOCK, &block, NULL) < 0) { 2289 syslog(LOG_WARNING, "siglock failed: %s", strerror(errno)); 2290 } 2291 } 2292 2293 void 2294 sigunlock(void) 2295 { 2296 sigset_t block; 2297 2298 sigemptyset(&block); 2299 sigaddset(&block, SIGCHLD); 2300 2301 if (sigprocmask(SIG_UNBLOCK, &block, NULL) < 0) { 2302 syslog(LOG_WARNING, "sigunlock failed: %s", strerror(errno)); 2303 } 2304 } 2305