1 /* $NetBSD: lockd_lock.c,v 1.5 2000/11/21 03:47:41 enami Exp $ */ 2 /* $FreeBSD$ */ 3 4 /* 5 * Copyright (c) 2001 Andrew P. Lentvorski, Jr. 6 * Copyright (c) 2000 Manuel Bouyer. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the University of 19 * California, Berkeley and its contributors. 20 * 4. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 */ 37 38 #define LOCKD_DEBUG 39 40 #include <stdio.h> 41 #ifdef LOCKD_DEBUG 42 #include <stdarg.h> 43 #endif 44 #include <stdlib.h> 45 #include <unistd.h> 46 #include <fcntl.h> 47 #include <syslog.h> 48 #include <errno.h> 49 #include <string.h> 50 #include <signal.h> 51 #include <rpc/rpc.h> 52 #include <sys/types.h> 53 #include <sys/stat.h> 54 #include <sys/socket.h> 55 #include <sys/param.h> 56 #include <sys/mount.h> 57 #include <sys/wait.h> 58 #include <rpcsvc/sm_inter.h> 59 #include <rpcsvc/nlm_prot.h> 60 #include "lockd_lock.h" 61 #include "lockd.h" 62 63 #define MAXOBJECTSIZE 64 64 #define MAXBUFFERSIZE 1024 65 66 /* 67 * SM_MAXSTRLEN is usually 1024. This means that lock requests and 68 * host name monitoring entries are *MUCH* larger than they should be 69 */ 70 71 /* 72 * A set of utilities for managing file locking 73 * 74 * XXX: All locks are in a linked list, a better structure should be used 75 * to improve search/access effeciency. 76 */ 77 78 /* struct describing a lock */ 79 struct file_lock { 80 LIST_ENTRY(file_lock) nfslocklist; 81 fhandle_t filehandle; /* NFS filehandle */ 82 struct sockaddr *addr; 83 struct nlm4_holder client; /* lock holder */ 84 /* XXX: client_cookie used *only* in send_granted */ 85 netobj client_cookie; /* cookie sent by the client */ 86 char client_name[SM_MAXSTRLEN]; 87 int nsm_status; /* status from the remote lock manager */ 88 int status; /* lock status, see below */ 89 int flags; /* lock flags, see lockd_lock.h */ 90 int blocking; /* blocking lock or not */ 91 pid_t locker; /* pid of the child process trying to get the lock */ 92 int fd; /* file descriptor for this lock */ 93 }; 94 95 LIST_HEAD(nfslocklist_head, file_lock); 96 struct nfslocklist_head nfslocklist_head = LIST_HEAD_INITIALIZER(nfslocklist_head); 97 98 LIST_HEAD(blockedlocklist_head, file_lock); 99 struct blockedlocklist_head blockedlocklist_head = LIST_HEAD_INITIALIZER(blockedlocklist_head); 100 101 /* lock status */ 102 #define LKST_LOCKED 1 /* lock is locked */ 103 /* XXX: Is this flag file specific or lock specific? */ 104 #define LKST_WAITING 2 /* file is already locked by another host */ 105 #define LKST_PROCESSING 3 /* child is trying to aquire the lock */ 106 #define LKST_DYING 4 /* must dies when we get news from the child */ 107 108 /* struct describing a monitored host */ 109 struct host { 110 LIST_ENTRY(host) hostlst; 111 char name[SM_MAXSTRLEN]; 112 int refcnt; 113 }; 114 /* list of hosts we monitor */ 115 LIST_HEAD(hostlst_head, host); 116 struct hostlst_head hostlst_head = LIST_HEAD_INITIALIZER(hostlst_head); 117 118 /* 119 * File monitoring handlers 120 * XXX: These might be able to be removed when kevent support 121 * is placed into the hardware lock/unlock routines. (ie. 122 * let the kernel do all the file monitoring) 123 */ 124 125 /* Struct describing a monitored file */ 126 struct monfile { 127 LIST_ENTRY(monfile) monfilelist; 128 fhandle_t filehandle; /* Local access filehandle */ 129 int fd; /* file descriptor: remains open until unlock! */ 130 int refcount; 131 int exclusive; 132 }; 133 134 /* List of files we monitor */ 135 LIST_HEAD(monfilelist_head, monfile); 136 struct monfilelist_head monfilelist_head = LIST_HEAD_INITIALIZER(monfilelist_head); 137 138 static int debugdelay = 0; 139 140 enum nfslock_status { NFS_GRANTED = 0, NFS_GRANTED_DUPLICATE, 141 NFS_DENIED, NFS_DENIED_NOLOCK, 142 NFS_RESERR }; 143 144 enum hwlock_status { HW_GRANTED = 0, HW_GRANTED_DUPLICATE, 145 HW_DENIED, HW_DENIED_NOLOCK, 146 HW_STALEFH, HW_READONLY, HW_RESERR }; 147 148 enum partialfilelock_status { PFL_GRANTED=0, PFL_GRANTED_DUPLICATE, PFL_DENIED, 149 PFL_NFSDENIED, PFL_NFSBLOCKED, PFL_NFSDENIED_NOLOCK, PFL_NFSRESERR, 150 PFL_HWDENIED, PFL_HWBLOCKED, PFL_HWDENIED_NOLOCK, PFL_HWRESERR}; 151 152 enum LFLAGS {LEDGE_LEFT, LEDGE_LBOUNDARY, LEDGE_INSIDE, LEDGE_RBOUNDARY, LEDGE_RIGHT}; 153 enum RFLAGS {REDGE_LEFT, REDGE_LBOUNDARY, REDGE_INSIDE, REDGE_RBOUNDARY, REDGE_RIGHT}; 154 /* XXX: WARNING! I HAVE OVERLOADED THIS STATUS ENUM! SPLIT IT APART INTO TWO */ 155 enum split_status {SPL_DISJOINT=0, SPL_LOCK1=1, SPL_LOCK2=2, SPL_CONTAINED=4, SPL_RESERR=8}; 156 157 enum partialfilelock_status lock_partialfilelock(struct file_lock *fl); 158 159 void send_granted(struct file_lock *fl, int opcode); 160 void siglock(void); 161 void sigunlock(void); 162 void monitor_lock_host(const char *hostname); 163 void unmonitor_lock_host(const char *hostname); 164 165 void copy_nlm4_lock_to_nlm4_holder(const struct nlm4_lock *src, 166 const bool_t exclusive, struct nlm4_holder *dest); 167 struct file_lock * allocate_file_lock(const netobj *lockowner, 168 const netobj *matchcookie); 169 void deallocate_file_lock(struct file_lock *fl); 170 void fill_file_lock(struct file_lock *fl, const fhandle_t *fh, 171 struct sockaddr *addr, const bool_t exclusive, const int32_t svid, 172 const u_int64_t offset, const u_int64_t len, const char *caller_name, 173 const int state, const int status, const int flags, const int blocking); 174 int regions_overlap(const u_int64_t start1, const u_int64_t len1, 175 const u_int64_t start2, const u_int64_t len2);; 176 enum split_status region_compare(const u_int64_t starte, const u_int64_t lene, 177 const u_int64_t startu, const u_int64_t lenu, 178 u_int64_t *start1, u_int64_t *len1, u_int64_t *start2, u_int64_t *len2); 179 int same_netobj(const netobj *n0, const netobj *n1); 180 int same_filelock_identity(const struct file_lock *fl0, 181 const struct file_lock *fl2); 182 183 static void debuglog(char const *fmt, ...); 184 void dump_static_object(const unsigned char* object, const int sizeof_object, 185 unsigned char* hbuff, const int sizeof_hbuff, 186 unsigned char* cbuff, const int sizeof_cbuff); 187 void dump_netobj(const struct netobj *nobj); 188 void dump_filelock(const struct file_lock *fl); 189 struct file_lock * get_lock_matching_unlock(const struct file_lock *fl); 190 enum nfslock_status test_nfslock(const struct file_lock *fl, 191 struct file_lock **conflicting_fl); 192 enum nfslock_status lock_nfslock(struct file_lock *fl); 193 enum nfslock_status delete_nfslock(struct file_lock *fl); 194 enum nfslock_status unlock_nfslock(const struct file_lock *fl, 195 struct file_lock **released_lock, struct file_lock **left_lock, 196 struct file_lock **right_lock); 197 enum hwlock_status lock_hwlock(struct file_lock *fl); 198 enum split_status split_nfslock(const struct file_lock *exist_lock, 199 const struct file_lock *unlock_lock, struct file_lock **left_lock, 200 struct file_lock **right_lock); 201 void add_blockingfilelock(struct file_lock *fl); 202 enum hwlock_status unlock_hwlock(const struct file_lock *fl); 203 enum hwlock_status test_hwlock(const struct file_lock *fl, 204 struct file_lock **conflicting_fl); 205 void remove_blockingfilelock(struct file_lock *fl); 206 void clear_blockingfilelock(const char *hostname); 207 void retry_blockingfilelocklist(void); 208 enum partialfilelock_status unlock_partialfilelock( 209 const struct file_lock *fl); 210 void clear_partialfilelock(const char *hostname); 211 enum partialfilelock_status test_partialfilelock( 212 const struct file_lock *fl, struct file_lock **conflicting_fl); 213 enum nlm_stats do_test(struct file_lock *fl, 214 struct file_lock **conflicting_fl); 215 enum nlm_stats do_unlock(struct file_lock *fl); 216 enum nlm_stats do_lock(struct file_lock *fl); 217 void do_clear(const char *hostname); 218 219 220 void 221 debuglog(char const *fmt, ...) 222 { 223 va_list ap; 224 225 if (debug_level < 1) { 226 return; 227 } 228 229 sleep(debugdelay); 230 231 va_start(ap, fmt); 232 vsyslog(LOG_DEBUG, fmt, ap); 233 va_end(ap); 234 } 235 236 void 237 dump_static_object(object, size_object, hbuff, size_hbuff, cbuff, size_cbuff) 238 const unsigned char *object; 239 const int size_object; 240 unsigned char *hbuff; 241 const int size_hbuff; 242 unsigned char *cbuff; 243 const int size_cbuff; 244 { 245 int i, objectsize; 246 247 if (debug_level < 2) { 248 return; 249 } 250 251 objectsize = size_object; 252 253 if (objectsize == 0) { 254 debuglog("object is size 0\n"); 255 } else { 256 if (objectsize > MAXOBJECTSIZE) { 257 debuglog("Object of size %d being clamped" 258 "to size %d\n", objectsize, MAXOBJECTSIZE); 259 objectsize = MAXOBJECTSIZE; 260 } 261 262 if (hbuff != NULL) { 263 if (size_hbuff < objectsize*2+1) { 264 debuglog("Hbuff not large enough." 265 " Increase size\n"); 266 } else { 267 for(i=0;i<objectsize;i++) { 268 sprintf(hbuff+i*2,"%02x",*(object+i)); 269 } 270 *(hbuff+i*2) = '\0'; 271 } 272 } 273 274 if (cbuff != NULL) { 275 if (size_cbuff < objectsize+1) { 276 debuglog("Cbuff not large enough." 277 " Increase Size\n"); 278 } 279 280 for(i=0;i<objectsize;i++) { 281 if (*(object+i) >= 32 && *(object+i) <= 127) { 282 *(cbuff+i) = *(object+i); 283 } else { 284 *(cbuff+i) = '.'; 285 } 286 } 287 *(cbuff+i) = '\0'; 288 } 289 } 290 } 291 292 void 293 dump_netobj(const struct netobj *nobj) 294 { 295 char hbuff[MAXBUFFERSIZE*2]; 296 char cbuff[MAXBUFFERSIZE]; 297 298 if (debug_level < 2) { 299 return; 300 } 301 302 if (nobj == NULL) { 303 debuglog("Null netobj pointer\n"); 304 } 305 else if (nobj->n_len == 0) { 306 debuglog("Size zero netobj\n"); 307 } else { 308 dump_static_object(nobj->n_bytes, nobj->n_len, 309 hbuff, sizeof(hbuff), cbuff, sizeof(cbuff)); 310 debuglog("netobj: len: %d data: %s ::: %s\n", 311 nobj->n_len, hbuff, cbuff); 312 } 313 } 314 315 void 316 dump_filelock(const struct file_lock *fl) 317 { 318 char hbuff[MAXBUFFERSIZE*2]; 319 char cbuff[MAXBUFFERSIZE]; 320 321 if (debug_level < 2) { 322 return; 323 } 324 325 if (fl != NULL) { 326 debuglog("Dumping file lock structure @ %p\n", fl); 327 328 /* 329 dump_static_object((unsigned char *)&fl->filehandle, 330 sizeof(fl->filehandle), hbuff, sizeof(hbuff), 331 cbuff, sizeof(cbuff)); 332 debuglog("Filehandle: %8s ::: %8s\n", hbuff, cbuff); 333 */ 334 335 debuglog("Dumping nlm4_holder:\n" 336 "exc: %x svid: %x offset:len %llx:%llx\n", 337 fl->client.exclusive, fl->client.svid, 338 fl->client.l_offset, fl->client.l_len); 339 340 /* 341 debuglog("Dumping client identity:\n"); 342 dump_netobj(&fl->client.oh); 343 344 debuglog("Dumping client cookie:\n"); 345 dump_netobj(&fl->client_cookie); 346 347 debuglog("nsm: %d status: %d flags: %d locker: %d" 348 " fd: %d\n", fl->nsm_status, fl->status, 349 fl->flags, fl->locker, fl->fd); 350 */ 351 } else { 352 debuglog("NULL file lock structure\n"); 353 } 354 } 355 356 void 357 copy_nlm4_lock_to_nlm4_holder(src, exclusive, dest) 358 const struct nlm4_lock *src; 359 const bool_t exclusive; 360 struct nlm4_holder *dest; 361 { 362 363 dest->exclusive = exclusive; 364 dest->oh.n_len = src->oh.n_len; 365 dest->oh.n_bytes = src->oh.n_bytes; 366 dest->svid = src->svid; 367 dest->l_offset = src->l_offset; 368 dest->l_len = src->l_len; 369 } 370 371 372 /* 373 * allocate_file_lock: Create a lock with the given parameters 374 */ 375 376 struct file_lock * 377 allocate_file_lock(const netobj *lockowner, const netobj *matchcookie) 378 { 379 struct file_lock *newfl; 380 381 newfl = malloc(sizeof(struct file_lock)); 382 if (newfl == NULL) { 383 return NULL; 384 } 385 bzero(newfl, sizeof(newfl)); 386 387 newfl->client.oh.n_bytes = malloc(lockowner->n_len); 388 if (newfl->client.oh.n_bytes == NULL) { 389 free(newfl); 390 return NULL; 391 } 392 newfl->client.oh.n_len = lockowner->n_len; 393 bcopy(lockowner->n_bytes, newfl->client.oh.n_bytes, lockowner->n_len); 394 395 newfl->client_cookie.n_bytes = malloc(matchcookie->n_len); 396 if (newfl->client_cookie.n_bytes == NULL) { 397 free(newfl->client.oh.n_bytes); 398 free(newfl); 399 return NULL; 400 } 401 newfl->client_cookie.n_len = matchcookie->n_len; 402 bcopy(matchcookie->n_bytes, newfl->client_cookie.n_bytes, matchcookie->n_len); 403 404 return newfl; 405 } 406 407 /* 408 * file_file_lock: Force creation of a valid file lock 409 */ 410 void 411 fill_file_lock(struct file_lock *fl, const fhandle_t *fh, 412 struct sockaddr *addr, const bool_t exclusive, const int32_t svid, 413 const u_int64_t offset, const u_int64_t len, const char *caller_name, 414 const int state, const int status, const int flags, const int blocking) 415 { 416 bcopy(fh, &fl->filehandle, sizeof(fhandle_t)); 417 fl->addr = addr; 418 419 fl->client.exclusive = exclusive; 420 fl->client.svid = svid; 421 fl->client.l_offset = offset; 422 fl->client.l_len = len; 423 424 strncpy(fl->client_name, caller_name, SM_MAXSTRLEN); 425 426 fl->nsm_status = state; 427 fl->status = status; 428 fl->flags = flags; 429 fl->blocking = blocking; 430 } 431 432 /* 433 * deallocate_file_lock: Free all storage associated with a file lock 434 */ 435 void 436 deallocate_file_lock(struct file_lock *fl) 437 { 438 free(fl->client.oh.n_bytes); 439 free(fl->client_cookie.n_bytes); 440 free(fl); 441 } 442 443 /* 444 * regions_overlap(): This function examines the two provided regions for 445 * overlap. 446 */ 447 int 448 regions_overlap(start1, len1, start2, len2) 449 const u_int64_t start1, len1, start2, len2; 450 { 451 u_int64_t d1,d2,d3,d4; 452 enum split_status result; 453 454 debuglog("Entering region overlap with vals: %llu:%llu--%llu:%llu\n", 455 start1, len1, start2, len2); 456 457 result = region_compare(start1, len1, start2, len2, 458 &d1, &d2, &d3, &d4); 459 460 debuglog("Exiting region overlap with val: %d\n",result); 461 462 if (result == SPL_DISJOINT) { 463 return 0; 464 } else { 465 return 1; 466 } 467 468 return (result); 469 } 470 471 /* 472 * region_compare(): Examine lock regions and split appropriately 473 * 474 * XXX: Fix 64 bit overflow problems 475 * XXX: Check to make sure I got *ALL* the cases. 476 * XXX: This DESPERATELY needs a regression test. 477 */ 478 enum split_status 479 region_compare(starte, lene, startu, lenu, 480 start1, len1, start2, len2) 481 const u_int64_t starte, lene, startu, lenu; 482 u_int64_t *start1, *len1, *start2, *len2; 483 { 484 /* 485 * Please pay attention to the sequential exclusions 486 * of the if statements!!! 487 */ 488 enum LFLAGS lflags; 489 enum RFLAGS rflags; 490 enum split_status retval; 491 492 retval = SPL_DISJOINT; 493 494 if (lene == 0 && lenu == 0) { 495 /* Examine left edge of locker */ 496 if (startu < starte) { 497 lflags = LEDGE_LEFT; 498 } else if (startu == starte) { 499 lflags = LEDGE_LBOUNDARY; 500 } else { 501 lflags = LEDGE_INSIDE; 502 } 503 504 rflags = REDGE_RBOUNDARY; /* Both are infiinite */ 505 506 if (lflags == LEDGE_INSIDE) { 507 *start1 = starte; 508 *len1 = startu - starte; 509 } 510 511 if (lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) { 512 retval = SPL_CONTAINED; 513 } else { 514 retval = SPL_LOCK1; 515 } 516 } else if (lene == 0 && lenu != 0) { 517 /* Established lock is infinite */ 518 /* Examine left edge of unlocker */ 519 if (startu < starte) { 520 lflags = LEDGE_LEFT; 521 } else if (startu == starte) { 522 lflags = LEDGE_LBOUNDARY; 523 } else if (startu > starte) { 524 lflags = LEDGE_INSIDE; 525 } 526 527 /* Examine right edge of unlocker */ 528 if (startu + lenu < starte) { 529 /* Right edge of unlocker left of established lock */ 530 rflags = REDGE_LEFT; 531 return SPL_DISJOINT; 532 } else if (startu + lenu == starte) { 533 /* Right edge of unlocker on start of established lock */ 534 rflags = REDGE_LBOUNDARY; 535 return SPL_DISJOINT; 536 } else { /* Infinifty is right of finity */ 537 /* Right edge of unlocker inside established lock */ 538 rflags = REDGE_INSIDE; 539 } 540 541 if (lflags == LEDGE_INSIDE) { 542 *start1 = starte; 543 *len1 = startu - starte; 544 retval |= SPL_LOCK1; 545 } 546 547 if (rflags == REDGE_INSIDE) { 548 /* Create right lock */ 549 *start2 = startu+lenu; 550 *len2 = 0; 551 retval |= SPL_LOCK2; 552 } 553 } else if (lene != 0 && lenu == 0) { 554 /* Unlocker is infinite */ 555 /* Examine left edge of unlocker */ 556 if (startu < starte) { 557 lflags = LEDGE_LEFT; 558 retval = SPL_CONTAINED; 559 return retval; 560 } else if (startu == starte) { 561 lflags = LEDGE_LBOUNDARY; 562 retval = SPL_CONTAINED; 563 return retval; 564 } else if ((startu > starte) && (startu < starte + lene - 1)) { 565 lflags = LEDGE_INSIDE; 566 } else if (startu == starte + lene - 1) { 567 lflags = LEDGE_RBOUNDARY; 568 } else { /* startu > starte + lene -1 */ 569 lflags = LEDGE_RIGHT; 570 return SPL_DISJOINT; 571 } 572 573 rflags = REDGE_RIGHT; /* Infinity is right of finity */ 574 575 if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) { 576 *start1 = starte; 577 *len1 = startu - starte; 578 retval |= SPL_LOCK1; 579 return retval; 580 } 581 582 } else { 583 /* Both locks are finite */ 584 585 /* Examine left edge of unlocker */ 586 if (startu < starte) { 587 lflags = LEDGE_LEFT; 588 } else if (startu == starte) { 589 lflags = LEDGE_LBOUNDARY; 590 } else if ((startu > starte) && (startu < starte + lene - 1)) { 591 lflags = LEDGE_INSIDE; 592 } else if (startu == starte + lene - 1) { 593 lflags = LEDGE_RBOUNDARY; 594 } else { /* startu > starte + lene -1 */ 595 lflags = LEDGE_RIGHT; 596 return SPL_DISJOINT; 597 } 598 599 /* Examine right edge of unlocker */ 600 if (startu + lenu < starte) { 601 /* Right edge of unlocker left of established lock */ 602 rflags = REDGE_LEFT; 603 return SPL_DISJOINT; 604 } else if (startu + lenu == starte) { 605 /* Right edge of unlocker on start of established lock */ 606 rflags = REDGE_LBOUNDARY; 607 return SPL_DISJOINT; 608 } else if (startu + lenu < starte + lene) { 609 /* Right edge of unlocker inside established lock */ 610 rflags = REDGE_INSIDE; 611 } else if (startu + lenu == starte + lene) { 612 /* Right edge of unlocker on right edge of established lock */ 613 rflags = REDGE_RBOUNDARY; 614 } else { /* startu + lenu > starte + lene */ 615 /* Right edge of unlocker is right of established lock */ 616 rflags = REDGE_RIGHT; 617 } 618 619 if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) { 620 /* Create left lock */ 621 *start1 = starte; 622 *len1 = (startu - starte); 623 retval |= SPL_LOCK1; 624 } 625 626 if (rflags == REDGE_INSIDE) { 627 /* Create right lock */ 628 *start2 = startu+lenu; 629 *len2 = starte+lene-(startu+lenu); 630 retval |= SPL_LOCK2; 631 } 632 633 if ((lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) && 634 (rflags == REDGE_RBOUNDARY || rflags == REDGE_RIGHT)) { 635 retval = SPL_CONTAINED; 636 } 637 } 638 639 return retval; 640 } 641 642 /* 643 * same_netobj: Compares the apprpriate bits of a netobj for identity 644 */ 645 int 646 same_netobj(const netobj *n0, const netobj *n1) 647 { 648 int retval; 649 650 retval = 0; 651 652 debuglog("Entering netobj identity check\n"); 653 654 if (n0->n_len == n1->n_len) { 655 debuglog("Preliminary length check passed\n"); 656 retval = !bcmp(n0->n_bytes, n1->n_bytes, n0->n_len); 657 debuglog("netobj %smatch\n", retval ? "" : "mis"); 658 } 659 660 return (retval); 661 } 662 663 /* 664 * same_filelock_identity: Compares the appropriate bits of a file_lock 665 */ 666 int 667 same_filelock_identity(fl0, fl1) 668 const struct file_lock *fl0, *fl1; 669 { 670 int retval; 671 672 retval = 0; 673 674 debuglog("Checking filelock identity\n"); 675 676 /* 677 * Check process ids and host information. 678 */ 679 retval = (fl0->client.svid == fl1->client.svid && 680 same_netobj(&(fl0->client.oh), &(fl1->client.oh))); 681 682 debuglog("Exiting checking filelock identity: retval: %d\n",retval); 683 684 return (retval); 685 } 686 687 /* 688 * Below here are routines associated with manipulating the NFS 689 * lock list. 690 */ 691 692 /* 693 * get_lock_matching_unlock: Return a lock which matches the given unlock lock 694 * or NULL otehrwise 695 * XXX: It is a shame that this duplicates so much code from test_nfslock. 696 */ 697 struct file_lock * 698 get_lock_matching_unlock(const struct file_lock *fl) 699 { 700 struct file_lock *ifl; /* Iterator */ 701 702 debuglog("Entering lock_matching_unlock\n"); 703 debuglog("********Dump of fl*****************\n"); 704 dump_filelock(fl); 705 706 LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) { 707 debuglog("Pointer to file lock: %p\n",ifl); 708 709 debuglog("****Dump of ifl****\n"); 710 dump_filelock(ifl); 711 debuglog("*******************\n"); 712 713 /* 714 * XXX: It is conceivable that someone could use the NLM RPC 715 * system to directly access filehandles. This may be a 716 * security hazard as the filehandle code may bypass normal 717 * file access controls 718 */ 719 if (bcmp(&fl->filehandle, &ifl->filehandle, sizeof(fhandle_t))) 720 continue; 721 722 debuglog("matching_unlock: Filehandles match, " 723 "checking regions\n"); 724 725 /* Filehandles match, check for region overlap */ 726 if (!regions_overlap(fl->client.l_offset, fl->client.l_len, 727 ifl->client.l_offset, ifl->client.l_len)) 728 continue; 729 730 debuglog("matching_unlock: Region overlap" 731 " found %llu : %llu -- %llu : %llu\n", 732 fl->client.l_offset,fl->client.l_len, 733 ifl->client.l_offset,ifl->client.l_len); 734 735 /* Regions overlap, check the identity */ 736 if (!same_filelock_identity(fl,ifl)) 737 continue; 738 739 debuglog("matching_unlock: Duplicate lock id. Granting\n"); 740 return (ifl); 741 } 742 743 debuglog("Exiting lock_matching_unlock\n"); 744 745 return (NULL); 746 } 747 748 /* 749 * test_nfslock: check for NFS lock in lock list 750 * 751 * This routine makes the following assumptions: 752 * 1) Nothing will adjust the lock list during a lookup 753 * 754 * This routine has an intersting quirk which bit me hard. 755 * The conflicting_fl is the pointer to the conflicting lock. 756 * However, to modify the "*pointer* to the conflicting lock" rather 757 * that the "conflicting lock itself" one must pass in a "pointer to 758 * the pointer of the conflicting lock". Gross. 759 */ 760 761 enum nfslock_status 762 test_nfslock(const struct file_lock *fl, struct file_lock **conflicting_fl) 763 { 764 struct file_lock *ifl; /* Iterator */ 765 enum nfslock_status retval; 766 767 debuglog("Entering test_nfslock\n"); 768 769 retval = NFS_GRANTED; 770 (*conflicting_fl) = NULL; 771 772 debuglog("Entering lock search loop\n"); 773 774 debuglog("***********************************\n"); 775 debuglog("Dumping match filelock\n"); 776 debuglog("***********************************\n"); 777 dump_filelock(fl); 778 debuglog("***********************************\n"); 779 780 LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) { 781 if (retval == NFS_DENIED) 782 break; 783 784 debuglog("Top of lock loop\n"); 785 debuglog("Pointer to file lock: %p\n",ifl); 786 787 debuglog("***********************************\n"); 788 debuglog("Dumping test filelock\n"); 789 debuglog("***********************************\n"); 790 dump_filelock(ifl); 791 debuglog("***********************************\n"); 792 793 /* 794 * XXX: It is conceivable that someone could use the NLM RPC 795 * system to directly access filehandles. This may be a 796 * security hazard as the filehandle code may bypass normal 797 * file access controls 798 */ 799 if (bcmp(&fl->filehandle, &ifl->filehandle, sizeof(fhandle_t))) 800 continue; 801 802 debuglog("test_nfslock: filehandle match found\n"); 803 804 /* Filehandles match, check for region overlap */ 805 if (!regions_overlap(fl->client.l_offset, fl->client.l_len, 806 ifl->client.l_offset, ifl->client.l_len)) 807 continue; 808 809 debuglog("test_nfslock: Region overlap found" 810 " %llu : %llu -- %llu : %llu\n", 811 fl->client.l_offset,fl->client.l_len, 812 ifl->client.l_offset,ifl->client.l_len); 813 814 /* Regions overlap, check the exclusivity */ 815 if (!(fl->client.exclusive || ifl->client.exclusive)) 816 continue; 817 818 debuglog("test_nfslock: Exclusivity failure: %d %d\n", 819 fl->client.exclusive, 820 ifl->client.exclusive); 821 822 if (same_filelock_identity(fl,ifl)) { 823 debuglog("test_nfslock: Duplicate id. Granting\n"); 824 (*conflicting_fl) = ifl; 825 retval = NFS_GRANTED_DUPLICATE; 826 } else { 827 /* locking attempt fails */ 828 debuglog("test_nfslock: Lock attempt failed\n"); 829 debuglog("Desired lock\n"); 830 dump_filelock(fl); 831 debuglog("Conflicting lock\n"); 832 dump_filelock(ifl); 833 (*conflicting_fl) = ifl; 834 retval = NFS_DENIED; 835 } 836 } 837 838 debuglog("Dumping file locks\n"); 839 debuglog("Exiting test_nfslock\n"); 840 841 return (retval); 842 } 843 844 /* 845 * lock_nfslock: attempt to create a lock in the NFS lock list 846 * 847 * This routine tests whether the lock will be granted and then adds 848 * the entry to the lock list if so. 849 * 850 * Argument fl gets modified as its list housekeeping entries get modified 851 * upon insertion into the NFS lock list 852 * 853 * This routine makes several assumptions: 854 * 1) It is perfectly happy to grant a duplicate lock from the same pid. 855 * While this seems to be intuitively wrong, it is required for proper 856 * Posix semantics during unlock. It is absolutely imperative to not 857 * unlock the main lock before the two child locks are established. Thus, 858 * one has be be able to create duplicate locks over an existing lock 859 * 2) It currently accepts duplicate locks from the same id,pid 860 */ 861 862 enum nfslock_status 863 lock_nfslock(struct file_lock *fl) 864 { 865 enum nfslock_status retval; 866 struct file_lock *dummy_fl; 867 868 dummy_fl = NULL; 869 870 debuglog("Entering lock_nfslock...\n"); 871 872 retval = test_nfslock(fl,&dummy_fl); 873 874 if (retval == NFS_GRANTED || retval == NFS_GRANTED_DUPLICATE) { 875 debuglog("Inserting lock...\n"); 876 dump_filelock(fl); 877 LIST_INSERT_HEAD(&nfslocklist_head, fl, nfslocklist); 878 } 879 880 debuglog("Exiting lock_nfslock...\n"); 881 882 return (retval); 883 } 884 885 /* 886 * delete_nfslock: delete an NFS lock list entry 887 * 888 * This routine is used to delete a lock out of the NFS lock list 889 * without regard to status, underlying locks, regions or anything else 890 * 891 * Note that this routine *does not deallocate memory* of the lock. 892 * It just disconnects it from the list. The lock can then be used 893 * by other routines without fear of trashing the list. 894 */ 895 896 enum nfslock_status 897 delete_nfslock(struct file_lock *fl) 898 { 899 900 LIST_REMOVE(fl, nfslocklist); 901 902 return (NFS_GRANTED); 903 } 904 905 enum split_status 906 split_nfslock(exist_lock, unlock_lock, left_lock, right_lock) 907 const struct file_lock *exist_lock, *unlock_lock; 908 struct file_lock **left_lock, **right_lock; 909 { 910 u_int64_t start1, len1, start2, len2; 911 enum split_status spstatus; 912 913 spstatus = region_compare(exist_lock->client.l_offset, exist_lock->client.l_len, 914 unlock_lock->client.l_offset, unlock_lock->client.l_len, 915 &start1, &len1, &start2, &len2); 916 917 if ((spstatus & SPL_LOCK1) != 0) { 918 *left_lock = allocate_file_lock(&exist_lock->client.oh, &exist_lock->client_cookie); 919 if (*left_lock == NULL) { 920 debuglog("Unable to allocate resource for split 1\n"); 921 return SPL_RESERR; 922 } 923 924 fill_file_lock(*left_lock, &exist_lock->filehandle, 925 exist_lock->addr, 926 exist_lock->client.exclusive, exist_lock->client.svid, 927 start1, len1, 928 exist_lock->client_name, exist_lock->nsm_status, 929 exist_lock->status, exist_lock->flags, exist_lock->blocking); 930 } 931 932 if ((spstatus & SPL_LOCK2) != 0) { 933 *right_lock = allocate_file_lock(&exist_lock->client.oh, &exist_lock->client_cookie); 934 if (*right_lock == NULL) { 935 debuglog("Unable to allocate resource for split 1\n"); 936 if (*left_lock != NULL) { 937 deallocate_file_lock(*left_lock); 938 } 939 return SPL_RESERR; 940 } 941 942 fill_file_lock(*right_lock, &exist_lock->filehandle, 943 exist_lock->addr, 944 exist_lock->client.exclusive, exist_lock->client.svid, 945 start2, len2, 946 exist_lock->client_name, exist_lock->nsm_status, 947 exist_lock->status, exist_lock->flags, exist_lock->blocking); 948 } 949 950 return spstatus; 951 } 952 953 enum nfslock_status 954 unlock_nfslock(fl, released_lock, left_lock, right_lock) 955 const struct file_lock *fl; 956 struct file_lock **released_lock; 957 struct file_lock **left_lock; 958 struct file_lock **right_lock; 959 { 960 struct file_lock *mfl; /* Matching file lock */ 961 enum nfslock_status retval; 962 enum split_status spstatus; 963 964 debuglog("Entering unlock_nfslock\n"); 965 966 *released_lock = NULL; 967 *left_lock = NULL; 968 *right_lock = NULL; 969 970 retval = NFS_DENIED_NOLOCK; 971 972 printf("Attempting to match lock...\n"); 973 mfl = get_lock_matching_unlock(fl); 974 975 if (mfl != NULL) { 976 debuglog("Unlock matched. Querying for split\n"); 977 978 spstatus = split_nfslock(mfl, fl, left_lock, right_lock); 979 980 debuglog("Split returned %d %p %p %p %p\n",spstatus,mfl,fl,*left_lock,*right_lock); 981 debuglog("********Split dumps********"); 982 dump_filelock(mfl); 983 dump_filelock(fl); 984 dump_filelock(*left_lock); 985 dump_filelock(*right_lock); 986 debuglog("********End Split dumps********"); 987 988 if (spstatus == SPL_RESERR) { 989 if (*left_lock != NULL) { 990 deallocate_file_lock(*left_lock); 991 *left_lock = NULL; 992 } 993 994 if (*right_lock != NULL) { 995 deallocate_file_lock(*right_lock); 996 *right_lock = NULL; 997 } 998 999 return NFS_RESERR; 1000 } 1001 1002 /* Insert new locks from split if required */ 1003 if (*left_lock != NULL) { 1004 debuglog("Split left activated\n"); 1005 LIST_INSERT_HEAD(&nfslocklist_head, *left_lock, nfslocklist); 1006 } 1007 1008 if (*right_lock != NULL) { 1009 debuglog("Split right activated\n"); 1010 LIST_INSERT_HEAD(&nfslocklist_head, *right_lock, nfslocklist); 1011 } 1012 1013 /* Unlock the lock since it matches identity */ 1014 LIST_REMOVE(mfl, nfslocklist); 1015 *released_lock = mfl; 1016 retval = NFS_GRANTED; 1017 } 1018 1019 debuglog("Exiting unlock_nfslock\n"); 1020 1021 return retval; 1022 } 1023 1024 /* 1025 * Below here are the routines for manipulating the file lock directly 1026 * on the disk hardware itself 1027 */ 1028 enum hwlock_status 1029 lock_hwlock(struct file_lock *fl) 1030 { 1031 struct monfile *imf,*nmf; 1032 int lflags, flerror; 1033 1034 /* Scan to see if filehandle already present */ 1035 LIST_FOREACH(imf, &monfilelist_head, monfilelist) { 1036 if (bcmp(&fl->filehandle, &imf->filehandle, 1037 sizeof(fl->filehandle)) == 0) { 1038 /* imf is the correct filehandle */ 1039 break; 1040 } 1041 } 1042 1043 /* 1044 * Filehandle already exists (we control the file) 1045 * *AND* NFS has already cleared the lock for availability 1046 * Grant it and bump the refcount. 1047 */ 1048 if (imf != NULL) { 1049 ++(imf->refcount); 1050 return (HW_GRANTED); 1051 } 1052 1053 /* No filehandle found, create and go */ 1054 nmf = malloc(sizeof(struct monfile)); 1055 if (nmf == NULL) { 1056 debuglog("hwlock resource allocation failure\n"); 1057 return (HW_RESERR); 1058 } 1059 1060 /* XXX: Is O_RDWR always the correct mode? */ 1061 nmf->fd = fhopen(&fl->filehandle, O_RDWR); 1062 if (nmf->fd < 0) { 1063 debuglog("fhopen failed (from %16s): %32s\n", 1064 fl->client_name, strerror(errno)); 1065 free(nmf); 1066 switch (errno) { 1067 case ESTALE: 1068 return (HW_STALEFH); 1069 case EROFS: 1070 return (HW_READONLY); 1071 default: 1072 return (HW_RESERR); 1073 } 1074 } 1075 1076 /* File opened correctly, fill the monitor struct */ 1077 bcopy(&fl->filehandle, &nmf->filehandle, sizeof(fl->filehandle)); 1078 nmf->refcount = 1; 1079 nmf->exclusive = fl->client.exclusive; 1080 1081 lflags = (nmf->exclusive == 1) ? 1082 (LOCK_EX | LOCK_NB) : (LOCK_SH | LOCK_NB); 1083 1084 flerror = flock(nmf->fd, lflags); 1085 1086 if (flerror != 0) { 1087 debuglog("flock failed (from %16s): %32s\n", 1088 fl->client_name, strerror(errno)); 1089 close(nmf->fd); 1090 free(nmf); 1091 switch (errno) { 1092 case EAGAIN: 1093 return (HW_DENIED); 1094 case ESTALE: 1095 return (HW_STALEFH); 1096 case EROFS: 1097 return (HW_READONLY); 1098 default: 1099 return (HW_RESERR); 1100 break; 1101 } 1102 } 1103 1104 /* File opened and locked */ 1105 LIST_INSERT_HEAD(&monfilelist_head, nmf, monfilelist); 1106 1107 debuglog("flock succeeded (from %16s)\n", fl->client_name); 1108 return (HW_GRANTED); 1109 } 1110 1111 enum hwlock_status 1112 unlock_hwlock(const struct file_lock *fl) 1113 { 1114 struct monfile *imf; 1115 1116 debuglog("Entering unlock_hwlock\n"); 1117 debuglog("Entering loop interation\n"); 1118 1119 /* Scan to see if filehandle already present */ 1120 LIST_FOREACH(imf, &monfilelist_head, monfilelist) { 1121 if (bcmp(&fl->filehandle, &imf->filehandle, 1122 sizeof(fl->filehandle)) == 0) { 1123 /* imf is the correct filehandle */ 1124 break; 1125 } 1126 } 1127 1128 debuglog("Completed iteration. Proceeding\n"); 1129 1130 if (imf == NULL) { 1131 /* No lock found */ 1132 debuglog("Exiting unlock_hwlock (HW_DENIED_NOLOCK)\n"); 1133 return (HW_DENIED_NOLOCK); 1134 } 1135 1136 /* Lock found */ 1137 --imf->refcount; 1138 1139 if (imf->refcount < 0) { 1140 debuglog("Negative hardware reference count\n"); 1141 } 1142 1143 if (imf->refcount <= 0) { 1144 close(imf->fd); 1145 LIST_REMOVE(imf, monfilelist); 1146 free(imf); 1147 } 1148 debuglog("Exiting unlock_hwlock (HW_GRANTED)\n"); 1149 return (HW_GRANTED); 1150 } 1151 1152 enum hwlock_status 1153 test_hwlock(const struct file_lock *fl, struct file_lock **conflicting_fl) 1154 { 1155 1156 /* 1157 * XXX: lock tests on hardware are not required until 1158 * true partial file testing is done on the underlying file 1159 */ 1160 return (HW_RESERR); 1161 } 1162 1163 1164 1165 /* 1166 * Below here are routines for manipulating blocked lock requests 1167 * They should only be called from the XXX_partialfilelock routines 1168 * if at all possible 1169 */ 1170 1171 void 1172 add_blockingfilelock(struct file_lock *fl) 1173 { 1174 1175 debuglog("Entering add_blockingfilelock\n"); 1176 1177 /* 1178 * Clear the blocking flag so that it can be reused without 1179 * adding it to the blocking queue a second time 1180 */ 1181 1182 fl->blocking = 0; 1183 LIST_INSERT_HEAD(&blockedlocklist_head, fl, nfslocklist); 1184 1185 debuglog("Exiting add_blockingfilelock\n"); 1186 } 1187 1188 void 1189 remove_blockingfilelock(struct file_lock *fl) 1190 { 1191 1192 debuglog("Entering remove_blockingfilelock\n"); 1193 1194 LIST_REMOVE(fl, nfslocklist); 1195 1196 debuglog("Exiting remove_blockingfilelock\n"); 1197 } 1198 1199 void 1200 clear_blockingfilelock(const char *hostname) 1201 { 1202 struct file_lock *ifl,*nfl; 1203 1204 /* 1205 * Normally, LIST_FOREACH is called for, but since 1206 * the current element *is* the iterator, deleting it 1207 * would mess up the iteration. Thus, a next element 1208 * must be used explicitly 1209 */ 1210 1211 ifl = LIST_FIRST(&blockedlocklist_head); 1212 1213 while (ifl != NULL) { 1214 nfl = LIST_NEXT(ifl, nfslocklist); 1215 1216 if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) { 1217 remove_blockingfilelock(ifl); 1218 deallocate_file_lock(ifl); 1219 } 1220 1221 ifl = nfl; 1222 } 1223 } 1224 1225 void 1226 retry_blockingfilelocklist(void) 1227 { 1228 /* Retry all locks in the blocked list */ 1229 struct file_lock *ifl, *nfl, *pfl; /* Iterator */ 1230 enum partialfilelock_status pflstatus; 1231 1232 debuglog("Entering retry_blockingfilelocklist\n"); 1233 1234 pfl = NULL; 1235 ifl = LIST_FIRST(&blockedlocklist_head); 1236 debuglog("Iterator choice %p\n",ifl); 1237 1238 while (ifl != NULL) { 1239 /* 1240 * SUBTLE BUG: The next element must be worked out before the 1241 * current element has been moved 1242 */ 1243 nfl = LIST_NEXT(ifl, nfslocklist); 1244 debuglog("Iterator choice %p\n",ifl); 1245 debuglog("Prev iterator choice %p\n",pfl); 1246 debuglog("Next iterator choice %p\n",nfl); 1247 1248 /* 1249 * SUBTLE BUG: The file_lock must be removed from the 1250 * old list so that it's list pointers get disconnected 1251 * before being allowed to participate in the new list 1252 * which will automatically add it in if necessary. 1253 */ 1254 1255 LIST_REMOVE(ifl, nfslocklist); 1256 pflstatus = lock_partialfilelock(ifl); 1257 1258 if (pflstatus == PFL_GRANTED || pflstatus == PFL_GRANTED_DUPLICATE) { 1259 debuglog("Granted blocked lock\n"); 1260 /* lock granted and is now being used */ 1261 send_granted(ifl,0); 1262 } else { 1263 /* Reinsert lock back into same place in blocked list */ 1264 debuglog("Replacing blocked lock\n"); 1265 if (pfl != NULL) 1266 LIST_INSERT_AFTER(pfl, ifl, nfslocklist); 1267 else 1268 /* ifl is the only elem. in the list */ 1269 LIST_INSERT_HEAD(&blockedlocklist_head, ifl, nfslocklist); 1270 } 1271 1272 /* Valid increment behavior regardless of state of ifl */ 1273 ifl = nfl; 1274 /* if a lock was granted incrementing pfl would make it nfl */ 1275 if (pfl != NULL && (LIST_NEXT(pfl, nfslocklist) != nfl)) 1276 pfl = LIST_NEXT(pfl, nfslocklist); 1277 else 1278 pfl = LIST_FIRST(&blockedlocklist_head); 1279 } 1280 1281 debuglog("Exiting retry_blockingfilelocklist\n"); 1282 } 1283 1284 /* 1285 * Below here are routines associated with manipulating all 1286 * aspects of the partial file locking system (list, hardware, etc.) 1287 */ 1288 1289 /* 1290 * Please note that lock monitoring must be done at this level which 1291 * keeps track of *individual* lock requests on lock and unlock 1292 * 1293 * XXX: Split unlocking is going to make the unlock code miserable 1294 */ 1295 1296 /* 1297 * lock_partialfilelock: 1298 * 1299 * Argument fl gets modified as its list housekeeping entries get modified 1300 * upon insertion into the NFS lock list 1301 * 1302 * This routine makes several assumptions: 1303 * 1) It (will) pass locks through to flock to lock the entire underlying file 1304 * and then parcel out NFS locks if it gets control of the file. 1305 * This matches the old rpc.lockd file semantics (except where it 1306 * is now more correct). It is the safe solution, but will cause 1307 * overly restrictive blocking if someone is trying to use the 1308 * underlying files without using NFS. This appears to be an 1309 * acceptable tradeoff since most people use standalone NFS servers. 1310 * XXX: The right solution is probably kevent combined with fcntl 1311 * 1312 * 2) Nothing modifies the lock lists between testing and granting 1313 * I have no idea whether this is a useful assumption or not 1314 */ 1315 1316 enum partialfilelock_status 1317 lock_partialfilelock(struct file_lock *fl) 1318 { 1319 enum partialfilelock_status retval; 1320 enum nfslock_status lnlstatus; 1321 enum hwlock_status hwstatus; 1322 1323 debuglog("Entering lock_partialfilelock\n"); 1324 1325 retval = PFL_DENIED; 1326 1327 /* 1328 * Execute the NFS lock first, if possible, as it is significantly 1329 * easier and less expensive to undo than the filesystem lock 1330 */ 1331 1332 lnlstatus = lock_nfslock(fl); 1333 1334 switch (lnlstatus) { 1335 case NFS_GRANTED: 1336 case NFS_GRANTED_DUPLICATE: 1337 /* 1338 * At this point, the NFS lock is allocated and active. 1339 * Remember to clean it up if the hardware lock fails 1340 */ 1341 hwstatus = lock_hwlock(fl); 1342 1343 switch (hwstatus) { 1344 case HW_GRANTED: 1345 case HW_GRANTED_DUPLICATE: 1346 debuglog("HW GRANTED\n"); 1347 /* 1348 * XXX: Fixme: Check hwstatus for duplicate when 1349 * true partial file locking and accounting is 1350 * done on the hardware 1351 */ 1352 if (lnlstatus == NFS_GRANTED_DUPLICATE) { 1353 retval = PFL_GRANTED_DUPLICATE; 1354 } else { 1355 retval = PFL_GRANTED; 1356 } 1357 monitor_lock_host(fl->client_name); 1358 break; 1359 case HW_RESERR: 1360 debuglog("HW RESERR\n"); 1361 retval = PFL_HWRESERR; 1362 break; 1363 case HW_DENIED: 1364 debuglog("HW DENIED\n"); 1365 retval = PFL_HWDENIED; 1366 break; 1367 default: 1368 debuglog("Unmatched hwstatus %d\n",hwstatus); 1369 break; 1370 } 1371 1372 if (retval != PFL_GRANTED && 1373 retval != PFL_GRANTED_DUPLICATE) { 1374 /* Clean up the NFS lock */ 1375 debuglog("Deleting trial NFS lock\n"); 1376 delete_nfslock(fl); 1377 } 1378 break; 1379 case NFS_DENIED: 1380 retval = PFL_NFSDENIED; 1381 break; 1382 case NFS_RESERR: 1383 retval = PFL_NFSRESERR; 1384 default: 1385 debuglog("Unmatched lnlstatus %d\n"); 1386 retval = PFL_NFSDENIED_NOLOCK; 1387 break; 1388 } 1389 1390 /* 1391 * By the time fl reaches here, it is completely free again on 1392 * failure. The NFS lock done before attempting the 1393 * hardware lock has been backed out 1394 */ 1395 1396 if (retval == PFL_NFSDENIED || retval == PFL_HWDENIED) { 1397 /* Once last chance to check the lock */ 1398 if (fl->blocking == 1) { 1399 /* Queue the lock */ 1400 debuglog("BLOCKING LOCK RECEIVED\n"); 1401 retval = (retval == PFL_NFSDENIED ? 1402 PFL_NFSBLOCKED : PFL_HWBLOCKED); 1403 add_blockingfilelock(fl); 1404 dump_filelock(fl); 1405 } else { 1406 /* Leave retval alone, it's already correct */ 1407 debuglog("Lock denied. Non-blocking failure\n"); 1408 dump_filelock(fl); 1409 } 1410 } 1411 1412 debuglog("Exiting lock_partialfilelock\n"); 1413 1414 return retval; 1415 } 1416 1417 /* 1418 * unlock_partialfilelock: 1419 * 1420 * Given a file_lock, unlock all locks which match. 1421 * 1422 * Note that a given lock might have to unlock ITSELF! See 1423 * clear_partialfilelock for example. 1424 */ 1425 1426 enum partialfilelock_status 1427 unlock_partialfilelock(const struct file_lock *fl) 1428 { 1429 struct file_lock *lfl,*rfl,*releasedfl,*selffl; 1430 enum partialfilelock_status retval; 1431 enum nfslock_status unlstatus; 1432 enum hwlock_status unlhwstatus, lhwstatus; 1433 1434 debuglog("Entering unlock_partialfilelock\n"); 1435 1436 selffl = NULL; 1437 lfl = NULL; 1438 rfl = NULL; 1439 releasedfl = NULL; 1440 retval = PFL_DENIED; 1441 1442 /* 1443 * There are significant overlap and atomicity issues 1444 * with partially releasing a lock. For example, releasing 1445 * part of an NFS shared lock does *not* always release the 1446 * corresponding part of the file since there is only one 1447 * rpc.lockd UID but multiple users could be requesting it 1448 * from NFS. Also, an unlock request should never allow 1449 * another process to gain a lock on the remaining parts. 1450 * ie. Always apply the new locks before releasing the 1451 * old one 1452 */ 1453 1454 /* 1455 * Loop is required since multiple little locks 1456 * can be allocated and then deallocated with one 1457 * big unlock. 1458 * 1459 * The loop is required to be here so that the nfs & 1460 * hw subsystems do not need to communicate with one 1461 * one another 1462 */ 1463 1464 do { 1465 debuglog("Value of releasedfl: %p\n",releasedfl); 1466 /* lfl&rfl are created *AND* placed into the NFS lock list if required */ 1467 unlstatus = unlock_nfslock(fl, &releasedfl, &lfl, &rfl); 1468 debuglog("Value of releasedfl: %p\n",releasedfl); 1469 1470 1471 /* XXX: This is grungy. It should be refactored to be cleaner */ 1472 if (lfl != NULL) { 1473 lhwstatus = lock_hwlock(lfl); 1474 if (lhwstatus != HW_GRANTED && 1475 lhwstatus != HW_GRANTED_DUPLICATE) { 1476 debuglog("HW duplicate lock failure for left split\n"); 1477 } 1478 monitor_lock_host(lfl->client_name); 1479 } 1480 1481 if (rfl != NULL) { 1482 lhwstatus = lock_hwlock(rfl); 1483 if (lhwstatus != HW_GRANTED && 1484 lhwstatus != HW_GRANTED_DUPLICATE) { 1485 debuglog("HW duplicate lock failure for right split\n"); 1486 } 1487 monitor_lock_host(rfl->client_name); 1488 } 1489 1490 switch (unlstatus) { 1491 case NFS_GRANTED: 1492 /* Attempt to unlock on the hardware */ 1493 debuglog("NFS unlock granted. Attempting hardware unlock\n"); 1494 1495 /* This call *MUST NOT* unlock the two newly allocated locks */ 1496 unlhwstatus = unlock_hwlock(fl); 1497 debuglog("HW unlock returned with code %d\n",unlhwstatus); 1498 1499 switch (unlhwstatus) { 1500 case HW_GRANTED: 1501 debuglog("HW unlock granted\n"); 1502 unmonitor_lock_host(releasedfl->client_name); 1503 retval = PFL_GRANTED; 1504 break; 1505 case HW_DENIED_NOLOCK: 1506 /* Huh?!?! This shouldn't happen */ 1507 debuglog("HW unlock denied no lock\n"); 1508 retval = PFL_HWRESERR; 1509 /* Break out of do-while */ 1510 unlstatus = NFS_RESERR; 1511 break; 1512 default: 1513 debuglog("HW unlock failed\n"); 1514 retval = PFL_HWRESERR; 1515 /* Break out of do-while */ 1516 unlstatus = NFS_RESERR; 1517 break; 1518 } 1519 1520 debuglog("Exiting with status retval: %d\n",retval); 1521 1522 retry_blockingfilelocklist(); 1523 break; 1524 case NFS_DENIED_NOLOCK: 1525 retval = PFL_GRANTED; 1526 debuglog("All locks cleaned out\n"); 1527 break; 1528 default: 1529 retval = PFL_NFSRESERR; 1530 debuglog("NFS unlock failure\n"); 1531 dump_filelock(fl); 1532 break; 1533 } 1534 1535 if (releasedfl != NULL) { 1536 if (fl == releasedfl) { 1537 /* 1538 * XXX: YECHHH!!! Attempt to unlock self succeeded 1539 * but we can't deallocate the space yet. This is what 1540 * happens when you don't write malloc and free together 1541 */ 1542 debuglog("Attempt to unlock self\n"); 1543 selffl = releasedfl; 1544 } else { 1545 /* 1546 * XXX: this deallocation *still* needs to migrate closer 1547 * to the allocation code way up in get_lock or the allocation 1548 * code needs to migrate down (violation of "When you write 1549 * malloc you must write free") 1550 */ 1551 1552 deallocate_file_lock(releasedfl); 1553 } 1554 } 1555 1556 } while (unlstatus == NFS_GRANTED); 1557 1558 if (selffl != NULL) { 1559 /* 1560 * This statement wipes out the incoming file lock (fl) 1561 * in spite of the fact that it is declared const 1562 */ 1563 debuglog("WARNING! Destroying incoming lock pointer\n"); 1564 deallocate_file_lock(selffl); 1565 } 1566 1567 debuglog("Exiting unlock_partialfilelock\n"); 1568 1569 return retval; 1570 } 1571 1572 /* 1573 * clear_partialfilelock 1574 * 1575 * Normally called in response to statd state number change. 1576 * Wipe out all locks held by a host. As a bonus, the act of 1577 * doing so should automatically clear their statd entries and 1578 * unmonitor the host. 1579 */ 1580 1581 void 1582 clear_partialfilelock(const char *hostname) 1583 { 1584 struct file_lock *ifl, *nfl; 1585 1586 /* Clear blocking file lock list */ 1587 clear_blockingfilelock(hostname); 1588 1589 /* do all required unlocks */ 1590 /* Note that unlock can smash the current pointer to a lock */ 1591 1592 /* 1593 * Normally, LIST_FOREACH is called for, but since 1594 * the current element *is* the iterator, deleting it 1595 * would mess up the iteration. Thus, a next element 1596 * must be used explicitly 1597 */ 1598 1599 ifl = LIST_FIRST(&nfslocklist_head); 1600 1601 while (ifl != NULL) { 1602 nfl = LIST_NEXT(ifl, nfslocklist); 1603 1604 if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) { 1605 /* Unlock destroys ifl out from underneath */ 1606 unlock_partialfilelock(ifl); 1607 /* ifl is NO LONGER VALID AT THIS POINT */ 1608 } 1609 ifl = nfl; 1610 } 1611 } 1612 1613 /* 1614 * test_partialfilelock: 1615 */ 1616 enum partialfilelock_status 1617 test_partialfilelock(const struct file_lock *fl, 1618 struct file_lock **conflicting_fl) 1619 { 1620 enum partialfilelock_status retval; 1621 enum nfslock_status teststatus; 1622 1623 debuglog("Entering testpartialfilelock...\n"); 1624 1625 retval = PFL_DENIED; 1626 1627 teststatus = test_nfslock(fl, conflicting_fl); 1628 debuglog("test_partialfilelock: teststatus %d\n",teststatus); 1629 1630 if (teststatus == NFS_GRANTED || teststatus == NFS_GRANTED_DUPLICATE) { 1631 /* XXX: Add the underlying filesystem locking code */ 1632 retval = (teststatus == NFS_GRANTED) ? 1633 PFL_GRANTED : PFL_GRANTED_DUPLICATE; 1634 debuglog("Dumping locks...\n"); 1635 dump_filelock(fl); 1636 dump_filelock(*conflicting_fl); 1637 debuglog("Done dumping locks...\n"); 1638 } else { 1639 retval = PFL_NFSDENIED; 1640 debuglog("NFS test denied.\n"); 1641 dump_filelock(fl); 1642 debuglog("Conflicting.\n"); 1643 dump_filelock(*conflicting_fl); 1644 } 1645 1646 debuglog("Exiting testpartialfilelock...\n"); 1647 1648 return retval; 1649 } 1650 1651 /* 1652 * Below here are routines associated with translating the partial file locking 1653 * codes into useful codes to send back to the NFS RPC messaging system 1654 */ 1655 1656 /* 1657 * These routines translate the (relatively) useful return codes back onto 1658 * the few return codes which the nlm subsystems wishes to trasmit 1659 */ 1660 1661 enum nlm_stats 1662 do_test(struct file_lock *fl, struct file_lock **conflicting_fl) 1663 { 1664 enum partialfilelock_status pfsret; 1665 enum nlm_stats retval; 1666 1667 debuglog("Entering do_test...\n"); 1668 1669 pfsret = test_partialfilelock(fl,conflicting_fl); 1670 1671 switch (pfsret) { 1672 case PFL_GRANTED: 1673 debuglog("PFL test lock granted\n"); 1674 dump_filelock(fl); 1675 dump_filelock(*conflicting_fl); 1676 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted; 1677 break; 1678 case PFL_GRANTED_DUPLICATE: 1679 debuglog("PFL test lock granted--duplicate id detected\n"); 1680 dump_filelock(fl); 1681 dump_filelock(*conflicting_fl); 1682 debuglog("Clearing conflicting_fl for call semantics\n"); 1683 *conflicting_fl = NULL; 1684 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted; 1685 break; 1686 case PFL_NFSDENIED: 1687 case PFL_HWDENIED: 1688 debuglog("PFL test lock denied\n"); 1689 dump_filelock(fl); 1690 dump_filelock(*conflicting_fl); 1691 retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied; 1692 break; 1693 case PFL_NFSRESERR: 1694 case PFL_HWRESERR: 1695 debuglog("PFL test lock resource fail\n"); 1696 dump_filelock(fl); 1697 dump_filelock(*conflicting_fl); 1698 retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks; 1699 break; 1700 default: 1701 debuglog("PFL test lock *FAILED*\n"); 1702 dump_filelock(fl); 1703 dump_filelock(*conflicting_fl); 1704 retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied; 1705 break; 1706 } 1707 1708 debuglog("Exiting do_test...\n"); 1709 1710 return retval; 1711 } 1712 1713 /* 1714 * do_lock: Try to acquire a lock 1715 * 1716 * This routine makes a distinction between NLM versions. I am pretty 1717 * convinced that this should be abstracted out and bounced up a level 1718 */ 1719 1720 enum nlm_stats 1721 do_lock(struct file_lock *fl) 1722 { 1723 enum partialfilelock_status pfsret; 1724 enum nlm_stats retval; 1725 1726 debuglog("Entering do_lock...\n"); 1727 1728 pfsret = lock_partialfilelock(fl); 1729 1730 switch (pfsret) { 1731 case PFL_GRANTED: 1732 debuglog("PFL lock granted"); 1733 dump_filelock(fl); 1734 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted; 1735 break; 1736 case PFL_GRANTED_DUPLICATE: 1737 debuglog("PFL lock granted--duplicate id detected"); 1738 dump_filelock(fl); 1739 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted; 1740 break; 1741 case PFL_NFSDENIED: 1742 case PFL_HWDENIED: 1743 debuglog("PFL_NFS lock denied"); 1744 dump_filelock(fl); 1745 retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied; 1746 break; 1747 case PFL_NFSBLOCKED: 1748 case PFL_HWBLOCKED: 1749 debuglog("PFL_NFS blocking lock denied. Queued.\n"); 1750 dump_filelock(fl); 1751 retval = (fl->flags & LOCK_V4) ? nlm4_blocked : nlm_blocked; 1752 break; 1753 case PFL_NFSRESERR: 1754 case PFL_HWRESERR: 1755 debuglog("PFL lock resource alocation fail\n"); 1756 dump_filelock(fl); 1757 retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks; 1758 break; 1759 default: 1760 debuglog("PFL lock *FAILED*"); 1761 dump_filelock(fl); 1762 retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied; 1763 break; 1764 } 1765 1766 debuglog("Exiting do_lock...\n"); 1767 1768 return retval; 1769 } 1770 1771 enum nlm_stats 1772 do_unlock(struct file_lock *fl) 1773 { 1774 enum partialfilelock_status pfsret; 1775 enum nlm_stats retval; 1776 1777 debuglog("Entering do_unlock...\n"); 1778 pfsret = unlock_partialfilelock(fl); 1779 1780 switch (pfsret) { 1781 case PFL_GRANTED: 1782 debuglog("PFL unlock granted"); 1783 dump_filelock(fl); 1784 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted; 1785 break; 1786 case PFL_NFSDENIED: 1787 case PFL_HWDENIED: 1788 debuglog("PFL_NFS unlock denied"); 1789 dump_filelock(fl); 1790 retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied; 1791 break; 1792 case PFL_NFSDENIED_NOLOCK: 1793 case PFL_HWDENIED_NOLOCK: 1794 debuglog("PFL_NFS no lock found\n"); 1795 retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted; 1796 break; 1797 case PFL_NFSRESERR: 1798 case PFL_HWRESERR: 1799 debuglog("PFL unlock resource failure"); 1800 dump_filelock(fl); 1801 retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks; 1802 break; 1803 default: 1804 debuglog("PFL unlock *FAILED*"); 1805 dump_filelock(fl); 1806 retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied; 1807 break; 1808 } 1809 1810 debuglog("Exiting do_unlock...\n"); 1811 1812 return retval; 1813 } 1814 1815 /* 1816 * do_clear 1817 * 1818 * This routine is non-existent because it doesn't have a return code. 1819 * It is here for completeness in case someone *does* need to do return 1820 * codes later. A decent compiler should optimize this away. 1821 */ 1822 1823 void 1824 do_clear(const char *hostname) 1825 { 1826 1827 clear_partialfilelock(hostname); 1828 } 1829 1830 /* 1831 * The following routines are all called from the code which the 1832 * RPC layer invokes 1833 */ 1834 1835 /* 1836 * testlock(): inform the caller if the requested lock would be granted 1837 * 1838 * returns NULL if lock would granted 1839 * returns pointer to a conflicting nlm4_holder if not 1840 */ 1841 1842 struct nlm4_holder * 1843 testlock(struct nlm4_lock *lock, bool_t exclusive, int flags) 1844 { 1845 struct file_lock test_fl, *conflicting_fl; 1846 1847 bzero(&test_fl, sizeof(test_fl)); 1848 1849 bcopy(lock->fh.n_bytes, &(test_fl.filehandle), sizeof(fhandle_t)); 1850 copy_nlm4_lock_to_nlm4_holder(lock, exclusive, &test_fl.client); 1851 1852 siglock(); 1853 do_test(&test_fl, &conflicting_fl); 1854 1855 if (conflicting_fl == NULL) { 1856 debuglog("No conflicting lock found\n"); 1857 sigunlock(); 1858 return NULL; 1859 } else { 1860 debuglog("Found conflicting lock\n"); 1861 dump_filelock(conflicting_fl); 1862 sigunlock(); 1863 return (&conflicting_fl->client); 1864 } 1865 } 1866 1867 /* 1868 * getlock: try to aquire the lock. 1869 * If file is already locked and we can sleep, put the lock in the list with 1870 * status LKST_WAITING; it'll be processed later. 1871 * Otherwise try to lock. If we're allowed to block, fork a child which 1872 * will do the blocking lock. 1873 */ 1874 1875 enum nlm_stats 1876 getlock(nlm4_lockargs *lckarg, struct svc_req *rqstp, const int flags) 1877 { 1878 struct file_lock *newfl; 1879 enum nlm_stats retval; 1880 1881 debuglog("Entering getlock...\n"); 1882 1883 if (grace_expired == 0 && lckarg->reclaim == 0) 1884 return (flags & LOCK_V4) ? 1885 nlm4_denied_grace_period : nlm_denied_grace_period; 1886 1887 /* allocate new file_lock for this request */ 1888 newfl = allocate_file_lock(&lckarg->alock.oh, &lckarg->cookie); 1889 if (newfl == NULL) { 1890 syslog(LOG_NOTICE, "lock allocate failed: %s", strerror(errno)); 1891 /* failed */ 1892 return (flags & LOCK_V4) ? 1893 nlm4_denied_nolocks : nlm_denied_nolocks; 1894 } 1895 1896 if (lckarg->alock.fh.n_len != sizeof(fhandle_t)) { 1897 debuglog("recieved fhandle size %d, local size %d", 1898 lckarg->alock.fh.n_len, (int)sizeof(fhandle_t)); 1899 } 1900 1901 fill_file_lock(newfl, (fhandle_t *)lckarg->alock.fh.n_bytes, 1902 (struct sockaddr *)svc_getrpccaller(rqstp->rq_xprt)->buf, 1903 lckarg->exclusive, lckarg->alock.svid, lckarg->alock.l_offset, 1904 lckarg->alock.l_len, 1905 lckarg->alock.caller_name, lckarg->state, 0, flags, lckarg->block); 1906 1907 /* 1908 * newfl is now fully constructed and deallocate_file_lock 1909 * can now be used to delete it 1910 */ 1911 1912 siglock(); 1913 debuglog("Pointer to new lock is %p\n",newfl); 1914 1915 retval = do_lock(newfl); 1916 1917 debuglog("Pointer to new lock is %p\n",newfl); 1918 sigunlock(); 1919 1920 switch (retval) 1921 { 1922 case nlm4_granted: 1923 /* case nlm_granted: is the same as nlm4_granted */ 1924 /* do_mon(lckarg->alock.caller_name); */ 1925 break; 1926 case nlm4_blocked: 1927 /* case nlm_blocked: is the same as nlm4_blocked */ 1928 /* do_mon(lckarg->alock.caller_name); */ 1929 break; 1930 default: 1931 deallocate_file_lock(newfl); 1932 break; 1933 } 1934 1935 debuglog("Exiting getlock...\n"); 1936 1937 return retval; 1938 } 1939 1940 1941 /* unlock a filehandle */ 1942 enum nlm_stats 1943 unlock(nlm4_lock *lock, const int flags) 1944 { 1945 struct file_lock fl; 1946 enum nlm_stats err; 1947 1948 siglock(); 1949 1950 debuglog("Entering unlock...\n"); 1951 1952 bzero(&fl,sizeof(struct file_lock)); 1953 bcopy(lock->fh.n_bytes, &fl.filehandle, sizeof(fhandle_t)); 1954 1955 copy_nlm4_lock_to_nlm4_holder(lock, 0, &fl.client); 1956 1957 err = do_unlock(&fl); 1958 1959 sigunlock(); 1960 1961 debuglog("Exiting unlock...\n"); 1962 1963 return err; 1964 } 1965 1966 /* 1967 * XXX: The following monitor/unmonitor routines 1968 * have not been extensively tested (ie. no regression 1969 * script exists like for the locking sections 1970 */ 1971 1972 /* 1973 * monitor_lock_host: monitor lock hosts locally with a ref count and 1974 * inform statd 1975 */ 1976 void 1977 monitor_lock_host(const char *hostname) 1978 { 1979 struct host *ihp, *nhp; 1980 struct mon smon; 1981 struct sm_stat_res sres; 1982 int rpcret, statflag; 1983 1984 rpcret = 0; 1985 statflag = 0; 1986 1987 LIST_FOREACH(ihp, &hostlst_head, hostlst) { 1988 if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) { 1989 /* Host is already monitored, bump refcount */ 1990 ++ihp->refcnt; 1991 /* Host should only be in the monitor list once */ 1992 return; 1993 } 1994 } 1995 1996 /* Host is not yet monitored, add it */ 1997 nhp = malloc(sizeof(struct host)); 1998 1999 if (nhp == NULL) { 2000 debuglog("Unable to allocate entry for statd mon\n"); 2001 return; 2002 } 2003 2004 /* Allocated new host entry, now fill the fields */ 2005 strncpy(nhp->name, hostname, SM_MAXSTRLEN); 2006 nhp->refcnt = 1; 2007 debuglog("Locally Monitoring host %16s\n",hostname); 2008 2009 debuglog("Attempting to tell statd\n"); 2010 2011 bzero(&smon,sizeof(smon)); 2012 2013 smon.mon_id.mon_name = nhp->name; 2014 smon.mon_id.my_id.my_name = "localhost\0"; 2015 2016 smon.mon_id.my_id.my_prog = NLM_PROG; 2017 smon.mon_id.my_id.my_vers = NLM_SM; 2018 smon.mon_id.my_id.my_proc = NLM_SM_NOTIFY; 2019 2020 rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_MON, xdr_mon, 2021 &smon, xdr_sm_stat_res, &sres); 2022 2023 if (rpcret == 0) { 2024 if (sres.res_stat == stat_fail) { 2025 debuglog("Statd call failed\n"); 2026 statflag = 0; 2027 } else { 2028 statflag = 1; 2029 } 2030 } else { 2031 debuglog("Rpc call to statd failed with return value: %d\n", 2032 rpcret); 2033 statflag = 0; 2034 } 2035 2036 if (statflag == 1) { 2037 LIST_INSERT_HEAD(&hostlst_head, nhp, hostlst); 2038 } else { 2039 free(nhp); 2040 } 2041 2042 } 2043 2044 /* 2045 * unmonitor_lock_host: clear monitor ref counts and inform statd when gone 2046 */ 2047 void 2048 unmonitor_lock_host(const char *hostname) 2049 { 2050 struct host *ihp; 2051 struct mon_id smon_id; 2052 struct sm_stat smstat; 2053 int rpcret; 2054 2055 rpcret = 0; 2056 2057 for( ihp=LIST_FIRST(&hostlst_head); ihp != NULL; 2058 ihp=LIST_NEXT(ihp, hostlst)) { 2059 if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) { 2060 /* Host is monitored, bump refcount */ 2061 --ihp->refcnt; 2062 /* Host should only be in the monitor list once */ 2063 break; 2064 } 2065 } 2066 2067 if (ihp == NULL) { 2068 debuglog("Could not find host %16s in mon list\n", hostname); 2069 return; 2070 } 2071 2072 if (ihp->refcnt > 0) 2073 return; 2074 2075 if (ihp->refcnt < 0) { 2076 debuglog("Negative refcount!: %d\n", 2077 ihp->refcnt); 2078 } 2079 2080 debuglog("Attempting to unmonitor host %16s\n", hostname); 2081 2082 bzero(&smon_id,sizeof(smon_id)); 2083 2084 smon_id.mon_name = (char *)hostname; 2085 smon_id.my_id.my_name = "localhost"; 2086 smon_id.my_id.my_prog = NLM_PROG; 2087 smon_id.my_id.my_vers = NLM_SM; 2088 smon_id.my_id.my_proc = NLM_SM_NOTIFY; 2089 2090 rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_UNMON, xdr_mon, 2091 &smon_id, xdr_sm_stat_res, &smstat); 2092 2093 if (rpcret != 0) { 2094 debuglog("Rpc call to unmonitor statd failed with " 2095 " return value: %d\n", rpcret); 2096 } 2097 2098 LIST_REMOVE(ihp, hostlst); 2099 free(ihp); 2100 } 2101 2102 /* 2103 * notify: Clear all locks from a host if statd complains 2104 * 2105 * XXX: This routine has not been thoroughly tested. However, neither 2106 * had the old one been. It used to compare the statd crash state counter 2107 * to the current lock state. The upshot of this was that it basically 2108 * cleared all locks from the specified host 99% of the time (with the 2109 * other 1% being a bug). Consequently, the assumption is that clearing 2110 * all locks from a host when notified by statd is acceptable. 2111 * 2112 * Please note that this routine skips the usual level of redirection 2113 * through a do_* type routine. This introduces a possible level of 2114 * error and might better be written as do_notify and take this one out. 2115 2116 */ 2117 2118 void 2119 notify(const char *hostname, const int state) 2120 { 2121 debuglog("notify from %s, new state %d", hostname, state); 2122 2123 siglock(); 2124 do_clear(hostname); 2125 sigunlock(); 2126 2127 debuglog("Leaving notify\n"); 2128 } 2129 2130 void 2131 send_granted(fl, opcode) 2132 struct file_lock *fl; 2133 int opcode; 2134 { 2135 CLIENT *cli; 2136 static char dummy; 2137 struct timeval timeo; 2138 int success; 2139 static struct nlm_res retval; 2140 static struct nlm4_res retval4; 2141 2142 debuglog("About to send granted on blocked lock\n"); 2143 sleep(1); 2144 debuglog("Blowing off return send\n"); 2145 2146 cli = get_client(fl->addr, 2147 (fl->flags & LOCK_V4) ? NLM_VERS4 : NLM_VERS); 2148 if (cli == NULL) { 2149 syslog(LOG_NOTICE, "failed to get CLIENT for %s", 2150 fl->client_name); 2151 /* 2152 * We fail to notify remote that the lock has been granted. 2153 * The client will timeout and retry, the lock will be 2154 * granted at this time. 2155 */ 2156 return; 2157 } 2158 timeo.tv_sec = 0; 2159 timeo.tv_usec = (fl->flags & LOCK_ASYNC) ? 0 : 500000; /* 0.5s */ 2160 2161 if (fl->flags & LOCK_V4) { 2162 static nlm4_testargs res; 2163 res.cookie = fl->client_cookie; 2164 res.exclusive = fl->client.exclusive; 2165 res.alock.caller_name = fl->client_name; 2166 res.alock.fh.n_len = sizeof(fhandle_t); 2167 res.alock.fh.n_bytes = (char*)&fl->filehandle; 2168 res.alock.oh = fl->client.oh; 2169 res.alock.svid = fl->client.svid; 2170 res.alock.l_offset = fl->client.l_offset; 2171 res.alock.l_len = fl->client.l_len; 2172 debuglog("sending v4 reply%s", 2173 (fl->flags & LOCK_ASYNC) ? " (async)":""); 2174 if (fl->flags & LOCK_ASYNC) { 2175 success = clnt_call(cli, NLM4_GRANTED_MSG, 2176 xdr_nlm4_testargs, &res, xdr_void, &dummy, timeo); 2177 } else { 2178 success = clnt_call(cli, NLM4_GRANTED, 2179 xdr_nlm4_testargs, &res, xdr_nlm4_res, 2180 &retval4, timeo); 2181 } 2182 } else { 2183 static nlm_testargs res; 2184 2185 res.cookie = fl->client_cookie; 2186 res.exclusive = fl->client.exclusive; 2187 res.alock.caller_name = fl->client_name; 2188 res.alock.fh.n_len = sizeof(fhandle_t); 2189 res.alock.fh.n_bytes = (char*)&fl->filehandle; 2190 res.alock.oh = fl->client.oh; 2191 res.alock.svid = fl->client.svid; 2192 res.alock.l_offset = fl->client.l_offset; 2193 res.alock.l_len = fl->client.l_len; 2194 debuglog("sending v1 reply%s", 2195 (fl->flags & LOCK_ASYNC) ? " (async)":""); 2196 if (fl->flags & LOCK_ASYNC) { 2197 success = clnt_call(cli, NLM_GRANTED_MSG, 2198 xdr_nlm_testargs, &res, xdr_void, &dummy, timeo); 2199 } else { 2200 success = clnt_call(cli, NLM_GRANTED, 2201 xdr_nlm_testargs, &res, xdr_nlm_res, 2202 &retval, timeo); 2203 } 2204 } 2205 if (debug_level > 2) 2206 debuglog("clnt_call returns %d(%s) for granted", 2207 success, clnt_sperrno(success)); 2208 2209 } 2210 2211 /* 2212 * Routines below here have not been modified in the overhaul 2213 */ 2214 2215 /* 2216 * Are these two routines still required since lockd is not spawning off 2217 * children to service locks anymore? Presumably they were originally 2218 * put in place to prevent a one child from changing the lock list out 2219 * from under another one. 2220 */ 2221 2222 void 2223 siglock(void) 2224 { 2225 sigset_t block; 2226 2227 sigemptyset(&block); 2228 sigaddset(&block, SIGCHLD); 2229 2230 if (sigprocmask(SIG_BLOCK, &block, NULL) < 0) { 2231 syslog(LOG_WARNING, "siglock failed: %s", strerror(errno)); 2232 } 2233 } 2234 2235 void 2236 sigunlock(void) 2237 { 2238 sigset_t block; 2239 2240 sigemptyset(&block); 2241 sigaddset(&block, SIGCHLD); 2242 2243 if (sigprocmask(SIG_UNBLOCK, &block, NULL) < 0) { 2244 syslog(LOG_WARNING, "sigunlock failed: %s", strerror(errno)); 2245 } 2246 } 2247 2248 2249