1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * linux/fs/lockd/svclock.c 4 * 5 * Handling of server-side locks, mostly of the blocked variety. 6 * This is the ugliest part of lockd because we tread on very thin ice. 7 * GRANT and CANCEL calls may get stuck, meet in mid-flight, etc. 8 * IMNSHO introducing the grant callback into the NLM protocol was one 9 * of the worst ideas Sun ever had. Except maybe for the idea of doing 10 * NFS file locking at all. 11 * 12 * I'm trying hard to avoid race conditions by protecting most accesses 13 * to a file's list of blocked locks through a semaphore. The global 14 * list of blocked locks is not protected in this fashion however. 15 * Therefore, some functions (such as the RPC callback for the async grant 16 * call) move blocked locks towards the head of the list *while some other 17 * process might be traversing it*. This should not be a problem in 18 * practice, because this will only cause functions traversing the list 19 * to visit some blocks twice. 20 * 21 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> 22 */ 23 24 #include <linux/types.h> 25 #include <linux/slab.h> 26 #include <linux/errno.h> 27 #include <linux/kernel.h> 28 #include <linux/sched.h> 29 #include <linux/sunrpc/clnt.h> 30 #include <linux/sunrpc/svc_xprt.h> 31 #include <linux/lockd/nlm.h> 32 #include <linux/lockd/lockd.h> 33 #include <linux/kthread.h> 34 35 #define NLMDBG_FACILITY NLMDBG_SVCLOCK 36 37 #ifdef CONFIG_LOCKD_V4 38 #define nlm_deadlock nlm4_deadlock 39 #else 40 #define nlm_deadlock nlm_lck_denied 41 #endif 42 43 static void nlmsvc_release_block(struct nlm_block *block); 44 static void nlmsvc_insert_block(struct nlm_block *block, unsigned long); 45 static void nlmsvc_remove_block(struct nlm_block *block); 46 47 static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock); 48 static void nlmsvc_freegrantargs(struct nlm_rqst *call); 49 static const struct rpc_call_ops nlmsvc_grant_ops; 50 51 /* 52 * The list of blocked locks to retry 53 */ 54 static LIST_HEAD(nlm_blocked); 55 static DEFINE_SPINLOCK(nlm_blocked_lock); 56 57 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 58 static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie) 59 { 60 /* 61 * We can get away with a static buffer because this is only called 62 * from lockd, which is single-threaded. 63 */ 64 static char buf[2*NLM_MAXCOOKIELEN+1]; 65 unsigned int i, len = sizeof(buf); 66 char *p = buf; 67 68 len--; /* allow for trailing \0 */ 69 if (len < 3) 70 return "???"; 71 for (i = 0 ; i < cookie->len ; i++) { 72 if (len < 2) { 73 strcpy(p-3, "..."); 74 break; 75 } 76 sprintf(p, "%02x", cookie->data[i]); 77 p += 2; 78 len -= 2; 79 } 80 *p = '\0'; 81 82 return buf; 83 } 84 #endif 85 86 /* 87 * Insert a blocked lock into the global list 88 */ 89 static void 90 nlmsvc_insert_block_locked(struct nlm_block *block, unsigned long when) 91 { 92 struct nlm_block *b; 93 struct list_head *pos; 94 95 dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when); 96 if (list_empty(&block->b_list)) { 97 kref_get(&block->b_count); 98 } else { 99 list_del_init(&block->b_list); 100 } 101 102 pos = &nlm_blocked; 103 if (when != NLM_NEVER) { 104 if ((when += jiffies) == NLM_NEVER) 105 when ++; 106 list_for_each(pos, &nlm_blocked) { 107 b = list_entry(pos, struct nlm_block, b_list); 108 if (time_after(b->b_when,when) || b->b_when == NLM_NEVER) 109 break; 110 } 111 /* On normal exit from the loop, pos == &nlm_blocked, 112 * so we will be adding to the end of the list - good 113 */ 114 } 115 116 list_add_tail(&block->b_list, pos); 117 block->b_when = when; 118 } 119 120 static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when) 121 { 122 spin_lock(&nlm_blocked_lock); 123 nlmsvc_insert_block_locked(block, when); 124 spin_unlock(&nlm_blocked_lock); 125 } 126 127 /* 128 * Remove a block from the global list 129 */ 130 static inline void 131 nlmsvc_remove_block(struct nlm_block *block) 132 { 133 if (!list_empty(&block->b_list)) { 134 spin_lock(&nlm_blocked_lock); 135 list_del_init(&block->b_list); 136 spin_unlock(&nlm_blocked_lock); 137 nlmsvc_release_block(block); 138 } 139 } 140 141 /* 142 * Find a block for a given lock 143 */ 144 static struct nlm_block * 145 nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock) 146 { 147 struct nlm_block *block; 148 struct file_lock *fl; 149 150 dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n", 151 file, lock->fl.fl_pid, 152 (long long)lock->fl.fl_start, 153 (long long)lock->fl.fl_end, lock->fl.fl_type); 154 list_for_each_entry(block, &nlm_blocked, b_list) { 155 fl = &block->b_call->a_args.lock.fl; 156 dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n", 157 block->b_file, fl->fl_pid, 158 (long long)fl->fl_start, 159 (long long)fl->fl_end, fl->fl_type, 160 nlmdbg_cookie2a(&block->b_call->a_args.cookie)); 161 if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) { 162 kref_get(&block->b_count); 163 return block; 164 } 165 } 166 167 return NULL; 168 } 169 170 static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b) 171 { 172 if (a->len != b->len) 173 return 0; 174 if (memcmp(a->data, b->data, a->len)) 175 return 0; 176 return 1; 177 } 178 179 /* 180 * Find a block with a given NLM cookie. 181 */ 182 static inline struct nlm_block * 183 nlmsvc_find_block(struct nlm_cookie *cookie) 184 { 185 struct nlm_block *block; 186 187 list_for_each_entry(block, &nlm_blocked, b_list) { 188 if (nlm_cookie_match(&block->b_call->a_args.cookie,cookie)) 189 goto found; 190 } 191 192 return NULL; 193 194 found: 195 dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block); 196 kref_get(&block->b_count); 197 return block; 198 } 199 200 /* 201 * Create a block and initialize it. 202 * 203 * Note: we explicitly set the cookie of the grant reply to that of 204 * the blocked lock request. The spec explicitly mentions that the client 205 * should _not_ rely on the callback containing the same cookie as the 206 * request, but (as I found out later) that's because some implementations 207 * do just this. Never mind the standards comittees, they support our 208 * logging industries. 209 * 210 * 10 years later: I hope we can safely ignore these old and broken 211 * clients by now. Let's fix this so we can uniquely identify an incoming 212 * GRANTED_RES message by cookie, without having to rely on the client's IP 213 * address. --okir 214 */ 215 static struct nlm_block * 216 nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host, 217 struct nlm_file *file, struct nlm_lock *lock, 218 struct nlm_cookie *cookie) 219 { 220 struct nlm_block *block; 221 struct nlm_rqst *call = NULL; 222 223 call = nlm_alloc_call(host); 224 if (call == NULL) 225 return NULL; 226 227 /* Allocate memory for block, and initialize arguments */ 228 block = kzalloc(sizeof(*block), GFP_KERNEL); 229 if (block == NULL) 230 goto failed; 231 kref_init(&block->b_count); 232 INIT_LIST_HEAD(&block->b_list); 233 INIT_LIST_HEAD(&block->b_flist); 234 235 if (!nlmsvc_setgrantargs(call, lock)) 236 goto failed_free; 237 238 /* Set notifier function for VFS, and init args */ 239 call->a_args.lock.fl.fl_flags |= FL_SLEEP; 240 call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations; 241 nlmclnt_next_cookie(&call->a_args.cookie); 242 243 dprintk("lockd: created block %p...\n", block); 244 245 /* Create and initialize the block */ 246 block->b_daemon = rqstp->rq_server; 247 block->b_host = host; 248 block->b_file = file; 249 file->f_count++; 250 251 /* Add to file's list of blocks */ 252 list_add(&block->b_flist, &file->f_blocks); 253 254 /* Set up RPC arguments for callback */ 255 block->b_call = call; 256 call->a_flags = RPC_TASK_ASYNC; 257 call->a_block = block; 258 259 return block; 260 261 failed_free: 262 kfree(block); 263 failed: 264 nlmsvc_release_call(call); 265 return NULL; 266 } 267 268 /* 269 * Delete a block. 270 * It is the caller's responsibility to check whether the file 271 * can be closed hereafter. 272 */ 273 static int nlmsvc_unlink_block(struct nlm_block *block) 274 { 275 int status; 276 dprintk("lockd: unlinking block %p...\n", block); 277 278 /* Remove block from list */ 279 status = locks_delete_block(&block->b_call->a_args.lock.fl); 280 nlmsvc_remove_block(block); 281 return status; 282 } 283 284 static void nlmsvc_free_block(struct kref *kref) 285 { 286 struct nlm_block *block = container_of(kref, struct nlm_block, b_count); 287 struct nlm_file *file = block->b_file; 288 289 dprintk("lockd: freeing block %p...\n", block); 290 291 /* Remove block from file's list of blocks */ 292 list_del_init(&block->b_flist); 293 mutex_unlock(&file->f_mutex); 294 295 nlmsvc_freegrantargs(block->b_call); 296 nlmsvc_release_call(block->b_call); 297 nlm_release_file(block->b_file); 298 kfree(block); 299 } 300 301 static void nlmsvc_release_block(struct nlm_block *block) 302 { 303 if (block != NULL) 304 kref_put_mutex(&block->b_count, nlmsvc_free_block, &block->b_file->f_mutex); 305 } 306 307 /* 308 * Loop over all blocks and delete blocks held by 309 * a matching host. 310 */ 311 void nlmsvc_traverse_blocks(struct nlm_host *host, 312 struct nlm_file *file, 313 nlm_host_match_fn_t match) 314 { 315 struct nlm_block *block, *next; 316 317 restart: 318 mutex_lock(&file->f_mutex); 319 list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) { 320 if (!match(block->b_host, host)) 321 continue; 322 /* Do not destroy blocks that are not on 323 * the global retry list - why? */ 324 if (list_empty(&block->b_list)) 325 continue; 326 kref_get(&block->b_count); 327 mutex_unlock(&file->f_mutex); 328 nlmsvc_unlink_block(block); 329 nlmsvc_release_block(block); 330 goto restart; 331 } 332 mutex_unlock(&file->f_mutex); 333 } 334 335 static struct nlm_lockowner * 336 nlmsvc_get_lockowner(struct nlm_lockowner *lockowner) 337 { 338 refcount_inc(&lockowner->count); 339 return lockowner; 340 } 341 342 static void nlmsvc_put_lockowner(struct nlm_lockowner *lockowner) 343 { 344 if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock)) 345 return; 346 list_del(&lockowner->list); 347 spin_unlock(&lockowner->host->h_lock); 348 nlmsvc_release_host(lockowner->host); 349 kfree(lockowner); 350 } 351 352 static struct nlm_lockowner *__nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid) 353 { 354 struct nlm_lockowner *lockowner; 355 list_for_each_entry(lockowner, &host->h_lockowners, list) { 356 if (lockowner->pid != pid) 357 continue; 358 return nlmsvc_get_lockowner(lockowner); 359 } 360 return NULL; 361 } 362 363 static struct nlm_lockowner *nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid) 364 { 365 struct nlm_lockowner *res, *new = NULL; 366 367 spin_lock(&host->h_lock); 368 res = __nlmsvc_find_lockowner(host, pid); 369 370 if (res == NULL) { 371 spin_unlock(&host->h_lock); 372 new = kmalloc(sizeof(*res), GFP_KERNEL); 373 spin_lock(&host->h_lock); 374 res = __nlmsvc_find_lockowner(host, pid); 375 if (res == NULL && new != NULL) { 376 res = new; 377 /* fs/locks.c will manage the refcount through lock_ops */ 378 refcount_set(&new->count, 1); 379 new->pid = pid; 380 new->host = nlm_get_host(host); 381 list_add(&new->list, &host->h_lockowners); 382 new = NULL; 383 } 384 } 385 386 spin_unlock(&host->h_lock); 387 kfree(new); 388 return res; 389 } 390 391 void 392 nlmsvc_release_lockowner(struct nlm_lock *lock) 393 { 394 if (lock->fl.fl_owner) 395 nlmsvc_put_lockowner(lock->fl.fl_owner); 396 } 397 398 static void nlmsvc_locks_copy_lock(struct file_lock *new, struct file_lock *fl) 399 { 400 struct nlm_lockowner *nlm_lo = (struct nlm_lockowner *)fl->fl_owner; 401 new->fl_owner = nlmsvc_get_lockowner(nlm_lo); 402 } 403 404 static void nlmsvc_locks_release_private(struct file_lock *fl) 405 { 406 nlmsvc_put_lockowner((struct nlm_lockowner *)fl->fl_owner); 407 } 408 409 static const struct file_lock_operations nlmsvc_lock_ops = { 410 .fl_copy_lock = nlmsvc_locks_copy_lock, 411 .fl_release_private = nlmsvc_locks_release_private, 412 }; 413 414 void nlmsvc_locks_init_private(struct file_lock *fl, struct nlm_host *host, 415 pid_t pid) 416 { 417 fl->fl_owner = nlmsvc_find_lockowner(host, pid); 418 if (fl->fl_owner != NULL) 419 fl->fl_ops = &nlmsvc_lock_ops; 420 } 421 422 /* 423 * Initialize arguments for GRANTED call. The nlm_rqst structure 424 * has been cleared already. 425 */ 426 static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock) 427 { 428 locks_copy_lock(&call->a_args.lock.fl, &lock->fl); 429 memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh)); 430 call->a_args.lock.caller = utsname()->nodename; 431 call->a_args.lock.oh.len = lock->oh.len; 432 433 /* set default data area */ 434 call->a_args.lock.oh.data = call->a_owner; 435 call->a_args.lock.svid = ((struct nlm_lockowner *)lock->fl.fl_owner)->pid; 436 437 if (lock->oh.len > NLMCLNT_OHSIZE) { 438 void *data = kmalloc(lock->oh.len, GFP_KERNEL); 439 if (!data) 440 return 0; 441 call->a_args.lock.oh.data = (u8 *) data; 442 } 443 444 memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len); 445 return 1; 446 } 447 448 static void nlmsvc_freegrantargs(struct nlm_rqst *call) 449 { 450 if (call->a_args.lock.oh.data != call->a_owner) 451 kfree(call->a_args.lock.oh.data); 452 453 locks_release_private(&call->a_args.lock.fl); 454 } 455 456 /* 457 * Deferred lock request handling for non-blocking lock 458 */ 459 static __be32 460 nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block) 461 { 462 __be32 status = nlm_lck_denied_nolocks; 463 464 block->b_flags |= B_QUEUED; 465 466 nlmsvc_insert_block(block, NLM_TIMEOUT); 467 468 block->b_cache_req = &rqstp->rq_chandle; 469 if (rqstp->rq_chandle.defer) { 470 block->b_deferred_req = 471 rqstp->rq_chandle.defer(block->b_cache_req); 472 if (block->b_deferred_req != NULL) 473 status = nlm_drop_reply; 474 } 475 dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n", 476 block, block->b_flags, ntohl(status)); 477 478 return status; 479 } 480 481 /* 482 * Attempt to establish a lock, and if it can't be granted, block it 483 * if required. 484 */ 485 __be32 486 nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, 487 struct nlm_host *host, struct nlm_lock *lock, int wait, 488 struct nlm_cookie *cookie, int reclaim) 489 { 490 struct nlm_block *block = NULL; 491 int error; 492 __be32 ret; 493 494 dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n", 495 locks_inode(file->f_file)->i_sb->s_id, 496 locks_inode(file->f_file)->i_ino, 497 lock->fl.fl_type, lock->fl.fl_pid, 498 (long long)lock->fl.fl_start, 499 (long long)lock->fl.fl_end, 500 wait); 501 502 /* Lock file against concurrent access */ 503 mutex_lock(&file->f_mutex); 504 /* Get existing block (in case client is busy-waiting) 505 * or create new block 506 */ 507 block = nlmsvc_lookup_block(file, lock); 508 if (block == NULL) { 509 block = nlmsvc_create_block(rqstp, host, file, lock, cookie); 510 ret = nlm_lck_denied_nolocks; 511 if (block == NULL) 512 goto out; 513 lock = &block->b_call->a_args.lock; 514 } else 515 lock->fl.fl_flags &= ~FL_SLEEP; 516 517 if (block->b_flags & B_QUEUED) { 518 dprintk("lockd: nlmsvc_lock deferred block %p flags %d\n", 519 block, block->b_flags); 520 if (block->b_granted) { 521 nlmsvc_unlink_block(block); 522 ret = nlm_granted; 523 goto out; 524 } 525 if (block->b_flags & B_TIMED_OUT) { 526 nlmsvc_unlink_block(block); 527 ret = nlm_lck_denied; 528 goto out; 529 } 530 ret = nlm_drop_reply; 531 goto out; 532 } 533 534 if (locks_in_grace(SVC_NET(rqstp)) && !reclaim) { 535 ret = nlm_lck_denied_grace_period; 536 goto out; 537 } 538 if (reclaim && !locks_in_grace(SVC_NET(rqstp))) { 539 ret = nlm_lck_denied_grace_period; 540 goto out; 541 } 542 543 if (!wait) 544 lock->fl.fl_flags &= ~FL_SLEEP; 545 error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL); 546 lock->fl.fl_flags &= ~FL_SLEEP; 547 548 dprintk("lockd: vfs_lock_file returned %d\n", error); 549 switch (error) { 550 case 0: 551 ret = nlm_granted; 552 goto out; 553 case -EAGAIN: 554 /* 555 * If this is a blocking request for an 556 * already pending lock request then we need 557 * to put it back on lockd's block list 558 */ 559 if (wait) 560 break; 561 ret = nlm_lck_denied; 562 goto out; 563 case FILE_LOCK_DEFERRED: 564 if (wait) 565 break; 566 /* Filesystem lock operation is in progress 567 Add it to the queue waiting for callback */ 568 ret = nlmsvc_defer_lock_rqst(rqstp, block); 569 goto out; 570 case -EDEADLK: 571 ret = nlm_deadlock; 572 goto out; 573 default: /* includes ENOLCK */ 574 ret = nlm_lck_denied_nolocks; 575 goto out; 576 } 577 578 ret = nlm_lck_blocked; 579 580 /* Append to list of blocked */ 581 nlmsvc_insert_block(block, NLM_NEVER); 582 out: 583 mutex_unlock(&file->f_mutex); 584 nlmsvc_release_block(block); 585 dprintk("lockd: nlmsvc_lock returned %u\n", ret); 586 return ret; 587 } 588 589 /* 590 * Test for presence of a conflicting lock. 591 */ 592 __be32 593 nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file, 594 struct nlm_host *host, struct nlm_lock *lock, 595 struct nlm_lock *conflock, struct nlm_cookie *cookie) 596 { 597 int error; 598 __be32 ret; 599 struct nlm_lockowner *test_owner; 600 601 dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n", 602 locks_inode(file->f_file)->i_sb->s_id, 603 locks_inode(file->f_file)->i_ino, 604 lock->fl.fl_type, 605 (long long)lock->fl.fl_start, 606 (long long)lock->fl.fl_end); 607 608 if (locks_in_grace(SVC_NET(rqstp))) { 609 ret = nlm_lck_denied_grace_period; 610 goto out; 611 } 612 613 /* If there's a conflicting lock, remember to clean up the test lock */ 614 test_owner = (struct nlm_lockowner *)lock->fl.fl_owner; 615 616 error = vfs_test_lock(file->f_file, &lock->fl); 617 if (error) { 618 /* We can't currently deal with deferred test requests */ 619 if (error == FILE_LOCK_DEFERRED) 620 WARN_ON_ONCE(1); 621 622 ret = nlm_lck_denied_nolocks; 623 goto out; 624 } 625 626 if (lock->fl.fl_type == F_UNLCK) { 627 ret = nlm_granted; 628 goto out; 629 } 630 631 dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n", 632 lock->fl.fl_type, (long long)lock->fl.fl_start, 633 (long long)lock->fl.fl_end); 634 conflock->caller = "somehost"; /* FIXME */ 635 conflock->len = strlen(conflock->caller); 636 conflock->oh.len = 0; /* don't return OH info */ 637 conflock->svid = ((struct nlm_lockowner *)lock->fl.fl_owner)->pid; 638 conflock->fl.fl_type = lock->fl.fl_type; 639 conflock->fl.fl_start = lock->fl.fl_start; 640 conflock->fl.fl_end = lock->fl.fl_end; 641 locks_release_private(&lock->fl); 642 643 /* Clean up the test lock */ 644 lock->fl.fl_owner = NULL; 645 nlmsvc_put_lockowner(test_owner); 646 647 ret = nlm_lck_denied; 648 out: 649 return ret; 650 } 651 652 /* 653 * Remove a lock. 654 * This implies a CANCEL call: We send a GRANT_MSG, the client replies 655 * with a GRANT_RES call which gets lost, and calls UNLOCK immediately 656 * afterwards. In this case the block will still be there, and hence 657 * must be removed. 658 */ 659 __be32 660 nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock) 661 { 662 int error; 663 664 dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n", 665 locks_inode(file->f_file)->i_sb->s_id, 666 locks_inode(file->f_file)->i_ino, 667 lock->fl.fl_pid, 668 (long long)lock->fl.fl_start, 669 (long long)lock->fl.fl_end); 670 671 /* First, cancel any lock that might be there */ 672 nlmsvc_cancel_blocked(net, file, lock); 673 674 lock->fl.fl_type = F_UNLCK; 675 error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL); 676 677 return (error < 0)? nlm_lck_denied_nolocks : nlm_granted; 678 } 679 680 /* 681 * Cancel a previously blocked request. 682 * 683 * A cancel request always overrides any grant that may currently 684 * be in progress. 685 * The calling procedure must check whether the file can be closed. 686 */ 687 __be32 688 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *lock) 689 { 690 struct nlm_block *block; 691 int status = 0; 692 693 dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n", 694 locks_inode(file->f_file)->i_sb->s_id, 695 locks_inode(file->f_file)->i_ino, 696 lock->fl.fl_pid, 697 (long long)lock->fl.fl_start, 698 (long long)lock->fl.fl_end); 699 700 if (locks_in_grace(net)) 701 return nlm_lck_denied_grace_period; 702 703 mutex_lock(&file->f_mutex); 704 block = nlmsvc_lookup_block(file, lock); 705 mutex_unlock(&file->f_mutex); 706 if (block != NULL) { 707 vfs_cancel_lock(block->b_file->f_file, 708 &block->b_call->a_args.lock.fl); 709 status = nlmsvc_unlink_block(block); 710 nlmsvc_release_block(block); 711 } 712 return status ? nlm_lck_denied : nlm_granted; 713 } 714 715 /* 716 * This is a callback from the filesystem for VFS file lock requests. 717 * It will be used if lm_grant is defined and the filesystem can not 718 * respond to the request immediately. 719 * For SETLK or SETLKW request it will get the local posix lock. 720 * In all cases it will move the block to the head of nlm_blocked q where 721 * nlmsvc_retry_blocked() can send back a reply for SETLKW or revisit the 722 * deferred rpc for GETLK and SETLK. 723 */ 724 static void 725 nlmsvc_update_deferred_block(struct nlm_block *block, int result) 726 { 727 block->b_flags |= B_GOT_CALLBACK; 728 if (result == 0) 729 block->b_granted = 1; 730 else 731 block->b_flags |= B_TIMED_OUT; 732 } 733 734 static int nlmsvc_grant_deferred(struct file_lock *fl, int result) 735 { 736 struct nlm_block *block; 737 int rc = -ENOENT; 738 739 spin_lock(&nlm_blocked_lock); 740 list_for_each_entry(block, &nlm_blocked, b_list) { 741 if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) { 742 dprintk("lockd: nlmsvc_notify_blocked block %p flags %d\n", 743 block, block->b_flags); 744 if (block->b_flags & B_QUEUED) { 745 if (block->b_flags & B_TIMED_OUT) { 746 rc = -ENOLCK; 747 break; 748 } 749 nlmsvc_update_deferred_block(block, result); 750 } else if (result == 0) 751 block->b_granted = 1; 752 753 nlmsvc_insert_block_locked(block, 0); 754 svc_wake_up(block->b_daemon); 755 rc = 0; 756 break; 757 } 758 } 759 spin_unlock(&nlm_blocked_lock); 760 if (rc == -ENOENT) 761 printk(KERN_WARNING "lockd: grant for unknown block\n"); 762 return rc; 763 } 764 765 /* 766 * Unblock a blocked lock request. This is a callback invoked from the 767 * VFS layer when a lock on which we blocked is removed. 768 * 769 * This function doesn't grant the blocked lock instantly, but rather moves 770 * the block to the head of nlm_blocked where it can be picked up by lockd. 771 */ 772 static void 773 nlmsvc_notify_blocked(struct file_lock *fl) 774 { 775 struct nlm_block *block; 776 777 dprintk("lockd: VFS unblock notification for block %p\n", fl); 778 spin_lock(&nlm_blocked_lock); 779 list_for_each_entry(block, &nlm_blocked, b_list) { 780 if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) { 781 nlmsvc_insert_block_locked(block, 0); 782 spin_unlock(&nlm_blocked_lock); 783 svc_wake_up(block->b_daemon); 784 return; 785 } 786 } 787 spin_unlock(&nlm_blocked_lock); 788 printk(KERN_WARNING "lockd: notification for unknown block!\n"); 789 } 790 791 const struct lock_manager_operations nlmsvc_lock_operations = { 792 .lm_notify = nlmsvc_notify_blocked, 793 .lm_grant = nlmsvc_grant_deferred, 794 }; 795 796 /* 797 * Try to claim a lock that was previously blocked. 798 * 799 * Note that we use both the RPC_GRANTED_MSG call _and_ an async 800 * RPC thread when notifying the client. This seems like overkill... 801 * Here's why: 802 * - we don't want to use a synchronous RPC thread, otherwise 803 * we might find ourselves hanging on a dead portmapper. 804 * - Some lockd implementations (e.g. HP) don't react to 805 * RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls. 806 */ 807 static void 808 nlmsvc_grant_blocked(struct nlm_block *block) 809 { 810 struct nlm_file *file = block->b_file; 811 struct nlm_lock *lock = &block->b_call->a_args.lock; 812 int error; 813 loff_t fl_start, fl_end; 814 815 dprintk("lockd: grant blocked lock %p\n", block); 816 817 kref_get(&block->b_count); 818 819 /* Unlink block request from list */ 820 nlmsvc_unlink_block(block); 821 822 /* If b_granted is true this means we've been here before. 823 * Just retry the grant callback, possibly refreshing the RPC 824 * binding */ 825 if (block->b_granted) { 826 nlm_rebind_host(block->b_host); 827 goto callback; 828 } 829 830 /* Try the lock operation again */ 831 /* vfs_lock_file() can mangle fl_start and fl_end, but we need 832 * them unchanged for the GRANT_MSG 833 */ 834 lock->fl.fl_flags |= FL_SLEEP; 835 fl_start = lock->fl.fl_start; 836 fl_end = lock->fl.fl_end; 837 error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL); 838 lock->fl.fl_flags &= ~FL_SLEEP; 839 lock->fl.fl_start = fl_start; 840 lock->fl.fl_end = fl_end; 841 842 switch (error) { 843 case 0: 844 break; 845 case FILE_LOCK_DEFERRED: 846 dprintk("lockd: lock still blocked error %d\n", error); 847 nlmsvc_insert_block(block, NLM_NEVER); 848 nlmsvc_release_block(block); 849 return; 850 default: 851 printk(KERN_WARNING "lockd: unexpected error %d in %s!\n", 852 -error, __func__); 853 nlmsvc_insert_block(block, 10 * HZ); 854 nlmsvc_release_block(block); 855 return; 856 } 857 858 callback: 859 /* Lock was granted by VFS. */ 860 dprintk("lockd: GRANTing blocked lock.\n"); 861 block->b_granted = 1; 862 863 /* keep block on the list, but don't reattempt until the RPC 864 * completes or the submission fails 865 */ 866 nlmsvc_insert_block(block, NLM_NEVER); 867 868 /* Call the client -- use a soft RPC task since nlmsvc_retry_blocked 869 * will queue up a new one if this one times out 870 */ 871 error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG, 872 &nlmsvc_grant_ops); 873 874 /* RPC submission failed, wait a bit and retry */ 875 if (error < 0) 876 nlmsvc_insert_block(block, 10 * HZ); 877 } 878 879 /* 880 * This is the callback from the RPC layer when the NLM_GRANTED_MSG 881 * RPC call has succeeded or timed out. 882 * Like all RPC callbacks, it is invoked by the rpciod process, so it 883 * better not sleep. Therefore, we put the blocked lock on the nlm_blocked 884 * chain once more in order to have it removed by lockd itself (which can 885 * then sleep on the file semaphore without disrupting e.g. the nfs client). 886 */ 887 static void nlmsvc_grant_callback(struct rpc_task *task, void *data) 888 { 889 struct nlm_rqst *call = data; 890 struct nlm_block *block = call->a_block; 891 unsigned long timeout; 892 893 dprintk("lockd: GRANT_MSG RPC callback\n"); 894 895 spin_lock(&nlm_blocked_lock); 896 /* if the block is not on a list at this point then it has 897 * been invalidated. Don't try to requeue it. 898 * 899 * FIXME: it's possible that the block is removed from the list 900 * after this check but before the nlmsvc_insert_block. In that 901 * case it will be added back. Perhaps we need better locking 902 * for nlm_blocked? 903 */ 904 if (list_empty(&block->b_list)) 905 goto out; 906 907 /* Technically, we should down the file semaphore here. Since we 908 * move the block towards the head of the queue only, no harm 909 * can be done, though. */ 910 if (task->tk_status < 0) { 911 /* RPC error: Re-insert for retransmission */ 912 timeout = 10 * HZ; 913 } else { 914 /* Call was successful, now wait for client callback */ 915 timeout = 60 * HZ; 916 } 917 nlmsvc_insert_block_locked(block, timeout); 918 svc_wake_up(block->b_daemon); 919 out: 920 spin_unlock(&nlm_blocked_lock); 921 } 922 923 /* 924 * FIXME: nlmsvc_release_block() grabs a mutex. This is not allowed for an 925 * .rpc_release rpc_call_op 926 */ 927 static void nlmsvc_grant_release(void *data) 928 { 929 struct nlm_rqst *call = data; 930 nlmsvc_release_block(call->a_block); 931 } 932 933 static const struct rpc_call_ops nlmsvc_grant_ops = { 934 .rpc_call_done = nlmsvc_grant_callback, 935 .rpc_release = nlmsvc_grant_release, 936 }; 937 938 /* 939 * We received a GRANT_RES callback. Try to find the corresponding 940 * block. 941 */ 942 void 943 nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status) 944 { 945 struct nlm_block *block; 946 947 dprintk("grant_reply: looking for cookie %x, s=%d \n", 948 *(unsigned int *)(cookie->data), status); 949 if (!(block = nlmsvc_find_block(cookie))) 950 return; 951 952 if (status == nlm_lck_denied_grace_period) { 953 /* Try again in a couple of seconds */ 954 nlmsvc_insert_block(block, 10 * HZ); 955 } else { 956 /* 957 * Lock is now held by client, or has been rejected. 958 * In both cases, the block should be removed. 959 */ 960 nlmsvc_unlink_block(block); 961 } 962 nlmsvc_release_block(block); 963 } 964 965 /* Helper function to handle retry of a deferred block. 966 * If it is a blocking lock, call grant_blocked. 967 * For a non-blocking lock or test lock, revisit the request. 968 */ 969 static void 970 retry_deferred_block(struct nlm_block *block) 971 { 972 if (!(block->b_flags & B_GOT_CALLBACK)) 973 block->b_flags |= B_TIMED_OUT; 974 nlmsvc_insert_block(block, NLM_TIMEOUT); 975 dprintk("revisit block %p flags %d\n", block, block->b_flags); 976 if (block->b_deferred_req) { 977 block->b_deferred_req->revisit(block->b_deferred_req, 0); 978 block->b_deferred_req = NULL; 979 } 980 } 981 982 /* 983 * Retry all blocked locks that have been notified. This is where lockd 984 * picks up locks that can be granted, or grant notifications that must 985 * be retransmitted. 986 */ 987 unsigned long 988 nlmsvc_retry_blocked(void) 989 { 990 unsigned long timeout = MAX_SCHEDULE_TIMEOUT; 991 struct nlm_block *block; 992 993 spin_lock(&nlm_blocked_lock); 994 while (!list_empty(&nlm_blocked) && !kthread_should_stop()) { 995 block = list_entry(nlm_blocked.next, struct nlm_block, b_list); 996 997 if (block->b_when == NLM_NEVER) 998 break; 999 if (time_after(block->b_when, jiffies)) { 1000 timeout = block->b_when - jiffies; 1001 break; 1002 } 1003 spin_unlock(&nlm_blocked_lock); 1004 1005 dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n", 1006 block, block->b_when); 1007 if (block->b_flags & B_QUEUED) { 1008 dprintk("nlmsvc_retry_blocked delete block (%p, granted=%d, flags=%d)\n", 1009 block, block->b_granted, block->b_flags); 1010 retry_deferred_block(block); 1011 } else 1012 nlmsvc_grant_blocked(block); 1013 spin_lock(&nlm_blocked_lock); 1014 } 1015 spin_unlock(&nlm_blocked_lock); 1016 1017 return timeout; 1018 } 1019