// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/lockd/svclock.c
 *
 * Handling of server-side locks, mostly of the blocked variety.
 * This is the ugliest part of lockd because we tread on very thin ice.
 * GRANT and CANCEL calls may get stuck, meet in mid-flight, etc.
 * IMNSHO introducing the grant callback into the NLM protocol was one
 * of the worst ideas Sun ever had. Except maybe for the idea of doing
 * NFS file locking at all.
 *
 * I'm trying hard to avoid race conditions by protecting most accesses
 * to a file's list of blocked locks through a semaphore. The global
 * list of blocked locks is not protected in this fashion however.
 * Therefore, some functions (such as the RPC callback for the async grant
 * call) move blocked locks towards the head of the list *while some other
 * process might be traversing it*. This should not be a problem in
 * practice, because this will only cause functions traversing the list
 * to visit some blocks twice.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/nlm.h>
#include <linux/lockd/lockd.h>

#define NLMDBG_FACILITY		NLMDBG_SVCLOCK

#ifdef CONFIG_LOCKD_V4
#define nlm_deadlock	nlm4_deadlock
#else
#define nlm_deadlock	nlm_lck_denied
#endif

static void nlmsvc_release_block(struct nlm_block *block);
static void nlmsvc_insert_block(struct nlm_block *block, unsigned long);
static void nlmsvc_remove_block(struct nlm_block *block);

static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
static void nlmsvc_freegrantargs(struct nlm_rqst *call);
static const struct rpc_call_ops nlmsvc_grant_ops;

/*
 * The list of blocked locks to retry
 */
static LIST_HEAD(nlm_blocked);
static DEFINE_SPINLOCK(nlm_blocked_lock);

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
{
	/*
	 * We can get away with a static buffer because this is only called
	 * from lockd, which is single-threaded.
	 */
	static char buf[2*NLM_MAXCOOKIELEN+1];
	unsigned int i, len = sizeof(buf);
	char *p = buf;

	len--;	/* allow for trailing \0 */
	if (len < 3)
		return "???";
	for (i = 0 ; i < cookie->len ; i++) {
		if (len < 2) {
			strcpy(p-3, "...");
			break;
		}
		sprintf(p, "%02x", cookie->data[i]);
		p += 2;
		len -= 2;
	}
	*p = '\0';

	return buf;
}
#endif

/*
 * Insert a blocked lock into the global list
 */
static void
nlmsvc_insert_block_locked(struct nlm_block *block, unsigned long when)
{
	struct nlm_block *b;
	struct list_head *pos;

	dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
	if (list_empty(&block->b_list)) {
		kref_get(&block->b_count);
	} else {
		list_del_init(&block->b_list);
	}

	pos = &nlm_blocked;
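	/*
	 * Convert the relative timeout into an absolute time in jiffies
	 * (nudging it forward if it happens to collide with the NLM_NEVER
	 * sentinel) and keep the list sorted by expiry time, with NLM_NEVER
	 * entries collecting at the tail.
	 */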
	if (when != NLM_NEVER) {
		if ((when += jiffies) == NLM_NEVER)
			when++;
		list_for_each(pos, &nlm_blocked) {
			b = list_entry(pos, struct nlm_block, b_list);
			if (time_after(b->b_when, when) || b->b_when == NLM_NEVER)
				break;
		}
		/* On normal exit from the loop, pos == &nlm_blocked,
		 * so we will be adding to the end of the list - good
		 */
	}

	list_add_tail(&block->b_list, pos);
	block->b_when = when;
}

static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
{
	spin_lock(&nlm_blocked_lock);
	nlmsvc_insert_block_locked(block, when);
	spin_unlock(&nlm_blocked_lock);
}

/*
 * Remove a block from the global list
 */
static inline void
nlmsvc_remove_block(struct nlm_block *block)
{
	spin_lock(&nlm_blocked_lock);
	if (!list_empty(&block->b_list)) {
		list_del_init(&block->b_list);
		spin_unlock(&nlm_blocked_lock);
		nlmsvc_release_block(block);
		return;
	}
	spin_unlock(&nlm_blocked_lock);
}

/*
 * Find a block for a given lock
 */
static struct nlm_block *
nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block *block;
	struct file_lock *fl;

	dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
			file, lock->fl.c.flc_pid,
			(long long)lock->fl.fl_start,
			(long long)lock->fl.fl_end,
			lock->fl.c.flc_type);
	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		fl = &block->b_call->a_args.lock.fl;
		dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
				block->b_file, fl->c.flc_pid,
				(long long)fl->fl_start,
				(long long)fl->fl_end, fl->c.flc_type,
				nlmdbg_cookie2a(&block->b_call->a_args.cookie));
		if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
			kref_get(&block->b_count);
			spin_unlock(&nlm_blocked_lock);
			return block;
		}
	}
	spin_unlock(&nlm_blocked_lock);

	return NULL;
}

static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b)
{
	if (a->len != b->len)
		return 0;
	if (memcmp(a->data, b->data, a->len))
		return 0;
	return 1;
}

/*
 * Find a block with a given NLM cookie.
 */
static inline struct nlm_block *
nlmsvc_find_block(struct nlm_cookie *cookie)
{
	struct nlm_block *block;

	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_cookie_match(&block->b_call->a_args.cookie, cookie))
			goto found;
	}
	spin_unlock(&nlm_blocked_lock);

	return NULL;

found:
	dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
	kref_get(&block->b_count);
	spin_unlock(&nlm_blocked_lock);
	return block;
}

/*
 * Create a block and initialize it.
 *
 * Note: we explicitly set the cookie of the grant reply to that of
 * the blocked lock request. The spec explicitly mentions that the client
 * should _not_ rely on the callback containing the same cookie as the
 * request, but (as I found out later) that's because some implementations
 * do just this. Never mind the standards committees, they support our
 * logging industries.
 *
 * 10 years later: I hope we can safely ignore these old and broken
 * clients by now. Let's fix this so we can uniquely identify an incoming
 * GRANTED_RES message by cookie, without having to rely on the client's IP
 * address. --okir
 */
static struct nlm_block *
nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
		    struct nlm_file *file, struct nlm_lock *lock,
		    struct nlm_cookie *cookie)
{
	struct nlm_block *block;
	struct nlm_rqst *call = NULL;

	call = nlm_alloc_call(host);
	if (call == NULL)
		return NULL;

	/* Allocate memory for block, and initialize arguments */
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		goto failed;
	kref_init(&block->b_count);
	INIT_LIST_HEAD(&block->b_list);
	INIT_LIST_HEAD(&block->b_flist);

	if (!nlmsvc_setgrantargs(call, lock))
		goto failed_free;

	/* Set notifier function for VFS, and init args */
	call->a_args.lock.fl.c.flc_flags |= FL_SLEEP;
	call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
	nlmclnt_next_cookie(&call->a_args.cookie);

	dprintk("lockd: created block %p...\n", block);

	/* Create and initialize the block */
	block->b_daemon = rqstp->rq_server;
	block->b_host = host;
	block->b_file = file;
	file->f_count++;

	/* Add to file's list of blocks */
	list_add(&block->b_flist, &file->f_blocks);

	/* Set up RPC arguments for callback */
	block->b_call = call;
	call->a_flags = RPC_TASK_ASYNC;
	call->a_block = block;

	return block;

failed_free:
	kfree(block);
failed:
	nlmsvc_release_call(call);
	return NULL;
}

/*
 * Delete a block.
 * It is the caller's responsibility to check whether the file
 * can be closed hereafter.
 */
static int nlmsvc_unlink_block(struct nlm_block *block)
{
	int status;
	dprintk("lockd: unlinking block %p...\n", block);

	/* Remove block from list */
	status = locks_delete_block(&block->b_call->a_args.lock.fl);
	nlmsvc_remove_block(block);
	return status;
}

static void nlmsvc_free_block(struct kref *kref)
{
	struct nlm_block *block = container_of(kref, struct nlm_block, b_count);
	struct nlm_file *file = block->b_file;

	dprintk("lockd: freeing block %p...\n", block);

	/* Remove block from file's list of blocks */
	list_del_init(&block->b_flist);
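	/*
	 * nlmsvc_release_block() acquired ->f_mutex via kref_put_mutex()
	 * when the final reference was dropped; release it now that the
	 * block is off the file's list.
	 */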
	mutex_unlock(&file->f_mutex);

	nlmsvc_freegrantargs(block->b_call);
	nlmsvc_release_call(block->b_call);
	nlm_release_file(block->b_file);
	kfree(block);
}

static void nlmsvc_release_block(struct nlm_block *block)
{
	if (block != NULL)
		kref_put_mutex(&block->b_count, nlmsvc_free_block, &block->b_file->f_mutex);
}

/*
 * Loop over all blocks and delete blocks held by
 * a matching host.
 */
void nlmsvc_traverse_blocks(struct nlm_host *host,
			struct nlm_file *file,
			nlm_host_match_fn_t match)
{
	struct nlm_block *block, *next;

restart:
	mutex_lock(&file->f_mutex);
	spin_lock(&nlm_blocked_lock);
	list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
		if (!match(block->b_host, host))
			continue;
		/* Do not destroy blocks that are not on
		 * the global retry list - why? */
		if (list_empty(&block->b_list))
			continue;
		kref_get(&block->b_count);
		spin_unlock(&nlm_blocked_lock);
		mutex_unlock(&file->f_mutex);
		nlmsvc_unlink_block(block);
		nlmsvc_release_block(block);
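		/*
		 * Both locks were dropped above, so the lists may have
		 * changed under us; rescan from the top.
		 */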
		goto restart;
	}
	spin_unlock(&nlm_blocked_lock);
	mutex_unlock(&file->f_mutex);
}

static struct nlm_lockowner *
nlmsvc_get_lockowner(struct nlm_lockowner *lockowner)
{
	refcount_inc(&lockowner->count);
	return lockowner;
}

void nlmsvc_put_lockowner(struct nlm_lockowner *lockowner)
{
	if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
		return;
	list_del(&lockowner->list);
	spin_unlock(&lockowner->host->h_lock);
	nlmsvc_release_host(lockowner->host);
	kfree(lockowner);
}

static struct nlm_lockowner *__nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->pid != pid)
			continue;
		return nlmsvc_get_lockowner(lockowner);
	}
	return NULL;
}

static struct nlm_lockowner *nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
{
	struct nlm_lockowner *res, *new = NULL;

	spin_lock(&host->h_lock);
	res = __nlmsvc_find_lockowner(host, pid);

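	/*
	 * If there is no match, drop h_lock so the allocation can sleep,
	 * then re-check for a racing insertion before publishing the new
	 * lockowner.
	 */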
	if (res == NULL) {
		spin_unlock(&host->h_lock);
		new = kmalloc(sizeof(*res), GFP_KERNEL);
		spin_lock(&host->h_lock);
		res = __nlmsvc_find_lockowner(host, pid);
		if (res == NULL && new != NULL) {
			res = new;
			/* fs/locks.c will manage the refcount through lock_ops */
			refcount_set(&new->count, 1);
			new->pid = pid;
			new->host = nlm_get_host(host);
			list_add(&new->list, &host->h_lockowners);
			new = NULL;
		}
	}

	spin_unlock(&host->h_lock);
	kfree(new);
	return res;
}

void
nlmsvc_release_lockowner(struct nlm_lock *lock)
{
	if (lock->fl.c.flc_owner)
		nlmsvc_put_lockowner(lock->fl.c.flc_owner);
}

void nlmsvc_locks_init_private(struct file_lock *fl, struct nlm_host *host,
						pid_t pid)
{
	fl->c.flc_owner = nlmsvc_find_lockowner(host, pid);
}

/*
 * Initialize arguments for GRANTED call. The nlm_rqst structure
 * has been cleared already.
 */
static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
{
	locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
	memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
	call->a_args.lock.caller = utsname()->nodename;
	call->a_args.lock.oh.len = lock->oh.len;

	/* set default data area */
	call->a_args.lock.oh.data = call->a_owner;
	call->a_args.lock.svid = ((struct nlm_lockowner *)lock->fl.c.flc_owner)->pid;

	if (lock->oh.len > NLMCLNT_OHSIZE) {
		void *data = kmalloc(lock->oh.len, GFP_KERNEL);
		if (!data)
			return 0;
		call->a_args.lock.oh.data = (u8 *) data;
	}

	memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
	return 1;
}

static void nlmsvc_freegrantargs(struct nlm_rqst *call)
{
	if (call->a_args.lock.oh.data != call->a_owner)
		kfree(call->a_args.lock.oh.data);

	locks_release_private(&call->a_args.lock.fl);
}

/*
 * Deferred lock request handling for non-blocking lock
 */
static __be32
nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
{
	__be32 status = nlm_lck_denied_nolocks;

	block->b_flags |= B_QUEUED;

	nlmsvc_insert_block(block, NLM_TIMEOUT);

	block->b_cache_req = &rqstp->rq_chandle;
	if (rqstp->rq_chandle.defer) {
		block->b_deferred_req =
			rqstp->rq_chandle.defer(block->b_cache_req);
		if (block->b_deferred_req != NULL)
			status = nlm_drop_reply;
	}
	dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n",
		block, block->b_flags, ntohl(status));

	return status;
}

/*
 * Attempt to establish a lock, and if it can't be granted, block it
 * if required.
 */
__be32
nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
	    struct nlm_host *host, struct nlm_lock *lock, int wait,
	    struct nlm_cookie *cookie, int reclaim)
{
	struct inode *inode __maybe_unused = nlmsvc_file_inode(file);
	struct nlm_block *block = NULL;
	int error;
	int mode;
	int async_block = 0;
	__be32 ret;

	dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
		inode->i_sb->s_id, inode->i_ino,
		lock->fl.c.flc_type,
		lock->fl.c.flc_pid,
		(long long)lock->fl.fl_start,
		(long long)lock->fl.fl_end,
		wait);

	if (!locks_can_async_lock(nlmsvc_file_file(file)->f_op)) {
		async_block = wait;
		wait = 0;
	}

	/* Lock file against concurrent access */
	mutex_lock(&file->f_mutex);
	/* Get existing block (in case client is busy-waiting)
	 * or create new block
	 */
	block = nlmsvc_lookup_block(file, lock);
	if (block == NULL) {
		block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
		ret = nlm_lck_denied_nolocks;
		if (block == NULL)
			goto out;
		lock = &block->b_call->a_args.lock;
	} else
		lock->fl.c.flc_flags &= ~FL_SLEEP;

	if (block->b_flags & B_QUEUED) {
		dprintk("lockd: nlmsvc_lock deferred block %p flags %d\n",
			block, block->b_flags);
		if (block->b_granted) {
			nlmsvc_unlink_block(block);
			ret = nlm_granted;
			goto out;
		}
		if (block->b_flags & B_TIMED_OUT) {
			nlmsvc_unlink_block(block);
			ret = nlm_lck_denied;
			goto out;
		}
		ret = nlm_drop_reply;
		goto out;
	}

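	/*
	 * During the grace period only reclaim requests may proceed, and a
	 * reclaim is meaningless outside it; both cases get the same status.
	 */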
	if (locks_in_grace(SVC_NET(rqstp)) && !reclaim) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}
	if (reclaim && !locks_in_grace(SVC_NET(rqstp))) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}

	spin_lock(&nlm_blocked_lock);
	/*
	 * If this is a lock request for an already pending
	 * lock request we return nlm_lck_blocked without calling
	 * vfs_lock_file() again. Otherwise we have two pending
	 * requests on the underlying ->lock() implementation but
	 * only one nlm_block to be granted by lm_grant().
	 */
	if (locks_can_async_lock(nlmsvc_file_file(file)->f_op) &&
	    !list_empty(&block->b_list)) {
		spin_unlock(&nlm_blocked_lock);
		ret = nlm_lck_blocked;
		goto out;
	}

	/* Append to list of blocked */
	nlmsvc_insert_block_locked(block, NLM_NEVER);
	spin_unlock(&nlm_blocked_lock);

	if (!wait)
		lock->fl.c.flc_flags &= ~FL_SLEEP;
	mode = lock_to_openmode(&lock->fl);
	error = vfs_lock_file(file->f_file[mode], F_SETLK, &lock->fl, NULL);
	lock->fl.c.flc_flags &= ~FL_SLEEP;

	dprintk("lockd: vfs_lock_file returned %d\n", error);
	switch (error) {
	case 0:
		nlmsvc_remove_block(block);
		ret = nlm_granted;
		goto out;
	case -EAGAIN:
		if (!wait)
			nlmsvc_remove_block(block);
		ret = async_block ? nlm_lck_blocked : nlm_lck_denied;
		goto out;
	case FILE_LOCK_DEFERRED:
		if (wait)
			break;
		/* Filesystem lock operation is in progress
		   Add it to the queue waiting for callback */
		ret = nlmsvc_defer_lock_rqst(rqstp, block);
		goto out;
	case -EDEADLK:
		nlmsvc_remove_block(block);
		ret = nlm_deadlock;
		goto out;
	default:	/* includes ENOLCK */
		nlmsvc_remove_block(block);
		ret = nlm_lck_denied_nolocks;
		goto out;
	}

	ret = nlm_lck_blocked;
out:
	mutex_unlock(&file->f_mutex);
	nlmsvc_release_block(block);
	dprintk("lockd: nlmsvc_lock returned %u\n", ret);
	return ret;
}

/*
 * Test for presence of a conflicting lock.
 */
__be32
nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
		struct nlm_host *host, struct nlm_lock *lock,
		struct nlm_lock *conflock)
{
	int error;
	int mode;
	__be32 ret;

	dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
		nlmsvc_file_inode(file)->i_sb->s_id,
		nlmsvc_file_inode(file)->i_ino,
		lock->fl.c.flc_type,
		(long long)lock->fl.fl_start,
		(long long)lock->fl.fl_end);

	if (locks_in_grace(SVC_NET(rqstp))) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}

	mode = lock_to_openmode(&lock->fl);
	error = vfs_test_lock(file->f_file[mode], &lock->fl);
	if (error) {
		/* We can't currently deal with deferred test requests */
		if (error == FILE_LOCK_DEFERRED)
			WARN_ON_ONCE(1);

		ret = nlm_lck_denied_nolocks;
		goto out;
	}

	if (lock->fl.c.flc_type == F_UNLCK) {
		ret = nlm_granted;
		goto out;
	}

	dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
		lock->fl.c.flc_type, (long long)lock->fl.fl_start,
		(long long)lock->fl.fl_end);
	conflock->caller = "somehost";	/* FIXME */
	conflock->len = strlen(conflock->caller);
	conflock->oh.len = 0;		/* don't return OH info */
	conflock->svid = lock->fl.c.flc_pid;
	conflock->fl.c.flc_type = lock->fl.c.flc_type;
	conflock->fl.fl_start = lock->fl.fl_start;
	conflock->fl.fl_end = lock->fl.fl_end;
	locks_release_private(&lock->fl);

	ret = nlm_lck_denied;
out:
	return ret;
}

/*
 * Remove a lock.
 * This implies a CANCEL call: We send a GRANT_MSG, the client replies
 * with a GRANT_RES call which gets lost, and calls UNLOCK immediately
 * afterwards. In this case the block will still be there, and hence
 * must be removed.
 */
__be32
nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
	int error = 0;

	dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
		nlmsvc_file_inode(file)->i_sb->s_id,
		nlmsvc_file_inode(file)->i_ino,
		lock->fl.c.flc_pid,
		(long long)lock->fl.fl_start,
		(long long)lock->fl.fl_end);

	/* First, cancel any lock that might be there */
	nlmsvc_cancel_blocked(net, file, lock);

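	/*
	 * The lock may have been taken against either the read-only or the
	 * write-only struct file for this nlm_file, so issue the unlock
	 * against whichever of the two is present.
	 */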
	lock->fl.c.flc_type = F_UNLCK;
	lock->fl.c.flc_file = file->f_file[O_RDONLY];
	if (lock->fl.c.flc_file)
		error = vfs_lock_file(lock->fl.c.flc_file, F_SETLK,
					&lock->fl, NULL);
	lock->fl.c.flc_file = file->f_file[O_WRONLY];
	if (lock->fl.c.flc_file)
		error |= vfs_lock_file(lock->fl.c.flc_file, F_SETLK,
					&lock->fl, NULL);

	return (error < 0) ? nlm_lck_denied_nolocks : nlm_granted;
}

/*
 * Cancel a previously blocked request.
 *
 * A cancel request always overrides any grant that may currently
 * be in progress.
 * The calling procedure must check whether the file can be closed.
 */
__be32
nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block *block;
	int status = 0;
	int mode;

	dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
		nlmsvc_file_inode(file)->i_sb->s_id,
		nlmsvc_file_inode(file)->i_ino,
		lock->fl.c.flc_pid,
		(long long)lock->fl.fl_start,
		(long long)lock->fl.fl_end);

	if (locks_in_grace(net))
		return nlm_lck_denied_grace_period;

	mutex_lock(&file->f_mutex);
	block = nlmsvc_lookup_block(file, lock);
	mutex_unlock(&file->f_mutex);
	if (block != NULL) {
		struct file_lock *fl = &block->b_call->a_args.lock.fl;

		mode = lock_to_openmode(fl);
		vfs_cancel_lock(block->b_file->f_file[mode], fl);
		status = nlmsvc_unlink_block(block);
		nlmsvc_release_block(block);
	}
	return status ? nlm_lck_denied : nlm_granted;
}

/*
 * This is a callback from the filesystem for VFS file lock requests.
 * It will be used if lm_grant is defined and the filesystem cannot
 * respond to the request immediately.
 * For a SETLK or SETLKW request it will get the local posix lock.
 * In all cases it will move the block to the head of the nlm_blocked queue
 * where nlmsvc_retry_blocked() can send back a reply for SETLKW or revisit
 * the deferred rpc for GETLK and SETLK.
 */
static void
nlmsvc_update_deferred_block(struct nlm_block *block, int result)
{
	block->b_flags |= B_GOT_CALLBACK;
	if (result == 0)
		block->b_granted = 1;
	else
		block->b_flags |= B_TIMED_OUT;
}

static int nlmsvc_grant_deferred(struct file_lock *fl, int result)
{
	struct nlm_block *block;
	int rc = -ENOENT;

	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
			dprintk("lockd: nlmsvc_notify_blocked block %p flags %d\n",
					block, block->b_flags);
			if (block->b_flags & B_QUEUED) {
				if (block->b_flags & B_TIMED_OUT) {
					rc = -ENOLCK;
					break;
				}
				nlmsvc_update_deferred_block(block, result);
			} else if (result == 0)
				block->b_granted = 1;

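			/*
			 * Move the block to the head of nlm_blocked and
			 * wake lockd so it is picked up promptly.
			 */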
			nlmsvc_insert_block_locked(block, 0);
			svc_wake_up(block->b_daemon);
			rc = 0;
			break;
		}
	}
	spin_unlock(&nlm_blocked_lock);
	if (rc == -ENOENT)
		printk(KERN_WARNING "lockd: grant for unknown block\n");
	return rc;
}

/*
 * Unblock a blocked lock request. This is a callback invoked from the
 * VFS layer when a lock on which we blocked is removed.
 *
 * This function doesn't grant the blocked lock instantly, but rather moves
 * the block to the head of nlm_blocked where it can be picked up by lockd.
 */
static void
nlmsvc_notify_blocked(struct file_lock *fl)
{
	struct nlm_block *block;

	dprintk("lockd: VFS unblock notification for block %p\n", fl);
	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
			nlmsvc_insert_block_locked(block, 0);
			spin_unlock(&nlm_blocked_lock);
			svc_wake_up(block->b_daemon);
			return;
		}
	}
	spin_unlock(&nlm_blocked_lock);
	printk(KERN_WARNING "lockd: notification for unknown block!\n");
}

static fl_owner_t nlmsvc_get_owner(fl_owner_t owner)
{
	return nlmsvc_get_lockowner(owner);
}

static void nlmsvc_put_owner(fl_owner_t owner)
{
	nlmsvc_put_lockowner(owner);
}

const struct lock_manager_operations nlmsvc_lock_operations = {
	.lm_notify = nlmsvc_notify_blocked,
	.lm_grant = nlmsvc_grant_deferred,
	.lm_get_owner = nlmsvc_get_owner,
	.lm_put_owner = nlmsvc_put_owner,
};

/*
 * Try to claim a lock that was previously blocked.
 *
 * Note that we use both the RPC_GRANTED_MSG call _and_ an async
 * RPC thread when notifying the client. This seems like overkill...
 * Here's why:
 *  -	we don't want to use a synchronous RPC thread, otherwise
 *	we might find ourselves hanging on a dead portmapper.
 *  -	Some lockd implementations (e.g. HP) don't react to
 *	RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls.
 */
static void
nlmsvc_grant_blocked(struct nlm_block *block)
{
	struct nlm_file *file = block->b_file;
	struct nlm_lock *lock = &block->b_call->a_args.lock;
	int mode;
	int error;
	loff_t fl_start, fl_end;

	dprintk("lockd: grant blocked lock %p\n", block);

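	/*
	 * Take our own reference: the unlink below drops the reference
	 * held by the nlm_blocked list.
	 */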
	kref_get(&block->b_count);

	/* Unlink block request from list */
	nlmsvc_unlink_block(block);

	/* If b_granted is true this means we've been here before.
	 * Just retry the grant callback, possibly refreshing the RPC
	 * binding */
	if (block->b_granted) {
		nlm_rebind_host(block->b_host);
		goto callback;
	}

	/* Try the lock operation again */
	/* vfs_lock_file() can mangle fl_start and fl_end, but we need
	 * them unchanged for the GRANT_MSG
	 */
	lock->fl.c.flc_flags |= FL_SLEEP;
	fl_start = lock->fl.fl_start;
	fl_end = lock->fl.fl_end;
	mode = lock_to_openmode(&lock->fl);
	error = vfs_lock_file(file->f_file[mode], F_SETLK, &lock->fl, NULL);
	lock->fl.c.flc_flags &= ~FL_SLEEP;
	lock->fl.fl_start = fl_start;
	lock->fl.fl_end = fl_end;

	switch (error) {
	case 0:
		break;
	case FILE_LOCK_DEFERRED:
		dprintk("lockd: lock still blocked error %d\n", error);
		nlmsvc_insert_block(block, NLM_NEVER);
		nlmsvc_release_block(block);
		return;
	default:
		printk(KERN_WARNING "lockd: unexpected error %d in %s!\n",
				-error, __func__);
		nlmsvc_insert_block(block, 10 * HZ);
		nlmsvc_release_block(block);
		return;
	}

callback:
	/* Lock was granted by VFS. */
	dprintk("lockd: GRANTing blocked lock.\n");
	block->b_granted = 1;

	/* keep block on the list, but don't reattempt until the RPC
	 * completes or the submission fails
	 */
	nlmsvc_insert_block(block, NLM_NEVER);

	/* Call the client -- use a soft RPC task since nlmsvc_retry_blocked
	 * will queue up a new one if this one times out
	 */
	error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG,
				&nlmsvc_grant_ops);

	/* RPC submission failed, wait a bit and retry */
	if (error < 0)
		nlmsvc_insert_block(block, 10 * HZ);
}

/*
 * This is the callback from the RPC layer when the NLM_GRANTED_MSG
 * RPC call has succeeded or timed out.
 * Like all RPC callbacks, it is invoked by the rpciod process, so it
 * better not sleep. Therefore, we put the blocked lock on the nlm_blocked
 * chain once more in order to have it removed by lockd itself (which can
 * then sleep on the file semaphore without disrupting e.g. the nfs client).
 */
static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst *call = data;
	struct nlm_block *block = call->a_block;
	unsigned long timeout;

	dprintk("lockd: GRANT_MSG RPC callback\n");

	spin_lock(&nlm_blocked_lock);
	/* if the block is not on a list at this point then it has
	 * been invalidated. Don't try to requeue it.
	 *
	 * FIXME: it's possible that the block is removed from the list
	 * after this check but before the nlmsvc_insert_block. In that
	 * case it will be added back. Perhaps we need better locking
	 * for nlm_blocked?
	 */
	if (list_empty(&block->b_list))
		goto out;

	/* Technically, we should down the file semaphore here. Since we
	 * move the block towards the head of the queue only, no harm
	 * can be done, though. */
	if (task->tk_status < 0) {
		/* RPC error: Re-insert for retransmission */
		timeout = 10 * HZ;
	} else {
		/* Call was successful, now wait for client callback */
		timeout = 60 * HZ;
	}
	nlmsvc_insert_block_locked(block, timeout);
	svc_wake_up(block->b_daemon);
out:
	spin_unlock(&nlm_blocked_lock);
}

/*
 * FIXME: nlmsvc_release_block() grabs a mutex. This is not allowed for an
 * .rpc_release rpc_call_op
 */
static void nlmsvc_grant_release(void *data)
{
	struct nlm_rqst *call = data;
	nlmsvc_release_block(call->a_block);
}

static const struct rpc_call_ops nlmsvc_grant_ops = {
	.rpc_call_done = nlmsvc_grant_callback,
	.rpc_release = nlmsvc_grant_release,
};

/*
 * We received a GRANT_RES callback. Try to find the corresponding
 * block.
 */
void
nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status)
{
	struct nlm_block *block;
	struct file_lock *fl;
	int error;

	dprintk("grant_reply: looking for cookie %x, s=%d\n",
		*(unsigned int *)(cookie->data), status);
	if (!(block = nlmsvc_find_block(cookie)))
		return;

	switch (status) {
	case nlm_lck_denied_grace_period:
		/* Try again in a couple of seconds */
		nlmsvc_insert_block(block, 10 * HZ);
		break;
	case nlm_lck_denied:
		/* Client doesn't want it, just unlock it */
		nlmsvc_unlink_block(block);
		fl = &block->b_call->a_args.lock.fl;
		fl->c.flc_type = F_UNLCK;
		error = vfs_lock_file(fl->c.flc_file, F_SETLK, fl, NULL);
		if (error)
			pr_warn("lockd: unable to unlock lock rejected by client!\n");
		break;
	default:
		/*
		 * Either it was accepted or the status makes no sense;
		 * just unlink it either way.
		 */
		nlmsvc_unlink_block(block);
	}
	nlmsvc_release_block(block);
}

/* Helper function to handle retry of a deferred block.
 * If it is a blocking lock, call grant_blocked.
 * For a non-blocking lock or test lock, revisit the request.
 */
static void
retry_deferred_block(struct nlm_block *block)
{
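	/* No lm_grant() callback arrived in time: treat the block as timed out */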
	if (!(block->b_flags & B_GOT_CALLBACK))
		block->b_flags |= B_TIMED_OUT;
	nlmsvc_insert_block(block, NLM_TIMEOUT);
	dprintk("revisit block %p flags %d\n", block, block->b_flags);
	if (block->b_deferred_req) {
		block->b_deferred_req->revisit(block->b_deferred_req, 0);
		block->b_deferred_req = NULL;
	}
}

/*
 * Retry all blocked locks that have been notified. This is where lockd
 * picks up locks that can be granted, or grant notifications that must
 * be retransmitted.
 */
void
nlmsvc_retry_blocked(struct svc_rqst *rqstp)
{
	unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
	struct nlm_block *block;

	spin_lock(&nlm_blocked_lock);
	while (!list_empty(&nlm_blocked) && !svc_thread_should_stop(rqstp)) {
		block = list_entry(nlm_blocked.next, struct nlm_block, b_list);

		if (block->b_when == NLM_NEVER)
			break;
		if (time_after(block->b_when, jiffies)) {
			timeout = block->b_when - jiffies;
			break;
		}
		spin_unlock(&nlm_blocked_lock);

		dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
			block, block->b_when);
		if (block->b_flags & B_QUEUED) {
			dprintk("nlmsvc_retry_blocked delete block (%p, granted=%d, flags=%d)\n",
				block, block->b_granted, block->b_flags);
			retry_deferred_block(block);
		} else
			nlmsvc_grant_blocked(block);
		spin_lock(&nlm_blocked_lock);
	}
	spin_unlock(&nlm_blocked_lock);

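	/* Re-arm the retry timer for the earliest block that is not yet due */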
	if (timeout < MAX_SCHEDULE_TIMEOUT)
		mod_timer(&nlmsvc_retry, jiffies + timeout);
}