// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/lockd/svclock.c
 *
 * Handling of server-side locks, mostly of the blocked variety.
 * This is the ugliest part of lockd because we tread on very thin ice.
 * GRANT and CANCEL calls may get stuck, meet in mid-flight, etc.
 * IMNSHO introducing the grant callback into the NLM protocol was one
 * of the worst ideas Sun ever had. Except maybe for the idea of doing
 * NFS file locking at all.
 *
 * I'm trying hard to avoid race conditions by protecting most accesses
 * to a file's list of blocked locks through a semaphore. The global
 * list of blocked locks is not protected in this fashion however.
 * Therefore, some functions (such as the RPC callback for the async grant
 * call) move blocked locks towards the head of the list *while some other
 * process might be traversing it*. This should not be a problem in
 * practice, because this will only cause functions traversing the list
 * to visit some blocks twice.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/nlm.h>
#include <linux/lockd/lockd.h>
#include <linux/exportfs.h>

#define NLMDBG_FACILITY		NLMDBG_SVCLOCK

#ifdef CONFIG_LOCKD_V4
#define nlm_deadlock	nlm4_deadlock
#else
#define nlm_deadlock	nlm_lck_denied
#endif

static void nlmsvc_release_block(struct nlm_block *block);
static void nlmsvc_insert_block(struct nlm_block *block, unsigned long);
static void nlmsvc_remove_block(struct nlm_block *block);

static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
static void nlmsvc_freegrantargs(struct nlm_rqst *call);
static const struct rpc_call_ops nlmsvc_grant_ops;

/*
 * The list of blocked locks to retry
 */
static LIST_HEAD(nlm_blocked);
static DEFINE_SPINLOCK(nlm_blocked_lock);

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
{
	/*
	 * We can get away with a static buffer because this is only called
	 * from lockd, which is single-threaded.
	 */
	static char buf[2*NLM_MAXCOOKIELEN+1];
	unsigned int i, len = sizeof(buf);
	char *p = buf;

	len--;	/* allow for trailing \0 */
	if (len < 3)
		return "???";
	for (i = 0; i < cookie->len; i++) {
		if (len < 2) {
			strcpy(p-3, "...");
			break;
		}
		sprintf(p, "%02x", cookie->data[i]);
		p += 2;
		len -= 2;
	}
	*p = '\0';

	return buf;
}
#endif

/*
 * Insert a blocked lock into the global list
 */
static void
nlmsvc_insert_block_locked(struct nlm_block *block, unsigned long when)
{
	struct nlm_block *b;
	struct list_head *pos;

	dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
	if (list_empty(&block->b_list)) {
		kref_get(&block->b_count);
	} else {
		list_del_init(&block->b_list);
	}

	pos = &nlm_blocked;
	if (when != NLM_NEVER) {
		if ((when += jiffies) == NLM_NEVER)
			when++;
		list_for_each(pos, &nlm_blocked) {
			b = list_entry(pos, struct nlm_block, b_list);
			if (time_after(b->b_when, when) || b->b_when == NLM_NEVER)
				break;
		}
		/* On normal exit from the loop, pos == &nlm_blocked,
		 * so we will be adding to the end of the list - good
		 */
	}

	list_add_tail(&block->b_list, pos);
	block->b_when = when;
}

static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
{
	spin_lock(&nlm_blocked_lock);
	nlmsvc_insert_block_locked(block, when);
	spin_unlock(&nlm_blocked_lock);
}

/*
 * Remove a block from the global list
 */
static inline void
nlmsvc_remove_block(struct nlm_block *block)
{
	spin_lock(&nlm_blocked_lock);
	if (!list_empty(&block->b_list)) {
		list_del_init(&block->b_list);
		spin_unlock(&nlm_blocked_lock);
		nlmsvc_release_block(block);
		return;
	}
	spin_unlock(&nlm_blocked_lock);
}

/*
 * Find a block for a given lock
 */
static struct nlm_block *
nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block	*block;
	struct file_lock	*fl;

	dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
				file, lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end, lock->fl.fl_type);
	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		fl = &block->b_call->a_args.lock.fl;
		dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
				block->b_file, fl->fl_pid,
				(long long)fl->fl_start,
				(long long)fl->fl_end, fl->fl_type,
				nlmdbg_cookie2a(&block->b_call->a_args.cookie));
		if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
			kref_get(&block->b_count);
			spin_unlock(&nlm_blocked_lock);
			return block;
		}
	}
	spin_unlock(&nlm_blocked_lock);

	return NULL;
}

static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b)
{
	if (a->len != b->len)
		return 0;
	if (memcmp(a->data, b->data, a->len))
		return 0;
	return 1;
}
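
/*
 * A rough summary of how the nlm_blocked list above is used by the rest of
 * this file.  nlmsvc_insert_block_locked() converts a relative timeout into
 * absolute jiffies and keeps the list sorted by expiry, with NLM_NEVER
 * entries at the tail.  The timeouts used below are roughly:
 *
 *	nlmsvc_insert_block(block, NLM_NEVER);	 // wait for a VFS notification
 *	nlmsvc_insert_block(block, NLM_TIMEOUT); // deferred request, poll again
 *	nlmsvc_insert_block(block, 10 * HZ);	 // retransmit a failed GRANT_MSG
 *	nlmsvc_insert_block(block, 60 * HZ);	 // wait for the client's GRANTED_RES
 *
 * nlmsvc_retry_blocked() walks the list head-first and acts on every block
 * whose b_when has expired.
 */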

/*
 * Find a block with a given NLM cookie.
 */
static inline struct nlm_block *
nlmsvc_find_block(struct nlm_cookie *cookie)
{
	struct nlm_block *block;

	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_cookie_match(&block->b_call->a_args.cookie, cookie))
			goto found;
	}
	spin_unlock(&nlm_blocked_lock);

	return NULL;

found:
	dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
	kref_get(&block->b_count);
	spin_unlock(&nlm_blocked_lock);
	return block;
}

/*
 * Create a block and initialize it.
 *
 * Note: we explicitly set the cookie of the grant reply to that of
 * the blocked lock request. The spec explicitly mentions that the client
 * should _not_ rely on the callback containing the same cookie as the
 * request, but (as I found out later) that's because some implementations
 * do just this. Never mind the standards committees, they support our
 * logging industries.
 *
 * 10 years later: I hope we can safely ignore these old and broken
 * clients by now. Let's fix this so we can uniquely identify an incoming
 * GRANTED_RES message by cookie, without having to rely on the client's IP
 * address. --okir
 */
static struct nlm_block *
nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
		    struct nlm_file *file, struct nlm_lock *lock,
		    struct nlm_cookie *cookie)
{
	struct nlm_block	*block;
	struct nlm_rqst		*call = NULL;

	call = nlm_alloc_call(host);
	if (call == NULL)
		return NULL;

	/* Allocate memory for block, and initialize arguments */
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		goto failed;
	kref_init(&block->b_count);
	INIT_LIST_HEAD(&block->b_list);
	INIT_LIST_HEAD(&block->b_flist);

	if (!nlmsvc_setgrantargs(call, lock))
		goto failed_free;

	/* Set notifier function for VFS, and init args */
	call->a_args.lock.fl.fl_flags |= FL_SLEEP;
	call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
	nlmclnt_next_cookie(&call->a_args.cookie);

	dprintk("lockd: created block %p...\n", block);

	/* Create and initialize the block */
	block->b_daemon = rqstp->rq_server;
	block->b_host   = host;
	block->b_file   = file;
	file->f_count++;

	/* Add to file's list of blocks */
	list_add(&block->b_flist, &file->f_blocks);

	/* Set up RPC arguments for callback */
	block->b_call = call;
	call->a_flags = RPC_TASK_ASYNC;
	call->a_block = block;

	return block;

failed_free:
	kfree(block);
failed:
	nlmsvc_release_call(call);
	return NULL;
}
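
/*
 * A rough sketch of the reference counting done on an nlm_block, pieced
 * together from the functions in this file:
 *
 *	block = nlmsvc_create_block(...);	 // kref_init: one ref for the caller
 *	nlmsvc_insert_block(block, when);	 // kref_get: one ref held by nlm_blocked
 *	block = nlmsvc_lookup_block(file, lock); // kref_get: one ref per lookup
 *	nlmsvc_remove_block(block);		 // drops the nlm_blocked ref, if any
 *	nlmsvc_release_block(block);		 // drops the caller's ref
 *
 * The final put runs nlmsvc_free_block() with b_file->f_mutex held
 * (kref_put_mutex), which unlinks the block from the file's b_flist and
 * releases the file reference taken in nlmsvc_create_block().
 */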

/*
 * Delete a block.
 * It is the caller's responsibility to check whether the file
 * can be closed hereafter.
 */
static int nlmsvc_unlink_block(struct nlm_block *block)
{
	int status;
	dprintk("lockd: unlinking block %p...\n", block);

	/* Remove block from list */
	status = locks_delete_block(&block->b_call->a_args.lock.fl);
	nlmsvc_remove_block(block);
	return status;
}

static void nlmsvc_free_block(struct kref *kref)
{
	struct nlm_block *block = container_of(kref, struct nlm_block, b_count);
	struct nlm_file		*file = block->b_file;

	dprintk("lockd: freeing block %p...\n", block);

	/* Remove block from file's list of blocks */
	list_del_init(&block->b_flist);
	mutex_unlock(&file->f_mutex);

	nlmsvc_freegrantargs(block->b_call);
	nlmsvc_release_call(block->b_call);
	nlm_release_file(block->b_file);
	kfree(block);
}

static void nlmsvc_release_block(struct nlm_block *block)
{
	if (block != NULL)
		kref_put_mutex(&block->b_count, nlmsvc_free_block, &block->b_file->f_mutex);
}

/*
 * Loop over all blocks and delete blocks held by
 * a matching host.
 */
void nlmsvc_traverse_blocks(struct nlm_host *host,
			struct nlm_file *file,
			nlm_host_match_fn_t match)
{
	struct nlm_block *block, *next;

restart:
	mutex_lock(&file->f_mutex);
	spin_lock(&nlm_blocked_lock);
	list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
		if (!match(block->b_host, host))
			continue;
		/* Do not destroy blocks that are not on
		 * the global retry list - why? */
		if (list_empty(&block->b_list))
			continue;
		kref_get(&block->b_count);
		spin_unlock(&nlm_blocked_lock);
		mutex_unlock(&file->f_mutex);
		nlmsvc_unlink_block(block);
		nlmsvc_release_block(block);
		goto restart;
	}
	spin_unlock(&nlm_blocked_lock);
	mutex_unlock(&file->f_mutex);
}

static struct nlm_lockowner *
nlmsvc_get_lockowner(struct nlm_lockowner *lockowner)
{
	refcount_inc(&lockowner->count);
	return lockowner;
}

void nlmsvc_put_lockowner(struct nlm_lockowner *lockowner)
{
	if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
		return;
	list_del(&lockowner->list);
	spin_unlock(&lockowner->host->h_lock);
	nlmsvc_release_host(lockowner->host);
	kfree(lockowner);
}

static struct nlm_lockowner *__nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->pid != pid)
			continue;
		return nlmsvc_get_lockowner(lockowner);
	}
	return NULL;
}

static struct nlm_lockowner *nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
{
	struct nlm_lockowner *res, *new = NULL;

	spin_lock(&host->h_lock);
	res = __nlmsvc_find_lockowner(host, pid);

	if (res == NULL) {
		spin_unlock(&host->h_lock);
		new = kmalloc(sizeof(*res), GFP_KERNEL);
		spin_lock(&host->h_lock);
		res = __nlmsvc_find_lockowner(host, pid);
		if (res == NULL && new != NULL) {
			res = new;
			/* fs/locks.c will manage the refcount through lock_ops */
			refcount_set(&new->count, 1);
			new->pid = pid;
			new->host = nlm_get_host(host);
			list_add(&new->list, &host->h_lockowners);
			new = NULL;
		}
	}

	spin_unlock(&host->h_lock);
	kfree(new);
	return res;
}

void
nlmsvc_release_lockowner(struct nlm_lock *lock)
{
	if (lock->fl.fl_owner)
		nlmsvc_put_lockowner(lock->fl.fl_owner);
}
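
/*
 * nlmsvc_find_lockowner() above uses a fairly standard optimistic
 * allocation pattern, sketched here: search under host->h_lock; on a miss,
 * drop the lock, kmalloc() a candidate, retake the lock and search again;
 * only if the owner is still absent is the candidate inserted, otherwise it
 * is freed and the existing entry (with its refcount already bumped by
 * nlmsvc_get_lockowner()) is returned.  This avoids allocating while
 * holding the spinlock without ever inserting a duplicate pid.
 */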

void nlmsvc_locks_init_private(struct file_lock *fl, struct nlm_host *host,
						pid_t pid)
{
	fl->fl_owner = nlmsvc_find_lockowner(host, pid);
}

/*
 * Initialize arguments for GRANTED call. The nlm_rqst structure
 * has been cleared already.
 */
static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
{
	locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
	memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
	call->a_args.lock.caller = utsname()->nodename;
	call->a_args.lock.oh.len = lock->oh.len;

	/* set default data area */
	call->a_args.lock.oh.data = call->a_owner;
	call->a_args.lock.svid = ((struct nlm_lockowner *)lock->fl.fl_owner)->pid;

	if (lock->oh.len > NLMCLNT_OHSIZE) {
		void *data = kmalloc(lock->oh.len, GFP_KERNEL);
		if (!data)
			return 0;
		call->a_args.lock.oh.data = (u8 *) data;
	}

	memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
	return 1;
}

static void nlmsvc_freegrantargs(struct nlm_rqst *call)
{
	if (call->a_args.lock.oh.data != call->a_owner)
		kfree(call->a_args.lock.oh.data);

	locks_release_private(&call->a_args.lock.fl);
}

/*
 * Deferred lock request handling for non-blocking lock
 */
static __be32
nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
{
	__be32 status = nlm_lck_denied_nolocks;

	block->b_flags |= B_QUEUED;

	nlmsvc_insert_block(block, NLM_TIMEOUT);

	block->b_cache_req = &rqstp->rq_chandle;
	if (rqstp->rq_chandle.defer) {
		block->b_deferred_req =
			rqstp->rq_chandle.defer(block->b_cache_req);
		if (block->b_deferred_req != NULL)
			status = nlm_drop_reply;
	}
	dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n",
		block, block->b_flags, ntohl(status));

	return status;
}
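
/*
 * Rough life cycle of a deferred (non-blocking) request handled by
 * nlmsvc_defer_lock_rqst() above, as implemented in this file:
 *
 *	1. vfs_lock_file() returns FILE_LOCK_DEFERRED for a !wait request;
 *	   the block is marked B_QUEUED, queued with NLM_TIMEOUT and the
 *	   svc deferral handle is saved; nlm_drop_reply suppresses the reply.
 *	2. The filesystem later calls ->lm_grant() (nlmsvc_grant_deferred()),
 *	   which records the result via B_GOT_CALLBACK / b_granted and wakes
 *	   lockd.
 *	3. nlmsvc_retry_blocked() calls retry_deferred_block(), which revisits
 *	   the deferred request so the original call is replayed.
 *	4. The replayed nlmsvc_lock() finds the B_QUEUED block and answers
 *	   nlm_granted, nlm_lck_denied (B_TIMED_OUT) or nlm_drop_reply.
 */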

/*
 * Attempt to establish a lock, and if it can't be granted, block it
 * if required.
 */
__be32
nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
	    struct nlm_host *host, struct nlm_lock *lock, int wait,
	    struct nlm_cookie *cookie, int reclaim)
{
	struct inode		*inode = nlmsvc_file_inode(file);
	struct nlm_block	*block = NULL;
	int			error;
	int			mode;
	int			async_block = 0;
	__be32			ret;

	dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
				inode->i_sb->s_id, inode->i_ino,
				lock->fl.fl_type, lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end,
				wait);

	if (!exportfs_lock_op_is_async(inode->i_sb->s_export_op)) {
		async_block = wait;
		wait = 0;
	}

	/* Lock file against concurrent access */
	mutex_lock(&file->f_mutex);
	/* Get existing block (in case client is busy-waiting)
	 * or create new block
	 */
	block = nlmsvc_lookup_block(file, lock);
	if (block == NULL) {
		block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
		ret = nlm_lck_denied_nolocks;
		if (block == NULL)
			goto out;
		lock = &block->b_call->a_args.lock;
	} else
		lock->fl.fl_flags &= ~FL_SLEEP;

	if (block->b_flags & B_QUEUED) {
		dprintk("lockd: nlmsvc_lock deferred block %p flags %d\n",
							block, block->b_flags);
		if (block->b_granted) {
			nlmsvc_unlink_block(block);
			ret = nlm_granted;
			goto out;
		}
		if (block->b_flags & B_TIMED_OUT) {
			nlmsvc_unlink_block(block);
			ret = nlm_lck_denied;
			goto out;
		}
		ret = nlm_drop_reply;
		goto out;
	}

	if (locks_in_grace(SVC_NET(rqstp)) && !reclaim) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}
	if (reclaim && !locks_in_grace(SVC_NET(rqstp))) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}

	spin_lock(&nlm_blocked_lock);
	/*
	 * If this is a lock request for an already pending
	 * lock request we return nlm_lck_blocked without calling
	 * vfs_lock_file() again. Otherwise we have two pending
	 * requests on the underlying ->lock() implementation but
	 * only one nlm_block to be granted by lm_grant().
	 */
	if (exportfs_lock_op_is_async(inode->i_sb->s_export_op) &&
	    !list_empty(&block->b_list)) {
		spin_unlock(&nlm_blocked_lock);
		ret = nlm_lck_blocked;
		goto out;
	}

	/* Append to list of blocked */
	nlmsvc_insert_block_locked(block, NLM_NEVER);
	spin_unlock(&nlm_blocked_lock);

	if (!wait)
		lock->fl.fl_flags &= ~FL_SLEEP;
	mode = lock_to_openmode(&lock->fl);
	error = vfs_lock_file(file->f_file[mode], F_SETLK, &lock->fl, NULL);
	lock->fl.fl_flags &= ~FL_SLEEP;

	dprintk("lockd: vfs_lock_file returned %d\n", error);
	switch (error) {
		case 0:
			nlmsvc_remove_block(block);
			ret = nlm_granted;
			goto out;
		case -EAGAIN:
			if (!wait)
				nlmsvc_remove_block(block);
			ret = async_block ? nlm_lck_blocked : nlm_lck_denied;
			goto out;
		case FILE_LOCK_DEFERRED:
			if (wait)
				break;
			/* Filesystem lock operation is in progress
			   Add it to the queue waiting for callback */
			ret = nlmsvc_defer_lock_rqst(rqstp, block);
			goto out;
		case -EDEADLK:
			nlmsvc_remove_block(block);
			ret = nlm_deadlock;
			goto out;
		default:			/* includes ENOLCK */
			nlmsvc_remove_block(block);
			ret = nlm_lck_denied_nolocks;
			goto out;
	}

	ret = nlm_lck_blocked;
out:
	mutex_unlock(&file->f_mutex);
	nlmsvc_release_block(block);
	dprintk("lockd: nlmsvc_lock returned %u\n", ret);
	return ret;
}

/*
 * Test for presence of a conflicting lock.
 */
__be32
nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
		struct nlm_host *host, struct nlm_lock *lock,
		struct nlm_lock *conflock, struct nlm_cookie *cookie)
{
	int			error;
	int			mode;
	__be32			ret;

	dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
				nlmsvc_file_inode(file)->i_sb->s_id,
				nlmsvc_file_inode(file)->i_ino,
				lock->fl.fl_type,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	if (locks_in_grace(SVC_NET(rqstp))) {
		ret = nlm_lck_denied_grace_period;
		goto out;
	}

	mode = lock_to_openmode(&lock->fl);
	error = vfs_test_lock(file->f_file[mode], &lock->fl);
	if (error) {
		/* We can't currently deal with deferred test requests */
		if (error == FILE_LOCK_DEFERRED)
			WARN_ON_ONCE(1);

		ret = nlm_lck_denied_nolocks;
		goto out;
	}

	if (lock->fl.fl_type == F_UNLCK) {
		ret = nlm_granted;
		goto out;
	}

	dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
		lock->fl.fl_type, (long long)lock->fl.fl_start,
		(long long)lock->fl.fl_end);
	conflock->caller = "somehost";	/* FIXME */
	conflock->len = strlen(conflock->caller);
	conflock->oh.len = 0;		/* don't return OH info */
	conflock->svid = lock->fl.fl_pid;
	conflock->fl.fl_type = lock->fl.fl_type;
	conflock->fl.fl_start = lock->fl.fl_start;
	conflock->fl.fl_end = lock->fl.fl_end;
	locks_release_private(&lock->fl);

	ret = nlm_lck_denied;
out:
	return ret;
}

/*
 * Remove a lock.
 * This implies a CANCEL call: We send a GRANT_MSG, the client replies
 * with a GRANT_RES call which gets lost, and calls UNLOCK immediately
 * afterwards. In this case the block will still be there, and hence
 * must be removed.
 */
__be32
nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
	int	error = 0;

	dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
				nlmsvc_file_inode(file)->i_sb->s_id,
				nlmsvc_file_inode(file)->i_ino,
				lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	/* First, cancel any lock that might be there */
	nlmsvc_cancel_blocked(net, file, lock);

	lock->fl.fl_type = F_UNLCK;
	lock->fl.fl_file = file->f_file[O_RDONLY];
	if (lock->fl.fl_file)
		error = vfs_lock_file(lock->fl.fl_file, F_SETLK,
					&lock->fl, NULL);
	lock->fl.fl_file = file->f_file[O_WRONLY];
	if (lock->fl.fl_file)
		error |= vfs_lock_file(lock->fl.fl_file, F_SETLK,
					&lock->fl, NULL);

	return (error < 0) ? nlm_lck_denied_nolocks : nlm_granted;
}
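
/*
 * Note on nlmsvc_unlock() above: lockd keeps separate read and write opens
 * of the file (f_file[O_RDONLY] / f_file[O_WRONLY]), and the lock being
 * removed may, presumably, have been set through either of them, so F_UNLCK
 * is applied to both and the results are combined; any negative status maps
 * to nlm_lck_denied_nolocks.
 */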

/*
 * Cancel a previously blocked request.
 *
 * A cancel request always overrides any grant that may currently
 * be in progress.
 * The calling procedure must check whether the file can be closed.
 */
__be32
nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block	*block;
	int status = 0;
	int mode;

	dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
				nlmsvc_file_inode(file)->i_sb->s_id,
				nlmsvc_file_inode(file)->i_ino,
				lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	if (locks_in_grace(net))
		return nlm_lck_denied_grace_period;

	mutex_lock(&file->f_mutex);
	block = nlmsvc_lookup_block(file, lock);
	mutex_unlock(&file->f_mutex);
	if (block != NULL) {
		struct file_lock *fl = &block->b_call->a_args.lock.fl;

		mode = lock_to_openmode(fl);
		vfs_cancel_lock(block->b_file->f_file[mode], fl);
		status = nlmsvc_unlink_block(block);
		nlmsvc_release_block(block);
	}
	return status ? nlm_lck_denied : nlm_granted;
}

/*
 * This is a callback from the filesystem for VFS file lock requests.
 * It will be used if lm_grant is defined and the filesystem cannot
 * respond to the request immediately.
 * For a SETLK or SETLKW request it will get the local posix lock.
 * In all cases it will move the block to the head of the nlm_blocked queue
 * where nlmsvc_retry_blocked() can send back a reply for SETLKW or revisit
 * the deferred rpc for GETLK and SETLK.
 */
static void
nlmsvc_update_deferred_block(struct nlm_block *block, int result)
{
	block->b_flags |= B_GOT_CALLBACK;
	if (result == 0)
		block->b_granted = 1;
	else
		block->b_flags |= B_TIMED_OUT;
}

static int nlmsvc_grant_deferred(struct file_lock *fl, int result)
{
	struct nlm_block *block;
	int rc = -ENOENT;

	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
			dprintk("lockd: nlmsvc_notify_blocked block %p flags %d\n",
							block, block->b_flags);
			if (block->b_flags & B_QUEUED) {
				if (block->b_flags & B_TIMED_OUT) {
					rc = -ENOLCK;
					break;
				}
				nlmsvc_update_deferred_block(block, result);
			} else if (result == 0)
				block->b_granted = 1;

			nlmsvc_insert_block_locked(block, 0);
			svc_wake_up(block->b_daemon);
			rc = 0;
			break;
		}
	}
	spin_unlock(&nlm_blocked_lock);
	if (rc == -ENOENT)
		printk(KERN_WARNING "lockd: grant for unknown block\n");
	return rc;
}
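
/*
 * In short, nlmsvc_grant_deferred() above maps the filesystem's result onto
 * the block state roughly as follows:
 *
 *	result == 0				block->b_granted = 1
 *	result != 0 on a B_QUEUED block		block->b_flags |= B_TIMED_OUT
 *	block already B_TIMED_OUT		return -ENOLCK (lock no longer wanted)
 *
 * In the first two cases the block is moved to the head of nlm_blocked
 * (when == 0) and lockd is woken via svc_wake_up(), so the result is acted
 * upon from nlmsvc_retry_blocked() rather than from the callback itself.
 */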

/*
 * Unblock a blocked lock request. This is a callback invoked from the
 * VFS layer when a lock on which we blocked is removed.
 *
 * This function doesn't grant the blocked lock instantly, but rather moves
 * the block to the head of nlm_blocked where it can be picked up by lockd.
 */
static void
nlmsvc_notify_blocked(struct file_lock *fl)
{
	struct nlm_block	*block;

	dprintk("lockd: VFS unblock notification for block %p\n", fl);
	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
			nlmsvc_insert_block_locked(block, 0);
			spin_unlock(&nlm_blocked_lock);
			svc_wake_up(block->b_daemon);
			return;
		}
	}
	spin_unlock(&nlm_blocked_lock);
	printk(KERN_WARNING "lockd: notification for unknown block!\n");
}

static fl_owner_t nlmsvc_get_owner(fl_owner_t owner)
{
	return nlmsvc_get_lockowner(owner);
}

static void nlmsvc_put_owner(fl_owner_t owner)
{
	nlmsvc_put_lockowner(owner);
}

const struct lock_manager_operations nlmsvc_lock_operations = {
	.lm_notify = nlmsvc_notify_blocked,
	.lm_grant = nlmsvc_grant_deferred,
	.lm_get_owner = nlmsvc_get_owner,
	.lm_put_owner = nlmsvc_put_owner,
};

/*
 * Try to claim a lock that was previously blocked.
 *
 * Note that we use both the RPC_GRANTED_MSG call _and_ an async
 * RPC thread when notifying the client. This seems like overkill...
 * Here's why:
 *  -	we don't want to use a synchronous RPC thread, otherwise
 *	we might find ourselves hanging on a dead portmapper.
 *  -	Some lockd implementations (e.g. HP) don't react to
 *	RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls.
 */
static void
nlmsvc_grant_blocked(struct nlm_block *block)
{
	struct nlm_file		*file = block->b_file;
	struct nlm_lock		*lock = &block->b_call->a_args.lock;
	int			mode;
	int			error;
	loff_t			fl_start, fl_end;

	dprintk("lockd: grant blocked lock %p\n", block);

	kref_get(&block->b_count);

	/* Unlink block request from list */
	nlmsvc_unlink_block(block);

	/* If b_granted is true this means we've been here before.
	 * Just retry the grant callback, possibly refreshing the RPC
	 * binding */
	if (block->b_granted) {
		nlm_rebind_host(block->b_host);
		goto callback;
	}

	/* Try the lock operation again */
	/* vfs_lock_file() can mangle fl_start and fl_end, but we need
	 * them unchanged for the GRANT_MSG
	 */
	lock->fl.fl_flags |= FL_SLEEP;
	fl_start = lock->fl.fl_start;
	fl_end = lock->fl.fl_end;
	mode = lock_to_openmode(&lock->fl);
	error = vfs_lock_file(file->f_file[mode], F_SETLK, &lock->fl, NULL);
	lock->fl.fl_flags &= ~FL_SLEEP;
	lock->fl.fl_start = fl_start;
	lock->fl.fl_end = fl_end;

	switch (error) {
	case 0:
		break;
	case FILE_LOCK_DEFERRED:
		dprintk("lockd: lock still blocked error %d\n", error);
		nlmsvc_insert_block(block, NLM_NEVER);
		nlmsvc_release_block(block);
		return;
	default:
		printk(KERN_WARNING "lockd: unexpected error %d in %s!\n",
				-error, __func__);
		nlmsvc_insert_block(block, 10 * HZ);
		nlmsvc_release_block(block);
		return;
	}

callback:
	/* Lock was granted by VFS. */
	dprintk("lockd: GRANTing blocked lock.\n");
	block->b_granted = 1;

	/* keep block on the list, but don't reattempt until the RPC
	 * completes or the submission fails
	 */
	nlmsvc_insert_block(block, NLM_NEVER);

	/* Call the client -- use a soft RPC task since nlmsvc_retry_blocked
	 * will queue up a new one if this one times out
	 */
	error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG,
				&nlmsvc_grant_ops);

	/* RPC submission failed, wait a bit and retry */
	if (error < 0)
		nlmsvc_insert_block(block, 10 * HZ);
}

/*
 * This is the callback from the RPC layer when the NLM_GRANTED_MSG
 * RPC call has succeeded or timed out.
 * Like all RPC callbacks, it is invoked by the rpciod process, so it
 * better not sleep. Therefore, we put the blocked lock on the nlm_blocked
 * chain once more in order to have it removed by lockd itself (which can
 * then sleep on the file semaphore without disrupting e.g. the nfs client).
 */
static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst		*call = data;
	struct nlm_block	*block = call->a_block;
	unsigned long		timeout;

	dprintk("lockd: GRANT_MSG RPC callback\n");

	spin_lock(&nlm_blocked_lock);
	/* if the block is not on a list at this point then it has
	 * been invalidated. Don't try to requeue it.
	 *
	 * FIXME: it's possible that the block is removed from the list
	 * after this check but before the nlmsvc_insert_block. In that
	 * case it will be added back. Perhaps we need better locking
	 * for nlm_blocked?
	 */
	if (list_empty(&block->b_list))
		goto out;

	/* Technically, we should down the file semaphore here. Since we
	 * move the block towards the head of the queue only, no harm
	 * can be done, though. */
	if (task->tk_status < 0) {
		/* RPC error: Re-insert for retransmission */
		timeout = 10 * HZ;
	} else {
		/* Call was successful, now wait for client callback */
		timeout = 60 * HZ;
	}
	nlmsvc_insert_block_locked(block, timeout);
	svc_wake_up(block->b_daemon);
out:
	spin_unlock(&nlm_blocked_lock);
}

/*
 * FIXME: nlmsvc_release_block() grabs a mutex. This is not allowed for an
 * .rpc_release rpc_call_op
 */
static void nlmsvc_grant_release(void *data)
{
	struct nlm_rqst		*call = data;

	nlmsvc_release_block(call->a_block);
}

static const struct rpc_call_ops nlmsvc_grant_ops = {
	.rpc_call_done = nlmsvc_grant_callback,
	.rpc_release = nlmsvc_grant_release,
};
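
/*
 * Rough timeline of the grant handshake driven by the callbacks above:
 *
 *	nlmsvc_grant_blocked()	takes the lock locally, re-queues the block
 *				with NLM_NEVER and fires the async
 *				NLMPROC_GRANTED_MSG call.
 *	nlmsvc_grant_callback()	re-queues the block at 10 * HZ (RPC error,
 *				retransmit) or 60 * HZ (call sent, wait for
 *				the client's GRANTED_RES).
 *	nlmsvc_grant_reply()	below matches the GRANTED_RES by cookie and
 *				re-queues, unlocks or unlinks the block
 *				depending on the reported status.
 */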

/*
 * We received a GRANT_RES callback. Try to find the corresponding
 * block.
 */
void
nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status)
{
	struct nlm_block	*block;
	struct file_lock	*fl;
	int			error;

	dprintk("grant_reply: looking for cookie %x, s=%d \n",
		*(unsigned int *)(cookie->data), status);
	if (!(block = nlmsvc_find_block(cookie)))
		return;

	switch (status) {
	case nlm_lck_denied_grace_period:
		/* Try again in a couple of seconds */
		nlmsvc_insert_block(block, 10 * HZ);
		break;
	case nlm_lck_denied:
		/* Client doesn't want it, just unlock it */
		nlmsvc_unlink_block(block);
		fl = &block->b_call->a_args.lock.fl;
		fl->fl_type = F_UNLCK;
		error = vfs_lock_file(fl->fl_file, F_SETLK, fl, NULL);
		if (error)
			pr_warn("lockd: unable to unlock lock rejected by client!\n");
		break;
	default:
		/*
		 * Either it was accepted or the status makes no sense;
		 * just unlink it either way.
		 */
		nlmsvc_unlink_block(block);
	}
	nlmsvc_release_block(block);
}

/* Helper function to handle retry of a deferred block.
 * If it is a blocking lock, call grant_blocked.
 * For a non-blocking lock or test lock, revisit the request.
 */
static void
retry_deferred_block(struct nlm_block *block)
{
	if (!(block->b_flags & B_GOT_CALLBACK))
		block->b_flags |= B_TIMED_OUT;
	nlmsvc_insert_block(block, NLM_TIMEOUT);
	dprintk("revisit block %p flags %d\n", block, block->b_flags);
	if (block->b_deferred_req) {
		block->b_deferred_req->revisit(block->b_deferred_req, 0);
		block->b_deferred_req = NULL;
	}
}

/*
 * Retry all blocked locks that have been notified. This is where lockd
 * picks up locks that can be granted, or grant notifications that must
 * be retransmitted.
 */
void
nlmsvc_retry_blocked(struct svc_rqst *rqstp)
{
	unsigned long	timeout = MAX_SCHEDULE_TIMEOUT;
	struct nlm_block *block;

	spin_lock(&nlm_blocked_lock);
	while (!list_empty(&nlm_blocked) && !svc_thread_should_stop(rqstp)) {
		block = list_entry(nlm_blocked.next, struct nlm_block, b_list);

		if (block->b_when == NLM_NEVER)
			break;
		if (time_after(block->b_when, jiffies)) {
			timeout = block->b_when - jiffies;
			break;
		}
		spin_unlock(&nlm_blocked_lock);

		dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
			block, block->b_when);
		if (block->b_flags & B_QUEUED) {
			dprintk("nlmsvc_retry_blocked delete block (%p, granted=%d, flags=%d)\n",
				block, block->b_granted, block->b_flags);
			retry_deferred_block(block);
		} else
			nlmsvc_grant_blocked(block);
		spin_lock(&nlm_blocked_lock);
	}
	spin_unlock(&nlm_blocked_lock);

	if (timeout < MAX_SCHEDULE_TIMEOUT)
		mod_timer(&nlmsvc_retry, jiffies + timeout);
}