/*
 * linux/fs/lockd/svclock.c
 *
 * Handling of server-side locks, mostly of the blocked variety.
 * This is the ugliest part of lockd because we tread on very thin ice.
 * GRANT and CANCEL calls may get stuck, meet in mid-flight, etc.
 * IMNSHO introducing the grant callback into the NLM protocol was one
 * of the worst ideas Sun ever had. Except maybe for the idea of doing
 * NFS file locking at all.
 *
 * I'm trying hard to avoid race conditions by protecting most accesses
 * to a file's list of blocked locks through a semaphore. The global
 * list of blocked locks is not protected in this fashion however.
 * Therefore, some functions (such as the RPC callback for the async grant
 * call) move blocked locks towards the head of the list *while some other
 * process might be traversing it*. This should not be a problem in
 * practice, because this will only cause functions traversing the list
 * to visit some blocks twice.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/nlm.h>
#include <linux/lockd/lockd.h>

#define NLMDBG_FACILITY		NLMDBG_SVCLOCK

#ifdef CONFIG_LOCKD_V4
#define nlm_deadlock	nlm4_deadlock
#else
#define nlm_deadlock	nlm_lck_denied
#endif

static void nlmsvc_release_block(struct nlm_block *block);
static void nlmsvc_insert_block(struct nlm_block *block, unsigned long);
static void nlmsvc_remove_block(struct nlm_block *block);

static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
static void nlmsvc_freegrantargs(struct nlm_rqst *call);
static const struct rpc_call_ops nlmsvc_grant_ops;

/*
 * The list of blocked locks to retry
 */
static LIST_HEAD(nlm_blocked);

/*
 * Insert a blocked lock into the global list
 */
static void
nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
{
	struct nlm_block *b;
	struct list_head *pos;

	dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
	if (list_empty(&block->b_list)) {
		kref_get(&block->b_count);
	} else {
		list_del_init(&block->b_list);
	}

	pos = &nlm_blocked;
	if (when != NLM_NEVER) {
		if ((when += jiffies) == NLM_NEVER)
			when++;
		list_for_each(pos, &nlm_blocked) {
			b = list_entry(pos, struct nlm_block, b_list);
			if (time_after(b->b_when, when) || b->b_when == NLM_NEVER)
				break;
		}
		/* On normal exit from the loop, pos == &nlm_blocked,
		 * so we will be adding to the end of the list - good
		 */
	}

	list_add_tail(&block->b_list, pos);
	block->b_when = when;
}

/*
 * Remove a block from the global list
 */
static inline void
nlmsvc_remove_block(struct nlm_block *block)
{
	if (!list_empty(&block->b_list)) {
		list_del_init(&block->b_list);
		nlmsvc_release_block(block);
	}
}

/*
 * Find a block for a given lock
 */
static struct nlm_block *
nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block	*block;
	struct file_lock	*fl;

	dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
				file, lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end, lock->fl.fl_type);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		fl = &block->b_call->a_args.lock.fl;
		dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
				block->b_file, fl->fl_pid,
				(long long)fl->fl_start,
				(long long)fl->fl_end, fl->fl_type,
				nlmdbg_cookie2a(&block->b_call->a_args.cookie));
		if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
			kref_get(&block->b_count);
			return block;
		}
	}

	return NULL;
}
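/*
 * Compare two NLM cookies. Cookies are opaque, client-chosen identifiers,
 * so two cookies match only if both length and data are identical.
 */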
static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b)
{
	if (a->len != b->len)
		return 0;
	if (memcmp(a->data, b->data, a->len))
		return 0;
	return 1;
}

/*
 * Find a block with a given NLM cookie.
 */
static inline struct nlm_block *
nlmsvc_find_block(struct nlm_cookie *cookie)
{
	struct nlm_block *block;

	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_cookie_match(&block->b_call->a_args.cookie, cookie))
			goto found;
	}

	return NULL;

found:
	dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
	kref_get(&block->b_count);
	return block;
}

/*
 * Create a block and initialize it.
 *
 * Note: we explicitly set the cookie of the grant reply to that of
 * the blocked lock request. The spec explicitly mentions that the client
 * should _not_ rely on the callback containing the same cookie as the
 * request, but (as I found out later) that's because some implementations
 * do just this. Never mind the standards committees, they support our
 * logging industries.
 *
 * 10 years later: I hope we can safely ignore these old and broken
 * clients by now. Let's fix this so we can uniquely identify an incoming
 * GRANTED_RES message by cookie, without having to rely on the client's IP
 * address. --okir
 */
static inline struct nlm_block *
nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file,
				struct nlm_lock *lock, struct nlm_cookie *cookie)
{
	struct nlm_block	*block;
	struct nlm_host		*host;
	struct nlm_rqst		*call = NULL;

	/* Create host handle for callback */
	host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len);
	if (host == NULL)
		return NULL;

	call = nlm_alloc_call(host);
	if (call == NULL)
		return NULL;

	/* Allocate memory for block, and initialize arguments */
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		goto failed;
	kref_init(&block->b_count);
	INIT_LIST_HEAD(&block->b_list);
	INIT_LIST_HEAD(&block->b_flist);

	if (!nlmsvc_setgrantargs(call, lock))
		goto failed_free;

	/* Set notifier function for VFS, and init args */
	call->a_args.lock.fl.fl_flags |= FL_SLEEP;
	call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
	nlmclnt_next_cookie(&call->a_args.cookie);

	dprintk("lockd: created block %p...\n", block);

	/* Create and initialize the block */
	block->b_daemon = rqstp->rq_server;
	block->b_host = host;
	block->b_file = file;
	file->f_count++;

	/* Add to file's list of blocks */
	list_add(&block->b_flist, &file->f_blocks);

	/* Set up RPC arguments for callback */
	block->b_call = call;
	call->a_flags = RPC_TASK_ASYNC;
	call->a_block = block;

	return block;

failed_free:
	kfree(block);
failed:
	nlm_release_call(call);
	return NULL;
}
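/*
 * A note on block reference counting (added for clarity): the creator of a
 * block holds one reference from kref_init(), nlmsvc_insert_block() takes
 * one while the block sits on nlm_blocked, and nlmsvc_lookup_block() and
 * nlmsvc_find_block() take one on behalf of their caller. Every reference
 * is dropped through nlmsvc_release_block(), which frees the block via
 * nlmsvc_free_block() once the count reaches zero.
 */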
/*
 * Delete a block.
 * It is the caller's responsibility to check whether the file
 * can be closed hereafter.
 */
static int nlmsvc_unlink_block(struct nlm_block *block)
{
	int status;
	dprintk("lockd: unlinking block %p...\n", block);

	/* Remove block from list */
	status = posix_unblock_lock(block->b_file->f_file, &block->b_call->a_args.lock.fl);
	nlmsvc_remove_block(block);
	return status;
}

static void nlmsvc_free_block(struct kref *kref)
{
	struct nlm_block *block = container_of(kref, struct nlm_block, b_count);
	struct nlm_file *file = block->b_file;

	dprintk("lockd: freeing block %p...\n", block);

	/* Remove block from file's list of blocks */
	mutex_lock(&file->f_mutex);
	list_del_init(&block->b_flist);
	mutex_unlock(&file->f_mutex);

	nlmsvc_freegrantargs(block->b_call);
	nlm_release_call(block->b_call);
	nlm_release_file(block->b_file);
	kfree(block);
}

static void nlmsvc_release_block(struct nlm_block *block)
{
	if (block != NULL)
		kref_put(&block->b_count, nlmsvc_free_block);
}

/*
 * Loop over all blocks and delete blocks held by
 * a matching host.
 */
void nlmsvc_traverse_blocks(struct nlm_host *host,
			struct nlm_file *file,
			nlm_host_match_fn_t match)
{
	struct nlm_block *block, *next;

restart:
	mutex_lock(&file->f_mutex);
	list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
		if (!match(block->b_host, host))
			continue;
		/* Do not destroy blocks that are not on
		 * the global retry list - why? */
		if (list_empty(&block->b_list))
			continue;
		kref_get(&block->b_count);
		mutex_unlock(&file->f_mutex);
		nlmsvc_unlink_block(block);
		nlmsvc_release_block(block);
		goto restart;
	}
	mutex_unlock(&file->f_mutex);
}

/*
 * Initialize arguments for GRANTED call. The nlm_rqst structure
 * has been cleared already.
 */
static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
{
	locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
	memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
	call->a_args.lock.caller = utsname()->nodename;
	call->a_args.lock.oh.len = lock->oh.len;

	/* set default data area */
	call->a_args.lock.oh.data = call->a_owner;
	call->a_args.lock.svid = lock->fl.fl_pid;

	if (lock->oh.len > NLMCLNT_OHSIZE) {
		void *data = kmalloc(lock->oh.len, GFP_KERNEL);
		if (!data)
			return 0;
		call->a_args.lock.oh.data = (u8 *) data;
	}

	memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
	return 1;
}
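/*
 * Free the owner handle buffer if nlmsvc_setgrantargs() had to allocate
 * one, i.e. if oh.data no longer points at the embedded a_owner area.
 */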
static void nlmsvc_freegrantargs(struct nlm_rqst *call)
{
	if (call->a_args.lock.oh.data != call->a_owner)
		kfree(call->a_args.lock.oh.data);
}

/*
 * Attempt to establish a lock, and if it can't be granted, block it
 * if required.
 */
__be32
nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
			struct nlm_lock *lock, int wait, struct nlm_cookie *cookie)
{
	struct nlm_block	*block, *newblock = NULL;
	int			error;
	__be32			ret;

	dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
				file->f_file->f_path.dentry->d_inode->i_sb->s_id,
				file->f_file->f_path.dentry->d_inode->i_ino,
				lock->fl.fl_type, lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end,
				wait);

	lock->fl.fl_flags &= ~FL_SLEEP;
again:
	/* Lock file against concurrent access */
	mutex_lock(&file->f_mutex);
	/* Get existing block (in case client is busy-waiting) */
	block = nlmsvc_lookup_block(file, lock);
	if (block == NULL) {
		if (newblock != NULL)
			lock = &newblock->b_call->a_args.lock;
	} else
		lock = &block->b_call->a_args.lock;

	error = posix_lock_file(file->f_file, &lock->fl);
	lock->fl.fl_flags &= ~FL_SLEEP;

	dprintk("lockd: posix_lock_file returned %d\n", error);

	switch (error) {
	case 0:
		ret = nlm_granted;
		goto out;
	case -EAGAIN:
		break;
	case -EDEADLK:
		ret = nlm_deadlock;
		goto out;
	default:			/* includes ENOLCK */
		ret = nlm_lck_denied_nolocks;
		goto out;
	}

	ret = nlm_lck_denied;
	if (!wait)
		goto out;

	ret = nlm_lck_blocked;
	if (block != NULL)
		goto out;

	/* If we don't have a block, create and initialize it. Then
	 * retry because we may have slept in kmalloc. */
	/* We have to release f_mutex as nlmsvc_create_block may try to
	 * claim it while doing host garbage collection */
	if (newblock == NULL) {
		mutex_unlock(&file->f_mutex);
		dprintk("lockd: blocking on this lock (allocating).\n");
		if (!(newblock = nlmsvc_create_block(rqstp, file, lock, cookie)))
			return nlm_lck_denied_nolocks;
		goto again;
	}

	/* Append to list of blocked */
	nlmsvc_insert_block(newblock, NLM_NEVER);
out:
	mutex_unlock(&file->f_mutex);
	nlmsvc_release_block(newblock);
	nlmsvc_release_block(block);
	dprintk("lockd: nlmsvc_lock returned %u\n", ret);
	return ret;
}
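/*
 * A note on the blocking path above (added for reference): when the client
 * asked to wait and the lock could not be granted, the new block is queued
 * on nlm_blocked with a timeout of NLM_NEVER and nlm_lck_blocked is
 * returned. When the conflicting lock later goes away, the VFS calls
 * nlmsvc_notify_blocked() below, which moves the block to the head of the
 * list and wakes lockd; lockd then retries the lock and issues the GRANTED
 * callback via nlmsvc_retry_blocked() and nlmsvc_grant_blocked().
 */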
/*
 * Test for presence of a conflicting lock.
 */
__be32
nlmsvc_testlock(struct nlm_file *file, struct nlm_lock *lock,
				       struct nlm_lock *conflock)
{
	dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
				file->f_file->f_path.dentry->d_inode->i_sb->s_id,
				file->f_file->f_path.dentry->d_inode->i_ino,
				lock->fl.fl_type,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	if (posix_test_lock(file->f_file, &lock->fl, &conflock->fl)) {
		dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
				conflock->fl.fl_type,
				(long long)conflock->fl.fl_start,
				(long long)conflock->fl.fl_end);
		conflock->caller = "somehost";	/* FIXME */
		conflock->len = strlen(conflock->caller);
		conflock->oh.len = 0;		/* don't return OH info */
		conflock->svid = conflock->fl.fl_pid;
		return nlm_lck_denied;
	}

	return nlm_granted;
}

/*
 * Remove a lock.
 * This implies a CANCEL call: We send a GRANT_MSG, the client replies
 * with a GRANT_RES call which gets lost, and calls UNLOCK immediately
 * afterwards. In this case the block will still be there, and hence
 * must be removed.
 */
__be32
nlmsvc_unlock(struct nlm_file *file, struct nlm_lock *lock)
{
	int	error;

	dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
				file->f_file->f_path.dentry->d_inode->i_sb->s_id,
				file->f_file->f_path.dentry->d_inode->i_ino,
				lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	/* First, cancel any lock that might be there */
	nlmsvc_cancel_blocked(file, lock);

	lock->fl.fl_type = F_UNLCK;
	error = posix_lock_file(file->f_file, &lock->fl);

	return (error < 0) ? nlm_lck_denied_nolocks : nlm_granted;
}

/*
 * Cancel a previously blocked request.
 *
 * A cancel request always overrides any grant that may currently
 * be in progress.
 * The calling procedure must check whether the file can be closed.
 */
__be32
nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block	*block;
	int status = 0;

	dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
				file->f_file->f_path.dentry->d_inode->i_sb->s_id,
				file->f_file->f_path.dentry->d_inode->i_ino,
				lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	mutex_lock(&file->f_mutex);
	block = nlmsvc_lookup_block(file, lock);
	mutex_unlock(&file->f_mutex);
	if (block != NULL) {
		status = nlmsvc_unlink_block(block);
		nlmsvc_release_block(block);
	}
	return status ? nlm_lck_denied : nlm_granted;
}

/*
 * Unblock a blocked lock request. This is a callback invoked from the
 * VFS layer when a lock on which we blocked is removed.
 *
 * This function doesn't grant the blocked lock instantly, but rather moves
 * the block to the head of nlm_blocked where it can be picked up by lockd.
 */
static void
nlmsvc_notify_blocked(struct file_lock *fl)
{
	struct nlm_block	*block;

	dprintk("lockd: VFS unblock notification for block %p\n", fl);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
			nlmsvc_insert_block(block, 0);
			svc_wake_up(block->b_daemon);
			return;
		}
	}

	printk(KERN_WARNING "lockd: notification for unknown block!\n");
}
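/*
 * Lock-manager callbacks handed to the VFS posix locking code for lockd's
 * locks: owner comparison (owner pointer and pid must both match) and the
 * unblock notification above.
 */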
static int nlmsvc_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	return fl1->fl_owner == fl2->fl_owner && fl1->fl_pid == fl2->fl_pid;
}

struct lock_manager_operations nlmsvc_lock_operations = {
	.fl_compare_owner = nlmsvc_same_owner,
	.fl_notify = nlmsvc_notify_blocked,
};

/*
 * Try to claim a lock that was previously blocked.
 *
 * Note that we use both the RPC_GRANTED_MSG call _and_ an async
 * RPC thread when notifying the client. This seems like overkill...
 * Here's why:
 *  -	we don't want to use a synchronous RPC thread, otherwise
 *	we might find ourselves hanging on a dead portmapper.
 *  -	Some lockd implementations (e.g. HP) don't react to
 *	RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls.
 */
static void
nlmsvc_grant_blocked(struct nlm_block *block)
{
	struct nlm_file		*file = block->b_file;
	struct nlm_lock		*lock = &block->b_call->a_args.lock;
	int			error;

	dprintk("lockd: grant blocked lock %p\n", block);

	/* Unlink block request from list */
	nlmsvc_unlink_block(block);

	/* If b_granted is true this means we've been here before.
	 * Just retry the grant callback, possibly refreshing the RPC
	 * binding */
	if (block->b_granted) {
		nlm_rebind_host(block->b_host);
		goto callback;
	}

	/* Try the lock operation again */
	lock->fl.fl_flags |= FL_SLEEP;
	error = posix_lock_file(file->f_file, &lock->fl);
	lock->fl.fl_flags &= ~FL_SLEEP;

	switch (error) {
	case 0:
		break;
	case -EAGAIN:
		dprintk("lockd: lock still blocked\n");
		nlmsvc_insert_block(block, NLM_NEVER);
		return;
	default:
		printk(KERN_WARNING "lockd: unexpected error %d in %s!\n",
				-error, __FUNCTION__);
		nlmsvc_insert_block(block, 10 * HZ);
		return;
	}

callback:
	/* Lock was granted by VFS. */
	dprintk("lockd: GRANTing blocked lock.\n");
	block->b_granted = 1;

	/* Schedule next grant callback in 30 seconds */
	nlmsvc_insert_block(block, 30 * HZ);

	/* Call the client */
	kref_get(&block->b_count);
	nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG, &nlmsvc_grant_ops);
}

/*
 * This is the callback from the RPC layer when the NLM_GRANTED_MSG
 * RPC call has succeeded or timed out.
 * Like all RPC callbacks, it is invoked by the rpciod process, so it
 * better not sleep. Therefore, we put the blocked lock on the nlm_blocked
 * chain once more in order to have it removed by lockd itself (which can
 * then sleep on the file semaphore without disrupting e.g. the nfs client).
 */
static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst		*call = data;
	struct nlm_block	*block = call->a_block;
	unsigned long		timeout;

	dprintk("lockd: GRANT_MSG RPC callback\n");

	/* Technically, we should down the file semaphore here. Since we
	 * move the block towards the head of the queue only, no harm
	 * can be done, though. */
	if (task->tk_status < 0) {
		/* RPC error: Re-insert for retransmission */
		timeout = 10 * HZ;
	} else {
		/* Call was successful, now wait for client callback */
		timeout = 60 * HZ;
	}
	nlmsvc_insert_block(block, timeout);
	svc_wake_up(block->b_daemon);
}
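/*
 * rpc_release callback for the async GRANTED_MSG call: drop the block
 * reference taken in nlmsvc_grant_blocked() before the call was issued.
 */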
static void nlmsvc_grant_release(void *data)
{
	struct nlm_rqst		*call = data;

	nlmsvc_release_block(call->a_block);
}

static const struct rpc_call_ops nlmsvc_grant_ops = {
	.rpc_call_done = nlmsvc_grant_callback,
	.rpc_release = nlmsvc_grant_release,
};

/*
 * We received a GRANT_RES callback. Try to find the corresponding
 * block.
 */
void
nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status)
{
	struct nlm_block	*block;

	dprintk("grant_reply: looking for cookie %x, s=%d \n",
		*(unsigned int *)(cookie->data), status);
	if (!(block = nlmsvc_find_block(cookie)))
		return;

	if (block) {
		if (status == nlm_lck_denied_grace_period) {
			/* Try again in a couple of seconds */
			nlmsvc_insert_block(block, 10 * HZ);
		} else {
			/* Lock is now held by client, or has been rejected.
			 * In both cases, the block should be removed. */
			nlmsvc_unlink_block(block);
		}
	}
	nlmsvc_release_block(block);
}

/*
 * Retry all blocked locks that have been notified. This is where lockd
 * picks up locks that can be granted, or grant notifications that must
 * be retransmitted.
 */
unsigned long
nlmsvc_retry_blocked(void)
{
	unsigned long	timeout = MAX_SCHEDULE_TIMEOUT;
	struct nlm_block *block;

	while (!list_empty(&nlm_blocked)) {
		block = list_entry(nlm_blocked.next, struct nlm_block, b_list);

		if (block->b_when == NLM_NEVER)
			break;
		if (time_after(block->b_when, jiffies)) {
			timeout = block->b_when - jiffies;
			break;
		}

		dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
			block, block->b_when);
		kref_get(&block->b_count);
		nlmsvc_grant_blocked(block);
		nlmsvc_release_block(block);
	}

	return timeout;
}
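/*
 * Note (assumed calling convention, not part of this file): the lockd main
 * loop in fs/lockd/svc.c is expected to call nlmsvc_retry_blocked() on each
 * pass and use the returned value as the timeout for svc_recv(), so that it
 * wakes up again when the next entry on nlm_blocked becomes due.
 */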