// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/iversion.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "nfs4session.h"
#include "delegation.h"
#include "internal.h"
#include "nfs4trace.h"

#define NFS_DEFAULT_DELEGATION_WATERMARK (5000U)

static unsigned nfs_delegation_watermark = NFS_DEFAULT_DELEGATION_WATERMARK;
module_param_named(delegation_watermark, nfs_delegation_watermark, uint, 0644);

bool directory_delegations = true;
module_param(directory_delegations, bool, 0644);
MODULE_PARM_DESC(directory_delegations,
		 "Enable the use of directory delegations, defaults to on.");

static struct hlist_head *nfs_delegation_hash(struct nfs_server *server,
					      const struct nfs_fh *fhandle)
{
	return server->delegation_hash_table +
		(nfs_fhandle_hash(fhandle) & server->delegation_hash_mask);
}

static void __nfs_free_delegation(struct nfs_delegation *delegation)
{
	put_cred(delegation->cred);
	delegation->cred = NULL;
	kfree_rcu(delegation, rcu);
}

static void nfs_mark_delegation_revoked(struct nfs_server *server,
					struct nfs_delegation *delegation)
{
	if (!test_and_set_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
		delegation->stateid.type = NFS4_INVALID_STATEID_TYPE;
		atomic_long_dec(&server->nr_active_delegations);
		if (!test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
			nfs_clear_verifier_delegated(delegation->inode);
	}
}

static struct nfs_delegation *nfs_get_delegation(struct nfs_delegation *delegation)
{
	refcount_inc(&delegation->refcount);
	return delegation;
}

static void nfs_put_delegation(struct nfs_delegation *delegation)
{
	if (refcount_dec_and_test(&delegation->refcount))
		__nfs_free_delegation(delegation);
}

static void nfs_free_delegation(struct nfs_server *server,
				struct nfs_delegation *delegation)
{
	nfs_mark_delegation_revoked(server, delegation);
	nfs_put_delegation(delegation);
}

/**
 * nfs_mark_delegation_referenced - set delegation's REFERENCED flag
 * @delegation: delegation to process
 *
 */
void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
{
	set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);
}

static void nfs_mark_return_delegation(struct nfs_server *server,
				       struct nfs_delegation *delegation)
{
	set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
	set_bit(NFS4SERV_DELEGRETURN, &server->delegation_flags);
	set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
}

static bool nfs4_is_valid_delegation(const struct nfs_delegation *delegation,
				     fmode_t type)
{
	if (delegation != NULL && (delegation->type & type) == type &&
	    !test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) &&
	    !test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
		return true;
	return false;
}

struct nfs_delegation *nfs4_get_valid_delegation(const struct inode *inode)
{
	struct nfs_delegation *delegation;

	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (nfs4_is_valid_delegation(delegation, 0))
		return delegation;
	return NULL;
}

static int nfs4_do_check_delegation(struct inode *inode, fmode_t type,
				    int flags, bool mark)
{
	struct nfs_delegation *delegation;
	int ret = 0;

	type &= FMODE_READ|FMODE_WRITE;
	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (nfs4_is_valid_delegation(delegation, type)) {
		if (mark)
			nfs_mark_delegation_referenced(delegation);
		ret = 1;
		if ((flags & NFS_DELEGATION_FLAG_TIME) &&
		    !test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags))
			ret = 0;
	}
	rcu_read_unlock();
	return ret;
}
/**
 * nfs4_have_delegation - check if inode has a delegation, mark it
 * NFS_DELEGATION_REFERENCED if there is one.
 * @inode: inode to check
 * @type: delegation types to check for
 * @flags: various modifiers
 *
 * Returns one if inode has the indicated delegation, otherwise zero.
 */
int nfs4_have_delegation(struct inode *inode, fmode_t type, int flags)
{
	if (S_ISDIR(inode->i_mode) && !directory_delegations)
		nfs_inode_evict_delegation(inode);
	return nfs4_do_check_delegation(inode, type, flags, true);
}

/*
 * nfs4_check_delegation - check if inode has a delegation, do not mark
 * NFS_DELEGATION_REFERENCED if it has one.
 */
int nfs4_check_delegation(struct inode *inode, fmode_t type)
{
	return nfs4_do_check_delegation(inode, type, 0, false);
}

static int nfs_delegation_claim_locks(struct nfs4_state *state, const nfs4_stateid *stateid)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	struct file_lock_context *flctx = locks_inode_context(inode);
	struct list_head *list;
	int status = 0;

	if (flctx == NULL)
		goto out;

	list = &flctx->flc_posix;
	spin_lock(&flctx->flc_lock);
restart:
	for_each_file_lock(fl, list) {
		if (nfs_file_open_context(fl->c.flc_file)->state != state)
			continue;
		spin_unlock(&flctx->flc_lock);
		status = nfs4_lock_delegation_recall(fl, state, stateid);
		if (status < 0)
			goto out;
		spin_lock(&flctx->flc_lock);
	}
	if (list == &flctx->flc_posix) {
		list = &flctx->flc_flock;
		goto restart;
	}
	spin_unlock(&flctx->flc_lock);
out:
	return status;
}

static int nfs_delegation_claim_opens(struct inode *inode,
				      const nfs4_stateid *stateid, fmode_t type)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *ctx;
	struct nfs4_state_owner *sp;
	struct nfs4_state *state;
	int err;

again:
	rcu_read_lock();
	list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
			continue;
		if (!nfs4_valid_open_stateid(state))
			continue;
		if (!nfs4_stateid_match(&state->stateid, stateid))
			continue;
		if (!get_nfs_open_context(ctx))
			continue;
		rcu_read_unlock();
		sp = state->owner;
		/* Block nfs4_proc_unlck */
		mutex_lock(&sp->so_delegreturn_mutex);
		err = nfs4_open_delegation_recall(ctx, state, stateid);
		if (!err)
			err = nfs_delegation_claim_locks(state, stateid);
		mutex_unlock(&sp->so_delegreturn_mutex);
		put_nfs_open_context(ctx);
		if (err != 0)
			return err;
		goto again;
	}
	rcu_read_unlock();
	return 0;
}

/**
 * nfs_inode_reclaim_delegation - process a delegation reclaim request
 * @inode: inode to process
 * @cred: credential to use for request
 * @type: delegation type
 * @stateid: delegation stateid
 * @pagemod_limit: write delegation "space_limit"
 * @deleg_type: raw delegation type
 *
 */
void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred,
				  fmode_t type, const nfs4_stateid *stateid,
				  unsigned long pagemod_limit, u32 deleg_type)
{
	struct nfs_delegation *delegation;
	const struct cred *oldcred = NULL;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (!delegation) {
		rcu_read_unlock();
		nfs_inode_set_delegation(inode, cred, type, stateid,
					 pagemod_limit, deleg_type);
		return;
	}

	spin_lock(&delegation->lock);
	nfs4_stateid_copy(&delegation->stateid, stateid);
	delegation->type = type;
	delegation->pagemod_limit = pagemod_limit;
	oldcred = delegation->cred;
	delegation->cred = get_cred(cred);
	switch (deleg_type) {
	case NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG:
	case NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG:
		set_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags);
		break;
	default:
		clear_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags);
	}
	clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
	if (test_and_clear_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
		atomic_long_inc(&NFS_SERVER(inode)->nr_active_delegations);
	spin_unlock(&delegation->lock);
	rcu_read_unlock();
	put_cred(oldcred);
	trace_nfs4_reclaim_delegation(inode, type);
}

/* Send the DELEGRETURN RPC unless the delegation has already been revoked. */
static int nfs_do_return_delegation(struct inode *inode,
				    struct nfs_delegation *delegation,
				    int issync)
{
	const struct cred *cred;
	int res = 0;

	if (!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
		spin_lock(&delegation->lock);
		cred = get_cred(delegation->cred);
		spin_unlock(&delegation->lock);
		res = nfs4_proc_delegreturn(inode, cred, &delegation->stateid,
					    delegation, issync);
		put_cred(cred);
	}
	return res;
}

/*
 * Take a reference to the delegation's inode. If the inode is being
 * freed, flag the delegation so that callers skip it and return NULL.
 */
static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation)
{
	struct inode *inode = NULL;

	spin_lock(&delegation->lock);
	if (delegation->inode != NULL)
		inode = igrab(delegation->inode);
	if (!inode)
		set_bit(NFS_DELEGATION_INODE_FREEING, &delegation->flags);
	spin_unlock(&delegation->lock);
	return inode;
}

/*
 * Mark the inode's delegation as being returned and take a reference to it.
 * Must be called under the RCU read lock. Returns NULL if a return is
 * already in progress.
 */
static struct nfs_delegation *
nfs_start_delegation_return_locked(struct nfs_inode *nfsi)
{
	struct nfs_delegation *ret = NULL;
	struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation);

	if (delegation == NULL)
		goto out;
	spin_lock(&delegation->lock);
	if (delegation->inode &&
	    !test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
		clear_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags);
		/* Refcount matched in nfs_end_delegation_return() */
		ret = nfs_get_delegation(delegation);
	}
	spin_unlock(&delegation->lock);
	if (ret)
		nfs_clear_verifier_delegated(&nfsi->vfs_inode);
out:
	return ret;
}

static struct nfs_delegation *
nfs_start_delegation_return(struct nfs_inode *nfsi)
{
	struct nfs_delegation *delegation;

	rcu_read_lock();
	delegation = nfs_start_delegation_return_locked(nfsi);
	rcu_read_unlock();
	return delegation;
}

static void nfs_abort_delegation_return(struct nfs_delegation *delegation,
					struct nfs_server *server, int err)
{
	spin_lock(&delegation->lock);
	clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
	if (err == -EAGAIN) {
		set_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags);
		set_bit(NFS4SERV_DELEGRETURN_DELAYED,
			&server->delegation_flags);
		set_bit(NFS4CLNT_DELEGRETURN_DELAYED,
			&server->nfs_client->cl_state);
	}
	spin_unlock(&delegation->lock);
}

static struct nfs_delegation *
nfs_detach_delegation_locked(struct nfs_inode *nfsi,
			     struct nfs_delegation *delegation,
			     struct nfs_client *clp)
{
	struct nfs_delegation *deleg_cur =
		rcu_dereference_protected(nfsi->delegation,
					  lockdep_is_held(&clp->cl_lock));

	trace_nfs4_detach_delegation(&nfsi->vfs_inode, delegation->type);

	if (deleg_cur == NULL || delegation != deleg_cur)
		return NULL;

	spin_lock(&delegation->lock);
	if (!delegation->inode) {
		spin_unlock(&delegation->lock);
		return NULL;
	}
	hlist_del_init_rcu(&delegation->hash);
	list_del_rcu(&delegation->super_list);
	delegation->inode = NULL;
	rcu_assign_pointer(nfsi->delegation, NULL);
	spin_unlock(&delegation->lock);
	clear_bit(NFS_INO_REQ_DIR_DELEG, &nfsi->flags);
	return delegation;
}

static struct nfs_delegation *nfs_detach_delegation(struct nfs_inode *nfsi,
						    struct nfs_delegation *delegation,
						    struct nfs_server *server)
{
	struct nfs_client *clp = server->nfs_client;

	spin_lock(&clp->cl_lock);
	delegation = nfs_detach_delegation_locked(nfsi, delegation, clp);
	spin_unlock(&clp->cl_lock);
	return delegation;
}

static struct nfs_delegation *
nfs_inode_detach_delegation(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_delegation *delegation;

	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation != NULL)
		delegation = nfs_detach_delegation(nfsi, delegation, server);
	rcu_read_unlock();
	return delegation;
}

static void
nfs_update_delegation_cred(struct nfs_delegation *delegation,
			   const struct cred *cred)
{
	const struct cred *old;

	if (cred_fscmp(delegation->cred, cred) != 0) {
		old = xchg(&delegation->cred, get_cred(cred));
		put_cred(old);
	}
}

static void
nfs_update_inplace_delegation(struct nfs_server *server,
			      struct nfs_delegation *delegation,
			      const struct nfs_delegation *update)
{
	if (nfs4_stateid_is_newer(&update->stateid, &delegation->stateid)) {
		delegation->stateid.seqid = update->stateid.seqid;
		smp_wmb();
		delegation->type = update->type;
		delegation->pagemod_limit = update->pagemod_limit;
		if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
			delegation->change_attr = update->change_attr;
			nfs_update_delegation_cred(delegation, update->cred);
			/* smp_mb__before_atomic() is implicit due to xchg() */
			clear_bit(NFS_DELEGATION_REVOKED, &delegation->flags);
			atomic_long_inc(&server->nr_active_delegations);
		}
	}
}

/**
 * nfs_inode_set_delegation - set up a delegation on an inode
 * @inode: inode to which delegation applies
 * @cred: cred to use for subsequent delegation processing
 * @type: delegation type
 * @stateid: delegation stateid
 * @pagemod_limit: write delegation "space_limit"
 * @deleg_type: raw delegation type
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred,
			     fmode_t type, const nfs4_stateid *stateid,
			     unsigned long pagemod_limit, u32 deleg_type)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_client *clp = server->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation, *old_delegation;
	struct nfs_delegation *freeme = NULL;
	int status = 0;

	delegation = kmalloc(sizeof(*delegation), GFP_KERNEL_ACCOUNT);
	if (delegation == NULL)
		return -ENOMEM;
	nfs4_stateid_copy(&delegation->stateid, stateid);
	refcount_set(&delegation->refcount, 1);
	delegation->type = type;
	delegation->pagemod_limit = pagemod_limit;
	delegation->change_attr = inode_peek_iversion_raw(inode);
	delegation->cred = get_cred(cred);
	delegation->inode = inode;
	delegation->flags = 1<<NFS_DELEGATION_REFERENCED;
	switch (deleg_type) {
	case NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG:
	case NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG:
		delegation->flags |= BIT(NFS_DELEGATION_DELEGTIME);
	}
	delegation->test_gen = 0;
	spin_lock_init(&delegation->lock);

	spin_lock(&clp->cl_lock);
	old_delegation = rcu_dereference_protected(nfsi->delegation,
						   lockdep_is_held(&clp->cl_lock));
	if (old_delegation == NULL)
		goto add_new;
	/* Is this an update of the existing delegation? */
	if (nfs4_stateid_match_other(&old_delegation->stateid,
				     &delegation->stateid)) {
		spin_lock(&old_delegation->lock);
		nfs_update_inplace_delegation(server, old_delegation,
					      delegation);
		spin_unlock(&old_delegation->lock);
		goto out;
	}
	if (!test_bit(NFS_DELEGATION_REVOKED, &old_delegation->flags)) {
		/*
		 * Deal with broken servers that hand out two
		 * delegations for the same file.
		 * Allow for upgrades to a WRITE delegation, but
		 * nothing else.
		 */
		dfprintk(FILE, "%s: server %s handed out "
				"a duplicate delegation!\n",
				__func__, clp->cl_hostname);
		if (delegation->type == old_delegation->type ||
		    !(delegation->type & FMODE_WRITE)) {
			freeme = delegation;
			delegation = NULL;
			goto out;
		}
		if (test_and_set_bit(NFS_DELEGATION_RETURNING,
				     &old_delegation->flags))
			goto out;
	}
	freeme = nfs_detach_delegation_locked(nfsi, old_delegation, clp);
	if (freeme == NULL)
		goto out;
add_new:
	/*
	 * If we didn't revalidate the change attribute before setting
	 * the delegation, then pre-emptively ask for a full attribute
	 * cache revalidation.
	 */
	spin_lock(&inode->i_lock);
	if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_CHANGE)
		nfs_set_cache_invalid(inode,
			NFS_INO_INVALID_ATIME | NFS_INO_INVALID_CTIME |
			NFS_INO_INVALID_MTIME | NFS_INO_INVALID_SIZE |
			NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
			NFS_INO_INVALID_OTHER | NFS_INO_INVALID_DATA |
			NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
			NFS_INO_INVALID_XATTR);
	spin_unlock(&inode->i_lock);

	list_add_tail_rcu(&delegation->super_list, &server->delegations);
	hlist_add_head_rcu(&delegation->hash,
			   nfs_delegation_hash(server, &NFS_I(inode)->fh));
	rcu_assign_pointer(nfsi->delegation, delegation);
	delegation = NULL;

	atomic_long_inc(&server->nr_active_delegations);

	trace_nfs4_set_delegation(inode, type);

	/* If we hold writebacks and have delegated mtime then update */
	if (deleg_type == NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG &&
	    nfs_have_writebacks(inode))
		nfs_update_delegated_mtime(inode);
out:
	spin_unlock(&clp->cl_lock);
	if (delegation != NULL)
		__nfs_free_delegation(delegation);
	if (freeme != NULL) {
		nfs_do_return_delegation(inode, freeme, 0);
		nfs_free_delegation(server, freeme);
	}
	return status;
}

/*
 * Basic procedure for returning a delegation to the server
 */
static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
	struct nfs_server *server = NFS_SERVER(inode);
	unsigned int mode = O_WRONLY | O_RDWR;
	int err = 0;

	if (delegation == NULL)
		return 0;

	if (!issync)
		mode |= O_NONBLOCK;
	/* Recall of any remaining application leases */
	err = break_lease(inode, mode);

	while (err == 0) {
		if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
			break;
		err = nfs_delegation_claim_opens(inode, &delegation->stateid,
						 delegation->type);
		if (!issync || err != -EAGAIN)
			break;
		/*
		 * Guard against state recovery
		 */
		err = nfs4_wait_clnt_recover(server->nfs_client);
	}

	if (err) {
		nfs_abort_delegation_return(delegation, server, err);
		goto out;
	}

	err = nfs_do_return_delegation(inode, delegation, issync);
out:
	/* Refcount matched in nfs_start_delegation_return_locked() */
	nfs_put_delegation(delegation);
	return err;
}

static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
{
	bool ret = false;

	trace_nfs_delegation_need_return(delegation);

	if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
		ret = true;
	if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags) ||
	    test_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags) ||
	    test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
		ret = false;

	return ret;
}

static int nfs_server_return_marked_delegations(struct nfs_server *server,
						void __always_unused *data)
{
	struct nfs_delegation *delegation;
	struct nfs_delegation *prev;
	struct inode *inode;
	struct inode *place_holder = NULL;
	struct nfs_delegation *place_holder_deleg = NULL;
	int err = 0;

	if (!test_and_clear_bit(NFS4SERV_DELEGRETURN,
				&server->delegation_flags))
		return 0;
restart:
	/*
	 * To avoid quadratic looping we hold a reference
	 * to an inode place_holder. Each time we restart, we
	 * list delegation in the server from the delegations
	 * of that inode.
	 * prev is an RCU-protected pointer to a delegation which
	 * wasn't marked for return and might be a good choice for
	 * the next place_holder.
	 */
	prev = NULL;
	delegation = NULL;
	rcu_read_lock();
	if (place_holder)
		delegation = rcu_dereference(NFS_I(place_holder)->delegation);
	if (!delegation || delegation != place_holder_deleg)
		delegation = list_entry_rcu(server->delegations.next,
					    struct nfs_delegation, super_list);
	list_for_each_entry_from_rcu(delegation, &server->delegations, super_list) {
		struct inode *to_put = NULL;

		if (test_bit(NFS_DELEGATION_INODE_FREEING, &delegation->flags))
			continue;
		if (!nfs_delegation_need_return(delegation)) {
			if (nfs4_is_valid_delegation(delegation, 0))
				prev = delegation;
			continue;
		}
		inode = nfs_delegation_grab_inode(delegation);
		if (inode == NULL)
			continue;

		if (prev) {
			struct inode *tmp = nfs_delegation_grab_inode(prev);
			if (tmp) {
				to_put = place_holder;
				place_holder = tmp;
				place_holder_deleg = prev;
			}
		}

		delegation = nfs_start_delegation_return_locked(NFS_I(inode));
		rcu_read_unlock();

		iput(to_put);

		err = nfs_end_delegation_return(inode, delegation, 0);
		iput(inode);
		cond_resched();
		if (!err)
			goto restart;
		set_bit(NFS4SERV_DELEGRETURN, &server->delegation_flags);
		set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
		goto out;
	}
	rcu_read_unlock();
out:
	iput(place_holder);
	return err;
}

static bool nfs_server_clear_delayed_delegations(struct nfs_server *server)
{
	struct nfs_delegation *d;
	bool ret = false;

	if (!test_and_clear_bit(NFS4SERV_DELEGRETURN_DELAYED,
				&server->delegation_flags))
		goto out;
	list_for_each_entry_rcu (d, &server->delegations, super_list) {
		if (!test_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags))
			continue;
		nfs_mark_return_delegation(server, d);
		clear_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags);
		ret = true;
	}
out:
	return ret;
}

static bool nfs_client_clear_delayed_delegations(struct nfs_client *clp)
{
	struct nfs_server *server;
	bool ret = false;

	if (!test_and_clear_bit(NFS4CLNT_DELEGRETURN_DELAYED, &clp->cl_state))
		goto out;
	rcu_read_lock();
	list_for_each_entry_rcu (server, &clp->cl_superblocks, client_link) {
		if (nfs_server_clear_delayed_delegations(server))
			ret = true;
	}
	rcu_read_unlock();
out:
	return ret;
}

/**
 * nfs_client_return_marked_delegations - return previously marked delegations
 * @clp: nfs_client to process
 *
 * Note that this function is designed to be called by the state
 * manager thread. For this reason, it cannot flush the dirty data,
 * since that could deadlock in case of a state recovery error.
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs_client_return_marked_delegations(struct nfs_client *clp)
{
	int err = nfs_client_for_each_server(
		clp, nfs_server_return_marked_delegations, NULL);
	if (err)
		return err;
	/* If a return was delayed, sleep to prevent hard looping */
	if (nfs_client_clear_delayed_delegations(clp))
		ssleep(1);
	return 0;
}

/**
 * nfs_inode_evict_delegation - return delegation, don't reclaim opens
 * @inode: inode to process
 *
 * Does not protect against delegation reclaims, therefore really only safe
 * to be called from nfs4_clear_inode(). Guaranteed to always free
 * the delegation structure.
 */
void nfs_inode_evict_delegation(struct inode *inode)
{
	struct nfs_delegation *delegation;

	delegation = nfs_inode_detach_delegation(inode);
	if (delegation != NULL) {
		set_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
		set_bit(NFS_DELEGATION_INODE_FREEING, &delegation->flags);
		nfs_do_return_delegation(inode, delegation, 1);
		nfs_free_delegation(NFS_SERVER(inode), delegation);
	}
}

/**
 * nfs4_inode_return_delegation - synchronously return a delegation
 * @inode: inode to process
 *
 * This routine will always flush any dirty data to disk on the
 * assumption that if we need to return the delegation, then
 * we should stop caching.
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs4_inode_return_delegation(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;

	delegation = nfs_start_delegation_return(nfsi);
	if (delegation != NULL) {
		/* Synchronous recall of any application leases */
		break_lease(inode, O_WRONLY | O_RDWR);
		if (S_ISREG(inode->i_mode))
			nfs_wb_all(inode);
		return nfs_end_delegation_return(inode, delegation, 1);
	}
	return 0;
}

/**
 * nfs4_inode_set_return_delegation_on_close - asynchronously return a delegation
 * @inode: inode to process
 *
 * This routine is called to request that the delegation be returned as soon
 * as the file is closed. If the file is already closed, the delegation is
 * immediately returned.
 */
void nfs4_inode_set_return_delegation_on_close(struct inode *inode)
{
	struct nfs_delegation *delegation;
	struct nfs_delegation *ret = NULL;

	if (!inode)
		return;
	rcu_read_lock();
	delegation = nfs4_get_valid_delegation(inode);
	if (!delegation)
		goto out;
	spin_lock(&delegation->lock);
	if (!delegation->inode)
		goto out_unlock;
	if (list_empty(&NFS_I(inode)->open_files) &&
	    !test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
		/* Refcount matched in nfs_end_delegation_return() */
		ret = nfs_get_delegation(delegation);
	} else
		set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
out_unlock:
	spin_unlock(&delegation->lock);
	if (ret)
		nfs_clear_verifier_delegated(inode);
out:
	rcu_read_unlock();
	nfs_end_delegation_return(inode, ret, 0);
}

/**
 * nfs4_inode_return_delegation_on_close - asynchronously return a delegation
 * @inode: inode to process
 *
 * This routine is called on file close in order to determine if the
 * inode delegation needs to be returned immediately.
 */
void nfs4_inode_return_delegation_on_close(struct inode *inode)
{
	struct nfs_delegation *delegation;
	struct nfs_delegation *ret = NULL;

	if (!inode)
		return;
	rcu_read_lock();
	delegation = nfs4_get_valid_delegation(inode);
	if (!delegation)
		goto out;
	if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags) ||
	    atomic_long_read(&NFS_SERVER(inode)->nr_active_delegations) >=
	    nfs_delegation_watermark) {
		spin_lock(&delegation->lock);
		if (delegation->inode &&
		    list_empty(&NFS_I(inode)->open_files) &&
		    !test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
			clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
			/* Refcount matched in nfs_end_delegation_return() */
			ret = nfs_get_delegation(delegation);
		}
		spin_unlock(&delegation->lock);
		if (ret)
			nfs_clear_verifier_delegated(inode);
	}
out:
	rcu_read_unlock();
	nfs_end_delegation_return(inode, ret, 0);
}

/**
 * nfs4_inode_make_writeable
 * @inode: pointer to inode
 *
 * Make the inode writeable by returning the delegation if necessary
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs4_inode_make_writeable(struct inode *inode)
{
	struct nfs_delegation *delegation;

	rcu_read_lock();
	delegation = nfs4_get_valid_delegation(inode);
	if (delegation == NULL ||
	    (nfs4_has_session(NFS_SERVER(inode)->nfs_client) &&
	     (delegation->type & FMODE_WRITE))) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();
	return nfs4_inode_return_delegation(inode);
}

static void
nfs_mark_return_if_closed_delegation(struct nfs_server *server,
				     struct nfs_delegation *delegation)
{
	struct inode *inode;

	if (test_bit(NFS_DELEGATION_RETURN, &delegation->flags) ||
	    test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags))
		return;
	spin_lock(&delegation->lock);
	inode = delegation->inode;
	if (!inode)
		goto out;
	if (list_empty(&NFS_I(inode)->open_files))
		nfs_mark_return_delegation(server, delegation);
	else
		set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
out:
	spin_unlock(&delegation->lock);
}

static bool nfs_server_mark_return_all_delegations(struct nfs_server *server)
{
	struct nfs_delegation *delegation;
	bool ret = false;

	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
		nfs_mark_return_delegation(server, delegation);
		ret = true;
	}
	return ret;
}

static void nfs_client_mark_return_all_delegations(struct nfs_client *clp)
{
	struct nfs_server *server;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
		nfs_server_mark_return_all_delegations(server);
	rcu_read_unlock();
}

static void nfs_delegation_run_state_manager(struct nfs_client *clp)
{
	if (test_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state))
		nfs4_schedule_state_manager(clp);
}

/**
 * nfs_expire_all_delegations
 * @clp: client to process
 *
 */
void nfs_expire_all_delegations(struct nfs_client *clp)
{
	nfs_client_mark_return_all_delegations(clp);
	nfs_delegation_run_state_manager(clp);
}

/**
 * nfs_server_return_all_delegations - return delegations for one superblock
 * @server: pointer to nfs_server to process
 *
 */
void nfs_server_return_all_delegations(struct nfs_server *server)
{
	struct nfs_client *clp = server->nfs_client;
	bool need_wait;

	if (clp == NULL)
		return;

	rcu_read_lock();
	need_wait = nfs_server_mark_return_all_delegations(server);
	rcu_read_unlock();

	if (need_wait) {
		nfs4_schedule_state_manager(clp);
		nfs4_wait_clnt_recover(clp);
	}
}

static void nfs_mark_return_unused_delegation_types(struct nfs_server *server,
						    fmode_t flags)
{
	struct nfs_delegation *delegation;

	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
		if ((delegation->type == (FMODE_READ|FMODE_WRITE)) && !(flags & FMODE_WRITE))
			continue;
		if (delegation->type & flags)
			nfs_mark_return_if_closed_delegation(server, delegation);
	}
}

static void nfs_client_mark_return_unused_delegation_types(struct nfs_client *clp,
							   fmode_t flags)
{
	struct nfs_server *server;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
		nfs_mark_return_unused_delegation_types(server, flags);
	rcu_read_unlock();
}

static void nfs_revoke_delegation(struct inode *inode,
				  const nfs4_stateid *stateid)
{
	struct nfs_delegation *delegation;
	nfs4_stateid tmp;
	bool ret = false;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation == NULL)
		goto out;
	if (stateid == NULL) {
		nfs4_stateid_copy(&tmp, &delegation->stateid);
		stateid = &tmp;
	} else {
		if (!nfs4_stateid_match_other(stateid, &delegation->stateid))
			goto out;
		spin_lock(&delegation->lock);
		if (stateid->seqid) {
			if (nfs4_stateid_is_newer(&delegation->stateid, stateid)) {
				spin_unlock(&delegation->lock);
				goto out;
			}
			delegation->stateid.seqid = stateid->seqid;
		}
		spin_unlock(&delegation->lock);
	}
	nfs_mark_delegation_revoked(NFS_SERVER(inode), delegation);
	ret = true;
out:
	rcu_read_unlock();
	if (ret)
		nfs_inode_find_state_and_recover(inode, stateid);
}

void nfs_delegation_mark_returned(struct inode *inode,
				  const nfs4_stateid *stateid)
{
	struct nfs_delegation *delegation;

	if (!inode)
		return;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (!delegation)
		goto out_rcu_unlock;

	spin_lock(&delegation->lock);
	if (!nfs4_stateid_match_other(stateid, &delegation->stateid))
		goto out_spin_unlock;
	if (stateid->seqid) {
		/* If delegation->stateid is newer, don't mark as returned */
		if (nfs4_stateid_is_newer(&delegation->stateid, stateid))
			goto out_clear_returning;
		if (delegation->stateid.seqid != stateid->seqid)
			delegation->stateid.seqid = stateid->seqid;
	}

	nfs_mark_delegation_revoked(NFS_SERVER(inode), delegation);
	clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
	spin_unlock(&delegation->lock);
	if (nfs_detach_delegation(NFS_I(inode), delegation, NFS_SERVER(inode)))
		nfs_put_delegation(delegation);
	goto out_rcu_unlock;

out_clear_returning:
	clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
out_spin_unlock:
	spin_unlock(&delegation->lock);
out_rcu_unlock:
	rcu_read_unlock();

	nfs_inode_find_state_and_recover(inode, stateid);
}

/**
 * nfs_remove_bad_delegation - handle delegations that are unusable
 * @inode: inode to process
 * @stateid: the delegation's stateid
 *
 * If the server ACK-ed our FREE_STATEID then clean
 * up the delegation, else mark and keep the revoked state.
 */
void nfs_remove_bad_delegation(struct inode *inode,
			       const nfs4_stateid *stateid)
{
	if (stateid && stateid->type == NFS4_FREED_STATEID_TYPE)
		nfs_delegation_mark_returned(inode, stateid);
	else
		nfs_revoke_delegation(inode, stateid);
}
EXPORT_SYMBOL_GPL(nfs_remove_bad_delegation);

/**
 * nfs_expire_unused_delegation_types
 * @clp: client to process
 * @flags: delegation types to expire
 *
 */
void nfs_expire_unused_delegation_types(struct nfs_client *clp, fmode_t flags)
{
	nfs_client_mark_return_unused_delegation_types(clp, flags);
	nfs_delegation_run_state_manager(clp);
}

static void nfs_mark_return_unreferenced_delegations(struct nfs_server *server)
{
	struct nfs_delegation *delegation;

	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
		if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags))
			continue;
		nfs_mark_return_if_closed_delegation(server, delegation);
	}
}

/**
 * nfs_expire_unreferenced_delegations - Eliminate unused delegations
 * @clp: nfs_client to process
 *
 */
void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
{
	struct nfs_server *server;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
		nfs_mark_return_unreferenced_delegations(server);
	rcu_read_unlock();

	nfs_delegation_run_state_manager(clp);
}

/**
 * nfs_async_inode_return_delegation - asynchronously return a delegation
 * @inode: inode to process
 * @stateid: state ID information
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs_async_inode_return_delegation(struct inode *inode,
				      const nfs4_stateid *stateid)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_client *clp = server->nfs_client;
	struct nfs_delegation *delegation;

	rcu_read_lock();
	delegation = nfs4_get_valid_delegation(inode);
	if (delegation == NULL)
		goto out_enoent;
	if (stateid != NULL &&
	    !clp->cl_mvops->match_stateid(&delegation->stateid, stateid))
		goto out_enoent;
	nfs_mark_return_delegation(server, delegation);
	rcu_read_unlock();

	/* If there are any application leases or delegations, recall them */
	break_lease(inode, O_WRONLY | O_RDWR | O_NONBLOCK);

	nfs_delegation_run_state_manager(clp);
	return 0;
out_enoent:
	rcu_read_unlock();
	return -ENOENT;
}

static struct inode *
nfs_delegation_find_inode_server(struct nfs_server *server,
				 const struct nfs_fh *fhandle)
{
	struct hlist_head *head = nfs_delegation_hash(server, fhandle);
	struct nfs_delegation *delegation;
	struct super_block *freeme = NULL;
	struct inode *res = NULL;

	hlist_for_each_entry_rcu(delegation, head, hash) {
		spin_lock(&delegation->lock);
		if (delegation->inode != NULL &&
		    !test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) &&
		    nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
			if (nfs_sb_active(server->super)) {
				freeme = server->super;
				res = igrab(delegation->inode);
			}
			spin_unlock(&delegation->lock);
			if (res != NULL)
				return res;
			if (freeme) {
				rcu_read_unlock();
				nfs_sb_deactive(freeme);
				rcu_read_lock();
			}
			return ERR_PTR(-EAGAIN);
		}
		spin_unlock(&delegation->lock);
	}
	return ERR_PTR(-ENOENT);
}

/**
 * nfs_delegation_find_inode - retrieve the inode associated with a delegation
 * @clp: client state handle
 * @fhandle: filehandle from a delegation recall
 *
 * Returns pointer to inode matching "fhandle," or NULL if a matching inode
 * cannot be found.
 */
struct inode *nfs_delegation_find_inode(struct nfs_client *clp,
					const struct nfs_fh *fhandle)
{
	struct nfs_server *server;
	struct inode *res;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		res = nfs_delegation_find_inode_server(server, fhandle);
		if (res != ERR_PTR(-ENOENT)) {
			rcu_read_unlock();
			return res;
		}
	}
	rcu_read_unlock();
	return ERR_PTR(-ENOENT);
}

static void nfs_delegation_mark_reclaim_server(struct nfs_server *server)
{
	struct nfs_delegation *delegation;

	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
		/*
		 * If the delegation may have been admin revoked, then we
		 * cannot reclaim it.
		 */
		if (test_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags))
			continue;
		set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
	}
}

/**
 * nfs_delegation_mark_reclaim - mark all delegations as needing to be reclaimed
 * @clp: nfs_client to process
 *
 */
void nfs_delegation_mark_reclaim(struct nfs_client *clp)
{
	struct nfs_server *server;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
		nfs_delegation_mark_reclaim_server(server);
	rcu_read_unlock();
}

static int nfs_server_reap_unclaimed_delegations(struct nfs_server *server,
						 void __always_unused *data)
{
	struct nfs_delegation *delegation;
	struct inode *inode;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
		if (test_bit(NFS_DELEGATION_INODE_FREEING,
					&delegation->flags) ||
		    test_bit(NFS_DELEGATION_RETURNING,
					&delegation->flags) ||
		    test_bit(NFS_DELEGATION_NEED_RECLAIM,
					&delegation->flags) == 0)
			continue;
		inode = nfs_delegation_grab_inode(delegation);
		if (inode == NULL)
			continue;
		delegation = nfs_start_delegation_return_locked(NFS_I(inode));
		rcu_read_unlock();
		if (delegation != NULL) {
			if (nfs_detach_delegation(NFS_I(inode), delegation,
						server) != NULL)
				nfs_free_delegation(server, delegation);
			/* Match nfs_start_delegation_return_locked */
			nfs_put_delegation(delegation);
		}
		iput(inode);
		cond_resched();
		goto restart;
	}
	rcu_read_unlock();
	return 0;
}

/**
 * nfs_delegation_reap_unclaimed - reap unclaimed delegations after reboot recovery is done
 * @clp: nfs_client to process
 *
 */
void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
{
	nfs_client_for_each_server(clp, nfs_server_reap_unclaimed_delegations,
			NULL);
}

static inline bool nfs4_server_rebooted(const struct nfs_client *clp)
{
	return (clp->cl_state & (BIT(NFS4CLNT_CHECK_LEASE) |
				BIT(NFS4CLNT_LEASE_EXPIRED) |
				BIT(NFS4CLNT_SESSION_RESET))) != 0;
}

static void nfs_mark_test_expired_delegation(struct nfs_server *server,
					     struct nfs_delegation *delegation)
{
	if (delegation->stateid.type == NFS4_INVALID_STATEID_TYPE)
		return;
	clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
	set_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags);
	set_bit(NFS4SERV_DELEGATION_EXPIRED, &server->delegation_flags);
	set_bit(NFS4CLNT_DELEGATION_EXPIRED, &server->nfs_client->cl_state);
}

static void nfs_inode_mark_test_expired_delegation(struct nfs_server *server,
						   struct inode *inode)
{
	struct nfs_delegation *delegation;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation)
		nfs_mark_test_expired_delegation(server, delegation);
	rcu_read_unlock();

}

static void nfs_delegation_mark_test_expired_server(struct nfs_server *server)
{
	struct nfs_delegation *delegation;

	list_for_each_entry_rcu(delegation, &server->delegations, super_list)
		nfs_mark_test_expired_delegation(server, delegation);
}

/**
 * nfs_mark_test_expired_all_delegations - mark all delegations for testing
 * @clp: nfs_client to process
 *
 * Iterates through all the delegations associated with this server and
 * marks them as needing to be checked for validity.
 */
void nfs_mark_test_expired_all_delegations(struct nfs_client *clp)
{
	struct nfs_server *server;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
		nfs_delegation_mark_test_expired_server(server);
	rcu_read_unlock();
}

/**
 * nfs_test_expired_all_delegations - test all delegations for a client
 * @clp: nfs_client to process
 *
 * Helper for handling "recallable state revoked" status from server.
 */
void nfs_test_expired_all_delegations(struct nfs_client *clp)
{
	nfs_mark_test_expired_all_delegations(clp);
	nfs4_schedule_state_manager(clp);
}

static void
nfs_delegation_test_free_expired(struct inode *inode,
				 nfs4_stateid *stateid,
				 const struct cred *cred)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;
	int status;

	if (!cred)
		return;
	status = ops->test_and_free_expired(server, stateid, cred);
	if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
		nfs_remove_bad_delegation(inode, stateid);
}

static int nfs_server_reap_expired_delegations(struct nfs_server *server,
					       void __always_unused *data)
{
	struct nfs_delegation *delegation;
	struct inode *inode;
	const struct cred *cred;
	nfs4_stateid stateid;
	unsigned long gen = ++server->delegation_gen;

	if (!test_and_clear_bit(NFS4SERV_DELEGATION_EXPIRED,
				&server->delegation_flags))
		return 0;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
		if (test_bit(NFS_DELEGATION_INODE_FREEING,
					&delegation->flags) ||
		    test_bit(NFS_DELEGATION_RETURNING,
					&delegation->flags) ||
		    test_bit(NFS_DELEGATION_TEST_EXPIRED,
					&delegation->flags) == 0 ||
		    delegation->test_gen == gen)
			continue;
		inode = nfs_delegation_grab_inode(delegation);
		if (inode == NULL)
			continue;
		spin_lock(&delegation->lock);
		cred = get_cred_rcu(delegation->cred);
		nfs4_stateid_copy(&stateid, &delegation->stateid);
		spin_unlock(&delegation->lock);
		delegation->test_gen = gen;
		clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags);
		rcu_read_unlock();
		nfs_delegation_test_free_expired(inode, &stateid, cred);
		put_cred(cred);
		if (!nfs4_server_rebooted(server->nfs_client)) {
			iput(inode);
			cond_resched();
			goto restart;
		}
		nfs_inode_mark_test_expired_delegation(server, inode);
		set_bit(NFS4SERV_DELEGATION_EXPIRED, &server->delegation_flags);
		set_bit(NFS4CLNT_DELEGATION_EXPIRED,
			&server->nfs_client->cl_state);
		iput(inode);
		return -EAGAIN;
	}
	rcu_read_unlock();
	return 0;
}

/**
 * nfs_reap_expired_delegations - reap expired delegations
 * @clp: nfs_client to process
 *
 * Iterates through all the delegations associated with this server and
 * checks if they may have been revoked. This function is usually
 * expected to be called in cases where the server may have lost its
 * lease.
 */
void nfs_reap_expired_delegations(struct nfs_client *clp)
{
	nfs_client_for_each_server(clp, nfs_server_reap_expired_delegations,
			NULL);
}

void nfs_inode_find_delegation_state_and_recover(struct inode *inode,
						 const nfs4_stateid *stateid)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_delegation *delegation;
	bool found = false;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation &&
	    nfs4_stateid_match_or_older(&delegation->stateid, stateid) &&
	    !test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
		nfs_mark_test_expired_delegation(NFS_SERVER(inode), delegation);
		found = true;
	}
	rcu_read_unlock();
	if (found)
		nfs4_schedule_state_manager(clp);
}

/**
 * nfs_delegations_present - check for existence of delegations
 * @clp: client state handle
 *
 * Returns one if there are any nfs_delegation structures attached
 * to this nfs_client.
 */
int nfs_delegations_present(struct nfs_client *clp)
{
	struct nfs_server *server;
	int ret = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
		if (!list_empty(&server->delegations)) {
			ret = 1;
			break;
		}
	rcu_read_unlock();
	return ret;
}

/**
 * nfs4_refresh_delegation_stateid - Update delegation stateid seqid
 * @dst: stateid to refresh
 * @inode: inode to check
 *
 * Returns "true" and updates "dst->seqid" if inode had a delegation
 * that matches our delegation stateid. Otherwise "false" is returned.
 */
bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
{
	struct nfs_delegation *delegation;
	bool ret = false;
	if (!inode)
		goto out;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation != NULL &&
	    nfs4_stateid_match_other(dst, &delegation->stateid) &&
	    nfs4_stateid_is_newer(&delegation->stateid, dst) &&
	    !test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
		dst->seqid = delegation->stateid.seqid;
		ret = true;
	}
	rcu_read_unlock();
out:
	return ret;
}

/**
 * nfs4_copy_delegation_stateid - Copy inode's state ID information
 * @inode: inode to check
 * @flags: delegation type requirement
 * @dst: stateid data structure to fill in
 * @cred: optional argument to retrieve credential
 *
 * Returns "true" and fills in "dst->data" if inode had a delegation,
 * otherwise "false" is returned.
 */
bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags,
				  nfs4_stateid *dst, const struct cred **cred)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	bool ret = false;

	flags &= FMODE_READ|FMODE_WRITE;
	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (!delegation)
		goto out;
	spin_lock(&delegation->lock);
	ret = nfs4_is_valid_delegation(delegation, flags);
	if (ret) {
		nfs4_stateid_copy(dst, &delegation->stateid);
		nfs_mark_delegation_referenced(delegation);
		if (cred)
			*cred = get_cred(delegation->cred);
	}
	spin_unlock(&delegation->lock);
out:
	rcu_read_unlock();
	return ret;
}

/**
 * nfs4_delegation_flush_on_close - Check if we must flush file on close
 * @inode: inode to check
 *
 * This function checks the number of outstanding writes to the file
 * against the delegation 'space_limit' field to see if
 * the spec requires us to flush the file on close.
 */
bool nfs4_delegation_flush_on_close(const struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	bool ret = true;

	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation == NULL || !(delegation->type & FMODE_WRITE))
		goto out;
	if (atomic_long_read(&nfsi->nrequests) < delegation->pagemod_limit)
		ret = false;
out:
	rcu_read_unlock();
	return ret;
}

int nfs4_delegation_hash_alloc(struct nfs_server *server)
{
	int delegation_buckets, i;

	delegation_buckets = roundup_pow_of_two(nfs_delegation_watermark / 16);
	server->delegation_hash_mask = delegation_buckets - 1;
	server->delegation_hash_table = kmalloc_array(delegation_buckets,
			sizeof(*server->delegation_hash_table), GFP_KERNEL);
	if (!server->delegation_hash_table)
		return -ENOMEM;
	for (i = 0; i < delegation_buckets; i++)
		INIT_HLIST_HEAD(&server->delegation_hash_table[i]);
	return 0;
}