1 /* 2 * fs/nfs/nfs4state.c 3 * 4 * Client-side XDR for NFSv4. 5 * 6 * Copyright (c) 2002 The Regents of the University of Michigan. 7 * All rights reserved. 8 * 9 * Kendrick Smith <kmsmith@umich.edu> 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its 21 * contributors may be used to endorse or promote products derived 22 * from this software without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 25 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 26 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 27 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 32 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 33 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 34 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 35 * 36 * Implementation of the NFSv4 state model. For the time being, 37 * this is minimal, but will be made much more complex in a 38 * subsequent patch. 39 */ 40 41 #include <linux/kernel.h> 42 #include <linux/slab.h> 43 #include <linux/fs.h> 44 #include <linux/nfs_fs.h> 45 #include <linux/nfs_idmap.h> 46 #include <linux/kthread.h> 47 #include <linux/module.h> 48 #include <linux/random.h> 49 #include <linux/ratelimit.h> 50 #include <linux/workqueue.h> 51 #include <linux/bitops.h> 52 #include <linux/jiffies.h> 53 54 #include <linux/sunrpc/clnt.h> 55 56 #include "nfs4_fs.h" 57 #include "callback.h" 58 #include "delegation.h" 59 #include "internal.h" 60 #include "pnfs.h" 61 #include "netns.h" 62 63 #define NFSDBG_FACILITY NFSDBG_STATE 64 65 #define OPENOWNER_POOL_SIZE 8 66 67 const nfs4_stateid zero_stateid; 68 static DEFINE_MUTEX(nfs_clid_init_mutex); 69 static LIST_HEAD(nfs4_clientid_list); 70 71 int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred) 72 { 73 struct nfs4_setclientid_res clid = { 74 .clientid = clp->cl_clientid, 75 .confirm = clp->cl_confirm, 76 }; 77 unsigned short port; 78 int status; 79 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 80 81 if (test_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state)) 82 goto do_confirm; 83 port = nn->nfs_callback_tcpport; 84 if (clp->cl_addr.ss_family == AF_INET6) 85 port = nn->nfs_callback_tcpport6; 86 87 status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid); 88 if (status != 0) 89 goto out; 90 clp->cl_clientid = clid.clientid; 91 clp->cl_confirm = clid.confirm; 92 set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); 93 do_confirm: 94 status = nfs4_proc_setclientid_confirm(clp, &clid, cred); 95 if (status != 0) 96 goto out; 97 clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); 98 nfs4_schedule_state_renewal(clp); 99 out: 100 return status; 101 } 102 103 /** 104 * nfs40_discover_server_trunking - Detect server IP address trunking (mv0) 105 * 106 * @clp: nfs_client under test 107 * @result: OUT: found nfs_client, or clp 108 * @cred: credential to use for trunking test 109 * 110 * Returns zero, a negative errno, or a negative NFS4ERR status. 111 * If zero is returned, an nfs_client pointer is planted in 112 * "result". 113 * 114 * Note: The returned client may not yet be marked ready. 115 */ 116 int nfs40_discover_server_trunking(struct nfs_client *clp, 117 struct nfs_client **result, 118 struct rpc_cred *cred) 119 { 120 struct nfs4_setclientid_res clid = { 121 .clientid = clp->cl_clientid, 122 .confirm = clp->cl_confirm, 123 }; 124 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 125 unsigned short port; 126 int status; 127 128 port = nn->nfs_callback_tcpport; 129 if (clp->cl_addr.ss_family == AF_INET6) 130 port = nn->nfs_callback_tcpport6; 131 132 status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid); 133 if (status != 0) 134 goto out; 135 clp->cl_clientid = clid.clientid; 136 clp->cl_confirm = clid.confirm; 137 138 status = nfs40_walk_client_list(clp, result, cred); 139 switch (status) { 140 case -NFS4ERR_STALE_CLIENTID: 141 set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); 142 case 0: 143 /* Sustain the lease, even if it's empty. If the clientid4 144 * goes stale it's of no use for trunking discovery. */ 145 nfs4_schedule_state_renewal(*result); 146 break; 147 } 148 149 out: 150 return status; 151 } 152 153 struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp) 154 { 155 struct rpc_cred *cred = NULL; 156 157 if (clp->cl_machine_cred != NULL) 158 cred = get_rpccred(clp->cl_machine_cred); 159 return cred; 160 } 161 162 static void nfs4_clear_machine_cred(struct nfs_client *clp) 163 { 164 struct rpc_cred *cred; 165 166 spin_lock(&clp->cl_lock); 167 cred = clp->cl_machine_cred; 168 clp->cl_machine_cred = NULL; 169 spin_unlock(&clp->cl_lock); 170 if (cred != NULL) 171 put_rpccred(cred); 172 } 173 174 static struct rpc_cred * 175 nfs4_get_renew_cred_server_locked(struct nfs_server *server) 176 { 177 struct rpc_cred *cred = NULL; 178 struct nfs4_state_owner *sp; 179 struct rb_node *pos; 180 181 for (pos = rb_first(&server->state_owners); 182 pos != NULL; 183 pos = rb_next(pos)) { 184 sp = rb_entry(pos, struct nfs4_state_owner, so_server_node); 185 if (list_empty(&sp->so_states)) 186 continue; 187 cred = get_rpccred(sp->so_cred); 188 break; 189 } 190 return cred; 191 } 192 193 /** 194 * nfs4_get_renew_cred_locked - Acquire credential for a renew operation 195 * @clp: client state handle 196 * 197 * Returns an rpc_cred with reference count bumped, or NULL. 198 * Caller must hold clp->cl_lock. 199 */ 200 struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp) 201 { 202 struct rpc_cred *cred = NULL; 203 struct nfs_server *server; 204 205 /* Use machine credentials if available */ 206 cred = nfs4_get_machine_cred_locked(clp); 207 if (cred != NULL) 208 goto out; 209 210 rcu_read_lock(); 211 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { 212 cred = nfs4_get_renew_cred_server_locked(server); 213 if (cred != NULL) 214 break; 215 } 216 rcu_read_unlock(); 217 218 out: 219 return cred; 220 } 221 222 #if defined(CONFIG_NFS_V4_1) 223 224 static int nfs41_setup_state_renewal(struct nfs_client *clp) 225 { 226 int status; 227 struct nfs_fsinfo fsinfo; 228 229 if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) { 230 nfs4_schedule_state_renewal(clp); 231 return 0; 232 } 233 234 status = nfs4_proc_get_lease_time(clp, &fsinfo); 235 if (status == 0) { 236 /* Update lease time and schedule renewal */ 237 spin_lock(&clp->cl_lock); 238 clp->cl_lease_time = fsinfo.lease_time * HZ; 239 clp->cl_last_renewal = jiffies; 240 spin_unlock(&clp->cl_lock); 241 242 nfs4_schedule_state_renewal(clp); 243 } 244 245 return status; 246 } 247 248 /* 249 * Back channel returns NFS4ERR_DELAY for new requests when 250 * NFS4_SESSION_DRAINING is set so there is no work to be done when draining 251 * is ended. 252 */ 253 static void nfs4_end_drain_session(struct nfs_client *clp) 254 { 255 struct nfs4_session *ses = clp->cl_session; 256 struct nfs4_slot_table *tbl; 257 int max_slots; 258 259 if (ses == NULL) 260 return; 261 tbl = &ses->fc_slot_table; 262 if (test_and_clear_bit(NFS4_SESSION_DRAINING, &ses->session_state)) { 263 spin_lock(&tbl->slot_tbl_lock); 264 max_slots = tbl->max_slots; 265 while (max_slots--) { 266 if (rpc_wake_up_first(&tbl->slot_tbl_waitq, 267 nfs4_set_task_privileged, 268 NULL) == NULL) 269 break; 270 } 271 spin_unlock(&tbl->slot_tbl_lock); 272 } 273 } 274 275 static int nfs4_wait_on_slot_tbl(struct nfs4_slot_table *tbl) 276 { 277 spin_lock(&tbl->slot_tbl_lock); 278 if (tbl->highest_used_slotid != NFS4_NO_SLOT) { 279 INIT_COMPLETION(tbl->complete); 280 spin_unlock(&tbl->slot_tbl_lock); 281 return wait_for_completion_interruptible(&tbl->complete); 282 } 283 spin_unlock(&tbl->slot_tbl_lock); 284 return 0; 285 } 286 287 static int nfs4_begin_drain_session(struct nfs_client *clp) 288 { 289 struct nfs4_session *ses = clp->cl_session; 290 int ret = 0; 291 292 set_bit(NFS4_SESSION_DRAINING, &ses->session_state); 293 /* back channel */ 294 ret = nfs4_wait_on_slot_tbl(&ses->bc_slot_table); 295 if (ret) 296 return ret; 297 /* fore channel */ 298 return nfs4_wait_on_slot_tbl(&ses->fc_slot_table); 299 } 300 301 static void nfs41_finish_session_reset(struct nfs_client *clp) 302 { 303 clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); 304 clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); 305 /* create_session negotiated new slot table */ 306 clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); 307 clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state); 308 nfs41_setup_state_renewal(clp); 309 } 310 311 int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred) 312 { 313 int status; 314 315 if (test_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state)) 316 goto do_confirm; 317 nfs4_begin_drain_session(clp); 318 status = nfs4_proc_exchange_id(clp, cred); 319 if (status != 0) 320 goto out; 321 set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); 322 do_confirm: 323 status = nfs4_proc_create_session(clp, cred); 324 if (status != 0) 325 goto out; 326 nfs41_finish_session_reset(clp); 327 nfs_mark_client_ready(clp, NFS_CS_READY); 328 out: 329 return status; 330 } 331 332 /** 333 * nfs41_discover_server_trunking - Detect server IP address trunking (mv1) 334 * 335 * @clp: nfs_client under test 336 * @result: OUT: found nfs_client, or clp 337 * @cred: credential to use for trunking test 338 * 339 * Returns NFS4_OK, a negative errno, or a negative NFS4ERR status. 340 * If NFS4_OK is returned, an nfs_client pointer is planted in 341 * "result". 342 * 343 * Note: The returned client may not yet be marked ready. 344 */ 345 int nfs41_discover_server_trunking(struct nfs_client *clp, 346 struct nfs_client **result, 347 struct rpc_cred *cred) 348 { 349 int status; 350 351 status = nfs4_proc_exchange_id(clp, cred); 352 if (status != NFS4_OK) 353 return status; 354 set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); 355 356 return nfs41_walk_client_list(clp, result, cred); 357 } 358 359 struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp) 360 { 361 struct rpc_cred *cred; 362 363 spin_lock(&clp->cl_lock); 364 cred = nfs4_get_machine_cred_locked(clp); 365 spin_unlock(&clp->cl_lock); 366 return cred; 367 } 368 369 #endif /* CONFIG_NFS_V4_1 */ 370 371 static struct rpc_cred * 372 nfs4_get_setclientid_cred_server(struct nfs_server *server) 373 { 374 struct nfs_client *clp = server->nfs_client; 375 struct rpc_cred *cred = NULL; 376 struct nfs4_state_owner *sp; 377 struct rb_node *pos; 378 379 spin_lock(&clp->cl_lock); 380 pos = rb_first(&server->state_owners); 381 if (pos != NULL) { 382 sp = rb_entry(pos, struct nfs4_state_owner, so_server_node); 383 cred = get_rpccred(sp->so_cred); 384 } 385 spin_unlock(&clp->cl_lock); 386 return cred; 387 } 388 389 /** 390 * nfs4_get_setclientid_cred - Acquire credential for a setclientid operation 391 * @clp: client state handle 392 * 393 * Returns an rpc_cred with reference count bumped, or NULL. 394 */ 395 struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp) 396 { 397 struct nfs_server *server; 398 struct rpc_cred *cred; 399 400 spin_lock(&clp->cl_lock); 401 cred = nfs4_get_machine_cred_locked(clp); 402 spin_unlock(&clp->cl_lock); 403 if (cred != NULL) 404 goto out; 405 406 rcu_read_lock(); 407 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { 408 cred = nfs4_get_setclientid_cred_server(server); 409 if (cred != NULL) 410 break; 411 } 412 rcu_read_unlock(); 413 414 out: 415 return cred; 416 } 417 418 static struct nfs4_state_owner * 419 nfs4_find_state_owner_locked(struct nfs_server *server, struct rpc_cred *cred) 420 { 421 struct rb_node **p = &server->state_owners.rb_node, 422 *parent = NULL; 423 struct nfs4_state_owner *sp; 424 425 while (*p != NULL) { 426 parent = *p; 427 sp = rb_entry(parent, struct nfs4_state_owner, so_server_node); 428 429 if (cred < sp->so_cred) 430 p = &parent->rb_left; 431 else if (cred > sp->so_cred) 432 p = &parent->rb_right; 433 else { 434 if (!list_empty(&sp->so_lru)) 435 list_del_init(&sp->so_lru); 436 atomic_inc(&sp->so_count); 437 return sp; 438 } 439 } 440 return NULL; 441 } 442 443 static struct nfs4_state_owner * 444 nfs4_insert_state_owner_locked(struct nfs4_state_owner *new) 445 { 446 struct nfs_server *server = new->so_server; 447 struct rb_node **p = &server->state_owners.rb_node, 448 *parent = NULL; 449 struct nfs4_state_owner *sp; 450 int err; 451 452 while (*p != NULL) { 453 parent = *p; 454 sp = rb_entry(parent, struct nfs4_state_owner, so_server_node); 455 456 if (new->so_cred < sp->so_cred) 457 p = &parent->rb_left; 458 else if (new->so_cred > sp->so_cred) 459 p = &parent->rb_right; 460 else { 461 if (!list_empty(&sp->so_lru)) 462 list_del_init(&sp->so_lru); 463 atomic_inc(&sp->so_count); 464 return sp; 465 } 466 } 467 err = ida_get_new(&server->openowner_id, &new->so_seqid.owner_id); 468 if (err) 469 return ERR_PTR(err); 470 rb_link_node(&new->so_server_node, parent, p); 471 rb_insert_color(&new->so_server_node, &server->state_owners); 472 return new; 473 } 474 475 static void 476 nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp) 477 { 478 struct nfs_server *server = sp->so_server; 479 480 if (!RB_EMPTY_NODE(&sp->so_server_node)) 481 rb_erase(&sp->so_server_node, &server->state_owners); 482 ida_remove(&server->openowner_id, sp->so_seqid.owner_id); 483 } 484 485 static void 486 nfs4_init_seqid_counter(struct nfs_seqid_counter *sc) 487 { 488 sc->create_time = ktime_get(); 489 sc->flags = 0; 490 sc->counter = 0; 491 spin_lock_init(&sc->lock); 492 INIT_LIST_HEAD(&sc->list); 493 rpc_init_wait_queue(&sc->wait, "Seqid_waitqueue"); 494 } 495 496 static void 497 nfs4_destroy_seqid_counter(struct nfs_seqid_counter *sc) 498 { 499 rpc_destroy_wait_queue(&sc->wait); 500 } 501 502 /* 503 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to 504 * create a new state_owner. 505 * 506 */ 507 static struct nfs4_state_owner * 508 nfs4_alloc_state_owner(struct nfs_server *server, 509 struct rpc_cred *cred, 510 gfp_t gfp_flags) 511 { 512 struct nfs4_state_owner *sp; 513 514 sp = kzalloc(sizeof(*sp), gfp_flags); 515 if (!sp) 516 return NULL; 517 sp->so_server = server; 518 sp->so_cred = get_rpccred(cred); 519 spin_lock_init(&sp->so_lock); 520 INIT_LIST_HEAD(&sp->so_states); 521 nfs4_init_seqid_counter(&sp->so_seqid); 522 atomic_set(&sp->so_count, 1); 523 INIT_LIST_HEAD(&sp->so_lru); 524 return sp; 525 } 526 527 static void 528 nfs4_drop_state_owner(struct nfs4_state_owner *sp) 529 { 530 struct rb_node *rb_node = &sp->so_server_node; 531 532 if (!RB_EMPTY_NODE(rb_node)) { 533 struct nfs_server *server = sp->so_server; 534 struct nfs_client *clp = server->nfs_client; 535 536 spin_lock(&clp->cl_lock); 537 if (!RB_EMPTY_NODE(rb_node)) { 538 rb_erase(rb_node, &server->state_owners); 539 RB_CLEAR_NODE(rb_node); 540 } 541 spin_unlock(&clp->cl_lock); 542 } 543 } 544 545 static void nfs4_free_state_owner(struct nfs4_state_owner *sp) 546 { 547 nfs4_destroy_seqid_counter(&sp->so_seqid); 548 put_rpccred(sp->so_cred); 549 kfree(sp); 550 } 551 552 static void nfs4_gc_state_owners(struct nfs_server *server) 553 { 554 struct nfs_client *clp = server->nfs_client; 555 struct nfs4_state_owner *sp, *tmp; 556 unsigned long time_min, time_max; 557 LIST_HEAD(doomed); 558 559 spin_lock(&clp->cl_lock); 560 time_max = jiffies; 561 time_min = (long)time_max - (long)clp->cl_lease_time; 562 list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) { 563 /* NB: LRU is sorted so that oldest is at the head */ 564 if (time_in_range(sp->so_expires, time_min, time_max)) 565 break; 566 list_move(&sp->so_lru, &doomed); 567 nfs4_remove_state_owner_locked(sp); 568 } 569 spin_unlock(&clp->cl_lock); 570 571 list_for_each_entry_safe(sp, tmp, &doomed, so_lru) { 572 list_del(&sp->so_lru); 573 nfs4_free_state_owner(sp); 574 } 575 } 576 577 /** 578 * nfs4_get_state_owner - Look up a state owner given a credential 579 * @server: nfs_server to search 580 * @cred: RPC credential to match 581 * 582 * Returns a pointer to an instantiated nfs4_state_owner struct, or NULL. 583 */ 584 struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, 585 struct rpc_cred *cred, 586 gfp_t gfp_flags) 587 { 588 struct nfs_client *clp = server->nfs_client; 589 struct nfs4_state_owner *sp, *new; 590 591 spin_lock(&clp->cl_lock); 592 sp = nfs4_find_state_owner_locked(server, cred); 593 spin_unlock(&clp->cl_lock); 594 if (sp != NULL) 595 goto out; 596 new = nfs4_alloc_state_owner(server, cred, gfp_flags); 597 if (new == NULL) 598 goto out; 599 do { 600 if (ida_pre_get(&server->openowner_id, gfp_flags) == 0) 601 break; 602 spin_lock(&clp->cl_lock); 603 sp = nfs4_insert_state_owner_locked(new); 604 spin_unlock(&clp->cl_lock); 605 } while (sp == ERR_PTR(-EAGAIN)); 606 if (sp != new) 607 nfs4_free_state_owner(new); 608 out: 609 nfs4_gc_state_owners(server); 610 return sp; 611 } 612 613 /** 614 * nfs4_put_state_owner - Release a nfs4_state_owner 615 * @sp: state owner data to release 616 * 617 * Note that we keep released state owners on an LRU 618 * list. 619 * This caches valid state owners so that they can be 620 * reused, to avoid the OPEN_CONFIRM on minor version 0. 621 * It also pins the uniquifier of dropped state owners for 622 * a while, to ensure that those state owner names are 623 * never reused. 624 */ 625 void nfs4_put_state_owner(struct nfs4_state_owner *sp) 626 { 627 struct nfs_server *server = sp->so_server; 628 struct nfs_client *clp = server->nfs_client; 629 630 if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock)) 631 return; 632 633 sp->so_expires = jiffies; 634 list_add_tail(&sp->so_lru, &server->state_owners_lru); 635 spin_unlock(&clp->cl_lock); 636 } 637 638 /** 639 * nfs4_purge_state_owners - Release all cached state owners 640 * @server: nfs_server with cached state owners to release 641 * 642 * Called at umount time. Remaining state owners will be on 643 * the LRU with ref count of zero. 644 */ 645 void nfs4_purge_state_owners(struct nfs_server *server) 646 { 647 struct nfs_client *clp = server->nfs_client; 648 struct nfs4_state_owner *sp, *tmp; 649 LIST_HEAD(doomed); 650 651 spin_lock(&clp->cl_lock); 652 list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) { 653 list_move(&sp->so_lru, &doomed); 654 nfs4_remove_state_owner_locked(sp); 655 } 656 spin_unlock(&clp->cl_lock); 657 658 list_for_each_entry_safe(sp, tmp, &doomed, so_lru) { 659 list_del(&sp->so_lru); 660 nfs4_free_state_owner(sp); 661 } 662 } 663 664 static struct nfs4_state * 665 nfs4_alloc_open_state(void) 666 { 667 struct nfs4_state *state; 668 669 state = kzalloc(sizeof(*state), GFP_NOFS); 670 if (!state) 671 return NULL; 672 atomic_set(&state->count, 1); 673 INIT_LIST_HEAD(&state->lock_states); 674 spin_lock_init(&state->state_lock); 675 seqlock_init(&state->seqlock); 676 return state; 677 } 678 679 void 680 nfs4_state_set_mode_locked(struct nfs4_state *state, fmode_t fmode) 681 { 682 if (state->state == fmode) 683 return; 684 /* NB! List reordering - see the reclaim code for why. */ 685 if ((fmode & FMODE_WRITE) != (state->state & FMODE_WRITE)) { 686 if (fmode & FMODE_WRITE) 687 list_move(&state->open_states, &state->owner->so_states); 688 else 689 list_move_tail(&state->open_states, &state->owner->so_states); 690 } 691 state->state = fmode; 692 } 693 694 static struct nfs4_state * 695 __nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner) 696 { 697 struct nfs_inode *nfsi = NFS_I(inode); 698 struct nfs4_state *state; 699 700 list_for_each_entry(state, &nfsi->open_states, inode_states) { 701 if (state->owner != owner) 702 continue; 703 if (atomic_inc_not_zero(&state->count)) 704 return state; 705 } 706 return NULL; 707 } 708 709 static void 710 nfs4_free_open_state(struct nfs4_state *state) 711 { 712 kfree(state); 713 } 714 715 struct nfs4_state * 716 nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner) 717 { 718 struct nfs4_state *state, *new; 719 struct nfs_inode *nfsi = NFS_I(inode); 720 721 spin_lock(&inode->i_lock); 722 state = __nfs4_find_state_byowner(inode, owner); 723 spin_unlock(&inode->i_lock); 724 if (state) 725 goto out; 726 new = nfs4_alloc_open_state(); 727 spin_lock(&owner->so_lock); 728 spin_lock(&inode->i_lock); 729 state = __nfs4_find_state_byowner(inode, owner); 730 if (state == NULL && new != NULL) { 731 state = new; 732 state->owner = owner; 733 atomic_inc(&owner->so_count); 734 list_add(&state->inode_states, &nfsi->open_states); 735 ihold(inode); 736 state->inode = inode; 737 spin_unlock(&inode->i_lock); 738 /* Note: The reclaim code dictates that we add stateless 739 * and read-only stateids to the end of the list */ 740 list_add_tail(&state->open_states, &owner->so_states); 741 spin_unlock(&owner->so_lock); 742 } else { 743 spin_unlock(&inode->i_lock); 744 spin_unlock(&owner->so_lock); 745 if (new) 746 nfs4_free_open_state(new); 747 } 748 out: 749 return state; 750 } 751 752 void nfs4_put_open_state(struct nfs4_state *state) 753 { 754 struct inode *inode = state->inode; 755 struct nfs4_state_owner *owner = state->owner; 756 757 if (!atomic_dec_and_lock(&state->count, &owner->so_lock)) 758 return; 759 spin_lock(&inode->i_lock); 760 list_del(&state->inode_states); 761 list_del(&state->open_states); 762 spin_unlock(&inode->i_lock); 763 spin_unlock(&owner->so_lock); 764 iput(inode); 765 nfs4_free_open_state(state); 766 nfs4_put_state_owner(owner); 767 } 768 769 /* 770 * Close the current file. 771 */ 772 static void __nfs4_close(struct nfs4_state *state, 773 fmode_t fmode, gfp_t gfp_mask, int wait) 774 { 775 struct nfs4_state_owner *owner = state->owner; 776 int call_close = 0; 777 fmode_t newstate; 778 779 atomic_inc(&owner->so_count); 780 /* Protect against nfs4_find_state() */ 781 spin_lock(&owner->so_lock); 782 switch (fmode & (FMODE_READ | FMODE_WRITE)) { 783 case FMODE_READ: 784 state->n_rdonly--; 785 break; 786 case FMODE_WRITE: 787 state->n_wronly--; 788 break; 789 case FMODE_READ|FMODE_WRITE: 790 state->n_rdwr--; 791 } 792 newstate = FMODE_READ|FMODE_WRITE; 793 if (state->n_rdwr == 0) { 794 if (state->n_rdonly == 0) { 795 newstate &= ~FMODE_READ; 796 call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags); 797 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); 798 } 799 if (state->n_wronly == 0) { 800 newstate &= ~FMODE_WRITE; 801 call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags); 802 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); 803 } 804 if (newstate == 0) 805 clear_bit(NFS_DELEGATED_STATE, &state->flags); 806 } 807 nfs4_state_set_mode_locked(state, newstate); 808 spin_unlock(&owner->so_lock); 809 810 if (!call_close) { 811 nfs4_put_open_state(state); 812 nfs4_put_state_owner(owner); 813 } else 814 nfs4_do_close(state, gfp_mask, wait); 815 } 816 817 void nfs4_close_state(struct nfs4_state *state, fmode_t fmode) 818 { 819 __nfs4_close(state, fmode, GFP_NOFS, 0); 820 } 821 822 void nfs4_close_sync(struct nfs4_state *state, fmode_t fmode) 823 { 824 __nfs4_close(state, fmode, GFP_KERNEL, 1); 825 } 826 827 /* 828 * Search the state->lock_states for an existing lock_owner 829 * that is compatible with current->files 830 */ 831 static struct nfs4_lock_state * 832 __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type) 833 { 834 struct nfs4_lock_state *pos; 835 list_for_each_entry(pos, &state->lock_states, ls_locks) { 836 if (type != NFS4_ANY_LOCK_TYPE && pos->ls_owner.lo_type != type) 837 continue; 838 switch (pos->ls_owner.lo_type) { 839 case NFS4_POSIX_LOCK_TYPE: 840 if (pos->ls_owner.lo_u.posix_owner != fl_owner) 841 continue; 842 break; 843 case NFS4_FLOCK_LOCK_TYPE: 844 if (pos->ls_owner.lo_u.flock_owner != fl_pid) 845 continue; 846 } 847 atomic_inc(&pos->ls_count); 848 return pos; 849 } 850 return NULL; 851 } 852 853 /* 854 * Return a compatible lock_state. If no initialized lock_state structure 855 * exists, return an uninitialized one. 856 * 857 */ 858 static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type) 859 { 860 struct nfs4_lock_state *lsp; 861 struct nfs_server *server = state->owner->so_server; 862 863 lsp = kzalloc(sizeof(*lsp), GFP_NOFS); 864 if (lsp == NULL) 865 return NULL; 866 nfs4_init_seqid_counter(&lsp->ls_seqid); 867 atomic_set(&lsp->ls_count, 1); 868 lsp->ls_state = state; 869 lsp->ls_owner.lo_type = type; 870 switch (lsp->ls_owner.lo_type) { 871 case NFS4_FLOCK_LOCK_TYPE: 872 lsp->ls_owner.lo_u.flock_owner = fl_pid; 873 break; 874 case NFS4_POSIX_LOCK_TYPE: 875 lsp->ls_owner.lo_u.posix_owner = fl_owner; 876 break; 877 default: 878 goto out_free; 879 } 880 lsp->ls_seqid.owner_id = ida_simple_get(&server->lockowner_id, 0, 0, GFP_NOFS); 881 if (lsp->ls_seqid.owner_id < 0) 882 goto out_free; 883 INIT_LIST_HEAD(&lsp->ls_locks); 884 return lsp; 885 out_free: 886 kfree(lsp); 887 return NULL; 888 } 889 890 void nfs4_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp) 891 { 892 ida_simple_remove(&server->lockowner_id, lsp->ls_seqid.owner_id); 893 nfs4_destroy_seqid_counter(&lsp->ls_seqid); 894 kfree(lsp); 895 } 896 897 /* 898 * Return a compatible lock_state. If no initialized lock_state structure 899 * exists, return an uninitialized one. 900 * 901 */ 902 static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner, pid_t pid, unsigned int type) 903 { 904 struct nfs4_lock_state *lsp, *new = NULL; 905 906 for(;;) { 907 spin_lock(&state->state_lock); 908 lsp = __nfs4_find_lock_state(state, owner, pid, type); 909 if (lsp != NULL) 910 break; 911 if (new != NULL) { 912 list_add(&new->ls_locks, &state->lock_states); 913 set_bit(LK_STATE_IN_USE, &state->flags); 914 lsp = new; 915 new = NULL; 916 break; 917 } 918 spin_unlock(&state->state_lock); 919 new = nfs4_alloc_lock_state(state, owner, pid, type); 920 if (new == NULL) 921 return NULL; 922 } 923 spin_unlock(&state->state_lock); 924 if (new != NULL) 925 nfs4_free_lock_state(state->owner->so_server, new); 926 return lsp; 927 } 928 929 /* 930 * Release reference to lock_state, and free it if we see that 931 * it is no longer in use 932 */ 933 void nfs4_put_lock_state(struct nfs4_lock_state *lsp) 934 { 935 struct nfs4_state *state; 936 937 if (lsp == NULL) 938 return; 939 state = lsp->ls_state; 940 if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock)) 941 return; 942 list_del(&lsp->ls_locks); 943 if (list_empty(&state->lock_states)) 944 clear_bit(LK_STATE_IN_USE, &state->flags); 945 spin_unlock(&state->state_lock); 946 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { 947 if (nfs4_release_lockowner(lsp) == 0) 948 return; 949 } 950 nfs4_free_lock_state(lsp->ls_state->owner->so_server, lsp); 951 } 952 953 static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src) 954 { 955 struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner; 956 957 dst->fl_u.nfs4_fl.owner = lsp; 958 atomic_inc(&lsp->ls_count); 959 } 960 961 static void nfs4_fl_release_lock(struct file_lock *fl) 962 { 963 nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner); 964 } 965 966 static const struct file_lock_operations nfs4_fl_lock_ops = { 967 .fl_copy_lock = nfs4_fl_copy_lock, 968 .fl_release_private = nfs4_fl_release_lock, 969 }; 970 971 int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl) 972 { 973 struct nfs4_lock_state *lsp; 974 975 if (fl->fl_ops != NULL) 976 return 0; 977 if (fl->fl_flags & FL_POSIX) 978 lsp = nfs4_get_lock_state(state, fl->fl_owner, 0, NFS4_POSIX_LOCK_TYPE); 979 else if (fl->fl_flags & FL_FLOCK) 980 lsp = nfs4_get_lock_state(state, NULL, fl->fl_pid, 981 NFS4_FLOCK_LOCK_TYPE); 982 else 983 return -EINVAL; 984 if (lsp == NULL) 985 return -ENOMEM; 986 fl->fl_u.nfs4_fl.owner = lsp; 987 fl->fl_ops = &nfs4_fl_lock_ops; 988 return 0; 989 } 990 991 static bool nfs4_copy_lock_stateid(nfs4_stateid *dst, struct nfs4_state *state, 992 const struct nfs_lockowner *lockowner) 993 { 994 struct nfs4_lock_state *lsp; 995 fl_owner_t fl_owner; 996 pid_t fl_pid; 997 bool ret = false; 998 999 1000 if (lockowner == NULL) 1001 goto out; 1002 1003 if (test_bit(LK_STATE_IN_USE, &state->flags) == 0) 1004 goto out; 1005 1006 fl_owner = lockowner->l_owner; 1007 fl_pid = lockowner->l_pid; 1008 spin_lock(&state->state_lock); 1009 lsp = __nfs4_find_lock_state(state, fl_owner, fl_pid, NFS4_ANY_LOCK_TYPE); 1010 if (lsp != NULL && test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) { 1011 nfs4_stateid_copy(dst, &lsp->ls_stateid); 1012 ret = true; 1013 } 1014 spin_unlock(&state->state_lock); 1015 nfs4_put_lock_state(lsp); 1016 out: 1017 return ret; 1018 } 1019 1020 static void nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state) 1021 { 1022 int seq; 1023 1024 do { 1025 seq = read_seqbegin(&state->seqlock); 1026 nfs4_stateid_copy(dst, &state->stateid); 1027 } while (read_seqretry(&state->seqlock, seq)); 1028 } 1029 1030 /* 1031 * Byte-range lock aware utility to initialize the stateid of read/write 1032 * requests. 1033 */ 1034 void nfs4_select_rw_stateid(nfs4_stateid *dst, struct nfs4_state *state, 1035 fmode_t fmode, const struct nfs_lockowner *lockowner) 1036 { 1037 if (nfs4_copy_delegation_stateid(dst, state->inode, fmode)) 1038 return; 1039 if (nfs4_copy_lock_stateid(dst, state, lockowner)) 1040 return; 1041 nfs4_copy_open_stateid(dst, state); 1042 } 1043 1044 struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask) 1045 { 1046 struct nfs_seqid *new; 1047 1048 new = kmalloc(sizeof(*new), gfp_mask); 1049 if (new != NULL) { 1050 new->sequence = counter; 1051 INIT_LIST_HEAD(&new->list); 1052 new->task = NULL; 1053 } 1054 return new; 1055 } 1056 1057 void nfs_release_seqid(struct nfs_seqid *seqid) 1058 { 1059 struct nfs_seqid_counter *sequence; 1060 1061 if (list_empty(&seqid->list)) 1062 return; 1063 sequence = seqid->sequence; 1064 spin_lock(&sequence->lock); 1065 list_del_init(&seqid->list); 1066 if (!list_empty(&sequence->list)) { 1067 struct nfs_seqid *next; 1068 1069 next = list_first_entry(&sequence->list, 1070 struct nfs_seqid, list); 1071 rpc_wake_up_queued_task(&sequence->wait, next->task); 1072 } 1073 spin_unlock(&sequence->lock); 1074 } 1075 1076 void nfs_free_seqid(struct nfs_seqid *seqid) 1077 { 1078 nfs_release_seqid(seqid); 1079 kfree(seqid); 1080 } 1081 1082 /* 1083 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or 1084 * failed with a seqid incrementing error - 1085 * see comments nfs_fs.h:seqid_mutating_error() 1086 */ 1087 static void nfs_increment_seqid(int status, struct nfs_seqid *seqid) 1088 { 1089 BUG_ON(list_first_entry(&seqid->sequence->list, struct nfs_seqid, list) != seqid); 1090 switch (status) { 1091 case 0: 1092 break; 1093 case -NFS4ERR_BAD_SEQID: 1094 if (seqid->sequence->flags & NFS_SEQID_CONFIRMED) 1095 return; 1096 pr_warn_ratelimited("NFS: v4 server returned a bad" 1097 " sequence-id error on an" 1098 " unconfirmed sequence %p!\n", 1099 seqid->sequence); 1100 case -NFS4ERR_STALE_CLIENTID: 1101 case -NFS4ERR_STALE_STATEID: 1102 case -NFS4ERR_BAD_STATEID: 1103 case -NFS4ERR_BADXDR: 1104 case -NFS4ERR_RESOURCE: 1105 case -NFS4ERR_NOFILEHANDLE: 1106 /* Non-seqid mutating errors */ 1107 return; 1108 }; 1109 /* 1110 * Note: no locking needed as we are guaranteed to be first 1111 * on the sequence list 1112 */ 1113 seqid->sequence->counter++; 1114 } 1115 1116 void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid) 1117 { 1118 struct nfs4_state_owner *sp = container_of(seqid->sequence, 1119 struct nfs4_state_owner, so_seqid); 1120 struct nfs_server *server = sp->so_server; 1121 1122 if (status == -NFS4ERR_BAD_SEQID) 1123 nfs4_drop_state_owner(sp); 1124 if (!nfs4_has_session(server->nfs_client)) 1125 nfs_increment_seqid(status, seqid); 1126 } 1127 1128 /* 1129 * Increment the seqid if the LOCK/LOCKU succeeded, or 1130 * failed with a seqid incrementing error - 1131 * see comments nfs_fs.h:seqid_mutating_error() 1132 */ 1133 void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid) 1134 { 1135 nfs_increment_seqid(status, seqid); 1136 } 1137 1138 int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task) 1139 { 1140 struct nfs_seqid_counter *sequence = seqid->sequence; 1141 int status = 0; 1142 1143 spin_lock(&sequence->lock); 1144 seqid->task = task; 1145 if (list_empty(&seqid->list)) 1146 list_add_tail(&seqid->list, &sequence->list); 1147 if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid) 1148 goto unlock; 1149 rpc_sleep_on(&sequence->wait, task, NULL); 1150 status = -EAGAIN; 1151 unlock: 1152 spin_unlock(&sequence->lock); 1153 return status; 1154 } 1155 1156 static int nfs4_run_state_manager(void *); 1157 1158 static void nfs4_clear_state_manager_bit(struct nfs_client *clp) 1159 { 1160 smp_mb__before_clear_bit(); 1161 clear_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state); 1162 smp_mb__after_clear_bit(); 1163 wake_up_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING); 1164 rpc_wake_up(&clp->cl_rpcwaitq); 1165 } 1166 1167 /* 1168 * Schedule the nfs_client asynchronous state management routine 1169 */ 1170 void nfs4_schedule_state_manager(struct nfs_client *clp) 1171 { 1172 struct task_struct *task; 1173 char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1]; 1174 1175 if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) 1176 return; 1177 __module_get(THIS_MODULE); 1178 atomic_inc(&clp->cl_count); 1179 1180 /* The rcu_read_lock() is not strictly necessary, as the state 1181 * manager is the only thread that ever changes the rpc_xprt 1182 * after it's initialized. At this point, we're single threaded. */ 1183 rcu_read_lock(); 1184 snprintf(buf, sizeof(buf), "%s-manager", 1185 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)); 1186 rcu_read_unlock(); 1187 task = kthread_run(nfs4_run_state_manager, clp, buf); 1188 if (IS_ERR(task)) { 1189 printk(KERN_ERR "%s: kthread_run: %ld\n", 1190 __func__, PTR_ERR(task)); 1191 nfs4_clear_state_manager_bit(clp); 1192 nfs_put_client(clp); 1193 module_put(THIS_MODULE); 1194 } 1195 } 1196 1197 /* 1198 * Schedule a lease recovery attempt 1199 */ 1200 void nfs4_schedule_lease_recovery(struct nfs_client *clp) 1201 { 1202 if (!clp) 1203 return; 1204 if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) 1205 set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state); 1206 dprintk("%s: scheduling lease recovery for server %s\n", __func__, 1207 clp->cl_hostname); 1208 nfs4_schedule_state_manager(clp); 1209 } 1210 EXPORT_SYMBOL_GPL(nfs4_schedule_lease_recovery); 1211 1212 /* 1213 * nfs40_handle_cb_pathdown - return all delegations after NFS4ERR_CB_PATH_DOWN 1214 * @clp: client to process 1215 * 1216 * Set the NFS4CLNT_LEASE_EXPIRED state in order to force a 1217 * resend of the SETCLIENTID and hence re-establish the 1218 * callback channel. Then return all existing delegations. 1219 */ 1220 static void nfs40_handle_cb_pathdown(struct nfs_client *clp) 1221 { 1222 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); 1223 nfs_expire_all_delegations(clp); 1224 dprintk("%s: handling CB_PATHDOWN recovery for server %s\n", __func__, 1225 clp->cl_hostname); 1226 } 1227 1228 void nfs4_schedule_path_down_recovery(struct nfs_client *clp) 1229 { 1230 nfs40_handle_cb_pathdown(clp); 1231 nfs4_schedule_state_manager(clp); 1232 } 1233 1234 static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state) 1235 { 1236 1237 set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); 1238 /* Don't recover state that expired before the reboot */ 1239 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) { 1240 clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); 1241 return 0; 1242 } 1243 set_bit(NFS_OWNER_RECLAIM_REBOOT, &state->owner->so_flags); 1244 set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state); 1245 return 1; 1246 } 1247 1248 static int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state) 1249 { 1250 set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags); 1251 clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); 1252 set_bit(NFS_OWNER_RECLAIM_NOGRACE, &state->owner->so_flags); 1253 set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state); 1254 return 1; 1255 } 1256 1257 void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_state *state) 1258 { 1259 struct nfs_client *clp = server->nfs_client; 1260 1261 nfs4_state_mark_reclaim_nograce(clp, state); 1262 dprintk("%s: scheduling stateid recovery for server %s\n", __func__, 1263 clp->cl_hostname); 1264 nfs4_schedule_state_manager(clp); 1265 } 1266 EXPORT_SYMBOL_GPL(nfs4_schedule_stateid_recovery); 1267 1268 void nfs_inode_find_state_and_recover(struct inode *inode, 1269 const nfs4_stateid *stateid) 1270 { 1271 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 1272 struct nfs_inode *nfsi = NFS_I(inode); 1273 struct nfs_open_context *ctx; 1274 struct nfs4_state *state; 1275 bool found = false; 1276 1277 spin_lock(&inode->i_lock); 1278 list_for_each_entry(ctx, &nfsi->open_files, list) { 1279 state = ctx->state; 1280 if (state == NULL) 1281 continue; 1282 if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) 1283 continue; 1284 if (!nfs4_stateid_match(&state->stateid, stateid)) 1285 continue; 1286 nfs4_state_mark_reclaim_nograce(clp, state); 1287 found = true; 1288 } 1289 spin_unlock(&inode->i_lock); 1290 if (found) 1291 nfs4_schedule_state_manager(clp); 1292 } 1293 1294 1295 static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops) 1296 { 1297 struct inode *inode = state->inode; 1298 struct nfs_inode *nfsi = NFS_I(inode); 1299 struct file_lock *fl; 1300 int status = 0; 1301 1302 if (inode->i_flock == NULL) 1303 return 0; 1304 1305 /* Guard against delegation returns and new lock/unlock calls */ 1306 down_write(&nfsi->rwsem); 1307 /* Protect inode->i_flock using the BKL */ 1308 lock_flocks(); 1309 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { 1310 if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK))) 1311 continue; 1312 if (nfs_file_open_context(fl->fl_file)->state != state) 1313 continue; 1314 unlock_flocks(); 1315 status = ops->recover_lock(state, fl); 1316 switch (status) { 1317 case 0: 1318 break; 1319 case -ESTALE: 1320 case -NFS4ERR_ADMIN_REVOKED: 1321 case -NFS4ERR_STALE_STATEID: 1322 case -NFS4ERR_BAD_STATEID: 1323 case -NFS4ERR_EXPIRED: 1324 case -NFS4ERR_NO_GRACE: 1325 case -NFS4ERR_STALE_CLIENTID: 1326 case -NFS4ERR_BADSESSION: 1327 case -NFS4ERR_BADSLOT: 1328 case -NFS4ERR_BAD_HIGH_SLOT: 1329 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 1330 goto out; 1331 default: 1332 printk(KERN_ERR "NFS: %s: unhandled error %d. " 1333 "Zeroing state\n", __func__, status); 1334 case -ENOMEM: 1335 case -NFS4ERR_DENIED: 1336 case -NFS4ERR_RECLAIM_BAD: 1337 case -NFS4ERR_RECLAIM_CONFLICT: 1338 /* kill_proc(fl->fl_pid, SIGLOST, 1); */ 1339 status = 0; 1340 } 1341 lock_flocks(); 1342 } 1343 unlock_flocks(); 1344 out: 1345 up_write(&nfsi->rwsem); 1346 return status; 1347 } 1348 1349 static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops) 1350 { 1351 struct nfs4_state *state; 1352 struct nfs4_lock_state *lock; 1353 int status = 0; 1354 1355 /* Note: we rely on the sp->so_states list being ordered 1356 * so that we always reclaim open(O_RDWR) and/or open(O_WRITE) 1357 * states first. 1358 * This is needed to ensure that the server won't give us any 1359 * read delegations that we have to return if, say, we are 1360 * recovering after a network partition or a reboot from a 1361 * server that doesn't support a grace period. 1362 */ 1363 restart: 1364 spin_lock(&sp->so_lock); 1365 list_for_each_entry(state, &sp->so_states, open_states) { 1366 if (!test_and_clear_bit(ops->state_flag_bit, &state->flags)) 1367 continue; 1368 if (state->state == 0) 1369 continue; 1370 atomic_inc(&state->count); 1371 spin_unlock(&sp->so_lock); 1372 status = ops->recover_open(sp, state); 1373 if (status >= 0) { 1374 status = nfs4_reclaim_locks(state, ops); 1375 if (status >= 0) { 1376 spin_lock(&state->state_lock); 1377 list_for_each_entry(lock, &state->lock_states, ls_locks) { 1378 if (!test_bit(NFS_LOCK_INITIALIZED, &lock->ls_flags)) 1379 pr_warn_ratelimited("NFS: " 1380 "%s: Lock reclaim " 1381 "failed!\n", __func__); 1382 } 1383 spin_unlock(&state->state_lock); 1384 nfs4_put_open_state(state); 1385 goto restart; 1386 } 1387 } 1388 switch (status) { 1389 default: 1390 printk(KERN_ERR "NFS: %s: unhandled error %d. " 1391 "Zeroing state\n", __func__, status); 1392 case -ENOENT: 1393 case -ENOMEM: 1394 case -ESTALE: 1395 /* 1396 * Open state on this file cannot be recovered 1397 * All we can do is revert to using the zero stateid. 1398 */ 1399 memset(&state->stateid, 0, 1400 sizeof(state->stateid)); 1401 /* Mark the file as being 'closed' */ 1402 state->state = 0; 1403 break; 1404 case -EKEYEXPIRED: 1405 /* 1406 * User RPCSEC_GSS context has expired. 1407 * We cannot recover this stateid now, so 1408 * skip it and allow recovery thread to 1409 * proceed. 1410 */ 1411 break; 1412 case -NFS4ERR_ADMIN_REVOKED: 1413 case -NFS4ERR_STALE_STATEID: 1414 case -NFS4ERR_BAD_STATEID: 1415 case -NFS4ERR_RECLAIM_BAD: 1416 case -NFS4ERR_RECLAIM_CONFLICT: 1417 nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state); 1418 break; 1419 case -NFS4ERR_EXPIRED: 1420 case -NFS4ERR_NO_GRACE: 1421 nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state); 1422 case -NFS4ERR_STALE_CLIENTID: 1423 case -NFS4ERR_BADSESSION: 1424 case -NFS4ERR_BADSLOT: 1425 case -NFS4ERR_BAD_HIGH_SLOT: 1426 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 1427 goto out_err; 1428 } 1429 nfs4_put_open_state(state); 1430 goto restart; 1431 } 1432 spin_unlock(&sp->so_lock); 1433 return 0; 1434 out_err: 1435 nfs4_put_open_state(state); 1436 return status; 1437 } 1438 1439 static void nfs4_clear_open_state(struct nfs4_state *state) 1440 { 1441 struct nfs4_lock_state *lock; 1442 1443 clear_bit(NFS_DELEGATED_STATE, &state->flags); 1444 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1445 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 1446 clear_bit(NFS_O_RDWR_STATE, &state->flags); 1447 spin_lock(&state->state_lock); 1448 list_for_each_entry(lock, &state->lock_states, ls_locks) { 1449 lock->ls_seqid.flags = 0; 1450 clear_bit(NFS_LOCK_INITIALIZED, &lock->ls_flags); 1451 } 1452 spin_unlock(&state->state_lock); 1453 } 1454 1455 static void nfs4_reset_seqids(struct nfs_server *server, 1456 int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state)) 1457 { 1458 struct nfs_client *clp = server->nfs_client; 1459 struct nfs4_state_owner *sp; 1460 struct rb_node *pos; 1461 struct nfs4_state *state; 1462 1463 spin_lock(&clp->cl_lock); 1464 for (pos = rb_first(&server->state_owners); 1465 pos != NULL; 1466 pos = rb_next(pos)) { 1467 sp = rb_entry(pos, struct nfs4_state_owner, so_server_node); 1468 sp->so_seqid.flags = 0; 1469 spin_lock(&sp->so_lock); 1470 list_for_each_entry(state, &sp->so_states, open_states) { 1471 if (mark_reclaim(clp, state)) 1472 nfs4_clear_open_state(state); 1473 } 1474 spin_unlock(&sp->so_lock); 1475 } 1476 spin_unlock(&clp->cl_lock); 1477 } 1478 1479 static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, 1480 int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state)) 1481 { 1482 struct nfs_server *server; 1483 1484 rcu_read_lock(); 1485 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) 1486 nfs4_reset_seqids(server, mark_reclaim); 1487 rcu_read_unlock(); 1488 } 1489 1490 static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp) 1491 { 1492 /* Mark all delegations for reclaim */ 1493 nfs_delegation_mark_reclaim(clp); 1494 nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot); 1495 } 1496 1497 static void nfs4_reclaim_complete(struct nfs_client *clp, 1498 const struct nfs4_state_recovery_ops *ops) 1499 { 1500 /* Notify the server we're done reclaiming our state */ 1501 if (ops->reclaim_complete) 1502 (void)ops->reclaim_complete(clp); 1503 } 1504 1505 static void nfs4_clear_reclaim_server(struct nfs_server *server) 1506 { 1507 struct nfs_client *clp = server->nfs_client; 1508 struct nfs4_state_owner *sp; 1509 struct rb_node *pos; 1510 struct nfs4_state *state; 1511 1512 spin_lock(&clp->cl_lock); 1513 for (pos = rb_first(&server->state_owners); 1514 pos != NULL; 1515 pos = rb_next(pos)) { 1516 sp = rb_entry(pos, struct nfs4_state_owner, so_server_node); 1517 spin_lock(&sp->so_lock); 1518 list_for_each_entry(state, &sp->so_states, open_states) { 1519 if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT, 1520 &state->flags)) 1521 continue; 1522 nfs4_state_mark_reclaim_nograce(clp, state); 1523 } 1524 spin_unlock(&sp->so_lock); 1525 } 1526 spin_unlock(&clp->cl_lock); 1527 } 1528 1529 static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp) 1530 { 1531 struct nfs_server *server; 1532 1533 if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) 1534 return 0; 1535 1536 rcu_read_lock(); 1537 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) 1538 nfs4_clear_reclaim_server(server); 1539 rcu_read_unlock(); 1540 1541 nfs_delegation_reap_unclaimed(clp); 1542 return 1; 1543 } 1544 1545 static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp) 1546 { 1547 if (!nfs4_state_clear_reclaim_reboot(clp)) 1548 return; 1549 nfs4_reclaim_complete(clp, clp->cl_mvops->reboot_recovery_ops); 1550 } 1551 1552 static void nfs_delegation_clear_all(struct nfs_client *clp) 1553 { 1554 nfs_delegation_mark_reclaim(clp); 1555 nfs_delegation_reap_unclaimed(clp); 1556 } 1557 1558 static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp) 1559 { 1560 nfs_delegation_clear_all(clp); 1561 nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce); 1562 } 1563 1564 static void nfs4_warn_keyexpired(const char *s) 1565 { 1566 printk_ratelimited(KERN_WARNING "Error: state manager" 1567 " encountered RPCSEC_GSS session" 1568 " expired against NFSv4 server %s.\n", 1569 s); 1570 } 1571 1572 static int nfs4_recovery_handle_error(struct nfs_client *clp, int error) 1573 { 1574 switch (error) { 1575 case 0: 1576 break; 1577 case -NFS4ERR_CB_PATH_DOWN: 1578 nfs40_handle_cb_pathdown(clp); 1579 break; 1580 case -NFS4ERR_NO_GRACE: 1581 nfs4_state_end_reclaim_reboot(clp); 1582 break; 1583 case -NFS4ERR_STALE_CLIENTID: 1584 case -NFS4ERR_LEASE_MOVED: 1585 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); 1586 nfs4_state_clear_reclaim_reboot(clp); 1587 nfs4_state_start_reclaim_reboot(clp); 1588 break; 1589 case -NFS4ERR_EXPIRED: 1590 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); 1591 nfs4_state_start_reclaim_nograce(clp); 1592 break; 1593 case -NFS4ERR_BADSESSION: 1594 case -NFS4ERR_BADSLOT: 1595 case -NFS4ERR_BAD_HIGH_SLOT: 1596 case -NFS4ERR_DEADSESSION: 1597 case -NFS4ERR_SEQ_FALSE_RETRY: 1598 case -NFS4ERR_SEQ_MISORDERED: 1599 set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); 1600 /* Zero session reset errors */ 1601 break; 1602 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 1603 set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state); 1604 break; 1605 case -EKEYEXPIRED: 1606 /* Nothing we can do */ 1607 nfs4_warn_keyexpired(clp->cl_hostname); 1608 break; 1609 default: 1610 dprintk("%s: failed to handle error %d for server %s\n", 1611 __func__, error, clp->cl_hostname); 1612 return error; 1613 } 1614 dprintk("%s: handled error %d for server %s\n", __func__, error, 1615 clp->cl_hostname); 1616 return 0; 1617 } 1618 1619 static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops) 1620 { 1621 struct nfs4_state_owner *sp; 1622 struct nfs_server *server; 1623 struct rb_node *pos; 1624 int status = 0; 1625 1626 restart: 1627 rcu_read_lock(); 1628 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { 1629 nfs4_purge_state_owners(server); 1630 spin_lock(&clp->cl_lock); 1631 for (pos = rb_first(&server->state_owners); 1632 pos != NULL; 1633 pos = rb_next(pos)) { 1634 sp = rb_entry(pos, 1635 struct nfs4_state_owner, so_server_node); 1636 if (!test_and_clear_bit(ops->owner_flag_bit, 1637 &sp->so_flags)) 1638 continue; 1639 atomic_inc(&sp->so_count); 1640 spin_unlock(&clp->cl_lock); 1641 rcu_read_unlock(); 1642 1643 status = nfs4_reclaim_open_state(sp, ops); 1644 if (status < 0) { 1645 set_bit(ops->owner_flag_bit, &sp->so_flags); 1646 nfs4_put_state_owner(sp); 1647 return nfs4_recovery_handle_error(clp, status); 1648 } 1649 1650 nfs4_put_state_owner(sp); 1651 goto restart; 1652 } 1653 spin_unlock(&clp->cl_lock); 1654 } 1655 rcu_read_unlock(); 1656 return status; 1657 } 1658 1659 static int nfs4_check_lease(struct nfs_client *clp) 1660 { 1661 struct rpc_cred *cred; 1662 const struct nfs4_state_maintenance_ops *ops = 1663 clp->cl_mvops->state_renewal_ops; 1664 int status; 1665 1666 /* Is the client already known to have an expired lease? */ 1667 if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) 1668 return 0; 1669 spin_lock(&clp->cl_lock); 1670 cred = ops->get_state_renewal_cred_locked(clp); 1671 spin_unlock(&clp->cl_lock); 1672 if (cred == NULL) { 1673 cred = nfs4_get_setclientid_cred(clp); 1674 status = -ENOKEY; 1675 if (cred == NULL) 1676 goto out; 1677 } 1678 status = ops->renew_lease(clp, cred); 1679 put_rpccred(cred); 1680 out: 1681 return nfs4_recovery_handle_error(clp, status); 1682 } 1683 1684 /* Set NFS4CLNT_LEASE_EXPIRED and reclaim reboot state for all v4.0 errors 1685 * and for recoverable errors on EXCHANGE_ID for v4.1 1686 */ 1687 static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status) 1688 { 1689 switch (status) { 1690 case -NFS4ERR_SEQ_MISORDERED: 1691 if (test_and_set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) 1692 return -ESERVERFAULT; 1693 /* Lease confirmation error: retry after purging the lease */ 1694 ssleep(1); 1695 clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); 1696 break; 1697 case -NFS4ERR_STALE_CLIENTID: 1698 clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); 1699 nfs4_state_clear_reclaim_reboot(clp); 1700 nfs4_state_start_reclaim_reboot(clp); 1701 break; 1702 case -NFS4ERR_CLID_INUSE: 1703 pr_err("NFS: Server %s reports our clientid is in use\n", 1704 clp->cl_hostname); 1705 nfs_mark_client_ready(clp, -EPERM); 1706 clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); 1707 return -EPERM; 1708 case -EACCES: 1709 if (clp->cl_machine_cred == NULL) 1710 return -EACCES; 1711 /* Handle case where the user hasn't set up machine creds */ 1712 nfs4_clear_machine_cred(clp); 1713 case -NFS4ERR_DELAY: 1714 case -ETIMEDOUT: 1715 case -EAGAIN: 1716 ssleep(1); 1717 break; 1718 1719 case -NFS4ERR_MINOR_VERS_MISMATCH: 1720 if (clp->cl_cons_state == NFS_CS_SESSION_INITING) 1721 nfs_mark_client_ready(clp, -EPROTONOSUPPORT); 1722 dprintk("%s: exit with error %d for server %s\n", 1723 __func__, -EPROTONOSUPPORT, clp->cl_hostname); 1724 return -EPROTONOSUPPORT; 1725 case -EKEYEXPIRED: 1726 nfs4_warn_keyexpired(clp->cl_hostname); 1727 case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery 1728 * in nfs4_exchange_id */ 1729 default: 1730 dprintk("%s: exit with error %d for server %s\n", __func__, 1731 status, clp->cl_hostname); 1732 return status; 1733 } 1734 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); 1735 dprintk("%s: handled error %d for server %s\n", __func__, status, 1736 clp->cl_hostname); 1737 return 0; 1738 } 1739 1740 static int nfs4_establish_lease(struct nfs_client *clp) 1741 { 1742 struct rpc_cred *cred; 1743 const struct nfs4_state_recovery_ops *ops = 1744 clp->cl_mvops->reboot_recovery_ops; 1745 int status; 1746 1747 cred = ops->get_clid_cred(clp); 1748 if (cred == NULL) 1749 return -ENOENT; 1750 status = ops->establish_clid(clp, cred); 1751 put_rpccred(cred); 1752 if (status != 0) 1753 return status; 1754 pnfs_destroy_all_layouts(clp); 1755 return 0; 1756 } 1757 1758 /* 1759 * Returns zero or a negative errno. NFS4ERR values are converted 1760 * to local errno values. 1761 */ 1762 static int nfs4_reclaim_lease(struct nfs_client *clp) 1763 { 1764 int status; 1765 1766 status = nfs4_establish_lease(clp); 1767 if (status < 0) 1768 return nfs4_handle_reclaim_lease_error(clp, status); 1769 if (test_and_clear_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state)) 1770 nfs4_state_start_reclaim_nograce(clp); 1771 if (!test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) 1772 set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state); 1773 clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state); 1774 clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); 1775 return 0; 1776 } 1777 1778 static int nfs4_purge_lease(struct nfs_client *clp) 1779 { 1780 int status; 1781 1782 status = nfs4_establish_lease(clp); 1783 if (status < 0) 1784 return nfs4_handle_reclaim_lease_error(clp, status); 1785 clear_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state); 1786 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); 1787 nfs4_state_start_reclaim_nograce(clp); 1788 return 0; 1789 } 1790 1791 /** 1792 * nfs4_discover_server_trunking - Detect server IP address trunking 1793 * 1794 * @clp: nfs_client under test 1795 * @result: OUT: found nfs_client, or clp 1796 * 1797 * Returns zero or a negative errno. If zero is returned, 1798 * an nfs_client pointer is planted in "result". 1799 * 1800 * Note: since we are invoked in process context, and 1801 * not from inside the state manager, we cannot use 1802 * nfs4_handle_reclaim_lease_error(). 1803 */ 1804 int nfs4_discover_server_trunking(struct nfs_client *clp, 1805 struct nfs_client **result) 1806 { 1807 const struct nfs4_state_recovery_ops *ops = 1808 clp->cl_mvops->reboot_recovery_ops; 1809 rpc_authflavor_t *flavors, flav, save; 1810 struct rpc_clnt *clnt; 1811 struct rpc_cred *cred; 1812 int i, len, status; 1813 1814 dprintk("NFS: %s: testing '%s'\n", __func__, clp->cl_hostname); 1815 1816 len = NFS_MAX_SECFLAVORS; 1817 flavors = kcalloc(len, sizeof(*flavors), GFP_KERNEL); 1818 if (flavors == NULL) { 1819 status = -ENOMEM; 1820 goto out; 1821 } 1822 len = rpcauth_list_flavors(flavors, len); 1823 if (len < 0) { 1824 status = len; 1825 goto out_free; 1826 } 1827 clnt = clp->cl_rpcclient; 1828 save = clnt->cl_auth->au_flavor; 1829 i = 0; 1830 1831 mutex_lock(&nfs_clid_init_mutex); 1832 status = -ENOENT; 1833 again: 1834 cred = ops->get_clid_cred(clp); 1835 if (cred == NULL) 1836 goto out_unlock; 1837 1838 status = ops->detect_trunking(clp, result, cred); 1839 put_rpccred(cred); 1840 switch (status) { 1841 case 0: 1842 break; 1843 1844 case -EACCES: 1845 if (clp->cl_machine_cred == NULL) 1846 break; 1847 /* Handle case where the user hasn't set up machine creds */ 1848 nfs4_clear_machine_cred(clp); 1849 case -NFS4ERR_DELAY: 1850 case -ETIMEDOUT: 1851 case -EAGAIN: 1852 ssleep(1); 1853 dprintk("NFS: %s after status %d, retrying\n", 1854 __func__, status); 1855 goto again; 1856 1857 case -NFS4ERR_CLID_INUSE: 1858 case -NFS4ERR_WRONGSEC: 1859 status = -EPERM; 1860 if (i >= len) 1861 break; 1862 1863 flav = flavors[i++]; 1864 if (flav == save) 1865 flav = flavors[i++]; 1866 clnt = rpc_clone_client_set_auth(clnt, flav); 1867 if (IS_ERR(clnt)) { 1868 status = PTR_ERR(clnt); 1869 break; 1870 } 1871 clp->cl_rpcclient = clnt; 1872 goto again; 1873 1874 case -NFS4ERR_MINOR_VERS_MISMATCH: 1875 status = -EPROTONOSUPPORT; 1876 break; 1877 1878 case -EKEYEXPIRED: 1879 nfs4_warn_keyexpired(clp->cl_hostname); 1880 case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery 1881 * in nfs4_exchange_id */ 1882 status = -EKEYEXPIRED; 1883 } 1884 1885 out_unlock: 1886 mutex_unlock(&nfs_clid_init_mutex); 1887 out_free: 1888 kfree(flavors); 1889 out: 1890 dprintk("NFS: %s: status = %d\n", __func__, status); 1891 return status; 1892 } 1893 1894 #ifdef CONFIG_NFS_V4_1 1895 void nfs4_schedule_session_recovery(struct nfs4_session *session, int err) 1896 { 1897 struct nfs_client *clp = session->clp; 1898 1899 switch (err) { 1900 default: 1901 set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); 1902 break; 1903 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 1904 set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state); 1905 } 1906 nfs4_schedule_lease_recovery(clp); 1907 } 1908 EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery); 1909 1910 void nfs41_handle_recall_slot(struct nfs_client *clp) 1911 { 1912 set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); 1913 dprintk("%s: scheduling slot recall for server %s\n", __func__, 1914 clp->cl_hostname); 1915 nfs4_schedule_state_manager(clp); 1916 } 1917 1918 static void nfs4_reset_all_state(struct nfs_client *clp) 1919 { 1920 if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) { 1921 set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state); 1922 clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); 1923 nfs4_state_start_reclaim_nograce(clp); 1924 dprintk("%s: scheduling reset of all state for server %s!\n", 1925 __func__, clp->cl_hostname); 1926 nfs4_schedule_state_manager(clp); 1927 } 1928 } 1929 1930 static void nfs41_handle_server_reboot(struct nfs_client *clp) 1931 { 1932 if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) { 1933 nfs4_state_start_reclaim_reboot(clp); 1934 dprintk("%s: server %s rebooted!\n", __func__, 1935 clp->cl_hostname); 1936 nfs4_schedule_state_manager(clp); 1937 } 1938 } 1939 1940 static void nfs41_handle_state_revoked(struct nfs_client *clp) 1941 { 1942 nfs4_reset_all_state(clp); 1943 dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname); 1944 } 1945 1946 static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp) 1947 { 1948 /* This will need to handle layouts too */ 1949 nfs_expire_all_delegations(clp); 1950 dprintk("%s: Recallable state revoked on server %s!\n", __func__, 1951 clp->cl_hostname); 1952 } 1953 1954 static void nfs41_handle_backchannel_fault(struct nfs_client *clp) 1955 { 1956 nfs_expire_all_delegations(clp); 1957 if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0) 1958 nfs4_schedule_state_manager(clp); 1959 dprintk("%s: server %s declared a backchannel fault\n", __func__, 1960 clp->cl_hostname); 1961 } 1962 1963 static void nfs41_handle_cb_path_down(struct nfs_client *clp) 1964 { 1965 if (test_and_set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, 1966 &clp->cl_state) == 0) 1967 nfs4_schedule_state_manager(clp); 1968 } 1969 1970 void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags) 1971 { 1972 if (!flags) 1973 return; 1974 1975 dprintk("%s: \"%s\" (client ID %llx) flags=0x%08x\n", 1976 __func__, clp->cl_hostname, clp->cl_clientid, flags); 1977 1978 if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED) 1979 nfs41_handle_server_reboot(clp); 1980 if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED | 1981 SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED | 1982 SEQ4_STATUS_ADMIN_STATE_REVOKED | 1983 SEQ4_STATUS_LEASE_MOVED)) 1984 nfs41_handle_state_revoked(clp); 1985 if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED) 1986 nfs41_handle_recallable_state_revoked(clp); 1987 if (flags & SEQ4_STATUS_BACKCHANNEL_FAULT) 1988 nfs41_handle_backchannel_fault(clp); 1989 else if (flags & (SEQ4_STATUS_CB_PATH_DOWN | 1990 SEQ4_STATUS_CB_PATH_DOWN_SESSION)) 1991 nfs41_handle_cb_path_down(clp); 1992 } 1993 1994 static int nfs4_reset_session(struct nfs_client *clp) 1995 { 1996 struct rpc_cred *cred; 1997 int status; 1998 1999 if (!nfs4_has_session(clp)) 2000 return 0; 2001 nfs4_begin_drain_session(clp); 2002 cred = nfs4_get_exchange_id_cred(clp); 2003 status = nfs4_proc_destroy_session(clp->cl_session, cred); 2004 if (status && status != -NFS4ERR_BADSESSION && 2005 status != -NFS4ERR_DEADSESSION) { 2006 status = nfs4_recovery_handle_error(clp, status); 2007 goto out; 2008 } 2009 2010 memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN); 2011 status = nfs4_proc_create_session(clp, cred); 2012 if (status) { 2013 dprintk("%s: session reset failed with status %d for server %s!\n", 2014 __func__, status, clp->cl_hostname); 2015 status = nfs4_handle_reclaim_lease_error(clp, status); 2016 goto out; 2017 } 2018 nfs41_finish_session_reset(clp); 2019 dprintk("%s: session reset was successful for server %s!\n", 2020 __func__, clp->cl_hostname); 2021 out: 2022 if (cred) 2023 put_rpccred(cred); 2024 return status; 2025 } 2026 2027 static int nfs4_recall_slot(struct nfs_client *clp) 2028 { 2029 struct nfs4_slot_table *fc_tbl; 2030 struct nfs4_slot *new, *old; 2031 int i; 2032 2033 if (!nfs4_has_session(clp)) 2034 return 0; 2035 nfs4_begin_drain_session(clp); 2036 fc_tbl = &clp->cl_session->fc_slot_table; 2037 new = kmalloc(fc_tbl->target_max_slots * sizeof(struct nfs4_slot), 2038 GFP_NOFS); 2039 if (!new) 2040 return -ENOMEM; 2041 2042 spin_lock(&fc_tbl->slot_tbl_lock); 2043 for (i = 0; i < fc_tbl->target_max_slots; i++) 2044 new[i].seq_nr = fc_tbl->slots[i].seq_nr; 2045 old = fc_tbl->slots; 2046 fc_tbl->slots = new; 2047 fc_tbl->max_slots = fc_tbl->target_max_slots; 2048 fc_tbl->target_max_slots = 0; 2049 clp->cl_session->fc_attrs.max_reqs = fc_tbl->max_slots; 2050 spin_unlock(&fc_tbl->slot_tbl_lock); 2051 2052 kfree(old); 2053 return 0; 2054 } 2055 2056 static int nfs4_bind_conn_to_session(struct nfs_client *clp) 2057 { 2058 struct rpc_cred *cred; 2059 int ret; 2060 2061 if (!nfs4_has_session(clp)) 2062 return 0; 2063 nfs4_begin_drain_session(clp); 2064 cred = nfs4_get_exchange_id_cred(clp); 2065 ret = nfs4_proc_bind_conn_to_session(clp, cred); 2066 if (cred) 2067 put_rpccred(cred); 2068 clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state); 2069 switch (ret) { 2070 case 0: 2071 dprintk("%s: bind_conn_to_session was successful for server %s!\n", 2072 __func__, clp->cl_hostname); 2073 break; 2074 case -NFS4ERR_DELAY: 2075 ssleep(1); 2076 set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state); 2077 break; 2078 default: 2079 return nfs4_recovery_handle_error(clp, ret); 2080 } 2081 return 0; 2082 } 2083 #else /* CONFIG_NFS_V4_1 */ 2084 static int nfs4_reset_session(struct nfs_client *clp) { return 0; } 2085 static int nfs4_end_drain_session(struct nfs_client *clp) { return 0; } 2086 static int nfs4_recall_slot(struct nfs_client *clp) { return 0; } 2087 2088 static int nfs4_bind_conn_to_session(struct nfs_client *clp) 2089 { 2090 return 0; 2091 } 2092 #endif /* CONFIG_NFS_V4_1 */ 2093 2094 static void nfs4_state_manager(struct nfs_client *clp) 2095 { 2096 int status = 0; 2097 const char *section = "", *section_sep = ""; 2098 2099 /* Ensure exclusive access to NFSv4 state */ 2100 do { 2101 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) { 2102 section = "purge state"; 2103 status = nfs4_purge_lease(clp); 2104 if (status < 0) 2105 goto out_error; 2106 continue; 2107 } 2108 2109 if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) { 2110 section = "lease expired"; 2111 /* We're going to have to re-establish a clientid */ 2112 status = nfs4_reclaim_lease(clp); 2113 if (status < 0) 2114 goto out_error; 2115 continue; 2116 } 2117 2118 if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) { 2119 section = "check lease"; 2120 status = nfs4_check_lease(clp); 2121 if (status < 0) 2122 goto out_error; 2123 if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) 2124 continue; 2125 } 2126 2127 /* Initialize or reset the session */ 2128 if (test_and_clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)) { 2129 section = "reset session"; 2130 status = nfs4_reset_session(clp); 2131 if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) 2132 continue; 2133 if (status < 0) 2134 goto out_error; 2135 } 2136 2137 /* Send BIND_CONN_TO_SESSION */ 2138 if (test_and_clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, 2139 &clp->cl_state)) { 2140 section = "bind conn to session"; 2141 status = nfs4_bind_conn_to_session(clp); 2142 if (status < 0) 2143 goto out_error; 2144 continue; 2145 } 2146 2147 /* Recall session slots */ 2148 if (test_and_clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state)) { 2149 section = "recall slot"; 2150 status = nfs4_recall_slot(clp); 2151 if (status < 0) 2152 goto out_error; 2153 continue; 2154 } 2155 2156 /* First recover reboot state... */ 2157 if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) { 2158 section = "reclaim reboot"; 2159 status = nfs4_do_reclaim(clp, 2160 clp->cl_mvops->reboot_recovery_ops); 2161 if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) || 2162 test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)) 2163 continue; 2164 nfs4_state_end_reclaim_reboot(clp); 2165 if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) 2166 continue; 2167 if (status < 0) 2168 goto out_error; 2169 } 2170 2171 /* Now recover expired state... */ 2172 if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) { 2173 section = "reclaim nograce"; 2174 status = nfs4_do_reclaim(clp, 2175 clp->cl_mvops->nograce_recovery_ops); 2176 if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) || 2177 test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) || 2178 test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) 2179 continue; 2180 if (status < 0) 2181 goto out_error; 2182 } 2183 2184 nfs4_end_drain_session(clp); 2185 if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) { 2186 nfs_client_return_marked_delegations(clp); 2187 continue; 2188 } 2189 2190 nfs4_clear_state_manager_bit(clp); 2191 /* Did we race with an attempt to give us more work? */ 2192 if (clp->cl_state == 0) 2193 break; 2194 if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) 2195 break; 2196 } while (atomic_read(&clp->cl_count) > 1); 2197 return; 2198 out_error: 2199 if (strlen(section)) 2200 section_sep = ": "; 2201 pr_warn_ratelimited("NFS: state manager%s%s failed on NFSv4 server %s" 2202 " with error %d\n", section_sep, section, 2203 clp->cl_hostname, -status); 2204 ssleep(1); 2205 nfs4_end_drain_session(clp); 2206 nfs4_clear_state_manager_bit(clp); 2207 } 2208 2209 static int nfs4_run_state_manager(void *ptr) 2210 { 2211 struct nfs_client *clp = ptr; 2212 2213 allow_signal(SIGKILL); 2214 nfs4_state_manager(clp); 2215 nfs_put_client(clp); 2216 module_put_and_exit(0); 2217 return 0; 2218 } 2219 2220 /* 2221 * Local variables: 2222 * c-basic-offset: 8 2223 * End: 2224 */ 2225