/*
 * fs/nfs/nfs4state.c
 *
 * Client-side XDR for NFSv4.
 *
 * Copyright (c) 2002 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Implementation of the NFSv4 state model.  For the time being,
 * this is minimal, but will be made much more complex in a
 * subsequent patch.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"

#define OPENOWNER_POOL_SIZE	8

const nfs4_stateid zero_stateid;

static LIST_HEAD(nfs4_clientid_list);

int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
{
        struct nfs4_setclientid_res clid;
        unsigned short port;
        int status;

        port = nfs_callback_tcpport;
        if (clp->cl_addr.ss_family == AF_INET6)
                port = nfs_callback_tcpport6;

        status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid);
        if (status != 0)
                goto out;
        status = nfs4_proc_setclientid_confirm(clp, &clid, cred);
        if (status != 0)
                goto out;
        clp->cl_clientid = clid.clientid;
        nfs4_schedule_state_renewal(clp);
out:
        return status;
}

struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
{
        struct rpc_cred *cred = NULL;

        if (clp->cl_machine_cred != NULL)
                cred = get_rpccred(clp->cl_machine_cred);
        return cred;
}

static void nfs4_clear_machine_cred(struct nfs_client *clp)
{
        struct rpc_cred *cred;

        spin_lock(&clp->cl_lock);
        cred = clp->cl_machine_cred;
        clp->cl_machine_cred = NULL;
        spin_unlock(&clp->cl_lock);
        if (cred != NULL)
                put_rpccred(cred);
}

struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
{
        struct nfs4_state_owner *sp;
        struct rb_node *pos;
        struct rpc_cred *cred = NULL;

        for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
                sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
                if (list_empty(&sp->so_states))
                        continue;
                cred = get_rpccred(sp->so_cred);
                break;
        }
        return cred;
}

#if defined(CONFIG_NFS_V4_1)

static int nfs41_setup_state_renewal(struct nfs_client *clp)
{
        int status;
        struct nfs_fsinfo fsinfo;

        status = nfs4_proc_get_lease_time(clp, &fsinfo);
        if (status == 0) {
                /* Update lease time and schedule renewal */
                spin_lock(&clp->cl_lock);
                clp->cl_lease_time = fsinfo.lease_time * HZ;
                clp->cl_last_renewal = jiffies;
                spin_unlock(&clp->cl_lock);

                nfs4_schedule_state_renewal(clp);
        }

        return status;
}

static void nfs4_end_drain_session(struct nfs_client *clp)
{
        struct nfs4_session *ses = clp->cl_session;
        int max_slots;

        if (ses == NULL)
                return;
        if (test_and_clear_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
                spin_lock(&ses->fc_slot_table.slot_tbl_lock);
                max_slots = ses->fc_slot_table.max_slots;
                while (max_slots--) {
                        struct rpc_task *task;

                        task = rpc_wake_up_next(&ses->fc_slot_table.slot_tbl_waitq);
                        if (!task)
                                break;
                        rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
                }
                spin_unlock(&ses->fc_slot_table.slot_tbl_lock);
        }
}

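/*
 * Drain the session: mark the slot table as draining so that no new
 * requests are admitted, and wait until any slots still in use have
 * been released.  Recovery operations (e.g. DESTROY_SESSION or
 * EXCHANGE_ID) can then run against a quiesced session.
 */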
static int nfs4_begin_drain_session(struct nfs_client *clp)
{
        struct nfs4_session *ses = clp->cl_session;
        struct nfs4_slot_table *tbl = &ses->fc_slot_table;

        spin_lock(&tbl->slot_tbl_lock);
        set_bit(NFS4_SESSION_DRAINING, &ses->session_state);
        if (tbl->highest_used_slotid != -1) {
                INIT_COMPLETION(ses->complete);
                spin_unlock(&tbl->slot_tbl_lock);
                return wait_for_completion_interruptible(&ses->complete);
        }
        spin_unlock(&tbl->slot_tbl_lock);
        return 0;
}

int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
{
        int status;

        nfs4_begin_drain_session(clp);
        status = nfs4_proc_exchange_id(clp, cred);
        if (status != 0)
                goto out;
        status = nfs4_proc_create_session(clp);
        if (status != 0)
                goto out;
        nfs41_setup_state_renewal(clp);
        nfs_mark_client_ready(clp, NFS_CS_READY);
out:
        return status;
}

struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp)
{
        struct rpc_cred *cred;

        spin_lock(&clp->cl_lock);
        cred = nfs4_get_machine_cred_locked(clp);
        spin_unlock(&clp->cl_lock);
        return cred;
}

#endif /* CONFIG_NFS_V4_1 */

struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
{
        struct nfs4_state_owner *sp;
        struct rb_node *pos;
        struct rpc_cred *cred;

        spin_lock(&clp->cl_lock);
        cred = nfs4_get_machine_cred_locked(clp);
        if (cred != NULL)
                goto out;
        pos = rb_first(&clp->cl_state_owners);
        if (pos != NULL) {
                sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
                cred = get_rpccred(sp->so_cred);
        }
out:
        spin_unlock(&clp->cl_lock);
        return cred;
}

/*
 * Allocate a unique 64-bit identifier and insert it into the rb-tree
 * at @root.  The id is drawn at random (to keep the distribution flat)
 * and then probed linearly past any ids that are already in use,
 * wrapping around to @minval if necessary.
 */
static void nfs_alloc_unique_id(struct rb_root *root, struct nfs_unique_id *new,
                __u64 minval, int maxbits)
{
        struct rb_node **p, *parent;
        struct nfs_unique_id *pos;
        __u64 mask = ~0ULL;

        if (maxbits < 64)
                mask = (1ULL << maxbits) - 1ULL;

        /* Ensure distribution is more or less flat */
        get_random_bytes(&new->id, sizeof(new->id));
        new->id &= mask;
        if (new->id < minval)
                new->id += minval;
retry:
        p = &root->rb_node;
        parent = NULL;

        while (*p != NULL) {
                parent = *p;
                pos = rb_entry(parent, struct nfs_unique_id, rb_node);

                if (new->id < pos->id)
                        p = &(*p)->rb_left;
                else if (new->id > pos->id)
                        p = &(*p)->rb_right;
                else
                        goto id_exists;
        }
        rb_link_node(&new->rb_node, parent, p);
        rb_insert_color(&new->rb_node, root);
        return;
id_exists:
        for (;;) {
                new->id++;
                if (new->id < minval || (new->id & mask) != new->id) {
                        new->id = minval;
                        break;
                }
                parent = rb_next(parent);
                if (parent == NULL)
                        break;
                pos = rb_entry(parent, struct nfs_unique_id, rb_node);
                if (new->id < pos->id)
                        break;
        }
        goto retry;
}

static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id)
{
        rb_erase(&id->rb_node, root);
}

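/*
 * Look up a state owner in the client's cl_state_owners rb-tree,
 * which is keyed on the (server, cred) pair.  Returns a referenced
 * state owner, or NULL if none matches.  Caller must hold
 * clp->cl_lock.
 */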
static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
        struct nfs_client *clp = server->nfs_client;
        struct rb_node **p = &clp->cl_state_owners.rb_node,
                       *parent = NULL;
        struct nfs4_state_owner *sp, *res = NULL;

        while (*p != NULL) {
                parent = *p;
                sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);

                if (server < sp->so_server) {
                        p = &parent->rb_left;
                        continue;
                }
                if (server > sp->so_server) {
                        p = &parent->rb_right;
                        continue;
                }
                if (cred < sp->so_cred)
                        p = &parent->rb_left;
                else if (cred > sp->so_cred)
                        p = &parent->rb_right;
                else {
                        atomic_inc(&sp->so_count);
                        res = sp;
                        break;
                }
        }
        return res;
}

static struct nfs4_state_owner *
nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
{
        struct rb_node **p = &clp->cl_state_owners.rb_node,
                       *parent = NULL;
        struct nfs4_state_owner *sp;

        while (*p != NULL) {
                parent = *p;
                sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);

                if (new->so_server < sp->so_server) {
                        p = &parent->rb_left;
                        continue;
                }
                if (new->so_server > sp->so_server) {
                        p = &parent->rb_right;
                        continue;
                }
                if (new->so_cred < sp->so_cred)
                        p = &parent->rb_left;
                else if (new->so_cred > sp->so_cred)
                        p = &parent->rb_right;
                else {
                        atomic_inc(&sp->so_count);
                        return sp;
                }
        }
        nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64);
        rb_link_node(&new->so_client_node, parent, p);
        rb_insert_color(&new->so_client_node, &clp->cl_state_owners);
        return new;
}

static void
nfs4_remove_state_owner(struct nfs_client *clp, struct nfs4_state_owner *sp)
{
        if (!RB_EMPTY_NODE(&sp->so_client_node))
                rb_erase(&sp->so_client_node, &clp->cl_state_owners);
        nfs_free_unique_id(&clp->cl_openowner_id, &sp->so_owner_id);
}

/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 *
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(void)
{
        struct nfs4_state_owner *sp;

        sp = kzalloc(sizeof(*sp), GFP_NOFS);
        if (!sp)
                return NULL;
        spin_lock_init(&sp->so_lock);
        INIT_LIST_HEAD(&sp->so_states);
        rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
        sp->so_seqid.sequence = &sp->so_sequence;
        spin_lock_init(&sp->so_sequence.lock);
        INIT_LIST_HEAD(&sp->so_sequence.list);
        atomic_set(&sp->so_count, 1);
        return sp;
}

static void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
        if (!RB_EMPTY_NODE(&sp->so_client_node)) {
                struct nfs_client *clp = sp->so_server->nfs_client;

                spin_lock(&clp->cl_lock);
                rb_erase(&sp->so_client_node, &clp->cl_state_owners);
                RB_CLEAR_NODE(&sp->so_client_node);
                spin_unlock(&clp->cl_lock);
        }
}

struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
        struct nfs_client *clp = server->nfs_client;
        struct nfs4_state_owner *sp, *new;

        spin_lock(&clp->cl_lock);
        sp = nfs4_find_state_owner(server, cred);
        spin_unlock(&clp->cl_lock);
        if (sp != NULL)
                return sp;
        new = nfs4_alloc_state_owner();
        if (new == NULL)
                return NULL;
        new->so_server = server;
        new->so_cred = cred;
        spin_lock(&clp->cl_lock);
        sp = nfs4_insert_state_owner(clp, new);
        spin_unlock(&clp->cl_lock);
        if (sp == new)
                get_rpccred(cred);
        else {
                rpc_destroy_wait_queue(&new->so_sequence.wait);
                kfree(new);
        }
        return sp;
}

void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
        struct nfs_client *clp = sp->so_server->nfs_client;
        struct rpc_cred *cred = sp->so_cred;

        if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
                return;
        nfs4_remove_state_owner(clp, sp);
        spin_unlock(&clp->cl_lock);
        rpc_destroy_wait_queue(&sp->so_sequence.wait);
        put_rpccred(cred);
        kfree(sp);
}

static struct nfs4_state *
nfs4_alloc_open_state(void)
{
        struct nfs4_state *state;

        state = kzalloc(sizeof(*state), GFP_NOFS);
        if (!state)
                return NULL;
        atomic_set(&state->count, 1);
        INIT_LIST_HEAD(&state->lock_states);
        spin_lock_init(&state->state_lock);
        seqlock_init(&state->seqlock);
        return state;
}

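/*
 * Record the new open mode in state->state.  Writable states are kept
 * at the head of the owner's so_states list and read-only ones at the
 * tail, because the reclaim code must recover write state first (see
 * the ordering note in nfs4_reclaim_open_state()).  Caller must hold
 * owner->so_lock.
 */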
void
nfs4_state_set_mode_locked(struct nfs4_state *state, fmode_t fmode)
{
        if (state->state == fmode)
                return;
        /* NB! List reordering - see the reclaim code for why. */
        if ((fmode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
                if (fmode & FMODE_WRITE)
                        list_move(&state->open_states, &state->owner->so_states);
                else
                        list_move_tail(&state->open_states, &state->owner->so_states);
        }
        state->state = fmode;
}

static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs4_state *state;

        list_for_each_entry(state, &nfsi->open_states, inode_states) {
                if (state->owner != owner)
                        continue;
                if (atomic_inc_not_zero(&state->count))
                        return state;
        }
        return NULL;
}

static void
nfs4_free_open_state(struct nfs4_state *state)
{
        kfree(state);
}

struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
        struct nfs4_state *state, *new;
        struct nfs_inode *nfsi = NFS_I(inode);

        spin_lock(&inode->i_lock);
        state = __nfs4_find_state_byowner(inode, owner);
        spin_unlock(&inode->i_lock);
        if (state)
                goto out;
        new = nfs4_alloc_open_state();
        spin_lock(&owner->so_lock);
        spin_lock(&inode->i_lock);
        state = __nfs4_find_state_byowner(inode, owner);
        if (state == NULL && new != NULL) {
                state = new;
                state->owner = owner;
                atomic_inc(&owner->so_count);
                list_add(&state->inode_states, &nfsi->open_states);
                state->inode = igrab(inode);
                spin_unlock(&inode->i_lock);
                /* Note: The reclaim code dictates that we add stateless
                 * and read-only stateids to the end of the list */
                list_add_tail(&state->open_states, &owner->so_states);
                spin_unlock(&owner->so_lock);
        } else {
                spin_unlock(&inode->i_lock);
                spin_unlock(&owner->so_lock);
                if (new)
                        nfs4_free_open_state(new);
        }
out:
        return state;
}

void nfs4_put_open_state(struct nfs4_state *state)
{
        struct inode *inode = state->inode;
        struct nfs4_state_owner *owner = state->owner;

        if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
                return;
        spin_lock(&inode->i_lock);
        list_del(&state->inode_states);
        list_del(&state->open_states);
        spin_unlock(&inode->i_lock);
        spin_unlock(&owner->so_lock);
        iput(inode);
        nfs4_free_open_state(state);
        nfs4_put_state_owner(owner);
}

544 */ 545 static void __nfs4_close(struct path *path, struct nfs4_state *state, 546 fmode_t fmode, gfp_t gfp_mask, int wait) 547 { 548 struct nfs4_state_owner *owner = state->owner; 549 int call_close = 0; 550 fmode_t newstate; 551 552 atomic_inc(&owner->so_count); 553 /* Protect against nfs4_find_state() */ 554 spin_lock(&owner->so_lock); 555 switch (fmode & (FMODE_READ | FMODE_WRITE)) { 556 case FMODE_READ: 557 state->n_rdonly--; 558 break; 559 case FMODE_WRITE: 560 state->n_wronly--; 561 break; 562 case FMODE_READ|FMODE_WRITE: 563 state->n_rdwr--; 564 } 565 newstate = FMODE_READ|FMODE_WRITE; 566 if (state->n_rdwr == 0) { 567 if (state->n_rdonly == 0) { 568 newstate &= ~FMODE_READ; 569 call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags); 570 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); 571 } 572 if (state->n_wronly == 0) { 573 newstate &= ~FMODE_WRITE; 574 call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags); 575 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); 576 } 577 if (newstate == 0) 578 clear_bit(NFS_DELEGATED_STATE, &state->flags); 579 } 580 nfs4_state_set_mode_locked(state, newstate); 581 spin_unlock(&owner->so_lock); 582 583 if (!call_close) { 584 nfs4_put_open_state(state); 585 nfs4_put_state_owner(owner); 586 } else 587 nfs4_do_close(path, state, gfp_mask, wait); 588 } 589 590 void nfs4_close_state(struct path *path, struct nfs4_state *state, fmode_t fmode) 591 { 592 __nfs4_close(path, state, fmode, GFP_NOFS, 0); 593 } 594 595 void nfs4_close_sync(struct path *path, struct nfs4_state *state, fmode_t fmode) 596 { 597 __nfs4_close(path, state, fmode, GFP_KERNEL, 1); 598 } 599 600 /* 601 * Search the state->lock_states for an existing lock_owner 602 * that is compatible with current->files 603 */ 604 static struct nfs4_lock_state * 605 __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type) 606 { 607 struct nfs4_lock_state *pos; 608 list_for_each_entry(pos, &state->lock_states, ls_locks) { 609 if (type != NFS4_ANY_LOCK_TYPE && pos->ls_owner.lo_type != type) 610 continue; 611 switch (pos->ls_owner.lo_type) { 612 case NFS4_POSIX_LOCK_TYPE: 613 if (pos->ls_owner.lo_u.posix_owner != fl_owner) 614 continue; 615 break; 616 case NFS4_FLOCK_LOCK_TYPE: 617 if (pos->ls_owner.lo_u.flock_owner != fl_pid) 618 continue; 619 } 620 atomic_inc(&pos->ls_count); 621 return pos; 622 } 623 return NULL; 624 } 625 626 /* 627 * Return a compatible lock_state. If no initialized lock_state structure 628 * exists, return an uninitialized one. 
/*
 * Allocate and initialize a new lock_state for the given open state
 * and lock owner.  The lock stateid itself is not set up here; that
 * happens once the first LOCK request has been confirmed.
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type)
{
        struct nfs4_lock_state *lsp;
        struct nfs_client *clp = state->owner->so_server->nfs_client;

        lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
        if (lsp == NULL)
                return NULL;
        rpc_init_wait_queue(&lsp->ls_sequence.wait, "lock_seqid_waitqueue");
        spin_lock_init(&lsp->ls_sequence.lock);
        INIT_LIST_HEAD(&lsp->ls_sequence.list);
        lsp->ls_seqid.sequence = &lsp->ls_sequence;
        atomic_set(&lsp->ls_count, 1);
        lsp->ls_state = state;
        lsp->ls_owner.lo_type = type;
        switch (lsp->ls_owner.lo_type) {
        case NFS4_FLOCK_LOCK_TYPE:
                lsp->ls_owner.lo_u.flock_owner = fl_pid;
                break;
        case NFS4_POSIX_LOCK_TYPE:
                lsp->ls_owner.lo_u.posix_owner = fl_owner;
                break;
        default:
                kfree(lsp);
                return NULL;
        }
        spin_lock(&clp->cl_lock);
        nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
        spin_unlock(&clp->cl_lock);
        INIT_LIST_HEAD(&lsp->ls_locks);
        return lsp;
}

static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
{
        struct nfs_client *clp = lsp->ls_state->owner->so_server->nfs_client;

        spin_lock(&clp->cl_lock);
        nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);
        spin_unlock(&clp->cl_lock);
        rpc_destroy_wait_queue(&lsp->ls_sequence.wait);
        kfree(lsp);
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 *
 */
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner, pid_t pid, unsigned int type)
{
        struct nfs4_lock_state *lsp, *new = NULL;

        for (;;) {
                spin_lock(&state->state_lock);
                lsp = __nfs4_find_lock_state(state, owner, pid, type);
                if (lsp != NULL)
                        break;
                if (new != NULL) {
                        list_add(&new->ls_locks, &state->lock_states);
                        set_bit(LK_STATE_IN_USE, &state->flags);
                        lsp = new;
                        new = NULL;
                        break;
                }
                spin_unlock(&state->state_lock);
                new = nfs4_alloc_lock_state(state, owner, pid, type);
                if (new == NULL)
                        return NULL;
        }
        spin_unlock(&state->state_lock);
        if (new != NULL)
                nfs4_free_lock_state(new);
        return lsp;
}

/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
        struct nfs4_state *state;

        if (lsp == NULL)
                return;
        state = lsp->ls_state;
        if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
                return;
        list_del(&lsp->ls_locks);
        if (list_empty(&state->lock_states))
                clear_bit(LK_STATE_IN_USE, &state->flags);
        spin_unlock(&state->state_lock);
        if (lsp->ls_flags & NFS_LOCK_INITIALIZED)
                nfs4_release_lockowner(lsp);
        nfs4_free_lock_state(lsp);
}

static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
        struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;

        dst->fl_u.nfs4_fl.owner = lsp;
        atomic_inc(&lsp->ls_count);
}

static void nfs4_fl_release_lock(struct file_lock *fl)
{
        nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}

static const struct file_lock_operations nfs4_fl_lock_ops = {
        .fl_copy_lock = nfs4_fl_copy_lock,
        .fl_release_private = nfs4_fl_release_lock,
};

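/*
 * Attach an nfs4_lock_state to the file_lock's private data, so that
 * subsequent read/write requests covered by the lock can pick up the
 * correct lock stateid (see nfs4_copy_stateid() below).  The reference
 * is dropped again via fl_release_private.
 */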
int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
        struct nfs4_lock_state *lsp;

        if (fl->fl_ops != NULL)
                return 0;
        if (fl->fl_flags & FL_POSIX)
                lsp = nfs4_get_lock_state(state, fl->fl_owner, 0, NFS4_POSIX_LOCK_TYPE);
        else if (fl->fl_flags & FL_FLOCK)
                lsp = nfs4_get_lock_state(state, 0, fl->fl_pid, NFS4_FLOCK_LOCK_TYPE);
        else
                return -EINVAL;
        if (lsp == NULL)
                return -ENOMEM;
        fl->fl_u.nfs4_fl.owner = lsp;
        fl->fl_ops = &nfs4_fl_lock_ops;
        return 0;
}

/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid)
{
        struct nfs4_lock_state *lsp;
        int seq;

        do {
                seq = read_seqbegin(&state->seqlock);
                memcpy(dst, &state->stateid, sizeof(*dst));
        } while (read_seqretry(&state->seqlock, seq));
        if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
                return;

        spin_lock(&state->state_lock);
        lsp = __nfs4_find_lock_state(state, fl_owner, fl_pid, NFS4_ANY_LOCK_TYPE);
        if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
                memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
        spin_unlock(&state->state_lock);
        nfs4_put_lock_state(lsp);
}

struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask)
{
        struct nfs_seqid *new;

        new = kmalloc(sizeof(*new), gfp_mask);
        if (new != NULL) {
                new->sequence = counter;
                INIT_LIST_HEAD(&new->list);
        }
        return new;
}

void nfs_release_seqid(struct nfs_seqid *seqid)
{
        if (!list_empty(&seqid->list)) {
                struct rpc_sequence *sequence = seqid->sequence->sequence;

                spin_lock(&sequence->lock);
                list_del_init(&seqid->list);
                spin_unlock(&sequence->lock);
                rpc_wake_up(&sequence->wait);
        }
}

void nfs_free_seqid(struct nfs_seqid *seqid)
{
        nfs_release_seqid(seqid);
        kfree(seqid);
}

/*
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
{
        BUG_ON(list_first_entry(&seqid->sequence->sequence->list, struct nfs_seqid, list) != seqid);
        switch (status) {
        case 0:
                break;
        case -NFS4ERR_BAD_SEQID:
                if (seqid->sequence->flags & NFS_SEQID_CONFIRMED)
                        return;
                printk(KERN_WARNING "NFS: v4 server returned a bad"
                                " sequence-id error on an"
                                " unconfirmed sequence %p!\n",
                                seqid->sequence);
                /* fall through */
        case -NFS4ERR_STALE_CLIENTID:
        case -NFS4ERR_STALE_STATEID:
        case -NFS4ERR_BAD_STATEID:
        case -NFS4ERR_BADXDR:
        case -NFS4ERR_RESOURCE:
        case -NFS4ERR_NOFILEHANDLE:
                /* Non-seqid mutating errors */
                return;
        }
        /*
         * Note: no locking needed as we are guaranteed to be first
         * on the sequence list
         */
        seqid->sequence->counter++;
}

void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
        struct nfs4_state_owner *sp = container_of(seqid->sequence,
                                        struct nfs4_state_owner, so_seqid);
        struct nfs_server *server = sp->so_server;

        if (status == -NFS4ERR_BAD_SEQID)
                nfs4_drop_state_owner(sp);
        if (!nfs4_has_session(server->nfs_client))
                nfs_increment_seqid(status, seqid);
}

/*
 * Increment the seqid if the LOCK/LOCKU succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
{
        nfs_increment_seqid(status, seqid);
}

int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
        struct rpc_sequence *sequence = seqid->sequence->sequence;
        int status = 0;

        spin_lock(&sequence->lock);
        if (list_empty(&seqid->list))
                list_add_tail(&seqid->list, &sequence->list);
        if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid)
                goto unlock;
        rpc_sleep_on(&sequence->wait, task, NULL);
        status = -EAGAIN;
unlock:
        spin_unlock(&sequence->lock);
        return status;
}

static int nfs4_run_state_manager(void *);

static void nfs4_clear_state_manager_bit(struct nfs_client *clp)
{
        smp_mb__before_clear_bit();
        clear_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
        smp_mb__after_clear_bit();
        wake_up_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING);
        rpc_wake_up(&clp->cl_rpcwaitq);
}

/*
 * Schedule the nfs_client asynchronous state management routine
 */
void nfs4_schedule_state_manager(struct nfs_client *clp)
{
        struct task_struct *task;

        if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
                return;
        __module_get(THIS_MODULE);
        atomic_inc(&clp->cl_count);
        task = kthread_run(nfs4_run_state_manager, clp, "%s-manager",
                                rpc_peeraddr2str(clp->cl_rpcclient,
                                                        RPC_DISPLAY_ADDR));
        if (!IS_ERR(task))
                return;
        nfs4_clear_state_manager_bit(clp);
        nfs_put_client(clp);
        module_put(THIS_MODULE);
}

/*
 * Schedule a state recovery attempt
 */
void nfs4_schedule_state_recovery(struct nfs_client *clp)
{
        if (!clp)
                return;
        if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
                set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
        nfs4_schedule_state_manager(clp);
}

int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
{
        set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
        /* Don't recover state that expired before the reboot */
        if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) {
                clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
                return 0;
        }
        set_bit(NFS_OWNER_RECLAIM_REBOOT, &state->owner->so_flags);
        set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
        return 1;
}

int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
{
        set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
        clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
        set_bit(NFS_OWNER_RECLAIM_NOGRACE, &state->owner->so_flags);
        set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
        return 1;
}

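/*
 * Re-establish all byte-range locks held on @state after the server
 * has lost or revoked its state: each lock on the inode that belongs
 * to this open context is re-sent via ops->recover_lock().
 */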
static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
{
        struct inode *inode = state->inode;
        struct nfs_inode *nfsi = NFS_I(inode);
        struct file_lock *fl;
        int status = 0;

        if (inode->i_flock == NULL)
                return 0;

        /* Guard against delegation returns and new lock/unlock calls */
        down_write(&nfsi->rwsem);
        /* Protect inode->i_flock using the BKL */
        lock_kernel();
        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
                        continue;
                if (nfs_file_open_context(fl->fl_file)->state != state)
                        continue;
                unlock_kernel();
                status = ops->recover_lock(state, fl);
                switch (status) {
                case 0:
                        break;
                case -ESTALE:
                case -NFS4ERR_ADMIN_REVOKED:
                case -NFS4ERR_STALE_STATEID:
                case -NFS4ERR_BAD_STATEID:
                case -NFS4ERR_EXPIRED:
                case -NFS4ERR_NO_GRACE:
                case -NFS4ERR_STALE_CLIENTID:
                case -NFS4ERR_BADSESSION:
                case -NFS4ERR_BADSLOT:
                case -NFS4ERR_BAD_HIGH_SLOT:
                case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
                        goto out;
                default:
                        printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
                                        __func__, status);
                        /* fall through */
                case -ENOMEM:
                case -NFS4ERR_DENIED:
                case -NFS4ERR_RECLAIM_BAD:
                case -NFS4ERR_RECLAIM_CONFLICT:
                        /* kill_proc(fl->fl_pid, SIGLOST, 1); */
                        status = 0;
                }
                lock_kernel();
        }
        unlock_kernel();
out:
        up_write(&nfsi->rwsem);
        return status;
}

static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops)
{
        struct nfs4_state *state;
        struct nfs4_lock_state *lock;
        int status = 0;

        /* Note: we rely on the sp->so_states list being ordered
         * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
         * states first.
         * This is needed to ensure that the server won't give us any
         * read delegations that we have to return if, say, we are
         * recovering after a network partition or a reboot from a
         * server that doesn't support a grace period.
         */
restart:
        spin_lock(&sp->so_lock);
        list_for_each_entry(state, &sp->so_states, open_states) {
                if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
                        continue;
                if (state->state == 0)
                        continue;
                atomic_inc(&state->count);
                spin_unlock(&sp->so_lock);
                status = ops->recover_open(sp, state);
                if (status >= 0) {
                        status = nfs4_reclaim_locks(state, ops);
                        if (status >= 0) {
                                list_for_each_entry(lock, &state->lock_states, ls_locks) {
                                        if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
                                                printk(KERN_ERR "%s: Lock reclaim failed!\n",
                                                                __func__);
                                }
                                nfs4_put_open_state(state);
                                goto restart;
                        }
                }
                switch (status) {
                default:
                        printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
                                        __func__, status);
                        /* fall through */
                case -ENOENT:
                case -ENOMEM:
                case -ESTALE:
                        /*
                         * Open state on this file cannot be recovered.
                         * All we can do is revert to using the zero stateid.
                         */
                        memset(state->stateid.data, 0,
                                sizeof(state->stateid.data));
                        /* Mark the file as being 'closed' */
                        state->state = 0;
                        break;
                case -NFS4ERR_ADMIN_REVOKED:
                case -NFS4ERR_STALE_STATEID:
                case -NFS4ERR_BAD_STATEID:
                case -NFS4ERR_RECLAIM_BAD:
                case -NFS4ERR_RECLAIM_CONFLICT:
                        nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
                        break;
                case -NFS4ERR_EXPIRED:
                case -NFS4ERR_NO_GRACE:
                        nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
                        /* fall through */
                case -NFS4ERR_STALE_CLIENTID:
                case -NFS4ERR_BADSESSION:
                case -NFS4ERR_BADSLOT:
                case -NFS4ERR_BAD_HIGH_SLOT:
                case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
                        goto out_err;
                }
                nfs4_put_open_state(state);
                goto restart;
        }
        spin_unlock(&sp->so_lock);
        return 0;
out_err:
        nfs4_put_open_state(state);
        return status;
}

static void nfs4_clear_open_state(struct nfs4_state *state)
{
        struct nfs4_lock_state *lock;

        clear_bit(NFS_DELEGATED_STATE, &state->flags);
        clear_bit(NFS_O_RDONLY_STATE, &state->flags);
        clear_bit(NFS_O_WRONLY_STATE, &state->flags);
        clear_bit(NFS_O_RDWR_STATE, &state->flags);
        list_for_each_entry(lock, &state->lock_states, ls_locks) {
                lock->ls_seqid.flags = 0;
                lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
        }
}

static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
{
        struct nfs4_state_owner *sp;
        struct rb_node *pos;
        struct nfs4_state *state;

        /* Reset all sequence ids to zero */
        for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
                sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
                sp->so_seqid.flags = 0;
                spin_lock(&sp->so_lock);
                list_for_each_entry(state, &sp->so_states, open_states) {
                        if (mark_reclaim(clp, state))
                                nfs4_clear_open_state(state);
                }
                spin_unlock(&sp->so_lock);
        }
}

static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
{
        /* Mark all delegations for reclaim */
        nfs_delegation_mark_reclaim(clp);
        nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
}

static void nfs4_reclaim_complete(struct nfs_client *clp,
                                 const struct nfs4_state_recovery_ops *ops)
{
        /* Notify the server we're done reclaiming our state */
        if (ops->reclaim_complete)
                (void)ops->reclaim_complete(clp);
}

static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
{
        struct nfs4_state_owner *sp;
        struct rb_node *pos;
        struct nfs4_state *state;

        if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
                return;

        nfs4_reclaim_complete(clp, clp->cl_mvops->reboot_recovery_ops);

        for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
                sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
                spin_lock(&sp->so_lock);
                list_for_each_entry(state, &sp->so_states, open_states) {
                        if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags))
                                continue;
                        nfs4_state_mark_reclaim_nograce(clp, state);
                }
                spin_unlock(&sp->so_lock);
        }

        nfs_delegation_reap_unclaimed(clp);
}

static void nfs_delegation_clear_all(struct nfs_client *clp)
{
        nfs_delegation_mark_reclaim(clp);
        nfs_delegation_reap_unclaimed(clp);
}

static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp)
{
        nfs_delegation_clear_all(clp);
        nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
}

static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
{
        switch (error) {
        case -NFS4ERR_CB_PATH_DOWN:
                nfs_handle_cb_pathdown(clp);
                return 0;
        case -NFS4ERR_NO_GRACE:
                nfs4_state_end_reclaim_reboot(clp);
                return 0;
        case -NFS4ERR_STALE_CLIENTID:
        case -NFS4ERR_LEASE_MOVED:
                set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
                nfs4_state_end_reclaim_reboot(clp);
                nfs4_state_start_reclaim_reboot(clp);
                break;
        case -NFS4ERR_EXPIRED:
                set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
                nfs4_state_start_reclaim_nograce(clp);
                break;
        case -NFS4ERR_BADSESSION:
        case -NFS4ERR_BADSLOT:
        case -NFS4ERR_BAD_HIGH_SLOT:
        case -NFS4ERR_DEADSESSION:
        case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
        case -NFS4ERR_SEQ_FALSE_RETRY:
        case -NFS4ERR_SEQ_MISORDERED:
                set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
                /* Zero session reset errors */
                return 0;
        }
        return error;
}

static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
{
        struct rb_node *pos;
        int status = 0;

restart:
        spin_lock(&clp->cl_lock);
        for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
                struct nfs4_state_owner *sp = rb_entry(pos,
                                        struct nfs4_state_owner, so_client_node);
                if (!test_and_clear_bit(ops->owner_flag_bit, &sp->so_flags))
                        continue;
                atomic_inc(&sp->so_count);
                spin_unlock(&clp->cl_lock);
                status = nfs4_reclaim_open_state(sp, ops);
                if (status < 0) {
                        set_bit(ops->owner_flag_bit, &sp->so_flags);
                        nfs4_put_state_owner(sp);
                        return nfs4_recovery_handle_error(clp, status);
                }
                nfs4_put_state_owner(sp);
                goto restart;
        }
        spin_unlock(&clp->cl_lock);
        return status;
}

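/*
 * Probe whether the lease is still valid by issuing an ordinary lease
 * renewal (RENEW for v4.0, SEQUENCE for v4.1) using the state renewal
 * credential, falling back to the setclientid credential.
 */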
static int nfs4_check_lease(struct nfs_client *clp)
{
        struct rpc_cred *cred;
        const struct nfs4_state_maintenance_ops *ops =
                clp->cl_mvops->state_renewal_ops;
        int status = -NFS4ERR_EXPIRED;

        /* Is the client already known to have an expired lease? */
        if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
                return 0;
        spin_lock(&clp->cl_lock);
        cred = ops->get_state_renewal_cred_locked(clp);
        spin_unlock(&clp->cl_lock);
        if (cred == NULL) {
                cred = nfs4_get_setclientid_cred(clp);
                if (cred == NULL)
                        goto out;
        }
        status = ops->renew_lease(clp, cred);
        put_rpccred(cred);
out:
        return nfs4_recovery_handle_error(clp, status);
}

static int nfs4_reclaim_lease(struct nfs_client *clp)
{
        struct rpc_cred *cred;
        const struct nfs4_state_recovery_ops *ops =
                clp->cl_mvops->reboot_recovery_ops;
        int status = -ENOENT;

        cred = ops->get_clid_cred(clp);
        if (cred != NULL) {
                status = ops->establish_clid(clp, cred);
                put_rpccred(cred);
                /* Handle case where the user hasn't set up machine creds */
                if (status == -EACCES && cred == clp->cl_machine_cred) {
                        nfs4_clear_machine_cred(clp);
                        status = -EAGAIN;
                }
                if (status == -NFS4ERR_MINOR_VERS_MISMATCH)
                        status = -EPROTONOSUPPORT;
        }
        return status;
}

#ifdef CONFIG_NFS_V4_1
void nfs41_handle_recall_slot(struct nfs_client *clp)
{
        set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
        nfs4_schedule_state_recovery(clp);
}

static void nfs4_reset_all_state(struct nfs_client *clp)
{
        if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
                clp->cl_boot_time = CURRENT_TIME;
                nfs4_state_start_reclaim_nograce(clp);
                nfs4_schedule_state_recovery(clp);
        }
}

static void nfs41_handle_server_reboot(struct nfs_client *clp)
{
        if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
                nfs4_state_start_reclaim_reboot(clp);
                nfs4_schedule_state_recovery(clp);
        }
}

static void nfs41_handle_state_revoked(struct nfs_client *clp)
{
        /* Temporary */
        nfs4_reset_all_state(clp);
}

static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp)
{
        /* This will need to handle layouts too */
        nfs_expire_all_delegations(clp);
}

static void nfs41_handle_cb_path_down(struct nfs_client *clp)
{
        nfs_expire_all_delegations(clp);
        if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0)
                nfs4_schedule_state_recovery(clp);
}

void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
{
        if (!flags)
                return;
        else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
                nfs41_handle_server_reboot(clp);
        else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
                            SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
                            SEQ4_STATUS_ADMIN_STATE_REVOKED |
                            SEQ4_STATUS_LEASE_MOVED))
                nfs41_handle_state_revoked(clp);
        else if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
                nfs41_handle_recallable_state_revoked(clp);
        else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
                            SEQ4_STATUS_BACKCHANNEL_FAULT |
                            SEQ4_STATUS_CB_PATH_DOWN_SESSION))
                nfs41_handle_cb_path_down(clp);
}

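/*
 * The session needs to be reset: drain outstanding requests, destroy
 * the old session, and negotiate a fresh one.  Lease re-establishment,
 * if also required, is left to the state manager loop.
 */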
static int nfs4_reset_session(struct nfs_client *clp)
{
        int status;

        nfs4_begin_drain_session(clp);
        status = nfs4_proc_destroy_session(clp->cl_session);
        if (status && status != -NFS4ERR_BADSESSION &&
            status != -NFS4ERR_DEADSESSION) {
                status = nfs4_recovery_handle_error(clp, status);
                goto out;
        }

        memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
        status = nfs4_proc_create_session(clp);
        if (status) {
                status = nfs4_recovery_handle_error(clp, status);
                goto out;
        }
        /* create_session negotiated new slot table */
        clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);

        /* Let the state manager reestablish state */
        if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
                nfs41_setup_state_renewal(clp);
out:
        return status;
}

static int nfs4_recall_slot(struct nfs_client *clp)
{
        struct nfs4_slot_table *fc_tbl = &clp->cl_session->fc_slot_table;
        struct nfs4_channel_attrs *fc_attrs = &clp->cl_session->fc_attrs;
        struct nfs4_slot *new, *old;
        int i;

        nfs4_begin_drain_session(clp);
        new = kmalloc(fc_tbl->target_max_slots * sizeof(struct nfs4_slot),
                      GFP_NOFS);
        if (!new)
                return -ENOMEM;

        spin_lock(&fc_tbl->slot_tbl_lock);
        for (i = 0; i < fc_tbl->target_max_slots; i++)
                new[i].seq_nr = fc_tbl->slots[i].seq_nr;
        old = fc_tbl->slots;
        fc_tbl->slots = new;
        fc_tbl->max_slots = fc_tbl->target_max_slots;
        fc_tbl->target_max_slots = 0;
        fc_attrs->max_reqs = fc_tbl->max_slots;
        spin_unlock(&fc_tbl->slot_tbl_lock);

        kfree(old);
        nfs4_end_drain_session(clp);
        return 0;
}

#else /* CONFIG_NFS_V4_1 */
static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
static void nfs4_end_drain_session(struct nfs_client *clp) { }
static int nfs4_recall_slot(struct nfs_client *clp) { return 0; }
#endif /* CONFIG_NFS_V4_1 */

/* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
 * on EXCHANGE_ID for v4.1
 */
static void nfs4_set_lease_expired(struct nfs_client *clp, int status)
{
        if (nfs4_has_session(clp)) {
                switch (status) {
                case -NFS4ERR_DELAY:
                case -NFS4ERR_CLID_INUSE:
                case -EAGAIN:
                case -EKEYEXPIRED:
                        break;

                case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
                                         * in nfs4_exchange_id */
                default:
                        return;
                }
        }
        set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
}

static void nfs4_state_manager(struct nfs_client *clp)
{
        int status = 0;

        /* Ensure exclusive access to NFSv4 state */
        for (;;) {
                if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
                        /* We're going to have to re-establish a clientid */
                        status = nfs4_reclaim_lease(clp);
                        if (status) {
                                nfs4_set_lease_expired(clp, status);
                                if (test_bit(NFS4CLNT_LEASE_EXPIRED,
                                                        &clp->cl_state))
                                        continue;
                                if (clp->cl_cons_state ==
                                                        NFS_CS_SESSION_INITING)
                                        nfs_mark_client_ready(clp, status);
                                goto out_error;
                        }
                        clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
                        set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
                }

                if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
                        status = nfs4_check_lease(clp);
                        if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
                                continue;
                        if (status < 0 && status != -NFS4ERR_CB_PATH_DOWN)
                                goto out_error;
                }

                /* Initialize or reset the session */
                if (test_and_clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)
                   && nfs4_has_session(clp)) {
                        status = nfs4_reset_session(clp);
                        if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
                                continue;
                        if (status < 0)
                                goto out_error;
                }

                /* First recover reboot state... */
                if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
                        status = nfs4_do_reclaim(clp,
                                clp->cl_mvops->reboot_recovery_ops);
                        if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
                            test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
                                continue;
                        nfs4_state_end_reclaim_reboot(clp);
                        if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state))
                                continue;
                        if (status < 0)
                                goto out_error;
                }

                /* Now recover expired state... */
                if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
                        status = nfs4_do_reclaim(clp,
                                clp->cl_mvops->nograce_recovery_ops);
                        if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
                            test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) ||
                            test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
                                continue;
                        if (status < 0)
                                goto out_error;
                }

                nfs4_end_drain_session(clp);
                if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
                        nfs_client_return_marked_delegations(clp);
                        continue;
                }
                /* Recall session slots */
                if (test_and_clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state)
                   && nfs4_has_session(clp)) {
                        status = nfs4_recall_slot(clp);
                        if (status < 0)
                                goto out_error;
                        continue;
                }

                nfs4_clear_state_manager_bit(clp);
                /* Did we race with an attempt to give us more work? */
                if (clp->cl_state == 0)
                        break;
                if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
                        break;
        }
        return;
out_error:
        printk(KERN_WARNING "Error: state manager failed on NFSv4 server %s"
                        " with error %d\n", clp->cl_hostname, -status);
        nfs4_end_drain_session(clp);
        nfs4_clear_state_manager_bit(clp);
}

static int nfs4_run_state_manager(void *ptr)
{
        struct nfs_client *clp = ptr;

        allow_signal(SIGKILL);
        nfs4_state_manager(clp);
        nfs_put_client(clp);
        module_put_and_exit(0);
        return 0;
}

/*
 * Local variables:
 *  c-basic-offset: 8
 * End:
 */