/*
 *  fs/nfs/nfs4state.c
 *
 *  Client-side support for the NFSv4 state model.
5  *
6  *  Copyright (c) 2002 The Regents of the University of Michigan.
7  *  All rights reserved.
8  *
9  *  Kendrick Smith <kmsmith@umich.edu>
10  *
11  *  Redistribution and use in source and binary forms, with or without
12  *  modification, are permitted provided that the following conditions
13  *  are met:
14  *
15  *  1. Redistributions of source code must retain the above copyright
16  *     notice, this list of conditions and the following disclaimer.
17  *  2. Redistributions in binary form must reproduce the above copyright
18  *     notice, this list of conditions and the following disclaimer in the
19  *     documentation and/or other materials provided with the distribution.
20  *  3. Neither the name of the University nor the names of its
21  *     contributors may be used to endorse or promote products derived
22  *     from this software without specific prior written permission.
23  *
24  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
25  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
26  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27  *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31  *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
32  *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
33  *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
34  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  *
36  * Implementation of the NFSv4 state model.  For the time being,
37  * this is minimal, but will be made much more complex in a
38  * subsequent patch.
39  */
40 
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"

#define OPENOWNER_POOL_SIZE	8

const nfs4_stateid zero_stateid;

static DEFINE_SPINLOCK(state_spinlock);
static LIST_HEAD(nfs4_clientid_list);

void
init_nfsv4_state(struct nfs_server *server)
{
	server->nfs4_state = NULL;
	INIT_LIST_HEAD(&server->nfs4_siblings);
}

void
destroy_nfsv4_state(struct nfs_server *server)
{
	kfree(server->mnt_path);
	server->mnt_path = NULL;
	if (server->nfs4_state) {
		nfs4_put_client(server->nfs4_state);
		server->nfs4_state = NULL;
	}
}

/*
 * nfs4_get_client(): returns the client structure for the given
 *	server address, allocating a fresh one if none exists yet
 * nfs4_put_client(): drops a reference to the client structure
 *
 * Since these are allocated/deallocated very rarely, we don't
 * bother putting them in a slab cache...
 */
static struct nfs4_client *
nfs4_alloc_client(struct in_addr *addr)
{
	struct nfs4_client *clp;

	if (nfs_callback_up() < 0)
		return NULL;
	if ((clp = kzalloc(sizeof(*clp), GFP_KERNEL)) == NULL) {
		nfs_callback_down();
		return NULL;
	}
	memcpy(&clp->cl_addr, addr, sizeof(clp->cl_addr));
	init_rwsem(&clp->cl_sem);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_state_owners);
	INIT_LIST_HEAD(&clp->cl_unused);
	spin_lock_init(&clp->cl_lock);
	atomic_set(&clp->cl_count, 1);
	INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
	INIT_LIST_HEAD(&clp->cl_superblocks);
	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client");
	clp->cl_rpcclient = ERR_PTR(-EINVAL);
	clp->cl_boot_time = CURRENT_TIME;
	clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
	return clp;
}

static void
nfs4_free_client(struct nfs4_client *clp)
{
	struct nfs4_state_owner *sp;

	while (!list_empty(&clp->cl_unused)) {
		sp = list_entry(clp->cl_unused.next,
				struct nfs4_state_owner,
				so_list);
		list_del(&sp->so_list);
		kfree(sp);
	}
	BUG_ON(!list_empty(&clp->cl_state_owners));
	nfs_idmap_delete(clp);
	if (!IS_ERR(clp->cl_rpcclient))
		rpc_shutdown_client(clp->cl_rpcclient);
	kfree(clp);
	nfs_callback_down();
}

static struct nfs4_client *__nfs4_find_client(struct in_addr *addr)
{
	struct nfs4_client *clp;
	list_for_each_entry(clp, &nfs4_clientid_list, cl_servers) {
		if (memcmp(&clp->cl_addr, addr, sizeof(clp->cl_addr)) == 0) {
			atomic_inc(&clp->cl_count);
			return clp;
		}
	}
	return NULL;
}

struct nfs4_client *nfs4_find_client(struct in_addr *addr)
{
	struct nfs4_client *clp;
	spin_lock(&state_spinlock);
	clp = __nfs4_find_client(addr);
	spin_unlock(&state_spinlock);
	return clp;
}

struct nfs4_client *
nfs4_get_client(struct in_addr *addr)
{
	struct nfs4_client *clp, *new = NULL;

	spin_lock(&state_spinlock);
	for (;;) {
		clp = __nfs4_find_client(addr);
		if (clp != NULL)
			break;
		clp = new;
		if (clp != NULL) {
			list_add(&clp->cl_servers, &nfs4_clientid_list);
			new = NULL;
			break;
		}
		spin_unlock(&state_spinlock);
		new = nfs4_alloc_client(addr);
		spin_lock(&state_spinlock);
		if (new == NULL)
			break;
	}
	spin_unlock(&state_spinlock);
	if (new)
		nfs4_free_client(new);
	return clp;
}
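
/*
 * nfs4_get_client() is a classic find-or-allocate loop: state_spinlock
 * is dropped around the sleeping allocation in nfs4_alloc_client(), and
 * the lookup is retried afterwards in case another thread installed a
 * client for the same address in the meantime; a losing allocation is
 * freed above.  An illustrative sketch of a caller on the mount path
 * (the surrounding names are hypothetical, not from this file):
 *
 *	struct nfs4_client *clp = nfs4_get_client(&addr);
 *	if (clp == NULL)
 *		return -ENOMEM;		// hypothetical caller
 *	server->nfs4_state = clp;
 *	...
 *	nfs4_put_client(clp);		// on teardown
 */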

void
nfs4_put_client(struct nfs4_client *clp)
{
	if (!atomic_dec_and_lock(&clp->cl_count, &state_spinlock))
		return;
	list_del(&clp->cl_servers);
	spin_unlock(&state_spinlock);
	BUG_ON(!list_empty(&clp->cl_superblocks));
	rpc_wake_up(&clp->cl_rpcwaitq);
	nfs4_kill_renewd(clp);
	nfs4_free_client(clp);
}
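
/*
 * atomic_dec_and_lock() takes state_spinlock only when cl_count drops
 * to zero, so the common-case put stays lock-free; the lock is held
 * just long enough to unlink the client from nfs4_clientid_list before
 * nfs4_free_client() tears it down.
 */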

static int nfs4_init_client(struct nfs4_client *clp, struct rpc_cred *cred)
{
	int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK,
			nfs_callback_tcpport, cred);
	if (status == 0)
		status = nfs4_proc_setclientid_confirm(clp, cred);
	if (status == 0)
		nfs4_schedule_state_renewal(clp);
	return status;
}
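
/*
 * This is the two-phase clientid handshake from RFC 3530: SETCLIENTID
 * proposes a clientid/verifier pair and registers the callback channel
 * (NFS4_CALLBACK on nfs_callback_tcpport), and SETCLIENTID_CONFIRM
 * commits it on the server; lease renewal is scheduled only once both
 * phases have succeeded.
 */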

u32
nfs4_alloc_lockowner_id(struct nfs4_client *clp)
{
	return clp->cl_lockowner_id++;
}

static struct nfs4_state_owner *
nfs4_client_grab_unused(struct nfs4_client *clp, struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp = NULL;

	if (!list_empty(&clp->cl_unused)) {
		sp = list_entry(clp->cl_unused.next, struct nfs4_state_owner, so_list);
		atomic_inc(&sp->so_count);
		sp->so_cred = cred;
		list_move(&sp->so_list, &clp->cl_state_owners);
		clp->cl_nunused--;
	}
	return sp;
}

struct rpc_cred *nfs4_get_renew_cred(struct nfs4_client *clp)
{
	struct nfs4_state_owner *sp;
	struct rpc_cred *cred = NULL;

	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		if (list_empty(&sp->so_states))
			continue;
		cred = get_rpccred(sp->so_cred);
		break;
	}
	return cred;
}

struct rpc_cred *nfs4_get_setclientid_cred(struct nfs4_client *clp)
{
	struct nfs4_state_owner *sp;

	if (!list_empty(&clp->cl_state_owners)) {
		sp = list_entry(clp->cl_state_owners.next,
				struct nfs4_state_owner, so_list);
		return get_rpccred(sp->so_cred);
	}
	return NULL;
}

static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs4_client *clp, struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp, *res = NULL;

	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		if (sp->so_cred != cred)
			continue;
		atomic_inc(&sp->so_count);
		/* Move to the head of the list */
		list_move(&sp->so_list, &clp->cl_state_owners);
		res = sp;
		break;
	}
	return res;
}

/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(void)
{
	struct nfs4_state_owner *sp;

	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
	if (!sp)
		return NULL;
	spin_lock_init(&sp->so_lock);
	INIT_LIST_HEAD(&sp->so_states);
	INIT_LIST_HEAD(&sp->so_delegations);
	rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
	sp->so_seqid.sequence = &sp->so_sequence;
	spin_lock_init(&sp->so_sequence.lock);
	INIT_LIST_HEAD(&sp->so_sequence.list);
	atomic_set(&sp->so_count, 1);
	return sp;
}

void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs4_client *clp = sp->so_client;
	spin_lock(&clp->cl_lock);
	list_del_init(&sp->so_list);
	spin_unlock(&clp->cl_lock);
}

/*
 * Note: must be called with clp->cl_sem held in order to prevent races
 *       with reboot recovery!
 */
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
	struct nfs4_client *clp = server->nfs4_state;
	struct nfs4_state_owner *sp, *new;

	get_rpccred(cred);
	new = nfs4_alloc_state_owner();
	spin_lock(&clp->cl_lock);
	sp = nfs4_find_state_owner(clp, cred);
	if (sp == NULL)
		sp = nfs4_client_grab_unused(clp, cred);
	if (sp == NULL && new != NULL) {
		list_add(&new->so_list, &clp->cl_state_owners);
		new->so_client = clp;
		new->so_id = nfs4_alloc_lockowner_id(clp);
		new->so_cred = cred;
		sp = new;
		new = NULL;
	}
	spin_unlock(&clp->cl_lock);
	kfree(new);
	if (sp != NULL)
		return sp;
	put_rpccred(cred);
	return NULL;
}
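
/*
 * Note the preallocation idiom above: nfs4_alloc_state_owner() may
 * sleep in kzalloc(), so the candidate is allocated before cl_lock is
 * taken, and discarded when the lookup or the unused pool satisfied
 * the request instead.  kfree(NULL) is a no-op, which is why the
 * kfree(new) needs no NULL check.
 */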

/*
 * Must be called with clp->cl_sem held in order to avoid races
 * with state recovery...
 */
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs4_client *clp = sp->so_client;
	struct rpc_cred *cred = sp->so_cred;

	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
		return;
	if (clp->cl_nunused >= OPENOWNER_POOL_SIZE)
		goto out_free;
	if (list_empty(&sp->so_list))
		goto out_free;
	list_move(&sp->so_list, &clp->cl_unused);
	clp->cl_nunused++;
	spin_unlock(&clp->cl_lock);
	put_rpccred(cred);
	cred = NULL;
	return;
out_free:
	list_del(&sp->so_list);
	spin_unlock(&clp->cl_lock);
	put_rpccred(cred);
	kfree(sp);
}

static struct nfs4_state *
nfs4_alloc_open_state(void)
{
	struct nfs4_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;
	atomic_set(&state->count, 1);
	INIT_LIST_HEAD(&state->lock_states);
	spin_lock_init(&state->state_lock);
	return state;
}

void
nfs4_state_set_mode_locked(struct nfs4_state *state, mode_t mode)
{
	if (state->state == mode)
		return;
	/* NB! List reordering - see the reclaim code for why.  */
	if ((mode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
		if (mode & FMODE_WRITE)
			list_move(&state->open_states, &state->owner->so_states);
		else
			list_move_tail(&state->open_states, &state->owner->so_states);
	}
	if (mode == 0)
		list_del_init(&state->inode_states);
	state->state = mode;
}

static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	list_for_each_entry(state, &nfsi->open_states, inode_states) {
		/* Is this in the process of being freed? */
		if (state->state == 0)
			continue;
		if (state->owner == owner) {
			atomic_inc(&state->count);
			return state;
		}
	}
	return NULL;
}

static void
nfs4_free_open_state(struct nfs4_state *state)
{
	kfree(state);
}

struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs4_state *state, *new;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	spin_unlock(&inode->i_lock);
	if (state)
		goto out;
	new = nfs4_alloc_open_state();
	spin_lock(&owner->so_lock);
	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	if (state == NULL && new != NULL) {
		state = new;
		state->owner = owner;
		atomic_inc(&owner->so_count);
		list_add(&state->inode_states, &nfsi->open_states);
		state->inode = igrab(inode);
		spin_unlock(&inode->i_lock);
		/* Note: The reclaim code dictates that we add stateless
		 * and read-only stateids to the end of the list */
		list_add_tail(&state->open_states, &owner->so_states);
		spin_unlock(&owner->so_lock);
	} else {
		spin_unlock(&inode->i_lock);
		spin_unlock(&owner->so_lock);
		if (new)
			nfs4_free_open_state(new);
	}
out:
	return state;
}
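
/*
 * Lock ordering in the slow path above is owner->so_lock first, then
 * inode->i_lock; nfs4_put_open_state() and nfs4_close_state() below
 * follow the same order.  A sketch of a typical OPEN-path caller
 * (illustrative only; the error label is hypothetical):
 *
 *	state = nfs4_get_open_state(inode, sp);
 *	if (state == NULL)
 *		goto out_put_state_owner;
 *	...
 *	nfs4_put_open_state(state);
 */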

/*
 * Beware! Caller must be holding exactly one
 * reference to clp->cl_sem!
 */
void nfs4_put_open_state(struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;

	if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
		return;
	spin_lock(&inode->i_lock);
	if (!list_empty(&state->inode_states))
		list_del(&state->inode_states);
	list_del(&state->open_states);
	spin_unlock(&inode->i_lock);
	spin_unlock(&owner->so_lock);
	iput(inode);
	nfs4_free_open_state(state);
	nfs4_put_state_owner(owner);
}

/*
 * Close the current file.
 */
void nfs4_close_state(struct nfs4_state *state, mode_t mode)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;
	int oldstate, newstate = 0;

	atomic_inc(&owner->so_count);
	/* Protect against nfs4_find_state() */
	spin_lock(&owner->so_lock);
	spin_lock(&inode->i_lock);
	switch (mode & (FMODE_READ | FMODE_WRITE)) {
		case FMODE_READ:
			state->n_rdonly--;
			break;
		case FMODE_WRITE:
			state->n_wronly--;
			break;
		case FMODE_READ|FMODE_WRITE:
			state->n_rdwr--;
	}
	oldstate = newstate = state->state;
	if (state->n_rdwr == 0) {
		if (state->n_rdonly == 0)
			newstate &= ~FMODE_READ;
		if (state->n_wronly == 0)
			newstate &= ~FMODE_WRITE;
	}
	if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
		nfs4_state_set_mode_locked(state, newstate);
		oldstate = newstate;
	}
	spin_unlock(&inode->i_lock);
	spin_unlock(&owner->so_lock);

	if (oldstate != newstate && nfs4_do_close(inode, state) == 0)
		return;
	nfs4_put_open_state(state);
	nfs4_put_state_owner(owner);
}
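
/*
 * The n_rdonly/n_wronly/n_rdwr counters track how many opens of each
 * kind are outstanding.  Worked example: after open(O_RDWR) plus
 * open(O_RDONLY) we have n_rdwr == 1 and n_rdonly == 1; closing the
 * O_RDWR descriptor leaves n_rdonly == 1, so newstate retains
 * FMODE_READ, and since the effective mode changed, the new mode is
 * pushed to the server via nfs4_do_close().
 */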

/*
 * Search the state->lock_states for an existing lock_owner
 * that is compatible with current->files
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *pos;
	list_for_each_entry(pos, &state->lock_states, ls_locks) {
		if (pos->ls_owner != fl_owner)
			continue;
		atomic_inc(&pos->ls_count);
		return pos;
	}
	return NULL;
}

/*
 * Allocate and initialize a new lock_state structure for the
 * given lock owner.  The new structure is not yet linked into
 * state->lock_states.
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;
	struct nfs4_client *clp = state->owner->so_client;

	lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
	if (lsp == NULL)
		return NULL;
	lsp->ls_seqid.sequence = &state->owner->so_sequence;
	atomic_set(&lsp->ls_count, 1);
	lsp->ls_owner = fl_owner;
	spin_lock(&clp->cl_lock);
	lsp->ls_id = nfs4_alloc_lockowner_id(clp);
	spin_unlock(&clp->cl_lock);
	INIT_LIST_HEAD(&lsp->ls_locks);
	return lsp;
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 *
 * The caller must be holding clp->cl_sem
 */
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
	struct nfs4_lock_state *lsp, *new = NULL;

	for (;;) {
		spin_lock(&state->state_lock);
		lsp = __nfs4_find_lock_state(state, owner);
		if (lsp != NULL)
			break;
		if (new != NULL) {
			new->ls_state = state;
			list_add(&new->ls_locks, &state->lock_states);
			set_bit(LK_STATE_IN_USE, &state->flags);
			lsp = new;
			new = NULL;
			break;
		}
		spin_unlock(&state->state_lock);
		new = nfs4_alloc_lock_state(state, owner);
		if (new == NULL)
			return NULL;
	}
	spin_unlock(&state->state_lock);
	kfree(new);
	return lsp;
}

/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
	struct nfs4_state *state;

	if (lsp == NULL)
		return;
	state = lsp->ls_state;
	if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
		return;
	list_del(&lsp->ls_locks);
	if (list_empty(&state->lock_states))
		clear_bit(LK_STATE_IN_USE, &state->flags);
	spin_unlock(&state->state_lock);
	kfree(lsp);
}

static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
	struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;

	dst->fl_u.nfs4_fl.owner = lsp;
	atomic_inc(&lsp->ls_count);
}

static void nfs4_fl_release_lock(struct file_lock *fl)
{
	nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}

static struct file_lock_operations nfs4_fl_lock_ops = {
	.fl_copy_lock = nfs4_fl_copy_lock,
	.fl_release_private = nfs4_fl_release_lock,
};

int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
	struct nfs4_lock_state *lsp;

	if (fl->fl_ops != NULL)
		return 0;
	lsp = nfs4_get_lock_state(state, fl->fl_owner);
	if (lsp == NULL)
		return -ENOMEM;
	fl->fl_u.nfs4_fl.owner = lsp;
	fl->fl_ops = &nfs4_fl_lock_ops;
	return 0;
}
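
/*
 * Once attached, the lock state follows the file_lock around: VFS lock
 * splitting and merging copies the pointer via nfs4_fl_copy_lock()
 * (taking an extra reference) and drops it in nfs4_fl_release_lock().
 * Sketch of a LOCK-path caller (illustrative only):
 *
 *	status = nfs4_set_lock_state(state, fl);
 *	if (status != 0)
 *		return status;
 *	lsp = fl->fl_u.nfs4_fl.owner;	// pinned for the life of fl
 */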

/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;

	memcpy(dst, &state->stateid, sizeof(*dst));
	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
		return;

	spin_lock(&state->state_lock);
	lsp = __nfs4_find_lock_state(state, fl_owner);
	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
		memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
	spin_unlock(&state->state_lock);
	nfs4_put_lock_state(lsp);
}
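
/*
 * Stateid selection precedence for READ/WRITE requests: a fully
 * established byte-range lock stateid (NFS_LOCK_INITIALIZED) overrides
 * the open stateid.  The unlocked LK_STATE_IN_USE test lets the common
 * no-locks case skip state_lock entirely.
 */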

struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
{
	struct rpc_sequence *sequence = counter->sequence;
	struct nfs_seqid *new;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (new != NULL) {
		new->sequence = counter;
		spin_lock(&sequence->lock);
		list_add_tail(&new->list, &sequence->list);
		spin_unlock(&sequence->lock);
	}
	return new;
}

void nfs_free_seqid(struct nfs_seqid *seqid)
{
	struct rpc_sequence *sequence = seqid->sequence->sequence;

	spin_lock(&sequence->lock);
	list_del(&seqid->list);
	spin_unlock(&sequence->lock);
	rpc_wake_up(&sequence->wait);
	kfree(seqid);
}

/*
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see the comment on seqid_mutating_error() in nfs_fs.h
 */
static inline void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
{
	switch (status) {
		case 0:
			break;
		case -NFS4ERR_BAD_SEQID:
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_BADXDR:
		case -NFS4ERR_RESOURCE:
		case -NFS4ERR_NOFILEHANDLE:
			/* Non-seqid mutating errors */
			return;
	}
	/*
	 * Note: no locking needed as we are guaranteed to be first
	 * on the sequence list
	 */
	seqid->sequence->counter++;
}

void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
	if (status == -NFS4ERR_BAD_SEQID) {
		struct nfs4_state_owner *sp = container_of(seqid->sequence,
				struct nfs4_state_owner, so_seqid);
		nfs4_drop_state_owner(sp);
	}
	return nfs_increment_seqid(status, seqid);
}

/*
 * Increment the seqid if the LOCK/LOCKU succeeded, or
 * failed with a seqid incrementing error -
 * see the comment on seqid_mutating_error() in nfs_fs.h
 */
void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
{
	return nfs_increment_seqid(status, seqid);
}

int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
	struct rpc_sequence *sequence = seqid->sequence->sequence;
	int status = 0;

	if (sequence->list.next == &seqid->list)
		goto out;
	spin_lock(&sequence->lock);
	if (sequence->list.next != &seqid->list) {
		rpc_sleep_on(&sequence->wait, task, NULL, NULL);
		status = -EAGAIN;
	}
	spin_unlock(&sequence->lock);
out:
	return status;
}
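
/*
 * This serializes seqid-mutating RPCs for a single state owner: only
 * the nfs_seqid at the head of the rpc_sequence list may proceed;
 * later entries sleep on sequence->wait and return -EAGAIN so the RPC
 * state machine retries them.  The unlocked head check is merely an
 * optimistic fast path; the decision to sleep is re-taken under
 * sequence->lock.  Sketch of use from an RPC prepare callback (the
 * calldata layout is hypothetical):
 *
 *	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
 *		return;		// task now sleeps; it will be re-run
 */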

static int reclaimer(void *);

static inline void nfs4_clear_recover_bit(struct nfs4_client *clp)
{
	smp_mb__before_clear_bit();
	clear_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state);
	smp_mb__after_clear_bit();
	wake_up_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER);
	rpc_wake_up(&clp->cl_rpcwaitq);
}

/*
 * State recovery routine
 */
static void nfs4_recover_state(struct nfs4_client *clp)
{
	struct task_struct *task;

	__module_get(THIS_MODULE);
	atomic_inc(&clp->cl_count);
	task = kthread_run(reclaimer, clp, "%u.%u.%u.%u-reclaim",
			NIPQUAD(clp->cl_addr));
	if (!IS_ERR(task))
		return;
	nfs4_clear_recover_bit(clp);
	nfs4_put_client(clp);
	module_put(THIS_MODULE);
}

/*
 * Schedule a state recovery attempt
 */
void nfs4_schedule_state_recovery(struct nfs4_client *clp)
{
	if (!clp)
		return;
	if (test_and_set_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) == 0)
		nfs4_recover_state(clp);
}
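
/*
 * test_and_set_bit() guarantees at most one reclaimer thread per
 * client: a caller that finds NFS4CLNT_STATE_RECOVER already set
 * simply returns, and the bit is cleared again by
 * nfs4_clear_recover_bit() once recovery completes (or fails to
 * start).
 */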

static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status = 0;

	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if (((struct nfs_open_context *)fl->fl_file->private_data)->state != state)
			continue;
		status = ops->recover_lock(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__FUNCTION__, status);
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
				break;
			case -NFS4ERR_STALE_CLIENTID:
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct nfs4_state_owner *sp)
{
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;
	int status = 0;

	/* Note: we rely on the sp->so_states list being ordered
	 * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
	 * states first.
	 * This is needed to ensure that the server won't give us any
	 * read delegations that we have to return if, say, we are
	 * recovering after a network partition or a reboot from a
	 * server that doesn't support a grace period.
	 */
	list_for_each_entry(state, &sp->so_states, open_states) {
		if (state->state == 0)
			continue;
		status = ops->recover_open(sp, state);
		if (status >= 0) {
			status = nfs4_reclaim_locks(ops, state);
			if (status < 0)
				goto out_err;
			list_for_each_entry(lock, &state->lock_states, ls_locks) {
				if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
					printk("%s: Lock reclaim failed!\n",
							__FUNCTION__);
			}
			continue;
		}
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__FUNCTION__, status);
			case -ENOENT:
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				/*
				 * Open state on this file cannot be recovered
				 * All we can do is revert to using the zero stateid.
				 */
				memset(state->stateid.data, 0,
					sizeof(state->stateid.data));
				/* Mark the file as being 'closed' */
				state->state = 0;
				break;
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
			case -NFS4ERR_STALE_CLIENTID:
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

static void nfs4_state_mark_reclaim(struct nfs4_client *clp)
{
	struct nfs4_state_owner *sp;
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;

	/* Reset all sequence ids to zero */
	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		sp->so_seqid.counter = 0;
		sp->so_seqid.flags = 0;
		spin_lock(&sp->so_lock);
		list_for_each_entry(state, &sp->so_states, open_states) {
			list_for_each_entry(lock, &state->lock_states, ls_locks) {
				lock->ls_seqid.counter = 0;
				lock->ls_seqid.flags = 0;
				lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
			}
		}
		spin_unlock(&sp->so_lock);
	}
}
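
/*
 * The reclaimer thread below drives the full recovery sequence: renew
 * or re-establish the lease, reset all sequence ids via
 * nfs4_state_mark_reclaim(), then walk every state owner reclaiming
 * opens and locks (falling back to the network-partition ops when the
 * server's grace period has ended), and finally reap any delegations
 * that could not be reclaimed.
 */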

static int reclaimer(void *ptr)
{
	struct nfs4_client *clp = ptr;
	struct nfs4_state_owner *sp;
	struct nfs4_state_recovery_ops *ops;
	struct rpc_cred *cred;
	int status = 0;

	allow_signal(SIGKILL);

	/* Ensure exclusive access to NFSv4 state */
	lock_kernel();
	down_write(&clp->cl_sem);
	/* Are there any NFS mounts out there? */
	if (list_empty(&clp->cl_superblocks))
		goto out;
restart_loop:
	ops = &nfs4_network_partition_recovery_ops;
	/* Are there any open files on this volume? */
	cred = nfs4_get_renew_cred(clp);
	if (cred != NULL) {
		/* Yes there are: try to renew the old lease */
		status = nfs4_proc_renew(clp, cred);
		switch (status) {
			case 0:
			case -NFS4ERR_CB_PATH_DOWN:
				put_rpccred(cred);
				goto out;
			case -NFS4ERR_STALE_CLIENTID:
			case -NFS4ERR_LEASE_MOVED:
				ops = &nfs4_reboot_recovery_ops;
		}
	} else {
		/* "reboot" to ensure we clear all state on the server */
		clp->cl_boot_time = CURRENT_TIME;
		cred = nfs4_get_setclientid_cred(clp);
	}
	/* We're going to have to re-establish a clientid */
	nfs4_state_mark_reclaim(clp);
	status = -ENOENT;
	if (cred != NULL) {
		status = nfs4_init_client(clp, cred);
		put_rpccred(cred);
	}
	if (status)
		goto out_error;
	/* Mark all delegations for reclaim */
	nfs_delegation_mark_reclaim(clp);
	/* Note: list is protected by exclusive lock on cl->cl_sem */
	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		status = nfs4_reclaim_open_state(ops, sp);
		if (status < 0) {
			if (status == -NFS4ERR_NO_GRACE) {
				ops = &nfs4_network_partition_recovery_ops;
				status = nfs4_reclaim_open_state(ops, sp);
			}
			if (status == -NFS4ERR_STALE_CLIENTID)
				goto restart_loop;
			if (status == -NFS4ERR_EXPIRED)
				goto restart_loop;
		}
	}
	nfs_delegation_reap_unclaimed(clp);
out:
	up_write(&clp->cl_sem);
	unlock_kernel();
	if (status == -NFS4ERR_CB_PATH_DOWN)
		nfs_handle_cb_pathdown(clp);
	nfs4_clear_recover_bit(clp);
	nfs4_put_client(clp);
	module_put_and_exit(0);
	return 0;
out_error:
	printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u with error %d\n",
				NIPQUAD(clp->cl_addr.s_addr), -status);
	set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
	goto out;
}

/*
 * Local variables:
 *  c-basic-offset: 8
 * End:
 */