xref: /linux/fs/nfs/nfs4state.c (revision 8fa5723aa7e053d498336b48448b292fc2e0458b)
1 /*
2  *  fs/nfs/nfs4state.c
3  *
4  *  Client-side support for the NFSv4 state model.
5  *
6  *  Copyright (c) 2002 The Regents of the University of Michigan.
7  *  All rights reserved.
8  *
9  *  Kendrick Smith <kmsmith@umich.edu>
10  *
11  *  Redistribution and use in source and binary forms, with or without
12  *  modification, are permitted provided that the following conditions
13  *  are met:
14  *
15  *  1. Redistributions of source code must retain the above copyright
16  *     notice, this list of conditions and the following disclaimer.
17  *  2. Redistributions in binary form must reproduce the above copyright
18  *     notice, this list of conditions and the following disclaimer in the
19  *     documentation and/or other materials provided with the distribution.
20  *  3. Neither the name of the University nor the names of its
21  *     contributors may be used to endorse or promote products derived
22  *     from this software without specific prior written permission.
23  *
24  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
25  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
26  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27  *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31  *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
32  *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
33  *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
34  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  *
36  * Implementation of the NFSv4 state model.  For the time being,
37  * this is minimal, but will be made much more complex in a
38  * subsequent patch.
39  */
40 
41 #include <linux/kernel.h>
42 #include <linux/slab.h>
43 #include <linux/smp_lock.h>
44 #include <linux/nfs_fs.h>
45 #include <linux/nfs_idmap.h>
46 #include <linux/kthread.h>
47 #include <linux/module.h>
48 #include <linux/random.h>
49 #include <linux/workqueue.h>
50 #include <linux/bitops.h>
51 
52 #include "nfs4_fs.h"
53 #include "callback.h"
54 #include "delegation.h"
55 #include "internal.h"
56 
57 #define OPENOWNER_POOL_SIZE	8
58 
59 const nfs4_stateid zero_stateid;
60 
61 static LIST_HEAD(nfs4_clientid_list);
62 
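/*
 * Establish this client's clientid on the server: send SETCLIENTID and,
 * if that succeeds, SETCLIENTID_CONFIRM, then schedule the lease
 * renewal timer.
 */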
63 static int nfs4_init_client(struct nfs_client *clp, struct rpc_cred *cred)
64 {
65 	int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK,
66 			nfs_callback_tcpport, cred);
67 	if (status == 0)
68 		status = nfs4_proc_setclientid_confirm(clp, cred);
69 	if (status == 0)
70 		nfs4_schedule_state_renewal(clp);
71 	return status;
72 }
73 
74 static struct rpc_cred *nfs4_get_machine_cred(struct nfs_client *clp)
75 {
76 	struct rpc_cred *cred = NULL;
77 
78 	spin_lock(&clp->cl_lock);
79 	if (clp->cl_machine_cred != NULL)
80 		cred = get_rpccred(clp->cl_machine_cred);
81 	spin_unlock(&clp->cl_lock);
82 	return cred;
83 }
84 
85 static void nfs4_clear_machine_cred(struct nfs_client *clp)
86 {
87 	struct rpc_cred *cred;
88 
89 	spin_lock(&clp->cl_lock);
90 	cred = clp->cl_machine_cred;
91 	clp->cl_machine_cred = NULL;
92 	spin_unlock(&clp->cl_lock);
93 	if (cred != NULL)
94 		put_rpccred(cred);
95 }
96 
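/*
 * Pick a credential for lease renewal: the cred of the first state
 * owner that still has open state.  Returns a referenced cred, or NULL
 * if no owner currently has any open files.
 */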
97 struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp)
98 {
99 	struct nfs4_state_owner *sp;
100 	struct rb_node *pos;
101 	struct rpc_cred *cred = NULL;
102 
103 	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
104 		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
105 		if (list_empty(&sp->so_states))
106 			continue;
107 		cred = get_rpccred(sp->so_cred);
108 		break;
109 	}
110 	return cred;
111 }
112 
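/*
 * Pick a credential for SETCLIENTID: prefer the machine credential,
 * falling back to the cred of the first state owner on this client.
 */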
113 static struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
114 {
115 	struct nfs4_state_owner *sp;
116 	struct rb_node *pos;
117 	struct rpc_cred *cred;
118 
119 	cred = nfs4_get_machine_cred(clp);
120 	if (cred != NULL)
121 		goto out;
122 	pos = rb_first(&clp->cl_state_owners);
123 	if (pos != NULL) {
124 		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
125 		cred = get_rpccred(sp->so_cred);
126 	}
127 out:
128 	return cred;
129 }
130 
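/*
 * Allocate a unique id in the range [minval, 2^maxbits) and insert it
 * into the rb-tree 'root'.  The id starts at a random value to keep the
 * distribution flat; on collision it is probed forward until a free
 * slot is found, wrapping back to minval at the top of the range.
 */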
131 static void nfs_alloc_unique_id(struct rb_root *root, struct nfs_unique_id *new,
132 		__u64 minval, int maxbits)
133 {
134 	struct rb_node **p, *parent;
135 	struct nfs_unique_id *pos;
136 	__u64 mask = ~0ULL;
137 
138 	if (maxbits < 64)
139 		mask = (1ULL << maxbits) - 1ULL;
140 
141 	/* Ensure distribution is more or less flat */
142 	get_random_bytes(&new->id, sizeof(new->id));
143 	new->id &= mask;
144 	if (new->id < minval)
145 		new->id += minval;
146 retry:
147 	p = &root->rb_node;
148 	parent = NULL;
149 
150 	while (*p != NULL) {
151 		parent = *p;
152 		pos = rb_entry(parent, struct nfs_unique_id, rb_node);
153 
154 		if (new->id < pos->id)
155 			p = &(*p)->rb_left;
156 		else if (new->id > pos->id)
157 			p = &(*p)->rb_right;
158 		else
159 			goto id_exists;
160 	}
161 	rb_link_node(&new->rb_node, parent, p);
162 	rb_insert_color(&new->rb_node, root);
163 	return;
164 id_exists:
165 	for (;;) {
166 		new->id++;
167 		if (new->id < minval || (new->id & mask) != new->id) {
168 			new->id = minval;
169 			break;
170 		}
171 		parent = rb_next(parent);
172 		if (parent == NULL)
173 			break;
174 		pos = rb_entry(parent, struct nfs_unique_id, rb_node);
175 		if (new->id < pos->id)
176 			break;
177 	}
178 	goto retry;
179 }
180 
181 static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id)
182 {
183 	rb_erase(&id->rb_node, root);
184 }
185 
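/*
 * Look up an existing state owner for the (server, cred) pair in the
 * client's cl_state_owners rb-tree, which is keyed on those two
 * pointers.  Takes a reference on the owner if found.  The caller is
 * expected to hold clp->cl_lock.
 */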
186 static struct nfs4_state_owner *
187 nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred)
188 {
189 	struct nfs_client *clp = server->nfs_client;
190 	struct rb_node **p = &clp->cl_state_owners.rb_node,
191 		       *parent = NULL;
192 	struct nfs4_state_owner *sp, *res = NULL;
193 
194 	while (*p != NULL) {
195 		parent = *p;
196 		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);
197 
198 		if (server < sp->so_server) {
199 			p = &parent->rb_left;
200 			continue;
201 		}
202 		if (server > sp->so_server) {
203 			p = &parent->rb_right;
204 			continue;
205 		}
206 		if (cred < sp->so_cred)
207 			p = &parent->rb_left;
208 		else if (cred > sp->so_cred)
209 			p = &parent->rb_right;
210 		else {
211 			atomic_inc(&sp->so_count);
212 			res = sp;
213 			break;
214 		}
215 	}
216 	return res;
217 }
218 
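/*
 * Insert a new state owner into the client's cl_state_owners rb-tree.
 * If an owner for the same (server, cred) pair already exists, take a
 * reference on it and return it instead; otherwise assign the new owner
 * a unique open-owner id and return it.  The caller is expected to hold
 * clp->cl_lock.
 */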
219 static struct nfs4_state_owner *
220 nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
221 {
222 	struct rb_node **p = &clp->cl_state_owners.rb_node,
223 		       *parent = NULL;
224 	struct nfs4_state_owner *sp;
225 
226 	while (*p != NULL) {
227 		parent = *p;
228 		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);
229 
230 		if (new->so_server < sp->so_server) {
231 			p = &parent->rb_left;
232 			continue;
233 		}
234 		if (new->so_server > sp->so_server) {
235 			p = &parent->rb_right;
236 			continue;
237 		}
238 		if (new->so_cred < sp->so_cred)
239 			p = &parent->rb_left;
240 		else if (new->so_cred > sp->so_cred)
241 			p = &parent->rb_right;
242 		else {
243 			atomic_inc(&sp->so_count);
244 			return sp;
245 		}
246 	}
247 	nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64);
248 	rb_link_node(&new->so_client_node, parent, p);
249 	rb_insert_color(&new->so_client_node, &clp->cl_state_owners);
250 	return new;
251 }
252 
253 static void
254 nfs4_remove_state_owner(struct nfs_client *clp, struct nfs4_state_owner *sp)
255 {
256 	if (!RB_EMPTY_NODE(&sp->so_client_node))
257 		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
258 	nfs_free_unique_id(&clp->cl_openowner_id, &sp->so_owner_id);
259 }
260 
261 /*
262  * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
263  * create a new state_owner.
264  *
265  */
266 static struct nfs4_state_owner *
267 nfs4_alloc_state_owner(void)
268 {
269 	struct nfs4_state_owner *sp;
270 
271 	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
272 	if (!sp)
273 		return NULL;
274 	spin_lock_init(&sp->so_lock);
275 	INIT_LIST_HEAD(&sp->so_states);
276 	INIT_LIST_HEAD(&sp->so_delegations);
277 	rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
278 	sp->so_seqid.sequence = &sp->so_sequence;
279 	spin_lock_init(&sp->so_sequence.lock);
280 	INIT_LIST_HEAD(&sp->so_sequence.list);
281 	atomic_set(&sp->so_count, 1);
282 	return sp;
283 }
284 
285 static void
286 nfs4_drop_state_owner(struct nfs4_state_owner *sp)
287 {
288 	if (!RB_EMPTY_NODE(&sp->so_client_node)) {
289 		struct nfs_client *clp = sp->so_client;
290 
291 		spin_lock(&clp->cl_lock);
292 		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
293 		RB_CLEAR_NODE(&sp->so_client_node);
294 		spin_unlock(&clp->cl_lock);
295 	}
296 }
297 
298 /*
299  * Note: must be called with clp->cl_sem held in order to prevent races
300  *       with reboot recovery!
301  */
302 struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
303 {
304 	struct nfs_client *clp = server->nfs_client;
305 	struct nfs4_state_owner *sp, *new;
306 
307 	spin_lock(&clp->cl_lock);
308 	sp = nfs4_find_state_owner(server, cred);
309 	spin_unlock(&clp->cl_lock);
310 	if (sp != NULL)
311 		return sp;
312 	new = nfs4_alloc_state_owner();
313 	if (new == NULL)
314 		return NULL;
315 	new->so_client = clp;
316 	new->so_server = server;
317 	new->so_cred = cred;
318 	spin_lock(&clp->cl_lock);
319 	sp = nfs4_insert_state_owner(clp, new);
320 	spin_unlock(&clp->cl_lock);
321 	if (sp == new)
322 		get_rpccred(cred);
323 	else {
324 		rpc_destroy_wait_queue(&new->so_sequence.wait);
325 		kfree(new);
326 	}
327 	return sp;
328 }
329 
330 /*
331  * Must be called with clp->cl_sem held in order to avoid races
332  * with state recovery...
333  */
334 void nfs4_put_state_owner(struct nfs4_state_owner *sp)
335 {
336 	struct nfs_client *clp = sp->so_client;
337 	struct rpc_cred *cred = sp->so_cred;
338 
339 	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
340 		return;
341 	nfs4_remove_state_owner(clp, sp);
342 	spin_unlock(&clp->cl_lock);
343 	rpc_destroy_wait_queue(&sp->so_sequence.wait);
344 	put_rpccred(cred);
345 	kfree(sp);
346 }
347 
348 static struct nfs4_state *
349 nfs4_alloc_open_state(void)
350 {
351 	struct nfs4_state *state;
352 
353 	state = kzalloc(sizeof(*state), GFP_KERNEL);
354 	if (!state)
355 		return NULL;
356 	atomic_set(&state->count, 1);
357 	INIT_LIST_HEAD(&state->lock_states);
358 	spin_lock_init(&state->state_lock);
359 	seqlock_init(&state->seqlock);
360 	return state;
361 }
362 
363 void
364 nfs4_state_set_mode_locked(struct nfs4_state *state, mode_t mode)
365 {
366 	if (state->state == mode)
367 		return;
368 	/* NB! List reordering - see the reclaim code for why.  */
369 	if ((mode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
370 		if (mode & FMODE_WRITE)
371 			list_move(&state->open_states, &state->owner->so_states);
372 		else
373 			list_move_tail(&state->open_states, &state->owner->so_states);
374 	}
375 	state->state = mode;
376 }
377 
378 static struct nfs4_state *
379 __nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
380 {
381 	struct nfs_inode *nfsi = NFS_I(inode);
382 	struct nfs4_state *state;
383 
384 	list_for_each_entry(state, &nfsi->open_states, inode_states) {
385 		if (state->owner != owner)
386 			continue;
387 		if (atomic_inc_not_zero(&state->count))
388 			return state;
389 	}
390 	return NULL;
391 }
392 
393 static void
394 nfs4_free_open_state(struct nfs4_state *state)
395 {
396 	kfree(state);
397 }
398 
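/*
 * Find the nfs4_state for this (inode, owner) pair, or allocate and
 * link a new one if none exists.  The lookup is repeated under
 * owner->so_lock and inode->i_lock after the unlocked allocation, so a
 * racing caller cannot create a duplicate state.
 */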
399 struct nfs4_state *
400 nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
401 {
402 	struct nfs4_state *state, *new;
403 	struct nfs_inode *nfsi = NFS_I(inode);
404 
405 	spin_lock(&inode->i_lock);
406 	state = __nfs4_find_state_byowner(inode, owner);
407 	spin_unlock(&inode->i_lock);
408 	if (state)
409 		goto out;
410 	new = nfs4_alloc_open_state();
411 	spin_lock(&owner->so_lock);
412 	spin_lock(&inode->i_lock);
413 	state = __nfs4_find_state_byowner(inode, owner);
414 	if (state == NULL && new != NULL) {
415 		state = new;
416 		state->owner = owner;
417 		atomic_inc(&owner->so_count);
418 		list_add(&state->inode_states, &nfsi->open_states);
419 		state->inode = igrab(inode);
420 		spin_unlock(&inode->i_lock);
421 		/* Note: The reclaim code dictates that we add stateless
422 		 * and read-only stateids to the end of the list */
423 		list_add_tail(&state->open_states, &owner->so_states);
424 		spin_unlock(&owner->so_lock);
425 	} else {
426 		spin_unlock(&inode->i_lock);
427 		spin_unlock(&owner->so_lock);
428 		if (new)
429 			nfs4_free_open_state(new);
430 	}
431 out:
432 	return state;
433 }
434 
435 /*
436  * Beware! Caller must be holding exactly one
437  * reference to clp->cl_sem!
438  */
439 void nfs4_put_open_state(struct nfs4_state *state)
440 {
441 	struct inode *inode = state->inode;
442 	struct nfs4_state_owner *owner = state->owner;
443 
444 	if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
445 		return;
446 	spin_lock(&inode->i_lock);
447 	list_del(&state->inode_states);
448 	list_del(&state->open_states);
449 	spin_unlock(&inode->i_lock);
450 	spin_unlock(&owner->so_lock);
451 	iput(inode);
452 	nfs4_free_open_state(state);
453 	nfs4_put_state_owner(owner);
454 }
455 
456 /*
457  * Close the current file.
458  */
459 static void __nfs4_close(struct path *path, struct nfs4_state *state, mode_t mode, int wait)
460 {
461 	struct nfs4_state_owner *owner = state->owner;
462 	int call_close = 0;
463 	int newstate;
464 
465 	atomic_inc(&owner->so_count);
466 	/* Protect against nfs4_find_state() */
467 	spin_lock(&owner->so_lock);
468 	switch (mode & (FMODE_READ | FMODE_WRITE)) {
469 		case FMODE_READ:
470 			state->n_rdonly--;
471 			break;
472 		case FMODE_WRITE:
473 			state->n_wronly--;
474 			break;
475 		case FMODE_READ|FMODE_WRITE:
476 			state->n_rdwr--;
477 	}
478 	newstate = FMODE_READ|FMODE_WRITE;
479 	if (state->n_rdwr == 0) {
480 		if (state->n_rdonly == 0) {
481 			newstate &= ~FMODE_READ;
482 			call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
483 			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
484 		}
485 		if (state->n_wronly == 0) {
486 			newstate &= ~FMODE_WRITE;
487 			call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
488 			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
489 		}
490 		if (newstate == 0)
491 			clear_bit(NFS_DELEGATED_STATE, &state->flags);
492 	}
493 	nfs4_state_set_mode_locked(state, newstate);
494 	spin_unlock(&owner->so_lock);
495 
496 	if (!call_close) {
497 		nfs4_put_open_state(state);
498 		nfs4_put_state_owner(owner);
499 	} else
500 		nfs4_do_close(path, state, wait);
501 }
502 
503 void nfs4_close_state(struct path *path, struct nfs4_state *state, mode_t mode)
504 {
505 	__nfs4_close(path, state, mode, 0);
506 }
507 
508 void nfs4_close_sync(struct path *path, struct nfs4_state *state, mode_t mode)
509 {
510 	__nfs4_close(path, state, mode, 1);
511 }
512 
513 /*
514  * Search the state->lock_states for an existing lock_owner
515  * that is compatible with current->files
516  */
517 static struct nfs4_lock_state *
518 __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
519 {
520 	struct nfs4_lock_state *pos;
521 	list_for_each_entry(pos, &state->lock_states, ls_locks) {
522 		if (pos->ls_owner != fl_owner)
523 			continue;
524 		atomic_inc(&pos->ls_count);
525 		return pos;
526 	}
527 	return NULL;
528 }
529 
530 /*
531  * Allocate a new lock_state structure for the given open state and
532  * lock owner, and reserve a unique lock-owner id for it on the
533  * client.
534  */
535 static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
536 {
537 	struct nfs4_lock_state *lsp;
538 	struct nfs_client *clp = state->owner->so_client;
539 
540 	lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
541 	if (lsp == NULL)
542 		return NULL;
543 	rpc_init_wait_queue(&lsp->ls_sequence.wait, "lock_seqid_waitqueue");
544 	spin_lock_init(&lsp->ls_sequence.lock);
545 	INIT_LIST_HEAD(&lsp->ls_sequence.list);
546 	lsp->ls_seqid.sequence = &lsp->ls_sequence;
547 	atomic_set(&lsp->ls_count, 1);
548 	lsp->ls_owner = fl_owner;
549 	spin_lock(&clp->cl_lock);
550 	nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
551 	spin_unlock(&clp->cl_lock);
552 	INIT_LIST_HEAD(&lsp->ls_locks);
553 	return lsp;
554 }
555 
556 static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
557 {
558 	struct nfs_client *clp = lsp->ls_state->owner->so_client;
559 
560 	spin_lock(&clp->cl_lock);
561 	nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);
562 	spin_unlock(&clp->cl_lock);
563 	rpc_destroy_wait_queue(&lsp->ls_sequence.wait);
564 	kfree(lsp);
565 }
566 
567 /*
568  * Return a compatible lock_state. If no initialized lock_state structure
569  * exists, return an uninitialized one.
570  *
571  * The caller must be holding clp->cl_sem
572  */
573 static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
574 {
575 	struct nfs4_lock_state *lsp, *new = NULL;
576 
577 	for (;;) {
578 		spin_lock(&state->state_lock);
579 		lsp = __nfs4_find_lock_state(state, owner);
580 		if (lsp != NULL)
581 			break;
582 		if (new != NULL) {
583 			new->ls_state = state;
584 			list_add(&new->ls_locks, &state->lock_states);
585 			set_bit(LK_STATE_IN_USE, &state->flags);
586 			lsp = new;
587 			new = NULL;
588 			break;
589 		}
590 		spin_unlock(&state->state_lock);
591 		new = nfs4_alloc_lock_state(state, owner);
592 		if (new == NULL)
593 			return NULL;
594 	}
595 	spin_unlock(&state->state_lock);
596 	if (new != NULL)
597 		nfs4_free_lock_state(new);
598 	return lsp;
599 }
600 
601 /*
602  * Release reference to lock_state, and free it if we see that
603  * it is no longer in use
604  */
605 void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
606 {
607 	struct nfs4_state *state;
608 
609 	if (lsp == NULL)
610 		return;
611 	state = lsp->ls_state;
612 	if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
613 		return;
614 	list_del(&lsp->ls_locks);
615 	if (list_empty(&state->lock_states))
616 		clear_bit(LK_STATE_IN_USE, &state->flags);
617 	spin_unlock(&state->state_lock);
618 	nfs4_free_lock_state(lsp);
619 }
620 
621 static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
622 {
623 	struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;
624 
625 	dst->fl_u.nfs4_fl.owner = lsp;
626 	atomic_inc(&lsp->ls_count);
627 }
628 
629 static void nfs4_fl_release_lock(struct file_lock *fl)
630 {
631 	nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
632 }
633 
634 static struct file_lock_operations nfs4_fl_lock_ops = {
635 	.fl_copy_lock = nfs4_fl_copy_lock,
636 	.fl_release_private = nfs4_fl_release_lock,
637 };
638 
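/*
 * Attach a lock_state to the file_lock, so that byte-range lock
 * requests for this owner share a single lock stateid.  The fl_ops
 * hooks keep the lock_state reference count in step with file_lock
 * copies and releases.
 */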
639 int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
640 {
641 	struct nfs4_lock_state *lsp;
642 
643 	if (fl->fl_ops != NULL)
644 		return 0;
645 	lsp = nfs4_get_lock_state(state, fl->fl_owner);
646 	if (lsp == NULL)
647 		return -ENOMEM;
648 	fl->fl_u.nfs4_fl.owner = lsp;
649 	fl->fl_ops = &nfs4_fl_lock_ops;
650 	return 0;
651 }
652 
653 /*
654  * Byte-range lock aware utility to initialize the stateid of read/write
655  * requests.
656  */
657 void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
658 {
659 	struct nfs4_lock_state *lsp;
660 	int seq;
661 
662 	do {
663 		seq = read_seqbegin(&state->seqlock);
664 		memcpy(dst, &state->stateid, sizeof(*dst));
665 	} while (read_seqretry(&state->seqlock, seq));
666 	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
667 		return;
668 
669 	spin_lock(&state->state_lock);
670 	lsp = __nfs4_find_lock_state(state, fl_owner);
671 	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
672 		memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
673 	spin_unlock(&state->state_lock);
674 	nfs4_put_lock_state(lsp);
675 }
676 
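/*
 * Allocate a seqid token for a single OPEN/CLOSE/LOCK style operation.
 * The token is later queued on the owner's rpc_sequence list by
 * nfs_wait_on_sequence() in order to serialise seqid-bearing RPC calls.
 */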
677 struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
678 {
679 	struct nfs_seqid *new;
680 
681 	new = kmalloc(sizeof(*new), GFP_KERNEL);
682 	if (new != NULL) {
683 		new->sequence = counter;
684 		INIT_LIST_HEAD(&new->list);
685 	}
686 	return new;
687 }
688 
689 void nfs_free_seqid(struct nfs_seqid *seqid)
690 {
691 	if (!list_empty(&seqid->list)) {
692 		struct rpc_sequence *sequence = seqid->sequence->sequence;
693 
694 		spin_lock(&sequence->lock);
695 		list_del(&seqid->list);
696 		spin_unlock(&sequence->lock);
697 		rpc_wake_up(&sequence->wait);
698 	}
699 	kfree(seqid);
700 }
701 
702 /*
703  * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
704  * failed with a seqid incrementing error -
705  * see the comments in nfs_fs.h:seqid_mutating_error()
706  */
707 static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
708 {
709 	BUG_ON(list_first_entry(&seqid->sequence->sequence->list, struct nfs_seqid, list) != seqid);
710 	switch (status) {
711 		case 0:
712 			break;
713 		case -NFS4ERR_BAD_SEQID:
714 			if (seqid->sequence->flags & NFS_SEQID_CONFIRMED)
715 				return;
716 			printk(KERN_WARNING "NFS: v4 server returned a bad"
717 					" sequence-id error on an"
718 					" unconfirmed sequence %p!\n",
719 					seqid->sequence);
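			/* fall through */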
720 		case -NFS4ERR_STALE_CLIENTID:
721 		case -NFS4ERR_STALE_STATEID:
722 		case -NFS4ERR_BAD_STATEID:
723 		case -NFS4ERR_BADXDR:
724 		case -NFS4ERR_RESOURCE:
725 		case -NFS4ERR_NOFILEHANDLE:
726 			/* Non-seqid mutating errors */
727 			return;
728 	}
729 	/*
730 	 * Note: no locking needed as we are guaranteed to be first
731 	 * on the sequence list
732 	 */
733 	seqid->sequence->counter++;
734 }
735 
736 void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
737 {
738 	if (status == -NFS4ERR_BAD_SEQID) {
739 		struct nfs4_state_owner *sp = container_of(seqid->sequence,
740 				struct nfs4_state_owner, so_seqid);
741 		nfs4_drop_state_owner(sp);
742 	}
743 	nfs_increment_seqid(status, seqid);
744 }
745 
746 /*
747  * Increment the seqid if the LOCK/LOCKU succeeded, or
748  * failed with a seqid incrementing error -
749  * see the comments in nfs_fs.h:seqid_mutating_error()
750  */
751 void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
752 {
753 	nfs_increment_seqid(status, seqid);
754 }
755 
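/*
 * Serialise operations that share this seqid counter: queue the seqid
 * on the owner's rpc_sequence list and, unless it is at the head of
 * that list, put the RPC task to sleep and return -EAGAIN so the call
 * is retried once the task is woken.
 */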
756 int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
757 {
758 	struct rpc_sequence *sequence = seqid->sequence->sequence;
759 	int status = 0;
760 
761 	spin_lock(&sequence->lock);
762 	if (list_empty(&seqid->list))
763 		list_add_tail(&seqid->list, &sequence->list);
764 	if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid)
765 		goto unlock;
766 	rpc_sleep_on(&sequence->wait, task, NULL);
767 	status = -EAGAIN;
768 unlock:
769 	spin_unlock(&sequence->lock);
770 	return status;
771 }
772 
773 static int reclaimer(void *);
774 
775 static inline void nfs4_clear_recover_bit(struct nfs_client *clp)
776 {
777 	smp_mb__before_clear_bit();
778 	clear_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state);
779 	smp_mb__after_clear_bit();
780 	wake_up_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER);
781 	rpc_wake_up(&clp->cl_rpcwaitq);
782 }
783 
784 /*
785  * State recovery routine
786  */
787 static void nfs4_recover_state(struct nfs_client *clp)
788 {
789 	struct task_struct *task;
790 
791 	__module_get(THIS_MODULE);
792 	atomic_inc(&clp->cl_count);
793 	task = kthread_run(reclaimer, clp, "%s-reclaim",
794 				rpc_peeraddr2str(clp->cl_rpcclient,
795 							RPC_DISPLAY_ADDR));
796 	if (!IS_ERR(task))
797 		return;
798 	nfs4_clear_recover_bit(clp);
799 	nfs_put_client(clp);
800 	module_put(THIS_MODULE);
801 }
802 
803 /*
804  * Schedule a state recovery attempt
805  */
806 void nfs4_schedule_state_recovery(struct nfs_client *clp)
807 {
808 	if (!clp)
809 		return;
810 	if (test_and_set_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) == 0)
811 		nfs4_recover_state(clp);
812 }
813 
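/*
 * Walk the inode's lock list and ask the server to re-establish every
 * POSIX or flock lock that belongs to this open state, using the given
 * recovery ops.
 */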
814 static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state)
815 {
816 	struct inode *inode = state->inode;
817 	struct file_lock *fl;
818 	int status = 0;
819 
820 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
821 		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
822 			continue;
823 		if (nfs_file_open_context(fl->fl_file)->state != state)
824 			continue;
825 		status = ops->recover_lock(state, fl);
826 		if (status >= 0)
827 			continue;
828 		switch (status) {
829 			default:
830 				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
831 						__func__, status);
832 			case -NFS4ERR_EXPIRED:
833 			case -NFS4ERR_NO_GRACE:
834 			case -NFS4ERR_RECLAIM_BAD:
835 			case -NFS4ERR_RECLAIM_CONFLICT:
836 				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
837 				break;
838 			case -NFS4ERR_STALE_CLIENTID:
839 				goto out_err;
840 		}
841 	}
842 	return 0;
843 out_err:
844 	return status;
845 }
846 
847 static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct nfs4_state_owner *sp)
848 {
849 	struct nfs4_state *state;
850 	struct nfs4_lock_state *lock;
851 	int status = 0;
852 
853 	/* Note: we rely on the sp->so_states list being ordered
854 	 * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
855 	 * states first.
856 	 * This is needed to ensure that the server won't give us any
857 	 * read delegations that we have to return if, say, we are
858 	 * recovering after a network partition or a reboot from a
859 	 * server that doesn't support a grace period.
860 	 */
861 	list_for_each_entry(state, &sp->so_states, open_states) {
862 		if (state->state == 0)
863 			continue;
864 		status = ops->recover_open(sp, state);
865 		if (status >= 0) {
866 			status = nfs4_reclaim_locks(ops, state);
867 			if (status < 0)
868 				goto out_err;
869 			list_for_each_entry(lock, &state->lock_states, ls_locks) {
870 				if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
871 					printk(KERN_ERR "%s: Lock reclaim failed!\n",
872 							__func__);
873 			}
874 			continue;
875 		}
876 		switch (status) {
877 			default:
878 				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
879 						__func__, status);
880 			case -ENOENT:
881 			case -NFS4ERR_RECLAIM_BAD:
882 			case -NFS4ERR_RECLAIM_CONFLICT:
883 				/*
884 				 * Open state on this file cannot be recovered
885 				 * All we can do is revert to using the zero stateid.
886 				 */
887 				memset(state->stateid.data, 0,
888 					sizeof(state->stateid.data));
889 				/* Mark the file as being 'closed' */
890 				state->state = 0;
891 				break;
892 			case -NFS4ERR_EXPIRED:
893 			case -NFS4ERR_NO_GRACE:
894 			case -NFS4ERR_STALE_CLIENTID:
895 				goto out_err;
896 		}
897 	}
898 	return 0;
899 out_err:
900 	return status;
901 }
902 
903 static void nfs4_state_mark_reclaim(struct nfs_client *clp)
904 {
905 	struct nfs4_state_owner *sp;
906 	struct rb_node *pos;
907 	struct nfs4_state *state;
908 	struct nfs4_lock_state *lock;
909 
910 	/* Reset all sequence ids to zero */
911 	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
912 		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
913 		sp->so_seqid.counter = 0;
914 		sp->so_seqid.flags = 0;
915 		spin_lock(&sp->so_lock);
916 		list_for_each_entry(state, &sp->so_states, open_states) {
917 			clear_bit(NFS_DELEGATED_STATE, &state->flags);
918 			clear_bit(NFS_O_RDONLY_STATE, &state->flags);
919 			clear_bit(NFS_O_WRONLY_STATE, &state->flags);
920 			clear_bit(NFS_O_RDWR_STATE, &state->flags);
921 			list_for_each_entry(lock, &state->lock_states, ls_locks) {
922 				lock->ls_seqid.counter = 0;
923 				lock->ls_seqid.flags = 0;
924 				lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
925 			}
926 		}
927 		spin_unlock(&sp->so_lock);
928 	}
929 }
930 
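/*
 * Body of the state recovery thread.  With clp->cl_sem held for
 * writing, it first tries to renew the existing lease; if that is not
 * possible it establishes a fresh clientid, marks all open, lock and
 * delegation state for reclaim, and then replays that state against the
 * server for every state owner.
 */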
931 static int reclaimer(void *ptr)
932 {
933 	struct nfs_client *clp = ptr;
934 	struct nfs4_state_owner *sp;
935 	struct rb_node *pos;
936 	struct nfs4_state_recovery_ops *ops;
937 	struct rpc_cred *cred;
938 	int status = 0;
939 
940 	allow_signal(SIGKILL);
941 
942 	/* Ensure exclusive access to NFSv4 state */
943 	down_write(&clp->cl_sem);
944 	/* Are there any NFS mounts out there? */
945 	if (list_empty(&clp->cl_superblocks))
946 		goto out;
947 restart_loop:
948 	ops = &nfs4_network_partition_recovery_ops;
949 	/* Are there any open files on this volume? */
950 	cred = nfs4_get_renew_cred(clp);
951 	if (cred != NULL) {
952 		/* Yes there are: try to renew the old lease */
953 		status = nfs4_proc_renew(clp, cred);
954 		put_rpccred(cred);
955 		switch (status) {
956 			case 0:
957 			case -NFS4ERR_CB_PATH_DOWN:
958 				goto out;
959 			case -NFS4ERR_STALE_CLIENTID:
960 			case -NFS4ERR_LEASE_MOVED:
961 				ops = &nfs4_reboot_recovery_ops;
962 		}
963 	} else {
964 		/* "reboot" to ensure we clear all state on the server */
965 		clp->cl_boot_time = CURRENT_TIME;
966 	}
967 	/* We're going to have to re-establish a clientid */
968 	nfs4_state_mark_reclaim(clp);
969 	status = -ENOENT;
970 	cred = nfs4_get_setclientid_cred(clp);
971 	if (cred != NULL) {
972 		status = nfs4_init_client(clp, cred);
973 		put_rpccred(cred);
974 		/* Handle case where the user hasn't set up machine creds */
975 		if (status == -EACCES && cred == clp->cl_machine_cred) {
976 			nfs4_clear_machine_cred(clp);
977 			goto restart_loop;
978 		}
979 	}
980 	if (status)
981 		goto out_error;
982 	/* Mark all delegations for reclaim */
983 	nfs_delegation_mark_reclaim(clp);
984 	/* Note: list is protected by exclusive lock on clp->cl_sem */
985 	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
986 		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
987 		status = nfs4_reclaim_open_state(ops, sp);
988 		if (status < 0) {
989 			if (status == -NFS4ERR_NO_GRACE) {
990 				ops = &nfs4_network_partition_recovery_ops;
991 				status = nfs4_reclaim_open_state(ops, sp);
992 			}
993 			if (status == -NFS4ERR_STALE_CLIENTID)
994 				goto restart_loop;
995 			if (status == -NFS4ERR_EXPIRED)
996 				goto restart_loop;
997 		}
998 	}
999 	nfs_delegation_reap_unclaimed(clp);
1000 out:
1001 	up_write(&clp->cl_sem);
1002 	if (status == -NFS4ERR_CB_PATH_DOWN)
1003 		nfs_handle_cb_pathdown(clp);
1004 	nfs4_clear_recover_bit(clp);
1005 	nfs_put_client(clp);
1006 	module_put_and_exit(0);
1007 	return 0;
1008 out_error:
1009 	printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %s"
1010 			" with error %d\n", clp->cl_hostname, -status);
1011 	set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
1012 	goto out;
1013 }
1014 
1015 /*
1016  * Local variables:
1017  *  c-basic-offset: 8
1018  * End:
1019  */
1020