xref: /linux/fs/nfsd/nfs4state.c (revision b0d5c81e872ed21de1e56feb0fa6e4161da7be61)
1 /*
2 *  Copyright (c) 2001 The Regents of the University of Michigan.
3 *  All rights reserved.
4 *
5 *  Kendrick Smith <kmsmith@umich.edu>
6 *  Andy Adamson <kandros@umich.edu>
7 *
8 *  Redistribution and use in source and binary forms, with or without
9 *  modification, are permitted provided that the following conditions
10 *  are met:
11 *
12 *  1. Redistributions of source code must retain the above copyright
13 *     notice, this list of conditions and the following disclaimer.
14 *  2. Redistributions in binary form must reproduce the above copyright
15 *     notice, this list of conditions and the following disclaimer in the
16 *     documentation and/or other materials provided with the distribution.
17 *  3. Neither the name of the University nor the names of its
18 *     contributors may be used to endorse or promote products derived
19 *     from this software without specific prior written permission.
20 *
21 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
28 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34 
35 #include <linux/file.h>
36 #include <linux/fs.h>
37 #include <linux/slab.h>
38 #include <linux/namei.h>
39 #include <linux/swap.h>
40 #include <linux/pagemap.h>
41 #include <linux/ratelimit.h>
42 #include <linux/sunrpc/svcauth_gss.h>
43 #include <linux/sunrpc/addr.h>
44 #include <linux/jhash.h>
45 #include "xdr4.h"
46 #include "xdr4cb.h"
47 #include "vfs.h"
48 #include "current_stateid.h"
49 
50 #include "netns.h"
51 #include "pnfs.h"
52 
53 #define NFSDDBG_FACILITY                NFSDDBG_PROC
54 
55 #define all_ones {{~0,~0},~0}
56 static const stateid_t one_stateid = {
57 	.si_generation = ~0,
58 	.si_opaque = all_ones,
59 };
60 static const stateid_t zero_stateid = {
61 	/* all fields zero */
62 };
63 static const stateid_t currentstateid = {
64 	.si_generation = 1,
65 };
66 static const stateid_t close_stateid = {
67 	.si_generation = 0xffffffffU,
68 };
69 
70 static u64 current_sessionid = 1;
71 
72 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
73 #define ONE_STATEID(stateid)  (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
74 #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
75 #define CLOSE_STATEID(stateid)  (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
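/*
 * For illustration, a caller validating an incoming stateid typically
 * tests it against these special values before doing a real lookup,
 * along the lines of:
 *
 *	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
 *	    CLOSE_STATEID(stateid))
 *		return nfserr_bad_stateid;
 */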
76 
77 /* forward declarations */
78 static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
79 static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
80 
81 /* Locking: */
82 
83 /*
84  * Currently used for the del_recall_lru and file hash table.  In an
85  * effort to decrease the scope of the client_mutex, this spinlock may
86  * eventually cover more:
87  */
88 static DEFINE_SPINLOCK(state_lock);
89 
90 enum nfsd4_st_mutex_lock_subclass {
91 	OPEN_STATEID_MUTEX = 0,
92 	LOCK_STATEID_MUTEX = 1,
93 };
94 
95 /*
96  * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
97  * the refcount on the open stateid to drop.
98  */
99 static DECLARE_WAIT_QUEUE_HEAD(close_wq);
100 
101 static struct kmem_cache *openowner_slab;
102 static struct kmem_cache *lockowner_slab;
103 static struct kmem_cache *file_slab;
104 static struct kmem_cache *stateid_slab;
105 static struct kmem_cache *deleg_slab;
106 static struct kmem_cache *odstate_slab;
107 
108 static void free_session(struct nfsd4_session *);
109 
110 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
111 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
112 
113 static bool is_session_dead(struct nfsd4_session *ses)
114 {
115 	return ses->se_flags & NFS4_SESSION_DEAD;
116 }
117 
118 static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
119 {
120 	if (atomic_read(&ses->se_ref) > ref_held_by_me)
121 		return nfserr_jukebox;
122 	ses->se_flags |= NFS4_SESSION_DEAD;
123 	return nfs_ok;
124 }
125 
126 static bool is_client_expired(struct nfs4_client *clp)
127 {
128 	return clp->cl_time == 0;
129 }
130 
131 static __be32 get_client_locked(struct nfs4_client *clp)
132 {
133 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
134 
135 	lockdep_assert_held(&nn->client_lock);
136 
137 	if (is_client_expired(clp))
138 		return nfserr_expired;
139 	atomic_inc(&clp->cl_refcount);
140 	return nfs_ok;
141 }
142 
143 /* must be called under the client_lock */
144 static inline void
145 renew_client_locked(struct nfs4_client *clp)
146 {
147 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
148 
149 	if (is_client_expired(clp)) {
150 		WARN_ON(1);
151 		printk("%s: client (clientid %08x/%08x) already expired\n",
152 			__func__,
153 			clp->cl_clientid.cl_boot,
154 			clp->cl_clientid.cl_id);
155 		return;
156 	}
157 
158 	dprintk("renewing client (clientid %08x/%08x)\n",
159 			clp->cl_clientid.cl_boot,
160 			clp->cl_clientid.cl_id);
161 	list_move_tail(&clp->cl_lru, &nn->client_lru);
162 	clp->cl_time = get_seconds();
163 }
164 
165 static void put_client_renew_locked(struct nfs4_client *clp)
166 {
167 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
168 
169 	lockdep_assert_held(&nn->client_lock);
170 
171 	if (!atomic_dec_and_test(&clp->cl_refcount))
172 		return;
173 	if (!is_client_expired(clp))
174 		renew_client_locked(clp);
175 }
176 
177 static void put_client_renew(struct nfs4_client *clp)
178 {
179 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
180 
181 	if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
182 		return;
183 	if (!is_client_expired(clp))
184 		renew_client_locked(clp);
185 	spin_unlock(&nn->client_lock);
186 }
187 
188 static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
189 {
190 	__be32 status;
191 
192 	if (is_session_dead(ses))
193 		return nfserr_badsession;
194 	status = get_client_locked(ses->se_client);
195 	if (status)
196 		return status;
197 	atomic_inc(&ses->se_ref);
198 	return nfs_ok;
199 }
200 
201 static void nfsd4_put_session_locked(struct nfsd4_session *ses)
202 {
203 	struct nfs4_client *clp = ses->se_client;
204 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
205 
206 	lockdep_assert_held(&nn->client_lock);
207 
208 	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
209 		free_session(ses);
210 	put_client_renew_locked(clp);
211 }
212 
213 static void nfsd4_put_session(struct nfsd4_session *ses)
214 {
215 	struct nfs4_client *clp = ses->se_client;
216 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
217 
218 	spin_lock(&nn->client_lock);
219 	nfsd4_put_session_locked(ses);
220 	spin_unlock(&nn->client_lock);
221 }
222 
223 static struct nfsd4_blocked_lock *
224 find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
225 			struct nfsd_net *nn)
226 {
227 	struct nfsd4_blocked_lock *cur, *found = NULL;
228 
229 	spin_lock(&nn->blocked_locks_lock);
230 	list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
231 		if (fh_match(fh, &cur->nbl_fh)) {
232 			list_del_init(&cur->nbl_list);
233 			list_del_init(&cur->nbl_lru);
234 			found = cur;
235 			break;
236 		}
237 	}
238 	spin_unlock(&nn->blocked_locks_lock);
239 	if (found)
240 		posix_unblock_lock(&found->nbl_lock);
241 	return found;
242 }
243 
244 static struct nfsd4_blocked_lock *
245 find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
246 			struct nfsd_net *nn)
247 {
248 	struct nfsd4_blocked_lock *nbl;
249 
250 	nbl = find_blocked_lock(lo, fh, nn);
251 	if (!nbl) {
252 		nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
253 		if (nbl) {
254 			fh_copy_shallow(&nbl->nbl_fh, fh);
255 			locks_init_lock(&nbl->nbl_lock);
256 			nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
257 					&nfsd4_cb_notify_lock_ops,
258 					NFSPROC4_CLNT_CB_NOTIFY_LOCK);
259 		}
260 	}
261 	return nbl;
262 }
263 
264 static void
265 free_blocked_lock(struct nfsd4_blocked_lock *nbl)
266 {
267 	locks_release_private(&nbl->nbl_lock);
268 	kfree(nbl);
269 }
270 
271 static void
272 remove_blocked_locks(struct nfs4_lockowner *lo)
273 {
274 	struct nfs4_client *clp = lo->lo_owner.so_client;
275 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
276 	struct nfsd4_blocked_lock *nbl;
277 	LIST_HEAD(reaplist);
278 
279 	/* Dequeue all blocked locks */
280 	spin_lock(&nn->blocked_locks_lock);
281 	while (!list_empty(&lo->lo_blocked)) {
282 		nbl = list_first_entry(&lo->lo_blocked,
283 					struct nfsd4_blocked_lock,
284 					nbl_list);
285 		list_del_init(&nbl->nbl_list);
286 		list_move(&nbl->nbl_lru, &reaplist);
287 	}
288 	spin_unlock(&nn->blocked_locks_lock);
289 
290 	/* Now free them */
291 	while (!list_empty(&reaplist)) {
292 		nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
293 					nbl_lru);
294 		list_del_init(&nbl->nbl_lru);
295 		posix_unblock_lock(&nbl->nbl_lock);
296 		free_blocked_lock(nbl);
297 	}
298 }
299 
300 static int
301 nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
302 {
303 	/*
304 	 * Since this is just an optimization, we don't try very hard if it
305 	 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
306 	 * just quit trying on anything else.
307 	 */
308 	switch (task->tk_status) {
309 	case -NFS4ERR_DELAY:
310 		rpc_delay(task, 1 * HZ);
311 		return 0;
312 	default:
313 		return 1;
314 	}
315 }
316 
317 static void
318 nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
319 {
320 	struct nfsd4_blocked_lock	*nbl = container_of(cb,
321 						struct nfsd4_blocked_lock, nbl_cb);
322 
323 	free_blocked_lock(nbl);
324 }
325 
326 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
327 	.done		= nfsd4_cb_notify_lock_done,
328 	.release	= nfsd4_cb_notify_lock_release,
329 };
330 
331 static inline struct nfs4_stateowner *
332 nfs4_get_stateowner(struct nfs4_stateowner *sop)
333 {
334 	atomic_inc(&sop->so_count);
335 	return sop;
336 }
337 
338 static int
339 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
340 {
341 	return (sop->so_owner.len == owner->len) &&
342 		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
343 }
344 
345 static struct nfs4_openowner *
346 find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
347 			struct nfs4_client *clp)
348 {
349 	struct nfs4_stateowner *so;
350 
351 	lockdep_assert_held(&clp->cl_lock);
352 
353 	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
354 			    so_strhash) {
355 		if (!so->so_is_open_owner)
356 			continue;
357 		if (same_owner_str(so, &open->op_owner))
358 			return openowner(nfs4_get_stateowner(so));
359 	}
360 	return NULL;
361 }
362 
363 static struct nfs4_openowner *
364 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
365 			struct nfs4_client *clp)
366 {
367 	struct nfs4_openowner *oo;
368 
369 	spin_lock(&clp->cl_lock);
370 	oo = find_openstateowner_str_locked(hashval, open, clp);
371 	spin_unlock(&clp->cl_lock);
372 	return oo;
373 }
374 
375 static inline u32
376 opaque_hashval(const void *ptr, int nbytes)
377 {
378 	unsigned char *cptr = (unsigned char *) ptr;
379 
380 	u32 x = 0;
381 	while (nbytes--) {
382 		x *= 37;
383 		x += *cptr++;
384 	}
385 	return x;
386 }
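/*
 * For example, hashing the two bytes { 0x01, 0x02 } gives
 * (0 * 37 + 0x01) * 37 + 0x02 = 39; callers such as ownerstr_hashval()
 * below then mask the result down to the size of their hash table.
 */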
387 
388 static void nfsd4_free_file_rcu(struct rcu_head *rcu)
389 {
390 	struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
391 
392 	kmem_cache_free(file_slab, fp);
393 }
394 
395 void
396 put_nfs4_file(struct nfs4_file *fi)
397 {
398 	might_lock(&state_lock);
399 
400 	if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
401 		hlist_del_rcu(&fi->fi_hash);
402 		spin_unlock(&state_lock);
403 		WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
404 		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
405 		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
406 	}
407 }
408 
409 static struct file *
410 __nfs4_get_fd(struct nfs4_file *f, int oflag)
411 {
412 	if (f->fi_fds[oflag])
413 		return get_file(f->fi_fds[oflag]);
414 	return NULL;
415 }
416 
417 static struct file *
418 find_writeable_file_locked(struct nfs4_file *f)
419 {
420 	struct file *ret;
421 
422 	lockdep_assert_held(&f->fi_lock);
423 
424 	ret = __nfs4_get_fd(f, O_WRONLY);
425 	if (!ret)
426 		ret = __nfs4_get_fd(f, O_RDWR);
427 	return ret;
428 }
429 
430 static struct file *
431 find_writeable_file(struct nfs4_file *f)
432 {
433 	struct file *ret;
434 
435 	spin_lock(&f->fi_lock);
436 	ret = find_writeable_file_locked(f);
437 	spin_unlock(&f->fi_lock);
438 
439 	return ret;
440 }
441 
442 static struct file *find_readable_file_locked(struct nfs4_file *f)
443 {
444 	struct file *ret;
445 
446 	lockdep_assert_held(&f->fi_lock);
447 
448 	ret = __nfs4_get_fd(f, O_RDONLY);
449 	if (!ret)
450 		ret = __nfs4_get_fd(f, O_RDWR);
451 	return ret;
452 }
453 
454 static struct file *
455 find_readable_file(struct nfs4_file *f)
456 {
457 	struct file *ret;
458 
459 	spin_lock(&f->fi_lock);
460 	ret = find_readable_file_locked(f);
461 	spin_unlock(&f->fi_lock);
462 
463 	return ret;
464 }
465 
466 struct file *
467 find_any_file(struct nfs4_file *f)
468 {
469 	struct file *ret;
470 
471 	spin_lock(&f->fi_lock);
472 	ret = __nfs4_get_fd(f, O_RDWR);
473 	if (!ret) {
474 		ret = __nfs4_get_fd(f, O_WRONLY);
475 		if (!ret)
476 			ret = __nfs4_get_fd(f, O_RDONLY);
477 	}
478 	spin_unlock(&f->fi_lock);
479 	return ret;
480 }
481 
482 static atomic_long_t num_delegations;
483 unsigned long max_delegations;
484 
485 /*
486  * Open owner state (share locks)
487  */
488 
489 /* hash tables for lock and open owners */
490 #define OWNER_HASH_BITS              8
491 #define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
492 #define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)
493 
494 static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
495 {
496 	unsigned int ret;
497 
498 	ret = opaque_hashval(ownername->data, ownername->len);
499 	return ret & OWNER_HASH_MASK;
500 }
501 
502 /* hash table for nfs4_file */
503 #define FILE_HASH_BITS                   8
504 #define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)
505 
506 static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
507 {
508 	return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
509 }
510 
511 static unsigned int file_hashval(struct knfsd_fh *fh)
512 {
513 	return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
514 }
515 
516 static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
517 
518 static void
519 __nfs4_file_get_access(struct nfs4_file *fp, u32 access)
520 {
521 	lockdep_assert_held(&fp->fi_lock);
522 
523 	if (access & NFS4_SHARE_ACCESS_WRITE)
524 		atomic_inc(&fp->fi_access[O_WRONLY]);
525 	if (access & NFS4_SHARE_ACCESS_READ)
526 		atomic_inc(&fp->fi_access[O_RDONLY]);
527 }
528 
529 static __be32
530 nfs4_file_get_access(struct nfs4_file *fp, u32 access)
531 {
532 	lockdep_assert_held(&fp->fi_lock);
533 
534 	/* Does this access mode make sense? */
535 	if (access & ~NFS4_SHARE_ACCESS_BOTH)
536 		return nfserr_inval;
537 
538 	/* Does it conflict with a deny mode already set? */
539 	if ((access & fp->fi_share_deny) != 0)
540 		return nfserr_share_denied;
541 
542 	__nfs4_file_get_access(fp, access);
543 	return nfs_ok;
544 }
545 
546 static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
547 {
548 	/* Common case is that there is no deny mode. */
549 	if (deny) {
550 		/* Does this deny mode make sense? */
551 		if (deny & ~NFS4_SHARE_DENY_BOTH)
552 			return nfserr_inval;
553 
554 		if ((deny & NFS4_SHARE_DENY_READ) &&
555 		    atomic_read(&fp->fi_access[O_RDONLY]))
556 			return nfserr_share_denied;
557 
558 		if ((deny & NFS4_SHARE_DENY_WRITE) &&
559 		    atomic_read(&fp->fi_access[O_WRONLY]))
560 			return nfserr_share_denied;
561 	}
562 	return nfs_ok;
563 }
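/*
 * For example, if an existing open already holds NFS4_SHARE_ACCESS_WRITE,
 * a later open requesting NFS4_SHARE_DENY_WRITE fails the check above with
 * nfserr_share_denied, while an open asking only for NFS4_SHARE_ACCESS_READ
 * with no deny bits passes both nfs4_file_get_access() and this check.
 */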
564 
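/*
 * Drop the struct file for an access mode once its last reference goes
 * away; when neither read nor write access remains, the shared O_RDWR
 * file is dropped as well.  The swap()s move the files out from under
 * fi_lock so the fput()s can happen after the lock is released.
 */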
565 static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
566 {
567 	might_lock(&fp->fi_lock);
568 
569 	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
570 		struct file *f1 = NULL;
571 		struct file *f2 = NULL;
572 
573 		swap(f1, fp->fi_fds[oflag]);
574 		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
575 			swap(f2, fp->fi_fds[O_RDWR]);
576 		spin_unlock(&fp->fi_lock);
577 		if (f1)
578 			fput(f1);
579 		if (f2)
580 			fput(f2);
581 	}
582 }
583 
584 static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
585 {
586 	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);
587 
588 	if (access & NFS4_SHARE_ACCESS_WRITE)
589 		__nfs4_file_put_access(fp, O_WRONLY);
590 	if (access & NFS4_SHARE_ACCESS_READ)
591 		__nfs4_file_put_access(fp, O_RDONLY);
592 }
593 
594 /*
595  * Allocate a new open/delegation state counter. This is needed for
596  * pNFS for proper return on close semantics.
597  *
598  * Note that we only allocate it for pNFS-enabled exports, otherwise
599  * all pointers to struct nfs4_clnt_odstate are always NULL.
600  */
601 static struct nfs4_clnt_odstate *
602 alloc_clnt_odstate(struct nfs4_client *clp)
603 {
604 	struct nfs4_clnt_odstate *co;
605 
606 	co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
607 	if (co) {
608 		co->co_client = clp;
609 		refcount_set(&co->co_odcount, 1);
610 	}
611 	return co;
612 }
613 
614 static void
615 hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
616 {
617 	struct nfs4_file *fp = co->co_file;
618 
619 	lockdep_assert_held(&fp->fi_lock);
620 	list_add(&co->co_perfile, &fp->fi_clnt_odstate);
621 }
622 
623 static inline void
624 get_clnt_odstate(struct nfs4_clnt_odstate *co)
625 {
626 	if (co)
627 		refcount_inc(&co->co_odcount);
628 }
629 
630 static void
631 put_clnt_odstate(struct nfs4_clnt_odstate *co)
632 {
633 	struct nfs4_file *fp;
634 
635 	if (!co)
636 		return;
637 
638 	fp = co->co_file;
639 	if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
640 		list_del(&co->co_perfile);
641 		spin_unlock(&fp->fi_lock);
642 
643 		nfsd4_return_all_file_layouts(co->co_client, fp);
644 		kmem_cache_free(odstate_slab, co);
645 	}
646 }
647 
648 static struct nfs4_clnt_odstate *
649 find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
650 {
651 	struct nfs4_clnt_odstate *co;
652 	struct nfs4_client *cl;
653 
654 	if (!new)
655 		return NULL;
656 
657 	cl = new->co_client;
658 
659 	spin_lock(&fp->fi_lock);
660 	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
661 		if (co->co_client == cl) {
662 			get_clnt_odstate(co);
663 			goto out;
664 		}
665 	}
666 	co = new;
667 	co->co_file = fp;
668 	hash_clnt_odstate_locked(new);
669 out:
670 	spin_unlock(&fp->fi_lock);
671 	return co;
672 }
673 
674 struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
675 				  void (*sc_free)(struct nfs4_stid *))
676 {
677 	struct nfs4_stid *stid;
678 	int new_id;
679 
680 	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
681 	if (!stid)
682 		return NULL;
683 
684 	idr_preload(GFP_KERNEL);
685 	spin_lock(&cl->cl_lock);
686 	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
687 	spin_unlock(&cl->cl_lock);
688 	idr_preload_end();
689 	if (new_id < 0)
690 		goto out_free;
691 
692 	stid->sc_free = sc_free;
693 	stid->sc_client = cl;
694 	stid->sc_stateid.si_opaque.so_id = new_id;
695 	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
696 	/* Will be incremented before return to client: */
697 	refcount_set(&stid->sc_count, 1);
698 	spin_lock_init(&stid->sc_lock);
699 
700 	/*
701 	 * It shouldn't be a problem to reuse an opaque stateid value.
702 	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
703 	 * example, a stray write retransmission could be accepted by
704 	 * the server when it should have been rejected.  Therefore,
705 	 * adopt a trick from the sctp code to attempt to maximize the
706 	 * amount of time until an id is reused, by ensuring they always
707 	 * "increase" (mod INT_MAX):
708 	 */
709 	return stid;
710 out_free:
711 	kmem_cache_free(slab, stid);
712 	return NULL;
713 }
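/*
 * The resulting stateid's opaque part is thus the client's clientid
 * (boot time + id) plus the cyclically allocated idr value, while
 * si_generation stays zero until the stateid is handed out via
 * nfs4_inc_and_copy_stateid() (or set explicitly, as delegations do).
 */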
714 
715 static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
716 {
717 	struct nfs4_stid *stid;
718 
719 	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
720 	if (!stid)
721 		return NULL;
722 
723 	return openlockstateid(stid);
724 }
725 
726 static void nfs4_free_deleg(struct nfs4_stid *stid)
727 {
728 	kmem_cache_free(deleg_slab, stid);
729 	atomic_long_dec(&num_delegations);
730 }
731 
732 /*
733  * When we recall a delegation, we should be careful not to hand it
734  * out again straight away.
735  * To ensure this we keep a pair of bloom filters ('new' and 'old')
736  * in which the filehandles of recalled delegations are "stored".
737  * If a filehandle appears in either filter, a delegation is blocked.
738  * When a delegation is recalled, the filehandle is stored in the "new"
739  * filter.
740  * Every 30 seconds we swap the filters and clear the "new" one,
741  * unless both are empty of course.
742  *
743  * Each filter is 256 bits.  We hash the filehandle to 32 bits and use the
744  * low 3 bytes as hash-table indices.
745  *
746  * 'blocked_delegations_lock', which is always taken in block_delegations(),
747  * is used to manage concurrent access.  Testing does not need the lock
748  * except when swapping the two filters.
749  */
750 static DEFINE_SPINLOCK(blocked_delegations_lock);
751 static struct bloom_pair {
752 	int	entries, old_entries;
753 	time_t	swap_time;
754 	int	new; /* index into 'set' */
755 	DECLARE_BITMAP(set[2], 256);
756 } blocked_delegations;
757 
758 static int delegation_blocked(struct knfsd_fh *fh)
759 {
760 	u32 hash;
761 	struct bloom_pair *bd = &blocked_delegations;
762 
763 	if (bd->entries == 0)
764 		return 0;
765 	if (seconds_since_boot() - bd->swap_time > 30) {
766 		spin_lock(&blocked_delegations_lock);
767 		if (seconds_since_boot() - bd->swap_time > 30) {
768 			bd->entries -= bd->old_entries;
769 			bd->old_entries = bd->entries;
770 			memset(bd->set[bd->new], 0,
771 			       sizeof(bd->set[0]));
772 			bd->new = 1 - bd->new;
773 			bd->swap_time = seconds_since_boot();
774 		}
775 		spin_unlock(&blocked_delegations_lock);
776 	}
777 	hash = jhash(&fh->fh_base, fh->fh_size, 0);
778 	if (test_bit(hash&255, bd->set[0]) &&
779 	    test_bit((hash>>8)&255, bd->set[0]) &&
780 	    test_bit((hash>>16)&255, bd->set[0]))
781 		return 1;
782 
783 	if (test_bit(hash&255, bd->set[1]) &&
784 	    test_bit((hash>>8)&255, bd->set[1]) &&
785 	    test_bit((hash>>16)&255, bd->set[1]))
786 		return 1;
787 
788 	return 0;
789 }
790 
791 static void block_delegations(struct knfsd_fh *fh)
792 {
793 	u32 hash;
794 	struct bloom_pair *bd = &blocked_delegations;
795 
796 	hash = jhash(&fh->fh_base, fh->fh_size, 0);
797 
798 	spin_lock(&blocked_delegations_lock);
799 	__set_bit(hash&255, bd->set[bd->new]);
800 	__set_bit((hash>>8)&255, bd->set[bd->new]);
801 	__set_bit((hash>>16)&255, bd->set[bd->new]);
802 	if (bd->entries == 0)
803 		bd->swap_time = seconds_since_boot();
804 	bd->entries += 1;
805 	spin_unlock(&blocked_delegations_lock);
806 }
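/*
 * For example, a filehandle that hashes to 0x00030201 sets (or tests)
 * bits 0x01, 0x02 and 0x03 of a 256-bit filter; a delegation is only
 * considered blocked when all three of its bits are found set in the
 * same filter.
 */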
807 
808 static struct nfs4_delegation *
809 alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
810 		 struct nfs4_clnt_odstate *odstate)
811 {
812 	struct nfs4_delegation *dp;
813 	long n;
814 
815 	dprintk("NFSD alloc_init_deleg\n");
816 	n = atomic_long_inc_return(&num_delegations);
817 	if (n < 0 || n > max_delegations)
818 		goto out_dec;
819 	if (delegation_blocked(&current_fh->fh_handle))
820 		goto out_dec;
821 	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
822 	if (dp == NULL)
823 		goto out_dec;
824 
825 	/*
826 	 * delegation seqids are never incremented.  The special meaning
827 	 * that 4.1 attaches to seqid 0 isn't relevant here, but avoid 0
828 	 * anyway for consistency and use 1:
829 	 */
830 	dp->dl_stid.sc_stateid.si_generation = 1;
831 	INIT_LIST_HEAD(&dp->dl_perfile);
832 	INIT_LIST_HEAD(&dp->dl_perclnt);
833 	INIT_LIST_HEAD(&dp->dl_recall_lru);
834 	dp->dl_clnt_odstate = odstate;
835 	get_clnt_odstate(odstate);
836 	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
837 	dp->dl_retries = 1;
838 	nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
839 		      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
840 	return dp;
841 out_dec:
842 	atomic_long_dec(&num_delegations);
843 	return NULL;
844 }
845 
846 void
847 nfs4_put_stid(struct nfs4_stid *s)
848 {
849 	struct nfs4_file *fp = s->sc_file;
850 	struct nfs4_client *clp = s->sc_client;
851 
852 	might_lock(&clp->cl_lock);
853 
854 	if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
855 		wake_up_all(&close_wq);
856 		return;
857 	}
858 	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
859 	spin_unlock(&clp->cl_lock);
860 	s->sc_free(s);
861 	if (fp)
862 		put_nfs4_file(fp);
863 }
864 
865 void
866 nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
867 {
868 	stateid_t *src = &stid->sc_stateid;
869 
870 	spin_lock(&stid->sc_lock);
871 	if (unlikely(++src->si_generation == 0))
872 		src->si_generation = 1;
873 	memcpy(dst, src, sizeof(*dst));
874 	spin_unlock(&stid->sc_lock);
875 }
876 
877 static void nfs4_put_deleg_lease(struct nfs4_file *fp)
878 {
879 	struct file *filp = NULL;
880 
881 	spin_lock(&fp->fi_lock);
882 	if (fp->fi_deleg_file && --fp->fi_delegees == 0)
883 		swap(filp, fp->fi_deleg_file);
884 	spin_unlock(&fp->fi_lock);
885 
886 	if (filp) {
887 		vfs_setlease(filp, F_UNLCK, NULL, (void **)&fp);
888 		fput(filp);
889 	}
890 }
891 
892 void nfs4_unhash_stid(struct nfs4_stid *s)
893 {
894 	s->sc_type = 0;
895 }
896 
897 /**
898  * nfs4_get_existing_delegation - Discover if this delegation already exists
899  * @clp:     a pointer to the nfs4_client we're granting a delegation to
900  * @fp:      a pointer to the nfs4_file we're granting a delegation on
901  *
902  * Return:
903  *      On success: 0 if an existing delegation was not found.
904  *
905  *      On error: -EAGAIN if one was previously granted to this nfs4_client
906  *                 for this nfs4_file.
907  *
908  */
909 
910 static int
911 nfs4_get_existing_delegation(struct nfs4_client *clp, struct nfs4_file *fp)
912 {
913 	struct nfs4_delegation *searchdp = NULL;
914 	struct nfs4_client *searchclp = NULL;
915 
916 	lockdep_assert_held(&state_lock);
917 	lockdep_assert_held(&fp->fi_lock);
918 
919 	list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
920 		searchclp = searchdp->dl_stid.sc_client;
921 		if (clp == searchclp) {
922 			return -EAGAIN;
923 		}
924 	}
925 	return 0;
926 }
927 
928 /**
929  * hash_delegation_locked - Add a delegation to the appropriate lists
930  * @dp:     a pointer to the nfs4_delegation we are adding.
931  * @fp:     a pointer to the nfs4_file we're granting a delegation on
932  *
933  * Return:
934  *      On success: 0 if the delegation was successfully hashed.
935  *
936  *      On error: -EAGAIN if one was previously granted to this
937  *                 nfs4_client for this nfs4_file. Delegation is not hashed.
938  *
939  */
940 
941 static int
942 hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
943 {
944 	int status;
945 	struct nfs4_client *clp = dp->dl_stid.sc_client;
946 
947 	lockdep_assert_held(&state_lock);
948 	lockdep_assert_held(&fp->fi_lock);
949 
950 	status = nfs4_get_existing_delegation(clp, fp);
951 	if (status)
952 		return status;
953 	++fp->fi_delegees;
954 	refcount_inc(&dp->dl_stid.sc_count);
955 	dp->dl_stid.sc_type = NFS4_DELEG_STID;
956 	list_add(&dp->dl_perfile, &fp->fi_delegations);
957 	list_add(&dp->dl_perclnt, &clp->cl_delegations);
958 	return 0;
959 }
960 
961 static bool
962 unhash_delegation_locked(struct nfs4_delegation *dp)
963 {
964 	struct nfs4_file *fp = dp->dl_stid.sc_file;
965 
966 	lockdep_assert_held(&state_lock);
967 
968 	if (list_empty(&dp->dl_perfile))
969 		return false;
970 
971 	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
972 	/* Ensure that deleg break won't try to requeue it */
973 	++dp->dl_time;
974 	spin_lock(&fp->fi_lock);
975 	list_del_init(&dp->dl_perclnt);
976 	list_del_init(&dp->dl_recall_lru);
977 	list_del_init(&dp->dl_perfile);
978 	spin_unlock(&fp->fi_lock);
979 	return true;
980 }
981 
982 static void destroy_delegation(struct nfs4_delegation *dp)
983 {
984 	bool unhashed;
985 
986 	spin_lock(&state_lock);
987 	unhashed = unhash_delegation_locked(dp);
988 	spin_unlock(&state_lock);
989 	if (unhashed) {
990 		put_clnt_odstate(dp->dl_clnt_odstate);
991 		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
992 		nfs4_put_stid(&dp->dl_stid);
993 	}
994 }
995 
996 static void revoke_delegation(struct nfs4_delegation *dp)
997 {
998 	struct nfs4_client *clp = dp->dl_stid.sc_client;
999 
1000 	WARN_ON(!list_empty(&dp->dl_recall_lru));
1001 
1002 	put_clnt_odstate(dp->dl_clnt_odstate);
1003 	nfs4_put_deleg_lease(dp->dl_stid.sc_file);
1004 
1005 	if (clp->cl_minorversion == 0)
1006 		nfs4_put_stid(&dp->dl_stid);
1007 	else {
1008 		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
1009 		spin_lock(&clp->cl_lock);
1010 		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
1011 		spin_unlock(&clp->cl_lock);
1012 	}
1013 }
1014 
1015 /*
1016  * SETCLIENTID state
1017  */
1018 
1019 static unsigned int clientid_hashval(u32 id)
1020 {
1021 	return id & CLIENT_HASH_MASK;
1022 }
1023 
1024 static unsigned int clientstr_hashval(const char *name)
1025 {
1026 	return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
1027 }
1028 
1029 /*
1030  * We store the NONE, READ, WRITE, and BOTH bits separately in the
1031  * st_{access,deny}_bmap field of the stateid, in order to track not
1032  * only what share bits are currently in force, but also what
1033  * combinations of share bits previous opens have used.  This allows us
1034  * to enforce the recommendation of rfc 3530 14.2.19 that the server
1035  * return an error if the client attempts to downgrade to a combination
1036  * of share bits not explicable by closing some of its previous opens.
1037  *
1038  * XXX: This enforcement is actually incomplete, since we don't keep
1039  * track of access/deny bit combinations; so, e.g., we allow:
1040  *
1041  *	OPEN allow read, deny write
1042  *	OPEN allow both, deny none
1043  *	DOWNGRADE allow read, deny none
1044  *
1045  * which we should reject.
1046  */
1047 static unsigned int
1048 bmap_to_share_mode(unsigned long bmap) {
1049 	int i;
1050 	unsigned int access = 0;
1051 
1052 	for (i = 1; i < 4; i++) {
1053 		if (test_bit(i, &bmap))
1054 			access |= i;
1055 	}
1056 	return access;
1057 }
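/*
 * For example, if previous opens set bits 1 (NFS4_SHARE_ACCESS_READ) and
 * 3 (NFS4_SHARE_ACCESS_BOTH) of st_access_bmap, the bitmap is 0b1010 and
 * bmap_to_share_mode() returns 1 | 3 = NFS4_SHARE_ACCESS_BOTH.
 */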
1058 
1059 /* set share access for a given stateid */
1060 static inline void
1061 set_access(u32 access, struct nfs4_ol_stateid *stp)
1062 {
1063 	unsigned char mask = 1 << access;
1064 
1065 	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
1066 	stp->st_access_bmap |= mask;
1067 }
1068 
1069 /* clear share access for a given stateid */
1070 static inline void
1071 clear_access(u32 access, struct nfs4_ol_stateid *stp)
1072 {
1073 	unsigned char mask = 1 << access;
1074 
1075 	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
1076 	stp->st_access_bmap &= ~mask;
1077 }
1078 
1079 /* test whether a given stateid has access */
1080 static inline bool
1081 test_access(u32 access, struct nfs4_ol_stateid *stp)
1082 {
1083 	unsigned char mask = 1 << access;
1084 
1085 	return (bool)(stp->st_access_bmap & mask);
1086 }
1087 
1088 /* set share deny for a given stateid */
1089 static inline void
1090 set_deny(u32 deny, struct nfs4_ol_stateid *stp)
1091 {
1092 	unsigned char mask = 1 << deny;
1093 
1094 	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
1095 	stp->st_deny_bmap |= mask;
1096 }
1097 
1098 /* clear share deny for a given stateid */
1099 static inline void
1100 clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
1101 {
1102 	unsigned char mask = 1 << deny;
1103 
1104 	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
1105 	stp->st_deny_bmap &= ~mask;
1106 }
1107 
1108 /* test whether a given stateid is denying specific access */
1109 static inline bool
1110 test_deny(u32 deny, struct nfs4_ol_stateid *stp)
1111 {
1112 	unsigned char mask = 1 << deny;
1113 
1114 	return (bool)(stp->st_deny_bmap & mask);
1115 }
1116 
1117 static int nfs4_access_to_omode(u32 access)
1118 {
1119 	switch (access & NFS4_SHARE_ACCESS_BOTH) {
1120 	case NFS4_SHARE_ACCESS_READ:
1121 		return O_RDONLY;
1122 	case NFS4_SHARE_ACCESS_WRITE:
1123 		return O_WRONLY;
1124 	case NFS4_SHARE_ACCESS_BOTH:
1125 		return O_RDWR;
1126 	}
1127 	WARN_ON_ONCE(1);
1128 	return O_RDONLY;
1129 }
1130 
1131 /*
1132  * A stateid that had a deny mode associated with it is being released
1133  * or downgraded. Recalculate the deny mode on the file.
1134  */
1135 static void
1136 recalculate_deny_mode(struct nfs4_file *fp)
1137 {
1138 	struct nfs4_ol_stateid *stp;
1139 
1140 	spin_lock(&fp->fi_lock);
1141 	fp->fi_share_deny = 0;
1142 	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
1143 		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
1144 	spin_unlock(&fp->fi_lock);
1145 }
1146 
1147 static void
1148 reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
1149 {
1150 	int i;
1151 	bool change = false;
1152 
1153 	for (i = 1; i < 4; i++) {
1154 		if ((i & deny) != i) {
1155 			change = true;
1156 			clear_deny(i, stp);
1157 		}
1158 	}
1159 
1160 	/* Recalculate per-file deny mode if there was a change */
1161 	if (change)
1162 		recalculate_deny_mode(stp->st_stid.sc_file);
1163 }
1164 
1165 /* release all access and file references for a given stateid */
1166 static void
1167 release_all_access(struct nfs4_ol_stateid *stp)
1168 {
1169 	int i;
1170 	struct nfs4_file *fp = stp->st_stid.sc_file;
1171 
1172 	if (fp && stp->st_deny_bmap != 0)
1173 		recalculate_deny_mode(fp);
1174 
1175 	for (i = 1; i < 4; i++) {
1176 		if (test_access(i, stp))
1177 			nfs4_file_put_access(stp->st_stid.sc_file, i);
1178 		clear_access(i, stp);
1179 	}
1180 }
1181 
1182 static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
1183 {
1184 	kfree(sop->so_owner.data);
1185 	sop->so_ops->so_free(sop);
1186 }
1187 
1188 static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
1189 {
1190 	struct nfs4_client *clp = sop->so_client;
1191 
1192 	might_lock(&clp->cl_lock);
1193 
1194 	if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
1195 		return;
1196 	sop->so_ops->so_unhash(sop);
1197 	spin_unlock(&clp->cl_lock);
1198 	nfs4_free_stateowner(sop);
1199 }
1200 
1201 static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
1202 {
1203 	struct nfs4_file *fp = stp->st_stid.sc_file;
1204 
1205 	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
1206 
1207 	if (list_empty(&stp->st_perfile))
1208 		return false;
1209 
1210 	spin_lock(&fp->fi_lock);
1211 	list_del_init(&stp->st_perfile);
1212 	spin_unlock(&fp->fi_lock);
1213 	list_del(&stp->st_perstateowner);
1214 	return true;
1215 }
1216 
1217 static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
1218 {
1219 	struct nfs4_ol_stateid *stp = openlockstateid(stid);
1220 
1221 	put_clnt_odstate(stp->st_clnt_odstate);
1222 	release_all_access(stp);
1223 	if (stp->st_stateowner)
1224 		nfs4_put_stateowner(stp->st_stateowner);
1225 	kmem_cache_free(stateid_slab, stid);
1226 }
1227 
1228 static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
1229 {
1230 	struct nfs4_ol_stateid *stp = openlockstateid(stid);
1231 	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
1232 	struct file *file;
1233 
1234 	file = find_any_file(stp->st_stid.sc_file);
1235 	if (file)
1236 		filp_close(file, (fl_owner_t)lo);
1237 	nfs4_free_ol_stateid(stid);
1238 }
1239 
1240 /*
1241  * Put the persistent reference to an already unhashed generic stateid, while
1242  * holding the cl_lock. If it's the last reference, then put it onto the
1243  * reaplist for later destruction.
1244  */
1245 static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
1246 				       struct list_head *reaplist)
1247 {
1248 	struct nfs4_stid *s = &stp->st_stid;
1249 	struct nfs4_client *clp = s->sc_client;
1250 
1251 	lockdep_assert_held(&clp->cl_lock);
1252 
1253 	WARN_ON_ONCE(!list_empty(&stp->st_locks));
1254 
1255 	if (!refcount_dec_and_test(&s->sc_count)) {
1256 		wake_up_all(&close_wq);
1257 		return;
1258 	}
1259 
1260 	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
1261 	list_add(&stp->st_locks, reaplist);
1262 }
1263 
1264 static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
1265 {
1266 	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1267 
1268 	list_del_init(&stp->st_locks);
1269 	nfs4_unhash_stid(&stp->st_stid);
1270 	return unhash_ol_stateid(stp);
1271 }
1272 
1273 static void release_lock_stateid(struct nfs4_ol_stateid *stp)
1274 {
1275 	struct nfs4_client *clp = stp->st_stid.sc_client;
1276 	bool unhashed;
1277 
1278 	spin_lock(&clp->cl_lock);
1279 	unhashed = unhash_lock_stateid(stp);
1280 	spin_unlock(&clp->cl_lock);
1281 	if (unhashed)
1282 		nfs4_put_stid(&stp->st_stid);
1283 }
1284 
1285 static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
1286 {
1287 	struct nfs4_client *clp = lo->lo_owner.so_client;
1288 
1289 	lockdep_assert_held(&clp->cl_lock);
1290 
1291 	list_del_init(&lo->lo_owner.so_strhash);
1292 }
1293 
1294 /*
1295  * Free a list of generic stateids that were collected earlier after being
1296  * fully unhashed.
1297  */
1298 static void
1299 free_ol_stateid_reaplist(struct list_head *reaplist)
1300 {
1301 	struct nfs4_ol_stateid *stp;
1302 	struct nfs4_file *fp;
1303 
1304 	might_sleep();
1305 
1306 	while (!list_empty(reaplist)) {
1307 		stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
1308 				       st_locks);
1309 		list_del(&stp->st_locks);
1310 		fp = stp->st_stid.sc_file;
1311 		stp->st_stid.sc_free(&stp->st_stid);
1312 		if (fp)
1313 			put_nfs4_file(fp);
1314 	}
1315 }
1316 
1317 static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
1318 				       struct list_head *reaplist)
1319 {
1320 	struct nfs4_ol_stateid *stp;
1321 
1322 	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);
1323 
1324 	while (!list_empty(&open_stp->st_locks)) {
1325 		stp = list_entry(open_stp->st_locks.next,
1326 				struct nfs4_ol_stateid, st_locks);
1327 		WARN_ON(!unhash_lock_stateid(stp));
1328 		put_ol_stateid_locked(stp, reaplist);
1329 	}
1330 }
1331 
1332 static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
1333 				struct list_head *reaplist)
1334 {
1335 	bool unhashed;
1336 
1337 	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1338 
1339 	unhashed = unhash_ol_stateid(stp);
1340 	release_open_stateid_locks(stp, reaplist);
1341 	return unhashed;
1342 }
1343 
1344 static void release_open_stateid(struct nfs4_ol_stateid *stp)
1345 {
1346 	LIST_HEAD(reaplist);
1347 
1348 	spin_lock(&stp->st_stid.sc_client->cl_lock);
1349 	if (unhash_open_stateid(stp, &reaplist))
1350 		put_ol_stateid_locked(stp, &reaplist);
1351 	spin_unlock(&stp->st_stid.sc_client->cl_lock);
1352 	free_ol_stateid_reaplist(&reaplist);
1353 }
1354 
1355 static void unhash_openowner_locked(struct nfs4_openowner *oo)
1356 {
1357 	struct nfs4_client *clp = oo->oo_owner.so_client;
1358 
1359 	lockdep_assert_held(&clp->cl_lock);
1360 
1361 	list_del_init(&oo->oo_owner.so_strhash);
1362 	list_del_init(&oo->oo_perclient);
1363 }
1364 
1365 static void release_last_closed_stateid(struct nfs4_openowner *oo)
1366 {
1367 	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
1368 					  nfsd_net_id);
1369 	struct nfs4_ol_stateid *s;
1370 
1371 	spin_lock(&nn->client_lock);
1372 	s = oo->oo_last_closed_stid;
1373 	if (s) {
1374 		list_del_init(&oo->oo_close_lru);
1375 		oo->oo_last_closed_stid = NULL;
1376 	}
1377 	spin_unlock(&nn->client_lock);
1378 	if (s)
1379 		nfs4_put_stid(&s->st_stid);
1380 }
1381 
1382 static void release_openowner(struct nfs4_openowner *oo)
1383 {
1384 	struct nfs4_ol_stateid *stp;
1385 	struct nfs4_client *clp = oo->oo_owner.so_client;
1386 	struct list_head reaplist;
1387 
1388 	INIT_LIST_HEAD(&reaplist);
1389 
1390 	spin_lock(&clp->cl_lock);
1391 	unhash_openowner_locked(oo);
1392 	while (!list_empty(&oo->oo_owner.so_stateids)) {
1393 		stp = list_first_entry(&oo->oo_owner.so_stateids,
1394 				struct nfs4_ol_stateid, st_perstateowner);
1395 		if (unhash_open_stateid(stp, &reaplist))
1396 			put_ol_stateid_locked(stp, &reaplist);
1397 	}
1398 	spin_unlock(&clp->cl_lock);
1399 	free_ol_stateid_reaplist(&reaplist);
1400 	release_last_closed_stateid(oo);
1401 	nfs4_put_stateowner(&oo->oo_owner);
1402 }
1403 
1404 static inline int
1405 hash_sessionid(struct nfs4_sessionid *sessionid)
1406 {
1407 	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
1408 
1409 	return sid->sequence % SESSION_HASH_SIZE;
1410 }
1411 
1412 #ifdef CONFIG_SUNRPC_DEBUG
1413 static inline void
1414 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1415 {
1416 	u32 *ptr = (u32 *)(&sessionid->data[0]);
1417 	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
1418 }
1419 #else
1420 static inline void
1421 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1422 {
1423 }
1424 #endif
1425 
1426 /*
1427  * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
1428  * won't be used for replay.
1429  */
1430 void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
1431 {
1432 	struct nfs4_stateowner *so = cstate->replay_owner;
1433 
1434 	if (nfserr == nfserr_replay_me)
1435 		return;
1436 
1437 	if (!seqid_mutating_err(ntohl(nfserr))) {
1438 		nfsd4_cstate_clear_replay(cstate);
1439 		return;
1440 	}
1441 	if (!so)
1442 		return;
1443 	if (so->so_is_open_owner)
1444 		release_last_closed_stateid(openowner(so));
1445 	so->so_seqid++;
1446 	return;
1447 }
1448 
1449 static void
1450 gen_sessionid(struct nfsd4_session *ses)
1451 {
1452 	struct nfs4_client *clp = ses->se_client;
1453 	struct nfsd4_sessionid *sid;
1454 
1455 	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
1456 	sid->clientid = clp->cl_clientid;
1457 	sid->sequence = current_sessionid++;
1458 	sid->reserved = 0;
1459 }
1460 
1461 /*
1462  * The protocol defines ca_maxresponsesize_cached to include the size of
1463  * the rpc header, but all we need to cache is the data starting after
1464  * the end of the initial SEQUENCE operation--the rest we regenerate
1465  * each time.  Therefore we can advertise a ca_maxresponsesize_cached
1466  * value that is the number of bytes in our cache plus a few additional
1467  * bytes.  In order to stay on the safe side, and not promise more than
1468  * we can cache, those additional bytes must be the minimum possible: 24
1469  * bytes of rpc header (xid through accept state, with AUTH_NULL
1470  * verifier), 12 for the compound header (with zero-length tag), and 44
1471  * for the SEQUENCE op response:
1472  */
1473 #define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)
1474 
1475 static void
1476 free_session_slots(struct nfsd4_session *ses)
1477 {
1478 	int i;
1479 
1480 	for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
1481 		free_svc_cred(&ses->se_slots[i]->sl_cred);
1482 		kfree(ses->se_slots[i]);
1483 	}
1484 }
1485 
1486 /*
1487  * We don't actually need to cache the rpc and session headers, so we
1488  * can allocate a little less for each slot:
1489  */
1490 static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
1491 {
1492 	u32 size;
1493 
1494 	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
1495 		size = 0;
1496 	else
1497 		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
1498 	return size + sizeof(struct nfsd4_slot);
1499 }
1500 
1501 /*
1502  * XXX: If we run out of reserved DRC memory we could (up to a point)
1503  * re-negotiate active sessions and reduce their slot usage to make
1504  * room for new connections. For now we just fail the create session.
1505  */
1506 static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
1507 {
1508 	u32 slotsize = slot_bytes(ca);
1509 	u32 num = ca->maxreqs;
1510 	int avail;
1511 
1512 	spin_lock(&nfsd_drc_lock);
1513 	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
1514 		    nfsd_drc_max_mem - nfsd_drc_mem_used);
1515 	/*
1516 	 * Never use more than a third of the remaining memory,
1517 	 * unless it's the only way to give this client a slot:
1518 	 */
1519 	avail = clamp_t(int, avail, slotsize, avail/3);
1520 	num = min_t(int, num, avail / slotsize);
1521 	nfsd_drc_mem_used += num * slotsize;
1522 	spin_unlock(&nfsd_drc_lock);
1523 
1524 	return num;
1525 }
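/*
 * For illustration (assuming NFSD_MAX_MEM_PER_SESSION is larger): if only
 * 3000 bytes of DRC memory remain and slots are 1000 bytes each, avail is
 * clamped to 1000 (a third of what is left, but never less than one slot),
 * so a client asking for many slots is trimmed down to a single one.
 */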
1526 
1527 static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
1528 {
1529 	int slotsize = slot_bytes(ca);
1530 
1531 	spin_lock(&nfsd_drc_lock);
1532 	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
1533 	spin_unlock(&nfsd_drc_lock);
1534 }
1535 
1536 static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
1537 					   struct nfsd4_channel_attrs *battrs)
1538 {
1539 	int numslots = fattrs->maxreqs;
1540 	int slotsize = slot_bytes(fattrs);
1541 	struct nfsd4_session *new;
1542 	int mem, i;
1543 
1544 	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
1545 			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
1546 	mem = numslots * sizeof(struct nfsd4_slot *);
1547 
1548 	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
1549 	if (!new)
1550 		return NULL;
1551 	/* allocate each struct nfsd4_slot and data cache in one piece */
1552 	for (i = 0; i < numslots; i++) {
1553 		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
1554 		if (!new->se_slots[i])
1555 			goto out_free;
1556 	}
1557 
1558 	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
1559 	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
1560 
1561 	return new;
1562 out_free:
1563 	while (i--)
1564 		kfree(new->se_slots[i]);
1565 	kfree(new);
1566 	return NULL;
1567 }
1568 
1569 static void free_conn(struct nfsd4_conn *c)
1570 {
1571 	svc_xprt_put(c->cn_xprt);
1572 	kfree(c);
1573 }
1574 
1575 static void nfsd4_conn_lost(struct svc_xpt_user *u)
1576 {
1577 	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
1578 	struct nfs4_client *clp = c->cn_session->se_client;
1579 
1580 	spin_lock(&clp->cl_lock);
1581 	if (!list_empty(&c->cn_persession)) {
1582 		list_del(&c->cn_persession);
1583 		free_conn(c);
1584 	}
1585 	nfsd4_probe_callback(clp);
1586 	spin_unlock(&clp->cl_lock);
1587 }
1588 
1589 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
1590 {
1591 	struct nfsd4_conn *conn;
1592 
1593 	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
1594 	if (!conn)
1595 		return NULL;
1596 	svc_xprt_get(rqstp->rq_xprt);
1597 	conn->cn_xprt = rqstp->rq_xprt;
1598 	conn->cn_flags = flags;
1599 	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
1600 	return conn;
1601 }
1602 
1603 static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1604 {
1605 	conn->cn_session = ses;
1606 	list_add(&conn->cn_persession, &ses->se_conns);
1607 }
1608 
1609 static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1610 {
1611 	struct nfs4_client *clp = ses->se_client;
1612 
1613 	spin_lock(&clp->cl_lock);
1614 	__nfsd4_hash_conn(conn, ses);
1615 	spin_unlock(&clp->cl_lock);
1616 }
1617 
1618 static int nfsd4_register_conn(struct nfsd4_conn *conn)
1619 {
1620 	conn->cn_xpt_user.callback = nfsd4_conn_lost;
1621 	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
1622 }
1623 
1624 static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
1625 {
1626 	int ret;
1627 
1628 	nfsd4_hash_conn(conn, ses);
1629 	ret = nfsd4_register_conn(conn);
1630 	if (ret)
1631 		/* oops; xprt is already down: */
1632 		nfsd4_conn_lost(&conn->cn_xpt_user);
1633 	/* We may have gained or lost a callback channel: */
1634 	nfsd4_probe_callback_sync(ses->se_client);
1635 }
1636 
1637 static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
1638 {
1639 	u32 dir = NFS4_CDFC4_FORE;
1640 
1641 	if (cses->flags & SESSION4_BACK_CHAN)
1642 		dir |= NFS4_CDFC4_BACK;
1643 	return alloc_conn(rqstp, dir);
1644 }
1645 
1646 /* must be called under client_lock */
1647 static void nfsd4_del_conns(struct nfsd4_session *s)
1648 {
1649 	struct nfs4_client *clp = s->se_client;
1650 	struct nfsd4_conn *c;
1651 
1652 	spin_lock(&clp->cl_lock);
1653 	while (!list_empty(&s->se_conns)) {
1654 		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
1655 		list_del_init(&c->cn_persession);
1656 		spin_unlock(&clp->cl_lock);
1657 
1658 		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
1659 		free_conn(c);
1660 
1661 		spin_lock(&clp->cl_lock);
1662 	}
1663 	spin_unlock(&clp->cl_lock);
1664 }
1665 
1666 static void __free_session(struct nfsd4_session *ses)
1667 {
1668 	free_session_slots(ses);
1669 	kfree(ses);
1670 }
1671 
1672 static void free_session(struct nfsd4_session *ses)
1673 {
1674 	nfsd4_del_conns(ses);
1675 	nfsd4_put_drc_mem(&ses->se_fchannel);
1676 	__free_session(ses);
1677 }
1678 
1679 static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
1680 {
1681 	int idx;
1682 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1683 
1684 	new->se_client = clp;
1685 	gen_sessionid(new);
1686 
1687 	INIT_LIST_HEAD(&new->se_conns);
1688 
1689 	new->se_cb_seq_nr = 1;
1690 	new->se_flags = cses->flags;
1691 	new->se_cb_prog = cses->callback_prog;
1692 	new->se_cb_sec = cses->cb_sec;
1693 	atomic_set(&new->se_ref, 0);
1694 	idx = hash_sessionid(&new->se_sessionid);
1695 	list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
1696 	spin_lock(&clp->cl_lock);
1697 	list_add(&new->se_perclnt, &clp->cl_sessions);
1698 	spin_unlock(&clp->cl_lock);
1699 
1700 	{
1701 		struct sockaddr *sa = svc_addr(rqstp);
1702 		/*
1703 		 * This is a little silly; with sessions there's no real
1704 		 * use for the callback address.  Use the peer address
1705 		 * as a reasonable default for now, but consider fixing
1706 		 * the rpc client not to require an address in the
1707 		 * future:
1708 		 */
1709 		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
1710 		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
1711 	}
1712 }
1713 
1714 /* caller must hold client_lock */
1715 static struct nfsd4_session *
1716 __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
1717 {
1718 	struct nfsd4_session *elem;
1719 	int idx;
1720 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1721 
1722 	lockdep_assert_held(&nn->client_lock);
1723 
1724 	dump_sessionid(__func__, sessionid);
1725 	idx = hash_sessionid(sessionid);
1726 	/* Search in the appropriate list */
1727 	list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
1728 		if (!memcmp(elem->se_sessionid.data, sessionid->data,
1729 			    NFS4_MAX_SESSIONID_LEN)) {
1730 			return elem;
1731 		}
1732 	}
1733 
1734 	dprintk("%s: session not found\n", __func__);
1735 	return NULL;
1736 }
1737 
1738 static struct nfsd4_session *
1739 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
1740 		__be32 *ret)
1741 {
1742 	struct nfsd4_session *session;
1743 	__be32 status = nfserr_badsession;
1744 
1745 	session = __find_in_sessionid_hashtbl(sessionid, net);
1746 	if (!session)
1747 		goto out;
1748 	status = nfsd4_get_session_locked(session);
1749 	if (status)
1750 		session = NULL;
1751 out:
1752 	*ret = status;
1753 	return session;
1754 }
1755 
1756 /* caller must hold client_lock */
1757 static void
1758 unhash_session(struct nfsd4_session *ses)
1759 {
1760 	struct nfs4_client *clp = ses->se_client;
1761 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1762 
1763 	lockdep_assert_held(&nn->client_lock);
1764 
1765 	list_del(&ses->se_hash);
1766 	spin_lock(&ses->se_client->cl_lock);
1767 	list_del(&ses->se_perclnt);
1768 	spin_unlock(&ses->se_client->cl_lock);
1769 }
1770 
1771 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
1772 static int
1773 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
1774 {
1775 	/*
1776 	 * We're assuming the clid was not given out from a boot
1777 	 * precisely 2^32 (about 136 years) before this one.  That seems
1778 	 * a safe assumption:
1779 	 */
1780 	if (clid->cl_boot == (u32)nn->boot_time)
1781 		return 0;
1782 	dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
1783 		clid->cl_boot, clid->cl_id, nn->boot_time);
1784 	return 1;
1785 }
1786 
1787 /*
1788  * XXX Should we use a slab cache?
1789  * This type of memory management is somewhat inefficient, but we use it
1790  * anyway since SETCLIENTID is not a common operation.
1791  */
1792 static struct nfs4_client *alloc_client(struct xdr_netobj name)
1793 {
1794 	struct nfs4_client *clp;
1795 	int i;
1796 
1797 	clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
1798 	if (clp == NULL)
1799 		return NULL;
1800 	clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
1801 	if (clp->cl_name.data == NULL)
1802 		goto err_no_name;
1803 	clp->cl_ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
1804 			OWNER_HASH_SIZE, GFP_KERNEL);
1805 	if (!clp->cl_ownerstr_hashtbl)
1806 		goto err_no_hashtbl;
1807 	for (i = 0; i < OWNER_HASH_SIZE; i++)
1808 		INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
1809 	clp->cl_name.len = name.len;
1810 	INIT_LIST_HEAD(&clp->cl_sessions);
1811 	idr_init(&clp->cl_stateids);
1812 	atomic_set(&clp->cl_refcount, 0);
1813 	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
1814 	INIT_LIST_HEAD(&clp->cl_idhash);
1815 	INIT_LIST_HEAD(&clp->cl_openowners);
1816 	INIT_LIST_HEAD(&clp->cl_delegations);
1817 	INIT_LIST_HEAD(&clp->cl_lru);
1818 	INIT_LIST_HEAD(&clp->cl_revoked);
1819 #ifdef CONFIG_NFSD_PNFS
1820 	INIT_LIST_HEAD(&clp->cl_lo_states);
1821 #endif
1822 	spin_lock_init(&clp->cl_lock);
1823 	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
1824 	return clp;
1825 err_no_hashtbl:
1826 	kfree(clp->cl_name.data);
1827 err_no_name:
1828 	kfree(clp);
1829 	return NULL;
1830 }
1831 
1832 static void
1833 free_client(struct nfs4_client *clp)
1834 {
1835 	while (!list_empty(&clp->cl_sessions)) {
1836 		struct nfsd4_session *ses;
1837 		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
1838 				se_perclnt);
1839 		list_del(&ses->se_perclnt);
1840 		WARN_ON_ONCE(atomic_read(&ses->se_ref));
1841 		free_session(ses);
1842 	}
1843 	rpc_destroy_wait_queue(&clp->cl_cb_waitq);
1844 	free_svc_cred(&clp->cl_cred);
1845 	kfree(clp->cl_ownerstr_hashtbl);
1846 	kfree(clp->cl_name.data);
1847 	idr_destroy(&clp->cl_stateids);
1848 	kfree(clp);
1849 }
1850 
1851 /* must be called under the client_lock */
1852 static void
1853 unhash_client_locked(struct nfs4_client *clp)
1854 {
1855 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1856 	struct nfsd4_session *ses;
1857 
1858 	lockdep_assert_held(&nn->client_lock);
1859 
1860 	/* Mark the client as expired! */
1861 	clp->cl_time = 0;
1862 	/* Make it invisible */
1863 	if (!list_empty(&clp->cl_idhash)) {
1864 		list_del_init(&clp->cl_idhash);
1865 		if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
1866 			rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
1867 		else
1868 			rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
1869 	}
1870 	list_del_init(&clp->cl_lru);
1871 	spin_lock(&clp->cl_lock);
1872 	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
1873 		list_del_init(&ses->se_hash);
1874 	spin_unlock(&clp->cl_lock);
1875 }
1876 
1877 static void
1878 unhash_client(struct nfs4_client *clp)
1879 {
1880 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1881 
1882 	spin_lock(&nn->client_lock);
1883 	unhash_client_locked(clp);
1884 	spin_unlock(&nn->client_lock);
1885 }
1886 
1887 static __be32 mark_client_expired_locked(struct nfs4_client *clp)
1888 {
1889 	if (atomic_read(&clp->cl_refcount))
1890 		return nfserr_jukebox;
1891 	unhash_client_locked(clp);
1892 	return nfs_ok;
1893 }
1894 
1895 static void
1896 __destroy_client(struct nfs4_client *clp)
1897 {
1898 	int i;
1899 	struct nfs4_openowner *oo;
1900 	struct nfs4_delegation *dp;
1901 	struct list_head reaplist;
1902 
1903 	INIT_LIST_HEAD(&reaplist);
1904 	spin_lock(&state_lock);
1905 	while (!list_empty(&clp->cl_delegations)) {
1906 		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
1907 		WARN_ON(!unhash_delegation_locked(dp));
1908 		list_add(&dp->dl_recall_lru, &reaplist);
1909 	}
1910 	spin_unlock(&state_lock);
1911 	while (!list_empty(&reaplist)) {
1912 		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
1913 		list_del_init(&dp->dl_recall_lru);
1914 		put_clnt_odstate(dp->dl_clnt_odstate);
1915 		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
1916 		nfs4_put_stid(&dp->dl_stid);
1917 	}
1918 	while (!list_empty(&clp->cl_revoked)) {
1919 		dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
1920 		list_del_init(&dp->dl_recall_lru);
1921 		nfs4_put_stid(&dp->dl_stid);
1922 	}
1923 	while (!list_empty(&clp->cl_openowners)) {
1924 		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
1925 		nfs4_get_stateowner(&oo->oo_owner);
1926 		release_openowner(oo);
1927 	}
1928 	for (i = 0; i < OWNER_HASH_SIZE; i++) {
1929 		struct nfs4_stateowner *so, *tmp;
1930 
1931 		list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
1932 					 so_strhash) {
1933 			/* Should be no openowners at this point */
1934 			WARN_ON_ONCE(so->so_is_open_owner);
1935 			remove_blocked_locks(lockowner(so));
1936 		}
1937 	}
1938 	nfsd4_return_all_client_layouts(clp);
1939 	nfsd4_shutdown_callback(clp);
1940 	if (clp->cl_cb_conn.cb_xprt)
1941 		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
1942 	free_client(clp);
1943 }
1944 
1945 static void
1946 destroy_client(struct nfs4_client *clp)
1947 {
1948 	unhash_client(clp);
1949 	__destroy_client(clp);
1950 }
1951 
1952 static void expire_client(struct nfs4_client *clp)
1953 {
1954 	unhash_client(clp);
1955 	nfsd4_client_record_remove(clp);
1956 	__destroy_client(clp);
1957 }
1958 
1959 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
1960 {
1961 	memcpy(target->cl_verifier.data, source->data,
1962 			sizeof(target->cl_verifier.data));
1963 }
1964 
1965 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
1966 {
1967 	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
1968 	target->cl_clientid.cl_id = source->cl_clientid.cl_id;
1969 }
1970 
1971 static int copy_cred(struct svc_cred *target, struct svc_cred *source)
1972 {
1973 	target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
1974 	target->cr_raw_principal = kstrdup(source->cr_raw_principal,
1975 								GFP_KERNEL);
1976 	if ((source->cr_principal && !target->cr_principal) ||
1977 	    (source->cr_raw_principal && !target->cr_raw_principal))
1978 		return -ENOMEM;
1979 
1980 	target->cr_flavor = source->cr_flavor;
1981 	target->cr_uid = source->cr_uid;
1982 	target->cr_gid = source->cr_gid;
1983 	target->cr_group_info = source->cr_group_info;
1984 	get_group_info(target->cr_group_info);
1985 	target->cr_gss_mech = source->cr_gss_mech;
1986 	if (source->cr_gss_mech)
1987 		gss_mech_get(source->cr_gss_mech);
1988 	return 0;
1989 }
1990 
1991 static int
1992 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
1993 {
1994 	if (o1->len < o2->len)
1995 		return -1;
1996 	if (o1->len > o2->len)
1997 		return 1;
1998 	return memcmp(o1->data, o2->data, o1->len);
1999 }
2000 
2001 static int same_name(const char *n1, const char *n2)
2002 {
2003 	return 0 == memcmp(n1, n2, HEXDIR_LEN);
2004 }
2005 
2006 static int
2007 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
2008 {
2009 	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
2010 }
2011 
2012 static int
2013 same_clid(clientid_t *cl1, clientid_t *cl2)
2014 {
2015 	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
2016 }
2017 
2018 static bool groups_equal(struct group_info *g1, struct group_info *g2)
2019 {
2020 	int i;
2021 
2022 	if (g1->ngroups != g2->ngroups)
2023 		return false;
2024 	for (i = 0; i < g1->ngroups; i++)
2025 		if (!gid_eq(g1->gid[i], g2->gid[i]))
2026 			return false;
2027 	return true;
2028 }
2029 
2030 /*
2031  * RFC 3530 language requires clid_inuse be returned when the
2032  * "principal" associated with a requests differs from that previously
2033  * used.  We use uid, gid's, and gss principal string as our best
2034  * approximation.  We also don't want to allow non-gss use of a client
2035  * established using gss: in theory cr_principal should catch that
2036  * change, but in practice cr_principal can be null even in the gss case
2037  * since gssd doesn't always pass down a principal string.
2038  */
2039 static bool is_gss_cred(struct svc_cred *cr)
2040 {
2041 	/* Is cr_flavor one of the gss "pseudoflavors"?: */
2042 	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
2043 }
2044 
2045 
2046 static bool
2047 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
2048 {
2049 	if ((is_gss_cred(cr1) != is_gss_cred(cr2))
2050 		|| (!uid_eq(cr1->cr_uid, cr2->cr_uid))
2051 		|| (!gid_eq(cr1->cr_gid, cr2->cr_gid))
2052 		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
2053 		return false;
2054 	if (cr1->cr_principal == cr2->cr_principal)
2055 		return true;
2056 	if (!cr1->cr_principal || !cr2->cr_principal)
2057 		return false;
2058 	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
2059 }
2060 
2061 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
2062 {
2063 	struct svc_cred *cr = &rqstp->rq_cred;
2064 	u32 service;
2065 
2066 	if (!cr->cr_gss_mech)
2067 		return false;
2068 	service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
2069 	return service == RPC_GSS_SVC_INTEGRITY ||
2070 	       service == RPC_GSS_SVC_PRIVACY;
2071 }
2072 
2073 bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
2074 {
2075 	struct svc_cred *cr = &rqstp->rq_cred;
2076 
2077 	if (!cl->cl_mach_cred)
2078 		return true;
2079 	if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
2080 		return false;
2081 	if (!svc_rqst_integrity_protected(rqstp))
2082 		return false;
2083 	if (cl->cl_cred.cr_raw_principal)
2084 		return 0 == strcmp(cl->cl_cred.cr_raw_principal,
2085 						cr->cr_raw_principal);
2086 	if (!cr->cr_principal)
2087 		return false;
2088 	return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
2089 }
2090 
2091 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
2092 {
2093 	__be32 verf[2];
2094 
2095 	/*
2096 	 * This is opaque to the client, so no need to byte-swap. Use
2097 	 * __force to keep sparse happy
2098 	 */
2099 	verf[0] = (__force __be32)get_seconds();
2100 	verf[1] = (__force __be32)nn->clverifier_counter++;
2101 	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
2102 }
2103 
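/*
 * Construct a new clientid from the server boot time and a per-net counter,
 * and generate a fresh confirm verifier for it.
 */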
2104 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
2105 {
2106 	clp->cl_clientid.cl_boot = nn->boot_time;
2107 	clp->cl_clientid.cl_id = nn->clientid_counter++;
2108 	gen_confirm(clp, nn);
2109 }
2110 
2111 static struct nfs4_stid *
2112 find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
2113 {
2114 	struct nfs4_stid *ret;
2115 
2116 	ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
2117 	if (!ret || !ret->sc_type)
2118 		return NULL;
2119 	return ret;
2120 }
2121 
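/*
 * Look up a stateid by its opaque id and check it against the given type
 * mask; on success a reference is taken under cl_lock and the caller is
 * responsible for putting it.
 */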
2122 static struct nfs4_stid *
2123 find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
2124 {
2125 	struct nfs4_stid *s;
2126 
2127 	spin_lock(&cl->cl_lock);
2128 	s = find_stateid_locked(cl, t);
2129 	if (s != NULL) {
2130 		if (typemask & s->sc_type)
2131 			refcount_inc(&s->sc_count);
2132 		else
2133 			s = NULL;
2134 	}
2135 	spin_unlock(&cl->cl_lock);
2136 	return s;
2137 }
2138 
2139 static struct nfs4_client *create_client(struct xdr_netobj name,
2140 		struct svc_rqst *rqstp, nfs4_verifier *verf)
2141 {
2142 	struct nfs4_client *clp;
2143 	struct sockaddr *sa = svc_addr(rqstp);
2144 	int ret;
2145 	struct net *net = SVC_NET(rqstp);
2146 
2147 	clp = alloc_client(name);
2148 	if (clp == NULL)
2149 		return NULL;
2150 
2151 	ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
2152 	if (ret) {
2153 		free_client(clp);
2154 		return NULL;
2155 	}
2156 	nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
2157 	clp->cl_time = get_seconds();
2158 	clear_bit(0, &clp->cl_cb_slot_busy);
2159 	copy_verf(clp, verf);
2160 	rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
2161 	clp->cl_cb_session = NULL;
2162 	clp->net = net;
2163 	return clp;
2164 }
2165 
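/*
 * Insert a client into one of the per-net name trees (conf_name_tree or
 * unconf_name_tree), ordered by compare_blob() on the client name.
 */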
2166 static void
2167 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
2168 {
2169 	struct rb_node **new = &(root->rb_node), *parent = NULL;
2170 	struct nfs4_client *clp;
2171 
2172 	while (*new) {
2173 		clp = rb_entry(*new, struct nfs4_client, cl_namenode);
2174 		parent = *new;
2175 
2176 		if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
2177 			new = &((*new)->rb_left);
2178 		else
2179 			new = &((*new)->rb_right);
2180 	}
2181 
2182 	rb_link_node(&new_clp->cl_namenode, parent, new);
2183 	rb_insert_color(&new_clp->cl_namenode, root);
2184 }
2185 
2186 static struct nfs4_client *
2187 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
2188 {
2189 	int cmp;
2190 	struct rb_node *node = root->rb_node;
2191 	struct nfs4_client *clp;
2192 
2193 	while (node) {
2194 		clp = rb_entry(node, struct nfs4_client, cl_namenode);
2195 		cmp = compare_blob(&clp->cl_name, name);
2196 		if (cmp > 0)
2197 			node = node->rb_left;
2198 		else if (cmp < 0)
2199 			node = node->rb_right;
2200 		else
2201 			return clp;
2202 	}
2203 	return NULL;
2204 }
2205 
2206 static void
2207 add_to_unconfirmed(struct nfs4_client *clp)
2208 {
2209 	unsigned int idhashval;
2210 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2211 
2212 	lockdep_assert_held(&nn->client_lock);
2213 
2214 	clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2215 	add_clp_to_name_tree(clp, &nn->unconf_name_tree);
2216 	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2217 	list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
2218 	renew_client_locked(clp);
2219 }
2220 
2221 static void
2222 move_to_confirmed(struct nfs4_client *clp)
2223 {
2224 	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2225 	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2226 
2227 	lockdep_assert_held(&nn->client_lock);
2228 
2229 	dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
2230 	list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
2231 	rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2232 	add_clp_to_name_tree(clp, &nn->conf_name_tree);
2233 	set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2234 	renew_client_locked(clp);
2235 }
2236 
2237 static struct nfs4_client *
2238 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
2239 {
2240 	struct nfs4_client *clp;
2241 	unsigned int idhashval = clientid_hashval(clid->cl_id);
2242 
2243 	list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
2244 		if (same_clid(&clp->cl_clientid, clid)) {
2245 			if ((bool)clp->cl_minorversion != sessions)
2246 				return NULL;
2247 			renew_client_locked(clp);
2248 			return clp;
2249 		}
2250 	}
2251 	return NULL;
2252 }
2253 
2254 static struct nfs4_client *
2255 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2256 {
2257 	struct list_head *tbl = nn->conf_id_hashtbl;
2258 
2259 	lockdep_assert_held(&nn->client_lock);
2260 	return find_client_in_id_table(tbl, clid, sessions);
2261 }
2262 
2263 static struct nfs4_client *
2264 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2265 {
2266 	struct list_head *tbl = nn->unconf_id_hashtbl;
2267 
2268 	lockdep_assert_held(&nn->client_lock);
2269 	return find_client_in_id_table(tbl, clid, sessions);
2270 }
2271 
2272 static bool clp_used_exchangeid(struct nfs4_client *clp)
2273 {
2274 	return clp->cl_exchange_flags != 0;
2275 }
2276 
2277 static struct nfs4_client *
2278 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2279 {
2280 	lockdep_assert_held(&nn->client_lock);
2281 	return find_clp_in_name_tree(name, &nn->conf_name_tree);
2282 }
2283 
2284 static struct nfs4_client *
2285 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2286 {
2287 	lockdep_assert_held(&nn->client_lock);
2288 	return find_clp_in_name_tree(name, &nn->unconf_name_tree);
2289 }
2290 
2291 static void
2292 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
2293 {
2294 	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
2295 	struct sockaddr	*sa = svc_addr(rqstp);
2296 	u32 scopeid = rpc_get_scope_id(sa);
2297 	unsigned short expected_family;
2298 
2299 	/* Currently, we only support tcp and tcp6 for the callback channel */
2300 	if (se->se_callback_netid_len == 3 &&
2301 	    !memcmp(se->se_callback_netid_val, "tcp", 3))
2302 		expected_family = AF_INET;
2303 	else if (se->se_callback_netid_len == 4 &&
2304 		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
2305 		expected_family = AF_INET6;
2306 	else
2307 		goto out_err;
2308 
2309 	conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
2310 					    se->se_callback_addr_len,
2311 					    (struct sockaddr *)&conn->cb_addr,
2312 					    sizeof(conn->cb_addr));
2313 
2314 	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
2315 		goto out_err;
2316 
2317 	if (conn->cb_addr.ss_family == AF_INET6)
2318 		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
2319 
2320 	conn->cb_prog = se->se_callback_prog;
2321 	conn->cb_ident = se->se_callback_ident;
2322 	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
2323 	return;
2324 out_err:
2325 	conn->cb_addr.ss_family = AF_UNSPEC;
2326 	conn->cb_addrlen = 0;
2327 	dprintk("NFSD: this client (clientid %08x/%08x) "
2328 		"will not receive delegations\n",
2329 		clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
2330 
2331 	return;
2332 }
2333 
2334 /*
2335  * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
2336  */
2337 static void
2338 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
2339 {
2340 	struct xdr_buf *buf = resp->xdr.buf;
2341 	struct nfsd4_slot *slot = resp->cstate.slot;
2342 	unsigned int base;
2343 
2344 	dprintk("--> %s slot %p\n", __func__, slot);
2345 
2346 	slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
2347 	slot->sl_opcnt = resp->opcnt;
2348 	slot->sl_status = resp->cstate.status;
2349 	free_svc_cred(&slot->sl_cred);
2350 	copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
2351 
2352 	if (!nfsd4_cache_this(resp)) {
2353 		slot->sl_flags &= ~NFSD4_SLOT_CACHED;
2354 		return;
2355 	}
2356 	slot->sl_flags |= NFSD4_SLOT_CACHED;
2357 
2358 	base = resp->cstate.data_offset;
2359 	slot->sl_datalen = buf->len - base;
2360 	if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
2361 		WARN(1, "%s: sessions DRC could not cache compound\n",
2362 		     __func__);
2363 	return;
2364 }
2365 
2366 /*
2367  * Encode the replay sequence operation from the slot values.
2368  * If cachethis is FALSE, encode the uncached rep error on the next
2369  * operation, which sets resp->p and increments resp->opcnt for
2370  * nfs4svc_encode_compoundres.
2371  *
2372  */
2373 static __be32
2374 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
2375 			  struct nfsd4_compoundres *resp)
2376 {
2377 	struct nfsd4_op *op;
2378 	struct nfsd4_slot *slot = resp->cstate.slot;
2379 
2380 	/* Encode the replayed sequence operation */
2381 	op = &args->ops[resp->opcnt - 1];
2382 	nfsd4_encode_operation(resp, op);
2383 
2384 	if (slot->sl_flags & NFSD4_SLOT_CACHED)
2385 		return op->status;
2386 	if (args->opcnt == 1) {
2387 		/*
2388 		 * The original operation wasn't a solo sequence--we
2389 		 * always cache those--so this retry must not match the
2390 		 * original:
2391 		 */
2392 		op->status = nfserr_seq_false_retry;
2393 	} else {
2394 		op = &args->ops[resp->opcnt++];
2395 		op->status = nfserr_retry_uncached_rep;
2396 		nfsd4_encode_operation(resp, op);
2397 	}
2398 	return op->status;
2399 }
2400 
2401 /*
2402  * The sequence operation is not cached because we can use the slot and
2403  * session values.
2404  */
2405 static __be32
2406 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
2407 			 struct nfsd4_sequence *seq)
2408 {
2409 	struct nfsd4_slot *slot = resp->cstate.slot;
2410 	struct xdr_stream *xdr = &resp->xdr;
2411 	__be32 *p;
2412 	__be32 status;
2413 
2414 	dprintk("--> %s slot %p\n", __func__, slot);
2415 
2416 	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
2417 	if (status)
2418 		return status;
2419 
2420 	p = xdr_reserve_space(xdr, slot->sl_datalen);
2421 	if (!p) {
2422 		WARN_ON_ONCE(1);
2423 		return nfserr_serverfault;
2424 	}
2425 	xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
2426 	xdr_commit_encode(xdr);
2427 
2428 	resp->opcnt = slot->sl_opcnt;
2429 	return slot->sl_status;
2430 }
2431 
2432 /*
2433  * Set the exchange_id flags returned by the server.
2434  */
2435 static void
2436 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
2437 {
2438 #ifdef CONFIG_NFSD_PNFS
2439 	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
2440 #else
2441 	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
2442 #endif
2443 
2444 	/* Referrals are supported, Migration is not. */
2445 	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
2446 
2447 	/* set the wire flags to return to client. */
2448 	clid->flags = new->cl_exchange_flags;
2449 }
2450 
2451 static bool client_has_openowners(struct nfs4_client *clp)
2452 {
2453 	struct nfs4_openowner *oo;
2454 
2455 	list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
2456 		if (!list_empty(&oo->oo_owner.so_stateids))
2457 			return true;
2458 	}
2459 	return false;
2460 }
2461 
2462 static bool client_has_state(struct nfs4_client *clp)
2463 {
2464 	return client_has_openowners(clp)
2465 #ifdef CONFIG_NFSD_PNFS
2466 		|| !list_empty(&clp->cl_lo_states)
2467 #endif
2468 		|| !list_empty(&clp->cl_delegations)
2469 		|| !list_empty(&clp->cl_sessions);
2470 }
2471 
2472 __be32
2473 nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2474 		union nfsd4_op_u *u)
2475 {
2476 	struct nfsd4_exchange_id *exid = &u->exchange_id;
2477 	struct nfs4_client *conf, *new;
2478 	struct nfs4_client *unconf = NULL;
2479 	__be32 status;
2480 	char			addr_str[INET6_ADDRSTRLEN];
2481 	nfs4_verifier		verf = exid->verifier;
2482 	struct sockaddr		*sa = svc_addr(rqstp);
2483 	bool	update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
2484 	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2485 
2486 	rpc_ntop(sa, addr_str, sizeof(addr_str));
2487 	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
2488 		"ip_addr=%s flags %x, spa_how %d\n",
2489 		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
2490 		addr_str, exid->flags, exid->spa_how);
2491 
2492 	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
2493 		return nfserr_inval;
2494 
2495 	new = create_client(exid->clname, rqstp, &verf);
2496 	if (new == NULL)
2497 		return nfserr_jukebox;
2498 
2499 	switch (exid->spa_how) {
2500 	case SP4_MACH_CRED:
2501 		exid->spo_must_enforce[0] = 0;
2502 		exid->spo_must_enforce[1] = (
2503 			1 << (OP_BIND_CONN_TO_SESSION - 32) |
2504 			1 << (OP_EXCHANGE_ID - 32) |
2505 			1 << (OP_CREATE_SESSION - 32) |
2506 			1 << (OP_DESTROY_SESSION - 32) |
2507 			1 << (OP_DESTROY_CLIENTID - 32));
2508 
2509 		exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
2510 					1 << (OP_OPEN_DOWNGRADE) |
2511 					1 << (OP_LOCKU) |
2512 					1 << (OP_DELEGRETURN));
2513 
2514 		exid->spo_must_allow[1] &= (
2515 					1 << (OP_TEST_STATEID - 32) |
2516 					1 << (OP_FREE_STATEID - 32));
2517 		if (!svc_rqst_integrity_protected(rqstp)) {
2518 			status = nfserr_inval;
2519 			goto out_nolock;
2520 		}
2521 		/*
2522 		 * Sometimes userspace doesn't give us a principal.
2523 		 * Which is a bug, really.  Anyway, we can't enforce
2524 		 * MACH_CRED in that case, better to give up now:
2525 		 */
2526 		if (!new->cl_cred.cr_principal &&
2527 					!new->cl_cred.cr_raw_principal) {
2528 			status = nfserr_serverfault;
2529 			goto out_nolock;
2530 		}
2531 		new->cl_mach_cred = true;
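		/* fall through */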
2532 	case SP4_NONE:
2533 		break;
2534 	default:				/* checked by xdr code */
2535 		WARN_ON_ONCE(1);
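		/* fall through */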
2536 	case SP4_SSV:
2537 		status = nfserr_encr_alg_unsupp;
2538 		goto out_nolock;
2539 	}
2540 
2541 	/* Cases below refer to rfc 5661 section 18.35.4: */
2542 	spin_lock(&nn->client_lock);
2543 	conf = find_confirmed_client_by_name(&exid->clname, nn);
2544 	if (conf) {
2545 		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
2546 		bool verfs_match = same_verf(&verf, &conf->cl_verifier);
2547 
2548 		if (update) {
2549 			if (!clp_used_exchangeid(conf)) { /* buggy client */
2550 				status = nfserr_inval;
2551 				goto out;
2552 			}
2553 			if (!nfsd4_mach_creds_match(conf, rqstp)) {
2554 				status = nfserr_wrong_cred;
2555 				goto out;
2556 			}
2557 			if (!creds_match) { /* case 9 */
2558 				status = nfserr_perm;
2559 				goto out;
2560 			}
2561 			if (!verfs_match) { /* case 8 */
2562 				status = nfserr_not_same;
2563 				goto out;
2564 			}
2565 			/* case 6 */
2566 			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
2567 			goto out_copy;
2568 		}
2569 		if (!creds_match) { /* case 3 */
2570 			if (client_has_state(conf)) {
2571 				status = nfserr_clid_inuse;
2572 				goto out;
2573 			}
2574 			goto out_new;
2575 		}
2576 		if (verfs_match) { /* case 2 */
2577 			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
2578 			goto out_copy;
2579 		}
2580 		/* case 5, client reboot */
2581 		conf = NULL;
2582 		goto out_new;
2583 	}
2584 
2585 	if (update) { /* case 7 */
2586 		status = nfserr_noent;
2587 		goto out;
2588 	}
2589 
2590 	unconf  = find_unconfirmed_client_by_name(&exid->clname, nn);
2591 	if (unconf) /* case 4, possible retry or client restart */
2592 		unhash_client_locked(unconf);
2593 
2594 	/* case 1 (normal case) */
2595 out_new:
2596 	if (conf) {
2597 		status = mark_client_expired_locked(conf);
2598 		if (status)
2599 			goto out;
2600 	}
2601 	new->cl_minorversion = cstate->minorversion;
2602 	new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
2603 	new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
2604 
2605 	gen_clid(new, nn);
2606 	add_to_unconfirmed(new);
2607 	swap(new, conf);
2608 out_copy:
2609 	exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
2610 	exid->clientid.cl_id = conf->cl_clientid.cl_id;
2611 
2612 	exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
2613 	nfsd4_set_ex_flags(conf, exid);
2614 
2615 	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
2616 		conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
2617 	status = nfs_ok;
2618 
2619 out:
2620 	spin_unlock(&nn->client_lock);
2621 out_nolock:
2622 	if (new)
2623 		expire_client(new);
2624 	if (unconf)
2625 		expire_client(unconf);
2626 	return status;
2627 }
2628 
2629 static __be32
2630 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
2631 {
2632 	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
2633 		slot_seqid);
2634 
2635 	/* The slot is in use, and no response has been sent. */
2636 	if (slot_inuse) {
2637 		if (seqid == slot_seqid)
2638 			return nfserr_jukebox;
2639 		else
2640 			return nfserr_seq_misordered;
2641 	}
2642 	/* Note unsigned 32-bit arithmetic handles wraparound: */
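	/* (e.g. slot_seqid 0xffffffff followed by seqid 0 is the expected next request) */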
2643 	if (likely(seqid == slot_seqid + 1))
2644 		return nfs_ok;
2645 	if (seqid == slot_seqid)
2646 		return nfserr_replay_cache;
2647 	return nfserr_seq_misordered;
2648 }
2649 
2650 /*
2651  * Cache the create session result into the create session single DRC
2652  * slot cache by saving the xdr structure. sl_seqid has been set.
2653  * Do this for solo or embedded create session operations.
2654  */
2655 static void
2656 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
2657 			   struct nfsd4_clid_slot *slot, __be32 nfserr)
2658 {
2659 	slot->sl_status = nfserr;
2660 	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
2661 }
2662 
2663 static __be32
2664 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
2665 			    struct nfsd4_clid_slot *slot)
2666 {
2667 	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
2668 	return slot->sl_status;
2669 }
2670 
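/*
 * Lower bounds on the fore channel maxreq_sz/maxresp_sz: the smallest
 * compound that carries just a SEQUENCE operation, used by
 * check_forechannel_attrs() to reject unusably small channel attributes.
 */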
2671 #define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
2672 			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
2673 			1 +	/* MIN tag is length with zero, only length */ \
2674 			3 +	/* version, opcount, opcode */ \
2675 			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
2676 				/* seqid, slotID, slotID, cache */ \
2677 			4 ) * sizeof(__be32))
2678 
2679 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
2680 			2 +	/* verifier: AUTH_NULL, length 0 */\
2681 			1 +	/* status */ \
2682 			1 +	/* MIN tag is length with zero, only length */ \
2683 			3 +	/* opcount, opcode, opstatus*/ \
2684 			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
2685 				/* seqid, slotID, slotID, slotID, status */ \
2686 			5 ) * sizeof(__be32))
2687 
2688 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
2689 {
2690 	u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
2691 
2692 	if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
2693 		return nfserr_toosmall;
2694 	if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
2695 		return nfserr_toosmall;
2696 	ca->headerpadsz = 0;
2697 	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
2698 	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
2699 	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
2700 	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
2701 			NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
2702 	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
2703 	/*
2704 	 * Note that decreasing the slot size below the client's request may
2705 	 * make it difficult for the client to function correctly, whereas
2706 	 * decreasing the number of slots will (just?) affect
2707 	 * performance.  When short on memory we therefore prefer to
2708 	 * decrease the number of slots instead of their size.  Clients that
2709 	 * request larger slots than they need will get poor results:
2710 	 */
2711 	ca->maxreqs = nfsd4_get_drc_mem(ca);
2712 	if (!ca->maxreqs)
2713 		return nfserr_jukebox;
2714 
2715 	return nfs_ok;
2716 }
2717 
2718 /*
2719  * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
2720  * These are based on similar macros in linux/sunrpc/msg_prot.h .
2721  */
2722 #define RPC_MAX_HEADER_WITH_AUTH_SYS \
2723 	(RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
2724 
2725 #define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
2726 	(RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
2727 
2728 #define NFSD_CB_MAX_REQ_SZ	((NFS4_enc_cb_recall_sz + \
2729 				 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
2730 #define NFSD_CB_MAX_RESP_SZ	((NFS4_dec_cb_recall_sz + \
2731 				 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
2732 				 sizeof(__be32))
2733 
2734 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
2735 {
2736 	ca->headerpadsz = 0;
2737 
2738 	if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
2739 		return nfserr_toosmall;
2740 	if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
2741 		return nfserr_toosmall;
2742 	ca->maxresp_cached = 0;
2743 	if (ca->maxops < 2)
2744 		return nfserr_toosmall;
2745 
2746 	return nfs_ok;
2747 }
2748 
2749 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
2750 {
2751 	switch (cbs->flavor) {
2752 	case RPC_AUTH_NULL:
2753 	case RPC_AUTH_UNIX:
2754 		return nfs_ok;
2755 	default:
2756 		/*
2757 		 * GSS case: the spec doesn't allow us to return this
2758 		 * error.  But it also doesn't allow us not to support
2759 		 * GSS.
2760 		 * I'd rather this fail hard than return some error the
2761 		 * client might think it can already handle:
2762 		 */
2763 		return nfserr_encr_alg_unsupp;
2764 	}
2765 }
2766 
2767 __be32
2768 nfsd4_create_session(struct svc_rqst *rqstp,
2769 		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
2770 {
2771 	struct nfsd4_create_session *cr_ses = &u->create_session;
2772 	struct sockaddr *sa = svc_addr(rqstp);
2773 	struct nfs4_client *conf, *unconf;
2774 	struct nfs4_client *old = NULL;
2775 	struct nfsd4_session *new;
2776 	struct nfsd4_conn *conn;
2777 	struct nfsd4_clid_slot *cs_slot = NULL;
2778 	__be32 status = 0;
2779 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2780 
2781 	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
2782 		return nfserr_inval;
2783 	status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
2784 	if (status)
2785 		return status;
2786 	status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
2787 	if (status)
2788 		return status;
2789 	status = check_backchannel_attrs(&cr_ses->back_channel);
2790 	if (status)
2791 		goto out_release_drc_mem;
2792 	status = nfserr_jukebox;
2793 	new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
2794 	if (!new)
2795 		goto out_release_drc_mem;
2796 	conn = alloc_conn_from_crses(rqstp, cr_ses);
2797 	if (!conn)
2798 		goto out_free_session;
2799 
2800 	spin_lock(&nn->client_lock);
2801 	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
2802 	conf = find_confirmed_client(&cr_ses->clientid, true, nn);
2803 	WARN_ON_ONCE(conf && unconf);
2804 
2805 	if (conf) {
2806 		status = nfserr_wrong_cred;
2807 		if (!nfsd4_mach_creds_match(conf, rqstp))
2808 			goto out_free_conn;
2809 		cs_slot = &conf->cl_cs_slot;
2810 		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
2811 		if (status) {
2812 			if (status == nfserr_replay_cache)
2813 				status = nfsd4_replay_create_session(cr_ses, cs_slot);
2814 			goto out_free_conn;
2815 		}
2816 	} else if (unconf) {
2817 		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
2818 		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
2819 			status = nfserr_clid_inuse;
2820 			goto out_free_conn;
2821 		}
2822 		status = nfserr_wrong_cred;
2823 		if (!nfsd4_mach_creds_match(unconf, rqstp))
2824 			goto out_free_conn;
2825 		cs_slot = &unconf->cl_cs_slot;
2826 		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
2827 		if (status) {
2828 			/* an unconfirmed replay returns misordered */
2829 			status = nfserr_seq_misordered;
2830 			goto out_free_conn;
2831 		}
2832 		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
2833 		if (old) {
2834 			status = mark_client_expired_locked(old);
2835 			if (status) {
2836 				old = NULL;
2837 				goto out_free_conn;
2838 			}
2839 		}
2840 		move_to_confirmed(unconf);
2841 		conf = unconf;
2842 	} else {
2843 		status = nfserr_stale_clientid;
2844 		goto out_free_conn;
2845 	}
2846 	status = nfs_ok;
2847 	/* Persistent sessions are not supported */
2848 	cr_ses->flags &= ~SESSION4_PERSIST;
2849 	/* Upshifting from TCP to RDMA is not supported */
2850 	cr_ses->flags &= ~SESSION4_RDMA;
2851 
2852 	init_session(rqstp, new, conf, cr_ses);
2853 	nfsd4_get_session_locked(new);
2854 
2855 	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
2856 	       NFS4_MAX_SESSIONID_LEN);
2857 	cs_slot->sl_seqid++;
2858 	cr_ses->seqid = cs_slot->sl_seqid;
2859 
2860 	/* cache solo and embedded create sessions under the client_lock */
2861 	nfsd4_cache_create_session(cr_ses, cs_slot, status);
2862 	spin_unlock(&nn->client_lock);
2863 	/* init connection and backchannel */
2864 	nfsd4_init_conn(rqstp, conn, new);
2865 	nfsd4_put_session(new);
2866 	if (old)
2867 		expire_client(old);
2868 	return status;
2869 out_free_conn:
2870 	spin_unlock(&nn->client_lock);
2871 	free_conn(conn);
2872 	if (old)
2873 		expire_client(old);
2874 out_free_session:
2875 	__free_session(new);
2876 out_release_drc_mem:
2877 	nfsd4_put_drc_mem(&cr_ses->fore_channel);
2878 	return status;
2879 }
2880 
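/*
 * BIND_CONN_TO_SESSION direction handling: when the client offers a choice
 * ("fore or both", "back or both") we opt to bind the connection in both
 * directions; plain FORE and BACK requests are left as-is.
 */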
2881 static __be32 nfsd4_map_bcts_dir(u32 *dir)
2882 {
2883 	switch (*dir) {
2884 	case NFS4_CDFC4_FORE:
2885 	case NFS4_CDFC4_BACK:
2886 		return nfs_ok;
2887 	case NFS4_CDFC4_FORE_OR_BOTH:
2888 	case NFS4_CDFC4_BACK_OR_BOTH:
2889 		*dir = NFS4_CDFC4_BOTH;
2890 		return nfs_ok;
2891 	}
2892 	return nfserr_inval;
2893 }
2894 
2895 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
2896 		struct nfsd4_compound_state *cstate,
2897 		union nfsd4_op_u *u)
2898 {
2899 	struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
2900 	struct nfsd4_session *session = cstate->session;
2901 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2902 	__be32 status;
2903 
2904 	status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
2905 	if (status)
2906 		return status;
2907 	spin_lock(&nn->client_lock);
2908 	session->se_cb_prog = bc->bc_cb_program;
2909 	session->se_cb_sec = bc->bc_cb_sec;
2910 	spin_unlock(&nn->client_lock);
2911 
2912 	nfsd4_probe_callback(session->se_client);
2913 
2914 	return nfs_ok;
2915 }
2916 
2917 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
2918 		     struct nfsd4_compound_state *cstate,
2919 		     union nfsd4_op_u *u)
2920 {
2921 	struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
2922 	__be32 status;
2923 	struct nfsd4_conn *conn;
2924 	struct nfsd4_session *session;
2925 	struct net *net = SVC_NET(rqstp);
2926 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2927 
2928 	if (!nfsd4_last_compound_op(rqstp))
2929 		return nfserr_not_only_op;
2930 	spin_lock(&nn->client_lock);
2931 	session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
2932 	spin_unlock(&nn->client_lock);
2933 	if (!session)
2934 		goto out_no_session;
2935 	status = nfserr_wrong_cred;
2936 	if (!nfsd4_mach_creds_match(session->se_client, rqstp))
2937 		goto out;
2938 	status = nfsd4_map_bcts_dir(&bcts->dir);
2939 	if (status)
2940 		goto out;
2941 	conn = alloc_conn(rqstp, bcts->dir);
2942 	status = nfserr_jukebox;
2943 	if (!conn)
2944 		goto out;
2945 	nfsd4_init_conn(rqstp, conn, session);
2946 	status = nfs_ok;
2947 out:
2948 	nfsd4_put_session(session);
2949 out_no_session:
2950 	return status;
2951 }
2952 
2953 static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
2954 {
2955 	if (!session)
2956 		return false;
2957 	return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
2958 }
2959 
2960 __be32
2961 nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
2962 		union nfsd4_op_u *u)
2963 {
2964 	struct nfsd4_destroy_session *sessionid = &u->destroy_session;
2965 	struct nfsd4_session *ses;
2966 	__be32 status;
2967 	int ref_held_by_me = 0;
2968 	struct net *net = SVC_NET(r);
2969 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2970 
2971 	status = nfserr_not_only_op;
2972 	if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
2973 		if (!nfsd4_last_compound_op(r))
2974 			goto out;
2975 		ref_held_by_me++;
2976 	}
2977 	dump_sessionid(__func__, &sessionid->sessionid);
2978 	spin_lock(&nn->client_lock);
2979 	ses = find_in_sessionid_hashtbl(&sessionid->sessionid, net, &status);
2980 	if (!ses)
2981 		goto out_client_lock;
2982 	status = nfserr_wrong_cred;
2983 	if (!nfsd4_mach_creds_match(ses->se_client, r))
2984 		goto out_put_session;
2985 	status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
2986 	if (status)
2987 		goto out_put_session;
2988 	unhash_session(ses);
2989 	spin_unlock(&nn->client_lock);
2990 
2991 	nfsd4_probe_callback_sync(ses->se_client);
2992 
2993 	spin_lock(&nn->client_lock);
2994 	status = nfs_ok;
2995 out_put_session:
2996 	nfsd4_put_session_locked(ses);
2997 out_client_lock:
2998 	spin_unlock(&nn->client_lock);
2999 out:
3000 	return status;
3001 }
3002 
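/* Caller must hold the client's cl_lock */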
3003 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
3004 {
3005 	struct nfsd4_conn *c;
3006 
3007 	list_for_each_entry(c, &s->se_conns, cn_persession) {
3008 		if (c->cn_xprt == xpt) {
3009 			return c;
3010 		}
3011 	}
3012 	return NULL;
3013 }
3014 
3015 static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
3016 {
3017 	struct nfs4_client *clp = ses->se_client;
3018 	struct nfsd4_conn *c;
3019 	__be32 status = nfs_ok;
3020 	int ret;
3021 
3022 	spin_lock(&clp->cl_lock);
3023 	c = __nfsd4_find_conn(new->cn_xprt, ses);
3024 	if (c)
3025 		goto out_free;
3026 	status = nfserr_conn_not_bound_to_session;
3027 	if (clp->cl_mach_cred)
3028 		goto out_free;
3029 	__nfsd4_hash_conn(new, ses);
3030 	spin_unlock(&clp->cl_lock);
3031 	ret = nfsd4_register_conn(new);
3032 	if (ret)
3033 		/* oops; xprt is already down: */
3034 		nfsd4_conn_lost(&new->cn_xpt_user);
3035 	return nfs_ok;
3036 out_free:
3037 	spin_unlock(&clp->cl_lock);
3038 	free_conn(new);
3039 	return status;
3040 }
3041 
3042 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
3043 {
3044 	struct nfsd4_compoundargs *args = rqstp->rq_argp;
3045 
3046 	return args->opcnt > session->se_fchannel.maxops;
3047 }
3048 
3049 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
3050 				  struct nfsd4_session *session)
3051 {
3052 	struct xdr_buf *xb = &rqstp->rq_arg;
3053 
3054 	return xb->len > session->se_fchannel.maxreq_sz;
3055 }
3056 
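/*
 * Sanity-check a request that the client claims is a replay against what we
 * actually cached in the slot; a mismatch means a false retry rather than a
 * genuine replay.
 */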
3057 static bool replay_matches_cache(struct svc_rqst *rqstp,
3058 		 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
3059 {
3060 	struct nfsd4_compoundargs *argp = rqstp->rq_argp;
3061 
3062 	if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
3063 	    (bool)seq->cachethis)
3064 		return false;
3065 	/*
3066 	 * If there's an error then the reply can have fewer ops than
3067 	 * the call.  But if we cached a reply with *more* ops than the
3068 	 * call you're sending us now, then this new call is clearly not
3069 	 * really a replay of the old one:
3070 	 */
3071 	if (slot->sl_opcnt < argp->opcnt)
3072 		return false;
3073 	/* This is the only check explicitly called by spec: */
3074 	if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
3075 		return false;
3076 	/*
3077 	 * There may be more comparisons we could actually do, but the
3078 	 * spec doesn't require us to catch every case where the calls
3079 	 * don't match (that would require caching the call as well as
3080 	 * the reply), so we don't bother.
3081 	 */
3082 	return true;
3083 }
3084 
3085 __be32
3086 nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3087 		union nfsd4_op_u *u)
3088 {
3089 	struct nfsd4_sequence *seq = &u->sequence;
3090 	struct nfsd4_compoundres *resp = rqstp->rq_resp;
3091 	struct xdr_stream *xdr = &resp->xdr;
3092 	struct nfsd4_session *session;
3093 	struct nfs4_client *clp;
3094 	struct nfsd4_slot *slot;
3095 	struct nfsd4_conn *conn;
3096 	__be32 status;
3097 	int buflen;
3098 	struct net *net = SVC_NET(rqstp);
3099 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3100 
3101 	if (resp->opcnt != 1)
3102 		return nfserr_sequence_pos;
3103 
3104 	/*
3105 	 * Will be either used or freed by nfsd4_sequence_check_conn
3106 	 * below.
3107 	 */
3108 	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
3109 	if (!conn)
3110 		return nfserr_jukebox;
3111 
3112 	spin_lock(&nn->client_lock);
3113 	session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
3114 	if (!session)
3115 		goto out_no_session;
3116 	clp = session->se_client;
3117 
3118 	status = nfserr_too_many_ops;
3119 	if (nfsd4_session_too_many_ops(rqstp, session))
3120 		goto out_put_session;
3121 
3122 	status = nfserr_req_too_big;
3123 	if (nfsd4_request_too_big(rqstp, session))
3124 		goto out_put_session;
3125 
3126 	status = nfserr_badslot;
3127 	if (seq->slotid >= session->se_fchannel.maxreqs)
3128 		goto out_put_session;
3129 
3130 	slot = session->se_slots[seq->slotid];
3131 	dprintk("%s: slotid %d\n", __func__, seq->slotid);
3132 
3133 	/* We do not negotiate the number of slots yet, so set the
3134 	 * maxslots to the session maxreqs, which is used to encode
3135 	 * sr_highest_slotid and sr_target_slotid to maxslots */
3136 	seq->maxslots = session->se_fchannel.maxreqs;
3137 
3138 	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
3139 					slot->sl_flags & NFSD4_SLOT_INUSE);
3140 	if (status == nfserr_replay_cache) {
3141 		status = nfserr_seq_misordered;
3142 		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
3143 			goto out_put_session;
3144 		status = nfserr_seq_false_retry;
3145 		if (!replay_matches_cache(rqstp, seq, slot))
3146 			goto out_put_session;
3147 		cstate->slot = slot;
3148 		cstate->session = session;
3149 		cstate->clp = clp;
3150 		/* Return the cached reply status and set cstate->status
3151 		 * for nfsd4_proc_compound processing */
3152 		status = nfsd4_replay_cache_entry(resp, seq);
3153 		cstate->status = nfserr_replay_cache;
3154 		goto out;
3155 	}
3156 	if (status)
3157 		goto out_put_session;
3158 
3159 	status = nfsd4_sequence_check_conn(conn, session);
3160 	conn = NULL;
3161 	if (status)
3162 		goto out_put_session;
3163 
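	/*
	 * If the client asked us to cache this reply, the response must
	 * also fit within the (typically smaller) maxresp_cached limit
	 * negotiated at CREATE_SESSION time:
	 */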
3164 	buflen = (seq->cachethis) ?
3165 			session->se_fchannel.maxresp_cached :
3166 			session->se_fchannel.maxresp_sz;
3167 	status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
3168 				    nfserr_rep_too_big;
3169 	if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
3170 		goto out_put_session;
3171 	svc_reserve(rqstp, buflen);
3172 
3173 	status = nfs_ok;
3174 	/* Success! bump slot seqid */
3175 	slot->sl_seqid = seq->seqid;
3176 	slot->sl_flags |= NFSD4_SLOT_INUSE;
3177 	if (seq->cachethis)
3178 		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
3179 	else
3180 		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
3181 
3182 	cstate->slot = slot;
3183 	cstate->session = session;
3184 	cstate->clp = clp;
3185 
3186 out:
3187 	switch (clp->cl_cb_state) {
3188 	case NFSD4_CB_DOWN:
3189 		seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
3190 		break;
3191 	case NFSD4_CB_FAULT:
3192 		seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
3193 		break;
3194 	default:
3195 		seq->status_flags = 0;
3196 	}
3197 	if (!list_empty(&clp->cl_revoked))
3198 		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
3199 out_no_session:
3200 	if (conn)
3201 		free_conn(conn);
3202 	spin_unlock(&nn->client_lock);
3203 	return status;
3204 out_put_session:
3205 	nfsd4_put_session_locked(session);
3206 	goto out_no_session;
3207 }
3208 
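/*
 * Called once the compound has been processed: cache the reply in the slot
 * (unless this was itself a replay) and release the slot, then drop the
 * session or client reference taken when the compound was set up.
 */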
3209 void
3210 nfsd4_sequence_done(struct nfsd4_compoundres *resp)
3211 {
3212 	struct nfsd4_compound_state *cs = &resp->cstate;
3213 
3214 	if (nfsd4_has_session(cs)) {
3215 		if (cs->status != nfserr_replay_cache) {
3216 			nfsd4_store_cache_entry(resp);
3217 			cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
3218 		}
3219 		/* Drop session reference that was taken in nfsd4_sequence() */
3220 		nfsd4_put_session(cs->session);
3221 	} else if (cs->clp)
3222 		put_client_renew(cs->clp);
3223 }
3224 
3225 __be32
3226 nfsd4_destroy_clientid(struct svc_rqst *rqstp,
3227 		struct nfsd4_compound_state *cstate,
3228 		union nfsd4_op_u *u)
3229 {
3230 	struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
3231 	struct nfs4_client *conf, *unconf;
3232 	struct nfs4_client *clp = NULL;
3233 	__be32 status = 0;
3234 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3235 
3236 	spin_lock(&nn->client_lock);
3237 	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
3238 	conf = find_confirmed_client(&dc->clientid, true, nn);
3239 	WARN_ON_ONCE(conf && unconf);
3240 
3241 	if (conf) {
3242 		if (client_has_state(conf)) {
3243 			status = nfserr_clientid_busy;
3244 			goto out;
3245 		}
3246 		status = mark_client_expired_locked(conf);
3247 		if (status)
3248 			goto out;
3249 		clp = conf;
3250 	} else if (unconf)
3251 		clp = unconf;
3252 	else {
3253 		status = nfserr_stale_clientid;
3254 		goto out;
3255 	}
3256 	if (!nfsd4_mach_creds_match(clp, rqstp)) {
3257 		clp = NULL;
3258 		status = nfserr_wrong_cred;
3259 		goto out;
3260 	}
3261 	unhash_client_locked(clp);
3262 out:
3263 	spin_unlock(&nn->client_lock);
3264 	if (clp)
3265 		expire_client(clp);
3266 	return status;
3267 }
3268 
3269 __be32
3270 nfsd4_reclaim_complete(struct svc_rqst *rqstp,
3271 		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
3272 {
3273 	struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
3274 	__be32 status = 0;
3275 
3276 	if (rc->rca_one_fs) {
3277 		if (!cstate->current_fh.fh_dentry)
3278 			return nfserr_nofilehandle;
3279 		/*
3280 		 * We don't take advantage of the rca_one_fs case.
3281 		 * That's OK, it's optional, we can safely ignore it.
3282 		 */
3283 		return nfs_ok;
3284 	}
3285 
3286 	status = nfserr_complete_already;
3287 	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
3288 			     &cstate->session->se_client->cl_flags))
3289 		goto out;
3290 
3291 	status = nfserr_stale_clientid;
3292 	if (is_client_expired(cstate->session->se_client))
3293 		/*
3294 		 * The following error isn't really legal.
3295 		 * But we only get here if the client just explicitly
3296 		 * destroyed its own clientid.  Surely it no longer cares what
3297 		 * error it gets back on an operation for the dead
3298 		 * client.
3299 		 */
3300 		goto out;
3301 
3302 	status = nfs_ok;
3303 	nfsd4_client_record_create(cstate->session->se_client);
3304 out:
3305 	return status;
3306 }
3307 
3308 __be32
3309 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3310 		  union nfsd4_op_u *u)
3311 {
3312 	struct nfsd4_setclientid *setclid = &u->setclientid;
3313 	struct xdr_netobj 	clname = setclid->se_name;
3314 	nfs4_verifier		clverifier = setclid->se_verf;
3315 	struct nfs4_client	*conf, *new;
3316 	struct nfs4_client	*unconf = NULL;
3317 	__be32 			status;
3318 	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3319 
3320 	new = create_client(clname, rqstp, &clverifier);
3321 	if (new == NULL)
3322 		return nfserr_jukebox;
3323 	/* Cases below refer to rfc 3530 section 14.2.33: */
3324 	spin_lock(&nn->client_lock);
3325 	conf = find_confirmed_client_by_name(&clname, nn);
3326 	if (conf && client_has_state(conf)) {
3327 		/* case 0: */
3328 		status = nfserr_clid_inuse;
3329 		if (clp_used_exchangeid(conf))
3330 			goto out;
3331 		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
3332 			char addr_str[INET6_ADDRSTRLEN];
3333 			rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
3334 				 sizeof(addr_str));
3335 			dprintk("NFSD: setclientid: string in use by client "
3336 				"at %s\n", addr_str);
3337 			goto out;
3338 		}
3339 	}
3340 	unconf = find_unconfirmed_client_by_name(&clname, nn);
3341 	if (unconf)
3342 		unhash_client_locked(unconf);
3343 	if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
3344 		/* case 1: probable callback update */
3345 		copy_clid(new, conf);
3346 		gen_confirm(new, nn);
3347 	} else /* case 4 (new client) or cases 2, 3 (client reboot): */
3348 		gen_clid(new, nn);
3349 	new->cl_minorversion = 0;
3350 	gen_callback(new, setclid, rqstp);
3351 	add_to_unconfirmed(new);
3352 	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
3353 	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
3354 	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
3355 	new = NULL;
3356 	status = nfs_ok;
3357 out:
3358 	spin_unlock(&nn->client_lock);
3359 	if (new)
3360 		free_client(new);
3361 	if (unconf)
3362 		expire_client(unconf);
3363 	return status;
3364 }
3365 
3366 
3367 __be32
3368 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
3369 			struct nfsd4_compound_state *cstate,
3370 			union nfsd4_op_u *u)
3371 {
3372 	struct nfsd4_setclientid_confirm *setclientid_confirm =
3373 			&u->setclientid_confirm;
3374 	struct nfs4_client *conf, *unconf;
3375 	struct nfs4_client *old = NULL;
3376 	nfs4_verifier confirm = setclientid_confirm->sc_confirm;
3377 	clientid_t * clid = &setclientid_confirm->sc_clientid;
3378 	__be32 status;
3379 	struct nfsd_net	*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3380 
3381 	if (STALE_CLIENTID(clid, nn))
3382 		return nfserr_stale_clientid;
3383 
3384 	spin_lock(&nn->client_lock);
3385 	conf = find_confirmed_client(clid, false, nn);
3386 	unconf = find_unconfirmed_client(clid, false, nn);
3387 	/*
3388 	 * We try hard to give out unique clientids, so if we get an
3389 	 * attempt to confirm the same clientid with a different cred,
3390 	 * the client may be buggy; this should never happen.
3391 	 *
3392 	 * Nevertheless, RFC 7530 recommends INUSE for this case:
3393 	 */
3394 	status = nfserr_clid_inuse;
3395 	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
3396 		goto out;
3397 	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
3398 		goto out;
3399 	/* cases below refer to rfc 3530 section 14.2.34: */
3400 	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
3401 		if (conf && same_verf(&confirm, &conf->cl_confirm)) {
3402 			/* case 2: probable retransmit */
3403 			status = nfs_ok;
3404 		} else /* case 4: client hasn't noticed we rebooted yet? */
3405 			status = nfserr_stale_clientid;
3406 		goto out;
3407 	}
3408 	status = nfs_ok;
3409 	if (conf) { /* case 1: callback update */
3410 		old = unconf;
3411 		unhash_client_locked(old);
3412 		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
3413 	} else { /* case 3: normal case; new or rebooted client */
3414 		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3415 		if (old) {
3416 			status = nfserr_clid_inuse;
3417 			if (client_has_state(old)
3418 					&& !same_creds(&unconf->cl_cred,
3419 							&old->cl_cred))
3420 				goto out;
3421 			status = mark_client_expired_locked(old);
3422 			if (status) {
3423 				old = NULL;
3424 				goto out;
3425 			}
3426 		}
3427 		move_to_confirmed(unconf);
3428 		conf = unconf;
3429 	}
3430 	get_client_locked(conf);
3431 	spin_unlock(&nn->client_lock);
3432 	nfsd4_probe_callback(conf);
3433 	spin_lock(&nn->client_lock);
3434 	put_client_renew_locked(conf);
3435 out:
3436 	spin_unlock(&nn->client_lock);
3437 	if (old)
3438 		expire_client(old);
3439 	return status;
3440 }
3441 
3442 static struct nfs4_file *nfsd4_alloc_file(void)
3443 {
3444 	return kmem_cache_alloc(file_slab, GFP_KERNEL);
3445 }
3446 
3447 /* OPEN Share state helper functions */
3448 static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
3449 				struct nfs4_file *fp)
3450 {
3451 	lockdep_assert_held(&state_lock);
3452 
3453 	refcount_set(&fp->fi_ref, 1);
3454 	spin_lock_init(&fp->fi_lock);
3455 	INIT_LIST_HEAD(&fp->fi_stateids);
3456 	INIT_LIST_HEAD(&fp->fi_delegations);
3457 	INIT_LIST_HEAD(&fp->fi_clnt_odstate);
3458 	fh_copy_shallow(&fp->fi_fhandle, fh);
3459 	fp->fi_deleg_file = NULL;
3460 	fp->fi_had_conflict = false;
3461 	fp->fi_share_deny = 0;
3462 	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
3463 	memset(fp->fi_access, 0, sizeof(fp->fi_access));
3464 #ifdef CONFIG_NFSD_PNFS
3465 	INIT_LIST_HEAD(&fp->fi_lo_states);
3466 	atomic_set(&fp->fi_lo_recalls, 0);
3467 #endif
3468 	hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
3469 }
3470 
3471 void
3472 nfsd4_free_slabs(void)
3473 {
3474 	kmem_cache_destroy(odstate_slab);
3475 	kmem_cache_destroy(openowner_slab);
3476 	kmem_cache_destroy(lockowner_slab);
3477 	kmem_cache_destroy(file_slab);
3478 	kmem_cache_destroy(stateid_slab);
3479 	kmem_cache_destroy(deleg_slab);
3480 }
3481 
3482 int
3483 nfsd4_init_slabs(void)
3484 {
3485 	openowner_slab = kmem_cache_create("nfsd4_openowners",
3486 			sizeof(struct nfs4_openowner), 0, 0, NULL);
3487 	if (openowner_slab == NULL)
3488 		goto out;
3489 	lockowner_slab = kmem_cache_create("nfsd4_lockowners",
3490 			sizeof(struct nfs4_lockowner), 0, 0, NULL);
3491 	if (lockowner_slab == NULL)
3492 		goto out_free_openowner_slab;
3493 	file_slab = kmem_cache_create("nfsd4_files",
3494 			sizeof(struct nfs4_file), 0, 0, NULL);
3495 	if (file_slab == NULL)
3496 		goto out_free_lockowner_slab;
3497 	stateid_slab = kmem_cache_create("nfsd4_stateids",
3498 			sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
3499 	if (stateid_slab == NULL)
3500 		goto out_free_file_slab;
3501 	deleg_slab = kmem_cache_create("nfsd4_delegations",
3502 			sizeof(struct nfs4_delegation), 0, 0, NULL);
3503 	if (deleg_slab == NULL)
3504 		goto out_free_stateid_slab;
3505 	odstate_slab = kmem_cache_create("nfsd4_odstate",
3506 			sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
3507 	if (odstate_slab == NULL)
3508 		goto out_free_deleg_slab;
3509 	return 0;
3510 
3511 out_free_deleg_slab:
3512 	kmem_cache_destroy(deleg_slab);
3513 out_free_stateid_slab:
3514 	kmem_cache_destroy(stateid_slab);
3515 out_free_file_slab:
3516 	kmem_cache_destroy(file_slab);
3517 out_free_lockowner_slab:
3518 	kmem_cache_destroy(lockowner_slab);
3519 out_free_openowner_slab:
3520 	kmem_cache_destroy(openowner_slab);
3521 out:
3522 	dprintk("nfsd4: out of memory while initializing nfsv4\n");
3523 	return -ENOMEM;
3524 }
3525 
3526 static void init_nfs4_replay(struct nfs4_replay *rp)
3527 {
3528 	rp->rp_status = nfserr_serverfault;
3529 	rp->rp_buflen = 0;
3530 	rp->rp_buf = rp->rp_ibuf;
3531 	mutex_init(&rp->rp_mutex);
3532 }
3533 
3534 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
3535 		struct nfs4_stateowner *so)
3536 {
3537 	if (!nfsd4_has_session(cstate)) {
3538 		mutex_lock(&so->so_replay.rp_mutex);
3539 		cstate->replay_owner = nfs4_get_stateowner(so);
3540 	}
3541 }
3542 
3543 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
3544 {
3545 	struct nfs4_stateowner *so = cstate->replay_owner;
3546 
3547 	if (so != NULL) {
3548 		cstate->replay_owner = NULL;
3549 		mutex_unlock(&so->so_replay.rp_mutex);
3550 		nfs4_put_stateowner(so);
3551 	}
3552 }
3553 
3554 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
3555 {
3556 	struct nfs4_stateowner *sop;
3557 
3558 	sop = kmem_cache_alloc(slab, GFP_KERNEL);
3559 	if (!sop)
3560 		return NULL;
3561 
3562 	sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
3563 	if (!sop->so_owner.data) {
3564 		kmem_cache_free(slab, sop);
3565 		return NULL;
3566 	}
3567 	sop->so_owner.len = owner->len;
3568 
3569 	INIT_LIST_HEAD(&sop->so_stateids);
3570 	sop->so_client = clp;
3571 	init_nfs4_replay(&sop->so_replay);
3572 	atomic_set(&sop->so_count, 1);
3573 	return sop;
3574 }
3575 
3576 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
3577 {
3578 	lockdep_assert_held(&clp->cl_lock);
3579 
3580 	list_add(&oo->oo_owner.so_strhash,
3581 		 &clp->cl_ownerstr_hashtbl[strhashval]);
3582 	list_add(&oo->oo_perclient, &clp->cl_openowners);
3583 }
3584 
3585 static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
3586 {
3587 	unhash_openowner_locked(openowner(so));
3588 }
3589 
3590 static void nfs4_free_openowner(struct nfs4_stateowner *so)
3591 {
3592 	struct nfs4_openowner *oo = openowner(so);
3593 
3594 	kmem_cache_free(openowner_slab, oo);
3595 }
3596 
3597 static const struct nfs4_stateowner_operations openowner_ops = {
3598 	.so_unhash =	nfs4_unhash_openowner,
3599 	.so_free =	nfs4_free_openowner,
3600 };
3601 
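/*
 * Look for an existing OPEN stateid on this file belonging to the same
 * openowner; lockowner stateids are skipped.  A reference is taken on the
 * result.  Caller must hold fp->fi_lock.
 */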
3602 static struct nfs4_ol_stateid *
3603 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
3604 {
3605 	struct nfs4_ol_stateid *local, *ret = NULL;
3606 	struct nfs4_openowner *oo = open->op_openowner;
3607 
3608 	lockdep_assert_held(&fp->fi_lock);
3609 
3610 	list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
3611 		/* ignore lock owners */
3612 		if (local->st_stateowner->so_is_open_owner == 0)
3613 			continue;
3614 		if (local->st_stateowner != &oo->oo_owner)
3615 			continue;
3616 		if (local->st_stid.sc_type == NFS4_OPEN_STID) {
3617 			ret = local;
3618 			refcount_inc(&ret->st_stid.sc_count);
3619 			break;
3620 		}
3621 	}
3622 	return ret;
3623 }
3624 
3625 static __be32
3626 nfsd4_verify_open_stid(struct nfs4_stid *s)
3627 {
3628 	__be32 ret = nfs_ok;
3629 
3630 	switch (s->sc_type) {
3631 	default:
3632 		break;
3633 	case 0:
3634 	case NFS4_CLOSED_STID:
3635 	case NFS4_CLOSED_DELEG_STID:
3636 		ret = nfserr_bad_stateid;
3637 		break;
3638 	case NFS4_REVOKED_DELEG_STID:
3639 		ret = nfserr_deleg_revoked;
3640 	}
3641 	return ret;
3642 }
3643 
3644 /* Lock the stateid st_mutex, and deal with races with CLOSE */
3645 static __be32
3646 nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
3647 {
3648 	__be32 ret;
3649 
3650 	mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
3651 	ret = nfsd4_verify_open_stid(&stp->st_stid);
3652 	if (ret != nfs_ok)
3653 		mutex_unlock(&stp->st_mutex);
3654 	return ret;
3655 }
3656 
3657 static struct nfs4_ol_stateid *
3658 nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
3659 {
3660 	struct nfs4_ol_stateid *stp;
3661 	for (;;) {
3662 		spin_lock(&fp->fi_lock);
3663 		stp = nfsd4_find_existing_open(fp, open);
3664 		spin_unlock(&fp->fi_lock);
3665 		if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
3666 			break;
3667 		nfs4_put_stid(&stp->st_stid);
3668 	}
3669 	return stp;
3670 }
3671 
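/*
 * Allocate and hash a new openowner, unless one for the same owner string
 * was inserted concurrently; in that case the new one is freed and the
 * existing owner is returned instead.
 */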
3672 static struct nfs4_openowner *
3673 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
3674 			   struct nfsd4_compound_state *cstate)
3675 {
3676 	struct nfs4_client *clp = cstate->clp;
3677 	struct nfs4_openowner *oo, *ret;
3678 
3679 	oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
3680 	if (!oo)
3681 		return NULL;
3682 	oo->oo_owner.so_ops = &openowner_ops;
3683 	oo->oo_owner.so_is_open_owner = 1;
3684 	oo->oo_owner.so_seqid = open->op_seqid;
3685 	oo->oo_flags = 0;
3686 	if (nfsd4_has_session(cstate))
3687 		oo->oo_flags |= NFS4_OO_CONFIRMED;
3688 	oo->oo_time = 0;
3689 	oo->oo_last_closed_stid = NULL;
3690 	INIT_LIST_HEAD(&oo->oo_close_lru);
3691 	spin_lock(&clp->cl_lock);
3692 	ret = find_openstateowner_str_locked(strhashval, open, clp);
3693 	if (ret == NULL) {
3694 		hash_openowner(oo, clp, strhashval);
3695 		ret = oo;
3696 	} else
3697 		nfs4_free_stateowner(&oo->oo_owner);
3698 
3699 	spin_unlock(&clp->cl_lock);
3700 	return ret;
3701 }
3702 
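/*
 * Hash the preallocated open stateid for this OPEN, or, if another nfsd
 * thread beat us to it, return that thread's stateid instead.  In either
 * case the returned stateid is locked (st_mutex held).
 */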
3703 static struct nfs4_ol_stateid *
3704 init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
3705 {
3706 
3707 	struct nfs4_openowner *oo = open->op_openowner;
3708 	struct nfs4_ol_stateid *retstp = NULL;
3709 	struct nfs4_ol_stateid *stp;
3710 
3711 	stp = open->op_stp;
3712 	/* Init and lock st_mutex before the spinlocks below; mutex_lock() can sleep */
3713 	mutex_init(&stp->st_mutex);
3714 	mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
3715 
3716 retry:
3717 	spin_lock(&oo->oo_owner.so_client->cl_lock);
3718 	spin_lock(&fp->fi_lock);
3719 
3720 	retstp = nfsd4_find_existing_open(fp, open);
3721 	if (retstp)
3722 		goto out_unlock;
3723 
3724 	open->op_stp = NULL;
3725 	refcount_inc(&stp->st_stid.sc_count);
3726 	stp->st_stid.sc_type = NFS4_OPEN_STID;
3727 	INIT_LIST_HEAD(&stp->st_locks);
3728 	stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
3729 	get_nfs4_file(fp);
3730 	stp->st_stid.sc_file = fp;
3731 	stp->st_access_bmap = 0;
3732 	stp->st_deny_bmap = 0;
3733 	stp->st_openstp = NULL;
3734 	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
3735 	list_add(&stp->st_perfile, &fp->fi_stateids);
3736 
3737 out_unlock:
3738 	spin_unlock(&fp->fi_lock);
3739 	spin_unlock(&oo->oo_owner.so_client->cl_lock);
3740 	if (retstp) {
3741 		/* Handle races with CLOSE */
3742 		if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
3743 			nfs4_put_stid(&retstp->st_stid);
3744 			goto retry;
3745 		}
3746 		/* To keep mutex tracking happy */
3747 		mutex_unlock(&stp->st_mutex);
3748 		stp = retstp;
3749 	}
3750 	return stp;
3751 }
3752 
3753 /*
3754  * In the 4.0 case we need to keep the owners around a little while to handle
3755  * CLOSE replay. We do still need to release any file access they hold
3756  * before returning, however.
3757  */
3758 static void
3759 move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
3760 {
3761 	struct nfs4_ol_stateid *last;
3762 	struct nfs4_openowner *oo = openowner(s->st_stateowner);
3763 	struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
3764 						nfsd_net_id);
3765 
3766 	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
3767 
3768 	/*
3769 	 * We know that we hold one reference via nfsd4_close, and another
3770 	 * "persistent" reference for the client. If the refcount is higher
3771 	 * than 2, then there are still calls in progress that are using this
3772 	 * stateid. We can't put the sc_file reference until they are finished.
3773 	 * Wait for the refcount to drop to 2. Since it has been unhashed,
3774 	 * there should be no danger of the refcount going back up again at
3775 	 * this point.
3776 	 */
3777 	wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
3778 
3779 	release_all_access(s);
3780 	if (s->st_stid.sc_file) {
3781 		put_nfs4_file(s->st_stid.sc_file);
3782 		s->st_stid.sc_file = NULL;
3783 	}
3784 
3785 	spin_lock(&nn->client_lock);
3786 	last = oo->oo_last_closed_stid;
3787 	oo->oo_last_closed_stid = s;
3788 	list_move_tail(&oo->oo_close_lru, &nn->close_lru);
3789 	oo->oo_time = get_seconds();
3790 	spin_unlock(&nn->client_lock);
3791 	if (last)
3792 		nfs4_put_stid(&last->st_stid);
3793 }
3794 
3795 /* search file_hashtbl[] for file */
3796 static struct nfs4_file *
3797 find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
3798 {
3799 	struct nfs4_file *fp;
3800 
3801 	hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
3802 		if (fh_match(&fp->fi_fhandle, fh)) {
3803 			if (refcount_inc_not_zero(&fp->fi_ref))
3804 				return fp;
3805 		}
3806 	}
3807 	return NULL;
3808 }
3809 
3810 struct nfs4_file *
3811 find_file(struct knfsd_fh *fh)
3812 {
3813 	struct nfs4_file *fp;
3814 	unsigned int hashval = file_hashval(fh);
3815 
3816 	rcu_read_lock();
3817 	fp = find_file_locked(fh, hashval);
3818 	rcu_read_unlock();
3819 	return fp;
3820 }
3821 
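/*
 * Look up the nfs4_file for this filehandle, inserting the caller's
 * preallocated "new" entry if none exists.  The lockless RCU lookup is
 * repeated under state_lock before inserting, to close the race with a
 * concurrent insertion.
 */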
3822 static struct nfs4_file *
3823 find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
3824 {
3825 	struct nfs4_file *fp;
3826 	unsigned int hashval = file_hashval(fh);
3827 
3828 	rcu_read_lock();
3829 	fp = find_file_locked(fh, hashval);
3830 	rcu_read_unlock();
3831 	if (fp)
3832 		return fp;
3833 
3834 	spin_lock(&state_lock);
3835 	fp = find_file_locked(fh, hashval);
3836 	if (likely(fp == NULL)) {
3837 		nfsd4_init_file(fh, hashval, new);
3838 		fp = new;
3839 	}
3840 	spin_unlock(&state_lock);
3841 
3842 	return fp;
3843 }
3844 
3845 /*
3846  * Called to check deny when READ with all zero stateid or
3847  * WRITE with all zero or all one stateid
3848  */
3849 static __be32
3850 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
3851 {
3852 	struct nfs4_file *fp;
3853 	__be32 ret = nfs_ok;
3854 
3855 	fp = find_file(&current_fh->fh_handle);
3856 	if (!fp)
3857 		return ret;
3858 	/* Check for conflicting share reservations */
3859 	spin_lock(&fp->fi_lock);
3860 	if (fp->fi_share_deny & deny_type)
3861 		ret = nfserr_locked;
3862 	spin_unlock(&fp->fi_lock);
3863 	put_nfs4_file(fp);
3864 	return ret;
3865 }
3866 
3867 static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
3868 {
3869 	struct nfs4_delegation *dp = cb_to_delegation(cb);
3870 	struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
3871 					  nfsd_net_id);
3872 
3873 	block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
3874 
3875 	/*
3876 	 * We can't do this in nfsd_break_deleg_cb because it is
3877 	 * already holding inode->i_lock.
3878 	 *
3879 	 * If the dl_time != 0, then we know that it has already been
3880 	 * queued for a lease break. Don't queue it again.
3881 	 */
3882 	spin_lock(&state_lock);
3883 	if (dp->dl_time == 0) {
3884 		dp->dl_time = get_seconds();
3885 		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
3886 	}
3887 	spin_unlock(&state_lock);
3888 }
3889 
3890 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
3891 		struct rpc_task *task)
3892 {
3893 	struct nfs4_delegation *dp = cb_to_delegation(cb);
3894 
3895 	if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID)
3896 		return 1;
3897 
3898 	switch (task->tk_status) {
3899 	case 0:
3900 		return 1;
3901 	case -EBADHANDLE:
3902 	case -NFS4ERR_BAD_STATEID:
3903 		/*
3904 		 * Race: client probably got cb_recall before open reply
3905 		 * granting delegation.
3906 		 */
3907 		if (dp->dl_retries--) {
3908 			rpc_delay(task, 2 * HZ);
3909 			return 0;
3910 		}
3911 		/*FALLTHRU*/
3912 	default:
3913 		return -1;
3914 	}
3915 }
3916 
3917 static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
3918 {
3919 	struct nfs4_delegation *dp = cb_to_delegation(cb);
3920 
3921 	nfs4_put_stid(&dp->dl_stid);
3922 }
3923 
3924 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
3925 	.prepare	= nfsd4_cb_recall_prepare,
3926 	.done		= nfsd4_cb_recall_done,
3927 	.release	= nfsd4_cb_recall_release,
3928 };
3929 
3930 static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
3931 {
3932 	/*
3933 	 * We're assuming the state code never drops its reference
3934 	 * without first removing the lease.  Since we're in this lease
3935 	 * callback (and since the lease code is serialized by the kernel
3936 	 * lock) we know the server hasn't removed the lease yet, so we know
3937 	 * it's safe to take a reference.
3938 	 */
3939 	refcount_inc(&dp->dl_stid.sc_count);
3940 	nfsd4_run_cb(&dp->dl_recall);
3941 }
3942 
3943 /* Called from break_lease() with i_lock held. */
3944 static bool
3945 nfsd_break_deleg_cb(struct file_lock *fl)
3946 {
3947 	bool ret = false;
3948 	struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
3949 	struct nfs4_delegation *dp;
3950 
3951 	if (!fp) {
3952 		WARN(1, "(%p)->fl_owner NULL\n", fl);
3953 		return ret;
3954 	}
3955 	if (fp->fi_had_conflict) {
3956 		WARN(1, "duplicate break on %p\n", fp);
3957 		return ret;
3958 	}
3959 	/*
3960 	 * We don't want the locks code to timeout the lease for us;
3961 	 * we'll remove it ourselves if a delegation isn't returned
3962 	 * in time:
3963 	 */
3964 	fl->fl_break_time = 0;
3965 
3966 	spin_lock(&fp->fi_lock);
3967 	fp->fi_had_conflict = true;
3968 	/*
3969 	 * If there are no delegations on the list, then return true
3970 	 * so that the lease code will go ahead and delete it.
3971 	 */
3972 	if (list_empty(&fp->fi_delegations))
3973 		ret = true;
3974 	else
3975 		list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
3976 			nfsd_break_one_deleg(dp);
3977 	spin_unlock(&fp->fi_lock);
3978 	return ret;
3979 }
3980 
3981 static int
3982 nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
3983 		     struct list_head *dispose)
3984 {
3985 	if (arg & F_UNLCK)
3986 		return lease_modify(onlist, arg, dispose);
3987 	else
3988 		return -EAGAIN;
3989 }
3990 
3991 static const struct lock_manager_operations nfsd_lease_mng_ops = {
3992 	.lm_break = nfsd_break_deleg_cb,
3993 	.lm_change = nfsd_change_deleg_cb,
3994 };
3995 
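/*
 * NFSv4.0 seqid check: a seqid equal to the owner's so_seqid is a new
 * request, so_seqid - 1 indicates a replay, and anything else is rejected
 * as a bad seqid.  With sessions (v4.1+) the SEQUENCE op handles replay
 * detection, so no check is needed here.
 */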
3996 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
3997 {
3998 	if (nfsd4_has_session(cstate))
3999 		return nfs_ok;
4000 	if (seqid == so->so_seqid - 1)
4001 		return nfserr_replay_me;
4002 	if (seqid == so->so_seqid)
4003 		return nfs_ok;
4004 	return nfserr_bad_seqid;
4005 }
4006 
4007 static __be32 lookup_clientid(clientid_t *clid,
4008 		struct nfsd4_compound_state *cstate,
4009 		struct nfsd_net *nn)
4010 {
4011 	struct nfs4_client *found;
4012 
4013 	if (cstate->clp) {
4014 		found = cstate->clp;
4015 		if (!same_clid(&found->cl_clientid, clid))
4016 			return nfserr_stale_clientid;
4017 		return nfs_ok;
4018 	}
4019 
4020 	if (STALE_CLIENTID(clid, nn))
4021 		return nfserr_stale_clientid;
4022 
4023 	/*
4024 	 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
4025 	 * cached already then we know this is for is for v4.0 and "sessions"
4026 	 * cached already then we know this is for v4.0 and "sessions"
4027 	 */
4028 	WARN_ON_ONCE(cstate->session);
4029 	spin_lock(&nn->client_lock);
4030 	found = find_confirmed_client(clid, false, nn);
4031 	if (!found) {
4032 		spin_unlock(&nn->client_lock);
4033 		return nfserr_expired;
4034 	}
4035 	atomic_inc(&found->cl_refcount);
4036 	spin_unlock(&nn->client_lock);
4037 
4038 	/* Cache the nfs4_client in cstate! */
4039 	cstate->clp = found;
4040 	return nfs_ok;
4041 }
4042 
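/*
 * First phase of OPEN processing: verify the clientid, find or create the
 * openowner, and preallocate the nfs4_file, the open stateid, and (on pNFS
 * exports with sessions) the client odstate, so the later phases need not
 * fail for lack of memory.
 */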
4043 __be32
4044 nfsd4_process_open1(struct nfsd4_compound_state *cstate,
4045 		    struct nfsd4_open *open, struct nfsd_net *nn)
4046 {
4047 	clientid_t *clientid = &open->op_clientid;
4048 	struct nfs4_client *clp = NULL;
4049 	unsigned int strhashval;
4050 	struct nfs4_openowner *oo = NULL;
4051 	__be32 status;
4052 
4053 	if (STALE_CLIENTID(&open->op_clientid, nn))
4054 		return nfserr_stale_clientid;
4055 	/*
4056 	 * In case we need it later, after we've already created the
4057 	 * file and don't want to risk a further failure:
4058 	 */
4059 	open->op_file = nfsd4_alloc_file();
4060 	if (open->op_file == NULL)
4061 		return nfserr_jukebox;
4062 
4063 	status = lookup_clientid(clientid, cstate, nn);
4064 	if (status)
4065 		return status;
4066 	clp = cstate->clp;
4067 
4068 	strhashval = ownerstr_hashval(&open->op_owner);
4069 	oo = find_openstateowner_str(strhashval, open, clp);
4070 	open->op_openowner = oo;
4071 	if (!oo) {
4072 		goto new_owner;
4073 	}
4074 	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
4075 		/* Replace unconfirmed owners without checking for replay. */
4076 		release_openowner(oo);
4077 		open->op_openowner = NULL;
4078 		goto new_owner;
4079 	}
4080 	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
4081 	if (status)
4082 		return status;
4083 	goto alloc_stateid;
4084 new_owner:
4085 	oo = alloc_init_open_stateowner(strhashval, open, cstate);
4086 	if (oo == NULL)
4087 		return nfserr_jukebox;
4088 	open->op_openowner = oo;
4089 alloc_stateid:
4090 	open->op_stp = nfs4_alloc_open_stateid(clp);
4091 	if (!open->op_stp)
4092 		return nfserr_jukebox;
4093 
4094 	if (nfsd4_has_session(cstate) &&
4095 	    (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
4096 		open->op_odstate = alloc_clnt_odstate(clp);
4097 		if (!open->op_odstate)
4098 			return nfserr_jukebox;
4099 	}
4100 
4101 	return nfs_ok;
4102 }
4103 
4104 static inline __be32
4105 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
4106 {
4107 	if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
4108 		return nfserr_openmode;
4109 	else
4110 		return nfs_ok;
4111 }
4112 
4113 static int share_access_to_flags(u32 share_access)
4114 {
4115 	return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
4116 }
4117 
4118 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
4119 {
4120 	struct nfs4_stid *ret;
4121 
4122 	ret = find_stateid_by_type(cl, s,
4123 				NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
4124 	if (!ret)
4125 		return NULL;
4126 	return delegstateid(ret);
4127 }
4128 
4129 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
4130 {
4131 	return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
4132 	       open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
4133 }
4134 
4135 static __be32
4136 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
4137 		struct nfs4_delegation **dp)
4138 {
4139 	int flags;
4140 	__be32 status = nfserr_bad_stateid;
4141 	struct nfs4_delegation *deleg;
4142 
4143 	deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
4144 	if (deleg == NULL)
4145 		goto out;
4146 	if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
4147 		nfs4_put_stid(&deleg->dl_stid);
4148 		if (cl->cl_minorversion)
4149 			status = nfserr_deleg_revoked;
4150 		goto out;
4151 	}
4152 	flags = share_access_to_flags(open->op_share_access);
4153 	status = nfs4_check_delegmode(deleg, flags);
4154 	if (status) {
4155 		nfs4_put_stid(&deleg->dl_stid);
4156 		goto out;
4157 	}
4158 	*dp = deleg;
4159 out:
4160 	if (!nfsd4_is_deleg_cur(open))
4161 		return nfs_ok;
4162 	if (status)
4163 		return status;
4164 	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
4165 	return nfs_ok;
4166 }
4167 
4168 static inline int nfs4_access_to_access(u32 nfs4_access)
4169 {
4170 	int flags = 0;
4171 
4172 	if (nfs4_access & NFS4_SHARE_ACCESS_READ)
4173 		flags |= NFSD_MAY_READ;
4174 	if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
4175 		flags |= NFSD_MAY_WRITE;
4176 	return flags;
4177 }
4178 
4179 static inline __be32
4180 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
4181 		struct nfsd4_open *open)
4182 {
4183 	struct iattr iattr = {
4184 		.ia_valid = ATTR_SIZE,
4185 		.ia_size = 0,
4186 	};
4187 	if (!open->op_truncate)
4188 		return 0;
4189 	if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
4190 		return nfserr_inval;
4191 	return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
4192 }
4193 
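/*
 * Take file access for this OPEN and record the requested share/deny modes
 * in the stateid and nfs4_file.  A struct file is opened via nfsd_open() if
 * one is not already cached for the requested open mode, and any requested
 * truncation is applied.  On failure, the access and deny bits are rolled
 * back.
 */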
4194 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
4195 		struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
4196 		struct nfsd4_open *open)
4197 {
4198 	struct file *filp = NULL;
4199 	__be32 status;
4200 	int oflag = nfs4_access_to_omode(open->op_share_access);
4201 	int access = nfs4_access_to_access(open->op_share_access);
4202 	unsigned char old_access_bmap, old_deny_bmap;
4203 
4204 	spin_lock(&fp->fi_lock);
4205 
4206 	/*
4207 	 * Are we trying to set a deny mode that would conflict with
4208 	 * current access?
4209 	 */
4210 	status = nfs4_file_check_deny(fp, open->op_share_deny);
4211 	if (status != nfs_ok) {
4212 		spin_unlock(&fp->fi_lock);
4213 		goto out;
4214 	}
4215 
4216 	/* set access to the file */
4217 	status = nfs4_file_get_access(fp, open->op_share_access);
4218 	if (status != nfs_ok) {
4219 		spin_unlock(&fp->fi_lock);
4220 		goto out;
4221 	}
4222 
4223 	/* Set access bits in stateid */
4224 	old_access_bmap = stp->st_access_bmap;
4225 	set_access(open->op_share_access, stp);
4226 
4227 	/* Set new deny mask */
4228 	old_deny_bmap = stp->st_deny_bmap;
4229 	set_deny(open->op_share_deny, stp);
4230 	fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4231 
4232 	if (!fp->fi_fds[oflag]) {
4233 		spin_unlock(&fp->fi_lock);
4234 		status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp);
4235 		if (status)
4236 			goto out_put_access;
4237 		spin_lock(&fp->fi_lock);
4238 		if (!fp->fi_fds[oflag]) {
4239 			fp->fi_fds[oflag] = filp;
4240 			filp = NULL;
4241 		}
4242 	}
4243 	spin_unlock(&fp->fi_lock);
4244 	if (filp)
4245 		fput(filp);
4246 
4247 	status = nfsd4_truncate(rqstp, cur_fh, open);
4248 	if (status)
4249 		goto out_put_access;
4250 out:
4251 	return status;
4252 out_put_access:
4253 	stp->st_access_bmap = old_access_bmap;
4254 	nfs4_file_put_access(fp, open->op_share_access);
4255 	reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
4256 	goto out;
4257 }
4258 
4259 static __be32
4260 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
4261 {
4262 	__be32 status;
4263 	unsigned char old_deny_bmap = stp->st_deny_bmap;
4264 
4265 	if (!test_access(open->op_share_access, stp))
4266 		return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);
4267 
4268 	/* test and set deny mode */
4269 	spin_lock(&fp->fi_lock);
4270 	status = nfs4_file_check_deny(fp, open->op_share_deny);
4271 	if (status == nfs_ok) {
4272 		set_deny(open->op_share_deny, stp);
4273 		fp->fi_share_deny |=
4274 				(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4275 	}
4276 	spin_unlock(&fp->fi_lock);
4277 
4278 	if (status != nfs_ok)
4279 		return status;
4280 
4281 	status = nfsd4_truncate(rqstp, cur_fh, open);
4282 	if (status != nfs_ok)
4283 		reset_union_bmap_deny(old_deny_bmap, stp);
4284 	return status;
4285 }
4286 
4287 /* Should we give out recallable state?: */
4288 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
4289 {
4290 	if (clp->cl_cb_state == NFSD4_CB_UP)
4291 		return true;
4292 	/*
4293 	 * In the sessions case, since we don't have to establish a
4294 	 * separate connection for callbacks, we assume it's OK
4295 	 * until we hear otherwise:
4296 	 */
4297 	return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
4298 }
4299 
4300 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag)
4301 {
4302 	struct file_lock *fl;
4303 
4304 	fl = locks_alloc_lock();
4305 	if (!fl)
4306 		return NULL;
4307 	fl->fl_lmops = &nfsd_lease_mng_ops;
4308 	fl->fl_flags = FL_DELEG;
4309 	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
4310 	fl->fl_end = OFFSET_MAX;
4311 	fl->fl_owner = (fl_owner_t)fp;
4312 	fl->fl_pid = current->tgid;
4313 	return fl;
4314 }
4315 
4316 /**
4317  * nfs4_setlease - Obtain a delegation by requesting lease from vfs layer
4318  * @dp:   a pointer to the nfs4_delegation we're adding.
4319  *
4320  * Return:
4321  *      On success: 0.
4322  *
4323  *      On error: -EAGAIN if there was an existing delegation;
4324  *                another nonzero error code otherwise.
4325  *
4326  */
4327 
4328 static int nfs4_setlease(struct nfs4_delegation *dp)
4329 {
4330 	struct nfs4_file *fp = dp->dl_stid.sc_file;
4331 	struct file_lock *fl;
4332 	struct file *filp;
4333 	int status = 0;
4334 
4335 	fl = nfs4_alloc_init_lease(fp, NFS4_OPEN_DELEGATE_READ);
4336 	if (!fl)
4337 		return -ENOMEM;
4338 	filp = find_readable_file(fp);
4339 	if (!filp) {
4340 		/* We should always have a readable file here */
4341 		WARN_ON_ONCE(1);
4342 		locks_free_lock(fl);
4343 		return -EBADF;
4344 	}
4345 	fl->fl_file = filp;
4346 	status = vfs_setlease(filp, fl->fl_type, &fl, NULL);
4347 	if (fl)
4348 		locks_free_lock(fl);
4349 	if (status)
4350 		goto out_fput;
4351 	spin_lock(&state_lock);
4352 	spin_lock(&fp->fi_lock);
4353 	/* Did the lease get broken before we took the lock? */
4354 	status = -EAGAIN;
4355 	if (fp->fi_had_conflict)
4356 		goto out_unlock;
4357 	/* Race breaker */
4358 	if (fp->fi_deleg_file) {
4359 		status = hash_delegation_locked(dp, fp);
4360 		goto out_unlock;
4361 	}
4362 	fp->fi_deleg_file = filp;
4363 	fp->fi_delegees = 0;
4364 	status = hash_delegation_locked(dp, fp);
4365 	spin_unlock(&fp->fi_lock);
4366 	spin_unlock(&state_lock);
4367 	if (status) {
4368 		/* Should never happen, this is a new fi_deleg_file  */
4369 		WARN_ON_ONCE(1);
4370 		goto out_fput;
4371 	}
4372 	return 0;
4373 out_unlock:
4374 	spin_unlock(&fp->fi_lock);
4375 	spin_unlock(&state_lock);
4376 out_fput:
4377 	fput(filp);
4378 	return status;
4379 }
4380 
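/*
 * Allocate and hash a read delegation for this client and file.  If the
 * file has no delegation lease yet, request one from the VFS via
 * nfs4_setlease(); otherwise reuse the existing lease, unless a conflicting
 * lease break has already been seen (-EAGAIN).
 */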
4381 static struct nfs4_delegation *
4382 nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
4383 		    struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
4384 {
4385 	int status;
4386 	struct nfs4_delegation *dp;
4387 
4388 	if (fp->fi_had_conflict)
4389 		return ERR_PTR(-EAGAIN);
4390 
4391 	spin_lock(&state_lock);
4392 	spin_lock(&fp->fi_lock);
4393 	status = nfs4_get_existing_delegation(clp, fp);
4394 	spin_unlock(&fp->fi_lock);
4395 	spin_unlock(&state_lock);
4396 
4397 	if (status)
4398 		return ERR_PTR(status);
4399 
4400 	dp = alloc_init_deleg(clp, fh, odstate);
4401 	if (!dp)
4402 		return ERR_PTR(-ENOMEM);
4403 
4404 	get_nfs4_file(fp);
4405 	spin_lock(&state_lock);
4406 	spin_lock(&fp->fi_lock);
4407 	dp->dl_stid.sc_file = fp;
4408 	if (!fp->fi_deleg_file) {
4409 		spin_unlock(&fp->fi_lock);
4410 		spin_unlock(&state_lock);
4411 		status = nfs4_setlease(dp);
4412 		goto out;
4413 	}
4414 	if (fp->fi_had_conflict) {
4415 		status = -EAGAIN;
4416 		goto out_unlock;
4417 	}
4418 	status = hash_delegation_locked(dp, fp);
4419 out_unlock:
4420 	spin_unlock(&fp->fi_lock);
4421 	spin_unlock(&state_lock);
4422 out:
4423 	if (status) {
4424 		put_clnt_odstate(dp->dl_clnt_odstate);
4425 		nfs4_put_stid(&dp->dl_stid);
4426 		return ERR_PTR(status);
4427 	}
4428 	return dp;
4429 }
4430 
4431 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
4432 {
4433 	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4434 	if (status == -EAGAIN)
4435 		open->op_why_no_deleg = WND4_CONTENTION;
4436 	else {
4437 		open->op_why_no_deleg = WND4_RESOURCE;
4438 		switch (open->op_deleg_want) {
4439 		case NFS4_SHARE_WANT_READ_DELEG:
4440 		case NFS4_SHARE_WANT_WRITE_DELEG:
4441 		case NFS4_SHARE_WANT_ANY_DELEG:
4442 			break;
4443 		case NFS4_SHARE_WANT_CANCEL:
4444 			open->op_why_no_deleg = WND4_CANCELLED;
4445 			break;
4446 		case NFS4_SHARE_WANT_NO_DELEG:
4447 			WARN_ON_ONCE(1);
4448 		}
4449 	}
4450 }
4451 
4452 /*
4453  * Attempt to hand out a delegation.
4454  *
4455  * Note we don't support write delegations, and won't until the vfs has
4456  * proper support for them.
4457  */
4458 static void
4459 nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
4460 			struct nfs4_ol_stateid *stp)
4461 {
4462 	struct nfs4_delegation *dp;
4463 	struct nfs4_openowner *oo = openowner(stp->st_stateowner);
4464 	struct nfs4_client *clp = stp->st_stid.sc_client;
4465 	int cb_up;
4466 	int status = 0;
4467 
4468 	cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
4469 	open->op_recall = 0;
4470 	switch (open->op_claim_type) {
4471 		case NFS4_OPEN_CLAIM_PREVIOUS:
4472 			if (!cb_up)
4473 				open->op_recall = 1;
4474 			if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
4475 				goto out_no_deleg;
4476 			break;
4477 		case NFS4_OPEN_CLAIM_NULL:
4478 		case NFS4_OPEN_CLAIM_FH:
4479 			/*
4480 			 * Let's not give out any delegations till everyone's
4481 			 * had the chance to reclaim theirs, *and* until
4482 			 * NLM locks have all been reclaimed:
4483 			 */
4484 			if (locks_in_grace(clp->net))
4485 				goto out_no_deleg;
4486 			if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
4487 				goto out_no_deleg;
4488 			/*
4489 			 * Also, if the file was opened for write or
4490 			 * create, there's a good chance the client's
4491 			 * about to write to it, resulting in an
4492 			 * immediate recall (since we don't support
4493 			 * write delegations):
4494 			 */
4495 			if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
4496 				goto out_no_deleg;
4497 			if (open->op_create == NFS4_OPEN_CREATE)
4498 				goto out_no_deleg;
4499 			break;
4500 		default:
4501 			goto out_no_deleg;
4502 	}
4503 	dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
4504 	if (IS_ERR(dp))
4505 		goto out_no_deleg;
4506 
4507 	memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
4508 
4509 	dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
4510 		STATEID_VAL(&dp->dl_stid.sc_stateid));
4511 	open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
4512 	nfs4_put_stid(&dp->dl_stid);
4513 	return;
4514 out_no_deleg:
4515 	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
4516 	if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
4517 	    open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
4518 		dprintk("NFSD: WARNING: refusing delegation reclaim\n");
4519 		open->op_recall = 1;
4520 	}
4521 
4522 	/* 4.1 client asking for a delegation? */
4523 	if (open->op_deleg_want)
4524 		nfsd4_open_deleg_none_ext(open, status);
4525 	return;
4526 }
4527 
4528 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
4529 					struct nfs4_delegation *dp)
4530 {
4531 	if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
4532 	    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
4533 		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4534 		open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
4535 	} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
4536 		   dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
4537 		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4538 		open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
4539 	}
4540 	/* Otherwise the client must be confused wanting a delegation
4541 	 * it already has, so we don't return
4542 	 * NFS4_OPEN_DELEGATE_NONE_EXT and a reason.
4543 	 */
4544 }
4545 
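/*
 * Second phase of OPEN processing: find or create the nfs4_file and open
 * stateid, grant (or upgrade) the requested file access, and then try to
 * hand out a read delegation.  A delegation failure is not reported; the
 * OPEN itself still succeeds.
 */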
4546 __be32
4547 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
4548 {
4549 	struct nfsd4_compoundres *resp = rqstp->rq_resp;
4550 	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
4551 	struct nfs4_file *fp = NULL;
4552 	struct nfs4_ol_stateid *stp = NULL;
4553 	struct nfs4_delegation *dp = NULL;
4554 	__be32 status;
4555 	bool new_stp = false;
4556 
4557 	/*
4558 	 * Lookup file; if found, lookup stateid and check open request,
4559 	 * and check for delegations in the process of being recalled.
4560 	 * If not found, create the nfs4_file struct
4561 	 */
4562 	fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
4563 	if (fp != open->op_file) {
4564 		status = nfs4_check_deleg(cl, open, &dp);
4565 		if (status)
4566 			goto out;
4567 		stp = nfsd4_find_and_lock_existing_open(fp, open);
4568 	} else {
4569 		open->op_file = NULL;
4570 		status = nfserr_bad_stateid;
4571 		if (nfsd4_is_deleg_cur(open))
4572 			goto out;
4573 	}
4574 
4575 	if (!stp) {
4576 		stp = init_open_stateid(fp, open);
4577 		if (!open->op_stp)
4578 			new_stp = true;
4579 	}
4580 
4581 	/*
4582 	 * OPEN the file, or upgrade an existing OPEN.
4583 	 * If truncate fails, the OPEN fails.
4584 	 *
4585 	 * stp is already locked.
4586 	 */
4587 	if (!new_stp) {
4588 		/* Stateid was found, this is an OPEN upgrade */
4589 		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
4590 		if (status) {
4591 			mutex_unlock(&stp->st_mutex);
4592 			goto out;
4593 		}
4594 	} else {
4595 		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
4596 		if (status) {
4597 			stp->st_stid.sc_type = NFS4_CLOSED_STID;
4598 			release_open_stateid(stp);
4599 			mutex_unlock(&stp->st_mutex);
4600 			goto out;
4601 		}
4602 
4603 		stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
4604 							open->op_odstate);
4605 		if (stp->st_clnt_odstate == open->op_odstate)
4606 			open->op_odstate = NULL;
4607 	}
4608 
4609 	nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
4610 	mutex_unlock(&stp->st_mutex);
4611 
4612 	if (nfsd4_has_session(&resp->cstate)) {
4613 		if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
4614 			open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4615 			open->op_why_no_deleg = WND4_NOT_WANTED;
4616 			goto nodeleg;
4617 		}
4618 	}
4619 
4620 	/*
4621 	 * Attempt to hand out a delegation. No error return, because the
4622 	 * OPEN succeeds even if we fail.
4623 	 */
4624 	nfs4_open_delegation(current_fh, open, stp);
4625 nodeleg:
4626 	status = nfs_ok;
4627 
4628 	dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
4629 		STATEID_VAL(&stp->st_stid.sc_stateid));
4630 out:
4631 	/* 4.1 client trying to upgrade/downgrade delegation? */
4632 	if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
4633 	    open->op_deleg_want)
4634 		nfsd4_deleg_xgrade_none_ext(open, dp);
4635 
4636 	if (fp)
4637 		put_nfs4_file(fp);
4638 	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
4639 		open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
4640 	/*
4641 	 * To finish the open response, we just need to set the rflags.
4642 	 */
4643 	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
4644 	if (nfsd4_has_session(&resp->cstate))
4645 		open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
4646 	else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
4647 		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
4648 
4649 	if (dp)
4650 		nfs4_put_stid(&dp->dl_stid);
4651 	if (stp)
4652 		nfs4_put_stid(&stp->st_stid);
4653 
4654 	return status;
4655 }
4656 
4657 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
4658 			      struct nfsd4_open *open)
4659 {
4660 	if (open->op_openowner) {
4661 		struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
4662 
4663 		nfsd4_cstate_assign_replay(cstate, so);
4664 		nfs4_put_stateowner(so);
4665 	}
4666 	if (open->op_file)
4667 		kmem_cache_free(file_slab, open->op_file);
4668 	if (open->op_stp)
4669 		nfs4_put_stid(&open->op_stp->st_stid);
4670 	if (open->op_odstate)
4671 		kmem_cache_free(odstate_slab, open->op_odstate);
4672 }
4673 
4674 __be32
4675 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4676 	    union nfsd4_op_u *u)
4677 {
4678 	clientid_t *clid = &u->renew;
4679 	struct nfs4_client *clp;
4680 	__be32 status;
4681 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4682 
4683 	dprintk("process_renew(%08x/%08x): starting\n",
4684 			clid->cl_boot, clid->cl_id);
4685 	status = lookup_clientid(clid, cstate, nn);
4686 	if (status)
4687 		goto out;
4688 	clp = cstate->clp;
4689 	status = nfserr_cb_path_down;
4690 	if (!list_empty(&clp->cl_delegations)
4691 			&& clp->cl_cb_state != NFSD4_CB_UP)
4692 		goto out;
4693 	status = nfs_ok;
4694 out:
4695 	return status;
4696 }
4697 
4698 void
4699 nfsd4_end_grace(struct nfsd_net *nn)
4700 {
4701 	/* do nothing if grace period already ended */
4702 	if (nn->grace_ended)
4703 		return;
4704 
4705 	dprintk("NFSD: end of grace period\n");
4706 	nn->grace_ended = true;
4707 	/*
4708 	 * If the server goes down again right now, an NFSv4
4709 	 * client will still be allowed to reclaim after it comes back up,
4710 	 * even if it hasn't yet had a chance to reclaim state this time.
4711 	 *
4712 	 */
4713 	nfsd4_record_grace_done(nn);
4714 	/*
4715 	 * At this point, NFSv4 clients can still reclaim.  But if the
4716 	 * server crashes, any that have not yet reclaimed will be out
4717 	 * of luck on the next boot.
4718 	 *
4719 	 * (NFSv4.1+ clients are considered to have reclaimed once they
4720 	 * call RECLAIM_COMPLETE.  NFSv4.0 clients are considered to
4721 	 * have reclaimed after their first OPEN.)
4722 	 */
4723 	locks_end_grace(&nn->nfsd4_manager);
4724 	/*
4725 	 * At this point, and once lockd and/or any other containers
4726 	 * exit their grace period, further reclaims will fail and
4727 	 * regular locking can resume.
4728 	 */
4729 }
4730 
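/*
 * The laundromat: expire clients whose lease has run out, revoke expired
 * delegations, clean up v4.0 openowners parked on the close LRU, and free
 * blocked-lock notifications that were never revisited.  Returns the number
 * of seconds until the next run is needed.
 */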
4731 static time_t
4732 nfs4_laundromat(struct nfsd_net *nn)
4733 {
4734 	struct nfs4_client *clp;
4735 	struct nfs4_openowner *oo;
4736 	struct nfs4_delegation *dp;
4737 	struct nfs4_ol_stateid *stp;
4738 	struct nfsd4_blocked_lock *nbl;
4739 	struct list_head *pos, *next, reaplist;
4740 	time_t cutoff = get_seconds() - nn->nfsd4_lease;
4741 	time_t t, new_timeo = nn->nfsd4_lease;
4742 
4743 	dprintk("NFSD: laundromat service - starting\n");
4744 	nfsd4_end_grace(nn);
4745 	INIT_LIST_HEAD(&reaplist);
4746 	spin_lock(&nn->client_lock);
4747 	list_for_each_safe(pos, next, &nn->client_lru) {
4748 		clp = list_entry(pos, struct nfs4_client, cl_lru);
4749 		if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
4750 			t = clp->cl_time - cutoff;
4751 			new_timeo = min(new_timeo, t);
4752 			break;
4753 		}
4754 		if (mark_client_expired_locked(clp)) {
4755 			dprintk("NFSD: client in use (clientid %08x)\n",
4756 				clp->cl_clientid.cl_id);
4757 			continue;
4758 		}
4759 		list_add(&clp->cl_lru, &reaplist);
4760 	}
4761 	spin_unlock(&nn->client_lock);
4762 	list_for_each_safe(pos, next, &reaplist) {
4763 		clp = list_entry(pos, struct nfs4_client, cl_lru);
4764 		dprintk("NFSD: purging unused client (clientid %08x)\n",
4765 			clp->cl_clientid.cl_id);
4766 		list_del_init(&clp->cl_lru);
4767 		expire_client(clp);
4768 	}
4769 	spin_lock(&state_lock);
4770 	list_for_each_safe(pos, next, &nn->del_recall_lru) {
4771 		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
4772 		if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
4773 			t = dp->dl_time - cutoff;
4774 			new_timeo = min(new_timeo, t);
4775 			break;
4776 		}
4777 		WARN_ON(!unhash_delegation_locked(dp));
4778 		list_add(&dp->dl_recall_lru, &reaplist);
4779 	}
4780 	spin_unlock(&state_lock);
4781 	while (!list_empty(&reaplist)) {
4782 		dp = list_first_entry(&reaplist, struct nfs4_delegation,
4783 					dl_recall_lru);
4784 		list_del_init(&dp->dl_recall_lru);
4785 		revoke_delegation(dp);
4786 	}
4787 
4788 	spin_lock(&nn->client_lock);
4789 	while (!list_empty(&nn->close_lru)) {
4790 		oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
4791 					oo_close_lru);
4792 		if (time_after((unsigned long)oo->oo_time,
4793 			       (unsigned long)cutoff)) {
4794 			t = oo->oo_time - cutoff;
4795 			new_timeo = min(new_timeo, t);
4796 			break;
4797 		}
4798 		list_del_init(&oo->oo_close_lru);
4799 		stp = oo->oo_last_closed_stid;
4800 		oo->oo_last_closed_stid = NULL;
4801 		spin_unlock(&nn->client_lock);
4802 		nfs4_put_stid(&stp->st_stid);
4803 		spin_lock(&nn->client_lock);
4804 	}
4805 	spin_unlock(&nn->client_lock);
4806 
4807 	/*
4808 	 * It's possible for a client to try and acquire an already held lock
4809 	 * It's possible for a client to try to acquire an already held lock
4810 	 * So, we clean out any un-revisited request after a lease period
4811 	 * under the assumption that the client is no longer interested.
4812 	 *
4813 	 * RFC5661, sec. 9.6 states that the client must not rely on getting
4814 	 * notifications and must continue to poll for locks, even when the
4815 	 * server supports them. Thus this shouldn't lead to clients blocking
4816 	 * indefinitely once the lock does become free.
4817 	 */
4818 	BUG_ON(!list_empty(&reaplist));
4819 	spin_lock(&nn->blocked_locks_lock);
4820 	while (!list_empty(&nn->blocked_locks_lru)) {
4821 		nbl = list_first_entry(&nn->blocked_locks_lru,
4822 					struct nfsd4_blocked_lock, nbl_lru);
4823 		if (time_after((unsigned long)nbl->nbl_time,
4824 			       (unsigned long)cutoff)) {
4825 			t = nbl->nbl_time - cutoff;
4826 			new_timeo = min(new_timeo, t);
4827 			break;
4828 		}
4829 		list_move(&nbl->nbl_lru, &reaplist);
4830 		list_del_init(&nbl->nbl_list);
4831 	}
4832 	spin_unlock(&nn->blocked_locks_lock);
4833 
4834 	while (!list_empty(&reaplist)) {
4835 		nbl = list_first_entry(&reaplist,
4836 					struct nfsd4_blocked_lock, nbl_lru);
4837 		list_del_init(&nbl->nbl_lru);
4838 		posix_unblock_lock(&nbl->nbl_lock);
4839 		free_blocked_lock(nbl);
4840 	}
4841 
4842 	new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
4843 	return new_timeo;
4844 }
4845 
4846 static struct workqueue_struct *laundry_wq;
4847 static void laundromat_main(struct work_struct *);
4848 
4849 static void
4850 laundromat_main(struct work_struct *laundry)
4851 {
4852 	time_t t;
4853 	struct delayed_work *dwork = to_delayed_work(laundry);
4854 	struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
4855 					   laundromat_work);
4856 
4857 	t = nfs4_laundromat(nn);
4858 	dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
4859 	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
4860 }
4861 
4862 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
4863 {
4864 	if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
4865 		return nfserr_bad_stateid;
4866 	return nfs_ok;
4867 }
4868 
4869 static inline int
4870 access_permit_read(struct nfs4_ol_stateid *stp)
4871 {
4872 	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
4873 		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
4874 		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
4875 }
4876 
4877 static inline int
4878 access_permit_write(struct nfs4_ol_stateid *stp)
4879 {
4880 	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
4881 		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
4882 }
4883 
4884 static
4885 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
4886 {
4887 	__be32 status = nfserr_openmode;
4888 
4889 	/* For lock stateid's, we test the parent open, not the lock: */
4890 	/* For lock stateids, we test the parent open, not the lock: */
4891 		stp = stp->st_openstp;
4892 	if ((flags & WR_STATE) && !access_permit_write(stp))
4893 		goto out;
4894 	if ((flags & RD_STATE) && !access_permit_read(stp))
4895 		goto out;
4896 	status = nfs_ok;
4897 out:
4898 	return status;
4899 }
4900 
4901 static inline __be32
4902 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
4903 {
4904 	if (ONE_STATEID(stateid) && (flags & RD_STATE))
4905 		return nfs_ok;
4906 	else if (opens_in_grace(net)) {
4907 		/* Answer in remaining cases depends on existence of
4908 		 * conflicting state; so we must wait out the grace period. */
4909 		return nfserr_grace;
4910 	} else if (flags & WR_STATE)
4911 		return nfs4_share_conflict(current_fh,
4912 				NFS4_SHARE_DENY_WRITE);
4913 	else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
4914 		return nfs4_share_conflict(current_fh,
4915 				NFS4_SHARE_DENY_READ);
4916 }
4917 
4918 /*
4919  * Allow READ/WRITE during grace period on recovered state only for files
4920  * that are not able to provide mandatory locking.
4921  */
4922 static inline int
4923 grace_disallows_io(struct net *net, struct inode *inode)
4924 {
4925 	return opens_in_grace(net) && mandatory_lock(inode);
4926 }
4927 
4928 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
4929 {
4930 	/*
4931 	 * When sessions are used the stateid generation number is ignored
4932 	 * when it is zero.
4933 	 */
4934 	if (has_session && in->si_generation == 0)
4935 		return nfs_ok;
4936 
4937 	if (in->si_generation == ref->si_generation)
4938 		return nfs_ok;
4939 
4940 	/* If the client sends us a stateid from the future, it's buggy: */
4941 	if (nfsd4_stateid_generation_after(in, ref))
4942 		return nfserr_bad_stateid;
4943 	/*
4944 	 * However, we could see a stateid from the past, even from a
4945 	 * non-buggy client.  For example, if the client sends a lock
4946 	 * while some IO is outstanding, the lock may bump si_generation
4947 	 * while the IO is still in flight.  The client could avoid that
4948 	 * situation by waiting for responses on all the IO requests,
4949 	 * but better performance may result in retrying IO that
4950 	 * receives an old_stateid error if requests are rarely
4951 	 * reordered in flight:
4952 	 */
4953 	return nfserr_old_stateid;
4954 }
4955 
4956 static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
4957 {
4958 	__be32 ret;
4959 
4960 	spin_lock(&s->sc_lock);
4961 	ret = nfsd4_verify_open_stid(s);
4962 	if (ret == nfs_ok)
4963 		ret = check_stateid_generation(in, &s->sc_stateid, has_session);
4964 	spin_unlock(&s->sc_lock);
4965 	return ret;
4966 }
4967 
4968 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
4969 {
4970 	if (ols->st_stateowner->so_is_open_owner &&
4971 	    !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
4972 		return nfserr_bad_stateid;
4973 	return nfs_ok;
4974 }
4975 
4976 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
4977 {
4978 	struct nfs4_stid *s;
4979 	__be32 status = nfserr_bad_stateid;
4980 
4981 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
4982 		CLOSE_STATEID(stateid))
4983 		return status;
4984 	/* Client debugging aid. */
4985 	if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
4986 		char addr_str[INET6_ADDRSTRLEN];
4987 		rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
4988 				 sizeof(addr_str));
4989 		pr_warn_ratelimited("NFSD: client %s testing state ID "
4990 					"with incorrect client ID\n", addr_str);
4991 		return status;
4992 	}
4993 	spin_lock(&cl->cl_lock);
4994 	s = find_stateid_locked(cl, stateid);
4995 	if (!s)
4996 		goto out_unlock;
4997 	status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
4998 	if (status)
4999 		goto out_unlock;
5000 	switch (s->sc_type) {
5001 	case NFS4_DELEG_STID:
5002 		status = nfs_ok;
5003 		break;
5004 	case NFS4_REVOKED_DELEG_STID:
5005 		status = nfserr_deleg_revoked;
5006 		break;
5007 	case NFS4_OPEN_STID:
5008 	case NFS4_LOCK_STID:
5009 		status = nfsd4_check_openowner_confirmed(openlockstateid(s));
5010 		break;
5011 	default:
5012 		printk("unknown stateid type %x\n", s->sc_type);
5013 		/* Fallthrough */
5014 	case NFS4_CLOSED_STID:
5015 	case NFS4_CLOSED_DELEG_STID:
5016 		status = nfserr_bad_stateid;
5017 	}
5018 out_unlock:
5019 	spin_unlock(&cl->cl_lock);
5020 	return status;
5021 }
5022 
5023 __be32
5024 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
5025 		     stateid_t *stateid, unsigned char typemask,
5026 		     struct nfs4_stid **s, struct nfsd_net *nn)
5027 {
5028 	__be32 status;
5029 	bool return_revoked = false;
5030 
5031 	/*
5032 	 * Only return revoked delegations if explicitly asked;
5033 	 * otherwise we report revoked or bad_stateid status.
5034 	 */
5035 	if (typemask & NFS4_REVOKED_DELEG_STID)
5036 		return_revoked = true;
5037 	else if (typemask & NFS4_DELEG_STID)
5038 		typemask |= NFS4_REVOKED_DELEG_STID;
5039 
5040 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
5041 		CLOSE_STATEID(stateid))
5042 		return nfserr_bad_stateid;
5043 	status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
5044 	if (status == nfserr_stale_clientid) {
5045 		if (cstate->session)
5046 			return nfserr_bad_stateid;
5047 		return nfserr_stale_stateid;
5048 	}
5049 	if (status)
5050 		return status;
5051 	*s = find_stateid_by_type(cstate->clp, stateid, typemask);
5052 	if (!*s)
5053 		return nfserr_bad_stateid;
5054 	if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
5055 		nfs4_put_stid(*s);
5056 		if (cstate->minorversion)
5057 			return nfserr_deleg_revoked;
5058 		return nfserr_bad_stateid;
5059 	}
5060 	return nfs_ok;
5061 }
5062 
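/*
 * Map a stateid to a referenced struct file: the lease file for delegation
 * stateids, or a readable/writeable file (depending on @flags) for open and
 * lock stateids.  Returns NULL if no suitable file is cached.
 */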
5063 static struct file *
5064 nfs4_find_file(struct nfs4_stid *s, int flags)
5065 {
5066 	if (!s)
5067 		return NULL;
5068 
5069 	switch (s->sc_type) {
5070 	case NFS4_DELEG_STID:
5071 		if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
5072 			return NULL;
5073 		return get_file(s->sc_file->fi_deleg_file);
5074 	case NFS4_OPEN_STID:
5075 	case NFS4_LOCK_STID:
5076 		if (flags & RD_STATE)
5077 			return find_readable_file(s->sc_file);
5078 		else
5079 			return find_writeable_file(s->sc_file);
5080 		break;
5081 	}
5082 
5083 	return NULL;
5084 }
5085 
5086 static __be32
5087 nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags)
5088 {
5089 	__be32 status;
5090 
5091 	status = nfsd4_check_openowner_confirmed(ols);
5092 	if (status)
5093 		return status;
5094 	return nfs4_check_openmode(ols, flags);
5095 }
5096 
5097 static __be32
5098 nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
5099 		struct file **filpp, bool *tmp_file, int flags)
5100 {
5101 	int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
5102 	struct file *file;
5103 	__be32 status;
5104 
5105 	file = nfs4_find_file(s, flags);
5106 	if (file) {
5107 		status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
5108 				acc | NFSD_MAY_OWNER_OVERRIDE);
5109 		if (status) {
5110 			fput(file);
5111 			return status;
5112 		}
5113 
5114 		*filpp = file;
5115 	} else {
5116 		status = nfsd_open(rqstp, fhp, S_IFREG, acc, filpp);
5117 		if (status)
5118 			return status;
5119 
5120 		if (tmp_file)
5121 			*tmp_file = true;
5122 	}
5123 
5124 	return 0;
5125 }
5126 
5127 /*
5128  * Check the stateid sent with an I/O operation and, optionally, supply an open file for it
5129  */
5130 __be32
5131 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
5132 		struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
5133 		stateid_t *stateid, int flags, struct file **filpp, bool *tmp_file)
5134 {
5135 	struct inode *ino = d_inode(fhp->fh_dentry);
5136 	struct net *net = SVC_NET(rqstp);
5137 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5138 	struct nfs4_stid *s = NULL;
5139 	__be32 status;
5140 
5141 	if (filpp)
5142 		*filpp = NULL;
5143 	if (tmp_file)
5144 		*tmp_file = false;
5145 
5146 	if (grace_disallows_io(net, ino))
5147 		return nfserr_grace;
5148 
5149 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
5150 		status = check_special_stateids(net, fhp, stateid, flags);
5151 		goto done;
5152 	}
5153 
5154 	status = nfsd4_lookup_stateid(cstate, stateid,
5155 				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
5156 				&s, nn);
5157 	if (status)
5158 		return status;
5159 	status = nfsd4_stid_check_stateid_generation(stateid, s,
5160 			nfsd4_has_session(cstate));
5161 	if (status)
5162 		goto out;
5163 
5164 	switch (s->sc_type) {
5165 	case NFS4_DELEG_STID:
5166 		status = nfs4_check_delegmode(delegstateid(s), flags);
5167 		break;
5168 	case NFS4_OPEN_STID:
5169 	case NFS4_LOCK_STID:
5170 		status = nfs4_check_olstateid(fhp, openlockstateid(s), flags);
5171 		break;
5172 	default:
5173 		status = nfserr_bad_stateid;
5174 		break;
5175 	}
5176 	if (status)
5177 		goto out;
5178 	status = nfs4_check_fh(fhp, s);
5179 
5180 done:
5181 	if (!status && filpp)
5182 		status = nfs4_check_file(rqstp, fhp, s, filpp, tmp_file, flags);
5183 out:
5184 	if (s)
5185 		nfs4_put_stid(s);
5186 	return status;
5187 }
5188 
5189 /*
5190  * Test if the stateid is valid
5191  */
5192 __be32
5193 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5194 		   union nfsd4_op_u *u)
5195 {
5196 	struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
5197 	struct nfsd4_test_stateid_id *stateid;
5198 	struct nfs4_client *cl = cstate->session->se_client;
5199 
5200 	list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
5201 		stateid->ts_id_status =
5202 			nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
5203 
5204 	return nfs_ok;
5205 }
5206 
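/*
 * FREE_STATEID helper for lock stateids: fail with nfserr_locks_held while
 * the lockowner still holds locks on the file; otherwise unhash and release
 * the stateid.  Consumes the caller's reference on @s.
 */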
5207 static __be32
5208 nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
5209 {
5210 	struct nfs4_ol_stateid *stp = openlockstateid(s);
5211 	__be32 ret;
5212 
5213 	ret = nfsd4_lock_ol_stateid(stp);
5214 	if (ret)
5215 		goto out_put_stid;
5216 
5217 	ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
5218 	if (ret)
5219 		goto out;
5220 
5221 	ret = nfserr_locks_held;
5222 	if (check_for_locks(stp->st_stid.sc_file,
5223 			    lockowner(stp->st_stateowner)))
5224 		goto out;
5225 
5226 	release_lock_stateid(stp);
5227 	ret = nfs_ok;
5228 
5229 out:
5230 	mutex_unlock(&stp->st_mutex);
5231 out_put_stid:
5232 	nfs4_put_stid(s);
5233 	return ret;
5234 }
5235 
5236 __be32
5237 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5238 		   union nfsd4_op_u *u)
5239 {
5240 	struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
5241 	stateid_t *stateid = &free_stateid->fr_stateid;
5242 	struct nfs4_stid *s;
5243 	struct nfs4_delegation *dp;
5244 	struct nfs4_client *cl = cstate->session->se_client;
5245 	__be32 ret = nfserr_bad_stateid;
5246 
5247 	spin_lock(&cl->cl_lock);
5248 	s = find_stateid_locked(cl, stateid);
5249 	if (!s)
5250 		goto out_unlock;
5251 	spin_lock(&s->sc_lock);
5252 	switch (s->sc_type) {
5253 	case NFS4_DELEG_STID:
5254 		ret = nfserr_locks_held;
5255 		break;
5256 	case NFS4_OPEN_STID:
5257 		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
5258 		if (ret)
5259 			break;
5260 		ret = nfserr_locks_held;
5261 		break;
5262 	case NFS4_LOCK_STID:
5263 		spin_unlock(&s->sc_lock);
5264 		refcount_inc(&s->sc_count);
5265 		spin_unlock(&cl->cl_lock);
5266 		ret = nfsd4_free_lock_stateid(stateid, s);
5267 		goto out;
5268 	case NFS4_REVOKED_DELEG_STID:
5269 		spin_unlock(&s->sc_lock);
5270 		dp = delegstateid(s);
5271 		list_del_init(&dp->dl_recall_lru);
5272 		spin_unlock(&cl->cl_lock);
5273 		nfs4_put_stid(s);
5274 		ret = nfs_ok;
5275 		goto out;
5276 	/* Default falls through and returns nfserr_bad_stateid */
5277 	}
5278 	spin_unlock(&s->sc_lock);
5279 out_unlock:
5280 	spin_unlock(&cl->cl_lock);
5281 out:
5282 	return ret;
5283 }
5284 
5285 static inline int
5286 setlkflg(int type)
5287 {
5288 	return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
5289 		RD_STATE : WR_STATE;
5290 }
5291 
5292 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
5293 {
5294 	struct svc_fh *current_fh = &cstate->current_fh;
5295 	struct nfs4_stateowner *sop = stp->st_stateowner;
5296 	__be32 status;
5297 
5298 	status = nfsd4_check_seqid(cstate, sop, seqid);
5299 	if (status)
5300 		return status;
5301 	status = nfsd4_lock_ol_stateid(stp);
5302 	if (status != nfs_ok)
5303 		return status;
5304 	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
5305 	if (status == nfs_ok)
5306 		status = nfs4_check_fh(current_fh, &stp->st_stid);
5307 	if (status != nfs_ok)
5308 		mutex_unlock(&stp->st_mutex);
5309 	return status;
5310 }
5311 
5312 /*
5313  * Checks for sequence id mutating operations.
5314  */
5315 static __be32
5316 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
5317 			 stateid_t *stateid, char typemask,
5318 			 struct nfs4_ol_stateid **stpp,
5319 			 struct nfsd_net *nn)
5320 {
5321 	__be32 status;
5322 	struct nfs4_stid *s;
5323 	struct nfs4_ol_stateid *stp = NULL;
5324 
5325 	dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
5326 		seqid, STATEID_VAL(stateid));
5327 
5328 	*stpp = NULL;
5329 	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
5330 	if (status)
5331 		return status;
5332 	stp = openlockstateid(s);
5333 	nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
5334 
5335 	status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
5336 	if (!status)
5337 		*stpp = stp;
5338 	else
5339 		nfs4_put_stid(&stp->st_stid);
5340 	return status;
5341 }
5342 
5343 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
5344 						 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
5345 {
5346 	__be32 status;
5347 	struct nfs4_openowner *oo;
5348 	struct nfs4_ol_stateid *stp;
5349 
5350 	status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
5351 						NFS4_OPEN_STID, &stp, nn);
5352 	if (status)
5353 		return status;
5354 	oo = openowner(stp->st_stateowner);
5355 	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
5356 		mutex_unlock(&stp->st_mutex);
5357 		nfs4_put_stid(&stp->st_stid);
5358 		return nfserr_bad_stateid;
5359 	}
5360 	*stpp = stp;
5361 	return nfs_ok;
5362 }
5363 
5364 __be32
5365 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5366 		   union nfsd4_op_u *u)
5367 {
5368 	struct nfsd4_open_confirm *oc = &u->open_confirm;
5369 	__be32 status;
5370 	struct nfs4_openowner *oo;
5371 	struct nfs4_ol_stateid *stp;
5372 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5373 
5374 	dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
5375 			cstate->current_fh.fh_dentry);
5376 
5377 	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
5378 	if (status)
5379 		return status;
5380 
5381 	status = nfs4_preprocess_seqid_op(cstate,
5382 					oc->oc_seqid, &oc->oc_req_stateid,
5383 					NFS4_OPEN_STID, &stp, nn);
5384 	if (status)
5385 		goto out;
5386 	oo = openowner(stp->st_stateowner);
5387 	status = nfserr_bad_stateid;
5388 	if (oo->oo_flags & NFS4_OO_CONFIRMED) {
5389 		mutex_unlock(&stp->st_mutex);
5390 		goto put_stateid;
5391 	}
5392 	oo->oo_flags |= NFS4_OO_CONFIRMED;
5393 	nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
5394 	mutex_unlock(&stp->st_mutex);
5395 	dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
5396 		__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
5397 
5398 	nfsd4_client_record_create(oo->oo_owner.so_client);
5399 	status = nfs_ok;
5400 put_stateid:
5401 	nfs4_put_stid(&stp->st_stid);
5402 out:
5403 	nfsd4_bump_seqid(cstate, status);
5404 	return status;
5405 }
5406 
5407 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
5408 {
5409 	if (!test_access(access, stp))
5410 		return;
5411 	nfs4_file_put_access(stp->st_stid.sc_file, access);
5412 	clear_access(access, stp);
5413 }
5414 
5415 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
5416 {
5417 	switch (to_access) {
5418 	case NFS4_SHARE_ACCESS_READ:
5419 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
5420 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
5421 		break;
5422 	case NFS4_SHARE_ACCESS_WRITE:
5423 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
5424 		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
5425 		break;
5426 	case NFS4_SHARE_ACCESS_BOTH:
5427 		break;
5428 	default:
5429 		WARN_ON_ONCE(1);
5430 	}
5431 }
5432 
5433 __be32
5434 nfsd4_open_downgrade(struct svc_rqst *rqstp,
5435 		     struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
5436 {
5437 	struct nfsd4_open_downgrade *od = &u->open_downgrade;
5438 	__be32 status;
5439 	struct nfs4_ol_stateid *stp;
5440 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5441 
5442 	dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
5443 			cstate->current_fh.fh_dentry);
5444 
5445 	/* We don't yet support WANT bits: */
5446 	if (od->od_deleg_want)
5447 		dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
5448 			od->od_deleg_want);
5449 
5450 	status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
5451 					&od->od_stateid, &stp, nn);
5452 	if (status)
5453 		goto out;
5454 	status = nfserr_inval;
5455 	if (!test_access(od->od_share_access, stp)) {
5456 		dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
5457 			stp->st_access_bmap, od->od_share_access);
5458 		goto put_stateid;
5459 	}
5460 	if (!test_deny(od->od_share_deny, stp)) {
5461 		dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
5462 			stp->st_deny_bmap, od->od_share_deny);
5463 		goto put_stateid;
5464 	}
5465 	nfs4_stateid_downgrade(stp, od->od_share_access);
5466 	reset_union_bmap_deny(od->od_share_deny, stp);
5467 	nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
5468 	status = nfs_ok;
5469 put_stateid:
5470 	mutex_unlock(&stp->st_mutex);
5471 	nfs4_put_stid(&stp->st_stid);
5472 out:
5473 	nfsd4_bump_seqid(cstate, status);
5474 	return status;
5475 }
5476 
5477 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
5478 {
5479 	struct nfs4_client *clp = s->st_stid.sc_client;
5480 	bool unhashed;
5481 	LIST_HEAD(reaplist);
5482 
5483 	spin_lock(&clp->cl_lock);
5484 	unhashed = unhash_open_stateid(s, &reaplist);
5485 
5486 	if (clp->cl_minorversion) {
5487 		if (unhashed)
5488 			put_ol_stateid_locked(s, &reaplist);
5489 		spin_unlock(&clp->cl_lock);
5490 		free_ol_stateid_reaplist(&reaplist);
5491 	} else {
5492 		spin_unlock(&clp->cl_lock);
5493 		free_ol_stateid_reaplist(&reaplist);
5494 		if (unhashed)
5495 			move_to_close_lru(s, clp->net);
5496 	}
5497 }
5498 
5499 /*
5500  * The CLOSE operation: release the state held under an open stateid.
5501  */
5502 __be32
5503 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5504 		union nfsd4_op_u *u)
5505 {
5506 	struct nfsd4_close *close = &u->close;
5507 	__be32 status;
5508 	struct nfs4_ol_stateid *stp;
5509 	struct net *net = SVC_NET(rqstp);
5510 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5511 
5512 	dprintk("NFSD: nfsd4_close on file %pd\n",
5513 			cstate->current_fh.fh_dentry);
5514 
5515 	status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
5516 					&close->cl_stateid,
5517 					NFS4_OPEN_STID|NFS4_CLOSED_STID,
5518 					&stp, nn);
5519 	nfsd4_bump_seqid(cstate, status);
5520 	if (status)
5521 		goto out;
5522 
5523 	stp->st_stid.sc_type = NFS4_CLOSED_STID;
5524 	nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
5525 
5526 	nfsd4_close_open_stateid(stp);
5527 	mutex_unlock(&stp->st_mutex);
5528 
5529 	/* See RFC5661 section 18.2.4 */
5530 	if (stp->st_stid.sc_client->cl_minorversion)
5531 		memcpy(&close->cl_stateid, &close_stateid,
5532 				sizeof(close->cl_stateid));
5533 
5534 	/* put reference from nfs4_preprocess_seqid_op */
5535 	nfs4_put_stid(&stp->st_stid);
5536 out:
5537 	return status;
5538 }
5539 
5540 __be32
5541 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5542 		  union nfsd4_op_u *u)
5543 {
5544 	struct nfsd4_delegreturn *dr = &u->delegreturn;
5545 	struct nfs4_delegation *dp;
5546 	stateid_t *stateid = &dr->dr_stateid;
5547 	struct nfs4_stid *s;
5548 	__be32 status;
5549 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5550 
5551 	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
5552 		return status;
5553 
5554 	status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
5555 	if (status)
5556 		goto out;
5557 	dp = delegstateid(s);
5558 	status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
5559 	if (status)
5560 		goto put_stateid;
5561 
5562 	destroy_delegation(dp);
5563 put_stateid:
5564 	nfs4_put_stid(&dp->dl_stid);
5565 out:
5566 	return status;
5567 }
5568 
5569 static inline u64
5570 end_offset(u64 start, u64 len)
5571 {
5572 	u64 end;
5573 
5574 	end = start + len;
5575 	return end >= start ? end: NFS4_MAX_UINT64;
5576 }
5577 
5578 /* last octet in a range */
5579 static inline u64
5580 last_byte_offset(u64 start, u64 len)
5581 {
5582 	u64 end;
5583 
5584 	WARN_ON_ONCE(!len);
5585 	end = start + len;
5586 	return end > start ? end - 1: NFS4_MAX_UINT64;
5587 }
5588 
5589 /*
5590  * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
5591  * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
5592  * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
5593  * locking, this prevents us from being completely protocol-compliant.  The
5594  * real solution to this problem is to start using unsigned file offsets in
5595  * the VFS, but this is a very deep change!
5596  */
5597 static inline void
5598 nfs4_transform_lock_offset(struct file_lock *lock)
5599 {
5600 	if (lock->fl_start < 0)
5601 		lock->fl_start = OFFSET_MAX;
5602 	if (lock->fl_end < 0)
5603 		lock->fl_end = OFFSET_MAX;
5604 }
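/*
 * Worked example of the clamping above: a LOCK with offset
 * 0x7fffffffffffffff and length 2 makes last_byte_offset() return
 * 0x8000000000000000, which is negative once stored in the signed fl_end,
 * so it is clamped to OFFSET_MAX and the lock simply runs to the largest
 * offset the VFS can represent.
 */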
5605 
5606 static fl_owner_t
5607 nfsd4_fl_get_owner(fl_owner_t owner)
5608 {
5609 	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
5610 
5611 	nfs4_get_stateowner(&lo->lo_owner);
5612 	return owner;
5613 }
5614 
5615 static void
5616 nfsd4_fl_put_owner(fl_owner_t owner)
5617 {
5618 	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
5619 
5620 	if (lo)
5621 		nfs4_put_stateowner(&lo->lo_owner);
5622 }
5623 
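/*
 * lm_notify callback, run by the locking core when a blocked (FL_SLEEP)
 * lock request becomes grantable.  If the blocked lock is still queued on
 * the lockowner, dequeue it and schedule the CB_NOTIFY_LOCK callback so
 * the client knows to retry its LOCK.
 */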
5624 static void
5625 nfsd4_lm_notify(struct file_lock *fl)
5626 {
5627 	struct nfs4_lockowner		*lo = (struct nfs4_lockowner *)fl->fl_owner;
5628 	struct net			*net = lo->lo_owner.so_client->net;
5629 	struct nfsd_net			*nn = net_generic(net, nfsd_net_id);
5630 	struct nfsd4_blocked_lock	*nbl = container_of(fl,
5631 						struct nfsd4_blocked_lock, nbl_lock);
5632 	bool queue = false;
5633 
5634 	/* An empty list means that something else is going to be using it */
5635 	spin_lock(&nn->blocked_locks_lock);
5636 	if (!list_empty(&nbl->nbl_list)) {
5637 		list_del_init(&nbl->nbl_list);
5638 		list_del_init(&nbl->nbl_lru);
5639 		queue = true;
5640 	}
5641 	spin_unlock(&nn->blocked_locks_lock);
5642 
5643 	if (queue)
5644 		nfsd4_run_cb(&nbl->nbl_cb);
5645 }
5646 
5647 static const struct lock_manager_operations nfsd_posix_mng_ops  = {
5648 	.lm_notify = nfsd4_lm_notify,
5649 	.lm_get_owner = nfsd4_fl_get_owner,
5650 	.lm_put_owner = nfsd4_fl_put_owner,
5651 };
5652 
5653 static inline void
5654 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
5655 {
5656 	struct nfs4_lockowner *lo;
5657 
5658 	if (fl->fl_lmops == &nfsd_posix_mng_ops) {
5659 		lo = (struct nfs4_lockowner *) fl->fl_owner;
5660 		deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
5661 					lo->lo_owner.so_owner.len, GFP_KERNEL);
5662 		if (!deny->ld_owner.data)
5663 			/* We just don't care that much */
5664 			goto nevermind;
5665 		deny->ld_owner.len = lo->lo_owner.so_owner.len;
5666 		deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
5667 	} else {
5668 nevermind:
5669 		deny->ld_owner.len = 0;
5670 		deny->ld_owner.data = NULL;
5671 		deny->ld_clientid.cl_boot = 0;
5672 		deny->ld_clientid.cl_id = 0;
5673 	}
5674 	deny->ld_start = fl->fl_start;
5675 	deny->ld_length = NFS4_MAX_UINT64;
5676 	if (fl->fl_end != NFS4_MAX_UINT64)
5677 		deny->ld_length = fl->fl_end - fl->fl_start + 1;
5678 	deny->ld_type = NFS4_READ_LT;
5679 	if (fl->fl_type != F_RDLCK)
5680 		deny->ld_type = NFS4_WRITE_LT;
5681 }
5682 
5683 static struct nfs4_lockowner *
5684 find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
5685 {
5686 	unsigned int strhashval = ownerstr_hashval(owner);
5687 	struct nfs4_stateowner *so;
5688 
5689 	lockdep_assert_held(&clp->cl_lock);
5690 
5691 	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
5692 			    so_strhash) {
5693 		if (so->so_is_open_owner)
5694 			continue;
5695 		if (same_owner_str(so, owner))
5696 			return lockowner(nfs4_get_stateowner(so));
5697 	}
5698 	return NULL;
5699 }
5700 
5701 static struct nfs4_lockowner *
5702 find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
5703 {
5704 	struct nfs4_lockowner *lo;
5705 
5706 	spin_lock(&clp->cl_lock);
5707 	lo = find_lockowner_str_locked(clp, owner);
5708 	spin_unlock(&clp->cl_lock);
5709 	return lo;
5710 }
5711 
5712 static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
5713 {
5714 	unhash_lockowner_locked(lockowner(sop));
5715 }
5716 
5717 static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
5718 {
5719 	struct nfs4_lockowner *lo = lockowner(sop);
5720 
5721 	kmem_cache_free(lockowner_slab, lo);
5722 }
5723 
5724 static const struct nfs4_stateowner_operations lockowner_ops = {
5725 	.so_unhash =	nfs4_unhash_lockowner,
5726 	.so_free =	nfs4_free_lockowner,
5727 };
5728 
5729 /*
5730  * Alloc a lock owner structure.
5731  * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
5732  * occurred.
5733  *
5734  * strhashval = ownerstr_hashval
5735  */
5736 static struct nfs4_lockowner *
5737 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
5738 			   struct nfs4_ol_stateid *open_stp,
5739 			   struct nfsd4_lock *lock)
5740 {
5741 	struct nfs4_lockowner *lo, *ret;
5742 
5743 	lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
5744 	if (!lo)
5745 		return NULL;
5746 	INIT_LIST_HEAD(&lo->lo_blocked);
5747 	INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
5748 	lo->lo_owner.so_is_open_owner = 0;
5749 	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
5750 	lo->lo_owner.so_ops = &lockowner_ops;
5751 	spin_lock(&clp->cl_lock);
5752 	ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
5753 	if (ret == NULL) {
5754 		list_add(&lo->lo_owner.so_strhash,
5755 			 &clp->cl_ownerstr_hashtbl[strhashval]);
5756 		ret = lo;
5757 	} else
5758 		nfs4_free_stateowner(&lo->lo_owner);
5759 
5760 	spin_unlock(&clp->cl_lock);
5761 	return ret;
5762 }
5763 
5764 static struct nfs4_ol_stateid *
5765 find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
5766 {
5767 	struct nfs4_ol_stateid *lst;
5768 	struct nfs4_client *clp = lo->lo_owner.so_client;
5769 
5770 	lockdep_assert_held(&clp->cl_lock);
5771 
5772 	list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
5773 		if (lst->st_stid.sc_type != NFS4_LOCK_STID)
5774 			continue;
5775 		if (lst->st_stid.sc_file == fp) {
5776 			refcount_inc(&lst->st_stid.sc_count);
5777 			return lst;
5778 		}
5779 	}
5780 	return NULL;
5781 }
5782 
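/*
 * Initialise a newly allocated lock stateid, or hand back an existing one
 * if another thread hashed a stateid for this lockowner/file first.  In
 * the latter case the new stateid's mutex is dropped again and the caller
 * receives the already-hashed stateid with its mutex held, retrying if
 * that stateid was in the middle of being torn down.
 */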
5783 static struct nfs4_ol_stateid *
5784 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
5785 		  struct nfs4_file *fp, struct inode *inode,
5786 		  struct nfs4_ol_stateid *open_stp)
5787 {
5788 	struct nfs4_client *clp = lo->lo_owner.so_client;
5789 	struct nfs4_ol_stateid *retstp;
5790 
5791 	mutex_init(&stp->st_mutex);
5792 	mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
5793 retry:
5794 	spin_lock(&clp->cl_lock);
5795 	spin_lock(&fp->fi_lock);
5796 	retstp = find_lock_stateid(lo, fp);
5797 	if (retstp)
5798 		goto out_unlock;
5799 
5800 	refcount_inc(&stp->st_stid.sc_count);
5801 	stp->st_stid.sc_type = NFS4_LOCK_STID;
5802 	stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
5803 	get_nfs4_file(fp);
5804 	stp->st_stid.sc_file = fp;
5805 	stp->st_access_bmap = 0;
5806 	stp->st_deny_bmap = open_stp->st_deny_bmap;
5807 	stp->st_openstp = open_stp;
5808 	list_add(&stp->st_locks, &open_stp->st_locks);
5809 	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
5810 	list_add(&stp->st_perfile, &fp->fi_stateids);
5811 out_unlock:
5812 	spin_unlock(&fp->fi_lock);
5813 	spin_unlock(&clp->cl_lock);
5814 	if (retstp) {
5815 		if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
5816 			nfs4_put_stid(&retstp->st_stid);
5817 			goto retry;
5818 		}
5819 		/* To keep mutex tracking happy */
5820 		mutex_unlock(&stp->st_mutex);
5821 		stp = retstp;
5822 	}
5823 	return stp;
5824 }
5825 
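/*
 * Look up the lock stateid for this lockowner/file, allocating and
 * initialising one if none exists yet.  *new reports whether the returned
 * stateid was freshly created, so the caller can release it again if the
 * LOCK ultimately fails.
 */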
5826 static struct nfs4_ol_stateid *
5827 find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
5828 			    struct inode *inode, struct nfs4_ol_stateid *ost,
5829 			    bool *new)
5830 {
5831 	struct nfs4_stid *ns = NULL;
5832 	struct nfs4_ol_stateid *lst;
5833 	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
5834 	struct nfs4_client *clp = oo->oo_owner.so_client;
5835 
5836 	*new = false;
5837 	spin_lock(&clp->cl_lock);
5838 	lst = find_lock_stateid(lo, fi);
5839 	spin_unlock(&clp->cl_lock);
5840 	if (lst != NULL) {
5841 		if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
5842 			goto out;
5843 		nfs4_put_stid(&lst->st_stid);
5844 	}
5845 	ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
5846 	if (ns == NULL)
5847 		return NULL;
5848 
5849 	lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
5850 	if (lst == openlockstateid(ns))
5851 		*new = true;
5852 	else
5853 		nfs4_put_stid(ns);
5854 out:
5855 	return lst;
5856 }
5857 
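/*
 * Returns nonzero if the lock range is invalid: a zero length, or a
 * length other than the special "to end of file" value (NFS4_MAX_UINT64)
 * that would make offset + length wrap past the end of the 64-bit range.
 */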
5858 static int
5859 check_lock_length(u64 offset, u64 length)
5860 {
5861 	return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
5862 		(length > ~offset)));
5863 }
5864 
5865 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
5866 {
5867 	struct nfs4_file *fp = lock_stp->st_stid.sc_file;
5868 
5869 	lockdep_assert_held(&fp->fi_lock);
5870 
5871 	if (test_access(access, lock_stp))
5872 		return;
5873 	__nfs4_file_get_access(fp, access);
5874 	set_access(access, lock_stp);
5875 }
5876 
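/*
 * Find or create the lockowner and lock stateid for a LOCK request that
 * carries a "new lock owner".  For NFSv4.0 an existing lockowner must
 * present the same lock seqid it last used; sessions make that check
 * unnecessary for NFSv4.1+.
 */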
5877 static __be32
5878 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
5879 			    struct nfs4_ol_stateid *ost,
5880 			    struct nfsd4_lock *lock,
5881 			    struct nfs4_ol_stateid **plst, bool *new)
5882 {
5883 	__be32 status;
5884 	struct nfs4_file *fi = ost->st_stid.sc_file;
5885 	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
5886 	struct nfs4_client *cl = oo->oo_owner.so_client;
5887 	struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
5888 	struct nfs4_lockowner *lo;
5889 	struct nfs4_ol_stateid *lst;
5890 	unsigned int strhashval;
5891 
5892 	lo = find_lockowner_str(cl, &lock->lk_new_owner);
5893 	if (!lo) {
5894 		strhashval = ownerstr_hashval(&lock->lk_new_owner);
5895 		lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
5896 		if (lo == NULL)
5897 			return nfserr_jukebox;
5898 	} else {
5899 		/* with an existing lockowner, seqids must be the same */
5900 		status = nfserr_bad_seqid;
5901 		if (!cstate->minorversion &&
5902 		    lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
5903 			goto out;
5904 	}
5905 
5906 	lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
5907 	if (lst == NULL) {
5908 		status = nfserr_jukebox;
5909 		goto out;
5910 	}
5911 
5912 	status = nfs_ok;
5913 	*plst = lst;
5914 out:
5915 	nfs4_put_stateowner(&lo->lo_owner);
5916 	return status;
5917 }
5918 
5919 /*
5920  *  LOCK operation
5921  */
5922 __be32
5923 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5924 	   union nfsd4_op_u *u)
5925 {
5926 	struct nfsd4_lock *lock = &u->lock;
5927 	struct nfs4_openowner *open_sop = NULL;
5928 	struct nfs4_lockowner *lock_sop = NULL;
5929 	struct nfs4_ol_stateid *lock_stp = NULL;
5930 	struct nfs4_ol_stateid *open_stp = NULL;
5931 	struct nfs4_file *fp;
5932 	struct file *filp = NULL;
5933 	struct nfsd4_blocked_lock *nbl = NULL;
5934 	struct file_lock *file_lock = NULL;
5935 	struct file_lock *conflock = NULL;
5936 	__be32 status = 0;
5937 	int lkflg;
5938 	int err;
5939 	bool new = false;
5940 	unsigned char fl_type;
5941 	unsigned int fl_flags = FL_POSIX;
5942 	struct net *net = SVC_NET(rqstp);
5943 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5944 
5945 	dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
5946 		(long long) lock->lk_offset,
5947 		(long long) lock->lk_length);
5948 
5949 	if (check_lock_length(lock->lk_offset, lock->lk_length))
5950 		 return nfserr_inval;
5951 
5952 	if ((status = fh_verify(rqstp, &cstate->current_fh,
5953 				S_IFREG, NFSD_MAY_LOCK))) {
5954 		dprintk("NFSD: nfsd4_lock: permission denied!\n");
5955 		return status;
5956 	}
5957 
5958 	if (lock->lk_is_new) {
5959 		if (nfsd4_has_session(cstate))
5960 			/* See rfc 5661 18.10.3: given clientid is ignored: */
5961 			memcpy(&lock->lk_new_clientid,
5962 				&cstate->session->se_client->cl_clientid,
5963 				sizeof(clientid_t));
5964 
5965 		status = nfserr_stale_clientid;
5966 		if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
5967 			goto out;
5968 
5969 		/* validate and update open stateid and open seqid */
5970 		status = nfs4_preprocess_confirmed_seqid_op(cstate,
5971 				        lock->lk_new_open_seqid,
5972 		                        &lock->lk_new_open_stateid,
5973 					&open_stp, nn);
5974 		if (status)
5975 			goto out;
5976 		mutex_unlock(&open_stp->st_mutex);
5977 		open_sop = openowner(open_stp->st_stateowner);
5978 		status = nfserr_bad_stateid;
5979 		if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
5980 						&lock->lk_new_clientid))
5981 			goto out;
5982 		status = lookup_or_create_lock_state(cstate, open_stp, lock,
5983 							&lock_stp, &new);
5984 	} else {
5985 		status = nfs4_preprocess_seqid_op(cstate,
5986 				       lock->lk_old_lock_seqid,
5987 				       &lock->lk_old_lock_stateid,
5988 				       NFS4_LOCK_STID, &lock_stp, nn);
5989 	}
5990 	if (status)
5991 		goto out;
5992 	lock_sop = lockowner(lock_stp->st_stateowner);
5993 
5994 	lkflg = setlkflg(lock->lk_type);
5995 	status = nfs4_check_openmode(lock_stp, lkflg);
5996 	if (status)
5997 		goto out;
5998 
5999 	status = nfserr_grace;
6000 	if (locks_in_grace(net) && !lock->lk_reclaim)
6001 		goto out;
6002 	status = nfserr_no_grace;
6003 	if (!locks_in_grace(net) && lock->lk_reclaim)
6004 		goto out;
6005 
6006 	fp = lock_stp->st_stid.sc_file;
6007 	switch (lock->lk_type) {
6008 		case NFS4_READW_LT:
6009 			if (nfsd4_has_session(cstate))
6010 				fl_flags |= FL_SLEEP;
6011 			/* Fallthrough */
6012 		case NFS4_READ_LT:
6013 			spin_lock(&fp->fi_lock);
6014 			filp = find_readable_file_locked(fp);
6015 			if (filp)
6016 				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
6017 			spin_unlock(&fp->fi_lock);
6018 			fl_type = F_RDLCK;
6019 			break;
6020 		case NFS4_WRITEW_LT:
6021 			if (nfsd4_has_session(cstate))
6022 				fl_flags |= FL_SLEEP;
6023 			/* Fallthrough */
6024 		case NFS4_WRITE_LT:
6025 			spin_lock(&fp->fi_lock);
6026 			filp = find_writeable_file_locked(fp);
6027 			if (filp)
6028 				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
6029 			spin_unlock(&fp->fi_lock);
6030 			fl_type = F_WRLCK;
6031 			break;
6032 		default:
6033 			status = nfserr_inval;
6034 		goto out;
6035 	}
6036 
6037 	if (!filp) {
6038 		status = nfserr_openmode;
6039 		goto out;
6040 	}
6041 
6042 	nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
6043 	if (!nbl) {
6044 		dprintk("NFSD: %s: unable to allocate block!\n", __func__);
6045 		status = nfserr_jukebox;
6046 		goto out;
6047 	}
6048 
6049 	file_lock = &nbl->nbl_lock;
6050 	file_lock->fl_type = fl_type;
6051 	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
6052 	file_lock->fl_pid = current->tgid;
6053 	file_lock->fl_file = filp;
6054 	file_lock->fl_flags = fl_flags;
6055 	file_lock->fl_lmops = &nfsd_posix_mng_ops;
6056 	file_lock->fl_start = lock->lk_offset;
6057 	file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
6058 	nfs4_transform_lock_offset(file_lock);
6059 
6060 	conflock = locks_alloc_lock();
6061 	if (!conflock) {
6062 		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6063 		status = nfserr_jukebox;
6064 		goto out;
6065 	}
6066 
6067 	if (fl_flags & FL_SLEEP) {
6068 		nbl->nbl_time = jiffies;
6069 		spin_lock(&nn->blocked_locks_lock);
6070 		list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
6071 		list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
6072 		spin_unlock(&nn->blocked_locks_lock);
6073 	}
6074 
6075 	err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
6076 	switch (err) {
6077 	case 0: /* success! */
6078 		nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
6079 		status = 0;
6080 		break;
6081 	case FILE_LOCK_DEFERRED:
6082 		nbl = NULL;
6083 		/* Fallthrough */
6084 	case -EAGAIN:		/* conflock holds conflicting lock */
6085 		status = nfserr_denied;
6086 		dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
6087 		nfs4_set_lock_denied(conflock, &lock->lk_denied);
6088 		break;
6089 	case -EDEADLK:
6090 		status = nfserr_deadlock;
6091 		break;
6092 	default:
6093 		dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
6094 		status = nfserrno(err);
6095 		break;
6096 	}
6097 out:
6098 	if (nbl) {
6099 		/* dequeue it if we queued it before */
6100 		if (fl_flags & FL_SLEEP) {
6101 			spin_lock(&nn->blocked_locks_lock);
6102 			list_del_init(&nbl->nbl_list);
6103 			list_del_init(&nbl->nbl_lru);
6104 			spin_unlock(&nn->blocked_locks_lock);
6105 		}
6106 		free_blocked_lock(nbl);
6107 	}
6108 	if (filp)
6109 		fput(filp);
6110 	if (lock_stp) {
6111 		/* Bump seqid manually if the 4.0 replay owner is openowner */
6112 		if (cstate->replay_owner &&
6113 		    cstate->replay_owner != &lock_sop->lo_owner &&
6114 		    seqid_mutating_err(ntohl(status)))
6115 			lock_sop->lo_owner.so_seqid++;
6116 
6117 		/*
6118 		 * If this is a new, never-before-used stateid, and we are
6119 		 * returning an error, then just go ahead and release it.
6120 		 */
6121 		if (status && new)
6122 			release_lock_stateid(lock_stp);
6123 
6124 		mutex_unlock(&lock_stp->st_mutex);
6125 
6126 		nfs4_put_stid(&lock_stp->st_stid);
6127 	}
6128 	if (open_stp)
6129 		nfs4_put_stid(&open_stp->st_stid);
6130 	nfsd4_bump_seqid(cstate, status);
6131 	if (conflock)
6132 		locks_free_lock(conflock);
6133 	return status;
6134 }
6135 
6136 /*
6137  * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
6138  * so we do a temporary open here just to get an open file to pass to
6139  * vfs_test_lock.  (Arguably perhaps test_lock should be done with an
6140  * vfs_test_lock.  (Arguably, test_lock should be done with an
6141  */
6142 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
6143 {
6144 	struct file *file;
6145 	__be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
6146 	if (!err) {
6147 		err = nfserrno(vfs_test_lock(file, lock));
6148 		fput(file);
6149 	}
6150 	return err;
6151 }
6152 
6153 /*
6154  * LOCKT operation
6155  */
6156 __be32
6157 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6158 	    union nfsd4_op_u *u)
6159 {
6160 	struct nfsd4_lockt *lockt = &u->lockt;
6161 	struct file_lock *file_lock = NULL;
6162 	struct nfs4_lockowner *lo = NULL;
6163 	__be32 status;
6164 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6165 
6166 	if (locks_in_grace(SVC_NET(rqstp)))
6167 		return nfserr_grace;
6168 
6169 	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
6170 		 return nfserr_inval;
6171 
6172 	if (!nfsd4_has_session(cstate)) {
6173 		status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
6174 		if (status)
6175 			goto out;
6176 	}
6177 
6178 	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
6179 		goto out;
6180 
6181 	file_lock = locks_alloc_lock();
6182 	if (!file_lock) {
6183 		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6184 		status = nfserr_jukebox;
6185 		goto out;
6186 	}
6187 
6188 	switch (lockt->lt_type) {
6189 		case NFS4_READ_LT:
6190 		case NFS4_READW_LT:
6191 			file_lock->fl_type = F_RDLCK;
6192 		break;
6193 		case NFS4_WRITE_LT:
6194 		case NFS4_WRITEW_LT:
6195 			file_lock->fl_type = F_WRLCK;
6196 		break;
6197 		default:
6198 			dprintk("NFSD: nfs4_lockt: bad lock type!\n");
6199 			status = nfserr_inval;
6200 		goto out;
6201 	}
6202 
6203 	lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
6204 	if (lo)
6205 		file_lock->fl_owner = (fl_owner_t)lo;
6206 	file_lock->fl_pid = current->tgid;
6207 	file_lock->fl_flags = FL_POSIX;
6208 
6209 	file_lock->fl_start = lockt->lt_offset;
6210 	file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
6211 
6212 	nfs4_transform_lock_offset(file_lock);
6213 
6214 	status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
6215 	if (status)
6216 		goto out;
6217 
6218 	if (file_lock->fl_type != F_UNLCK) {
6219 		status = nfserr_denied;
6220 		nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
6221 	}
6222 out:
6223 	if (lo)
6224 		nfs4_put_stateowner(&lo->lo_owner);
6225 	if (file_lock)
6226 		locks_free_lock(file_lock);
6227 	return status;
6228 }
6229 
6230 __be32
6231 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6232 	    union nfsd4_op_u *u)
6233 {
6234 	struct nfsd4_locku *locku = &u->locku;
6235 	struct nfs4_ol_stateid *stp;
6236 	struct file *filp = NULL;
6237 	struct file_lock *file_lock = NULL;
6238 	__be32 status;
6239 	int err;
6240 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6241 
6242 	dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
6243 		(long long) locku->lu_offset,
6244 		(long long) locku->lu_length);
6245 
6246 	if (check_lock_length(locku->lu_offset, locku->lu_length))
6247 		 return nfserr_inval;
6248 
6249 	status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
6250 					&locku->lu_stateid, NFS4_LOCK_STID,
6251 					&stp, nn);
6252 	if (status)
6253 		goto out;
6254 	filp = find_any_file(stp->st_stid.sc_file);
6255 	if (!filp) {
6256 		status = nfserr_lock_range;
6257 		goto put_stateid;
6258 	}
6259 	file_lock = locks_alloc_lock();
6260 	if (!file_lock) {
6261 		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6262 		status = nfserr_jukebox;
6263 		goto fput;
6264 	}
6265 
6266 	file_lock->fl_type = F_UNLCK;
6267 	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
6268 	file_lock->fl_pid = current->tgid;
6269 	file_lock->fl_file = filp;
6270 	file_lock->fl_flags = FL_POSIX;
6271 	file_lock->fl_lmops = &nfsd_posix_mng_ops;
6272 	file_lock->fl_start = locku->lu_offset;
6273 
6274 	file_lock->fl_end = last_byte_offset(locku->lu_offset,
6275 						locku->lu_length);
6276 	nfs4_transform_lock_offset(file_lock);
6277 
6278 	err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
6279 	if (err) {
6280 		dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
6281 		goto out_nfserr;
6282 	}
6283 	nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
6284 fput:
6285 	fput(filp);
6286 put_stateid:
6287 	mutex_unlock(&stp->st_mutex);
6288 	nfs4_put_stid(&stp->st_stid);
6289 out:
6290 	nfsd4_bump_seqid(cstate, status);
6291 	if (file_lock)
6292 		locks_free_lock(file_lock);
6293 	return status;
6294 
6295 out_nfserr:
6296 	status = nfserrno(err);
6297 	goto fput;
6298 }
6299 
6300 /*
6301  * returns
6302  * 	true:  locks held by lockowner
6303  * 	false: no locks held by lockowner
6304  */
6305 static bool
6306 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
6307 {
6308 	struct file_lock *fl;
6309 	int status = false;
6310 	struct file *filp = find_any_file(fp);
6311 	struct inode *inode;
6312 	struct file_lock_context *flctx;
6313 
6314 	if (!filp) {
6315 		/* Any valid lock stateid should have some sort of access */
6316 		WARN_ON_ONCE(1);
6317 		return status;
6318 	}
6319 
6320 	inode = file_inode(filp);
6321 	flctx = inode->i_flctx;
6322 
6323 	if (flctx && !list_empty_careful(&flctx->flc_posix)) {
6324 		spin_lock(&flctx->flc_lock);
6325 		list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
6326 			if (fl->fl_owner == (fl_owner_t)lowner) {
6327 				status = true;
6328 				break;
6329 			}
6330 		}
6331 		spin_unlock(&flctx->flc_lock);
6332 	}
6333 	fput(filp);
6334 	return status;
6335 }
6336 
6337 __be32
6338 nfsd4_release_lockowner(struct svc_rqst *rqstp,
6339 			struct nfsd4_compound_state *cstate,
6340 			union nfsd4_op_u *u)
6341 {
6342 	struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
6343 	clientid_t *clid = &rlockowner->rl_clientid;
6344 	struct nfs4_stateowner *sop;
6345 	struct nfs4_lockowner *lo = NULL;
6346 	struct nfs4_ol_stateid *stp;
6347 	struct xdr_netobj *owner = &rlockowner->rl_owner;
6348 	unsigned int hashval = ownerstr_hashval(owner);
6349 	__be32 status;
6350 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6351 	struct nfs4_client *clp;
6352 	LIST_HEAD(reaplist);
6353 
6354 	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
6355 		clid->cl_boot, clid->cl_id);
6356 
6357 	status = lookup_clientid(clid, cstate, nn);
6358 	if (status)
6359 		return status;
6360 
6361 	clp = cstate->clp;
6362 	/* Find the matching lock stateowner */
6363 	spin_lock(&clp->cl_lock);
6364 	list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
6365 			    so_strhash) {
6366 
6367 		if (sop->so_is_open_owner || !same_owner_str(sop, owner))
6368 			continue;
6369 
6370 		/* see if there are still any locks associated with it */
6371 		lo = lockowner(sop);
6372 		list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
6373 			if (check_for_locks(stp->st_stid.sc_file, lo)) {
6374 				status = nfserr_locks_held;
6375 				spin_unlock(&clp->cl_lock);
6376 				return status;
6377 			}
6378 		}
6379 
6380 		nfs4_get_stateowner(sop);
6381 		break;
6382 	}
6383 	if (!lo) {
6384 		spin_unlock(&clp->cl_lock);
6385 		return status;
6386 	}
6387 
6388 	unhash_lockowner_locked(lo);
6389 	while (!list_empty(&lo->lo_owner.so_stateids)) {
6390 		stp = list_first_entry(&lo->lo_owner.so_stateids,
6391 				       struct nfs4_ol_stateid,
6392 				       st_perstateowner);
6393 		WARN_ON(!unhash_lock_stateid(stp));
6394 		put_ol_stateid_locked(stp, &reaplist);
6395 	}
6396 	spin_unlock(&clp->cl_lock);
6397 	free_ol_stateid_reaplist(&reaplist);
6398 	remove_blocked_locks(lo);
6399 	nfs4_put_stateowner(&lo->lo_owner);
6400 
6401 	return status;
6402 }
6403 
6404 static inline struct nfs4_client_reclaim *
6405 alloc_reclaim(void)
6406 {
6407 	return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
6408 }
6409 
6410 bool
6411 nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn)
6412 {
6413 	struct nfs4_client_reclaim *crp;
6414 
6415 	crp = nfsd4_find_reclaim_client(name, nn);
6416 	return (crp && crp->cr_clp);
6417 }
6418 
6419 /*
6420  * failure => all bets are off for reclaim after a reboot; the client gets nfserr_no_grace...
6421  */
6422 struct nfs4_client_reclaim *
6423 nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn)
6424 {
6425 	unsigned int strhashval;
6426 	struct nfs4_client_reclaim *crp;
6427 
6428 	dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
6429 	crp = alloc_reclaim();
6430 	if (crp) {
6431 		strhashval = clientstr_hashval(name);
6432 		INIT_LIST_HEAD(&crp->cr_strhash);
6433 		list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
6434 		memcpy(crp->cr_recdir, name, HEXDIR_LEN);
6435 		crp->cr_clp = NULL;
6436 		nn->reclaim_str_hashtbl_size++;
6437 	}
6438 	return crp;
6439 }
6440 
6441 void
6442 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
6443 {
6444 	list_del(&crp->cr_strhash);
6445 	kfree(crp);
6446 	nn->reclaim_str_hashtbl_size--;
6447 }
6448 
6449 void
6450 nfs4_release_reclaim(struct nfsd_net *nn)
6451 {
6452 	struct nfs4_client_reclaim *crp = NULL;
6453 	int i;
6454 
6455 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
6456 		while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
6457 			crp = list_entry(nn->reclaim_str_hashtbl[i].next,
6458 			                struct nfs4_client_reclaim, cr_strhash);
6459 			nfs4_remove_reclaim_record(crp, nn);
6460 		}
6461 	}
6462 	WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
6463 }
6464 
6465 /*
6466  * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
6467 struct nfs4_client_reclaim *
6468 nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
6469 {
6470 	unsigned int strhashval;
6471 	struct nfs4_client_reclaim *crp = NULL;
6472 
6473 	dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir);
6474 
6475 	strhashval = clientstr_hashval(recdir);
6476 	list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
6477 		if (same_name(crp->cr_recdir, recdir)) {
6478 			return crp;
6479 		}
6480 	}
6481 	return NULL;
6482 }
6483 
6484 /*
6485  * Called from OPEN. Look for clientid in reclaim list.
6486  */
6487 __be32
6488 nfs4_check_open_reclaim(clientid_t *clid,
6489 		struct nfsd4_compound_state *cstate,
6490 		struct nfsd_net *nn)
6491 {
6492 	__be32 status;
6493 
6494 	/* find clientid in conf_id_hashtbl */
6495 	status = lookup_clientid(clid, cstate, nn);
6496 	if (status)
6497 		return nfserr_reclaim_bad;
6498 
6499 	if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags))
6500 		return nfserr_no_grace;
6501 
6502 	if (nfsd4_client_record_check(cstate->clp))
6503 		return nfserr_reclaim_bad;
6504 
6505 	return nfs_ok;
6506 }
6507 
6508 #ifdef CONFIG_NFSD_FAULT_INJECTION
6509 static inline void
6510 put_client(struct nfs4_client *clp)
6511 {
6512 	atomic_dec(&clp->cl_refcount);
6513 }
6514 
6515 static struct nfs4_client *
6516 nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
6517 {
6518 	struct nfs4_client *clp;
6519 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6520 					  nfsd_net_id);
6521 
6522 	if (!nfsd_netns_ready(nn))
6523 		return NULL;
6524 
6525 	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6526 		if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
6527 			return clp;
6528 	}
6529 	return NULL;
6530 }
6531 
6532 u64
6533 nfsd_inject_print_clients(void)
6534 {
6535 	struct nfs4_client *clp;
6536 	u64 count = 0;
6537 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6538 					  nfsd_net_id);
6539 	char buf[INET6_ADDRSTRLEN];
6540 
6541 	if (!nfsd_netns_ready(nn))
6542 		return 0;
6543 
6544 	spin_lock(&nn->client_lock);
6545 	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6546 		rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
6547 		pr_info("NFS Client: %s\n", buf);
6548 		++count;
6549 	}
6550 	spin_unlock(&nn->client_lock);
6551 
6552 	return count;
6553 }
6554 
6555 u64
6556 nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size)
6557 {
6558 	u64 count = 0;
6559 	struct nfs4_client *clp;
6560 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6561 					  nfsd_net_id);
6562 
6563 	if (!nfsd_netns_ready(nn))
6564 		return count;
6565 
6566 	spin_lock(&nn->client_lock);
6567 	clp = nfsd_find_client(addr, addr_size);
6568 	if (clp) {
6569 		if (mark_client_expired_locked(clp) == nfs_ok)
6570 			++count;
6571 		else
6572 			clp = NULL;
6573 	}
6574 	spin_unlock(&nn->client_lock);
6575 
6576 	if (clp)
6577 		expire_client(clp);
6578 
6579 	return count;
6580 }
6581 
6582 u64
6583 nfsd_inject_forget_clients(u64 max)
6584 {
6585 	u64 count = 0;
6586 	struct nfs4_client *clp, *next;
6587 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6588 						nfsd_net_id);
6589 	LIST_HEAD(reaplist);
6590 
6591 	if (!nfsd_netns_ready(nn))
6592 		return count;
6593 
6594 	spin_lock(&nn->client_lock);
6595 	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
6596 		if (mark_client_expired_locked(clp) == nfs_ok) {
6597 			list_add(&clp->cl_lru, &reaplist);
6598 			if (max != 0 && ++count >= max)
6599 				break;
6600 		}
6601 	}
6602 	spin_unlock(&nn->client_lock);
6603 
6604 	list_for_each_entry_safe(clp, next, &reaplist, cl_lru)
6605 		expire_client(clp);
6606 
6607 	return count;
6608 }
6609 
6610 static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
6611 			     const char *type)
6612 {
6613 	char buf[INET6_ADDRSTRLEN];
6614 	rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
6615 	printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
6616 }
6617 
6618 static void
6619 nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
6620 			     struct list_head *collect)
6621 {
6622 	struct nfs4_client *clp = lst->st_stid.sc_client;
6623 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6624 					  nfsd_net_id);
6625 
6626 	if (!collect)
6627 		return;
6628 
6629 	lockdep_assert_held(&nn->client_lock);
6630 	atomic_inc(&clp->cl_refcount);
6631 	list_add(&lst->st_locks, collect);
6632 }
6633 
6634 static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
6635 				    struct list_head *collect,
6636 				    bool (*func)(struct nfs4_ol_stateid *))
6637 {
6638 	struct nfs4_openowner *oop;
6639 	struct nfs4_ol_stateid *stp, *st_next;
6640 	struct nfs4_ol_stateid *lst, *lst_next;
6641 	u64 count = 0;
6642 
6643 	spin_lock(&clp->cl_lock);
6644 	list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
6645 		list_for_each_entry_safe(stp, st_next,
6646 				&oop->oo_owner.so_stateids, st_perstateowner) {
6647 			list_for_each_entry_safe(lst, lst_next,
6648 					&stp->st_locks, st_locks) {
6649 				if (func) {
6650 					if (func(lst))
6651 						nfsd_inject_add_lock_to_list(lst,
6652 									collect);
6653 				}
6654 				++count;
6655 				/*
6656 				 * Despite the fact that these functions deal
6657 				 * with 64-bit integers for "count", we must
6658 				 * ensure that it doesn't blow up the
6659 				 * clp->cl_refcount. Throw a warning if we
6660 				 * start to approach INT_MAX here.
6661 				 */
6662 				WARN_ON_ONCE(count == (INT_MAX / 2));
6663 				if (count == max)
6664 					goto out;
6665 			}
6666 		}
6667 	}
6668 out:
6669 	spin_unlock(&clp->cl_lock);
6670 
6671 	return count;
6672 }
6673 
6674 static u64
6675 nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect,
6676 			  u64 max)
6677 {
6678 	return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid);
6679 }
6680 
6681 static u64
6682 nfsd_print_client_locks(struct nfs4_client *clp)
6683 {
6684 	u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL);
6685 	nfsd_print_count(clp, count, "locked files");
6686 	return count;
6687 }
6688 
6689 u64
6690 nfsd_inject_print_locks(void)
6691 {
6692 	struct nfs4_client *clp;
6693 	u64 count = 0;
6694 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6695 						nfsd_net_id);
6696 
6697 	if (!nfsd_netns_ready(nn))
6698 		return 0;
6699 
6700 	spin_lock(&nn->client_lock);
6701 	list_for_each_entry(clp, &nn->client_lru, cl_lru)
6702 		count += nfsd_print_client_locks(clp);
6703 	spin_unlock(&nn->client_lock);
6704 
6705 	return count;
6706 }
6707 
6708 static void
6709 nfsd_reap_locks(struct list_head *reaplist)
6710 {
6711 	struct nfs4_client *clp;
6712 	struct nfs4_ol_stateid *stp, *next;
6713 
6714 	list_for_each_entry_safe(stp, next, reaplist, st_locks) {
6715 		list_del_init(&stp->st_locks);
6716 		clp = stp->st_stid.sc_client;
6717 		nfs4_put_stid(&stp->st_stid);
6718 		put_client(clp);
6719 	}
6720 }
6721 
6722 u64
6723 nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size)
6724 {
6725 	unsigned int count = 0;
6726 	struct nfs4_client *clp;
6727 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6728 						nfsd_net_id);
6729 	LIST_HEAD(reaplist);
6730 
6731 	if (!nfsd_netns_ready(nn))
6732 		return count;
6733 
6734 	spin_lock(&nn->client_lock);
6735 	clp = nfsd_find_client(addr, addr_size);
6736 	if (clp)
6737 		count = nfsd_collect_client_locks(clp, &reaplist, 0);
6738 	spin_unlock(&nn->client_lock);
6739 	nfsd_reap_locks(&reaplist);
6740 	return count;
6741 }
6742 
6743 u64
6744 nfsd_inject_forget_locks(u64 max)
6745 {
6746 	u64 count = 0;
6747 	struct nfs4_client *clp;
6748 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6749 						nfsd_net_id);
6750 	LIST_HEAD(reaplist);
6751 
6752 	if (!nfsd_netns_ready(nn))
6753 		return count;
6754 
6755 	spin_lock(&nn->client_lock);
6756 	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6757 		count += nfsd_collect_client_locks(clp, &reaplist, max - count);
6758 		if (max != 0 && count >= max)
6759 			break;
6760 	}
6761 	spin_unlock(&nn->client_lock);
6762 	nfsd_reap_locks(&reaplist);
6763 	return count;
6764 }
6765 
6766 static u64
6767 nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max,
6768 			      struct list_head *collect,
6769 			      void (*func)(struct nfs4_openowner *))
6770 {
6771 	struct nfs4_openowner *oop, *next;
6772 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6773 						nfsd_net_id);
6774 	u64 count = 0;
6775 
6776 	lockdep_assert_held(&nn->client_lock);
6777 
6778 	spin_lock(&clp->cl_lock);
6779 	list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
6780 		if (func) {
6781 			func(oop);
6782 			if (collect) {
6783 				atomic_inc(&clp->cl_refcount);
6784 				list_add(&oop->oo_perclient, collect);
6785 			}
6786 		}
6787 		++count;
6788 		/*
6789 		 * Despite the fact that these functions deal with
6790 		 * 64-bit integers for "count", we must ensure that
6791 		 * it doesn't blow up the clp->cl_refcount. Throw a
6792 		 * warning if we start to approach INT_MAX here.
6793 		 */
6794 		WARN_ON_ONCE(count == (INT_MAX / 2));
6795 		if (count == max)
6796 			break;
6797 	}
6798 	spin_unlock(&clp->cl_lock);
6799 
6800 	return count;
6801 }
6802 
6803 static u64
6804 nfsd_print_client_openowners(struct nfs4_client *clp)
6805 {
6806 	u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL);
6807 
6808 	nfsd_print_count(clp, count, "openowners");
6809 	return count;
6810 }
6811 
6812 static u64
6813 nfsd_collect_client_openowners(struct nfs4_client *clp,
6814 			       struct list_head *collect, u64 max)
6815 {
6816 	return nfsd_foreach_client_openowner(clp, max, collect,
6817 						unhash_openowner_locked);
6818 }
6819 
6820 u64
6821 nfsd_inject_print_openowners(void)
6822 {
6823 	struct nfs4_client *clp;
6824 	u64 count = 0;
6825 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6826 						nfsd_net_id);
6827 
6828 	if (!nfsd_netns_ready(nn))
6829 		return 0;
6830 
6831 	spin_lock(&nn->client_lock);
6832 	list_for_each_entry(clp, &nn->client_lru, cl_lru)
6833 		count += nfsd_print_client_openowners(clp);
6834 	spin_unlock(&nn->client_lock);
6835 
6836 	return count;
6837 }
6838 
6839 static void
6840 nfsd_reap_openowners(struct list_head *reaplist)
6841 {
6842 	struct nfs4_client *clp;
6843 	struct nfs4_openowner *oop, *next;
6844 
6845 	list_for_each_entry_safe(oop, next, reaplist, oo_perclient) {
6846 		list_del_init(&oop->oo_perclient);
6847 		clp = oop->oo_owner.so_client;
6848 		release_openowner(oop);
6849 		put_client(clp);
6850 	}
6851 }
6852 
6853 u64
6854 nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr,
6855 				     size_t addr_size)
6856 {
6857 	unsigned int count = 0;
6858 	struct nfs4_client *clp;
6859 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6860 						nfsd_net_id);
6861 	LIST_HEAD(reaplist);
6862 
6863 	if (!nfsd_netns_ready(nn))
6864 		return count;
6865 
6866 	spin_lock(&nn->client_lock);
6867 	clp = nfsd_find_client(addr, addr_size);
6868 	if (clp)
6869 		count = nfsd_collect_client_openowners(clp, &reaplist, 0);
6870 	spin_unlock(&nn->client_lock);
6871 	nfsd_reap_openowners(&reaplist);
6872 	return count;
6873 }
6874 
6875 u64
6876 nfsd_inject_forget_openowners(u64 max)
6877 {
6878 	u64 count = 0;
6879 	struct nfs4_client *clp;
6880 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6881 						nfsd_net_id);
6882 	LIST_HEAD(reaplist);
6883 
6884 	if (!nfsd_netns_ready(nn))
6885 		return count;
6886 
6887 	spin_lock(&nn->client_lock);
6888 	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6889 		count += nfsd_collect_client_openowners(clp, &reaplist,
6890 							max - count);
6891 		if (max != 0 && count >= max)
6892 			break;
6893 	}
6894 	spin_unlock(&nn->client_lock);
6895 	nfsd_reap_openowners(&reaplist);
6896 	return count;
6897 }
6898 
6899 static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
6900 				     struct list_head *victims)
6901 {
6902 	struct nfs4_delegation *dp, *next;
6903 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6904 						nfsd_net_id);
6905 	u64 count = 0;
6906 
6907 	lockdep_assert_held(&nn->client_lock);
6908 
6909 	spin_lock(&state_lock);
6910 	list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
6911 		if (victims) {
6912 			/*
6913 			 * It's not safe to mess with delegations that have a
6914 			 * non-zero dl_time. They might have already been broken
6915 			 * and could be processed by the laundromat outside of
6916 			 * the state_lock. Just leave them be.
6917 			 */
6918 			if (dp->dl_time != 0)
6919 				continue;
6920 
6921 			atomic_inc(&clp->cl_refcount);
6922 			WARN_ON(!unhash_delegation_locked(dp));
6923 			list_add(&dp->dl_recall_lru, victims);
6924 		}
6925 		++count;
6926 		/*
6927 		 * Despite the fact that these functions deal with
6928 		 * 64-bit integers for "count", we must ensure that
6929 		 * it doesn't blow up the clp->cl_refcount. Throw a
6930 		 * warning if we start to approach INT_MAX here.
6931 		 */
6932 		WARN_ON_ONCE(count == (INT_MAX / 2));
6933 		if (count == max)
6934 			break;
6935 	}
6936 	spin_unlock(&state_lock);
6937 	return count;
6938 }
6939 
6940 static u64
6941 nfsd_print_client_delegations(struct nfs4_client *clp)
6942 {
6943 	u64 count = nfsd_find_all_delegations(clp, 0, NULL);
6944 
6945 	nfsd_print_count(clp, count, "delegations");
6946 	return count;
6947 }
6948 
6949 u64
6950 nfsd_inject_print_delegations(void)
6951 {
6952 	struct nfs4_client *clp;
6953 	u64 count = 0;
6954 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6955 						nfsd_net_id);
6956 
6957 	if (!nfsd_netns_ready(nn))
6958 		return 0;
6959 
6960 	spin_lock(&nn->client_lock);
6961 	list_for_each_entry(clp, &nn->client_lru, cl_lru)
6962 		count += nfsd_print_client_delegations(clp);
6963 	spin_unlock(&nn->client_lock);
6964 
6965 	return count;
6966 }
6967 
6968 static void
6969 nfsd_forget_delegations(struct list_head *reaplist)
6970 {
6971 	struct nfs4_client *clp;
6972 	struct nfs4_delegation *dp, *next;
6973 
6974 	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
6975 		list_del_init(&dp->dl_recall_lru);
6976 		clp = dp->dl_stid.sc_client;
6977 		revoke_delegation(dp);
6978 		put_client(clp);
6979 	}
6980 }
6981 
6982 u64
6983 nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr,
6984 				      size_t addr_size)
6985 {
6986 	u64 count = 0;
6987 	struct nfs4_client *clp;
6988 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6989 						nfsd_net_id);
6990 	LIST_HEAD(reaplist);
6991 
6992 	if (!nfsd_netns_ready(nn))
6993 		return count;
6994 
6995 	spin_lock(&nn->client_lock);
6996 	clp = nfsd_find_client(addr, addr_size);
6997 	if (clp)
6998 		count = nfsd_find_all_delegations(clp, 0, &reaplist);
6999 	spin_unlock(&nn->client_lock);
7000 
7001 	nfsd_forget_delegations(&reaplist);
7002 	return count;
7003 }
7004 
7005 u64
7006 nfsd_inject_forget_delegations(u64 max)
7007 {
7008 	u64 count = 0;
7009 	struct nfs4_client *clp;
7010 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7011 						nfsd_net_id);
7012 	LIST_HEAD(reaplist);
7013 
7014 	if (!nfsd_netns_ready(nn))
7015 		return count;
7016 
7017 	spin_lock(&nn->client_lock);
7018 	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
7019 		count += nfsd_find_all_delegations(clp, max - count, &reaplist);
7020 		if (max != 0 && count >= max)
7021 			break;
7022 	}
7023 	spin_unlock(&nn->client_lock);
7024 	nfsd_forget_delegations(&reaplist);
7025 	return count;
7026 }
7027 
7028 static void
7029 nfsd_recall_delegations(struct list_head *reaplist)
7030 {
7031 	struct nfs4_client *clp;
7032 	struct nfs4_delegation *dp, *next;
7033 
7034 	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
7035 		list_del_init(&dp->dl_recall_lru);
7036 		clp = dp->dl_stid.sc_client;
7037 		/*
7038 		 * We skipped all entries that had a zero dl_time before,
7039 		 * so we can now reset the dl_time back to 0. If a delegation
7040 		 * break comes in now, then it won't make any difference since
7041 		 * we're recalling it either way.
7042 		 */
7043 		spin_lock(&state_lock);
7044 		dp->dl_time = 0;
7045 		spin_unlock(&state_lock);
7046 		nfsd_break_one_deleg(dp);
7047 		put_client(clp);
7048 	}
7049 }
7050 
7051 u64
7052 nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr,
7053 				      size_t addr_size)
7054 {
7055 	u64 count = 0;
7056 	struct nfs4_client *clp;
7057 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7058 						nfsd_net_id);
7059 	LIST_HEAD(reaplist);
7060 
7061 	if (!nfsd_netns_ready(nn))
7062 		return count;
7063 
7064 	spin_lock(&nn->client_lock);
7065 	clp = nfsd_find_client(addr, addr_size);
7066 	if (clp)
7067 		count = nfsd_find_all_delegations(clp, 0, &reaplist);
7068 	spin_unlock(&nn->client_lock);
7069 
7070 	nfsd_recall_delegations(&reaplist);
7071 	return count;
7072 }
7073 
7074 u64
7075 nfsd_inject_recall_delegations(u64 max)
7076 {
7077 	u64 count = 0;
7078 	struct nfs4_client *clp, *next;
7079 	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7080 						nfsd_net_id);
7081 	LIST_HEAD(reaplist);
7082 
7083 	if (!nfsd_netns_ready(nn))
7084 		return count;
7085 
7086 	spin_lock(&nn->client_lock);
7087 	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
7088 		count += nfsd_find_all_delegations(clp, max - count, &reaplist);
7089 		if (max != 0 && count >= max)
7090 			break;
7091 	}
7092 	spin_unlock(&nn->client_lock);
7093 	nfsd_recall_delegations(&reaplist);
7094 	return count;
7095 }
7096 #endif /* CONFIG_NFSD_FAULT_INJECTION */
7097 
7098 /*
7099  * Since the lifetime of a delegation isn't limited to that of an open, a
7100  * client may quite reasonably hang on to a delegation as long as it has
7101  * the inode cached.  This becomes an obvious problem the first time a
7102  * client's inode cache approaches the size of the server's total memory.
7103  *
7104  * For now we avoid this problem by imposing a hard limit on the number
7105  * of delegations, which varies according to the server's memory size.
7106  */
7107 static void
7108 set_max_delegations(void)
7109 {
7110 	/*
7111 	 * Allow at most 4 delegations per megabyte of RAM.  Quick
7112 	 * estimates suggest that in the worst case (where every delegation
7113 	 * is for a different inode), a delegation could take about 1.5K,
7114 	 * giving a worst case usage of about 6% of memory.
7115 	 */
7116 	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
7117 }
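/*
 * The shift above is "4 delegations per megabyte" written as one
 * operation: pages << PAGE_SHIFT converts to bytes, >> 20 to megabytes,
 * and << 2 multiplies by four, giving a net shift of (20 - 2 - PAGE_SHIFT).
 */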
7118 
7119 static int nfs4_state_create_net(struct net *net)
7120 {
7121 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7122 	int i;
7123 
7124 	nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) *
7125 			CLIENT_HASH_SIZE, GFP_KERNEL);
7126 	if (!nn->conf_id_hashtbl)
7127 		goto err;
7128 	nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) *
7129 			CLIENT_HASH_SIZE, GFP_KERNEL);
7130 	if (!nn->unconf_id_hashtbl)
7131 		goto err_unconf_id;
7132 	nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) *
7133 			SESSION_HASH_SIZE, GFP_KERNEL);
7134 	if (!nn->sessionid_hashtbl)
7135 		goto err_sessionid;
7136 
7137 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7138 		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
7139 		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
7140 	}
7141 	for (i = 0; i < SESSION_HASH_SIZE; i++)
7142 		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
7143 	nn->conf_name_tree = RB_ROOT;
7144 	nn->unconf_name_tree = RB_ROOT;
7145 	nn->boot_time = get_seconds();
7146 	nn->grace_ended = false;
7147 	nn->nfsd4_manager.block_opens = true;
7148 	INIT_LIST_HEAD(&nn->nfsd4_manager.list);
7149 	INIT_LIST_HEAD(&nn->client_lru);
7150 	INIT_LIST_HEAD(&nn->close_lru);
7151 	INIT_LIST_HEAD(&nn->del_recall_lru);
7152 	spin_lock_init(&nn->client_lock);
7153 
7154 	spin_lock_init(&nn->blocked_locks_lock);
7155 	INIT_LIST_HEAD(&nn->blocked_locks_lru);
7156 
7157 	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
7158 	get_net(net);
7159 
7160 	return 0;
7161 
7162 err_sessionid:
7163 	kfree(nn->unconf_id_hashtbl);
7164 err_unconf_id:
7165 	kfree(nn->conf_id_hashtbl);
7166 err:
7167 	return -ENOMEM;
7168 }
7169 
7170 static void
7171 nfs4_state_destroy_net(struct net *net)
7172 {
7173 	int i;
7174 	struct nfs4_client *clp = NULL;
7175 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7176 
7177 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7178 		while (!list_empty(&nn->conf_id_hashtbl[i])) {
7179 			clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7180 			destroy_client(clp);
7181 		}
7182 	}
7183 
7184 	WARN_ON(!list_empty(&nn->blocked_locks_lru));
7185 
7186 	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7187 		while (!list_empty(&nn->unconf_id_hashtbl[i])) {
7188 			clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7189 			destroy_client(clp);
7190 		}
7191 	}
7192 
7193 	kfree(nn->sessionid_hashtbl);
7194 	kfree(nn->unconf_id_hashtbl);
7195 	kfree(nn->conf_id_hashtbl);
7196 	put_net(net);
7197 }
7198 
7199 int
7200 nfs4_state_start_net(struct net *net)
7201 {
7202 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7203 	int ret;
7204 
7205 	ret = nfs4_state_create_net(net);
7206 	if (ret)
7207 		return ret;
7208 	locks_start_grace(net, &nn->nfsd4_manager);
7209 	nfsd4_client_tracking_init(net);
7210 	printk(KERN_INFO "NFSD: starting %ld-second grace period (net %x)\n",
7211 	       nn->nfsd4_grace, net->ns.inum);
7212 	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
7213 	return 0;
7214 }
7215 
7216 /* initialization to perform when the nfsd service is started: */
7217 
7218 int
7219 nfs4_state_start(void)
7220 {
7221 	int ret;
7222 
7223 	ret = set_callback_cred();
7224 	if (ret)
7225 		return ret;
7226 
7227 	laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
7228 	if (laundry_wq == NULL) {
7229 		ret = -ENOMEM;
7230 		goto out_cleanup_cred;
7231 	}
7232 	ret = nfsd4_create_callback_queue();
7233 	if (ret)
7234 		goto out_free_laundry;
7235 
7236 	set_max_delegations();
7237 	return 0;
7238 
7239 out_free_laundry:
7240 	destroy_workqueue(laundry_wq);
7241 out_cleanup_cred:
7242 	cleanup_callback_cred();
7243 	return ret;
7244 }
7245 
7246 void
7247 nfs4_state_shutdown_net(struct net *net)
7248 {
7249 	struct nfs4_delegation *dp = NULL;
7250 	struct list_head *pos, *next, reaplist;
7251 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7252 
7253 	cancel_delayed_work_sync(&nn->laundromat_work);
7254 	locks_end_grace(&nn->nfsd4_manager);
7255 
7256 	INIT_LIST_HEAD(&reaplist);
7257 	spin_lock(&state_lock);
7258 	list_for_each_safe(pos, next, &nn->del_recall_lru) {
7259 		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
7260 		WARN_ON(!unhash_delegation_locked(dp));
7261 		list_add(&dp->dl_recall_lru, &reaplist);
7262 	}
7263 	spin_unlock(&state_lock);
7264 	list_for_each_safe(pos, next, &reaplist) {
7265 		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
7266 		list_del_init(&dp->dl_recall_lru);
7267 		put_clnt_odstate(dp->dl_clnt_odstate);
7268 		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
7269 		nfs4_put_stid(&dp->dl_stid);
7270 	}
7271 
7272 	nfsd4_client_tracking_exit(net);
7273 	nfs4_state_destroy_net(net);
7274 }
7275 
7276 void
7277 nfs4_state_shutdown(void)
7278 {
7279 	destroy_workqueue(laundry_wq);
7280 	nfsd4_destroy_callback_queue();
7281 	cleanup_callback_cred();
7282 }
7283 
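/*
 * Helpers for the NFSv4.1 "current stateid": put_stateid() saves the
 * stateid an operation just produced in the compound state, and
 * get_stateid() substitutes that saved value when a later operation in
 * the same compound carries the special current-stateid marker.
 */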
7284 static void
7285 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7286 {
7287 	if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
7288 		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
7289 }
7290 
7291 static void
7292 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7293 {
7294 	if (cstate->minorversion) {
7295 		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
7296 		SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
7297 	}
7298 }
7299 
7300 void
7301 clear_current_stateid(struct nfsd4_compound_state *cstate)
7302 {
7303 	CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
7304 }
7305 
7306 /*
7307  * functions to set current state id
7308  */
7309 void
7310 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
7311 		union nfsd4_op_u *u)
7312 {
7313 	put_stateid(cstate, &u->open_downgrade.od_stateid);
7314 }
7315 
7316 void
7317 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
7318 		union nfsd4_op_u *u)
7319 {
7320 	put_stateid(cstate, &u->open.op_stateid);
7321 }
7322 
7323 void
7324 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
7325 		union nfsd4_op_u *u)
7326 {
7327 	put_stateid(cstate, &u->close.cl_stateid);
7328 }
7329 
7330 void
7331 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
7332 		union nfsd4_op_u *u)
7333 {
7334 	put_stateid(cstate, &u->lock.lk_resp_stateid);
7335 }
7336 
7337 /*
7338  * functions to consume current state id
7339  */
7340 
7341 void
7342 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
7343 		union nfsd4_op_u *u)
7344 {
7345 	get_stateid(cstate, &u->open_downgrade.od_stateid);
7346 }
7347 
7348 void
7349 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
7350 		union nfsd4_op_u *u)
7351 {
7352 	get_stateid(cstate, &u->delegreturn.dr_stateid);
7353 }
7354 
7355 void
7356 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
7357 		union nfsd4_op_u *u)
7358 {
7359 	get_stateid(cstate, &u->free_stateid.fr_stateid);
7360 }
7361 
7362 void
7363 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
7364 		union nfsd4_op_u *u)
7365 {
7366 	get_stateid(cstate, &u->setattr.sa_stateid);
7367 }
7368 
7369 void
7370 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
7371 		union nfsd4_op_u *u)
7372 {
7373 	get_stateid(cstate, &u->close.cl_stateid);
7374 }
7375 
7376 void
7377 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
7378 		union nfsd4_op_u *u)
7379 {
7380 	get_stateid(cstate, &u->locku.lu_stateid);
7381 }
7382 
7383 void
7384 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
7385 		union nfsd4_op_u *u)
7386 {
7387 	get_stateid(cstate, &u->read.rd_stateid);
7388 }
7389 
7390 void
7391 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
7392 		union nfsd4_op_u *u)
7393 {
7394 	get_stateid(cstate, &u->write.wr_stateid);
7395 }
7396