xref: /linux/fs/nfs/nfs4proc.c (revision 5148fa52a12fa1b97c730b2fe321f2aad7ea041c)
1 /*
2  *  fs/nfs/nfs4proc.c
3  *
4  *  Client-side procedure declarations for NFSv4.
5  *
6  *  Copyright (c) 2002 The Regents of the University of Michigan.
7  *  All rights reserved.
8  *
9  *  Kendrick Smith <kmsmith@umich.edu>
10  *  Andy Adamson   <andros@umich.edu>
11  *
12  *  Redistribution and use in source and binary forms, with or without
13  *  modification, are permitted provided that the following conditions
14  *  are met:
15  *
16  *  1. Redistributions of source code must retain the above copyright
17  *     notice, this list of conditions and the following disclaimer.
18  *  2. Redistributions in binary form must reproduce the above copyright
19  *     notice, this list of conditions and the following disclaimer in the
20  *     documentation and/or other materials provided with the distribution.
21  *  3. Neither the name of the University nor the names of its
22  *     contributors may be used to endorse or promote products derived
23  *     from this software without specific prior written permission.
24  *
25  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28  *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32  *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33  *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34  *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 #include <linux/mm.h>
39 #include <linux/delay.h>
40 #include <linux/errno.h>
41 #include <linux/string.h>
42 #include <linux/ratelimit.h>
43 #include <linux/printk.h>
44 #include <linux/slab.h>
45 #include <linux/sunrpc/clnt.h>
46 #include <linux/sunrpc/gss_api.h>
47 #include <linux/nfs.h>
48 #include <linux/nfs4.h>
49 #include <linux/nfs_fs.h>
50 #include <linux/nfs_page.h>
51 #include <linux/nfs_mount.h>
52 #include <linux/namei.h>
53 #include <linux/mount.h>
54 #include <linux/module.h>
55 #include <linux/nfs_idmap.h>
56 #include <linux/sunrpc/bc_xprt.h>
57 #include <linux/xattr.h>
58 #include <linux/utsname.h>
59 #include <linux/freezer.h>
60 
61 #include "nfs4_fs.h"
62 #include "delegation.h"
63 #include "internal.h"
64 #include "iostat.h"
65 #include "callback.h"
66 #include "pnfs.h"
67 
#define NFSDBG_FACILITY		NFSDBG_PROC

/* Bounds for the exponential backoff used by nfs4_delay() */
#define NFS4_POLL_RETRY_MIN	(HZ/10)
#define NFS4_POLL_RETRY_MAX	(15*HZ)

#define NFS4_MAX_LOOP_ON_RECOVER (10)

/* Number of slots requested for the NFSv4.1 session fore channel */
static unsigned short max_session_slots = NFS4_DEF_SLOT_TABLE_SIZE;

/* Forward declarations */
struct nfs4_opendata;
static int _nfs4_proc_open(struct nfs4_opendata *data);
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
			    struct nfs_fattr *fattr, struct iattr *sattr,
			    struct nfs4_state *state);
#ifdef CONFIG_NFS_V4_1
static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *);
static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *);
#endif
91 /* Prevent leaks of NFSv4 errors into userland */
92 static int nfs4_map_errors(int err)
93 {
94 	if (err >= -1000)
95 		return err;
96 	switch (err) {
97 	case -NFS4ERR_RESOURCE:
98 		return -EREMOTEIO;
99 	case -NFS4ERR_WRONGSEC:
100 		return -EPERM;
101 	case -NFS4ERR_BADOWNER:
102 	case -NFS4ERR_BADNAME:
103 		return -EINVAL;
104 	default:
105 		dprintk("%s could not handle NFSv4 error %d\n",
106 				__func__, -err);
107 		break;
108 	}
109 	return -EIO;
110 }
111 
/*
 * This is our standard bitmap for GETATTR requests.
 * Words 0 and 1 of the NFSv4 attribute mask: the attributes needed
 * to fill in a struct nfs_fattr.
 */
const u32 nfs4_fattr_bitmap[2] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
};

/* Attributes requested when implementing statfs(2) */
const u32 nfs4_statfs_bitmap[2] = {
	FATTR4_WORD0_FILES_AVAIL
	| FATTR4_WORD0_FILES_FREE
	| FATTR4_WORD0_FILES_TOTAL,
	FATTR4_WORD1_SPACE_AVAIL
	| FATTR4_WORD1_SPACE_FREE
	| FATTR4_WORD1_SPACE_TOTAL
};

/* Attributes requested when implementing pathconf(2); word 1 is unused */
const u32 nfs4_pathconf_bitmap[2] = {
	FATTR4_WORD0_MAXLINK
	| FATTR4_WORD0_MAXNAME,
	0
};

/* Attributes requested by FSINFO (note: three bitmap words) */
const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
			| FATTR4_WORD0_MAXREAD
			| FATTR4_WORD0_MAXWRITE
			| FATTR4_WORD0_LEASE_TIME,
			FATTR4_WORD1_TIME_DELTA
			| FATTR4_WORD1_FS_LAYOUT_TYPES,
			FATTR4_WORD2_LAYOUT_BLKSIZE
};

/* Attributes requested when looking up fs_locations (referrals) */
const u32 nfs4_fs_locations_bitmap[2] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID
	| FATTR4_WORD0_FS_LOCATIONS,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID
};
174 
/*
 * Prepare a READDIR request.  NFSv4 servers never return "." and "..",
 * so for cookies 0 and 1 we hand-encode those two entries as fake XDR
 * directly into the first reply page, and send cookie 0 on the wire.
 * For cookie 2 we also send cookie 0 but fake nothing; for cookies > 2
 * we pass the cookie and verifier through unchanged.
 */
static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
		struct nfs4_readdir_arg *readdir)
{
	__be32 *start, *p;

	/* The faked entries below need up to 80 bytes of page space */
	BUG_ON(readdir->count < 80);
	if (cookie > 2) {
		readdir->cookie = cookie;
		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
		return;
	}

	readdir->cookie = 0;
	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
	if (cookie == 2)
		return;

	/*
	 * NFSv4 servers do not return entries for '.' and '..'
	 * Therefore, we fake these entries here.  We let '.'
	 * have cookie 0 and '..' have cookie 1.  Note that
	 * when talking to the server, we always send cookie 0
	 * instead of 1 or 2.
	 */
	start = p = kmap_atomic(*readdir->pages);

	if (cookie == 0) {
		*p++ = xdr_one;                                  /* next */
		*p++ = xdr_zero;                   /* cookie, first word */
		*p++ = xdr_one;                   /* cookie, second word */
		*p++ = xdr_one;                             /* entry len */
		memcpy(p, ".\0\0\0", 4);                        /* entry */
		p++;
		*p++ = xdr_one;                         /* bitmap length */
		*p++ = htonl(FATTR4_WORD0_FILEID);             /* bitmap */
		*p++ = htonl(8);              /* attribute buffer length */
		p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
	}

	*p++ = xdr_one;                                  /* next */
	*p++ = xdr_zero;                   /* cookie, first word */
	*p++ = xdr_two;                   /* cookie, second word */
	*p++ = xdr_two;                             /* entry len */
	memcpy(p, "..\0\0", 4);                         /* entry */
	p++;
	*p++ = xdr_one;                         /* bitmap length */
	*p++ = htonl(FATTR4_WORD0_FILEID);             /* bitmap */
	*p++ = htonl(8);              /* attribute buffer length */
	p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));

	/* Skip the fake entries when the real reply data is parsed */
	readdir->pgbase = (char *)p - (char *)start;
	readdir->count -= readdir->pgbase;
	kunmap_atomic(start);
}
229 
230 static int nfs4_wait_clnt_recover(struct nfs_client *clp)
231 {
232 	int res;
233 
234 	might_sleep();
235 
236 	res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
237 			nfs_wait_bit_killable, TASK_KILLABLE);
238 	return res;
239 }
240 
241 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
242 {
243 	int res = 0;
244 
245 	might_sleep();
246 
247 	if (*timeout <= 0)
248 		*timeout = NFS4_POLL_RETRY_MIN;
249 	if (*timeout > NFS4_POLL_RETRY_MAX)
250 		*timeout = NFS4_POLL_RETRY_MAX;
251 	freezable_schedule_timeout_killable(*timeout);
252 	if (fatal_signal_pending(current))
253 		res = -ERESTARTSYS;
254 	*timeout <<= 1;
255 	return res;
256 }
257 
/* This is the error handling routine for processes that are allowed
 * to sleep.
 *
 * Maps an NFSv4 error onto a recovery action.  On return,
 * exception->retry is set if the caller should simply retry the
 * operation; otherwise the (userland-safe) mapped error is returned.
 * Several cases below deliberately fall through to share handling.
 */
static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state *state = exception->state;
	struct inode *inode = exception->inode;
	int ret = errorcode;

	exception->retry = 0;
	switch(errorcode) {
		case 0:
			return 0;
		case -NFS4ERR_OPENMODE:
			/* Returning a read delegation may let the retry succeed */
			if (inode && nfs_have_delegation(inode, FMODE_READ)) {
				nfs_inode_return_delegation(inode);
				exception->retry = 1;
				return 0;
			}
			if (state == NULL)
				break;
			nfs4_schedule_stateid_recovery(server, state);
			goto wait_on_recovery;
		case -NFS4ERR_DELEG_REVOKED:
		case -NFS4ERR_ADMIN_REVOKED:
		case -NFS4ERR_BAD_STATEID:
			if (state == NULL)
				break;
			nfs_remove_bad_delegation(state->inode);
			nfs4_schedule_stateid_recovery(server, state);
			goto wait_on_recovery;
		case -NFS4ERR_EXPIRED:
			if (state != NULL)
				nfs4_schedule_stateid_recovery(server, state);
			/* Fall through: an expired state also needs a new lease */
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_STALE_CLIENTID:
			nfs4_schedule_lease_recovery(clp);
			goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
		case -NFS4ERR_BADSESSION:
		case -NFS4ERR_BADSLOT:
		case -NFS4ERR_BAD_HIGH_SLOT:
		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		case -NFS4ERR_DEADSESSION:
		case -NFS4ERR_SEQ_FALSE_RETRY:
		case -NFS4ERR_SEQ_MISORDERED:
			dprintk("%s ERROR: %d Reset session\n", __func__,
				errorcode);
			nfs4_schedule_session_recovery(clp->cl_session);
			exception->retry = 1;
			break;
#endif /* defined(CONFIG_NFS_V4_1) */
		case -NFS4ERR_FILE_OPEN:
			if (exception->timeout > HZ) {
				/* We have retried a decent amount, time to
				 * fail
				 */
				ret = -EBUSY;
				break;
			}
			/* Fall through: retry after a delay */
		case -NFS4ERR_GRACE:
		case -NFS4ERR_DELAY:
		case -EKEYEXPIRED:
			ret = nfs4_delay(server->client, &exception->timeout);
			if (ret != 0)
				break;
			/* Fall through */
		case -NFS4ERR_RETRY_UNCACHED_REP:
		case -NFS4ERR_OLD_STATEID:
			exception->retry = 1;
			break;
		case -NFS4ERR_BADOWNER:
			/* The following works around a Linux server bug! */
		case -NFS4ERR_BADNAME:
			if (server->caps & NFS_CAP_UIDGID_NOMAP) {
				server->caps &= ~NFS_CAP_UIDGID_NOMAP;
				exception->retry = 1;
				printk(KERN_WARNING "NFS: v4 server %s "
						"does not accept raw "
						"uid/gids. "
						"Reenabling the idmapper.\n",
						server->nfs_client->cl_hostname);
			}
	}
	/* We failed to handle the error */
	return nfs4_map_errors(ret);
wait_on_recovery:
	ret = nfs4_wait_clnt_recover(clp);
	if (ret == 0)
		exception->retry = 1;
	return ret;
}
350 
351 
352 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
353 {
354 	spin_lock(&clp->cl_lock);
355 	if (time_before(clp->cl_last_renewal,timestamp))
356 		clp->cl_last_renewal = timestamp;
357 	spin_unlock(&clp->cl_lock);
358 }
359 
/* Renew the lease timestamp on behalf of @server's nfs_client */
static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
{
	do_renew_lease(server->nfs_client, timestamp);
}
364 
365 #if defined(CONFIG_NFS_V4_1)
366 
367 /*
368  * nfs4_free_slot - free a slot and efficiently update slot table.
369  *
370  * freeing a slot is trivially done by clearing its respective bit
371  * in the bitmap.
372  * If the freed slotid equals highest_used_slotid we want to update it
373  * so that the server would be able to size down the slot table if needed,
374  * otherwise we know that the highest_used_slotid is still in use.
375  * When updating highest_used_slotid there may be "holes" in the bitmap
376  * so we need to scan down from highest_used_slotid to 0 looking for the now
377  * highest slotid in use.
378  * If none found, highest_used_slotid is set to NFS4_NO_SLOT.
379  *
380  * Must be called while holding tbl->slot_tbl_lock
381  */
382 static void
383 nfs4_free_slot(struct nfs4_slot_table *tbl, u32 slotid)
384 {
385 	BUG_ON(slotid >= NFS4_MAX_SLOT_TABLE);
386 	/* clear used bit in bitmap */
387 	__clear_bit(slotid, tbl->used_slots);
388 
389 	/* update highest_used_slotid when it is freed */
390 	if (slotid == tbl->highest_used_slotid) {
391 		slotid = find_last_bit(tbl->used_slots, tbl->max_slots);
392 		if (slotid < tbl->max_slots)
393 			tbl->highest_used_slotid = slotid;
394 		else
395 			tbl->highest_used_slotid = NFS4_NO_SLOT;
396 	}
397 	dprintk("%s: slotid %u highest_used_slotid %d\n", __func__,
398 		slotid, tbl->highest_used_slotid);
399 }
400 
/*
 * rpc_wake_up_first() callback: promote @task to privileged priority
 * (privileged tasks are allowed to run while the session is draining;
 * see nfs41_setup_sequence()).  Always returns true, i.e. "wake it".
 */
bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy)
{
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	return true;
}
406 
407 /*
408  * Signal state manager thread if session fore channel is drained
409  */
410 static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
411 {
412 	if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
413 		rpc_wake_up_first(&ses->fc_slot_table.slot_tbl_waitq,
414 				nfs4_set_task_privileged, NULL);
415 		return;
416 	}
417 
418 	if (ses->fc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
419 		return;
420 
421 	dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__);
422 	complete(&ses->fc_slot_table.complete);
423 }
424 
425 /*
426  * Signal state manager thread if session back channel is drained
427  */
428 void nfs4_check_drain_bc_complete(struct nfs4_session *ses)
429 {
430 	if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) ||
431 	    ses->bc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
432 		return;
433 	dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__);
434 	complete(&ses->bc_slot_table.complete);
435 }
436 
437 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
438 {
439 	struct nfs4_slot_table *tbl;
440 
441 	tbl = &res->sr_session->fc_slot_table;
442 	if (!res->sr_slot) {
443 		/* just wake up the next guy waiting since
444 		 * we may have not consumed a slot after all */
445 		dprintk("%s: No slot\n", __func__);
446 		return;
447 	}
448 
449 	spin_lock(&tbl->slot_tbl_lock);
450 	nfs4_free_slot(tbl, res->sr_slot - tbl->slots);
451 	nfs4_check_drain_fc_complete(res->sr_session);
452 	spin_unlock(&tbl->slot_tbl_lock);
453 	res->sr_slot = NULL;
454 }
455 
/*
 * Post-process the SEQUENCE result of a completed NFSv4.1 request:
 * bump the slot's sequence number, renew the lease, and free the slot.
 * Returns 1 when the caller may inspect the result, 0 when the task
 * has been restarted after NFS4ERR_DELAY.
 */
static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	unsigned long timestamp;
	struct nfs_client *clp;

	/*
	 * sr_status remains 1 if an RPC level error occurred. The server
	 * may or may not have processed the sequence operation..
	 * Proceed as if the server received and processed the sequence
	 * operation.
	 */
	if (res->sr_status == 1)
		res->sr_status = NFS_OK;

	/* don't increment the sequence number if the task wasn't sent */
	if (!RPC_WAS_SENT(task))
		goto out;

	/* Check the SEQUENCE operation status */
	switch (res->sr_status) {
	case 0:
		/* Update the slot's sequence and clientid lease timer */
		++res->sr_slot->seq_nr;
		timestamp = res->sr_renewal_time;
		clp = res->sr_session->clp;
		do_renew_lease(clp, timestamp);
		/* Check sequence flags */
		if (res->sr_status_flags != 0)
			nfs4_schedule_lease_recovery(clp);
		break;
	case -NFS4ERR_DELAY:
		/* The server detected a resend of the RPC call and
		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
		 * of RFC5661.
		 */
		dprintk("%s: slot=%td seq=%d: Operation in progress\n",
			__func__,
			res->sr_slot - res->sr_session->fc_slot_table.slots,
			res->sr_slot->seq_nr);
		goto out_retry;
	default:
		/* Just update the slot sequence no. */
		++res->sr_slot->seq_nr;
	}
out:
	/* The session may be reset by one of the error handlers. */
	dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
	nfs41_sequence_free_slot(res);
	return 1;
out_retry:
	if (!rpc_restart_call(task))
		goto out;
	/* Back off before the restarted call is transmitted */
	rpc_delay(task, NFS4_POLL_RETRY_MAX);
	return 0;
}
511 
512 static int nfs4_sequence_done(struct rpc_task *task,
513 			       struct nfs4_sequence_res *res)
514 {
515 	if (res->sr_session == NULL)
516 		return 1;
517 	return nfs41_sequence_done(task, res);
518 }
519 
520 /*
521  * nfs4_find_slot - efficiently look for a free slot
522  *
523  * nfs4_find_slot looks for an unset bit in the used_slots bitmap.
524  * If found, we mark the slot as used, update the highest_used_slotid,
525  * and respectively set up the sequence operation args.
526  * The slot number is returned if found, or NFS4_NO_SLOT otherwise.
527  *
528  * Note: must be called with under the slot_tbl_lock.
529  */
530 static u32
531 nfs4_find_slot(struct nfs4_slot_table *tbl)
532 {
533 	u32 slotid;
534 	u32 ret_id = NFS4_NO_SLOT;
535 
536 	dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n",
537 		__func__, tbl->used_slots[0], tbl->highest_used_slotid,
538 		tbl->max_slots);
539 	slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
540 	if (slotid >= tbl->max_slots)
541 		goto out;
542 	__set_bit(slotid, tbl->used_slots);
543 	if (slotid > tbl->highest_used_slotid ||
544 			tbl->highest_used_slotid == NFS4_NO_SLOT)
545 		tbl->highest_used_slotid = slotid;
546 	ret_id = slotid;
547 out:
548 	dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n",
549 		__func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id);
550 	return ret_id;
551 }
552 
553 static void nfs41_init_sequence(struct nfs4_sequence_args *args,
554 		struct nfs4_sequence_res *res, int cache_reply)
555 {
556 	args->sa_session = NULL;
557 	args->sa_cache_this = 0;
558 	if (cache_reply)
559 		args->sa_cache_this = 1;
560 	res->sr_session = NULL;
561 	res->sr_slot = NULL;
562 }
563 
/*
 * Reserve a fore-channel slot and fill in the SEQUENCE arguments for
 * @task.  Returns 0 with a slot assigned, or -EAGAIN after putting the
 * task to sleep on the slot table waitqueue (while the session drains,
 * to enforce FIFO ordering, or when no slot is free).
 */
int nfs41_setup_sequence(struct nfs4_session *session,
				struct nfs4_sequence_args *args,
				struct nfs4_sequence_res *res,
				struct rpc_task *task)
{
	struct nfs4_slot *slot;
	struct nfs4_slot_table *tbl;
	u32 slotid;

	dprintk("--> %s\n", __func__);
	/* slot already allocated? */
	if (res->sr_slot != NULL)
		return 0;

	tbl = &session->fc_slot_table;

	spin_lock(&tbl->slot_tbl_lock);
	if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
	    !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
		/* The state manager will wait until the slot table is empty */
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
		spin_unlock(&tbl->slot_tbl_lock);
		dprintk("%s session is draining\n", __func__);
		return -EAGAIN;
	}

	/* Queue behind earlier waiters to keep slot handout fair */
	if (!rpc_queue_empty(&tbl->slot_tbl_waitq) &&
	    !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
		spin_unlock(&tbl->slot_tbl_lock);
		dprintk("%s enforce FIFO order\n", __func__);
		return -EAGAIN;
	}

	slotid = nfs4_find_slot(tbl);
	if (slotid == NFS4_NO_SLOT) {
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
		spin_unlock(&tbl->slot_tbl_lock);
		dprintk("<-- %s: no free slots\n", __func__);
		return -EAGAIN;
	}
	spin_unlock(&tbl->slot_tbl_lock);

	rpc_task_set_priority(task, RPC_PRIORITY_NORMAL);
	slot = tbl->slots + slotid;
	args->sa_session = session;
	args->sa_slotid = slotid;

	dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr);

	res->sr_session = session;
	res->sr_slot = slot;
	res->sr_renewal_time = jiffies;
	res->sr_status_flags = 0;
	/*
	 * sr_status is only set in decode_sequence, and so will remain
	 * set to 1 if an rpc level failure occurs.
	 */
	res->sr_status = 1;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
626 
627 int nfs4_setup_sequence(const struct nfs_server *server,
628 			struct nfs4_sequence_args *args,
629 			struct nfs4_sequence_res *res,
630 			struct rpc_task *task)
631 {
632 	struct nfs4_session *session = nfs4_get_session(server);
633 	int ret = 0;
634 
635 	if (session == NULL)
636 		goto out;
637 
638 	dprintk("--> %s clp %p session %p sr_slot %td\n",
639 		__func__, session->clp, session, res->sr_slot ?
640 			res->sr_slot - session->fc_slot_table.slots : -1);
641 
642 	ret = nfs41_setup_sequence(session, args, res, task);
643 out:
644 	dprintk("<-- %s status=%d\n", __func__, ret);
645 	return ret;
646 }
647 
/* Callback data tying a synchronous v4.1 RPC to its SEQUENCE state */
struct nfs41_call_sync_data {
	const struct nfs_server *seq_server;
	struct nfs4_sequence_args *seq_args;
	struct nfs4_sequence_res *seq_res;
};
653 
654 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
655 {
656 	struct nfs41_call_sync_data *data = calldata;
657 
658 	dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
659 
660 	if (nfs4_setup_sequence(data->seq_server, data->seq_args,
661 				data->seq_res, task))
662 		return;
663 	rpc_call_start(task);
664 }
665 
/* As nfs41_call_sync_prepare, but first mark the task privileged so it
 * may obtain a slot while the session is draining. */
static void nfs41_call_priv_sync_prepare(struct rpc_task *task, void *calldata)
{
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	nfs41_call_sync_prepare(task, calldata);
}

/* rpc_call_done callback: post-process SEQUENCE and release the slot */
static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs41_call_sync_data *data = calldata;

	nfs41_sequence_done(task, data->seq_res);
}

/* RPC callbacks for ordinary synchronous v4.1 calls */
static const struct rpc_call_ops nfs41_call_sync_ops = {
	.rpc_call_prepare = nfs41_call_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
};

/* RPC callbacks for privileged (state-management) synchronous calls */
static const struct rpc_call_ops nfs41_call_priv_sync_ops = {
	.rpc_call_prepare = nfs41_call_priv_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
};
688 
689 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
690 				   struct nfs_server *server,
691 				   struct rpc_message *msg,
692 				   struct nfs4_sequence_args *args,
693 				   struct nfs4_sequence_res *res,
694 				   int privileged)
695 {
696 	int ret;
697 	struct rpc_task *task;
698 	struct nfs41_call_sync_data data = {
699 		.seq_server = server,
700 		.seq_args = args,
701 		.seq_res = res,
702 	};
703 	struct rpc_task_setup task_setup = {
704 		.rpc_client = clnt,
705 		.rpc_message = msg,
706 		.callback_ops = &nfs41_call_sync_ops,
707 		.callback_data = &data
708 	};
709 
710 	if (privileged)
711 		task_setup.callback_ops = &nfs41_call_priv_sync_ops;
712 	task = rpc_run_task(&task_setup);
713 	if (IS_ERR(task))
714 		ret = PTR_ERR(task);
715 	else {
716 		ret = task->tk_status;
717 		rpc_put_task(task);
718 	}
719 	return ret;
720 }
721 
/* v4.1 ->call_sync implementation: initialize SEQUENCE state, then run
 * the RPC unprivileged. */
int _nfs4_call_sync_session(struct rpc_clnt *clnt,
			    struct nfs_server *server,
			    struct rpc_message *msg,
			    struct nfs4_sequence_args *args,
			    struct nfs4_sequence_res *res,
			    int cache_reply)
{
	nfs41_init_sequence(args, res, cache_reply);
	return nfs4_call_sync_sequence(clnt, server, msg, args, res, 0);
}
732 
733 #else
static inline
void nfs41_init_sequence(struct nfs4_sequence_args *args,
		struct nfs4_sequence_res *res, int cache_reply)
{
	/* No sessions without NFSv4.1: nothing to initialize */
}

static int nfs4_sequence_done(struct rpc_task *task,
			       struct nfs4_sequence_res *res)
{
	/* No SEQUENCE op to post-process; report "done" */
	return 1;
}
745 #endif /* CONFIG_NFS_V4_1 */
746 
/* v4.0 ->call_sync implementation: no session, plain synchronous RPC */
int _nfs4_call_sync(struct rpc_clnt *clnt,
		    struct nfs_server *server,
		    struct rpc_message *msg,
		    struct nfs4_sequence_args *args,
		    struct nfs4_sequence_res *res,
		    int cache_reply)
{
	nfs41_init_sequence(args, res, cache_reply);
	return rpc_call_sync(clnt, msg, 0);
}

/* Dispatch a synchronous call through the minor-version-specific
 * ->call_sync operation (v4.0 or v4.1). */
static inline
int nfs4_call_sync(struct rpc_clnt *clnt,
		   struct nfs_server *server,
		   struct rpc_message *msg,
		   struct nfs4_sequence_args *args,
		   struct nfs4_sequence_res *res,
		   int cache_reply)
{
	return server->nfs_client->cl_mvops->call_sync(clnt, server, msg,
						args, res, cache_reply);
}
769 
/*
 * Apply the change_info returned by a directory-modifying operation:
 * invalidate cached attributes/data for @dir and, unless the change was
 * atomic and our cached version matches "before", force dentry
 * revalidation.
 */
static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
{
	struct nfs_inode *nfsi = NFS_I(dir);

	spin_lock(&dir->i_lock);
	nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA;
	if (!cinfo->atomic || cinfo->before != dir->i_version)
		nfs_force_lookup_revalidate(dir);
	dir->i_version = cinfo->after;
	spin_unlock(&dir->i_lock);
}
781 
/*
 * State for one OPEN (plus optional OPEN_CONFIRM) RPC.  Refcounted via
 * "kref" so it can outlive the issuing process; references to dentry,
 * parent, superblock and state owner are taken in nfs4_opendata_alloc()
 * and dropped in nfs4_opendata_free().
 */
struct nfs4_opendata {
	struct kref kref;
	struct nfs_openargs o_arg;
	struct nfs_openres o_res;
	struct nfs_open_confirmargs c_arg;
	struct nfs_open_confirmres c_res;
	struct nfs4_string owner_name;
	struct nfs4_string group_name;
	struct nfs_fattr f_attr;		/* storage for o_res.f_attr */
	struct nfs_fattr dir_attr;		/* storage for o_res.dir_attr */
	struct dentry *dir;			/* referenced parent dentry */
	struct dentry *dentry;			/* referenced target dentry */
	struct nfs4_state_owner *owner;
	struct nfs4_state *state;
	struct iattr attrs;			/* attrs to set at open time */
	unsigned long timestamp;
	unsigned int rpc_done : 1;
	int rpc_status;
	int cancelled;
};


/* Point the embedded result structures at their in-object storage */
static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
	p->o_res.f_attr = &p->f_attr;
	p->o_res.dir_attr = &p->dir_attr;
	p->o_res.seqid = p->o_arg.seqid;
	p->c_res.seqid = p->c_arg.seqid;
	p->o_res.server = p->o_arg.server;
	nfs_fattr_init(&p->f_attr);
	nfs_fattr_init(&p->dir_attr);
	nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}
815 
/*
 * Allocate and initialize the per-OPEN state for @dentry.
 * Takes references on the dentry, its parent, the superblock and the
 * state owner @sp (all released by nfs4_opendata_free()).
 * Returns NULL on allocation failure.
 */
static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
		const struct iattr *attrs,
		gfp_t gfp_mask)
{
	struct dentry *parent = dget_parent(dentry);
	struct inode *dir = parent->d_inode;
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs4_opendata *p;

	p = kzalloc(sizeof(*p), gfp_mask);
	if (p == NULL)
		goto err;
	p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
	if (p->o_arg.seqid == NULL)
		goto err_free;
	/* Keep the superblock alive for the duration of the open */
	nfs_sb_active(dentry->d_sb);
	p->dentry = dget(dentry);
	p->dir = parent;
	p->owner = sp;
	atomic_inc(&sp->so_count);
	p->o_arg.fh = NFS_FH(dir);
	p->o_arg.open_flags = flags;
	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
	p->o_arg.clientid = server->nfs_client->cl_clientid;
	p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
	p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
	p->o_arg.name = &dentry->d_name;
	p->o_arg.server = server;
	p->o_arg.bitmask = server->attr_bitmask;
	p->o_arg.dir_bitmask = server->cache_consistency_bitmask;
	p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
	if (attrs != NULL && attrs->ia_valid != 0) {
		__be32 verf[2];

		p->o_arg.u.attrs = &p->attrs;
		memcpy(&p->attrs, attrs, sizeof(p->attrs));

		/* Verifier for exclusive creates: jiffies + pid is unique
		 * enough for replay detection.
		 * NOTE(review): both u.attrs and u.verifier are written;
		 * presumably the XDR encoder picks one based on the
		 * createmode — confirm 'u' union semantics there. */
		verf[0] = jiffies;
		verf[1] = current->pid;
		memcpy(p->o_arg.u.verifier.data, verf,
				sizeof(p->o_arg.u.verifier.data));
	}
	p->c_arg.fh = &p->o_res.fh;
	p->c_arg.stateid = &p->o_res.stateid;
	p->c_arg.seqid = p->o_arg.seqid;
	nfs4_init_opendata_res(p);
	kref_init(&p->kref);
	return p;
err_free:
	kfree(p);
err:
	dput(parent);
	return NULL;
}
871 
/*
 * kref release function: drop every reference taken by
 * nfs4_opendata_alloc() and free the object.  The superblock reference
 * is dropped only after both dentries, so sb is saved up front.
 */
static void nfs4_opendata_free(struct kref *kref)
{
	struct nfs4_opendata *p = container_of(kref,
			struct nfs4_opendata, kref);
	struct super_block *sb = p->dentry->d_sb;

	nfs_free_seqid(p->o_arg.seqid);
	if (p->state != NULL)
		nfs4_put_open_state(p->state);
	nfs4_put_state_owner(p->owner);
	dput(p->dir);
	dput(p->dentry);
	nfs_sb_deactive(sb);
	nfs_fattr_free_names(&p->f_attr);
	kfree(p);
}
888 
889 static void nfs4_opendata_put(struct nfs4_opendata *p)
890 {
891 	if (p != NULL)
892 		kref_put(&p->kref, nfs4_opendata_free);
893 }
894 
/* Wait for @task to run to completion; returns the wait status */
static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
{
	return rpc_wait_for_completion_task(task);
}
902 
/*
 * Decide whether an existing open state can satisfy a new open with
 * mode @mode / flags @open_mode without another OPEN RPC.  O_EXCL and
 * O_TRUNC always force a trip to the server; otherwise we need both a
 * matching state flag and a live open count for that mode.
 */
static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
{
	int ret = 0;

	if (open_mode & (O_EXCL|O_TRUNC))
		goto out;
	switch (mode & (FMODE_READ|FMODE_WRITE)) {
		case FMODE_READ:
			ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
				&& state->n_rdonly != 0;
			break;
		case FMODE_WRITE:
			ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
				&& state->n_wronly != 0;
			break;
		case FMODE_READ|FMODE_WRITE:
			ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
				&& state->n_rdwr != 0;
	}
out:
	return ret;
}
925 
926 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
927 {
928 	if (delegation == NULL)
929 		return 0;
930 	if ((delegation->type & fmode) != fmode)
931 		return 0;
932 	if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
933 		return 0;
934 	nfs_mark_delegation_referenced(delegation);
935 	return 1;
936 }
937 
/*
 * Bump the open counter matching @fmode and fold the mode into the
 * state.  Caller holds state->owner->so_lock (see __update_open_stateid).
 */
static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
{
	switch (fmode) {
		case FMODE_WRITE:
			state->n_wronly++;
			break;
		case FMODE_READ:
			state->n_rdonly++;
			break;
		case FMODE_READ|FMODE_WRITE:
			state->n_rdwr++;
	}
	nfs4_state_set_mode_locked(state, state->state | fmode);
}

/*
 * Record a new open stateid and set the flag bit for @fmode.
 * state->stateid is only updated when no delegation stateid is in
 * force.  Caller holds the state->seqlock write side (see
 * nfs_set_open_stateid()).
 */
static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
	switch (fmode) {
		case FMODE_READ:
			set_bit(NFS_O_RDONLY_STATE, &state->flags);
			break;
		case FMODE_WRITE:
			set_bit(NFS_O_WRONLY_STATE, &state->flags);
			break;
		case FMODE_READ|FMODE_WRITE:
			set_bit(NFS_O_RDWR_STATE, &state->flags);
	}
}

/* Locked wrapper for nfs_set_open_stateid_locked() */
static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
	write_seqlock(&state->seqlock);
	nfs_set_open_stateid_locked(state, stateid, fmode);
	write_sequnlock(&state->seqlock);
}
976 
/*
 * Record a delegation stateid and/or open stateid on @state and bump
 * the share count for @fmode.  The seqlock serialises the stateid
 * copies against readers; so_lock protects the share counters.
 */
static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	write_seqlock(&state->seqlock);
	if (deleg_stateid != NULL) {
		nfs4_stateid_copy(&state->stateid, deleg_stateid);
		set_bit(NFS_DELEGATED_STATE, &state->flags);
	}
	if (open_stateid != NULL)
		nfs_set_open_stateid_locked(state, open_stateid, fmode);
	write_sequnlock(&state->seqlock);
	spin_lock(&state->owner->so_lock);
	update_open_stateflags(state, fmode);
	spin_unlock(&state->owner->so_lock);
}
995 
/*
 * Update @state after an OPEN or delegation event.
 *
 * If the inode holds a delegation covering @fmode (and matching
 * @delegation when one is supplied), record the delegation stateid and
 * mark the delegation referenced; otherwise fall back to recording just
 * @open_stateid.  Returns 1 if any stateid was recorded, 0 otherwise.
 */
static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *deleg_cur;
	int ret = 0;

	fmode &= (FMODE_READ|FMODE_WRITE);

	rcu_read_lock();
	deleg_cur = rcu_dereference(nfsi->delegation);
	if (deleg_cur == NULL)
		goto no_delegation;

	spin_lock(&deleg_cur->lock);
	/* Recheck under the delegation lock: the delegation may have been
	 * replaced, or may not cover the requested open mode */
	if (nfsi->delegation != deleg_cur ||
	    (deleg_cur->type & fmode) != fmode)
		goto no_delegation_unlock;

	if (delegation == NULL)
		delegation = &deleg_cur->stateid;
	else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
		goto no_delegation_unlock;

	nfs_mark_delegation_referenced(deleg_cur);
	__update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
	ret = 1;
no_delegation_unlock:
	spin_unlock(&deleg_cur->lock);
no_delegation:
	rcu_read_unlock();

	/* No usable delegation: record just the open stateid */
	if (!ret && open_stateid != NULL) {
		__update_open_stateid(state, open_stateid, NULL, fmode);
		ret = 1;
	}

	return ret;
}
1034 
1035 
1036 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1037 {
1038 	struct nfs_delegation *delegation;
1039 
1040 	rcu_read_lock();
1041 	delegation = rcu_dereference(NFS_I(inode)->delegation);
1042 	if (delegation == NULL || (delegation->type & fmode) == fmode) {
1043 		rcu_read_unlock();
1044 		return;
1045 	}
1046 	rcu_read_unlock();
1047 	nfs_inode_return_delegation(inode);
1048 }
1049 
/*
 * Try to satisfy an OPEN without going to the server, either by reusing
 * already-open state or by using a delegation held on the inode.
 * Returns a referenced nfs4_state on success, ERR_PTR(-EAGAIN) when an
 * on-the-wire OPEN is required, or another ERR_PTR on failure.
 */
static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
{
	struct nfs4_state *state = opendata->state;
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *delegation;
	int open_mode = opendata->o_arg.open_flags & (O_EXCL|O_TRUNC);
	fmode_t fmode = opendata->o_arg.fmode;
	nfs4_stateid stateid;
	int ret = -EAGAIN;

	for (;;) {
		/* Cheap unlocked check first, then recheck under so_lock */
		if (can_open_cached(state, fmode, open_mode)) {
			spin_lock(&state->owner->so_lock);
			if (can_open_cached(state, fmode, open_mode)) {
				update_open_stateflags(state, fmode);
				spin_unlock(&state->owner->so_lock);
				goto out_return_state;
			}
			spin_unlock(&state->owner->so_lock);
		}
		rcu_read_lock();
		delegation = rcu_dereference(nfsi->delegation);
		if (!can_open_delegated(delegation, fmode)) {
			rcu_read_unlock();
			break;
		}
		/* Save the delegation */
		nfs4_stateid_copy(&stateid, &delegation->stateid);
		rcu_read_unlock();
		ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
		if (ret != 0)
			goto out;
		ret = -EAGAIN;

		/* Try to update the stateid using the delegation */
		if (update_open_stateid(state, NULL, &stateid, fmode))
			goto out_return_state;
	}
out:
	return ERR_PTR(ret);
out_return_state:
	atomic_inc(&state->count);
	return state;
}
1094 
/*
 * Turn a completed (or cached) OPEN into a referenced nfs4_state.
 *
 * When no OPEN went out on the wire, fall back to nfs4_try_open_cached().
 * Otherwise instantiate the inode from the returned filehandle and
 * attributes, record any delegation the server granted, and update the
 * open stateid.  Returns a referenced state or an ERR_PTR.
 */
static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
{
	struct inode *inode;
	struct nfs4_state *state = NULL;
	struct nfs_delegation *delegation;
	int ret;

	if (!data->rpc_done) {
		/* The RPC never completed: try to reuse local open state */
		state = nfs4_try_open_cached(data);
		goto out;
	}

	ret = -EAGAIN;
	if (!(data->f_attr.valid & NFS_ATTR_FATTR))
		goto err;
	inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr);
	ret = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto err;
	ret = -ENOMEM;
	state = nfs4_get_open_state(inode, data->owner);
	if (state == NULL)
		goto err_put_inode;
	if (data->o_res.delegation_type != 0) {
		struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
		int delegation_flags = 0;

		rcu_read_lock();
		delegation = rcu_dereference(NFS_I(inode)->delegation);
		if (delegation)
			delegation_flags = delegation->flags;
		rcu_read_unlock();
		/* A delegation in reply to CLAIM_DELEGATE_CUR makes no
		 * sense: we already held one */
		if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
			pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
					"returning a delegation for "
					"OPEN(CLAIM_DELEGATE_CUR)\n",
					clp->cl_hostname);
		} else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
			nfs_inode_set_delegation(state->inode,
					data->owner->so_cred,
					&data->o_res);
		else
			nfs_inode_reclaim_delegation(state->inode,
					data->owner->so_cred,
					&data->o_res);
	}

	update_open_stateid(state, &data->o_res.stateid, NULL,
			data->o_arg.fmode);
	iput(inode);
out:
	return state;
err_put_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}
1152 
1153 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
1154 {
1155 	struct nfs_inode *nfsi = NFS_I(state->inode);
1156 	struct nfs_open_context *ctx;
1157 
1158 	spin_lock(&state->inode->i_lock);
1159 	list_for_each_entry(ctx, &nfsi->open_files, list) {
1160 		if (ctx->state != state)
1161 			continue;
1162 		get_nfs_open_context(ctx);
1163 		spin_unlock(&state->inode->i_lock);
1164 		return ctx;
1165 	}
1166 	spin_unlock(&state->inode->i_lock);
1167 	return ERR_PTR(-ENOENT);
1168 }
1169 
1170 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state)
1171 {
1172 	struct nfs4_opendata *opendata;
1173 
1174 	opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, NULL, GFP_NOFS);
1175 	if (opendata == NULL)
1176 		return ERR_PTR(-ENOMEM);
1177 	opendata->state = state;
1178 	atomic_inc(&state->count);
1179 	return opendata;
1180 }
1181 
/*
 * Replay an OPEN for a single open mode during recovery and fold the
 * result into an nfs4_state returned via @res.  The open reference
 * obtained from nfs4_opendata_to_nfs4_state() is dropped again with
 * nfs4_close_state() since the replay does not represent a new user
 * open of the file.
 */
static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
{
	struct nfs4_state *newstate;
	int ret;

	/* Reset the opendata for a fresh OPEN of just this mode */
	opendata->o_arg.open_flags = 0;
	opendata->o_arg.fmode = fmode;
	memset(&opendata->o_res, 0, sizeof(opendata->o_res));
	memset(&opendata->c_res, 0, sizeof(opendata->c_res));
	nfs4_init_opendata_res(opendata);
	ret = _nfs4_recover_proc_open(opendata);
	if (ret != 0)
		return ret;
	newstate = nfs4_opendata_to_nfs4_state(opendata);
	if (IS_ERR(newstate))
		return PTR_ERR(newstate);
	nfs4_close_state(newstate, fmode);
	*res = newstate;
	return 0;
}
1202 
/*
 * Recover every open mode held on @state by replaying an OPEN for each
 * non-zero share count.  Each replay must resolve to the very same
 * nfs4_state; otherwise the file changed identity and we return -ESTALE.
 */
static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
{
	struct nfs4_state *newstate;
	int ret;

	/* memory barrier prior to reading state->n_* */
	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	smp_rmb();
	if (state->n_rdwr != 0) {
		clear_bit(NFS_O_RDWR_STATE, &state->flags);
		ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
		if (ret != 0)
			return ret;
		if (newstate != state)
			return -ESTALE;
	}
	if (state->n_wronly != 0) {
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
		if (ret != 0)
			return ret;
		if (newstate != state)
			return -ESTALE;
	}
	if (state->n_rdonly != 0) {
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
		if (ret != 0)
			return ret;
		if (newstate != state)
			return -ESTALE;
	}
	/*
	 * We may have performed cached opens for all three recoveries.
	 * Check if we need to update the current stateid.
	 */
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
	    !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
		write_seqlock(&state->seqlock);
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
			nfs4_stateid_copy(&state->stateid, &state->open_stateid);
		write_sequnlock(&state->seqlock);
	}
	return 0;
}
1248 
1249 /*
1250  * OPEN_RECLAIM:
1251  * 	reclaim state on the server after a reboot.
1252  */
static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_delegation *delegation;
	struct nfs4_opendata *opendata;
	fmode_t delegation_type = 0;
	int status;

	opendata = nfs4_open_recoverdata_alloc(ctx, state);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	/* CLAIM_PREVIOUS reclaims use the file's own filehandle */
	opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS;
	opendata->o_arg.fh = NFS_FH(state->inode);
	/* If the delegation needs reclaiming too, request it in the OPEN */
	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
	if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
		delegation_type = delegation->type;
	rcu_read_unlock();
	opendata->o_arg.u.delegation_type = delegation_type;
	status = nfs4_open_recover(opendata, state);
	nfs4_opendata_put(opendata);
	return status;
}
1275 
1276 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1277 {
1278 	struct nfs_server *server = NFS_SERVER(state->inode);
1279 	struct nfs4_exception exception = { };
1280 	int err;
1281 	do {
1282 		err = _nfs4_do_open_reclaim(ctx, state);
1283 		if (err != -NFS4ERR_DELAY)
1284 			break;
1285 		nfs4_handle_exception(server, err, &exception);
1286 	} while (exception.retry);
1287 	return err;
1288 }
1289 
/*
 * Reclaim open state after a server reboot, using any open context
 * still attached to @state to drive the replayed OPEN.
 */
static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
	struct nfs_open_context *ctx = nfs4_state_find_open_context(state);
	int ret = PTR_ERR(ctx);

	if (!IS_ERR(ctx)) {
		ret = nfs4_do_open_reclaim(ctx, state);
		put_nfs_open_context(ctx);
	}
	return ret;
}
1302 
1303 static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1304 {
1305 	struct nfs4_opendata *opendata;
1306 	int ret;
1307 
1308 	opendata = nfs4_open_recoverdata_alloc(ctx, state);
1309 	if (IS_ERR(opendata))
1310 		return PTR_ERR(opendata);
1311 	opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR;
1312 	nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
1313 	ret = nfs4_open_recover(opendata, state);
1314 	nfs4_opendata_put(opendata);
1315 	return ret;
1316 }
1317 
/*
 * Replay our open(s) in response to a delegation recall.  Errors either
 * terminate after scheduling the appropriate recovery, are swallowed
 * (the stateid cannot be recovered right now, so let the state manager
 * proceed), or are retried via nfs4_handle_exception().
 */
int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
{
	struct nfs4_exception exception = { };
	struct nfs_server *server = NFS_SERVER(state->inode);
	int err;
	do {
		err = _nfs4_open_delegation_recall(ctx, state, stateid);
		switch (err) {
			case 0:
			case -ENOENT:
			case -ESTALE:
				goto out;
			case -NFS4ERR_BADSESSION:
			case -NFS4ERR_BADSLOT:
			case -NFS4ERR_BAD_HIGH_SLOT:
			case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
			case -NFS4ERR_DEADSESSION:
				nfs4_schedule_session_recovery(server->nfs_client->cl_session);
				goto out;
			case -NFS4ERR_STALE_CLIENTID:
			case -NFS4ERR_STALE_STATEID:
			case -NFS4ERR_EXPIRED:
				/* Don't recall a delegation if it was lost */
				nfs4_schedule_lease_recovery(server->nfs_client);
				goto out;
			case -ERESTARTSYS:
				/*
				 * The show must go on: exit, but mark the
				 * stateid as needing recovery.
				 */
			case -NFS4ERR_DELEG_REVOKED:
			case -NFS4ERR_ADMIN_REVOKED:
			case -NFS4ERR_BAD_STATEID:
				nfs_inode_find_state_and_recover(state->inode,
						stateid);
				nfs4_schedule_stateid_recovery(server, state);
				/* Fall through: return success below */
			case -EKEYEXPIRED:
				/*
				 * User RPCSEC_GSS context has expired.
				 * We cannot recover this stateid now, so
				 * skip it and allow recovery thread to
				 * proceed.
				 */
			case -ENOMEM:
				err = 0;
				goto out;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
out:
	return err;
}
1370 
/*
 * OPEN_CONFIRM completion: on success, adopt the confirmed stateid,
 * mark the owner's open seqid as confirmed and renew the lease.
 */
static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;

	data->rpc_status = task->tk_status;
	if (data->rpc_status == 0) {
		nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
		nfs_confirm_seqid(&data->owner->so_seqid, 0);
		renew_lease(data->o_res.server, data->timestamp);
		data->rpc_done = 1;
	}
}
1383 
1384 static void nfs4_open_confirm_release(void *calldata)
1385 {
1386 	struct nfs4_opendata *data = calldata;
1387 	struct nfs4_state *state = NULL;
1388 
1389 	/* If this request hasn't been cancelled, do nothing */
1390 	if (data->cancelled == 0)
1391 		goto out_free;
1392 	/* In case of error, no cleanup! */
1393 	if (!data->rpc_done)
1394 		goto out_free;
1395 	state = nfs4_opendata_to_nfs4_state(data);
1396 	if (!IS_ERR(state))
1397 		nfs4_close_state(state, data->o_arg.fmode);
1398 out_free:
1399 	nfs4_opendata_put(data);
1400 }
1401 
/* Callbacks for the asynchronous OPEN_CONFIRM RPC */
static const struct rpc_call_ops nfs4_open_confirm_ops = {
	.rpc_call_done = nfs4_open_confirm_done,
	.rpc_release = nfs4_open_confirm_release,
};
1406 
1407 /*
1408  * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
1409  */
static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
{
	struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
	struct rpc_task *task;
	struct  rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
		.rpc_argp = &data->c_arg,
		.rpc_resp = &data->c_res,
		.rpc_cred = data->owner->so_cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_open_confirm_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int status;

	/* The rpc_release callback owns the extra reference taken here */
	kref_get(&data->kref);
	data->rpc_done = 0;
	data->rpc_status = 0;
	data->timestamp = jiffies;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status != 0) {
		/* Interrupted: flag cancellation for the release callback */
		data->cancelled = 1;
		smp_wmb();
	} else
		status = data->rpc_status;
	rpc_put_task(task);
	return status;
}
1446 
/*
 * rpc_call_prepare for OPEN: serialise on the open seqid, elide the RPC
 * entirely when cached open state or a delegation suffices, and
 * otherwise fill in the clientid and (for CLAIM_PREVIOUS reclaims)
 * switch to the OPEN_NOATTR procedure.
 */
static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;
	struct nfs4_state_owner *sp = data->owner;

	if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
		return;
	/*
	 * Check if we still need to send an OPEN call, or if we can use
	 * a delegation instead.
	 */
	if (data->state != NULL) {
		struct nfs_delegation *delegation;

		if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
			goto out_no_action;
		rcu_read_lock();
		delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
		if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
		    can_open_delegated(delegation, data->o_arg.fmode))
			goto unlock_no_action;
		rcu_read_unlock();
	}
	/* Update client id. */
	data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
	if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
		nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
	}
	data->timestamp = jiffies;
	if (nfs4_setup_sequence(data->o_arg.server,
				&data->o_arg.seq_args,
				&data->o_res.seq_res, task))
		return;
	rpc_call_start(task);
	return;
unlock_no_action:
	rcu_read_unlock();
out_no_action:
	task->tk_action = NULL;

}
1489 
/* Recovery variant of nfs4_open_prepare: runs at privileged priority */
static void nfs4_recover_open_prepare(struct rpc_task *task, void *calldata)
{
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	nfs4_open_prepare(task, calldata);
}
1495 
/*
 * rpc_call_done for OPEN: sanity-check the type of the opened object,
 * renew the lease and, unless the server demands an OPEN_CONFIRM, mark
 * the owner's open seqid as confirmed.
 */
static void nfs4_open_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;

	data->rpc_status = task->tk_status;

	if (!nfs4_sequence_done(task, &data->o_res.seq_res))
		return;

	if (task->tk_status == 0) {
		/* Only a regular file may be opened */
		switch (data->o_res.f_attr->mode & S_IFMT) {
			case S_IFREG:
				break;
			case S_IFLNK:
				data->rpc_status = -ELOOP;
				break;
			case S_IFDIR:
				data->rpc_status = -EISDIR;
				break;
			default:
				data->rpc_status = -ENOTDIR;
		}
		renew_lease(data->o_res.server, data->timestamp);
		if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
			nfs_confirm_seqid(&data->owner->so_seqid, 0);
	}
	data->rpc_done = 1;
}
1524 
1525 static void nfs4_open_release(void *calldata)
1526 {
1527 	struct nfs4_opendata *data = calldata;
1528 	struct nfs4_state *state = NULL;
1529 
1530 	/* If this request hasn't been cancelled, do nothing */
1531 	if (data->cancelled == 0)
1532 		goto out_free;
1533 	/* In case of error, no cleanup! */
1534 	if (data->rpc_status != 0 || !data->rpc_done)
1535 		goto out_free;
1536 	/* In case we need an open_confirm, no cleanup! */
1537 	if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
1538 		goto out_free;
1539 	state = nfs4_opendata_to_nfs4_state(data);
1540 	if (!IS_ERR(state))
1541 		nfs4_close_state(state, data->o_arg.fmode);
1542 out_free:
1543 	nfs4_opendata_put(data);
1544 }
1545 
/* Callbacks for an ordinary asynchronous OPEN */
static const struct rpc_call_ops nfs4_open_ops = {
	.rpc_call_prepare = nfs4_open_prepare,
	.rpc_call_done = nfs4_open_done,
	.rpc_release = nfs4_open_release,
};

/* As above, but prepared at privileged priority for state recovery */
static const struct rpc_call_ops nfs4_recover_open_ops = {
	.rpc_call_prepare = nfs4_recover_open_prepare,
	.rpc_call_done = nfs4_open_done,
	.rpc_release = nfs4_open_release,
};
1557 
1558 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
1559 {
1560 	struct inode *dir = data->dir->d_inode;
1561 	struct nfs_server *server = NFS_SERVER(dir);
1562 	struct nfs_openargs *o_arg = &data->o_arg;
1563 	struct nfs_openres *o_res = &data->o_res;
1564 	struct rpc_task *task;
1565 	struct rpc_message msg = {
1566 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
1567 		.rpc_argp = o_arg,
1568 		.rpc_resp = o_res,
1569 		.rpc_cred = data->owner->so_cred,
1570 	};
1571 	struct rpc_task_setup task_setup_data = {
1572 		.rpc_client = server->client,
1573 		.rpc_message = &msg,
1574 		.callback_ops = &nfs4_open_ops,
1575 		.callback_data = data,
1576 		.workqueue = nfsiod_workqueue,
1577 		.flags = RPC_TASK_ASYNC,
1578 	};
1579 	int status;
1580 
1581 	nfs41_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
1582 	kref_get(&data->kref);
1583 	data->rpc_done = 0;
1584 	data->rpc_status = 0;
1585 	data->cancelled = 0;
1586 	if (isrecover)
1587 		task_setup_data.callback_ops = &nfs4_recover_open_ops;
1588 	task = rpc_run_task(&task_setup_data);
1589         if (IS_ERR(task))
1590                 return PTR_ERR(task);
1591         status = nfs4_wait_for_completion_rpc_task(task);
1592         if (status != 0) {
1593                 data->cancelled = 1;
1594                 smp_wmb();
1595         } else
1596                 status = data->rpc_status;
1597         rpc_put_task(task);
1598 
1599 	return status;
1600 }
1601 
1602 static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
1603 {
1604 	struct inode *dir = data->dir->d_inode;
1605 	struct nfs_openres *o_res = &data->o_res;
1606         int status;
1607 
1608 	status = nfs4_run_open_task(data, 1);
1609 	if (status != 0 || !data->rpc_done)
1610 		return status;
1611 
1612 	nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
1613 
1614 	nfs_refresh_inode(dir, o_res->dir_attr);
1615 
1616 	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
1617 		status = _nfs4_proc_open_confirm(data);
1618 		if (status != 0)
1619 			return status;
1620 	}
1621 
1622 	return status;
1623 }
1624 
1625 /*
1626  * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
1627  */
1628 static int _nfs4_proc_open(struct nfs4_opendata *data)
1629 {
1630 	struct inode *dir = data->dir->d_inode;
1631 	struct nfs_server *server = NFS_SERVER(dir);
1632 	struct nfs_openargs *o_arg = &data->o_arg;
1633 	struct nfs_openres *o_res = &data->o_res;
1634 	int status;
1635 
1636 	status = nfs4_run_open_task(data, 0);
1637 	if (!data->rpc_done)
1638 		return status;
1639 	if (status != 0) {
1640 		if (status == -NFS4ERR_BADNAME &&
1641 				!(o_arg->open_flags & O_CREAT))
1642 			return -ENOENT;
1643 		return status;
1644 	}
1645 
1646 	nfs_fattr_map_and_free_names(server, &data->f_attr);
1647 
1648 	if (o_arg->open_flags & O_CREAT) {
1649 		update_changeattr(dir, &o_res->cinfo);
1650 		nfs_post_op_update_inode(dir, o_res->dir_attr);
1651 	} else
1652 		nfs_refresh_inode(dir, o_res->dir_attr);
1653 	if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
1654 		server->caps &= ~NFS_CAP_POSIX_LOCK;
1655 	if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
1656 		status = _nfs4_proc_open_confirm(data);
1657 		if (status != 0)
1658 			return status;
1659 	}
1660 	if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
1661 		_nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr);
1662 	return 0;
1663 }
1664 
/*
 * Wait for the state manager to re-establish the client lease, kicking
 * it whenever the lease is still expired or needs checking.  Gives up
 * with -EIO after NFS4_MAX_LOOP_ON_RECOVER attempts.
 */
static int nfs4_client_recover_expired_lease(struct nfs_client *clp)
{
	unsigned int loop;
	int ret;

	for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
		ret = nfs4_wait_clnt_recover(clp);
		if (ret != 0)
			break;
		if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
		    !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state))
			break;
		nfs4_schedule_state_manager(clp);
		ret = -EIO;
	}
	return ret;
}
1682 
/* Per-server convenience wrapper for lease recovery */
static int nfs4_recover_expired_lease(struct nfs_server *server)
{
	return nfs4_client_recover_expired_lease(server->nfs_client);
}
1687 
1688 /*
1689  * OPEN_EXPIRED:
1690  * 	reclaim state on the server after a network partition.
1691  * 	Assumes caller holds the appropriate lock
1692  */
static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs4_opendata *opendata;
	int ret;

	opendata = nfs4_open_recoverdata_alloc(ctx, state);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	ret = nfs4_open_recover(opendata, state);
	/* -ESTALE: the file changed identity; drop the dentry so that
	 * subsequent lookups go back to the server */
	if (ret == -ESTALE)
		d_drop(ctx->dentry);
	nfs4_opendata_put(opendata);
	return ret;
}
1707 
/*
 * Retry wrapper for _nfs4_open_expired: only NFS4ERR_GRACE and
 * NFS4ERR_DELAY are retried (after the standard backoff); all other
 * results are returned to the caller.
 */
static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };
	int err;

	do {
		err = _nfs4_open_expired(ctx, state);
		switch (err) {
		default:
			goto out;
		case -NFS4ERR_GRACE:
		case -NFS4ERR_DELAY:
			nfs4_handle_exception(server, err, &exception);
			err = 0;
		}
	} while (exception.retry);
out:
	return err;
}
1728 
/*
 * Recover open state after a network partition, using any open context
 * still attached to @state to drive the replayed OPEN.
 */
static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
	struct nfs_open_context *ctx = nfs4_state_find_open_context(state);
	int ret = PTR_ERR(ctx);

	if (!IS_ERR(ctx)) {
		ret = nfs4_do_open_expired(ctx, state);
		put_nfs_open_context(ctx);
	}
	return ret;
}
1741 
1742 #if defined(CONFIG_NFS_V4_1)
/*
 * NFSv4.1: probe @stateid with TEST_STATEID and, if the server no
 * longer recognises it, FREE_STATEID it and clear @flags so that full
 * recovery takes over.  Only probes when any of @flags is set.
 * NOTE(review): state->flags is read-modify-written here without the
 * set_bit/clear_bit atomics used elsewhere — confirm the caller's
 * serialisation.
 */
static int nfs41_check_expired_stateid(struct nfs4_state *state, nfs4_stateid *stateid, unsigned int flags)
{
	int status = NFS_OK;
	struct nfs_server *server = NFS_SERVER(state->inode);

	if (state->flags & flags) {
		status = nfs41_test_stateid(server, stateid);
		if (status != NFS_OK) {
			nfs41_free_stateid(server, stateid);
			state->flags &= ~flags;
		}
	}
	return status;
}
1757 
/*
 * NFSv4.1 variant of open expiry handling: check both the delegation
 * stateid and the open stateid with TEST_STATEID, and only fall back to
 * a full OPEN replay when the server no longer recognises one of them.
 */
static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
	int deleg_status, open_status;
	int deleg_flags = 1 << NFS_DELEGATED_STATE;
	int open_flags = (1 << NFS_O_RDONLY_STATE) | (1 << NFS_O_WRONLY_STATE) | (1 << NFS_O_RDWR_STATE);

	deleg_status = nfs41_check_expired_stateid(state, &state->stateid, deleg_flags);
	open_status = nfs41_check_expired_stateid(state,  &state->open_stateid, open_flags);

	if ((deleg_status == NFS_OK) && (open_status == NFS_OK))
		return NFS_OK;
	return nfs4_open_expired(sp, state);
}
1771 #endif
1772 
1773 /*
1774  * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-*
1775  * fields corresponding to attributes that were used to store the verifier.
1776  * Make sure we clobber those fields in the later setattr call
1777  */
1778 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr)
1779 {
1780 	if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
1781 	    !(sattr->ia_valid & ATTR_ATIME_SET))
1782 		sattr->ia_valid |= ATTR_ATIME;
1783 
1784 	if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
1785 	    !(sattr->ia_valid & ATTR_MTIME_SET))
1786 		sattr->ia_valid |= ATTR_MTIME;
1787 }
1788 
1789 /*
1790  * Returns a referenced nfs4_state
1791  */
static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res)
{
	struct nfs4_state_owner  *sp;
	struct nfs4_state     *state = NULL;
	struct nfs_server       *server = NFS_SERVER(dir);
	struct nfs4_opendata *opendata;
	int status;

	/* Protect against reboot recovery conflicts */
	status = -ENOMEM;
	sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
	if (sp == NULL) {
		dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
		goto out_err;
	}
	status = nfs4_recover_expired_lease(server);
	if (status != 0)
		goto err_put_state_owner;
	/* Return any delegation whose mode conflicts with this open */
	if (dentry->d_inode != NULL)
		nfs4_return_incompatible_delegation(dentry->d_inode, fmode);
	status = -ENOMEM;
	opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, GFP_KERNEL);
	if (opendata == NULL)
		goto err_put_state_owner;

	if (dentry->d_inode != NULL)
		opendata->state = nfs4_get_open_state(dentry->d_inode, sp);

	status = _nfs4_proc_open(opendata);
	if (status != 0)
		goto err_opendata_put;

	state = nfs4_opendata_to_nfs4_state(opendata);
	status = PTR_ERR(state);
	if (IS_ERR(state))
		goto err_opendata_put;
	if (server->caps & NFS_CAP_POSIX_LOCK)
		set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);

	/* Exclusive create: push the remaining attributes via SETATTR */
	if (opendata->o_arg.open_flags & O_EXCL) {
		nfs4_exclusive_attrset(opendata, sattr);

		nfs_fattr_init(opendata->o_res.f_attr);
		status = nfs4_do_setattr(state->inode, cred,
				opendata->o_res.f_attr, sattr,
				state);
		if (status == 0)
			nfs_setattr_update_inode(state->inode, sattr);
		nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
	}
	nfs4_opendata_put(opendata);
	nfs4_put_state_owner(sp);
	*res = state;
	return 0;
err_opendata_put:
	nfs4_opendata_put(opendata);
err_put_state_owner:
	nfs4_put_state_owner(sp);
out_err:
	*res = NULL;
	return status;
}
1854 
1855 
/*
 * Retry wrapper for _nfs4_do_open: BAD_SEQID, BAD_STATEID and
 * delegation races (-EAGAIN) are retried directly; everything else
 * goes through the generic exception handler.
 * Returns a referenced nfs4_state or an ERR_PTR.
 */
static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred)
{
	struct nfs4_exception exception = { };
	struct nfs4_state *res;
	int status;

	do {
		status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, &res);
		if (status == 0)
			break;
		/* NOTE: BAD_SEQID means the server and client disagree about the
		 * book-keeping w.r.t. state-changing operations
		 * (OPEN/CLOSE/LOCK/LOCKU...)
		 * It is actually a sign of a bug on the client or on the server.
		 *
		 * If we receive a BAD_SEQID error in the particular case of
		 * doing an OPEN, we assume that nfs_increment_open_seqid() will
		 * have unhashed the old state_owner for us, and that we can
		 * therefore safely retry using a new one. We should still warn
		 * the user though...
		 */
		if (status == -NFS4ERR_BAD_SEQID) {
			pr_warn_ratelimited("NFS: v4 server %s "
					" returned a bad sequence-id error!\n",
					NFS_SERVER(dir)->nfs_client->cl_hostname);
			exception.retry = 1;
			continue;
		}
		/*
		 * BAD_STATEID on OPEN means that the server cancelled our
		 * state before it received the OPEN_CONFIRM.
		 * Recover by retrying the request as per the discussion
		 * on Page 181 of RFC3530.
		 */
		if (status == -NFS4ERR_BAD_STATEID) {
			exception.retry = 1;
			continue;
		}
		if (status == -EAGAIN) {
			/* We must have found a delegation */
			exception.retry = 1;
			continue;
		}
		res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
					status, &exception));
	} while (exception.retry);
	return res;
}
1904 
1905 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
1906 			    struct nfs_fattr *fattr, struct iattr *sattr,
1907 			    struct nfs4_state *state)
1908 {
1909 	struct nfs_server *server = NFS_SERVER(inode);
1910         struct nfs_setattrargs  arg = {
1911                 .fh             = NFS_FH(inode),
1912                 .iap            = sattr,
1913 		.server		= server,
1914 		.bitmask = server->attr_bitmask,
1915         };
1916         struct nfs_setattrres  res = {
1917 		.fattr		= fattr,
1918 		.server		= server,
1919         };
1920         struct rpc_message msg = {
1921 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
1922 		.rpc_argp	= &arg,
1923 		.rpc_resp	= &res,
1924 		.rpc_cred	= cred,
1925         };
1926 	unsigned long timestamp = jiffies;
1927 	int status;
1928 
1929 	nfs_fattr_init(fattr);
1930 
1931 	if (state != NULL) {
1932 		nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
1933 				current->files, current->tgid);
1934 	} else if (nfs4_copy_delegation_stateid(&arg.stateid, inode,
1935 				FMODE_WRITE)) {
1936 		/* Use that stateid */
1937 	} else
1938 		nfs4_stateid_copy(&arg.stateid, &zero_stateid);
1939 
1940 	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
1941 	if (status == 0 && state != NULL)
1942 		renew_lease(server, timestamp);
1943 	return status;
1944 }
1945 
/*
 * SETATTR with retry; translates NFS4ERR_OPENMODE on state that is not
 * open for write into EBADF, or EACCES when invoked from open()
 * (ATTR_OPEN set).  Everything else goes through the exception handler.
 */
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
			   struct nfs_fattr *fattr, struct iattr *sattr,
			   struct nfs4_state *state)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_exception exception = {
		.state = state,
		.inode = inode,
	};
	int err;
	do {
		err = _nfs4_do_setattr(inode, cred, fattr, sattr, state);
		switch (err) {
		case -NFS4ERR_OPENMODE:
			if (state && !(state->state & FMODE_WRITE)) {
				err = -EBADF;
				if (sattr->ia_valid & ATTR_OPEN)
					err = -EACCES;
				goto out;
			}
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
out:
	return err;
}
1972 
/* Per-call state for an asynchronous CLOSE/OPEN_DOWNGRADE RPC. */
struct nfs4_closedata {
	struct inode *inode;		/* inode being closed */
	struct nfs4_state *state;	/* open state being wound down */
	struct nfs_closeargs arg;	/* RPC arguments */
	struct nfs_closeres res;	/* RPC results */
	struct nfs_fattr fattr;		/* backing storage for res.fattr */
	unsigned long timestamp;	/* RPC start time, for lease renewal */
	bool roc;			/* pNFS return-on-close in progress (see pnfs_roc_*) */
	u32 roc_barrier;		/* layout barrier from pnfs_roc_drain() */
};
1983 
/*
 * rpc_release callback for the CLOSE state machine: drop every reference
 * taken by nfs4_do_close() and free the calldata.
 */
static void nfs4_free_closedata(void *data)
{
	struct nfs4_closedata *calldata = data;
	/* Capture the owner and superblock pointers now: after
	 * nfs4_put_open_state() below, calldata->state must not be
	 * dereferenced again. */
	struct nfs4_state_owner *sp = calldata->state->owner;
	struct super_block *sb = calldata->state->inode->i_sb;

	if (calldata->roc)
		pnfs_roc_release(calldata->state->inode);
	nfs4_put_open_state(calldata->state);
	nfs_free_seqid(calldata->arg.seqid);
	nfs4_put_state_owner(sp);
	nfs_sb_deactive(sb);	/* matches nfs_sb_active() in nfs4_do_close() */
	kfree(calldata);
}
1998 
1999 static void nfs4_close_clear_stateid_flags(struct nfs4_state *state,
2000 		fmode_t fmode)
2001 {
2002 	spin_lock(&state->owner->so_lock);
2003 	if (!(fmode & FMODE_READ))
2004 		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
2005 	if (!(fmode & FMODE_WRITE))
2006 		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
2007 	clear_bit(NFS_O_RDWR_STATE, &state->flags);
2008 	spin_unlock(&state->owner->so_lock);
2009 }
2010 
/*
 * rpc_call_done callback for CLOSE/OPEN_DOWNGRADE: record the new stateid
 * on success, or decide whether the call should be retried.
 */
static void nfs4_close_done(struct rpc_task *task, void *data)
{
	struct nfs4_closedata *calldata = data;
	struct nfs4_state *state = calldata->state;
	struct nfs_server *server = NFS_SERVER(calldata->inode);

	dprintk("%s: begin!\n", __func__);
	if (!nfs4_sequence_done(task, &calldata->res.seq_res))
		return;
        /* hmm. we are done with the inode, and in the process of freeing
	 * the state_owner. we keep this around to process errors
	 */
	switch (task->tk_status) {
		case 0:
			/* Success: publish the returned stateid and renew
			 * the lease from the pre-call timestamp. */
			if (calldata->roc)
				pnfs_roc_set_barrier(state->inode,
						     calldata->roc_barrier);
			nfs_set_open_stateid(state, &calldata->res.stateid, 0);
			renew_lease(server, calldata->timestamp);
			nfs4_close_clear_stateid_flags(state,
					calldata->arg.fmode);
			break;
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_OLD_STATEID:
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_EXPIRED:
			/* A full CLOSE (fmode == 0) of a dead stateid is
			 * effectively done; an OPEN_DOWNGRADE falls through
			 * to generic error handling below. */
			if (calldata->arg.fmode == 0)
				break;
		default:
			if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
				rpc_restart_call_prepare(task);
	}
	nfs_release_seqid(calldata->arg.seqid);
	nfs_refresh_inode(calldata->inode, calldata->res.fattr);
	dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
}
2047 
/*
 * rpc_call_prepare callback: work out whether anything needs to go on the
 * wire at all and, if so, whether it is a full CLOSE or an OPEN_DOWNGRADE.
 */
static void nfs4_close_prepare(struct rpc_task *task, void *data)
{
	struct nfs4_closedata *calldata = data;
	struct nfs4_state *state = calldata->state;
	int call_close = 0;

	dprintk("%s: begin!\n", __func__);
	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
		return;

	/* Assume a downgrade keeping both modes, then strip the modes
	 * that no longer have any openers. */
	task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
	calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
	spin_lock(&state->owner->so_lock);
	/* Calculate the change in open mode */
	if (state->n_rdwr == 0) {
		if (state->n_rdonly == 0) {
			call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
			calldata->arg.fmode &= ~FMODE_READ;
		}
		if (state->n_wronly == 0) {
			call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
			calldata->arg.fmode &= ~FMODE_WRITE;
		}
	}
	spin_unlock(&state->owner->so_lock);

	if (!call_close) {
		/* Note: exit _without_ calling nfs4_close_done */
		task->tk_action = NULL;
		goto out;
	}

	/* No modes left at all: this is a real CLOSE. For return-on-close
	 * layouts, wait until outstanding layout I/O drains. */
	if (calldata->arg.fmode == 0) {
		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
		if (calldata->roc &&
		    pnfs_roc_drain(calldata->inode, &calldata->roc_barrier)) {
			rpc_sleep_on(&NFS_SERVER(calldata->inode)->roc_rpcwaitq,
				     task, NULL);
			goto out;
		}
	}

	nfs_fattr_init(calldata->res.fattr);
	calldata->timestamp = jiffies;	/* for renew_lease() on completion */
	if (nfs4_setup_sequence(NFS_SERVER(calldata->inode),
				&calldata->arg.seq_args,
				&calldata->res.seq_res,
				task))
		goto out;
	rpc_call_start(task);
out:
	dprintk("%s: done!\n", __func__);
}
2103 
/* RPC callbacks driving the asynchronous CLOSE/OPEN_DOWNGRADE machinery */
static const struct rpc_call_ops nfs4_close_ops = {
	.rpc_call_prepare = nfs4_close_prepare,
	.rpc_call_done = nfs4_close_done,
	.rpc_release = nfs4_free_closedata,
};
2109 
2110 /*
2111  * It is possible for data to be read/written from a mem-mapped file
2112  * after the sys_close call (which hits the vfs layer as a flush).
2113  * This means that we can't safely call nfsv4 close on a file until
2114  * the inode is cleared. This in turn means that we are not good
2115  * NFSv4 citizens - we do not indicate to the server to update the file's
2116  * share state even when we are done with one of the three share
 * stateids in the inode.
2118  *
2119  * NOTE: Caller must be holding the sp->so_owner semaphore!
2120  */
int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_closedata *calldata;
	struct nfs4_state_owner *sp = state->owner;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
		.rpc_cred = state->owner->so_cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_close_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int status = -ENOMEM;

	calldata = kzalloc(sizeof(*calldata), gfp_mask);
	if (calldata == NULL)
		goto out;
	nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1);
	calldata->inode = state->inode;
	calldata->state = state;
	calldata->arg.fh = NFS_FH(state->inode);
	calldata->arg.stateid = &state->open_stateid;
	/* Serialization for the sequence id */
	calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask);
	if (calldata->arg.seqid == NULL)
		goto out_free_calldata;
	/* fmode is recomputed in nfs4_close_prepare() once we know which
	 * open modes are actually still in use. */
	calldata->arg.fmode = 0;
	calldata->arg.bitmask = server->cache_consistency_bitmask;
	calldata->res.fattr = &calldata->fattr;
	calldata->res.seqid = calldata->arg.seqid;
	calldata->res.server = server;
	calldata->roc = roc;
	/* Pin the superblock; released in nfs4_free_closedata() */
	nfs_sb_active(calldata->inode->i_sb);

	msg.rpc_argp = &calldata->arg;
	msg.rpc_resp = &calldata->res;
	task_setup_data.callback_data = calldata;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = 0;
	if (wait)
		status = rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return status;
out_free_calldata:
	kfree(calldata);
out:
	/* Error path: drop the references that nfs4_free_closedata()
	 * would otherwise have released. */
	if (roc)
		pnfs_roc_release(state->inode);
	nfs4_put_open_state(state);
	nfs4_put_state_owner(sp);
	return status;
}
2180 
2181 static struct inode *
2182 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr)
2183 {
2184 	struct nfs4_state *state;
2185 
2186 	/* Protect against concurrent sillydeletes */
2187 	state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr, ctx->cred);
2188 	if (IS_ERR(state))
2189 		return ERR_CAST(state);
2190 	ctx->state = state;
2191 	return igrab(state->inode);
2192 }
2193 
2194 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
2195 {
2196 	if (ctx->state == NULL)
2197 		return;
2198 	if (is_sync)
2199 		nfs4_close_sync(ctx->state, ctx->mode);
2200 	else
2201 		nfs4_close_state(ctx->state, ctx->mode);
2202 }
2203 
/*
 * Issue a SERVER_CAPS compound for fhandle and translate the attribute
 * bitmask the server supports into NFS_CAP_* flags on the nfs_server.
 */
static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
{
	struct nfs4_server_caps_arg args = {
		.fhandle = fhandle,
	};
	struct nfs4_server_caps_res res = {};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;

	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
	if (status == 0) {
		memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
		/* Reset the capability bits we derive below, then re-set
		 * each one the server actually advertises. */
		server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
				NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
				NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
				NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
				NFS_CAP_CTIME|NFS_CAP_MTIME);
		if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
			server->caps |= NFS_CAP_ACLS;
		if (res.has_links != 0)
			server->caps |= NFS_CAP_HARDLINKS;
		if (res.has_symlinks != 0)
			server->caps |= NFS_CAP_SYMLINKS;
		if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
			server->caps |= NFS_CAP_FILEID;
		if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
			server->caps |= NFS_CAP_MODE;
		if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
			server->caps |= NFS_CAP_NLINK;
		if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
			server->caps |= NFS_CAP_OWNER;
		if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
			server->caps |= NFS_CAP_OWNER_GROUP;
		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
			server->caps |= NFS_CAP_ATIME;
		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
			server->caps |= NFS_CAP_CTIME;
		if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
			server->caps |= NFS_CAP_MTIME;

		/* The cache-consistency bitmask is the supported subset of
		 * {change, size, ctime, mtime}. */
		memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
		server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
		server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
		server->acl_bitmask = res.acl_bitmask;
		server->fh_expire_type = res.fh_expire_type;
	}

	return status;
}
2257 
2258 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2259 {
2260 	struct nfs4_exception exception = { };
2261 	int err;
2262 	do {
2263 		err = nfs4_handle_exception(server,
2264 				_nfs4_server_capabilities(server, fhandle),
2265 				&exception);
2266 	} while (exception.retry);
2267 	return err;
2268 }
2269 
/*
 * Send a LOOKUP_ROOT (PUTROOTFH + GETATTR/GETFH) to retrieve the file
 * handle and attributes of the server's pseudo-fs root.
 */
static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *info)
{
	struct nfs4_lookup_root_arg args = {
		.bitmask = nfs4_fattr_bitmap,
	};
	struct nfs4_lookup_res res = {
		.server = server,
		.fattr = info->fattr,
		.fh = fhandle,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	nfs_fattr_init(info->fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
2290 
2291 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2292 		struct nfs_fsinfo *info)
2293 {
2294 	struct nfs4_exception exception = { };
2295 	int err;
2296 	do {
2297 		err = _nfs4_lookup_root(server, fhandle, info);
2298 		switch (err) {
2299 		case 0:
2300 		case -NFS4ERR_WRONGSEC:
2301 			goto out;
2302 		default:
2303 			err = nfs4_handle_exception(server, err, &exception);
2304 		}
2305 	} while (exception.retry);
2306 out:
2307 	return err;
2308 }
2309 
2310 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
2311 				struct nfs_fsinfo *info, rpc_authflavor_t flavor)
2312 {
2313 	struct rpc_auth *auth;
2314 	int ret;
2315 
2316 	auth = rpcauth_create(flavor, server->client);
2317 	if (!auth) {
2318 		ret = -EIO;
2319 		goto out;
2320 	}
2321 	ret = nfs4_lookup_root(server, fhandle, info);
2322 out:
2323 	return ret;
2324 }
2325 
/*
 * Probe the server's root with each locally available security flavor in
 * turn (all GSS pseudoflavors, then AUTH_NULL) until one works.
 */
static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
			      struct nfs_fsinfo *info)
{
	int i, len, status = 0;
	rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS];

	/* Build the candidate list, with AUTH_NULL as the last resort */
	len = gss_mech_list_pseudoflavors(&flav_array[0]);
	flav_array[len] = RPC_AUTH_NULL;
	len += 1;

	for (i = 0; i < len; i++) {
		status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
		if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
			continue;
		break;
	}
	/*
	 * -EACCES could mean that the user doesn't have correct permissions
	 * to access the mount.  It could also mean that we tried to mount
	 * with a gss auth flavor, but rpc.gssd isn't running.  Either way,
	 * existing mount programs don't handle -EACCES very well so it should
	 * be mapped to -EPERM instead.
	 */
	if (status == -EACCES)
		status = -EPERM;
	return status;
}
2353 
2354 /*
2355  * get the file handle for the "/" directory on the server
2356  */
static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
			      struct nfs_fsinfo *info)
{
	int minor_version = server->nfs_client->cl_minorversion;
	int status = nfs4_lookup_root(server, fhandle, info);
	/* On WRONGSEC, fall back to flavor negotiation unless the user
	 * pinned a flavor explicitly (NFS_MOUNT_SECFLAVOUR). */
	if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR))
		/*
		 * A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM
		 * by nfs4_map_errors() as this function exits.
		 */
		status = nfs_v4_minor_ops[minor_version]->find_root_sec(server, fhandle, info);
	/* With the root in hand, probe capabilities then fsinfo */
	if (status == 0)
		status = nfs4_server_capabilities(server, fhandle);
	if (status == 0)
		status = nfs4_do_fsinfo(server, fhandle, info);
	return nfs4_map_errors(status);
}
2374 
2375 /*
2376  * Get locations and (maybe) other attributes of a referral.
2377  * Note that we'll actually follow the referral later when
2378  * we detect fsid mismatch in inode revalidation
2379  */
static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
			     const struct qstr *name, struct nfs_fattr *fattr,
			     struct nfs_fh *fhandle)
{
	int status = -ENOMEM;
	struct page *page = NULL;
	struct nfs4_fs_locations *locations = NULL;

	/* Scratch page for the fs_locations XDR payload */
	page = alloc_page(GFP_KERNEL);
	if (page == NULL)
		goto out;
	locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
	if (locations == NULL)
		goto out;

	status = nfs4_proc_fs_locations(client, dir, name, locations, page);
	if (status != 0)
		goto out;
	/* Make sure server returned a different fsid for the referral */
	if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
		dprintk("%s: server did not return a different fsid for"
			" a referral at %s\n", __func__, name->name);
		status = -EIO;
		goto out;
	}
	/* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
	nfs_fixup_referral_attributes(&locations->fattr);

	/* replace the lookup nfs_fattr with the locations nfs_fattr */
	memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
	/* An empty fh tells the caller this inode has no local handle */
	memset(fhandle, 0, sizeof(struct nfs_fh));
out:
	if (page)
		__free_page(page);
	kfree(locations);
	return status;
}
2417 
/* Single synchronous GETATTR for fhandle, filling in *fattr. */
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
	struct nfs4_getattr_arg args = {
		.fh = fhandle,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_getattr_res res = {
		.fattr = fattr,
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	nfs_fattr_init(fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
2437 
2438 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2439 {
2440 	struct nfs4_exception exception = { };
2441 	int err;
2442 	do {
2443 		err = nfs4_handle_exception(server,
2444 				_nfs4_proc_getattr(server, fhandle, fattr),
2445 				&exception);
2446 	} while (exception.retry);
2447 	return err;
2448 }
2449 
2450 /*
 * The file is not closed if it is opened due to a request to change
2452  * the size of the file. The open call will not be needed once the
2453  * VFS layer lookup-intents are implemented.
2454  *
2455  * Close is called when the inode is destroyed.
2456  * If we haven't opened the file for O_WRONLY, we
2457  * need to in the size_change case to obtain a stateid.
2458  *
2459  * Got race?
2460  * Because OPEN is always done by name in nfsv4, it is
2461  * possible that we opened a different file by the same
2462  * name.  We can recognize this race condition, but we
2463  * can't do anything about it besides returning an error.
2464  *
2465  * This will be fixed with VFS changes (lookup-intent).
2466  */
static int
nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
		  struct iattr *sattr)
{
	struct inode *inode = dentry->d_inode;
	struct rpc_cred *cred = NULL;
	struct nfs4_state *state = NULL;
	int status;

	/* Some layout drivers want the layout returned before a setattr */
	if (pnfs_ld_layoutret_on_setattr(inode))
		pnfs_return_layout(inode);

	nfs_fattr_init(fattr);

	/* Search for an existing open(O_WRITE) file */
	if (sattr->ia_valid & ATTR_FILE) {
		struct nfs_open_context *ctx;

		ctx = nfs_file_open_context(sattr->ia_file);
		if (ctx) {
			/* Reuse the open file's credential and stateid */
			cred = ctx->cred;
			state = ctx->state;
		}
	}

	/* Deal with open(O_TRUNC) */
	if (sattr->ia_valid & ATTR_OPEN)
		sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);

	status = nfs4_do_setattr(inode, cred, fattr, sattr, state);
	if (status == 0)
		nfs_setattr_update_inode(inode, sattr);
	return status;
}
2501 
/* Single synchronous LOOKUP of name in dir, returning fh and attributes. */
static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
		const struct qstr *name, struct nfs_fh *fhandle,
		struct nfs_fattr *fattr)
{
	struct nfs_server *server = NFS_SERVER(dir);
	int		       status;
	struct nfs4_lookup_arg args = {
		.bitmask = server->attr_bitmask,
		.dir_fh = NFS_FH(dir),
		.name = name,
	};
	struct nfs4_lookup_res res = {
		.server = server,
		.fattr = fattr,
		.fh = fhandle,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	nfs_fattr_init(fattr);

	dprintk("NFS call  lookup %s\n", name->name);
	/* Note: clnt may be a clone with a different auth flavor */
	status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
	dprintk("NFS reply lookup: %d\n", status);
	return status;
}
2531 
2532 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
2533 {
2534 	fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
2535 		NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
2536 	fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
2537 	fattr->nlink = 2;
2538 }
2539 
/*
 * LOOKUP with retry, referral handling and security renegotiation.
 * On NFS4ERR_WRONGSEC a new rpc client with a renegotiated flavor is
 * created and the lookup retried once; on success it is handed back to
 * the caller via *clnt, otherwise it is shut down here.
 */
static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
				   struct qstr *name, struct nfs_fh *fhandle,
				   struct nfs_fattr *fattr)
{
	struct nfs4_exception exception = { };
	struct rpc_clnt *client = *clnt;
	int err;
	do {
		err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr);
		switch (err) {
		case -NFS4ERR_BADNAME:
			err = -ENOENT;
			goto out;
		case -NFS4ERR_MOVED:
			/* Name is a referral: fetch fs_locations instead */
			err = nfs4_get_referral(client, dir, name, fattr, fhandle);
			goto out;
		case -NFS4ERR_WRONGSEC:
			err = -EPERM;
			/* Only renegotiate once */
			if (client != *clnt)
				goto out;

			client = nfs4_create_sec_client(client, dir, name);
			if (IS_ERR(client))
				return PTR_ERR(client);

			exception.retry = 1;
			break;
		default:
			err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
		}
	} while (exception.retry);

out:
	if (err == 0)
		*clnt = client;
	else if (client != *clnt)
		rpc_shutdown_client(client);

	return err;
}
2580 
/*
 * Standard LOOKUP entry point. If the common helper renegotiated a new
 * rpc client for security reasons, drop it again (it was only needed for
 * this one call) and fix up the attributes of the crossed mountpoint.
 */
static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
			    struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
	struct rpc_clnt *client = NFS_CLIENT(dir);
	int status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);

	if (client != NFS_CLIENT(dir)) {
		rpc_shutdown_client(client);
		nfs_fixup_secinfo_attributes(fattr);
	}
	return status;
}
2594 
/*
 * LOOKUP used when crossing a mountpoint: runs against a clone of the
 * parent's rpc client and returns the client (possibly renegotiated) for
 * the new mount on success, or an ERR_PTR on failure.
 */
struct rpc_clnt *
nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
			    struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
	struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir));
	int status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);

	if (status < 0) {
		rpc_shutdown_client(client);
		return ERR_PTR(status);
	}
	return client;
}
2609 
/*
 * Single ACCESS call: translate the VFS MAY_* mask into NFS4_ACCESS_*
 * bits, ask the server, and translate the answer back into entry->mask.
 */
static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_accessargs args = {
		.fh = NFS_FH(inode),
		.bitmask = server->cache_consistency_bitmask,
	};
	struct nfs4_accessres res = {
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
		.rpc_argp = &args,
		.rpc_resp = &res,
		.rpc_cred = entry->cred,
	};
	int mode = entry->mask;
	int status;

	/*
	 * Determine which access bits we want to ask for...
	 * Directories distinguish LOOKUP/DELETE; regular files EXECUTE.
	 */
	if (mode & MAY_READ)
		args.access |= NFS4_ACCESS_READ;
	if (S_ISDIR(inode->i_mode)) {
		if (mode & MAY_WRITE)
			args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
		if (mode & MAY_EXEC)
			args.access |= NFS4_ACCESS_LOOKUP;
	} else {
		if (mode & MAY_WRITE)
			args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
		if (mode & MAY_EXEC)
			args.access |= NFS4_ACCESS_EXECUTE;
	}

	res.fattr = nfs_alloc_fattr();
	if (res.fattr == NULL)
		return -ENOMEM;

	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
	if (!status) {
		/* Map the granted NFS4_ACCESS_* bits back to MAY_* */
		entry->mask = 0;
		if (res.access & NFS4_ACCESS_READ)
			entry->mask |= MAY_READ;
		if (res.access & (NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE))
			entry->mask |= MAY_WRITE;
		if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE))
			entry->mask |= MAY_EXEC;
		nfs_refresh_inode(inode, res.fattr);
	}
	nfs_free_fattr(res.fattr);
	return status;
}
2664 
2665 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
2666 {
2667 	struct nfs4_exception exception = { };
2668 	int err;
2669 	do {
2670 		err = nfs4_handle_exception(NFS_SERVER(inode),
2671 				_nfs4_proc_access(inode, entry),
2672 				&exception);
2673 	} while (exception.retry);
2674 	return err;
2675 }
2676 
2677 /*
2678  * TODO: For the time being, we don't try to get any attributes
2679  * along with any of the zero-copy operations READ, READDIR,
2680  * READLINK, WRITE.
2681  *
2682  * In the case of the first three, we want to put the GETATTR
2683  * after the read-type operation -- this is because it is hard
2684  * to predict the length of a GETATTR response in v4, and thus
2685  * align the READ data correctly.  This means that the GETATTR
2686  * may end up partially falling into the page cache, and we should
2687  * shift it into the 'tail' of the xdr_buf before processing.
2688  * To do this efficiently, we need to know the total length
2689  * of data received, which doesn't seem to be available outside
2690  * of the RPC layer.
2691  *
2692  * In the case of WRITE, we also want to put the GETATTR after
2693  * the operation -- in this case because we want to make sure
2694  * we get the post-operation mtime and size.  This means that
2695  * we can't use xdr_encode_pages() as written: we need a variant
2696  * of it which would leave room in the 'tail' iovec.
2697  *
2698  * Both of these changes to the XDR layer would in fact be quite
2699  * minor, but I decided to leave them for a subsequent patch.
2700  */
/* Single READLINK call, placing the link text into page at pgbase. */
static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
		unsigned int pgbase, unsigned int pglen)
{
	struct nfs4_readlink args = {
		.fh       = NFS_FH(inode),
		.pgbase	  = pgbase,
		.pglen    = pglen,
		.pages    = &page,
	};
	struct nfs4_readlink_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
}
2719 
2720 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
2721 		unsigned int pgbase, unsigned int pglen)
2722 {
2723 	struct nfs4_exception exception = { };
2724 	int err;
2725 	do {
2726 		err = nfs4_handle_exception(NFS_SERVER(inode),
2727 				_nfs4_proc_readlink(inode, page, pgbase, pglen),
2728 				&exception);
2729 	} while (exception.retry);
2730 	return err;
2731 }
2732 
2733 /*
2734  * Got race?
2735  * We will need to arrange for the VFS layer to provide an atomic open.
2736  * Until then, this create/open method is prone to inefficiency and race
2737  * conditions due to the lookup, create, and open VFS calls from sys_open()
2738  * placed on the wire.
2739  *
2740  * Given the above sorry state of affairs, I'm simply sending an OPEN.
2741  * The file will be opened again in the subsequent VFS open call
2742  * (nfs4_proc_file_open).
2743  *
2744  * The open for read will just hang around to be used by any process that
2745  * opens the file O_RDONLY. This will all be resolved with the VFS changes.
2746  */
2747 
static int
nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
                 int flags, struct nfs_open_context *ctx)
{
	struct dentry *de = dentry;
	struct nfs4_state *state;
	struct rpc_cred *cred = NULL;
	fmode_t fmode = 0;
	int status = 0;

	/* Prefer the open context's dentry/cred/mode when available */
	if (ctx != NULL) {
		cred = ctx->cred;
		de = ctx->dentry;
		fmode = ctx->mode;
	}
	sattr->ia_mode &= ~current_umask();
	state = nfs4_do_open(dir, de, fmode, flags, sattr, cred);
	/* Drop the stale dentry; re-added below with the opened inode */
	d_drop(dentry);
	if (IS_ERR(state)) {
		status = PTR_ERR(state);
		goto out;
	}
	d_add(dentry, igrab(state->inode));
	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
	if (ctx != NULL)
		/* The context takes ownership of the open state */
		ctx->state = state;
	else
		nfs4_close_sync(state, fmode);
out:
	return status;
}
2779 
/* Single synchronous REMOVE of name from dir, updating the directory's
 * change attribute and cached attributes on success. */
static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
{
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_removeargs args = {
		.fh = NFS_FH(dir),
		.name = *name,
		.bitmask = server->attr_bitmask,
	};
	struct nfs_removeres res = {
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status = -ENOMEM;

	res.dir_attr = nfs_alloc_fattr();
	if (res.dir_attr == NULL)
		goto out;

	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
	if (status == 0) {
		update_changeattr(dir, &res.cinfo);
		nfs_post_op_update_inode(dir, res.dir_attr);
	}
	nfs_free_fattr(res.dir_attr);
out:
	return status;
}
2811 
2812 static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
2813 {
2814 	struct nfs4_exception exception = { };
2815 	int err;
2816 	do {
2817 		err = nfs4_handle_exception(NFS_SERVER(dir),
2818 				_nfs4_proc_remove(dir, name),
2819 				&exception);
2820 	} while (exception.retry);
2821 	return err;
2822 }
2823 
2824 static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
2825 {
2826 	struct nfs_server *server = NFS_SERVER(dir);
2827 	struct nfs_removeargs *args = msg->rpc_argp;
2828 	struct nfs_removeres *res = msg->rpc_resp;
2829 
2830 	args->bitmask = server->cache_consistency_bitmask;
2831 	res->server = server;
2832 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
2833 	nfs41_init_sequence(&args->seq_args, &res->seq_res, 1);
2834 }
2835 
2836 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
2837 {
2838 	if (nfs4_setup_sequence(NFS_SERVER(data->dir),
2839 				&data->args.seq_args,
2840 				&data->res.seq_res,
2841 				task))
2842 		return;
2843 	rpc_call_start(task);
2844 }
2845 
/*
 * Completion for async unlink. Returns 0 if the call should be retried
 * (sequence not done, or the error handler asked for a replay), 1 when
 * processing is complete.
 */
static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
{
	struct nfs_removeres *res = task->tk_msg.rpc_resp;

	if (!nfs4_sequence_done(task, &res->seq_res))
		return 0;
	if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
		return 0;
	update_changeattr(dir, &res->cinfo);
	nfs_post_op_update_inode(dir, res->dir_attr);
	return 1;
}
2858 
2859 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
2860 {
2861 	struct nfs_server *server = NFS_SERVER(dir);
2862 	struct nfs_renameargs *arg = msg->rpc_argp;
2863 	struct nfs_renameres *res = msg->rpc_resp;
2864 
2865 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
2866 	arg->bitmask = server->attr_bitmask;
2867 	res->server = server;
2868 	nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1);
2869 }
2870 
2871 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
2872 {
2873 	if (nfs4_setup_sequence(NFS_SERVER(data->old_dir),
2874 				&data->args.seq_args,
2875 				&data->res.seq_res,
2876 				task))
2877 		return;
2878 	rpc_call_start(task);
2879 }
2880 
/*
 * Completion for async rename. Returns 0 if the call should be retried,
 * 1 when processing is complete; on success both directories' change
 * attributes and cached attributes are refreshed.
 */
static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
				 struct inode *new_dir)
{
	struct nfs_renameres *res = task->tk_msg.rpc_resp;

	if (!nfs4_sequence_done(task, &res->seq_res))
		return 0;
	if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
		return 0;

	update_changeattr(old_dir, &res->old_cinfo);
	nfs_post_op_update_inode(old_dir, res->old_fattr);
	update_changeattr(new_dir, &res->new_cinfo);
	nfs_post_op_update_inode(new_dir, res->new_fattr);
	return 1;
}
2897 
/* Single synchronous RENAME of old_name in old_dir to new_name in
 * new_dir, updating both directories' cached state on success. */
static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
		struct inode *new_dir, struct qstr *new_name)
{
	struct nfs_server *server = NFS_SERVER(old_dir);
	struct nfs_renameargs arg = {
		.old_dir = NFS_FH(old_dir),
		.new_dir = NFS_FH(new_dir),
		.old_name = old_name,
		.new_name = new_name,
		.bitmask = server->attr_bitmask,
	};
	struct nfs_renameres res = {
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	int status = -ENOMEM;

	res.old_fattr = nfs_alloc_fattr();
	res.new_fattr = nfs_alloc_fattr();
	if (res.old_fattr == NULL || res.new_fattr == NULL)
		goto out;

	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
	if (!status) {
		update_changeattr(old_dir, &res.old_cinfo);
		nfs_post_op_update_inode(old_dir, res.old_fattr);
		update_changeattr(new_dir, &res.new_cinfo);
		nfs_post_op_update_inode(new_dir, res.new_fattr);
	}
out:
	/* nfs_free_fattr(NULL) is a no-op, so partial allocation is fine */
	nfs_free_fattr(res.new_fattr);
	nfs_free_fattr(res.old_fattr);
	return status;
}
2936 
2937 static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
2938 		struct inode *new_dir, struct qstr *new_name)
2939 {
2940 	struct nfs4_exception exception = { };
2941 	int err;
2942 	do {
2943 		err = nfs4_handle_exception(NFS_SERVER(old_dir),
2944 				_nfs4_proc_rename(old_dir, old_name,
2945 					new_dir, new_name),
2946 				&exception);
2947 	} while (exception.retry);
2948 	return err;
2949 }
2950 
/*
 * One synchronous LINK round trip.  On success, updates the directory's
 * change attribute and the post-op attributes of both the directory and
 * the linked inode.
 */
static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_link_arg arg = {
		.fh     = NFS_FH(inode),
		.dir_fh = NFS_FH(dir),
		.name   = name,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_link_res res = {
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	int status = -ENOMEM;

	res.fattr = nfs_alloc_fattr();
	res.dir_attr = nfs_alloc_fattr();
	if (res.fattr == NULL || res.dir_attr == NULL)
		goto out;

	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
	if (!status) {
		update_changeattr(dir, &res.cinfo);
		nfs_post_op_update_inode(dir, res.dir_attr);
		nfs_post_op_update_inode(inode, res.fattr);
	}
out:
	/* nfs_free_fattr() tolerates NULL, so partial allocation is fine */
	nfs_free_fattr(res.dir_attr);
	nfs_free_fattr(res.fattr);
	return status;
}
2986 
2987 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
2988 {
2989 	struct nfs4_exception exception = { };
2990 	int err;
2991 	do {
2992 		err = nfs4_handle_exception(NFS_SERVER(inode),
2993 				_nfs4_proc_link(inode, dir, name),
2994 				&exception);
2995 	} while (exception.retry);
2996 	return err;
2997 }
2998 
/*
 * Bundle of the RPC message, arguments, results, and result storage for
 * a single NFSv4 CREATE-style operation (mkdir, symlink, mknod).
 */
struct nfs4_createdata {
	struct rpc_message msg;
	struct nfs4_create_arg arg;
	struct nfs4_create_res res;
	struct nfs_fh fh;		/* backing storage for res.fh */
	struct nfs_fattr fattr;		/* backing storage for res.fattr */
	struct nfs_fattr dir_fattr;	/* backing storage for res.dir_fattr */
};
3007 
/*
 * Allocate and initialize a nfs4_createdata for a CREATE of the given
 * file type in @dir.  Returns NULL on allocation failure.  Callers may
 * override msg.rpc_proc and arg fields (e.g. for SYMLINK) afterwards.
 */
static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
		struct qstr *name, struct iattr *sattr, u32 ftype)
{
	struct nfs4_createdata *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (data != NULL) {
		struct nfs_server *server = NFS_SERVER(dir);

		data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
		data->msg.rpc_argp = &data->arg;
		data->msg.rpc_resp = &data->res;
		data->arg.dir_fh = NFS_FH(dir);
		data->arg.server = server;
		data->arg.name = name;
		data->arg.attrs = sattr;
		data->arg.ftype = ftype;
		data->arg.bitmask = server->attr_bitmask;
		data->res.server = server;
		/* Point the result at the embedded storage */
		data->res.fh = &data->fh;
		data->res.fattr = &data->fattr;
		data->res.dir_fattr = &data->dir_fattr;
		nfs_fattr_init(data->res.fattr);
		nfs_fattr_init(data->res.dir_fattr);
	}
	return data;
}
3035 
/*
 * Issue the prepared CREATE-style RPC.  On success, refresh the parent
 * directory's cached attributes and instantiate the new dentry.
 */
static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
{
	int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
				    &data->arg.seq_args, &data->res.seq_res, 1);
	if (status == 0) {
		update_changeattr(dir, &data->res.dir_cinfo);
		nfs_post_op_update_inode(dir, data->res.dir_fattr);
		status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
	}
	return status;
}
3047 
/* Release a nfs4_createdata allocated by nfs4_alloc_createdata(). */
static void nfs4_free_createdata(struct nfs4_createdata *data)
{
	kfree(data);
}
3052 
/*
 * One SYMLINK round trip: create an NF4LNK object whose target text
 * (of length @len) is held in @page.  Rejects targets longer than
 * NFS4_MAXPATHLEN with -ENAMETOOLONG.
 */
static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
		struct page *page, unsigned int len, struct iattr *sattr)
{
	struct nfs4_createdata *data;
	int status = -ENAMETOOLONG;

	if (len > NFS4_MAXPATHLEN)
		goto out;

	status = -ENOMEM;
	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
	if (data == NULL)
		goto out;

	/* SYMLINK uses its own procedure plus the link target data */
	data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
	data->arg.u.symlink.pages = &page;
	data->arg.u.symlink.len = len;

	status = nfs4_do_create(dir, dentry, data);

	nfs4_free_createdata(data);
out:
	return status;
}
3077 
3078 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3079 		struct page *page, unsigned int len, struct iattr *sattr)
3080 {
3081 	struct nfs4_exception exception = { };
3082 	int err;
3083 	do {
3084 		err = nfs4_handle_exception(NFS_SERVER(dir),
3085 				_nfs4_proc_symlink(dir, dentry, page,
3086 							len, sattr),
3087 				&exception);
3088 	} while (exception.retry);
3089 	return err;
3090 }
3091 
/*
 * One MKDIR round trip: create an NF4DIR object named after @dentry
 * in @dir with the given attributes.
 */
static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
		struct iattr *sattr)
{
	struct nfs4_createdata *data;
	int status = -ENOMEM;

	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
	if (data == NULL)
		goto out;

	status = nfs4_do_create(dir, dentry, data);

	nfs4_free_createdata(data);
out:
	return status;
}
3108 
3109 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3110 		struct iattr *sattr)
3111 {
3112 	struct nfs4_exception exception = { };
3113 	int err;
3114 
3115 	sattr->ia_mode &= ~current_umask();
3116 	do {
3117 		err = nfs4_handle_exception(NFS_SERVER(dir),
3118 				_nfs4_proc_mkdir(dir, dentry, sattr),
3119 				&exception);
3120 	} while (exception.retry);
3121 	return err;
3122 }
3123 
3124 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3125 		u64 cookie, struct page **pages, unsigned int count, int plus)
3126 {
3127 	struct inode		*dir = dentry->d_inode;
3128 	struct nfs4_readdir_arg args = {
3129 		.fh = NFS_FH(dir),
3130 		.pages = pages,
3131 		.pgbase = 0,
3132 		.count = count,
3133 		.bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask,
3134 		.plus = plus,
3135 	};
3136 	struct nfs4_readdir_res res;
3137 	struct rpc_message msg = {
3138 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
3139 		.rpc_argp = &args,
3140 		.rpc_resp = &res,
3141 		.rpc_cred = cred,
3142 	};
3143 	int			status;
3144 
3145 	dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__,
3146 			dentry->d_parent->d_name.name,
3147 			dentry->d_name.name,
3148 			(unsigned long long)cookie);
3149 	nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
3150 	res.pgbase = args.pgbase;
3151 	status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
3152 	if (status >= 0) {
3153 		memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
3154 		status += args.pgbase;
3155 	}
3156 
3157 	nfs_invalidate_atime(dir);
3158 
3159 	dprintk("%s: returns %d\n", __func__, status);
3160 	return status;
3161 }
3162 
3163 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3164 		u64 cookie, struct page **pages, unsigned int count, int plus)
3165 {
3166 	struct nfs4_exception exception = { };
3167 	int err;
3168 	do {
3169 		err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode),
3170 				_nfs4_proc_readdir(dentry, cred, cookie,
3171 					pages, count, plus),
3172 				&exception);
3173 	} while (exception.retry);
3174 	return err;
3175 }
3176 
/*
 * One MKNOD round trip: create a special file (fifo, block/char device,
 * or socket).  The createdata defaults to NF4SOCK and is overridden for
 * the other supported types; device numbers are passed as specdata.
 */
static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
		struct iattr *sattr, dev_t rdev)
{
	struct nfs4_createdata *data;
	int mode = sattr->ia_mode;
	int status = -ENOMEM;

	BUG_ON(!(sattr->ia_valid & ATTR_MODE));
	BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode));

	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
	if (data == NULL)
		goto out;

	if (S_ISFIFO(mode))
		data->arg.ftype = NF4FIFO;
	else if (S_ISBLK(mode)) {
		data->arg.ftype = NF4BLK;
		data->arg.u.device.specdata1 = MAJOR(rdev);
		data->arg.u.device.specdata2 = MINOR(rdev);
	}
	else if (S_ISCHR(mode)) {
		data->arg.ftype = NF4CHR;
		data->arg.u.device.specdata1 = MAJOR(rdev);
		data->arg.u.device.specdata2 = MINOR(rdev);
	}

	status = nfs4_do_create(dir, dentry, data);

	nfs4_free_createdata(data);
out:
	return status;
}
3210 
3211 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3212 		struct iattr *sattr, dev_t rdev)
3213 {
3214 	struct nfs4_exception exception = { };
3215 	int err;
3216 
3217 	sattr->ia_mode &= ~current_umask();
3218 	do {
3219 		err = nfs4_handle_exception(NFS_SERVER(dir),
3220 				_nfs4_proc_mknod(dir, dentry, sattr, rdev),
3221 				&exception);
3222 	} while (exception.retry);
3223 	return err;
3224 }
3225 
/*
 * One STATFS round trip: fetch filesystem usage statistics into
 * @fsstat for the filesystem identified by @fhandle.
 */
static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
		 struct nfs_fsstat *fsstat)
{
	struct nfs4_statfs_arg args = {
		.fh = fhandle,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_statfs_res res = {
		.fsstat = fsstat,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	nfs_fattr_init(fsstat->fattr);
	return  nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
3245 
3246 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
3247 {
3248 	struct nfs4_exception exception = { };
3249 	int err;
3250 	do {
3251 		err = nfs4_handle_exception(server,
3252 				_nfs4_proc_statfs(server, fhandle, fsstat),
3253 				&exception);
3254 	} while (exception.retry);
3255 	return err;
3256 }
3257 
/*
 * One FSINFO round trip: fetch static filesystem information (transfer
 * size limits, lease time, etc.) into @fsinfo.
 */
static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *fsinfo)
{
	struct nfs4_fsinfo_arg args = {
		.fh = fhandle,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_fsinfo_res res = {
		.fsinfo = fsinfo,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
3276 
3277 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
3278 {
3279 	struct nfs4_exception exception = { };
3280 	int err;
3281 
3282 	do {
3283 		err = nfs4_handle_exception(server,
3284 				_nfs4_do_fsinfo(server, fhandle, fsinfo),
3285 				&exception);
3286 	} while (exception.retry);
3287 	return err;
3288 }
3289 
/* FSINFO entry point: reset the fattr, then do the (retried) call. */
static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
{
	nfs_fattr_init(fsinfo->fattr);
	return nfs4_do_fsinfo(server, fhandle, fsinfo);
}
3295 
/*
 * One PATHCONF round trip.  If the server supports none of the pathconf
 * attributes we care about, return zeroed results without a round trip.
 */
static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_pathconf *pathconf)
{
	struct nfs4_pathconf_arg args = {
		.fh = fhandle,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_pathconf_res res = {
		.pathconf = pathconf,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	/* None of the pathconf attributes are mandatory to implement */
	if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
		memset(pathconf, 0, sizeof(*pathconf));
		return 0;
	}

	nfs_fattr_init(pathconf->fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
3321 
3322 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
3323 		struct nfs_pathconf *pathconf)
3324 {
3325 	struct nfs4_exception exception = { };
3326 	int err;
3327 
3328 	do {
3329 		err = nfs4_handle_exception(server,
3330 				_nfs4_proc_pathconf(server, fhandle, pathconf),
3331 				&exception);
3332 	} while (exception.retry);
3333 	return err;
3334 }
3335 
/* Common READ completion work: the read invalidates the cached atime. */
void __nfs4_read_done_cb(struct nfs_read_data *data)
{
	nfs_invalidate_atime(data->inode);
}
3340 
/*
 * READ completion callback (non-pNFS path).  Returns -EAGAIN after
 * restarting the task on a retryable error, 0 otherwise.  A successful
 * read (tk_status > 0 bytes) also renews the client's lease.
 */
static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
{
	struct nfs_server *server = NFS_SERVER(data->inode);

	if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}

	__nfs4_read_done_cb(data);
	if (task->tk_status > 0)
		renew_lease(server, data->timestamp);
	return 0;
}
3355 
/*
 * rpc_call_done for READ: finish the session sequence, then dispatch to
 * the configured completion callback (pNFS may override read_done_cb).
 */
static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
{

	dprintk("--> %s\n", __func__);

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return -EAGAIN;

	return data->read_done_cb ? data->read_done_cb(task, data) :
				    nfs4_read_done_cb(task, data);
}
3367 
/*
 * Prepare the rpc_message for a READ: record the issue time (for lease
 * renewal), install the default completion callback, and initialize the
 * session sequence arguments.
 */
static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
{
	data->timestamp   = jiffies;
	data->read_done_cb = nfs4_read_done_cb;
	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
	nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
}
3375 
/*
 * rpc_prepare callback for READ: set up the session sequence and only
 * start the RPC when that succeeded (returned 0).
 */
static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data)
{
	if (nfs4_setup_sequence(NFS_SERVER(data->inode),
				&data->args.seq_args,
				&data->res.seq_res,
				task))
		return;
	rpc_call_start(task);
}
3385 
/* Reset the nfs_read_data to send the read to the MDS. */
void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data)
{
	dprintk("%s Reset task for i/o through\n", __func__);
	/* Drop the layout segment and data-server state */
	put_lseg(data->lseg);
	data->lseg = NULL;
	/* offsets will differ in the dense stripe case */
	data->args.offset = data->mds_offset;
	data->ds_clp = NULL;
	data->args.fh     = NFS_FH(data->inode);
	data->read_done_cb = nfs4_read_done_cb;
	/* Redirect the task to the MDS rpc client and callbacks */
	task->tk_ops = data->mds_ops;
	rpc_task_reset_client(task, NFS_CLIENT(data->inode));
}
EXPORT_SYMBOL_GPL(nfs4_reset_read);
3401 
/*
 * WRITE completion callback (non-pNFS path).  Returns -EAGAIN after
 * restarting the task on a retryable error; on success renews the lease
 * and updates the inode with the post-op attributes.
 */
static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
{
	struct inode *inode = data->inode;

	if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}
	if (task->tk_status >= 0) {
		renew_lease(NFS_SERVER(inode), data->timestamp);
		nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
	}
	return 0;
}
3416 
/*
 * rpc_call_done for WRITE: finish the session sequence, then dispatch
 * to the configured completion callback (pNFS may override it).
 */
static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
{
	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return -EAGAIN;
	return data->write_done_cb ? data->write_done_cb(task, data) :
		nfs4_write_done_cb(task, data);
}
3424 
/* Reset the nfs_write_data to send the write to the MDS. */
void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data)
{
	dprintk("%s Reset task for i/o through\n", __func__);
	/* Drop the layout segment and data-server state */
	put_lseg(data->lseg);
	data->lseg          = NULL;
	data->ds_clp        = NULL;
	data->write_done_cb = nfs4_write_done_cb;
	data->args.fh       = NFS_FH(data->inode);
	data->args.bitmask  = data->res.server->cache_consistency_bitmask;
	data->args.offset   = data->mds_offset;
	data->res.fattr     = &data->fattr;
	/* Redirect the task to the MDS rpc client and callbacks */
	task->tk_ops        = data->mds_ops;
	rpc_task_reset_client(task, NFS_CLIENT(data->inode));
}
EXPORT_SYMBOL_GPL(nfs4_reset_write);
3441 
/*
 * Prepare the rpc_message for a WRITE.  When writing through a layout
 * segment (data->lseg set) the post-op attributes are skipped; otherwise
 * ask for the cache-consistency attribute set.
 */
static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
{
	struct nfs_server *server = NFS_SERVER(data->inode);

	if (data->lseg) {
		data->args.bitmask = NULL;
		data->res.fattr = NULL;
	} else
		data->args.bitmask = server->cache_consistency_bitmask;
	/* Don't clobber a callback a pNFS layout driver already set */
	if (!data->write_done_cb)
		data->write_done_cb = nfs4_write_done_cb;
	data->res.server = server;
	data->timestamp   = jiffies;

	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
	nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
}
3459 
/*
 * rpc_prepare callback for WRITE: set up the session sequence and only
 * start the RPC when that succeeded (returned 0).
 */
static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data)
{
	if (nfs4_setup_sequence(NFS_SERVER(data->inode),
				&data->args.seq_args,
				&data->res.seq_res,
				task))
		return;
	rpc_call_start(task);
}
3469 
/*
 * COMMIT completion callback.  Returns -EAGAIN after restarting the
 * task on a retryable error, 0 otherwise; refreshes the inode from the
 * returned attributes.
 */
static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_write_data *data)
{
	struct inode *inode = data->inode;

	if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}
	nfs_refresh_inode(inode, data->res.fattr);
	return 0;
}
3481 
/*
 * rpc_call_done for COMMIT: finish the session sequence, then dispatch
 * to the configured completion callback.
 */
static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
{
	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return -EAGAIN;
	return data->write_done_cb(task, data);
}
3488 
/*
 * Prepare the rpc_message for a COMMIT.  Like WRITE setup: skip the
 * post-op attributes on the layout-segment path, and don't clobber a
 * callback a pNFS layout driver already installed.
 */
static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
{
	struct nfs_server *server = NFS_SERVER(data->inode);

	if (data->lseg) {
		data->args.bitmask = NULL;
		data->res.fattr = NULL;
	} else
		data->args.bitmask = server->cache_consistency_bitmask;
	if (!data->write_done_cb)
		data->write_done_cb = nfs4_commit_done_cb;
	data->res.server = server;
	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
	nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
}
3504 
/* Per-call context for an asynchronous RENEW. */
struct nfs4_renewdata {
	struct nfs_client	*client;	/* holds a cl_count reference */
	unsigned long		timestamp;	/* jiffies when RENEW was sent */
};
3509 
3510 /*
3511  * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
3512  * standalone procedure for queueing an asynchronous RENEW.
3513  */
/*
 * rpc_release for async RENEW: re-arm the renewal timer unless we hold
 * the last client reference (i.e. the client is being torn down), then
 * drop the reference taken in nfs4_proc_async_renew().
 */
static void nfs4_renew_release(void *calldata)
{
	struct nfs4_renewdata *data = calldata;
	struct nfs_client *clp = data->client;

	if (atomic_read(&clp->cl_count) > 1)
		nfs4_schedule_state_renewal(clp);
	nfs_put_client(clp);
	kfree(data);
}
3524 
/*
 * rpc_call_done for async RENEW: on failure, kick off the appropriate
 * state recovery; on the non-error paths record the lease renewal time.
 */
static void nfs4_renew_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_renewdata *data = calldata;
	struct nfs_client *clp = data->client;
	unsigned long timestamp = data->timestamp;

	if (task->tk_status < 0) {
		/* Unless we're shutting down, schedule state recovery! */
		if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
			return;
		/* NOTE(review): tk_status is negative here, yet it is
		 * compared against the positive NFS4ERR_CB_PATH_DOWN
		 * constant — confirm whether -NFS4ERR_CB_PATH_DOWN was
		 * intended; as written this branch is always taken. */
		if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
			nfs4_schedule_lease_recovery(clp);
			return;
		}
		nfs4_schedule_path_down_recovery(clp);
	}
	do_renew_lease(clp, timestamp);
}
3543 
/* Callbacks for the asynchronous RENEW task. */
static const struct rpc_call_ops nfs4_renew_ops = {
	.rpc_call_done = nfs4_renew_done,
	.rpc_release = nfs4_renew_release,
};
3548 
3549 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
3550 {
3551 	struct rpc_message msg = {
3552 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_RENEW],
3553 		.rpc_argp	= clp,
3554 		.rpc_cred	= cred,
3555 	};
3556 	struct nfs4_renewdata *data;
3557 
3558 	if (renew_flags == 0)
3559 		return 0;
3560 	if (!atomic_inc_not_zero(&clp->cl_count))
3561 		return -EIO;
3562 	data = kmalloc(sizeof(*data), GFP_NOFS);
3563 	if (data == NULL)
3564 		return -ENOMEM;
3565 	data->client = clp;
3566 	data->timestamp = jiffies;
3567 	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
3568 			&nfs4_renew_ops, data);
3569 }
3570 
/*
 * Synchronous RENEW: send the RPC and, on success, record the lease as
 * renewed using the timestamp taken *before* the call (so the lease is
 * never credited with more time than the server granted).
 */
static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
{
	struct rpc_message msg = {
		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_RENEW],
		.rpc_argp	= clp,
		.rpc_cred	= cred,
	};
	unsigned long now = jiffies;
	int status;

	status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
	if (status < 0)
		return status;
	do_renew_lease(clp, now);
	return 0;
}
3587 
/*
 * ACLs are usable only if the server advertises the ACL capability and
 * supports both ALLOW and DENY ACE types.
 */
static inline int nfs4_server_supports_acls(struct nfs_server *server)
{
	return (server->caps & NFS_CAP_ACLS)
		&& (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
		&& (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
}
3594 
3595 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that
3596  * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on
3597  * the stack.
3598  */
3599 #define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
3600 
3601 static int buf_to_pages_noslab(const void *buf, size_t buflen,
3602 		struct page **pages, unsigned int *pgbase)
3603 {
3604 	struct page *newpage, **spages;
3605 	int rc = 0;
3606 	size_t len;
3607 	spages = pages;
3608 
3609 	do {
3610 		len = min_t(size_t, PAGE_CACHE_SIZE, buflen);
3611 		newpage = alloc_page(GFP_KERNEL);
3612 
3613 		if (newpage == NULL)
3614 			goto unwind;
3615 		memcpy(page_address(newpage), buf, len);
3616                 buf += len;
3617                 buflen -= len;
3618 		*pages++ = newpage;
3619 		rc++;
3620 	} while (buflen != 0);
3621 
3622 	return rc;
3623 
3624 unwind:
3625 	for(; rc > 0; rc--)
3626 		__free_page(spages[rc-1]);
3627 	return -ENOMEM;
3628 }
3629 
/*
 * Per-inode cached ACL.  When 'cached' is set, data[] holds 'len' bytes
 * of ACL data; otherwise only the length is known (the ACL was too big
 * to cache).
 */
struct nfs4_cached_acl {
	int cached;
	size_t len;
	char data[0];	/* flexible trailing ACL bytes */
};
3635 
/*
 * Install @acl as the inode's cached ACL (may be NULL to clear),
 * freeing any previous entry.  Serialized by i_lock.
 */
static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	kfree(nfsi->nfs4_acl);
	nfsi->nfs4_acl = acl;
	spin_unlock(&inode->i_lock);
}
3645 
/* Drop the inode's cached ACL. */
static void nfs4_zap_acl_attr(struct inode *inode)
{
	nfs4_set_cached_acl(inode, NULL);
}
3650 
/*
 * Read the inode's cached ACL into @buf.  Returns the ACL length on
 * success, -ENOENT when nothing usable is cached (no entry, or length
 * known but data not cached), or -ERANGE when @buf is too small.
 * A NULL @buf is a pure length query.
 */
static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_cached_acl *acl;
	int ret = -ENOENT;

	spin_lock(&inode->i_lock);
	acl = nfsi->nfs4_acl;
	if (acl == NULL)
		goto out;
	if (buf == NULL) /* user is just asking for length */
		goto out_len;
	if (acl->cached == 0)
		goto out;
	ret = -ERANGE; /* see getxattr(2) man page */
	if (acl->len > buflen)
		goto out;
	memcpy(buf, acl->data, acl->len);
out_len:
	ret = acl->len;
out:
	spin_unlock(&inode->i_lock);
	return ret;
}
3675 
/*
 * Cache ACL data for @inode.  If the ACL fits in one page it is copied
 * and cached in full; otherwise only the length is cached.  If the
 * allocation fails, the cached entry is cleared (acl stays NULL).
 */
static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
{
	struct nfs4_cached_acl *acl;

	if (pages && acl_len <= PAGE_SIZE) {
		acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL);
		if (acl == NULL)
			goto out;
		acl->cached = 1;
		_copy_from_pages(acl->data, pages, pgbase, acl_len);
	} else {
		/* Too large to cache the data: remember the length only */
		acl = kmalloc(sizeof(*acl), GFP_KERNEL);
		if (acl == NULL)
			goto out;
		acl->cached = 0;
	}
	acl->len = acl_len;
out:
	nfs4_set_cached_acl(inode, acl);
}
3696 
3697 /*
3698  * The getxattr API returns the required buffer length when called with a
3699  * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
3700  * the required buf.  On a NULL buf, we send a page of data to the server
3701  * guessing that the ACL request can be serviced by a page. If so, we cache
3702  * up to the page of ACL data, and the 2nd call to getxattr is serviced by
3703  * the cache. If not so, we throw away the page, and cache the required
3704  * length. The next getxattr call will then produce another round trip to
3705  * the server, this time with the input buf of the required size.
3706  */
3707 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3708 {
3709 	struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
3710 	struct nfs_getaclargs args = {
3711 		.fh = NFS_FH(inode),
3712 		.acl_pages = pages,
3713 		.acl_len = buflen,
3714 	};
3715 	struct nfs_getaclres res = {
3716 		.acl_len = buflen,
3717 	};
3718 	struct rpc_message msg = {
3719 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
3720 		.rpc_argp = &args,
3721 		.rpc_resp = &res,
3722 	};
3723 	int ret = -ENOMEM, npages, i, acl_len = 0;
3724 
3725 	npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3726 	/* As long as we're doing a round trip to the server anyway,
3727 	 * let's be prepared for a page of acl data. */
3728 	if (npages == 0)
3729 		npages = 1;
3730 
3731 	/* Add an extra page to handle the bitmap returned */
3732 	npages++;
3733 
3734 	for (i = 0; i < npages; i++) {
3735 		pages[i] = alloc_page(GFP_KERNEL);
3736 		if (!pages[i])
3737 			goto out_free;
3738 	}
3739 
3740 	/* for decoding across pages */
3741 	res.acl_scratch = alloc_page(GFP_KERNEL);
3742 	if (!res.acl_scratch)
3743 		goto out_free;
3744 
3745 	args.acl_len = npages * PAGE_SIZE;
3746 	args.acl_pgbase = 0;
3747 
3748 	/* Let decode_getfacl know not to fail if the ACL data is larger than
3749 	 * the page we send as a guess */
3750 	if (buf == NULL)
3751 		res.acl_flags |= NFS4_ACL_LEN_REQUEST;
3752 
3753 	dprintk("%s  buf %p buflen %zu npages %d args.acl_len %zu\n",
3754 		__func__, buf, buflen, npages, args.acl_len);
3755 	ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
3756 			     &msg, &args.seq_args, &res.seq_res, 0);
3757 	if (ret)
3758 		goto out_free;
3759 
3760 	acl_len = res.acl_len - res.acl_data_offset;
3761 	if (acl_len > args.acl_len)
3762 		nfs4_write_cached_acl(inode, NULL, 0, acl_len);
3763 	else
3764 		nfs4_write_cached_acl(inode, pages, res.acl_data_offset,
3765 				      acl_len);
3766 	if (buf) {
3767 		ret = -ERANGE;
3768 		if (acl_len > buflen)
3769 			goto out_free;
3770 		_copy_from_pages(buf, pages, res.acl_data_offset,
3771 				acl_len);
3772 	}
3773 	ret = acl_len;
3774 out_free:
3775 	for (i = 0; i < npages; i++)
3776 		if (pages[i])
3777 			__free_page(pages[i]);
3778 	if (res.acl_scratch)
3779 		__free_page(res.acl_scratch);
3780 	return ret;
3781 }
3782 
3783 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3784 {
3785 	struct nfs4_exception exception = { };
3786 	ssize_t ret;
3787 	do {
3788 		ret = __nfs4_get_acl_uncached(inode, buf, buflen);
3789 		if (ret >= 0)
3790 			break;
3791 		ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
3792 	} while (exception.retry);
3793 	return ret;
3794 }
3795 
/*
 * getxattr-style ACL read.  Revalidates the inode, serves the request
 * from the ACL cache when possible, and falls back to a GETACL round
 * trip otherwise.  Returns -EOPNOTSUPP when the server lacks ACL
 * support.
 */
static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
{
	struct nfs_server *server = NFS_SERVER(inode);
	int ret;

	if (!nfs4_server_supports_acls(server))
		return -EOPNOTSUPP;
	ret = nfs_revalidate_inode(server, inode);
	if (ret < 0)
		return ret;
	if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
		nfs_zap_acl_cache(inode);
	ret = nfs4_read_cached_acl(inode, buf, buflen);
	if (ret != -ENOENT)
		/* -ENOENT is returned if there is no ACL or if there is an ACL
		 * but no cached acl data, just the acl length */
		return ret;
	return nfs4_get_acl_uncached(inode, buf, buflen);
}
3815 
/*
 * One SETACL round trip: copy the caller's buffer into pages, return
 * any delegation, send the ACL, then invalidate the attribute, access
 * and ACL caches (an ACL change can alter the file's mode bits).
 */
static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct page *pages[NFS4ACL_MAXPAGES];
	struct nfs_setaclargs arg = {
		.fh		= NFS_FH(inode),
		.acl_pages	= pages,
		.acl_len	= buflen,
	};
	struct nfs_setaclres res;
	struct rpc_message msg = {
		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_SETACL],
		.rpc_argp	= &arg,
		.rpc_resp	= &res,
	};
	int ret, i;

	if (!nfs4_server_supports_acls(server))
		return -EOPNOTSUPP;
	i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
	if (i < 0)
		return i;
	nfs_inode_return_delegation(inode);
	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);

	/*
	 * Free each page after tx, so the only ref left is
	 * held by the network stack
	 */
	for (; i > 0; i--)
		put_page(pages[i-1]);

	/*
	 * Acl update can result in inode attribute update.
	 * so mark the attribute cache invalid.
	 */
	spin_lock(&inode->i_lock);
	NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
	spin_unlock(&inode->i_lock);
	nfs_access_zap_cache(inode);
	nfs_zap_acl_cache(inode);
	return ret;
}
3859 
3860 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
3861 {
3862 	struct nfs4_exception exception = { };
3863 	int err;
3864 	do {
3865 		err = nfs4_handle_exception(NFS_SERVER(inode),
3866 				__nfs4_proc_set_acl(inode, buf, buflen),
3867 				&exception);
3868 	} while (exception.retry);
3869 	return err;
3870 }
3871 
/*
 * Common error handler for asynchronous NFSv4 operations.
 * Returns -EAGAIN when the caller should restart the RPC (after any
 * recovery or delay scheduled here), or 0 when the (mapped) tk_status
 * should be treated as the final result.
 */
static int
nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
{
	struct nfs_client *clp = server->nfs_client;

	if (task->tk_status >= 0)
		return 0;
	switch(task->tk_status) {
		case -NFS4ERR_DELEG_REVOKED:
		case -NFS4ERR_ADMIN_REVOKED:
		case -NFS4ERR_BAD_STATEID:
			if (state == NULL)
				break;
			nfs_remove_bad_delegation(state->inode);
			/* Fall through: recover the stateid */
		case -NFS4ERR_OPENMODE:
			if (state == NULL)
				break;
			nfs4_schedule_stateid_recovery(server, state);
			goto wait_on_recovery;
		case -NFS4ERR_EXPIRED:
			if (state != NULL)
				nfs4_schedule_stateid_recovery(server, state);
			/* Fall through: the lease needs recovery too */
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_STALE_CLIENTID:
			nfs4_schedule_lease_recovery(clp);
			goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
		case -NFS4ERR_BADSESSION:
		case -NFS4ERR_BADSLOT:
		case -NFS4ERR_BAD_HIGH_SLOT:
		case -NFS4ERR_DEADSESSION:
		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		case -NFS4ERR_SEQ_FALSE_RETRY:
		case -NFS4ERR_SEQ_MISORDERED:
			dprintk("%s ERROR %d, Reset session\n", __func__,
				task->tk_status);
			nfs4_schedule_session_recovery(clp->cl_session);
			task->tk_status = 0;
			return -EAGAIN;
#endif /* CONFIG_NFS_V4_1 */
		case -NFS4ERR_DELAY:
			nfs_inc_server_stats(server, NFSIOS_DELAY);
			/* Fall through: back off, then retry */
		case -NFS4ERR_GRACE:
		case -EKEYEXPIRED:
			rpc_delay(task, NFS4_POLL_RETRY_MAX);
			task->tk_status = 0;
			return -EAGAIN;
		case -NFS4ERR_RETRY_UNCACHED_REP:
		case -NFS4ERR_OLD_STATEID:
			task->tk_status = 0;
			return -EAGAIN;
	}
	task->tk_status = nfs4_map_errors(task->tk_status);
	return 0;
wait_on_recovery:
	/* Sleep until the state manager finishes; the extra check below
	 * avoids a missed wakeup if recovery completed before we queued. */
	rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
	if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
		rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
	task->tk_status = 0;
	return -EAGAIN;
}
3933 
/*
 * Build the client boot verifier sent in SETCLIENTID: the low 32 bits
 * of the client boot time's seconds and nanoseconds, each converted to
 * network byte order.
 */
static void nfs4_construct_boot_verifier(struct nfs_client *clp,
					 nfs4_verifier *bootverf)
{
	__be32 verf[2];

	verf[0] = htonl((u32)clp->cl_boot_time.tv_sec);
	verf[1] = htonl((u32)clp->cl_boot_time.tv_nsec);
	memcpy(bootverf->data, verf, sizeof(bootverf->data));
}
3943 
/*
 * nfs4_proc_setclientid - establish a client ID with the server
 * @clp: client record to identify
 * @program: callback RPC program number
 * @port: callback port
 * @cred: RPC credential to use for the call
 * @res: on success, receives the clientid and confirmation verifier
 *
 * On NFS4ERR_CLID_INUSE the call is retried once after waiting a full
 * lease period; if the collision persists, cl_id_uniquifier is bumped
 * so that the next SETCLIENTID attempt uses a fresh identifier string.
 *
 * Returns the RPC status (0 on success).
 */
int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
		unsigned short port, struct rpc_cred *cred,
		struct nfs4_setclientid_res *res)
{
	nfs4_verifier sc_verifier;
	struct nfs4_setclientid setclientid = {
		.sc_verifier = &sc_verifier,
		.sc_prog = program,
		.sc_cb_ident = clp->cl_cb_ident,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
		.rpc_argp = &setclientid,
		.rpc_resp = res,
		.rpc_cred = cred,
	};
	int loop = 0;
	int status;

	nfs4_construct_boot_verifier(clp, &sc_verifier);

	for(;;) {
		/*
		 * The transport address strings returned by
		 * rpc_peeraddr2str() are RCU-protected, hence the
		 * read-side critical section around the snprintfs.
		 */
		rcu_read_lock();
		setclientid.sc_name_len = scnprintf(setclientid.sc_name,
				sizeof(setclientid.sc_name), "%s/%s %s %s %u",
				clp->cl_ipaddr,
				rpc_peeraddr2str(clp->cl_rpcclient,
							RPC_DISPLAY_ADDR),
				rpc_peeraddr2str(clp->cl_rpcclient,
							RPC_DISPLAY_PROTO),
				clp->cl_rpcclient->cl_auth->au_ops->au_name,
				clp->cl_id_uniquifier);
		setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
				sizeof(setclientid.sc_netid),
				rpc_peeraddr2str(clp->cl_rpcclient,
							RPC_DISPLAY_NETID));
		setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
				sizeof(setclientid.sc_uaddr), "%s.%u.%u",
				clp->cl_ipaddr, port >> 8, port & 255);
		rcu_read_unlock();

		status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
		if (status != -NFS4ERR_CLID_INUSE)
			break;
		if (loop != 0) {
			/* Second collision: change the id for next time */
			++clp->cl_id_uniquifier;
			break;
		}
		++loop;
		/* Wait out a lease period before retrying */
		ssleep(clp->cl_lease_time / HZ + 1);
	}
	return status;
}
3997 
/*
 * nfs4_proc_setclientid_confirm - confirm a clientid established by
 * nfs4_proc_setclientid()
 * @clp: client record being confirmed
 * @arg: clientid + verifier returned by SETCLIENTID
 * @cred: RPC credential to use
 *
 * On success, records the server's lease time and dates the lease from
 * just before the RPC was sent (so renewal errs on the early side).
 *
 * Returns the RPC status (0 on success).
 */
int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
		struct nfs4_setclientid_res *arg,
		struct rpc_cred *cred)
{
	struct nfs_fsinfo fsinfo;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
		.rpc_argp = arg,
		.rpc_resp = &fsinfo,
		.rpc_cred = cred,
	};
	unsigned long now;
	int status;

	/* Sample the clock before the call, not after */
	now = jiffies;
	status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
	if (status == 0) {
		spin_lock(&clp->cl_lock);
		clp->cl_lease_time = fsinfo.lease_time * HZ;
		clp->cl_last_renewal = now;
		spin_unlock(&clp->cl_lock);
	}
	return status;
}
4022 
/* Per-call state for an asynchronous DELEGRETURN request */
struct nfs4_delegreturndata {
	struct nfs4_delegreturnargs args;
	struct nfs4_delegreturnres res;
	struct nfs_fh fh;		/* private copy of the file handle */
	nfs4_stateid stateid;		/* delegation stateid being returned */
	unsigned long timestamp;	/* jiffies when the call was set up */
	struct nfs_fattr fattr;		/* attributes returned by the server */
	int rpc_status;			/* final status, read by sync waiters */
};
4032 
/*
 * rpc_call_done callback for DELEGRETURN: renew the lease on success
 * (or when the stateid is already gone), otherwise run the generic
 * async error handler and restart the call if it asks for a retry.
 */
static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_delegreturndata *data = calldata;

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) {
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_EXPIRED:
	case 0:
		renew_lease(data->res.server, data->timestamp);
		break;
	default:
		if (nfs4_async_handle_error(task, data->res.server, NULL) ==
				-EAGAIN) {
			/* Retry without recording a final status */
			rpc_restart_call_prepare(task);
			return;
		}
	}
	data->rpc_status = task->tk_status;
}
4055 
/* rpc_release callback: free the DELEGRETURN call data. */
static void nfs4_delegreturn_release(void *calldata)
{
	struct nfs4_delegreturndata *data = calldata;

	kfree(data);
}
4060 
#if defined(CONFIG_NFS_V4_1)
/*
 * rpc_call_prepare callback for DELEGRETURN (v4.1 only): reserve a
 * session slot before letting the call proceed.
 */
static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
{
	struct nfs4_delegreturndata *d_data;

	d_data = (struct nfs4_delegreturndata *)data;

	/* A non-zero return means the task was queued for a slot */
	if (nfs4_setup_sequence(d_data->res.server,
				&d_data->args.seq_args,
				&d_data->res.seq_res, task))
		return;
	rpc_call_start(task);
}
#endif /* CONFIG_NFS_V4_1 */
4075 
/* RPC callback table for asynchronous DELEGRETURN tasks */
static const struct rpc_call_ops nfs4_delegreturn_ops = {
#if defined(CONFIG_NFS_V4_1)
	.rpc_call_prepare = nfs4_delegreturn_prepare,
#endif /* CONFIG_NFS_V4_1 */
	.rpc_call_done = nfs4_delegreturn_done,
	.rpc_release = nfs4_delegreturn_release,
};
4083 
/*
 * Issue a DELEGRETURN for @stateid on @inode as an asynchronous RPC
 * task.  If @issync is set, wait for the task to complete and refresh
 * the inode attributes from the reply; otherwise return immediately
 * after launching the task.
 */
static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
{
	struct nfs4_delegreturndata *data;
	struct nfs_server *server = NFS_SERVER(inode);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_delegreturn_ops,
		.flags = RPC_TASK_ASYNC,
	};
	int status = 0;

	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (data == NULL)
		return -ENOMEM;
	nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
	data->args.fhandle = &data->fh;
	data->args.stateid = &data->stateid;
	data->args.bitmask = server->attr_bitmask;
	/* Take private copies: the caller's fh/stateid may go away */
	nfs_copy_fh(&data->fh, NFS_FH(inode));
	nfs4_stateid_copy(&data->stateid, stateid);
	data->res.fattr = &data->fattr;
	data->res.server = server;
	nfs_fattr_init(data->res.fattr);
	data->timestamp = jiffies;
	data->rpc_status = 0;

	task_setup_data.callback_data = data;
	msg.rpc_argp = &data->args;
	msg.rpc_resp = &data->res;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	if (!issync)
		goto out;
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status != 0)
		goto out;
	status = data->rpc_status;
	if (status != 0)
		goto out;
	nfs_refresh_inode(inode, &data->fattr);
out:
	rpc_put_task(task);
	return status;
}
4135 
/*
 * Return a delegation, retrying through the standard exception
 * handler.  A stale/expired stateid is treated as success: the
 * delegation is gone either way.
 */
int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
		switch (err) {
			case -NFS4ERR_STALE_STATEID:
			case -NFS4ERR_EXPIRED:
			case 0:
				return 0;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}
4153 
4154 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
4155 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
4156 
4157 /*
4158  * sleep, with exponential backoff, and retry the LOCK operation.
4159  */
4160 static unsigned long
4161 nfs4_set_lock_task_retry(unsigned long timeout)
4162 {
4163 	freezable_schedule_timeout_killable(timeout);
4164 	timeout <<= 1;
4165 	if (timeout > NFS4_LOCK_MAXTIMEOUT)
4166 		return NFS4_LOCK_MAXTIMEOUT;
4167 	return timeout;
4168 }
4169 
/*
 * Issue a LOCKT (test-for-lock) request.  On success the lock is not
 * held by anyone conflicting and @request->fl_type is set to F_UNLCK;
 * on NFS4ERR_DENIED the conflicting lock description is decoded into
 * @request (via res.denied) and 0 is returned.
 */
static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
	struct inode *inode = state->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_client *clp = server->nfs_client;
	struct nfs_lockt_args arg = {
		.fh = NFS_FH(inode),
		.fl = request,
	};
	struct nfs_lockt_res res = {
		.denied = request,
	};
	struct rpc_message msg = {
		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
		.rpc_argp       = &arg,
		.rpc_resp       = &res,
		.rpc_cred	= state->owner->so_cred,
	};
	struct nfs4_lock_state *lsp;
	int status;

	arg.lock_owner.clientid = clp->cl_clientid;
	status = nfs4_set_lock_state(state, request);
	if (status != 0)
		goto out;
	lsp = request->fl_u.nfs4_fl.owner;
	arg.lock_owner.id = lsp->ls_seqid.owner_id;
	arg.lock_owner.s_dev = server->s_dev;
	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
	switch (status) {
		case 0:
			request->fl_type = F_UNLCK;
			break;
		case -NFS4ERR_DENIED:
			/* Conflict details already decoded into request */
			status = 0;
	}
	request->fl_ops->fl_release_private(request);
out:
	return status;
}
4210 
4211 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4212 {
4213 	struct nfs4_exception exception = { };
4214 	int err;
4215 
4216 	do {
4217 		err = nfs4_handle_exception(NFS_SERVER(state->inode),
4218 				_nfs4_proc_getlk(state, cmd, request),
4219 				&exception);
4220 	} while (exception.retry);
4221 	return err;
4222 }
4223 
4224 static int do_vfs_lock(struct file *file, struct file_lock *fl)
4225 {
4226 	int res = 0;
4227 	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
4228 		case FL_POSIX:
4229 			res = posix_lock_file_wait(file, fl);
4230 			break;
4231 		case FL_FLOCK:
4232 			res = flock_lock_file_wait(file, fl);
4233 			break;
4234 		default:
4235 			BUG();
4236 	}
4237 	return res;
4238 }
4239 
/* Per-call state for an asynchronous LOCKU (unlock) request */
struct nfs4_unlockdata {
	struct nfs_locku_args arg;
	struct nfs_locku_res res;
	struct nfs4_lock_state *lsp;	/* lock state, refcounted */
	struct nfs_open_context *ctx;	/* pins the open file until we're done */
	struct file_lock fl;		/* private copy of the caller's lock */
	const struct nfs_server *server;
	unsigned long timestamp;	/* jiffies when the call was started */
};
4249 
/*
 * Allocate and initialize the call data for a LOCKU request.  Takes a
 * reference on @lsp and on the open context; ownership of @seqid passes
 * to the returned structure.  Returns NULL on allocation failure (the
 * caller keeps ownership of @seqid in that case).
 */
static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
		struct nfs_open_context *ctx,
		struct nfs4_lock_state *lsp,
		struct nfs_seqid *seqid)
{
	struct nfs4_unlockdata *p;
	struct inode *inode = lsp->ls_state->inode;

	p = kzalloc(sizeof(*p), GFP_NOFS);
	if (p == NULL)
		return NULL;
	p->arg.fh = NFS_FH(inode);
	p->arg.fl = &p->fl;
	p->arg.seqid = seqid;
	p->res.seqid = seqid;
	p->arg.stateid = &lsp->ls_stateid;
	p->lsp = lsp;
	atomic_inc(&lsp->ls_count);
	/* Ensure we don't close file until we're done freeing locks! */
	p->ctx = get_nfs_open_context(ctx);
	memcpy(&p->fl, fl, sizeof(p->fl));
	p->server = NFS_SERVER(inode);
	return p;
}
4274 
4275 static void nfs4_locku_release_calldata(void *data)
4276 {
4277 	struct nfs4_unlockdata *calldata = data;
4278 	nfs_free_seqid(calldata->arg.seqid);
4279 	nfs4_put_lock_state(calldata->lsp);
4280 	put_nfs_open_context(calldata->ctx);
4281 	kfree(calldata);
4282 }
4283 
/*
 * rpc_call_done callback for LOCKU: on success record the new lock
 * stateid and renew the lease.  Stateid errors mean the lock is gone
 * anyway, so they are ignored; anything else goes through the generic
 * async error handler, which may restart the call.
 */
static void nfs4_locku_done(struct rpc_task *task, void *data)
{
	struct nfs4_unlockdata *calldata = data;

	if (!nfs4_sequence_done(task, &calldata->res.seq_res))
		return;
	switch (task->tk_status) {
		case 0:
			nfs4_stateid_copy(&calldata->lsp->ls_stateid,
					&calldata->res.stateid);
			renew_lease(calldata->server, calldata->timestamp);
			break;
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_OLD_STATEID:
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_EXPIRED:
			/* Lock already gone on the server: nothing to do */
			break;
		default:
			if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
				rpc_restart_call_prepare(task);
	}
}
4306 
/*
 * rpc_call_prepare callback for LOCKU: serialize on the lock seqid,
 * bail out silently if the lock was never established on the server,
 * then set up the (v4.1) session sequence before starting the call.
 */
static void nfs4_locku_prepare(struct rpc_task *task, void *data)
{
	struct nfs4_unlockdata *calldata = data;

	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
		return;
	if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) {
		/* Note: exit _without_ running nfs4_locku_done */
		task->tk_action = NULL;
		return;
	}
	calldata->timestamp = jiffies;
	if (nfs4_setup_sequence(calldata->server,
				&calldata->arg.seq_args,
				&calldata->res.seq_res, task))
		return;
	rpc_call_start(task);
}
4325 
/* RPC callback table for asynchronous LOCKU tasks */
static const struct rpc_call_ops nfs4_locku_ops = {
	.rpc_call_prepare = nfs4_locku_prepare,
	.rpc_call_done = nfs4_locku_done,
	.rpc_release = nfs4_locku_release_calldata,
};
4331 
/*
 * Launch an asynchronous LOCKU task for @fl.  Ownership of @seqid
 * passes to the task (it is freed here on allocation failure).
 * Returns the running task, or an ERR_PTR on failure.
 */
static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
		struct nfs_open_context *ctx,
		struct nfs4_lock_state *lsp,
		struct nfs_seqid *seqid)
{
	struct nfs4_unlockdata *data;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
		.rpc_cred = ctx->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(lsp->ls_state->inode),
		.rpc_message = &msg,
		.callback_ops = &nfs4_locku_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};

	/* Ensure this is an unlock - when canceling a lock, the
	 * canceled lock is passed in, and it won't be an unlock.
	 */
	fl->fl_type = F_UNLCK;

	data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
	if (data == NULL) {
		nfs_free_seqid(seqid);
		return ERR_PTR(-ENOMEM);
	}

	nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
	msg.rpc_argp = &data->arg;
	msg.rpc_resp = &data->res;
	task_setup_data.callback_data = data;
	return rpc_run_task(&task_setup_data);
}
4367 
/*
 * Unlock a byte range: drop the lock locally in the VFS first, then
 * send LOCKU to the server (unless the lock never existed locally, or
 * the open is delegated, in which case no RPC is needed).  The
 * caller's fl_flags are restored before returning.
 */
static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_seqid *seqid;
	struct nfs4_lock_state *lsp;
	struct rpc_task *task;
	int status = 0;
	unsigned char fl_flags = request->fl_flags;

	status = nfs4_set_lock_state(state, request);
	/* Unlock _before_ we do the RPC call */
	request->fl_flags |= FL_EXISTS;
	down_read(&nfsi->rwsem);
	if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
		/* VFS had no such lock: nothing to tell the server */
		up_read(&nfsi->rwsem);
		goto out;
	}
	up_read(&nfsi->rwsem);
	if (status != 0)
		goto out;
	/* Is this a delegated lock? */
	if (test_bit(NFS_DELEGATED_STATE, &state->flags))
		goto out;
	lsp = request->fl_u.nfs4_fl.owner;
	seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
	status = -ENOMEM;
	if (seqid == NULL)
		goto out;
	task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
	status = PTR_ERR(task);
	if (IS_ERR(task))
		goto out;
	status = nfs4_wait_for_completion_rpc_task(task);
	rpc_put_task(task);
out:
	request->fl_flags = fl_flags;
	return status;
}
4406 
/* Per-call state for an asynchronous LOCK request */
struct nfs4_lockdata {
	struct nfs_lock_args arg;
	struct nfs_lock_res res;
	struct nfs4_lock_state *lsp;	/* lock state, refcounted */
	struct nfs_open_context *ctx;	/* pins the open file */
	struct file_lock fl;		/* private copy of the caller's lock */
	unsigned long timestamp;	/* jiffies when the call was started */
	int rpc_status;			/* final status, read by sync waiters */
	int cancelled;			/* set if the waiter gave up; triggers unlock */
	struct nfs_server *server;
};
4418 
/*
 * Allocate and initialize the call data for a LOCK request, including
 * both the open-owner and lock-owner seqids.  Takes references on @lsp
 * and the open context.  Returns NULL on allocation failure.
 */
static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
		struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
		gfp_t gfp_mask)
{
	struct nfs4_lockdata *p;
	struct inode *inode = lsp->ls_state->inode;
	struct nfs_server *server = NFS_SERVER(inode);

	p = kzalloc(sizeof(*p), gfp_mask);
	if (p == NULL)
		return NULL;

	p->arg.fh = NFS_FH(inode);
	p->arg.fl = &p->fl;
	p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
	if (p->arg.open_seqid == NULL)
		goto out_free;
	p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask);
	if (p->arg.lock_seqid == NULL)
		goto out_free_seqid;
	p->arg.lock_stateid = &lsp->ls_stateid;
	p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
	p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
	p->arg.lock_owner.s_dev = server->s_dev;
	p->res.lock_seqid = p->arg.lock_seqid;
	p->lsp = lsp;
	p->server = server;
	atomic_inc(&lsp->ls_count);
	p->ctx = get_nfs_open_context(ctx);
	memcpy(&p->fl, fl, sizeof(p->fl));
	return p;
out_free_seqid:
	nfs_free_seqid(p->arg.open_seqid);
out_free:
	kfree(p);
	return NULL;
}
4456 
/*
 * rpc_call_prepare callback for LOCK: serialize on the lock seqid,
 * decide whether this is a new lock owner (open_to_lock_owner) or an
 * existing one (exist_lock_owner), then set up the session sequence.
 */
static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_lockdata *data = calldata;
	struct nfs4_state *state = data->lsp->ls_state;

	dprintk("%s: begin!\n", __func__);
	if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
		return;
	/* Do we need to do an open_to_lock_owner? */
	if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
		/* New lock owner: also serialize on the open seqid */
		if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0)
			return;
		data->arg.open_stateid = &state->stateid;
		data->arg.new_lock_owner = 1;
		data->res.open_seqid = data->arg.open_seqid;
	} else
		data->arg.new_lock_owner = 0;
	data->timestamp = jiffies;
	if (nfs4_setup_sequence(data->server,
				&data->arg.seq_args,
				&data->res.seq_res, task))
		return;
	rpc_call_start(task);
	dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
}
4482 
/*
 * Prepare a LOCK call issued during state recovery: mark the task as
 * privileged, then do the normal LOCK preparation.
 */
static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata)
{
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	nfs4_lock_prepare(task, calldata);
}
4488 
/*
 * rpc_call_done callback for LOCK: on success, confirm the lock-owner
 * seqid (for a new lock owner), record the returned lock stateid, mark
 * the lock state initialized, and renew the lease.
 */
static void nfs4_lock_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_lockdata *data = calldata;

	dprintk("%s: begin!\n", __func__);

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	data->rpc_status = task->tk_status;
	if (data->arg.new_lock_owner != 0) {
		if (data->rpc_status == 0)
			nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
		else
			goto out;
	}
	if (data->rpc_status == 0) {
		nfs4_stateid_copy(&data->lsp->ls_stateid, &data->res.stateid);
		data->lsp->ls_flags |= NFS_LOCK_INITIALIZED;
		renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp);
	}
out:
	dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
}
4513 
/*
 * rpc_release callback for LOCK: free the seqids and references.  If
 * the synchronous waiter was interrupted (data->cancelled), the server
 * may have granted the lock anyway, so fire off an asynchronous LOCKU
 * to release it (which takes over ownership of the lock seqid).
 */
static void nfs4_lock_release(void *calldata)
{
	struct nfs4_lockdata *data = calldata;

	dprintk("%s: begin!\n", __func__);
	nfs_free_seqid(data->arg.open_seqid);
	if (data->cancelled != 0) {
		struct rpc_task *task;
		task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
				data->arg.lock_seqid);
		if (!IS_ERR(task))
			rpc_put_task_async(task);
		dprintk("%s: cancelling lock!\n", __func__);
	} else
		nfs_free_seqid(data->arg.lock_seqid);
	nfs4_put_lock_state(data->lsp);
	put_nfs_open_context(data->ctx);
	kfree(data);
	dprintk("%s: done!\n", __func__);
}
4534 
/* RPC callback table for ordinary LOCK tasks */
static const struct rpc_call_ops nfs4_lock_ops = {
	.rpc_call_prepare = nfs4_lock_prepare,
	.rpc_call_done = nfs4_lock_done,
	.rpc_release = nfs4_lock_release,
};
4540 
/* RPC callback table for LOCK tasks issued during state recovery */
static const struct rpc_call_ops nfs4_recover_lock_ops = {
	.rpc_call_prepare = nfs4_recover_lock_prepare,
	.rpc_call_done = nfs4_lock_done,
	.rpc_release = nfs4_lock_release,
};
4546 
4547 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
4548 {
4549 	switch (error) {
4550 	case -NFS4ERR_ADMIN_REVOKED:
4551 	case -NFS4ERR_BAD_STATEID:
4552 		lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
4553 		if (new_lock_owner != 0 ||
4554 		   (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
4555 			nfs4_schedule_stateid_recovery(server, lsp->ls_state);
4556 		break;
4557 	case -NFS4ERR_STALE_STATEID:
4558 		lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
4559 	case -NFS4ERR_EXPIRED:
4560 		nfs4_schedule_lease_recovery(server->nfs_client);
4561 	};
4562 }
4563 
/*
 * Send a LOCK request and wait for it to complete.  @recovery_type
 * selects normal operation (NFS_LOCK_NEW) or a recovery variant
 * (NFS_LOCK_RECLAIM / NFS_LOCK_EXPIRED), which runs with privileged
 * callbacks and, for reclaim, sets the reclaim flag in the arguments.
 * If the wait is interrupted, the task is marked cancelled so that
 * nfs4_lock_release() will undo any lock the server may have granted.
 */
static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
{
	struct nfs4_lockdata *data;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
		.rpc_cred = state->owner->so_cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(state->inode),
		.rpc_message = &msg,
		.callback_ops = &nfs4_lock_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int ret;

	dprintk("%s: begin!\n", __func__);
	data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
			fl->fl_u.nfs4_fl.owner,
			recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
	if (data == NULL)
		return -ENOMEM;
	if (IS_SETLKW(cmd))
		data->arg.block = 1;
	if (recovery_type > NFS_LOCK_NEW) {
		if (recovery_type == NFS_LOCK_RECLAIM)
			data->arg.reclaim = NFS_LOCK_RECLAIM;
		task_setup_data.callback_ops = &nfs4_recover_lock_ops;
	}
	nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
	msg.rpc_argp = &data->arg;
	msg.rpc_resp = &data->res;
	task_setup_data.callback_data = data;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	ret = nfs4_wait_for_completion_rpc_task(task);
	if (ret == 0) {
		ret = data->rpc_status;
		if (ret)
			nfs4_handle_setlk_error(data->server, data->lsp,
					data->arg.new_lock_owner, ret);
	} else
		/* Interrupted: let the release callback unlock */
		data->cancelled = 1;
	rpc_put_task(task);
	dprintk("%s: done, ret = %d!\n", __func__, ret);
	return ret;
}
4613 
/*
 * Reclaim a lock after a server reboot.  Skips the RPC entirely when
 * the state is delegated (the lock is cached locally); retries only
 * on NFS4ERR_DELAY, any other error is returned to the caller.
 */
static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = {
		.inode = state->inode,
	};
	int err;

	do {
		/* Cache the lock if possible... */
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
			return 0;
		err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
		if (err != -NFS4ERR_DELAY)
			break;
		nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}
4633 
/*
 * Re-establish a lock after lease expiry.  Skips the RPC when the
 * state is delegated; retries through the exception handler on
 * NFS4ERR_GRACE / NFS4ERR_DELAY, returns any other result directly.
 */
static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = {
		.inode = state->inode,
	};
	int err;

	err = nfs4_set_lock_state(state, request);
	if (err != 0)
		return err;
	do {
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
			return 0;
		err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
		switch (err) {
		default:
			goto out;
		case -NFS4ERR_GRACE:
		case -NFS4ERR_DELAY:
			nfs4_handle_exception(server, err, &exception);
			err = 0;
		}
	} while (exception.retry);
out:
	return err;
}
4661 
4662 #if defined(CONFIG_NFS_V4_1)
4663 static int nfs41_check_expired_locks(struct nfs4_state *state)
4664 {
4665 	int status, ret = NFS_OK;
4666 	struct nfs4_lock_state *lsp;
4667 	struct nfs_server *server = NFS_SERVER(state->inode);
4668 
4669 	list_for_each_entry(lsp, &state->lock_states, ls_locks) {
4670 		if (lsp->ls_flags & NFS_LOCK_INITIALIZED) {
4671 			status = nfs41_test_stateid(server, &lsp->ls_stateid);
4672 			if (status != NFS_OK) {
4673 				nfs41_free_stateid(server, &lsp->ls_stateid);
4674 				lsp->ls_flags &= ~NFS_LOCK_INITIALIZED;
4675 				ret = status;
4676 			}
4677 		}
4678 	};
4679 
4680 	return ret;
4681 }
4682 
4683 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
4684 {
4685 	int status = NFS_OK;
4686 
4687 	if (test_bit(LK_STATE_IN_USE, &state->flags))
4688 		status = nfs41_check_expired_locks(state);
4689 	if (status == NFS_OK)
4690 		return status;
4691 	return nfs4_lock_expired(state, request);
4692 }
4693 #endif
4694 
/*
 * Set a lock: probe the VFS for local conflicts (FL_ACCESS), then
 * either cache the lock locally (delegated open) or send LOCK to the
 * server and, on success, record the lock with the VFS.  The caller's
 * fl_flags are restored before returning.
 */
static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	unsigned char fl_flags = request->fl_flags;
	int status = -ENOLCK;

	if ((fl_flags & FL_POSIX) &&
			!test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
		goto out;
	/* Is this a delegated open? */
	status = nfs4_set_lock_state(state, request);
	if (status != 0)
		goto out;
	/* Check for local conflicts without actually taking the lock */
	request->fl_flags |= FL_ACCESS;
	status = do_vfs_lock(request->fl_file, request);
	if (status < 0)
		goto out;
	down_read(&nfsi->rwsem);
	if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
		/* Yes: cache locks! */
		/* ...but avoid races with delegation recall... */
		request->fl_flags = fl_flags & ~FL_SLEEP;
		status = do_vfs_lock(request->fl_file, request);
		goto out_unlock;
	}
	status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
	if (status != 0)
		goto out_unlock;
	/* Note: we always want to sleep here! */
	request->fl_flags = fl_flags | FL_SLEEP;
	if (do_vfs_lock(request->fl_file, request) < 0)
		printk(KERN_WARNING "NFS: %s: VFS is out of sync with lock "
			"manager!\n", __func__);
out_unlock:
	up_read(&nfsi->rwsem);
out:
	request->fl_flags = fl_flags;
	return status;
}
4734 
/*
 * SETLK wrapper: retry _nfs4_proc_setlk() through the exception
 * handler, mapping NFS4ERR_DENIED to -EAGAIN so callers can back off
 * and retry blocking requests.
 */
static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
	struct nfs4_exception exception = {
		.state = state,
		.inode = state->inode,
	};
	int err;

	do {
		err = _nfs4_proc_setlk(state, cmd, request);
		if (err == -NFS4ERR_DENIED)
			err = -EAGAIN;
		err = nfs4_handle_exception(NFS_SERVER(state->inode),
				err, &exception);
	} while (exception.retry);
	return err;
}
4752 
/*
 * NFSv4 entry point for file locking (the ->lock file operation):
 * dispatch F_GETLK / unlock / set-lock requests, validate the open
 * mode against the requested lock type, and for blocking set-lock
 * requests retry with exponential backoff while the lock is denied.
 */
static int
nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
{
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
	int status;

	/* verify open state */
	ctx = nfs_file_open_context(filp);
	state = ctx->state;

	if (request->fl_start < 0 || request->fl_end < 0)
		return -EINVAL;

	if (IS_GETLK(cmd)) {
		if (state != NULL)
			return nfs4_proc_getlk(state, F_GETLK, request);
		return 0;
	}

	if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
		return -EINVAL;

	if (request->fl_type == F_UNLCK) {
		if (state != NULL)
			return nfs4_proc_unlck(state, cmd, request);
		return 0;
	}

	if (state == NULL)
		return -ENOLCK;
	/*
	 * Don't rely on the VFS having checked the file open mode,
	 * since it won't do this for flock() locks.
	 */
	switch (request->fl_type & (F_RDLCK|F_WRLCK|F_UNLCK)) {
	case F_RDLCK:
		if (!(filp->f_mode & FMODE_READ))
			return -EBADF;
		break;
	case F_WRLCK:
		if (!(filp->f_mode & FMODE_WRITE))
			return -EBADF;
	}

	/* Retry loop for blocking (SETLKW) requests */
	do {
		status = nfs4_proc_setlk(state, cmd, request);
		if ((status != -EAGAIN) || IS_SETLK(cmd))
			break;
		timeout = nfs4_set_lock_task_retry(timeout);
		status = -ERESTARTSYS;
		if (signalled())
			break;
	} while(status < 0);
	return status;
}
4810 
/*
 * Re-establish a locally cached lock on the server when a delegation
 * is being recalled.  Most errors schedule the appropriate recovery
 * and return; only NFS4ERR_DELAY is retried via the exception handler.
 * Note the switch relies on several intentional case fallthroughs.
 */
int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };
	int err;

	err = nfs4_set_lock_state(state, fl);
	if (err != 0)
		goto out;
	do {
		err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
		switch (err) {
			default:
				printk(KERN_ERR "NFS: %s: unhandled error "
					"%d.\n", __func__, err);
				/* Fallthrough: give up on this lock */
			case 0:
			case -ESTALE:
				goto out;
			case -NFS4ERR_EXPIRED:
				nfs4_schedule_stateid_recovery(server, state);
				/* Fallthrough: recover the lease too */
			case -NFS4ERR_STALE_CLIENTID:
			case -NFS4ERR_STALE_STATEID:
				nfs4_schedule_lease_recovery(server->nfs_client);
				goto out;
			case -NFS4ERR_BADSESSION:
			case -NFS4ERR_BADSLOT:
			case -NFS4ERR_BAD_HIGH_SLOT:
			case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
			case -NFS4ERR_DEADSESSION:
				nfs4_schedule_session_recovery(server->nfs_client->cl_session);
				goto out;
			case -ERESTARTSYS:
				/*
				 * The show must go on: exit, but mark the
				 * stateid as needing recovery.
				 */
			case -NFS4ERR_DELEG_REVOKED:
			case -NFS4ERR_ADMIN_REVOKED:
			case -NFS4ERR_BAD_STATEID:
			case -NFS4ERR_OPENMODE:
				nfs4_schedule_stateid_recovery(server, state);
				err = 0;
				goto out;
			case -EKEYEXPIRED:
				/*
				 * User RPCSEC_GSS context has expired.
				 * We cannot recover this stateid now, so
				 * skip it and allow recovery thread to
				 * proceed.
				 */
				err = 0;
				goto out;
			case -ENOMEM:
			case -NFS4ERR_DENIED:
				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
				err = 0;
				goto out;
			case -NFS4ERR_DELAY:
				break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
out:
	return err;
}
4876 
/* Per-call state for an asynchronous RELEASE_LOCKOWNER request */
struct nfs_release_lockowner_data {
	struct nfs4_lock_state *lsp;	/* lock state to free on completion */
	struct nfs_server *server;
	struct nfs_release_lockowner_args args;
};
4882 
4883 static void nfs4_release_lockowner_release(void *calldata)
4884 {
4885 	struct nfs_release_lockowner_data *data = calldata;
4886 	nfs4_free_lock_state(data->server, data->lsp);
4887 	kfree(calldata);
4888 }
4889 
/* RPC callback table for RELEASE_LOCKOWNER: cleanup only */
static const struct rpc_call_ops nfs4_release_lockowner_ops = {
	.rpc_release = nfs4_release_lockowner_release,
};
4893 
/*
 * Fire off an asynchronous RELEASE_LOCKOWNER for @lsp.  Only valid for
 * NFSv4.0 (minor version 0); v4.1 retires lock owners via sessions.
 * The lock state itself is freed by the rpc_release callback.
 *
 * NOTE(review): the return value of rpc_call_async() is ignored here;
 * whether @data is released on submission failure depends on the RPC
 * layer's behavior — verify against rpc_call_async().
 */
int nfs4_release_lockowner(struct nfs4_lock_state *lsp)
{
	struct nfs_server *server = lsp->ls_state->owner->so_server;
	struct nfs_release_lockowner_data *data;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
	};

	if (server->nfs_client->cl_mvops->minor_version != 0)
		return -EINVAL;
	data = kmalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		return -ENOMEM;
	data->lsp = lsp;
	data->server = server;
	data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
	data->args.lock_owner.id = lsp->ls_seqid.owner_id;
	data->args.lock_owner.s_dev = server->s_dev;
	msg.rpc_argp = &data->args;
	rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
	return 0;
}
4916 
4917 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
4918 
4919 static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
4920 				   const void *buf, size_t buflen,
4921 				   int flags, int type)
4922 {
4923 	if (strcmp(key, "") != 0)
4924 		return -EINVAL;
4925 
4926 	return nfs4_proc_set_acl(dentry->d_inode, buf, buflen);
4927 }
4928 
4929 static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
4930 				   void *buf, size_t buflen, int type)
4931 {
4932 	if (strcmp(key, "") != 0)
4933 		return -EINVAL;
4934 
4935 	return nfs4_proc_get_acl(dentry->d_inode, buf, buflen);
4936 }
4937 
4938 static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
4939 				       size_t list_len, const char *name,
4940 				       size_t name_len, int type)
4941 {
4942 	size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
4943 
4944 	if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode)))
4945 		return 0;
4946 
4947 	if (list && len <= list_len)
4948 		memcpy(list, XATTR_NAME_NFSV4_ACL, len);
4949 	return len;
4950 }
4951 
4952 /*
4953  * nfs_fhget will use either the mounted_on_fileid or the fileid
4954  */
4955 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
4956 {
4957 	if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
4958 	       (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
4959 	      (fattr->valid & NFS_ATTR_FATTR_FSID) &&
4960 	      (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
4961 		return;
4962 
4963 	fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
4964 		NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
4965 	fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
4966 	fattr->nlink = 2;
4967 }
4968 
/*
 * Issue a single FS_LOCATIONS request for @name under @dir, filling in
 * @fs_locations (with @page backing the location list XDR decode).
 * Returns the nfs4_call_sync() status; retries/exception handling are
 * done by the nfs4_proc_fs_locations() wrapper.
 */
static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
				   const struct qstr *name,
				   struct nfs4_fs_locations *fs_locations,
				   struct page *page)
{
	struct nfs_server *server = NFS_SERVER(dir);
	/* Always request the fsid and the location list themselves */
	u32 bitmask[2] = {
		[0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
	};
	struct nfs4_fs_locations_arg args = {
		.dir_fh = NFS_FH(dir),
		.name = name,
		.page = page,
		.bitmask = bitmask,
	};
	struct nfs4_fs_locations_res res = {
		.fs_locations = fs_locations,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;

	dprintk("%s: start\n", __func__);

	/* Ask for the fileid of the absent filesystem if mounted_on_fileid
	 * is not supported */
	if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
		bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
	else
		bitmask[0] |= FATTR4_WORD0_FILEID;

	/* Reset the result structure before the server fills it in */
	nfs_fattr_init(&fs_locations->fattr);
	fs_locations->server = server;
	fs_locations->nlocations = 0;
	status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
	dprintk("%s: returned status = %d\n", __func__, status);
	return status;
}
5010 
5011 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
5012 			   const struct qstr *name,
5013 			   struct nfs4_fs_locations *fs_locations,
5014 			   struct page *page)
5015 {
5016 	struct nfs4_exception exception = { };
5017 	int err;
5018 	do {
5019 		err = nfs4_handle_exception(NFS_SERVER(dir),
5020 				_nfs4_proc_fs_locations(client, dir, name, fs_locations, page),
5021 				&exception);
5022 	} while (exception.retry);
5023 	return err;
5024 }
5025 
/*
 * Issue one SECINFO request for @name under @dir; the server's list of
 * acceptable security flavors is decoded into @flavors.  Retries are
 * handled by the nfs4_proc_secinfo() wrapper.
 */
static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
{
	int status;
	struct nfs4_secinfo_arg args = {
		.dir_fh = NFS_FH(dir),
		.name   = name,
	};
	struct nfs4_secinfo_res res = {
		.flavors     = flavors,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	dprintk("NFS call  secinfo %s\n", name->name);
	status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
	dprintk("NFS reply  secinfo: %d\n", status);
	return status;
}
5047 
5048 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
5049 		      struct nfs4_secinfo_flavors *flavors)
5050 {
5051 	struct nfs4_exception exception = { };
5052 	int err;
5053 	do {
5054 		err = nfs4_handle_exception(NFS_SERVER(dir),
5055 				_nfs4_proc_secinfo(dir, name, flavors),
5056 				&exception);
5057 	} while (exception.retry);
5058 	return err;
5059 }
5060 
5061 #ifdef CONFIG_NFS_V4_1
5062 /*
5063  * Check the exchange flags returned by the server for invalid flags, having
5064  * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or
5065  * DS flags set.
5066  */
5067 static int nfs4_check_cl_exchange_flags(u32 flags)
5068 {
5069 	if (flags & ~EXCHGID4_FLAG_MASK_R)
5070 		goto out_inval;
5071 	if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
5072 	    (flags & EXCHGID4_FLAG_USE_NON_PNFS))
5073 		goto out_inval;
5074 	if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
5075 		goto out_inval;
5076 	return NFS_OK;
5077 out_inval:
5078 	return -NFS4ERR_INVAL;
5079 }
5080 
5081 static bool
5082 nfs41_same_server_scope(struct server_scope *a, struct server_scope *b)
5083 {
5084 	if (a->server_scope_sz == b->server_scope_sz &&
5085 	    memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
5086 		return true;
5087 
5088 	return false;
5089 }
5090 
5091 /*
5092  * nfs4_proc_exchange_id()
5093  *
5094  * Since the clientid has expired, all compounds using sessions
5095  * associated with the stale clientid will be returning
5096  * NFS4ERR_BADSESSION in the sequence operation, and will therefore
5097  * be in some phase of session reset.
5098  */
int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
{
	nfs4_verifier verifier;
	struct nfs41_exchange_id_args args = {
		.verifier = &verifier,
		.client = clp,
		.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER,
	};
	struct nfs41_exchange_id_res res = {
		.client = clp,
	};
	int status;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
		.rpc_argp = &args,
		.rpc_resp = &res,
		.rpc_cred = cred,
	};

	dprintk("--> %s\n", __func__);
	BUG_ON(clp == NULL);

	nfs4_construct_boot_verifier(clp, &verifier);

	/* co_ownerid: unique per client instance and auth flavor */
	args.id_len = scnprintf(args.id, sizeof(args.id),
				"%s/%s/%u",
				clp->cl_ipaddr,
				clp->cl_rpcclient->cl_nodename,
				clp->cl_rpcclient->cl_auth->au_flavor);

	/* Pre-allocate buffers the XDR decoder fills in */
	res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL);
	if (unlikely(!res.server_scope)) {
		status = -ENOMEM;
		goto out;
	}

	res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_KERNEL);
	if (unlikely(!res.impl_id)) {
		status = -ENOMEM;
		goto out_server_scope;
	}

	status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
	if (!status)
		status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags);

	/* On success, ownership of res.impl_id moves to the nfs_client */
	if (!status) {
		/* use the most recent implementation id */
		kfree(clp->impl_id);
		clp->impl_id = res.impl_id;
	} else
		kfree(res.impl_id);

	if (!status) {
		/* A changed server scope means lock state was lost */
		if (clp->server_scope &&
		    !nfs41_same_server_scope(clp->server_scope,
					     res.server_scope)) {
			dprintk("%s: server_scope mismatch detected\n",
				__func__);
			set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
			kfree(clp->server_scope);
			clp->server_scope = NULL;
		}

		/* Hand res.server_scope to the nfs_client; skip the kfree */
		if (!clp->server_scope) {
			clp->server_scope = res.server_scope;
			goto out;
		}
	}

out_server_scope:
	kfree(res.server_scope);
out:
	if (clp->impl_id)
		dprintk("%s: Server Implementation ID: "
			"domain: %s, name: %s, date: %llu,%u\n",
			__func__, clp->impl_id->domain, clp->impl_id->name,
			clp->impl_id->date.seconds,
			clp->impl_id->date.nseconds);
	dprintk("<-- %s status= %d\n", __func__, status);
	return status;
}
5181 
/* Calldata bundling the GET_LEASE_TIME args/res for the task callbacks. */
struct nfs4_get_lease_time_data {
	struct nfs4_get_lease_time_args *args;
	struct nfs4_get_lease_time_res *res;
	struct nfs_client *clp;
};
5187 
/*
 * ->rpc_call_prepare for GET_LEASE_TIME.  Runs at privileged priority
 * because it is issued while the session is still being established.
 */
static void nfs4_get_lease_time_prepare(struct rpc_task *task,
					void *calldata)
{
	int ret;
	struct nfs4_get_lease_time_data *data =
			(struct nfs4_get_lease_time_data *)calldata;

	dprintk("--> %s\n", __func__);
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	/* just setup sequence, do not trigger session recovery
	   since we're invoked within one */
	ret = nfs41_setup_sequence(data->clp->cl_session,
				   &data->args->la_seq_args,
				   &data->res->lr_seq_res, task);

	/* A privileged task must never be asked to wait for a slot */
	BUG_ON(ret == -EAGAIN);
	rpc_call_start(task);
	dprintk("<-- %s\n", __func__);
}
5207 
5208 /*
5209  * Called from nfs4_state_manager thread for session setup, so don't recover
5210  * from sequence operation or clientid errors.
5211  */
/*
 * Called from nfs4_state_manager thread for session setup, so don't recover
 * from sequence operation or clientid errors.
 */
static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_get_lease_time_data *data =
			(struct nfs4_get_lease_time_data *)calldata;

	dprintk("--> %s\n", __func__);
	if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
		return;
	switch (task->tk_status) {
	case -NFS4ERR_DELAY:
	case -NFS4ERR_GRACE:
		/* Back off, then restart the call from prepare */
		dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
		rpc_delay(task, NFS4_POLL_RETRY_MIN);
		task->tk_status = 0;
		/* fall through */
	case -NFS4ERR_RETRY_UNCACHED_REP:
		rpc_restart_call_prepare(task);
		return;
	}
	dprintk("<-- %s\n", __func__);
}
5233 
/* Callbacks for the GET_LEASE_TIME task; calldata lives on the caller's stack. */
static const struct rpc_call_ops nfs4_get_lease_time_ops = {
	.rpc_call_prepare = nfs4_get_lease_time_prepare,
	.rpc_call_done = nfs4_get_lease_time_done,
};
5238 
5239 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
5240 {
5241 	struct rpc_task *task;
5242 	struct nfs4_get_lease_time_args args;
5243 	struct nfs4_get_lease_time_res res = {
5244 		.lr_fsinfo = fsinfo,
5245 	};
5246 	struct nfs4_get_lease_time_data data = {
5247 		.args = &args,
5248 		.res = &res,
5249 		.clp = clp,
5250 	};
5251 	struct rpc_message msg = {
5252 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
5253 		.rpc_argp = &args,
5254 		.rpc_resp = &res,
5255 	};
5256 	struct rpc_task_setup task_setup = {
5257 		.rpc_client = clp->cl_rpcclient,
5258 		.rpc_message = &msg,
5259 		.callback_ops = &nfs4_get_lease_time_ops,
5260 		.callback_data = &data,
5261 		.flags = RPC_TASK_TIMEOUT,
5262 	};
5263 	int status;
5264 
5265 	nfs41_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
5266 	dprintk("--> %s\n", __func__);
5267 	task = rpc_run_task(&task_setup);
5268 
5269 	if (IS_ERR(task))
5270 		status = PTR_ERR(task);
5271 	else {
5272 		status = task->tk_status;
5273 		rpc_put_task(task);
5274 	}
5275 	dprintk("<-- %s return %d\n", __func__, status);
5276 
5277 	return status;
5278 }
5279 
5280 static struct nfs4_slot *nfs4_alloc_slots(u32 max_slots, gfp_t gfp_flags)
5281 {
5282 	return kcalloc(max_slots, sizeof(struct nfs4_slot), gfp_flags);
5283 }
5284 
/*
 * Install @new as the slot array for @tbl (when non-NULL) and reset all
 * slot sequence numbers to @ivalue.  The swap happens under
 * slot_tbl_lock; the old array is freed after the lock is dropped.
 */
static void nfs4_add_and_init_slots(struct nfs4_slot_table *tbl,
		struct nfs4_slot *new,
		u32 max_slots,
		u32 ivalue)
{
	struct nfs4_slot *old = NULL;
	u32 i;

	spin_lock(&tbl->slot_tbl_lock);
	if (new) {
		old = tbl->slots;
		tbl->slots = new;
		tbl->max_slots = max_slots;
	}
	tbl->highest_used_slotid = -1;	/* no slot is currently used */
	for (i = 0; i < tbl->max_slots; i++)
		tbl->slots[i].seq_nr = ivalue;
	spin_unlock(&tbl->slot_tbl_lock);
	/* kfree(NULL) is safe if no replacement array was installed */
	kfree(old);
}
5305 
5306 /*
5307  * (re)Initialise a slot table
5308  */
5309 static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs,
5310 				 u32 ivalue)
5311 {
5312 	struct nfs4_slot *new = NULL;
5313 	int ret = -ENOMEM;
5314 
5315 	dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__,
5316 		max_reqs, tbl->max_slots);
5317 
5318 	/* Does the newly negotiated max_reqs match the existing slot table? */
5319 	if (max_reqs != tbl->max_slots) {
5320 		new = nfs4_alloc_slots(max_reqs, GFP_NOFS);
5321 		if (!new)
5322 			goto out;
5323 	}
5324 	ret = 0;
5325 
5326 	nfs4_add_and_init_slots(tbl, new, max_reqs, ivalue);
5327 	dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
5328 		tbl, tbl->slots, tbl->max_slots);
5329 out:
5330 	dprintk("<-- %s: return %d\n", __func__, ret);
5331 	return ret;
5332 }
5333 
5334 /* Destroy the slot table */
5335 static void nfs4_destroy_slot_tables(struct nfs4_session *session)
5336 {
5337 	if (session->fc_slot_table.slots != NULL) {
5338 		kfree(session->fc_slot_table.slots);
5339 		session->fc_slot_table.slots = NULL;
5340 	}
5341 	if (session->bc_slot_table.slots != NULL) {
5342 		kfree(session->bc_slot_table.slots);
5343 		session->bc_slot_table.slots = NULL;
5344 	}
5345 	return;
5346 }
5347 
5348 /*
5349  * Initialize or reset the forechannel and backchannel tables
5350  */
/*
 * Initialize or reset the forechannel and backchannel tables
 */
static int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
{
	struct nfs4_slot_table *tbl;
	int status;

	dprintk("--> %s\n", __func__);
	/* Fore channel: slot sequence numbers start at 1 */
	tbl = &ses->fc_slot_table;
	status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
	if (status) /* -ENOMEM */
		return status;
	/* Back channel: slot sequence numbers start at 0 */
	tbl = &ses->bc_slot_table;
	status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
	if (status && tbl->slots == NULL)
		/* Fore and back channel share a connection so get
		 * both slot tables or neither */
		nfs4_destroy_slot_tables(ses);
	return status;
}
5371 
5372 struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
5373 {
5374 	struct nfs4_session *session;
5375 	struct nfs4_slot_table *tbl;
5376 
5377 	session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
5378 	if (!session)
5379 		return NULL;
5380 
5381 	tbl = &session->fc_slot_table;
5382 	tbl->highest_used_slotid = NFS4_NO_SLOT;
5383 	spin_lock_init(&tbl->slot_tbl_lock);
5384 	rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
5385 	init_completion(&tbl->complete);
5386 
5387 	tbl = &session->bc_slot_table;
5388 	tbl->highest_used_slotid = NFS4_NO_SLOT;
5389 	spin_lock_init(&tbl->slot_tbl_lock);
5390 	rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
5391 	init_completion(&tbl->complete);
5392 
5393 	session->session_state = 1<<NFS4_SESSION_INITING;
5394 
5395 	session->clp = clp;
5396 	return session;
5397 }
5398 
/*
 * Tear down @session: send DESTROY_SESSION to the server, release the
 * transport's backchannel resources, free the slot tables and finally
 * the session itself.
 */
void nfs4_destroy_session(struct nfs4_session *session)
{
	struct rpc_xprt *xprt;

	nfs4_proc_destroy_session(session);

	/* cl_xprt is RCU-managed; take a stable snapshot of the pointer */
	rcu_read_lock();
	xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
	rcu_read_unlock();
	dprintk("%s Destroy backchannel for xprt %p\n",
		__func__, xprt);
	xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS);
	nfs4_destroy_slot_tables(session);
	kfree(session);
}
5414 
5415 /*
5416  * Initialize the values to be used by the client in CREATE_SESSION
5417  * If nfs4_init_session set the fore channel request and response sizes,
5418  * use them.
5419  *
5420  * Set the back channel max_resp_sz_cached to zero to force the client to
5421  * always set csa_cachethis to FALSE because the current implementation
5422  * of the back channel DRC only supports caching the CB_SEQUENCE operation.
5423  */
static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
{
	struct nfs4_session *session = args->client->cl_session;
	unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz,
		     mxresp_sz = session->fc_attrs.max_resp_sz;

	/* Fall back to the maximum I/O size if nfs4_init_session set none */
	if (mxrqst_sz == 0)
		mxrqst_sz = NFS_MAX_FILE_IO_SIZE;
	if (mxresp_sz == 0)
		mxresp_sz = NFS_MAX_FILE_IO_SIZE;
	/* Fore channel attributes */
	args->fc_attrs.max_rqst_sz = mxrqst_sz;
	args->fc_attrs.max_resp_sz = mxresp_sz;
	args->fc_attrs.max_ops = NFS4_MAX_OPS;
	args->fc_attrs.max_reqs = max_session_slots;

	dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
		"max_ops=%u max_reqs=%u\n",
		__func__,
		args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
		args->fc_attrs.max_ops, args->fc_attrs.max_reqs);

	/* Back channel attributes */
	args->bc_attrs.max_rqst_sz = PAGE_SIZE;
	args->bc_attrs.max_resp_sz = PAGE_SIZE;
	/* Zero forces csa_cachethis FALSE; the backchannel DRC only
	 * caches CB_SEQUENCE (see block comment above) */
	args->bc_attrs.max_resp_sz_cached = 0;
	args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
	args->bc_attrs.max_reqs = 1;

	dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
		"max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
		__func__,
		args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
		args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
		args->bc_attrs.max_reqs);
}
5460 
5461 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
5462 {
5463 	struct nfs4_channel_attrs *sent = &args->fc_attrs;
5464 	struct nfs4_channel_attrs *rcvd = &session->fc_attrs;
5465 
5466 	if (rcvd->max_resp_sz > sent->max_resp_sz)
5467 		return -EINVAL;
5468 	/*
5469 	 * Our requested max_ops is the minimum we need; we're not
5470 	 * prepared to break up compounds into smaller pieces than that.
5471 	 * So, no point even trying to continue if the server won't
5472 	 * cooperate:
5473 	 */
5474 	if (rcvd->max_ops < sent->max_ops)
5475 		return -EINVAL;
5476 	if (rcvd->max_reqs == 0)
5477 		return -EINVAL;
5478 	if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
5479 		rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
5480 	return 0;
5481 }
5482 
5483 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
5484 {
5485 	struct nfs4_channel_attrs *sent = &args->bc_attrs;
5486 	struct nfs4_channel_attrs *rcvd = &session->bc_attrs;
5487 
5488 	if (rcvd->max_rqst_sz > sent->max_rqst_sz)
5489 		return -EINVAL;
5490 	if (rcvd->max_resp_sz < sent->max_resp_sz)
5491 		return -EINVAL;
5492 	if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
5493 		return -EINVAL;
5494 	/* These would render the backchannel useless: */
5495 	if (rcvd->max_ops != sent->max_ops)
5496 		return -EINVAL;
5497 	if (rcvd->max_reqs != sent->max_reqs)
5498 		return -EINVAL;
5499 	return 0;
5500 }
5501 
/* Verify both channels' negotiated attributes; first failure wins. */
static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
				     struct nfs4_session *session)
{
	int status = nfs4_verify_fore_channel_attrs(args, session);

	if (status != 0)
		return status;
	return nfs4_verify_back_channel_attrs(args, session);
}
5512 
/*
 * Send a single CREATE_SESSION request for @clp and validate the
 * channel attributes the server negotiated.  On success the client's
 * create-session sequence id is advanced.  Returns 0 or a negative
 * errno / NFS4ERR value.
 */
static int _nfs4_proc_create_session(struct nfs_client *clp)
{
	struct nfs4_session *session = clp->cl_session;
	struct nfs41_create_session_args args = {
		.client = clp,
		.cb_program = NFS4_CALLBACK,
	};
	struct nfs41_create_session_res res = {
		.client = clp,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;

	nfs4_init_channel_attrs(&args);
	args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);

	status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);

	if (!status)
		/* Verify the session's negotiated channel_attrs values */
		status = nfs4_verify_channel_attrs(&args, session);
	if (!status) {
		/* Increment the clientid slot sequence id */
		clp->cl_seqid++;
	}

	return status;
}
5545 
5546 /*
5547  * Issues a CREATE_SESSION operation to the server.
5548  * It is the responsibility of the caller to verify the session is
5549  * expired before calling this routine.
5550  */
/*
 * Issues a CREATE_SESSION operation to the server.
 * It is the responsibility of the caller to verify the session is
 * expired before calling this routine.
 */
int nfs4_proc_create_session(struct nfs_client *clp)
{
	int status;
	unsigned *ptr;
	struct nfs4_session *session = clp->cl_session;

	dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);

	status = _nfs4_proc_create_session(clp);
	if (status)
		goto out;

	/* Init or reset the session slot tables */
	status = nfs4_setup_session_slot_tables(session);
	dprintk("slot table setup returned %d\n", status);
	if (status)
		goto out;

	/* Log the opaque 16-byte session id as four 32-bit words */
	ptr = (unsigned *)&session->sess_id.data[0];
	dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
		clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
out:
	dprintk("<-- %s\n", __func__);
	return status;
}
5576 
5577 /*
5578  * Issue the over-the-wire RPC DESTROY_SESSION.
5579  * The caller must serialize access to this routine.
5580  */
5581 int nfs4_proc_destroy_session(struct nfs4_session *session)
5582 {
5583 	int status = 0;
5584 	struct rpc_message msg;
5585 
5586 	dprintk("--> nfs4_proc_destroy_session\n");
5587 
5588 	/* session is still being setup */
5589 	if (session->clp->cl_cons_state != NFS_CS_READY)
5590 		return status;
5591 
5592 	msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION];
5593 	msg.rpc_argp = session;
5594 	msg.rpc_resp = NULL;
5595 	msg.rpc_cred = NULL;
5596 	status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5597 
5598 	if (status)
5599 		printk(KERN_WARNING
5600 			"NFS: Got error %d from the server on DESTROY_SESSION. "
5601 			"Session has been destroyed regardless...\n", status);
5602 
5603 	dprintk("<-- nfs4_proc_destroy_session\n");
5604 	return status;
5605 }
5606 
/*
 * One-time session initialisation for @server: seed the fore channel
 * request/response sizes from the mount's rsize/wsize and drive the
 * state manager until the lease (and session) are established.
 * A no-op for non-session (v4.0) clients or if the session has
 * already been initialised.
 */
int nfs4_init_session(struct nfs_server *server)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_session *session;
	unsigned int rsize, wsize;
	int ret;

	if (!nfs4_has_session(clp))
		return 0;

	/* Only the first caller past this test performs the setup */
	session = clp->cl_session;
	if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
		return 0;

	rsize = server->rsize;
	if (rsize == 0)
		rsize = NFS_MAX_FILE_IO_SIZE;
	wsize = server->wsize;
	if (wsize == 0)
		wsize = NFS_MAX_FILE_IO_SIZE;

	/* Leave headroom for the RPC/compound overhead on top of the I/O */
	session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
	session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;

	ret = nfs4_recover_expired_lease(server);
	if (!ret)
		ret = nfs4_check_client_ready(clp);
	return ret;
}
5636 
5637 int nfs4_init_ds_session(struct nfs_client *clp)
5638 {
5639 	struct nfs4_session *session = clp->cl_session;
5640 	int ret;
5641 
5642 	if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
5643 		return 0;
5644 
5645 	ret = nfs4_client_recover_expired_lease(clp);
5646 	if (!ret)
5647 		/* Test for the DS role */
5648 		if (!is_ds_client(clp))
5649 			ret = -ENODEV;
5650 	if (!ret)
5651 		ret = nfs4_check_client_ready(clp);
5652 	return ret;
5653 
5654 }
5655 EXPORT_SYMBOL_GPL(nfs4_init_ds_session);
5656 
5657 
5658 /*
5659  * Renew the cl_session lease.
5660  */
/* Calldata for an async SEQUENCE (lease renewal) request. */
struct nfs4_sequence_data {
	struct nfs_client *clp;
	struct nfs4_sequence_args args;
	struct nfs4_sequence_res res;
};
5666 
/*
 * ->rpc_release for the SEQUENCE renewal task: schedule the next
 * renewal only while other users still hold a reference to the client,
 * then drop the reference taken in _nfs41_proc_sequence().
 */
static void nfs41_sequence_release(void *data)
{
	struct nfs4_sequence_data *calldata = data;
	struct nfs_client *clp = calldata->clp;

	if (atomic_read(&clp->cl_count) > 1)
		nfs4_schedule_state_renewal(clp);
	nfs_put_client(clp);
	kfree(calldata);
}
5677 
5678 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
5679 {
5680 	switch(task->tk_status) {
5681 	case -NFS4ERR_DELAY:
5682 		rpc_delay(task, NFS4_POLL_RETRY_MAX);
5683 		return -EAGAIN;
5684 	default:
5685 		nfs4_schedule_lease_recovery(clp);
5686 	}
5687 	return 0;
5688 }
5689 
/*
 * ->rpc_call_done for the SEQUENCE renewal task.  Errors are only
 * retried/recovered while someone other than us still references the
 * client; a lone reference means the client is being torn down.
 */
static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
{
	struct nfs4_sequence_data *calldata = data;
	struct nfs_client *clp = calldata->clp;

	if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
		return;

	if (task->tk_status < 0) {
		dprintk("%s ERROR %d\n", __func__, task->tk_status);
		if (atomic_read(&clp->cl_count) == 1)
			goto out;

		if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
			rpc_restart_call_prepare(task);
			return;
		}
	}
	dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
out:
	dprintk("<-- %s\n", __func__);
}
5712 
5713 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
5714 {
5715 	struct nfs4_sequence_data *calldata = data;
5716 	struct nfs_client *clp = calldata->clp;
5717 	struct nfs4_sequence_args *args;
5718 	struct nfs4_sequence_res *res;
5719 
5720 	args = task->tk_msg.rpc_argp;
5721 	res = task->tk_msg.rpc_resp;
5722 
5723 	if (nfs41_setup_sequence(clp->cl_session, args, res, task))
5724 		return;
5725 	rpc_call_start(task);
5726 }
5727 
/* Task callbacks for the async SEQUENCE lease-renewal RPC. */
static const struct rpc_call_ops nfs41_sequence_ops = {
	.rpc_call_done = nfs41_sequence_call_done,
	.rpc_call_prepare = nfs41_sequence_prepare,
	.rpc_release = nfs41_sequence_release,
};
5733 
/*
 * Start an async SEQUENCE request to renew the lease.  Takes a
 * reference on @clp which is dropped in nfs41_sequence_release().
 * Returns the running task or an ERR_PTR.
 */
static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
{
	struct nfs4_sequence_data *calldata;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clp->cl_rpcclient,
		.rpc_message = &msg,
		.callback_ops = &nfs41_sequence_ops,
		.flags = RPC_TASK_ASYNC | RPC_TASK_SOFT,
	};

	/* A zero refcount means the client is already being destroyed */
	if (!atomic_inc_not_zero(&clp->cl_count))
		return ERR_PTR(-EIO);
	calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
	if (calldata == NULL) {
		nfs_put_client(clp);
		return ERR_PTR(-ENOMEM);
	}
	nfs41_init_sequence(&calldata->args, &calldata->res, 0);
	msg.rpc_argp = &calldata->args;
	msg.rpc_resp = &calldata->res;
	calldata->clp = clp;
	task_setup_data.callback_data = calldata;

	return rpc_run_task(&task_setup_data);
}
5763 
5764 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
5765 {
5766 	struct rpc_task *task;
5767 	int ret = 0;
5768 
5769 	if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
5770 		return 0;
5771 	task = _nfs41_proc_sequence(clp, cred);
5772 	if (IS_ERR(task))
5773 		ret = PTR_ERR(task);
5774 	else
5775 		rpc_put_task_async(task);
5776 	dprintk("<-- %s status=%d\n", __func__, ret);
5777 	return ret;
5778 }
5779 
/*
 * Synchronous SEQUENCE: run the async renewal task and wait for it,
 * acting on any sr_status_flags the server returned.  Returns the
 * task's status or a negative errno.
 */
static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
{
	struct rpc_task *task;
	int ret;

	task = _nfs41_proc_sequence(clp, cred);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto out;
	}
	ret = rpc_wait_for_completion_task(task);
	if (!ret) {
		struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;

		/* Server may signal state problems via the status flags */
		if (task->tk_status == 0)
			nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
		ret = task->tk_status;
	}
	rpc_put_task(task);
out:
	dprintk("<-- %s status=%d\n", __func__, ret);
	return ret;
}
5803 
/* Calldata for an async RECLAIM_COMPLETE request. */
struct nfs4_reclaim_complete_data {
	struct nfs_client *clp;
	struct nfs41_reclaim_complete_args arg;
	struct nfs41_reclaim_complete_res res;
};
5809 
5810 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
5811 {
5812 	struct nfs4_reclaim_complete_data *calldata = data;
5813 
5814 	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
5815 	if (nfs41_setup_sequence(calldata->clp->cl_session,
5816 				&calldata->arg.seq_args,
5817 				&calldata->res.seq_res, task))
5818 		return;
5819 
5820 	rpc_call_start(task);
5821 }
5822 
/*
 * Classify a RECLAIM_COMPLETE result: -EAGAIN asks the caller to
 * restart the task (after a delay for NFS4ERR_DELAY); 0 means the
 * status was either fine, ignorable, or lease recovery was scheduled.
 */
static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
{
	switch(task->tk_status) {
	case 0:
	case -NFS4ERR_COMPLETE_ALREADY:
	case -NFS4ERR_WRONG_CRED: /* What to do here? */
		break;
	case -NFS4ERR_DELAY:
		rpc_delay(task, NFS4_POLL_RETRY_MAX);
		/* fall through */
	case -NFS4ERR_RETRY_UNCACHED_REP:
		return -EAGAIN;
	default:
		nfs4_schedule_lease_recovery(clp);
	}
	return 0;
}
5840 
/*
 * ->rpc_call_done for RECLAIM_COMPLETE: complete the session sequence
 * bookkeeping and restart the call if the error handler asks for it.
 */
static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
{
	struct nfs4_reclaim_complete_data *calldata = data;
	struct nfs_client *clp = calldata->clp;
	struct nfs4_sequence_res *res = &calldata->res.seq_res;

	dprintk("--> %s\n", __func__);
	if (!nfs41_sequence_done(task, res))
		return;

	if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return;
	}
	dprintk("<-- %s\n", __func__);
}
5857 
/* ->rpc_release: free the RECLAIM_COMPLETE calldata. */
static void nfs4_free_reclaim_complete_data(void *data)
{
	kfree(data);
}
5864 
/* Task callbacks for the async RECLAIM_COMPLETE RPC. */
static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
	.rpc_call_prepare = nfs4_reclaim_complete_prepare,
	.rpc_call_done = nfs4_reclaim_complete_done,
	.rpc_release = nfs4_free_reclaim_complete_data,
};
5870 
5871 /*
5872  * Issue a global reclaim complete.
5873  */
5874 static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
5875 {
5876 	struct nfs4_reclaim_complete_data *calldata;
5877 	struct rpc_task *task;
5878 	struct rpc_message msg = {
5879 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
5880 	};
5881 	struct rpc_task_setup task_setup_data = {
5882 		.rpc_client = clp->cl_rpcclient,
5883 		.rpc_message = &msg,
5884 		.callback_ops = &nfs4_reclaim_complete_call_ops,
5885 		.flags = RPC_TASK_ASYNC,
5886 	};
5887 	int status = -ENOMEM;
5888 
5889 	dprintk("--> %s\n", __func__);
5890 	calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
5891 	if (calldata == NULL)
5892 		goto out;
5893 	calldata->clp = clp;
5894 	calldata->arg.one_fs = 0;
5895 
5896 	nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
5897 	msg.rpc_argp = &calldata->arg;
5898 	msg.rpc_resp = &calldata->res;
5899 	task_setup_data.callback_data = calldata;
5900 	task = rpc_run_task(&task_setup_data);
5901 	if (IS_ERR(task)) {
5902 		status = PTR_ERR(task);
5903 		goto out;
5904 	}
5905 	status = nfs4_wait_for_completion_rpc_task(task);
5906 	if (status == 0)
5907 		status = task->tk_status;
5908 	rpc_put_task(task);
5909 	return 0;
5910 out:
5911 	dprintk("<-- %s status=%d\n", __func__, status);
5912 	return status;
5913 }
5914 
/*
 * ->rpc_call_prepare for LAYOUTGET: pick a layout stateid and claim a
 * session slot.  If a usable layout stateid cannot be chosen the task
 * is terminated early with NFS4_OK (no layout, but no error either).
 */
static void
nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutget *lgp = calldata;
	struct nfs_server *server = NFS_SERVER(lgp->args.inode);

	dprintk("--> %s\n", __func__);
	/* Note the is a race here, where a CB_LAYOUTRECALL can come in
	 * right now covering the LAYOUTGET we are about to send.
	 * However, that is not so catastrophic, and there seems
	 * to be no way to prevent it completely.
	 */
	if (nfs4_setup_sequence(server, &lgp->args.seq_args,
				&lgp->res.seq_res, task))
		return;
	if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
					  NFS_I(lgp->args.inode)->layout,
					  lgp->args.ctx->state)) {
		rpc_exit(task, NFS4_OK);
		return;
	}
	rpc_call_start(task);
}
5938 
/*
 * ->rpc_call_done for LAYOUTGET.  Layout-busy conditions are remapped
 * to NFS4ERR_DELAY so the generic async error handler backs off and
 * retries; other errors go through the generic handler unchanged.
 */
static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutget *lgp = calldata;
	struct nfs_server *server = NFS_SERVER(lgp->args.inode);

	dprintk("--> %s\n", __func__);

	if (!nfs4_sequence_done(task, &lgp->res.seq_res))
		return;

	switch (task->tk_status) {
	case 0:
		break;
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
		task->tk_status = -NFS4ERR_DELAY;
		/* Fall through */
	default:
		if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
			rpc_restart_call_prepare(task);
			return;
		}
	}
	dprintk("<-- %s\n", __func__);
}
5964 
/*
 * rpc_release callback for LAYOUTGET: drop the open-context reference
 * held in the args, then free the nfs4_layoutget structure itself.
 */
static void nfs4_layoutget_release(void *calldata)
{
	struct nfs4_layoutget *lgp = calldata;

	dprintk("--> %s\n", __func__);
	put_nfs_open_context(lgp->args.ctx);
	kfree(calldata);
	dprintk("<-- %s\n", __func__);
}
5974 
/* RPC callbacks driving the asynchronous LAYOUTGET operation. */
static const struct rpc_call_ops nfs4_layoutget_call_ops = {
	.rpc_call_prepare = nfs4_layoutget_prepare,
	.rpc_call_done = nfs4_layoutget_done,
	.rpc_release = nfs4_layoutget_release,
};
5980 
/*
 * Send a LAYOUTGET to the MDS and wait for the result.
 *
 * The RPC runs as an async task (RPC_TASK_ASYNC) so the prepare/done
 * callbacks above handle session slots and retries; this function waits
 * for completion and, on success, passes the returned layout to
 * pnfs_layout_process().  Returns 0 or a negative errno/NFS4ERR value.
 */
int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
{
	struct nfs_server *server = NFS_SERVER(lgp->args.inode);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
		.rpc_argp = &lgp->args,
		.rpc_resp = &lgp->res,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_layoutget_call_ops,
		.callback_data = lgp,
		.flags = RPC_TASK_ASYNC,
	};
	int status = 0;

	dprintk("--> %s\n", __func__);

	/* Point the reply at the buffer carried in the arguments */
	lgp->res.layoutp = &lgp->args.layout;
	lgp->res.seq_res.sr_slot = NULL;
	nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status == 0)
		status = task->tk_status;
	if (status == 0)
		status = pnfs_layout_process(lgp);
	rpc_put_task(task);
	dprintk("<-- %s status=%d\n", __func__, status);
	return status;
}
6016 
/*
 * rpc_call_prepare callback for LAYOUTRETURN: reserve an NFSv4.1 session
 * slot, then allow the task to transmit.
 */
static void
nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutreturn *lrp = calldata;

	dprintk("--> %s\n", __func__);
	if (nfs41_setup_sequence(lrp->clp->cl_session, &lrp->args.seq_args,
				&lrp->res.seq_res, task))
		return;
	rpc_call_start(task);
}
6028 
/*
 * rpc_call_done callback for LAYOUTRETURN.
 *
 * After sequence processing and generic error handling (which may restart
 * the call), update the layout header under the inode lock: if the server
 * reports a remaining layout state (lrs_present) record the new stateid,
 * otherwise all segments must already be gone.  plh_block_lgets is
 * decremented here; presumably the matching increment was taken when the
 * layoutreturn was set up — confirm in the caller.
 */
static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutreturn *lrp = calldata;
	struct nfs_server *server;
	struct pnfs_layout_hdr *lo = lrp->args.layout;

	dprintk("--> %s\n", __func__);

	if (!nfs4_sequence_done(task, &lrp->res.seq_res))
		return;

	server = NFS_SERVER(lrp->args.inode);
	if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return;
	}
	spin_lock(&lo->plh_inode->i_lock);
	if (task->tk_status == 0) {
		if (lrp->res.lrs_present) {
			pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
		} else
			BUG_ON(!list_empty(&lo->plh_segs));
	}
	lo->plh_block_lgets--;
	spin_unlock(&lo->plh_inode->i_lock);
	dprintk("<-- %s\n", __func__);
}
6056 
/*
 * rpc_release callback for LAYOUTRETURN: drop the layout-header reference
 * held in the args and free the nfs4_layoutreturn structure.
 */
static void nfs4_layoutreturn_release(void *calldata)
{
	struct nfs4_layoutreturn *lrp = calldata;

	dprintk("--> %s\n", __func__);
	put_layout_hdr(lrp->args.layout);
	kfree(calldata);
	dprintk("<-- %s\n", __func__);
}
6066 
/* RPC callbacks driving the LAYOUTRETURN operation. */
static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
	.rpc_call_prepare = nfs4_layoutreturn_prepare,
	.rpc_call_done = nfs4_layoutreturn_done,
	.rpc_release = nfs4_layoutreturn_release,
};
6072 
/*
 * Send a LAYOUTRETURN to the server.
 *
 * No RPC_TASK_ASYNC flag is set, so rpc_run_task() runs the task to
 * completion before returning, making task->tk_status valid to read
 * immediately afterwards.  Returns 0 or a negative errno/NFS4ERR value.
 */
int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp)
{
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
		.rpc_argp = &lrp->args,
		.rpc_resp = &lrp->res,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = lrp->clp->cl_rpcclient,
		.rpc_message = &msg,
		.callback_ops = &nfs4_layoutreturn_call_ops,
		.callback_data = lrp,
	};
	int status;

	dprintk("--> %s\n", __func__);
	nfs41_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = task->tk_status;
	dprintk("<-- %s status=%d\n", __func__, status);
	rpc_put_task(task);
	return status;
}
6099 
6100 /*
6101  * Retrieve the list of Data Server devices from the MDS.
6102  */
/*
 * Single synchronous GETDEVICELIST attempt against the MDS for the
 * server's current layout driver class.  The decoded device IDs are
 * written into @devlist.  Retries are the caller's responsibility
 * (see nfs4_proc_getdevicelist()).
 */
static int _nfs4_getdevicelist(struct nfs_server *server,
				    const struct nfs_fh *fh,
				    struct pnfs_devicelist *devlist)
{
	struct nfs4_getdevicelist_args args = {
		.fh = fh,
		.layoutclass = server->pnfs_curr_ld->id,
	};
	struct nfs4_getdevicelist_res res = {
		.devlist = devlist,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICELIST],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;

	dprintk("--> %s\n", __func__);
	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
				&res.seq_res, 0);
	dprintk("<-- %s status=%d\n", __func__, status);
	return status;
}
6127 
6128 int nfs4_proc_getdevicelist(struct nfs_server *server,
6129 			    const struct nfs_fh *fh,
6130 			    struct pnfs_devicelist *devlist)
6131 {
6132 	struct nfs4_exception exception = { };
6133 	int err;
6134 
6135 	do {
6136 		err = nfs4_handle_exception(server,
6137 				_nfs4_getdevicelist(server, fh, devlist),
6138 				&exception);
6139 	} while (exception.retry);
6140 
6141 	dprintk("%s: err=%d, num_devs=%u\n", __func__,
6142 		err, devlist->num_devs);
6143 
6144 	return err;
6145 }
6146 EXPORT_SYMBOL_GPL(nfs4_proc_getdevicelist);
6147 
/*
 * Single synchronous GETDEVICEINFO attempt.  @pdev carries both the
 * device ID to look up and the buffer the decoded reply is written into.
 * Retries are handled by nfs4_proc_getdeviceinfo().
 */
static int
_nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
{
	struct nfs4_getdeviceinfo_args args = {
		.pdev = pdev,
	};
	struct nfs4_getdeviceinfo_res res = {
		.pdev = pdev,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;

	dprintk("--> %s\n", __func__);
	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
	dprintk("<-- %s status=%d\n", __func__, status);

	return status;
}
6170 
6171 int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
6172 {
6173 	struct nfs4_exception exception = { };
6174 	int err;
6175 
6176 	do {
6177 		err = nfs4_handle_exception(server,
6178 					_nfs4_proc_getdeviceinfo(server, pdev),
6179 					&exception);
6180 	} while (exception.retry);
6181 	return err;
6182 }
6183 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
6184 
/*
 * rpc_call_prepare callback for LAYOUTCOMMIT: reserve an NFSv4.1 session
 * slot, then allow the task to transmit.
 */
static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutcommit_data *data = calldata;
	struct nfs_server *server = NFS_SERVER(data->args.inode);

	if (nfs4_setup_sequence(server, &data->args.seq_args,
				&data->res.seq_res, task))
		return;
	rpc_call_start(task);
}
6195 
/*
 * rpc_call_done callback for LAYOUTCOMMIT.
 *
 * A handful of errors are deliberately ignored (treated as success)
 * because they just mean there is nothing left to commit; a real success
 * updates the inode attributes from the reply; anything else goes
 * through the generic async error handler, which may restart the call.
 */
static void
nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutcommit_data *data = calldata;
	struct nfs_server *server = NFS_SERVER(data->args.inode);

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) { /* Just ignore these failures */
	case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
	case -NFS4ERR_BADIOMODE:     /* no IOMODE_RW layout for range */
	case -NFS4ERR_BADLAYOUT:     /* no layout */
	case -NFS4ERR_GRACE:	    /* loca_reclaim always false */
		task->tk_status = 0;
		break;
	case 0:
		nfs_post_op_update_inode_force_wcc(data->args.inode,
						   data->res.fattr);
		break;
	default:
		if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
			rpc_restart_call_prepare(task);
			return;
		}
	}
}
6223 
/*
 * rpc_release callback for LAYOUTCOMMIT.
 *
 * Drops the per-segment references taken in pnfs_set_layoutcommit,
 * clears NFS_INO_LAYOUTCOMMITTING (waking anyone waiting on that bit),
 * then releases the credential and frees the commit data.
 */
static void nfs4_layoutcommit_release(void *calldata)
{
	struct nfs4_layoutcommit_data *data = calldata;
	struct pnfs_layout_segment *lseg, *tmp;
	unsigned long *bitlock = &NFS_I(data->args.inode)->flags;

	pnfs_cleanup_layoutcommit(data);
	/* Matched by references in pnfs_set_layoutcommit */
	list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) {
		list_del_init(&lseg->pls_lc_list);
		if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT,
				       &lseg->pls_flags))
			put_lseg(lseg);
	}

	/* Barrier pairs the bit clear with the wake-up below */
	clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
	smp_mb__after_clear_bit();
	wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);

	put_rpccred(data->cred);
	kfree(data);
}
6246 
/* RPC callbacks driving the LAYOUTCOMMIT operation. */
static const struct rpc_call_ops nfs4_layoutcommit_ops = {
	.rpc_call_prepare = nfs4_layoutcommit_prepare,
	.rpc_call_done = nfs4_layoutcommit_done,
	.rpc_release = nfs4_layoutcommit_release,
};
6252 
6253 int
6254 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
6255 {
6256 	struct rpc_message msg = {
6257 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
6258 		.rpc_argp = &data->args,
6259 		.rpc_resp = &data->res,
6260 		.rpc_cred = data->cred,
6261 	};
6262 	struct rpc_task_setup task_setup_data = {
6263 		.task = &data->task,
6264 		.rpc_client = NFS_CLIENT(data->args.inode),
6265 		.rpc_message = &msg,
6266 		.callback_ops = &nfs4_layoutcommit_ops,
6267 		.callback_data = data,
6268 		.flags = RPC_TASK_ASYNC,
6269 	};
6270 	struct rpc_task *task;
6271 	int status = 0;
6272 
6273 	dprintk("NFS: %4d initiating layoutcommit call. sync %d "
6274 		"lbw: %llu inode %lu\n",
6275 		data->task.tk_pid, sync,
6276 		data->args.lastbytewritten,
6277 		data->args.inode->i_ino);
6278 
6279 	nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
6280 	task = rpc_run_task(&task_setup_data);
6281 	if (IS_ERR(task))
6282 		return PTR_ERR(task);
6283 	if (sync == false)
6284 		goto out;
6285 	status = nfs4_wait_for_completion_rpc_task(task);
6286 	if (status != 0)
6287 		goto out;
6288 	status = task->tk_status;
6289 out:
6290 	dprintk("%s: status %d\n", __func__, status);
6291 	rpc_put_task(task);
6292 	return status;
6293 }
6294 
/*
 * Single synchronous SECINFO_NO_NAME attempt against the current
 * filehandle; the decoded flavor list is written into @flavors.
 * @fhandle and @info are unused here (kept for signature symmetry with
 * the retry wrapper).  Retries are handled by the caller.
 */
static int
_nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
		    struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
{
	struct nfs41_secinfo_no_name_args args = {
		.style = SECINFO_STYLE_CURRENT_FH,
	};
	struct nfs4_secinfo_res res = {
		.flavors = flavors,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}
6312 
6313 static int
6314 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
6315 			   struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
6316 {
6317 	struct nfs4_exception exception = { };
6318 	int err;
6319 	do {
6320 		err = _nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
6321 		switch (err) {
6322 		case 0:
6323 		case -NFS4ERR_WRONGSEC:
6324 		case -NFS4ERR_NOTSUPP:
6325 			goto out;
6326 		default:
6327 			err = nfs4_handle_exception(server, err, &exception);
6328 		}
6329 	} while (exception.retry);
6330 out:
6331 	return err;
6332 }
6333 
6334 static int
6335 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
6336 		    struct nfs_fsinfo *info)
6337 {
6338 	int err;
6339 	struct page *page;
6340 	rpc_authflavor_t flavor;
6341 	struct nfs4_secinfo_flavors *flavors;
6342 
6343 	page = alloc_page(GFP_KERNEL);
6344 	if (!page) {
6345 		err = -ENOMEM;
6346 		goto out;
6347 	}
6348 
6349 	flavors = page_address(page);
6350 	err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
6351 
6352 	/*
6353 	 * Fall back on "guess and check" method if
6354 	 * the server doesn't support SECINFO_NO_NAME
6355 	 */
6356 	if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) {
6357 		err = nfs4_find_root_sec(server, fhandle, info);
6358 		goto out_freepage;
6359 	}
6360 	if (err)
6361 		goto out_freepage;
6362 
6363 	flavor = nfs_find_best_sec(flavors);
6364 	if (err == 0)
6365 		err = nfs4_lookup_root_sec(server, fhandle, info, flavor);
6366 
6367 out_freepage:
6368 	put_page(page);
6369 	if (err == -EACCES)
6370 		return -EPERM;
6371 out:
6372 	return err;
6373 }
6374 
/*
 * Single synchronous TEST_STATEID call.  On RPC success, returns the
 * per-stateid status decoded from the reply; otherwise returns the
 * RPC-level error.  Retries are handled by nfs41_test_stateid().
 */
static int _nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
{
	int status;
	struct nfs41_test_stateid_args args = {
		.stateid = stateid,
	};
	struct nfs41_test_stateid_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
	status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);

	if (status == NFS_OK)
		return res.status;
	return status;
}
6395 
6396 static int nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6397 {
6398 	struct nfs4_exception exception = { };
6399 	int err;
6400 	do {
6401 		err = nfs4_handle_exception(server,
6402 				_nfs41_test_stateid(server, stateid),
6403 				&exception);
6404 	} while (exception.retry);
6405 	return err;
6406 }
6407 
/*
 * Single synchronous FREE_STATEID call asking the server to release
 * state associated with @stateid.  Retries are handled by
 * nfs41_free_stateid().
 */
static int _nfs4_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
{
	struct nfs41_free_stateid_args args = {
		.stateid = stateid,
	};
	struct nfs41_free_stateid_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
	return nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
}
6423 
6424 static int nfs41_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6425 {
6426 	struct nfs4_exception exception = { };
6427 	int err;
6428 	do {
6429 		err = nfs4_handle_exception(server,
6430 				_nfs4_free_stateid(server, stateid),
6431 				&exception);
6432 	} while (exception.retry);
6433 	return err;
6434 }
6435 
6436 static bool nfs41_match_stateid(const nfs4_stateid *s1,
6437 		const nfs4_stateid *s2)
6438 {
6439 	if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
6440 		return false;
6441 
6442 	if (s1->seqid == s2->seqid)
6443 		return true;
6444 	if (s1->seqid == 0 || s2->seqid == 0)
6445 		return true;
6446 
6447 	return false;
6448 }
6449 
6450 #endif /* CONFIG_NFS_V4_1 */
6451 
/*
 * NFSv4.0 minor-version hook for comparing stateids; simply delegates to
 * nfs4_stateid_match().
 */
static bool nfs4_match_stateid(const nfs4_stateid *s1,
		const nfs4_stateid *s2)
{
	return nfs4_stateid_match(s1, s2);
}
6457 
6458 
/* NFSv4.0 state recovery after a server reboot (RECLAIM_REBOOT). */
static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
	.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
	.state_flag_bit	= NFS_STATE_RECLAIM_REBOOT,
	.recover_open	= nfs4_open_reclaim,
	.recover_lock	= nfs4_lock_reclaim,
	.establish_clid = nfs4_init_clientid,
	.get_clid_cred	= nfs4_get_setclientid_cred,
};
6467 
6468 #if defined(CONFIG_NFS_V4_1)
/* NFSv4.1 state recovery after a server reboot (adds RECLAIM_COMPLETE). */
static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
	.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
	.state_flag_bit	= NFS_STATE_RECLAIM_REBOOT,
	.recover_open	= nfs4_open_reclaim,
	.recover_lock	= nfs4_lock_reclaim,
	.establish_clid = nfs41_init_clientid,
	.get_clid_cred	= nfs4_get_exchange_id_cred,
	.reclaim_complete = nfs41_proc_reclaim_complete,
};
6478 #endif /* CONFIG_NFS_V4_1 */
6479 
/* NFSv4.0 state recovery for expired/lost state outside a grace period. */
static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
	.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
	.state_flag_bit	= NFS_STATE_RECLAIM_NOGRACE,
	.recover_open	= nfs4_open_expired,
	.recover_lock	= nfs4_lock_expired,
	.establish_clid = nfs4_init_clientid,
	.get_clid_cred	= nfs4_get_setclientid_cred,
};
6488 
6489 #if defined(CONFIG_NFS_V4_1)
/* NFSv4.1 state recovery for expired/lost state outside a grace period. */
static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
	.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
	.state_flag_bit	= NFS_STATE_RECLAIM_NOGRACE,
	.recover_open	= nfs41_open_expired,
	.recover_lock	= nfs41_lock_expired,
	.establish_clid = nfs41_init_clientid,
	.get_clid_cred	= nfs4_get_exchange_id_cred,
};
6498 #endif /* CONFIG_NFS_V4_1 */
6499 
/* NFSv4.0 lease maintenance: renewal via the RENEW operation. */
static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
	.sched_state_renewal = nfs4_proc_async_renew,
	.get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
	.renew_lease = nfs4_proc_renew,
};
6505 
6506 #if defined(CONFIG_NFS_V4_1)
/* NFSv4.1 lease maintenance: renewal via the SEQUENCE operation. */
static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
	.sched_state_renewal = nfs41_proc_async_sequence,
	.get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
	.renew_lease = nfs4_proc_sequence,
};
6512 #endif
6513 
/* Dispatch table for NFSv4 minor version 0. */
static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
	.minor_version = 0,
	.call_sync = _nfs4_call_sync,
	.match_stateid = nfs4_match_stateid,
	.find_root_sec = nfs4_find_root_sec,
	.reboot_recovery_ops = &nfs40_reboot_recovery_ops,
	.nograce_recovery_ops = &nfs40_nograce_recovery_ops,
	.state_renewal_ops = &nfs40_state_renewal_ops,
};
6523 
6524 #if defined(CONFIG_NFS_V4_1)
/* Dispatch table for NFSv4 minor version 1 (sessions, pNFS). */
static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
	.minor_version = 1,
	.call_sync = _nfs4_call_sync_session,
	.match_stateid = nfs41_match_stateid,
	.find_root_sec = nfs41_find_root_sec,
	.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
	.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
	.state_renewal_ops = &nfs41_state_renewal_ops,
};
6534 #endif
6535 
/* Per-minor-version ops, indexed by the mount's minorversion= value. */
const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
	[0] = &nfs_v4_0_minor_ops,
#if defined(CONFIG_NFS_V4_1)
	[1] = &nfs_v4_1_minor_ops,
#endif
};
6542 
/* Inode operations for NFSv4 regular files (xattrs carry the NFSv4 ACL). */
static const struct inode_operations nfs4_file_inode_operations = {
	.permission	= nfs_permission,
	.getattr	= nfs_getattr,
	.setattr	= nfs_setattr,
	.getxattr	= generic_getxattr,
	.setxattr	= generic_setxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
};
6552 
/*
 * The NFSv4 entry in the generic NFS client's per-version dispatch
 * table: maps VFS-level operations onto the NFSv4 procedures defined
 * in this file.
 */
const struct nfs_rpc_ops nfs_v4_clientops = {
	.version	= 4,			/* protocol version */
	.dentry_ops	= &nfs4_dentry_operations,
	.dir_inode_ops	= &nfs4_dir_inode_operations,
	.file_inode_ops	= &nfs4_file_inode_operations,
	.file_ops	= &nfs4_file_operations,
	.getroot	= nfs4_proc_get_root,
	.getattr	= nfs4_proc_getattr,
	.setattr	= nfs4_proc_setattr,
	.lookup		= nfs4_proc_lookup,
	.access		= nfs4_proc_access,
	.readlink	= nfs4_proc_readlink,
	.create		= nfs4_proc_create,
	.remove		= nfs4_proc_remove,
	.unlink_setup	= nfs4_proc_unlink_setup,
	.unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
	.unlink_done	= nfs4_proc_unlink_done,
	.rename		= nfs4_proc_rename,
	.rename_setup	= nfs4_proc_rename_setup,
	.rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
	.rename_done	= nfs4_proc_rename_done,
	.link		= nfs4_proc_link,
	.symlink	= nfs4_proc_symlink,
	.mkdir		= nfs4_proc_mkdir,
	.rmdir		= nfs4_proc_remove,
	.readdir	= nfs4_proc_readdir,
	.mknod		= nfs4_proc_mknod,
	.statfs		= nfs4_proc_statfs,
	.fsinfo		= nfs4_proc_fsinfo,
	.pathconf	= nfs4_proc_pathconf,
	.set_capabilities = nfs4_server_capabilities,
	.decode_dirent	= nfs4_decode_dirent,
	.read_setup	= nfs4_proc_read_setup,
	.read_rpc_prepare = nfs4_proc_read_rpc_prepare,
	.read_done	= nfs4_read_done,
	.write_setup	= nfs4_proc_write_setup,
	.write_rpc_prepare = nfs4_proc_write_rpc_prepare,
	.write_done	= nfs4_write_done,
	.commit_setup	= nfs4_proc_commit_setup,
	.commit_done	= nfs4_commit_done,
	.lock		= nfs4_proc_lock,
	.clear_acl_cache = nfs4_zap_acl_attr,
	.close_context  = nfs4_close_context,
	.open_context	= nfs4_atomic_open,
	.init_client	= nfs4_init_client,
	.secinfo	= nfs4_proc_secinfo,
};
6600 
/* xattr handler exposing the NFSv4 ACL under the "system.nfs4_acl" name. */
static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
	.prefix	= XATTR_NAME_NFSV4_ACL,
	.list	= nfs4_xattr_list_nfs4_acl,
	.get	= nfs4_xattr_get_nfs4_acl,
	.set	= nfs4_xattr_set_nfs4_acl,
};
6607 
/* NULL-terminated list of xattr handlers registered for NFSv4 inodes. */
const struct xattr_handler *nfs4_xattr_handlers[] = {
	&nfs4_xattr_nfs4_acl_handler,
	NULL
};
6612 
/*
 * Module parameter (0644: readable/writable via sysfs) capping the
 * NFSv4.1 session slot table size; max_session_slots is defined earlier
 * in this file.
 */
module_param(max_session_slots, ushort, 0644);
MODULE_PARM_DESC(max_session_slots, "Maximum number of outstanding NFSv4.1 "
		"requests the client will negotiate");
6616 
6617 /*
6618  * Local variables:
6619  *  c-basic-offset: 8
6620  * End:
6621  */
6622