xref: /linux/fs/nfs/nfs4proc.c (revision 2277ab4a1df50e05bc732fe9488d4e902bb8399a)
1 /*
2  *  fs/nfs/nfs4proc.c
3  *
4  *  Client-side procedure declarations for NFSv4.
5  *
6  *  Copyright (c) 2002 The Regents of the University of Michigan.
7  *  All rights reserved.
8  *
9  *  Kendrick Smith <kmsmith@umich.edu>
10  *  Andy Adamson   <andros@umich.edu>
11  *
12  *  Redistribution and use in source and binary forms, with or without
13  *  modification, are permitted provided that the following conditions
14  *  are met:
15  *
16  *  1. Redistributions of source code must retain the above copyright
17  *     notice, this list of conditions and the following disclaimer.
18  *  2. Redistributions in binary form must reproduce the above copyright
19  *     notice, this list of conditions and the following disclaimer in the
20  *     documentation and/or other materials provided with the distribution.
21  *  3. Neither the name of the University nor the names of its
22  *     contributors may be used to endorse or promote products derived
23  *     from this software without specific prior written permission.
24  *
25  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28  *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32  *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33  *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34  *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 #include <linux/mm.h>
39 #include <linux/utsname.h>
40 #include <linux/delay.h>
41 #include <linux/errno.h>
42 #include <linux/string.h>
43 #include <linux/sunrpc/clnt.h>
44 #include <linux/nfs.h>
45 #include <linux/nfs4.h>
46 #include <linux/nfs_fs.h>
47 #include <linux/nfs_page.h>
48 #include <linux/namei.h>
49 #include <linux/mount.h>
50 #include <linux/module.h>
51 #include <linux/sunrpc/bc_xprt.h>
52 
53 #include "nfs4_fs.h"
54 #include "delegation.h"
55 #include "internal.h"
56 #include "iostat.h"
57 #include "callback.h"
58 
59 #define NFSDBG_FACILITY		NFSDBG_PROC
60 
61 #define NFS4_POLL_RETRY_MIN	(HZ/10)
62 #define NFS4_POLL_RETRY_MAX	(15*HZ)
63 
64 struct nfs4_opendata;
65 static int _nfs4_proc_open(struct nfs4_opendata *data);
66 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
67 static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
68 static int _nfs4_proc_lookup(struct inode *dir, const struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
69 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
70 
71 /* Prevent leaks of NFSv4 errors into userland */
72 static int nfs4_map_errors(int err)
73 {
74 	if (err < -1000) {
75 		dprintk("%s could not handle NFSv4 error %d\n",
76 				__func__, -err);
77 		return -EIO;
78 	}
79 	return err;
80 }
81 
82 /*
83  * This is our standard bitmap for GETATTR requests.
84  */
85 const u32 nfs4_fattr_bitmap[2] = {
86 	FATTR4_WORD0_TYPE
87 	| FATTR4_WORD0_CHANGE
88 	| FATTR4_WORD0_SIZE
89 	| FATTR4_WORD0_FSID
90 	| FATTR4_WORD0_FILEID,
91 	FATTR4_WORD1_MODE
92 	| FATTR4_WORD1_NUMLINKS
93 	| FATTR4_WORD1_OWNER
94 	| FATTR4_WORD1_OWNER_GROUP
95 	| FATTR4_WORD1_RAWDEV
96 	| FATTR4_WORD1_SPACE_USED
97 	| FATTR4_WORD1_TIME_ACCESS
98 	| FATTR4_WORD1_TIME_METADATA
99 	| FATTR4_WORD1_TIME_MODIFY
100 };
101 
102 const u32 nfs4_statfs_bitmap[2] = {
103 	FATTR4_WORD0_FILES_AVAIL
104 	| FATTR4_WORD0_FILES_FREE
105 	| FATTR4_WORD0_FILES_TOTAL,
106 	FATTR4_WORD1_SPACE_AVAIL
107 	| FATTR4_WORD1_SPACE_FREE
108 	| FATTR4_WORD1_SPACE_TOTAL
109 };
110 
111 const u32 nfs4_pathconf_bitmap[2] = {
112 	FATTR4_WORD0_MAXLINK
113 	| FATTR4_WORD0_MAXNAME,
114 	0
115 };
116 
117 const u32 nfs4_fsinfo_bitmap[2] = { FATTR4_WORD0_MAXFILESIZE
118 			| FATTR4_WORD0_MAXREAD
119 			| FATTR4_WORD0_MAXWRITE
120 			| FATTR4_WORD0_LEASE_TIME,
121 			0
122 };
123 
124 const u32 nfs4_fs_locations_bitmap[2] = {
125 	FATTR4_WORD0_TYPE
126 	| FATTR4_WORD0_CHANGE
127 	| FATTR4_WORD0_SIZE
128 	| FATTR4_WORD0_FSID
129 	| FATTR4_WORD0_FILEID
130 	| FATTR4_WORD0_FS_LOCATIONS,
131 	FATTR4_WORD1_MODE
132 	| FATTR4_WORD1_NUMLINKS
133 	| FATTR4_WORD1_OWNER
134 	| FATTR4_WORD1_OWNER_GROUP
135 	| FATTR4_WORD1_RAWDEV
136 	| FATTR4_WORD1_SPACE_USED
137 	| FATTR4_WORD1_TIME_ACCESS
138 	| FATTR4_WORD1_TIME_METADATA
139 	| FATTR4_WORD1_TIME_MODIFY
140 	| FATTR4_WORD1_MOUNTED_ON_FILEID
141 };
142 
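/*
 * Set up an NFSv4 READDIR request.  Cookie values 0 and 1 are reserved for
 * the '.' and '..' entries that are faked up below (NFSv4 servers do not
 * return them); cookies greater than 2 are passed to the server unchanged,
 * together with the caller's verifier.
 */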
143 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
144 		struct nfs4_readdir_arg *readdir)
145 {
146 	__be32 *start, *p;
147 
148 	BUG_ON(readdir->count < 80);
149 	if (cookie > 2) {
150 		readdir->cookie = cookie;
151 		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
152 		return;
153 	}
154 
155 	readdir->cookie = 0;
156 	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
157 	if (cookie == 2)
158 		return;
159 
160 	/*
161 	 * NFSv4 servers do not return entries for '.' and '..'
162 	 * Therefore, we fake these entries here.  We let '.'
163 	 * have cookie 0 and '..' have cookie 1.  Note that
164 	 * when talking to the server, we always send cookie 0
165 	 * instead of 1 or 2.
166 	 */
167 	start = p = kmap_atomic(*readdir->pages, KM_USER0);
168 
169 	if (cookie == 0) {
170 		*p++ = xdr_one;                                  /* next */
171 		*p++ = xdr_zero;                   /* cookie, first word */
172 		*p++ = xdr_one;                   /* cookie, second word */
173 		*p++ = xdr_one;                             /* entry len */
174 		memcpy(p, ".\0\0\0", 4);                        /* entry */
175 		p++;
176 		*p++ = xdr_one;                         /* bitmap length */
177 		*p++ = htonl(FATTR4_WORD0_FILEID);             /* bitmap */
178 		*p++ = htonl(8);              /* attribute buffer length */
179 		p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
180 	}
181 
182 	*p++ = xdr_one;                                  /* next */
183 	*p++ = xdr_zero;                   /* cookie, first word */
184 	*p++ = xdr_two;                   /* cookie, second word */
185 	*p++ = xdr_two;                             /* entry len */
186 	memcpy(p, "..\0\0", 4);                         /* entry */
187 	p++;
188 	*p++ = xdr_one;                         /* bitmap length */
189 	*p++ = htonl(FATTR4_WORD0_FILEID);             /* bitmap */
190 	*p++ = htonl(8);              /* attribute buffer length */
191 	p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));
192 
193 	readdir->pgbase = (char *)p - (char *)start;
194 	readdir->count -= readdir->pgbase;
195 	kunmap_atomic(start, KM_USER0);
196 }
197 
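/*
 * Wait (killably) for the state manager to finish running, i.e. for the
 * NFS4CLNT_MANAGER_RUNNING bit in clp->cl_state to clear.
 */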
198 static int nfs4_wait_clnt_recover(struct nfs_client *clp)
199 {
200 	int res;
201 
202 	might_sleep();
203 
204 	res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
205 			nfs_wait_bit_killable, TASK_KILLABLE);
206 	return res;
207 }
208 
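/*
 * Back off before retrying an operation: clamp *timeout to the
 * [NFS4_POLL_RETRY_MIN, NFS4_POLL_RETRY_MAX] range, sleep (killably) for
 * that long, then double the timeout for the next attempt.
 */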
209 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
210 {
211 	int res = 0;
212 
213 	might_sleep();
214 
215 	if (*timeout <= 0)
216 		*timeout = NFS4_POLL_RETRY_MIN;
217 	if (*timeout > NFS4_POLL_RETRY_MAX)
218 		*timeout = NFS4_POLL_RETRY_MAX;
219 	schedule_timeout_killable(*timeout);
220 	if (fatal_signal_pending(current))
221 		res = -ERESTARTSYS;
222 	*timeout <<= 1;
223 	return res;
224 }
225 
226 /* This is the error handling routine for processes that are allowed
227  * to sleep.
228  */
229 static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
230 {
231 	struct nfs_client *clp = server->nfs_client;
232 	struct nfs4_state *state = exception->state;
233 	int ret = errorcode;
234 
235 	exception->retry = 0;
236 	switch(errorcode) {
237 		case 0:
238 			return 0;
239 		case -NFS4ERR_ADMIN_REVOKED:
240 		case -NFS4ERR_BAD_STATEID:
241 		case -NFS4ERR_OPENMODE:
242 			if (state == NULL)
243 				break;
244 			nfs4_state_mark_reclaim_nograce(clp, state);
245 		case -NFS4ERR_STALE_CLIENTID:
246 		case -NFS4ERR_STALE_STATEID:
247 		case -NFS4ERR_EXPIRED:
248 			nfs4_schedule_state_recovery(clp);
249 			ret = nfs4_wait_clnt_recover(clp);
250 			if (ret == 0)
251 				exception->retry = 1;
252 #if !defined(CONFIG_NFS_V4_1)
253 			break;
254 #else /* !defined(CONFIG_NFS_V4_1) */
255 			if (!nfs4_has_session(server->nfs_client))
256 				break;
257 			/* FALLTHROUGH */
258 		case -NFS4ERR_BADSESSION:
259 		case -NFS4ERR_BADSLOT:
260 		case -NFS4ERR_BAD_HIGH_SLOT:
261 		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
262 		case -NFS4ERR_DEADSESSION:
263 		case -NFS4ERR_SEQ_FALSE_RETRY:
264 		case -NFS4ERR_SEQ_MISORDERED:
265 			dprintk("%s ERROR: %d Reset session\n", __func__,
266 				errorcode);
267 			set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
268 			exception->retry = 1;
269 			/* FALLTHROUGH */
270 #endif /* !defined(CONFIG_NFS_V4_1) */
271 		case -NFS4ERR_FILE_OPEN:
272 		case -NFS4ERR_GRACE:
273 		case -NFS4ERR_DELAY:
274 			ret = nfs4_delay(server->client, &exception->timeout);
275 			if (ret != 0)
276 				break;
277 		case -NFS4ERR_OLD_STATEID:
278 			exception->retry = 1;
279 	}
280 	/* We failed to handle the error */
281 	return nfs4_map_errors(ret);
282 }
283 
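/*
 * The pattern used throughout this file (see e.g. nfs4_do_setattr) is to
 * wrap the per-operation "_nfs4_*" worker in a loop driven by
 * exception.retry ("_nfs4_worker" below is just a stand-in name):
 *
 *	struct nfs4_exception exception = { };
 *	int err;
 *	do {
 *		err = nfs4_handle_exception(server,
 *				_nfs4_worker(...),
 *				&exception);
 *	} while (exception.retry);
 */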
284 
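/*
 * Record the start time of the most recent lease-renewing operation:
 * advance clp->cl_last_renewal to @timestamp if it is newer than the
 * currently recorded value.
 */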
285 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
286 {
287 	struct nfs_client *clp = server->nfs_client;
288 	spin_lock(&clp->cl_lock);
289 	if (time_before(clp->cl_last_renewal, timestamp))
290 		clp->cl_last_renewal = timestamp;
291 	spin_unlock(&clp->cl_lock);
292 }
293 
294 #if defined(CONFIG_NFS_V4_1)
295 
296 /*
297  * nfs4_free_slot - free a slot and efficiently update slot table.
298  *
299  * Freeing a slot is trivially done by clearing its bit
300  * in the bitmap.
301  * If the freed slotid equals highest_used_slotid, we update the latter
302  * so that the server can size down the slot table if needed; otherwise
303  * we know that highest_used_slotid is still in use.
304  * When updating highest_used_slotid there may be "holes" in the bitmap,
305  * so we scan down from highest_used_slotid to 0 looking for the now
306  * highest slotid in use.
307  * If none is found, highest_used_slotid is set to -1.
308  */
309 static void
310 nfs4_free_slot(struct nfs4_slot_table *tbl, u8 free_slotid)
311 {
312 	int slotid = free_slotid;
313 
314 	spin_lock(&tbl->slot_tbl_lock);
315 	/* clear used bit in bitmap */
316 	__clear_bit(slotid, tbl->used_slots);
317 
318 	/* update highest_used_slotid when it is freed */
319 	if (slotid == tbl->highest_used_slotid) {
320 		slotid = find_last_bit(tbl->used_slots, tbl->max_slots);
321 		if (slotid >= 0 && slotid < tbl->max_slots)
322 			tbl->highest_used_slotid = slotid;
323 		else
324 			tbl->highest_used_slotid = -1;
325 	}
326 	rpc_wake_up_next(&tbl->slot_tbl_waitq);
327 	spin_unlock(&tbl->slot_tbl_lock);
328 	dprintk("%s: free_slotid %u highest_used_slotid %d\n", __func__,
329 		free_slotid, tbl->highest_used_slotid);
330 }
331 
332 void nfs41_sequence_free_slot(const struct nfs_client *clp,
333 			      struct nfs4_sequence_res *res)
334 {
335 	struct nfs4_slot_table *tbl;
336 
337 	if (!nfs4_has_session(clp)) {
338 		dprintk("%s: No session\n", __func__);
339 		return;
340 	}
341 	tbl = &clp->cl_session->fc_slot_table;
342 	if (res->sr_slotid == NFS4_MAX_SLOT_TABLE) {
343 		dprintk("%s: No slot\n", __func__);
344 		/* just wake up the next guy waiting since
345 		 * we may not have consumed a slot after all */
346 		rpc_wake_up_next(&tbl->slot_tbl_waitq);
347 		return;
348 	}
349 	nfs4_free_slot(tbl, res->sr_slotid);
350 	res->sr_slotid = NFS4_MAX_SLOT_TABLE;
351 }
352 
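/*
 * Handle the SEQUENCE result of a completed NFSv4.1 request: on success,
 * bump the slot's sequence number and refresh the clientid lease timer;
 * on error, free the slot right away (the session may be reset by the
 * error handlers).
 */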
353 static void nfs41_sequence_done(struct nfs_client *clp,
354 				struct nfs4_sequence_res *res,
355 				int rpc_status)
356 {
357 	unsigned long timestamp;
358 	struct nfs4_slot_table *tbl;
359 	struct nfs4_slot *slot;
360 
361 	/*
362 	 * sr_status remains 1 if an RPC level error occurred. The server
363 	 * may or may not have processed the sequence operation.
364 	 * Proceed as if the server received and processed the sequence
365 	 * operation.
366 	 */
367 	if (res->sr_status == 1)
368 		res->sr_status = NFS_OK;
369 
370 	/* -ERESTARTSYS can result in skipping nfs41_setup_sequence */
371 	if (res->sr_slotid == NFS4_MAX_SLOT_TABLE)
372 		goto out;
373 
374 	tbl = &clp->cl_session->fc_slot_table;
375 	slot = tbl->slots + res->sr_slotid;
376 
377 	if (res->sr_status == 0) {
378 		/* Update the slot's sequence and clientid lease timer */
379 		++slot->seq_nr;
380 		timestamp = res->sr_renewal_time;
381 		spin_lock(&clp->cl_lock);
382 		if (time_before(clp->cl_last_renewal, timestamp))
383 			clp->cl_last_renewal = timestamp;
384 		spin_unlock(&clp->cl_lock);
385 		return;
386 	}
387 out:
388 	/* The session may be reset by one of the error handlers. */
389 	dprintk("%s: Error %d, freeing the slot\n", __func__, res->sr_status);
390 	nfs41_sequence_free_slot(clp, res);
391 }
392 
393 /*
394  * nfs4_find_slot - efficiently look for a free slot
395  *
396  * nfs4_find_slot looks for an unset bit in the used_slots bitmap.
397  * If found, we mark the slot as used, update the highest_used_slotid,
398  * and set up the sequence operation args accordingly.
399  * The slot number is returned if found, or NFS4_MAX_SLOT_TABLE otherwise.
400  *
401  * Note: must be called with the slot_tbl_lock held.
402  */
403 static u8
404 nfs4_find_slot(struct nfs4_slot_table *tbl, struct rpc_task *task)
405 {
406 	int slotid;
407 	u8 ret_id = NFS4_MAX_SLOT_TABLE;
408 	BUILD_BUG_ON((u8)NFS4_MAX_SLOT_TABLE != (int)NFS4_MAX_SLOT_TABLE);
409 
410 	dprintk("--> %s used_slots=%04lx highest_used=%d max_slots=%d\n",
411 		__func__, tbl->used_slots[0], tbl->highest_used_slotid,
412 		tbl->max_slots);
413 	slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
414 	if (slotid >= tbl->max_slots)
415 		goto out;
416 	__set_bit(slotid, tbl->used_slots);
417 	if (slotid > tbl->highest_used_slotid)
418 		tbl->highest_used_slotid = slotid;
419 	ret_id = slotid;
420 out:
421 	dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d\n",
422 		__func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id);
423 	return ret_id;
424 }
425 
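/*
 * Wait for a session reset to complete: keep kicking the state manager
 * until the NFS4CLNT_SESSION_SETUP flag has been handled.
 */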
426 static int nfs4_recover_session(struct nfs4_session *session)
427 {
428 	struct nfs_client *clp = session->clp;
429 	int ret;
430 
431 	for (;;) {
432 		ret = nfs4_wait_clnt_recover(clp);
433 		if (ret != 0)
434 			return ret;
435 		if (!test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state))
436 			break;
437 		nfs4_schedule_state_manager(clp);
438 	}
439 	return 0;
440 }
441 
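/*
 * Allocate a session slot for this request and fill in the SEQUENCE
 * arguments.  Returns -EAGAIN (with the task queued on slot_tbl_waitq)
 * when no slot is free or the table is being drained for a session reset.
 */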
442 static int nfs41_setup_sequence(struct nfs4_session *session,
443 				struct nfs4_sequence_args *args,
444 				struct nfs4_sequence_res *res,
445 				int cache_reply,
446 				struct rpc_task *task)
447 {
448 	struct nfs4_slot *slot;
449 	struct nfs4_slot_table *tbl;
450 	int status = 0;
451 	u8 slotid;
452 
453 	dprintk("--> %s\n", __func__);
454 	/* slot already allocated? */
455 	if (res->sr_slotid != NFS4_MAX_SLOT_TABLE)
456 		return 0;
457 
458 	memset(res, 0, sizeof(*res));
459 	res->sr_slotid = NFS4_MAX_SLOT_TABLE;
460 	tbl = &session->fc_slot_table;
461 
462 	spin_lock(&tbl->slot_tbl_lock);
463 	if (test_bit(NFS4CLNT_SESSION_SETUP, &session->clp->cl_state)) {
464 		if (tbl->highest_used_slotid != -1) {
465 			rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
466 			spin_unlock(&tbl->slot_tbl_lock);
467 			dprintk("<-- %s: Session reset: draining\n", __func__);
468 			return -EAGAIN;
469 		}
470 
471 		/* The slot table is empty; start the reset thread */
472 		dprintk("%s Session Reset\n", __func__);
473 		spin_unlock(&tbl->slot_tbl_lock);
474 		status = nfs4_recover_session(session);
475 		if (status)
476 			return status;
477 		spin_lock(&tbl->slot_tbl_lock);
478 	}
479 
480 	slotid = nfs4_find_slot(tbl, task);
481 	if (slotid == NFS4_MAX_SLOT_TABLE) {
482 		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
483 		spin_unlock(&tbl->slot_tbl_lock);
484 		dprintk("<-- %s: no free slots\n", __func__);
485 		return -EAGAIN;
486 	}
487 	spin_unlock(&tbl->slot_tbl_lock);
488 
489 	slot = tbl->slots + slotid;
490 	args->sa_session = session;
491 	args->sa_slotid = slotid;
492 	args->sa_cache_this = cache_reply;
493 
494 	dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr);
495 
496 	res->sr_session = session;
497 	res->sr_slotid = slotid;
498 	res->sr_renewal_time = jiffies;
499 	/*
500 	 * sr_status is only set in decode_sequence, and so will remain
501 	 * set to 1 if an rpc level failure occurs.
502 	 */
503 	res->sr_status = 1;
504 	return 0;
505 }
506 
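/*
 * Minor-version-aware wrapper: a no-op on NFSv4.0 mounts (no session);
 * on NFSv4.1, any nfs41_setup_sequence() failure other than -EAGAIN
 * terminates the RPC task.
 */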
507 int nfs4_setup_sequence(struct nfs_client *clp,
508 			struct nfs4_sequence_args *args,
509 			struct nfs4_sequence_res *res,
510 			int cache_reply,
511 			struct rpc_task *task)
512 {
513 	int ret = 0;
514 
515 	dprintk("--> %s clp %p session %p sr_slotid %d\n",
516 		__func__, clp, clp->cl_session, res->sr_slotid);
517 
518 	if (!nfs4_has_session(clp))
519 		goto out;
520 	ret = nfs41_setup_sequence(clp->cl_session, args, res, cache_reply,
521 				   task);
522 	if (ret != -EAGAIN) {
523 		/* terminate rpc task */
524 		task->tk_status = ret;
525 		task->tk_action = NULL;
526 	}
527 out:
528 	dprintk("<-- %s status=%d\n", __func__, ret);
529 	return ret;
530 }
531 
532 struct nfs41_call_sync_data {
533 	struct nfs_client *clp;
534 	struct nfs4_sequence_args *seq_args;
535 	struct nfs4_sequence_res *seq_res;
536 	int cache_reply;
537 };
538 
539 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
540 {
541 	struct nfs41_call_sync_data *data = calldata;
542 
543 	dprintk("--> %s data->clp->cl_session %p\n", __func__,
544 		data->clp->cl_session);
545 	if (nfs4_setup_sequence(data->clp, data->seq_args,
546 				data->seq_res, data->cache_reply, task))
547 		return;
548 	rpc_call_start(task);
549 }
550 
551 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
552 {
553 	struct nfs41_call_sync_data *data = calldata;
554 
555 	nfs41_sequence_done(data->clp, data->seq_res, task->tk_status);
556 	nfs41_sequence_free_slot(data->clp, data->seq_res);
557 }
558 
559 struct rpc_call_ops nfs41_call_sync_ops = {
560 	.rpc_call_prepare = nfs41_call_sync_prepare,
561 	.rpc_call_done = nfs41_call_sync_done,
562 };
563 
564 static int nfs4_call_sync_sequence(struct nfs_client *clp,
565 				   struct rpc_clnt *clnt,
566 				   struct rpc_message *msg,
567 				   struct nfs4_sequence_args *args,
568 				   struct nfs4_sequence_res *res,
569 				   int cache_reply)
570 {
571 	int ret;
572 	struct rpc_task *task;
573 	struct nfs41_call_sync_data data = {
574 		.clp = clp,
575 		.seq_args = args,
576 		.seq_res = res,
577 		.cache_reply = cache_reply,
578 	};
579 	struct rpc_task_setup task_setup = {
580 		.rpc_client = clnt,
581 		.rpc_message = msg,
582 		.callback_ops = &nfs41_call_sync_ops,
583 		.callback_data = &data
584 	};
585 
586 	res->sr_slotid = NFS4_MAX_SLOT_TABLE;
587 	task = rpc_run_task(&task_setup);
588 	if (IS_ERR(task))
589 		ret = PTR_ERR(task);
590 	else {
591 		ret = task->tk_status;
592 		rpc_put_task(task);
593 	}
594 	return ret;
595 }
596 
597 int _nfs4_call_sync_session(struct nfs_server *server,
598 			    struct rpc_message *msg,
599 			    struct nfs4_sequence_args *args,
600 			    struct nfs4_sequence_res *res,
601 			    int cache_reply)
602 {
603 	return nfs4_call_sync_sequence(server->nfs_client, server->client,
604 				       msg, args, res, cache_reply);
605 }
606 
607 #endif /* CONFIG_NFS_V4_1 */
608 
609 int _nfs4_call_sync(struct nfs_server *server,
610 		    struct rpc_message *msg,
611 		    struct nfs4_sequence_args *args,
612 		    struct nfs4_sequence_res *res,
613 		    int cache_reply)
614 {
615 	args->sa_session = res->sr_session = NULL;
616 	return rpc_call_sync(server->client, msg, 0);
617 }
618 
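/*
 * nfs4_call_sync() dispatches through nfs_client->cl_call_sync, which is
 * presumably set up at mount time to _nfs4_call_sync() for NFSv4.0 and to
 * _nfs4_call_sync_session() for NFSv4.1, so that callers do not need to
 * care about sessions.
 */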
619 #define nfs4_call_sync(server, msg, args, res, cache_reply) \
620 	(server)->nfs_client->cl_call_sync((server), (msg), &(args)->seq_args, \
621 			&(res)->seq_res, (cache_reply))
622 
623 static void nfs4_sequence_done(const struct nfs_server *server,
624 			       struct nfs4_sequence_res *res, int rpc_status)
625 {
626 #ifdef CONFIG_NFS_V4_1
627 	if (nfs4_has_session(server->nfs_client))
628 		nfs41_sequence_done(server->nfs_client, res, rpc_status);
629 #endif /* CONFIG_NFS_V4_1 */
630 }
631 
632 /* no restart, therefore free slot here */
633 static void nfs4_sequence_done_free_slot(const struct nfs_server *server,
634 					 struct nfs4_sequence_res *res,
635 					 int rpc_status)
636 {
637 	nfs4_sequence_done(server, res, rpc_status);
638 	nfs4_sequence_free_slot(server->nfs_client, res);
639 }
640 
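/*
 * Apply the change_info returned by a directory-modifying operation:
 * invalidate the directory's cached attributes and readdir data, and force
 * dentry revalidation if the change was not atomic with respect to the
 * cached change attribute.
 */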
641 static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
642 {
643 	struct nfs_inode *nfsi = NFS_I(dir);
644 
645 	spin_lock(&dir->i_lock);
646 	nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA;
647 	if (!cinfo->atomic || cinfo->before != nfsi->change_attr)
648 		nfs_force_lookup_revalidate(dir);
649 	nfsi->change_attr = cinfo->after;
650 	spin_unlock(&dir->i_lock);
651 }
652 
653 struct nfs4_opendata {
654 	struct kref kref;
655 	struct nfs_openargs o_arg;
656 	struct nfs_openres o_res;
657 	struct nfs_open_confirmargs c_arg;
658 	struct nfs_open_confirmres c_res;
659 	struct nfs_fattr f_attr;
660 	struct nfs_fattr dir_attr;
661 	struct path path;
662 	struct dentry *dir;
663 	struct nfs4_state_owner *owner;
664 	struct nfs4_state *state;
665 	struct iattr attrs;
666 	unsigned long timestamp;
667 	unsigned int rpc_done : 1;
668 	int rpc_status;
669 	int cancelled;
670 };
671 
672 
673 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
674 {
675 	p->o_res.f_attr = &p->f_attr;
676 	p->o_res.dir_attr = &p->dir_attr;
677 	p->o_res.seqid = p->o_arg.seqid;
678 	p->c_res.seqid = p->c_arg.seqid;
679 	p->o_res.server = p->o_arg.server;
680 	nfs_fattr_init(&p->f_attr);
681 	nfs_fattr_init(&p->dir_attr);
682 	p->o_res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
683 }
684 
685 static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path,
686 		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
687 		const struct iattr *attrs)
688 {
689 	struct dentry *parent = dget_parent(path->dentry);
690 	struct inode *dir = parent->d_inode;
691 	struct nfs_server *server = NFS_SERVER(dir);
692 	struct nfs4_opendata *p;
693 
694 	p = kzalloc(sizeof(*p), GFP_KERNEL);
695 	if (p == NULL)
696 		goto err;
697 	p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid);
698 	if (p->o_arg.seqid == NULL)
699 		goto err_free;
700 	p->path.mnt = mntget(path->mnt);
701 	p->path.dentry = dget(path->dentry);
702 	p->dir = parent;
703 	p->owner = sp;
704 	atomic_inc(&sp->so_count);
705 	p->o_arg.fh = NFS_FH(dir);
706 	p->o_arg.open_flags = flags;
707 	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
708 	p->o_arg.clientid = server->nfs_client->cl_clientid;
709 	p->o_arg.id = sp->so_owner_id.id;
710 	p->o_arg.name = &p->path.dentry->d_name;
711 	p->o_arg.server = server;
712 	p->o_arg.bitmask = server->attr_bitmask;
713 	p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
714 	if (flags & O_EXCL) {
715 		u32 *s = (u32 *) p->o_arg.u.verifier.data;
716 		s[0] = jiffies;
717 		s[1] = current->pid;
718 	} else if (flags & O_CREAT) {
719 		p->o_arg.u.attrs = &p->attrs;
720 		memcpy(&p->attrs, attrs, sizeof(p->attrs));
721 	}
722 	p->c_arg.fh = &p->o_res.fh;
723 	p->c_arg.stateid = &p->o_res.stateid;
724 	p->c_arg.seqid = p->o_arg.seqid;
725 	nfs4_init_opendata_res(p);
726 	kref_init(&p->kref);
727 	return p;
728 err_free:
729 	kfree(p);
730 err:
731 	dput(parent);
732 	return NULL;
733 }
734 
735 static void nfs4_opendata_free(struct kref *kref)
736 {
737 	struct nfs4_opendata *p = container_of(kref,
738 			struct nfs4_opendata, kref);
739 
740 	nfs_free_seqid(p->o_arg.seqid);
741 	if (p->state != NULL)
742 		nfs4_put_open_state(p->state);
743 	nfs4_put_state_owner(p->owner);
744 	dput(p->dir);
745 	path_put(&p->path);
746 	kfree(p);
747 }
748 
749 static void nfs4_opendata_put(struct nfs4_opendata *p)
750 {
751 	if (p != NULL)
752 		kref_put(&p->kref, nfs4_opendata_free);
753 }
754 
755 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
756 {
757 	int ret;
758 
759 	ret = rpc_wait_for_completion_task(task);
760 	return ret;
761 }
762 
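/*
 * Return true if the existing open state already covers the requested
 * open mode, so that a new OPEN call can be avoided.  O_EXCL opens always
 * go to the server.
 */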
763 static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
764 {
765 	int ret = 0;
766 
767 	if (open_mode & O_EXCL)
768 		goto out;
769 	switch (mode & (FMODE_READ|FMODE_WRITE)) {
770 		case FMODE_READ:
771 			ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0;
772 			break;
773 		case FMODE_WRITE:
774 			ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0;
775 			break;
776 		case FMODE_READ|FMODE_WRITE:
777 			ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0;
778 	}
779 out:
780 	return ret;
781 }
782 
783 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
784 {
785 	if ((delegation->type & fmode) != fmode)
786 		return 0;
787 	if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
788 		return 0;
789 	nfs_mark_delegation_referenced(delegation);
790 	return 1;
791 }
792 
793 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
794 {
795 	switch (fmode) {
796 		case FMODE_WRITE:
797 			state->n_wronly++;
798 			break;
799 		case FMODE_READ:
800 			state->n_rdonly++;
801 			break;
802 		case FMODE_READ|FMODE_WRITE:
803 			state->n_rdwr++;
804 	}
805 	nfs4_state_set_mode_locked(state, state->state | fmode);
806 }
807 
808 static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
809 {
810 	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
811 		memcpy(state->stateid.data, stateid->data, sizeof(state->stateid.data));
812 	memcpy(state->open_stateid.data, stateid->data, sizeof(state->open_stateid.data));
813 	switch (fmode) {
814 		case FMODE_READ:
815 			set_bit(NFS_O_RDONLY_STATE, &state->flags);
816 			break;
817 		case FMODE_WRITE:
818 			set_bit(NFS_O_WRONLY_STATE, &state->flags);
819 			break;
820 		case FMODE_READ|FMODE_WRITE:
821 			set_bit(NFS_O_RDWR_STATE, &state->flags);
822 	}
823 }
824 
825 static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
826 {
827 	write_seqlock(&state->seqlock);
828 	nfs_set_open_stateid_locked(state, stateid, fmode);
829 	write_sequnlock(&state->seqlock);
830 }
831 
832 static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
833 {
834 	/*
835 	 * Protect the call to nfs4_state_set_mode_locked and
836 	 * serialise the stateid update
837 	 */
838 	write_seqlock(&state->seqlock);
839 	if (deleg_stateid != NULL) {
840 		memcpy(state->stateid.data, deleg_stateid->data, sizeof(state->stateid.data));
841 		set_bit(NFS_DELEGATED_STATE, &state->flags);
842 	}
843 	if (open_stateid != NULL)
844 		nfs_set_open_stateid_locked(state, open_stateid, fmode);
845 	write_sequnlock(&state->seqlock);
846 	spin_lock(&state->owner->so_lock);
847 	update_open_stateflags(state, fmode);
848 	spin_unlock(&state->owner->so_lock);
849 }
850 
851 static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
852 {
853 	struct nfs_inode *nfsi = NFS_I(state->inode);
854 	struct nfs_delegation *deleg_cur;
855 	int ret = 0;
856 
857 	fmode &= (FMODE_READ|FMODE_WRITE);
858 
859 	rcu_read_lock();
860 	deleg_cur = rcu_dereference(nfsi->delegation);
861 	if (deleg_cur == NULL)
862 		goto no_delegation;
863 
864 	spin_lock(&deleg_cur->lock);
865 	if (nfsi->delegation != deleg_cur ||
866 	    (deleg_cur->type & fmode) != fmode)
867 		goto no_delegation_unlock;
868 
869 	if (delegation == NULL)
870 		delegation = &deleg_cur->stateid;
871 	else if (memcmp(deleg_cur->stateid.data, delegation->data, NFS4_STATEID_SIZE) != 0)
872 		goto no_delegation_unlock;
873 
874 	nfs_mark_delegation_referenced(deleg_cur);
875 	__update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
876 	ret = 1;
877 no_delegation_unlock:
878 	spin_unlock(&deleg_cur->lock);
879 no_delegation:
880 	rcu_read_unlock();
881 
882 	if (!ret && open_stateid != NULL) {
883 		__update_open_stateid(state, open_stateid, NULL, fmode);
884 		ret = 1;
885 	}
886 
887 	return ret;
888 }
889 
890 
891 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
892 {
893 	struct nfs_delegation *delegation;
894 
895 	rcu_read_lock();
896 	delegation = rcu_dereference(NFS_I(inode)->delegation);
897 	if (delegation == NULL || (delegation->type & fmode) == fmode) {
898 		rcu_read_unlock();
899 		return;
900 	}
901 	rcu_read_unlock();
902 	nfs_inode_return_delegation(inode);
903 }
904 
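/*
 * Try to satisfy an open using state we already hold: either cached open
 * state of a compatible mode, or a delegation covering the requested
 * access.  Returns a referenced nfs4_state on success; ERR_PTR(-EAGAIN)
 * means a real OPEN call is required.
 */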
905 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
906 {
907 	struct nfs4_state *state = opendata->state;
908 	struct nfs_inode *nfsi = NFS_I(state->inode);
909 	struct nfs_delegation *delegation;
910 	int open_mode = opendata->o_arg.open_flags & O_EXCL;
911 	fmode_t fmode = opendata->o_arg.fmode;
912 	nfs4_stateid stateid;
913 	int ret = -EAGAIN;
914 
915 	for (;;) {
916 		if (can_open_cached(state, fmode, open_mode)) {
917 			spin_lock(&state->owner->so_lock);
918 			if (can_open_cached(state, fmode, open_mode)) {
919 				update_open_stateflags(state, fmode);
920 				spin_unlock(&state->owner->so_lock);
921 				goto out_return_state;
922 			}
923 			spin_unlock(&state->owner->so_lock);
924 		}
925 		rcu_read_lock();
926 		delegation = rcu_dereference(nfsi->delegation);
927 		if (delegation == NULL ||
928 		    !can_open_delegated(delegation, fmode)) {
929 			rcu_read_unlock();
930 			break;
931 		}
932 		/* Save the delegation */
933 		memcpy(stateid.data, delegation->stateid.data, sizeof(stateid.data));
934 		rcu_read_unlock();
935 		ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
936 		if (ret != 0)
937 			goto out;
938 		ret = -EAGAIN;
939 
940 		/* Try to update the stateid using the delegation */
941 		if (update_open_stateid(state, NULL, &stateid, fmode))
942 			goto out_return_state;
943 	}
944 out:
945 	return ERR_PTR(ret);
946 out_return_state:
947 	atomic_inc(&state->count);
948 	return state;
949 }
950 
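/*
 * Convert a completed OPEN (or a cached open, if no RPC was performed)
 * into a referenced nfs4_state: instantiate the inode from the returned
 * attributes, record any delegation the server granted, and update the
 * open stateid.
 */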
951 static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
952 {
953 	struct inode *inode;
954 	struct nfs4_state *state = NULL;
955 	struct nfs_delegation *delegation;
956 	int ret;
957 
958 	if (!data->rpc_done) {
959 		state = nfs4_try_open_cached(data);
960 		goto out;
961 	}
962 
963 	ret = -EAGAIN;
964 	if (!(data->f_attr.valid & NFS_ATTR_FATTR))
965 		goto err;
966 	inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr);
967 	ret = PTR_ERR(inode);
968 	if (IS_ERR(inode))
969 		goto err;
970 	ret = -ENOMEM;
971 	state = nfs4_get_open_state(inode, data->owner);
972 	if (state == NULL)
973 		goto err_put_inode;
974 	if (data->o_res.delegation_type != 0) {
975 		int delegation_flags = 0;
976 
977 		rcu_read_lock();
978 		delegation = rcu_dereference(NFS_I(inode)->delegation);
979 		if (delegation)
980 			delegation_flags = delegation->flags;
981 		rcu_read_unlock();
982 		if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
983 			nfs_inode_set_delegation(state->inode,
984 					data->owner->so_cred,
985 					&data->o_res);
986 		else
987 			nfs_inode_reclaim_delegation(state->inode,
988 					data->owner->so_cred,
989 					&data->o_res);
990 	}
991 
992 	update_open_stateid(state, &data->o_res.stateid, NULL,
993 			data->o_arg.fmode);
994 	iput(inode);
995 out:
996 	return state;
997 err_put_inode:
998 	iput(inode);
999 err:
1000 	return ERR_PTR(ret);
1001 }
1002 
1003 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
1004 {
1005 	struct nfs_inode *nfsi = NFS_I(state->inode);
1006 	struct nfs_open_context *ctx;
1007 
1008 	spin_lock(&state->inode->i_lock);
1009 	list_for_each_entry(ctx, &nfsi->open_files, list) {
1010 		if (ctx->state != state)
1011 			continue;
1012 		get_nfs_open_context(ctx);
1013 		spin_unlock(&state->inode->i_lock);
1014 		return ctx;
1015 	}
1016 	spin_unlock(&state->inode->i_lock);
1017 	return ERR_PTR(-ENOENT);
1018 }
1019 
1020 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state)
1021 {
1022 	struct nfs4_opendata *opendata;
1023 
1024 	opendata = nfs4_opendata_alloc(&ctx->path, state->owner, 0, 0, NULL);
1025 	if (opendata == NULL)
1026 		return ERR_PTR(-ENOMEM);
1027 	opendata->state = state;
1028 	atomic_inc(&state->count);
1029 	return opendata;
1030 }
1031 
1032 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
1033 {
1034 	struct nfs4_state *newstate;
1035 	int ret;
1036 
1037 	opendata->o_arg.open_flags = 0;
1038 	opendata->o_arg.fmode = fmode;
1039 	memset(&opendata->o_res, 0, sizeof(opendata->o_res));
1040 	memset(&opendata->c_res, 0, sizeof(opendata->c_res));
1041 	nfs4_init_opendata_res(opendata);
1042 	ret = _nfs4_proc_open(opendata);
1043 	if (ret != 0)
1044 		return ret;
1045 	newstate = nfs4_opendata_to_nfs4_state(opendata);
1046 	if (IS_ERR(newstate))
1047 		return PTR_ERR(newstate);
1048 	nfs4_close_state(&opendata->path, newstate, fmode);
1049 	*res = newstate;
1050 	return 0;
1051 }
1052 
1053 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
1054 {
1055 	struct nfs4_state *newstate;
1056 	int ret;
1057 
1058 	/* memory barrier prior to reading state->n_* */
1059 	clear_bit(NFS_DELEGATED_STATE, &state->flags);
1060 	smp_rmb();
1061 	if (state->n_rdwr != 0) {
1062 		ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
1063 		if (ret != 0)
1064 			return ret;
1065 		if (newstate != state)
1066 			return -ESTALE;
1067 	}
1068 	if (state->n_wronly != 0) {
1069 		ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
1070 		if (ret != 0)
1071 			return ret;
1072 		if (newstate != state)
1073 			return -ESTALE;
1074 	}
1075 	if (state->n_rdonly != 0) {
1076 		ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
1077 		if (ret != 0)
1078 			return ret;
1079 		if (newstate != state)
1080 			return -ESTALE;
1081 	}
1082 	/*
1083 	 * We may have performed cached opens for all three recoveries.
1084 	 * Check if we need to update the current stateid.
1085 	 */
1086 	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
1087 	    memcmp(state->stateid.data, state->open_stateid.data, sizeof(state->stateid.data)) != 0) {
1088 		write_seqlock(&state->seqlock);
1089 		if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1090 			memcpy(state->stateid.data, state->open_stateid.data, sizeof(state->stateid.data));
1091 		write_sequnlock(&state->seqlock);
1092 	}
1093 	return 0;
1094 }
1095 
1096 /*
1097  * OPEN_RECLAIM:
1098  * 	reclaim state on the server after a reboot.
1099  */
1100 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1101 {
1102 	struct nfs_delegation *delegation;
1103 	struct nfs4_opendata *opendata;
1104 	fmode_t delegation_type = 0;
1105 	int status;
1106 
1107 	opendata = nfs4_open_recoverdata_alloc(ctx, state);
1108 	if (IS_ERR(opendata))
1109 		return PTR_ERR(opendata);
1110 	opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS;
1111 	opendata->o_arg.fh = NFS_FH(state->inode);
1112 	rcu_read_lock();
1113 	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1114 	if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
1115 		delegation_type = delegation->type;
1116 	rcu_read_unlock();
1117 	opendata->o_arg.u.delegation_type = delegation_type;
1118 	status = nfs4_open_recover(opendata, state);
1119 	nfs4_opendata_put(opendata);
1120 	return status;
1121 }
1122 
1123 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1124 {
1125 	struct nfs_server *server = NFS_SERVER(state->inode);
1126 	struct nfs4_exception exception = { };
1127 	int err;
1128 	do {
1129 		err = _nfs4_do_open_reclaim(ctx, state);
1130 		if (err != -NFS4ERR_DELAY)
1131 			break;
1132 		nfs4_handle_exception(server, err, &exception);
1133 	} while (exception.retry);
1134 	return err;
1135 }
1136 
1137 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
1138 {
1139 	struct nfs_open_context *ctx;
1140 	int ret;
1141 
1142 	ctx = nfs4_state_find_open_context(state);
1143 	if (IS_ERR(ctx))
1144 		return PTR_ERR(ctx);
1145 	ret = nfs4_do_open_reclaim(ctx, state);
1146 	put_nfs_open_context(ctx);
1147 	return ret;
1148 }
1149 
1150 static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1151 {
1152 	struct nfs4_opendata *opendata;
1153 	int ret;
1154 
1155 	opendata = nfs4_open_recoverdata_alloc(ctx, state);
1156 	if (IS_ERR(opendata))
1157 		return PTR_ERR(opendata);
1158 	opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR;
1159 	memcpy(opendata->o_arg.u.delegation.data, stateid->data,
1160 			sizeof(opendata->o_arg.u.delegation.data));
1161 	ret = nfs4_open_recover(opendata, state);
1162 	nfs4_opendata_put(opendata);
1163 	return ret;
1164 }
1165 
1166 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1167 {
1168 	struct nfs4_exception exception = { };
1169 	struct nfs_server *server = NFS_SERVER(state->inode);
1170 	int err;
1171 	do {
1172 		err = _nfs4_open_delegation_recall(ctx, state, stateid);
1173 		switch (err) {
1174 			case 0:
1175 			case -ENOENT:
1176 			case -ESTALE:
1177 				goto out;
1178 			case -NFS4ERR_STALE_CLIENTID:
1179 			case -NFS4ERR_STALE_STATEID:
1180 			case -NFS4ERR_EXPIRED:
1181 				/* Don't recall a delegation if it was lost */
1182 				nfs4_schedule_state_recovery(server->nfs_client);
1183 				goto out;
1184 			case -ERESTARTSYS:
1185 				/*
1186 				 * The show must go on: exit, but mark the
1187 				 * stateid as needing recovery.
1188 				 */
1189 			case -NFS4ERR_ADMIN_REVOKED:
1190 			case -NFS4ERR_BAD_STATEID:
1191 				nfs4_state_mark_reclaim_nograce(server->nfs_client, state);
1192 			case -ENOMEM:
1193 				err = 0;
1194 				goto out;
1195 		}
1196 		err = nfs4_handle_exception(server, err, &exception);
1197 	} while (exception.retry);
1198 out:
1199 	return err;
1200 }
1201 
1202 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
1203 {
1204 	struct nfs4_opendata *data = calldata;
1205 
1206 	data->rpc_status = task->tk_status;
1207 	if (RPC_ASSASSINATED(task))
1208 		return;
1209 	if (data->rpc_status == 0) {
1210 		memcpy(data->o_res.stateid.data, data->c_res.stateid.data,
1211 				sizeof(data->o_res.stateid.data));
1212 		nfs_confirm_seqid(&data->owner->so_seqid, 0);
1213 		renew_lease(data->o_res.server, data->timestamp);
1214 		data->rpc_done = 1;
1215 	}
1216 }
1217 
1218 static void nfs4_open_confirm_release(void *calldata)
1219 {
1220 	struct nfs4_opendata *data = calldata;
1221 	struct nfs4_state *state = NULL;
1222 
1223 	/* If this request hasn't been cancelled, do nothing */
1224 	if (data->cancelled == 0)
1225 		goto out_free;
1226 	/* In case of error, no cleanup! */
1227 	if (!data->rpc_done)
1228 		goto out_free;
1229 	state = nfs4_opendata_to_nfs4_state(data);
1230 	if (!IS_ERR(state))
1231 		nfs4_close_state(&data->path, state, data->o_arg.fmode);
1232 out_free:
1233 	nfs4_opendata_put(data);
1234 }
1235 
1236 static const struct rpc_call_ops nfs4_open_confirm_ops = {
1237 	.rpc_call_done = nfs4_open_confirm_done,
1238 	.rpc_release = nfs4_open_confirm_release,
1239 };
1240 
1241 /*
1242  * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
1243  */
1244 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
1245 {
1246 	struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
1247 	struct rpc_task *task;
1248 	struct  rpc_message msg = {
1249 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
1250 		.rpc_argp = &data->c_arg,
1251 		.rpc_resp = &data->c_res,
1252 		.rpc_cred = data->owner->so_cred,
1253 	};
1254 	struct rpc_task_setup task_setup_data = {
1255 		.rpc_client = server->client,
1256 		.rpc_message = &msg,
1257 		.callback_ops = &nfs4_open_confirm_ops,
1258 		.callback_data = data,
1259 		.workqueue = nfsiod_workqueue,
1260 		.flags = RPC_TASK_ASYNC,
1261 	};
1262 	int status;
1263 
1264 	kref_get(&data->kref);
1265 	data->rpc_done = 0;
1266 	data->rpc_status = 0;
1267 	data->timestamp = jiffies;
1268 	task = rpc_run_task(&task_setup_data);
1269 	if (IS_ERR(task))
1270 		return PTR_ERR(task);
1271 	status = nfs4_wait_for_completion_rpc_task(task);
1272 	if (status != 0) {
1273 		data->cancelled = 1;
1274 		smp_wmb();
1275 	} else
1276 		status = data->rpc_status;
1277 	rpc_put_task(task);
1278 	return status;
1279 }
1280 
1281 static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
1282 {
1283 	struct nfs4_opendata *data = calldata;
1284 	struct nfs4_state_owner *sp = data->owner;
1285 
1286 	if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
1287 		return;
1288 	/*
1289 	 * Check if we still need to send an OPEN call, or if we can use
1290 	 * a delegation instead.
1291 	 */
1292 	if (data->state != NULL) {
1293 		struct nfs_delegation *delegation;
1294 
1295 		if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
1296 			goto out_no_action;
1297 		rcu_read_lock();
1298 		delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
1299 		if (delegation != NULL &&
1300 		    test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0) {
1301 			rcu_read_unlock();
1302 			goto out_no_action;
1303 		}
1304 		rcu_read_unlock();
1305 	}
1306 	/* Update sequence id. */
1307 	data->o_arg.id = sp->so_owner_id.id;
1308 	data->o_arg.clientid = sp->so_client->cl_clientid;
1309 	if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
1310 		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
1311 		nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
1312 	}
1313 	data->timestamp = jiffies;
1314 	if (nfs4_setup_sequence(data->o_arg.server->nfs_client,
1315 				&data->o_arg.seq_args,
1316 				&data->o_res.seq_res, 1, task))
1317 		return;
1318 	rpc_call_start(task);
1319 	return;
1320 out_no_action:
1321 	task->tk_action = NULL;
1322 
1323 }
1324 
1325 static void nfs4_open_done(struct rpc_task *task, void *calldata)
1326 {
1327 	struct nfs4_opendata *data = calldata;
1328 
1329 	data->rpc_status = task->tk_status;
1330 
1331 	nfs4_sequence_done_free_slot(data->o_arg.server, &data->o_res.seq_res,
1332 				     task->tk_status);
1333 
1334 	if (RPC_ASSASSINATED(task))
1335 		return;
1336 	if (task->tk_status == 0) {
1337 		switch (data->o_res.f_attr->mode & S_IFMT) {
1338 			case S_IFREG:
1339 				break;
1340 			case S_IFLNK:
1341 				data->rpc_status = -ELOOP;
1342 				break;
1343 			case S_IFDIR:
1344 				data->rpc_status = -EISDIR;
1345 				break;
1346 			default:
1347 				data->rpc_status = -ENOTDIR;
1348 		}
1349 		renew_lease(data->o_res.server, data->timestamp);
1350 		if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
1351 			nfs_confirm_seqid(&data->owner->so_seqid, 0);
1352 	}
1353 	data->rpc_done = 1;
1354 }
1355 
1356 static void nfs4_open_release(void *calldata)
1357 {
1358 	struct nfs4_opendata *data = calldata;
1359 	struct nfs4_state *state = NULL;
1360 
1361 	/* If this request hasn't been cancelled, do nothing */
1362 	if (data->cancelled == 0)
1363 		goto out_free;
1364 	/* In case of error, no cleanup! */
1365 	if (data->rpc_status != 0 || !data->rpc_done)
1366 		goto out_free;
1367 	/* In case we need an open_confirm, no cleanup! */
1368 	if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
1369 		goto out_free;
1370 	state = nfs4_opendata_to_nfs4_state(data);
1371 	if (!IS_ERR(state))
1372 		nfs4_close_state(&data->path, state, data->o_arg.fmode);
1373 out_free:
1374 	nfs4_opendata_put(data);
1375 }
1376 
1377 static const struct rpc_call_ops nfs4_open_ops = {
1378 	.rpc_call_prepare = nfs4_open_prepare,
1379 	.rpc_call_done = nfs4_open_done,
1380 	.rpc_release = nfs4_open_release,
1381 };
1382 
1383 /*
1384  * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
1385  */
1386 static int _nfs4_proc_open(struct nfs4_opendata *data)
1387 {
1388 	struct inode *dir = data->dir->d_inode;
1389 	struct nfs_server *server = NFS_SERVER(dir);
1390 	struct nfs_openargs *o_arg = &data->o_arg;
1391 	struct nfs_openres *o_res = &data->o_res;
1392 	struct rpc_task *task;
1393 	struct rpc_message msg = {
1394 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
1395 		.rpc_argp = o_arg,
1396 		.rpc_resp = o_res,
1397 		.rpc_cred = data->owner->so_cred,
1398 	};
1399 	struct rpc_task_setup task_setup_data = {
1400 		.rpc_client = server->client,
1401 		.rpc_message = &msg,
1402 		.callback_ops = &nfs4_open_ops,
1403 		.callback_data = data,
1404 		.workqueue = nfsiod_workqueue,
1405 		.flags = RPC_TASK_ASYNC,
1406 	};
1407 	int status;
1408 
1409 	kref_get(&data->kref);
1410 	data->rpc_done = 0;
1411 	data->rpc_status = 0;
1412 	data->cancelled = 0;
1413 	task = rpc_run_task(&task_setup_data);
1414 	if (IS_ERR(task))
1415 		return PTR_ERR(task);
1416 	status = nfs4_wait_for_completion_rpc_task(task);
1417 	if (status != 0) {
1418 		data->cancelled = 1;
1419 		smp_wmb();
1420 	} else
1421 		status = data->rpc_status;
1422 	rpc_put_task(task);
1423 	if (status != 0 || !data->rpc_done)
1424 		return status;
1425 
1426 	if (o_res->fh.size == 0)
1427 		_nfs4_proc_lookup(dir, o_arg->name, &o_res->fh, o_res->f_attr);
1428 
1429 	if (o_arg->open_flags & O_CREAT) {
1430 		update_changeattr(dir, &o_res->cinfo);
1431 		nfs_post_op_update_inode(dir, o_res->dir_attr);
1432 	} else
1433 		nfs_refresh_inode(dir, o_res->dir_attr);
1434 	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
1435 		status = _nfs4_proc_open_confirm(data);
1436 		if (status != 0)
1437 			return status;
1438 	}
1439 	if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
1440 		_nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr);
1441 	return 0;
1442 }
1443 
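/*
 * Ensure the clientid lease is valid before proceeding: keep kicking the
 * state manager until neither NFS4CLNT_LEASE_EXPIRED nor
 * NFS4CLNT_CHECK_LEASE is set.
 */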
1444 static int nfs4_recover_expired_lease(struct nfs_server *server)
1445 {
1446 	struct nfs_client *clp = server->nfs_client;
1447 	int ret;
1448 
1449 	for (;;) {
1450 		ret = nfs4_wait_clnt_recover(clp);
1451 		if (ret != 0)
1452 			return ret;
1453 		if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
1454 		    !test_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state))
1455 			break;
1456 		nfs4_schedule_state_recovery(clp);
1457 	}
1458 	return 0;
1459 }
1460 
1461 /*
1462  * OPEN_EXPIRED:
1463  * 	reclaim state on the server after a network partition.
1464  * 	Assumes caller holds the appropriate lock
1465  */
1466 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
1467 {
1468 	struct nfs4_opendata *opendata;
1469 	int ret;
1470 
1471 	opendata = nfs4_open_recoverdata_alloc(ctx, state);
1472 	if (IS_ERR(opendata))
1473 		return PTR_ERR(opendata);
1474 	ret = nfs4_open_recover(opendata, state);
1475 	if (ret == -ESTALE)
1476 		d_drop(ctx->path.dentry);
1477 	nfs4_opendata_put(opendata);
1478 	return ret;
1479 }
1480 
1481 static inline int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
1482 {
1483 	struct nfs_server *server = NFS_SERVER(state->inode);
1484 	struct nfs4_exception exception = { };
1485 	int err;
1486 
1487 	do {
1488 		err = _nfs4_open_expired(ctx, state);
1489 		if (err != -NFS4ERR_DELAY)
1490 			break;
1491 		nfs4_handle_exception(server, err, &exception);
1492 	} while (exception.retry);
1493 	return err;
1494 }
1495 
1496 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
1497 {
1498 	struct nfs_open_context *ctx;
1499 	int ret;
1500 
1501 	ctx = nfs4_state_find_open_context(state);
1502 	if (IS_ERR(ctx))
1503 		return PTR_ERR(ctx);
1504 	ret = nfs4_do_open_expired(ctx, state);
1505 	put_nfs_open_context(ctx);
1506 	return ret;
1507 }
1508 
1509 /*
1510  * On an EXCLUSIVE create, the server should send back a bitmask with FATTR4-*
1511  * fields corresponding to attributes that were used to store the verifier.
1512  * Make sure we clobber those fields in the later setattr call.
1513  */
1514 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr)
1515 {
1516 	if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
1517 	    !(sattr->ia_valid & ATTR_ATIME_SET))
1518 		sattr->ia_valid |= ATTR_ATIME;
1519 
1520 	if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
1521 	    !(sattr->ia_valid & ATTR_MTIME_SET))
1522 		sattr->ia_valid |= ATTR_MTIME;
1523 }
1524 
1525 /*
1526  * Returns a referenced nfs4_state
1527  */
1528 static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res)
1529 {
1530 	struct nfs4_state_owner  *sp;
1531 	struct nfs4_state     *state = NULL;
1532 	struct nfs_server       *server = NFS_SERVER(dir);
1533 	struct nfs4_opendata *opendata;
1534 	int status;
1535 
1536 	/* Protect against reboot recovery conflicts */
1537 	status = -ENOMEM;
1538 	if (!(sp = nfs4_get_state_owner(server, cred))) {
1539 		dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
1540 		goto out_err;
1541 	}
1542 	status = nfs4_recover_expired_lease(server);
1543 	if (status != 0)
1544 		goto err_put_state_owner;
1545 	if (path->dentry->d_inode != NULL)
1546 		nfs4_return_incompatible_delegation(path->dentry->d_inode, fmode);
1547 	status = -ENOMEM;
1548 	opendata = nfs4_opendata_alloc(path, sp, fmode, flags, sattr);
1549 	if (opendata == NULL)
1550 		goto err_put_state_owner;
1551 
1552 	if (path->dentry->d_inode != NULL)
1553 		opendata->state = nfs4_get_open_state(path->dentry->d_inode, sp);
1554 
1555 	status = _nfs4_proc_open(opendata);
1556 	if (status != 0)
1557 		goto err_opendata_put;
1558 
1559 	if (opendata->o_arg.open_flags & O_EXCL)
1560 		nfs4_exclusive_attrset(opendata, sattr);
1561 
1562 	state = nfs4_opendata_to_nfs4_state(opendata);
1563 	status = PTR_ERR(state);
1564 	if (IS_ERR(state))
1565 		goto err_opendata_put;
1566 	nfs4_opendata_put(opendata);
1567 	nfs4_put_state_owner(sp);
1568 	*res = state;
1569 	return 0;
1570 err_opendata_put:
1571 	nfs4_opendata_put(opendata);
1572 err_put_state_owner:
1573 	nfs4_put_state_owner(sp);
1574 out_err:
1575 	*res = NULL;
1576 	return status;
1577 }
1578 
1579 
1580 static struct nfs4_state *nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred)
1581 {
1582 	struct nfs4_exception exception = { };
1583 	struct nfs4_state *res;
1584 	int status;
1585 
1586 	do {
1587 		status = _nfs4_do_open(dir, path, fmode, flags, sattr, cred, &res);
1588 		if (status == 0)
1589 			break;
1590 		/* NOTE: BAD_SEQID means the server and client disagree about the
1591 		 * book-keeping w.r.t. state-changing operations
1592 		 * (OPEN/CLOSE/LOCK/LOCKU...)
1593 		 * It is actually a sign of a bug on the client or on the server.
1594 		 *
1595 		 * If we receive a BAD_SEQID error in the particular case of
1596 		 * doing an OPEN, we assume that nfs_increment_open_seqid() will
1597 		 * have unhashed the old state_owner for us, and that we can
1598 		 * therefore safely retry using a new one. We should still warn
1599 		 * the user though...
1600 		 */
1601 		if (status == -NFS4ERR_BAD_SEQID) {
1602 			printk(KERN_WARNING "NFS: v4 server %s "
1603 					"returned a bad sequence-id error!\n",
1604 					NFS_SERVER(dir)->nfs_client->cl_hostname);
1605 			exception.retry = 1;
1606 			continue;
1607 		}
1608 		/*
1609 		 * BAD_STATEID on OPEN means that the server cancelled our
1610 		 * state before it received the OPEN_CONFIRM.
1611 		 * Recover by retrying the request as per the discussion
1612 		 * on Page 181 of RFC3530.
1613 		 */
1614 		if (status == -NFS4ERR_BAD_STATEID) {
1615 			exception.retry = 1;
1616 			continue;
1617 		}
1618 		if (status == -EAGAIN) {
1619 			/* We must have found a delegation */
1620 			exception.retry = 1;
1621 			continue;
1622 		}
1623 		res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
1624 					status, &exception));
1625 	} while (exception.retry);
1626 	return res;
1627 }
1628 
1629 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
1630 			    struct nfs_fattr *fattr, struct iattr *sattr,
1631 			    struct nfs4_state *state)
1632 {
1633 	struct nfs_server *server = NFS_SERVER(inode);
1634 	struct nfs_setattrargs arg = {
1635 		.fh		= NFS_FH(inode),
1636 		.iap		= sattr,
1637 		.server		= server,
1638 		.bitmask	= server->attr_bitmask,
1639 	};
1640 	struct nfs_setattrres res = {
1641 		.fattr		= fattr,
1642 		.server		= server,
1643 	};
1644 	struct rpc_message msg = {
1645 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
1646 		.rpc_argp	= &arg,
1647 		.rpc_resp	= &res,
1648 		.rpc_cred	= cred,
1649 	};
1650 	unsigned long timestamp = jiffies;
1651 	int status;
1652 
1653 	nfs_fattr_init(fattr);
1654 
1655 	if (nfs4_copy_delegation_stateid(&arg.stateid, inode)) {
1656 		/* Use that stateid */
1657 	} else if (state != NULL) {
1658 		nfs4_copy_stateid(&arg.stateid, state, current->files);
1659 	} else
1660 		memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid));
1661 
1662 	status = nfs4_call_sync(server, &msg, &arg, &res, 1);
1663 	if (status == 0 && state != NULL)
1664 		renew_lease(server, timestamp);
1665 	return status;
1666 }
1667 
1668 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
1669 			   struct nfs_fattr *fattr, struct iattr *sattr,
1670 			   struct nfs4_state *state)
1671 {
1672 	struct nfs_server *server = NFS_SERVER(inode);
1673 	struct nfs4_exception exception = { };
1674 	int err;
1675 	do {
1676 		err = nfs4_handle_exception(server,
1677 				_nfs4_do_setattr(inode, cred, fattr, sattr, state),
1678 				&exception);
1679 	} while (exception.retry);
1680 	return err;
1681 }
1682 
1683 struct nfs4_closedata {
1684 	struct path path;
1685 	struct inode *inode;
1686 	struct nfs4_state *state;
1687 	struct nfs_closeargs arg;
1688 	struct nfs_closeres res;
1689 	struct nfs_fattr fattr;
1690 	unsigned long timestamp;
1691 };
1692 
1693 static void nfs4_free_closedata(void *data)
1694 {
1695 	struct nfs4_closedata *calldata = data;
1696 	struct nfs4_state_owner *sp = calldata->state->owner;
1697 
1698 	nfs4_put_open_state(calldata->state);
1699 	nfs_free_seqid(calldata->arg.seqid);
1700 	nfs4_put_state_owner(sp);
1701 	path_put(&calldata->path);
1702 	kfree(calldata);
1703 }
1704 
1705 static void nfs4_close_done(struct rpc_task *task, void *data)
1706 {
1707 	struct nfs4_closedata *calldata = data;
1708 	struct nfs4_state *state = calldata->state;
1709 	struct nfs_server *server = NFS_SERVER(calldata->inode);
1710 
1711 	nfs4_sequence_done(server, &calldata->res.seq_res, task->tk_status);
1712 	if (RPC_ASSASSINATED(task))
1713 		return;
1714 	/* We are done with the inode, and are in the process of freeing
1715 	 * the state_owner. We keep this around to process errors
1716 	 */
1717 	switch (task->tk_status) {
1718 		case 0:
1719 			nfs_set_open_stateid(state, &calldata->res.stateid, 0);
1720 			renew_lease(server, calldata->timestamp);
1721 			break;
1722 		case -NFS4ERR_STALE_STATEID:
1723 		case -NFS4ERR_OLD_STATEID:
1724 		case -NFS4ERR_BAD_STATEID:
1725 		case -NFS4ERR_EXPIRED:
1726 			if (calldata->arg.fmode == 0)
1727 				break;
1728 		default:
1729 			if (nfs4_async_handle_error(task, server, state) == -EAGAIN) {
1730 				nfs4_restart_rpc(task, server->nfs_client);
1731 				return;
1732 			}
1733 	}
1734 	nfs4_sequence_free_slot(server->nfs_client, &calldata->res.seq_res);
1735 	nfs_refresh_inode(calldata->inode, calldata->res.fattr);
1736 }
1737 
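/*
 * Work out, from the remaining open counts, whether this CLOSE should be
 * sent as-is, downgraded to an OPEN_DOWNGRADE, or skipped entirely because
 * no share mode actually needs to be released.
 */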
1738 static void nfs4_close_prepare(struct rpc_task *task, void *data)
1739 {
1740 	struct nfs4_closedata *calldata = data;
1741 	struct nfs4_state *state = calldata->state;
1742 	int clear_rd, clear_wr, clear_rdwr;
1743 
1744 	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
1745 		return;
1746 
1747 	clear_rd = clear_wr = clear_rdwr = 0;
1748 	spin_lock(&state->owner->so_lock);
1749 	/* Calculate the change in open mode */
1750 	if (state->n_rdwr == 0) {
1751 		if (state->n_rdonly == 0) {
1752 			clear_rd |= test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1753 			clear_rdwr |= test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags);
1754 		}
1755 		if (state->n_wronly == 0) {
1756 			clear_wr |= test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1757 			clear_rdwr |= test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags);
1758 		}
1759 	}
1760 	spin_unlock(&state->owner->so_lock);
1761 	if (!clear_rd && !clear_wr && !clear_rdwr) {
1762 		/* Note: exit _without_ calling nfs4_close_done */
1763 		task->tk_action = NULL;
1764 		return;
1765 	}
1766 	nfs_fattr_init(calldata->res.fattr);
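	/*
	 * Pick the operation: if a read-only or write-only share mode is
	 * still in use, send OPEN_DOWNGRADE to the remaining mode; otherwise
	 * arg.fmode stays 0 and the CLOSE set up in nfs4_do_close() is sent.
	 */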
1767 	if (test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0) {
1768 		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
1769 		calldata->arg.fmode = FMODE_READ;
1770 	} else if (test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0) {
1771 		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
1772 		calldata->arg.fmode = FMODE_WRITE;
1773 	}
1774 	calldata->timestamp = jiffies;
1775 	if (nfs4_setup_sequence((NFS_SERVER(calldata->inode))->nfs_client,
1776 				&calldata->arg.seq_args, &calldata->res.seq_res,
1777 				1, task))
1778 		return;
1779 	rpc_call_start(task);
1780 }
1781 
1782 static const struct rpc_call_ops nfs4_close_ops = {
1783 	.rpc_call_prepare = nfs4_close_prepare,
1784 	.rpc_call_done = nfs4_close_done,
1785 	.rpc_release = nfs4_free_closedata,
1786 };
1787 
1788 /*
1789  * It is possible for data to be read/written from a mem-mapped file
1790  * after the sys_close call (which hits the vfs layer as a flush).
1791  * This means that we can't safely call nfsv4 close on a file until
1792  * the inode is cleared. This in turn means that we are not good
1793  * NFSv4 citizens - we do not tell the server to update the file's
1794  * share state even when we are done with one of the three share
1795  * stateids in the inode.
1796  *
1797  * NOTE: Caller must be holding the sp->so_owner semaphore!
1798  */
1799 int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait)
1800 {
1801 	struct nfs_server *server = NFS_SERVER(state->inode);
1802 	struct nfs4_closedata *calldata;
1803 	struct nfs4_state_owner *sp = state->owner;
1804 	struct rpc_task *task;
1805 	struct rpc_message msg = {
1806 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
1807 		.rpc_cred = state->owner->so_cred,
1808 	};
1809 	struct rpc_task_setup task_setup_data = {
1810 		.rpc_client = server->client,
1811 		.rpc_message = &msg,
1812 		.callback_ops = &nfs4_close_ops,
1813 		.workqueue = nfsiod_workqueue,
1814 		.flags = RPC_TASK_ASYNC,
1815 	};
1816 	int status = -ENOMEM;
1817 
1818 	calldata = kzalloc(sizeof(*calldata), GFP_KERNEL);
1819 	if (calldata == NULL)
1820 		goto out;
1821 	calldata->inode = state->inode;
1822 	calldata->state = state;
1823 	calldata->arg.fh = NFS_FH(state->inode);
1824 	calldata->arg.stateid = &state->open_stateid;
1825 	if (nfs4_has_session(server->nfs_client))
1826 		memset(calldata->arg.stateid->data, 0, 4);    /* clear seqid */
1827 	/* Serialization for the sequence id */
1828 	calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid);
1829 	if (calldata->arg.seqid == NULL)
1830 		goto out_free_calldata;
1831 	calldata->arg.fmode = 0;
1832 	calldata->arg.bitmask = server->cache_consistency_bitmask;
1833 	calldata->res.fattr = &calldata->fattr;
1834 	calldata->res.seqid = calldata->arg.seqid;
1835 	calldata->res.server = server;
1836 	calldata->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
1837 	calldata->path.mnt = mntget(path->mnt);
1838 	calldata->path.dentry = dget(path->dentry);
1839 
1840 	msg.rpc_argp = &calldata->arg;
1841 	msg.rpc_resp = &calldata->res;
1842 	task_setup_data.callback_data = calldata;
1843 	task = rpc_run_task(&task_setup_data);
1844 	if (IS_ERR(task))
1845 		return PTR_ERR(task);
1846 	status = 0;
1847 	if (wait)
1848 		status = rpc_wait_for_completion_task(task);
1849 	rpc_put_task(task);
1850 	return status;
1851 out_free_calldata:
1852 	kfree(calldata);
1853 out:
1854 	nfs4_put_open_state(state);
1855 	nfs4_put_state_owner(sp);
1856 	return status;
1857 }
1858 
1859 static int nfs4_intent_set_file(struct nameidata *nd, struct path *path, struct nfs4_state *state, fmode_t fmode)
1860 {
1861 	struct file *filp;
1862 	int ret;
1863 
1864 	/* If the open_intent is for execute, we have an extra check to make */
1865 	if (fmode & FMODE_EXEC) {
1866 		ret = nfs_may_open(state->inode,
1867 				state->owner->so_cred,
1868 				nd->intent.open.flags);
1869 		if (ret < 0)
1870 			goto out_close;
1871 	}
1872 	filp = lookup_instantiate_filp(nd, path->dentry, NULL);
1873 	if (!IS_ERR(filp)) {
1874 		struct nfs_open_context *ctx;
1875 		ctx = nfs_file_open_context(filp);
1876 		ctx->state = state;
1877 		return 0;
1878 	}
1879 	ret = PTR_ERR(filp);
1880 out_close:
1881 	nfs4_close_sync(path, state, fmode & (FMODE_READ|FMODE_WRITE));
1882 	return ret;
1883 }
1884 
1885 struct dentry *
1886 nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
1887 {
1888 	struct path path = {
1889 		.mnt = nd->path.mnt,
1890 		.dentry = dentry,
1891 	};
1892 	struct dentry *parent;
1893 	struct iattr attr;
1894 	struct rpc_cred *cred;
1895 	struct nfs4_state *state;
1896 	struct dentry *res;
1897 	fmode_t fmode = nd->intent.open.flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);
1898 
1899 	if (nd->flags & LOOKUP_CREATE) {
1900 		attr.ia_mode = nd->intent.open.create_mode;
1901 		attr.ia_valid = ATTR_MODE;
1902 		if (!IS_POSIXACL(dir))
1903 			attr.ia_mode &= ~current_umask();
1904 	} else {
1905 		attr.ia_valid = 0;
1906 		BUG_ON(nd->intent.open.flags & O_CREAT);
1907 	}
1908 
1909 	cred = rpc_lookup_cred();
1910 	if (IS_ERR(cred))
1911 		return (struct dentry *)cred;
1912 	parent = dentry->d_parent;
1913 	/* Protect against concurrent sillydeletes */
1914 	nfs_block_sillyrename(parent);
1915 	state = nfs4_do_open(dir, &path, fmode, nd->intent.open.flags, &attr, cred);
1916 	put_rpccred(cred);
1917 	if (IS_ERR(state)) {
1918 		if (PTR_ERR(state) == -ENOENT) {
1919 			d_add(dentry, NULL);
1920 			nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
1921 		}
1922 		nfs_unblock_sillyrename(parent);
1923 		return (struct dentry *)state;
1924 	}
1925 	res = d_add_unique(dentry, igrab(state->inode));
1926 	if (res != NULL)
1927 		path.dentry = res;
1928 	nfs_set_verifier(path.dentry, nfs_save_change_attribute(dir));
1929 	nfs_unblock_sillyrename(parent);
1930 	nfs4_intent_set_file(nd, &path, state, fmode);
1931 	return res;
1932 }
1933 
1934 int
1935 nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, struct nameidata *nd)
1936 {
1937 	struct path path = {
1938 		.mnt = nd->path.mnt,
1939 		.dentry = dentry,
1940 	};
1941 	struct rpc_cred *cred;
1942 	struct nfs4_state *state;
1943 	fmode_t fmode = openflags & (FMODE_READ | FMODE_WRITE);
1944 
1945 	cred = rpc_lookup_cred();
1946 	if (IS_ERR(cred))
1947 		return PTR_ERR(cred);
1948 	state = nfs4_do_open(dir, &path, fmode, openflags, NULL, cred);
1949 	put_rpccred(cred);
1950 	if (IS_ERR(state)) {
1951 		switch (PTR_ERR(state)) {
1952 			case -EPERM:
1953 			case -EACCES:
1954 			case -EDQUOT:
1955 			case -ENOSPC:
1956 			case -EROFS:
1957 				lookup_instantiate_filp(nd, (struct dentry *)state, NULL);
1958 				return 1;
1959 			default:
1960 				goto out_drop;
1961 		}
1962 	}
1963 	if (state->inode == dentry->d_inode) {
1964 		nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
1965 		nfs4_intent_set_file(nd, &path, state, fmode);
1966 		return 1;
1967 	}
1968 	nfs4_close_sync(&path, state, fmode);
1969 out_drop:
1970 	d_drop(dentry);
1971 	return 0;
1972 }
1973 
1974 void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
1975 {
1976 	if (ctx->state == NULL)
1977 		return;
1978 	if (is_sync)
1979 		nfs4_close_sync(&ctx->path, ctx->state, ctx->mode);
1980 	else
1981 		nfs4_close_state(&ctx->path, ctx->state, ctx->mode);
1982 }
1983 
1984 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
1985 {
1986 	struct nfs4_server_caps_arg args = {
1987 		.fhandle = fhandle,
1988 	};
1989 	struct nfs4_server_caps_res res = {};
1990 	struct rpc_message msg = {
1991 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
1992 		.rpc_argp = &args,
1993 		.rpc_resp = &res,
1994 	};
1995 	int status;
1996 
1997 	status = nfs4_call_sync(server, &msg, &args, &res, 0);
1998 	if (status == 0) {
1999 		memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
2000 		if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
2001 			server->caps |= NFS_CAP_ACLS;
2002 		if (res.has_links != 0)
2003 			server->caps |= NFS_CAP_HARDLINKS;
2004 		if (res.has_symlinks != 0)
2005 			server->caps |= NFS_CAP_SYMLINKS;
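		/*
		 * The cache consistency bitmask is the subset of supported
		 * attributes fetched on post-operation GETATTRs: the change
		 * attribute, size and time stamps.
		 */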
2006 		memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
2007 		server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
2008 		server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
2009 		server->acl_bitmask = res.acl_bitmask;
2010 	}
2011 
2012 	return status;
2013 }
2014 
2015 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2016 {
2017 	struct nfs4_exception exception = { };
2018 	int err;
2019 	do {
2020 		err = nfs4_handle_exception(server,
2021 				_nfs4_server_capabilities(server, fhandle),
2022 				&exception);
2023 	} while (exception.retry);
2024 	return err;
2025 }
2026 
2027 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2028 		struct nfs_fsinfo *info)
2029 {
2030 	struct nfs4_lookup_root_arg args = {
2031 		.bitmask = nfs4_fattr_bitmap,
2032 	};
2033 	struct nfs4_lookup_res res = {
2034 		.server = server,
2035 		.fattr = info->fattr,
2036 		.fh = fhandle,
2037 	};
2038 	struct rpc_message msg = {
2039 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
2040 		.rpc_argp = &args,
2041 		.rpc_resp = &res,
2042 	};
2043 	int status;
2044 
2045 	nfs_fattr_init(info->fattr);
2046 	status = nfs4_recover_expired_lease(server);
2047 	if (!status)
2048 		status = nfs4_check_client_ready(server->nfs_client);
2049 	if (!status)
2050 		status = nfs4_call_sync(server, &msg, &args, &res, 0);
2051 	return status;
2052 }
2053 
2054 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2055 		struct nfs_fsinfo *info)
2056 {
2057 	struct nfs4_exception exception = { };
2058 	int err;
2059 	do {
2060 		err = nfs4_handle_exception(server,
2061 				_nfs4_lookup_root(server, fhandle, info),
2062 				&exception);
2063 	} while (exception.retry);
2064 	return err;
2065 }
2066 
2067 /*
2068  * get the file handle for the "/" directory on the server
2069  */
2070 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
2071 			      struct nfs_fsinfo *info)
2072 {
2073 	int status;
2074 
2075 	status = nfs4_lookup_root(server, fhandle, info);
2076 	if (status == 0)
2077 		status = nfs4_server_capabilities(server, fhandle);
2078 	if (status == 0)
2079 		status = nfs4_do_fsinfo(server, fhandle, info);
2080 	return nfs4_map_errors(status);
2081 }
2082 
2083 /*
2084  * Get locations and (maybe) other attributes of a referral.
2085  * Note that we'll actually follow the referral later when
2086  * we detect an fsid mismatch in inode revalidation.
2087  */
2088 static int nfs4_get_referral(struct inode *dir, const struct qstr *name, struct nfs_fattr *fattr, struct nfs_fh *fhandle)
2089 {
2090 	int status = -ENOMEM;
2091 	struct page *page = NULL;
2092 	struct nfs4_fs_locations *locations = NULL;
2093 
2094 	page = alloc_page(GFP_KERNEL);
2095 	if (page == NULL)
2096 		goto out;
2097 	locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
2098 	if (locations == NULL)
2099 		goto out;
2100 
2101 	status = nfs4_proc_fs_locations(dir, name, locations, page);
2102 	if (status != 0)
2103 		goto out;
2104 	/* Make sure server returned a different fsid for the referral */
2105 	if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
2106 		dprintk("%s: server did not return a different fsid for a referral at %s\n", __func__, name->name);
2107 		status = -EIO;
2108 		goto out;
2109 	}
2110 
2111 	memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
2112 	fattr->valid |= NFS_ATTR_FATTR_V4_REFERRAL;
2113 	if (!fattr->mode)
2114 		fattr->mode = S_IFDIR;
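	/* A referral has no local filehandle, so hand back an empty one */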
2115 	memset(fhandle, 0, sizeof(struct nfs_fh));
2116 out:
2117 	if (page)
2118 		__free_page(page);
2119 	if (locations)
2120 		kfree(locations);
2121 	return status;
2122 }
2123 
2124 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2125 {
2126 	struct nfs4_getattr_arg args = {
2127 		.fh = fhandle,
2128 		.bitmask = server->attr_bitmask,
2129 	};
2130 	struct nfs4_getattr_res res = {
2131 		.fattr = fattr,
2132 		.server = server,
2133 	};
2134 	struct rpc_message msg = {
2135 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
2136 		.rpc_argp = &args,
2137 		.rpc_resp = &res,
2138 	};
2139 
2140 	nfs_fattr_init(fattr);
2141 	return nfs4_call_sync(server, &msg, &args, &res, 0);
2142 }
2143 
2144 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2145 {
2146 	struct nfs4_exception exception = { };
2147 	int err;
2148 	do {
2149 		err = nfs4_handle_exception(server,
2150 				_nfs4_proc_getattr(server, fhandle, fattr),
2151 				&exception);
2152 	} while (exception.retry);
2153 	return err;
2154 }
2155 
2156 /*
2157  * The file is not closed if it is opened due to a request to change
2158  * the size of the file. The open call will not be needed once the
2159  * VFS layer lookup-intents are implemented.
2160  *
2161  * Close is called when the inode is destroyed.
2162  * If we haven't opened the file for O_WRONLY, we
2163  * need to do so in the size_change case in order to obtain a stateid.
2164  *
2165  * Got race?
2166  * Because OPEN is always done by name in nfsv4, it is
2167  * possible that we opened a different file by the same
2168  * name.  We can recognize this race condition, but we
2169  * can't do anything about it besides returning an error.
2170  *
2171  * This will be fixed with VFS changes (lookup-intent).
2172  */
2173 static int
2174 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
2175 		  struct iattr *sattr)
2176 {
2177 	struct inode *inode = dentry->d_inode;
2178 	struct rpc_cred *cred = NULL;
2179 	struct nfs4_state *state = NULL;
2180 	int status;
2181 
2182 	nfs_fattr_init(fattr);
2183 
2184 	/* Search for an existing open(O_WRITE) file */
2185 	if (sattr->ia_valid & ATTR_FILE) {
2186 		struct nfs_open_context *ctx;
2187 
2188 		ctx = nfs_file_open_context(sattr->ia_file);
2189 		if (ctx) {
2190 			cred = ctx->cred;
2191 			state = ctx->state;
2192 		}
2193 	}
2194 
2195 	status = nfs4_do_setattr(inode, cred, fattr, sattr, state);
2196 	if (status == 0)
2197 		nfs_setattr_update_inode(inode, sattr);
2198 	return status;
2199 }
2200 
2201 static int _nfs4_proc_lookupfh(struct nfs_server *server, const struct nfs_fh *dirfh,
2202 		const struct qstr *name, struct nfs_fh *fhandle,
2203 		struct nfs_fattr *fattr)
2204 {
2205 	int		       status;
2206 	struct nfs4_lookup_arg args = {
2207 		.bitmask = server->attr_bitmask,
2208 		.dir_fh = dirfh,
2209 		.name = name,
2210 	};
2211 	struct nfs4_lookup_res res = {
2212 		.server = server,
2213 		.fattr = fattr,
2214 		.fh = fhandle,
2215 	};
2216 	struct rpc_message msg = {
2217 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
2218 		.rpc_argp = &args,
2219 		.rpc_resp = &res,
2220 	};
2221 
2222 	nfs_fattr_init(fattr);
2223 
2224 	dprintk("NFS call  lookupfh %s\n", name->name);
2225 	status = nfs4_call_sync(server, &msg, &args, &res, 0);
2226 	dprintk("NFS reply lookupfh: %d\n", status);
2227 	return status;
2228 }
2229 
2230 static int nfs4_proc_lookupfh(struct nfs_server *server, struct nfs_fh *dirfh,
2231 			      struct qstr *name, struct nfs_fh *fhandle,
2232 			      struct nfs_fattr *fattr)
2233 {
2234 	struct nfs4_exception exception = { };
2235 	int err;
2236 	do {
2237 		err = _nfs4_proc_lookupfh(server, dirfh, name, fhandle, fattr);
2238 		/* FIXME: !!!! */
2239 		if (err == -NFS4ERR_MOVED) {
2240 			err = -EREMOTE;
2241 			break;
2242 		}
2243 		err = nfs4_handle_exception(server, err, &exception);
2244 	} while (exception.retry);
2245 	return err;
2246 }
2247 
2248 static int _nfs4_proc_lookup(struct inode *dir, const struct qstr *name,
2249 		struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2250 {
2251 	int status;
2252 
2253 	dprintk("NFS call  lookup %s\n", name->name);
2254 	status = _nfs4_proc_lookupfh(NFS_SERVER(dir), NFS_FH(dir), name, fhandle, fattr);
2255 	if (status == -NFS4ERR_MOVED)
2256 		status = nfs4_get_referral(dir, name, fattr, fhandle);
2257 	dprintk("NFS reply lookup: %d\n", status);
2258 	return status;
2259 }
2260 
2261 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2262 {
2263 	struct nfs4_exception exception = { };
2264 	int err;
2265 	do {
2266 		err = nfs4_handle_exception(NFS_SERVER(dir),
2267 				_nfs4_proc_lookup(dir, name, fhandle, fattr),
2268 				&exception);
2269 	} while (exception.retry);
2270 	return err;
2271 }
2272 
2273 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
2274 {
2275 	struct nfs_server *server = NFS_SERVER(inode);
2276 	struct nfs_fattr fattr;
2277 	struct nfs4_accessargs args = {
2278 		.fh = NFS_FH(inode),
2279 		.bitmask = server->attr_bitmask,
2280 	};
2281 	struct nfs4_accessres res = {
2282 		.server = server,
2283 		.fattr = &fattr,
2284 	};
2285 	struct rpc_message msg = {
2286 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
2287 		.rpc_argp = &args,
2288 		.rpc_resp = &res,
2289 		.rpc_cred = entry->cred,
2290 	};
2291 	int mode = entry->mask;
2292 	int status;
2293 
2294 	/*
2295 	 * Determine which access bits we want to ask for...
2296 	 */
2297 	if (mode & MAY_READ)
2298 		args.access |= NFS4_ACCESS_READ;
2299 	if (S_ISDIR(inode->i_mode)) {
2300 		if (mode & MAY_WRITE)
2301 			args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
2302 		if (mode & MAY_EXEC)
2303 			args.access |= NFS4_ACCESS_LOOKUP;
2304 	} else {
2305 		if (mode & MAY_WRITE)
2306 			args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
2307 		if (mode & MAY_EXEC)
2308 			args.access |= NFS4_ACCESS_EXECUTE;
2309 	}
2310 	nfs_fattr_init(&fattr);
2311 	status = nfs4_call_sync(server, &msg, &args, &res, 0);
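	/* Translate the NFS4_ACCESS_* bits the server granted back
	 * into MAY_* permissions for the access cache entry */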
2312 	if (!status) {
2313 		entry->mask = 0;
2314 		if (res.access & NFS4_ACCESS_READ)
2315 			entry->mask |= MAY_READ;
2316 		if (res.access & (NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE))
2317 			entry->mask |= MAY_WRITE;
2318 		if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE))
2319 			entry->mask |= MAY_EXEC;
2320 		nfs_refresh_inode(inode, &fattr);
2321 	}
2322 	return status;
2323 }
2324 
2325 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
2326 {
2327 	struct nfs4_exception exception = { };
2328 	int err;
2329 	do {
2330 		err = nfs4_handle_exception(NFS_SERVER(inode),
2331 				_nfs4_proc_access(inode, entry),
2332 				&exception);
2333 	} while (exception.retry);
2334 	return err;
2335 }
2336 
2337 /*
2338  * TODO: For the time being, we don't try to get any attributes
2339  * along with any of the zero-copy operations READ, READDIR,
2340  * READLINK, WRITE.
2341  *
2342  * In the case of the first three, we want to put the GETATTR
2343  * after the read-type operation -- this is because it is hard
2344  * to predict the length of a GETATTR response in v4, and thus
2345  * align the READ data correctly.  This means that the GETATTR
2346  * may end up partially falling into the page cache, and we should
2347  * shift it into the 'tail' of the xdr_buf before processing.
2348  * To do this efficiently, we need to know the total length
2349  * of data received, which doesn't seem to be available outside
2350  * of the RPC layer.
2351  *
2352  * In the case of WRITE, we also want to put the GETATTR after
2353  * the operation -- in this case because we want to make sure
2354  * we get the post-operation mtime and size.  This means that
2355  * we can't use xdr_encode_pages() as written: we need a variant
2356  * of it which would leave room in the 'tail' iovec.
2357  *
2358  * Both of these changes to the XDR layer would in fact be quite
2359  * minor, but I decided to leave them for a subsequent patch.
2360  */
2361 static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
2362 		unsigned int pgbase, unsigned int pglen)
2363 {
2364 	struct nfs4_readlink args = {
2365 		.fh       = NFS_FH(inode),
2366 		.pgbase	  = pgbase,
2367 		.pglen    = pglen,
2368 		.pages    = &page,
2369 	};
2370 	struct nfs4_readlink_res res;
2371 	struct rpc_message msg = {
2372 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
2373 		.rpc_argp = &args,
2374 		.rpc_resp = &res,
2375 	};
2376 
2377 	return nfs4_call_sync(NFS_SERVER(inode), &msg, &args, &res, 0);
2378 }
2379 
2380 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
2381 		unsigned int pgbase, unsigned int pglen)
2382 {
2383 	struct nfs4_exception exception = { };
2384 	int err;
2385 	do {
2386 		err = nfs4_handle_exception(NFS_SERVER(inode),
2387 				_nfs4_proc_readlink(inode, page, pgbase, pglen),
2388 				&exception);
2389 	} while (exception.retry);
2390 	return err;
2391 }
2392 
2393 /*
2394  * Got race?
2395  * We will need to arrange for the VFS layer to provide an atomic open.
2396  * Until then, this create/open method is prone to inefficiency and race
2397  * conditions due to the lookup, create, and open VFS calls from sys_open()
2398  * being placed on the wire.
2399  *
2400  * Given the above sorry state of affairs, I'm simply sending an OPEN.
2401  * The file will be opened again in the subsequent VFS open call
2402  * (nfs4_proc_file_open).
2403  *
2404  * The open for read will just hang around to be used by any process that
2405  * opens the file O_RDONLY. This will all be resolved with the VFS changes.
2406  */
2407 
2408 static int
2409 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
2410                  int flags, struct nameidata *nd)
2411 {
2412 	struct path path = {
2413 		.mnt = nd->path.mnt,
2414 		.dentry = dentry,
2415 	};
2416 	struct nfs4_state *state;
2417 	struct rpc_cred *cred;
2418 	fmode_t fmode = flags & (FMODE_READ | FMODE_WRITE);
2419 	int status = 0;
2420 
2421 	cred = rpc_lookup_cred();
2422 	if (IS_ERR(cred)) {
2423 		status = PTR_ERR(cred);
2424 		goto out;
2425 	}
2426 	state = nfs4_do_open(dir, &path, fmode, flags, sattr, cred);
2427 	d_drop(dentry);
2428 	if (IS_ERR(state)) {
2429 		status = PTR_ERR(state);
2430 		goto out_putcred;
2431 	}
2432 	d_add(dentry, igrab(state->inode));
2433 	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
2434 	if (flags & O_EXCL) {
2435 		struct nfs_fattr fattr;
2436 		status = nfs4_do_setattr(state->inode, cred, &fattr, sattr, state);
2437 		if (status == 0)
2438 			nfs_setattr_update_inode(state->inode, sattr);
2439 		nfs_post_op_update_inode(state->inode, &fattr);
2440 	}
2441 	if (status == 0 && (nd->flags & LOOKUP_OPEN) != 0)
2442 		status = nfs4_intent_set_file(nd, &path, state, fmode);
2443 	else
2444 		nfs4_close_sync(&path, state, fmode);
2445 out_putcred:
2446 	put_rpccred(cred);
2447 out:
2448 	return status;
2449 }
2450 
2451 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
2452 {
2453 	struct nfs_server *server = NFS_SERVER(dir);
2454 	struct nfs_removeargs args = {
2455 		.fh = NFS_FH(dir),
2456 		.name.len = name->len,
2457 		.name.name = name->name,
2458 		.bitmask = server->attr_bitmask,
2459 	};
2460 	struct nfs_removeres res = {
2461 		.server = server,
2462 	};
2463 	struct rpc_message msg = {
2464 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
2465 		.rpc_argp = &args,
2466 		.rpc_resp = &res,
2467 	};
2468 	int			status;
2469 
2470 	nfs_fattr_init(&res.dir_attr);
2471 	status = nfs4_call_sync(server, &msg, &args, &res, 1);
2472 	if (status == 0) {
2473 		update_changeattr(dir, &res.cinfo);
2474 		nfs_post_op_update_inode(dir, &res.dir_attr);
2475 	}
2476 	return status;
2477 }
2478 
2479 static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
2480 {
2481 	struct nfs4_exception exception = { };
2482 	int err;
2483 	do {
2484 		err = nfs4_handle_exception(NFS_SERVER(dir),
2485 				_nfs4_proc_remove(dir, name),
2486 				&exception);
2487 	} while (exception.retry);
2488 	return err;
2489 }
2490 
2491 static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
2492 {
2493 	struct nfs_server *server = NFS_SERVER(dir);
2494 	struct nfs_removeargs *args = msg->rpc_argp;
2495 	struct nfs_removeres *res = msg->rpc_resp;
2496 
2497 	args->bitmask = server->cache_consistency_bitmask;
2498 	res->server = server;
2499 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
2500 }
2501 
2502 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
2503 {
2504 	struct nfs_removeres *res = task->tk_msg.rpc_resp;
2505 
2506 	nfs4_sequence_done(res->server, &res->seq_res, task->tk_status);
2507 	if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
2508 		return 0;
2509 	nfs4_sequence_free_slot(res->server->nfs_client, &res->seq_res);
2510 	update_changeattr(dir, &res->cinfo);
2511 	nfs_post_op_update_inode(dir, &res->dir_attr);
2512 	return 1;
2513 }
2514 
2515 static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
2516 		struct inode *new_dir, struct qstr *new_name)
2517 {
2518 	struct nfs_server *server = NFS_SERVER(old_dir);
2519 	struct nfs4_rename_arg arg = {
2520 		.old_dir = NFS_FH(old_dir),
2521 		.new_dir = NFS_FH(new_dir),
2522 		.old_name = old_name,
2523 		.new_name = new_name,
2524 		.bitmask = server->attr_bitmask,
2525 	};
2526 	struct nfs_fattr old_fattr, new_fattr;
2527 	struct nfs4_rename_res res = {
2528 		.server = server,
2529 		.old_fattr = &old_fattr,
2530 		.new_fattr = &new_fattr,
2531 	};
2532 	struct rpc_message msg = {
2533 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
2534 		.rpc_argp = &arg,
2535 		.rpc_resp = &res,
2536 	};
2537 	int			status;
2538 
2539 	nfs_fattr_init(res.old_fattr);
2540 	nfs_fattr_init(res.new_fattr);
2541 	status = nfs4_call_sync(server, &msg, &arg, &res, 1);
2542 
2543 	if (!status) {
2544 		update_changeattr(old_dir, &res.old_cinfo);
2545 		nfs_post_op_update_inode(old_dir, res.old_fattr);
2546 		update_changeattr(new_dir, &res.new_cinfo);
2547 		nfs_post_op_update_inode(new_dir, res.new_fattr);
2548 	}
2549 	return status;
2550 }
2551 
2552 static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
2553 		struct inode *new_dir, struct qstr *new_name)
2554 {
2555 	struct nfs4_exception exception = { };
2556 	int err;
2557 	do {
2558 		err = nfs4_handle_exception(NFS_SERVER(old_dir),
2559 				_nfs4_proc_rename(old_dir, old_name,
2560 					new_dir, new_name),
2561 				&exception);
2562 	} while (exception.retry);
2563 	return err;
2564 }
2565 
2566 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
2567 {
2568 	struct nfs_server *server = NFS_SERVER(inode);
2569 	struct nfs4_link_arg arg = {
2570 		.fh     = NFS_FH(inode),
2571 		.dir_fh = NFS_FH(dir),
2572 		.name   = name,
2573 		.bitmask = server->attr_bitmask,
2574 	};
2575 	struct nfs_fattr fattr, dir_attr;
2576 	struct nfs4_link_res res = {
2577 		.server = server,
2578 		.fattr = &fattr,
2579 		.dir_attr = &dir_attr,
2580 	};
2581 	struct rpc_message msg = {
2582 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
2583 		.rpc_argp = &arg,
2584 		.rpc_resp = &res,
2585 	};
2586 	int			status;
2587 
2588 	nfs_fattr_init(res.fattr);
2589 	nfs_fattr_init(res.dir_attr);
2590 	status = nfs4_call_sync(server, &msg, &arg, &res, 1);
2591 	if (!status) {
2592 		update_changeattr(dir, &res.cinfo);
2593 		nfs_post_op_update_inode(dir, res.dir_attr);
2594 		nfs_post_op_update_inode(inode, res.fattr);
2595 	}
2596 
2597 	return status;
2598 }
2599 
2600 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
2601 {
2602 	struct nfs4_exception exception = { };
2603 	int err;
2604 	do {
2605 		err = nfs4_handle_exception(NFS_SERVER(inode),
2606 				_nfs4_proc_link(inode, dir, name),
2607 				&exception);
2608 	} while (exception.retry);
2609 	return err;
2610 }
2611 
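/*
 * Common scratch space for the CREATE-based operations (symlink, mkdir,
 * mknod): a single allocation carries the rpc_message, the arguments and
 * results, and the filehandle/fattrs they point at.
 */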
2612 struct nfs4_createdata {
2613 	struct rpc_message msg;
2614 	struct nfs4_create_arg arg;
2615 	struct nfs4_create_res res;
2616 	struct nfs_fh fh;
2617 	struct nfs_fattr fattr;
2618 	struct nfs_fattr dir_fattr;
2619 };
2620 
2621 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
2622 		struct qstr *name, struct iattr *sattr, u32 ftype)
2623 {
2624 	struct nfs4_createdata *data;
2625 
2626 	data = kzalloc(sizeof(*data), GFP_KERNEL);
2627 	if (data != NULL) {
2628 		struct nfs_server *server = NFS_SERVER(dir);
2629 
2630 		data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
2631 		data->msg.rpc_argp = &data->arg;
2632 		data->msg.rpc_resp = &data->res;
2633 		data->arg.dir_fh = NFS_FH(dir);
2634 		data->arg.server = server;
2635 		data->arg.name = name;
2636 		data->arg.attrs = sattr;
2637 		data->arg.ftype = ftype;
2638 		data->arg.bitmask = server->attr_bitmask;
2639 		data->res.server = server;
2640 		data->res.fh = &data->fh;
2641 		data->res.fattr = &data->fattr;
2642 		data->res.dir_fattr = &data->dir_fattr;
2643 		nfs_fattr_init(data->res.fattr);
2644 		nfs_fattr_init(data->res.dir_fattr);
2645 	}
2646 	return data;
2647 }
2648 
2649 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
2650 {
2651 	int status = nfs4_call_sync(NFS_SERVER(dir), &data->msg,
2652 				    &data->arg, &data->res, 1);
2653 	if (status == 0) {
2654 		update_changeattr(dir, &data->res.dir_cinfo);
2655 		nfs_post_op_update_inode(dir, data->res.dir_fattr);
2656 		status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
2657 	}
2658 	return status;
2659 }
2660 
2661 static void nfs4_free_createdata(struct nfs4_createdata *data)
2662 {
2663 	kfree(data);
2664 }
2665 
2666 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
2667 		struct page *page, unsigned int len, struct iattr *sattr)
2668 {
2669 	struct nfs4_createdata *data;
2670 	int status = -ENAMETOOLONG;
2671 
2672 	if (len > NFS4_MAXPATHLEN)
2673 		goto out;
2674 
2675 	status = -ENOMEM;
2676 	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
2677 	if (data == NULL)
2678 		goto out;
2679 
2680 	data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
2681 	data->arg.u.symlink.pages = &page;
2682 	data->arg.u.symlink.len = len;
2683 
2684 	status = nfs4_do_create(dir, dentry, data);
2685 
2686 	nfs4_free_createdata(data);
2687 out:
2688 	return status;
2689 }
2690 
2691 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
2692 		struct page *page, unsigned int len, struct iattr *sattr)
2693 {
2694 	struct nfs4_exception exception = { };
2695 	int err;
2696 	do {
2697 		err = nfs4_handle_exception(NFS_SERVER(dir),
2698 				_nfs4_proc_symlink(dir, dentry, page,
2699 							len, sattr),
2700 				&exception);
2701 	} while (exception.retry);
2702 	return err;
2703 }
2704 
2705 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
2706 		struct iattr *sattr)
2707 {
2708 	struct nfs4_createdata *data;
2709 	int status = -ENOMEM;
2710 
2711 	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
2712 	if (data == NULL)
2713 		goto out;
2714 
2715 	status = nfs4_do_create(dir, dentry, data);
2716 
2717 	nfs4_free_createdata(data);
2718 out:
2719 	return status;
2720 }
2721 
2722 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
2723 		struct iattr *sattr)
2724 {
2725 	struct nfs4_exception exception = { };
2726 	int err;
2727 	do {
2728 		err = nfs4_handle_exception(NFS_SERVER(dir),
2729 				_nfs4_proc_mkdir(dir, dentry, sattr),
2730 				&exception);
2731 	} while (exception.retry);
2732 	return err;
2733 }
2734 
2735 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
2736                   u64 cookie, struct page *page, unsigned int count, int plus)
2737 {
2738 	struct inode		*dir = dentry->d_inode;
2739 	struct nfs4_readdir_arg args = {
2740 		.fh = NFS_FH(dir),
2741 		.pages = &page,
2742 		.pgbase = 0,
2743 		.count = count,
2744 		.bitmask = NFS_SERVER(dentry->d_inode)->cache_consistency_bitmask,
2745 	};
2746 	struct nfs4_readdir_res res;
2747 	struct rpc_message msg = {
2748 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
2749 		.rpc_argp = &args,
2750 		.rpc_resp = &res,
2751 		.rpc_cred = cred,
2752 	};
2753 	int			status;
2754 
2755 	dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__,
2756 			dentry->d_parent->d_name.name,
2757 			dentry->d_name.name,
2758 			(unsigned long long)cookie);
2759 	nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
2760 	res.pgbase = args.pgbase;
2761 	status = nfs4_call_sync(NFS_SERVER(dir), &msg, &args, &res, 0);
2762 	if (status == 0)
2763 		memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
2764 
2765 	nfs_invalidate_atime(dir);
2766 
2767 	dprintk("%s: returns %d\n", __func__, status);
2768 	return status;
2769 }
2770 
2771 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
2772                   u64 cookie, struct page *page, unsigned int count, int plus)
2773 {
2774 	struct nfs4_exception exception = { };
2775 	int err;
2776 	do {
2777 		err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode),
2778 				_nfs4_proc_readdir(dentry, cred, cookie,
2779 					page, count, plus),
2780 				&exception);
2781 	} while (exception.retry);
2782 	return err;
2783 }
2784 
2785 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
2786 		struct iattr *sattr, dev_t rdev)
2787 {
2788 	struct nfs4_createdata *data;
2789 	int mode = sattr->ia_mode;
2790 	int status = -ENOMEM;
2791 
2792 	BUG_ON(!(sattr->ia_valid & ATTR_MODE));
2793 	BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode));
2794 
2795 	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
2796 	if (data == NULL)
2797 		goto out;
2798 
2799 	if (S_ISFIFO(mode))
2800 		data->arg.ftype = NF4FIFO;
2801 	else if (S_ISBLK(mode)) {
2802 		data->arg.ftype = NF4BLK;
2803 		data->arg.u.device.specdata1 = MAJOR(rdev);
2804 		data->arg.u.device.specdata2 = MINOR(rdev);
2805 	}
2806 	else if (S_ISCHR(mode)) {
2807 		data->arg.ftype = NF4CHR;
2808 		data->arg.u.device.specdata1 = MAJOR(rdev);
2809 		data->arg.u.device.specdata2 = MINOR(rdev);
2810 	}
2811 
2812 	status = nfs4_do_create(dir, dentry, data);
2813 
2814 	nfs4_free_createdata(data);
2815 out:
2816 	return status;
2817 }
2818 
2819 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
2820 		struct iattr *sattr, dev_t rdev)
2821 {
2822 	struct nfs4_exception exception = { };
2823 	int err;
2824 	do {
2825 		err = nfs4_handle_exception(NFS_SERVER(dir),
2826 				_nfs4_proc_mknod(dir, dentry, sattr, rdev),
2827 				&exception);
2828 	} while (exception.retry);
2829 	return err;
2830 }
2831 
2832 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
2833 		 struct nfs_fsstat *fsstat)
2834 {
2835 	struct nfs4_statfs_arg args = {
2836 		.fh = fhandle,
2837 		.bitmask = server->attr_bitmask,
2838 	};
2839 	struct nfs4_statfs_res res = {
2840 		.fsstat = fsstat,
2841 	};
2842 	struct rpc_message msg = {
2843 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
2844 		.rpc_argp = &args,
2845 		.rpc_resp = &res,
2846 	};
2847 
2848 	nfs_fattr_init(fsstat->fattr);
2849 	return  nfs4_call_sync(server, &msg, &args, &res, 0);
2850 }
2851 
2852 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
2853 {
2854 	struct nfs4_exception exception = { };
2855 	int err;
2856 	do {
2857 		err = nfs4_handle_exception(server,
2858 				_nfs4_proc_statfs(server, fhandle, fsstat),
2859 				&exception);
2860 	} while (exception.retry);
2861 	return err;
2862 }
2863 
2864 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
2865 		struct nfs_fsinfo *fsinfo)
2866 {
2867 	struct nfs4_fsinfo_arg args = {
2868 		.fh = fhandle,
2869 		.bitmask = server->attr_bitmask,
2870 	};
2871 	struct nfs4_fsinfo_res res = {
2872 		.fsinfo = fsinfo,
2873 	};
2874 	struct rpc_message msg = {
2875 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
2876 		.rpc_argp = &args,
2877 		.rpc_resp = &res,
2878 	};
2879 
2880 	return nfs4_call_sync(server, &msg, &args, &res, 0);
2881 }
2882 
2883 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
2884 {
2885 	struct nfs4_exception exception = { };
2886 	int err;
2887 
2888 	do {
2889 		err = nfs4_handle_exception(server,
2890 				_nfs4_do_fsinfo(server, fhandle, fsinfo),
2891 				&exception);
2892 	} while (exception.retry);
2893 	return err;
2894 }
2895 
2896 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
2897 {
2898 	nfs_fattr_init(fsinfo->fattr);
2899 	return nfs4_do_fsinfo(server, fhandle, fsinfo);
2900 }
2901 
2902 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
2903 		struct nfs_pathconf *pathconf)
2904 {
2905 	struct nfs4_pathconf_arg args = {
2906 		.fh = fhandle,
2907 		.bitmask = server->attr_bitmask,
2908 	};
2909 	struct nfs4_pathconf_res res = {
2910 		.pathconf = pathconf,
2911 	};
2912 	struct rpc_message msg = {
2913 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
2914 		.rpc_argp = &args,
2915 		.rpc_resp = &res,
2916 	};
2917 
2918 	/* None of the pathconf attributes are mandatory to implement */
2919 	if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
2920 		memset(pathconf, 0, sizeof(*pathconf));
2921 		return 0;
2922 	}
2923 
2924 	nfs_fattr_init(pathconf->fattr);
2925 	return nfs4_call_sync(server, &msg, &args, &res, 0);
2926 }
2927 
2928 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
2929 		struct nfs_pathconf *pathconf)
2930 {
2931 	struct nfs4_exception exception = { };
2932 	int err;
2933 
2934 	do {
2935 		err = nfs4_handle_exception(server,
2936 				_nfs4_proc_pathconf(server, fhandle, pathconf),
2937 				&exception);
2938 	} while (exception.retry);
2939 	return err;
2940 }
2941 
2942 static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
2943 {
2944 	struct nfs_server *server = NFS_SERVER(data->inode);
2945 
2946 	dprintk("--> %s\n", __func__);
2947 
2948 	/* nfs4_sequence_free_slot called in the read rpc_call_done */
2949 	nfs4_sequence_done(server, &data->res.seq_res, task->tk_status);
2950 
2951 	if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
2952 		nfs4_restart_rpc(task, server->nfs_client);
2953 		return -EAGAIN;
2954 	}
2955 
2956 	nfs_invalidate_atime(data->inode);
2957 	if (task->tk_status > 0)
2958 		renew_lease(server, data->timestamp);
2959 	return 0;
2960 }
2961 
2962 static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
2963 {
2964 	data->timestamp   = jiffies;
2965 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
2966 }
2967 
2968 static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
2969 {
2970 	struct inode *inode = data->inode;
2971 
2972 	/* slot is freed in nfs_writeback_done */
2973 	nfs4_sequence_done(NFS_SERVER(inode), &data->res.seq_res,
2974 			   task->tk_status);
2975 
2976 	if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
2977 		nfs4_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
2978 		return -EAGAIN;
2979 	}
2980 	if (task->tk_status >= 0) {
2981 		renew_lease(NFS_SERVER(inode), data->timestamp);
2982 		nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
2983 	}
2984 	return 0;
2985 }
2986 
2987 static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
2988 {
2989 	struct nfs_server *server = NFS_SERVER(data->inode);
2990 
2991 	data->args.bitmask = server->cache_consistency_bitmask;
2992 	data->res.server = server;
2993 	data->timestamp   = jiffies;
2994 
2995 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
2996 }
2997 
2998 static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
2999 {
3000 	struct inode *inode = data->inode;
3001 
3002 	nfs4_sequence_done(NFS_SERVER(inode), &data->res.seq_res,
3003 			   task->tk_status);
3004 	if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
3005 		nfs4_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
3006 		return -EAGAIN;
3007 	}
3008 	nfs4_sequence_free_slot(NFS_SERVER(inode)->nfs_client,
3009 				&data->res.seq_res);
3010 	nfs_refresh_inode(inode, data->res.fattr);
3011 	return 0;
3012 }
3013 
3014 static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
3015 {
3016 	struct nfs_server *server = NFS_SERVER(data->inode);
3017 
3018 	data->args.bitmask = server->cache_consistency_bitmask;
3019 	data->res.server = server;
3020 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
3021 }
3022 
3023 /*
3024  * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
3025  * standalone procedure for queueing an asynchronous RENEW.
3026  */
3027 static void nfs4_renew_done(struct rpc_task *task, void *data)
3028 {
3029 	struct nfs_client *clp = (struct nfs_client *)task->tk_msg.rpc_argp;
3030 	unsigned long timestamp = (unsigned long)data;
3031 
3032 	if (task->tk_status < 0) {
3033 		/* Unless we're shutting down, schedule state recovery! */
3034 		if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) != 0)
3035 			nfs4_schedule_state_recovery(clp);
3036 		return;
3037 	}
3038 	spin_lock(&clp->cl_lock);
3039 	if (time_before(clp->cl_last_renewal, timestamp))
3040 		clp->cl_last_renewal = timestamp;
3041 	spin_unlock(&clp->cl_lock);
3042 	dprintk("%s calling put_rpccred on rpc_cred %p\n", __func__,
3043 				task->tk_msg.rpc_cred);
3044 	put_rpccred(task->tk_msg.rpc_cred);
3045 }
3046 
3047 static const struct rpc_call_ops nfs4_renew_ops = {
3048 	.rpc_call_done = nfs4_renew_done,
3049 };
3050 
3051 int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)
3052 {
3053 	struct rpc_message msg = {
3054 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_RENEW],
3055 		.rpc_argp	= clp,
3056 		.rpc_cred	= cred,
3057 	};
3058 
3059 	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
3060 			&nfs4_renew_ops, (void *)jiffies);
3061 }
3062 
3063 int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
3064 {
3065 	struct rpc_message msg = {
3066 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_RENEW],
3067 		.rpc_argp	= clp,
3068 		.rpc_cred	= cred,
3069 	};
3070 	unsigned long now = jiffies;
3071 	int status;
3072 
3073 	status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
3074 	if (status < 0)
3075 		return status;
3076 	spin_lock(&clp->cl_lock);
3077 	if (time_before(clp->cl_last_renewal, now))
3078 		clp->cl_last_renewal = now;
3079 	spin_unlock(&clp->cl_lock);
3080 	return 0;
3081 }
3082 
3083 static inline int nfs4_server_supports_acls(struct nfs_server *server)
3084 {
3085 	return (server->caps & NFS_CAP_ACLS)
3086 		&& (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
3087 		&& (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
3088 }
3089 
3090 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that
3091  * it's OK to put sizeof(struct page *) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on
3092  * the stack.
3093  */
3094 #define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
3095 
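/*
 * Map a virtually contiguous buffer onto the caller-supplied page vector,
 * recording the offset of the buffer within its first page in *pgbase.
 */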
3096 static void buf_to_pages(const void *buf, size_t buflen,
3097 		struct page **pages, unsigned int *pgbase)
3098 {
3099 	const void *p = buf;
3100 
3101 	*pgbase = offset_in_page(buf);
3102 	p -= *pgbase;
3103 	while (p < buf + buflen) {
3104 		*(pages++) = virt_to_page(p);
3105 		p += PAGE_CACHE_SIZE;
3106 	}
3107 }
3108 
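/*
 * Per-inode ACL cache: when 'cached' is zero only the length is known
 * (the ACL was too big to keep around), so a fresh GETACL is needed to
 * read the data itself.
 */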
3109 struct nfs4_cached_acl {
3110 	int cached;
3111 	size_t len;
3112 	char data[0];
3113 };
3114 
3115 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
3116 {
3117 	struct nfs_inode *nfsi = NFS_I(inode);
3118 
3119 	spin_lock(&inode->i_lock);
3120 	kfree(nfsi->nfs4_acl);
3121 	nfsi->nfs4_acl = acl;
3122 	spin_unlock(&inode->i_lock);
3123 }
3124 
3125 static void nfs4_zap_acl_attr(struct inode *inode)
3126 {
3127 	nfs4_set_cached_acl(inode, NULL);
3128 }
3129 
3130 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
3131 {
3132 	struct nfs_inode *nfsi = NFS_I(inode);
3133 	struct nfs4_cached_acl *acl;
3134 	int ret = -ENOENT;
3135 
3136 	spin_lock(&inode->i_lock);
3137 	acl = nfsi->nfs4_acl;
3138 	if (acl == NULL)
3139 		goto out;
3140 	if (buf == NULL) /* user is just asking for length */
3141 		goto out_len;
3142 	if (acl->cached == 0)
3143 		goto out;
3144 	ret = -ERANGE; /* see getxattr(2) man page */
3145 	if (acl->len > buflen)
3146 		goto out;
3147 	memcpy(buf, acl->data, acl->len);
3148 out_len:
3149 	ret = acl->len;
3150 out:
3151 	spin_unlock(&inode->i_lock);
3152 	return ret;
3153 }
3154 
3155 static void nfs4_write_cached_acl(struct inode *inode, const char *buf, size_t acl_len)
3156 {
3157 	struct nfs4_cached_acl *acl;
3158 
3159 	if (buf && acl_len <= PAGE_SIZE) {
3160 		acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL);
3161 		if (acl == NULL)
3162 			goto out;
3163 		acl->cached = 1;
3164 		memcpy(acl->data, buf, acl_len);
3165 	} else {
3166 		acl = kmalloc(sizeof(*acl), GFP_KERNEL);
3167 		if (acl == NULL)
3168 			goto out;
3169 		acl->cached = 0;
3170 	}
3171 	acl->len = acl_len;
3172 out:
3173 	nfs4_set_cached_acl(inode, acl);
3174 }
3175 
3176 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3177 {
3178 	struct page *pages[NFS4ACL_MAXPAGES];
3179 	struct nfs_getaclargs args = {
3180 		.fh = NFS_FH(inode),
3181 		.acl_pages = pages,
3182 		.acl_len = buflen,
3183 	};
3184 	struct nfs_getaclres res = {
3185 		.acl_len = buflen,
3186 	};
3187 	void *resp_buf;
3188 	struct rpc_message msg = {
3189 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
3190 		.rpc_argp = &args,
3191 		.rpc_resp = &res,
3192 	};
3193 	struct page *localpage = NULL;
3194 	int ret;
3195 
3196 	if (buflen < PAGE_SIZE) {
3197 		/* As long as we're doing a round trip to the server anyway,
3198 		 * let's be prepared for a page of acl data. */
3199 		localpage = alloc_page(GFP_KERNEL);
3200 		if (localpage == NULL)
3201 			return -ENOMEM;
3202 		resp_buf = page_address(localpage);
3203 		args.acl_pages[0] = localpage;
3204 		args.acl_pgbase = 0;
3205 		args.acl_len = PAGE_SIZE;
3206 	} else {
3207 		resp_buf = buf;
3208 		buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
3209 	}
3210 	ret = nfs4_call_sync(NFS_SERVER(inode), &msg, &args, &res, 0);
3211 	if (ret)
3212 		goto out_free;
3213 	if (res.acl_len > args.acl_len)
3214 		nfs4_write_cached_acl(inode, NULL, res.acl_len);
3215 	else
3216 		nfs4_write_cached_acl(inode, resp_buf, res.acl_len);
3217 	if (buf) {
3218 		ret = -ERANGE;
3219 		if (res.acl_len > buflen)
3220 			goto out_free;
3221 		if (localpage)
3222 			memcpy(buf, resp_buf, res.acl_len);
3223 	}
3224 	ret = res.acl_len;
3225 out_free:
3226 	if (localpage)
3227 		__free_page(localpage);
3228 	return ret;
3229 }
3230 
3231 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3232 {
3233 	struct nfs4_exception exception = { };
3234 	ssize_t ret;
3235 	do {
3236 		ret = __nfs4_get_acl_uncached(inode, buf, buflen);
3237 		if (ret >= 0)
3238 			break;
3239 		ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
3240 	} while (exception.retry);
3241 	return ret;
3242 }
3243 
3244 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
3245 {
3246 	struct nfs_server *server = NFS_SERVER(inode);
3247 	int ret;
3248 
3249 	if (!nfs4_server_supports_acls(server))
3250 		return -EOPNOTSUPP;
3251 	ret = nfs_revalidate_inode(server, inode);
3252 	if (ret < 0)
3253 		return ret;
3254 	ret = nfs4_read_cached_acl(inode, buf, buflen);
3255 	if (ret != -ENOENT)
3256 		return ret;
3257 	return nfs4_get_acl_uncached(inode, buf, buflen);
3258 }
3259 
3260 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
3261 {
3262 	struct nfs_server *server = NFS_SERVER(inode);
3263 	struct page *pages[NFS4ACL_MAXPAGES];
3264 	struct nfs_setaclargs arg = {
3265 		.fh		= NFS_FH(inode),
3266 		.acl_pages	= pages,
3267 		.acl_len	= buflen,
3268 	};
3269 	struct nfs_setaclres res;
3270 	struct rpc_message msg = {
3271 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_SETACL],
3272 		.rpc_argp	= &arg,
3273 		.rpc_resp	= &res,
3274 	};
3275 	int ret;
3276 
3277 	if (!nfs4_server_supports_acls(server))
3278 		return -EOPNOTSUPP;
3279 	nfs_inode_return_delegation(inode);
3280 	buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
3281 	ret = nfs4_call_sync(server, &msg, &arg, &res, 1);
3282 	nfs_access_zap_cache(inode);
3283 	nfs_zap_acl_cache(inode);
3284 	return ret;
3285 }
3286 
3287 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
3288 {
3289 	struct nfs4_exception exception = { };
3290 	int err;
3291 	do {
3292 		err = nfs4_handle_exception(NFS_SERVER(inode),
3293 				__nfs4_proc_set_acl(inode, buf, buflen),
3294 				&exception);
3295 	} while (exception.retry);
3296 	return err;
3297 }
3298 
3299 static int
3300 _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs_client *clp, struct nfs4_state *state)
3301 {
3302 	if (!clp || task->tk_status >= 0)
3303 		return 0;
3304 	switch (task->tk_status) {
3305 		case -NFS4ERR_ADMIN_REVOKED:
3306 		case -NFS4ERR_BAD_STATEID:
3307 		case -NFS4ERR_OPENMODE:
3308 			if (state == NULL)
3309 				break;
3310 			nfs4_state_mark_reclaim_nograce(clp, state);
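			/* Fall through: these also require state recovery */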
3311 		case -NFS4ERR_STALE_CLIENTID:
3312 		case -NFS4ERR_STALE_STATEID:
3313 		case -NFS4ERR_EXPIRED:
3314 			rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
3315 			nfs4_schedule_state_recovery(clp);
3316 			if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
3317 				rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
3318 			task->tk_status = 0;
3319 			return -EAGAIN;
3320 #if defined(CONFIG_NFS_V4_1)
3321 		case -NFS4ERR_BADSESSION:
3322 		case -NFS4ERR_BADSLOT:
3323 		case -NFS4ERR_BAD_HIGH_SLOT:
3324 		case -NFS4ERR_DEADSESSION:
3325 		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
3326 		case -NFS4ERR_SEQ_FALSE_RETRY:
3327 		case -NFS4ERR_SEQ_MISORDERED:
3328 			dprintk("%s ERROR %d, Reset session\n", __func__,
3329 				task->tk_status);
3330 			set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
3331 			task->tk_status = 0;
3332 			return -EAGAIN;
3333 #endif /* CONFIG_NFS_V4_1 */
3334 		case -NFS4ERR_DELAY:
3335 			if (server)
3336 				nfs_inc_server_stats(server, NFSIOS_DELAY);
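			/* Fall through: delay and retry, just like NFS4ERR_GRACE */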
3337 		case -NFS4ERR_GRACE:
3338 			rpc_delay(task, NFS4_POLL_RETRY_MAX);
3339 			task->tk_status = 0;
3340 			return -EAGAIN;
3341 		case -NFS4ERR_OLD_STATEID:
3342 			task->tk_status = 0;
3343 			return -EAGAIN;
3344 	}
3345 	task->tk_status = nfs4_map_errors(task->tk_status);
3346 	return 0;
3347 }
3348 
3349 static int
3350 nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
3351 {
3352 	return _nfs4_async_handle_error(task, server, server->nfs_client, state);
3353 }
3354 
3355 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, unsigned short port, struct rpc_cred *cred)
3356 {
3357 	nfs4_verifier sc_verifier;
3358 	struct nfs4_setclientid setclientid = {
3359 		.sc_verifier = &sc_verifier,
3360 		.sc_prog = program,
3361 	};
3362 	struct rpc_message msg = {
3363 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
3364 		.rpc_argp = &setclientid,
3365 		.rpc_resp = clp,
3366 		.rpc_cred = cred,
3367 	};
3368 	__be32 *p;
3369 	int loop = 0;
3370 	int status;
3371 
3372 	p = (__be32*)sc_verifier.data;
3373 	*p++ = htonl((u32)clp->cl_boot_time.tv_sec);
3374 	*p = htonl((u32)clp->cl_boot_time.tv_nsec);
3375 
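	/*
	 * Keep trying until the server accepts the client name: on
	 * NFS4ERR_CLID_INUSE, alternate between bumping the uniquifier in
	 * the name and sleeping out a full lease period before retrying.
	 */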
3376 	for(;;) {
3377 		setclientid.sc_name_len = scnprintf(setclientid.sc_name,
3378 				sizeof(setclientid.sc_name), "%s/%s %s %s %u",
3379 				clp->cl_ipaddr,
3380 				rpc_peeraddr2str(clp->cl_rpcclient,
3381 							RPC_DISPLAY_ADDR),
3382 				rpc_peeraddr2str(clp->cl_rpcclient,
3383 							RPC_DISPLAY_PROTO),
3384 				clp->cl_rpcclient->cl_auth->au_ops->au_name,
3385 				clp->cl_id_uniquifier);
3386 		setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
3387 				sizeof(setclientid.sc_netid),
3388 				rpc_peeraddr2str(clp->cl_rpcclient,
3389 							RPC_DISPLAY_NETID));
3390 		setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
3391 				sizeof(setclientid.sc_uaddr), "%s.%u.%u",
3392 				clp->cl_ipaddr, port >> 8, port & 255);
3393 
3394 		status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
3395 		if (status != -NFS4ERR_CLID_INUSE)
3396 			break;
3397 		if (signalled())
3398 			break;
3399 		if (loop++ & 1)
3400 			ssleep(clp->cl_lease_time + 1);
3401 		else
3402 			if (++clp->cl_id_uniquifier == 0)
3403 				break;
3404 	}
3405 	return status;
3406 }
3407 
3408 static int _nfs4_proc_setclientid_confirm(struct nfs_client *clp, struct rpc_cred *cred)
3409 {
3410 	struct nfs_fsinfo fsinfo;
3411 	struct rpc_message msg = {
3412 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
3413 		.rpc_argp = clp,
3414 		.rpc_resp = &fsinfo,
3415 		.rpc_cred = cred,
3416 	};
3417 	unsigned long now;
3418 	int status;
3419 
3420 	now = jiffies;
3421 	status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
3422 	if (status == 0) {
3423 		spin_lock(&clp->cl_lock);
3424 		clp->cl_lease_time = fsinfo.lease_time * HZ;
3425 		clp->cl_last_renewal = now;
3426 		spin_unlock(&clp->cl_lock);
3427 	}
3428 	return status;
3429 }
3430 
3431 int nfs4_proc_setclientid_confirm(struct nfs_client *clp, struct rpc_cred *cred)
3432 {
3433 	long timeout = 0;
3434 	int err;
3435 	do {
3436 		err = _nfs4_proc_setclientid_confirm(clp, cred);
3437 		switch (err) {
3438 			case 0:
3439 				return err;
3440 			case -NFS4ERR_RESOURCE:
3441 				/* The IBM lawyers misread another document! */
3442 			case -NFS4ERR_DELAY:
3443 				err = nfs4_delay(clp->cl_rpcclient, &timeout);
3444 		}
3445 	} while (err == 0);
3446 	return err;
3447 }
3448 
3449 struct nfs4_delegreturndata {
3450 	struct nfs4_delegreturnargs args;
3451 	struct nfs4_delegreturnres res;
3452 	struct nfs_fh fh;
3453 	nfs4_stateid stateid;
3454 	unsigned long timestamp;
3455 	struct nfs_fattr fattr;
3456 	int rpc_status;
3457 };
3458 
3459 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
3460 {
3461 	struct nfs4_delegreturndata *data = calldata;
3462 
3463 	nfs4_sequence_done_free_slot(data->res.server, &data->res.seq_res,
3464 				     task->tk_status);
3465 
3466 	data->rpc_status = task->tk_status;
3467 	if (data->rpc_status == 0)
3468 		renew_lease(data->res.server, data->timestamp);
3469 }
3470 
3471 static void nfs4_delegreturn_release(void *calldata)
3472 {
3473 	kfree(calldata);
3474 }
3475 
3476 #if defined(CONFIG_NFS_V4_1)
3477 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
3478 {
3479 	struct nfs4_delegreturndata *d_data;
3480 
3481 	d_data = (struct nfs4_delegreturndata *)data;
3482 
3483 	if (nfs4_setup_sequence(d_data->res.server->nfs_client,
3484 				&d_data->args.seq_args,
3485 				&d_data->res.seq_res, 1, task))
3486 		return;
3487 	rpc_call_start(task);
3488 }
3489 #endif /* CONFIG_NFS_V4_1 */
3490 
3491 static const struct rpc_call_ops nfs4_delegreturn_ops = {
3492 #if defined(CONFIG_NFS_V4_1)
3493 	.rpc_call_prepare = nfs4_delegreturn_prepare,
3494 #endif /* CONFIG_NFS_V4_1 */
3495 	.rpc_call_done = nfs4_delegreturn_done,
3496 	.rpc_release = nfs4_delegreturn_release,
3497 };
3498 
3499 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
3500 {
3501 	struct nfs4_delegreturndata *data;
3502 	struct nfs_server *server = NFS_SERVER(inode);
3503 	struct rpc_task *task;
3504 	struct rpc_message msg = {
3505 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
3506 		.rpc_cred = cred,
3507 	};
3508 	struct rpc_task_setup task_setup_data = {
3509 		.rpc_client = server->client,
3510 		.rpc_message = &msg,
3511 		.callback_ops = &nfs4_delegreturn_ops,
3512 		.flags = RPC_TASK_ASYNC,
3513 	};
3514 	int status = 0;
3515 
3516 	data = kzalloc(sizeof(*data), GFP_KERNEL);
3517 	if (data == NULL)
3518 		return -ENOMEM;
3519 	data->args.fhandle = &data->fh;
3520 	data->args.stateid = &data->stateid;
3521 	data->args.bitmask = server->attr_bitmask;
3522 	nfs_copy_fh(&data->fh, NFS_FH(inode));
3523 	memcpy(&data->stateid, stateid, sizeof(data->stateid));
3524 	data->res.fattr = &data->fattr;
3525 	data->res.server = server;
3526 	data->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
3527 	nfs_fattr_init(data->res.fattr);
3528 	data->timestamp = jiffies;
3529 	data->rpc_status = 0;
3530 
3531 	task_setup_data.callback_data = data;
3532 	msg.rpc_argp = &data->args;
3533 	msg.rpc_resp = &data->res;
3534 	task = rpc_run_task(&task_setup_data);
3535 	if (IS_ERR(task))
3536 		return PTR_ERR(task);
3537 	if (!issync)
3538 		goto out;
3539 	status = nfs4_wait_for_completion_rpc_task(task);
3540 	if (status != 0)
3541 		goto out;
3542 	status = data->rpc_status;
3543 	if (status != 0)
3544 		goto out;
3545 	nfs_refresh_inode(inode, &data->fattr);
3546 out:
3547 	rpc_put_task(task);
3548 	return status;
3549 }
3550 
3551 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
3552 {
3553 	struct nfs_server *server = NFS_SERVER(inode);
3554 	struct nfs4_exception exception = { };
3555 	int err;
3556 	do {
3557 		err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
3558 		switch (err) {
3559 			case -NFS4ERR_STALE_STATEID:
3560 			case -NFS4ERR_EXPIRED:
3561 			case 0:
3562 				return 0;
3563 		}
3564 		err = nfs4_handle_exception(server, err, &exception);
3565 	} while (exception.retry);
3566 	return err;
3567 }
3568 
3569 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
3570 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
3571 
3572 /*
3573  * sleep, with exponential backoff, and retry the LOCK operation.
3574  */
3575 static unsigned long
3576 nfs4_set_lock_task_retry(unsigned long timeout)
3577 {
3578 	schedule_timeout_killable(timeout);
3579 	timeout <<= 1;
3580 	if (timeout > NFS4_LOCK_MAXTIMEOUT)
3581 		return NFS4_LOCK_MAXTIMEOUT;
3582 	return timeout;
3583 }
3584 
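/*
 * Test for a conflicting lock with a LOCKT operation.  On success the
 * tested range is free and 'request' is marked F_UNLCK; if the server
 * replies NFS4ERR_DENIED, the conflicting lock is decoded into 'request'
 * (via res.denied) and 0 is returned to the caller.
 */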
3585 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
3586 {
3587 	struct inode *inode = state->inode;
3588 	struct nfs_server *server = NFS_SERVER(inode);
3589 	struct nfs_client *clp = server->nfs_client;
3590 	struct nfs_lockt_args arg = {
3591 		.fh = NFS_FH(inode),
3592 		.fl = request,
3593 	};
3594 	struct nfs_lockt_res res = {
3595 		.denied = request,
3596 	};
3597 	struct rpc_message msg = {
3598 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
3599 		.rpc_argp       = &arg,
3600 		.rpc_resp       = &res,
3601 		.rpc_cred	= state->owner->so_cred,
3602 	};
3603 	struct nfs4_lock_state *lsp;
3604 	int status;
3605 
3606 	arg.lock_owner.clientid = clp->cl_clientid;
3607 	status = nfs4_set_lock_state(state, request);
3608 	if (status != 0)
3609 		goto out;
3610 	lsp = request->fl_u.nfs4_fl.owner;
3611 	arg.lock_owner.id = lsp->ls_id.id;
3612 	status = nfs4_call_sync(server, &msg, &arg, &res, 1);
3613 	switch (status) {
3614 		case 0:
3615 			request->fl_type = F_UNLCK;
3616 			break;
3617 		case -NFS4ERR_DENIED:
3618 			status = 0;
3619 	}
3620 	request->fl_ops->fl_release_private(request);
3621 out:
3622 	return status;
3623 }
3624 
3625 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
3626 {
3627 	struct nfs4_exception exception = { };
3628 	int err;
3629 
3630 	do {
3631 		err = nfs4_handle_exception(NFS_SERVER(state->inode),
3632 				_nfs4_proc_getlk(state, cmd, request),
3633 				&exception);
3634 	} while (exception.retry);
3635 	return err;
3636 }
3637 
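/* Record the lock locally through the VFS, dispatching on POSIX vs. flock. */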
3638 static int do_vfs_lock(struct file *file, struct file_lock *fl)
3639 {
3640 	int res = 0;
3641 	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
3642 		case FL_POSIX:
3643 			res = posix_lock_file_wait(file, fl);
3644 			break;
3645 		case FL_FLOCK:
3646 			res = flock_lock_file_wait(file, fl);
3647 			break;
3648 		default:
3649 			BUG();
3650 	}
3651 	return res;
3652 }
3653 
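/* Per-call data for an asynchronous LOCKU (unlock) request. */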
3654 struct nfs4_unlockdata {
3655 	struct nfs_locku_args arg;
3656 	struct nfs_locku_res res;
3657 	struct nfs4_lock_state *lsp;
3658 	struct nfs_open_context *ctx;
3659 	struct file_lock fl;
3660 	const struct nfs_server *server;
3661 	unsigned long timestamp;
3662 };
3663 
3664 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
3665 		struct nfs_open_context *ctx,
3666 		struct nfs4_lock_state *lsp,
3667 		struct nfs_seqid *seqid)
3668 {
3669 	struct nfs4_unlockdata *p;
3670 	struct inode *inode = lsp->ls_state->inode;
3671 
3672 	p = kzalloc(sizeof(*p), GFP_KERNEL);
3673 	if (p == NULL)
3674 		return NULL;
3675 	p->arg.fh = NFS_FH(inode);
3676 	p->arg.fl = &p->fl;
3677 	p->arg.seqid = seqid;
3678 	p->res.seqid = seqid;
3679 	p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
3680 	p->arg.stateid = &lsp->ls_stateid;
3681 	p->lsp = lsp;
3682 	atomic_inc(&lsp->ls_count);
3683 	/* Ensure we don't close file until we're done freeing locks! */
3684 	p->ctx = get_nfs_open_context(ctx);
3685 	memcpy(&p->fl, fl, sizeof(p->fl));
3686 	p->server = NFS_SERVER(inode);
3687 	return p;
3688 }
3689 
3690 static void nfs4_locku_release_calldata(void *data)
3691 {
3692 	struct nfs4_unlockdata *calldata = data;
3693 	nfs_free_seqid(calldata->arg.seqid);
3694 	nfs4_put_lock_state(calldata->lsp);
3695 	put_nfs_open_context(calldata->ctx);
3696 	kfree(calldata);
3697 }
3698 
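/*
 * LOCKU completion: on success copy back the new lock stateid and renew
 * the lease.  Stale/expired/bad stateid errors are ignored (the lock is
 * gone anyway); anything else goes to the async error handler, which may
 * request that the RPC be restarted.
 */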
3699 static void nfs4_locku_done(struct rpc_task *task, void *data)
3700 {
3701 	struct nfs4_unlockdata *calldata = data;
3702 
3703 	nfs4_sequence_done(calldata->server, &calldata->res.seq_res,
3704 			   task->tk_status);
3705 	if (RPC_ASSASSINATED(task))
3706 		return;
3707 	switch (task->tk_status) {
3708 		case 0:
3709 			memcpy(calldata->lsp->ls_stateid.data,
3710 					calldata->res.stateid.data,
3711 					sizeof(calldata->lsp->ls_stateid.data));
3712 			renew_lease(calldata->server, calldata->timestamp);
3713 			break;
3714 		case -NFS4ERR_BAD_STATEID:
3715 		case -NFS4ERR_OLD_STATEID:
3716 		case -NFS4ERR_STALE_STATEID:
3717 		case -NFS4ERR_EXPIRED:
3718 			break;
3719 		default:
3720 			if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
3721 				nfs4_restart_rpc(task,
3722 						calldata->server->nfs_client);
3723 	}
3724 	nfs4_sequence_free_slot(calldata->server->nfs_client,
3725 				&calldata->res.seq_res);
3726 }
3727 
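/*
 * LOCKU prepare: wait for the lock seqid to become available, and skip
 * the RPC entirely (without running nfs4_locku_done) if the lock stateid
 * was never initialized on the server.
 */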
3728 static void nfs4_locku_prepare(struct rpc_task *task, void *data)
3729 {
3730 	struct nfs4_unlockdata *calldata = data;
3731 
3732 	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
3733 		return;
3734 	if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) {
3735 		/* Note: exit _without_ running nfs4_locku_done */
3736 		task->tk_action = NULL;
3737 		return;
3738 	}
3739 	calldata->timestamp = jiffies;
3740 	if (nfs4_setup_sequence(calldata->server->nfs_client,
3741 				&calldata->arg.seq_args,
3742 				&calldata->res.seq_res, 1, task))
3743 		return;
3744 	rpc_call_start(task);
3745 }
3746 
3747 static const struct rpc_call_ops nfs4_locku_ops = {
3748 	.rpc_call_prepare = nfs4_locku_prepare,
3749 	.rpc_call_done = nfs4_locku_done,
3750 	.rpc_release = nfs4_locku_release_calldata,
3751 };
3752 
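/*
 * Set up and launch the asynchronous LOCKU task.  Ownership of 'seqid'
 * passes to the task: it is released in the rpc_release callback, or
 * here if the call data cannot be allocated.
 */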
3753 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
3754 		struct nfs_open_context *ctx,
3755 		struct nfs4_lock_state *lsp,
3756 		struct nfs_seqid *seqid)
3757 {
3758 	struct nfs4_unlockdata *data;
3759 	struct rpc_message msg = {
3760 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
3761 		.rpc_cred = ctx->cred,
3762 	};
3763 	struct rpc_task_setup task_setup_data = {
3764 		.rpc_client = NFS_CLIENT(lsp->ls_state->inode),
3765 		.rpc_message = &msg,
3766 		.callback_ops = &nfs4_locku_ops,
3767 		.workqueue = nfsiod_workqueue,
3768 		.flags = RPC_TASK_ASYNC,
3769 	};
3770 
3771 	/* Ensure this is an unlock - when canceling a lock, the
3772 	 * canceled lock is passed in, and it won't be an unlock.
3773 	 */
3774 	fl->fl_type = F_UNLCK;
3775 
3776 	data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
3777 	if (data == NULL) {
3778 		nfs_free_seqid(seqid);
3779 		return ERR_PTR(-ENOMEM);
3780 	}
3781 
3782 	msg.rpc_argp = &data->arg;
3783 	msg.rpc_resp = &data->res;
3784 	task_setup_data.callback_data = data;
3785 	return rpc_run_task(&task_setup_data);
3786 }
3787 
3788 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
3789 {
3790 	struct nfs_inode *nfsi = NFS_I(state->inode);
3791 	struct nfs_seqid *seqid;
3792 	struct nfs4_lock_state *lsp;
3793 	struct rpc_task *task;
3794 	int status = 0;
3795 	unsigned char fl_flags = request->fl_flags;
3796 
3797 	status = nfs4_set_lock_state(state, request);
3798 	/* Unlock _before_ we do the RPC call */
3799 	request->fl_flags |= FL_EXISTS;
3800 	down_read(&nfsi->rwsem);
3801 	if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
3802 		up_read(&nfsi->rwsem);
3803 		goto out;
3804 	}
3805 	up_read(&nfsi->rwsem);
3806 	if (status != 0)
3807 		goto out;
3808 	/* Is this a delegated lock? */
3809 	if (test_bit(NFS_DELEGATED_STATE, &state->flags))
3810 		goto out;
3811 	lsp = request->fl_u.nfs4_fl.owner;
3812 	seqid = nfs_alloc_seqid(&lsp->ls_seqid);
3813 	status = -ENOMEM;
3814 	if (seqid == NULL)
3815 		goto out;
3816 	task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
3817 	status = PTR_ERR(task);
3818 	if (IS_ERR(task))
3819 		goto out;
3820 	status = nfs4_wait_for_completion_rpc_task(task);
3821 	rpc_put_task(task);
3822 out:
3823 	request->fl_flags = fl_flags;
3824 	return status;
3825 }
3826 
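/* Per-call data for an asynchronous LOCK request. */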
3827 struct nfs4_lockdata {
3828 	struct nfs_lock_args arg;
3829 	struct nfs_lock_res res;
3830 	struct nfs4_lock_state *lsp;
3831 	struct nfs_open_context *ctx;
3832 	struct file_lock fl;
3833 	unsigned long timestamp;
3834 	int rpc_status;
3835 	int cancelled;
3836 	struct nfs_server *server;
3837 };
3838 
3839 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
3840 		struct nfs_open_context *ctx, struct nfs4_lock_state *lsp)
3841 {
3842 	struct nfs4_lockdata *p;
3843 	struct inode *inode = lsp->ls_state->inode;
3844 	struct nfs_server *server = NFS_SERVER(inode);
3845 
3846 	p = kzalloc(sizeof(*p), GFP_KERNEL);
3847 	if (p == NULL)
3848 		return NULL;
3849 
3850 	p->arg.fh = NFS_FH(inode);
3851 	p->arg.fl = &p->fl;
3852 	p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid);
3853 	if (p->arg.open_seqid == NULL)
3854 		goto out_free;
3855 	p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid);
3856 	if (p->arg.lock_seqid == NULL)
3857 		goto out_free_seqid;
3858 	p->arg.lock_stateid = &lsp->ls_stateid;
3859 	p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
3860 	p->arg.lock_owner.id = lsp->ls_id.id;
3861 	p->res.lock_seqid = p->arg.lock_seqid;
3862 	p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
3863 	p->lsp = lsp;
3864 	p->server = server;
3865 	atomic_inc(&lsp->ls_count);
3866 	p->ctx = get_nfs_open_context(ctx);
3867 	memcpy(&p->fl, fl, sizeof(p->fl));
3868 	return p;
3869 out_free_seqid:
3870 	nfs_free_seqid(p->arg.open_seqid);
3871 out_free:
3872 	kfree(p);
3873 	return NULL;
3874 }
3875 
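/*
 * LOCK prepare: wait for the lock seqid (and, when a new lock owner must
 * be established, the open seqid), decide whether an open_to_lock_owner
 * is needed, then start the RPC (setting up the v4.1 sequence first
 * where applicable).
 */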
3876 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
3877 {
3878 	struct nfs4_lockdata *data = calldata;
3879 	struct nfs4_state *state = data->lsp->ls_state;
3880 
3881 	dprintk("%s: begin!\n", __func__);
3882 	if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
3883 		return;
3884 	/* Do we need to do an open_to_lock_owner? */
3885 	if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
3886 		if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0)
3887 			return;
3888 		data->arg.open_stateid = &state->stateid;
3889 		data->arg.new_lock_owner = 1;
3890 		data->res.open_seqid = data->arg.open_seqid;
3891 	} else
3892 		data->arg.new_lock_owner = 0;
3893 	data->timestamp = jiffies;
3894 	if (nfs4_setup_sequence(data->server->nfs_client, &data->arg.seq_args,
3895 				&data->res.seq_res, 1, task))
3896 		return;
3897 	rpc_call_start(task);
3898 	dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
3899 }
3900 
3901 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
3902 {
3903 	struct nfs4_lockdata *data = calldata;
3904 
3905 	dprintk("%s: begin!\n", __func__);
3906 
3907 	nfs4_sequence_done_free_slot(data->server, &data->res.seq_res,
3908 				     task->tk_status);
3909 
3910 	data->rpc_status = task->tk_status;
3911 	if (RPC_ASSASSINATED(task))
3912 		goto out;
3913 	if (data->arg.new_lock_owner != 0) {
3914 		if (data->rpc_status == 0)
3915 			nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
3916 		else
3917 			goto out;
3918 	}
3919 	if (data->rpc_status == 0) {
3920 		memcpy(data->lsp->ls_stateid.data, data->res.stateid.data,
3921 					sizeof(data->lsp->ls_stateid.data));
3922 		data->lsp->ls_flags |= NFS_LOCK_INITIALIZED;
3923 		renew_lease(NFS_SERVER(data->ctx->path.dentry->d_inode), data->timestamp);
3924 	}
3925 out:
3926 	dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
3927 }
3928 
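/*
 * LOCK release: if the caller gave up waiting (data->cancelled), the
 * server may nevertheless have granted the lock, so send an unlock with
 * the same lock seqid before freeing the call data.
 */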
3929 static void nfs4_lock_release(void *calldata)
3930 {
3931 	struct nfs4_lockdata *data = calldata;
3932 
3933 	dprintk("%s: begin!\n", __func__);
3934 	nfs_free_seqid(data->arg.open_seqid);
3935 	if (data->cancelled != 0) {
3936 		struct rpc_task *task;
3937 		task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
3938 				data->arg.lock_seqid);
3939 		if (!IS_ERR(task))
3940 			rpc_put_task(task);
3941 		dprintk("%s: cancelling lock!\n", __func__);
3942 	} else
3943 		nfs_free_seqid(data->arg.lock_seqid);
3944 	nfs4_put_lock_state(data->lsp);
3945 	put_nfs_open_context(data->ctx);
3946 	kfree(data);
3947 	dprintk("%s: done!\n", __func__);
3948 }
3949 
3950 static const struct rpc_call_ops nfs4_lock_ops = {
3951 	.rpc_call_prepare = nfs4_lock_prepare,
3952 	.rpc_call_done = nfs4_lock_done,
3953 	.rpc_release = nfs4_lock_release,
3954 };
3955 
3956 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int reclaim)
3957 {
3958 	struct nfs4_lockdata *data;
3959 	struct rpc_task *task;
3960 	struct rpc_message msg = {
3961 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
3962 		.rpc_cred = state->owner->so_cred,
3963 	};
3964 	struct rpc_task_setup task_setup_data = {
3965 		.rpc_client = NFS_CLIENT(state->inode),
3966 		.rpc_message = &msg,
3967 		.callback_ops = &nfs4_lock_ops,
3968 		.workqueue = nfsiod_workqueue,
3969 		.flags = RPC_TASK_ASYNC,
3970 	};
3971 	int ret;
3972 
3973 	dprintk("%s: begin!\n", __func__);
3974 	data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
3975 			fl->fl_u.nfs4_fl.owner);
3976 	if (data == NULL)
3977 		return -ENOMEM;
3978 	if (IS_SETLKW(cmd))
3979 		data->arg.block = 1;
3980 	if (reclaim != 0)
3981 		data->arg.reclaim = 1;
3982 	msg.rpc_argp = &data->arg;
3983 	msg.rpc_resp = &data->res;
3984 	task_setup_data.callback_data = data;
3985 	task = rpc_run_task(&task_setup_data);
3986 	if (IS_ERR(task))
3987 		return PTR_ERR(task);
3988 	ret = nfs4_wait_for_completion_rpc_task(task);
3989 	if (ret == 0) {
3990 		ret = data->rpc_status;
3991 	} else
3992 		data->cancelled = 1;
3993 	rpc_put_task(task);
3994 	dprintk("%s: done, ret = %d!\n", __func__, ret);
3995 	return ret;
3996 }
3997 
3998 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
3999 {
4000 	struct nfs_server *server = NFS_SERVER(state->inode);
4001 	struct nfs4_exception exception = { };
4002 	int err;
4003 
4004 	do {
4005 		/* Cache the lock if possible... */
4006 		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
4007 			return 0;
4008 		err = _nfs4_do_setlk(state, F_SETLK, request, 1);
4009 		if (err != -NFS4ERR_DELAY)
4010 			break;
4011 		nfs4_handle_exception(server, err, &exception);
4012 	} while (exception.retry);
4013 	return err;
4014 }
4015 
4016 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
4017 {
4018 	struct nfs_server *server = NFS_SERVER(state->inode);
4019 	struct nfs4_exception exception = { };
4020 	int err;
4021 
4022 	err = nfs4_set_lock_state(state, request);
4023 	if (err != 0)
4024 		return err;
4025 	do {
4026 		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
4027 			return 0;
4028 		err = _nfs4_do_setlk(state, F_SETLK, request, 0);
4029 		if (err != -NFS4ERR_DELAY)
4030 			break;
4031 		nfs4_handle_exception(server, err, &exception);
4032 	} while (exception.retry);
4033 	return err;
4034 }
4035 
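/*
 * Set a lock.  If we hold a delegation for the file, the lock can be
 * cached purely locally; otherwise send the LOCK to the server and, on
 * success, record the result with the local lock manager as well.
 */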
4036 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4037 {
4038 	struct nfs_inode *nfsi = NFS_I(state->inode);
4039 	unsigned char fl_flags = request->fl_flags;
4040 	int status;
4041 
4042 	/* Is this a delegated open? */
4043 	status = nfs4_set_lock_state(state, request);
4044 	if (status != 0)
4045 		goto out;
4046 	request->fl_flags |= FL_ACCESS;
4047 	status = do_vfs_lock(request->fl_file, request);
4048 	if (status < 0)
4049 		goto out;
4050 	down_read(&nfsi->rwsem);
4051 	if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
4052 		/* Yes: cache locks! */
4053 		/* ...but avoid races with delegation recall... */
4054 		request->fl_flags = fl_flags & ~FL_SLEEP;
4055 		status = do_vfs_lock(request->fl_file, request);
4056 		goto out_unlock;
4057 	}
4058 	status = _nfs4_do_setlk(state, cmd, request, 0);
4059 	if (status != 0)
4060 		goto out_unlock;
4061 	/* Note: we always want to sleep here! */
4062 	request->fl_flags = fl_flags | FL_SLEEP;
4063 	if (do_vfs_lock(request->fl_file, request) < 0)
4064 		printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
4065 out_unlock:
4066 	up_read(&nfsi->rwsem);
4067 out:
4068 	request->fl_flags = fl_flags;
4069 	return status;
4070 }
4071 
4072 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4073 {
4074 	struct nfs4_exception exception = { };
4075 	int err;
4076 
4077 	do {
4078 		err = _nfs4_proc_setlk(state, cmd, request);
4079 		if (err == -NFS4ERR_DENIED)
4080 			err = -EAGAIN;
4081 		err = nfs4_handle_exception(NFS_SERVER(state->inode),
4082 				err, &exception);
4083 	} while (exception.retry);
4084 	return err;
4085 }
4086 
4087 static int
4088 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
4089 {
4090 	struct nfs_open_context *ctx;
4091 	struct nfs4_state *state;
4092 	unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
4093 	int status;
4094 
4095 	/* verify open state */
4096 	ctx = nfs_file_open_context(filp);
4097 	state = ctx->state;
4098 
4099 	if (request->fl_start < 0 || request->fl_end < 0)
4100 		return -EINVAL;
4101 
4102 	if (IS_GETLK(cmd))
4103 		return nfs4_proc_getlk(state, F_GETLK, request);
4104 
4105 	if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
4106 		return -EINVAL;
4107 
4108 	if (request->fl_type == F_UNLCK)
4109 		return nfs4_proc_unlck(state, cmd, request);
4110 
4111 	do {
4112 		status = nfs4_proc_setlk(state, cmd, request);
4113 		if ((status != -EAGAIN) || IS_SETLK(cmd))
4114 			break;
4115 		timeout = nfs4_set_lock_task_retry(timeout);
4116 		status = -ERESTARTSYS;
4117 		if (signalled())
4118 			break;
4119 	} while(status < 0);
4120 	return status;
4121 }
4122 
4123 int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
4124 {
4125 	struct nfs_server *server = NFS_SERVER(state->inode);
4126 	struct nfs4_exception exception = { };
4127 	int err;
4128 
4129 	err = nfs4_set_lock_state(state, fl);
4130 	if (err != 0)
4131 		goto out;
4132 	do {
4133 		err = _nfs4_do_setlk(state, F_SETLK, fl, 0);
4134 		switch (err) {
4135 			default:
4136 				printk(KERN_ERR "%s: unhandled error %d.\n",
4137 						__func__, err);
4138 			case 0:
4139 			case -ESTALE:
4140 				goto out;
4141 			case -NFS4ERR_EXPIRED:
4142 			case -NFS4ERR_STALE_CLIENTID:
4143 			case -NFS4ERR_STALE_STATEID:
4144 				nfs4_schedule_state_recovery(server->nfs_client);
4145 				goto out;
4146 			case -ERESTARTSYS:
4147 				/*
4148 				 * The show must go on: exit, but mark the
4149 				 * stateid as needing recovery.
4150 				 */
4151 			case -NFS4ERR_ADMIN_REVOKED:
4152 			case -NFS4ERR_BAD_STATEID:
4153 			case -NFS4ERR_OPENMODE:
4154 				nfs4_state_mark_reclaim_nograce(server->nfs_client, state);
4155 				err = 0;
4156 				goto out;
4157 			case -ENOMEM:
4158 			case -NFS4ERR_DENIED:
4159 				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
4160 				err = 0;
4161 				goto out;
4162 			case -NFS4ERR_DELAY:
4163 				break;
4164 		}
4165 		err = nfs4_handle_exception(server, err, &exception);
4166 	} while (exception.retry);
4167 out:
4168 	return err;
4169 }
4170 
4171 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
4172 
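/*
 * The NFSv4 ACL is exposed to userland as a single raw xattr named
 * "system.nfs4_acl".  As an illustration only (not part of this file),
 * it can be read or replaced with the ordinary xattr system calls:
 *
 *	len = getxattr(path, "system.nfs4_acl", buf, buflen);
 *	err = setxattr(path, "system.nfs4_acl", buf, len, 0);
 *
 * or with the getfattr(1)/setfattr(1) utilities.
 */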
4173 int nfs4_setxattr(struct dentry *dentry, const char *key, const void *buf,
4174 		size_t buflen, int flags)
4175 {
4176 	struct inode *inode = dentry->d_inode;
4177 
4178 	if (strcmp(key, XATTR_NAME_NFSV4_ACL) != 0)
4179 		return -EOPNOTSUPP;
4180 
4181 	return nfs4_proc_set_acl(inode, buf, buflen);
4182 }
4183 
4184 /* The getxattr man page suggests returning -ENODATA for unknown attributes,
4185  * and that's what we'll do for e.g. user attributes that haven't been set.
4186  * But we'll follow ext2/ext3's lead by returning -EOPNOTSUPP for unsupported
4187  * attributes in kernel-managed attribute namespaces. */
4188 ssize_t nfs4_getxattr(struct dentry *dentry, const char *key, void *buf,
4189 		size_t buflen)
4190 {
4191 	struct inode *inode = dentry->d_inode;
4192 
4193 	if (strcmp(key, XATTR_NAME_NFSV4_ACL) != 0)
4194 		return -EOPNOTSUPP;
4195 
4196 	return nfs4_proc_get_acl(inode, buf, buflen);
4197 }
4198 
4199 ssize_t nfs4_listxattr(struct dentry *dentry, char *buf, size_t buflen)
4200 {
4201 	size_t len = strlen(XATTR_NAME_NFSV4_ACL) + 1;
4202 
4203 	if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode)))
4204 		return 0;
4205 	if (buf && buflen < len)
4206 		return -ERANGE;
4207 	if (buf)
4208 		memcpy(buf, XATTR_NAME_NFSV4_ACL, len);
4209 	return len;
4210 }
4211 
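/*
 * A referral entry carries no ordinary attributes.  Once the fileid,
 * fsid and referral bits are all present, synthesize a directory type,
 * mode and link count so the VFS can treat the entry as a mountpoint.
 */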
4212 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
4213 {
4214 	if (!((fattr->valid & NFS_ATTR_FATTR_FILEID) &&
4215 		(fattr->valid & NFS_ATTR_FATTR_FSID) &&
4216 		(fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)))
4217 		return;
4218 
4219 	fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
4220 		NFS_ATTR_FATTR_NLINK;
4221 	fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
4222 	fattr->nlink = 2;
4223 }
4224 
4225 int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
4226 		struct nfs4_fs_locations *fs_locations, struct page *page)
4227 {
4228 	struct nfs_server *server = NFS_SERVER(dir);
4229 	u32 bitmask[2] = {
4230 		[0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
4231 		[1] = FATTR4_WORD1_MOUNTED_ON_FILEID,
4232 	};
4233 	struct nfs4_fs_locations_arg args = {
4234 		.dir_fh = NFS_FH(dir),
4235 		.name = name,
4236 		.page = page,
4237 		.bitmask = bitmask,
4238 	};
4239 	struct nfs4_fs_locations_res res = {
4240 		.fs_locations = fs_locations,
4241 	};
4242 	struct rpc_message msg = {
4243 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
4244 		.rpc_argp = &args,
4245 		.rpc_resp = &res,
4246 	};
4247 	int status;
4248 
4249 	dprintk("%s: start\n", __func__);
4250 	nfs_fattr_init(&fs_locations->fattr);
4251 	fs_locations->server = server;
4252 	fs_locations->nlocations = 0;
4253 	status = nfs4_call_sync(server, &msg, &args, &res, 0);
4254 	nfs_fixup_referral_attributes(&fs_locations->fattr);
4255 	dprintk("%s: returned status = %d\n", __func__, status);
4256 	return status;
4257 }
4258 
4259 #ifdef CONFIG_NFS_V4_1
4260 /*
4261  * nfs4_proc_exchange_id()
4262  *
4263  * Since the clientid has expired, all compounds using sessions
4264  * associated with the stale clientid will be returning
4265  * NFS4ERR_BADSESSION in the sequence operation, and will therefore
4266  * be in some phase of session reset.
4267  */
4268 static int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
4269 {
4270 	nfs4_verifier verifier;
4271 	struct nfs41_exchange_id_args args = {
4272 		.client = clp,
4273 		.flags = clp->cl_exchange_flags,
4274 	};
4275 	struct nfs41_exchange_id_res res = {
4276 		.client = clp,
4277 	};
4278 	int status;
4279 	struct rpc_message msg = {
4280 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
4281 		.rpc_argp = &args,
4282 		.rpc_resp = &res,
4283 		.rpc_cred = cred,
4284 	};
4285 	__be32 *p;
4286 
4287 	dprintk("--> %s\n", __func__);
4288 	BUG_ON(clp == NULL);
4289 
4290 	p = (__be32 *)verifier.data;
4291 	*p++ = htonl((u32)clp->cl_boot_time.tv_sec);
4292 	*p = htonl((u32)clp->cl_boot_time.tv_nsec);
4293 	args.verifier = &verifier;
4294 
4295 	while (1) {
4296 		args.id_len = scnprintf(args.id, sizeof(args.id),
4297 					"%s/%s %u",
4298 					clp->cl_ipaddr,
4299 					rpc_peeraddr2str(clp->cl_rpcclient,
4300 							 RPC_DISPLAY_ADDR),
4301 					clp->cl_id_uniquifier);
4302 
4303 		status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
4304 
4305 		if (status != -NFS4ERR_CLID_INUSE)
4306 			break;
4307 
4308 		if (signalled())
4309 			break;
4310 
4311 		if (++clp->cl_id_uniquifier == 0)
4312 			break;
4313 	}
4314 
4315 	dprintk("<-- %s status= %d\n", __func__, status);
4316 	return status;
4317 }
4318 
4319 struct nfs4_get_lease_time_data {
4320 	struct nfs4_get_lease_time_args *args;
4321 	struct nfs4_get_lease_time_res *res;
4322 	struct nfs_client *clp;
4323 };
4324 
4325 static void nfs4_get_lease_time_prepare(struct rpc_task *task,
4326 					void *calldata)
4327 {
4328 	int ret;
4329 	struct nfs4_get_lease_time_data *data =
4330 			(struct nfs4_get_lease_time_data *)calldata;
4331 
4332 	dprintk("--> %s\n", __func__);
4333 	/* just set up the sequence; do not trigger session recovery
4334 	   since we're invoked within one */
4335 	ret = nfs41_setup_sequence(data->clp->cl_session,
4336 					&data->args->la_seq_args,
4337 					&data->res->lr_seq_res, 0, task);
4338 
4339 	BUG_ON(ret == -EAGAIN);
4340 	rpc_call_start(task);
4341 	dprintk("<-- %s\n", __func__);
4342 }
4343 
4344 /*
4345  * Called from nfs4_state_manager thread for session setup, so don't recover
4346  * from sequence operation or clientid errors.
4347  */
4348 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
4349 {
4350 	struct nfs4_get_lease_time_data *data =
4351 			(struct nfs4_get_lease_time_data *)calldata;
4352 
4353 	dprintk("--> %s\n", __func__);
4354 	nfs41_sequence_done(data->clp, &data->res->lr_seq_res, task->tk_status);
4355 	switch (task->tk_status) {
4356 	case -NFS4ERR_DELAY:
4357 	case -NFS4ERR_GRACE:
4358 		dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
4359 		rpc_delay(task, NFS4_POLL_RETRY_MIN);
4360 		task->tk_status = 0;
4361 		nfs4_restart_rpc(task, data->clp);
4362 		return;
4363 	}
4364 	nfs41_sequence_free_slot(data->clp, &data->res->lr_seq_res);
4365 	dprintk("<-- %s\n", __func__);
4366 }
4367 
4368 static const struct rpc_call_ops nfs4_get_lease_time_ops = {
4369 	.rpc_call_prepare = nfs4_get_lease_time_prepare,
4370 	.rpc_call_done = nfs4_get_lease_time_done,
4371 };
4372 
4373 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
4374 {
4375 	struct rpc_task *task;
4376 	struct nfs4_get_lease_time_args args;
4377 	struct nfs4_get_lease_time_res res = {
4378 		.lr_fsinfo = fsinfo,
4379 	};
4380 	struct nfs4_get_lease_time_data data = {
4381 		.args = &args,
4382 		.res = &res,
4383 		.clp = clp,
4384 	};
4385 	struct rpc_message msg = {
4386 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
4387 		.rpc_argp = &args,
4388 		.rpc_resp = &res,
4389 	};
4390 	struct rpc_task_setup task_setup = {
4391 		.rpc_client = clp->cl_rpcclient,
4392 		.rpc_message = &msg,
4393 		.callback_ops = &nfs4_get_lease_time_ops,
4394 		.callback_data = &data
4395 	};
4396 	int status;
4397 
4398 	res.lr_seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
4399 	dprintk("--> %s\n", __func__);
4400 	task = rpc_run_task(&task_setup);
4401 
4402 	if (IS_ERR(task))
4403 		status = PTR_ERR(task);
4404 	else {
4405 		status = task->tk_status;
4406 		rpc_put_task(task);
4407 	}
4408 	dprintk("<-- %s return %d\n", __func__, status);
4409 
4410 	return status;
4411 }
4412 
4413 /*
4414  * Reset a slot table
4415  */
4416 static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, int max_slots,
4417 		int old_max_slots, int ivalue)
4418 {
4419 	int i;
4420 	int ret = 0;
4421 
4422 	dprintk("--> %s: max_reqs=%u, tbl %p\n", __func__, max_slots, tbl);
4423 
4424 	/*
4425 	 * Until we have dynamic slot table adjustment, insist
4426 	 * upon the same slot table size
4427 	 */
4428 	if (max_slots != old_max_slots) {
4429 		dprintk("%s reset slot table doesn't match old\n",
4430 			__func__);
4431 		ret = -EINVAL; /*XXX NFS4ERR_REQ_TOO_BIG ? */
4432 		goto out;
4433 	}
4434 	spin_lock(&tbl->slot_tbl_lock);
4435 	for (i = 0; i < max_slots; ++i)
4436 		tbl->slots[i].seq_nr = ivalue;
4437 	tbl->highest_used_slotid = -1;
4438 	spin_unlock(&tbl->slot_tbl_lock);
4439 	dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
4440 		tbl, tbl->slots, tbl->max_slots);
4441 out:
4442 	dprintk("<-- %s: return %d\n", __func__, ret);
4443 	return ret;
4444 }
4445 
4446 /*
4447  * Reset the forechannel and backchannel slot tables
4448  */
4449 static int nfs4_reset_slot_tables(struct nfs4_session *session)
4450 {
4451 	int status;
4452 
4453 	status = nfs4_reset_slot_table(&session->fc_slot_table,
4454 			session->fc_attrs.max_reqs,
4455 			session->fc_slot_table.max_slots,
4456 			1);
4457 	if (status)
4458 		return status;
4459 
4460 	status = nfs4_reset_slot_table(&session->bc_slot_table,
4461 			session->bc_attrs.max_reqs,
4462 			session->bc_slot_table.max_slots,
4463 			0);
4464 	return status;
4465 }
4466 
4467 /* Destroy the slot table */
4468 static void nfs4_destroy_slot_tables(struct nfs4_session *session)
4469 {
4470 	if (session->fc_slot_table.slots != NULL) {
4471 		kfree(session->fc_slot_table.slots);
4472 		session->fc_slot_table.slots = NULL;
4473 	}
4474 	if (session->bc_slot_table.slots != NULL) {
4475 		kfree(session->bc_slot_table.slots);
4476 		session->bc_slot_table.slots = NULL;
4477 	}
4478 	return;
4479 }
4480 
4481 /*
4482  * Initialize slot table
4483  */
4484 static int nfs4_init_slot_table(struct nfs4_slot_table *tbl,
4485 		int max_slots, int ivalue)
4486 {
4487 	int i;
4488 	struct nfs4_slot *slot;
4489 	int ret = -ENOMEM;
4490 
4491 	BUG_ON(max_slots > NFS4_MAX_SLOT_TABLE);
4492 
4493 	dprintk("--> %s: max_reqs=%u\n", __func__, max_slots);
4494 
4495 	slot = kcalloc(max_slots, sizeof(struct nfs4_slot), GFP_KERNEL);
4496 	if (!slot)
4497 		goto out;
4498 	for (i = 0; i < max_slots; ++i)
4499 		slot[i].seq_nr = ivalue;
4500 	ret = 0;
4501 
4502 	spin_lock(&tbl->slot_tbl_lock);
4503 	if (tbl->slots != NULL) {
4504 		spin_unlock(&tbl->slot_tbl_lock);
4505 		dprintk("%s: slot table already initialized. tbl=%p slots=%p\n",
4506 			__func__, tbl, tbl->slots);
4507 		WARN_ON(1);
4508 		goto out_free;
4509 	}
4510 	tbl->max_slots = max_slots;
4511 	tbl->slots = slot;
4512 	tbl->highest_used_slotid = -1;  /* no slot is currently used */
4513 	spin_unlock(&tbl->slot_tbl_lock);
4514 	dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
4515 		tbl, tbl->slots, tbl->max_slots);
4516 out:
4517 	dprintk("<-- %s: return %d\n", __func__, ret);
4518 	return ret;
4519 
4520 out_free:
4521 	kfree(slot);
4522 	goto out;
4523 }
4524 
4525 /*
4526  * Initialize the forechannel and backchannel tables
4527  */
4528 static int nfs4_init_slot_tables(struct nfs4_session *session)
4529 {
4530 	int status;
4531 
4532 	status = nfs4_init_slot_table(&session->fc_slot_table,
4533 			session->fc_attrs.max_reqs, 1);
4534 	if (status)
4535 		return status;
4536 
4537 	status = nfs4_init_slot_table(&session->bc_slot_table,
4538 			session->bc_attrs.max_reqs, 0);
4539 	if (status)
4540 		nfs4_destroy_slot_tables(session);
4541 
4542 	return status;
4543 }
4544 
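/*
 * Allocate an NFSv4.1 session and initialize the fore and back channel
 * slot table locks and wait queues.  The session parameters themselves
 * are negotiated later via CREATE_SESSION.
 */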
4545 struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
4546 {
4547 	struct nfs4_session *session;
4548 	struct nfs4_slot_table *tbl;
4549 
4550 	session = kzalloc(sizeof(struct nfs4_session), GFP_KERNEL);
4551 	if (!session)
4552 		return NULL;
4553 
4554 	set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
4555 	/*
4556 	 * The create session reply races with the server back
4557 	 * channel probe. Mark the client NFS_CS_SESSION_INITING
4558 	 * so that the client back channel can find the
4559 	 * nfs_client struct
4560 	 */
4561 	clp->cl_cons_state = NFS_CS_SESSION_INITING;
4562 
4563 	tbl = &session->fc_slot_table;
4564 	spin_lock_init(&tbl->slot_tbl_lock);
4565 	rpc_init_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
4566 
4567 	tbl = &session->bc_slot_table;
4568 	spin_lock_init(&tbl->slot_tbl_lock);
4569 	rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
4570 
4571 	session->clp = clp;
4572 	return session;
4573 }
4574 
4575 void nfs4_destroy_session(struct nfs4_session *session)
4576 {
4577 	nfs4_proc_destroy_session(session);
4578 	dprintk("%s Destroy backchannel for xprt %p\n",
4579 		__func__, session->clp->cl_rpcclient->cl_xprt);
4580 	xprt_destroy_backchannel(session->clp->cl_rpcclient->cl_xprt,
4581 				NFS41_BC_MIN_CALLBACKS);
4582 	nfs4_destroy_slot_tables(session);
4583 	kfree(session);
4584 }
4585 
4586 /*
4587  * Initialize the values to be used by the client in CREATE_SESSION.
4588  * If nfs4_init_session set the fore channel request and response sizes,
4589  * use them.
4590  *
4591  * Set the back channel max_resp_sz_cached to zero to force the client to
4592  * always set csa_cachethis to FALSE because the current implementation
4593  * of the back channel DRC only supports caching the CB_SEQUENCE operation.
4594  */
4595 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
4596 {
4597 	struct nfs4_session *session = args->client->cl_session;
4598 	unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz,
4599 		     mxresp_sz = session->fc_attrs.max_resp_sz;
4600 
4601 	if (mxrqst_sz == 0)
4602 		mxrqst_sz = NFS_MAX_FILE_IO_SIZE;
4603 	if (mxresp_sz == 0)
4604 		mxresp_sz = NFS_MAX_FILE_IO_SIZE;
4605 	/* Fore channel attributes */
4606 	args->fc_attrs.headerpadsz = 0;
4607 	args->fc_attrs.max_rqst_sz = mxrqst_sz;
4608 	args->fc_attrs.max_resp_sz = mxresp_sz;
4609 	args->fc_attrs.max_resp_sz_cached = mxresp_sz;
4610 	args->fc_attrs.max_ops = NFS4_MAX_OPS;
4611 	args->fc_attrs.max_reqs = session->clp->cl_rpcclient->cl_xprt->max_reqs;
4612 
4613 	dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
4614 		"max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
4615 		__func__,
4616 		args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
4617 		args->fc_attrs.max_resp_sz_cached, args->fc_attrs.max_ops,
4618 		args->fc_attrs.max_reqs);
4619 
4620 	/* Back channel attributes */
4621 	args->bc_attrs.headerpadsz = 0;
4622 	args->bc_attrs.max_rqst_sz = PAGE_SIZE;
4623 	args->bc_attrs.max_resp_sz = PAGE_SIZE;
4624 	args->bc_attrs.max_resp_sz_cached = 0;
4625 	args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
4626 	args->bc_attrs.max_reqs = 1;
4627 
4628 	dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
4629 		"max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
4630 		__func__,
4631 		args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
4632 		args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
4633 		args->bc_attrs.max_reqs);
4634 }
4635 
4636 static int _verify_channel_attr(char *chan, char *attr_name, u32 sent, u32 rcvd)
4637 {
4638 	if (rcvd <= sent)
4639 		return 0;
4640 	printk(KERN_WARNING "%s: Session INVALID: %s channel %s increased. "
4641 		"sent=%u rcvd=%u\n", __func__, chan, attr_name, sent, rcvd);
4642 	return -EINVAL;
4643 }
4644 
4645 #define _verify_fore_channel_attr(_name_) \
4646 	_verify_channel_attr("fore", #_name_, \
4647 			     args->fc_attrs._name_, \
4648 			     session->fc_attrs._name_)
4649 
4650 #define _verify_back_channel_attr(_name_) \
4651 	_verify_channel_attr("back", #_name_, \
4652 			     args->bc_attrs._name_, \
4653 			     session->bc_attrs._name_)
4654 
4655 /*
4656  * The server is not allowed to increase the fore channel header pad size,
4657  * maximum response size, or maximum number of operations.
4658  *
4659  * The back channel attributes are only negotiated down: we send what the
4660  * (back channel) server insists upon.
4661  */
4662 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
4663 				     struct nfs4_session *session)
4664 {
4665 	int ret = 0;
4666 
4667 	ret |= _verify_fore_channel_attr(headerpadsz);
4668 	ret |= _verify_fore_channel_attr(max_resp_sz);
4669 	ret |= _verify_fore_channel_attr(max_ops);
4670 
4671 	ret |= _verify_back_channel_attr(headerpadsz);
4672 	ret |= _verify_back_channel_attr(max_rqst_sz);
4673 	ret |= _verify_back_channel_attr(max_resp_sz);
4674 	ret |= _verify_back_channel_attr(max_resp_sz_cached);
4675 	ret |= _verify_back_channel_attr(max_ops);
4676 	ret |= _verify_back_channel_attr(max_reqs);
4677 
4678 	return ret;
4679 }
4680 
4681 static int _nfs4_proc_create_session(struct nfs_client *clp)
4682 {
4683 	struct nfs4_session *session = clp->cl_session;
4684 	struct nfs41_create_session_args args = {
4685 		.client = clp,
4686 		.cb_program = NFS4_CALLBACK,
4687 	};
4688 	struct nfs41_create_session_res res = {
4689 		.client = clp,
4690 	};
4691 	struct rpc_message msg = {
4692 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
4693 		.rpc_argp = &args,
4694 		.rpc_resp = &res,
4695 	};
4696 	int status;
4697 
4698 	nfs4_init_channel_attrs(&args);
4699 	args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
4700 
4701 	status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 0);
4702 
4703 	if (!status)
4704 		/* Verify the session's negotiated channel_attrs values */
4705 		status = nfs4_verify_channel_attrs(&args, session);
4706 	if (!status) {
4707 		/* Increment the clientid slot sequence id */
4708 		clp->cl_seqid++;
4709 	}
4710 
4711 	return status;
4712 }
4713 
4714 /*
4715  * Issues a CREATE_SESSION operation to the server.
4716  * It is the responsibility of the caller to verify the session is
4717  * expired before calling this routine.
4718  */
4719 int nfs4_proc_create_session(struct nfs_client *clp, int reset)
4720 {
4721 	int status;
4722 	unsigned *ptr;
4723 	struct nfs_fsinfo fsinfo;
4724 	struct nfs4_session *session = clp->cl_session;
4725 
4726 	dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
4727 
4728 	status = _nfs4_proc_create_session(clp);
4729 	if (status)
4730 		goto out;
4731 
4732 	/* Init or reset the fore channel */
4733 	if (reset)
4734 		status = nfs4_reset_slot_tables(session);
4735 	else
4736 		status = nfs4_init_slot_tables(session);
4737 	dprintk("fore channel slot table initialization returned %d\n", status);
4738 	if (status)
4739 		goto out;
4740 
4741 	ptr = (unsigned *)&session->sess_id.data[0];
4742 	dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
4743 		clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
4744 
4745 	if (reset)
4746 		/* Lease time is already set */
4747 		goto out;
4748 
4749 	/* Get the lease time */
4750 	status = nfs4_proc_get_lease_time(clp, &fsinfo);
4751 	if (status == 0) {
4752 		/* Update lease time and schedule renewal */
4753 		spin_lock(&clp->cl_lock);
4754 		clp->cl_lease_time = fsinfo.lease_time * HZ;
4755 		clp->cl_last_renewal = jiffies;
4756 		clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
4757 		spin_unlock(&clp->cl_lock);
4758 
4759 		nfs4_schedule_state_renewal(clp);
4760 	}
4761 out:
4762 	dprintk("<-- %s\n", __func__);
4763 	return status;
4764 }
4765 
4766 /*
4767  * Issue the over-the-wire RPC DESTROY_SESSION.
4768  * The caller must serialize access to this routine.
4769  */
4770 int nfs4_proc_destroy_session(struct nfs4_session *session)
4771 {
4772 	int status = 0;
4773 	struct rpc_message msg;
4774 
4775 	dprintk("--> nfs4_proc_destroy_session\n");
4776 
4777 	/* session is still being set up */
4778 	if (session->clp->cl_cons_state != NFS_CS_READY)
4779 		return status;
4780 
4781 	msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION];
4782 	msg.rpc_argp = session;
4783 	msg.rpc_resp = NULL;
4784 	msg.rpc_cred = NULL;
4785 	status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 0);
4786 
4787 	if (status)
4788 		printk(KERN_WARNING
4789 			"Got error %d from the server on DESTROY_SESSION. "
4790 			"Session has been destroyed regardless...\n", status);
4791 
4792 	dprintk("<-- nfs4_proc_destroy_session\n");
4793 	return status;
4794 }
4795 
4796 /*
4797  * Renew the cl_session lease.
4798  */
4799 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
4800 {
4801 	struct nfs4_sequence_args args;
4802 	struct nfs4_sequence_res res;
4803 
4804 	struct rpc_message msg = {
4805 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
4806 		.rpc_argp = &args,
4807 		.rpc_resp = &res,
4808 		.rpc_cred = cred,
4809 	};
4810 
4811 	args.sa_cache_this = 0;
4812 
4813 	return nfs4_call_sync_sequence(clp, clp->cl_rpcclient, &msg, &args,
4814 				       &res, 0);
4815 }
4816 
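/*
 * Completion callback for the asynchronous lease-renewing SEQUENCE: on
 * error the RPC may be restarted; otherwise free the session slot, the
 * dynamically allocated args/res and the credential reference.
 */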
4817 void nfs41_sequence_call_done(struct rpc_task *task, void *data)
4818 {
4819 	struct nfs_client *clp = (struct nfs_client *)data;
4820 
4821 	nfs41_sequence_done(clp, task->tk_msg.rpc_resp, task->tk_status);
4822 
4823 	if (task->tk_status < 0) {
4824 		dprintk("%s ERROR %d\n", __func__, task->tk_status);
4825 
4826 		if (_nfs4_async_handle_error(task, NULL, clp, NULL)
4827 								== -EAGAIN) {
4828 			nfs4_restart_rpc(task, clp);
4829 			return;
4830 		}
4831 	}
4832 	nfs41_sequence_free_slot(clp, task->tk_msg.rpc_resp);
4833 	dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
4834 
4835 	put_rpccred(task->tk_msg.rpc_cred);
4836 	kfree(task->tk_msg.rpc_argp);
4837 	kfree(task->tk_msg.rpc_resp);
4838 
4839 	dprintk("<-- %s\n", __func__);
4840 }
4841 
4842 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
4843 {
4844 	struct nfs_client *clp;
4845 	struct nfs4_sequence_args *args;
4846 	struct nfs4_sequence_res *res;
4847 
4848 	clp = (struct nfs_client *)data;
4849 	args = task->tk_msg.rpc_argp;
4850 	res = task->tk_msg.rpc_resp;
4851 
4852 	if (nfs4_setup_sequence(clp, args, res, 0, task))
4853 		return;
4854 	rpc_call_start(task);
4855 }
4856 
4857 static const struct rpc_call_ops nfs41_sequence_ops = {
4858 	.rpc_call_done = nfs41_sequence_call_done,
4859 	.rpc_call_prepare = nfs41_sequence_prepare,
4860 };
4861 
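/*
 * Fire off an asynchronous SEQUENCE purely to renew the lease.  The
 * argument and result structures are allocated here and freed in
 * nfs41_sequence_call_done().
 */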
4862 static int nfs41_proc_async_sequence(struct nfs_client *clp,
4863 				     struct rpc_cred *cred)
4864 {
4865 	struct nfs4_sequence_args *args;
4866 	struct nfs4_sequence_res *res;
4867 	struct rpc_message msg = {
4868 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
4869 		.rpc_cred = cred,
4870 	};
4871 
4872 	args = kzalloc(sizeof(*args), GFP_KERNEL);
4873 	if (!args)
4874 		return -ENOMEM;
4875 	res = kzalloc(sizeof(*res), GFP_KERNEL);
4876 	if (!res) {
4877 		kfree(args);
4878 		return -ENOMEM;
4879 	}
4880 	res->sr_slotid = NFS4_MAX_SLOT_TABLE;
4881 	msg.rpc_argp = args;
4882 	msg.rpc_resp = res;
4883 
4884 	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
4885 			      &nfs41_sequence_ops, (void *)clp);
4886 }
4887 
4888 #endif /* CONFIG_NFS_V4_1 */
4889 
4890 struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
4891 	.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
4892 	.state_flag_bit	= NFS_STATE_RECLAIM_REBOOT,
4893 	.recover_open	= nfs4_open_reclaim,
4894 	.recover_lock	= nfs4_lock_reclaim,
4895 	.establish_clid = nfs4_init_clientid,
4896 	.get_clid_cred	= nfs4_get_setclientid_cred,
4897 };
4898 
4899 #if defined(CONFIG_NFS_V4_1)
4900 struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
4901 	.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
4902 	.state_flag_bit	= NFS_STATE_RECLAIM_REBOOT,
4903 	.recover_open	= nfs4_open_reclaim,
4904 	.recover_lock	= nfs4_lock_reclaim,
4905 	.establish_clid = nfs4_proc_exchange_id,
4906 	.get_clid_cred	= nfs4_get_exchange_id_cred,
4907 };
4908 #endif /* CONFIG_NFS_V4_1 */
4909 
4910 struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
4911 	.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
4912 	.state_flag_bit	= NFS_STATE_RECLAIM_NOGRACE,
4913 	.recover_open	= nfs4_open_expired,
4914 	.recover_lock	= nfs4_lock_expired,
4915 	.establish_clid = nfs4_init_clientid,
4916 	.get_clid_cred	= nfs4_get_setclientid_cred,
4917 };
4918 
4919 #if defined(CONFIG_NFS_V4_1)
4920 struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
4921 	.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
4922 	.state_flag_bit	= NFS_STATE_RECLAIM_NOGRACE,
4923 	.recover_open	= nfs4_open_expired,
4924 	.recover_lock	= nfs4_lock_expired,
4925 	.establish_clid = nfs4_proc_exchange_id,
4926 	.get_clid_cred	= nfs4_get_exchange_id_cred,
4927 };
4928 #endif /* CONFIG_NFS_V4_1 */
4929 
4930 struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
4931 	.sched_state_renewal = nfs4_proc_async_renew,
4932 	.get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
4933 	.renew_lease = nfs4_proc_renew,
4934 };
4935 
4936 #if defined(CONFIG_NFS_V4_1)
4937 struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
4938 	.sched_state_renewal = nfs41_proc_async_sequence,
4939 	.get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
4940 	.renew_lease = nfs4_proc_sequence,
4941 };
4942 #endif
4943 
4944 /*
4945  * Per minor version reboot and network partition recovery ops
4946  */
4947 
4948 struct nfs4_state_recovery_ops *nfs4_reboot_recovery_ops[] = {
4949 	&nfs40_reboot_recovery_ops,
4950 #if defined(CONFIG_NFS_V4_1)
4951 	&nfs41_reboot_recovery_ops,
4952 #endif
4953 };
4954 
4955 struct nfs4_state_recovery_ops *nfs4_nograce_recovery_ops[] = {
4956 	&nfs40_nograce_recovery_ops,
4957 #if defined(CONFIG_NFS_V4_1)
4958 	&nfs41_nograce_recovery_ops,
4959 #endif
4960 };
4961 
4962 struct nfs4_state_maintenance_ops *nfs4_state_renewal_ops[] = {
4963 	&nfs40_state_renewal_ops,
4964 #if defined(CONFIG_NFS_V4_1)
4965 	&nfs41_state_renewal_ops,
4966 #endif
4967 };
4968 
4969 static const struct inode_operations nfs4_file_inode_operations = {
4970 	.permission	= nfs_permission,
4971 	.getattr	= nfs_getattr,
4972 	.setattr	= nfs_setattr,
4973 	.getxattr	= nfs4_getxattr,
4974 	.setxattr	= nfs4_setxattr,
4975 	.listxattr	= nfs4_listxattr,
4976 };
4977 
4978 const struct nfs_rpc_ops nfs_v4_clientops = {
4979 	.version	= 4,			/* protocol version */
4980 	.dentry_ops	= &nfs4_dentry_operations,
4981 	.dir_inode_ops	= &nfs4_dir_inode_operations,
4982 	.file_inode_ops	= &nfs4_file_inode_operations,
4983 	.getroot	= nfs4_proc_get_root,
4984 	.getattr	= nfs4_proc_getattr,
4985 	.setattr	= nfs4_proc_setattr,
4986 	.lookupfh	= nfs4_proc_lookupfh,
4987 	.lookup		= nfs4_proc_lookup,
4988 	.access		= nfs4_proc_access,
4989 	.readlink	= nfs4_proc_readlink,
4990 	.create		= nfs4_proc_create,
4991 	.remove		= nfs4_proc_remove,
4992 	.unlink_setup	= nfs4_proc_unlink_setup,
4993 	.unlink_done	= nfs4_proc_unlink_done,
4994 	.rename		= nfs4_proc_rename,
4995 	.link		= nfs4_proc_link,
4996 	.symlink	= nfs4_proc_symlink,
4997 	.mkdir		= nfs4_proc_mkdir,
4998 	.rmdir		= nfs4_proc_remove,
4999 	.readdir	= nfs4_proc_readdir,
5000 	.mknod		= nfs4_proc_mknod,
5001 	.statfs		= nfs4_proc_statfs,
5002 	.fsinfo		= nfs4_proc_fsinfo,
5003 	.pathconf	= nfs4_proc_pathconf,
5004 	.set_capabilities = nfs4_server_capabilities,
5005 	.decode_dirent	= nfs4_decode_dirent,
5006 	.read_setup	= nfs4_proc_read_setup,
5007 	.read_done	= nfs4_read_done,
5008 	.write_setup	= nfs4_proc_write_setup,
5009 	.write_done	= nfs4_write_done,
5010 	.commit_setup	= nfs4_proc_commit_setup,
5011 	.commit_done	= nfs4_commit_done,
5012 	.lock		= nfs4_proc_lock,
5013 	.clear_acl_cache = nfs4_zap_acl_attr,
5014 	.close_context  = nfs4_close_context,
5015 };
5016 
5017 /*
5018  * Local variables:
5019  *  c-basic-offset: 8
5020  * End:
5021  */
5022