xref: /linux/fs/nfs/nfs4proc.c (revision 2241f81c91f211b512bd2c3a26a4a74258d0e008)
1 /*
2  *  fs/nfs/nfs4proc.c
3  *
4  *  Client-side procedure declarations for NFSv4.
5  *
6  *  Copyright (c) 2002 The Regents of the University of Michigan.
7  *  All rights reserved.
8  *
9  *  Kendrick Smith <kmsmith@umich.edu>
10  *  Andy Adamson   <andros@umich.edu>
11  *
12  *  Redistribution and use in source and binary forms, with or without
13  *  modification, are permitted provided that the following conditions
14  *  are met:
15  *
16  *  1. Redistributions of source code must retain the above copyright
17  *     notice, this list of conditions and the following disclaimer.
18  *  2. Redistributions in binary form must reproduce the above copyright
19  *     notice, this list of conditions and the following disclaimer in the
20  *     documentation and/or other materials provided with the distribution.
21  *  3. Neither the name of the University nor the names of its
22  *     contributors may be used to endorse or promote products derived
23  *     from this software without specific prior written permission.
24  *
25  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28  *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32  *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33  *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34  *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 #include <linux/mm.h>
39 #include <linux/delay.h>
40 #include <linux/errno.h>
41 #include <linux/string.h>
42 #include <linux/ratelimit.h>
43 #include <linux/printk.h>
44 #include <linux/slab.h>
45 #include <linux/sunrpc/clnt.h>
46 #include <linux/nfs.h>
47 #include <linux/nfs4.h>
48 #include <linux/nfs_fs.h>
49 #include <linux/nfs_page.h>
50 #include <linux/nfs_mount.h>
51 #include <linux/namei.h>
52 #include <linux/mount.h>
53 #include <linux/module.h>
54 #include <linux/xattr.h>
55 #include <linux/utsname.h>
56 #include <linux/freezer.h>
57 #include <linux/iversion.h>
58 
59 #include "nfs4_fs.h"
60 #include "delegation.h"
61 #include "internal.h"
62 #include "iostat.h"
63 #include "callback.h"
64 #include "pnfs.h"
65 #include "netns.h"
66 #include "sysfs.h"
67 #include "nfs4idmap.h"
68 #include "nfs4session.h"
69 #include "fscache.h"
70 #include "nfs42.h"
71 
72 #include "nfs4trace.h"
73 
74 #define NFSDBG_FACILITY		NFSDBG_PROC
75 
76 #define NFS4_BITMASK_SZ		3
77 
78 #define NFS4_POLL_RETRY_MIN	(HZ/10)
79 #define NFS4_POLL_RETRY_MAX	(15*HZ)
80 
81 /* file attributes which can be mapped to nfs attributes */
82 #define NFS4_VALID_ATTRS (ATTR_MODE \
83 	| ATTR_UID \
84 	| ATTR_GID \
85 	| ATTR_SIZE \
86 	| ATTR_ATIME \
87 	| ATTR_MTIME \
88 	| ATTR_CTIME \
89 	| ATTR_ATIME_SET \
90 	| ATTR_MTIME_SET)
91 
92 struct nfs4_opendata;
93 static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
94 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
95 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
96 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
97 			      struct nfs_fattr *fattr, struct inode *inode);
98 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
99 			    struct nfs_fattr *fattr, struct iattr *sattr,
100 			    struct nfs_open_context *ctx, struct nfs4_label *ilabel);
101 #ifdef CONFIG_NFS_V4_1
102 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
103 		const struct cred *cred,
104 		struct nfs4_slot *slot,
105 		bool is_privileged);
106 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
107 		const struct cred *);
108 static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *,
109 		const struct cred *, bool);
110 #endif
111 
112 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
113 static inline struct nfs4_label *
114 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
115 	struct iattr *sattr, struct nfs4_label *label)
116 {
117 	int err;
118 
119 	if (label == NULL)
120 		return NULL;
121 
122 	if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
123 		return NULL;
124 
125 	label->lfs = 0;
126 	label->pi = 0;
127 	label->len = 0;
128 	label->label = NULL;
129 
130 	err = security_dentry_init_security(dentry, sattr->ia_mode,
131 				&dentry->d_name, NULL,
132 				(void **)&label->label, &label->len);
133 	if (err == 0)
134 		return label;
135 
136 	return NULL;
137 }
138 static inline void
139 nfs4_label_release_security(struct nfs4_label *label)
140 {
141 	if (label)
142 		security_release_secctx(label->label, label->len);
143 }
144 static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
145 {
146 	if (label)
147 		return server->attr_bitmask;
148 
149 	return server->attr_bitmask_nl;
150 }
151 #else
152 static inline struct nfs4_label *
153 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
154 	struct iattr *sattr, struct nfs4_label *l)
155 { return NULL; }
156 static inline void
157 nfs4_label_release_security(struct nfs4_label *label)
158 { return; }
159 static inline u32 *
160 nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
161 { return server->attr_bitmask; }
162 #endif
163 
164 /* Prevent leaks of NFSv4 errors into userland */
165 static int nfs4_map_errors(int err)
166 {
167 	if (err >= -1000)
168 		return err;
169 	switch (err) {
170 	case -NFS4ERR_RESOURCE:
171 	case -NFS4ERR_LAYOUTTRYLATER:
172 	case -NFS4ERR_RECALLCONFLICT:
173 		return -EREMOTEIO;
174 	case -NFS4ERR_WRONGSEC:
175 	case -NFS4ERR_WRONG_CRED:
176 		return -EPERM;
177 	case -NFS4ERR_BADOWNER:
178 	case -NFS4ERR_BADNAME:
179 		return -EINVAL;
180 	case -NFS4ERR_SHARE_DENIED:
181 		return -EACCES;
182 	case -NFS4ERR_MINOR_VERS_MISMATCH:
183 		return -EPROTONOSUPPORT;
184 	case -NFS4ERR_FILE_OPEN:
185 		return -EBUSY;
186 	case -NFS4ERR_NOT_SAME:
187 		return -ENOTSYNC;
188 	default:
189 		dprintk("%s could not handle NFSv4 error %d\n",
190 				__func__, -err);
191 		break;
192 	}
193 	return -EIO;
194 }
195 
196 /*
197  * This is our standard bitmap for GETATTR requests.
198  */
199 const u32 nfs4_fattr_bitmap[3] = {
200 	FATTR4_WORD0_TYPE
201 	| FATTR4_WORD0_CHANGE
202 	| FATTR4_WORD0_SIZE
203 	| FATTR4_WORD0_FSID
204 	| FATTR4_WORD0_FILEID,
205 	FATTR4_WORD1_MODE
206 	| FATTR4_WORD1_NUMLINKS
207 	| FATTR4_WORD1_OWNER
208 	| FATTR4_WORD1_OWNER_GROUP
209 	| FATTR4_WORD1_RAWDEV
210 	| FATTR4_WORD1_SPACE_USED
211 	| FATTR4_WORD1_TIME_ACCESS
212 	| FATTR4_WORD1_TIME_METADATA
213 	| FATTR4_WORD1_TIME_MODIFY
214 	| FATTR4_WORD1_MOUNTED_ON_FILEID,
215 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
216 	FATTR4_WORD2_SECURITY_LABEL
217 #endif
218 };
219 
220 static const u32 nfs4_pnfs_open_bitmap[3] = {
221 	FATTR4_WORD0_TYPE
222 	| FATTR4_WORD0_CHANGE
223 	| FATTR4_WORD0_SIZE
224 	| FATTR4_WORD0_FSID
225 	| FATTR4_WORD0_FILEID,
226 	FATTR4_WORD1_MODE
227 	| FATTR4_WORD1_NUMLINKS
228 	| FATTR4_WORD1_OWNER
229 	| FATTR4_WORD1_OWNER_GROUP
230 	| FATTR4_WORD1_RAWDEV
231 	| FATTR4_WORD1_SPACE_USED
232 	| FATTR4_WORD1_TIME_ACCESS
233 	| FATTR4_WORD1_TIME_METADATA
234 	| FATTR4_WORD1_TIME_MODIFY,
235 	FATTR4_WORD2_MDSTHRESHOLD
236 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
237 	| FATTR4_WORD2_SECURITY_LABEL
238 #endif
239 };
240 
241 static const u32 nfs4_open_noattr_bitmap[3] = {
242 	FATTR4_WORD0_TYPE
243 	| FATTR4_WORD0_FILEID,
244 };
245 
246 const u32 nfs4_statfs_bitmap[3] = {
247 	FATTR4_WORD0_FILES_AVAIL
248 	| FATTR4_WORD0_FILES_FREE
249 	| FATTR4_WORD0_FILES_TOTAL,
250 	FATTR4_WORD1_SPACE_AVAIL
251 	| FATTR4_WORD1_SPACE_FREE
252 	| FATTR4_WORD1_SPACE_TOTAL
253 };
254 
255 const u32 nfs4_pathconf_bitmap[3] = {
256 	FATTR4_WORD0_MAXLINK
257 	| FATTR4_WORD0_MAXNAME,
258 	0
259 };
260 
261 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
262 			| FATTR4_WORD0_MAXREAD
263 			| FATTR4_WORD0_MAXWRITE
264 			| FATTR4_WORD0_LEASE_TIME,
265 			FATTR4_WORD1_TIME_DELTA
266 			| FATTR4_WORD1_FS_LAYOUT_TYPES,
267 			FATTR4_WORD2_LAYOUT_BLKSIZE
268 			| FATTR4_WORD2_CLONE_BLKSIZE
269 			| FATTR4_WORD2_CHANGE_ATTR_TYPE
270 			| FATTR4_WORD2_XATTR_SUPPORT
271 };
272 
273 const u32 nfs4_fs_locations_bitmap[3] = {
274 	FATTR4_WORD0_CHANGE
275 	| FATTR4_WORD0_SIZE
276 	| FATTR4_WORD0_FSID
277 	| FATTR4_WORD0_FILEID
278 	| FATTR4_WORD0_FS_LOCATIONS,
279 	FATTR4_WORD1_OWNER
280 	| FATTR4_WORD1_OWNER_GROUP
281 	| FATTR4_WORD1_RAWDEV
282 	| FATTR4_WORD1_SPACE_USED
283 	| FATTR4_WORD1_TIME_ACCESS
284 	| FATTR4_WORD1_TIME_METADATA
285 	| FATTR4_WORD1_TIME_MODIFY
286 	| FATTR4_WORD1_MOUNTED_ON_FILEID,
287 };
288 
289 static void nfs4_bitmap_copy_adjust(__u32 *dst, const __u32 *src,
290 				    struct inode *inode, unsigned long flags)
291 {
292 	unsigned long cache_validity;
293 
294 	memcpy(dst, src, NFS4_BITMASK_SZ*sizeof(*dst));
295 	if (!inode || !nfs4_have_delegation(inode, FMODE_READ))
296 		return;
297 
298 	cache_validity = READ_ONCE(NFS_I(inode)->cache_validity) | flags;
299 
300 	/* Remove the attributes over which we have full control */
301 	dst[1] &= ~FATTR4_WORD1_RAWDEV;
302 	if (!(cache_validity & NFS_INO_INVALID_SIZE))
303 		dst[0] &= ~FATTR4_WORD0_SIZE;
304 
305 	if (!(cache_validity & NFS_INO_INVALID_CHANGE))
306 		dst[0] &= ~FATTR4_WORD0_CHANGE;
307 
308 	if (!(cache_validity & NFS_INO_INVALID_MODE))
309 		dst[1] &= ~FATTR4_WORD1_MODE;
310 	if (!(cache_validity & NFS_INO_INVALID_OTHER))
311 		dst[1] &= ~(FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP);
312 }
313 
314 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
315 		struct nfs4_readdir_arg *readdir)
316 {
317 	unsigned int attrs = FATTR4_WORD0_FILEID | FATTR4_WORD0_TYPE;
318 	__be32 *start, *p;
319 
320 	if (cookie > 2) {
321 		readdir->cookie = cookie;
322 		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
323 		return;
324 	}
325 
326 	readdir->cookie = 0;
327 	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
328 	if (cookie == 2)
329 		return;
330 
331 	/*
332 	 * NFSv4 servers do not return entries for '.' and '..'
333 	 * Therefore, we fake these entries here.  We let '.'
334 	 * have cookie 0 and '..' have cookie 1.  Note that
335 	 * when talking to the server, we always send cookie 0
336 	 * instead of 1 or 2.
337 	 */
338 	start = p = kmap_atomic(*readdir->pages);
339 
340 	if (cookie == 0) {
341 		*p++ = xdr_one;                                  /* next */
342 		*p++ = xdr_zero;                   /* cookie, first word */
343 		*p++ = xdr_one;                   /* cookie, second word */
344 		*p++ = xdr_one;                             /* entry len */
345 		memcpy(p, ".\0\0\0", 4);                        /* entry */
346 		p++;
347 		*p++ = xdr_one;                         /* bitmap length */
348 		*p++ = htonl(attrs);                           /* bitmap */
349 		*p++ = htonl(12);             /* attribute buffer length */
350 		*p++ = htonl(NF4DIR);
351 		p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
352 	}
353 
354 	*p++ = xdr_one;                                  /* next */
355 	*p++ = xdr_zero;                   /* cookie, first word */
356 	*p++ = xdr_two;                   /* cookie, second word */
357 	*p++ = xdr_two;                             /* entry len */
358 	memcpy(p, "..\0\0", 4);                         /* entry */
359 	p++;
360 	*p++ = xdr_one;                         /* bitmap length */
361 	*p++ = htonl(attrs);                           /* bitmap */
362 	*p++ = htonl(12);             /* attribute buffer length */
363 	*p++ = htonl(NF4DIR);
364 	p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
365 
366 	readdir->pgbase = (char *)p - (char *)start;
367 	readdir->count -= readdir->pgbase;
368 	kunmap_atomic(start);
369 }
370 
371 static void nfs4_fattr_set_prechange(struct nfs_fattr *fattr, u64 version)
372 {
373 	if (!(fattr->valid & NFS_ATTR_FATTR_PRECHANGE)) {
374 		fattr->pre_change_attr = version;
375 		fattr->valid |= NFS_ATTR_FATTR_PRECHANGE;
376 	}
377 }
378 
379 static void nfs4_test_and_free_stateid(struct nfs_server *server,
380 		nfs4_stateid *stateid,
381 		const struct cred *cred)
382 {
383 	const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;
384 
385 	ops->test_and_free_expired(server, stateid, cred);
386 }
387 
388 static void __nfs4_free_revoked_stateid(struct nfs_server *server,
389 		nfs4_stateid *stateid,
390 		const struct cred *cred)
391 {
392 	stateid->type = NFS4_REVOKED_STATEID_TYPE;
393 	nfs4_test_and_free_stateid(server, stateid, cred);
394 }
395 
396 static void nfs4_free_revoked_stateid(struct nfs_server *server,
397 		const nfs4_stateid *stateid,
398 		const struct cred *cred)
399 {
400 	nfs4_stateid tmp;
401 
402 	nfs4_stateid_copy(&tmp, stateid);
403 	__nfs4_free_revoked_stateid(server, &tmp, cred);
404 }
405 
406 static long nfs4_update_delay(long *timeout)
407 {
408 	long ret;
409 	if (!timeout)
410 		return NFS4_POLL_RETRY_MAX;
411 	if (*timeout <= 0)
412 		*timeout = NFS4_POLL_RETRY_MIN;
413 	if (*timeout > NFS4_POLL_RETRY_MAX)
414 		*timeout = NFS4_POLL_RETRY_MAX;
415 	ret = *timeout;
416 	*timeout <<= 1;
417 	return ret;
418 }
419 
420 static int nfs4_delay_killable(long *timeout)
421 {
422 	might_sleep();
423 
424 	__set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
425 	schedule_timeout(nfs4_update_delay(timeout));
426 	if (!__fatal_signal_pending(current))
427 		return 0;
428 	return -EINTR;
429 }
430 
431 static int nfs4_delay_interruptible(long *timeout)
432 {
433 	might_sleep();
434 
435 	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE_UNSAFE);
436 	schedule_timeout(nfs4_update_delay(timeout));
437 	if (!signal_pending(current))
438 		return 0;
439 	return __fatal_signal_pending(current) ? -EINTR :-ERESTARTSYS;
440 }
441 
442 static int nfs4_delay(long *timeout, bool interruptible)
443 {
444 	if (interruptible)
445 		return nfs4_delay_interruptible(timeout);
446 	return nfs4_delay_killable(timeout);
447 }
448 
449 static const nfs4_stateid *
450 nfs4_recoverable_stateid(const nfs4_stateid *stateid)
451 {
452 	if (!stateid)
453 		return NULL;
454 	switch (stateid->type) {
455 	case NFS4_OPEN_STATEID_TYPE:
456 	case NFS4_LOCK_STATEID_TYPE:
457 	case NFS4_DELEGATION_STATEID_TYPE:
458 		return stateid;
459 	default:
460 		break;
461 	}
462 	return NULL;
463 }
464 
465 /* This is the error handling routine for processes that are allowed
466  * to sleep.
467  */
468 static int nfs4_do_handle_exception(struct nfs_server *server,
469 		int errorcode, struct nfs4_exception *exception)
470 {
471 	struct nfs_client *clp = server->nfs_client;
472 	struct nfs4_state *state = exception->state;
473 	const nfs4_stateid *stateid;
474 	struct inode *inode = exception->inode;
475 	int ret = errorcode;
476 
477 	exception->delay = 0;
478 	exception->recovering = 0;
479 	exception->retry = 0;
480 
481 	stateid = nfs4_recoverable_stateid(exception->stateid);
482 	if (stateid == NULL && state != NULL)
483 		stateid = nfs4_recoverable_stateid(&state->stateid);
484 
485 	switch(errorcode) {
486 		case 0:
487 			return 0;
488 		case -NFS4ERR_BADHANDLE:
489 		case -ESTALE:
490 			if (inode != NULL && S_ISREG(inode->i_mode))
491 				pnfs_destroy_layout(NFS_I(inode));
492 			break;
493 		case -NFS4ERR_DELEG_REVOKED:
494 		case -NFS4ERR_ADMIN_REVOKED:
495 		case -NFS4ERR_EXPIRED:
496 		case -NFS4ERR_BAD_STATEID:
497 		case -NFS4ERR_PARTNER_NO_AUTH:
498 			if (inode != NULL && stateid != NULL) {
499 				nfs_inode_find_state_and_recover(inode,
500 						stateid);
501 				goto wait_on_recovery;
502 			}
503 			fallthrough;
504 		case -NFS4ERR_OPENMODE:
505 			if (inode) {
506 				int err;
507 
508 				err = nfs_async_inode_return_delegation(inode,
509 						stateid);
510 				if (err == 0)
511 					goto wait_on_recovery;
512 				if (stateid != NULL && stateid->type == NFS4_DELEGATION_STATEID_TYPE) {
513 					exception->retry = 1;
514 					break;
515 				}
516 			}
517 			if (state == NULL)
518 				break;
519 			ret = nfs4_schedule_stateid_recovery(server, state);
520 			if (ret < 0)
521 				break;
522 			goto wait_on_recovery;
523 		case -NFS4ERR_STALE_STATEID:
524 		case -NFS4ERR_STALE_CLIENTID:
525 			nfs4_schedule_lease_recovery(clp);
526 			goto wait_on_recovery;
527 		case -NFS4ERR_MOVED:
528 			ret = nfs4_schedule_migration_recovery(server);
529 			if (ret < 0)
530 				break;
531 			goto wait_on_recovery;
532 		case -NFS4ERR_LEASE_MOVED:
533 			nfs4_schedule_lease_moved_recovery(clp);
534 			goto wait_on_recovery;
535 #if defined(CONFIG_NFS_V4_1)
536 		case -NFS4ERR_BADSESSION:
537 		case -NFS4ERR_BADSLOT:
538 		case -NFS4ERR_BAD_HIGH_SLOT:
539 		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
540 		case -NFS4ERR_DEADSESSION:
541 		case -NFS4ERR_SEQ_FALSE_RETRY:
542 		case -NFS4ERR_SEQ_MISORDERED:
543 			/* Handled in nfs41_sequence_process() */
544 			goto wait_on_recovery;
545 #endif /* defined(CONFIG_NFS_V4_1) */
546 		case -NFS4ERR_FILE_OPEN:
547 			if (exception->timeout > HZ) {
548 				/* We have retried a decent amount, time to
549 				 * fail
550 				 */
551 				ret = -EBUSY;
552 				break;
553 			}
554 			fallthrough;
555 		case -NFS4ERR_DELAY:
556 			nfs_inc_server_stats(server, NFSIOS_DELAY);
557 			fallthrough;
558 		case -NFS4ERR_GRACE:
559 		case -NFS4ERR_LAYOUTTRYLATER:
560 		case -NFS4ERR_RECALLCONFLICT:
561 			exception->delay = 1;
562 			return 0;
563 
564 		case -NFS4ERR_RETRY_UNCACHED_REP:
565 		case -NFS4ERR_OLD_STATEID:
566 			exception->retry = 1;
567 			break;
568 		case -NFS4ERR_BADOWNER:
569 			/* The following works around a Linux server bug! */
570 		case -NFS4ERR_BADNAME:
571 			if (server->caps & NFS_CAP_UIDGID_NOMAP) {
572 				server->caps &= ~NFS_CAP_UIDGID_NOMAP;
573 				exception->retry = 1;
574 				printk(KERN_WARNING "NFS: v4 server %s "
575 						"does not accept raw "
576 						"uid/gids. "
577 						"Reenabling the idmapper.\n",
578 						server->nfs_client->cl_hostname);
579 			}
580 	}
581 	/* We failed to handle the error */
582 	return nfs4_map_errors(ret);
583 wait_on_recovery:
584 	exception->recovering = 1;
585 	return 0;
586 }
587 
588 /*
589  * Track the number of NFS4ERR_DELAY related retransmissions and return
590  * EAGAIN if the 'softerr' mount option is set, and we've exceeded the limit
591  * set by 'nfs_delay_retrans'.
592  */
593 static int nfs4_exception_should_retrans(const struct nfs_server *server,
594 					 struct nfs4_exception *exception)
595 {
596 	if (server->flags & NFS_MOUNT_SOFTERR && nfs_delay_retrans >= 0) {
597 		if (exception->retrans++ >= (unsigned short)nfs_delay_retrans)
598 			return -EAGAIN;
599 	}
600 	return 0;
601 }
602 
603 /* This is the error handling routine for processes that are allowed
604  * to sleep.
605  */
606 int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
607 {
608 	struct nfs_client *clp = server->nfs_client;
609 	int ret;
610 
611 	ret = nfs4_do_handle_exception(server, errorcode, exception);
612 	if (exception->delay) {
613 		int ret2 = nfs4_exception_should_retrans(server, exception);
614 		if (ret2 < 0) {
615 			exception->retry = 0;
616 			return ret2;
617 		}
618 		ret = nfs4_delay(&exception->timeout,
619 				exception->interruptible);
620 		goto out_retry;
621 	}
622 	if (exception->recovering) {
623 		if (exception->task_is_privileged)
624 			return -EDEADLOCK;
625 		ret = nfs4_wait_clnt_recover(clp);
626 		if (test_bit(NFS_MIG_FAILED, &server->mig_status))
627 			return -EIO;
628 		goto out_retry;
629 	}
630 	return ret;
631 out_retry:
632 	if (ret == 0)
633 		exception->retry = 1;
634 	return ret;
635 }
636 
637 static int
638 nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
639 		int errorcode, struct nfs4_exception *exception)
640 {
641 	struct nfs_client *clp = server->nfs_client;
642 	int ret;
643 
644 	ret = nfs4_do_handle_exception(server, errorcode, exception);
645 	if (exception->delay) {
646 		int ret2 = nfs4_exception_should_retrans(server, exception);
647 		if (ret2 < 0) {
648 			exception->retry = 0;
649 			return ret2;
650 		}
651 		rpc_delay(task, nfs4_update_delay(&exception->timeout));
652 		goto out_retry;
653 	}
654 	if (exception->recovering) {
655 		if (exception->task_is_privileged)
656 			return -EDEADLOCK;
657 		rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
658 		if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
659 			rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
660 		goto out_retry;
661 	}
662 	if (test_bit(NFS_MIG_FAILED, &server->mig_status))
663 		ret = -EIO;
664 	return ret;
665 out_retry:
666 	if (ret == 0) {
667 		exception->retry = 1;
668 		/*
669 		 * For NFS4ERR_MOVED, the client transport will need to
670 		 * be recomputed after migration recovery has completed.
671 		 */
672 		if (errorcode == -NFS4ERR_MOVED)
673 			rpc_task_release_transport(task);
674 	}
675 	return ret;
676 }
677 
678 int
679 nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
680 			struct nfs4_state *state, long *timeout)
681 {
682 	struct nfs4_exception exception = {
683 		.state = state,
684 	};
685 
686 	if (task->tk_status >= 0)
687 		return 0;
688 	if (timeout)
689 		exception.timeout = *timeout;
690 	task->tk_status = nfs4_async_handle_exception(task, server,
691 			task->tk_status,
692 			&exception);
693 	if (exception.delay && timeout)
694 		*timeout = exception.timeout;
695 	if (exception.retry)
696 		return -EAGAIN;
697 	return 0;
698 }
699 
700 /*
701  * Return 'true' if 'clp' is using an rpc_client that is integrity protected
702  * or 'false' otherwise.
703  */
704 static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
705 {
706 	rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
707 	return (flavor == RPC_AUTH_GSS_KRB5I) || (flavor == RPC_AUTH_GSS_KRB5P);
708 }
709 
710 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
711 {
712 	spin_lock(&clp->cl_lock);
713 	if (time_before(clp->cl_last_renewal,timestamp))
714 		clp->cl_last_renewal = timestamp;
715 	spin_unlock(&clp->cl_lock);
716 }
717 
718 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
719 {
720 	struct nfs_client *clp = server->nfs_client;
721 
722 	if (!nfs4_has_session(clp))
723 		do_renew_lease(clp, timestamp);
724 }
725 
726 struct nfs4_call_sync_data {
727 	const struct nfs_server *seq_server;
728 	struct nfs4_sequence_args *seq_args;
729 	struct nfs4_sequence_res *seq_res;
730 };
731 
732 void nfs4_init_sequence(struct nfs4_sequence_args *args,
733 			struct nfs4_sequence_res *res, int cache_reply,
734 			int privileged)
735 {
736 	args->sa_slot = NULL;
737 	args->sa_cache_this = cache_reply;
738 	args->sa_privileged = privileged;
739 
740 	res->sr_slot = NULL;
741 }
742 
743 static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res)
744 {
745 	struct nfs4_slot *slot = res->sr_slot;
746 	struct nfs4_slot_table *tbl;
747 
748 	tbl = slot->table;
749 	spin_lock(&tbl->slot_tbl_lock);
750 	if (!nfs41_wake_and_assign_slot(tbl, slot))
751 		nfs4_free_slot(tbl, slot);
752 	spin_unlock(&tbl->slot_tbl_lock);
753 
754 	res->sr_slot = NULL;
755 }
756 
757 static int nfs40_sequence_done(struct rpc_task *task,
758 			       struct nfs4_sequence_res *res)
759 {
760 	if (res->sr_slot != NULL)
761 		nfs40_sequence_free_slot(res);
762 	return 1;
763 }
764 
765 #if defined(CONFIG_NFS_V4_1)
766 
767 static void nfs41_release_slot(struct nfs4_slot *slot)
768 {
769 	struct nfs4_session *session;
770 	struct nfs4_slot_table *tbl;
771 	bool send_new_highest_used_slotid = false;
772 
773 	if (!slot)
774 		return;
775 	tbl = slot->table;
776 	session = tbl->session;
777 
778 	/* Bump the slot sequence number */
779 	if (slot->seq_done)
780 		slot->seq_nr++;
781 	slot->seq_done = 0;
782 
783 	spin_lock(&tbl->slot_tbl_lock);
784 	/* Be nice to the server: try to ensure that the last transmitted
785 	 * value for highest_user_slotid <= target_highest_slotid
786 	 */
787 	if (tbl->highest_used_slotid > tbl->target_highest_slotid)
788 		send_new_highest_used_slotid = true;
789 
790 	if (nfs41_wake_and_assign_slot(tbl, slot)) {
791 		send_new_highest_used_slotid = false;
792 		goto out_unlock;
793 	}
794 	nfs4_free_slot(tbl, slot);
795 
796 	if (tbl->highest_used_slotid != NFS4_NO_SLOT)
797 		send_new_highest_used_slotid = false;
798 out_unlock:
799 	spin_unlock(&tbl->slot_tbl_lock);
800 	if (send_new_highest_used_slotid)
801 		nfs41_notify_server(session->clp);
802 	if (waitqueue_active(&tbl->slot_waitq))
803 		wake_up_all(&tbl->slot_waitq);
804 }
805 
806 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
807 {
808 	nfs41_release_slot(res->sr_slot);
809 	res->sr_slot = NULL;
810 }
811 
812 static void nfs4_slot_sequence_record_sent(struct nfs4_slot *slot,
813 		u32 seqnr)
814 {
815 	if ((s32)(seqnr - slot->seq_nr_highest_sent) > 0)
816 		slot->seq_nr_highest_sent = seqnr;
817 }
818 static void nfs4_slot_sequence_acked(struct nfs4_slot *slot, u32 seqnr)
819 {
820 	nfs4_slot_sequence_record_sent(slot, seqnr);
821 	slot->seq_nr_last_acked = seqnr;
822 }
823 
824 static void nfs4_probe_sequence(struct nfs_client *client, const struct cred *cred,
825 				struct nfs4_slot *slot)
826 {
827 	struct rpc_task *task = _nfs41_proc_sequence(client, cred, slot, true);
828 	if (!IS_ERR(task))
829 		rpc_put_task_async(task);
830 }
831 
832 static int nfs41_sequence_process(struct rpc_task *task,
833 		struct nfs4_sequence_res *res)
834 {
835 	struct nfs4_session *session;
836 	struct nfs4_slot *slot = res->sr_slot;
837 	struct nfs_client *clp;
838 	int status;
839 	int ret = 1;
840 
841 	if (slot == NULL)
842 		goto out_noaction;
843 	/* don't increment the sequence number if the task wasn't sent */
844 	if (!RPC_WAS_SENT(task) || slot->seq_done)
845 		goto out;
846 
847 	session = slot->table->session;
848 	clp = session->clp;
849 
850 	trace_nfs4_sequence_done(session, res);
851 
852 	status = res->sr_status;
853 	if (task->tk_status == -NFS4ERR_DEADSESSION)
854 		status = -NFS4ERR_DEADSESSION;
855 
856 	/* Check the SEQUENCE operation status */
857 	switch (status) {
858 	case 0:
859 		/* Mark this sequence number as having been acked */
860 		nfs4_slot_sequence_acked(slot, slot->seq_nr);
861 		/* Update the slot's sequence and clientid lease timer */
862 		slot->seq_done = 1;
863 		do_renew_lease(clp, res->sr_timestamp);
864 		/* Check sequence flags */
865 		nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags,
866 				!!slot->privileged);
867 		nfs41_update_target_slotid(slot->table, slot, res);
868 		break;
869 	case 1:
870 		/*
871 		 * sr_status remains 1 if an RPC level error occurred.
872 		 * The server may or may not have processed the sequence
873 		 * operation..
874 		 */
875 		nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
876 		slot->seq_done = 1;
877 		goto out;
878 	case -NFS4ERR_DELAY:
879 		/* The server detected a resend of the RPC call and
880 		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
881 		 * of RFC5661.
882 		 */
883 		dprintk("%s: slot=%u seq=%u: Operation in progress\n",
884 			__func__,
885 			slot->slot_nr,
886 			slot->seq_nr);
887 		goto out_retry;
888 	case -NFS4ERR_RETRY_UNCACHED_REP:
889 	case -NFS4ERR_SEQ_FALSE_RETRY:
890 		/*
891 		 * The server thinks we tried to replay a request.
892 		 * Retry the call after bumping the sequence ID.
893 		 */
894 		nfs4_slot_sequence_acked(slot, slot->seq_nr);
895 		goto retry_new_seq;
896 	case -NFS4ERR_BADSLOT:
897 		/*
898 		 * The slot id we used was probably retired. Try again
899 		 * using a different slot id.
900 		 */
901 		if (slot->slot_nr < slot->table->target_highest_slotid)
902 			goto session_recover;
903 		goto retry_nowait;
904 	case -NFS4ERR_SEQ_MISORDERED:
905 		nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
906 		/*
907 		 * Were one or more calls using this slot interrupted?
908 		 * If the server never received the request, then our
909 		 * transmitted slot sequence number may be too high. However,
910 		 * if the server did receive the request then it might
911 		 * accidentally give us a reply with a mismatched operation.
912 		 * We can sort this out by sending a lone sequence operation
913 		 * to the server on the same slot.
914 		 */
915 		if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) {
916 			slot->seq_nr--;
917 			if (task->tk_msg.rpc_proc != &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE]) {
918 				nfs4_probe_sequence(clp, task->tk_msg.rpc_cred, slot);
919 				res->sr_slot = NULL;
920 			}
921 			goto retry_nowait;
922 		}
923 		/*
924 		 * RFC5661:
925 		 * A retry might be sent while the original request is
926 		 * still in progress on the replier. The replier SHOULD
927 		 * deal with the issue by returning NFS4ERR_DELAY as the
928 		 * reply to SEQUENCE or CB_SEQUENCE operation, but
929 		 * implementations MAY return NFS4ERR_SEQ_MISORDERED.
930 		 *
931 		 * Restart the search after a delay.
932 		 */
933 		slot->seq_nr = slot->seq_nr_highest_sent;
934 		goto out_retry;
935 	case -NFS4ERR_BADSESSION:
936 	case -NFS4ERR_DEADSESSION:
937 	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
938 		goto session_recover;
939 	default:
940 		/* Just update the slot sequence no. */
941 		slot->seq_done = 1;
942 	}
943 out:
944 	/* The session may be reset by one of the error handlers. */
945 	dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
946 out_noaction:
947 	return ret;
948 session_recover:
949 	set_bit(NFS4_SLOT_TBL_DRAINING, &session->fc_slot_table.slot_tbl_state);
950 	nfs4_schedule_session_recovery(session, status);
951 	dprintk("%s ERROR: %d Reset session\n", __func__, status);
952 	nfs41_sequence_free_slot(res);
953 	goto out;
954 retry_new_seq:
955 	++slot->seq_nr;
956 retry_nowait:
957 	if (rpc_restart_call_prepare(task)) {
958 		nfs41_sequence_free_slot(res);
959 		task->tk_status = 0;
960 		ret = 0;
961 	}
962 	goto out;
963 out_retry:
964 	if (!rpc_restart_call(task))
965 		goto out;
966 	rpc_delay(task, NFS4_POLL_RETRY_MAX);
967 	return 0;
968 }
969 
970 int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
971 {
972 	if (!nfs41_sequence_process(task, res))
973 		return 0;
974 	if (res->sr_slot != NULL)
975 		nfs41_sequence_free_slot(res);
976 	return 1;
977 
978 }
979 EXPORT_SYMBOL_GPL(nfs41_sequence_done);
980 
981 static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
982 {
983 	if (res->sr_slot == NULL)
984 		return 1;
985 	if (res->sr_slot->table->session != NULL)
986 		return nfs41_sequence_process(task, res);
987 	return nfs40_sequence_done(task, res);
988 }
989 
990 static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
991 {
992 	if (res->sr_slot != NULL) {
993 		if (res->sr_slot->table->session != NULL)
994 			nfs41_sequence_free_slot(res);
995 		else
996 			nfs40_sequence_free_slot(res);
997 	}
998 }
999 
1000 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
1001 {
1002 	if (res->sr_slot == NULL)
1003 		return 1;
1004 	if (!res->sr_slot->table->session)
1005 		return nfs40_sequence_done(task, res);
1006 	return nfs41_sequence_done(task, res);
1007 }
1008 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
1009 
1010 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
1011 {
1012 	struct nfs4_call_sync_data *data = calldata;
1013 
1014 	dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
1015 
1016 	nfs4_setup_sequence(data->seq_server->nfs_client,
1017 			    data->seq_args, data->seq_res, task);
1018 }
1019 
1020 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
1021 {
1022 	struct nfs4_call_sync_data *data = calldata;
1023 
1024 	nfs41_sequence_done(task, data->seq_res);
1025 }
1026 
1027 static const struct rpc_call_ops nfs41_call_sync_ops = {
1028 	.rpc_call_prepare = nfs41_call_sync_prepare,
1029 	.rpc_call_done = nfs41_call_sync_done,
1030 };
1031 
1032 #else	/* !CONFIG_NFS_V4_1 */
1033 
1034 static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
1035 {
1036 	return nfs40_sequence_done(task, res);
1037 }
1038 
1039 static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
1040 {
1041 	if (res->sr_slot != NULL)
1042 		nfs40_sequence_free_slot(res);
1043 }
1044 
1045 int nfs4_sequence_done(struct rpc_task *task,
1046 		       struct nfs4_sequence_res *res)
1047 {
1048 	return nfs40_sequence_done(task, res);
1049 }
1050 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
1051 
1052 #endif	/* !CONFIG_NFS_V4_1 */
1053 
1054 static void nfs41_sequence_res_init(struct nfs4_sequence_res *res)
1055 {
1056 	res->sr_timestamp = jiffies;
1057 	res->sr_status_flags = 0;
1058 	res->sr_status = 1;
1059 }
1060 
1061 static
1062 void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
1063 		struct nfs4_sequence_res *res,
1064 		struct nfs4_slot *slot)
1065 {
1066 	if (!slot)
1067 		return;
1068 	slot->privileged = args->sa_privileged ? 1 : 0;
1069 	args->sa_slot = slot;
1070 
1071 	res->sr_slot = slot;
1072 }
1073 
1074 int nfs4_setup_sequence(struct nfs_client *client,
1075 			struct nfs4_sequence_args *args,
1076 			struct nfs4_sequence_res *res,
1077 			struct rpc_task *task)
1078 {
1079 	struct nfs4_session *session = nfs4_get_session(client);
1080 	struct nfs4_slot_table *tbl  = client->cl_slot_tbl;
1081 	struct nfs4_slot *slot;
1082 
1083 	/* slot already allocated? */
1084 	if (res->sr_slot != NULL)
1085 		goto out_start;
1086 
1087 	if (session)
1088 		tbl = &session->fc_slot_table;
1089 
1090 	spin_lock(&tbl->slot_tbl_lock);
1091 	/* The state manager will wait until the slot table is empty */
1092 	if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
1093 		goto out_sleep;
1094 
1095 	slot = nfs4_alloc_slot(tbl);
1096 	if (IS_ERR(slot)) {
1097 		if (slot == ERR_PTR(-ENOMEM))
1098 			goto out_sleep_timeout;
1099 		goto out_sleep;
1100 	}
1101 	spin_unlock(&tbl->slot_tbl_lock);
1102 
1103 	nfs4_sequence_attach_slot(args, res, slot);
1104 
1105 	trace_nfs4_setup_sequence(session, args);
1106 out_start:
1107 	nfs41_sequence_res_init(res);
1108 	rpc_call_start(task);
1109 	return 0;
1110 out_sleep_timeout:
1111 	/* Try again in 1/4 second */
1112 	if (args->sa_privileged)
1113 		rpc_sleep_on_priority_timeout(&tbl->slot_tbl_waitq, task,
1114 				jiffies + (HZ >> 2), RPC_PRIORITY_PRIVILEGED);
1115 	else
1116 		rpc_sleep_on_timeout(&tbl->slot_tbl_waitq, task,
1117 				NULL, jiffies + (HZ >> 2));
1118 	spin_unlock(&tbl->slot_tbl_lock);
1119 	return -EAGAIN;
1120 out_sleep:
1121 	if (args->sa_privileged)
1122 		rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
1123 				RPC_PRIORITY_PRIVILEGED);
1124 	else
1125 		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
1126 	spin_unlock(&tbl->slot_tbl_lock);
1127 	return -EAGAIN;
1128 }
1129 EXPORT_SYMBOL_GPL(nfs4_setup_sequence);
1130 
1131 static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
1132 {
1133 	struct nfs4_call_sync_data *data = calldata;
1134 	nfs4_setup_sequence(data->seq_server->nfs_client,
1135 				data->seq_args, data->seq_res, task);
1136 }
1137 
1138 static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
1139 {
1140 	struct nfs4_call_sync_data *data = calldata;
1141 	nfs4_sequence_done(task, data->seq_res);
1142 }
1143 
1144 static const struct rpc_call_ops nfs40_call_sync_ops = {
1145 	.rpc_call_prepare = nfs40_call_sync_prepare,
1146 	.rpc_call_done = nfs40_call_sync_done,
1147 };
1148 
1149 static int nfs4_call_sync_custom(struct rpc_task_setup *task_setup)
1150 {
1151 	int ret;
1152 	struct rpc_task *task;
1153 
1154 	task = rpc_run_task(task_setup);
1155 	if (IS_ERR(task))
1156 		return PTR_ERR(task);
1157 
1158 	ret = task->tk_status;
1159 	rpc_put_task(task);
1160 	return ret;
1161 }
1162 
1163 static int nfs4_do_call_sync(struct rpc_clnt *clnt,
1164 			     struct nfs_server *server,
1165 			     struct rpc_message *msg,
1166 			     struct nfs4_sequence_args *args,
1167 			     struct nfs4_sequence_res *res,
1168 			     unsigned short task_flags)
1169 {
1170 	struct nfs_client *clp = server->nfs_client;
1171 	struct nfs4_call_sync_data data = {
1172 		.seq_server = server,
1173 		.seq_args = args,
1174 		.seq_res = res,
1175 	};
1176 	struct rpc_task_setup task_setup = {
1177 		.rpc_client = clnt,
1178 		.rpc_message = msg,
1179 		.callback_ops = clp->cl_mvops->call_sync_ops,
1180 		.callback_data = &data,
1181 		.flags = task_flags,
1182 	};
1183 
1184 	return nfs4_call_sync_custom(&task_setup);
1185 }
1186 
1187 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
1188 				   struct nfs_server *server,
1189 				   struct rpc_message *msg,
1190 				   struct nfs4_sequence_args *args,
1191 				   struct nfs4_sequence_res *res)
1192 {
1193 	unsigned short task_flags = 0;
1194 
1195 	if (server->caps & NFS_CAP_MOVEABLE)
1196 		task_flags = RPC_TASK_MOVEABLE;
1197 	return nfs4_do_call_sync(clnt, server, msg, args, res, task_flags);
1198 }
1199 
1200 
1201 int nfs4_call_sync(struct rpc_clnt *clnt,
1202 		   struct nfs_server *server,
1203 		   struct rpc_message *msg,
1204 		   struct nfs4_sequence_args *args,
1205 		   struct nfs4_sequence_res *res,
1206 		   int cache_reply)
1207 {
1208 	nfs4_init_sequence(args, res, cache_reply, 0);
1209 	return nfs4_call_sync_sequence(clnt, server, msg, args, res);
1210 }
1211 
1212 static void
1213 nfs4_inc_nlink_locked(struct inode *inode)
1214 {
1215 	nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
1216 					     NFS_INO_INVALID_CTIME |
1217 					     NFS_INO_INVALID_NLINK);
1218 	inc_nlink(inode);
1219 }
1220 
1221 static void
1222 nfs4_inc_nlink(struct inode *inode)
1223 {
1224 	spin_lock(&inode->i_lock);
1225 	nfs4_inc_nlink_locked(inode);
1226 	spin_unlock(&inode->i_lock);
1227 }
1228 
1229 static void
1230 nfs4_dec_nlink_locked(struct inode *inode)
1231 {
1232 	nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
1233 					     NFS_INO_INVALID_CTIME |
1234 					     NFS_INO_INVALID_NLINK);
1235 	drop_nlink(inode);
1236 }
1237 
1238 static void
1239 nfs4_update_changeattr_locked(struct inode *inode,
1240 		struct nfs4_change_info *cinfo,
1241 		unsigned long timestamp, unsigned long cache_validity)
1242 {
1243 	struct nfs_inode *nfsi = NFS_I(inode);
1244 	u64 change_attr = inode_peek_iversion_raw(inode);
1245 
1246 	cache_validity |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME;
1247 	if (S_ISDIR(inode->i_mode))
1248 		cache_validity |= NFS_INO_INVALID_DATA;
1249 
1250 	switch (NFS_SERVER(inode)->change_attr_type) {
1251 	case NFS4_CHANGE_TYPE_IS_UNDEFINED:
1252 		if (cinfo->after == change_attr)
1253 			goto out;
1254 		break;
1255 	default:
1256 		if ((s64)(change_attr - cinfo->after) >= 0)
1257 			goto out;
1258 	}
1259 
1260 	inode_set_iversion_raw(inode, cinfo->after);
1261 	if (!cinfo->atomic || cinfo->before != change_attr) {
1262 		if (S_ISDIR(inode->i_mode))
1263 			nfs_force_lookup_revalidate(inode);
1264 
1265 		if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
1266 			cache_validity |=
1267 				NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
1268 				NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER |
1269 				NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
1270 				NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR;
1271 		nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
1272 	}
1273 	nfsi->attrtimeo_timestamp = jiffies;
1274 	nfsi->read_cache_jiffies = timestamp;
1275 	nfsi->attr_gencount = nfs_inc_attr_generation_counter();
1276 	nfsi->cache_validity &= ~NFS_INO_INVALID_CHANGE;
1277 out:
1278 	nfs_set_cache_invalid(inode, cache_validity);
1279 }
1280 
1281 void
1282 nfs4_update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
1283 		unsigned long timestamp, unsigned long cache_validity)
1284 {
1285 	spin_lock(&dir->i_lock);
1286 	nfs4_update_changeattr_locked(dir, cinfo, timestamp, cache_validity);
1287 	spin_unlock(&dir->i_lock);
1288 }
1289 
1290 struct nfs4_open_createattrs {
1291 	struct nfs4_label *label;
1292 	struct iattr *sattr;
1293 	const __u32 verf[2];
1294 };
1295 
1296 static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
1297 		int err, struct nfs4_exception *exception)
1298 {
1299 	if (err != -EINVAL)
1300 		return false;
1301 	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1302 		return false;
1303 	server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
1304 	exception->retry = 1;
1305 	return true;
1306 }
1307 
1308 static fmode_t _nfs4_ctx_to_accessmode(const struct nfs_open_context *ctx)
1309 {
1310 	 return ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
1311 }
1312 
1313 static fmode_t _nfs4_ctx_to_openmode(const struct nfs_open_context *ctx)
1314 {
1315 	fmode_t ret = ctx->mode & (FMODE_READ|FMODE_WRITE);
1316 
1317 	return (ctx->mode & FMODE_EXEC) ? FMODE_READ | ret : ret;
1318 }
1319 
1320 static u32
1321 nfs4_map_atomic_open_share(struct nfs_server *server,
1322 		fmode_t fmode, int openflags)
1323 {
1324 	u32 res = 0;
1325 
1326 	switch (fmode & (FMODE_READ | FMODE_WRITE)) {
1327 	case FMODE_READ:
1328 		res = NFS4_SHARE_ACCESS_READ;
1329 		break;
1330 	case FMODE_WRITE:
1331 		res = NFS4_SHARE_ACCESS_WRITE;
1332 		break;
1333 	case FMODE_READ|FMODE_WRITE:
1334 		res = NFS4_SHARE_ACCESS_BOTH;
1335 	}
1336 	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1337 		goto out;
1338 	/* Want no delegation if we're using O_DIRECT */
1339 	if (openflags & O_DIRECT)
1340 		res |= NFS4_SHARE_WANT_NO_DELEG;
1341 out:
1342 	return res;
1343 }
1344 
1345 static enum open_claim_type4
1346 nfs4_map_atomic_open_claim(struct nfs_server *server,
1347 		enum open_claim_type4 claim)
1348 {
1349 	if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
1350 		return claim;
1351 	switch (claim) {
1352 	default:
1353 		return claim;
1354 	case NFS4_OPEN_CLAIM_FH:
1355 		return NFS4_OPEN_CLAIM_NULL;
1356 	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1357 		return NFS4_OPEN_CLAIM_DELEGATE_CUR;
1358 	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1359 		return NFS4_OPEN_CLAIM_DELEGATE_PREV;
1360 	}
1361 }
1362 
1363 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
1364 {
1365 	p->o_res.f_attr = &p->f_attr;
1366 	p->o_res.seqid = p->o_arg.seqid;
1367 	p->c_res.seqid = p->c_arg.seqid;
1368 	p->o_res.server = p->o_arg.server;
1369 	p->o_res.access_request = p->o_arg.access;
1370 	nfs_fattr_init(&p->f_attr);
1371 	nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
1372 }
1373 
1374 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1375 		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
1376 		const struct nfs4_open_createattrs *c,
1377 		enum open_claim_type4 claim,
1378 		gfp_t gfp_mask)
1379 {
1380 	struct dentry *parent = dget_parent(dentry);
1381 	struct inode *dir = d_inode(parent);
1382 	struct nfs_server *server = NFS_SERVER(dir);
1383 	struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
1384 	struct nfs4_label *label = (c != NULL) ? c->label : NULL;
1385 	struct nfs4_opendata *p;
1386 
1387 	p = kzalloc(sizeof(*p), gfp_mask);
1388 	if (p == NULL)
1389 		goto err;
1390 
1391 	p->f_attr.label = nfs4_label_alloc(server, gfp_mask);
1392 	if (IS_ERR(p->f_attr.label))
1393 		goto err_free_p;
1394 
1395 	p->a_label = nfs4_label_alloc(server, gfp_mask);
1396 	if (IS_ERR(p->a_label))
1397 		goto err_free_f;
1398 
1399 	alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
1400 	p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
1401 	if (IS_ERR(p->o_arg.seqid))
1402 		goto err_free_label;
1403 	nfs_sb_active(dentry->d_sb);
1404 	p->dentry = dget(dentry);
1405 	p->dir = parent;
1406 	p->owner = sp;
1407 	atomic_inc(&sp->so_count);
1408 	p->o_arg.open_flags = flags;
1409 	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
1410 	p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
1411 	p->o_arg.share_access = nfs4_map_atomic_open_share(server,
1412 			fmode, flags);
1413 	if (flags & O_CREAT) {
1414 		p->o_arg.umask = current_umask();
1415 		p->o_arg.label = nfs4_label_copy(p->a_label, label);
1416 		if (c->sattr != NULL && c->sattr->ia_valid != 0) {
1417 			p->o_arg.u.attrs = &p->attrs;
1418 			memcpy(&p->attrs, c->sattr, sizeof(p->attrs));
1419 
1420 			memcpy(p->o_arg.u.verifier.data, c->verf,
1421 					sizeof(p->o_arg.u.verifier.data));
1422 		}
1423 	}
1424 	/* ask server to check for all possible rights as results
1425 	 * are cached */
1426 	switch (p->o_arg.claim) {
1427 	default:
1428 		break;
1429 	case NFS4_OPEN_CLAIM_NULL:
1430 	case NFS4_OPEN_CLAIM_FH:
1431 		p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
1432 				  NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE |
1433 				  NFS4_ACCESS_EXECUTE |
1434 				  nfs_access_xattr_mask(server);
1435 	}
1436 	p->o_arg.clientid = server->nfs_client->cl_clientid;
1437 	p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
1438 	p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
1439 	p->o_arg.name = &dentry->d_name;
1440 	p->o_arg.server = server;
1441 	p->o_arg.bitmask = nfs4_bitmask(server, label);
1442 	p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
1443 	switch (p->o_arg.claim) {
1444 	case NFS4_OPEN_CLAIM_NULL:
1445 	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1446 	case NFS4_OPEN_CLAIM_DELEGATE_PREV:
1447 		p->o_arg.fh = NFS_FH(dir);
1448 		break;
1449 	case NFS4_OPEN_CLAIM_PREVIOUS:
1450 	case NFS4_OPEN_CLAIM_FH:
1451 	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1452 	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1453 		p->o_arg.fh = NFS_FH(d_inode(dentry));
1454 	}
1455 	p->c_arg.fh = &p->o_res.fh;
1456 	p->c_arg.stateid = &p->o_res.stateid;
1457 	p->c_arg.seqid = p->o_arg.seqid;
1458 	nfs4_init_opendata_res(p);
1459 	kref_init(&p->kref);
1460 	return p;
1461 
1462 err_free_label:
1463 	nfs4_label_free(p->a_label);
1464 err_free_f:
1465 	nfs4_label_free(p->f_attr.label);
1466 err_free_p:
1467 	kfree(p);
1468 err:
1469 	dput(parent);
1470 	return NULL;
1471 }
1472 
1473 static void nfs4_opendata_free(struct kref *kref)
1474 {
1475 	struct nfs4_opendata *p = container_of(kref,
1476 			struct nfs4_opendata, kref);
1477 	struct super_block *sb = p->dentry->d_sb;
1478 
1479 	nfs4_lgopen_release(p->lgp);
1480 	nfs_free_seqid(p->o_arg.seqid);
1481 	nfs4_sequence_free_slot(&p->o_res.seq_res);
1482 	if (p->state != NULL)
1483 		nfs4_put_open_state(p->state);
1484 	nfs4_put_state_owner(p->owner);
1485 
1486 	nfs4_label_free(p->a_label);
1487 	nfs4_label_free(p->f_attr.label);
1488 
1489 	dput(p->dir);
1490 	dput(p->dentry);
1491 	nfs_sb_deactive(sb);
1492 	nfs_fattr_free_names(&p->f_attr);
1493 	kfree(p->f_attr.mdsthreshold);
1494 	kfree(p);
1495 }
1496 
1497 static void nfs4_opendata_put(struct nfs4_opendata *p)
1498 {
1499 	if (p != NULL)
1500 		kref_put(&p->kref, nfs4_opendata_free);
1501 }
1502 
1503 static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
1504 		fmode_t fmode)
1505 {
1506 	switch(fmode & (FMODE_READ|FMODE_WRITE)) {
1507 	case FMODE_READ|FMODE_WRITE:
1508 		return state->n_rdwr != 0;
1509 	case FMODE_WRITE:
1510 		return state->n_wronly != 0;
1511 	case FMODE_READ:
1512 		return state->n_rdonly != 0;
1513 	}
1514 	WARN_ON_ONCE(1);
1515 	return false;
1516 }
1517 
1518 static int can_open_cached(struct nfs4_state *state, fmode_t mode,
1519 		int open_mode, enum open_claim_type4 claim)
1520 {
1521 	int ret = 0;
1522 
1523 	if (open_mode & (O_EXCL|O_TRUNC))
1524 		goto out;
1525 	switch (claim) {
1526 	case NFS4_OPEN_CLAIM_NULL:
1527 	case NFS4_OPEN_CLAIM_FH:
1528 		goto out;
1529 	default:
1530 		break;
1531 	}
1532 	switch (mode & (FMODE_READ|FMODE_WRITE)) {
1533 		case FMODE_READ:
1534 			ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
1535 				&& state->n_rdonly != 0;
1536 			break;
1537 		case FMODE_WRITE:
1538 			ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
1539 				&& state->n_wronly != 0;
1540 			break;
1541 		case FMODE_READ|FMODE_WRITE:
1542 			ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
1543 				&& state->n_rdwr != 0;
1544 	}
1545 out:
1546 	return ret;
1547 }
1548 
1549 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
1550 		enum open_claim_type4 claim)
1551 {
1552 	if (delegation == NULL)
1553 		return 0;
1554 	if ((delegation->type & fmode) != fmode)
1555 		return 0;
1556 	switch (claim) {
1557 	case NFS4_OPEN_CLAIM_NULL:
1558 	case NFS4_OPEN_CLAIM_FH:
1559 		break;
1560 	case NFS4_OPEN_CLAIM_PREVIOUS:
1561 		if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
1562 			break;
1563 		fallthrough;
1564 	default:
1565 		return 0;
1566 	}
1567 	nfs_mark_delegation_referenced(delegation);
1568 	return 1;
1569 }
1570 
1571 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
1572 {
1573 	switch (fmode) {
1574 		case FMODE_WRITE:
1575 			state->n_wronly++;
1576 			break;
1577 		case FMODE_READ:
1578 			state->n_rdonly++;
1579 			break;
1580 		case FMODE_READ|FMODE_WRITE:
1581 			state->n_rdwr++;
1582 	}
1583 	nfs4_state_set_mode_locked(state, state->state | fmode);
1584 }
1585 
1586 #ifdef CONFIG_NFS_V4_1
1587 static bool nfs_open_stateid_recover_openmode(struct nfs4_state *state)
1588 {
1589 	if (state->n_rdonly && !test_bit(NFS_O_RDONLY_STATE, &state->flags))
1590 		return true;
1591 	if (state->n_wronly && !test_bit(NFS_O_WRONLY_STATE, &state->flags))
1592 		return true;
1593 	if (state->n_rdwr && !test_bit(NFS_O_RDWR_STATE, &state->flags))
1594 		return true;
1595 	return false;
1596 }
1597 #endif /* CONFIG_NFS_V4_1 */
1598 
1599 static void nfs_state_log_update_open_stateid(struct nfs4_state *state)
1600 {
1601 	if (test_and_clear_bit(NFS_STATE_CHANGE_WAIT, &state->flags))
1602 		wake_up_all(&state->waitq);
1603 }
1604 
1605 static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
1606 {
1607 	struct nfs_client *clp = state->owner->so_server->nfs_client;
1608 	bool need_recover = false;
1609 
1610 	if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
1611 		need_recover = true;
1612 	if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
1613 		need_recover = true;
1614 	if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
1615 		need_recover = true;
1616 	if (need_recover)
1617 		nfs4_state_mark_reclaim_nograce(clp, state);
1618 }
1619 
1620 /*
1621  * Check for whether or not the caller may update the open stateid
1622  * to the value passed in by stateid.
1623  *
1624  * Note: This function relies heavily on the server implementing
1625  * RFC7530 Section 9.1.4.2, and RFC5661 Section 8.2.2
1626  * correctly.
1627  * i.e. The stateid seqids have to be initialised to 1, and
1628  * are then incremented on every state transition.
1629  */
1630 static bool nfs_stateid_is_sequential(struct nfs4_state *state,
1631 		const nfs4_stateid *stateid)
1632 {
1633 	if (test_bit(NFS_OPEN_STATE, &state->flags)) {
1634 		/* The common case - we're updating to a new sequence number */
1635 		if (nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1636 			if (nfs4_stateid_is_next(&state->open_stateid, stateid))
1637 				return true;
1638 			return false;
1639 		}
1640 		/* The server returned a new stateid */
1641 	}
1642 	/* This is the first OPEN in this generation */
1643 	if (stateid->seqid == cpu_to_be32(1))
1644 		return true;
1645 	return false;
1646 }
1647 
1648 static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
1649 {
1650 	if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
1651 		return;
1652 	if (state->n_wronly)
1653 		set_bit(NFS_O_WRONLY_STATE, &state->flags);
1654 	if (state->n_rdonly)
1655 		set_bit(NFS_O_RDONLY_STATE, &state->flags);
1656 	if (state->n_rdwr)
1657 		set_bit(NFS_O_RDWR_STATE, &state->flags);
1658 	set_bit(NFS_OPEN_STATE, &state->flags);
1659 }
1660 
1661 static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
1662 		nfs4_stateid *stateid, fmode_t fmode)
1663 {
1664 	clear_bit(NFS_O_RDWR_STATE, &state->flags);
1665 	switch (fmode & (FMODE_READ|FMODE_WRITE)) {
1666 	case FMODE_WRITE:
1667 		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1668 		break;
1669 	case FMODE_READ:
1670 		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1671 		break;
1672 	case 0:
1673 		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1674 		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1675 		clear_bit(NFS_OPEN_STATE, &state->flags);
1676 	}
1677 	if (stateid == NULL)
1678 		return;
1679 	/* Handle OPEN+OPEN_DOWNGRADE races */
1680 	if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
1681 	    !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
1682 		nfs_resync_open_stateid_locked(state);
1683 		goto out;
1684 	}
1685 	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1686 		nfs4_stateid_copy(&state->stateid, stateid);
1687 	nfs4_stateid_copy(&state->open_stateid, stateid);
1688 	trace_nfs4_open_stateid_update(state->inode, stateid, 0);
1689 out:
1690 	nfs_state_log_update_open_stateid(state);
1691 }
1692 
1693 static void nfs_clear_open_stateid(struct nfs4_state *state,
1694 	nfs4_stateid *arg_stateid,
1695 	nfs4_stateid *stateid, fmode_t fmode)
1696 {
1697 	write_seqlock(&state->seqlock);
1698 	/* Ignore, if the CLOSE argment doesn't match the current stateid */
1699 	if (nfs4_state_match_open_stateid_other(state, arg_stateid))
1700 		nfs_clear_open_stateid_locked(state, stateid, fmode);
1701 	write_sequnlock(&state->seqlock);
1702 	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1703 		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1704 }
1705 
1706 static void nfs_set_open_stateid_locked(struct nfs4_state *state,
1707 		const nfs4_stateid *stateid, nfs4_stateid *freeme)
1708 	__must_hold(&state->owner->so_lock)
1709 	__must_hold(&state->seqlock)
1710 	__must_hold(RCU)
1711 
1712 {
1713 	DEFINE_WAIT(wait);
1714 	int status = 0;
1715 	for (;;) {
1716 
1717 		if (nfs_stateid_is_sequential(state, stateid))
1718 			break;
1719 
1720 		if (status)
1721 			break;
1722 		/* Rely on seqids for serialisation with NFSv4.0 */
1723 		if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client))
1724 			break;
1725 
1726 		set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
1727 		prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
1728 		/*
1729 		 * Ensure we process the state changes in the same order
1730 		 * in which the server processed them by delaying the
1731 		 * update of the stateid until we are in sequence.
1732 		 */
1733 		write_sequnlock(&state->seqlock);
1734 		spin_unlock(&state->owner->so_lock);
1735 		rcu_read_unlock();
1736 		trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);
1737 
1738 		if (!fatal_signal_pending(current)) {
1739 			if (schedule_timeout(5*HZ) == 0)
1740 				status = -EAGAIN;
1741 			else
1742 				status = 0;
1743 		} else
1744 			status = -EINTR;
1745 		finish_wait(&state->waitq, &wait);
1746 		rcu_read_lock();
1747 		spin_lock(&state->owner->so_lock);
1748 		write_seqlock(&state->seqlock);
1749 	}
1750 
1751 	if (test_bit(NFS_OPEN_STATE, &state->flags) &&
1752 	    !nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1753 		nfs4_stateid_copy(freeme, &state->open_stateid);
1754 		nfs_test_and_clear_all_open_stateid(state);
1755 	}
1756 
1757 	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1758 		nfs4_stateid_copy(&state->stateid, stateid);
1759 	nfs4_stateid_copy(&state->open_stateid, stateid);
1760 	trace_nfs4_open_stateid_update(state->inode, stateid, status);
1761 	nfs_state_log_update_open_stateid(state);
1762 }
1763 
1764 static void nfs_state_set_open_stateid(struct nfs4_state *state,
1765 		const nfs4_stateid *open_stateid,
1766 		fmode_t fmode,
1767 		nfs4_stateid *freeme)
1768 {
1769 	/*
1770 	 * Protect the call to nfs4_state_set_mode_locked and
1771 	 * serialise the stateid update
1772 	 */
1773 	write_seqlock(&state->seqlock);
1774 	nfs_set_open_stateid_locked(state, open_stateid, freeme);
1775 	switch (fmode) {
1776 	case FMODE_READ:
1777 		set_bit(NFS_O_RDONLY_STATE, &state->flags);
1778 		break;
1779 	case FMODE_WRITE:
1780 		set_bit(NFS_O_WRONLY_STATE, &state->flags);
1781 		break;
1782 	case FMODE_READ|FMODE_WRITE:
1783 		set_bit(NFS_O_RDWR_STATE, &state->flags);
1784 	}
1785 	set_bit(NFS_OPEN_STATE, &state->flags);
1786 	write_sequnlock(&state->seqlock);
1787 }
1788 
1789 static void nfs_state_clear_open_state_flags(struct nfs4_state *state)
1790 {
1791 	clear_bit(NFS_O_RDWR_STATE, &state->flags);
1792 	clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1793 	clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1794 	clear_bit(NFS_OPEN_STATE, &state->flags);
1795 }
1796 
1797 static void nfs_state_set_delegation(struct nfs4_state *state,
1798 		const nfs4_stateid *deleg_stateid,
1799 		fmode_t fmode)
1800 {
1801 	/*
1802 	 * Protect the call to nfs4_state_set_mode_locked and
1803 	 * serialise the stateid update
1804 	 */
1805 	write_seqlock(&state->seqlock);
1806 	nfs4_stateid_copy(&state->stateid, deleg_stateid);
1807 	set_bit(NFS_DELEGATED_STATE, &state->flags);
1808 	write_sequnlock(&state->seqlock);
1809 }
1810 
1811 static void nfs_state_clear_delegation(struct nfs4_state *state)
1812 {
1813 	write_seqlock(&state->seqlock);
1814 	nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1815 	clear_bit(NFS_DELEGATED_STATE, &state->flags);
1816 	write_sequnlock(&state->seqlock);
1817 }
1818 
1819 int update_open_stateid(struct nfs4_state *state,
1820 		const nfs4_stateid *open_stateid,
1821 		const nfs4_stateid *delegation,
1822 		fmode_t fmode)
1823 {
1824 	struct nfs_server *server = NFS_SERVER(state->inode);
1825 	struct nfs_client *clp = server->nfs_client;
1826 	struct nfs_inode *nfsi = NFS_I(state->inode);
1827 	struct nfs_delegation *deleg_cur;
1828 	nfs4_stateid freeme = { };
1829 	int ret = 0;
1830 
1831 	fmode &= (FMODE_READ|FMODE_WRITE);
1832 
1833 	rcu_read_lock();
1834 	spin_lock(&state->owner->so_lock);
1835 	if (open_stateid != NULL) {
1836 		nfs_state_set_open_stateid(state, open_stateid, fmode, &freeme);
1837 		ret = 1;
1838 	}
1839 
1840 	deleg_cur = nfs4_get_valid_delegation(state->inode);
1841 	if (deleg_cur == NULL)
1842 		goto no_delegation;
1843 
1844 	spin_lock(&deleg_cur->lock);
1845 	if (rcu_dereference(nfsi->delegation) != deleg_cur ||
1846 	   test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
1847 	    (deleg_cur->type & fmode) != fmode)
1848 		goto no_delegation_unlock;
1849 
1850 	if (delegation == NULL)
1851 		delegation = &deleg_cur->stateid;
1852 	else if (!nfs4_stateid_match_other(&deleg_cur->stateid, delegation))
1853 		goto no_delegation_unlock;
1854 
1855 	nfs_mark_delegation_referenced(deleg_cur);
1856 	nfs_state_set_delegation(state, &deleg_cur->stateid, fmode);
1857 	ret = 1;
1858 no_delegation_unlock:
1859 	spin_unlock(&deleg_cur->lock);
1860 no_delegation:
1861 	if (ret)
1862 		update_open_stateflags(state, fmode);
1863 	spin_unlock(&state->owner->so_lock);
1864 	rcu_read_unlock();
1865 
1866 	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1867 		nfs4_schedule_state_manager(clp);
1868 	if (freeme.type != 0)
1869 		nfs4_test_and_free_stateid(server, &freeme,
1870 				state->owner->so_cred);
1871 
1872 	return ret;
1873 }
1874 
1875 static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
1876 		const nfs4_stateid *stateid)
1877 {
1878 	struct nfs4_state *state = lsp->ls_state;
1879 	bool ret = false;
1880 
1881 	spin_lock(&state->state_lock);
1882 	if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
1883 		goto out_noupdate;
1884 	if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
1885 		goto out_noupdate;
1886 	nfs4_stateid_copy(&lsp->ls_stateid, stateid);
1887 	ret = true;
1888 out_noupdate:
1889 	spin_unlock(&state->state_lock);
1890 	return ret;
1891 }
1892 
1893 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1894 {
1895 	struct nfs_delegation *delegation;
1896 
1897 	fmode &= FMODE_READ|FMODE_WRITE;
1898 	rcu_read_lock();
1899 	delegation = nfs4_get_valid_delegation(inode);
1900 	if (delegation == NULL || (delegation->type & fmode) == fmode) {
1901 		rcu_read_unlock();
1902 		return;
1903 	}
1904 	rcu_read_unlock();
1905 	nfs4_inode_return_delegation(inode);
1906 }
1907 
1908 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1909 {
1910 	struct nfs4_state *state = opendata->state;
1911 	struct nfs_delegation *delegation;
1912 	int open_mode = opendata->o_arg.open_flags;
1913 	fmode_t fmode = opendata->o_arg.fmode;
1914 	enum open_claim_type4 claim = opendata->o_arg.claim;
1915 	nfs4_stateid stateid;
1916 	int ret = -EAGAIN;
1917 
1918 	for (;;) {
1919 		spin_lock(&state->owner->so_lock);
1920 		if (can_open_cached(state, fmode, open_mode, claim)) {
1921 			update_open_stateflags(state, fmode);
1922 			spin_unlock(&state->owner->so_lock);
1923 			goto out_return_state;
1924 		}
1925 		spin_unlock(&state->owner->so_lock);
1926 		rcu_read_lock();
1927 		delegation = nfs4_get_valid_delegation(state->inode);
1928 		if (!can_open_delegated(delegation, fmode, claim)) {
1929 			rcu_read_unlock();
1930 			break;
1931 		}
1932 		/* Save the delegation */
1933 		nfs4_stateid_copy(&stateid, &delegation->stateid);
1934 		rcu_read_unlock();
1935 		nfs_release_seqid(opendata->o_arg.seqid);
1936 		if (!opendata->is_recover) {
1937 			ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1938 			if (ret != 0)
1939 				goto out;
1940 		}
1941 		ret = -EAGAIN;
1942 
1943 		/* Try to update the stateid using the delegation */
1944 		if (update_open_stateid(state, NULL, &stateid, fmode))
1945 			goto out_return_state;
1946 	}
1947 out:
1948 	return ERR_PTR(ret);
1949 out_return_state:
1950 	refcount_inc(&state->count);
1951 	return state;
1952 }
1953 
1954 static void
1955 nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
1956 {
1957 	struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
1958 	struct nfs_delegation *delegation;
1959 	int delegation_flags = 0;
1960 
1961 	rcu_read_lock();
1962 	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1963 	if (delegation)
1964 		delegation_flags = delegation->flags;
1965 	rcu_read_unlock();
1966 	switch (data->o_arg.claim) {
1967 	default:
1968 		break;
1969 	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1970 	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1971 		pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1972 				   "returning a delegation for "
1973 				   "OPEN(CLAIM_DELEGATE_CUR)\n",
1974 				   clp->cl_hostname);
1975 		return;
1976 	}
1977 	if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1978 		nfs_inode_set_delegation(state->inode,
1979 				data->owner->so_cred,
1980 				data->o_res.delegation_type,
1981 				&data->o_res.delegation,
1982 				data->o_res.pagemod_limit);
1983 	else
1984 		nfs_inode_reclaim_delegation(state->inode,
1985 				data->owner->so_cred,
1986 				data->o_res.delegation_type,
1987 				&data->o_res.delegation,
1988 				data->o_res.pagemod_limit);
1989 
1990 	if (data->o_res.do_recall)
1991 		nfs_async_inode_return_delegation(state->inode,
1992 						  &data->o_res.delegation);
1993 }
1994 
1995 /*
1996  * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
1997  * and update the nfs4_state.
1998  */
1999 static struct nfs4_state *
2000 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
2001 {
2002 	struct inode *inode = data->state->inode;
2003 	struct nfs4_state *state = data->state;
2004 	int ret;
2005 
2006 	if (!data->rpc_done) {
2007 		if (data->rpc_status)
2008 			return ERR_PTR(data->rpc_status);
2009 		return nfs4_try_open_cached(data);
2010 	}
2011 
2012 	ret = nfs_refresh_inode(inode, &data->f_attr);
2013 	if (ret)
2014 		return ERR_PTR(ret);
2015 
2016 	if (data->o_res.delegation_type != 0)
2017 		nfs4_opendata_check_deleg(data, state);
2018 
2019 	if (!update_open_stateid(state, &data->o_res.stateid,
2020 				NULL, data->o_arg.fmode))
2021 		return ERR_PTR(-EAGAIN);
2022 	refcount_inc(&state->count);
2023 
2024 	return state;
2025 }
2026 
2027 static struct inode *
2028 nfs4_opendata_get_inode(struct nfs4_opendata *data)
2029 {
2030 	struct inode *inode;
2031 
2032 	switch (data->o_arg.claim) {
2033 	case NFS4_OPEN_CLAIM_NULL:
2034 	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
2035 	case NFS4_OPEN_CLAIM_DELEGATE_PREV:
2036 		if (!(data->f_attr.valid & NFS_ATTR_FATTR))
2037 			return ERR_PTR(-EAGAIN);
2038 		inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh,
2039 				&data->f_attr);
2040 		break;
2041 	default:
2042 		inode = d_inode(data->dentry);
2043 		ihold(inode);
2044 		nfs_refresh_inode(inode, &data->f_attr);
2045 	}
2046 	return inode;
2047 }
2048 
2049 static struct nfs4_state *
2050 nfs4_opendata_find_nfs4_state(struct nfs4_opendata *data)
2051 {
2052 	struct nfs4_state *state;
2053 	struct inode *inode;
2054 
2055 	inode = nfs4_opendata_get_inode(data);
2056 	if (IS_ERR(inode))
2057 		return ERR_CAST(inode);
2058 	if (data->state != NULL && data->state->inode == inode) {
2059 		state = data->state;
2060 		refcount_inc(&state->count);
2061 	} else
2062 		state = nfs4_get_open_state(inode, data->owner);
2063 	iput(inode);
2064 	if (state == NULL)
2065 		state = ERR_PTR(-ENOMEM);
2066 	return state;
2067 }
2068 
2069 static struct nfs4_state *
2070 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
2071 {
2072 	struct nfs4_state *state;
2073 
2074 	if (!data->rpc_done) {
2075 		state = nfs4_try_open_cached(data);
2076 		trace_nfs4_cached_open(data->state);
2077 		goto out;
2078 	}
2079 
2080 	state = nfs4_opendata_find_nfs4_state(data);
2081 	if (IS_ERR(state))
2082 		goto out;
2083 
2084 	if (data->o_res.delegation_type != 0)
2085 		nfs4_opendata_check_deleg(data, state);
2086 	if (!update_open_stateid(state, &data->o_res.stateid,
2087 				NULL, data->o_arg.fmode)) {
2088 		nfs4_put_open_state(state);
2089 		state = ERR_PTR(-EAGAIN);
2090 	}
2091 out:
2092 	nfs_release_seqid(data->o_arg.seqid);
2093 	return state;
2094 }
2095 
2096 static struct nfs4_state *
2097 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
2098 {
2099 	struct nfs4_state *ret;
2100 
2101 	if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
2102 		ret =_nfs4_opendata_reclaim_to_nfs4_state(data);
2103 	else
2104 		ret = _nfs4_opendata_to_nfs4_state(data);
2105 	nfs4_sequence_free_slot(&data->o_res.seq_res);
2106 	return ret;
2107 }
2108 
2109 static struct nfs_open_context *
2110 nfs4_state_find_open_context_mode(struct nfs4_state *state, fmode_t mode)
2111 {
2112 	struct nfs_inode *nfsi = NFS_I(state->inode);
2113 	struct nfs_open_context *ctx;
2114 
2115 	rcu_read_lock();
2116 	list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
2117 		if (ctx->state != state)
2118 			continue;
2119 		if ((ctx->mode & mode) != mode)
2120 			continue;
2121 		if (!get_nfs_open_context(ctx))
2122 			continue;
2123 		rcu_read_unlock();
2124 		return ctx;
2125 	}
2126 	rcu_read_unlock();
2127 	return ERR_PTR(-ENOENT);
2128 }
2129 
2130 static struct nfs_open_context *
2131 nfs4_state_find_open_context(struct nfs4_state *state)
2132 {
2133 	struct nfs_open_context *ctx;
2134 
2135 	ctx = nfs4_state_find_open_context_mode(state, FMODE_READ|FMODE_WRITE);
2136 	if (!IS_ERR(ctx))
2137 		return ctx;
2138 	ctx = nfs4_state_find_open_context_mode(state, FMODE_WRITE);
2139 	if (!IS_ERR(ctx))
2140 		return ctx;
2141 	return nfs4_state_find_open_context_mode(state, FMODE_READ);
2142 }
2143 
2144 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
2145 		struct nfs4_state *state, enum open_claim_type4 claim)
2146 {
2147 	struct nfs4_opendata *opendata;
2148 
2149 	opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
2150 			NULL, claim, GFP_NOFS);
2151 	if (opendata == NULL)
2152 		return ERR_PTR(-ENOMEM);
2153 	opendata->state = state;
2154 	refcount_inc(&state->count);
2155 	return opendata;
2156 }
2157 
2158 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
2159 				    fmode_t fmode)
2160 {
2161 	struct nfs4_state *newstate;
2162 	struct nfs_server *server = NFS_SB(opendata->dentry->d_sb);
2163 	int openflags = opendata->o_arg.open_flags;
2164 	int ret;
2165 
2166 	if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
2167 		return 0;
2168 	opendata->o_arg.fmode = fmode;
2169 	opendata->o_arg.share_access =
2170 		nfs4_map_atomic_open_share(server, fmode, openflags);
2171 	memset(&opendata->o_res, 0, sizeof(opendata->o_res));
2172 	memset(&opendata->c_res, 0, sizeof(opendata->c_res));
2173 	nfs4_init_opendata_res(opendata);
2174 	ret = _nfs4_recover_proc_open(opendata);
2175 	if (ret != 0)
2176 		return ret;
2177 	newstate = nfs4_opendata_to_nfs4_state(opendata);
2178 	if (IS_ERR(newstate))
2179 		return PTR_ERR(newstate);
2180 	if (newstate != opendata->state)
2181 		ret = -ESTALE;
2182 	nfs4_close_state(newstate, fmode);
2183 	return ret;
2184 }
2185 
2186 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
2187 {
2188 	int ret;
2189 
2190 	/* memory barrier prior to reading state->n_* */
2191 	smp_rmb();
2192 	ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
2193 	if (ret != 0)
2194 		return ret;
2195 	ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
2196 	if (ret != 0)
2197 		return ret;
2198 	ret = nfs4_open_recover_helper(opendata, FMODE_READ);
2199 	if (ret != 0)
2200 		return ret;
2201 	/*
2202 	 * We may have performed cached opens for all three recoveries.
2203 	 * Check if we need to update the current stateid.
2204 	 */
2205 	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
2206 	    !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
2207 		write_seqlock(&state->seqlock);
2208 		if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
2209 			nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2210 		write_sequnlock(&state->seqlock);
2211 	}
2212 	return 0;
2213 }
2214 
2215 /*
2216  * OPEN_RECLAIM:
2217  * 	reclaim state on the server after a reboot.
2218  */
2219 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
2220 {
2221 	struct nfs_delegation *delegation;
2222 	struct nfs4_opendata *opendata;
2223 	fmode_t delegation_type = 0;
2224 	int status;
2225 
2226 	opendata = nfs4_open_recoverdata_alloc(ctx, state,
2227 			NFS4_OPEN_CLAIM_PREVIOUS);
2228 	if (IS_ERR(opendata))
2229 		return PTR_ERR(opendata);
2230 	rcu_read_lock();
2231 	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2232 	if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
2233 		delegation_type = delegation->type;
2234 	rcu_read_unlock();
2235 	opendata->o_arg.u.delegation_type = delegation_type;
2236 	status = nfs4_open_recover(opendata, state);
2237 	nfs4_opendata_put(opendata);
2238 	return status;
2239 }
2240 
2241 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
2242 {
2243 	struct nfs_server *server = NFS_SERVER(state->inode);
2244 	struct nfs4_exception exception = { };
2245 	int err;
2246 	do {
2247 		err = _nfs4_do_open_reclaim(ctx, state);
2248 		trace_nfs4_open_reclaim(ctx, 0, err);
2249 		if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2250 			continue;
2251 		if (err != -NFS4ERR_DELAY)
2252 			break;
2253 		nfs4_handle_exception(server, err, &exception);
2254 	} while (exception.retry);
2255 	return err;
2256 }
2257 
2258 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
2259 {
2260 	struct nfs_open_context *ctx;
2261 	int ret;
2262 
2263 	ctx = nfs4_state_find_open_context(state);
2264 	if (IS_ERR(ctx))
2265 		return -EAGAIN;
2266 	clear_bit(NFS_DELEGATED_STATE, &state->flags);
2267 	nfs_state_clear_open_state_flags(state);
2268 	ret = nfs4_do_open_reclaim(ctx, state);
2269 	put_nfs_open_context(ctx);
2270 	return ret;
2271 }
2272 
2273 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err)
2274 {
2275 	switch (err) {
2276 		default:
2277 			printk(KERN_ERR "NFS: %s: unhandled error "
2278 					"%d.\n", __func__, err);
2279 			fallthrough;
2280 		case 0:
2281 		case -ENOENT:
2282 		case -EAGAIN:
2283 		case -ESTALE:
2284 		case -ETIMEDOUT:
2285 			break;
2286 		case -NFS4ERR_BADSESSION:
2287 		case -NFS4ERR_BADSLOT:
2288 		case -NFS4ERR_BAD_HIGH_SLOT:
2289 		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
2290 		case -NFS4ERR_DEADSESSION:
2291 			return -EAGAIN;
2292 		case -NFS4ERR_STALE_CLIENTID:
2293 		case -NFS4ERR_STALE_STATEID:
2294 			/* Don't recall a delegation if it was lost */
2295 			nfs4_schedule_lease_recovery(server->nfs_client);
2296 			return -EAGAIN;
2297 		case -NFS4ERR_MOVED:
2298 			nfs4_schedule_migration_recovery(server);
2299 			return -EAGAIN;
2300 		case -NFS4ERR_LEASE_MOVED:
2301 			nfs4_schedule_lease_moved_recovery(server->nfs_client);
2302 			return -EAGAIN;
2303 		case -NFS4ERR_DELEG_REVOKED:
2304 		case -NFS4ERR_ADMIN_REVOKED:
2305 		case -NFS4ERR_EXPIRED:
2306 		case -NFS4ERR_BAD_STATEID:
2307 		case -NFS4ERR_OPENMODE:
2308 			nfs_inode_find_state_and_recover(state->inode,
2309 					stateid);
2310 			nfs4_schedule_stateid_recovery(server, state);
2311 			return -EAGAIN;
2312 		case -NFS4ERR_DELAY:
2313 		case -NFS4ERR_GRACE:
2314 			ssleep(1);
2315 			return -EAGAIN;
2316 		case -ENOMEM:
2317 		case -NFS4ERR_DENIED:
2318 			if (fl) {
2319 				struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner;
2320 				if (lsp)
2321 					set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
2322 			}
2323 			return 0;
2324 	}
2325 	return err;
2326 }
2327 
2328 int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
2329 		struct nfs4_state *state, const nfs4_stateid *stateid)
2330 {
2331 	struct nfs_server *server = NFS_SERVER(state->inode);
2332 	struct nfs4_opendata *opendata;
2333 	int err = 0;
2334 
2335 	opendata = nfs4_open_recoverdata_alloc(ctx, state,
2336 			NFS4_OPEN_CLAIM_DELEG_CUR_FH);
2337 	if (IS_ERR(opendata))
2338 		return PTR_ERR(opendata);
2339 	nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
2340 	if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) {
2341 		err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
2342 		if (err)
2343 			goto out;
2344 	}
2345 	if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) {
2346 		err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
2347 		if (err)
2348 			goto out;
2349 	}
2350 	if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) {
2351 		err = nfs4_open_recover_helper(opendata, FMODE_READ);
2352 		if (err)
2353 			goto out;
2354 	}
2355 	nfs_state_clear_delegation(state);
2356 out:
2357 	nfs4_opendata_put(opendata);
2358 	return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
2359 }
2360 
2361 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
2362 {
2363 	struct nfs4_opendata *data = calldata;
2364 
2365 	nfs4_setup_sequence(data->o_arg.server->nfs_client,
2366 			   &data->c_arg.seq_args, &data->c_res.seq_res, task);
2367 }
2368 
2369 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
2370 {
2371 	struct nfs4_opendata *data = calldata;
2372 
2373 	nfs40_sequence_done(task, &data->c_res.seq_res);
2374 
2375 	data->rpc_status = task->tk_status;
2376 	if (data->rpc_status == 0) {
2377 		nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
2378 		nfs_confirm_seqid(&data->owner->so_seqid, 0);
2379 		renew_lease(data->o_res.server, data->timestamp);
2380 		data->rpc_done = true;
2381 	}
2382 }
2383 
2384 static void nfs4_open_confirm_release(void *calldata)
2385 {
2386 	struct nfs4_opendata *data = calldata;
2387 	struct nfs4_state *state = NULL;
2388 
2389 	/* If this request hasn't been cancelled, do nothing */
2390 	if (!data->cancelled)
2391 		goto out_free;
2392 	/* In case of error, no cleanup! */
2393 	if (!data->rpc_done)
2394 		goto out_free;
2395 	state = nfs4_opendata_to_nfs4_state(data);
2396 	if (!IS_ERR(state))
2397 		nfs4_close_state(state, data->o_arg.fmode);
2398 out_free:
2399 	nfs4_opendata_put(data);
2400 }
2401 
2402 static const struct rpc_call_ops nfs4_open_confirm_ops = {
2403 	.rpc_call_prepare = nfs4_open_confirm_prepare,
2404 	.rpc_call_done = nfs4_open_confirm_done,
2405 	.rpc_release = nfs4_open_confirm_release,
2406 };
2407 
2408 /*
2409  * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
2410  */
2411 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
2412 {
2413 	struct nfs_server *server = NFS_SERVER(d_inode(data->dir));
2414 	struct rpc_task *task;
2415 	struct  rpc_message msg = {
2416 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
2417 		.rpc_argp = &data->c_arg,
2418 		.rpc_resp = &data->c_res,
2419 		.rpc_cred = data->owner->so_cred,
2420 	};
2421 	struct rpc_task_setup task_setup_data = {
2422 		.rpc_client = server->client,
2423 		.rpc_message = &msg,
2424 		.callback_ops = &nfs4_open_confirm_ops,
2425 		.callback_data = data,
2426 		.workqueue = nfsiod_workqueue,
2427 		.flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
2428 	};
2429 	int status;
2430 
2431 	nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1,
2432 				data->is_recover);
2433 	kref_get(&data->kref);
2434 	data->rpc_done = false;
2435 	data->rpc_status = 0;
2436 	data->timestamp = jiffies;
2437 	task = rpc_run_task(&task_setup_data);
2438 	if (IS_ERR(task))
2439 		return PTR_ERR(task);
2440 	status = rpc_wait_for_completion_task(task);
2441 	if (status != 0) {
2442 		data->cancelled = true;
2443 		smp_wmb();
2444 	} else
2445 		status = data->rpc_status;
2446 	rpc_put_task(task);
2447 	return status;
2448 }
2449 
2450 static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
2451 {
2452 	struct nfs4_opendata *data = calldata;
2453 	struct nfs4_state_owner *sp = data->owner;
2454 	struct nfs_client *clp = sp->so_server->nfs_client;
2455 	enum open_claim_type4 claim = data->o_arg.claim;
2456 
2457 	if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
2458 		goto out_wait;
2459 	/*
2460 	 * Check if we still need to send an OPEN call, or if we can use
2461 	 * a delegation instead.
2462 	 */
2463 	if (data->state != NULL) {
2464 		struct nfs_delegation *delegation;
2465 
2466 		if (can_open_cached(data->state, data->o_arg.fmode,
2467 					data->o_arg.open_flags, claim))
2468 			goto out_no_action;
2469 		rcu_read_lock();
2470 		delegation = nfs4_get_valid_delegation(data->state->inode);
2471 		if (can_open_delegated(delegation, data->o_arg.fmode, claim))
2472 			goto unlock_no_action;
2473 		rcu_read_unlock();
2474 	}
2475 	/* Update client id. */
2476 	data->o_arg.clientid = clp->cl_clientid;
2477 	switch (claim) {
2478 	default:
2479 		break;
2480 	case NFS4_OPEN_CLAIM_PREVIOUS:
2481 	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
2482 	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
2483 		data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
2484 		fallthrough;
2485 	case NFS4_OPEN_CLAIM_FH:
2486 		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
2487 	}
2488 	data->timestamp = jiffies;
2489 	if (nfs4_setup_sequence(data->o_arg.server->nfs_client,
2490 				&data->o_arg.seq_args,
2491 				&data->o_res.seq_res,
2492 				task) != 0)
2493 		nfs_release_seqid(data->o_arg.seqid);
2494 
2495 	/* Set the create mode (note dependency on the session type) */
2496 	data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
2497 	if (data->o_arg.open_flags & O_EXCL) {
2498 		data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
2499 		if (clp->cl_mvops->minor_version == 0) {
2500 			data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
2501 			/* don't put an ACCESS op in OPEN compound if O_EXCL,
2502 			 * because ACCESS will return permission denied for
2503 			 * all bits until close */
2504 			data->o_res.access_request = data->o_arg.access = 0;
2505 		} else if (nfs4_has_persistent_session(clp))
2506 			data->o_arg.createmode = NFS4_CREATE_GUARDED;
2507 	}
2508 	return;
2509 unlock_no_action:
2510 	trace_nfs4_cached_open(data->state);
2511 	rcu_read_unlock();
2512 out_no_action:
2513 	task->tk_action = NULL;
2514 out_wait:
2515 	nfs4_sequence_done(task, &data->o_res.seq_res);
2516 }
2517 
2518 static void nfs4_open_done(struct rpc_task *task, void *calldata)
2519 {
2520 	struct nfs4_opendata *data = calldata;
2521 
2522 	data->rpc_status = task->tk_status;
2523 
2524 	if (!nfs4_sequence_process(task, &data->o_res.seq_res))
2525 		return;
2526 
2527 	if (task->tk_status == 0) {
2528 		if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
2529 			switch (data->o_res.f_attr->mode & S_IFMT) {
2530 			case S_IFREG:
2531 				break;
2532 			case S_IFLNK:
2533 				data->rpc_status = -ELOOP;
2534 				break;
2535 			case S_IFDIR:
2536 				data->rpc_status = -EISDIR;
2537 				break;
2538 			default:
2539 				data->rpc_status = -ENOTDIR;
2540 			}
2541 		}
2542 		renew_lease(data->o_res.server, data->timestamp);
2543 		if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
2544 			nfs_confirm_seqid(&data->owner->so_seqid, 0);
2545 	}
2546 	data->rpc_done = true;
2547 }
2548 
2549 static void nfs4_open_release(void *calldata)
2550 {
2551 	struct nfs4_opendata *data = calldata;
2552 	struct nfs4_state *state = NULL;
2553 
2554 	/* If this request hasn't been cancelled, do nothing */
2555 	if (!data->cancelled)
2556 		goto out_free;
2557 	/* In case of error, no cleanup! */
2558 	if (data->rpc_status != 0 || !data->rpc_done)
2559 		goto out_free;
2560 	/* In case we need an open_confirm, no cleanup! */
2561 	if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
2562 		goto out_free;
2563 	state = nfs4_opendata_to_nfs4_state(data);
2564 	if (!IS_ERR(state))
2565 		nfs4_close_state(state, data->o_arg.fmode);
2566 out_free:
2567 	nfs4_opendata_put(data);
2568 }
2569 
2570 static const struct rpc_call_ops nfs4_open_ops = {
2571 	.rpc_call_prepare = nfs4_open_prepare,
2572 	.rpc_call_done = nfs4_open_done,
2573 	.rpc_release = nfs4_open_release,
2574 };
2575 
2576 static int nfs4_run_open_task(struct nfs4_opendata *data,
2577 			      struct nfs_open_context *ctx)
2578 {
2579 	struct inode *dir = d_inode(data->dir);
2580 	struct nfs_server *server = NFS_SERVER(dir);
2581 	struct nfs_openargs *o_arg = &data->o_arg;
2582 	struct nfs_openres *o_res = &data->o_res;
2583 	struct rpc_task *task;
2584 	struct rpc_message msg = {
2585 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
2586 		.rpc_argp = o_arg,
2587 		.rpc_resp = o_res,
2588 		.rpc_cred = data->owner->so_cred,
2589 	};
2590 	struct rpc_task_setup task_setup_data = {
2591 		.rpc_client = server->client,
2592 		.rpc_message = &msg,
2593 		.callback_ops = &nfs4_open_ops,
2594 		.callback_data = data,
2595 		.workqueue = nfsiod_workqueue,
2596 		.flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
2597 	};
2598 	int status;
2599 
2600 	if (nfs_server_capable(dir, NFS_CAP_MOVEABLE))
2601 		task_setup_data.flags |= RPC_TASK_MOVEABLE;
2602 
2603 	kref_get(&data->kref);
2604 	data->rpc_done = false;
2605 	data->rpc_status = 0;
2606 	data->cancelled = false;
2607 	data->is_recover = false;
2608 	if (!ctx) {
2609 		nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 1);
2610 		data->is_recover = true;
2611 		task_setup_data.flags |= RPC_TASK_TIMEOUT;
2612 	} else {
2613 		nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 0);
2614 		pnfs_lgopen_prepare(data, ctx);
2615 	}
2616 	task = rpc_run_task(&task_setup_data);
2617 	if (IS_ERR(task))
2618 		return PTR_ERR(task);
2619 	status = rpc_wait_for_completion_task(task);
2620 	if (status != 0) {
2621 		data->cancelled = true;
2622 		smp_wmb();
2623 	} else
2624 		status = data->rpc_status;
2625 	rpc_put_task(task);
2626 
2627 	return status;
2628 }
2629 
2630 static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
2631 {
2632 	struct inode *dir = d_inode(data->dir);
2633 	struct nfs_openres *o_res = &data->o_res;
2634 	int status;
2635 
2636 	status = nfs4_run_open_task(data, NULL);
2637 	if (status != 0 || !data->rpc_done)
2638 		return status;
2639 
2640 	nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
2641 
2642 	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM)
2643 		status = _nfs4_proc_open_confirm(data);
2644 
2645 	return status;
2646 }
2647 
2648 /*
2649  * Additional permission checks in order to distinguish between an
2650  * open for read, and an open for execute. This works around the
2651  * fact that NFSv4 OPEN treats read and execute permissions as being
2652  * the same.
2653  * Note that in the non-execute case, we want to turn off permission
2654  * checking if we just created a new file (POSIX open() semantics).
2655  */
2656 static int nfs4_opendata_access(const struct cred *cred,
2657 				struct nfs4_opendata *opendata,
2658 				struct nfs4_state *state, fmode_t fmode)
2659 {
2660 	struct nfs_access_entry cache;
2661 	u32 mask, flags;
2662 
2663 	/* access call failed or for some reason the server doesn't
2664 	 * support any access modes -- defer access call until later */
2665 	if (opendata->o_res.access_supported == 0)
2666 		return 0;
2667 
2668 	mask = 0;
2669 	if (fmode & FMODE_EXEC) {
2670 		/* ONLY check for exec rights */
2671 		if (S_ISDIR(state->inode->i_mode))
2672 			mask = NFS4_ACCESS_LOOKUP;
2673 		else
2674 			mask = NFS4_ACCESS_EXECUTE;
2675 	} else if ((fmode & FMODE_READ) && !opendata->file_created)
2676 		mask = NFS4_ACCESS_READ;
2677 
2678 	nfs_access_set_mask(&cache, opendata->o_res.access_result);
2679 	nfs_access_add_cache(state->inode, &cache, cred);
2680 
2681 	flags = NFS4_ACCESS_READ | NFS4_ACCESS_EXECUTE | NFS4_ACCESS_LOOKUP;
2682 	if ((mask & ~cache.mask & flags) == 0)
2683 		return 0;
2684 
2685 	return -EACCES;
2686 }
2687 
2688 /*
2689  * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
2690  */
2691 static int _nfs4_proc_open(struct nfs4_opendata *data,
2692 			   struct nfs_open_context *ctx)
2693 {
2694 	struct inode *dir = d_inode(data->dir);
2695 	struct nfs_server *server = NFS_SERVER(dir);
2696 	struct nfs_openargs *o_arg = &data->o_arg;
2697 	struct nfs_openres *o_res = &data->o_res;
2698 	int status;
2699 
2700 	status = nfs4_run_open_task(data, ctx);
2701 	if (!data->rpc_done)
2702 		return status;
2703 	if (status != 0) {
2704 		if (status == -NFS4ERR_BADNAME &&
2705 				!(o_arg->open_flags & O_CREAT))
2706 			return -ENOENT;
2707 		return status;
2708 	}
2709 
2710 	nfs_fattr_map_and_free_names(server, &data->f_attr);
2711 
2712 	if (o_arg->open_flags & O_CREAT) {
2713 		if (o_arg->open_flags & O_EXCL)
2714 			data->file_created = true;
2715 		else if (o_res->cinfo.before != o_res->cinfo.after)
2716 			data->file_created = true;
2717 		if (data->file_created ||
2718 		    inode_peek_iversion_raw(dir) != o_res->cinfo.after)
2719 			nfs4_update_changeattr(dir, &o_res->cinfo,
2720 					o_res->f_attr->time_start,
2721 					NFS_INO_INVALID_DATA);
2722 	}
2723 	if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
2724 		server->caps &= ~NFS_CAP_POSIX_LOCK;
2725 	if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2726 		status = _nfs4_proc_open_confirm(data);
2727 		if (status != 0)
2728 			return status;
2729 	}
2730 	if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) {
2731 		struct nfs_fh *fh = &o_res->fh;
2732 
2733 		nfs4_sequence_free_slot(&o_res->seq_res);
2734 		if (o_arg->claim == NFS4_OPEN_CLAIM_FH)
2735 			fh = NFS_FH(d_inode(data->dentry));
2736 		nfs4_proc_getattr(server, fh, o_res->f_attr, NULL);
2737 	}
2738 	return 0;
2739 }
2740 
2741 /*
2742  * OPEN_EXPIRED:
2743  * 	reclaim state on the server after a network partition.
2744  * 	Assumes caller holds the appropriate lock
2745  */
2746 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2747 {
2748 	struct nfs4_opendata *opendata;
2749 	int ret;
2750 
2751 	opendata = nfs4_open_recoverdata_alloc(ctx, state, NFS4_OPEN_CLAIM_FH);
2752 	if (IS_ERR(opendata))
2753 		return PTR_ERR(opendata);
2754 	/*
2755 	 * We're not recovering a delegation, so ask for no delegation.
2756 	 * Otherwise the recovery thread could deadlock with an outstanding
2757 	 * delegation return.
2758 	 */
2759 	opendata->o_arg.open_flags = O_DIRECT;
2760 	ret = nfs4_open_recover(opendata, state);
2761 	if (ret == -ESTALE)
2762 		d_drop(ctx->dentry);
2763 	nfs4_opendata_put(opendata);
2764 	return ret;
2765 }
2766 
2767 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2768 {
2769 	struct nfs_server *server = NFS_SERVER(state->inode);
2770 	struct nfs4_exception exception = { };
2771 	int err;
2772 
2773 	do {
2774 		err = _nfs4_open_expired(ctx, state);
2775 		trace_nfs4_open_expired(ctx, 0, err);
2776 		if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2777 			continue;
2778 		switch (err) {
2779 		default:
2780 			goto out;
2781 		case -NFS4ERR_GRACE:
2782 		case -NFS4ERR_DELAY:
2783 			nfs4_handle_exception(server, err, &exception);
2784 			err = 0;
2785 		}
2786 	} while (exception.retry);
2787 out:
2788 	return err;
2789 }
2790 
2791 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2792 {
2793 	struct nfs_open_context *ctx;
2794 	int ret;
2795 
2796 	ctx = nfs4_state_find_open_context(state);
2797 	if (IS_ERR(ctx))
2798 		return -EAGAIN;
2799 	ret = nfs4_do_open_expired(ctx, state);
2800 	put_nfs_open_context(ctx);
2801 	return ret;
2802 }
2803 
2804 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state,
2805 		const nfs4_stateid *stateid)
2806 {
2807 	nfs_remove_bad_delegation(state->inode, stateid);
2808 	nfs_state_clear_delegation(state);
2809 }
2810 
2811 static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
2812 {
2813 	if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
2814 		nfs_finish_clear_delegation_stateid(state, NULL);
2815 }
2816 
2817 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2818 {
2819 	/* NFSv4.0 doesn't allow for delegation recovery on open expire */
2820 	nfs40_clear_delegation_stateid(state);
2821 	nfs_state_clear_open_state_flags(state);
2822 	return nfs4_open_expired(sp, state);
2823 }
2824 
2825 static int nfs40_test_and_free_expired_stateid(struct nfs_server *server,
2826 		nfs4_stateid *stateid,
2827 		const struct cred *cred)
2828 {
2829 	return -NFS4ERR_BAD_STATEID;
2830 }
2831 
2832 #if defined(CONFIG_NFS_V4_1)
2833 static int nfs41_test_and_free_expired_stateid(struct nfs_server *server,
2834 		nfs4_stateid *stateid,
2835 		const struct cred *cred)
2836 {
2837 	int status;
2838 
2839 	switch (stateid->type) {
2840 	default:
2841 		break;
2842 	case NFS4_INVALID_STATEID_TYPE:
2843 	case NFS4_SPECIAL_STATEID_TYPE:
2844 		return -NFS4ERR_BAD_STATEID;
2845 	case NFS4_REVOKED_STATEID_TYPE:
2846 		goto out_free;
2847 	}
2848 
2849 	status = nfs41_test_stateid(server, stateid, cred);
2850 	switch (status) {
2851 	case -NFS4ERR_EXPIRED:
2852 	case -NFS4ERR_ADMIN_REVOKED:
2853 	case -NFS4ERR_DELEG_REVOKED:
2854 		break;
2855 	default:
2856 		return status;
2857 	}
2858 out_free:
2859 	/* Ack the revoked state to the server */
2860 	nfs41_free_stateid(server, stateid, cred, true);
2861 	return -NFS4ERR_EXPIRED;
2862 }
2863 
2864 static int nfs41_check_delegation_stateid(struct nfs4_state *state)
2865 {
2866 	struct nfs_server *server = NFS_SERVER(state->inode);
2867 	nfs4_stateid stateid;
2868 	struct nfs_delegation *delegation;
2869 	const struct cred *cred = NULL;
2870 	int status, ret = NFS_OK;
2871 
2872 	/* Get the delegation credential for use by test/free_stateid */
2873 	rcu_read_lock();
2874 	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2875 	if (delegation == NULL) {
2876 		rcu_read_unlock();
2877 		nfs_state_clear_delegation(state);
2878 		return NFS_OK;
2879 	}
2880 
2881 	spin_lock(&delegation->lock);
2882 	nfs4_stateid_copy(&stateid, &delegation->stateid);
2883 
2884 	if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
2885 				&delegation->flags)) {
2886 		spin_unlock(&delegation->lock);
2887 		rcu_read_unlock();
2888 		return NFS_OK;
2889 	}
2890 
2891 	if (delegation->cred)
2892 		cred = get_cred(delegation->cred);
2893 	spin_unlock(&delegation->lock);
2894 	rcu_read_unlock();
2895 	status = nfs41_test_and_free_expired_stateid(server, &stateid, cred);
2896 	trace_nfs4_test_delegation_stateid(state, NULL, status);
2897 	if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
2898 		nfs_finish_clear_delegation_stateid(state, &stateid);
2899 	else
2900 		ret = status;
2901 
2902 	put_cred(cred);
2903 	return ret;
2904 }
2905 
2906 static void nfs41_delegation_recover_stateid(struct nfs4_state *state)
2907 {
2908 	nfs4_stateid tmp;
2909 
2910 	if (test_bit(NFS_DELEGATED_STATE, &state->flags) &&
2911 	    nfs4_copy_delegation_stateid(state->inode, state->state,
2912 				&tmp, NULL) &&
2913 	    nfs4_stateid_match_other(&state->stateid, &tmp))
2914 		nfs_state_set_delegation(state, &tmp, state->state);
2915 	else
2916 		nfs_state_clear_delegation(state);
2917 }
2918 
2919 /**
2920  * nfs41_check_expired_locks - possibly free a lock stateid
2921  *
2922  * @state: NFSv4 state for an inode
2923  *
2924  * Returns NFS_OK if recovery for this stateid is now finished.
2925  * Otherwise a negative NFS4ERR value is returned.
2926  */
2927 static int nfs41_check_expired_locks(struct nfs4_state *state)
2928 {
2929 	int status, ret = NFS_OK;
2930 	struct nfs4_lock_state *lsp, *prev = NULL;
2931 	struct nfs_server *server = NFS_SERVER(state->inode);
2932 
2933 	if (!test_bit(LK_STATE_IN_USE, &state->flags))
2934 		goto out;
2935 
2936 	spin_lock(&state->state_lock);
2937 	list_for_each_entry(lsp, &state->lock_states, ls_locks) {
2938 		if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
2939 			const struct cred *cred = lsp->ls_state->owner->so_cred;
2940 
2941 			refcount_inc(&lsp->ls_count);
2942 			spin_unlock(&state->state_lock);
2943 
2944 			nfs4_put_lock_state(prev);
2945 			prev = lsp;
2946 
2947 			status = nfs41_test_and_free_expired_stateid(server,
2948 					&lsp->ls_stateid,
2949 					cred);
2950 			trace_nfs4_test_lock_stateid(state, lsp, status);
2951 			if (status == -NFS4ERR_EXPIRED ||
2952 			    status == -NFS4ERR_BAD_STATEID) {
2953 				clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
2954 				lsp->ls_stateid.type = NFS4_INVALID_STATEID_TYPE;
2955 				if (!recover_lost_locks)
2956 					set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
2957 			} else if (status != NFS_OK) {
2958 				ret = status;
2959 				nfs4_put_lock_state(prev);
2960 				goto out;
2961 			}
2962 			spin_lock(&state->state_lock);
2963 		}
2964 	}
2965 	spin_unlock(&state->state_lock);
2966 	nfs4_put_lock_state(prev);
2967 out:
2968 	return ret;
2969 }
2970 
2971 /**
2972  * nfs41_check_open_stateid - possibly free an open stateid
2973  *
2974  * @state: NFSv4 state for an inode
2975  *
2976  * Returns NFS_OK if recovery for this stateid is now finished.
2977  * Otherwise a negative NFS4ERR value is returned.
2978  */
2979 static int nfs41_check_open_stateid(struct nfs4_state *state)
2980 {
2981 	struct nfs_server *server = NFS_SERVER(state->inode);
2982 	nfs4_stateid *stateid = &state->open_stateid;
2983 	const struct cred *cred = state->owner->so_cred;
2984 	int status;
2985 
2986 	if (test_bit(NFS_OPEN_STATE, &state->flags) == 0)
2987 		return -NFS4ERR_BAD_STATEID;
2988 	status = nfs41_test_and_free_expired_stateid(server, stateid, cred);
2989 	trace_nfs4_test_open_stateid(state, NULL, status);
2990 	if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) {
2991 		nfs_state_clear_open_state_flags(state);
2992 		stateid->type = NFS4_INVALID_STATEID_TYPE;
2993 		return status;
2994 	}
2995 	if (nfs_open_stateid_recover_openmode(state))
2996 		return -NFS4ERR_OPENMODE;
2997 	return NFS_OK;
2998 }
2999 
3000 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
3001 {
3002 	int status;
3003 
3004 	status = nfs41_check_delegation_stateid(state);
3005 	if (status != NFS_OK)
3006 		return status;
3007 	nfs41_delegation_recover_stateid(state);
3008 
3009 	status = nfs41_check_expired_locks(state);
3010 	if (status != NFS_OK)
3011 		return status;
3012 	status = nfs41_check_open_stateid(state);
3013 	if (status != NFS_OK)
3014 		status = nfs4_open_expired(sp, state);
3015 	return status;
3016 }
3017 #endif
3018 
3019 /*
3020  * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-*
3021  * fields corresponding to attributes that were used to store the verifier.
3022  * Make sure we clobber those fields in the later setattr call
3023  */
3024 static unsigned nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
3025 				struct iattr *sattr, struct nfs4_label **label)
3026 {
3027 	const __u32 *bitmask = opendata->o_arg.server->exclcreat_bitmask;
3028 	__u32 attrset[3];
3029 	unsigned ret;
3030 	unsigned i;
3031 
3032 	for (i = 0; i < ARRAY_SIZE(attrset); i++) {
3033 		attrset[i] = opendata->o_res.attrset[i];
3034 		if (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE4_1)
3035 			attrset[i] &= ~bitmask[i];
3036 	}
3037 
3038 	ret = (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE) ?
3039 		sattr->ia_valid : 0;
3040 
3041 	if ((attrset[1] & (FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET))) {
3042 		if (sattr->ia_valid & ATTR_ATIME_SET)
3043 			ret |= ATTR_ATIME_SET;
3044 		else
3045 			ret |= ATTR_ATIME;
3046 	}
3047 
3048 	if ((attrset[1] & (FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET))) {
3049 		if (sattr->ia_valid & ATTR_MTIME_SET)
3050 			ret |= ATTR_MTIME_SET;
3051 		else
3052 			ret |= ATTR_MTIME;
3053 	}
3054 
3055 	if (!(attrset[2] & FATTR4_WORD2_SECURITY_LABEL))
3056 		*label = NULL;
3057 	return ret;
3058 }
3059 
3060 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
3061 		struct nfs_open_context *ctx)
3062 {
3063 	struct nfs4_state_owner *sp = opendata->owner;
3064 	struct nfs_server *server = sp->so_server;
3065 	struct dentry *dentry;
3066 	struct nfs4_state *state;
3067 	fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx);
3068 	struct inode *dir = d_inode(opendata->dir);
3069 	unsigned long dir_verifier;
3070 	unsigned int seq;
3071 	int ret;
3072 
3073 	seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
3074 	dir_verifier = nfs_save_change_attribute(dir);
3075 
3076 	ret = _nfs4_proc_open(opendata, ctx);
3077 	if (ret != 0)
3078 		goto out;
3079 
3080 	state = _nfs4_opendata_to_nfs4_state(opendata);
3081 	ret = PTR_ERR(state);
3082 	if (IS_ERR(state))
3083 		goto out;
3084 	ctx->state = state;
3085 	if (server->caps & NFS_CAP_POSIX_LOCK)
3086 		set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
3087 	if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK)
3088 		set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags);
3089 	if (opendata->o_res.rflags & NFS4_OPEN_RESULT_PRESERVE_UNLINKED)
3090 		set_bit(NFS_INO_PRESERVE_UNLINKED, &NFS_I(state->inode)->flags);
3091 
3092 	dentry = opendata->dentry;
3093 	if (d_really_is_negative(dentry)) {
3094 		struct dentry *alias;
3095 		d_drop(dentry);
3096 		alias = d_exact_alias(dentry, state->inode);
3097 		if (!alias)
3098 			alias = d_splice_alias(igrab(state->inode), dentry);
3099 		/* d_splice_alias() can't fail here - it's a non-directory */
3100 		if (alias) {
3101 			dput(ctx->dentry);
3102 			ctx->dentry = dentry = alias;
3103 		}
3104 	}
3105 
3106 	switch(opendata->o_arg.claim) {
3107 	default:
3108 		break;
3109 	case NFS4_OPEN_CLAIM_NULL:
3110 	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
3111 	case NFS4_OPEN_CLAIM_DELEGATE_PREV:
3112 		if (!opendata->rpc_done)
3113 			break;
3114 		if (opendata->o_res.delegation_type != 0)
3115 			dir_verifier = nfs_save_change_attribute(dir);
3116 		nfs_set_verifier(dentry, dir_verifier);
3117 	}
3118 
3119 	/* Parse layoutget results before we check for access */
3120 	pnfs_parse_lgopen(state->inode, opendata->lgp, ctx);
3121 
3122 	ret = nfs4_opendata_access(sp->so_cred, opendata, state, acc_mode);
3123 	if (ret != 0)
3124 		goto out;
3125 
3126 	if (d_inode(dentry) == state->inode) {
3127 		nfs_inode_attach_open_context(ctx);
3128 		if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
3129 			nfs4_schedule_stateid_recovery(server, state);
3130 	}
3131 
3132 out:
3133 	if (!opendata->cancelled) {
3134 		if (opendata->lgp) {
3135 			nfs4_lgopen_release(opendata->lgp);
3136 			opendata->lgp = NULL;
3137 		}
3138 		nfs4_sequence_free_slot(&opendata->o_res.seq_res);
3139 	}
3140 	return ret;
3141 }
3142 
3143 /*
3144  * Returns a referenced nfs4_state
3145  */
3146 static int _nfs4_do_open(struct inode *dir,
3147 			struct nfs_open_context *ctx,
3148 			int flags,
3149 			const struct nfs4_open_createattrs *c,
3150 			int *opened)
3151 {
3152 	struct nfs4_state_owner  *sp;
3153 	struct nfs4_state     *state = NULL;
3154 	struct nfs_server       *server = NFS_SERVER(dir);
3155 	struct nfs4_opendata *opendata;
3156 	struct dentry *dentry = ctx->dentry;
3157 	const struct cred *cred = ctx->cred;
3158 	struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
3159 	fmode_t fmode = _nfs4_ctx_to_openmode(ctx);
3160 	enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
3161 	struct iattr *sattr = c->sattr;
3162 	struct nfs4_label *label = c->label;
3163 	int status;
3164 
3165 	/* Protect against reboot recovery conflicts */
3166 	status = -ENOMEM;
3167 	sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
3168 	if (sp == NULL) {
3169 		dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
3170 		goto out_err;
3171 	}
3172 	status = nfs4_client_recover_expired_lease(server->nfs_client);
3173 	if (status != 0)
3174 		goto err_put_state_owner;
3175 	if (d_really_is_positive(dentry))
3176 		nfs4_return_incompatible_delegation(d_inode(dentry), fmode);
3177 	status = -ENOMEM;
3178 	if (d_really_is_positive(dentry))
3179 		claim = NFS4_OPEN_CLAIM_FH;
3180 	opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags,
3181 			c, claim, GFP_KERNEL);
3182 	if (opendata == NULL)
3183 		goto err_put_state_owner;
3184 
3185 	if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
3186 		if (!opendata->f_attr.mdsthreshold) {
3187 			opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
3188 			if (!opendata->f_attr.mdsthreshold)
3189 				goto err_opendata_put;
3190 		}
3191 		opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
3192 	}
3193 	if (d_really_is_positive(dentry))
3194 		opendata->state = nfs4_get_open_state(d_inode(dentry), sp);
3195 
3196 	status = _nfs4_open_and_get_state(opendata, ctx);
3197 	if (status != 0)
3198 		goto err_opendata_put;
3199 	state = ctx->state;
3200 
3201 	if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
3202 	    (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
3203 		unsigned attrs = nfs4_exclusive_attrset(opendata, sattr, &label);
3204 		/*
3205 		 * send create attributes which was not set by open
3206 		 * with an extra setattr.
3207 		 */
3208 		if (attrs || label) {
3209 			unsigned ia_old = sattr->ia_valid;
3210 
3211 			sattr->ia_valid = attrs;
3212 			nfs_fattr_init(opendata->o_res.f_attr);
3213 			status = nfs4_do_setattr(state->inode, cred,
3214 					opendata->o_res.f_attr, sattr,
3215 					ctx, label);
3216 			if (status == 0) {
3217 				nfs_setattr_update_inode(state->inode, sattr,
3218 						opendata->o_res.f_attr);
3219 				nfs_setsecurity(state->inode, opendata->o_res.f_attr);
3220 			}
3221 			sattr->ia_valid = ia_old;
3222 		}
3223 	}
3224 	if (opened && opendata->file_created)
3225 		*opened = 1;
3226 
3227 	if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
3228 		*ctx_th = opendata->f_attr.mdsthreshold;
3229 		opendata->f_attr.mdsthreshold = NULL;
3230 	}
3231 
3232 	nfs4_opendata_put(opendata);
3233 	nfs4_put_state_owner(sp);
3234 	return 0;
3235 err_opendata_put:
3236 	nfs4_opendata_put(opendata);
3237 err_put_state_owner:
3238 	nfs4_put_state_owner(sp);
3239 out_err:
3240 	return status;
3241 }
3242 
3243 
3244 static struct nfs4_state *nfs4_do_open(struct inode *dir,
3245 					struct nfs_open_context *ctx,
3246 					int flags,
3247 					struct iattr *sattr,
3248 					struct nfs4_label *label,
3249 					int *opened)
3250 {
3251 	struct nfs_server *server = NFS_SERVER(dir);
3252 	struct nfs4_exception exception = {
3253 		.interruptible = true,
3254 	};
3255 	struct nfs4_state *res;
3256 	struct nfs4_open_createattrs c = {
3257 		.label = label,
3258 		.sattr = sattr,
3259 		.verf = {
3260 			[0] = (__u32)jiffies,
3261 			[1] = (__u32)current->pid,
3262 		},
3263 	};
3264 	int status;
3265 
3266 	do {
3267 		status = _nfs4_do_open(dir, ctx, flags, &c, opened);
3268 		res = ctx->state;
3269 		trace_nfs4_open_file(ctx, flags, status);
3270 		if (status == 0)
3271 			break;
3272 		/* NOTE: BAD_SEQID means the server and client disagree about the
3273 		 * book-keeping w.r.t. state-changing operations
3274 		 * (OPEN/CLOSE/LOCK/LOCKU...)
3275 		 * It is actually a sign of a bug on the client or on the server.
3276 		 *
3277 		 * If we receive a BAD_SEQID error in the particular case of
3278 		 * doing an OPEN, we assume that nfs_increment_open_seqid() will
3279 		 * have unhashed the old state_owner for us, and that we can
3280 		 * therefore safely retry using a new one. We should still warn
3281 		 * the user though...
3282 		 */
3283 		if (status == -NFS4ERR_BAD_SEQID) {
3284 			pr_warn_ratelimited("NFS: v4 server %s "
3285 					" returned a bad sequence-id error!\n",
3286 					NFS_SERVER(dir)->nfs_client->cl_hostname);
3287 			exception.retry = 1;
3288 			continue;
3289 		}
3290 		/*
3291 		 * BAD_STATEID on OPEN means that the server cancelled our
3292 		 * state before it received the OPEN_CONFIRM.
3293 		 * Recover by retrying the request as per the discussion
3294 		 * on Page 181 of RFC3530.
3295 		 */
3296 		if (status == -NFS4ERR_BAD_STATEID) {
3297 			exception.retry = 1;
3298 			continue;
3299 		}
3300 		if (status == -NFS4ERR_EXPIRED) {
3301 			nfs4_schedule_lease_recovery(server->nfs_client);
3302 			exception.retry = 1;
3303 			continue;
3304 		}
3305 		if (status == -EAGAIN) {
3306 			/* We must have found a delegation */
3307 			exception.retry = 1;
3308 			continue;
3309 		}
3310 		if (nfs4_clear_cap_atomic_open_v1(server, status, &exception))
3311 			continue;
3312 		res = ERR_PTR(nfs4_handle_exception(server,
3313 					status, &exception));
3314 	} while (exception.retry);
3315 	return res;
3316 }
3317 
3318 static int _nfs4_do_setattr(struct inode *inode,
3319 			    struct nfs_setattrargs *arg,
3320 			    struct nfs_setattrres *res,
3321 			    const struct cred *cred,
3322 			    struct nfs_open_context *ctx)
3323 {
3324 	struct nfs_server *server = NFS_SERVER(inode);
3325 	struct rpc_message msg = {
3326 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
3327 		.rpc_argp	= arg,
3328 		.rpc_resp	= res,
3329 		.rpc_cred	= cred,
3330 	};
3331 	const struct cred *delegation_cred = NULL;
3332 	unsigned long timestamp = jiffies;
3333 	bool truncate;
3334 	int status;
3335 
3336 	nfs_fattr_init(res->fattr);
3337 
3338 	/* Servers should only apply open mode checks for file size changes */
3339 	truncate = (arg->iap->ia_valid & ATTR_SIZE) ? true : false;
3340 	if (!truncate) {
3341 		nfs4_inode_make_writeable(inode);
3342 		goto zero_stateid;
3343 	}
3344 
3345 	if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) {
3346 		/* Use that stateid */
3347 	} else if (ctx != NULL && ctx->state) {
3348 		struct nfs_lock_context *l_ctx;
3349 		if (!nfs4_valid_open_stateid(ctx->state))
3350 			return -EBADF;
3351 		l_ctx = nfs_get_lock_context(ctx);
3352 		if (IS_ERR(l_ctx))
3353 			return PTR_ERR(l_ctx);
3354 		status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx,
3355 						&arg->stateid, &delegation_cred);
3356 		nfs_put_lock_context(l_ctx);
3357 		if (status == -EIO)
3358 			return -EBADF;
3359 		else if (status == -EAGAIN)
3360 			goto zero_stateid;
3361 	} else {
3362 zero_stateid:
3363 		nfs4_stateid_copy(&arg->stateid, &zero_stateid);
3364 	}
3365 	if (delegation_cred)
3366 		msg.rpc_cred = delegation_cred;
3367 
3368 	status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1);
3369 
3370 	put_cred(delegation_cred);
3371 	if (status == 0 && ctx != NULL)
3372 		renew_lease(server, timestamp);
3373 	trace_nfs4_setattr(inode, &arg->stateid, status);
3374 	return status;
3375 }
3376 
3377 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
3378 			   struct nfs_fattr *fattr, struct iattr *sattr,
3379 			   struct nfs_open_context *ctx, struct nfs4_label *ilabel)
3380 {
3381 	struct nfs_server *server = NFS_SERVER(inode);
3382 	__u32 bitmask[NFS4_BITMASK_SZ];
3383 	struct nfs4_state *state = ctx ? ctx->state : NULL;
3384 	struct nfs_setattrargs	arg = {
3385 		.fh		= NFS_FH(inode),
3386 		.iap		= sattr,
3387 		.server		= server,
3388 		.bitmask = bitmask,
3389 		.label		= ilabel,
3390 	};
3391 	struct nfs_setattrres  res = {
3392 		.fattr		= fattr,
3393 		.server		= server,
3394 	};
3395 	struct nfs4_exception exception = {
3396 		.state = state,
3397 		.inode = inode,
3398 		.stateid = &arg.stateid,
3399 	};
3400 	unsigned long adjust_flags = NFS_INO_INVALID_CHANGE;
3401 	int err;
3402 
3403 	if (sattr->ia_valid & (ATTR_MODE | ATTR_KILL_SUID | ATTR_KILL_SGID))
3404 		adjust_flags |= NFS_INO_INVALID_MODE;
3405 	if (sattr->ia_valid & (ATTR_UID | ATTR_GID))
3406 		adjust_flags |= NFS_INO_INVALID_OTHER;
3407 
3408 	do {
3409 		nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label),
3410 					inode, adjust_flags);
3411 
3412 		err = _nfs4_do_setattr(inode, &arg, &res, cred, ctx);
3413 		switch (err) {
3414 		case -NFS4ERR_OPENMODE:
3415 			if (!(sattr->ia_valid & ATTR_SIZE)) {
3416 				pr_warn_once("NFSv4: server %s is incorrectly "
3417 						"applying open mode checks to "
3418 						"a SETATTR that is not "
3419 						"changing file size.\n",
3420 						server->nfs_client->cl_hostname);
3421 			}
3422 			if (state && !(state->state & FMODE_WRITE)) {
3423 				err = -EBADF;
3424 				if (sattr->ia_valid & ATTR_OPEN)
3425 					err = -EACCES;
3426 				goto out;
3427 			}
3428 		}
3429 		err = nfs4_handle_exception(server, err, &exception);
3430 	} while (exception.retry);
3431 out:
3432 	return err;
3433 }
3434 
3435 static bool
3436 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task)
3437 {
3438 	if (inode == NULL || !nfs_have_layout(inode))
3439 		return false;
3440 
3441 	return pnfs_wait_on_layoutreturn(inode, task);
3442 }
3443 
3444 /*
3445  * Update the seqid of an open stateid
3446  */
3447 static void nfs4_sync_open_stateid(nfs4_stateid *dst,
3448 		struct nfs4_state *state)
3449 {
3450 	__be32 seqid_open;
3451 	u32 dst_seqid;
3452 	int seq;
3453 
3454 	for (;;) {
3455 		if (!nfs4_valid_open_stateid(state))
3456 			break;
3457 		seq = read_seqbegin(&state->seqlock);
3458 		if (!nfs4_state_match_open_stateid_other(state, dst)) {
3459 			nfs4_stateid_copy(dst, &state->open_stateid);
3460 			if (read_seqretry(&state->seqlock, seq))
3461 				continue;
3462 			break;
3463 		}
3464 		seqid_open = state->open_stateid.seqid;
3465 		if (read_seqretry(&state->seqlock, seq))
3466 			continue;
3467 
3468 		dst_seqid = be32_to_cpu(dst->seqid);
3469 		if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) < 0)
3470 			dst->seqid = seqid_open;
3471 		break;
3472 	}
3473 }
3474 
3475 /*
3476  * Update the seqid of an open stateid after receiving
3477  * NFS4ERR_OLD_STATEID
3478  */
3479 static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst,
3480 		struct nfs4_state *state)
3481 {
3482 	__be32 seqid_open;
3483 	u32 dst_seqid;
3484 	bool ret;
3485 	int seq, status = -EAGAIN;
3486 	DEFINE_WAIT(wait);
3487 
3488 	for (;;) {
3489 		ret = false;
3490 		if (!nfs4_valid_open_stateid(state))
3491 			break;
3492 		seq = read_seqbegin(&state->seqlock);
3493 		if (!nfs4_state_match_open_stateid_other(state, dst)) {
3494 			if (read_seqretry(&state->seqlock, seq))
3495 				continue;
3496 			break;
3497 		}
3498 
3499 		write_seqlock(&state->seqlock);
3500 		seqid_open = state->open_stateid.seqid;
3501 
3502 		dst_seqid = be32_to_cpu(dst->seqid);
3503 
3504 		/* Did another OPEN bump the state's seqid?  try again: */
3505 		if ((s32)(be32_to_cpu(seqid_open) - dst_seqid) > 0) {
3506 			dst->seqid = seqid_open;
3507 			write_sequnlock(&state->seqlock);
3508 			ret = true;
3509 			break;
3510 		}
3511 
3512 		/* server says we're behind but we haven't seen the update yet */
3513 		set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
3514 		prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
3515 		write_sequnlock(&state->seqlock);
3516 		trace_nfs4_close_stateid_update_wait(state->inode, dst, 0);
3517 
3518 		if (fatal_signal_pending(current))
3519 			status = -EINTR;
3520 		else
3521 			if (schedule_timeout(5*HZ) != 0)
3522 				status = 0;
3523 
3524 		finish_wait(&state->waitq, &wait);
3525 
3526 		if (!status)
3527 			continue;
3528 		if (status == -EINTR)
3529 			break;
3530 
3531 		/* we slept the whole 5 seconds, we must have lost a seqid */
3532 		dst->seqid = cpu_to_be32(dst_seqid + 1);
3533 		ret = true;
3534 		break;
3535 	}
3536 
3537 	return ret;
3538 }
3539 
3540 struct nfs4_closedata {
3541 	struct inode *inode;
3542 	struct nfs4_state *state;
3543 	struct nfs_closeargs arg;
3544 	struct nfs_closeres res;
3545 	struct {
3546 		struct nfs4_layoutreturn_args arg;
3547 		struct nfs4_layoutreturn_res res;
3548 		struct nfs4_xdr_opaque_data ld_private;
3549 		u32 roc_barrier;
3550 		bool roc;
3551 	} lr;
3552 	struct nfs_fattr fattr;
3553 	unsigned long timestamp;
3554 };
3555 
3556 static void nfs4_free_closedata(void *data)
3557 {
3558 	struct nfs4_closedata *calldata = data;
3559 	struct nfs4_state_owner *sp = calldata->state->owner;
3560 	struct super_block *sb = calldata->state->inode->i_sb;
3561 
3562 	if (calldata->lr.roc)
3563 		pnfs_roc_release(&calldata->lr.arg, &calldata->lr.res,
3564 				calldata->res.lr_ret);
3565 	nfs4_put_open_state(calldata->state);
3566 	nfs_free_seqid(calldata->arg.seqid);
3567 	nfs4_put_state_owner(sp);
3568 	nfs_sb_deactive(sb);
3569 	kfree(calldata);
3570 }
3571 
3572 static void nfs4_close_done(struct rpc_task *task, void *data)
3573 {
3574 	struct nfs4_closedata *calldata = data;
3575 	struct nfs4_state *state = calldata->state;
3576 	struct nfs_server *server = NFS_SERVER(calldata->inode);
3577 	nfs4_stateid *res_stateid = NULL;
3578 	struct nfs4_exception exception = {
3579 		.state = state,
3580 		.inode = calldata->inode,
3581 		.stateid = &calldata->arg.stateid,
3582 	};
3583 
3584 	if (!nfs4_sequence_done(task, &calldata->res.seq_res))
3585 		return;
3586 	trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
3587 
3588 	/* Handle Layoutreturn errors */
3589 	if (pnfs_roc_done(task, &calldata->arg.lr_args, &calldata->res.lr_res,
3590 			  &calldata->res.lr_ret) == -EAGAIN)
3591 		goto out_restart;
3592 
3593 	/* hmm. we are done with the inode, and in the process of freeing
3594 	 * the state_owner. we keep this around to process errors
3595 	 */
3596 	switch (task->tk_status) {
3597 		case 0:
3598 			res_stateid = &calldata->res.stateid;
3599 			renew_lease(server, calldata->timestamp);
3600 			break;
3601 		case -NFS4ERR_ACCESS:
3602 			if (calldata->arg.bitmask != NULL) {
3603 				calldata->arg.bitmask = NULL;
3604 				calldata->res.fattr = NULL;
3605 				goto out_restart;
3606 
3607 			}
3608 			break;
3609 		case -NFS4ERR_OLD_STATEID:
3610 			/* Did we race with OPEN? */
3611 			if (nfs4_refresh_open_old_stateid(&calldata->arg.stateid,
3612 						state))
3613 				goto out_restart;
3614 			goto out_release;
3615 		case -NFS4ERR_ADMIN_REVOKED:
3616 		case -NFS4ERR_STALE_STATEID:
3617 		case -NFS4ERR_EXPIRED:
3618 			nfs4_free_revoked_stateid(server,
3619 					&calldata->arg.stateid,
3620 					task->tk_msg.rpc_cred);
3621 			fallthrough;
3622 		case -NFS4ERR_BAD_STATEID:
3623 			if (calldata->arg.fmode == 0)
3624 				break;
3625 			fallthrough;
3626 		default:
3627 			task->tk_status = nfs4_async_handle_exception(task,
3628 					server, task->tk_status, &exception);
3629 			if (exception.retry)
3630 				goto out_restart;
3631 	}
3632 	nfs_clear_open_stateid(state, &calldata->arg.stateid,
3633 			res_stateid, calldata->arg.fmode);
3634 out_release:
3635 	task->tk_status = 0;
3636 	nfs_release_seqid(calldata->arg.seqid);
3637 	nfs_refresh_inode(calldata->inode, &calldata->fattr);
3638 	dprintk("%s: ret = %d\n", __func__, task->tk_status);
3639 	return;
3640 out_restart:
3641 	task->tk_status = 0;
3642 	rpc_restart_call_prepare(task);
3643 	goto out_release;
3644 }
3645 
3646 static void nfs4_close_prepare(struct rpc_task *task, void *data)
3647 {
3648 	struct nfs4_closedata *calldata = data;
3649 	struct nfs4_state *state = calldata->state;
3650 	struct inode *inode = calldata->inode;
3651 	struct nfs_server *server = NFS_SERVER(inode);
3652 	struct pnfs_layout_hdr *lo;
3653 	bool is_rdonly, is_wronly, is_rdwr;
3654 	int call_close = 0;
3655 
3656 	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
3657 		goto out_wait;
3658 
3659 	task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
3660 	spin_lock(&state->owner->so_lock);
3661 	is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
3662 	is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
3663 	is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
3664 	/* Calculate the change in open mode */
3665 	calldata->arg.fmode = 0;
3666 	if (state->n_rdwr == 0) {
3667 		if (state->n_rdonly == 0)
3668 			call_close |= is_rdonly;
3669 		else if (is_rdonly)
3670 			calldata->arg.fmode |= FMODE_READ;
3671 		if (state->n_wronly == 0)
3672 			call_close |= is_wronly;
3673 		else if (is_wronly)
3674 			calldata->arg.fmode |= FMODE_WRITE;
3675 		if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
3676 			call_close |= is_rdwr;
3677 	} else if (is_rdwr)
3678 		calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
3679 
3680 	nfs4_sync_open_stateid(&calldata->arg.stateid, state);
3681 	if (!nfs4_valid_open_stateid(state))
3682 		call_close = 0;
3683 	spin_unlock(&state->owner->so_lock);
3684 
3685 	if (!call_close) {
3686 		/* Note: exit _without_ calling nfs4_close_done */
3687 		goto out_no_action;
3688 	}
3689 
3690 	if (!calldata->lr.roc && nfs4_wait_on_layoutreturn(inode, task)) {
3691 		nfs_release_seqid(calldata->arg.seqid);
3692 		goto out_wait;
3693 	}
3694 
3695 	lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL;
3696 	if (lo && !pnfs_layout_is_valid(lo)) {
3697 		calldata->arg.lr_args = NULL;
3698 		calldata->res.lr_res = NULL;
3699 	}
3700 
3701 	if (calldata->arg.fmode == 0)
3702 		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
3703 
3704 	if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) {
3705 		/* Close-to-open cache consistency revalidation */
3706 		if (!nfs4_have_delegation(inode, FMODE_READ)) {
3707 			nfs4_bitmask_set(calldata->arg.bitmask_store,
3708 					 server->cache_consistency_bitmask,
3709 					 inode, 0);
3710 			calldata->arg.bitmask = calldata->arg.bitmask_store;
3711 		} else
3712 			calldata->arg.bitmask = NULL;
3713 	}
3714 
3715 	calldata->arg.share_access =
3716 		nfs4_map_atomic_open_share(NFS_SERVER(inode),
3717 				calldata->arg.fmode, 0);
3718 
3719 	if (calldata->res.fattr == NULL)
3720 		calldata->arg.bitmask = NULL;
3721 	else if (calldata->arg.bitmask == NULL)
3722 		calldata->res.fattr = NULL;
3723 	calldata->timestamp = jiffies;
3724 	if (nfs4_setup_sequence(NFS_SERVER(inode)->nfs_client,
3725 				&calldata->arg.seq_args,
3726 				&calldata->res.seq_res,
3727 				task) != 0)
3728 		nfs_release_seqid(calldata->arg.seqid);
3729 	return;
3730 out_no_action:
3731 	task->tk_action = NULL;
3732 out_wait:
3733 	nfs4_sequence_done(task, &calldata->res.seq_res);
3734 }
3735 
3736 static const struct rpc_call_ops nfs4_close_ops = {
3737 	.rpc_call_prepare = nfs4_close_prepare,
3738 	.rpc_call_done = nfs4_close_done,
3739 	.rpc_release = nfs4_free_closedata,
3740 };
3741 
3742 /*
3743  * It is possible for data to be read/written from a mem-mapped file
3744  * after the sys_close call (which hits the vfs layer as a flush).
3745  * This means that we can't safely call nfsv4 close on a file until
3746  * the inode is cleared. This in turn means that we are not good
3747  * NFSv4 citizens - we do not indicate to the server to update the file's
3748  * share state even when we are done with one of the three share
3749  * stateid's in the inode.
3750  *
3751  * NOTE: Caller must be holding the sp->so_owner semaphore!
3752  */
3753 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
3754 {
3755 	struct nfs_server *server = NFS_SERVER(state->inode);
3756 	struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
3757 	struct nfs4_closedata *calldata;
3758 	struct nfs4_state_owner *sp = state->owner;
3759 	struct rpc_task *task;
3760 	struct rpc_message msg = {
3761 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
3762 		.rpc_cred = state->owner->so_cred,
3763 	};
3764 	struct rpc_task_setup task_setup_data = {
3765 		.rpc_client = server->client,
3766 		.rpc_message = &msg,
3767 		.callback_ops = &nfs4_close_ops,
3768 		.workqueue = nfsiod_workqueue,
3769 		.flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
3770 	};
3771 	int status = -ENOMEM;
3772 
3773 	if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE))
3774 		task_setup_data.flags |= RPC_TASK_MOVEABLE;
3775 
3776 	nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
3777 		&task_setup_data.rpc_client, &msg);
3778 
3779 	calldata = kzalloc(sizeof(*calldata), gfp_mask);
3780 	if (calldata == NULL)
3781 		goto out;
3782 	nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1, 0);
3783 	calldata->inode = state->inode;
3784 	calldata->state = state;
3785 	calldata->arg.fh = NFS_FH(state->inode);
3786 	if (!nfs4_copy_open_stateid(&calldata->arg.stateid, state))
3787 		goto out_free_calldata;
3788 	/* Serialization for the sequence id */
3789 	alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
3790 	calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
3791 	if (IS_ERR(calldata->arg.seqid))
3792 		goto out_free_calldata;
3793 	nfs_fattr_init(&calldata->fattr);
3794 	calldata->arg.fmode = 0;
3795 	calldata->lr.arg.ld_private = &calldata->lr.ld_private;
3796 	calldata->res.fattr = &calldata->fattr;
3797 	calldata->res.seqid = calldata->arg.seqid;
3798 	calldata->res.server = server;
3799 	calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
3800 	calldata->lr.roc = pnfs_roc(state->inode,
3801 			&calldata->lr.arg, &calldata->lr.res, msg.rpc_cred);
3802 	if (calldata->lr.roc) {
3803 		calldata->arg.lr_args = &calldata->lr.arg;
3804 		calldata->res.lr_res = &calldata->lr.res;
3805 	}
3806 	nfs_sb_active(calldata->inode->i_sb);
3807 
3808 	msg.rpc_argp = &calldata->arg;
3809 	msg.rpc_resp = &calldata->res;
3810 	task_setup_data.callback_data = calldata;
3811 	task = rpc_run_task(&task_setup_data);
3812 	if (IS_ERR(task))
3813 		return PTR_ERR(task);
3814 	status = 0;
3815 	if (wait)
3816 		status = rpc_wait_for_completion_task(task);
3817 	rpc_put_task(task);
3818 	return status;
3819 out_free_calldata:
3820 	kfree(calldata);
3821 out:
3822 	nfs4_put_open_state(state);
3823 	nfs4_put_state_owner(sp);
3824 	return status;
3825 }
3826 
3827 static struct inode *
3828 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
3829 		int open_flags, struct iattr *attr, int *opened)
3830 {
3831 	struct nfs4_state *state;
3832 	struct nfs4_label l, *label;
3833 
3834 	label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
3835 
3836 	/* Protect against concurrent sillydeletes */
3837 	state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
3838 
3839 	nfs4_label_release_security(label);
3840 
3841 	if (IS_ERR(state))
3842 		return ERR_CAST(state);
3843 	return state->inode;
3844 }
3845 
3846 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
3847 {
3848 	if (ctx->state == NULL)
3849 		return;
3850 	if (is_sync)
3851 		nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx));
3852 	else
3853 		nfs4_close_state(ctx->state, _nfs4_ctx_to_openmode(ctx));
3854 }
3855 
3856 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
3857 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
3858 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_XATTR_SUPPORT - 1UL)
3859 
3860 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3861 {
3862 	u32 bitmask[3] = {}, minorversion = server->nfs_client->cl_minorversion;
3863 	struct nfs4_server_caps_arg args = {
3864 		.fhandle = fhandle,
3865 		.bitmask = bitmask,
3866 	};
3867 	struct nfs4_server_caps_res res = {};
3868 	struct rpc_message msg = {
3869 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
3870 		.rpc_argp = &args,
3871 		.rpc_resp = &res,
3872 	};
3873 	int status;
3874 	int i;
3875 
3876 	bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS |
3877 		     FATTR4_WORD0_FH_EXPIRE_TYPE |
3878 		     FATTR4_WORD0_LINK_SUPPORT |
3879 		     FATTR4_WORD0_SYMLINK_SUPPORT |
3880 		     FATTR4_WORD0_ACLSUPPORT |
3881 		     FATTR4_WORD0_CASE_INSENSITIVE |
3882 		     FATTR4_WORD0_CASE_PRESERVING;
3883 	if (minorversion)
3884 		bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT;
3885 
3886 	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3887 	if (status == 0) {
3888 		/* Sanity check the server answers */
3889 		switch (minorversion) {
3890 		case 0:
3891 			res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK;
3892 			res.attr_bitmask[2] = 0;
3893 			break;
3894 		case 1:
3895 			res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK;
3896 			break;
3897 		case 2:
3898 			res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
3899 		}
3900 		memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
3901 		server->caps &= ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS |
3902 				  NFS_CAP_SYMLINKS| NFS_CAP_SECURITY_LABEL);
3903 		server->fattr_valid = NFS_ATTR_FATTR_V4;
3904 		if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
3905 				res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
3906 			server->caps |= NFS_CAP_ACLS;
3907 		if (res.has_links != 0)
3908 			server->caps |= NFS_CAP_HARDLINKS;
3909 		if (res.has_symlinks != 0)
3910 			server->caps |= NFS_CAP_SYMLINKS;
3911 		if (res.case_insensitive)
3912 			server->caps |= NFS_CAP_CASE_INSENSITIVE;
3913 		if (res.case_preserving)
3914 			server->caps |= NFS_CAP_CASE_PRESERVING;
3915 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
3916 		if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
3917 			server->caps |= NFS_CAP_SECURITY_LABEL;
3918 #endif
3919 		if (res.attr_bitmask[0] & FATTR4_WORD0_FS_LOCATIONS)
3920 			server->caps |= NFS_CAP_FS_LOCATIONS;
3921 		if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID))
3922 			server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID;
3923 		if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE))
3924 			server->fattr_valid &= ~NFS_ATTR_FATTR_MODE;
3925 		if (!(res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS))
3926 			server->fattr_valid &= ~NFS_ATTR_FATTR_NLINK;
3927 		if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER))
3928 			server->fattr_valid &= ~(NFS_ATTR_FATTR_OWNER |
3929 				NFS_ATTR_FATTR_OWNER_NAME);
3930 		if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP))
3931 			server->fattr_valid &= ~(NFS_ATTR_FATTR_GROUP |
3932 				NFS_ATTR_FATTR_GROUP_NAME);
3933 		if (!(res.attr_bitmask[1] & FATTR4_WORD1_SPACE_USED))
3934 			server->fattr_valid &= ~NFS_ATTR_FATTR_SPACE_USED;
3935 		if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS))
3936 			server->fattr_valid &= ~NFS_ATTR_FATTR_ATIME;
3937 		if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA))
3938 			server->fattr_valid &= ~NFS_ATTR_FATTR_CTIME;
3939 		if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY))
3940 			server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME;
3941 		memcpy(server->attr_bitmask_nl, res.attr_bitmask,
3942 				sizeof(server->attr_bitmask));
3943 		server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
3944 
3945 		memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
3946 		server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
3947 		server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
3948 		server->cache_consistency_bitmask[2] = 0;
3949 
3950 		/* Avoid a regression due to buggy server */
3951 		for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++)
3952 			res.exclcreat_bitmask[i] &= res.attr_bitmask[i];
3953 		memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask,
3954 			sizeof(server->exclcreat_bitmask));
3955 
3956 		server->acl_bitmask = res.acl_bitmask;
3957 		server->fh_expire_type = res.fh_expire_type;
3958 	}
3959 
3960 	return status;
3961 }
3962 
3963 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3964 {
3965 	struct nfs4_exception exception = {
3966 		.interruptible = true,
3967 	};
3968 	int err;
3969 
3970 	nfs4_server_set_init_caps(server);
3971 	do {
3972 		err = nfs4_handle_exception(server,
3973 				_nfs4_server_capabilities(server, fhandle),
3974 				&exception);
3975 	} while (exception.retry);
3976 	return err;
3977 }
3978 
3979 static void test_fs_location_for_trunking(struct nfs4_fs_location *location,
3980 					  struct nfs_client *clp,
3981 					  struct nfs_server *server)
3982 {
3983 	int i;
3984 
3985 	for (i = 0; i < location->nservers; i++) {
3986 		struct nfs4_string *srv_loc = &location->servers[i];
3987 		struct sockaddr_storage addr;
3988 		size_t addrlen;
3989 		struct xprt_create xprt_args = {
3990 			.ident = 0,
3991 			.net = clp->cl_net,
3992 		};
3993 		struct nfs4_add_xprt_data xprtdata = {
3994 			.clp = clp,
3995 		};
3996 		struct rpc_add_xprt_test rpcdata = {
3997 			.add_xprt_test = clp->cl_mvops->session_trunk,
3998 			.data = &xprtdata,
3999 		};
4000 		char *servername = NULL;
4001 
4002 		if (!srv_loc->len)
4003 			continue;
4004 
4005 		addrlen = nfs_parse_server_name(srv_loc->data, srv_loc->len,
4006 						&addr, sizeof(addr),
4007 						clp->cl_net, server->port);
4008 		if (!addrlen)
4009 			return;
4010 		xprt_args.dstaddr = (struct sockaddr *)&addr;
4011 		xprt_args.addrlen = addrlen;
4012 		servername = kmalloc(srv_loc->len + 1, GFP_KERNEL);
4013 		if (!servername)
4014 			return;
4015 		memcpy(servername, srv_loc->data, srv_loc->len);
4016 		servername[srv_loc->len] = '\0';
4017 		xprt_args.servername = servername;
4018 
4019 		xprtdata.cred = nfs4_get_clid_cred(clp);
4020 		rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
4021 				  rpc_clnt_setup_test_and_add_xprt,
4022 				  &rpcdata);
4023 		if (xprtdata.cred)
4024 			put_cred(xprtdata.cred);
4025 		kfree(servername);
4026 	}
4027 }
4028 
4029 static int _nfs4_discover_trunking(struct nfs_server *server,
4030 				   struct nfs_fh *fhandle)
4031 {
4032 	struct nfs4_fs_locations *locations = NULL;
4033 	struct page *page;
4034 	const struct cred *cred;
4035 	struct nfs_client *clp = server->nfs_client;
4036 	const struct nfs4_state_maintenance_ops *ops =
4037 		clp->cl_mvops->state_renewal_ops;
4038 	int status = -ENOMEM, i;
4039 
4040 	cred = ops->get_state_renewal_cred(clp);
4041 	if (cred == NULL) {
4042 		cred = nfs4_get_clid_cred(clp);
4043 		if (cred == NULL)
4044 			return -ENOKEY;
4045 	}
4046 
4047 	page = alloc_page(GFP_KERNEL);
4048 	if (!page)
4049 		goto out_put_cred;
4050 	locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
4051 	if (!locations)
4052 		goto out_free;
4053 	locations->fattr = nfs_alloc_fattr();
4054 	if (!locations->fattr)
4055 		goto out_free_2;
4056 
4057 	status = nfs4_proc_get_locations(server, fhandle, locations, page,
4058 					 cred);
4059 	if (status)
4060 		goto out_free_3;
4061 
4062 	for (i = 0; i < locations->nlocations; i++)
4063 		test_fs_location_for_trunking(&locations->locations[i], clp,
4064 					      server);
4065 out_free_3:
4066 	kfree(locations->fattr);
4067 out_free_2:
4068 	kfree(locations);
4069 out_free:
4070 	__free_page(page);
4071 out_put_cred:
4072 	put_cred(cred);
4073 	return status;
4074 }
4075 
4076 static int nfs4_discover_trunking(struct nfs_server *server,
4077 				  struct nfs_fh *fhandle)
4078 {
4079 	struct nfs4_exception exception = {
4080 		.interruptible = true,
4081 	};
4082 	struct nfs_client *clp = server->nfs_client;
4083 	int err = 0;
4084 
4085 	if (!nfs4_has_session(clp))
4086 		goto out;
4087 	do {
4088 		err = nfs4_handle_exception(server,
4089 				_nfs4_discover_trunking(server, fhandle),
4090 				&exception);
4091 	} while (exception.retry);
4092 out:
4093 	return err;
4094 }
4095 
4096 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
4097 		struct nfs_fsinfo *info)
4098 {
4099 	u32 bitmask[3];
4100 	struct nfs4_lookup_root_arg args = {
4101 		.bitmask = bitmask,
4102 	};
4103 	struct nfs4_lookup_res res = {
4104 		.server = server,
4105 		.fattr = info->fattr,
4106 		.fh = fhandle,
4107 	};
4108 	struct rpc_message msg = {
4109 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
4110 		.rpc_argp = &args,
4111 		.rpc_resp = &res,
4112 	};
4113 
4114 	bitmask[0] = nfs4_fattr_bitmap[0];
4115 	bitmask[1] = nfs4_fattr_bitmap[1];
4116 	/*
4117 	 * Process the label in the upcoming getfattr
4118 	 */
4119 	bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL;
4120 
4121 	nfs_fattr_init(info->fattr);
4122 	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4123 }
4124 
4125 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
4126 		struct nfs_fsinfo *info)
4127 {
4128 	struct nfs4_exception exception = {
4129 		.interruptible = true,
4130 	};
4131 	int err;
4132 	do {
4133 		err = _nfs4_lookup_root(server, fhandle, info);
4134 		trace_nfs4_lookup_root(server, fhandle, info->fattr, err);
4135 		switch (err) {
4136 		case 0:
4137 		case -NFS4ERR_WRONGSEC:
4138 			goto out;
4139 		default:
4140 			err = nfs4_handle_exception(server, err, &exception);
4141 		}
4142 	} while (exception.retry);
4143 out:
4144 	return err;
4145 }
4146 
4147 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
4148 				struct nfs_fsinfo *info, rpc_authflavor_t flavor)
4149 {
4150 	struct rpc_auth_create_args auth_args = {
4151 		.pseudoflavor = flavor,
4152 	};
4153 	struct rpc_auth *auth;
4154 
4155 	auth = rpcauth_create(&auth_args, server->client);
4156 	if (IS_ERR(auth))
4157 		return -EACCES;
4158 	return nfs4_lookup_root(server, fhandle, info);
4159 }
4160 
4161 /*
4162  * Retry pseudoroot lookup with various security flavors.  We do this when:
4163  *
4164  *   NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC
4165  *   NFSv4.1: the server does not support the SECINFO_NO_NAME operation
4166  *
4167  * Returns zero on success, or a negative NFS4ERR value, or a
4168  * negative errno value.
4169  */
4170 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
4171 			      struct nfs_fsinfo *info)
4172 {
4173 	/* Per 3530bis 15.33.5 */
4174 	static const rpc_authflavor_t flav_array[] = {
4175 		RPC_AUTH_GSS_KRB5P,
4176 		RPC_AUTH_GSS_KRB5I,
4177 		RPC_AUTH_GSS_KRB5,
4178 		RPC_AUTH_UNIX,			/* courtesy */
4179 		RPC_AUTH_NULL,
4180 	};
4181 	int status = -EPERM;
4182 	size_t i;
4183 
4184 	if (server->auth_info.flavor_len > 0) {
4185 		/* try each flavor specified by user */
4186 		for (i = 0; i < server->auth_info.flavor_len; i++) {
4187 			status = nfs4_lookup_root_sec(server, fhandle, info,
4188 						server->auth_info.flavors[i]);
4189 			if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
4190 				continue;
4191 			break;
4192 		}
4193 	} else {
4194 		/* no flavors specified by user, try default list */
4195 		for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
4196 			status = nfs4_lookup_root_sec(server, fhandle, info,
4197 						      flav_array[i]);
4198 			if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
4199 				continue;
4200 			break;
4201 		}
4202 	}
4203 
4204 	/*
4205 	 * -EACCES could mean that the user doesn't have correct permissions
4206 	 * to access the mount.  It could also mean that we tried to mount
4207 	 * with a gss auth flavor, but rpc.gssd isn't running.  Either way,
4208 	 * existing mount programs don't handle -EACCES very well so it should
4209 	 * be mapped to -EPERM instead.
4210 	 */
4211 	if (status == -EACCES)
4212 		status = -EPERM;
4213 	return status;
4214 }
4215 
4216 /**
4217  * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
4218  * @server: initialized nfs_server handle
4219  * @fhandle: we fill in the pseudo-fs root file handle
4220  * @info: we fill in an FSINFO struct
4221  * @auth_probe: probe the auth flavours
4222  *
4223  * Returns zero on success, or a negative errno.
4224  */
4225 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
4226 			 struct nfs_fsinfo *info,
4227 			 bool auth_probe)
4228 {
4229 	int status = 0;
4230 
4231 	if (!auth_probe)
4232 		status = nfs4_lookup_root(server, fhandle, info);
4233 
4234 	if (auth_probe || status == NFS4ERR_WRONGSEC)
4235 		status = server->nfs_client->cl_mvops->find_root_sec(server,
4236 				fhandle, info);
4237 
4238 	if (status == 0)
4239 		status = nfs4_server_capabilities(server, fhandle);
4240 	if (status == 0)
4241 		status = nfs4_do_fsinfo(server, fhandle, info);
4242 
4243 	return nfs4_map_errors(status);
4244 }
4245 
4246 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
4247 			      struct nfs_fsinfo *info)
4248 {
4249 	int error;
4250 	struct nfs_fattr *fattr = info->fattr;
4251 
4252 	error = nfs4_server_capabilities(server, mntfh);
4253 	if (error < 0) {
4254 		dprintk("nfs4_get_root: getcaps error = %d\n", -error);
4255 		return error;
4256 	}
4257 
4258 	error = nfs4_proc_getattr(server, mntfh, fattr, NULL);
4259 	if (error < 0) {
4260 		dprintk("nfs4_get_root: getattr error = %d\n", -error);
4261 		goto out;
4262 	}
4263 
4264 	if (fattr->valid & NFS_ATTR_FATTR_FSID &&
4265 	    !nfs_fsid_equal(&server->fsid, &fattr->fsid))
4266 		memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
4267 
4268 out:
4269 	return error;
4270 }
4271 
4272 /*
4273  * Get locations and (maybe) other attributes of a referral.
4274  * Note that we'll actually follow the referral later when
4275  * we detect fsid mismatch in inode revalidation
4276  */
4277 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
4278 			     const struct qstr *name, struct nfs_fattr *fattr,
4279 			     struct nfs_fh *fhandle)
4280 {
4281 	int status = -ENOMEM;
4282 	struct page *page = NULL;
4283 	struct nfs4_fs_locations *locations = NULL;
4284 
4285 	page = alloc_page(GFP_KERNEL);
4286 	if (page == NULL)
4287 		goto out;
4288 	locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
4289 	if (locations == NULL)
4290 		goto out;
4291 
4292 	locations->fattr = fattr;
4293 
4294 	status = nfs4_proc_fs_locations(client, dir, name, locations, page);
4295 	if (status != 0)
4296 		goto out;
4297 
4298 	/*
4299 	 * If the fsid didn't change, this is a migration event, not a
4300 	 * referral.  Cause us to drop into the exception handler, which
4301 	 * will kick off migration recovery.
4302 	 */
4303 	if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &fattr->fsid)) {
4304 		dprintk("%s: server did not return a different fsid for"
4305 			" a referral at %s\n", __func__, name->name);
4306 		status = -NFS4ERR_MOVED;
4307 		goto out;
4308 	}
4309 	/* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
4310 	nfs_fixup_referral_attributes(fattr);
4311 	memset(fhandle, 0, sizeof(struct nfs_fh));
4312 out:
4313 	if (page)
4314 		__free_page(page);
4315 	kfree(locations);
4316 	return status;
4317 }
4318 
4319 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
4320 				struct nfs_fattr *fattr, struct inode *inode)
4321 {
4322 	__u32 bitmask[NFS4_BITMASK_SZ];
4323 	struct nfs4_getattr_arg args = {
4324 		.fh = fhandle,
4325 		.bitmask = bitmask,
4326 	};
4327 	struct nfs4_getattr_res res = {
4328 		.fattr = fattr,
4329 		.server = server,
4330 	};
4331 	struct rpc_message msg = {
4332 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
4333 		.rpc_argp = &args,
4334 		.rpc_resp = &res,
4335 	};
4336 	unsigned short task_flags = 0;
4337 
4338 	if (nfs4_has_session(server->nfs_client))
4339 		task_flags = RPC_TASK_MOVEABLE;
4340 
4341 	/* Is this is an attribute revalidation, subject to softreval? */
4342 	if (inode && (server->flags & NFS_MOUNT_SOFTREVAL))
4343 		task_flags |= RPC_TASK_TIMEOUT;
4344 
4345 	nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label), inode, 0);
4346 	nfs_fattr_init(fattr);
4347 	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
4348 	return nfs4_do_call_sync(server->client, server, &msg,
4349 			&args.seq_args, &res.seq_res, task_flags);
4350 }
4351 
4352 int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
4353 				struct nfs_fattr *fattr, struct inode *inode)
4354 {
4355 	struct nfs4_exception exception = {
4356 		.interruptible = true,
4357 	};
4358 	int err;
4359 	do {
4360 		err = _nfs4_proc_getattr(server, fhandle, fattr, inode);
4361 		trace_nfs4_getattr(server, fhandle, fattr, err);
4362 		err = nfs4_handle_exception(server, err,
4363 				&exception);
4364 	} while (exception.retry);
4365 	return err;
4366 }
4367 
4368 /*
4369  * The file is not closed if it is opened due to the a request to change
4370  * the size of the file. The open call will not be needed once the
4371  * VFS layer lookup-intents are implemented.
4372  *
4373  * Close is called when the inode is destroyed.
4374  * If we haven't opened the file for O_WRONLY, we
4375  * need to in the size_change case to obtain a stateid.
4376  *
4377  * Got race?
4378  * Because OPEN is always done by name in nfsv4, it is
4379  * possible that we opened a different file by the same
4380  * name.  We can recognize this race condition, but we
4381  * can't do anything about it besides returning an error.
4382  *
4383  * This will be fixed with VFS changes (lookup-intent).
4384  */
4385 static int
4386 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
4387 		  struct iattr *sattr)
4388 {
4389 	struct inode *inode = d_inode(dentry);
4390 	const struct cred *cred = NULL;
4391 	struct nfs_open_context *ctx = NULL;
4392 	int status;
4393 
4394 	if (pnfs_ld_layoutret_on_setattr(inode) &&
4395 	    sattr->ia_valid & ATTR_SIZE &&
4396 	    sattr->ia_size < i_size_read(inode))
4397 		pnfs_commit_and_return_layout(inode);
4398 
4399 	nfs_fattr_init(fattr);
4400 
4401 	/* Deal with open(O_TRUNC) */
4402 	if (sattr->ia_valid & ATTR_OPEN)
4403 		sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME);
4404 
4405 	/* Optimization: if the end result is no change, don't RPC */
4406 	if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
4407 		return 0;
4408 
4409 	/* Search for an existing open(O_WRITE) file */
4410 	if (sattr->ia_valid & ATTR_FILE) {
4411 
4412 		ctx = nfs_file_open_context(sattr->ia_file);
4413 		if (ctx)
4414 			cred = ctx->cred;
4415 	}
4416 
4417 	/* Return any delegations if we're going to change ACLs */
4418 	if ((sattr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
4419 		nfs4_inode_make_writeable(inode);
4420 
4421 	status = nfs4_do_setattr(inode, cred, fattr, sattr, ctx, NULL);
4422 	if (status == 0) {
4423 		nfs_setattr_update_inode(inode, sattr, fattr);
4424 		nfs_setsecurity(inode, fattr);
4425 	}
4426 	return status;
4427 }
4428 
4429 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
4430 		struct dentry *dentry, struct nfs_fh *fhandle,
4431 		struct nfs_fattr *fattr)
4432 {
4433 	struct nfs_server *server = NFS_SERVER(dir);
4434 	int		       status;
4435 	struct nfs4_lookup_arg args = {
4436 		.bitmask = server->attr_bitmask,
4437 		.dir_fh = NFS_FH(dir),
4438 		.name = &dentry->d_name,
4439 	};
4440 	struct nfs4_lookup_res res = {
4441 		.server = server,
4442 		.fattr = fattr,
4443 		.fh = fhandle,
4444 	};
4445 	struct rpc_message msg = {
4446 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
4447 		.rpc_argp = &args,
4448 		.rpc_resp = &res,
4449 	};
4450 	unsigned short task_flags = 0;
4451 
4452 	if (nfs_server_capable(dir, NFS_CAP_MOVEABLE))
4453 		task_flags = RPC_TASK_MOVEABLE;
4454 
4455 	/* Is this is an attribute revalidation, subject to softreval? */
4456 	if (nfs_lookup_is_soft_revalidate(dentry))
4457 		task_flags |= RPC_TASK_TIMEOUT;
4458 
4459 	args.bitmask = nfs4_bitmask(server, fattr->label);
4460 
4461 	nfs_fattr_init(fattr);
4462 
4463 	dprintk("NFS call  lookup %pd2\n", dentry);
4464 	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
4465 	status = nfs4_do_call_sync(clnt, server, &msg,
4466 			&args.seq_args, &res.seq_res, task_flags);
4467 	dprintk("NFS reply lookup: %d\n", status);
4468 	return status;
4469 }
4470 
4471 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
4472 {
4473 	fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
4474 		NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
4475 	fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
4476 	fattr->nlink = 2;
4477 }
4478 
4479 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
4480 				   struct dentry *dentry, struct nfs_fh *fhandle,
4481 				   struct nfs_fattr *fattr)
4482 {
4483 	struct nfs4_exception exception = {
4484 		.interruptible = true,
4485 	};
4486 	struct rpc_clnt *client = *clnt;
4487 	const struct qstr *name = &dentry->d_name;
4488 	int err;
4489 	do {
4490 		err = _nfs4_proc_lookup(client, dir, dentry, fhandle, fattr);
4491 		trace_nfs4_lookup(dir, name, err);
4492 		switch (err) {
4493 		case -NFS4ERR_BADNAME:
4494 			err = -ENOENT;
4495 			goto out;
4496 		case -NFS4ERR_MOVED:
4497 			err = nfs4_get_referral(client, dir, name, fattr, fhandle);
4498 			if (err == -NFS4ERR_MOVED)
4499 				err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
4500 			goto out;
4501 		case -NFS4ERR_WRONGSEC:
4502 			err = -EPERM;
4503 			if (client != *clnt)
4504 				goto out;
4505 			client = nfs4_negotiate_security(client, dir, name);
4506 			if (IS_ERR(client))
4507 				return PTR_ERR(client);
4508 
4509 			exception.retry = 1;
4510 			break;
4511 		default:
4512 			err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
4513 		}
4514 	} while (exception.retry);
4515 
4516 out:
4517 	if (err == 0)
4518 		*clnt = client;
4519 	else if (client != *clnt)
4520 		rpc_shutdown_client(client);
4521 
4522 	return err;
4523 }
4524 
4525 static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry,
4526 			    struct nfs_fh *fhandle, struct nfs_fattr *fattr)
4527 {
4528 	int status;
4529 	struct rpc_clnt *client = NFS_CLIENT(dir);
4530 
4531 	status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr);
4532 	if (client != NFS_CLIENT(dir)) {
4533 		rpc_shutdown_client(client);
4534 		nfs_fixup_secinfo_attributes(fattr);
4535 	}
4536 	return status;
4537 }
4538 
4539 struct rpc_clnt *
4540 nfs4_proc_lookup_mountpoint(struct inode *dir, struct dentry *dentry,
4541 			    struct nfs_fh *fhandle, struct nfs_fattr *fattr)
4542 {
4543 	struct rpc_clnt *client = NFS_CLIENT(dir);
4544 	int status;
4545 
4546 	status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr);
4547 	if (status < 0)
4548 		return ERR_PTR(status);
4549 	return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
4550 }
4551 
4552 static int _nfs4_proc_lookupp(struct inode *inode,
4553 		struct nfs_fh *fhandle, struct nfs_fattr *fattr)
4554 {
4555 	struct rpc_clnt *clnt = NFS_CLIENT(inode);
4556 	struct nfs_server *server = NFS_SERVER(inode);
4557 	int		       status;
4558 	struct nfs4_lookupp_arg args = {
4559 		.bitmask = server->attr_bitmask,
4560 		.fh = NFS_FH(inode),
4561 	};
4562 	struct nfs4_lookupp_res res = {
4563 		.server = server,
4564 		.fattr = fattr,
4565 		.fh = fhandle,
4566 	};
4567 	struct rpc_message msg = {
4568 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUPP],
4569 		.rpc_argp = &args,
4570 		.rpc_resp = &res,
4571 	};
4572 	unsigned short task_flags = 0;
4573 
4574 	if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL)
4575 		task_flags |= RPC_TASK_TIMEOUT;
4576 
4577 	args.bitmask = nfs4_bitmask(server, fattr->label);
4578 
4579 	nfs_fattr_init(fattr);
4580 
4581 	dprintk("NFS call  lookupp ino=0x%lx\n", inode->i_ino);
4582 	status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
4583 				&res.seq_res, task_flags);
4584 	dprintk("NFS reply lookupp: %d\n", status);
4585 	return status;
4586 }
4587 
4588 static int nfs4_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle,
4589 			     struct nfs_fattr *fattr)
4590 {
4591 	struct nfs4_exception exception = {
4592 		.interruptible = true,
4593 	};
4594 	int err;
4595 	do {
4596 		err = _nfs4_proc_lookupp(inode, fhandle, fattr);
4597 		trace_nfs4_lookupp(inode, err);
4598 		err = nfs4_handle_exception(NFS_SERVER(inode), err,
4599 				&exception);
4600 	} while (exception.retry);
4601 	return err;
4602 }
4603 
4604 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry,
4605 			     const struct cred *cred)
4606 {
4607 	struct nfs_server *server = NFS_SERVER(inode);
4608 	struct nfs4_accessargs args = {
4609 		.fh = NFS_FH(inode),
4610 		.access = entry->mask,
4611 	};
4612 	struct nfs4_accessres res = {
4613 		.server = server,
4614 	};
4615 	struct rpc_message msg = {
4616 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
4617 		.rpc_argp = &args,
4618 		.rpc_resp = &res,
4619 		.rpc_cred = cred,
4620 	};
4621 	int status = 0;
4622 
4623 	if (!nfs4_have_delegation(inode, FMODE_READ)) {
4624 		res.fattr = nfs_alloc_fattr();
4625 		if (res.fattr == NULL)
4626 			return -ENOMEM;
4627 		args.bitmask = server->cache_consistency_bitmask;
4628 	}
4629 	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4630 	if (!status) {
4631 		nfs_access_set_mask(entry, res.access);
4632 		if (res.fattr)
4633 			nfs_refresh_inode(inode, res.fattr);
4634 	}
4635 	nfs_free_fattr(res.fattr);
4636 	return status;
4637 }
4638 
4639 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry,
4640 			    const struct cred *cred)
4641 {
4642 	struct nfs4_exception exception = {
4643 		.interruptible = true,
4644 	};
4645 	int err;
4646 	do {
4647 		err = _nfs4_proc_access(inode, entry, cred);
4648 		trace_nfs4_access(inode, err);
4649 		err = nfs4_handle_exception(NFS_SERVER(inode), err,
4650 				&exception);
4651 	} while (exception.retry);
4652 	return err;
4653 }
4654 
4655 /*
4656  * TODO: For the time being, we don't try to get any attributes
4657  * along with any of the zero-copy operations READ, READDIR,
4658  * READLINK, WRITE.
4659  *
4660  * In the case of the first three, we want to put the GETATTR
4661  * after the read-type operation -- this is because it is hard
4662  * to predict the length of a GETATTR response in v4, and thus
4663  * align the READ data correctly.  This means that the GETATTR
4664  * may end up partially falling into the page cache, and we should
4665  * shift it into the 'tail' of the xdr_buf before processing.
4666  * To do this efficiently, we need to know the total length
4667  * of data received, which doesn't seem to be available outside
4668  * of the RPC layer.
4669  *
4670  * In the case of WRITE, we also want to put the GETATTR after
4671  * the operation -- in this case because we want to make sure
4672  * we get the post-operation mtime and size.
4673  *
4674  * Both of these changes to the XDR layer would in fact be quite
4675  * minor, but I decided to leave them for a subsequent patch.
4676  */
4677 static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
4678 		unsigned int pgbase, unsigned int pglen)
4679 {
4680 	struct nfs4_readlink args = {
4681 		.fh       = NFS_FH(inode),
4682 		.pgbase	  = pgbase,
4683 		.pglen    = pglen,
4684 		.pages    = &page,
4685 	};
4686 	struct nfs4_readlink_res res;
4687 	struct rpc_message msg = {
4688 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
4689 		.rpc_argp = &args,
4690 		.rpc_resp = &res,
4691 	};
4692 
4693 	return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
4694 }
4695 
4696 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
4697 		unsigned int pgbase, unsigned int pglen)
4698 {
4699 	struct nfs4_exception exception = {
4700 		.interruptible = true,
4701 	};
4702 	int err;
4703 	do {
4704 		err = _nfs4_proc_readlink(inode, page, pgbase, pglen);
4705 		trace_nfs4_readlink(inode, err);
4706 		err = nfs4_handle_exception(NFS_SERVER(inode), err,
4707 				&exception);
4708 	} while (exception.retry);
4709 	return err;
4710 }
4711 
4712 /*
4713  * This is just for mknod.  open(O_CREAT) will always do ->open_context().
4714  */
4715 static int
4716 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
4717 		 int flags)
4718 {
4719 	struct nfs_server *server = NFS_SERVER(dir);
4720 	struct nfs4_label l, *ilabel;
4721 	struct nfs_open_context *ctx;
4722 	struct nfs4_state *state;
4723 	int status = 0;
4724 
4725 	ctx = alloc_nfs_open_context(dentry, FMODE_READ, NULL);
4726 	if (IS_ERR(ctx))
4727 		return PTR_ERR(ctx);
4728 
4729 	ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
4730 
4731 	if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
4732 		sattr->ia_mode &= ~current_umask();
4733 	state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL);
4734 	if (IS_ERR(state)) {
4735 		status = PTR_ERR(state);
4736 		goto out;
4737 	}
4738 out:
4739 	nfs4_label_release_security(ilabel);
4740 	put_nfs_open_context(ctx);
4741 	return status;
4742 }
4743 
4744 static int
4745 _nfs4_proc_remove(struct inode *dir, const struct qstr *name, u32 ftype)
4746 {
4747 	struct nfs_server *server = NFS_SERVER(dir);
4748 	struct nfs_removeargs args = {
4749 		.fh = NFS_FH(dir),
4750 		.name = *name,
4751 	};
4752 	struct nfs_removeres res = {
4753 		.server = server,
4754 	};
4755 	struct rpc_message msg = {
4756 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
4757 		.rpc_argp = &args,
4758 		.rpc_resp = &res,
4759 	};
4760 	unsigned long timestamp = jiffies;
4761 	int status;
4762 
4763 	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
4764 	if (status == 0) {
4765 		spin_lock(&dir->i_lock);
4766 		/* Removing a directory decrements nlink in the parent */
4767 		if (ftype == NF4DIR && dir->i_nlink > 2)
4768 			nfs4_dec_nlink_locked(dir);
4769 		nfs4_update_changeattr_locked(dir, &res.cinfo, timestamp,
4770 					      NFS_INO_INVALID_DATA);
4771 		spin_unlock(&dir->i_lock);
4772 	}
4773 	return status;
4774 }
4775 
4776 static int nfs4_proc_remove(struct inode *dir, struct dentry *dentry)
4777 {
4778 	struct nfs4_exception exception = {
4779 		.interruptible = true,
4780 	};
4781 	struct inode *inode = d_inode(dentry);
4782 	int err;
4783 
4784 	if (inode) {
4785 		if (inode->i_nlink == 1)
4786 			nfs4_inode_return_delegation(inode);
4787 		else
4788 			nfs4_inode_make_writeable(inode);
4789 	}
4790 	do {
4791 		err = _nfs4_proc_remove(dir, &dentry->d_name, NF4REG);
4792 		trace_nfs4_remove(dir, &dentry->d_name, err);
4793 		err = nfs4_handle_exception(NFS_SERVER(dir), err,
4794 				&exception);
4795 	} while (exception.retry);
4796 	return err;
4797 }
4798 
4799 static int nfs4_proc_rmdir(struct inode *dir, const struct qstr *name)
4800 {
4801 	struct nfs4_exception exception = {
4802 		.interruptible = true,
4803 	};
4804 	int err;
4805 
4806 	do {
4807 		err = _nfs4_proc_remove(dir, name, NF4DIR);
4808 		trace_nfs4_remove(dir, name, err);
4809 		err = nfs4_handle_exception(NFS_SERVER(dir), err,
4810 				&exception);
4811 	} while (exception.retry);
4812 	return err;
4813 }
4814 
4815 static void nfs4_proc_unlink_setup(struct rpc_message *msg,
4816 		struct dentry *dentry,
4817 		struct inode *inode)
4818 {
4819 	struct nfs_removeargs *args = msg->rpc_argp;
4820 	struct nfs_removeres *res = msg->rpc_resp;
4821 
4822 	res->server = NFS_SB(dentry->d_sb);
4823 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
4824 	nfs4_init_sequence(&args->seq_args, &res->seq_res, 1, 0);
4825 
4826 	nfs_fattr_init(res->dir_attr);
4827 
4828 	if (inode) {
4829 		nfs4_inode_return_delegation(inode);
4830 		nfs_d_prune_case_insensitive_aliases(inode);
4831 	}
4832 }
4833 
4834 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
4835 {
4836 	nfs4_setup_sequence(NFS_SB(data->dentry->d_sb)->nfs_client,
4837 			&data->args.seq_args,
4838 			&data->res.seq_res,
4839 			task);
4840 }
4841 
4842 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
4843 {
4844 	struct nfs_unlinkdata *data = task->tk_calldata;
4845 	struct nfs_removeres *res = &data->res;
4846 
4847 	if (!nfs4_sequence_done(task, &res->seq_res))
4848 		return 0;
4849 	if (nfs4_async_handle_error(task, res->server, NULL,
4850 				    &data->timeout) == -EAGAIN)
4851 		return 0;
4852 	if (task->tk_status == 0)
4853 		nfs4_update_changeattr(dir, &res->cinfo,
4854 				res->dir_attr->time_start,
4855 				NFS_INO_INVALID_DATA);
4856 	return 1;
4857 }
4858 
4859 static void nfs4_proc_rename_setup(struct rpc_message *msg,
4860 		struct dentry *old_dentry,
4861 		struct dentry *new_dentry)
4862 {
4863 	struct nfs_renameargs *arg = msg->rpc_argp;
4864 	struct nfs_renameres *res = msg->rpc_resp;
4865 	struct inode *old_inode = d_inode(old_dentry);
4866 	struct inode *new_inode = d_inode(new_dentry);
4867 
4868 	if (old_inode)
4869 		nfs4_inode_make_writeable(old_inode);
4870 	if (new_inode)
4871 		nfs4_inode_return_delegation(new_inode);
4872 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
4873 	res->server = NFS_SB(old_dentry->d_sb);
4874 	nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1, 0);
4875 }
4876 
4877 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
4878 {
4879 	nfs4_setup_sequence(NFS_SERVER(data->old_dir)->nfs_client,
4880 			&data->args.seq_args,
4881 			&data->res.seq_res,
4882 			task);
4883 }
4884 
4885 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
4886 				 struct inode *new_dir)
4887 {
4888 	struct nfs_renamedata *data = task->tk_calldata;
4889 	struct nfs_renameres *res = &data->res;
4890 
4891 	if (!nfs4_sequence_done(task, &res->seq_res))
4892 		return 0;
4893 	if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
4894 		return 0;
4895 
4896 	if (task->tk_status == 0) {
4897 		nfs_d_prune_case_insensitive_aliases(d_inode(data->old_dentry));
4898 		if (new_dir != old_dir) {
4899 			/* Note: If we moved a directory, nlink will change */
4900 			nfs4_update_changeattr(old_dir, &res->old_cinfo,
4901 					res->old_fattr->time_start,
4902 					NFS_INO_INVALID_NLINK |
4903 					    NFS_INO_INVALID_DATA);
4904 			nfs4_update_changeattr(new_dir, &res->new_cinfo,
4905 					res->new_fattr->time_start,
4906 					NFS_INO_INVALID_NLINK |
4907 					    NFS_INO_INVALID_DATA);
4908 		} else
4909 			nfs4_update_changeattr(old_dir, &res->old_cinfo,
4910 					res->old_fattr->time_start,
4911 					NFS_INO_INVALID_DATA);
4912 	}
4913 	return 1;
4914 }
4915 
4916 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
4917 {
4918 	struct nfs_server *server = NFS_SERVER(inode);
4919 	__u32 bitmask[NFS4_BITMASK_SZ];
4920 	struct nfs4_link_arg arg = {
4921 		.fh     = NFS_FH(inode),
4922 		.dir_fh = NFS_FH(dir),
4923 		.name   = name,
4924 		.bitmask = bitmask,
4925 	};
4926 	struct nfs4_link_res res = {
4927 		.server = server,
4928 	};
4929 	struct rpc_message msg = {
4930 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
4931 		.rpc_argp = &arg,
4932 		.rpc_resp = &res,
4933 	};
4934 	int status = -ENOMEM;
4935 
4936 	res.fattr = nfs_alloc_fattr_with_label(server);
4937 	if (res.fattr == NULL)
4938 		goto out;
4939 
4940 	nfs4_inode_make_writeable(inode);
4941 	nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, res.fattr->label), inode,
4942 				NFS_INO_INVALID_CHANGE);
4943 	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4944 	if (!status) {
4945 		nfs4_update_changeattr(dir, &res.cinfo, res.fattr->time_start,
4946 				       NFS_INO_INVALID_DATA);
4947 		nfs4_inc_nlink(inode);
4948 		status = nfs_post_op_update_inode(inode, res.fattr);
4949 		if (!status)
4950 			nfs_setsecurity(inode, res.fattr);
4951 	}
4952 
4953 out:
4954 	nfs_free_fattr(res.fattr);
4955 	return status;
4956 }
4957 
4958 static int nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
4959 {
4960 	struct nfs4_exception exception = {
4961 		.interruptible = true,
4962 	};
4963 	int err;
4964 	do {
4965 		err = nfs4_handle_exception(NFS_SERVER(inode),
4966 				_nfs4_proc_link(inode, dir, name),
4967 				&exception);
4968 	} while (exception.retry);
4969 	return err;
4970 }
4971 
4972 struct nfs4_createdata {
4973 	struct rpc_message msg;
4974 	struct nfs4_create_arg arg;
4975 	struct nfs4_create_res res;
4976 	struct nfs_fh fh;
4977 	struct nfs_fattr fattr;
4978 };
4979 
4980 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
4981 		const struct qstr *name, struct iattr *sattr, u32 ftype)
4982 {
4983 	struct nfs4_createdata *data;
4984 
4985 	data = kzalloc(sizeof(*data), GFP_KERNEL);
4986 	if (data != NULL) {
4987 		struct nfs_server *server = NFS_SERVER(dir);
4988 
4989 		data->fattr.label = nfs4_label_alloc(server, GFP_KERNEL);
4990 		if (IS_ERR(data->fattr.label))
4991 			goto out_free;
4992 
4993 		data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
4994 		data->msg.rpc_argp = &data->arg;
4995 		data->msg.rpc_resp = &data->res;
4996 		data->arg.dir_fh = NFS_FH(dir);
4997 		data->arg.server = server;
4998 		data->arg.name = name;
4999 		data->arg.attrs = sattr;
5000 		data->arg.ftype = ftype;
5001 		data->arg.bitmask = nfs4_bitmask(server, data->fattr.label);
5002 		data->arg.umask = current_umask();
5003 		data->res.server = server;
5004 		data->res.fh = &data->fh;
5005 		data->res.fattr = &data->fattr;
5006 		nfs_fattr_init(data->res.fattr);
5007 	}
5008 	return data;
5009 out_free:
5010 	kfree(data);
5011 	return NULL;
5012 }
5013 
5014 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
5015 {
5016 	int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
5017 				    &data->arg.seq_args, &data->res.seq_res, 1);
5018 	if (status == 0) {
5019 		spin_lock(&dir->i_lock);
5020 		/* Creating a directory bumps nlink in the parent */
5021 		if (data->arg.ftype == NF4DIR)
5022 			nfs4_inc_nlink_locked(dir);
5023 		nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo,
5024 					      data->res.fattr->time_start,
5025 					      NFS_INO_INVALID_DATA);
5026 		spin_unlock(&dir->i_lock);
5027 		status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
5028 	}
5029 	return status;
5030 }
5031 
5032 static void nfs4_free_createdata(struct nfs4_createdata *data)
5033 {
5034 	nfs4_label_free(data->fattr.label);
5035 	kfree(data);
5036 }
5037 
5038 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
5039 		struct folio *folio, unsigned int len, struct iattr *sattr,
5040 		struct nfs4_label *label)
5041 {
5042 	struct page *page = &folio->page;
5043 	struct nfs4_createdata *data;
5044 	int status = -ENAMETOOLONG;
5045 
5046 	if (len > NFS4_MAXPATHLEN)
5047 		goto out;
5048 
5049 	status = -ENOMEM;
5050 	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
5051 	if (data == NULL)
5052 		goto out;
5053 
5054 	data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
5055 	data->arg.u.symlink.pages = &page;
5056 	data->arg.u.symlink.len = len;
5057 	data->arg.label = label;
5058 
5059 	status = nfs4_do_create(dir, dentry, data);
5060 
5061 	nfs4_free_createdata(data);
5062 out:
5063 	return status;
5064 }
5065 
5066 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
5067 		struct folio *folio, unsigned int len, struct iattr *sattr)
5068 {
5069 	struct nfs4_exception exception = {
5070 		.interruptible = true,
5071 	};
5072 	struct nfs4_label l, *label;
5073 	int err;
5074 
5075 	label = nfs4_label_init_security(dir, dentry, sattr, &l);
5076 
5077 	do {
5078 		err = _nfs4_proc_symlink(dir, dentry, folio, len, sattr, label);
5079 		trace_nfs4_symlink(dir, &dentry->d_name, err);
5080 		err = nfs4_handle_exception(NFS_SERVER(dir), err,
5081 				&exception);
5082 	} while (exception.retry);
5083 
5084 	nfs4_label_release_security(label);
5085 	return err;
5086 }
5087 
5088 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
5089 		struct iattr *sattr, struct nfs4_label *label)
5090 {
5091 	struct nfs4_createdata *data;
5092 	int status = -ENOMEM;
5093 
5094 	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
5095 	if (data == NULL)
5096 		goto out;
5097 
5098 	data->arg.label = label;
5099 	status = nfs4_do_create(dir, dentry, data);
5100 
5101 	nfs4_free_createdata(data);
5102 out:
5103 	return status;
5104 }
5105 
5106 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
5107 		struct iattr *sattr)
5108 {
5109 	struct nfs_server *server = NFS_SERVER(dir);
5110 	struct nfs4_exception exception = {
5111 		.interruptible = true,
5112 	};
5113 	struct nfs4_label l, *label;
5114 	int err;
5115 
5116 	label = nfs4_label_init_security(dir, dentry, sattr, &l);
5117 
5118 	if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
5119 		sattr->ia_mode &= ~current_umask();
5120 	do {
5121 		err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
5122 		trace_nfs4_mkdir(dir, &dentry->d_name, err);
5123 		err = nfs4_handle_exception(NFS_SERVER(dir), err,
5124 				&exception);
5125 	} while (exception.retry);
5126 	nfs4_label_release_security(label);
5127 
5128 	return err;
5129 }
5130 
5131 static int _nfs4_proc_readdir(struct nfs_readdir_arg *nr_arg,
5132 			      struct nfs_readdir_res *nr_res)
5133 {
5134 	struct inode		*dir = d_inode(nr_arg->dentry);
5135 	struct nfs_server	*server = NFS_SERVER(dir);
5136 	struct nfs4_readdir_arg args = {
5137 		.fh = NFS_FH(dir),
5138 		.pages = nr_arg->pages,
5139 		.pgbase = 0,
5140 		.count = nr_arg->page_len,
5141 		.plus = nr_arg->plus,
5142 	};
5143 	struct nfs4_readdir_res res;
5144 	struct rpc_message msg = {
5145 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
5146 		.rpc_argp = &args,
5147 		.rpc_resp = &res,
5148 		.rpc_cred = nr_arg->cred,
5149 	};
5150 	int			status;
5151 
5152 	dprintk("%s: dentry = %pd2, cookie = %llu\n", __func__,
5153 		nr_arg->dentry, (unsigned long long)nr_arg->cookie);
5154 	if (!(server->caps & NFS_CAP_SECURITY_LABEL))
5155 		args.bitmask = server->attr_bitmask_nl;
5156 	else
5157 		args.bitmask = server->attr_bitmask;
5158 
5159 	nfs4_setup_readdir(nr_arg->cookie, nr_arg->verf, nr_arg->dentry, &args);
5160 	res.pgbase = args.pgbase;
5161 	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
5162 			&res.seq_res, 0);
5163 	if (status >= 0) {
5164 		memcpy(nr_res->verf, res.verifier.data, NFS4_VERIFIER_SIZE);
5165 		status += args.pgbase;
5166 	}
5167 
5168 	nfs_invalidate_atime(dir);
5169 
5170 	dprintk("%s: returns %d\n", __func__, status);
5171 	return status;
5172 }
5173 
5174 static int nfs4_proc_readdir(struct nfs_readdir_arg *arg,
5175 			     struct nfs_readdir_res *res)
5176 {
5177 	struct nfs4_exception exception = {
5178 		.interruptible = true,
5179 	};
5180 	int err;
5181 	do {
5182 		err = _nfs4_proc_readdir(arg, res);
5183 		trace_nfs4_readdir(d_inode(arg->dentry), err);
5184 		err = nfs4_handle_exception(NFS_SERVER(d_inode(arg->dentry)),
5185 					    err, &exception);
5186 	} while (exception.retry);
5187 	return err;
5188 }
5189 
5190 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
5191 		struct iattr *sattr, struct nfs4_label *label, dev_t rdev)
5192 {
5193 	struct nfs4_createdata *data;
5194 	int mode = sattr->ia_mode;
5195 	int status = -ENOMEM;
5196 
5197 	data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
5198 	if (data == NULL)
5199 		goto out;
5200 
5201 	if (S_ISFIFO(mode))
5202 		data->arg.ftype = NF4FIFO;
5203 	else if (S_ISBLK(mode)) {
5204 		data->arg.ftype = NF4BLK;
5205 		data->arg.u.device.specdata1 = MAJOR(rdev);
5206 		data->arg.u.device.specdata2 = MINOR(rdev);
5207 	}
5208 	else if (S_ISCHR(mode)) {
5209 		data->arg.ftype = NF4CHR;
5210 		data->arg.u.device.specdata1 = MAJOR(rdev);
5211 		data->arg.u.device.specdata2 = MINOR(rdev);
5212 	} else if (!S_ISSOCK(mode)) {
5213 		status = -EINVAL;
5214 		goto out_free;
5215 	}
5216 
5217 	data->arg.label = label;
5218 	status = nfs4_do_create(dir, dentry, data);
5219 out_free:
5220 	nfs4_free_createdata(data);
5221 out:
5222 	return status;
5223 }
5224 
5225 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
5226 		struct iattr *sattr, dev_t rdev)
5227 {
5228 	struct nfs_server *server = NFS_SERVER(dir);
5229 	struct nfs4_exception exception = {
5230 		.interruptible = true,
5231 	};
5232 	struct nfs4_label l, *label;
5233 	int err;
5234 
5235 	label = nfs4_label_init_security(dir, dentry, sattr, &l);
5236 
5237 	if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
5238 		sattr->ia_mode &= ~current_umask();
5239 	do {
5240 		err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev);
5241 		trace_nfs4_mknod(dir, &dentry->d_name, err);
5242 		err = nfs4_handle_exception(NFS_SERVER(dir), err,
5243 				&exception);
5244 	} while (exception.retry);
5245 
5246 	nfs4_label_release_security(label);
5247 
5248 	return err;
5249 }
5250 
5251 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
5252 		 struct nfs_fsstat *fsstat)
5253 {
5254 	struct nfs4_statfs_arg args = {
5255 		.fh = fhandle,
5256 		.bitmask = server->attr_bitmask,
5257 	};
5258 	struct nfs4_statfs_res res = {
5259 		.fsstat = fsstat,
5260 	};
5261 	struct rpc_message msg = {
5262 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
5263 		.rpc_argp = &args,
5264 		.rpc_resp = &res,
5265 	};
5266 
5267 	nfs_fattr_init(fsstat->fattr);
5268 	return  nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5269 }
5270 
5271 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
5272 {
5273 	struct nfs4_exception exception = {
5274 		.interruptible = true,
5275 	};
5276 	int err;
5277 	do {
5278 		err = nfs4_handle_exception(server,
5279 				_nfs4_proc_statfs(server, fhandle, fsstat),
5280 				&exception);
5281 	} while (exception.retry);
5282 	return err;
5283 }
5284 
5285 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
5286 		struct nfs_fsinfo *fsinfo)
5287 {
5288 	struct nfs4_fsinfo_arg args = {
5289 		.fh = fhandle,
5290 		.bitmask = server->attr_bitmask,
5291 	};
5292 	struct nfs4_fsinfo_res res = {
5293 		.fsinfo = fsinfo,
5294 	};
5295 	struct rpc_message msg = {
5296 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
5297 		.rpc_argp = &args,
5298 		.rpc_resp = &res,
5299 	};
5300 
5301 	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5302 }
5303 
5304 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
5305 {
5306 	struct nfs4_exception exception = {
5307 		.interruptible = true,
5308 	};
5309 	int err;
5310 
5311 	do {
5312 		err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
5313 		trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
5314 		if (err == 0) {
5315 			nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time * HZ);
5316 			break;
5317 		}
5318 		err = nfs4_handle_exception(server, err, &exception);
5319 	} while (exception.retry);
5320 	return err;
5321 }
5322 
5323 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
5324 {
5325 	int error;
5326 
5327 	nfs_fattr_init(fsinfo->fattr);
5328 	error = nfs4_do_fsinfo(server, fhandle, fsinfo);
5329 	if (error == 0) {
5330 		/* block layout checks this! */
5331 		server->pnfs_blksize = fsinfo->blksize;
5332 		set_pnfs_layoutdriver(server, fhandle, fsinfo);
5333 	}
5334 
5335 	return error;
5336 }
5337 
5338 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
5339 		struct nfs_pathconf *pathconf)
5340 {
5341 	struct nfs4_pathconf_arg args = {
5342 		.fh = fhandle,
5343 		.bitmask = server->attr_bitmask,
5344 	};
5345 	struct nfs4_pathconf_res res = {
5346 		.pathconf = pathconf,
5347 	};
5348 	struct rpc_message msg = {
5349 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
5350 		.rpc_argp = &args,
5351 		.rpc_resp = &res,
5352 	};
5353 
5354 	/* None of the pathconf attributes are mandatory to implement */
5355 	if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
5356 		memset(pathconf, 0, sizeof(*pathconf));
5357 		return 0;
5358 	}
5359 
5360 	nfs_fattr_init(pathconf->fattr);
5361 	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5362 }
5363 
5364 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
5365 		struct nfs_pathconf *pathconf)
5366 {
5367 	struct nfs4_exception exception = {
5368 		.interruptible = true,
5369 	};
5370 	int err;
5371 
5372 	do {
5373 		err = nfs4_handle_exception(server,
5374 				_nfs4_proc_pathconf(server, fhandle, pathconf),
5375 				&exception);
5376 	} while (exception.retry);
5377 	return err;
5378 }
5379 
5380 int nfs4_set_rw_stateid(nfs4_stateid *stateid,
5381 		const struct nfs_open_context *ctx,
5382 		const struct nfs_lock_context *l_ctx,
5383 		fmode_t fmode)
5384 {
5385 	return nfs4_select_rw_stateid(ctx->state, fmode, l_ctx, stateid, NULL);
5386 }
5387 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
5388 
5389 static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
5390 		const struct nfs_open_context *ctx,
5391 		const struct nfs_lock_context *l_ctx,
5392 		fmode_t fmode)
5393 {
5394 	nfs4_stateid _current_stateid;
5395 
5396 	/* If the current stateid represents a lost lock, then exit */
5397 	if (nfs4_set_rw_stateid(&_current_stateid, ctx, l_ctx, fmode) == -EIO)
5398 		return true;
5399 	return nfs4_stateid_match(stateid, &_current_stateid);
5400 }
5401 
5402 static bool nfs4_error_stateid_expired(int err)
5403 {
5404 	switch (err) {
5405 	case -NFS4ERR_DELEG_REVOKED:
5406 	case -NFS4ERR_ADMIN_REVOKED:
5407 	case -NFS4ERR_BAD_STATEID:
5408 	case -NFS4ERR_STALE_STATEID:
5409 	case -NFS4ERR_OLD_STATEID:
5410 	case -NFS4ERR_OPENMODE:
5411 	case -NFS4ERR_EXPIRED:
5412 		return true;
5413 	}
5414 	return false;
5415 }
5416 
5417 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
5418 {
5419 	struct nfs_server *server = NFS_SERVER(hdr->inode);
5420 
5421 	trace_nfs4_read(hdr, task->tk_status);
5422 	if (task->tk_status < 0) {
5423 		struct nfs4_exception exception = {
5424 			.inode = hdr->inode,
5425 			.state = hdr->args.context->state,
5426 			.stateid = &hdr->args.stateid,
5427 		};
5428 		task->tk_status = nfs4_async_handle_exception(task,
5429 				server, task->tk_status, &exception);
5430 		if (exception.retry) {
5431 			rpc_restart_call_prepare(task);
5432 			return -EAGAIN;
5433 		}
5434 	}
5435 
5436 	if (task->tk_status > 0)
5437 		renew_lease(server, hdr->timestamp);
5438 	return 0;
5439 }
5440 
5441 static bool nfs4_read_stateid_changed(struct rpc_task *task,
5442 		struct nfs_pgio_args *args)
5443 {
5444 
5445 	if (!nfs4_error_stateid_expired(task->tk_status) ||
5446 		nfs4_stateid_is_current(&args->stateid,
5447 				args->context,
5448 				args->lock_context,
5449 				FMODE_READ))
5450 		return false;
5451 	rpc_restart_call_prepare(task);
5452 	return true;
5453 }
5454 
5455 static bool nfs4_read_plus_not_supported(struct rpc_task *task,
5456 					 struct nfs_pgio_header *hdr)
5457 {
5458 	struct nfs_server *server = NFS_SERVER(hdr->inode);
5459 	struct rpc_message *msg = &task->tk_msg;
5460 
5461 	if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS] &&
5462 	    server->caps & NFS_CAP_READ_PLUS && task->tk_status == -ENOTSUPP) {
5463 		server->caps &= ~NFS_CAP_READ_PLUS;
5464 		msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
5465 		rpc_restart_call_prepare(task);
5466 		return true;
5467 	}
5468 	return false;
5469 }
5470 
5471 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
5472 {
5473 	if (!nfs4_sequence_done(task, &hdr->res.seq_res))
5474 		return -EAGAIN;
5475 	if (nfs4_read_stateid_changed(task, &hdr->args))
5476 		return -EAGAIN;
5477 	if (nfs4_read_plus_not_supported(task, hdr))
5478 		return -EAGAIN;
5479 	if (task->tk_status > 0)
5480 		nfs_invalidate_atime(hdr->inode);
5481 	return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
5482 				    nfs4_read_done_cb(task, hdr);
5483 }
5484 
5485 #if defined CONFIG_NFS_V4_2 && defined CONFIG_NFS_V4_2_READ_PLUS
5486 static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr,
5487 				    struct rpc_message *msg)
5488 {
5489 	/* Note: We don't use READ_PLUS with pNFS yet */
5490 	if (nfs_server_capable(hdr->inode, NFS_CAP_READ_PLUS) && !hdr->ds_clp) {
5491 		msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS];
5492 		return nfs_read_alloc_scratch(hdr, READ_PLUS_SCRATCH_SIZE);
5493 	}
5494 	return false;
5495 }
5496 #else
5497 static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr,
5498 				    struct rpc_message *msg)
5499 {
5500 	return false;
5501 }
5502 #endif /* CONFIG_NFS_V4_2 */
5503 
5504 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
5505 				 struct rpc_message *msg)
5506 {
5507 	hdr->timestamp   = jiffies;
5508 	if (!hdr->pgio_done_cb)
5509 		hdr->pgio_done_cb = nfs4_read_done_cb;
5510 	if (!nfs42_read_plus_support(hdr, msg))
5511 		msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
5512 	nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
5513 }
5514 
5515 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task,
5516 				      struct nfs_pgio_header *hdr)
5517 {
5518 	if (nfs4_setup_sequence(NFS_SERVER(hdr->inode)->nfs_client,
5519 			&hdr->args.seq_args,
5520 			&hdr->res.seq_res,
5521 			task))
5522 		return 0;
5523 	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
5524 				hdr->args.lock_context,
5525 				hdr->rw_mode) == -EIO)
5526 		return -EIO;
5527 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags)))
5528 		return -EIO;
5529 	return 0;
5530 }
5531 
5532 static int nfs4_write_done_cb(struct rpc_task *task,
5533 			      struct nfs_pgio_header *hdr)
5534 {
5535 	struct inode *inode = hdr->inode;
5536 
5537 	trace_nfs4_write(hdr, task->tk_status);
5538 	if (task->tk_status < 0) {
5539 		struct nfs4_exception exception = {
5540 			.inode = hdr->inode,
5541 			.state = hdr->args.context->state,
5542 			.stateid = &hdr->args.stateid,
5543 		};
5544 		task->tk_status = nfs4_async_handle_exception(task,
5545 				NFS_SERVER(inode), task->tk_status,
5546 				&exception);
5547 		if (exception.retry) {
5548 			rpc_restart_call_prepare(task);
5549 			return -EAGAIN;
5550 		}
5551 	}
5552 	if (task->tk_status >= 0) {
5553 		renew_lease(NFS_SERVER(inode), hdr->timestamp);
5554 		nfs_writeback_update_inode(hdr);
5555 	}
5556 	return 0;
5557 }
5558 
5559 static bool nfs4_write_stateid_changed(struct rpc_task *task,
5560 		struct nfs_pgio_args *args)
5561 {
5562 
5563 	if (!nfs4_error_stateid_expired(task->tk_status) ||
5564 		nfs4_stateid_is_current(&args->stateid,
5565 				args->context,
5566 				args->lock_context,
5567 				FMODE_WRITE))
5568 		return false;
5569 	rpc_restart_call_prepare(task);
5570 	return true;
5571 }
5572 
5573 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
5574 {
5575 	if (!nfs4_sequence_done(task, &hdr->res.seq_res))
5576 		return -EAGAIN;
5577 	if (nfs4_write_stateid_changed(task, &hdr->args))
5578 		return -EAGAIN;
5579 	return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
5580 		nfs4_write_done_cb(task, hdr);
5581 }
5582 
5583 static
5584 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
5585 {
5586 	/* Don't request attributes for pNFS or O_DIRECT writes */
5587 	if (hdr->ds_clp != NULL || hdr->dreq != NULL)
5588 		return false;
5589 	/* Otherwise, request attributes if and only if we don't hold
5590 	 * a delegation
5591 	 */
5592 	return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
5593 }
5594 
5595 void nfs4_bitmask_set(__u32 bitmask[], const __u32 src[],
5596 		      struct inode *inode, unsigned long cache_validity)
5597 {
5598 	struct nfs_server *server = NFS_SERVER(inode);
5599 	unsigned int i;
5600 
5601 	memcpy(bitmask, src, sizeof(*bitmask) * NFS4_BITMASK_SZ);
5602 	cache_validity |= READ_ONCE(NFS_I(inode)->cache_validity);
5603 
5604 	if (cache_validity & NFS_INO_INVALID_CHANGE)
5605 		bitmask[0] |= FATTR4_WORD0_CHANGE;
5606 	if (cache_validity & NFS_INO_INVALID_ATIME)
5607 		bitmask[1] |= FATTR4_WORD1_TIME_ACCESS;
5608 	if (cache_validity & NFS_INO_INVALID_MODE)
5609 		bitmask[1] |= FATTR4_WORD1_MODE;
5610 	if (cache_validity & NFS_INO_INVALID_OTHER)
5611 		bitmask[1] |= FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP;
5612 	if (cache_validity & NFS_INO_INVALID_NLINK)
5613 		bitmask[1] |= FATTR4_WORD1_NUMLINKS;
5614 	if (cache_validity & NFS_INO_INVALID_CTIME)
5615 		bitmask[1] |= FATTR4_WORD1_TIME_METADATA;
5616 	if (cache_validity & NFS_INO_INVALID_MTIME)
5617 		bitmask[1] |= FATTR4_WORD1_TIME_MODIFY;
5618 	if (cache_validity & NFS_INO_INVALID_BLOCKS)
5619 		bitmask[1] |= FATTR4_WORD1_SPACE_USED;
5620 
5621 	if (cache_validity & NFS_INO_INVALID_SIZE)
5622 		bitmask[0] |= FATTR4_WORD0_SIZE;
5623 
5624 	for (i = 0; i < NFS4_BITMASK_SZ; i++)
5625 		bitmask[i] &= server->attr_bitmask[i];
5626 }
5627 
5628 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
5629 				  struct rpc_message *msg,
5630 				  struct rpc_clnt **clnt)
5631 {
5632 	struct nfs_server *server = NFS_SERVER(hdr->inode);
5633 
5634 	if (!nfs4_write_need_cache_consistency_data(hdr)) {
5635 		hdr->args.bitmask = NULL;
5636 		hdr->res.fattr = NULL;
5637 	} else {
5638 		nfs4_bitmask_set(hdr->args.bitmask_store,
5639 				 server->cache_consistency_bitmask,
5640 				 hdr->inode, NFS_INO_INVALID_BLOCKS);
5641 		hdr->args.bitmask = hdr->args.bitmask_store;
5642 	}
5643 
5644 	if (!hdr->pgio_done_cb)
5645 		hdr->pgio_done_cb = nfs4_write_done_cb;
5646 	hdr->res.server = server;
5647 	hdr->timestamp   = jiffies;
5648 
5649 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
5650 	nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
5651 	nfs4_state_protect_write(hdr->ds_clp ? hdr->ds_clp : server->nfs_client, clnt, msg, hdr);
5652 }
5653 
5654 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
5655 {
5656 	nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client,
5657 			&data->args.seq_args,
5658 			&data->res.seq_res,
5659 			task);
5660 }
5661 
5662 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
5663 {
5664 	struct inode *inode = data->inode;
5665 
5666 	trace_nfs4_commit(data, task->tk_status);
5667 	if (nfs4_async_handle_error(task, NFS_SERVER(inode),
5668 				    NULL, NULL) == -EAGAIN) {
5669 		rpc_restart_call_prepare(task);
5670 		return -EAGAIN;
5671 	}
5672 	return 0;
5673 }
5674 
5675 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
5676 {
5677 	if (!nfs4_sequence_done(task, &data->res.seq_res))
5678 		return -EAGAIN;
5679 	return data->commit_done_cb(task, data);
5680 }
5681 
5682 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg,
5683 				   struct rpc_clnt **clnt)
5684 {
5685 	struct nfs_server *server = NFS_SERVER(data->inode);
5686 
5687 	if (data->commit_done_cb == NULL)
5688 		data->commit_done_cb = nfs4_commit_done_cb;
5689 	data->res.server = server;
5690 	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
5691 	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
5692 	nfs4_state_protect(data->ds_clp ? data->ds_clp : server->nfs_client,
5693 			NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
5694 }
5695 
5696 static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args,
5697 				struct nfs_commitres *res)
5698 {
5699 	struct inode *dst_inode = file_inode(dst);
5700 	struct nfs_server *server = NFS_SERVER(dst_inode);
5701 	struct rpc_message msg = {
5702 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT],
5703 		.rpc_argp = args,
5704 		.rpc_resp = res,
5705 	};
5706 
5707 	args->fh = NFS_FH(dst_inode);
5708 	return nfs4_call_sync(server->client, server, &msg,
5709 			&args->seq_args, &res->seq_res, 1);
5710 }
5711 
5712 int nfs4_proc_commit(struct file *dst, __u64 offset, __u32 count, struct nfs_commitres *res)
5713 {
5714 	struct nfs_commitargs args = {
5715 		.offset = offset,
5716 		.count = count,
5717 	};
5718 	struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
5719 	struct nfs4_exception exception = { };
5720 	int status;
5721 
5722 	do {
5723 		status = _nfs4_proc_commit(dst, &args, res);
5724 		status = nfs4_handle_exception(dst_server, status, &exception);
5725 	} while (exception.retry);
5726 
5727 	return status;
5728 }
5729 
5730 struct nfs4_renewdata {
5731 	struct nfs_client	*client;
5732 	unsigned long		timestamp;
5733 };
5734 
5735 /*
5736  * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
5737  * standalone procedure for queueing an asynchronous RENEW.
5738  */
5739 static void nfs4_renew_release(void *calldata)
5740 {
5741 	struct nfs4_renewdata *data = calldata;
5742 	struct nfs_client *clp = data->client;
5743 
5744 	if (refcount_read(&clp->cl_count) > 1)
5745 		nfs4_schedule_state_renewal(clp);
5746 	nfs_put_client(clp);
5747 	kfree(data);
5748 }
5749 
5750 static void nfs4_renew_done(struct rpc_task *task, void *calldata)
5751 {
5752 	struct nfs4_renewdata *data = calldata;
5753 	struct nfs_client *clp = data->client;
5754 	unsigned long timestamp = data->timestamp;
5755 
5756 	trace_nfs4_renew_async(clp, task->tk_status);
5757 	switch (task->tk_status) {
5758 	case 0:
5759 		break;
5760 	case -NFS4ERR_LEASE_MOVED:
5761 		nfs4_schedule_lease_moved_recovery(clp);
5762 		break;
5763 	default:
5764 		/* Unless we're shutting down, schedule state recovery! */
5765 		if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
5766 			return;
5767 		if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
5768 			nfs4_schedule_lease_recovery(clp);
5769 			return;
5770 		}
5771 		nfs4_schedule_path_down_recovery(clp);
5772 	}
5773 	do_renew_lease(clp, timestamp);
5774 }
5775 
5776 static const struct rpc_call_ops nfs4_renew_ops = {
5777 	.rpc_call_done = nfs4_renew_done,
5778 	.rpc_release = nfs4_renew_release,
5779 };
5780 
5781 static int nfs4_proc_async_renew(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags)
5782 {
5783 	struct rpc_message msg = {
5784 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_RENEW],
5785 		.rpc_argp	= clp,
5786 		.rpc_cred	= cred,
5787 	};
5788 	struct nfs4_renewdata *data;
5789 
5790 	if (renew_flags == 0)
5791 		return 0;
5792 	if (!refcount_inc_not_zero(&clp->cl_count))
5793 		return -EIO;
5794 	data = kmalloc(sizeof(*data), GFP_NOFS);
5795 	if (data == NULL) {
5796 		nfs_put_client(clp);
5797 		return -ENOMEM;
5798 	}
5799 	data->client = clp;
5800 	data->timestamp = jiffies;
5801 	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
5802 			&nfs4_renew_ops, data);
5803 }
5804 
5805 static int nfs4_proc_renew(struct nfs_client *clp, const struct cred *cred)
5806 {
5807 	struct rpc_message msg = {
5808 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_RENEW],
5809 		.rpc_argp	= clp,
5810 		.rpc_cred	= cred,
5811 	};
5812 	unsigned long now = jiffies;
5813 	int status;
5814 
5815 	status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5816 	if (status < 0)
5817 		return status;
5818 	do_renew_lease(clp, now);
5819 	return 0;
5820 }
5821 
5822 static bool nfs4_server_supports_acls(const struct nfs_server *server,
5823 				      enum nfs4_acl_type type)
5824 {
5825 	switch (type) {
5826 	default:
5827 		return server->attr_bitmask[0] & FATTR4_WORD0_ACL;
5828 	case NFS4ACL_DACL:
5829 		return server->attr_bitmask[1] & FATTR4_WORD1_DACL;
5830 	case NFS4ACL_SACL:
5831 		return server->attr_bitmask[1] & FATTR4_WORD1_SACL;
5832 	}
5833 }
5834 
5835 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
5836  * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
5837  * the stack.
5838  */
5839 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
5840 
5841 int nfs4_buf_to_pages_noslab(const void *buf, size_t buflen,
5842 		struct page **pages)
5843 {
5844 	struct page *newpage, **spages;
5845 	int rc = 0;
5846 	size_t len;
5847 	spages = pages;
5848 
5849 	do {
5850 		len = min_t(size_t, PAGE_SIZE, buflen);
5851 		newpage = alloc_page(GFP_KERNEL);
5852 
5853 		if (newpage == NULL)
5854 			goto unwind;
5855 		memcpy(page_address(newpage), buf, len);
5856 		buf += len;
5857 		buflen -= len;
5858 		*pages++ = newpage;
5859 		rc++;
5860 	} while (buflen != 0);
5861 
5862 	return rc;
5863 
5864 unwind:
5865 	for(; rc > 0; rc--)
5866 		__free_page(spages[rc-1]);
5867 	return -ENOMEM;
5868 }
5869 
5870 struct nfs4_cached_acl {
5871 	enum nfs4_acl_type type;
5872 	int cached;
5873 	size_t len;
5874 	char data[];
5875 };
5876 
5877 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
5878 {
5879 	struct nfs_inode *nfsi = NFS_I(inode);
5880 
5881 	spin_lock(&inode->i_lock);
5882 	kfree(nfsi->nfs4_acl);
5883 	nfsi->nfs4_acl = acl;
5884 	spin_unlock(&inode->i_lock);
5885 }
5886 
5887 static void nfs4_zap_acl_attr(struct inode *inode)
5888 {
5889 	nfs4_set_cached_acl(inode, NULL);
5890 }
5891 
5892 static ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf,
5893 				    size_t buflen, enum nfs4_acl_type type)
5894 {
5895 	struct nfs_inode *nfsi = NFS_I(inode);
5896 	struct nfs4_cached_acl *acl;
5897 	int ret = -ENOENT;
5898 
5899 	spin_lock(&inode->i_lock);
5900 	acl = nfsi->nfs4_acl;
5901 	if (acl == NULL)
5902 		goto out;
5903 	if (acl->type != type)
5904 		goto out;
5905 	if (buf == NULL) /* user is just asking for length */
5906 		goto out_len;
5907 	if (acl->cached == 0)
5908 		goto out;
5909 	ret = -ERANGE; /* see getxattr(2) man page */
5910 	if (acl->len > buflen)
5911 		goto out;
5912 	memcpy(buf, acl->data, acl->len);
5913 out_len:
5914 	ret = acl->len;
5915 out:
5916 	spin_unlock(&inode->i_lock);
5917 	return ret;
5918 }
5919 
5920 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages,
5921 				  size_t pgbase, size_t acl_len,
5922 				  enum nfs4_acl_type type)
5923 {
5924 	struct nfs4_cached_acl *acl;
5925 	size_t buflen = sizeof(*acl) + acl_len;
5926 
5927 	if (buflen <= PAGE_SIZE) {
5928 		acl = kmalloc(buflen, GFP_KERNEL);
5929 		if (acl == NULL)
5930 			goto out;
5931 		acl->cached = 1;
5932 		_copy_from_pages(acl->data, pages, pgbase, acl_len);
5933 	} else {
5934 		acl = kmalloc(sizeof(*acl), GFP_KERNEL);
5935 		if (acl == NULL)
5936 			goto out;
5937 		acl->cached = 0;
5938 	}
5939 	acl->type = type;
5940 	acl->len = acl_len;
5941 out:
5942 	nfs4_set_cached_acl(inode, acl);
5943 }
5944 
5945 /*
5946  * The getxattr API returns the required buffer length when called with a
5947  * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
5948  * the required buf.  On a NULL buf, we send a page of data to the server
5949  * guessing that the ACL request can be serviced by a page. If so, we cache
5950  * up to the page of ACL data, and the 2nd call to getxattr is serviced by
5951  * the cache. If not so, we throw away the page, and cache the required
5952  * length. The next getxattr call will then produce another round trip to
5953  * the server, this time with the input buf of the required size.
5954  */
5955 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf,
5956 				       size_t buflen, enum nfs4_acl_type type)
5957 {
5958 	struct page **pages;
5959 	struct nfs_getaclargs args = {
5960 		.fh = NFS_FH(inode),
5961 		.acl_type = type,
5962 		.acl_len = buflen,
5963 	};
5964 	struct nfs_getaclres res = {
5965 		.acl_type = type,
5966 		.acl_len = buflen,
5967 	};
5968 	struct rpc_message msg = {
5969 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
5970 		.rpc_argp = &args,
5971 		.rpc_resp = &res,
5972 	};
5973 	unsigned int npages;
5974 	int ret = -ENOMEM, i;
5975 	struct nfs_server *server = NFS_SERVER(inode);
5976 
5977 	if (buflen == 0)
5978 		buflen = server->rsize;
5979 
5980 	npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1;
5981 	pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
5982 	if (!pages)
5983 		return -ENOMEM;
5984 
5985 	args.acl_pages = pages;
5986 
5987 	for (i = 0; i < npages; i++) {
5988 		pages[i] = alloc_page(GFP_KERNEL);
5989 		if (!pages[i])
5990 			goto out_free;
5991 	}
5992 
5993 	/* for decoding across pages */
5994 	res.acl_scratch = alloc_page(GFP_KERNEL);
5995 	if (!res.acl_scratch)
5996 		goto out_free;
5997 
5998 	args.acl_len = npages * PAGE_SIZE;
5999 
6000 	dprintk("%s  buf %p buflen %zu npages %d args.acl_len %zu\n",
6001 		__func__, buf, buflen, npages, args.acl_len);
6002 	ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
6003 			     &msg, &args.seq_args, &res.seq_res, 0);
6004 	if (ret)
6005 		goto out_free;
6006 
6007 	/* Handle the case where the passed-in buffer is too short */
6008 	if (res.acl_flags & NFS4_ACL_TRUNC) {
6009 		/* Did the user only issue a request for the acl length? */
6010 		if (buf == NULL)
6011 			goto out_ok;
6012 		ret = -ERANGE;
6013 		goto out_free;
6014 	}
6015 	nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len,
6016 			      type);
6017 	if (buf) {
6018 		if (res.acl_len > buflen) {
6019 			ret = -ERANGE;
6020 			goto out_free;
6021 		}
6022 		_copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
6023 	}
6024 out_ok:
6025 	ret = res.acl_len;
6026 out_free:
6027 	while (--i >= 0)
6028 		__free_page(pages[i]);
6029 	if (res.acl_scratch)
6030 		__free_page(res.acl_scratch);
6031 	kfree(pages);
6032 	return ret;
6033 }
6034 
6035 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf,
6036 				     size_t buflen, enum nfs4_acl_type type)
6037 {
6038 	struct nfs4_exception exception = {
6039 		.interruptible = true,
6040 	};
6041 	ssize_t ret;
6042 	do {
6043 		ret = __nfs4_get_acl_uncached(inode, buf, buflen, type);
6044 		trace_nfs4_get_acl(inode, ret);
6045 		if (ret >= 0)
6046 			break;
6047 		ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
6048 	} while (exception.retry);
6049 	return ret;
6050 }
6051 
6052 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen,
6053 				 enum nfs4_acl_type type)
6054 {
6055 	struct nfs_server *server = NFS_SERVER(inode);
6056 	int ret;
6057 
6058 	if (!nfs4_server_supports_acls(server, type))
6059 		return -EOPNOTSUPP;
6060 	ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
6061 	if (ret < 0)
6062 		return ret;
6063 	if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
6064 		nfs_zap_acl_cache(inode);
6065 	ret = nfs4_read_cached_acl(inode, buf, buflen, type);
6066 	if (ret != -ENOENT)
6067 		/* -ENOENT is returned if there is no ACL or if there is an ACL
6068 		 * but no cached acl data, just the acl length */
6069 		return ret;
6070 	return nfs4_get_acl_uncached(inode, buf, buflen, type);
6071 }
6072 
6073 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf,
6074 			       size_t buflen, enum nfs4_acl_type type)
6075 {
6076 	struct nfs_server *server = NFS_SERVER(inode);
6077 	struct page *pages[NFS4ACL_MAXPAGES];
6078 	struct nfs_setaclargs arg = {
6079 		.fh = NFS_FH(inode),
6080 		.acl_type = type,
6081 		.acl_len = buflen,
6082 		.acl_pages = pages,
6083 	};
6084 	struct nfs_setaclres res;
6085 	struct rpc_message msg = {
6086 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_SETACL],
6087 		.rpc_argp	= &arg,
6088 		.rpc_resp	= &res,
6089 	};
6090 	unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
6091 	int ret, i;
6092 
6093 	/* You can't remove system.nfs4_acl: */
6094 	if (buflen == 0)
6095 		return -EINVAL;
6096 	if (!nfs4_server_supports_acls(server, type))
6097 		return -EOPNOTSUPP;
6098 	if (npages > ARRAY_SIZE(pages))
6099 		return -ERANGE;
6100 	i = nfs4_buf_to_pages_noslab(buf, buflen, arg.acl_pages);
6101 	if (i < 0)
6102 		return i;
6103 	nfs4_inode_make_writeable(inode);
6104 	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
6105 
6106 	/*
6107 	 * Free each page after tx, so the only ref left is
6108 	 * held by the network stack
6109 	 */
6110 	for (; i > 0; i--)
6111 		put_page(pages[i-1]);
6112 
6113 	/*
6114 	 * Acl update can result in inode attribute update.
6115 	 * so mark the attribute cache invalid.
6116 	 */
6117 	spin_lock(&inode->i_lock);
6118 	nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
6119 					     NFS_INO_INVALID_CTIME |
6120 					     NFS_INO_REVAL_FORCED);
6121 	spin_unlock(&inode->i_lock);
6122 	nfs_access_zap_cache(inode);
6123 	nfs_zap_acl_cache(inode);
6124 	return ret;
6125 }
6126 
6127 static int nfs4_proc_set_acl(struct inode *inode, const void *buf,
6128 			     size_t buflen, enum nfs4_acl_type type)
6129 {
6130 	struct nfs4_exception exception = { };
6131 	int err;
6132 	do {
6133 		err = __nfs4_proc_set_acl(inode, buf, buflen, type);
6134 		trace_nfs4_set_acl(inode, err);
6135 		if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) {
6136 			/*
6137 			 * no need to retry since the kernel
6138 			 * isn't involved in encoding the ACEs.
6139 			 */
6140 			err = -EINVAL;
6141 			break;
6142 		}
6143 		err = nfs4_handle_exception(NFS_SERVER(inode), err,
6144 				&exception);
6145 	} while (exception.retry);
6146 	return err;
6147 }
6148 
6149 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
6150 static int _nfs4_get_security_label(struct inode *inode, void *buf,
6151 					size_t buflen)
6152 {
6153 	struct nfs_server *server = NFS_SERVER(inode);
6154 	struct nfs4_label label = {0, 0, buflen, buf};
6155 
6156 	u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
6157 	struct nfs_fattr fattr = {
6158 		.label = &label,
6159 	};
6160 	struct nfs4_getattr_arg arg = {
6161 		.fh		= NFS_FH(inode),
6162 		.bitmask	= bitmask,
6163 	};
6164 	struct nfs4_getattr_res res = {
6165 		.fattr		= &fattr,
6166 		.server		= server,
6167 	};
6168 	struct rpc_message msg = {
6169 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
6170 		.rpc_argp	= &arg,
6171 		.rpc_resp	= &res,
6172 	};
6173 	int ret;
6174 
6175 	nfs_fattr_init(&fattr);
6176 
6177 	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0);
6178 	if (ret)
6179 		return ret;
6180 	if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
6181 		return -ENOENT;
6182 	return label.len;
6183 }
6184 
6185 static int nfs4_get_security_label(struct inode *inode, void *buf,
6186 					size_t buflen)
6187 {
6188 	struct nfs4_exception exception = {
6189 		.interruptible = true,
6190 	};
6191 	int err;
6192 
6193 	if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
6194 		return -EOPNOTSUPP;
6195 
6196 	do {
6197 		err = _nfs4_get_security_label(inode, buf, buflen);
6198 		trace_nfs4_get_security_label(inode, err);
6199 		err = nfs4_handle_exception(NFS_SERVER(inode), err,
6200 				&exception);
6201 	} while (exception.retry);
6202 	return err;
6203 }
6204 
6205 static int _nfs4_do_set_security_label(struct inode *inode,
6206 		struct nfs4_label *ilabel,
6207 		struct nfs_fattr *fattr)
6208 {
6209 
6210 	struct iattr sattr = {0};
6211 	struct nfs_server *server = NFS_SERVER(inode);
6212 	const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
6213 	struct nfs_setattrargs arg = {
6214 		.fh		= NFS_FH(inode),
6215 		.iap		= &sattr,
6216 		.server		= server,
6217 		.bitmask	= bitmask,
6218 		.label		= ilabel,
6219 	};
6220 	struct nfs_setattrres res = {
6221 		.fattr		= fattr,
6222 		.server		= server,
6223 	};
6224 	struct rpc_message msg = {
6225 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
6226 		.rpc_argp	= &arg,
6227 		.rpc_resp	= &res,
6228 	};
6229 	int status;
6230 
6231 	nfs4_stateid_copy(&arg.stateid, &zero_stateid);
6232 
6233 	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
6234 	if (status)
6235 		dprintk("%s failed: %d\n", __func__, status);
6236 
6237 	return status;
6238 }
6239 
6240 static int nfs4_do_set_security_label(struct inode *inode,
6241 		struct nfs4_label *ilabel,
6242 		struct nfs_fattr *fattr)
6243 {
6244 	struct nfs4_exception exception = { };
6245 	int err;
6246 
6247 	do {
6248 		err = _nfs4_do_set_security_label(inode, ilabel, fattr);
6249 		trace_nfs4_set_security_label(inode, err);
6250 		err = nfs4_handle_exception(NFS_SERVER(inode), err,
6251 				&exception);
6252 	} while (exception.retry);
6253 	return err;
6254 }
6255 
6256 static int
6257 nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
6258 {
6259 	struct nfs4_label ilabel = {0, 0, buflen, (char *)buf };
6260 	struct nfs_fattr *fattr;
6261 	int status;
6262 
6263 	if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
6264 		return -EOPNOTSUPP;
6265 
6266 	fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode));
6267 	if (fattr == NULL)
6268 		return -ENOMEM;
6269 
6270 	status = nfs4_do_set_security_label(inode, &ilabel, fattr);
6271 	if (status == 0)
6272 		nfs_setsecurity(inode, fattr);
6273 
6274 	return status;
6275 }
6276 #endif	/* CONFIG_NFS_V4_SECURITY_LABEL */
6277 
6278 
6279 static void nfs4_init_boot_verifier(const struct nfs_client *clp,
6280 				    nfs4_verifier *bootverf)
6281 {
6282 	__be32 verf[2];
6283 
6284 	if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
6285 		/* An impossible timestamp guarantees this value
6286 		 * will never match a generated boot time. */
6287 		verf[0] = cpu_to_be32(U32_MAX);
6288 		verf[1] = cpu_to_be32(U32_MAX);
6289 	} else {
6290 		struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
6291 		u64 ns = ktime_to_ns(nn->boot_time);
6292 
6293 		verf[0] = cpu_to_be32(ns >> 32);
6294 		verf[1] = cpu_to_be32(ns);
6295 	}
6296 	memcpy(bootverf->data, verf, sizeof(bootverf->data));
6297 }
6298 
6299 static size_t
6300 nfs4_get_uniquifier(struct nfs_client *clp, char *buf, size_t buflen)
6301 {
6302 	struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
6303 	struct nfs_netns_client *nn_clp = nn->nfs_client;
6304 	const char *id;
6305 
6306 	buf[0] = '\0';
6307 
6308 	if (nn_clp) {
6309 		rcu_read_lock();
6310 		id = rcu_dereference(nn_clp->identifier);
6311 		if (id)
6312 			strscpy(buf, id, buflen);
6313 		rcu_read_unlock();
6314 	}
6315 
6316 	if (nfs4_client_id_uniquifier[0] != '\0' && buf[0] == '\0')
6317 		strscpy(buf, nfs4_client_id_uniquifier, buflen);
6318 
6319 	return strlen(buf);
6320 }
6321 
6322 static int
6323 nfs4_init_nonuniform_client_string(struct nfs_client *clp)
6324 {
6325 	char buf[NFS4_CLIENT_ID_UNIQ_LEN];
6326 	size_t buflen;
6327 	size_t len;
6328 	char *str;
6329 
6330 	if (clp->cl_owner_id != NULL)
6331 		return 0;
6332 
6333 	rcu_read_lock();
6334 	len = 14 +
6335 		strlen(clp->cl_rpcclient->cl_nodename) +
6336 		1 +
6337 		strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) +
6338 		1;
6339 	rcu_read_unlock();
6340 
6341 	buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf));
6342 	if (buflen)
6343 		len += buflen + 1;
6344 
6345 	if (len > NFS4_OPAQUE_LIMIT + 1)
6346 		return -EINVAL;
6347 
6348 	/*
6349 	 * Since this string is allocated at mount time, and held until the
6350 	 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
6351 	 * about a memory-reclaim deadlock.
6352 	 */
6353 	str = kmalloc(len, GFP_KERNEL);
6354 	if (!str)
6355 		return -ENOMEM;
6356 
6357 	rcu_read_lock();
6358 	if (buflen)
6359 		scnprintf(str, len, "Linux NFSv4.0 %s/%s/%s",
6360 			  clp->cl_rpcclient->cl_nodename, buf,
6361 			  rpc_peeraddr2str(clp->cl_rpcclient,
6362 					   RPC_DISPLAY_ADDR));
6363 	else
6364 		scnprintf(str, len, "Linux NFSv4.0 %s/%s",
6365 			  clp->cl_rpcclient->cl_nodename,
6366 			  rpc_peeraddr2str(clp->cl_rpcclient,
6367 					   RPC_DISPLAY_ADDR));
6368 	rcu_read_unlock();
6369 
6370 	clp->cl_owner_id = str;
6371 	return 0;
6372 }
6373 
6374 static int
6375 nfs4_init_uniform_client_string(struct nfs_client *clp)
6376 {
6377 	char buf[NFS4_CLIENT_ID_UNIQ_LEN];
6378 	size_t buflen;
6379 	size_t len;
6380 	char *str;
6381 
6382 	if (clp->cl_owner_id != NULL)
6383 		return 0;
6384 
6385 	len = 10 + 10 + 1 + 10 + 1 +
6386 		strlen(clp->cl_rpcclient->cl_nodename) + 1;
6387 
6388 	buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf));
6389 	if (buflen)
6390 		len += buflen + 1;
6391 
6392 	if (len > NFS4_OPAQUE_LIMIT + 1)
6393 		return -EINVAL;
6394 
6395 	/*
6396 	 * Since this string is allocated at mount time, and held until the
6397 	 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
6398 	 * about a memory-reclaim deadlock.
6399 	 */
6400 	str = kmalloc(len, GFP_KERNEL);
6401 	if (!str)
6402 		return -ENOMEM;
6403 
6404 	if (buflen)
6405 		scnprintf(str, len, "Linux NFSv%u.%u %s/%s",
6406 			  clp->rpc_ops->version, clp->cl_minorversion,
6407 			  buf, clp->cl_rpcclient->cl_nodename);
6408 	else
6409 		scnprintf(str, len, "Linux NFSv%u.%u %s",
6410 			  clp->rpc_ops->version, clp->cl_minorversion,
6411 			  clp->cl_rpcclient->cl_nodename);
6412 	clp->cl_owner_id = str;
6413 	return 0;
6414 }
6415 
6416 /*
6417  * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback
6418  * services.  Advertise one based on the address family of the
6419  * clientaddr.
6420  */
6421 static unsigned int
6422 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len)
6423 {
6424 	if (strchr(clp->cl_ipaddr, ':') != NULL)
6425 		return scnprintf(buf, len, "tcp6");
6426 	else
6427 		return scnprintf(buf, len, "tcp");
6428 }
6429 
6430 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata)
6431 {
6432 	struct nfs4_setclientid *sc = calldata;
6433 
6434 	if (task->tk_status == 0)
6435 		sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred);
6436 }
6437 
6438 static const struct rpc_call_ops nfs4_setclientid_ops = {
6439 	.rpc_call_done = nfs4_setclientid_done,
6440 };
6441 
6442 /**
6443  * nfs4_proc_setclientid - Negotiate client ID
6444  * @clp: state data structure
6445  * @program: RPC program for NFSv4 callback service
6446  * @port: IP port number for NFS4 callback service
6447  * @cred: credential to use for this call
6448  * @res: where to place the result
6449  *
6450  * Returns zero, a negative errno, or a negative NFS4ERR status code.
6451  */
6452 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
6453 		unsigned short port, const struct cred *cred,
6454 		struct nfs4_setclientid_res *res)
6455 {
6456 	nfs4_verifier sc_verifier;
6457 	struct nfs4_setclientid setclientid = {
6458 		.sc_verifier = &sc_verifier,
6459 		.sc_prog = program,
6460 		.sc_clnt = clp,
6461 	};
6462 	struct rpc_message msg = {
6463 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
6464 		.rpc_argp = &setclientid,
6465 		.rpc_resp = res,
6466 		.rpc_cred = cred,
6467 	};
6468 	struct rpc_task_setup task_setup_data = {
6469 		.rpc_client = clp->cl_rpcclient,
6470 		.rpc_message = &msg,
6471 		.callback_ops = &nfs4_setclientid_ops,
6472 		.callback_data = &setclientid,
6473 		.flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
6474 	};
6475 	unsigned long now = jiffies;
6476 	int status;
6477 
6478 	/* nfs_client_id4 */
6479 	nfs4_init_boot_verifier(clp, &sc_verifier);
6480 
6481 	if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
6482 		status = nfs4_init_uniform_client_string(clp);
6483 	else
6484 		status = nfs4_init_nonuniform_client_string(clp);
6485 
6486 	if (status)
6487 		goto out;
6488 
6489 	/* cb_client4 */
6490 	setclientid.sc_netid_len =
6491 				nfs4_init_callback_netid(clp,
6492 						setclientid.sc_netid,
6493 						sizeof(setclientid.sc_netid));
6494 	setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
6495 				sizeof(setclientid.sc_uaddr), "%s.%u.%u",
6496 				clp->cl_ipaddr, port >> 8, port & 255);
6497 
6498 	dprintk("NFS call  setclientid auth=%s, '%s'\n",
6499 		clp->cl_rpcclient->cl_auth->au_ops->au_name,
6500 		clp->cl_owner_id);
6501 
6502 	status = nfs4_call_sync_custom(&task_setup_data);
6503 	if (setclientid.sc_cred) {
6504 		kfree(clp->cl_acceptor);
6505 		clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
6506 		put_rpccred(setclientid.sc_cred);
6507 	}
6508 
6509 	if (status == 0)
6510 		do_renew_lease(clp, now);
6511 out:
6512 	trace_nfs4_setclientid(clp, status);
6513 	dprintk("NFS reply setclientid: %d\n", status);
6514 	return status;
6515 }
6516 
6517 /**
6518  * nfs4_proc_setclientid_confirm - Confirm client ID
6519  * @clp: state data structure
6520  * @arg: result of a previous SETCLIENTID
6521  * @cred: credential to use for this call
6522  *
6523  * Returns zero, a negative errno, or a negative NFS4ERR status code.
6524  */
6525 int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
6526 		struct nfs4_setclientid_res *arg,
6527 		const struct cred *cred)
6528 {
6529 	struct rpc_message msg = {
6530 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
6531 		.rpc_argp = arg,
6532 		.rpc_cred = cred,
6533 	};
6534 	int status;
6535 
6536 	dprintk("NFS call  setclientid_confirm auth=%s, (client ID %llx)\n",
6537 		clp->cl_rpcclient->cl_auth->au_ops->au_name,
6538 		clp->cl_clientid);
6539 	status = rpc_call_sync(clp->cl_rpcclient, &msg,
6540 			       RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
6541 	trace_nfs4_setclientid_confirm(clp, status);
6542 	dprintk("NFS reply setclientid_confirm: %d\n", status);
6543 	return status;
6544 }
6545 
6546 struct nfs4_delegreturndata {
6547 	struct nfs4_delegreturnargs args;
6548 	struct nfs4_delegreturnres res;
6549 	struct nfs_fh fh;
6550 	nfs4_stateid stateid;
6551 	unsigned long timestamp;
6552 	struct {
6553 		struct nfs4_layoutreturn_args arg;
6554 		struct nfs4_layoutreturn_res res;
6555 		struct nfs4_xdr_opaque_data ld_private;
6556 		u32 roc_barrier;
6557 		bool roc;
6558 	} lr;
6559 	struct nfs_fattr fattr;
6560 	int rpc_status;
6561 	struct inode *inode;
6562 };
6563 
6564 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
6565 {
6566 	struct nfs4_delegreturndata *data = calldata;
6567 	struct nfs4_exception exception = {
6568 		.inode = data->inode,
6569 		.stateid = &data->stateid,
6570 		.task_is_privileged = data->args.seq_args.sa_privileged,
6571 	};
6572 
6573 	if (!nfs4_sequence_done(task, &data->res.seq_res))
6574 		return;
6575 
6576 	trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
6577 
6578 	/* Handle Layoutreturn errors */
6579 	if (pnfs_roc_done(task, &data->args.lr_args, &data->res.lr_res,
6580 			  &data->res.lr_ret) == -EAGAIN)
6581 		goto out_restart;
6582 
6583 	switch (task->tk_status) {
6584 	case 0:
6585 		renew_lease(data->res.server, data->timestamp);
6586 		break;
6587 	case -NFS4ERR_ADMIN_REVOKED:
6588 	case -NFS4ERR_DELEG_REVOKED:
6589 	case -NFS4ERR_EXPIRED:
6590 		nfs4_free_revoked_stateid(data->res.server,
6591 				data->args.stateid,
6592 				task->tk_msg.rpc_cred);
6593 		fallthrough;
6594 	case -NFS4ERR_BAD_STATEID:
6595 	case -NFS4ERR_STALE_STATEID:
6596 	case -ETIMEDOUT:
6597 		task->tk_status = 0;
6598 		break;
6599 	case -NFS4ERR_OLD_STATEID:
6600 		if (!nfs4_refresh_delegation_stateid(&data->stateid, data->inode))
6601 			nfs4_stateid_seqid_inc(&data->stateid);
6602 		if (data->args.bitmask) {
6603 			data->args.bitmask = NULL;
6604 			data->res.fattr = NULL;
6605 		}
6606 		goto out_restart;
6607 	case -NFS4ERR_ACCESS:
6608 		if (data->args.bitmask) {
6609 			data->args.bitmask = NULL;
6610 			data->res.fattr = NULL;
6611 			goto out_restart;
6612 		}
6613 		fallthrough;
6614 	default:
6615 		task->tk_status = nfs4_async_handle_exception(task,
6616 				data->res.server, task->tk_status,
6617 				&exception);
6618 		if (exception.retry)
6619 			goto out_restart;
6620 	}
6621 	nfs_delegation_mark_returned(data->inode, data->args.stateid);
6622 	data->rpc_status = task->tk_status;
6623 	return;
6624 out_restart:
6625 	task->tk_status = 0;
6626 	rpc_restart_call_prepare(task);
6627 }
6628 
6629 static void nfs4_delegreturn_release(void *calldata)
6630 {
6631 	struct nfs4_delegreturndata *data = calldata;
6632 	struct inode *inode = data->inode;
6633 
6634 	if (data->lr.roc)
6635 		pnfs_roc_release(&data->lr.arg, &data->lr.res,
6636 				 data->res.lr_ret);
6637 	if (inode) {
6638 		nfs4_fattr_set_prechange(&data->fattr,
6639 					 inode_peek_iversion_raw(inode));
6640 		nfs_refresh_inode(inode, &data->fattr);
6641 		nfs_iput_and_deactive(inode);
6642 	}
6643 	kfree(calldata);
6644 }
6645 
6646 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
6647 {
6648 	struct nfs4_delegreturndata *d_data;
6649 	struct pnfs_layout_hdr *lo;
6650 
6651 	d_data = data;
6652 
6653 	if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) {
6654 		nfs4_sequence_done(task, &d_data->res.seq_res);
6655 		return;
6656 	}
6657 
6658 	lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL;
6659 	if (lo && !pnfs_layout_is_valid(lo)) {
6660 		d_data->args.lr_args = NULL;
6661 		d_data->res.lr_res = NULL;
6662 	}
6663 
6664 	nfs4_setup_sequence(d_data->res.server->nfs_client,
6665 			&d_data->args.seq_args,
6666 			&d_data->res.seq_res,
6667 			task);
6668 }
6669 
6670 static const struct rpc_call_ops nfs4_delegreturn_ops = {
6671 	.rpc_call_prepare = nfs4_delegreturn_prepare,
6672 	.rpc_call_done = nfs4_delegreturn_done,
6673 	.rpc_release = nfs4_delegreturn_release,
6674 };
6675 
6676 static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync)
6677 {
6678 	struct nfs4_delegreturndata *data;
6679 	struct nfs_server *server = NFS_SERVER(inode);
6680 	struct rpc_task *task;
6681 	struct rpc_message msg = {
6682 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
6683 		.rpc_cred = cred,
6684 	};
6685 	struct rpc_task_setup task_setup_data = {
6686 		.rpc_client = server->client,
6687 		.rpc_message = &msg,
6688 		.callback_ops = &nfs4_delegreturn_ops,
6689 		.flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
6690 	};
6691 	int status = 0;
6692 
6693 	if (nfs_server_capable(inode, NFS_CAP_MOVEABLE))
6694 		task_setup_data.flags |= RPC_TASK_MOVEABLE;
6695 
6696 	data = kzalloc(sizeof(*data), GFP_KERNEL);
6697 	if (data == NULL)
6698 		return -ENOMEM;
6699 
6700 	nfs4_state_protect(server->nfs_client,
6701 			NFS_SP4_MACH_CRED_CLEANUP,
6702 			&task_setup_data.rpc_client, &msg);
6703 
6704 	data->args.fhandle = &data->fh;
6705 	data->args.stateid = &data->stateid;
6706 	nfs4_bitmask_set(data->args.bitmask_store,
6707 			 server->cache_consistency_bitmask, inode, 0);
6708 	data->args.bitmask = data->args.bitmask_store;
6709 	nfs_copy_fh(&data->fh, NFS_FH(inode));
6710 	nfs4_stateid_copy(&data->stateid, stateid);
6711 	data->res.fattr = &data->fattr;
6712 	data->res.server = server;
6713 	data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
6714 	data->lr.arg.ld_private = &data->lr.ld_private;
6715 	nfs_fattr_init(data->res.fattr);
6716 	data->timestamp = jiffies;
6717 	data->rpc_status = 0;
6718 	data->inode = nfs_igrab_and_active(inode);
6719 	if (data->inode || issync) {
6720 		data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res,
6721 					cred);
6722 		if (data->lr.roc) {
6723 			data->args.lr_args = &data->lr.arg;
6724 			data->res.lr_res = &data->lr.res;
6725 		}
6726 	}
6727 
6728 	if (!data->inode)
6729 		nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
6730 				   1);
6731 	else
6732 		nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
6733 				   0);
6734 	task_setup_data.callback_data = data;
6735 	msg.rpc_argp = &data->args;
6736 	msg.rpc_resp = &data->res;
6737 	task = rpc_run_task(&task_setup_data);
6738 	if (IS_ERR(task))
6739 		return PTR_ERR(task);
6740 	if (!issync)
6741 		goto out;
6742 	status = rpc_wait_for_completion_task(task);
6743 	if (status != 0)
6744 		goto out;
6745 	status = data->rpc_status;
6746 out:
6747 	rpc_put_task(task);
6748 	return status;
6749 }
6750 
6751 int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync)
6752 {
6753 	struct nfs_server *server = NFS_SERVER(inode);
6754 	struct nfs4_exception exception = { };
6755 	int err;
6756 	do {
6757 		err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
6758 		trace_nfs4_delegreturn(inode, stateid, err);
6759 		switch (err) {
6760 			case -NFS4ERR_STALE_STATEID:
6761 			case -NFS4ERR_EXPIRED:
6762 			case 0:
6763 				return 0;
6764 		}
6765 		err = nfs4_handle_exception(server, err, &exception);
6766 	} while (exception.retry);
6767 	return err;
6768 }
6769 
6770 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6771 {
6772 	struct inode *inode = state->inode;
6773 	struct nfs_server *server = NFS_SERVER(inode);
6774 	struct nfs_client *clp = server->nfs_client;
6775 	struct nfs_lockt_args arg = {
6776 		.fh = NFS_FH(inode),
6777 		.fl = request,
6778 	};
6779 	struct nfs_lockt_res res = {
6780 		.denied = request,
6781 	};
6782 	struct rpc_message msg = {
6783 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
6784 		.rpc_argp	= &arg,
6785 		.rpc_resp	= &res,
6786 		.rpc_cred	= state->owner->so_cred,
6787 	};
6788 	struct nfs4_lock_state *lsp;
6789 	int status;
6790 
6791 	arg.lock_owner.clientid = clp->cl_clientid;
6792 	status = nfs4_set_lock_state(state, request);
6793 	if (status != 0)
6794 		goto out;
6795 	lsp = request->fl_u.nfs4_fl.owner;
6796 	arg.lock_owner.id = lsp->ls_seqid.owner_id;
6797 	arg.lock_owner.s_dev = server->s_dev;
6798 	status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
6799 	switch (status) {
6800 		case 0:
6801 			request->fl_type = F_UNLCK;
6802 			break;
6803 		case -NFS4ERR_DENIED:
6804 			status = 0;
6805 	}
6806 	request->fl_ops->fl_release_private(request);
6807 	request->fl_ops = NULL;
6808 out:
6809 	return status;
6810 }
6811 
6812 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6813 {
6814 	struct nfs4_exception exception = {
6815 		.interruptible = true,
6816 	};
6817 	int err;
6818 
6819 	do {
6820 		err = _nfs4_proc_getlk(state, cmd, request);
6821 		trace_nfs4_get_lock(request, state, cmd, err);
6822 		err = nfs4_handle_exception(NFS_SERVER(state->inode), err,
6823 				&exception);
6824 	} while (exception.retry);
6825 	return err;
6826 }
6827 
6828 /*
6829  * Update the seqid of a lock stateid after receiving
6830  * NFS4ERR_OLD_STATEID
6831  */
6832 static bool nfs4_refresh_lock_old_stateid(nfs4_stateid *dst,
6833 		struct nfs4_lock_state *lsp)
6834 {
6835 	struct nfs4_state *state = lsp->ls_state;
6836 	bool ret = false;
6837 
6838 	spin_lock(&state->state_lock);
6839 	if (!nfs4_stateid_match_other(dst, &lsp->ls_stateid))
6840 		goto out;
6841 	if (!nfs4_stateid_is_newer(&lsp->ls_stateid, dst))
6842 		nfs4_stateid_seqid_inc(dst);
6843 	else
6844 		dst->seqid = lsp->ls_stateid.seqid;
6845 	ret = true;
6846 out:
6847 	spin_unlock(&state->state_lock);
6848 	return ret;
6849 }
6850 
6851 static bool nfs4_sync_lock_stateid(nfs4_stateid *dst,
6852 		struct nfs4_lock_state *lsp)
6853 {
6854 	struct nfs4_state *state = lsp->ls_state;
6855 	bool ret;
6856 
6857 	spin_lock(&state->state_lock);
6858 	ret = !nfs4_stateid_match_other(dst, &lsp->ls_stateid);
6859 	nfs4_stateid_copy(dst, &lsp->ls_stateid);
6860 	spin_unlock(&state->state_lock);
6861 	return ret;
6862 }
6863 
6864 struct nfs4_unlockdata {
6865 	struct nfs_locku_args arg;
6866 	struct nfs_locku_res res;
6867 	struct nfs4_lock_state *lsp;
6868 	struct nfs_open_context *ctx;
6869 	struct nfs_lock_context *l_ctx;
6870 	struct file_lock fl;
6871 	struct nfs_server *server;
6872 	unsigned long timestamp;
6873 };
6874 
6875 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
6876 		struct nfs_open_context *ctx,
6877 		struct nfs4_lock_state *lsp,
6878 		struct nfs_seqid *seqid)
6879 {
6880 	struct nfs4_unlockdata *p;
6881 	struct nfs4_state *state = lsp->ls_state;
6882 	struct inode *inode = state->inode;
6883 
6884 	p = kzalloc(sizeof(*p), GFP_KERNEL);
6885 	if (p == NULL)
6886 		return NULL;
6887 	p->arg.fh = NFS_FH(inode);
6888 	p->arg.fl = &p->fl;
6889 	p->arg.seqid = seqid;
6890 	p->res.seqid = seqid;
6891 	p->lsp = lsp;
6892 	/* Ensure we don't close file until we're done freeing locks! */
6893 	p->ctx = get_nfs_open_context(ctx);
6894 	p->l_ctx = nfs_get_lock_context(ctx);
6895 	locks_init_lock(&p->fl);
6896 	locks_copy_lock(&p->fl, fl);
6897 	p->server = NFS_SERVER(inode);
6898 	spin_lock(&state->state_lock);
6899 	nfs4_stateid_copy(&p->arg.stateid, &lsp->ls_stateid);
6900 	spin_unlock(&state->state_lock);
6901 	return p;
6902 }
6903 
6904 static void nfs4_locku_release_calldata(void *data)
6905 {
6906 	struct nfs4_unlockdata *calldata = data;
6907 	nfs_free_seqid(calldata->arg.seqid);
6908 	nfs4_put_lock_state(calldata->lsp);
6909 	nfs_put_lock_context(calldata->l_ctx);
6910 	put_nfs_open_context(calldata->ctx);
6911 	kfree(calldata);
6912 }
6913 
6914 static void nfs4_locku_done(struct rpc_task *task, void *data)
6915 {
6916 	struct nfs4_unlockdata *calldata = data;
6917 	struct nfs4_exception exception = {
6918 		.inode = calldata->lsp->ls_state->inode,
6919 		.stateid = &calldata->arg.stateid,
6920 	};
6921 
6922 	if (!nfs4_sequence_done(task, &calldata->res.seq_res))
6923 		return;
6924 	switch (task->tk_status) {
6925 		case 0:
6926 			renew_lease(calldata->server, calldata->timestamp);
6927 			locks_lock_inode_wait(calldata->lsp->ls_state->inode, &calldata->fl);
6928 			if (nfs4_update_lock_stateid(calldata->lsp,
6929 					&calldata->res.stateid))
6930 				break;
6931 			fallthrough;
6932 		case -NFS4ERR_ADMIN_REVOKED:
6933 		case -NFS4ERR_EXPIRED:
6934 			nfs4_free_revoked_stateid(calldata->server,
6935 					&calldata->arg.stateid,
6936 					task->tk_msg.rpc_cred);
6937 			fallthrough;
6938 		case -NFS4ERR_BAD_STATEID:
6939 		case -NFS4ERR_STALE_STATEID:
6940 			if (nfs4_sync_lock_stateid(&calldata->arg.stateid,
6941 						calldata->lsp))
6942 				rpc_restart_call_prepare(task);
6943 			break;
6944 		case -NFS4ERR_OLD_STATEID:
6945 			if (nfs4_refresh_lock_old_stateid(&calldata->arg.stateid,
6946 						calldata->lsp))
6947 				rpc_restart_call_prepare(task);
6948 			break;
6949 		default:
6950 			task->tk_status = nfs4_async_handle_exception(task,
6951 					calldata->server, task->tk_status,
6952 					&exception);
6953 			if (exception.retry)
6954 				rpc_restart_call_prepare(task);
6955 	}
6956 	nfs_release_seqid(calldata->arg.seqid);
6957 }
6958 
6959 static void nfs4_locku_prepare(struct rpc_task *task, void *data)
6960 {
6961 	struct nfs4_unlockdata *calldata = data;
6962 
6963 	if (test_bit(NFS_CONTEXT_UNLOCK, &calldata->l_ctx->open_context->flags) &&
6964 		nfs_async_iocounter_wait(task, calldata->l_ctx))
6965 		return;
6966 
6967 	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
6968 		goto out_wait;
6969 	if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
6970 		/* Note: exit _without_ running nfs4_locku_done */
6971 		goto out_no_action;
6972 	}
6973 	calldata->timestamp = jiffies;
6974 	if (nfs4_setup_sequence(calldata->server->nfs_client,
6975 				&calldata->arg.seq_args,
6976 				&calldata->res.seq_res,
6977 				task) != 0)
6978 		nfs_release_seqid(calldata->arg.seqid);
6979 	return;
6980 out_no_action:
6981 	task->tk_action = NULL;
6982 out_wait:
6983 	nfs4_sequence_done(task, &calldata->res.seq_res);
6984 }
6985 
6986 static const struct rpc_call_ops nfs4_locku_ops = {
6987 	.rpc_call_prepare = nfs4_locku_prepare,
6988 	.rpc_call_done = nfs4_locku_done,
6989 	.rpc_release = nfs4_locku_release_calldata,
6990 };
6991 
6992 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
6993 		struct nfs_open_context *ctx,
6994 		struct nfs4_lock_state *lsp,
6995 		struct nfs_seqid *seqid)
6996 {
6997 	struct nfs4_unlockdata *data;
6998 	struct rpc_message msg = {
6999 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
7000 		.rpc_cred = ctx->cred,
7001 	};
7002 	struct rpc_task_setup task_setup_data = {
7003 		.rpc_client = NFS_CLIENT(lsp->ls_state->inode),
7004 		.rpc_message = &msg,
7005 		.callback_ops = &nfs4_locku_ops,
7006 		.workqueue = nfsiod_workqueue,
7007 		.flags = RPC_TASK_ASYNC,
7008 	};
7009 
7010 	if (nfs_server_capable(lsp->ls_state->inode, NFS_CAP_MOVEABLE))
7011 		task_setup_data.flags |= RPC_TASK_MOVEABLE;
7012 
7013 	nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
7014 		NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg);
7015 
7016 	/* Ensure this is an unlock - when canceling a lock, the
7017 	 * canceled lock is passed in, and it won't be an unlock.
7018 	 */
7019 	fl->fl_type = F_UNLCK;
7020 	if (fl->fl_flags & FL_CLOSE)
7021 		set_bit(NFS_CONTEXT_UNLOCK, &ctx->flags);
7022 
7023 	data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
7024 	if (data == NULL) {
7025 		nfs_free_seqid(seqid);
7026 		return ERR_PTR(-ENOMEM);
7027 	}
7028 
7029 	nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 0);
7030 	msg.rpc_argp = &data->arg;
7031 	msg.rpc_resp = &data->res;
7032 	task_setup_data.callback_data = data;
7033 	return rpc_run_task(&task_setup_data);
7034 }
7035 
7036 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
7037 {
7038 	struct inode *inode = state->inode;
7039 	struct nfs4_state_owner *sp = state->owner;
7040 	struct nfs_inode *nfsi = NFS_I(inode);
7041 	struct nfs_seqid *seqid;
7042 	struct nfs4_lock_state *lsp;
7043 	struct rpc_task *task;
7044 	struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
7045 	int status = 0;
7046 	unsigned char fl_flags = request->fl_flags;
7047 
7048 	status = nfs4_set_lock_state(state, request);
7049 	/* Unlock _before_ we do the RPC call */
7050 	request->fl_flags |= FL_EXISTS;
7051 	/* Exclude nfs_delegation_claim_locks() */
7052 	mutex_lock(&sp->so_delegreturn_mutex);
7053 	/* Exclude nfs4_reclaim_open_stateid() - note nesting! */
7054 	down_read(&nfsi->rwsem);
7055 	if (locks_lock_inode_wait(inode, request) == -ENOENT) {
7056 		up_read(&nfsi->rwsem);
7057 		mutex_unlock(&sp->so_delegreturn_mutex);
7058 		goto out;
7059 	}
7060 	lsp = request->fl_u.nfs4_fl.owner;
7061 	set_bit(NFS_LOCK_UNLOCKING, &lsp->ls_flags);
7062 	up_read(&nfsi->rwsem);
7063 	mutex_unlock(&sp->so_delegreturn_mutex);
7064 	if (status != 0)
7065 		goto out;
7066 	/* Is this a delegated lock? */
7067 	if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
7068 		goto out;
7069 	alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid;
7070 	seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
7071 	status = -ENOMEM;
7072 	if (IS_ERR(seqid))
7073 		goto out;
7074 	task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
7075 	status = PTR_ERR(task);
7076 	if (IS_ERR(task))
7077 		goto out;
7078 	status = rpc_wait_for_completion_task(task);
7079 	rpc_put_task(task);
7080 out:
7081 	request->fl_flags = fl_flags;
7082 	trace_nfs4_unlock(request, state, F_SETLK, status);
7083 	return status;
7084 }
7085 
7086 struct nfs4_lockdata {
7087 	struct nfs_lock_args arg;
7088 	struct nfs_lock_res res;
7089 	struct nfs4_lock_state *lsp;
7090 	struct nfs_open_context *ctx;
7091 	struct file_lock fl;
7092 	unsigned long timestamp;
7093 	int rpc_status;
7094 	int cancelled;
7095 	struct nfs_server *server;
7096 };
7097 
7098 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
7099 		struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
7100 		gfp_t gfp_mask)
7101 {
7102 	struct nfs4_lockdata *p;
7103 	struct inode *inode = lsp->ls_state->inode;
7104 	struct nfs_server *server = NFS_SERVER(inode);
7105 	struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
7106 
7107 	p = kzalloc(sizeof(*p), gfp_mask);
7108 	if (p == NULL)
7109 		return NULL;
7110 
7111 	p->arg.fh = NFS_FH(inode);
7112 	p->arg.fl = &p->fl;
7113 	p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
7114 	if (IS_ERR(p->arg.open_seqid))
7115 		goto out_free;
7116 	alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
7117 	p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
7118 	if (IS_ERR(p->arg.lock_seqid))
7119 		goto out_free_seqid;
7120 	p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
7121 	p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
7122 	p->arg.lock_owner.s_dev = server->s_dev;
7123 	p->res.lock_seqid = p->arg.lock_seqid;
7124 	p->lsp = lsp;
7125 	p->server = server;
7126 	p->ctx = get_nfs_open_context(ctx);
7127 	locks_init_lock(&p->fl);
7128 	locks_copy_lock(&p->fl, fl);
7129 	return p;
7130 out_free_seqid:
7131 	nfs_free_seqid(p->arg.open_seqid);
7132 out_free:
7133 	kfree(p);
7134 	return NULL;
7135 }
7136 
7137 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
7138 {
7139 	struct nfs4_lockdata *data = calldata;
7140 	struct nfs4_state *state = data->lsp->ls_state;
7141 
7142 	if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
7143 		goto out_wait;
7144 	/* Do we need to do an open_to_lock_owner? */
7145 	if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) {
7146 		if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
7147 			goto out_release_lock_seqid;
7148 		}
7149 		nfs4_stateid_copy(&data->arg.open_stateid,
7150 				&state->open_stateid);
7151 		data->arg.new_lock_owner = 1;
7152 		data->res.open_seqid = data->arg.open_seqid;
7153 	} else {
7154 		data->arg.new_lock_owner = 0;
7155 		nfs4_stateid_copy(&data->arg.lock_stateid,
7156 				&data->lsp->ls_stateid);
7157 	}
7158 	if (!nfs4_valid_open_stateid(state)) {
7159 		data->rpc_status = -EBADF;
7160 		task->tk_action = NULL;
7161 		goto out_release_open_seqid;
7162 	}
7163 	data->timestamp = jiffies;
7164 	if (nfs4_setup_sequence(data->server->nfs_client,
7165 				&data->arg.seq_args,
7166 				&data->res.seq_res,
7167 				task) == 0)
7168 		return;
7169 out_release_open_seqid:
7170 	nfs_release_seqid(data->arg.open_seqid);
7171 out_release_lock_seqid:
7172 	nfs_release_seqid(data->arg.lock_seqid);
7173 out_wait:
7174 	nfs4_sequence_done(task, &data->res.seq_res);
7175 	dprintk("%s: ret = %d\n", __func__, data->rpc_status);
7176 }
7177 
7178 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
7179 {
7180 	struct nfs4_lockdata *data = calldata;
7181 	struct nfs4_lock_state *lsp = data->lsp;
7182 
7183 	if (!nfs4_sequence_done(task, &data->res.seq_res))
7184 		return;
7185 
7186 	data->rpc_status = task->tk_status;
7187 	switch (task->tk_status) {
7188 	case 0:
7189 		renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
7190 				data->timestamp);
7191 		if (data->arg.new_lock && !data->cancelled) {
7192 			data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
7193 			if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0)
7194 				goto out_restart;
7195 		}
7196 		if (data->arg.new_lock_owner != 0) {
7197 			nfs_confirm_seqid(&lsp->ls_seqid, 0);
7198 			nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
7199 			set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
7200 		} else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
7201 			goto out_restart;
7202 		break;
7203 	case -NFS4ERR_OLD_STATEID:
7204 		if (data->arg.new_lock_owner != 0 &&
7205 			nfs4_refresh_open_old_stateid(&data->arg.open_stateid,
7206 					lsp->ls_state))
7207 			goto out_restart;
7208 		if (nfs4_refresh_lock_old_stateid(&data->arg.lock_stateid, lsp))
7209 			goto out_restart;
7210 		fallthrough;
7211 	case -NFS4ERR_BAD_STATEID:
7212 	case -NFS4ERR_STALE_STATEID:
7213 	case -NFS4ERR_EXPIRED:
7214 		if (data->arg.new_lock_owner != 0) {
7215 			if (!nfs4_stateid_match(&data->arg.open_stateid,
7216 						&lsp->ls_state->open_stateid))
7217 				goto out_restart;
7218 		} else if (!nfs4_stateid_match(&data->arg.lock_stateid,
7219 						&lsp->ls_stateid))
7220 				goto out_restart;
7221 	}
7222 out_done:
7223 	dprintk("%s: ret = %d!\n", __func__, data->rpc_status);
7224 	return;
7225 out_restart:
7226 	if (!data->cancelled)
7227 		rpc_restart_call_prepare(task);
7228 	goto out_done;
7229 }
7230 
7231 static void nfs4_lock_release(void *calldata)
7232 {
7233 	struct nfs4_lockdata *data = calldata;
7234 
7235 	nfs_free_seqid(data->arg.open_seqid);
7236 	if (data->cancelled && data->rpc_status == 0) {
7237 		struct rpc_task *task;
7238 		task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
7239 				data->arg.lock_seqid);
7240 		if (!IS_ERR(task))
7241 			rpc_put_task_async(task);
7242 		dprintk("%s: cancelling lock!\n", __func__);
7243 	} else
7244 		nfs_free_seqid(data->arg.lock_seqid);
7245 	nfs4_put_lock_state(data->lsp);
7246 	put_nfs_open_context(data->ctx);
7247 	kfree(data);
7248 }
7249 
7250 static const struct rpc_call_ops nfs4_lock_ops = {
7251 	.rpc_call_prepare = nfs4_lock_prepare,
7252 	.rpc_call_done = nfs4_lock_done,
7253 	.rpc_release = nfs4_lock_release,
7254 };
7255 
7256 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
7257 {
7258 	switch (error) {
7259 	case -NFS4ERR_ADMIN_REVOKED:
7260 	case -NFS4ERR_EXPIRED:
7261 	case -NFS4ERR_BAD_STATEID:
7262 		lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
7263 		if (new_lock_owner != 0 ||
7264 		   test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
7265 			nfs4_schedule_stateid_recovery(server, lsp->ls_state);
7266 		break;
7267 	case -NFS4ERR_STALE_STATEID:
7268 		lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
7269 		nfs4_schedule_lease_recovery(server->nfs_client);
7270 	}
7271 }
7272 
7273 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
7274 {
7275 	struct nfs4_lockdata *data;
7276 	struct rpc_task *task;
7277 	struct rpc_message msg = {
7278 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
7279 		.rpc_cred = state->owner->so_cred,
7280 	};
7281 	struct rpc_task_setup task_setup_data = {
7282 		.rpc_client = NFS_CLIENT(state->inode),
7283 		.rpc_message = &msg,
7284 		.callback_ops = &nfs4_lock_ops,
7285 		.workqueue = nfsiod_workqueue,
7286 		.flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
7287 	};
7288 	int ret;
7289 
7290 	if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE))
7291 		task_setup_data.flags |= RPC_TASK_MOVEABLE;
7292 
7293 	data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
7294 				   fl->fl_u.nfs4_fl.owner, GFP_KERNEL);
7295 	if (data == NULL)
7296 		return -ENOMEM;
7297 	if (IS_SETLKW(cmd))
7298 		data->arg.block = 1;
7299 	nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1,
7300 				recovery_type > NFS_LOCK_NEW);
7301 	msg.rpc_argp = &data->arg;
7302 	msg.rpc_resp = &data->res;
7303 	task_setup_data.callback_data = data;
7304 	if (recovery_type > NFS_LOCK_NEW) {
7305 		if (recovery_type == NFS_LOCK_RECLAIM)
7306 			data->arg.reclaim = NFS_LOCK_RECLAIM;
7307 	} else
7308 		data->arg.new_lock = 1;
7309 	task = rpc_run_task(&task_setup_data);
7310 	if (IS_ERR(task))
7311 		return PTR_ERR(task);
7312 	ret = rpc_wait_for_completion_task(task);
7313 	if (ret == 0) {
7314 		ret = data->rpc_status;
7315 		if (ret)
7316 			nfs4_handle_setlk_error(data->server, data->lsp,
7317 					data->arg.new_lock_owner, ret);
7318 	} else
7319 		data->cancelled = true;
7320 	trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
7321 	rpc_put_task(task);
7322 	dprintk("%s: ret = %d\n", __func__, ret);
7323 	return ret;
7324 }
7325 
7326 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
7327 {
7328 	struct nfs_server *server = NFS_SERVER(state->inode);
7329 	struct nfs4_exception exception = {
7330 		.inode = state->inode,
7331 	};
7332 	int err;
7333 
7334 	do {
7335 		/* Cache the lock if possible... */
7336 		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
7337 			return 0;
7338 		err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
7339 		if (err != -NFS4ERR_DELAY)
7340 			break;
7341 		nfs4_handle_exception(server, err, &exception);
7342 	} while (exception.retry);
7343 	return err;
7344 }
7345 
7346 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
7347 {
7348 	struct nfs_server *server = NFS_SERVER(state->inode);
7349 	struct nfs4_exception exception = {
7350 		.inode = state->inode,
7351 	};
7352 	int err;
7353 
7354 	err = nfs4_set_lock_state(state, request);
7355 	if (err != 0)
7356 		return err;
7357 	if (!recover_lost_locks) {
7358 		set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags);
7359 		return 0;
7360 	}
7361 	do {
7362 		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
7363 			return 0;
7364 		err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
7365 		switch (err) {
7366 		default:
7367 			goto out;
7368 		case -NFS4ERR_GRACE:
7369 		case -NFS4ERR_DELAY:
7370 			nfs4_handle_exception(server, err, &exception);
7371 			err = 0;
7372 		}
7373 	} while (exception.retry);
7374 out:
7375 	return err;
7376 }
7377 
7378 #if defined(CONFIG_NFS_V4_1)
7379 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
7380 {
7381 	struct nfs4_lock_state *lsp;
7382 	int status;
7383 
7384 	status = nfs4_set_lock_state(state, request);
7385 	if (status != 0)
7386 		return status;
7387 	lsp = request->fl_u.nfs4_fl.owner;
7388 	if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) ||
7389 	    test_bit(NFS_LOCK_LOST, &lsp->ls_flags))
7390 		return 0;
7391 	return nfs4_lock_expired(state, request);
7392 }
7393 #endif
7394 
7395 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7396 {
7397 	struct nfs_inode *nfsi = NFS_I(state->inode);
7398 	struct nfs4_state_owner *sp = state->owner;
7399 	unsigned char fl_flags = request->fl_flags;
7400 	int status;
7401 
7402 	request->fl_flags |= FL_ACCESS;
7403 	status = locks_lock_inode_wait(state->inode, request);
7404 	if (status < 0)
7405 		goto out;
7406 	mutex_lock(&sp->so_delegreturn_mutex);
7407 	down_read(&nfsi->rwsem);
7408 	if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
7409 		/* Yes: cache locks! */
7410 		/* ...but avoid races with delegation recall... */
7411 		request->fl_flags = fl_flags & ~FL_SLEEP;
7412 		status = locks_lock_inode_wait(state->inode, request);
7413 		up_read(&nfsi->rwsem);
7414 		mutex_unlock(&sp->so_delegreturn_mutex);
7415 		goto out;
7416 	}
7417 	up_read(&nfsi->rwsem);
7418 	mutex_unlock(&sp->so_delegreturn_mutex);
7419 	status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
7420 out:
7421 	request->fl_flags = fl_flags;
7422 	return status;
7423 }
7424 
7425 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7426 {
7427 	struct nfs4_exception exception = {
7428 		.state = state,
7429 		.inode = state->inode,
7430 		.interruptible = true,
7431 	};
7432 	int err;
7433 
7434 	do {
7435 		err = _nfs4_proc_setlk(state, cmd, request);
7436 		if (err == -NFS4ERR_DENIED)
7437 			err = -EAGAIN;
7438 		err = nfs4_handle_exception(NFS_SERVER(state->inode),
7439 				err, &exception);
7440 	} while (exception.retry);
7441 	return err;
7442 }
7443 
7444 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
7445 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
7446 
7447 static int
7448 nfs4_retry_setlk_simple(struct nfs4_state *state, int cmd,
7449 			struct file_lock *request)
7450 {
7451 	int		status = -ERESTARTSYS;
7452 	unsigned long	timeout = NFS4_LOCK_MINTIMEOUT;
7453 
7454 	while(!signalled()) {
7455 		status = nfs4_proc_setlk(state, cmd, request);
7456 		if ((status != -EAGAIN) || IS_SETLK(cmd))
7457 			break;
7458 		__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
7459 		schedule_timeout(timeout);
7460 		timeout *= 2;
7461 		timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout);
7462 		status = -ERESTARTSYS;
7463 	}
7464 	return status;
7465 }
7466 
7467 #ifdef CONFIG_NFS_V4_1
7468 struct nfs4_lock_waiter {
7469 	struct inode		*inode;
7470 	struct nfs_lowner	owner;
7471 	wait_queue_entry_t	wait;
7472 };
7473 
7474 static int
7475 nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key)
7476 {
7477 	struct nfs4_lock_waiter	*waiter	=
7478 		container_of(wait, struct nfs4_lock_waiter, wait);
7479 
7480 	/* NULL key means to wake up everyone */
7481 	if (key) {
7482 		struct cb_notify_lock_args	*cbnl = key;
7483 		struct nfs_lowner		*lowner = &cbnl->cbnl_owner,
7484 						*wowner = &waiter->owner;
7485 
7486 		/* Only wake if the callback was for the same owner. */
7487 		if (lowner->id != wowner->id || lowner->s_dev != wowner->s_dev)
7488 			return 0;
7489 
7490 		/* Make sure it's for the right inode */
7491 		if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh))
7492 			return 0;
7493 	}
7494 
7495 	return woken_wake_function(wait, mode, flags, key);
7496 }
7497 
7498 static int
7499 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7500 {
7501 	struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
7502 	struct nfs_server *server = NFS_SERVER(state->inode);
7503 	struct nfs_client *clp = server->nfs_client;
7504 	wait_queue_head_t *q = &clp->cl_lock_waitq;
7505 	struct nfs4_lock_waiter waiter = {
7506 		.inode = state->inode,
7507 		.owner = { .clientid = clp->cl_clientid,
7508 			   .id = lsp->ls_seqid.owner_id,
7509 			   .s_dev = server->s_dev },
7510 	};
7511 	int status;
7512 
7513 	/* Don't bother with waitqueue if we don't expect a callback */
7514 	if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags))
7515 		return nfs4_retry_setlk_simple(state, cmd, request);
7516 
7517 	init_wait(&waiter.wait);
7518 	waiter.wait.func = nfs4_wake_lock_waiter;
7519 	add_wait_queue(q, &waiter.wait);
7520 
7521 	do {
7522 		status = nfs4_proc_setlk(state, cmd, request);
7523 		if (status != -EAGAIN || IS_SETLK(cmd))
7524 			break;
7525 
7526 		status = -ERESTARTSYS;
7527 		wait_woken(&waiter.wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE,
7528 			   NFS4_LOCK_MAXTIMEOUT);
7529 	} while (!signalled());
7530 
7531 	remove_wait_queue(q, &waiter.wait);
7532 
7533 	return status;
7534 }
7535 #else /* !CONFIG_NFS_V4_1 */
7536 static inline int
7537 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7538 {
7539 	return nfs4_retry_setlk_simple(state, cmd, request);
7540 }
7541 #endif
7542 
7543 static int
7544 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
7545 {
7546 	struct nfs_open_context *ctx;
7547 	struct nfs4_state *state;
7548 	int status;
7549 
7550 	/* verify open state */
7551 	ctx = nfs_file_open_context(filp);
7552 	state = ctx->state;
7553 
7554 	if (IS_GETLK(cmd)) {
7555 		if (state != NULL)
7556 			return nfs4_proc_getlk(state, F_GETLK, request);
7557 		return 0;
7558 	}
7559 
7560 	if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
7561 		return -EINVAL;
7562 
7563 	if (request->fl_type == F_UNLCK) {
7564 		if (state != NULL)
7565 			return nfs4_proc_unlck(state, cmd, request);
7566 		return 0;
7567 	}
7568 
7569 	if (state == NULL)
7570 		return -ENOLCK;
7571 
7572 	if ((request->fl_flags & FL_POSIX) &&
7573 	    !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
7574 		return -ENOLCK;
7575 
7576 	/*
7577 	 * Don't rely on the VFS having checked the file open mode,
7578 	 * since it won't do this for flock() locks.
7579 	 */
7580 	switch (request->fl_type) {
7581 	case F_RDLCK:
7582 		if (!(filp->f_mode & FMODE_READ))
7583 			return -EBADF;
7584 		break;
7585 	case F_WRLCK:
7586 		if (!(filp->f_mode & FMODE_WRITE))
7587 			return -EBADF;
7588 	}
7589 
7590 	status = nfs4_set_lock_state(state, request);
7591 	if (status != 0)
7592 		return status;
7593 
7594 	return nfs4_retry_setlk(state, cmd, request);
7595 }
7596 
7597 static int nfs4_delete_lease(struct file *file, void **priv)
7598 {
7599 	return generic_setlease(file, F_UNLCK, NULL, priv);
7600 }
7601 
7602 static int nfs4_add_lease(struct file *file, int arg, struct file_lock **lease,
7603 			  void **priv)
7604 {
7605 	struct inode *inode = file_inode(file);
7606 	fmode_t type = arg == F_RDLCK ? FMODE_READ : FMODE_WRITE;
7607 	int ret;
7608 
7609 	/* No delegation, no lease */
7610 	if (!nfs4_have_delegation(inode, type))
7611 		return -EAGAIN;
7612 	ret = generic_setlease(file, arg, lease, priv);
7613 	if (ret || nfs4_have_delegation(inode, type))
7614 		return ret;
7615 	/* We raced with a delegation return */
7616 	nfs4_delete_lease(file, priv);
7617 	return -EAGAIN;
7618 }
7619 
7620 int nfs4_proc_setlease(struct file *file, int arg, struct file_lock **lease,
7621 		       void **priv)
7622 {
7623 	switch (arg) {
7624 	case F_RDLCK:
7625 	case F_WRLCK:
7626 		return nfs4_add_lease(file, arg, lease, priv);
7627 	case F_UNLCK:
7628 		return nfs4_delete_lease(file, priv);
7629 	default:
7630 		return -EINVAL;
7631 	}
7632 }
7633 
7634 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
7635 {
7636 	struct nfs_server *server = NFS_SERVER(state->inode);
7637 	int err;
7638 
7639 	err = nfs4_set_lock_state(state, fl);
7640 	if (err != 0)
7641 		return err;
7642 	do {
7643 		err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
7644 		if (err != -NFS4ERR_DELAY)
7645 			break;
7646 		ssleep(1);
7647 	} while (err == -NFS4ERR_DELAY);
7648 	return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err);
7649 }
7650 
7651 struct nfs_release_lockowner_data {
7652 	struct nfs4_lock_state *lsp;
7653 	struct nfs_server *server;
7654 	struct nfs_release_lockowner_args args;
7655 	struct nfs_release_lockowner_res res;
7656 	unsigned long timestamp;
7657 };
7658 
7659 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
7660 {
7661 	struct nfs_release_lockowner_data *data = calldata;
7662 	struct nfs_server *server = data->server;
7663 	nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
7664 			   &data->res.seq_res, task);
7665 	data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
7666 	data->timestamp = jiffies;
7667 }
7668 
7669 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
7670 {
7671 	struct nfs_release_lockowner_data *data = calldata;
7672 	struct nfs_server *server = data->server;
7673 
7674 	nfs40_sequence_done(task, &data->res.seq_res);
7675 
7676 	switch (task->tk_status) {
7677 	case 0:
7678 		renew_lease(server, data->timestamp);
7679 		break;
7680 	case -NFS4ERR_STALE_CLIENTID:
7681 	case -NFS4ERR_EXPIRED:
7682 		nfs4_schedule_lease_recovery(server->nfs_client);
7683 		break;
7684 	case -NFS4ERR_LEASE_MOVED:
7685 	case -NFS4ERR_DELAY:
7686 		if (nfs4_async_handle_error(task, server,
7687 					    NULL, NULL) == -EAGAIN)
7688 			rpc_restart_call_prepare(task);
7689 	}
7690 }
7691 
7692 static void nfs4_release_lockowner_release(void *calldata)
7693 {
7694 	struct nfs_release_lockowner_data *data = calldata;
7695 	nfs4_free_lock_state(data->server, data->lsp);
7696 	kfree(calldata);
7697 }
7698 
7699 static const struct rpc_call_ops nfs4_release_lockowner_ops = {
7700 	.rpc_call_prepare = nfs4_release_lockowner_prepare,
7701 	.rpc_call_done = nfs4_release_lockowner_done,
7702 	.rpc_release = nfs4_release_lockowner_release,
7703 };
7704 
7705 static void
7706 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
7707 {
7708 	struct nfs_release_lockowner_data *data;
7709 	struct rpc_message msg = {
7710 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
7711 	};
7712 
7713 	if (server->nfs_client->cl_mvops->minor_version != 0)
7714 		return;
7715 
7716 	data = kmalloc(sizeof(*data), GFP_KERNEL);
7717 	if (!data)
7718 		return;
7719 	data->lsp = lsp;
7720 	data->server = server;
7721 	data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
7722 	data->args.lock_owner.id = lsp->ls_seqid.owner_id;
7723 	data->args.lock_owner.s_dev = server->s_dev;
7724 
7725 	msg.rpc_argp = &data->args;
7726 	msg.rpc_resp = &data->res;
7727 	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
7728 	rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
7729 }
7730 
7731 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
7732 
7733 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler,
7734 				   struct mnt_idmap *idmap,
7735 				   struct dentry *unused, struct inode *inode,
7736 				   const char *key, const void *buf,
7737 				   size_t buflen, int flags)
7738 {
7739 	return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_ACL);
7740 }
7741 
7742 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler,
7743 				   struct dentry *unused, struct inode *inode,
7744 				   const char *key, void *buf, size_t buflen)
7745 {
7746 	return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_ACL);
7747 }
7748 
7749 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry)
7750 {
7751 	return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_ACL);
7752 }
7753 
7754 #if defined(CONFIG_NFS_V4_1)
7755 #define XATTR_NAME_NFSV4_DACL "system.nfs4_dacl"
7756 
7757 static int nfs4_xattr_set_nfs4_dacl(const struct xattr_handler *handler,
7758 				    struct mnt_idmap *idmap,
7759 				    struct dentry *unused, struct inode *inode,
7760 				    const char *key, const void *buf,
7761 				    size_t buflen, int flags)
7762 {
7763 	return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_DACL);
7764 }
7765 
7766 static int nfs4_xattr_get_nfs4_dacl(const struct xattr_handler *handler,
7767 				    struct dentry *unused, struct inode *inode,
7768 				    const char *key, void *buf, size_t buflen)
7769 {
7770 	return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_DACL);
7771 }
7772 
7773 static bool nfs4_xattr_list_nfs4_dacl(struct dentry *dentry)
7774 {
7775 	return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_DACL);
7776 }
7777 
7778 #define XATTR_NAME_NFSV4_SACL "system.nfs4_sacl"
7779 
7780 static int nfs4_xattr_set_nfs4_sacl(const struct xattr_handler *handler,
7781 				    struct mnt_idmap *idmap,
7782 				    struct dentry *unused, struct inode *inode,
7783 				    const char *key, const void *buf,
7784 				    size_t buflen, int flags)
7785 {
7786 	return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_SACL);
7787 }
7788 
7789 static int nfs4_xattr_get_nfs4_sacl(const struct xattr_handler *handler,
7790 				    struct dentry *unused, struct inode *inode,
7791 				    const char *key, void *buf, size_t buflen)
7792 {
7793 	return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_SACL);
7794 }
7795 
7796 static bool nfs4_xattr_list_nfs4_sacl(struct dentry *dentry)
7797 {
7798 	return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_SACL);
7799 }
7800 
7801 #endif
7802 
7803 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
7804 
7805 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler,
7806 				     struct mnt_idmap *idmap,
7807 				     struct dentry *unused, struct inode *inode,
7808 				     const char *key, const void *buf,
7809 				     size_t buflen, int flags)
7810 {
7811 	if (security_ismaclabel(key))
7812 		return nfs4_set_security_label(inode, buf, buflen);
7813 
7814 	return -EOPNOTSUPP;
7815 }
7816 
7817 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler,
7818 				     struct dentry *unused, struct inode *inode,
7819 				     const char *key, void *buf, size_t buflen)
7820 {
7821 	if (security_ismaclabel(key))
7822 		return nfs4_get_security_label(inode, buf, buflen);
7823 	return -EOPNOTSUPP;
7824 }
7825 
7826 static ssize_t
7827 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
7828 {
7829 	int len = 0;
7830 
7831 	if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) {
7832 		len = security_inode_listsecurity(inode, list, list_len);
7833 		if (len >= 0 && list_len && len > list_len)
7834 			return -ERANGE;
7835 	}
7836 	return len;
7837 }
7838 
7839 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = {
7840 	.prefix = XATTR_SECURITY_PREFIX,
7841 	.get	= nfs4_xattr_get_nfs4_label,
7842 	.set	= nfs4_xattr_set_nfs4_label,
7843 };
7844 
7845 #else
7846 
7847 static ssize_t
7848 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
7849 {
7850 	return 0;
7851 }
7852 
7853 #endif
7854 
7855 #ifdef CONFIG_NFS_V4_2
7856 static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler,
7857 				    struct mnt_idmap *idmap,
7858 				    struct dentry *unused, struct inode *inode,
7859 				    const char *key, const void *buf,
7860 				    size_t buflen, int flags)
7861 {
7862 	u32 mask;
7863 	int ret;
7864 
7865 	if (!nfs_server_capable(inode, NFS_CAP_XATTR))
7866 		return -EOPNOTSUPP;
7867 
7868 	/*
7869 	 * There is no mapping from the MAY_* flags to the NFS_ACCESS_XA*
7870 	 * flags right now. Handling of xattr operations use the normal
7871 	 * file read/write permissions.
7872 	 *
7873 	 * Just in case the server has other ideas (which RFC 8276 allows),
7874 	 * do a cached access check for the XA* flags to possibly avoid
7875 	 * doing an RPC and getting EACCES back.
7876 	 */
7877 	if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
7878 		if (!(mask & NFS_ACCESS_XAWRITE))
7879 			return -EACCES;
7880 	}
7881 
7882 	if (buf == NULL) {
7883 		ret = nfs42_proc_removexattr(inode, key);
7884 		if (!ret)
7885 			nfs4_xattr_cache_remove(inode, key);
7886 	} else {
7887 		ret = nfs42_proc_setxattr(inode, key, buf, buflen, flags);
7888 		if (!ret)
7889 			nfs4_xattr_cache_add(inode, key, buf, NULL, buflen);
7890 	}
7891 
7892 	return ret;
7893 }
7894 
7895 static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler,
7896 				    struct dentry *unused, struct inode *inode,
7897 				    const char *key, void *buf, size_t buflen)
7898 {
7899 	u32 mask;
7900 	ssize_t ret;
7901 
7902 	if (!nfs_server_capable(inode, NFS_CAP_XATTR))
7903 		return -EOPNOTSUPP;
7904 
7905 	if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
7906 		if (!(mask & NFS_ACCESS_XAREAD))
7907 			return -EACCES;
7908 	}
7909 
7910 	ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
7911 	if (ret)
7912 		return ret;
7913 
7914 	ret = nfs4_xattr_cache_get(inode, key, buf, buflen);
7915 	if (ret >= 0 || (ret < 0 && ret != -ENOENT))
7916 		return ret;
7917 
7918 	ret = nfs42_proc_getxattr(inode, key, buf, buflen);
7919 
7920 	return ret;
7921 }
7922 
7923 static ssize_t
7924 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len)
7925 {
7926 	u64 cookie;
7927 	bool eof;
7928 	ssize_t ret, size;
7929 	char *buf;
7930 	size_t buflen;
7931 	u32 mask;
7932 
7933 	if (!nfs_server_capable(inode, NFS_CAP_XATTR))
7934 		return 0;
7935 
7936 	if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
7937 		if (!(mask & NFS_ACCESS_XALIST))
7938 			return 0;
7939 	}
7940 
7941 	ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
7942 	if (ret)
7943 		return ret;
7944 
7945 	ret = nfs4_xattr_cache_list(inode, list, list_len);
7946 	if (ret >= 0 || (ret < 0 && ret != -ENOENT))
7947 		return ret;
7948 
7949 	cookie = 0;
7950 	eof = false;
7951 	buflen = list_len ? list_len : XATTR_LIST_MAX;
7952 	buf = list_len ? list : NULL;
7953 	size = 0;
7954 
7955 	while (!eof) {
7956 		ret = nfs42_proc_listxattrs(inode, buf, buflen,
7957 		    &cookie, &eof);
7958 		if (ret < 0)
7959 			return ret;
7960 
7961 		if (list_len) {
7962 			buf += ret;
7963 			buflen -= ret;
7964 		}
7965 		size += ret;
7966 	}
7967 
7968 	if (list_len)
7969 		nfs4_xattr_cache_set_list(inode, list, size);
7970 
7971 	return size;
7972 }
7973 
7974 #else
7975 
7976 static ssize_t
7977 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len)
7978 {
7979 	return 0;
7980 }
7981 #endif /* CONFIG_NFS_V4_2 */
7982 
7983 /*
7984  * nfs_fhget will use either the mounted_on_fileid or the fileid
7985  */
7986 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
7987 {
7988 	if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
7989 	       (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
7990 	      (fattr->valid & NFS_ATTR_FATTR_FSID) &&
7991 	      (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
7992 		return;
7993 
7994 	fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
7995 		NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
7996 	fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
7997 	fattr->nlink = 2;
7998 }
7999 
8000 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
8001 				   const struct qstr *name,
8002 				   struct nfs4_fs_locations *fs_locations,
8003 				   struct page *page)
8004 {
8005 	struct nfs_server *server = NFS_SERVER(dir);
8006 	u32 bitmask[3];
8007 	struct nfs4_fs_locations_arg args = {
8008 		.dir_fh = NFS_FH(dir),
8009 		.name = name,
8010 		.page = page,
8011 		.bitmask = bitmask,
8012 	};
8013 	struct nfs4_fs_locations_res res = {
8014 		.fs_locations = fs_locations,
8015 	};
8016 	struct rpc_message msg = {
8017 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
8018 		.rpc_argp = &args,
8019 		.rpc_resp = &res,
8020 	};
8021 	int status;
8022 
8023 	dprintk("%s: start\n", __func__);
8024 
8025 	bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS;
8026 	bitmask[1] = nfs4_fattr_bitmap[1];
8027 
8028 	/* Ask for the fileid of the absent filesystem if mounted_on_fileid
8029 	 * is not supported */
8030 	if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
8031 		bitmask[0] &= ~FATTR4_WORD0_FILEID;
8032 	else
8033 		bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
8034 
8035 	nfs_fattr_init(fs_locations->fattr);
8036 	fs_locations->server = server;
8037 	fs_locations->nlocations = 0;
8038 	status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
8039 	dprintk("%s: returned status = %d\n", __func__, status);
8040 	return status;
8041 }
8042 
8043 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
8044 			   const struct qstr *name,
8045 			   struct nfs4_fs_locations *fs_locations,
8046 			   struct page *page)
8047 {
8048 	struct nfs4_exception exception = {
8049 		.interruptible = true,
8050 	};
8051 	int err;
8052 	do {
8053 		err = _nfs4_proc_fs_locations(client, dir, name,
8054 				fs_locations, page);
8055 		trace_nfs4_get_fs_locations(dir, name, err);
8056 		err = nfs4_handle_exception(NFS_SERVER(dir), err,
8057 				&exception);
8058 	} while (exception.retry);
8059 	return err;
8060 }
8061 
8062 /*
8063  * This operation also signals the server that this client is
8064  * performing migration recovery.  The server can stop returning
8065  * NFS4ERR_LEASE_MOVED to this client.  A RENEW operation is
8066  * appended to this compound to identify the client ID which is
8067  * performing recovery.
8068  */
8069 static int _nfs40_proc_get_locations(struct nfs_server *server,
8070 				     struct nfs_fh *fhandle,
8071 				     struct nfs4_fs_locations *locations,
8072 				     struct page *page, const struct cred *cred)
8073 {
8074 	struct rpc_clnt *clnt = server->client;
8075 	u32 bitmask[2] = {
8076 		[0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
8077 	};
8078 	struct nfs4_fs_locations_arg args = {
8079 		.clientid	= server->nfs_client->cl_clientid,
8080 		.fh		= fhandle,
8081 		.page		= page,
8082 		.bitmask	= bitmask,
8083 		.migration	= 1,		/* skip LOOKUP */
8084 		.renew		= 1,		/* append RENEW */
8085 	};
8086 	struct nfs4_fs_locations_res res = {
8087 		.fs_locations	= locations,
8088 		.migration	= 1,
8089 		.renew		= 1,
8090 	};
8091 	struct rpc_message msg = {
8092 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
8093 		.rpc_argp	= &args,
8094 		.rpc_resp	= &res,
8095 		.rpc_cred	= cred,
8096 	};
8097 	unsigned long now = jiffies;
8098 	int status;
8099 
8100 	nfs_fattr_init(locations->fattr);
8101 	locations->server = server;
8102 	locations->nlocations = 0;
8103 
8104 	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
8105 	status = nfs4_call_sync_sequence(clnt, server, &msg,
8106 					&args.seq_args, &res.seq_res);
8107 	if (status)
8108 		return status;
8109 
8110 	renew_lease(server, now);
8111 	return 0;
8112 }
8113 
8114 #ifdef CONFIG_NFS_V4_1
8115 
8116 /*
8117  * This operation also signals the server that this client is
8118  * performing migration recovery.  The server can stop asserting
8119  * SEQ4_STATUS_LEASE_MOVED for this client.  The client ID
8120  * performing this operation is identified in the SEQUENCE
8121  * operation in this compound.
8122  *
8123  * When the client supports GETATTR(fs_locations_info), it can
8124  * be plumbed in here.
8125  */
8126 static int _nfs41_proc_get_locations(struct nfs_server *server,
8127 				     struct nfs_fh *fhandle,
8128 				     struct nfs4_fs_locations *locations,
8129 				     struct page *page, const struct cred *cred)
8130 {
8131 	struct rpc_clnt *clnt = server->client;
8132 	u32 bitmask[2] = {
8133 		[0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
8134 	};
8135 	struct nfs4_fs_locations_arg args = {
8136 		.fh		= fhandle,
8137 		.page		= page,
8138 		.bitmask	= bitmask,
8139 		.migration	= 1,		/* skip LOOKUP */
8140 	};
8141 	struct nfs4_fs_locations_res res = {
8142 		.fs_locations	= locations,
8143 		.migration	= 1,
8144 	};
8145 	struct rpc_message msg = {
8146 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
8147 		.rpc_argp	= &args,
8148 		.rpc_resp	= &res,
8149 		.rpc_cred	= cred,
8150 	};
8151 	struct nfs4_call_sync_data data = {
8152 		.seq_server = server,
8153 		.seq_args = &args.seq_args,
8154 		.seq_res = &res.seq_res,
8155 	};
8156 	struct rpc_task_setup task_setup_data = {
8157 		.rpc_client = clnt,
8158 		.rpc_message = &msg,
8159 		.callback_ops = server->nfs_client->cl_mvops->call_sync_ops,
8160 		.callback_data = &data,
8161 		.flags = RPC_TASK_NO_ROUND_ROBIN,
8162 	};
8163 	int status;
8164 
8165 	nfs_fattr_init(locations->fattr);
8166 	locations->server = server;
8167 	locations->nlocations = 0;
8168 
8169 	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
8170 	status = nfs4_call_sync_custom(&task_setup_data);
8171 	if (status == NFS4_OK &&
8172 	    res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
8173 		status = -NFS4ERR_LEASE_MOVED;
8174 	return status;
8175 }
8176 
8177 #endif	/* CONFIG_NFS_V4_1 */
8178 
8179 /**
8180  * nfs4_proc_get_locations - discover locations for a migrated FSID
8181  * @server: pointer to nfs_server to process
8182  * @fhandle: pointer to the kernel NFS client file handle
8183  * @locations: result of query
8184  * @page: buffer
8185  * @cred: credential to use for this operation
8186  *
8187  * Returns NFS4_OK on success, a negative NFS4ERR status code if the
8188  * operation failed, or a negative errno if a local error occurred.
8189  *
8190  * On success, "locations" is filled in, but if the server has
8191  * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not
8192  * asserted.
8193  *
8194  * -NFS4ERR_LEASE_MOVED is returned if the server still has leases
8195  * from this client that require migration recovery.
8196  */
8197 int nfs4_proc_get_locations(struct nfs_server *server,
8198 			    struct nfs_fh *fhandle,
8199 			    struct nfs4_fs_locations *locations,
8200 			    struct page *page, const struct cred *cred)
8201 {
8202 	struct nfs_client *clp = server->nfs_client;
8203 	const struct nfs4_mig_recovery_ops *ops =
8204 					clp->cl_mvops->mig_recovery_ops;
8205 	struct nfs4_exception exception = {
8206 		.interruptible = true,
8207 	};
8208 	int status;
8209 
8210 	dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
8211 		(unsigned long long)server->fsid.major,
8212 		(unsigned long long)server->fsid.minor,
8213 		clp->cl_hostname);
8214 	nfs_display_fhandle(fhandle, __func__);
8215 
8216 	do {
8217 		status = ops->get_locations(server, fhandle, locations, page,
8218 					    cred);
8219 		if (status != -NFS4ERR_DELAY)
8220 			break;
8221 		nfs4_handle_exception(server, status, &exception);
8222 	} while (exception.retry);
8223 	return status;
8224 }
8225 
8226 /*
8227  * This operation also signals the server that this client is
8228  * performing "lease moved" recovery.  The server can stop
8229  * returning NFS4ERR_LEASE_MOVED to this client.  A RENEW operation
8230  * is appended to this compound to identify the client ID which is
8231  * performing recovery.
8232  */
8233 static int _nfs40_proc_fsid_present(struct inode *inode, const struct cred *cred)
8234 {
8235 	struct nfs_server *server = NFS_SERVER(inode);
8236 	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
8237 	struct rpc_clnt *clnt = server->client;
8238 	struct nfs4_fsid_present_arg args = {
8239 		.fh		= NFS_FH(inode),
8240 		.clientid	= clp->cl_clientid,
8241 		.renew		= 1,		/* append RENEW */
8242 	};
8243 	struct nfs4_fsid_present_res res = {
8244 		.renew		= 1,
8245 	};
8246 	struct rpc_message msg = {
8247 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
8248 		.rpc_argp	= &args,
8249 		.rpc_resp	= &res,
8250 		.rpc_cred	= cred,
8251 	};
8252 	unsigned long now = jiffies;
8253 	int status;
8254 
8255 	res.fh = nfs_alloc_fhandle();
8256 	if (res.fh == NULL)
8257 		return -ENOMEM;
8258 
8259 	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
8260 	status = nfs4_call_sync_sequence(clnt, server, &msg,
8261 						&args.seq_args, &res.seq_res);
8262 	nfs_free_fhandle(res.fh);
8263 	if (status)
8264 		return status;
8265 
8266 	do_renew_lease(clp, now);
8267 	return 0;
8268 }
8269 
8270 #ifdef CONFIG_NFS_V4_1
8271 
8272 /*
8273  * This operation also signals the server that this client is
8274  * performing "lease moved" recovery.  The server can stop asserting
8275  * SEQ4_STATUS_LEASE_MOVED for this client.  The client ID performing
8276  * this operation is identified in the SEQUENCE operation in this
8277  * compound.
8278  */
8279 static int _nfs41_proc_fsid_present(struct inode *inode, const struct cred *cred)
8280 {
8281 	struct nfs_server *server = NFS_SERVER(inode);
8282 	struct rpc_clnt *clnt = server->client;
8283 	struct nfs4_fsid_present_arg args = {
8284 		.fh		= NFS_FH(inode),
8285 	};
8286 	struct nfs4_fsid_present_res res = {
8287 	};
8288 	struct rpc_message msg = {
8289 		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
8290 		.rpc_argp	= &args,
8291 		.rpc_resp	= &res,
8292 		.rpc_cred	= cred,
8293 	};
8294 	int status;
8295 
8296 	res.fh = nfs_alloc_fhandle();
8297 	if (res.fh == NULL)
8298 		return -ENOMEM;
8299 
8300 	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
8301 	status = nfs4_call_sync_sequence(clnt, server, &msg,
8302 						&args.seq_args, &res.seq_res);
8303 	nfs_free_fhandle(res.fh);
8304 	if (status == NFS4_OK &&
8305 	    res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
8306 		status = -NFS4ERR_LEASE_MOVED;
8307 	return status;
8308 }
8309 
8310 #endif	/* CONFIG_NFS_V4_1 */
8311 
8312 /**
8313  * nfs4_proc_fsid_present - Is this FSID present or absent on server?
8314  * @inode: inode on FSID to check
8315  * @cred: credential to use for this operation
8316  *
8317  * Server indicates whether the FSID is present, moved, or not
8318  * recognized.  This operation is necessary to clear a LEASE_MOVED
8319  * condition for this client ID.
8320  *
8321  * Returns NFS4_OK if the FSID is present on this server,
8322  * -NFS4ERR_MOVED if the FSID is no longer present, a negative
8323  *  NFS4ERR code if some error occurred on the server, or a
8324  *  negative errno if a local failure occurred.
8325  */
8326 int nfs4_proc_fsid_present(struct inode *inode, const struct cred *cred)
8327 {
8328 	struct nfs_server *server = NFS_SERVER(inode);
8329 	struct nfs_client *clp = server->nfs_client;
8330 	const struct nfs4_mig_recovery_ops *ops =
8331 					clp->cl_mvops->mig_recovery_ops;
8332 	struct nfs4_exception exception = {
8333 		.interruptible = true,
8334 	};
8335 	int status;
8336 
8337 	dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
8338 		(unsigned long long)server->fsid.major,
8339 		(unsigned long long)server->fsid.minor,
8340 		clp->cl_hostname);
8341 	nfs_display_fhandle(NFS_FH(inode), __func__);
8342 
8343 	do {
8344 		status = ops->fsid_present(inode, cred);
8345 		if (status != -NFS4ERR_DELAY)
8346 			break;
8347 		nfs4_handle_exception(server, status, &exception);
8348 	} while (exception.retry);
8349 	return status;
8350 }
8351 
8352 /*
8353  * If 'use_integrity' is true and the state managment nfs_client
8354  * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
8355  * and the machine credential as per RFC3530bis and RFC5661 Security
8356  * Considerations sections. Otherwise, just use the user cred with the
8357  * filesystem's rpc_client.
8358  */
8359 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity)
8360 {
8361 	int status;
8362 	struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
8363 	struct nfs_client *clp = NFS_SERVER(dir)->nfs_client;
8364 	struct nfs4_secinfo_arg args = {
8365 		.dir_fh = NFS_FH(dir),
8366 		.name   = name,
8367 	};
8368 	struct nfs4_secinfo_res res = {
8369 		.flavors     = flavors,
8370 	};
8371 	struct rpc_message msg = {
8372 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
8373 		.rpc_argp = &args,
8374 		.rpc_resp = &res,
8375 	};
8376 	struct nfs4_call_sync_data data = {
8377 		.seq_server = NFS_SERVER(dir),
8378 		.seq_args = &args.seq_args,
8379 		.seq_res = &res.seq_res,
8380 	};
8381 	struct rpc_task_setup task_setup = {
8382 		.rpc_client = clnt,
8383 		.rpc_message = &msg,
8384 		.callback_ops = clp->cl_mvops->call_sync_ops,
8385 		.callback_data = &data,
8386 		.flags = RPC_TASK_NO_ROUND_ROBIN,
8387 	};
8388 	const struct cred *cred = NULL;
8389 
8390 	if (use_integrity) {
8391 		clnt = clp->cl_rpcclient;
8392 		task_setup.rpc_client = clnt;
8393 
8394 		cred = nfs4_get_clid_cred(clp);
8395 		msg.rpc_cred = cred;
8396 	}
8397 
8398 	dprintk("NFS call  secinfo %s\n", name->name);
8399 
8400 	nfs4_state_protect(clp, NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
8401 	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
8402 	status = nfs4_call_sync_custom(&task_setup);
8403 
8404 	dprintk("NFS reply  secinfo: %d\n", status);
8405 
8406 	put_cred(cred);
8407 	return status;
8408 }
8409 
8410 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
8411 		      struct nfs4_secinfo_flavors *flavors)
8412 {
8413 	struct nfs4_exception exception = {
8414 		.interruptible = true,
8415 	};
8416 	int err;
8417 	do {
8418 		err = -NFS4ERR_WRONGSEC;
8419 
8420 		/* try to use integrity protection with machine cred */
8421 		if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client))
8422 			err = _nfs4_proc_secinfo(dir, name, flavors, true);
8423 
8424 		/*
8425 		 * if unable to use integrity protection, or SECINFO with
8426 		 * integrity protection returns NFS4ERR_WRONGSEC (which is
8427 		 * disallowed by spec, but exists in deployed servers) use
8428 		 * the current filesystem's rpc_client and the user cred.
8429 		 */
8430 		if (err == -NFS4ERR_WRONGSEC)
8431 			err = _nfs4_proc_secinfo(dir, name, flavors, false);
8432 
8433 		trace_nfs4_secinfo(dir, name, err);
8434 		err = nfs4_handle_exception(NFS_SERVER(dir), err,
8435 				&exception);
8436 	} while (exception.retry);
8437 	return err;
8438 }
8439 
8440 #ifdef CONFIG_NFS_V4_1
8441 /*
8442  * Check the exchange flags returned by the server for invalid flags, having
8443  * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or
8444  * DS flags set.
8445  */
8446 static int nfs4_check_cl_exchange_flags(u32 flags, u32 version)
8447 {
8448 	if (version >= 2 && (flags & ~EXCHGID4_2_FLAG_MASK_R))
8449 		goto out_inval;
8450 	else if (version < 2 && (flags & ~EXCHGID4_FLAG_MASK_R))
8451 		goto out_inval;
8452 	if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
8453 	    (flags & EXCHGID4_FLAG_USE_NON_PNFS))
8454 		goto out_inval;
8455 	if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
8456 		goto out_inval;
8457 	return NFS_OK;
8458 out_inval:
8459 	return -NFS4ERR_INVAL;
8460 }
8461 
8462 static bool
8463 nfs41_same_server_scope(struct nfs41_server_scope *a,
8464 			struct nfs41_server_scope *b)
8465 {
8466 	if (a->server_scope_sz != b->server_scope_sz)
8467 		return false;
8468 	return memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0;
8469 }
8470 
8471 static void
8472 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
8473 {
8474 	struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp;
8475 	struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp;
8476 	struct nfs_client *clp = args->client;
8477 
8478 	switch (task->tk_status) {
8479 	case -NFS4ERR_BADSESSION:
8480 	case -NFS4ERR_DEADSESSION:
8481 		nfs4_schedule_session_recovery(clp->cl_session,
8482 				task->tk_status);
8483 		return;
8484 	}
8485 	if (args->dir == NFS4_CDFC4_FORE_OR_BOTH &&
8486 			res->dir != NFS4_CDFS4_BOTH) {
8487 		rpc_task_close_connection(task);
8488 		if (args->retries++ < MAX_BIND_CONN_TO_SESSION_RETRIES)
8489 			rpc_restart_call(task);
8490 	}
8491 }
8492 
8493 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = {
8494 	.rpc_call_done =  nfs4_bind_one_conn_to_session_done,
8495 };
8496 
8497 /*
8498  * nfs4_proc_bind_one_conn_to_session()
8499  *
8500  * The 4.1 client currently uses the same TCP connection for the
8501  * fore and backchannel.
8502  */
8503 static
8504 int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt,
8505 		struct rpc_xprt *xprt,
8506 		struct nfs_client *clp,
8507 		const struct cred *cred)
8508 {
8509 	int status;
8510 	struct nfs41_bind_conn_to_session_args args = {
8511 		.client = clp,
8512 		.dir = NFS4_CDFC4_FORE_OR_BOTH,
8513 		.retries = 0,
8514 	};
8515 	struct nfs41_bind_conn_to_session_res res;
8516 	struct rpc_message msg = {
8517 		.rpc_proc =
8518 			&nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
8519 		.rpc_argp = &args,
8520 		.rpc_resp = &res,
8521 		.rpc_cred = cred,
8522 	};
8523 	struct rpc_task_setup task_setup_data = {
8524 		.rpc_client = clnt,
8525 		.rpc_xprt = xprt,
8526 		.callback_ops = &nfs4_bind_one_conn_to_session_ops,
8527 		.rpc_message = &msg,
8528 		.flags = RPC_TASK_TIMEOUT,
8529 	};
8530 	struct rpc_task *task;
8531 
8532 	nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
8533 	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
8534 		args.dir = NFS4_CDFC4_FORE;
8535 
8536 	/* Do not set the backchannel flag unless this is clnt->cl_xprt */
8537 	if (xprt != rcu_access_pointer(clnt->cl_xprt))
8538 		args.dir = NFS4_CDFC4_FORE;
8539 
8540 	task = rpc_run_task(&task_setup_data);
8541 	if (!IS_ERR(task)) {
8542 		status = task->tk_status;
8543 		rpc_put_task(task);
8544 	} else
8545 		status = PTR_ERR(task);
8546 	trace_nfs4_bind_conn_to_session(clp, status);
8547 	if (status == 0) {
8548 		if (memcmp(res.sessionid.data,
8549 		    clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
8550 			dprintk("NFS: %s: Session ID mismatch\n", __func__);
8551 			return -EIO;
8552 		}
8553 		if ((res.dir & args.dir) != res.dir || res.dir == 0) {
8554 			dprintk("NFS: %s: Unexpected direction from server\n",
8555 				__func__);
8556 			return -EIO;
8557 		}
8558 		if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
8559 			dprintk("NFS: %s: Server returned RDMA mode = true\n",
8560 				__func__);
8561 			return -EIO;
8562 		}
8563 	}
8564 
8565 	return status;
8566 }
8567 
8568 struct rpc_bind_conn_calldata {
8569 	struct nfs_client *clp;
8570 	const struct cred *cred;
8571 };
8572 
8573 static int
8574 nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt,
8575 		struct rpc_xprt *xprt,
8576 		void *calldata)
8577 {
8578 	struct rpc_bind_conn_calldata *p = calldata;
8579 
8580 	return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred);
8581 }
8582 
8583 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, const struct cred *cred)
8584 {
8585 	struct rpc_bind_conn_calldata data = {
8586 		.clp = clp,
8587 		.cred = cred,
8588 	};
8589 	return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient,
8590 			nfs4_proc_bind_conn_to_session_callback, &data);
8591 }
8592 
8593 /*
8594  * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map
8595  * and operations we'd like to see to enable certain features in the allow map
8596  */
8597 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = {
8598 	.how = SP4_MACH_CRED,
8599 	.enforce.u.words = {
8600 		[1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
8601 		      1 << (OP_EXCHANGE_ID - 32) |
8602 		      1 << (OP_CREATE_SESSION - 32) |
8603 		      1 << (OP_DESTROY_SESSION - 32) |
8604 		      1 << (OP_DESTROY_CLIENTID - 32)
8605 	},
8606 	.allow.u.words = {
8607 		[0] = 1 << (OP_CLOSE) |
8608 		      1 << (OP_OPEN_DOWNGRADE) |
8609 		      1 << (OP_LOCKU) |
8610 		      1 << (OP_DELEGRETURN) |
8611 		      1 << (OP_COMMIT),
8612 		[1] = 1 << (OP_SECINFO - 32) |
8613 		      1 << (OP_SECINFO_NO_NAME - 32) |
8614 		      1 << (OP_LAYOUTRETURN - 32) |
8615 		      1 << (OP_TEST_STATEID - 32) |
8616 		      1 << (OP_FREE_STATEID - 32) |
8617 		      1 << (OP_WRITE - 32)
8618 	}
8619 };
8620 
8621 /*
8622  * Select the state protection mode for client `clp' given the server results
8623  * from exchange_id in `sp'.
8624  *
8625  * Returns 0 on success, negative errno otherwise.
8626  */
8627 static int nfs4_sp4_select_mode(struct nfs_client *clp,
8628 				 struct nfs41_state_protection *sp)
8629 {
8630 	static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = {
8631 		[1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
8632 		      1 << (OP_EXCHANGE_ID - 32) |
8633 		      1 << (OP_CREATE_SESSION - 32) |
8634 		      1 << (OP_DESTROY_SESSION - 32) |
8635 		      1 << (OP_DESTROY_CLIENTID - 32)
8636 	};
8637 	unsigned long flags = 0;
8638 	unsigned int i;
8639 	int ret = 0;
8640 
8641 	if (sp->how == SP4_MACH_CRED) {
8642 		/* Print state protect result */
8643 		dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n");
8644 		for (i = 0; i <= LAST_NFS4_OP; i++) {
8645 			if (test_bit(i, sp->enforce.u.longs))
8646 				dfprintk(MOUNT, "  enforce op %d\n", i);
8647 			if (test_bit(i, sp->allow.u.longs))
8648 				dfprintk(MOUNT, "  allow op %d\n", i);
8649 		}
8650 
8651 		/* make sure nothing is on enforce list that isn't supported */
8652 		for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) {
8653 			if (sp->enforce.u.words[i] & ~supported_enforce[i]) {
8654 				dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
8655 				ret = -EINVAL;
8656 				goto out;
8657 			}
8658 		}
8659 
8660 		/*
8661 		 * Minimal mode - state operations are allowed to use machine
8662 		 * credential.  Note this already happens by default, so the
8663 		 * client doesn't have to do anything more than the negotiation.
8664 		 *
8665 		 * NOTE: we don't care if EXCHANGE_ID is in the list -
8666 		 *       we're already using the machine cred for exchange_id
8667 		 *       and will never use a different cred.
8668 		 */
8669 		if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) &&
8670 		    test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) &&
8671 		    test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) &&
8672 		    test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) {
8673 			dfprintk(MOUNT, "sp4_mach_cred:\n");
8674 			dfprintk(MOUNT, "  minimal mode enabled\n");
8675 			__set_bit(NFS_SP4_MACH_CRED_MINIMAL, &flags);
8676 		} else {
8677 			dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
8678 			ret = -EINVAL;
8679 			goto out;
8680 		}
8681 
8682 		if (test_bit(OP_CLOSE, sp->allow.u.longs) &&
8683 		    test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) &&
8684 		    test_bit(OP_DELEGRETURN, sp->allow.u.longs) &&
8685 		    test_bit(OP_LOCKU, sp->allow.u.longs)) {
8686 			dfprintk(MOUNT, "  cleanup mode enabled\n");
8687 			__set_bit(NFS_SP4_MACH_CRED_CLEANUP, &flags);
8688 		}
8689 
8690 		if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) {
8691 			dfprintk(MOUNT, "  pnfs cleanup mode enabled\n");
8692 			__set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP, &flags);
8693 		}
8694 
8695 		if (test_bit(OP_SECINFO, sp->allow.u.longs) &&
8696 		    test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) {
8697 			dfprintk(MOUNT, "  secinfo mode enabled\n");
8698 			__set_bit(NFS_SP4_MACH_CRED_SECINFO, &flags);
8699 		}
8700 
8701 		if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) &&
8702 		    test_bit(OP_FREE_STATEID, sp->allow.u.longs)) {
8703 			dfprintk(MOUNT, "  stateid mode enabled\n");
8704 			__set_bit(NFS_SP4_MACH_CRED_STATEID, &flags);
8705 		}
8706 
8707 		if (test_bit(OP_WRITE, sp->allow.u.longs)) {
8708 			dfprintk(MOUNT, "  write mode enabled\n");
8709 			__set_bit(NFS_SP4_MACH_CRED_WRITE, &flags);
8710 		}
8711 
8712 		if (test_bit(OP_COMMIT, sp->allow.u.longs)) {
8713 			dfprintk(MOUNT, "  commit mode enabled\n");
8714 			__set_bit(NFS_SP4_MACH_CRED_COMMIT, &flags);
8715 		}
8716 	}
8717 out:
8718 	clp->cl_sp4_flags = flags;
8719 	return ret;
8720 }
8721 
8722 struct nfs41_exchange_id_data {
8723 	struct nfs41_exchange_id_res res;
8724 	struct nfs41_exchange_id_args args;
8725 };
8726 
8727 static void nfs4_exchange_id_release(void *data)
8728 {
8729 	struct nfs41_exchange_id_data *cdata =
8730 					(struct nfs41_exchange_id_data *)data;
8731 
8732 	nfs_put_client(cdata->args.client);
8733 	kfree(cdata->res.impl_id);
8734 	kfree(cdata->res.server_scope);
8735 	kfree(cdata->res.server_owner);
8736 	kfree(cdata);
8737 }
8738 
8739 static const struct rpc_call_ops nfs4_exchange_id_call_ops = {
8740 	.rpc_release = nfs4_exchange_id_release,
8741 };
8742 
8743 /*
8744  * _nfs4_proc_exchange_id()
8745  *
8746  * Wrapper for EXCHANGE_ID operation.
8747  */
8748 static struct rpc_task *
8749 nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred,
8750 			u32 sp4_how, struct rpc_xprt *xprt)
8751 {
8752 	struct rpc_message msg = {
8753 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
8754 		.rpc_cred = cred,
8755 	};
8756 	struct rpc_task_setup task_setup_data = {
8757 		.rpc_client = clp->cl_rpcclient,
8758 		.callback_ops = &nfs4_exchange_id_call_ops,
8759 		.rpc_message = &msg,
8760 		.flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
8761 	};
8762 	struct nfs41_exchange_id_data *calldata;
8763 	int status;
8764 
8765 	if (!refcount_inc_not_zero(&clp->cl_count))
8766 		return ERR_PTR(-EIO);
8767 
8768 	status = -ENOMEM;
8769 	calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
8770 	if (!calldata)
8771 		goto out;
8772 
8773 	nfs4_init_boot_verifier(clp, &calldata->args.verifier);
8774 
8775 	status = nfs4_init_uniform_client_string(clp);
8776 	if (status)
8777 		goto out_calldata;
8778 
8779 	calldata->res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
8780 						GFP_NOFS);
8781 	status = -ENOMEM;
8782 	if (unlikely(calldata->res.server_owner == NULL))
8783 		goto out_calldata;
8784 
8785 	calldata->res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
8786 					GFP_NOFS);
8787 	if (unlikely(calldata->res.server_scope == NULL))
8788 		goto out_server_owner;
8789 
8790 	calldata->res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
8791 	if (unlikely(calldata->res.impl_id == NULL))
8792 		goto out_server_scope;
8793 
8794 	switch (sp4_how) {
8795 	case SP4_NONE:
8796 		calldata->args.state_protect.how = SP4_NONE;
8797 		break;
8798 
8799 	case SP4_MACH_CRED:
8800 		calldata->args.state_protect = nfs4_sp4_mach_cred_request;
8801 		break;
8802 
8803 	default:
8804 		/* unsupported! */
8805 		WARN_ON_ONCE(1);
8806 		status = -EINVAL;
8807 		goto out_impl_id;
8808 	}
8809 	if (xprt) {
8810 		task_setup_data.rpc_xprt = xprt;
8811 		task_setup_data.flags |= RPC_TASK_SOFTCONN;
8812 		memcpy(calldata->args.verifier.data, clp->cl_confirm.data,
8813 				sizeof(calldata->args.verifier.data));
8814 	}
8815 	calldata->args.client = clp;
8816 	calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
8817 	EXCHGID4_FLAG_BIND_PRINC_STATEID;
8818 #ifdef CONFIG_NFS_V4_1_MIGRATION
8819 	calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR;
8820 #endif
8821 	if (test_bit(NFS_CS_DS, &clp->cl_flags))
8822 		calldata->args.flags |= EXCHGID4_FLAG_USE_PNFS_DS;
8823 	msg.rpc_argp = &calldata->args;
8824 	msg.rpc_resp = &calldata->res;
8825 	task_setup_data.callback_data = calldata;
8826 
8827 	return rpc_run_task(&task_setup_data);
8828 
8829 out_impl_id:
8830 	kfree(calldata->res.impl_id);
8831 out_server_scope:
8832 	kfree(calldata->res.server_scope);
8833 out_server_owner:
8834 	kfree(calldata->res.server_owner);
8835 out_calldata:
8836 	kfree(calldata);
8837 out:
8838 	nfs_put_client(clp);
8839 	return ERR_PTR(status);
8840 }
8841 
8842 /*
8843  * _nfs4_proc_exchange_id()
8844  *
8845  * Wrapper for EXCHANGE_ID operation.
8846  */
8847 static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred,
8848 			u32 sp4_how)
8849 {
8850 	struct rpc_task *task;
8851 	struct nfs41_exchange_id_args *argp;
8852 	struct nfs41_exchange_id_res *resp;
8853 	unsigned long now = jiffies;
8854 	int status;
8855 
8856 	task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL);
8857 	if (IS_ERR(task))
8858 		return PTR_ERR(task);
8859 
8860 	argp = task->tk_msg.rpc_argp;
8861 	resp = task->tk_msg.rpc_resp;
8862 	status = task->tk_status;
8863 	if (status  != 0)
8864 		goto out;
8865 
8866 	status = nfs4_check_cl_exchange_flags(resp->flags,
8867 			clp->cl_mvops->minor_version);
8868 	if (status  != 0)
8869 		goto out;
8870 
8871 	status = nfs4_sp4_select_mode(clp, &resp->state_protect);
8872 	if (status != 0)
8873 		goto out;
8874 
8875 	do_renew_lease(clp, now);
8876 
8877 	clp->cl_clientid = resp->clientid;
8878 	clp->cl_exchange_flags = resp->flags;
8879 	clp->cl_seqid = resp->seqid;
8880 	/* Client ID is not confirmed */
8881 	if (!(resp->flags & EXCHGID4_FLAG_CONFIRMED_R))
8882 		clear_bit(NFS4_SESSION_ESTABLISHED,
8883 			  &clp->cl_session->session_state);
8884 
8885 	if (clp->cl_serverscope != NULL &&
8886 	    !nfs41_same_server_scope(clp->cl_serverscope,
8887 				resp->server_scope)) {
8888 		dprintk("%s: server_scope mismatch detected\n",
8889 			__func__);
8890 		set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
8891 	}
8892 
8893 	swap(clp->cl_serverowner, resp->server_owner);
8894 	swap(clp->cl_serverscope, resp->server_scope);
8895 	swap(clp->cl_implid, resp->impl_id);
8896 
8897 	/* Save the EXCHANGE_ID verifier session trunk tests */
8898 	memcpy(clp->cl_confirm.data, argp->verifier.data,
8899 	       sizeof(clp->cl_confirm.data));
8900 out:
8901 	trace_nfs4_exchange_id(clp, status);
8902 	rpc_put_task(task);
8903 	return status;
8904 }
8905 
8906 /*
8907  * nfs4_proc_exchange_id()
8908  *
8909  * Returns zero, a negative errno, or a negative NFS4ERR status code.
8910  *
8911  * Since the clientid has expired, all compounds using sessions
8912  * associated with the stale clientid will be returning
8913  * NFS4ERR_BADSESSION in the sequence operation, and will therefore
8914  * be in some phase of session reset.
8915  *
8916  * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used.
8917  */
8918 int nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred)
8919 {
8920 	rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
8921 	int status;
8922 
8923 	/* try SP4_MACH_CRED if krb5i/p	*/
8924 	if (authflavor == RPC_AUTH_GSS_KRB5I ||
8925 	    authflavor == RPC_AUTH_GSS_KRB5P) {
8926 		status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
8927 		if (!status)
8928 			return 0;
8929 	}
8930 
8931 	/* try SP4_NONE */
8932 	return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
8933 }
8934 
8935 /**
8936  * nfs4_test_session_trunk
8937  *
8938  * This is an add_xprt_test() test function called from
8939  * rpc_clnt_setup_test_and_add_xprt.
8940  *
8941  * The rpc_xprt_switch is referrenced by rpc_clnt_setup_test_and_add_xprt
8942  * and is dereferrenced in nfs4_exchange_id_release
8943  *
8944  * Upon success, add the new transport to the rpc_clnt
8945  *
8946  * @clnt: struct rpc_clnt to get new transport
8947  * @xprt: the rpc_xprt to test
8948  * @data: call data for _nfs4_proc_exchange_id.
8949  */
8950 void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
8951 			    void *data)
8952 {
8953 	struct nfs4_add_xprt_data *adata = data;
8954 	struct rpc_task *task;
8955 	int status;
8956 
8957 	u32 sp4_how;
8958 
8959 	dprintk("--> %s try %s\n", __func__,
8960 		xprt->address_strings[RPC_DISPLAY_ADDR]);
8961 
8962 	sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED);
8963 
8964 try_again:
8965 	/* Test connection for session trunking. Async exchange_id call */
8966 	task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt);
8967 	if (IS_ERR(task))
8968 		return;
8969 
8970 	status = task->tk_status;
8971 	if (status == 0)
8972 		status = nfs4_detect_session_trunking(adata->clp,
8973 				task->tk_msg.rpc_resp, xprt);
8974 
8975 	if (status == 0)
8976 		rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
8977 	else if (status != -NFS4ERR_DELAY && rpc_clnt_xprt_switch_has_addr(clnt,
8978 				(struct sockaddr *)&xprt->addr))
8979 		rpc_clnt_xprt_switch_remove_xprt(clnt, xprt);
8980 
8981 	rpc_put_task(task);
8982 	if (status == -NFS4ERR_DELAY) {
8983 		ssleep(1);
8984 		goto try_again;
8985 	}
8986 }
8987 EXPORT_SYMBOL_GPL(nfs4_test_session_trunk);
8988 
8989 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
8990 		const struct cred *cred)
8991 {
8992 	struct rpc_message msg = {
8993 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
8994 		.rpc_argp = clp,
8995 		.rpc_cred = cred,
8996 	};
8997 	int status;
8998 
8999 	status = rpc_call_sync(clp->cl_rpcclient, &msg,
9000 			       RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
9001 	trace_nfs4_destroy_clientid(clp, status);
9002 	if (status)
9003 		dprintk("NFS: Got error %d from the server %s on "
9004 			"DESTROY_CLIENTID.", status, clp->cl_hostname);
9005 	return status;
9006 }
9007 
9008 static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
9009 		const struct cred *cred)
9010 {
9011 	unsigned int loop;
9012 	int ret;
9013 
9014 	for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
9015 		ret = _nfs4_proc_destroy_clientid(clp, cred);
9016 		switch (ret) {
9017 		case -NFS4ERR_DELAY:
9018 		case -NFS4ERR_CLIENTID_BUSY:
9019 			ssleep(1);
9020 			break;
9021 		default:
9022 			return ret;
9023 		}
9024 	}
9025 	return 0;
9026 }
9027 
9028 int nfs4_destroy_clientid(struct nfs_client *clp)
9029 {
9030 	const struct cred *cred;
9031 	int ret = 0;
9032 
9033 	if (clp->cl_mvops->minor_version < 1)
9034 		goto out;
9035 	if (clp->cl_exchange_flags == 0)
9036 		goto out;
9037 	if (clp->cl_preserve_clid)
9038 		goto out;
9039 	cred = nfs4_get_clid_cred(clp);
9040 	ret = nfs4_proc_destroy_clientid(clp, cred);
9041 	put_cred(cred);
9042 	switch (ret) {
9043 	case 0:
9044 	case -NFS4ERR_STALE_CLIENTID:
9045 		clp->cl_exchange_flags = 0;
9046 	}
9047 out:
9048 	return ret;
9049 }
9050 
9051 #endif /* CONFIG_NFS_V4_1 */
9052 
9053 struct nfs4_get_lease_time_data {
9054 	struct nfs4_get_lease_time_args *args;
9055 	struct nfs4_get_lease_time_res *res;
9056 	struct nfs_client *clp;
9057 };
9058 
9059 static void nfs4_get_lease_time_prepare(struct rpc_task *task,
9060 					void *calldata)
9061 {
9062 	struct nfs4_get_lease_time_data *data =
9063 			(struct nfs4_get_lease_time_data *)calldata;
9064 
9065 	/* just setup sequence, do not trigger session recovery
9066 	   since we're invoked within one */
9067 	nfs4_setup_sequence(data->clp,
9068 			&data->args->la_seq_args,
9069 			&data->res->lr_seq_res,
9070 			task);
9071 }
9072 
9073 /*
9074  * Called from nfs4_state_manager thread for session setup, so don't recover
9075  * from sequence operation or clientid errors.
9076  */
9077 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
9078 {
9079 	struct nfs4_get_lease_time_data *data =
9080 			(struct nfs4_get_lease_time_data *)calldata;
9081 
9082 	if (!nfs4_sequence_done(task, &data->res->lr_seq_res))
9083 		return;
9084 	switch (task->tk_status) {
9085 	case -NFS4ERR_DELAY:
9086 	case -NFS4ERR_GRACE:
9087 		rpc_delay(task, NFS4_POLL_RETRY_MIN);
9088 		task->tk_status = 0;
9089 		fallthrough;
9090 	case -NFS4ERR_RETRY_UNCACHED_REP:
9091 		rpc_restart_call_prepare(task);
9092 		return;
9093 	}
9094 }
9095 
9096 static const struct rpc_call_ops nfs4_get_lease_time_ops = {
9097 	.rpc_call_prepare = nfs4_get_lease_time_prepare,
9098 	.rpc_call_done = nfs4_get_lease_time_done,
9099 };
9100 
9101 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
9102 {
9103 	struct nfs4_get_lease_time_args args;
9104 	struct nfs4_get_lease_time_res res = {
9105 		.lr_fsinfo = fsinfo,
9106 	};
9107 	struct nfs4_get_lease_time_data data = {
9108 		.args = &args,
9109 		.res = &res,
9110 		.clp = clp,
9111 	};
9112 	struct rpc_message msg = {
9113 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
9114 		.rpc_argp = &args,
9115 		.rpc_resp = &res,
9116 	};
9117 	struct rpc_task_setup task_setup = {
9118 		.rpc_client = clp->cl_rpcclient,
9119 		.rpc_message = &msg,
9120 		.callback_ops = &nfs4_get_lease_time_ops,
9121 		.callback_data = &data,
9122 		.flags = RPC_TASK_TIMEOUT,
9123 	};
9124 
9125 	nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0, 1);
9126 	return nfs4_call_sync_custom(&task_setup);
9127 }
9128 
9129 #ifdef CONFIG_NFS_V4_1
9130 
9131 /*
9132  * Initialize the values to be used by the client in CREATE_SESSION
9133  * If nfs4_init_session set the fore channel request and response sizes,
9134  * use them.
9135  *
9136  * Set the back channel max_resp_sz_cached to zero to force the client to
9137  * always set csa_cachethis to FALSE because the current implementation
9138  * of the back channel DRC only supports caching the CB_SEQUENCE operation.
9139  */
9140 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args,
9141 				    struct rpc_clnt *clnt)
9142 {
9143 	unsigned int max_rqst_sz, max_resp_sz;
9144 	unsigned int max_bc_payload = rpc_max_bc_payload(clnt);
9145 	unsigned int max_bc_slots = rpc_num_bc_slots(clnt);
9146 
9147 	max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
9148 	max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;
9149 
9150 	/* Fore channel attributes */
9151 	args->fc_attrs.max_rqst_sz = max_rqst_sz;
9152 	args->fc_attrs.max_resp_sz = max_resp_sz;
9153 	args->fc_attrs.max_ops = NFS4_MAX_OPS;
9154 	args->fc_attrs.max_reqs = max_session_slots;
9155 
9156 	dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
9157 		"max_ops=%u max_reqs=%u\n",
9158 		__func__,
9159 		args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
9160 		args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
9161 
9162 	/* Back channel attributes */
9163 	args->bc_attrs.max_rqst_sz = max_bc_payload;
9164 	args->bc_attrs.max_resp_sz = max_bc_payload;
9165 	args->bc_attrs.max_resp_sz_cached = 0;
9166 	args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
9167 	args->bc_attrs.max_reqs = max_t(unsigned short, max_session_cb_slots, 1);
9168 	if (args->bc_attrs.max_reqs > max_bc_slots)
9169 		args->bc_attrs.max_reqs = max_bc_slots;
9170 
9171 	dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
9172 		"max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
9173 		__func__,
9174 		args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
9175 		args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
9176 		args->bc_attrs.max_reqs);
9177 }
9178 
9179 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
9180 		struct nfs41_create_session_res *res)
9181 {
9182 	struct nfs4_channel_attrs *sent = &args->fc_attrs;
9183 	struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
9184 
9185 	if (rcvd->max_resp_sz > sent->max_resp_sz)
9186 		return -EINVAL;
9187 	/*
9188 	 * Our requested max_ops is the minimum we need; we're not
9189 	 * prepared to break up compounds into smaller pieces than that.
9190 	 * So, no point even trying to continue if the server won't
9191 	 * cooperate:
9192 	 */
9193 	if (rcvd->max_ops < sent->max_ops)
9194 		return -EINVAL;
9195 	if (rcvd->max_reqs == 0)
9196 		return -EINVAL;
9197 	if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
9198 		rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
9199 	return 0;
9200 }
9201 
9202 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
9203 		struct nfs41_create_session_res *res)
9204 {
9205 	struct nfs4_channel_attrs *sent = &args->bc_attrs;
9206 	struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
9207 
9208 	if (!(res->flags & SESSION4_BACK_CHAN))
9209 		goto out;
9210 	if (rcvd->max_rqst_sz > sent->max_rqst_sz)
9211 		return -EINVAL;
9212 	if (rcvd->max_resp_sz < sent->max_resp_sz)
9213 		return -EINVAL;
9214 	if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
9215 		return -EINVAL;
9216 	if (rcvd->max_ops > sent->max_ops)
9217 		return -EINVAL;
9218 	if (rcvd->max_reqs > sent->max_reqs)
9219 		return -EINVAL;
9220 out:
9221 	return 0;
9222 }
9223 
9224 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
9225 				     struct nfs41_create_session_res *res)
9226 {
9227 	int ret;
9228 
9229 	ret = nfs4_verify_fore_channel_attrs(args, res);
9230 	if (ret)
9231 		return ret;
9232 	return nfs4_verify_back_channel_attrs(args, res);
9233 }
9234 
9235 static void nfs4_update_session(struct nfs4_session *session,
9236 		struct nfs41_create_session_res *res)
9237 {
9238 	nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
9239 	/* Mark client id and session as being confirmed */
9240 	session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
9241 	set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state);
9242 	session->flags = res->flags;
9243 	memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
9244 	if (res->flags & SESSION4_BACK_CHAN)
9245 		memcpy(&session->bc_attrs, &res->bc_attrs,
9246 				sizeof(session->bc_attrs));
9247 }
9248 
9249 static int _nfs4_proc_create_session(struct nfs_client *clp,
9250 		const struct cred *cred)
9251 {
9252 	struct nfs4_session *session = clp->cl_session;
9253 	struct nfs41_create_session_args args = {
9254 		.client = clp,
9255 		.clientid = clp->cl_clientid,
9256 		.seqid = clp->cl_seqid,
9257 		.cb_program = NFS4_CALLBACK,
9258 	};
9259 	struct nfs41_create_session_res res;
9260 
9261 	struct rpc_message msg = {
9262 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
9263 		.rpc_argp = &args,
9264 		.rpc_resp = &res,
9265 		.rpc_cred = cred,
9266 	};
9267 	int status;
9268 
9269 	nfs4_init_channel_attrs(&args, clp->cl_rpcclient);
9270 	args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
9271 
9272 	status = rpc_call_sync(session->clp->cl_rpcclient, &msg,
9273 			       RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
9274 	trace_nfs4_create_session(clp, status);
9275 
9276 	switch (status) {
9277 	case -NFS4ERR_STALE_CLIENTID:
9278 	case -NFS4ERR_DELAY:
9279 	case -ETIMEDOUT:
9280 	case -EACCES:
9281 	case -EAGAIN:
9282 		goto out;
9283 	}
9284 
9285 	clp->cl_seqid++;
9286 	if (!status) {
9287 		/* Verify the session's negotiated channel_attrs values */
9288 		status = nfs4_verify_channel_attrs(&args, &res);
9289 		/* Increment the clientid slot sequence id */
9290 		if (status)
9291 			goto out;
9292 		nfs4_update_session(session, &res);
9293 	}
9294 out:
9295 	return status;
9296 }
9297 
9298 /*
9299  * Issues a CREATE_SESSION operation to the server.
9300  * It is the responsibility of the caller to verify the session is
9301  * expired before calling this routine.
9302  */
9303 int nfs4_proc_create_session(struct nfs_client *clp, const struct cred *cred)
9304 {
9305 	int status;
9306 	unsigned *ptr;
9307 	struct nfs4_session *session = clp->cl_session;
9308 	struct nfs4_add_xprt_data xprtdata = {
9309 		.clp = clp,
9310 	};
9311 	struct rpc_add_xprt_test rpcdata = {
9312 		.add_xprt_test = clp->cl_mvops->session_trunk,
9313 		.data = &xprtdata,
9314 	};
9315 
9316 	dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
9317 
9318 	status = _nfs4_proc_create_session(clp, cred);
9319 	if (status)
9320 		goto out;
9321 
9322 	/* Init or reset the session slot tables */
9323 	status = nfs4_setup_session_slot_tables(session);
9324 	dprintk("slot table setup returned %d\n", status);
9325 	if (status)
9326 		goto out;
9327 
9328 	ptr = (unsigned *)&session->sess_id.data[0];
9329 	dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
9330 		clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
9331 	rpc_clnt_probe_trunked_xprts(clp->cl_rpcclient, &rpcdata);
9332 out:
9333 	return status;
9334 }
9335 
9336 /*
9337  * Issue the over-the-wire RPC DESTROY_SESSION.
9338  * The caller must serialize access to this routine.
9339  */
9340 int nfs4_proc_destroy_session(struct nfs4_session *session,
9341 		const struct cred *cred)
9342 {
9343 	struct rpc_message msg = {
9344 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
9345 		.rpc_argp = session,
9346 		.rpc_cred = cred,
9347 	};
9348 	int status = 0;
9349 
9350 	/* session is still being setup */
9351 	if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
9352 		return 0;
9353 
9354 	status = rpc_call_sync(session->clp->cl_rpcclient, &msg,
9355 			       RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
9356 	trace_nfs4_destroy_session(session->clp, status);
9357 
9358 	if (status)
9359 		dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
9360 			"Session has been destroyed regardless...\n", status);
9361 	rpc_clnt_manage_trunked_xprts(session->clp->cl_rpcclient);
9362 	return status;
9363 }
9364 
9365 /*
9366  * Renew the cl_session lease.
9367  */
9368 struct nfs4_sequence_data {
9369 	struct nfs_client *clp;
9370 	struct nfs4_sequence_args args;
9371 	struct nfs4_sequence_res res;
9372 };
9373 
9374 static void nfs41_sequence_release(void *data)
9375 {
9376 	struct nfs4_sequence_data *calldata = data;
9377 	struct nfs_client *clp = calldata->clp;
9378 
9379 	if (refcount_read(&clp->cl_count) > 1)
9380 		nfs4_schedule_state_renewal(clp);
9381 	nfs_put_client(clp);
9382 	kfree(calldata);
9383 }
9384 
9385 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
9386 {
9387 	switch(task->tk_status) {
9388 	case -NFS4ERR_DELAY:
9389 		rpc_delay(task, NFS4_POLL_RETRY_MAX);
9390 		return -EAGAIN;
9391 	default:
9392 		nfs4_schedule_lease_recovery(clp);
9393 	}
9394 	return 0;
9395 }
9396 
9397 static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
9398 {
9399 	struct nfs4_sequence_data *calldata = data;
9400 	struct nfs_client *clp = calldata->clp;
9401 
9402 	if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
9403 		return;
9404 
9405 	trace_nfs4_sequence(clp, task->tk_status);
9406 	if (task->tk_status < 0 && !task->tk_client->cl_shutdown) {
9407 		dprintk("%s ERROR %d\n", __func__, task->tk_status);
9408 		if (refcount_read(&clp->cl_count) == 1)
9409 			return;
9410 
9411 		if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
9412 			rpc_restart_call_prepare(task);
9413 			return;
9414 		}
9415 	}
9416 	dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
9417 }
9418 
9419 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
9420 {
9421 	struct nfs4_sequence_data *calldata = data;
9422 	struct nfs_client *clp = calldata->clp;
9423 	struct nfs4_sequence_args *args;
9424 	struct nfs4_sequence_res *res;
9425 
9426 	args = task->tk_msg.rpc_argp;
9427 	res = task->tk_msg.rpc_resp;
9428 
9429 	nfs4_setup_sequence(clp, args, res, task);
9430 }
9431 
9432 static const struct rpc_call_ops nfs41_sequence_ops = {
9433 	.rpc_call_done = nfs41_sequence_call_done,
9434 	.rpc_call_prepare = nfs41_sequence_prepare,
9435 	.rpc_release = nfs41_sequence_release,
9436 };
9437 
9438 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
9439 		const struct cred *cred,
9440 		struct nfs4_slot *slot,
9441 		bool is_privileged)
9442 {
9443 	struct nfs4_sequence_data *calldata;
9444 	struct rpc_message msg = {
9445 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
9446 		.rpc_cred = cred,
9447 	};
9448 	struct rpc_task_setup task_setup_data = {
9449 		.rpc_client = clp->cl_rpcclient,
9450 		.rpc_message = &msg,
9451 		.callback_ops = &nfs41_sequence_ops,
9452 		.flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT | RPC_TASK_MOVEABLE,
9453 	};
9454 	struct rpc_task *ret;
9455 
9456 	ret = ERR_PTR(-EIO);
9457 	if (!refcount_inc_not_zero(&clp->cl_count))
9458 		goto out_err;
9459 
9460 	ret = ERR_PTR(-ENOMEM);
9461 	calldata = kzalloc(sizeof(*calldata), GFP_KERNEL);
9462 	if (calldata == NULL)
9463 		goto out_put_clp;
9464 	nfs4_init_sequence(&calldata->args, &calldata->res, 0, is_privileged);
9465 	nfs4_sequence_attach_slot(&calldata->args, &calldata->res, slot);
9466 	msg.rpc_argp = &calldata->args;
9467 	msg.rpc_resp = &calldata->res;
9468 	calldata->clp = clp;
9469 	task_setup_data.callback_data = calldata;
9470 
9471 	ret = rpc_run_task(&task_setup_data);
9472 	if (IS_ERR(ret))
9473 		goto out_err;
9474 	return ret;
9475 out_put_clp:
9476 	nfs_put_client(clp);
9477 out_err:
9478 	nfs41_release_slot(slot);
9479 	return ret;
9480 }
9481 
9482 static int nfs41_proc_async_sequence(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags)
9483 {
9484 	struct rpc_task *task;
9485 	int ret = 0;
9486 
9487 	if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
9488 		return -EAGAIN;
9489 	task = _nfs41_proc_sequence(clp, cred, NULL, false);
9490 	if (IS_ERR(task))
9491 		ret = PTR_ERR(task);
9492 	else
9493 		rpc_put_task_async(task);
9494 	dprintk("<-- %s status=%d\n", __func__, ret);
9495 	return ret;
9496 }
9497 
9498 static int nfs4_proc_sequence(struct nfs_client *clp, const struct cred *cred)
9499 {
9500 	struct rpc_task *task;
9501 	int ret;
9502 
9503 	task = _nfs41_proc_sequence(clp, cred, NULL, true);
9504 	if (IS_ERR(task)) {
9505 		ret = PTR_ERR(task);
9506 		goto out;
9507 	}
9508 	ret = rpc_wait_for_completion_task(task);
9509 	if (!ret)
9510 		ret = task->tk_status;
9511 	rpc_put_task(task);
9512 out:
9513 	dprintk("<-- %s status=%d\n", __func__, ret);
9514 	return ret;
9515 }
9516 
9517 struct nfs4_reclaim_complete_data {
9518 	struct nfs_client *clp;
9519 	struct nfs41_reclaim_complete_args arg;
9520 	struct nfs41_reclaim_complete_res res;
9521 };
9522 
9523 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
9524 {
9525 	struct nfs4_reclaim_complete_data *calldata = data;
9526 
9527 	nfs4_setup_sequence(calldata->clp,
9528 			&calldata->arg.seq_args,
9529 			&calldata->res.seq_res,
9530 			task);
9531 }
9532 
9533 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
9534 {
9535 	switch(task->tk_status) {
9536 	case 0:
9537 		wake_up_all(&clp->cl_lock_waitq);
9538 		fallthrough;
9539 	case -NFS4ERR_COMPLETE_ALREADY:
9540 	case -NFS4ERR_WRONG_CRED: /* What to do here? */
9541 		break;
9542 	case -NFS4ERR_DELAY:
9543 		rpc_delay(task, NFS4_POLL_RETRY_MAX);
9544 		fallthrough;
9545 	case -NFS4ERR_RETRY_UNCACHED_REP:
9546 	case -EACCES:
9547 		dprintk("%s: failed to reclaim complete error %d for server %s, retrying\n",
9548 			__func__, task->tk_status, clp->cl_hostname);
9549 		return -EAGAIN;
9550 	case -NFS4ERR_BADSESSION:
9551 	case -NFS4ERR_DEADSESSION:
9552 	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
9553 		break;
9554 	default:
9555 		nfs4_schedule_lease_recovery(clp);
9556 	}
9557 	return 0;
9558 }
9559 
9560 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
9561 {
9562 	struct nfs4_reclaim_complete_data *calldata = data;
9563 	struct nfs_client *clp = calldata->clp;
9564 	struct nfs4_sequence_res *res = &calldata->res.seq_res;
9565 
9566 	if (!nfs41_sequence_done(task, res))
9567 		return;
9568 
9569 	trace_nfs4_reclaim_complete(clp, task->tk_status);
9570 	if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
9571 		rpc_restart_call_prepare(task);
9572 		return;
9573 	}
9574 }
9575 
9576 static void nfs4_free_reclaim_complete_data(void *data)
9577 {
9578 	struct nfs4_reclaim_complete_data *calldata = data;
9579 
9580 	kfree(calldata);
9581 }
9582 
9583 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
9584 	.rpc_call_prepare = nfs4_reclaim_complete_prepare,
9585 	.rpc_call_done = nfs4_reclaim_complete_done,
9586 	.rpc_release = nfs4_free_reclaim_complete_data,
9587 };
9588 
9589 /*
9590  * Issue a global reclaim complete.
9591  */
9592 static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
9593 		const struct cred *cred)
9594 {
9595 	struct nfs4_reclaim_complete_data *calldata;
9596 	struct rpc_message msg = {
9597 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
9598 		.rpc_cred = cred,
9599 	};
9600 	struct rpc_task_setup task_setup_data = {
9601 		.rpc_client = clp->cl_rpcclient,
9602 		.rpc_message = &msg,
9603 		.callback_ops = &nfs4_reclaim_complete_call_ops,
9604 		.flags = RPC_TASK_NO_ROUND_ROBIN,
9605 	};
9606 	int status = -ENOMEM;
9607 
9608 	calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
9609 	if (calldata == NULL)
9610 		goto out;
9611 	calldata->clp = clp;
9612 	calldata->arg.one_fs = 0;
9613 
9614 	nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0, 1);
9615 	msg.rpc_argp = &calldata->arg;
9616 	msg.rpc_resp = &calldata->res;
9617 	task_setup_data.callback_data = calldata;
9618 	status = nfs4_call_sync_custom(&task_setup_data);
9619 out:
9620 	dprintk("<-- %s status=%d\n", __func__, status);
9621 	return status;
9622 }
9623 
9624 static void
9625 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
9626 {
9627 	struct nfs4_layoutget *lgp = calldata;
9628 	struct nfs_server *server = NFS_SERVER(lgp->args.inode);
9629 
9630 	nfs4_setup_sequence(server->nfs_client, &lgp->args.seq_args,
9631 				&lgp->res.seq_res, task);
9632 }
9633 
9634 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
9635 {
9636 	struct nfs4_layoutget *lgp = calldata;
9637 
9638 	nfs41_sequence_process(task, &lgp->res.seq_res);
9639 }
9640 
9641 static int
9642 nfs4_layoutget_handle_exception(struct rpc_task *task,
9643 		struct nfs4_layoutget *lgp, struct nfs4_exception *exception)
9644 {
9645 	struct inode *inode = lgp->args.inode;
9646 	struct nfs_server *server = NFS_SERVER(inode);
9647 	struct pnfs_layout_hdr *lo = lgp->lo;
9648 	int nfs4err = task->tk_status;
9649 	int err, status = 0;
9650 	LIST_HEAD(head);
9651 
9652 	dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
9653 
9654 	nfs4_sequence_free_slot(&lgp->res.seq_res);
9655 
9656 	exception->state = NULL;
9657 	exception->stateid = NULL;
9658 
9659 	switch (nfs4err) {
9660 	case 0:
9661 		goto out;
9662 
9663 	/*
9664 	 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs
9665 	 * on the file. set tk_status to -ENODATA to tell upper layer to
9666 	 * retry go inband.
9667 	 */
9668 	case -NFS4ERR_LAYOUTUNAVAILABLE:
9669 		status = -ENODATA;
9670 		goto out;
9671 	/*
9672 	 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of
9673 	 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3).
9674 	 */
9675 	case -NFS4ERR_BADLAYOUT:
9676 		status = -EOVERFLOW;
9677 		goto out;
9678 	/*
9679 	 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
9680 	 * (or clients) writing to the same RAID stripe except when
9681 	 * the minlength argument is 0 (see RFC5661 section 18.43.3).
9682 	 *
9683 	 * Treat it like we would RECALLCONFLICT -- we retry for a little
9684 	 * while, and then eventually give up.
9685 	 */
9686 	case -NFS4ERR_LAYOUTTRYLATER:
9687 		if (lgp->args.minlength == 0) {
9688 			status = -EOVERFLOW;
9689 			goto out;
9690 		}
9691 		status = -EBUSY;
9692 		break;
9693 	case -NFS4ERR_RECALLCONFLICT:
9694 		status = -ERECALLCONFLICT;
9695 		break;
9696 	case -NFS4ERR_DELEG_REVOKED:
9697 	case -NFS4ERR_ADMIN_REVOKED:
9698 	case -NFS4ERR_EXPIRED:
9699 	case -NFS4ERR_BAD_STATEID:
9700 		exception->timeout = 0;
9701 		spin_lock(&inode->i_lock);
9702 		/* If the open stateid was bad, then recover it. */
9703 		if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
9704 		    !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) {
9705 			spin_unlock(&inode->i_lock);
9706 			exception->state = lgp->args.ctx->state;
9707 			exception->stateid = &lgp->args.stateid;
9708 			break;
9709 		}
9710 
9711 		/*
9712 		 * Mark the bad layout state as invalid, then retry
9713 		 */
9714 		pnfs_mark_layout_stateid_invalid(lo, &head);
9715 		spin_unlock(&inode->i_lock);
9716 		nfs_commit_inode(inode, 0);
9717 		pnfs_free_lseg_list(&head);
9718 		status = -EAGAIN;
9719 		goto out;
9720 	}
9721 
9722 	err = nfs4_handle_exception(server, nfs4err, exception);
9723 	if (!status) {
9724 		if (exception->retry)
9725 			status = -EAGAIN;
9726 		else
9727 			status = err;
9728 	}
9729 out:
9730 	return status;
9731 }
9732 
9733 size_t max_response_pages(struct nfs_server *server)
9734 {
9735 	u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
9736 	return nfs_page_array_len(0, max_resp_sz);
9737 }
9738 
9739 static void nfs4_layoutget_release(void *calldata)
9740 {
9741 	struct nfs4_layoutget *lgp = calldata;
9742 
9743 	nfs4_sequence_free_slot(&lgp->res.seq_res);
9744 	pnfs_layoutget_free(lgp);
9745 }
9746 
9747 static const struct rpc_call_ops nfs4_layoutget_call_ops = {
9748 	.rpc_call_prepare = nfs4_layoutget_prepare,
9749 	.rpc_call_done = nfs4_layoutget_done,
9750 	.rpc_release = nfs4_layoutget_release,
9751 };
9752 
9753 struct pnfs_layout_segment *
9754 nfs4_proc_layoutget(struct nfs4_layoutget *lgp,
9755 		    struct nfs4_exception *exception)
9756 {
9757 	struct inode *inode = lgp->args.inode;
9758 	struct nfs_server *server = NFS_SERVER(inode);
9759 	struct rpc_task *task;
9760 	struct rpc_message msg = {
9761 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
9762 		.rpc_argp = &lgp->args,
9763 		.rpc_resp = &lgp->res,
9764 		.rpc_cred = lgp->cred,
9765 	};
9766 	struct rpc_task_setup task_setup_data = {
9767 		.rpc_client = server->client,
9768 		.rpc_message = &msg,
9769 		.callback_ops = &nfs4_layoutget_call_ops,
9770 		.callback_data = lgp,
9771 		.flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF |
9772 			 RPC_TASK_MOVEABLE,
9773 	};
9774 	struct pnfs_layout_segment *lseg = NULL;
9775 	int status = 0;
9776 
9777 	nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0);
9778 	exception->retry = 0;
9779 
9780 	task = rpc_run_task(&task_setup_data);
9781 	if (IS_ERR(task))
9782 		return ERR_CAST(task);
9783 
9784 	status = rpc_wait_for_completion_task(task);
9785 	if (status != 0)
9786 		goto out;
9787 
9788 	if (task->tk_status < 0) {
9789 		exception->retry = 1;
9790 		status = nfs4_layoutget_handle_exception(task, lgp, exception);
9791 	} else if (lgp->res.layoutp->len == 0) {
9792 		exception->retry = 1;
9793 		status = -EAGAIN;
9794 		nfs4_update_delay(&exception->timeout);
9795 	} else
9796 		lseg = pnfs_layout_process(lgp);
9797 out:
9798 	trace_nfs4_layoutget(lgp->args.ctx,
9799 			&lgp->args.range,
9800 			&lgp->res.range,
9801 			&lgp->res.stateid,
9802 			status);
9803 
9804 	rpc_put_task(task);
9805 	dprintk("<-- %s status=%d\n", __func__, status);
9806 	if (status)
9807 		return ERR_PTR(status);
9808 	return lseg;
9809 }
9810 
9811 static void
9812 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
9813 {
9814 	struct nfs4_layoutreturn *lrp = calldata;
9815 
9816 	nfs4_setup_sequence(lrp->clp,
9817 			&lrp->args.seq_args,
9818 			&lrp->res.seq_res,
9819 			task);
9820 	if (!pnfs_layout_is_valid(lrp->args.layout))
9821 		rpc_exit(task, 0);
9822 }
9823 
9824 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
9825 {
9826 	struct nfs4_layoutreturn *lrp = calldata;
9827 	struct nfs_server *server;
9828 
9829 	if (!nfs41_sequence_process(task, &lrp->res.seq_res))
9830 		return;
9831 
9832 	/*
9833 	 * Was there an RPC level error? Assume the call succeeded,
9834 	 * and that we need to release the layout
9835 	 */
9836 	if (task->tk_rpc_status != 0 && RPC_WAS_SENT(task)) {
9837 		lrp->res.lrs_present = 0;
9838 		return;
9839 	}
9840 
9841 	server = NFS_SERVER(lrp->args.inode);
9842 	switch (task->tk_status) {
9843 	case -NFS4ERR_OLD_STATEID:
9844 		if (nfs4_layout_refresh_old_stateid(&lrp->args.stateid,
9845 					&lrp->args.range,
9846 					lrp->args.inode))
9847 			goto out_restart;
9848 		fallthrough;
9849 	default:
9850 		task->tk_status = 0;
9851 		fallthrough;
9852 	case 0:
9853 		break;
9854 	case -NFS4ERR_DELAY:
9855 		if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
9856 			break;
9857 		goto out_restart;
9858 	}
9859 	return;
9860 out_restart:
9861 	task->tk_status = 0;
9862 	nfs4_sequence_free_slot(&lrp->res.seq_res);
9863 	rpc_restart_call_prepare(task);
9864 }
9865 
9866 static void nfs4_layoutreturn_release(void *calldata)
9867 {
9868 	struct nfs4_layoutreturn *lrp = calldata;
9869 	struct pnfs_layout_hdr *lo = lrp->args.layout;
9870 
9871 	pnfs_layoutreturn_free_lsegs(lo, &lrp->args.stateid, &lrp->args.range,
9872 			lrp->res.lrs_present ? &lrp->res.stateid : NULL);
9873 	nfs4_sequence_free_slot(&lrp->res.seq_res);
9874 	if (lrp->ld_private.ops && lrp->ld_private.ops->free)
9875 		lrp->ld_private.ops->free(&lrp->ld_private);
9876 	pnfs_put_layout_hdr(lrp->args.layout);
9877 	nfs_iput_and_deactive(lrp->inode);
9878 	put_cred(lrp->cred);
9879 	kfree(calldata);
9880 }
9881 
9882 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
9883 	.rpc_call_prepare = nfs4_layoutreturn_prepare,
9884 	.rpc_call_done = nfs4_layoutreturn_done,
9885 	.rpc_release = nfs4_layoutreturn_release,
9886 };
9887 
9888 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
9889 {
9890 	struct rpc_task *task;
9891 	struct rpc_message msg = {
9892 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
9893 		.rpc_argp = &lrp->args,
9894 		.rpc_resp = &lrp->res,
9895 		.rpc_cred = lrp->cred,
9896 	};
9897 	struct rpc_task_setup task_setup_data = {
9898 		.rpc_client = NFS_SERVER(lrp->args.inode)->client,
9899 		.rpc_message = &msg,
9900 		.callback_ops = &nfs4_layoutreturn_call_ops,
9901 		.callback_data = lrp,
9902 		.flags = RPC_TASK_MOVEABLE,
9903 	};
9904 	int status = 0;
9905 
9906 	nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client,
9907 			NFS_SP4_MACH_CRED_PNFS_CLEANUP,
9908 			&task_setup_data.rpc_client, &msg);
9909 
9910 	lrp->inode = nfs_igrab_and_active(lrp->args.inode);
9911 	if (!sync) {
9912 		if (!lrp->inode) {
9913 			nfs4_layoutreturn_release(lrp);
9914 			return -EAGAIN;
9915 		}
9916 		task_setup_data.flags |= RPC_TASK_ASYNC;
9917 	}
9918 	if (!lrp->inode)
9919 		nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
9920 				   1);
9921 	else
9922 		nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
9923 				   0);
9924 	task = rpc_run_task(&task_setup_data);
9925 	if (IS_ERR(task))
9926 		return PTR_ERR(task);
9927 	if (sync)
9928 		status = task->tk_status;
9929 	trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status);
9930 	dprintk("<-- %s status=%d\n", __func__, status);
9931 	rpc_put_task(task);
9932 	return status;
9933 }
9934 
9935 static int
9936 _nfs4_proc_getdeviceinfo(struct nfs_server *server,
9937 		struct pnfs_device *pdev,
9938 		const struct cred *cred)
9939 {
9940 	struct nfs4_getdeviceinfo_args args = {
9941 		.pdev = pdev,
9942 		.notify_types = NOTIFY_DEVICEID4_CHANGE |
9943 			NOTIFY_DEVICEID4_DELETE,
9944 	};
9945 	struct nfs4_getdeviceinfo_res res = {
9946 		.pdev = pdev,
9947 	};
9948 	struct rpc_message msg = {
9949 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
9950 		.rpc_argp = &args,
9951 		.rpc_resp = &res,
9952 		.rpc_cred = cred,
9953 	};
9954 	int status;
9955 
9956 	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
9957 	if (res.notification & ~args.notify_types)
9958 		dprintk("%s: unsupported notification\n", __func__);
9959 	if (res.notification != args.notify_types)
9960 		pdev->nocache = 1;
9961 
9962 	trace_nfs4_getdeviceinfo(server, &pdev->dev_id, status);
9963 
9964 	dprintk("<-- %s status=%d\n", __func__, status);
9965 
9966 	return status;
9967 }
9968 
9969 int nfs4_proc_getdeviceinfo(struct nfs_server *server,
9970 		struct pnfs_device *pdev,
9971 		const struct cred *cred)
9972 {
9973 	struct nfs4_exception exception = { };
9974 	int err;
9975 
9976 	do {
9977 		err = nfs4_handle_exception(server,
9978 					_nfs4_proc_getdeviceinfo(server, pdev, cred),
9979 					&exception);
9980 	} while (exception.retry);
9981 	return err;
9982 }
9983 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
9984 
9985 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
9986 {
9987 	struct nfs4_layoutcommit_data *data = calldata;
9988 	struct nfs_server *server = NFS_SERVER(data->args.inode);
9989 
9990 	nfs4_setup_sequence(server->nfs_client,
9991 			&data->args.seq_args,
9992 			&data->res.seq_res,
9993 			task);
9994 }
9995 
9996 static void
9997 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
9998 {
9999 	struct nfs4_layoutcommit_data *data = calldata;
10000 	struct nfs_server *server = NFS_SERVER(data->args.inode);
10001 
10002 	if (!nfs41_sequence_done(task, &data->res.seq_res))
10003 		return;
10004 
10005 	switch (task->tk_status) { /* Just ignore these failures */
10006 	case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
10007 	case -NFS4ERR_BADIOMODE:     /* no IOMODE_RW layout for range */
10008 	case -NFS4ERR_BADLAYOUT:     /* no layout */
10009 	case -NFS4ERR_GRACE:	    /* loca_recalim always false */
10010 		task->tk_status = 0;
10011 		break;
10012 	case 0:
10013 		break;
10014 	default:
10015 		if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
10016 			rpc_restart_call_prepare(task);
10017 			return;
10018 		}
10019 	}
10020 }
10021 
10022 static void nfs4_layoutcommit_release(void *calldata)
10023 {
10024 	struct nfs4_layoutcommit_data *data = calldata;
10025 
10026 	pnfs_cleanup_layoutcommit(data);
10027 	nfs_post_op_update_inode_force_wcc(data->args.inode,
10028 					   data->res.fattr);
10029 	put_cred(data->cred);
10030 	nfs_iput_and_deactive(data->inode);
10031 	kfree(data);
10032 }
10033 
10034 static const struct rpc_call_ops nfs4_layoutcommit_ops = {
10035 	.rpc_call_prepare = nfs4_layoutcommit_prepare,
10036 	.rpc_call_done = nfs4_layoutcommit_done,
10037 	.rpc_release = nfs4_layoutcommit_release,
10038 };
10039 
10040 int
10041 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
10042 {
10043 	struct rpc_message msg = {
10044 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
10045 		.rpc_argp = &data->args,
10046 		.rpc_resp = &data->res,
10047 		.rpc_cred = data->cred,
10048 	};
10049 	struct rpc_task_setup task_setup_data = {
10050 		.task = &data->task,
10051 		.rpc_client = NFS_CLIENT(data->args.inode),
10052 		.rpc_message = &msg,
10053 		.callback_ops = &nfs4_layoutcommit_ops,
10054 		.callback_data = data,
10055 		.flags = RPC_TASK_MOVEABLE,
10056 	};
10057 	struct rpc_task *task;
10058 	int status = 0;
10059 
10060 	dprintk("NFS: initiating layoutcommit call. sync %d "
10061 		"lbw: %llu inode %lu\n", sync,
10062 		data->args.lastbytewritten,
10063 		data->args.inode->i_ino);
10064 
10065 	if (!sync) {
10066 		data->inode = nfs_igrab_and_active(data->args.inode);
10067 		if (data->inode == NULL) {
10068 			nfs4_layoutcommit_release(data);
10069 			return -EAGAIN;
10070 		}
10071 		task_setup_data.flags = RPC_TASK_ASYNC;
10072 	}
10073 	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
10074 	task = rpc_run_task(&task_setup_data);
10075 	if (IS_ERR(task))
10076 		return PTR_ERR(task);
10077 	if (sync)
10078 		status = task->tk_status;
10079 	trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status);
10080 	dprintk("%s: status %d\n", __func__, status);
10081 	rpc_put_task(task);
10082 	return status;
10083 }
10084 
10085 /*
10086  * Use the state managment nfs_client cl_rpcclient, which uses krb5i (if
10087  * possible) as per RFC3530bis and RFC5661 Security Considerations sections
10088  */
10089 static int
10090 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
10091 		    struct nfs_fsinfo *info,
10092 		    struct nfs4_secinfo_flavors *flavors, bool use_integrity)
10093 {
10094 	struct nfs41_secinfo_no_name_args args = {
10095 		.style = SECINFO_STYLE_CURRENT_FH,
10096 	};
10097 	struct nfs4_secinfo_res res = {
10098 		.flavors = flavors,
10099 	};
10100 	struct rpc_message msg = {
10101 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
10102 		.rpc_argp = &args,
10103 		.rpc_resp = &res,
10104 	};
10105 	struct nfs4_call_sync_data data = {
10106 		.seq_server = server,
10107 		.seq_args = &args.seq_args,
10108 		.seq_res = &res.seq_res,
10109 	};
10110 	struct rpc_task_setup task_setup = {
10111 		.rpc_client = server->client,
10112 		.rpc_message = &msg,
10113 		.callback_ops = server->nfs_client->cl_mvops->call_sync_ops,
10114 		.callback_data = &data,
10115 		.flags = RPC_TASK_NO_ROUND_ROBIN,
10116 	};
10117 	const struct cred *cred = NULL;
10118 	int status;
10119 
10120 	if (use_integrity) {
10121 		task_setup.rpc_client = server->nfs_client->cl_rpcclient;
10122 
10123 		cred = nfs4_get_clid_cred(server->nfs_client);
10124 		msg.rpc_cred = cred;
10125 	}
10126 
10127 	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
10128 	status = nfs4_call_sync_custom(&task_setup);
10129 	dprintk("<-- %s status=%d\n", __func__, status);
10130 
10131 	put_cred(cred);
10132 
10133 	return status;
10134 }
10135 
10136 static int
10137 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
10138 			   struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
10139 {
10140 	struct nfs4_exception exception = {
10141 		.interruptible = true,
10142 	};
10143 	int err;
10144 	do {
10145 		/* first try using integrity protection */
10146 		err = -NFS4ERR_WRONGSEC;
10147 
10148 		/* try to use integrity protection with machine cred */
10149 		if (_nfs4_is_integrity_protected(server->nfs_client))
10150 			err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
10151 							  flavors, true);
10152 
10153 		/*
10154 		 * if unable to use integrity protection, or SECINFO with
10155 		 * integrity protection returns NFS4ERR_WRONGSEC (which is
10156 		 * disallowed by spec, but exists in deployed servers) use
10157 		 * the current filesystem's rpc_client and the user cred.
10158 		 */
10159 		if (err == -NFS4ERR_WRONGSEC)
10160 			err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
10161 							  flavors, false);
10162 
10163 		switch (err) {
10164 		case 0:
10165 		case -NFS4ERR_WRONGSEC:
10166 		case -ENOTSUPP:
10167 			goto out;
10168 		default:
10169 			err = nfs4_handle_exception(server, err, &exception);
10170 		}
10171 	} while (exception.retry);
10172 out:
10173 	return err;
10174 }
10175 
10176 static int
10177 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
10178 		    struct nfs_fsinfo *info)
10179 {
10180 	int err;
10181 	struct page *page;
10182 	rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
10183 	struct nfs4_secinfo_flavors *flavors;
10184 	struct nfs4_secinfo4 *secinfo;
10185 	int i;
10186 
10187 	page = alloc_page(GFP_KERNEL);
10188 	if (!page) {
10189 		err = -ENOMEM;
10190 		goto out;
10191 	}
10192 
10193 	flavors = page_address(page);
10194 	err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
10195 
10196 	/*
10197 	 * Fall back on "guess and check" method if
10198 	 * the server doesn't support SECINFO_NO_NAME
10199 	 */
10200 	if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
10201 		err = nfs4_find_root_sec(server, fhandle, info);
10202 		goto out_freepage;
10203 	}
10204 	if (err)
10205 		goto out_freepage;
10206 
10207 	for (i = 0; i < flavors->num_flavors; i++) {
10208 		secinfo = &flavors->flavors[i];
10209 
10210 		switch (secinfo->flavor) {
10211 		case RPC_AUTH_NULL:
10212 		case RPC_AUTH_UNIX:
10213 		case RPC_AUTH_GSS:
10214 			flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
10215 					&secinfo->flavor_info);
10216 			break;
10217 		default:
10218 			flavor = RPC_AUTH_MAXFLAVOR;
10219 			break;
10220 		}
10221 
10222 		if (!nfs_auth_info_match(&server->auth_info, flavor))
10223 			flavor = RPC_AUTH_MAXFLAVOR;
10224 
10225 		if (flavor != RPC_AUTH_MAXFLAVOR) {
10226 			err = nfs4_lookup_root_sec(server, fhandle,
10227 						   info, flavor);
10228 			if (!err)
10229 				break;
10230 		}
10231 	}
10232 
10233 	if (flavor == RPC_AUTH_MAXFLAVOR)
10234 		err = -EPERM;
10235 
10236 out_freepage:
10237 	put_page(page);
10238 	if (err == -EACCES)
10239 		return -EPERM;
10240 out:
10241 	return err;
10242 }
10243 
10244 static int _nfs41_test_stateid(struct nfs_server *server,
10245 		nfs4_stateid *stateid,
10246 		const struct cred *cred)
10247 {
10248 	int status;
10249 	struct nfs41_test_stateid_args args = {
10250 		.stateid = stateid,
10251 	};
10252 	struct nfs41_test_stateid_res res;
10253 	struct rpc_message msg = {
10254 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
10255 		.rpc_argp = &args,
10256 		.rpc_resp = &res,
10257 		.rpc_cred = cred,
10258 	};
10259 	struct rpc_clnt *rpc_client = server->client;
10260 
10261 	nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
10262 		&rpc_client, &msg);
10263 
10264 	dprintk("NFS call  test_stateid %p\n", stateid);
10265 	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
10266 	status = nfs4_call_sync_sequence(rpc_client, server, &msg,
10267 			&args.seq_args, &res.seq_res);
10268 	if (status != NFS_OK) {
10269 		dprintk("NFS reply test_stateid: failed, %d\n", status);
10270 		return status;
10271 	}
10272 	dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
10273 	return -res.status;
10274 }
10275 
10276 static void nfs4_handle_delay_or_session_error(struct nfs_server *server,
10277 		int err, struct nfs4_exception *exception)
10278 {
10279 	exception->retry = 0;
10280 	switch(err) {
10281 	case -NFS4ERR_DELAY:
10282 	case -NFS4ERR_RETRY_UNCACHED_REP:
10283 		nfs4_handle_exception(server, err, exception);
10284 		break;
10285 	case -NFS4ERR_BADSESSION:
10286 	case -NFS4ERR_BADSLOT:
10287 	case -NFS4ERR_BAD_HIGH_SLOT:
10288 	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
10289 	case -NFS4ERR_DEADSESSION:
10290 		nfs4_do_handle_exception(server, err, exception);
10291 	}
10292 }
10293 
10294 /**
10295  * nfs41_test_stateid - perform a TEST_STATEID operation
10296  *
10297  * @server: server / transport on which to perform the operation
10298  * @stateid: state ID to test
10299  * @cred: credential
10300  *
10301  * Returns NFS_OK if the server recognizes that "stateid" is valid.
10302  * Otherwise a negative NFS4ERR value is returned if the operation
10303  * failed or the state ID is not currently valid.
10304  */
10305 static int nfs41_test_stateid(struct nfs_server *server,
10306 		nfs4_stateid *stateid,
10307 		const struct cred *cred)
10308 {
10309 	struct nfs4_exception exception = {
10310 		.interruptible = true,
10311 	};
10312 	int err;
10313 	do {
10314 		err = _nfs41_test_stateid(server, stateid, cred);
10315 		nfs4_handle_delay_or_session_error(server, err, &exception);
10316 	} while (exception.retry);
10317 	return err;
10318 }
10319 
10320 struct nfs_free_stateid_data {
10321 	struct nfs_server *server;
10322 	struct nfs41_free_stateid_args args;
10323 	struct nfs41_free_stateid_res res;
10324 };
10325 
10326 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
10327 {
10328 	struct nfs_free_stateid_data *data = calldata;
10329 	nfs4_setup_sequence(data->server->nfs_client,
10330 			&data->args.seq_args,
10331 			&data->res.seq_res,
10332 			task);
10333 }
10334 
10335 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
10336 {
10337 	struct nfs_free_stateid_data *data = calldata;
10338 
10339 	nfs41_sequence_done(task, &data->res.seq_res);
10340 
10341 	switch (task->tk_status) {
10342 	case -NFS4ERR_DELAY:
10343 		if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN)
10344 			rpc_restart_call_prepare(task);
10345 	}
10346 }
10347 
10348 static void nfs41_free_stateid_release(void *calldata)
10349 {
10350 	struct nfs_free_stateid_data *data = calldata;
10351 	struct nfs_client *clp = data->server->nfs_client;
10352 
10353 	nfs_put_client(clp);
10354 	kfree(calldata);
10355 }
10356 
10357 static const struct rpc_call_ops nfs41_free_stateid_ops = {
10358 	.rpc_call_prepare = nfs41_free_stateid_prepare,
10359 	.rpc_call_done = nfs41_free_stateid_done,
10360 	.rpc_release = nfs41_free_stateid_release,
10361 };
10362 
10363 /**
10364  * nfs41_free_stateid - perform a FREE_STATEID operation
10365  *
10366  * @server: server / transport on which to perform the operation
10367  * @stateid: state ID to release
10368  * @cred: credential
10369  * @privileged: set to true if this call needs to be privileged
10370  *
10371  * Note: this function is always asynchronous.
10372  */
10373 static int nfs41_free_stateid(struct nfs_server *server,
10374 		const nfs4_stateid *stateid,
10375 		const struct cred *cred,
10376 		bool privileged)
10377 {
10378 	struct rpc_message msg = {
10379 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
10380 		.rpc_cred = cred,
10381 	};
10382 	struct rpc_task_setup task_setup = {
10383 		.rpc_client = server->client,
10384 		.rpc_message = &msg,
10385 		.callback_ops = &nfs41_free_stateid_ops,
10386 		.flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
10387 	};
10388 	struct nfs_free_stateid_data *data;
10389 	struct rpc_task *task;
10390 	struct nfs_client *clp = server->nfs_client;
10391 
10392 	if (!refcount_inc_not_zero(&clp->cl_count))
10393 		return -EIO;
10394 
10395 	nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
10396 		&task_setup.rpc_client, &msg);
10397 
10398 	dprintk("NFS call  free_stateid %p\n", stateid);
10399 	data = kmalloc(sizeof(*data), GFP_KERNEL);
10400 	if (!data)
10401 		return -ENOMEM;
10402 	data->server = server;
10403 	nfs4_stateid_copy(&data->args.stateid, stateid);
10404 
10405 	task_setup.callback_data = data;
10406 
10407 	msg.rpc_argp = &data->args;
10408 	msg.rpc_resp = &data->res;
10409 	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, privileged);
10410 	task = rpc_run_task(&task_setup);
10411 	if (IS_ERR(task))
10412 		return PTR_ERR(task);
10413 	rpc_put_task(task);
10414 	return 0;
10415 }
10416 
10417 static void
10418 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
10419 {
10420 	const struct cred *cred = lsp->ls_state->owner->so_cred;
10421 
10422 	nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
10423 	nfs4_free_lock_state(server, lsp);
10424 }
10425 
10426 static bool nfs41_match_stateid(const nfs4_stateid *s1,
10427 		const nfs4_stateid *s2)
10428 {
10429 	if (s1->type != s2->type)
10430 		return false;
10431 
10432 	if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
10433 		return false;
10434 
10435 	if (s1->seqid == s2->seqid)
10436 		return true;
10437 
10438 	return s1->seqid == 0 || s2->seqid == 0;
10439 }
10440 
10441 #endif /* CONFIG_NFS_V4_1 */
10442 
10443 static bool nfs4_match_stateid(const nfs4_stateid *s1,
10444 		const nfs4_stateid *s2)
10445 {
10446 	return nfs4_stateid_match(s1, s2);
10447 }
10448 
10449 
10450 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
10451 	.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
10452 	.state_flag_bit	= NFS_STATE_RECLAIM_REBOOT,
10453 	.recover_open	= nfs4_open_reclaim,
10454 	.recover_lock	= nfs4_lock_reclaim,
10455 	.establish_clid = nfs4_init_clientid,
10456 	.detect_trunking = nfs40_discover_server_trunking,
10457 };
10458 
10459 #if defined(CONFIG_NFS_V4_1)
10460 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
10461 	.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
10462 	.state_flag_bit	= NFS_STATE_RECLAIM_REBOOT,
10463 	.recover_open	= nfs4_open_reclaim,
10464 	.recover_lock	= nfs4_lock_reclaim,
10465 	.establish_clid = nfs41_init_clientid,
10466 	.reclaim_complete = nfs41_proc_reclaim_complete,
10467 	.detect_trunking = nfs41_discover_server_trunking,
10468 };
10469 #endif /* CONFIG_NFS_V4_1 */
10470 
10471 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
10472 	.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
10473 	.state_flag_bit	= NFS_STATE_RECLAIM_NOGRACE,
10474 	.recover_open	= nfs40_open_expired,
10475 	.recover_lock	= nfs4_lock_expired,
10476 	.establish_clid = nfs4_init_clientid,
10477 };
10478 
10479 #if defined(CONFIG_NFS_V4_1)
10480 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
10481 	.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
10482 	.state_flag_bit	= NFS_STATE_RECLAIM_NOGRACE,
10483 	.recover_open	= nfs41_open_expired,
10484 	.recover_lock	= nfs41_lock_expired,
10485 	.establish_clid = nfs41_init_clientid,
10486 };
10487 #endif /* CONFIG_NFS_V4_1 */
10488 
10489 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
10490 	.sched_state_renewal = nfs4_proc_async_renew,
10491 	.get_state_renewal_cred = nfs4_get_renew_cred,
10492 	.renew_lease = nfs4_proc_renew,
10493 };
10494 
10495 #if defined(CONFIG_NFS_V4_1)
10496 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
10497 	.sched_state_renewal = nfs41_proc_async_sequence,
10498 	.get_state_renewal_cred = nfs4_get_machine_cred,
10499 	.renew_lease = nfs4_proc_sequence,
10500 };
10501 #endif
10502 
10503 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = {
10504 	.get_locations = _nfs40_proc_get_locations,
10505 	.fsid_present = _nfs40_proc_fsid_present,
10506 };
10507 
10508 #if defined(CONFIG_NFS_V4_1)
10509 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = {
10510 	.get_locations = _nfs41_proc_get_locations,
10511 	.fsid_present = _nfs41_proc_fsid_present,
10512 };
10513 #endif	/* CONFIG_NFS_V4_1 */
10514 
10515 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
10516 	.minor_version = 0,
10517 	.init_caps = NFS_CAP_READDIRPLUS
10518 		| NFS_CAP_ATOMIC_OPEN
10519 		| NFS_CAP_POSIX_LOCK,
10520 	.init_client = nfs40_init_client,
10521 	.shutdown_client = nfs40_shutdown_client,
10522 	.match_stateid = nfs4_match_stateid,
10523 	.find_root_sec = nfs4_find_root_sec,
10524 	.free_lock_state = nfs4_release_lockowner,
10525 	.test_and_free_expired = nfs40_test_and_free_expired_stateid,
10526 	.alloc_seqid = nfs_alloc_seqid,
10527 	.call_sync_ops = &nfs40_call_sync_ops,
10528 	.reboot_recovery_ops = &nfs40_reboot_recovery_ops,
10529 	.nograce_recovery_ops = &nfs40_nograce_recovery_ops,
10530 	.state_renewal_ops = &nfs40_state_renewal_ops,
10531 	.mig_recovery_ops = &nfs40_mig_recovery_ops,
10532 };
10533 
10534 #if defined(CONFIG_NFS_V4_1)
10535 static struct nfs_seqid *
10536 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2)
10537 {
10538 	return NULL;
10539 }
10540 
10541 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
10542 	.minor_version = 1,
10543 	.init_caps = NFS_CAP_READDIRPLUS
10544 		| NFS_CAP_ATOMIC_OPEN
10545 		| NFS_CAP_POSIX_LOCK
10546 		| NFS_CAP_STATEID_NFSV41
10547 		| NFS_CAP_ATOMIC_OPEN_V1
10548 		| NFS_CAP_LGOPEN
10549 		| NFS_CAP_MOVEABLE,
10550 	.init_client = nfs41_init_client,
10551 	.shutdown_client = nfs41_shutdown_client,
10552 	.match_stateid = nfs41_match_stateid,
10553 	.find_root_sec = nfs41_find_root_sec,
10554 	.free_lock_state = nfs41_free_lock_state,
10555 	.test_and_free_expired = nfs41_test_and_free_expired_stateid,
10556 	.alloc_seqid = nfs_alloc_no_seqid,
10557 	.session_trunk = nfs4_test_session_trunk,
10558 	.call_sync_ops = &nfs41_call_sync_ops,
10559 	.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
10560 	.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
10561 	.state_renewal_ops = &nfs41_state_renewal_ops,
10562 	.mig_recovery_ops = &nfs41_mig_recovery_ops,
10563 };
10564 #endif
10565 
10566 #if defined(CONFIG_NFS_V4_2)
10567 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
10568 	.minor_version = 2,
10569 	.init_caps = NFS_CAP_READDIRPLUS
10570 		| NFS_CAP_ATOMIC_OPEN
10571 		| NFS_CAP_POSIX_LOCK
10572 		| NFS_CAP_STATEID_NFSV41
10573 		| NFS_CAP_ATOMIC_OPEN_V1
10574 		| NFS_CAP_LGOPEN
10575 		| NFS_CAP_ALLOCATE
10576 		| NFS_CAP_COPY
10577 		| NFS_CAP_OFFLOAD_CANCEL
10578 		| NFS_CAP_COPY_NOTIFY
10579 		| NFS_CAP_DEALLOCATE
10580 		| NFS_CAP_SEEK
10581 		| NFS_CAP_LAYOUTSTATS
10582 		| NFS_CAP_CLONE
10583 		| NFS_CAP_LAYOUTERROR
10584 		| NFS_CAP_READ_PLUS
10585 		| NFS_CAP_MOVEABLE,
10586 	.init_client = nfs41_init_client,
10587 	.shutdown_client = nfs41_shutdown_client,
10588 	.match_stateid = nfs41_match_stateid,
10589 	.find_root_sec = nfs41_find_root_sec,
10590 	.free_lock_state = nfs41_free_lock_state,
10591 	.call_sync_ops = &nfs41_call_sync_ops,
10592 	.test_and_free_expired = nfs41_test_and_free_expired_stateid,
10593 	.alloc_seqid = nfs_alloc_no_seqid,
10594 	.session_trunk = nfs4_test_session_trunk,
10595 	.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
10596 	.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
10597 	.state_renewal_ops = &nfs41_state_renewal_ops,
10598 	.mig_recovery_ops = &nfs41_mig_recovery_ops,
10599 };
10600 #endif
10601 
10602 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
10603 	[0] = &nfs_v4_0_minor_ops,
10604 #if defined(CONFIG_NFS_V4_1)
10605 	[1] = &nfs_v4_1_minor_ops,
10606 #endif
10607 #if defined(CONFIG_NFS_V4_2)
10608 	[2] = &nfs_v4_2_minor_ops,
10609 #endif
10610 };
10611 
10612 static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
10613 {
10614 	ssize_t error, error2, error3;
10615 
10616 	error = generic_listxattr(dentry, list, size);
10617 	if (error < 0)
10618 		return error;
10619 	if (list) {
10620 		list += error;
10621 		size -= error;
10622 	}
10623 
10624 	error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, size);
10625 	if (error2 < 0)
10626 		return error2;
10627 
10628 	if (list) {
10629 		list += error2;
10630 		size -= error2;
10631 	}
10632 
10633 	error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, size);
10634 	if (error3 < 0)
10635 		return error3;
10636 
10637 	return error + error2 + error3;
10638 }
10639 
10640 static void nfs4_enable_swap(struct inode *inode)
10641 {
10642 	/* The state manager thread must always be running.
10643 	 * It will notice the client is a swapper, and stay put.
10644 	 */
10645 	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
10646 
10647 	nfs4_schedule_state_manager(clp);
10648 }
10649 
10650 static void nfs4_disable_swap(struct inode *inode)
10651 {
10652 	/* The state manager thread will now exit once it is
10653 	 * woken.
10654 	 */
10655 	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
10656 
10657 	set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
10658 	clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
10659 	wake_up_var(&clp->cl_state);
10660 }
10661 
10662 static const struct inode_operations nfs4_dir_inode_operations = {
10663 	.create		= nfs_create,
10664 	.lookup		= nfs_lookup,
10665 	.atomic_open	= nfs_atomic_open,
10666 	.link		= nfs_link,
10667 	.unlink		= nfs_unlink,
10668 	.symlink	= nfs_symlink,
10669 	.mkdir		= nfs_mkdir,
10670 	.rmdir		= nfs_rmdir,
10671 	.mknod		= nfs_mknod,
10672 	.rename		= nfs_rename,
10673 	.permission	= nfs_permission,
10674 	.getattr	= nfs_getattr,
10675 	.setattr	= nfs_setattr,
10676 	.listxattr	= nfs4_listxattr,
10677 };
10678 
10679 static const struct inode_operations nfs4_file_inode_operations = {
10680 	.permission	= nfs_permission,
10681 	.getattr	= nfs_getattr,
10682 	.setattr	= nfs_setattr,
10683 	.listxattr	= nfs4_listxattr,
10684 };
10685 
10686 const struct nfs_rpc_ops nfs_v4_clientops = {
10687 	.version	= 4,			/* protocol version */
10688 	.dentry_ops	= &nfs4_dentry_operations,
10689 	.dir_inode_ops	= &nfs4_dir_inode_operations,
10690 	.file_inode_ops	= &nfs4_file_inode_operations,
10691 	.file_ops	= &nfs4_file_operations,
10692 	.getroot	= nfs4_proc_get_root,
10693 	.submount	= nfs4_submount,
10694 	.try_get_tree	= nfs4_try_get_tree,
10695 	.getattr	= nfs4_proc_getattr,
10696 	.setattr	= nfs4_proc_setattr,
10697 	.lookup		= nfs4_proc_lookup,
10698 	.lookupp	= nfs4_proc_lookupp,
10699 	.access		= nfs4_proc_access,
10700 	.readlink	= nfs4_proc_readlink,
10701 	.create		= nfs4_proc_create,
10702 	.remove		= nfs4_proc_remove,
10703 	.unlink_setup	= nfs4_proc_unlink_setup,
10704 	.unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
10705 	.unlink_done	= nfs4_proc_unlink_done,
10706 	.rename_setup	= nfs4_proc_rename_setup,
10707 	.rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
10708 	.rename_done	= nfs4_proc_rename_done,
10709 	.link		= nfs4_proc_link,
10710 	.symlink	= nfs4_proc_symlink,
10711 	.mkdir		= nfs4_proc_mkdir,
10712 	.rmdir		= nfs4_proc_rmdir,
10713 	.readdir	= nfs4_proc_readdir,
10714 	.mknod		= nfs4_proc_mknod,
10715 	.statfs		= nfs4_proc_statfs,
10716 	.fsinfo		= nfs4_proc_fsinfo,
10717 	.pathconf	= nfs4_proc_pathconf,
10718 	.set_capabilities = nfs4_server_capabilities,
10719 	.decode_dirent	= nfs4_decode_dirent,
10720 	.pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare,
10721 	.read_setup	= nfs4_proc_read_setup,
10722 	.read_done	= nfs4_read_done,
10723 	.write_setup	= nfs4_proc_write_setup,
10724 	.write_done	= nfs4_write_done,
10725 	.commit_setup	= nfs4_proc_commit_setup,
10726 	.commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
10727 	.commit_done	= nfs4_commit_done,
10728 	.lock		= nfs4_proc_lock,
10729 	.clear_acl_cache = nfs4_zap_acl_attr,
10730 	.close_context  = nfs4_close_context,
10731 	.open_context	= nfs4_atomic_open,
10732 	.have_delegation = nfs4_have_delegation,
10733 	.alloc_client	= nfs4_alloc_client,
10734 	.init_client	= nfs4_init_client,
10735 	.free_client	= nfs4_free_client,
10736 	.create_server	= nfs4_create_server,
10737 	.clone_server	= nfs_clone_server,
10738 	.discover_trunking = nfs4_discover_trunking,
10739 	.enable_swap	= nfs4_enable_swap,
10740 	.disable_swap	= nfs4_disable_swap,
10741 };
10742 
10743 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
10744 	.name	= XATTR_NAME_NFSV4_ACL,
10745 	.list	= nfs4_xattr_list_nfs4_acl,
10746 	.get	= nfs4_xattr_get_nfs4_acl,
10747 	.set	= nfs4_xattr_set_nfs4_acl,
10748 };
10749 
10750 #if defined(CONFIG_NFS_V4_1)
10751 static const struct xattr_handler nfs4_xattr_nfs4_dacl_handler = {
10752 	.name	= XATTR_NAME_NFSV4_DACL,
10753 	.list	= nfs4_xattr_list_nfs4_dacl,
10754 	.get	= nfs4_xattr_get_nfs4_dacl,
10755 	.set	= nfs4_xattr_set_nfs4_dacl,
10756 };
10757 
10758 static const struct xattr_handler nfs4_xattr_nfs4_sacl_handler = {
10759 	.name	= XATTR_NAME_NFSV4_SACL,
10760 	.list	= nfs4_xattr_list_nfs4_sacl,
10761 	.get	= nfs4_xattr_get_nfs4_sacl,
10762 	.set	= nfs4_xattr_set_nfs4_sacl,
10763 };
10764 #endif
10765 
10766 #ifdef CONFIG_NFS_V4_2
10767 static const struct xattr_handler nfs4_xattr_nfs4_user_handler = {
10768 	.prefix	= XATTR_USER_PREFIX,
10769 	.get	= nfs4_xattr_get_nfs4_user,
10770 	.set	= nfs4_xattr_set_nfs4_user,
10771 };
10772 #endif
10773 
10774 const struct xattr_handler * const nfs4_xattr_handlers[] = {
10775 	&nfs4_xattr_nfs4_acl_handler,
10776 #if defined(CONFIG_NFS_V4_1)
10777 	&nfs4_xattr_nfs4_dacl_handler,
10778 	&nfs4_xattr_nfs4_sacl_handler,
10779 #endif
10780 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
10781 	&nfs4_xattr_nfs4_label_handler,
10782 #endif
10783 #ifdef CONFIG_NFS_V4_2
10784 	&nfs4_xattr_nfs4_user_handler,
10785 #endif
10786 	NULL
10787 };
10788