1 /*
2 * fs/nfs/nfs4proc.c
3 *
4 * Client-side procedure declarations for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 #include <linux/mm.h>
39 #include <linux/delay.h>
40 #include <linux/errno.h>
41 #include <linux/string.h>
42 #include <linux/ratelimit.h>
43 #include <linux/printk.h>
44 #include <linux/slab.h>
45 #include <linux/sunrpc/clnt.h>
46 #include <linux/nfs.h>
47 #include <linux/nfs4.h>
48 #include <linux/nfs_fs.h>
49 #include <linux/nfs_page.h>
50 #include <linux/nfs_mount.h>
51 #include <linux/namei.h>
52 #include <linux/mount.h>
53 #include <linux/module.h>
54 #include <linux/xattr.h>
55 #include <linux/utsname.h>
56 #include <linux/freezer.h>
57 #include <linux/iversion.h>
58
59 #include "nfs4_fs.h"
60 #include "delegation.h"
61 #include "internal.h"
62 #include "iostat.h"
63 #include "callback.h"
64 #include "pnfs.h"
65 #include "netns.h"
66 #include "sysfs.h"
67 #include "nfs4idmap.h"
68 #include "nfs4session.h"
69 #include "fscache.h"
70 #include "nfs42.h"
71
72 #include "nfs4trace.h"
73
74 #define NFSDBG_FACILITY NFSDBG_PROC
75
76 #define NFS4_BITMASK_SZ 3
77
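/*
 * Bounds for the exponential backoff used by nfs4_update_delay() and
 * nfs4_delay() when the server asks us to back off (e.g. NFS4ERR_DELAY).
 */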
78 #define NFS4_POLL_RETRY_MIN (HZ/10)
79 #define NFS4_POLL_RETRY_MAX (15*HZ)
80
81 /* file attributes which can be mapped to nfs attributes */
82 #define NFS4_VALID_ATTRS (ATTR_MODE \
83 | ATTR_UID \
84 | ATTR_GID \
85 | ATTR_SIZE \
86 | ATTR_ATIME \
87 | ATTR_MTIME \
88 | ATTR_CTIME \
89 | ATTR_ATIME_SET \
90 | ATTR_MTIME_SET)
91
92 struct nfs4_opendata;
93 static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
94 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
95 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
96 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
97 struct nfs_fattr *fattr, struct inode *inode);
98 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
99 struct nfs_fattr *fattr, struct iattr *sattr,
100 struct nfs_open_context *ctx, struct nfs4_label *ilabel);
101 #ifdef CONFIG_NFS_V4_1
102 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
103 const struct cred *cred,
104 struct nfs4_slot *slot,
105 bool is_privileged);
106 static int nfs41_test_stateid(struct nfs_server *, const nfs4_stateid *,
107 const struct cred *);
108 static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *,
109 const struct cred *, bool);
110 #endif
111
112 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
113 static inline struct nfs4_label *
114 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
115 struct iattr *sattr, struct nfs4_label *label)
116 {
117 struct lsm_context shim;
118 int err;
119
120 if (label == NULL)
121 return NULL;
122
123 if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
124 return NULL;
125
126 label->lfs = 0;
127 label->pi = 0;
128 label->len = 0;
129 label->label = NULL;
130
131 err = security_dentry_init_security(dentry, sattr->ia_mode,
132 &dentry->d_name, NULL, &shim);
133 if (err)
134 return NULL;
135
136 label->lsmid = shim.id;
137 label->label = shim.context;
138 label->len = shim.len;
139 return label;
140 }
141 static inline void
142 nfs4_label_release_security(struct nfs4_label *label)
143 {
144 struct lsm_context shim;
145
146 if (label) {
147 shim.context = label->label;
148 shim.len = label->len;
149 shim.id = label->lsmid;
150 security_release_secctx(&shim);
151 }
152 }
153 static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
154 {
155 if (label)
156 return server->attr_bitmask;
157
158 return server->attr_bitmask_nl;
159 }
160 #else
161 static inline struct nfs4_label *
162 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
163 struct iattr *sattr, struct nfs4_label *l)
164 { return NULL; }
165 static inline void
166 nfs4_label_release_security(struct nfs4_label *label)
167 { return; }
168 static inline u32 *
169 nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
170 { return server->attr_bitmask; }
171 #endif
172
173 /* Prevent leaks of NFSv4 errors into userland */
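/*
 * Anything already in the normal errno range (including the NFS4ERR_*
 * values that share their numbers with errnos) is passed through
 * unchanged; only the protocol-specific codes above that range are
 * translated.
 */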
174 static int nfs4_map_errors(int err)
175 {
176 if (err >= -1000)
177 return err;
178 switch (err) {
179 case -NFS4ERR_RESOURCE:
180 case -NFS4ERR_LAYOUTTRYLATER:
181 case -NFS4ERR_RECALLCONFLICT:
182 case -NFS4ERR_RETURNCONFLICT:
183 return -EREMOTEIO;
184 case -NFS4ERR_WRONGSEC:
185 case -NFS4ERR_WRONG_CRED:
186 return -EPERM;
187 case -NFS4ERR_BADOWNER:
188 case -NFS4ERR_BADNAME:
189 return -EINVAL;
190 case -NFS4ERR_SHARE_DENIED:
191 return -EACCES;
192 case -NFS4ERR_MINOR_VERS_MISMATCH:
193 return -EPROTONOSUPPORT;
194 case -NFS4ERR_FILE_OPEN:
195 return -EBUSY;
196 case -NFS4ERR_NOT_SAME:
197 return -ENOTSYNC;
198 case -ENETDOWN:
199 case -ENETUNREACH:
200 break;
201 default:
202 dprintk("%s could not handle NFSv4 error %d\n",
203 __func__, -err);
204 break;
205 }
206 return -EIO;
207 }
208
209 /*
210 * This is our standard bitmap for GETATTR requests.
211 */
212 const u32 nfs4_fattr_bitmap[3] = {
213 FATTR4_WORD0_TYPE
214 | FATTR4_WORD0_CHANGE
215 | FATTR4_WORD0_SIZE
216 | FATTR4_WORD0_FSID
217 | FATTR4_WORD0_FILEID,
218 FATTR4_WORD1_MODE
219 | FATTR4_WORD1_NUMLINKS
220 | FATTR4_WORD1_OWNER
221 | FATTR4_WORD1_OWNER_GROUP
222 | FATTR4_WORD1_RAWDEV
223 | FATTR4_WORD1_SPACE_USED
224 | FATTR4_WORD1_TIME_ACCESS
225 | FATTR4_WORD1_TIME_METADATA
226 | FATTR4_WORD1_TIME_MODIFY
227 | FATTR4_WORD1_MOUNTED_ON_FILEID,
228 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
229 FATTR4_WORD2_SECURITY_LABEL
230 #endif
231 };
232
233 static const u32 nfs4_pnfs_open_bitmap[3] = {
234 FATTR4_WORD0_TYPE
235 | FATTR4_WORD0_CHANGE
236 | FATTR4_WORD0_SIZE
237 | FATTR4_WORD0_FSID
238 | FATTR4_WORD0_FILEID,
239 FATTR4_WORD1_MODE
240 | FATTR4_WORD1_NUMLINKS
241 | FATTR4_WORD1_OWNER
242 | FATTR4_WORD1_OWNER_GROUP
243 | FATTR4_WORD1_RAWDEV
244 | FATTR4_WORD1_SPACE_USED
245 | FATTR4_WORD1_TIME_ACCESS
246 | FATTR4_WORD1_TIME_METADATA
247 | FATTR4_WORD1_TIME_MODIFY,
248 FATTR4_WORD2_MDSTHRESHOLD
249 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
250 | FATTR4_WORD2_SECURITY_LABEL
251 #endif
252 };
253
254 static const u32 nfs4_open_noattr_bitmap[3] = {
255 FATTR4_WORD0_TYPE
256 | FATTR4_WORD0_FILEID,
257 };
258
259 const u32 nfs4_statfs_bitmap[3] = {
260 FATTR4_WORD0_FILES_AVAIL
261 | FATTR4_WORD0_FILES_FREE
262 | FATTR4_WORD0_FILES_TOTAL,
263 FATTR4_WORD1_SPACE_AVAIL
264 | FATTR4_WORD1_SPACE_FREE
265 | FATTR4_WORD1_SPACE_TOTAL
266 };
267
268 const u32 nfs4_pathconf_bitmap[3] = {
269 FATTR4_WORD0_MAXLINK
270 | FATTR4_WORD0_MAXNAME,
271 0
272 };
273
274 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
275 | FATTR4_WORD0_MAXREAD
276 | FATTR4_WORD0_MAXWRITE
277 | FATTR4_WORD0_LEASE_TIME,
278 FATTR4_WORD1_TIME_DELTA
279 | FATTR4_WORD1_FS_LAYOUT_TYPES,
280 FATTR4_WORD2_LAYOUT_BLKSIZE
281 | FATTR4_WORD2_CLONE_BLKSIZE
282 | FATTR4_WORD2_CHANGE_ATTR_TYPE
283 | FATTR4_WORD2_XATTR_SUPPORT
284 };
285
286 const u32 nfs4_fs_locations_bitmap[3] = {
287 FATTR4_WORD0_CHANGE
288 | FATTR4_WORD0_SIZE
289 | FATTR4_WORD0_FSID
290 | FATTR4_WORD0_FILEID
291 | FATTR4_WORD0_FS_LOCATIONS,
292 FATTR4_WORD1_OWNER
293 | FATTR4_WORD1_OWNER_GROUP
294 | FATTR4_WORD1_RAWDEV
295 | FATTR4_WORD1_SPACE_USED
296 | FATTR4_WORD1_TIME_ACCESS
297 | FATTR4_WORD1_TIME_METADATA
298 | FATTR4_WORD1_TIME_MODIFY
299 | FATTR4_WORD1_MOUNTED_ON_FILEID,
300 };
301
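/*
 * Copy the GETATTR bitmask, then trim away attributes for which we hold
 * a delegation and the cached value is still valid, so the server is not
 * asked to return them needlessly.
 */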
302 static void nfs4_bitmap_copy_adjust(__u32 *dst, const __u32 *src,
303 struct inode *inode, unsigned long flags)
304 {
305 unsigned long cache_validity;
306
307 memcpy(dst, src, NFS4_BITMASK_SZ*sizeof(*dst));
308 if (!inode || !nfs_have_read_or_write_delegation(inode))
309 return;
310
311 cache_validity = READ_ONCE(NFS_I(inode)->cache_validity) | flags;
312
313 /* Remove the attributes over which we have full control */
314 dst[1] &= ~FATTR4_WORD1_RAWDEV;
315 if (!(cache_validity & NFS_INO_INVALID_SIZE))
316 dst[0] &= ~FATTR4_WORD0_SIZE;
317
318 if (!(cache_validity & NFS_INO_INVALID_CHANGE))
319 dst[0] &= ~FATTR4_WORD0_CHANGE;
320
321 if (!(cache_validity & NFS_INO_INVALID_MODE))
322 dst[1] &= ~FATTR4_WORD1_MODE;
323 if (!(cache_validity & NFS_INO_INVALID_OTHER))
324 dst[1] &= ~(FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP);
325
326 if (nfs_have_delegated_mtime(inode)) {
327 if (!(cache_validity & NFS_INO_INVALID_ATIME))
328 dst[1] &= ~(FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET);
329 if (!(cache_validity & NFS_INO_INVALID_MTIME))
330 dst[1] &= ~(FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET);
331 if (!(cache_validity & NFS_INO_INVALID_CTIME))
332 dst[1] &= ~(FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY_SET);
333 } else if (nfs_have_delegated_atime(inode)) {
334 if (!(cache_validity & NFS_INO_INVALID_ATIME))
335 dst[1] &= ~(FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET);
336 }
337 }
338
339 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
340 struct nfs4_readdir_arg *readdir)
341 {
342 unsigned int attrs = FATTR4_WORD0_FILEID | FATTR4_WORD0_TYPE;
343 __be32 *start, *p;
344
345 if (cookie > 2) {
346 readdir->cookie = cookie;
347 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
348 return;
349 }
350
351 readdir->cookie = 0;
352 memset(&readdir->verifier, 0, sizeof(readdir->verifier));
353 if (cookie == 2)
354 return;
355
356 /*
357 * NFSv4 servers do not return entries for '.' and '..'
358 * Therefore, we fake these entries here. We let '.'
359 * have cookie 0 and '..' have cookie 1. Note that
360 * when talking to the server, we always send cookie 0
361 * instead of 1 or 2.
362 */
363 start = p = kmap_atomic(*readdir->pages);
364
365 if (cookie == 0) {
366 *p++ = xdr_one; /* next */
367 *p++ = xdr_zero; /* cookie, first word */
368 *p++ = xdr_one; /* cookie, second word */
369 *p++ = xdr_one; /* entry len */
370 memcpy(p, ".\0\0\0", 4); /* entry */
371 p++;
372 *p++ = xdr_one; /* bitmap length */
373 *p++ = htonl(attrs); /* bitmap */
374 *p++ = htonl(12); /* attribute buffer length */
375 *p++ = htonl(NF4DIR);
376 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
377 }
378
379 *p++ = xdr_one; /* next */
380 *p++ = xdr_zero; /* cookie, first word */
381 *p++ = xdr_two; /* cookie, second word */
382 *p++ = xdr_two; /* entry len */
383 memcpy(p, "..\0\0", 4); /* entry */
384 p++;
385 *p++ = xdr_one; /* bitmap length */
386 *p++ = htonl(attrs); /* bitmap */
387 *p++ = htonl(12); /* attribute buffer length */
388 *p++ = htonl(NF4DIR);
389 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
390
391 readdir->pgbase = (char *)p - (char *)start;
392 readdir->count -= readdir->pgbase;
393 kunmap_atomic(start);
394 }
395
396 static void nfs4_fattr_set_prechange(struct nfs_fattr *fattr, u64 version)
397 {
398 if (!(fattr->valid & NFS_ATTR_FATTR_PRECHANGE)) {
399 fattr->pre_change_attr = version;
400 fattr->valid |= NFS_ATTR_FATTR_PRECHANGE;
401 }
402 }
403
404 static void nfs4_test_and_free_stateid(struct nfs_server *server,
405 nfs4_stateid *stateid,
406 const struct cred *cred)
407 {
408 const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;
409
410 ops->test_and_free_expired(server, stateid, cred);
411 }
412
413 static void __nfs4_free_revoked_stateid(struct nfs_server *server,
414 nfs4_stateid *stateid,
415 const struct cred *cred)
416 {
417 stateid->type = NFS4_REVOKED_STATEID_TYPE;
418 nfs4_test_and_free_stateid(server, stateid, cred);
419 }
420
421 static void nfs4_free_revoked_stateid(struct nfs_server *server,
422 const nfs4_stateid *stateid,
423 const struct cred *cred)
424 {
425 nfs4_stateid tmp;
426
427 nfs4_stateid_copy(&tmp, stateid);
428 __nfs4_free_revoked_stateid(server, &tmp, cred);
429 }
430
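/*
 * Return the current retry delay and double *timeout for the next
 * attempt, clamped to [NFS4_POLL_RETRY_MIN, NFS4_POLL_RETRY_MAX].
 */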
431 static long nfs4_update_delay(long *timeout)
432 {
433 long ret;
434 if (!timeout)
435 return NFS4_POLL_RETRY_MAX;
436 if (*timeout <= 0)
437 *timeout = NFS4_POLL_RETRY_MIN;
438 if (*timeout > NFS4_POLL_RETRY_MAX)
439 *timeout = NFS4_POLL_RETRY_MAX;
440 ret = *timeout;
441 *timeout <<= 1;
442 return ret;
443 }
444
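/*
 * Sleep for the backoff interval computed above. The _killable variant
 * only reacts to fatal signals, while the _interruptible variant also
 * returns -ERESTARTSYS on an ordinary signal.
 */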
445 static int nfs4_delay_killable(long *timeout)
446 {
447 might_sleep();
448
449 if (unlikely(nfs_current_task_exiting()))
450 return -EINTR;
451 __set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
452 schedule_timeout(nfs4_update_delay(timeout));
453 if (!__fatal_signal_pending(current))
454 return 0;
455 return -EINTR;
456 }
457
458 static int nfs4_delay_interruptible(long *timeout)
459 {
460 might_sleep();
461
462 if (unlikely(nfs_current_task_exiting()))
463 return -EINTR;
464 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE_UNSAFE);
465 schedule_timeout(nfs4_update_delay(timeout));
466 if (!signal_pending(current))
467 return 0;
468 return __fatal_signal_pending(current) ? -EINTR : -ERESTARTSYS;
469 }
470
471 static int nfs4_delay(long *timeout, bool interruptible)
472 {
473 if (interruptible)
474 return nfs4_delay_interruptible(timeout);
475 return nfs4_delay_killable(timeout);
476 }
477
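/* Only open, lock and delegation stateids are candidates for recovery */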
478 static const nfs4_stateid *
479 nfs4_recoverable_stateid(const nfs4_stateid *stateid)
480 {
481 if (!stateid)
482 return NULL;
483 switch (stateid->type) {
484 case NFS4_OPEN_STATEID_TYPE:
485 case NFS4_LOCK_STATEID_TYPE:
486 case NFS4_DELEGATION_STATEID_TYPE:
487 return stateid;
488 default:
489 break;
490 }
491 return NULL;
492 }
493
494 /* Common NFSv4 error classification helper, shared by the synchronous
495 * and asynchronous error handlers below.
496 */
497 static int nfs4_do_handle_exception(struct nfs_server *server,
498 int errorcode, struct nfs4_exception *exception)
499 {
500 struct nfs_client *clp = server->nfs_client;
501 struct nfs4_state *state = exception->state;
502 const nfs4_stateid *stateid;
503 struct inode *inode = exception->inode;
504 int ret = errorcode;
505
506 exception->delay = 0;
507 exception->recovering = 0;
508 exception->retry = 0;
509
510 stateid = nfs4_recoverable_stateid(exception->stateid);
511 if (stateid == NULL && state != NULL)
512 stateid = nfs4_recoverable_stateid(&state->stateid);
513
514 switch(errorcode) {
515 case 0:
516 return 0;
517 case -NFS4ERR_BADHANDLE:
518 case -ESTALE:
519 if (inode != NULL && S_ISREG(inode->i_mode))
520 pnfs_destroy_layout(NFS_I(inode));
521 break;
522 case -NFS4ERR_DELEG_REVOKED:
523 case -NFS4ERR_ADMIN_REVOKED:
524 case -NFS4ERR_EXPIRED:
525 case -NFS4ERR_BAD_STATEID:
526 case -NFS4ERR_PARTNER_NO_AUTH:
527 if (inode != NULL && stateid != NULL) {
528 nfs_inode_find_state_and_recover(inode,
529 stateid);
530 goto wait_on_recovery;
531 }
532 fallthrough;
533 case -NFS4ERR_OPENMODE:
534 if (inode) {
535 int err;
536
537 err = nfs_async_inode_return_delegation(inode,
538 stateid);
539 if (err == 0)
540 goto wait_on_recovery;
541 if (stateid != NULL && stateid->type == NFS4_DELEGATION_STATEID_TYPE) {
542 exception->retry = 1;
543 break;
544 }
545 }
546 if (state == NULL)
547 break;
548 ret = nfs4_schedule_stateid_recovery(server, state);
549 if (ret < 0)
550 break;
551 goto wait_on_recovery;
552 case -NFS4ERR_STALE_STATEID:
553 case -NFS4ERR_STALE_CLIENTID:
554 nfs4_schedule_lease_recovery(clp);
555 goto wait_on_recovery;
556 case -NFS4ERR_MOVED:
557 ret = nfs4_schedule_migration_recovery(server);
558 if (ret < 0)
559 break;
560 goto wait_on_recovery;
561 case -NFS4ERR_LEASE_MOVED:
562 nfs4_schedule_lease_moved_recovery(clp);
563 goto wait_on_recovery;
564 #if defined(CONFIG_NFS_V4_1)
565 case -NFS4ERR_BADSESSION:
566 case -NFS4ERR_BADSLOT:
567 case -NFS4ERR_BAD_HIGH_SLOT:
568 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
569 case -NFS4ERR_DEADSESSION:
570 case -NFS4ERR_SEQ_FALSE_RETRY:
571 case -NFS4ERR_SEQ_MISORDERED:
572 /* Handled in nfs41_sequence_process() */
573 goto wait_on_recovery;
574 #endif /* defined(CONFIG_NFS_V4_1) */
575 case -NFS4ERR_FILE_OPEN:
576 if (exception->timeout > HZ) {
577 /* We have retried a decent amount, time to
578 * fail
579 */
580 ret = -EBUSY;
581 break;
582 }
583 fallthrough;
584 case -NFS4ERR_DELAY:
585 nfs_inc_server_stats(server, NFSIOS_DELAY);
586 fallthrough;
587 case -NFS4ERR_GRACE:
588 case -NFS4ERR_LAYOUTTRYLATER:
589 case -NFS4ERR_RECALLCONFLICT:
590 case -NFS4ERR_RETURNCONFLICT:
591 exception->delay = 1;
592 return 0;
593
594 case -NFS4ERR_RETRY_UNCACHED_REP:
595 case -NFS4ERR_OLD_STATEID:
596 exception->retry = 1;
597 break;
598 case -NFS4ERR_BADOWNER:
599 /* The following works around a Linux server bug! */
600 case -NFS4ERR_BADNAME:
601 if (server->caps & NFS_CAP_UIDGID_NOMAP) {
602 server->caps &= ~NFS_CAP_UIDGID_NOMAP;
603 exception->retry = 1;
604 printk(KERN_WARNING "NFS: v4 server %s "
605 "does not accept raw "
606 "uid/gids. "
607 "Reenabling the idmapper.\n",
608 server->nfs_client->cl_hostname);
609 }
610 }
611 /* We failed to handle the error */
612 return nfs4_map_errors(ret);
613 wait_on_recovery:
614 exception->recovering = 1;
615 return 0;
616 }
617
618 /*
619 * Track the number of NFS4ERR_DELAY related retransmissions and return
620 * EAGAIN if the 'softerr' mount option is set, and we've exceeded the limit
621 * set by 'nfs_delay_retrans'.
622 */
623 static int nfs4_exception_should_retrans(const struct nfs_server *server,
624 struct nfs4_exception *exception)
625 {
626 if (server->flags & NFS_MOUNT_SOFTERR && nfs_delay_retrans >= 0) {
627 if (exception->retrans++ >= (unsigned short)nfs_delay_retrans)
628 return -EAGAIN;
629 }
630 return 0;
631 }
632
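/*
 * Callers typically wrap an _nfs4_proc_*() call in a retry loop of the
 * form:
 *
 *	do {
 *		err = nfs4_handle_exception(server,
 *				_nfs4_proc_foo(server, ...),
 *				&exception);
 *	} while (exception.retry);
 *
 * where "_nfs4_proc_foo" stands in for any of the per-operation helpers
 * in this file.
 */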
633 /* This is the error handling routine for processes that are allowed
634 * to sleep.
635 */
636 int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
637 {
638 struct nfs_client *clp = server->nfs_client;
639 int ret;
640
641 ret = nfs4_do_handle_exception(server, errorcode, exception);
642 if (exception->delay) {
643 int ret2 = nfs4_exception_should_retrans(server, exception);
644 if (ret2 < 0) {
645 exception->retry = 0;
646 return ret2;
647 }
648 ret = nfs4_delay(&exception->timeout,
649 exception->interruptible);
650 goto out_retry;
651 }
652 if (exception->recovering) {
653 if (exception->task_is_privileged)
654 return -EDEADLOCK;
655 ret = nfs4_wait_clnt_recover(clp);
656 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
657 return -EIO;
658 goto out_retry;
659 }
660 return ret;
661 out_retry:
662 if (ret == 0)
663 exception->retry = 1;
664 return ret;
665 }
666
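/*
 * Asynchronous counterpart of nfs4_handle_exception(): instead of
 * sleeping, it parks the rpc_task on the client's recovery waitqueue or
 * schedules an rpc_delay() before the task is restarted.
 */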
667 static int
668 nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
669 int errorcode, struct nfs4_exception *exception)
670 {
671 struct nfs_client *clp = server->nfs_client;
672 int ret;
673
674 if ((task->tk_rpc_status == -ENETDOWN ||
675 task->tk_rpc_status == -ENETUNREACH) &&
676 task->tk_flags & RPC_TASK_NETUNREACH_FATAL) {
677 exception->delay = 0;
678 exception->recovering = 0;
679 exception->retry = 0;
680 return -EIO;
681 }
682
683 ret = nfs4_do_handle_exception(server, errorcode, exception);
684 if (exception->delay) {
685 int ret2 = nfs4_exception_should_retrans(server, exception);
686 if (ret2 < 0) {
687 exception->retry = 0;
688 return ret2;
689 }
690 rpc_delay(task, nfs4_update_delay(&exception->timeout));
691 goto out_retry;
692 }
693 if (exception->recovering) {
694 if (exception->task_is_privileged)
695 return -EDEADLOCK;
696 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
697 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
698 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
699 goto out_retry;
700 }
701 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
702 ret = -EIO;
703 return ret;
704 out_retry:
705 if (ret == 0) {
706 exception->retry = 1;
707 /*
708 * For NFS4ERR_MOVED, the client transport will need to
709 * be recomputed after migration recovery has completed.
710 */
711 if (errorcode == -NFS4ERR_MOVED)
712 rpc_task_release_transport(task);
713 }
714 return ret;
715 }
716
717 int
718 nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
719 struct nfs4_state *state, long *timeout)
720 {
721 struct nfs4_exception exception = {
722 .state = state,
723 };
724
725 if (task->tk_status >= 0)
726 return 0;
727 if (timeout)
728 exception.timeout = *timeout;
729 task->tk_status = nfs4_async_handle_exception(task, server,
730 task->tk_status,
731 &exception);
732 if (exception.delay && timeout)
733 *timeout = exception.timeout;
734 if (exception.retry)
735 return -EAGAIN;
736 return 0;
737 }
738
739 /*
740 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
741 * or 'false' otherwise.
742 */
743 static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
744 {
745 rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
746 return (flavor == RPC_AUTH_GSS_KRB5I) || (flavor == RPC_AUTH_GSS_KRB5P);
747 }
748
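/*
 * Record a successful lease-renewing operation. For NFSv4.1+ the lease
 * is renewed via the SEQUENCE op (see nfs41_sequence_process()), so
 * renew_lease() only acts for session-less (v4.0) mounts.
 */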
749 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
750 {
751 spin_lock(&clp->cl_lock);
752 if (time_before(clp->cl_last_renewal, timestamp))
753 clp->cl_last_renewal = timestamp;
754 spin_unlock(&clp->cl_lock);
755 }
756
757 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
758 {
759 struct nfs_client *clp = server->nfs_client;
760
761 if (!nfs4_has_session(clp))
762 do_renew_lease(clp, timestamp);
763 }
764
765 struct nfs4_call_sync_data {
766 const struct nfs_server *seq_server;
767 struct nfs4_sequence_args *seq_args;
768 struct nfs4_sequence_res *seq_res;
769 };
770
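/*
 * Initialise the SEQUENCE arguments: cache_reply asks the server to
 * cache the reply in its duplicate request cache, and privileged marks
 * requests that may run while the slot table is draining for recovery.
 */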
771 void nfs4_init_sequence(struct nfs4_sequence_args *args,
772 struct nfs4_sequence_res *res, int cache_reply,
773 int privileged)
774 {
775 args->sa_slot = NULL;
776 args->sa_cache_this = cache_reply;
777 args->sa_privileged = privileged;
778
779 res->sr_slot = NULL;
780 }
781
782 static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res)
783 {
784 struct nfs4_slot *slot = res->sr_slot;
785 struct nfs4_slot_table *tbl;
786
787 tbl = slot->table;
788 spin_lock(&tbl->slot_tbl_lock);
789 if (!nfs41_wake_and_assign_slot(tbl, slot))
790 nfs4_free_slot(tbl, slot);
791 spin_unlock(&tbl->slot_tbl_lock);
792
793 res->sr_slot = NULL;
794 }
795
796 static int nfs40_sequence_done(struct rpc_task *task,
797 struct nfs4_sequence_res *res)
798 {
799 if (res->sr_slot != NULL)
800 nfs40_sequence_free_slot(res);
801 return 1;
802 }
803
804 #if defined(CONFIG_NFS_V4_1)
805
806 static void nfs41_release_slot(struct nfs4_slot *slot)
807 {
808 struct nfs4_session *session;
809 struct nfs4_slot_table *tbl;
810 bool send_new_highest_used_slotid = false;
811
812 if (!slot)
813 return;
814 tbl = slot->table;
815 session = tbl->session;
816
817 /* Bump the slot sequence number */
818 if (slot->seq_done)
819 slot->seq_nr++;
820 slot->seq_done = 0;
821
822 spin_lock(&tbl->slot_tbl_lock);
823 /* Be nice to the server: try to ensure that the last transmitted
824 * value for highest_user_slotid <= target_highest_slotid
825 */
826 if (tbl->highest_used_slotid > tbl->target_highest_slotid)
827 send_new_highest_used_slotid = true;
828
829 if (nfs41_wake_and_assign_slot(tbl, slot)) {
830 send_new_highest_used_slotid = false;
831 goto out_unlock;
832 }
833 nfs4_free_slot(tbl, slot);
834
835 if (tbl->highest_used_slotid != NFS4_NO_SLOT)
836 send_new_highest_used_slotid = false;
837 out_unlock:
838 spin_unlock(&tbl->slot_tbl_lock);
839 if (send_new_highest_used_slotid)
840 nfs41_notify_server(session->clp);
841 if (waitqueue_active(&tbl->slot_waitq))
842 wake_up_all(&tbl->slot_waitq);
843 }
844
845 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
846 {
847 nfs41_release_slot(res->sr_slot);
848 res->sr_slot = NULL;
849 }
850
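/*
 * Track the highest sequence number we have transmitted and the last one
 * the server acknowledged; the (s32) subtraction keeps the comparison
 * safe across sequence number wraparound.
 */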
851 static void nfs4_slot_sequence_record_sent(struct nfs4_slot *slot,
852 u32 seqnr)
853 {
854 if ((s32)(seqnr - slot->seq_nr_highest_sent) > 0)
855 slot->seq_nr_highest_sent = seqnr;
856 }
857 static void nfs4_slot_sequence_acked(struct nfs4_slot *slot, u32 seqnr)
858 {
859 nfs4_slot_sequence_record_sent(slot, seqnr);
860 slot->seq_nr_last_acked = seqnr;
861 }
862
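/*
 * Send a lone, privileged SEQUENCE on this slot to resynchronise its
 * sequence number after an interrupted call (see the
 * NFS4ERR_SEQ_MISORDERED handling below).
 */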
863 static void nfs4_probe_sequence(struct nfs_client *client, const struct cred *cred,
864 struct nfs4_slot *slot)
865 {
866 struct rpc_task *task = _nfs41_proc_sequence(client, cred, slot, true);
867 if (!IS_ERR(task))
868 rpc_put_task_async(task);
869 }
870
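/*
 * Process the result of the SEQUENCE op: update the slot and the lease
 * on success, or arrange for a retry / session recovery on error.
 * Returns 0 if the RPC call has been restarted and the caller must not
 * use the result, 1 otherwise.
 */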
871 static int nfs41_sequence_process(struct rpc_task *task,
872 struct nfs4_sequence_res *res)
873 {
874 struct nfs4_session *session;
875 struct nfs4_slot *slot = res->sr_slot;
876 struct nfs_client *clp;
877 int status;
878 int ret = 1;
879
880 if (slot == NULL)
881 goto out_noaction;
882 /* don't increment the sequence number if the task wasn't sent */
883 if (!RPC_WAS_SENT(task) || slot->seq_done)
884 goto out;
885
886 session = slot->table->session;
887 clp = session->clp;
888
889 trace_nfs4_sequence_done(session, res);
890
891 status = res->sr_status;
892 if (task->tk_status == -NFS4ERR_DEADSESSION)
893 status = -NFS4ERR_DEADSESSION;
894
895 /* Check the SEQUENCE operation status */
896 switch (status) {
897 case 0:
898 /* Mark this sequence number as having been acked */
899 nfs4_slot_sequence_acked(slot, slot->seq_nr);
900 /* Update the slot's sequence and clientid lease timer */
901 slot->seq_done = 1;
902 do_renew_lease(clp, res->sr_timestamp);
903 /* Check sequence flags */
904 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags,
905 !!slot->privileged);
906 nfs41_update_target_slotid(slot->table, slot, res);
907 break;
908 case 1:
909 /*
910 * sr_status remains 1 if an RPC level error occurred.
911 * The server may or may not have processed the sequence
912 * operation.
913 */
914 nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
915 slot->seq_done = 1;
916 goto out;
917 case -NFS4ERR_DELAY:
918 /* The server detected a resend of the RPC call and
919 * returned NFS4ERR_DELAY as per Section 2.10.6.2
920 * of RFC5661.
921 */
922 dprintk("%s: slot=%u seq=%u: Operation in progress\n",
923 __func__,
924 slot->slot_nr,
925 slot->seq_nr);
926 goto out_retry;
927 case -NFS4ERR_RETRY_UNCACHED_REP:
928 case -NFS4ERR_SEQ_FALSE_RETRY:
929 /*
930 * The server thinks we tried to replay a request.
931 * Retry the call after bumping the sequence ID.
932 */
933 nfs4_slot_sequence_acked(slot, slot->seq_nr);
934 goto retry_new_seq;
935 case -NFS4ERR_BADSLOT:
936 /*
937 * The slot id we used was probably retired. Try again
938 * using a different slot id.
939 */
940 if (slot->slot_nr < slot->table->target_highest_slotid)
941 goto session_recover;
942 goto retry_nowait;
943 case -NFS4ERR_SEQ_MISORDERED:
944 nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
945 /*
946 * Were one or more calls using this slot interrupted?
947 * If the server never received the request, then our
948 * transmitted slot sequence number may be too high. However,
949 * if the server did receive the request then it might
950 * accidentally give us a reply with a mismatched operation.
951 * We can sort this out by sending a lone sequence operation
952 * to the server on the same slot.
953 */
954 if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) {
955 slot->seq_nr--;
956 if (task->tk_msg.rpc_proc != &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE]) {
957 nfs4_probe_sequence(clp, task->tk_msg.rpc_cred, slot);
958 res->sr_slot = NULL;
959 }
960 goto retry_nowait;
961 }
962 /*
963 * RFC5661:
964 * A retry might be sent while the original request is
965 * still in progress on the replier. The replier SHOULD
966 * deal with the issue by returning NFS4ERR_DELAY as the
967 * reply to SEQUENCE or CB_SEQUENCE operation, but
968 * implementations MAY return NFS4ERR_SEQ_MISORDERED.
969 *
970 * Restart the search after a delay.
971 */
972 slot->seq_nr = slot->seq_nr_highest_sent;
973 goto out_retry;
974 case -NFS4ERR_BADSESSION:
975 case -NFS4ERR_DEADSESSION:
976 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
977 goto session_recover;
978 default:
979 /* Just update the slot sequence no. */
980 slot->seq_done = 1;
981 }
982 out:
983 /* The session may be reset by one of the error handlers. */
984 dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
985 out_noaction:
986 return ret;
987 session_recover:
988 set_bit(NFS4_SLOT_TBL_DRAINING, &session->fc_slot_table.slot_tbl_state);
989 nfs4_schedule_session_recovery(session, status);
990 dprintk("%s ERROR: %d Reset session\n", __func__, status);
991 nfs41_sequence_free_slot(res);
992 goto out;
993 retry_new_seq:
994 ++slot->seq_nr;
995 retry_nowait:
996 if (rpc_restart_call_prepare(task)) {
997 nfs41_sequence_free_slot(res);
998 task->tk_status = 0;
999 ret = 0;
1000 }
1001 goto out;
1002 out_retry:
1003 if (!rpc_restart_call(task))
1004 goto out;
1005 rpc_delay(task, NFS4_POLL_RETRY_MAX);
1006 return 0;
1007 }
1008
1009 int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
1010 {
1011 if (!nfs41_sequence_process(task, res))
1012 return 0;
1013 if (res->sr_slot != NULL)
1014 nfs41_sequence_free_slot(res);
1015 return 1;
1016
1017 }
1018 EXPORT_SYMBOL_GPL(nfs41_sequence_done);
1019
1020 static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
1021 {
1022 if (res->sr_slot == NULL)
1023 return 1;
1024 if (res->sr_slot->table->session != NULL)
1025 return nfs41_sequence_process(task, res);
1026 return nfs40_sequence_done(task, res);
1027 }
1028
1029 static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
1030 {
1031 if (res->sr_slot != NULL) {
1032 if (res->sr_slot->table->session != NULL)
1033 nfs41_sequence_free_slot(res);
1034 else
1035 nfs40_sequence_free_slot(res);
1036 }
1037 }
1038
1039 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
1040 {
1041 if (res->sr_slot == NULL)
1042 return 1;
1043 if (!res->sr_slot->table->session)
1044 return nfs40_sequence_done(task, res);
1045 return nfs41_sequence_done(task, res);
1046 }
1047 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
1048
1049 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
1050 {
1051 struct nfs4_call_sync_data *data = calldata;
1052
1053 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
1054
1055 nfs4_setup_sequence(data->seq_server->nfs_client,
1056 data->seq_args, data->seq_res, task);
1057 }
1058
1059 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
1060 {
1061 struct nfs4_call_sync_data *data = calldata;
1062
1063 nfs41_sequence_done(task, data->seq_res);
1064 }
1065
1066 static const struct rpc_call_ops nfs41_call_sync_ops = {
1067 .rpc_call_prepare = nfs41_call_sync_prepare,
1068 .rpc_call_done = nfs41_call_sync_done,
1069 };
1070
1071 #else /* !CONFIG_NFS_V4_1 */
1072
1073 static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
1074 {
1075 return nfs40_sequence_done(task, res);
1076 }
1077
1078 static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
1079 {
1080 if (res->sr_slot != NULL)
1081 nfs40_sequence_free_slot(res);
1082 }
1083
1084 int nfs4_sequence_done(struct rpc_task *task,
1085 struct nfs4_sequence_res *res)
1086 {
1087 return nfs40_sequence_done(task, res);
1088 }
1089 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
1090
1091 #endif /* !CONFIG_NFS_V4_1 */
1092
1093 static void nfs41_sequence_res_init(struct nfs4_sequence_res *res)
1094 {
1095 res->sr_timestamp = jiffies;
1096 res->sr_status_flags = 0;
1097 res->sr_status = 1;
1098 }
1099
1100 static
1101 void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
1102 struct nfs4_sequence_res *res,
1103 struct nfs4_slot *slot)
1104 {
1105 if (!slot)
1106 return;
1107 slot->privileged = args->sa_privileged ? 1 : 0;
1108 args->sa_slot = slot;
1109
1110 res->sr_slot = slot;
1111 }
1112
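/*
 * Allocate a slot from the session's fore-channel slot table (or the
 * client's NFSv4.0 slot table) and attach it to the request. If the
 * table is draining or no slot is free, the task is put to sleep and
 * -EAGAIN is returned so that it will be retried later.
 */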
1113 int nfs4_setup_sequence(struct nfs_client *client,
1114 struct nfs4_sequence_args *args,
1115 struct nfs4_sequence_res *res,
1116 struct rpc_task *task)
1117 {
1118 struct nfs4_session *session = nfs4_get_session(client);
1119 struct nfs4_slot_table *tbl = client->cl_slot_tbl;
1120 struct nfs4_slot *slot;
1121
1122 /* slot already allocated? */
1123 if (res->sr_slot != NULL)
1124 goto out_start;
1125
1126 if (session)
1127 tbl = &session->fc_slot_table;
1128
1129 spin_lock(&tbl->slot_tbl_lock);
1130 /* The state manager will wait until the slot table is empty */
1131 if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
1132 goto out_sleep;
1133
1134 slot = nfs4_alloc_slot(tbl);
1135 if (IS_ERR(slot)) {
1136 if (slot == ERR_PTR(-ENOMEM))
1137 goto out_sleep_timeout;
1138 goto out_sleep;
1139 }
1140 spin_unlock(&tbl->slot_tbl_lock);
1141
1142 nfs4_sequence_attach_slot(args, res, slot);
1143
1144 trace_nfs4_setup_sequence(session, args);
1145 out_start:
1146 nfs41_sequence_res_init(res);
1147 rpc_call_start(task);
1148 return 0;
1149 out_sleep_timeout:
1150 /* Try again in 1/4 second */
1151 if (args->sa_privileged)
1152 rpc_sleep_on_priority_timeout(&tbl->slot_tbl_waitq, task,
1153 jiffies + (HZ >> 2), RPC_PRIORITY_PRIVILEGED);
1154 else
1155 rpc_sleep_on_timeout(&tbl->slot_tbl_waitq, task,
1156 NULL, jiffies + (HZ >> 2));
1157 spin_unlock(&tbl->slot_tbl_lock);
1158 return -EAGAIN;
1159 out_sleep:
1160 if (args->sa_privileged)
1161 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
1162 RPC_PRIORITY_PRIVILEGED);
1163 else
1164 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
1165 spin_unlock(&tbl->slot_tbl_lock);
1166 return -EAGAIN;
1167 }
1168 EXPORT_SYMBOL_GPL(nfs4_setup_sequence);
1169
1170 static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
1171 {
1172 struct nfs4_call_sync_data *data = calldata;
1173 nfs4_setup_sequence(data->seq_server->nfs_client,
1174 data->seq_args, data->seq_res, task);
1175 }
1176
1177 static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
1178 {
1179 struct nfs4_call_sync_data *data = calldata;
1180 nfs4_sequence_done(task, data->seq_res);
1181 }
1182
1183 static const struct rpc_call_ops nfs40_call_sync_ops = {
1184 .rpc_call_prepare = nfs40_call_sync_prepare,
1185 .rpc_call_done = nfs40_call_sync_done,
1186 };
1187
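/* Run an rpc_task synchronously and return its final tk_status */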
1188 static int nfs4_call_sync_custom(struct rpc_task_setup *task_setup)
1189 {
1190 int ret;
1191 struct rpc_task *task;
1192
1193 task = rpc_run_task(task_setup);
1194 if (IS_ERR(task))
1195 return PTR_ERR(task);
1196
1197 ret = task->tk_status;
1198 rpc_put_task(task);
1199 return ret;
1200 }
1201
1202 static int nfs4_do_call_sync(struct rpc_clnt *clnt,
1203 struct nfs_server *server,
1204 struct rpc_message *msg,
1205 struct nfs4_sequence_args *args,
1206 struct nfs4_sequence_res *res,
1207 unsigned short task_flags)
1208 {
1209 struct nfs_client *clp = server->nfs_client;
1210 struct nfs4_call_sync_data data = {
1211 .seq_server = server,
1212 .seq_args = args,
1213 .seq_res = res,
1214 };
1215 struct rpc_task_setup task_setup = {
1216 .rpc_client = clnt,
1217 .rpc_message = msg,
1218 .callback_ops = clp->cl_mvops->call_sync_ops,
1219 .callback_data = &data,
1220 .flags = task_flags,
1221 };
1222
1223 return nfs4_call_sync_custom(&task_setup);
1224 }
1225
1226 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
1227 struct nfs_server *server,
1228 struct rpc_message *msg,
1229 struct nfs4_sequence_args *args,
1230 struct nfs4_sequence_res *res)
1231 {
1232 unsigned short task_flags = 0;
1233
1234 if (server->caps & NFS_CAP_MOVEABLE)
1235 task_flags = RPC_TASK_MOVEABLE;
1236 return nfs4_do_call_sync(clnt, server, msg, args, res, task_flags);
1237 }
1238
1239
1240 int nfs4_call_sync(struct rpc_clnt *clnt,
1241 struct nfs_server *server,
1242 struct rpc_message *msg,
1243 struct nfs4_sequence_args *args,
1244 struct nfs4_sequence_res *res,
1245 int cache_reply)
1246 {
1247 nfs4_init_sequence(args, res, cache_reply, 0);
1248 return nfs4_call_sync_sequence(clnt, server, msg, args, res);
1249 }
1250
1251 static void
1252 nfs4_inc_nlink_locked(struct inode *inode)
1253 {
1254 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
1255 NFS_INO_INVALID_CTIME |
1256 NFS_INO_INVALID_NLINK);
1257 inc_nlink(inode);
1258 }
1259
1260 static void
1261 nfs4_inc_nlink(struct inode *inode)
1262 {
1263 spin_lock(&inode->i_lock);
1264 nfs4_inc_nlink_locked(inode);
1265 spin_unlock(&inode->i_lock);
1266 }
1267
1268 static void
1269 nfs4_dec_nlink_locked(struct inode *inode)
1270 {
1271 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
1272 NFS_INO_INVALID_CTIME |
1273 NFS_INO_INVALID_NLINK);
1274 drop_nlink(inode);
1275 }
1276
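/*
 * Apply the change_info4 returned by a directory-modifying operation.
 * If the change was not atomic, or the "before" value does not match
 * our cached change attribute, someone else modified the directory and
 * the relevant caches are invalidated. Caller holds inode->i_lock.
 */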
1277 static void
1278 nfs4_update_changeattr_locked(struct inode *inode,
1279 struct nfs4_change_info *cinfo,
1280 unsigned long timestamp, unsigned long cache_validity)
1281 {
1282 struct nfs_inode *nfsi = NFS_I(inode);
1283 u64 change_attr = inode_peek_iversion_raw(inode);
1284
1285 if (!nfs_have_delegated_mtime(inode))
1286 cache_validity |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME;
1287 if (S_ISDIR(inode->i_mode))
1288 cache_validity |= NFS_INO_INVALID_DATA;
1289
1290 switch (NFS_SERVER(inode)->change_attr_type) {
1291 case NFS4_CHANGE_TYPE_IS_UNDEFINED:
1292 if (cinfo->after == change_attr)
1293 goto out;
1294 break;
1295 default:
1296 if ((s64)(change_attr - cinfo->after) >= 0)
1297 goto out;
1298 }
1299
1300 inode_set_iversion_raw(inode, cinfo->after);
1301 if (!cinfo->atomic || cinfo->before != change_attr) {
1302 if (S_ISDIR(inode->i_mode))
1303 nfs_force_lookup_revalidate(inode);
1304
1305 if (!nfs_have_delegated_attributes(inode))
1306 cache_validity |=
1307 NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
1308 NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER |
1309 NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
1310 NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR;
1311 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
1312 }
1313 nfsi->attrtimeo_timestamp = jiffies;
1314 nfsi->read_cache_jiffies = timestamp;
1315 nfsi->attr_gencount = nfs_inc_attr_generation_counter();
1316 nfsi->cache_validity &= ~NFS_INO_INVALID_CHANGE;
1317 out:
1318 nfs_set_cache_invalid(inode, cache_validity);
1319 }
1320
1321 void
1322 nfs4_update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
1323 unsigned long timestamp, unsigned long cache_validity)
1324 {
1325 spin_lock(&dir->i_lock);
1326 nfs4_update_changeattr_locked(dir, cinfo, timestamp, cache_validity);
1327 spin_unlock(&dir->i_lock);
1328 }
1329
1330 struct nfs4_open_createattrs {
1331 struct nfs4_label *label;
1332 struct iattr *sattr;
1333 const __u32 verf[2];
1334 };
1335
1336 static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
1337 int err, struct nfs4_exception *exception)
1338 {
1339 if (err != -EINVAL)
1340 return false;
1341 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1342 return false;
1343 server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
1344 exception->retry = 1;
1345 return true;
1346 }
1347
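/*
 * NFSv4 has no EXECUTE share access: an open for execute is sent as a
 * READ open, while the ACCESS check still uses the full mode.
 */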
1348 static fmode_t _nfs4_ctx_to_accessmode(const struct nfs_open_context *ctx)
1349 {
1350 return ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
1351 }
1352
1353 static fmode_t _nfs4_ctx_to_openmode(const struct nfs_open_context *ctx)
1354 {
1355 fmode_t ret = ctx->mode & (FMODE_READ|FMODE_WRITE);
1356
1357 return (ctx->mode & FMODE_EXEC) ? FMODE_READ | ret : ret;
1358 }
1359
1360 static u32
1361 nfs4_fmode_to_share_access(fmode_t fmode)
1362 {
1363 u32 res = 0;
1364
1365 switch (fmode & (FMODE_READ | FMODE_WRITE)) {
1366 case FMODE_READ:
1367 res = NFS4_SHARE_ACCESS_READ;
1368 break;
1369 case FMODE_WRITE:
1370 res = NFS4_SHARE_ACCESS_WRITE;
1371 break;
1372 case FMODE_READ|FMODE_WRITE:
1373 res = NFS4_SHARE_ACCESS_BOTH;
1374 }
1375 return res;
1376 }
1377
1378 static u32
1379 nfs4_map_atomic_open_share(struct nfs_server *server,
1380 fmode_t fmode, int openflags)
1381 {
1382 u32 res = nfs4_fmode_to_share_access(fmode);
1383
1384 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1385 goto out;
1386 /* Want no delegation if we're using O_DIRECT */
1387 if (openflags & O_DIRECT) {
1388 res |= NFS4_SHARE_WANT_NO_DELEG;
1389 goto out;
1390 }
1391 /* res |= NFS4_SHARE_WANT_NO_PREFERENCE; */
1392 if (server->caps & NFS_CAP_DELEGTIME)
1393 res |= NFS4_SHARE_WANT_DELEG_TIMESTAMPS;
1394 if (server->caps & NFS_CAP_OPEN_XOR)
1395 res |= NFS4_SHARE_WANT_OPEN_XOR_DELEGATION;
1396 out:
1397 return res;
1398 }
1399
1400 static enum open_claim_type4
1401 nfs4_map_atomic_open_claim(struct nfs_server *server,
1402 enum open_claim_type4 claim)
1403 {
1404 if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
1405 return claim;
1406 switch (claim) {
1407 default:
1408 return claim;
1409 case NFS4_OPEN_CLAIM_FH:
1410 return NFS4_OPEN_CLAIM_NULL;
1411 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1412 return NFS4_OPEN_CLAIM_DELEGATE_CUR;
1413 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1414 return NFS4_OPEN_CLAIM_DELEGATE_PREV;
1415 }
1416 }
1417
1418 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
1419 {
1420 p->o_res.f_attr = &p->f_attr;
1421 p->o_res.seqid = p->o_arg.seqid;
1422 p->c_res.seqid = p->c_arg.seqid;
1423 p->o_res.server = p->o_arg.server;
1424 p->o_res.access_request = p->o_arg.access;
1425 nfs_fattr_init(&p->f_attr);
1426 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
1427 }
1428
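/*
 * Allocate and initialise the OPEN call data, taking references on the
 * dentry, its parent, the superblock and the state owner. Released via
 * nfs4_opendata_put() once the last kref is dropped.
 */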
1429 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1430 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
1431 const struct nfs4_open_createattrs *c,
1432 enum open_claim_type4 claim,
1433 gfp_t gfp_mask)
1434 {
1435 struct dentry *parent = dget_parent(dentry);
1436 struct inode *dir = d_inode(parent);
1437 struct nfs_server *server = NFS_SERVER(dir);
1438 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
1439 struct nfs4_label *label = (c != NULL) ? c->label : NULL;
1440 struct nfs4_opendata *p;
1441
1442 p = kzalloc(sizeof(*p), gfp_mask);
1443 if (p == NULL)
1444 goto err;
1445
1446 p->f_attr.label = nfs4_label_alloc(server, gfp_mask);
1447 if (IS_ERR(p->f_attr.label))
1448 goto err_free_p;
1449
1450 p->a_label = nfs4_label_alloc(server, gfp_mask);
1451 if (IS_ERR(p->a_label))
1452 goto err_free_f;
1453
1454 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
1455 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
1456 if (IS_ERR(p->o_arg.seqid))
1457 goto err_free_label;
1458 nfs_sb_active(dentry->d_sb);
1459 p->dentry = dget(dentry);
1460 p->dir = parent;
1461 p->owner = sp;
1462 atomic_inc(&sp->so_count);
1463 p->o_arg.open_flags = flags;
1464 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
1465 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
1466 p->o_arg.share_access = nfs4_map_atomic_open_share(server,
1467 fmode, flags);
1468 if (flags & O_CREAT) {
1469 p->o_arg.umask = current_umask();
1470 p->o_arg.label = nfs4_label_copy(p->a_label, label);
1471 if (c->sattr != NULL && c->sattr->ia_valid != 0) {
1472 p->o_arg.u.attrs = &p->attrs;
1473 memcpy(&p->attrs, c->sattr, sizeof(p->attrs));
1474
1475 memcpy(p->o_arg.u.verifier.data, c->verf,
1476 sizeof(p->o_arg.u.verifier.data));
1477 }
1478 }
1479 /* ask the server to check for all possible rights, since the results
1480 * are cached */
1481 switch (p->o_arg.claim) {
1482 default:
1483 break;
1484 case NFS4_OPEN_CLAIM_NULL:
1485 case NFS4_OPEN_CLAIM_FH:
1486 p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
1487 NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE |
1488 NFS4_ACCESS_EXECUTE |
1489 nfs_access_xattr_mask(server);
1490 }
1491 p->o_arg.clientid = server->nfs_client->cl_clientid;
1492 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
1493 p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
1494 p->o_arg.name = &dentry->d_name;
1495 p->o_arg.server = server;
1496 p->o_arg.bitmask = nfs4_bitmask(server, label);
1497 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
1498 switch (p->o_arg.claim) {
1499 case NFS4_OPEN_CLAIM_NULL:
1500 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1501 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
1502 p->o_arg.fh = NFS_FH(dir);
1503 break;
1504 case NFS4_OPEN_CLAIM_PREVIOUS:
1505 case NFS4_OPEN_CLAIM_FH:
1506 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1507 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1508 p->o_arg.fh = NFS_FH(d_inode(dentry));
1509 }
1510 p->c_arg.fh = &p->o_res.fh;
1511 p->c_arg.stateid = &p->o_res.stateid;
1512 p->c_arg.seqid = p->o_arg.seqid;
1513 nfs4_init_opendata_res(p);
1514 kref_init(&p->kref);
1515 return p;
1516
1517 err_free_label:
1518 nfs4_label_free(p->a_label);
1519 err_free_f:
1520 nfs4_label_free(p->f_attr.label);
1521 err_free_p:
1522 kfree(p);
1523 err:
1524 dput(parent);
1525 return NULL;
1526 }
1527
1528 static void nfs4_opendata_free(struct kref *kref)
1529 {
1530 struct nfs4_opendata *p = container_of(kref,
1531 struct nfs4_opendata, kref);
1532 struct super_block *sb = p->dentry->d_sb;
1533
1534 nfs4_lgopen_release(p->lgp);
1535 nfs_free_seqid(p->o_arg.seqid);
1536 nfs4_sequence_free_slot(&p->o_res.seq_res);
1537 if (p->state != NULL)
1538 nfs4_put_open_state(p->state);
1539 nfs4_put_state_owner(p->owner);
1540
1541 nfs4_label_free(p->a_label);
1542 nfs4_label_free(p->f_attr.label);
1543
1544 dput(p->dir);
1545 dput(p->dentry);
1546 nfs_sb_deactive(sb);
1547 nfs_fattr_free_names(&p->f_attr);
1548 kfree(p->f_attr.mdsthreshold);
1549 kfree(p);
1550 }
1551
1552 static void nfs4_opendata_put(struct nfs4_opendata *p)
1553 {
1554 if (p != NULL)
1555 kref_put(&p->kref, nfs4_opendata_free);
1556 }
1557
1558 static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
1559 fmode_t fmode)
1560 {
1561 switch(fmode & (FMODE_READ|FMODE_WRITE)) {
1562 case FMODE_READ|FMODE_WRITE:
1563 return state->n_rdwr != 0;
1564 case FMODE_WRITE:
1565 return state->n_wronly != 0;
1566 case FMODE_READ:
1567 return state->n_rdonly != 0;
1568 }
1569 WARN_ON_ONCE(1);
1570 return false;
1571 }
1572
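/*
 * Can this open be satisfied from open state we already hold, without
 * going to the server? Never for O_EXCL/O_TRUNC, and never for regular
 * CLAIM_NULL/CLAIM_FH opens, which must always be sent to the server.
 */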
1573 static int can_open_cached(struct nfs4_state *state, fmode_t mode,
1574 int open_mode, enum open_claim_type4 claim)
1575 {
1576 int ret = 0;
1577
1578 if (open_mode & (O_EXCL|O_TRUNC))
1579 goto out;
1580 switch (claim) {
1581 case NFS4_OPEN_CLAIM_NULL:
1582 case NFS4_OPEN_CLAIM_FH:
1583 goto out;
1584 default:
1585 break;
1586 }
1587 switch (mode & (FMODE_READ|FMODE_WRITE)) {
1588 case FMODE_READ:
1589 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
1590 && state->n_rdonly != 0;
1591 break;
1592 case FMODE_WRITE:
1593 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
1594 && state->n_wronly != 0;
1595 break;
1596 case FMODE_READ|FMODE_WRITE:
1597 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
1598 && state->n_rdwr != 0;
1599 }
1600 out:
1601 return ret;
1602 }
1603
1604 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
1605 enum open_claim_type4 claim)
1606 {
1607 if (delegation == NULL)
1608 return 0;
1609 if ((delegation->type & fmode) != fmode)
1610 return 0;
1611 switch (claim) {
1612 case NFS4_OPEN_CLAIM_NULL:
1613 case NFS4_OPEN_CLAIM_FH:
1614 break;
1615 case NFS4_OPEN_CLAIM_PREVIOUS:
1616 if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
1617 break;
1618 fallthrough;
1619 default:
1620 return 0;
1621 }
1622 nfs_mark_delegation_referenced(delegation);
1623 return 1;
1624 }
1625
1626 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
1627 {
1628 switch (fmode) {
1629 case FMODE_WRITE:
1630 state->n_wronly++;
1631 break;
1632 case FMODE_READ:
1633 state->n_rdonly++;
1634 break;
1635 case FMODE_READ|FMODE_WRITE:
1636 state->n_rdwr++;
1637 }
1638 nfs4_state_set_mode_locked(state, state->state | fmode);
1639 }
1640
1641 #ifdef CONFIG_NFS_V4_1
1642 static bool nfs_open_stateid_recover_openmode(struct nfs4_state *state)
1643 {
1644 if (state->n_rdonly && !test_bit(NFS_O_RDONLY_STATE, &state->flags))
1645 return true;
1646 if (state->n_wronly && !test_bit(NFS_O_WRONLY_STATE, &state->flags))
1647 return true;
1648 if (state->n_rdwr && !test_bit(NFS_O_RDWR_STATE, &state->flags))
1649 return true;
1650 return false;
1651 }
1652 #endif /* CONFIG_NFS_V4_1 */
1653
1654 static void nfs_state_log_update_open_stateid(struct nfs4_state *state)
1655 {
1656 if (test_and_clear_bit(NFS_STATE_CHANGE_WAIT, &state->flags))
1657 wake_up_all(&state->waitq);
1658 }
1659
1660 static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
1661 {
1662 struct nfs_client *clp = state->owner->so_server->nfs_client;
1663 bool need_recover = false;
1664
1665 if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
1666 need_recover = true;
1667 if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
1668 need_recover = true;
1669 if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
1670 need_recover = true;
1671 if (need_recover)
1672 nfs4_state_mark_reclaim_nograce(clp, state);
1673 }
1674
1675 /*
1676 * Check whether the caller may update the open stateid
1677 * to the value passed in by stateid.
1678 *
1679 * Note: This function relies heavily on the server implementing
1680 * RFC7530 Section 9.1.4.2, and RFC5661 Section 8.2.2
1681 * correctly.
1682 * i.e. The stateid seqids have to be initialised to 1, and
1683 * are then incremented on every state transition.
1684 */
1685 static bool nfs_stateid_is_sequential(struct nfs4_state *state,
1686 const nfs4_stateid *stateid)
1687 {
1688 if (test_bit(NFS_OPEN_STATE, &state->flags)) {
1689 /* The common case - we're updating to a new sequence number */
1690 if (nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1691 if (nfs4_stateid_is_next(&state->open_stateid, stateid))
1692 return true;
1693 return false;
1694 }
1695 /* The server returned a new stateid */
1696 }
1697 /* This is the first OPEN in this generation */
1698 if (stateid->seqid == cpu_to_be32(1))
1699 return true;
1700 return false;
1701 }
1702
1703 static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
1704 {
1705 if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
1706 return;
1707 if (state->n_wronly)
1708 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1709 if (state->n_rdonly)
1710 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1711 if (state->n_rdwr)
1712 set_bit(NFS_O_RDWR_STATE, &state->flags);
1713 set_bit(NFS_OPEN_STATE, &state->flags);
1714 }
1715
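/*
 * Apply the result of a CLOSE or OPEN_DOWNGRADE: drop the share-mode
 * flags that are no longer covered by fmode.  If the reply's stateid
 * matches the current open stateid but does not advance it, an
 * interleaved OPEN has already superseded the downgrade, so the flags
 * are rebuilt from the open counters instead.
 */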
1716 static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
1717 nfs4_stateid *stateid, fmode_t fmode)
1718 {
1719 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1720 switch (fmode & (FMODE_READ|FMODE_WRITE)) {
1721 case FMODE_WRITE:
1722 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1723 break;
1724 case FMODE_READ:
1725 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1726 break;
1727 case 0:
1728 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1729 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1730 clear_bit(NFS_OPEN_STATE, &state->flags);
1731 }
1732 if (stateid == NULL)
1733 return;
1734 /* Handle OPEN+OPEN_DOWNGRADE races */
1735 if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
1736 !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
1737 nfs_resync_open_stateid_locked(state);
1738 goto out;
1739 }
1740 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1741 nfs4_stateid_copy(&state->stateid, stateid);
1742 nfs4_stateid_copy(&state->open_stateid, stateid);
1743 trace_nfs4_open_stateid_update(state->inode, stateid, 0);
1744 out:
1745 nfs_state_log_update_open_stateid(state);
1746 }
1747
1748 static void nfs_clear_open_stateid(struct nfs4_state *state,
1749 nfs4_stateid *arg_stateid,
1750 nfs4_stateid *stateid, fmode_t fmode)
1751 {
1752 write_seqlock(&state->seqlock);
1753 /* Ignore if the CLOSE argument doesn't match the current stateid */
1754 if (nfs4_state_match_open_stateid_other(state, arg_stateid))
1755 nfs_clear_open_stateid_locked(state, stateid, fmode);
1756 write_sequnlock(&state->seqlock);
1757 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1758 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1759 }
1760
1761 static void nfs_set_open_stateid_locked(struct nfs4_state *state,
1762 const nfs4_stateid *stateid, nfs4_stateid *freeme)
1763 __must_hold(&state->owner->so_lock)
1764 __must_hold(&state->seqlock)
1765 __must_hold(RCU)
1766
1767 {
1768 DEFINE_WAIT(wait);
1769 int status = 0;
1770 for (;;) {
1771
1772 if (nfs_stateid_is_sequential(state, stateid))
1773 break;
1774
1775 if (status)
1776 break;
1777 /* Rely on seqids for serialisation with NFSv4.0 */
1778 if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client))
1779 break;
1780
1781 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
1782 prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
1783 /*
1784 * Ensure we process the state changes in the same order
1785 * in which the server processed them by delaying the
1786 * update of the stateid until we are in sequence.
1787 */
1788 write_sequnlock(&state->seqlock);
1789 spin_unlock(&state->owner->so_lock);
1790 rcu_read_unlock();
1791 trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);
1792
1793 if (!fatal_signal_pending(current) &&
1794 !nfs_current_task_exiting()) {
1795 if (schedule_timeout(5*HZ) == 0)
1796 status = -EAGAIN;
1797 else
1798 status = 0;
1799 } else
1800 status = -EINTR;
1801 finish_wait(&state->waitq, &wait);
1802 rcu_read_lock();
1803 spin_lock(&state->owner->so_lock);
1804 write_seqlock(&state->seqlock);
1805 }
1806
1807 if (test_bit(NFS_OPEN_STATE, &state->flags) &&
1808 !nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1809 nfs4_stateid_copy(freeme, &state->open_stateid);
1810 nfs_test_and_clear_all_open_stateid(state);
1811 }
1812
1813 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1814 nfs4_stateid_copy(&state->stateid, stateid);
1815 nfs4_stateid_copy(&state->open_stateid, stateid);
1816 trace_nfs4_open_stateid_update(state->inode, stateid, status);
1817 nfs_state_log_update_open_stateid(state);
1818 }
1819
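/*
 * Record the stateid returned by a successful OPEN: serialise the update
 * under the seqlock via nfs_set_open_stateid_locked(), then set the
 * share-mode flag matching fmode together with NFS_OPEN_STATE.
 */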
1820 static void nfs_state_set_open_stateid(struct nfs4_state *state,
1821 const nfs4_stateid *open_stateid,
1822 fmode_t fmode,
1823 nfs4_stateid *freeme)
1824 {
1825 /*
1826 * Protect the call to nfs4_state_set_mode_locked and
1827 * serialise the stateid update
1828 */
1829 write_seqlock(&state->seqlock);
1830 nfs_set_open_stateid_locked(state, open_stateid, freeme);
1831 switch (fmode) {
1832 case FMODE_READ:
1833 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1834 break;
1835 case FMODE_WRITE:
1836 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1837 break;
1838 case FMODE_READ|FMODE_WRITE:
1839 set_bit(NFS_O_RDWR_STATE, &state->flags);
1840 }
1841 set_bit(NFS_OPEN_STATE, &state->flags);
1842 write_sequnlock(&state->seqlock);
1843 }
1844
1845 static void nfs_state_clear_open_state_flags(struct nfs4_state *state)
1846 {
1847 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1848 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1849 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1850 clear_bit(NFS_OPEN_STATE, &state->flags);
1851 }
1852
1853 static void nfs_state_set_delegation(struct nfs4_state *state,
1854 const nfs4_stateid *deleg_stateid,
1855 fmode_t fmode)
1856 {
1857 /*
1858 * Protect the call to nfs4_state_set_mode_locked and
1859 * serialise the stateid update
1860 */
1861 write_seqlock(&state->seqlock);
1862 nfs4_stateid_copy(&state->stateid, deleg_stateid);
1863 set_bit(NFS_DELEGATED_STATE, &state->flags);
1864 write_sequnlock(&state->seqlock);
1865 }
1866
1867 static void nfs_state_clear_delegation(struct nfs4_state *state)
1868 {
1869 write_seqlock(&state->seqlock);
1870 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1871 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1872 write_sequnlock(&state->seqlock);
1873 }
1874
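/*
 * Returns 1 if the nfs4_state was updated, either from the supplied open
 * stateid or from a valid delegation covering fmode, and 0 otherwise.
 * On success the per-mode open counters are bumped; a superseded open
 * stateid (if any) is handed to nfs4_test_and_free_stateid() once the
 * locks have been dropped.
 */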
1875 int update_open_stateid(struct nfs4_state *state,
1876 const nfs4_stateid *open_stateid,
1877 const nfs4_stateid *delegation,
1878 fmode_t fmode)
1879 {
1880 struct nfs_server *server = NFS_SERVER(state->inode);
1881 struct nfs_client *clp = server->nfs_client;
1882 struct nfs_inode *nfsi = NFS_I(state->inode);
1883 struct nfs_delegation *deleg_cur;
1884 nfs4_stateid freeme = { };
1885 int ret = 0;
1886
1887 fmode &= (FMODE_READ|FMODE_WRITE);
1888
1889 rcu_read_lock();
1890 spin_lock(&state->owner->so_lock);
1891 if (open_stateid != NULL) {
1892 nfs_state_set_open_stateid(state, open_stateid, fmode, &freeme);
1893 ret = 1;
1894 }
1895
1896 deleg_cur = nfs4_get_valid_delegation(state->inode);
1897 if (deleg_cur == NULL)
1898 goto no_delegation;
1899
1900 spin_lock(&deleg_cur->lock);
1901 if (rcu_dereference(nfsi->delegation) != deleg_cur ||
1902 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
1903 (deleg_cur->type & fmode) != fmode)
1904 goto no_delegation_unlock;
1905
1906 if (delegation == NULL)
1907 delegation = &deleg_cur->stateid;
1908 else if (!nfs4_stateid_match_other(&deleg_cur->stateid, delegation))
1909 goto no_delegation_unlock;
1910
1911 nfs_mark_delegation_referenced(deleg_cur);
1912 nfs_state_set_delegation(state, &deleg_cur->stateid, fmode);
1913 ret = 1;
1914 no_delegation_unlock:
1915 spin_unlock(&deleg_cur->lock);
1916 no_delegation:
1917 if (ret)
1918 update_open_stateflags(state, fmode);
1919 spin_unlock(&state->owner->so_lock);
1920 rcu_read_unlock();
1921
1922 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1923 nfs4_schedule_state_manager(clp);
1924 if (freeme.type != 0)
1925 nfs4_test_and_free_stateid(server, &freeme,
1926 state->owner->so_cred);
1927
1928 return ret;
1929 }
1930
1931 static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
1932 const nfs4_stateid *stateid)
1933 {
1934 struct nfs4_state *state = lsp->ls_state;
1935 bool ret = false;
1936
1937 spin_lock(&state->state_lock);
1938 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
1939 goto out_noupdate;
1940 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
1941 goto out_noupdate;
1942 nfs4_stateid_copy(&lsp->ls_stateid, stateid);
1943 ret = true;
1944 out_noupdate:
1945 spin_unlock(&state->state_lock);
1946 return ret;
1947 }
1948
1949 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1950 {
1951 struct nfs_delegation *delegation;
1952
1953 fmode &= FMODE_READ|FMODE_WRITE;
1954 rcu_read_lock();
1955 delegation = nfs4_get_valid_delegation(inode);
1956 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1957 rcu_read_unlock();
1958 return;
1959 }
1960 rcu_read_unlock();
1961 nfs4_inode_return_delegation(inode);
1962 }
1963
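/*
 * Try to satisfy the OPEN without going to the server, using either an
 * already established open of a compatible mode or a delegation that
 * covers the requested fmode.  Returns a referenced nfs4_state on
 * success, ERR_PTR(-EAGAIN) if an OPEN call is still required, or
 * another ERR_PTR value if the permission check fails.
 */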
1964 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1965 {
1966 struct nfs4_state *state = opendata->state;
1967 struct nfs_delegation *delegation;
1968 int open_mode = opendata->o_arg.open_flags;
1969 fmode_t fmode = opendata->o_arg.fmode;
1970 enum open_claim_type4 claim = opendata->o_arg.claim;
1971 nfs4_stateid stateid;
1972 int ret = -EAGAIN;
1973
1974 for (;;) {
1975 spin_lock(&state->owner->so_lock);
1976 if (can_open_cached(state, fmode, open_mode, claim)) {
1977 update_open_stateflags(state, fmode);
1978 spin_unlock(&state->owner->so_lock);
1979 goto out_return_state;
1980 }
1981 spin_unlock(&state->owner->so_lock);
1982 rcu_read_lock();
1983 delegation = nfs4_get_valid_delegation(state->inode);
1984 if (!can_open_delegated(delegation, fmode, claim)) {
1985 rcu_read_unlock();
1986 break;
1987 }
1988 /* Save the delegation */
1989 nfs4_stateid_copy(&stateid, &delegation->stateid);
1990 rcu_read_unlock();
1991 nfs_release_seqid(opendata->o_arg.seqid);
1992 if (!opendata->is_recover) {
1993 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1994 if (ret != 0)
1995 goto out;
1996 }
1997 ret = -EAGAIN;
1998
1999 /* Try to update the stateid using the delegation */
2000 if (update_open_stateid(state, NULL, &stateid, fmode))
2001 goto out_return_state;
2002 }
2003 out:
2004 return ERR_PTR(ret);
2005 out_return_state:
2006 refcount_inc(&state->count);
2007 return state;
2008 }
2009
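/*
 * Handle the delegation portion of an OPEN reply: reclaim it for
 * CLAIM_PREVIOUS opens, record it as a new delegation otherwise, and
 * complain if the server handed one out on a CLAIM_DELEGATE_CUR open.
 * A delegation flagged with do_recall is scheduled for return
 * immediately.
 */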
2010 static void
2011 nfs4_process_delegation(struct inode *inode, const struct cred *cred,
2012 enum open_claim_type4 claim,
2013 const struct nfs4_open_delegation *delegation)
2014 {
2015 switch (delegation->open_delegation_type) {
2016 case NFS4_OPEN_DELEGATE_READ:
2017 case NFS4_OPEN_DELEGATE_WRITE:
2018 case NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG:
2019 case NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG:
2020 break;
2021 default:
2022 return;
2023 }
2024 switch (claim) {
2025 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
2026 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
2027 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
2028 "returning a delegation for "
2029 "OPEN(CLAIM_DELEGATE_CUR)\n",
2030 NFS_SERVER(inode)->nfs_client->cl_hostname);
2031 break;
2032 case NFS4_OPEN_CLAIM_PREVIOUS:
2033 nfs_inode_reclaim_delegation(inode, cred, delegation->type,
2034 &delegation->stateid,
2035 delegation->pagemod_limit,
2036 delegation->open_delegation_type);
2037 break;
2038 default:
2039 nfs_inode_set_delegation(inode, cred, delegation->type,
2040 &delegation->stateid,
2041 delegation->pagemod_limit,
2042 delegation->open_delegation_type);
2043 }
2044 if (delegation->do_recall)
2045 nfs_async_inode_return_delegation(inode, &delegation->stateid);
2046 }
2047
2048 /*
2049 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
2050 * and update the nfs4_state.
2051 */
2052 static struct nfs4_state *
2053 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
2054 {
2055 struct inode *inode = data->state->inode;
2056 struct nfs4_state *state = data->state;
2057 int ret;
2058
2059 if (!data->rpc_done) {
2060 if (data->rpc_status)
2061 return ERR_PTR(data->rpc_status);
2062 return nfs4_try_open_cached(data);
2063 }
2064
2065 ret = nfs_refresh_inode(inode, &data->f_attr);
2066 if (ret)
2067 return ERR_PTR(ret);
2068
2069 nfs4_process_delegation(state->inode,
2070 data->owner->so_cred,
2071 data->o_arg.claim,
2072 &data->o_res.delegation);
2073
2074 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_NO_OPEN_STATEID)) {
2075 if (!update_open_stateid(state, &data->o_res.stateid,
2076 NULL, data->o_arg.fmode))
2077 return ERR_PTR(-EAGAIN);
2078 } else if (!update_open_stateid(state, NULL, NULL, data->o_arg.fmode))
2079 return ERR_PTR(-EAGAIN);
2080 refcount_inc(&state->count);
2081
2082 return state;
2083 }
2084
2085 static struct inode *
2086 nfs4_opendata_get_inode(struct nfs4_opendata *data)
2087 {
2088 struct inode *inode;
2089
2090 switch (data->o_arg.claim) {
2091 case NFS4_OPEN_CLAIM_NULL:
2092 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
2093 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
2094 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
2095 return ERR_PTR(-EAGAIN);
2096 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh,
2097 &data->f_attr);
2098 break;
2099 default:
2100 inode = d_inode(data->dentry);
2101 ihold(inode);
2102 nfs_refresh_inode(inode, &data->f_attr);
2103 }
2104 return inode;
2105 }
2106
2107 static struct nfs4_state *
2108 nfs4_opendata_find_nfs4_state(struct nfs4_opendata *data)
2109 {
2110 struct nfs4_state *state;
2111 struct inode *inode;
2112
2113 inode = nfs4_opendata_get_inode(data);
2114 if (IS_ERR(inode))
2115 return ERR_CAST(inode);
2116 if (data->state != NULL && data->state->inode == inode) {
2117 state = data->state;
2118 refcount_inc(&state->count);
2119 } else
2120 state = nfs4_get_open_state(inode, data->owner);
2121 iput(inode);
2122 if (state == NULL)
2123 state = ERR_PTR(-ENOMEM);
2124 return state;
2125 }
2126
2127 static struct nfs4_state *
2128 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
2129 {
2130 struct nfs4_state *state;
2131
2132 if (!data->rpc_done) {
2133 state = nfs4_try_open_cached(data);
2134 trace_nfs4_cached_open(data->state);
2135 goto out;
2136 }
2137
2138 state = nfs4_opendata_find_nfs4_state(data);
2139 if (IS_ERR(state))
2140 goto out;
2141
2142 nfs4_process_delegation(state->inode,
2143 data->owner->so_cred,
2144 data->o_arg.claim,
2145 &data->o_res.delegation);
2146
2147 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_NO_OPEN_STATEID)) {
2148 if (!update_open_stateid(state, &data->o_res.stateid,
2149 NULL, data->o_arg.fmode)) {
2150 nfs4_put_open_state(state);
2151 state = ERR_PTR(-EAGAIN);
2152 }
2153 } else if (!update_open_stateid(state, NULL, NULL, data->o_arg.fmode)) {
2154 nfs4_put_open_state(state);
2155 state = ERR_PTR(-EAGAIN);
2156 }
2157 out:
2158 nfs_release_seqid(data->o_arg.seqid);
2159 return state;
2160 }
2161
2162 static struct nfs4_state *
2163 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
2164 {
2165 struct nfs4_state *ret;
2166
2167 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
2168 ret = _nfs4_opendata_reclaim_to_nfs4_state(data);
2169 else
2170 ret = _nfs4_opendata_to_nfs4_state(data);
2171 nfs4_sequence_free_slot(&data->o_res.seq_res);
2172 return ret;
2173 }
2174
2175 static struct nfs_open_context *
2176 nfs4_state_find_open_context_mode(struct nfs4_state *state, fmode_t mode)
2177 {
2178 struct nfs_inode *nfsi = NFS_I(state->inode);
2179 struct nfs_open_context *ctx;
2180
2181 rcu_read_lock();
2182 list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
2183 if (ctx->state != state)
2184 continue;
2185 if ((ctx->mode & mode) != mode)
2186 continue;
2187 if (!get_nfs_open_context(ctx))
2188 continue;
2189 rcu_read_unlock();
2190 return ctx;
2191 }
2192 rcu_read_unlock();
2193 return ERR_PTR(-ENOENT);
2194 }
2195
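/*
 * Pick an open context that can drive recovery for this state,
 * preferring read/write, then write-only, then read-only contexts.
 */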
2196 static struct nfs_open_context *
2197 nfs4_state_find_open_context(struct nfs4_state *state)
2198 {
2199 struct nfs_open_context *ctx;
2200
2201 ctx = nfs4_state_find_open_context_mode(state, FMODE_READ|FMODE_WRITE);
2202 if (!IS_ERR(ctx))
2203 return ctx;
2204 ctx = nfs4_state_find_open_context_mode(state, FMODE_WRITE);
2205 if (!IS_ERR(ctx))
2206 return ctx;
2207 return nfs4_state_find_open_context_mode(state, FMODE_READ);
2208 }
2209
2210 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
2211 struct nfs4_state *state, enum open_claim_type4 claim)
2212 {
2213 struct nfs4_opendata *opendata;
2214
2215 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
2216 NULL, claim, GFP_NOFS);
2217 if (opendata == NULL)
2218 return ERR_PTR(-ENOMEM);
2219 opendata->state = state;
2220 refcount_inc(&state->count);
2221 return opendata;
2222 }
2223
2224 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
2225 fmode_t fmode)
2226 {
2227 struct nfs4_state *newstate;
2228 struct nfs_server *server = NFS_SB(opendata->dentry->d_sb);
2229 int openflags = opendata->o_arg.open_flags;
2230 int ret;
2231
2232 if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
2233 return 0;
2234 opendata->o_arg.fmode = fmode;
2235 opendata->o_arg.share_access =
2236 nfs4_map_atomic_open_share(server, fmode, openflags);
2237 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
2238 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
2239 nfs4_init_opendata_res(opendata);
2240 ret = _nfs4_recover_proc_open(opendata);
2241 if (ret != 0)
2242 return ret;
2243 newstate = nfs4_opendata_to_nfs4_state(opendata);
2244 if (IS_ERR(newstate))
2245 return PTR_ERR(newstate);
2246 if (newstate != opendata->state)
2247 ret = -ESTALE;
2248 nfs4_close_state(newstate, fmode);
2249 return ret;
2250 }
2251
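/*
 * Re-establish the open state for every share mode that is still in
 * use, in the order read/write, write-only, read-only.
 */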
2252 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
2253 {
2254 int ret;
2255
2256 /* memory barrier prior to reading state->n_* */
2257 smp_rmb();
2258 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
2259 if (ret != 0)
2260 return ret;
2261 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
2262 if (ret != 0)
2263 return ret;
2264 ret = nfs4_open_recover_helper(opendata, FMODE_READ);
2265 if (ret != 0)
2266 return ret;
2267 /*
2268 * We may have performed cached opens for all three recoveries.
2269 * Check if we need to update the current stateid.
2270 */
2271 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
2272 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
2273 write_seqlock(&state->seqlock);
2274 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
2275 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2276 write_sequnlock(&state->seqlock);
2277 }
2278 return 0;
2279 }
2280
2281 /*
2282 * OPEN_RECLAIM:
2283 * reclaim state on the server after a reboot.
2284 */
2285 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
2286 {
2287 struct nfs_delegation *delegation;
2288 struct nfs4_opendata *opendata;
2289 u32 delegation_type = NFS4_OPEN_DELEGATE_NONE;
2290 int status;
2291
2292 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2293 NFS4_OPEN_CLAIM_PREVIOUS);
2294 if (IS_ERR(opendata))
2295 return PTR_ERR(opendata);
2296 rcu_read_lock();
2297 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2298 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0) {
2299 switch (delegation->type) {
2300 case FMODE_READ:
2301 delegation_type = NFS4_OPEN_DELEGATE_READ;
2302 if (test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags))
2303 delegation_type = NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG;
2304 break;
2305 case FMODE_WRITE:
2306 case FMODE_READ|FMODE_WRITE:
2307 delegation_type = NFS4_OPEN_DELEGATE_WRITE;
2308 if (test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags))
2309 delegation_type = NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG;
2310 }
2311 }
2312 rcu_read_unlock();
2313 opendata->o_arg.u.delegation_type = delegation_type;
2314 status = nfs4_open_recover(opendata, state);
2315 nfs4_opendata_put(opendata);
2316 return status;
2317 }
2318
2319 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
2320 {
2321 struct nfs_server *server = NFS_SERVER(state->inode);
2322 struct nfs4_exception exception = { };
2323 int err;
2324 do {
2325 err = _nfs4_do_open_reclaim(ctx, state);
2326 trace_nfs4_open_reclaim(ctx, 0, err);
2327 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2328 continue;
2329 if (err != -NFS4ERR_DELAY)
2330 break;
2331 nfs4_handle_exception(server, err, &exception);
2332 } while (exception.retry);
2333 return err;
2334 }
2335
2336 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
2337 {
2338 struct nfs_open_context *ctx;
2339 int ret;
2340
2341 ctx = nfs4_state_find_open_context(state);
2342 if (IS_ERR(ctx))
2343 return -EAGAIN;
2344 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2345 nfs_state_clear_open_state_flags(state);
2346 ret = nfs4_do_open_reclaim(ctx, state);
2347 put_nfs_open_context(ctx);
2348 return ret;
2349 }
2350
2351 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err)
2352 {
2353 switch (err) {
2354 default:
2355 printk(KERN_ERR "NFS: %s: unhandled error "
2356 "%d.\n", __func__, err);
2357 fallthrough;
2358 case 0:
2359 case -ENOENT:
2360 case -EAGAIN:
2361 case -ESTALE:
2362 case -ETIMEDOUT:
2363 break;
2364 case -NFS4ERR_BADSESSION:
2365 case -NFS4ERR_BADSLOT:
2366 case -NFS4ERR_BAD_HIGH_SLOT:
2367 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
2368 case -NFS4ERR_DEADSESSION:
2369 return -EAGAIN;
2370 case -NFS4ERR_STALE_CLIENTID:
2371 case -NFS4ERR_STALE_STATEID:
2372 /* Don't recall a delegation if it was lost */
2373 nfs4_schedule_lease_recovery(server->nfs_client);
2374 return -EAGAIN;
2375 case -NFS4ERR_MOVED:
2376 nfs4_schedule_migration_recovery(server);
2377 return -EAGAIN;
2378 case -NFS4ERR_LEASE_MOVED:
2379 nfs4_schedule_lease_moved_recovery(server->nfs_client);
2380 return -EAGAIN;
2381 case -NFS4ERR_DELEG_REVOKED:
2382 case -NFS4ERR_ADMIN_REVOKED:
2383 case -NFS4ERR_EXPIRED:
2384 case -NFS4ERR_BAD_STATEID:
2385 case -NFS4ERR_OPENMODE:
2386 nfs_inode_find_state_and_recover(state->inode,
2387 stateid);
2388 nfs4_schedule_stateid_recovery(server, state);
2389 return -EAGAIN;
2390 case -NFS4ERR_DELAY:
2391 case -NFS4ERR_GRACE:
2392 ssleep(1);
2393 return -EAGAIN;
2394 case -ENOMEM:
2395 case -NFS4ERR_DENIED:
2396 if (fl) {
2397 struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner;
2398 if (lsp)
2399 set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
2400 }
2401 return 0;
2402 }
2403 return err;
2404 }
2405
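/*
 * Called when a delegation is being recalled: re-open the file with
 * CLAIM_DELEG_CUR_FH for every share mode not already backed by the
 * open stateid, then drop the delegation from the nfs4_state.  The
 * result is mapped through nfs4_handle_delegation_recall_error().
 */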
2406 int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
2407 struct nfs4_state *state, const nfs4_stateid *stateid)
2408 {
2409 struct nfs_server *server = NFS_SERVER(state->inode);
2410 struct nfs4_opendata *opendata;
2411 int err = 0;
2412
2413 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2414 NFS4_OPEN_CLAIM_DELEG_CUR_FH);
2415 if (IS_ERR(opendata))
2416 return PTR_ERR(opendata);
2417 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
2418 if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) {
2419 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
2420 if (err)
2421 goto out;
2422 }
2423 if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) {
2424 err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
2425 if (err)
2426 goto out;
2427 }
2428 if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) {
2429 err = nfs4_open_recover_helper(opendata, FMODE_READ);
2430 if (err)
2431 goto out;
2432 }
2433 nfs_state_clear_delegation(state);
2434 out:
2435 nfs4_opendata_put(opendata);
2436 return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
2437 }
2438
2439 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
2440 {
2441 struct nfs4_opendata *data = calldata;
2442
2443 nfs4_setup_sequence(data->o_arg.server->nfs_client,
2444 &data->c_arg.seq_args, &data->c_res.seq_res, task);
2445 }
2446
2447 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
2448 {
2449 struct nfs4_opendata *data = calldata;
2450
2451 nfs40_sequence_done(task, &data->c_res.seq_res);
2452
2453 data->rpc_status = task->tk_status;
2454 if (data->rpc_status == 0) {
2455 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
2456 nfs_confirm_seqid(&data->owner->so_seqid, 0);
2457 renew_lease(data->o_res.server, data->timestamp);
2458 data->rpc_done = true;
2459 }
2460 }
2461
2462 static void nfs4_open_confirm_release(void *calldata)
2463 {
2464 struct nfs4_opendata *data = calldata;
2465 struct nfs4_state *state = NULL;
2466
2467 /* If this request hasn't been cancelled, do nothing */
2468 if (!data->cancelled)
2469 goto out_free;
2470 /* In case of error, no cleanup! */
2471 if (!data->rpc_done)
2472 goto out_free;
2473 state = nfs4_opendata_to_nfs4_state(data);
2474 if (!IS_ERR(state))
2475 nfs4_close_state(state, data->o_arg.fmode);
2476 out_free:
2477 nfs4_opendata_put(data);
2478 }
2479
2480 static const struct rpc_call_ops nfs4_open_confirm_ops = {
2481 .rpc_call_prepare = nfs4_open_confirm_prepare,
2482 .rpc_call_done = nfs4_open_confirm_done,
2483 .rpc_release = nfs4_open_confirm_release,
2484 };
2485
2486 /*
2487 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
2488 */
2489 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
2490 {
2491 struct nfs_server *server = NFS_SERVER(d_inode(data->dir));
2492 struct rpc_task *task;
2493 struct rpc_message msg = {
2494 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
2495 .rpc_argp = &data->c_arg,
2496 .rpc_resp = &data->c_res,
2497 .rpc_cred = data->owner->so_cred,
2498 };
2499 struct rpc_task_setup task_setup_data = {
2500 .rpc_client = server->client,
2501 .rpc_message = &msg,
2502 .callback_ops = &nfs4_open_confirm_ops,
2503 .callback_data = data,
2504 .workqueue = nfsiod_workqueue,
2505 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
2506 };
2507 int status;
2508
2509 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1,
2510 data->is_recover);
2511 kref_get(&data->kref);
2512 data->rpc_done = false;
2513 data->rpc_status = 0;
2514 data->timestamp = jiffies;
2515 task = rpc_run_task(&task_setup_data);
2516 if (IS_ERR(task))
2517 return PTR_ERR(task);
2518 status = rpc_wait_for_completion_task(task);
2519 if (status != 0) {
2520 data->cancelled = true;
2521 smp_wmb();
2522 } else
2523 status = data->rpc_status;
2524 rpc_put_task(task);
2525 return status;
2526 }
2527
2528 static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
2529 {
2530 struct nfs4_opendata *data = calldata;
2531 struct nfs4_state_owner *sp = data->owner;
2532 struct nfs_client *clp = sp->so_server->nfs_client;
2533 enum open_claim_type4 claim = data->o_arg.claim;
2534
2535 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
2536 goto out_wait;
2537 /*
2538 * Check if we still need to send an OPEN call, or if we can use
2539 * a delegation instead.
2540 */
2541 if (data->state != NULL) {
2542 struct nfs_delegation *delegation;
2543
2544 if (can_open_cached(data->state, data->o_arg.fmode,
2545 data->o_arg.open_flags, claim))
2546 goto out_no_action;
2547 rcu_read_lock();
2548 delegation = nfs4_get_valid_delegation(data->state->inode);
2549 if (can_open_delegated(delegation, data->o_arg.fmode, claim))
2550 goto unlock_no_action;
2551 rcu_read_unlock();
2552 }
2553 /* Update client id. */
2554 data->o_arg.clientid = clp->cl_clientid;
2555 switch (claim) {
2556 default:
2557 break;
2558 case NFS4_OPEN_CLAIM_PREVIOUS:
2559 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
2560 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
2561 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
2562 fallthrough;
2563 case NFS4_OPEN_CLAIM_FH:
2564 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
2565 }
2566 data->timestamp = jiffies;
2567 if (nfs4_setup_sequence(data->o_arg.server->nfs_client,
2568 &data->o_arg.seq_args,
2569 &data->o_res.seq_res,
2570 task) != 0)
2571 nfs_release_seqid(data->o_arg.seqid);
2572
2573 /* Set the create mode (note dependency on the session type) */
2574 data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
2575 if (data->o_arg.open_flags & O_EXCL) {
2576 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
2577 if (clp->cl_mvops->minor_version == 0) {
2578 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
2579 /* don't put an ACCESS op in OPEN compound if O_EXCL,
2580 * because ACCESS will return permission denied for
2581 * all bits until close */
2582 data->o_res.access_request = data->o_arg.access = 0;
2583 } else if (nfs4_has_persistent_session(clp))
2584 data->o_arg.createmode = NFS4_CREATE_GUARDED;
2585 }
2586 return;
2587 unlock_no_action:
2588 trace_nfs4_cached_open(data->state);
2589 rcu_read_unlock();
2590 out_no_action:
2591 task->tk_action = NULL;
2592 out_wait:
2593 nfs4_sequence_done(task, &data->o_res.seq_res);
2594 }
2595
2596 static void nfs4_open_done(struct rpc_task *task, void *calldata)
2597 {
2598 struct nfs4_opendata *data = calldata;
2599
2600 data->rpc_status = task->tk_status;
2601
2602 if (!nfs4_sequence_process(task, &data->o_res.seq_res))
2603 return;
2604
2605 if (task->tk_status == 0) {
2606 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
2607 switch (data->o_res.f_attr->mode & S_IFMT) {
2608 case S_IFREG:
2609 break;
2610 case S_IFLNK:
2611 data->rpc_status = -ELOOP;
2612 break;
2613 case S_IFDIR:
2614 data->rpc_status = -EISDIR;
2615 break;
2616 default:
2617 data->rpc_status = -ENOTDIR;
2618 }
2619 }
2620 renew_lease(data->o_res.server, data->timestamp);
2621 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
2622 nfs_confirm_seqid(&data->owner->so_seqid, 0);
2623 }
2624 data->rpc_done = true;
2625 }
2626
2627 static void nfs4_open_release(void *calldata)
2628 {
2629 struct nfs4_opendata *data = calldata;
2630 struct nfs4_state *state = NULL;
2631
2632 /* In case of error, no cleanup! */
2633 if (data->rpc_status != 0 || !data->rpc_done) {
2634 nfs_release_seqid(data->o_arg.seqid);
2635 goto out_free;
2636 }
2637 /* If this request hasn't been cancelled, do nothing */
2638 if (!data->cancelled)
2639 goto out_free;
2640 /* In case we need an open_confirm, no cleanup! */
2641 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
2642 goto out_free;
2643 state = nfs4_opendata_to_nfs4_state(data);
2644 if (!IS_ERR(state))
2645 nfs4_close_state(state, data->o_arg.fmode);
2646 out_free:
2647 nfs4_opendata_put(data);
2648 }
2649
2650 static const struct rpc_call_ops nfs4_open_ops = {
2651 .rpc_call_prepare = nfs4_open_prepare,
2652 .rpc_call_done = nfs4_open_done,
2653 .rpc_release = nfs4_open_release,
2654 };
2655
2656 static int nfs4_run_open_task(struct nfs4_opendata *data,
2657 struct nfs_open_context *ctx)
2658 {
2659 struct inode *dir = d_inode(data->dir);
2660 struct nfs_server *server = NFS_SERVER(dir);
2661 struct nfs_openargs *o_arg = &data->o_arg;
2662 struct nfs_openres *o_res = &data->o_res;
2663 struct rpc_task *task;
2664 struct rpc_message msg = {
2665 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
2666 .rpc_argp = o_arg,
2667 .rpc_resp = o_res,
2668 .rpc_cred = data->owner->so_cred,
2669 };
2670 struct rpc_task_setup task_setup_data = {
2671 .rpc_client = server->client,
2672 .rpc_message = &msg,
2673 .callback_ops = &nfs4_open_ops,
2674 .callback_data = data,
2675 .workqueue = nfsiod_workqueue,
2676 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
2677 };
2678 int status;
2679
2680 if (nfs_server_capable(dir, NFS_CAP_MOVEABLE))
2681 task_setup_data.flags |= RPC_TASK_MOVEABLE;
2682
2683 kref_get(&data->kref);
2684 data->rpc_done = false;
2685 data->rpc_status = 0;
2686 data->cancelled = false;
2687 data->is_recover = false;
2688 if (!ctx) {
2689 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 1);
2690 data->is_recover = true;
2691 task_setup_data.flags |= RPC_TASK_TIMEOUT;
2692 } else {
2693 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 0);
2694 pnfs_lgopen_prepare(data, ctx);
2695 }
2696 task = rpc_run_task(&task_setup_data);
2697 if (IS_ERR(task))
2698 return PTR_ERR(task);
2699 status = rpc_wait_for_completion_task(task);
2700 if (status != 0) {
2701 data->cancelled = true;
2702 smp_wmb();
2703 } else
2704 status = data->rpc_status;
2705 rpc_put_task(task);
2706
2707 return status;
2708 }
2709
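/*
 * Synchronous OPEN used by the state recovery paths: the task runs
 * without an open context, so it is marked is_recover and uses
 * RPC_TASK_TIMEOUT, and an OPEN_CONFIRM is issued if the server
 * requests one.
 */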
2710 static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
2711 {
2712 struct inode *dir = d_inode(data->dir);
2713 struct nfs_openres *o_res = &data->o_res;
2714 int status;
2715
2716 status = nfs4_run_open_task(data, NULL);
2717 if (status != 0 || !data->rpc_done)
2718 return status;
2719
2720 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
2721
2722 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM)
2723 status = _nfs4_proc_open_confirm(data);
2724
2725 return status;
2726 }
2727
2728 /*
2729 * Additional permission checks in order to distinguish between an
2730 * open for read, and an open for execute. This works around the
2731 * fact that NFSv4 OPEN treats read and execute permissions as being
2732 * the same.
2733 * Note that in the non-execute case, we want to turn off permission
2734 * checking if we just created a new file (POSIX open() semantics).
2735 */
2736 static int nfs4_opendata_access(const struct cred *cred,
2737 struct nfs4_opendata *opendata,
2738 struct nfs4_state *state, fmode_t fmode)
2739 {
2740 struct nfs_access_entry cache;
2741 u32 mask, flags;
2742
2743 /* access call failed or for some reason the server doesn't
2744 * support any access modes -- defer access call until later */
2745 if (opendata->o_res.access_supported == 0)
2746 return 0;
2747
2748 mask = 0;
2749 if (fmode & FMODE_EXEC) {
2750 /* ONLY check for exec rights */
2751 if (S_ISDIR(state->inode->i_mode))
2752 mask = NFS4_ACCESS_LOOKUP;
2753 else
2754 mask = NFS4_ACCESS_EXECUTE;
2755 } else if ((fmode & FMODE_READ) && !opendata->file_created)
2756 mask = NFS4_ACCESS_READ;
2757
2758 nfs_access_set_mask(&cache, opendata->o_res.access_result);
2759 nfs_access_add_cache(state->inode, &cache, cred);
2760
2761 flags = NFS4_ACCESS_READ | NFS4_ACCESS_EXECUTE | NFS4_ACCESS_LOOKUP;
2762 if ((mask & ~cache.mask & flags) == 0)
2763 return 0;
2764
2765 return -EACCES;
2766 }
2767
2768 /*
2769 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
2770 */
2771 static int _nfs4_proc_open(struct nfs4_opendata *data,
2772 struct nfs_open_context *ctx)
2773 {
2774 struct inode *dir = d_inode(data->dir);
2775 struct nfs_server *server = NFS_SERVER(dir);
2776 struct nfs_openargs *o_arg = &data->o_arg;
2777 struct nfs_openres *o_res = &data->o_res;
2778 int status;
2779
2780 status = nfs4_run_open_task(data, ctx);
2781 if (!data->rpc_done)
2782 return status;
2783 if (status != 0) {
2784 if (status == -NFS4ERR_BADNAME &&
2785 !(o_arg->open_flags & O_CREAT))
2786 return -ENOENT;
2787 return status;
2788 }
2789
2790 nfs_fattr_map_and_free_names(server, &data->f_attr);
2791
2792 if (o_arg->open_flags & O_CREAT) {
2793 if (o_arg->open_flags & O_EXCL)
2794 data->file_created = true;
2795 else if (o_res->cinfo.before != o_res->cinfo.after)
2796 data->file_created = true;
2797 if (data->file_created ||
2798 inode_peek_iversion_raw(dir) != o_res->cinfo.after)
2799 nfs4_update_changeattr(dir, &o_res->cinfo,
2800 o_res->f_attr->time_start,
2801 NFS_INO_INVALID_DATA);
2802 }
2803 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
2804 server->caps &= ~NFS_CAP_POSIX_LOCK;
2805 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2806 status = _nfs4_proc_open_confirm(data);
2807 if (status != 0)
2808 return status;
2809 }
2810 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) {
2811 struct nfs_fh *fh = &o_res->fh;
2812
2813 nfs4_sequence_free_slot(&o_res->seq_res);
2814 if (o_arg->claim == NFS4_OPEN_CLAIM_FH)
2815 fh = NFS_FH(d_inode(data->dentry));
2816 nfs4_proc_getattr(server, fh, o_res->f_attr, NULL);
2817 }
2818 return 0;
2819 }
2820
2821 /*
2822 * OPEN_EXPIRED:
2823 * reclaim state on the server after a network partition.
2824 * Assumes caller holds the appropriate lock
2825 */
2826 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2827 {
2828 struct nfs4_opendata *opendata;
2829 int ret;
2830
2831 opendata = nfs4_open_recoverdata_alloc(ctx, state, NFS4_OPEN_CLAIM_FH);
2832 if (IS_ERR(opendata))
2833 return PTR_ERR(opendata);
2834 /*
2835 * We're not recovering a delegation, so ask for no delegation.
2836 * Otherwise the recovery thread could deadlock with an outstanding
2837 * delegation return.
2838 */
2839 opendata->o_arg.open_flags = O_DIRECT;
2840 ret = nfs4_open_recover(opendata, state);
2841 if (ret == -ESTALE)
2842 d_drop(ctx->dentry);
2843 nfs4_opendata_put(opendata);
2844 return ret;
2845 }
2846
2847 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2848 {
2849 struct nfs_server *server = NFS_SERVER(state->inode);
2850 struct nfs4_exception exception = { };
2851 int err;
2852
2853 do {
2854 err = _nfs4_open_expired(ctx, state);
2855 trace_nfs4_open_expired(ctx, 0, err);
2856 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2857 continue;
2858 switch (err) {
2859 default:
2860 goto out;
2861 case -NFS4ERR_GRACE:
2862 case -NFS4ERR_DELAY:
2863 nfs4_handle_exception(server, err, &exception);
2864 err = 0;
2865 }
2866 } while (exception.retry);
2867 out:
2868 return err;
2869 }
2870
2871 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2872 {
2873 struct nfs_open_context *ctx;
2874 int ret;
2875
2876 ctx = nfs4_state_find_open_context(state);
2877 if (IS_ERR(ctx))
2878 return -EAGAIN;
2879 ret = nfs4_do_open_expired(ctx, state);
2880 put_nfs_open_context(ctx);
2881 return ret;
2882 }
2883
2884 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state,
2885 const nfs4_stateid *stateid)
2886 {
2887 nfs_remove_bad_delegation(state->inode, stateid);
2888 nfs_state_clear_delegation(state);
2889 }
2890
2891 static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
2892 {
2893 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
2894 nfs_finish_clear_delegation_stateid(state, NULL);
2895 }
2896
2897 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2898 {
2899 /* NFSv4.0 doesn't allow for delegation recovery on open expire */
2900 nfs40_clear_delegation_stateid(state);
2901 nfs_state_clear_open_state_flags(state);
2902 return nfs4_open_expired(sp, state);
2903 }
2904
2905 static int nfs40_test_and_free_expired_stateid(struct nfs_server *server,
2906 nfs4_stateid *stateid, const struct cred *cred)
2907 {
2908 return -NFS4ERR_BAD_STATEID;
2909 }
2910
2911 #if defined(CONFIG_NFS_V4_1)
2912 static int nfs41_test_and_free_expired_stateid(struct nfs_server *server,
2913 nfs4_stateid *stateid, const struct cred *cred)
2914 {
2915 int status;
2916
2917 switch (stateid->type) {
2918 default:
2919 break;
2920 case NFS4_INVALID_STATEID_TYPE:
2921 case NFS4_SPECIAL_STATEID_TYPE:
2922 case NFS4_FREED_STATEID_TYPE:
2923 return -NFS4ERR_BAD_STATEID;
2924 case NFS4_REVOKED_STATEID_TYPE:
2925 goto out_free;
2926 }
2927
2928 status = nfs41_test_stateid(server, stateid, cred);
2929 switch (status) {
2930 case -NFS4ERR_EXPIRED:
2931 case -NFS4ERR_ADMIN_REVOKED:
2932 case -NFS4ERR_DELEG_REVOKED:
2933 break;
2934 default:
2935 return status;
2936 }
2937 out_free:
2938 /* Ack the revoked state to the server */
2939 nfs41_free_stateid(server, stateid, cred, true);
2940 return -NFS4ERR_EXPIRED;
2941 }
2942
2943 static int nfs41_check_delegation_stateid(struct nfs4_state *state)
2944 {
2945 struct nfs_server *server = NFS_SERVER(state->inode);
2946 nfs4_stateid stateid;
2947 struct nfs_delegation *delegation;
2948 const struct cred *cred = NULL;
2949 int status, ret = NFS_OK;
2950
2951 /* Get the delegation credential for use by test/free_stateid */
2952 rcu_read_lock();
2953 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2954 if (delegation == NULL) {
2955 rcu_read_unlock();
2956 nfs_state_clear_delegation(state);
2957 return NFS_OK;
2958 }
2959
2960 spin_lock(&delegation->lock);
2961 nfs4_stateid_copy(&stateid, &delegation->stateid);
2962
2963 if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
2964 &delegation->flags)) {
2965 spin_unlock(&delegation->lock);
2966 rcu_read_unlock();
2967 return NFS_OK;
2968 }
2969
2970 if (delegation->cred)
2971 cred = get_cred(delegation->cred);
2972 spin_unlock(&delegation->lock);
2973 rcu_read_unlock();
2974 status = nfs41_test_and_free_expired_stateid(server, &stateid, cred);
2975 trace_nfs4_test_delegation_stateid(state, NULL, status);
2976 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
2977 nfs_finish_clear_delegation_stateid(state, &stateid);
2978 else
2979 ret = status;
2980
2981 put_cred(cred);
2982 return ret;
2983 }
2984
2985 static void nfs41_delegation_recover_stateid(struct nfs4_state *state)
2986 {
2987 nfs4_stateid tmp;
2988
2989 if (test_bit(NFS_DELEGATED_STATE, &state->flags) &&
2990 nfs4_copy_delegation_stateid(state->inode, state->state,
2991 &tmp, NULL) &&
2992 nfs4_stateid_match_other(&state->stateid, &tmp))
2993 nfs_state_set_delegation(state, &tmp, state->state);
2994 else
2995 nfs_state_clear_delegation(state);
2996 }
2997
2998 /**
2999 * nfs41_check_expired_locks - possibly free a lock stateid
3000 *
3001 * @state: NFSv4 state for an inode
3002 *
3003 * Returns NFS_OK if recovery for this stateid is now finished.
3004 * Otherwise a negative NFS4ERR value is returned.
3005 */
3006 static int nfs41_check_expired_locks(struct nfs4_state *state)
3007 {
3008 int status, ret = NFS_OK;
3009 struct nfs4_lock_state *lsp, *prev = NULL;
3010 struct nfs_server *server = NFS_SERVER(state->inode);
3011
3012 if (!test_bit(LK_STATE_IN_USE, &state->flags))
3013 goto out;
3014
3015 spin_lock(&state->state_lock);
3016 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
3017 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
3018 const struct cred *cred = lsp->ls_state->owner->so_cred;
3019
3020 refcount_inc(&lsp->ls_count);
3021 spin_unlock(&state->state_lock);
3022
3023 nfs4_put_lock_state(prev);
3024 prev = lsp;
3025
3026 status = nfs41_test_and_free_expired_stateid(server,
3027 &lsp->ls_stateid,
3028 cred);
3029 trace_nfs4_test_lock_stateid(state, lsp, status);
3030 if (status == -NFS4ERR_EXPIRED ||
3031 status == -NFS4ERR_BAD_STATEID) {
3032 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
3033 lsp->ls_stateid.type = NFS4_INVALID_STATEID_TYPE;
3034 if (!recover_lost_locks)
3035 set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
3036 } else if (status != NFS_OK) {
3037 ret = status;
3038 nfs4_put_lock_state(prev);
3039 goto out;
3040 }
3041 spin_lock(&state->state_lock);
3042 }
3043 }
3044 spin_unlock(&state->state_lock);
3045 nfs4_put_lock_state(prev);
3046 out:
3047 return ret;
3048 }
3049
3050 /**
3051 * nfs41_check_open_stateid - possibly free an open stateid
3052 *
3053 * @state: NFSv4 state for an inode
3054 *
3055 * Returns NFS_OK if recovery for this stateid is now finished.
3056 * Otherwise a negative NFS4ERR value is returned.
3057 */
3058 static int nfs41_check_open_stateid(struct nfs4_state *state)
3059 {
3060 struct nfs_server *server = NFS_SERVER(state->inode);
3061 nfs4_stateid *stateid = &state->open_stateid;
3062 const struct cred *cred = state->owner->so_cred;
3063 int status;
3064
3065 if (test_bit(NFS_OPEN_STATE, &state->flags) == 0)
3066 return -NFS4ERR_BAD_STATEID;
3067 status = nfs41_test_and_free_expired_stateid(server, stateid, cred);
3068 trace_nfs4_test_open_stateid(state, NULL, status);
3069 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) {
3070 nfs_state_clear_open_state_flags(state);
3071 stateid->type = NFS4_INVALID_STATEID_TYPE;
3072 return status;
3073 }
3074 if (nfs_open_stateid_recover_openmode(state))
3075 return -NFS4ERR_OPENMODE;
3076 return NFS_OK;
3077 }
3078
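/*
 * NFSv4.1 open-state recovery after an expiry event: test (and, where
 * revoked, free) the delegation, lock and open stateids in that order,
 * and fall back to a full OPEN only if the open stateid could not be
 * revalidated.
 */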
3079 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
3080 {
3081 int status;
3082
3083 status = nfs41_check_delegation_stateid(state);
3084 if (status != NFS_OK)
3085 return status;
3086 nfs41_delegation_recover_stateid(state);
3087
3088 status = nfs41_check_expired_locks(state);
3089 if (status != NFS_OK)
3090 return status;
3091 status = nfs41_check_open_stateid(state);
3092 if (status != NFS_OK)
3093 status = nfs4_open_expired(sp, state);
3094 return status;
3095 }
3096 #endif
3097
3098 /*
3099 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-*
3100 * fields corresponding to attributes that were used to store the verifier.
3101 * Make sure we clobber those fields in the later setattr call
3102 */
3103 static unsigned nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
3104 struct iattr *sattr, struct nfs4_label **label)
3105 {
3106 const __u32 *bitmask = opendata->o_arg.server->exclcreat_bitmask;
3107 __u32 attrset[3];
3108 unsigned ret;
3109 unsigned i;
3110
3111 for (i = 0; i < ARRAY_SIZE(attrset); i++) {
3112 attrset[i] = opendata->o_res.attrset[i];
3113 if (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE4_1)
3114 attrset[i] &= ~bitmask[i];
3115 }
3116
3117 ret = (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE) ?
3118 sattr->ia_valid : 0;
3119
3120 if ((attrset[1] & (FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET))) {
3121 if (sattr->ia_valid & ATTR_ATIME_SET)
3122 ret |= ATTR_ATIME_SET;
3123 else
3124 ret |= ATTR_ATIME;
3125 }
3126
3127 if ((attrset[1] & (FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET))) {
3128 if (sattr->ia_valid & ATTR_MTIME_SET)
3129 ret |= ATTR_MTIME_SET;
3130 else
3131 ret |= ATTR_MTIME;
3132 }
3133
3134 if (!(attrset[2] & FATTR4_WORD2_SECURITY_LABEL))
3135 *label = NULL;
3136 return ret;
3137 }
3138
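/*
 * Run the OPEN compound and convert the reply into a referenced
 * nfs4_state attached to ctx: splice in the dentry for a freshly
 * created file, parse any LAYOUTGET piggy-backed on the OPEN, and
 * perform the deferred ACCESS check before attaching the context.
 */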
3139 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
3140 struct nfs_open_context *ctx)
3141 {
3142 struct nfs4_state_owner *sp = opendata->owner;
3143 struct nfs_server *server = sp->so_server;
3144 struct dentry *dentry;
3145 struct nfs4_state *state;
3146 fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx);
3147 struct inode *dir = d_inode(opendata->dir);
3148 unsigned long dir_verifier;
3149 int ret;
3150
3151 dir_verifier = nfs_save_change_attribute(dir);
3152
3153 ret = _nfs4_proc_open(opendata, ctx);
3154 if (ret != 0)
3155 goto out;
3156
3157 state = _nfs4_opendata_to_nfs4_state(opendata);
3158 ret = PTR_ERR(state);
3159 if (IS_ERR(state))
3160 goto out;
3161 ctx->state = state;
3162 if (server->caps & NFS_CAP_POSIX_LOCK)
3163 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
3164 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK)
3165 set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags);
3166 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_PRESERVE_UNLINKED)
3167 set_bit(NFS_INO_PRESERVE_UNLINKED, &NFS_I(state->inode)->flags);
3168
3169 dentry = opendata->dentry;
3170 if (d_really_is_negative(dentry)) {
3171 struct dentry *alias;
3172 d_drop(dentry);
3173 alias = d_splice_alias(igrab(state->inode), dentry);
3174 /* d_splice_alias() can't fail here - it's a non-directory */
3175 if (alias) {
3176 dput(ctx->dentry);
3177 ctx->dentry = dentry = alias;
3178 }
3179 }
3180
3181 switch (opendata->o_arg.claim) {
3182 default:
3183 break;
3184 case NFS4_OPEN_CLAIM_NULL:
3185 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
3186 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
3187 if (!opendata->rpc_done)
3188 break;
3189 if (opendata->o_res.delegation.type != 0)
3190 dir_verifier = nfs_save_change_attribute(dir);
3191 nfs_set_verifier(dentry, dir_verifier);
3192 }
3193
3194 /* Parse layoutget results before we check for access */
3195 pnfs_parse_lgopen(state->inode, opendata->lgp, ctx);
3196
3197 ret = nfs4_opendata_access(sp->so_cred, opendata, state, acc_mode);
3198 if (ret != 0)
3199 goto out;
3200
3201 if (d_inode(dentry) == state->inode)
3202 nfs_inode_attach_open_context(ctx);
3203
3204 out:
3205 if (!opendata->cancelled) {
3206 if (opendata->lgp) {
3207 nfs4_lgopen_release(opendata->lgp);
3208 opendata->lgp = NULL;
3209 }
3210 nfs4_sequence_free_slot(&opendata->o_res.seq_res);
3211 }
3212 return ret;
3213 }
3214
3215 /*
3216 * Returns a referenced nfs4_state
3217 */
3218 static int _nfs4_do_open(struct inode *dir,
3219 struct nfs_open_context *ctx,
3220 int flags,
3221 const struct nfs4_open_createattrs *c,
3222 int *opened)
3223 {
3224 struct nfs4_state_owner *sp;
3225 struct nfs4_state *state = NULL;
3226 struct nfs_server *server = NFS_SERVER(dir);
3227 struct nfs4_opendata *opendata;
3228 struct dentry *dentry = ctx->dentry;
3229 const struct cred *cred = ctx->cred;
3230 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
3231 fmode_t fmode = _nfs4_ctx_to_openmode(ctx);
3232 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
3233 struct iattr *sattr = c->sattr;
3234 struct nfs4_label *label = c->label;
3235 int status;
3236
3237 /* Protect against reboot recovery conflicts */
3238 status = -ENOMEM;
3239 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
3240 if (sp == NULL) {
3241 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
3242 goto out_err;
3243 }
3244 status = nfs4_client_recover_expired_lease(server->nfs_client);
3245 if (status != 0)
3246 goto err_put_state_owner;
3247 if (d_really_is_positive(dentry))
3248 nfs4_return_incompatible_delegation(d_inode(dentry), fmode);
3249 status = -ENOMEM;
3250 if (d_really_is_positive(dentry))
3251 claim = NFS4_OPEN_CLAIM_FH;
3252 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags,
3253 c, claim, GFP_KERNEL);
3254 if (opendata == NULL)
3255 goto err_put_state_owner;
3256
3257 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
3258 if (!opendata->f_attr.mdsthreshold) {
3259 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
3260 if (!opendata->f_attr.mdsthreshold)
3261 goto err_opendata_put;
3262 }
3263 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
3264 }
3265 if (d_really_is_positive(dentry))
3266 opendata->state = nfs4_get_open_state(d_inode(dentry), sp);
3267
3268 status = _nfs4_open_and_get_state(opendata, ctx);
3269 if (status != 0)
3270 goto err_opendata_put;
3271 state = ctx->state;
3272
3273 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
3274 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
3275 unsigned attrs = nfs4_exclusive_attrset(opendata, sattr, &label);
3276 /*
3277 * Send any create attributes that were not set by the OPEN
3278 * call, using an extra SETATTR.
3279 */
3280 if (attrs || label) {
3281 unsigned ia_old = sattr->ia_valid;
3282
3283 sattr->ia_valid = attrs;
3284 nfs_fattr_init(opendata->o_res.f_attr);
3285 status = nfs4_do_setattr(state->inode, cred,
3286 opendata->o_res.f_attr, sattr,
3287 ctx, label);
3288 if (status == 0) {
3289 nfs_setattr_update_inode(state->inode, sattr,
3290 opendata->o_res.f_attr);
3291 nfs_setsecurity(state->inode, opendata->o_res.f_attr);
3292 }
3293 sattr->ia_valid = ia_old;
3294 }
3295 }
3296 if (opened && opendata->file_created)
3297 *opened = 1;
3298
3299 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
3300 *ctx_th = opendata->f_attr.mdsthreshold;
3301 opendata->f_attr.mdsthreshold = NULL;
3302 }
3303
3304 nfs4_opendata_put(opendata);
3305 nfs4_put_state_owner(sp);
3306 return 0;
3307 err_opendata_put:
3308 nfs4_opendata_put(opendata);
3309 err_put_state_owner:
3310 nfs4_put_state_owner(sp);
3311 out_err:
3312 return status;
3313 }
3314
3315
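/*
 * Retry wrapper around _nfs4_do_open(): BAD_SEQID, BAD_STATEID, expired
 * leases and delegation races (-EAGAIN) all cause the OPEN to be
 * retried; anything else is funnelled through nfs4_handle_exception().
 */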
3316 static struct nfs4_state *nfs4_do_open(struct inode *dir,
3317 struct nfs_open_context *ctx,
3318 int flags,
3319 struct iattr *sattr,
3320 struct nfs4_label *label,
3321 int *opened)
3322 {
3323 struct nfs_server *server = NFS_SERVER(dir);
3324 struct nfs4_exception exception = {
3325 .interruptible = true,
3326 };
3327 struct nfs4_state *res;
3328 struct nfs4_open_createattrs c = {
3329 .label = label,
3330 .sattr = sattr,
3331 .verf = {
3332 [0] = (__u32)jiffies,
3333 [1] = (__u32)current->pid,
3334 },
3335 };
3336 int status;
3337
3338 do {
3339 status = _nfs4_do_open(dir, ctx, flags, &c, opened);
3340 res = ctx->state;
3341 trace_nfs4_open_file(ctx, flags, status);
3342 if (status == 0)
3343 break;
3344 /* NOTE: BAD_SEQID means the server and client disagree about the
3345 * book-keeping w.r.t. state-changing operations
3346 * (OPEN/CLOSE/LOCK/LOCKU...)
3347 * It is actually a sign of a bug on the client or on the server.
3348 *
3349 * If we receive a BAD_SEQID error in the particular case of
3350 * doing an OPEN, we assume that nfs_increment_open_seqid() will
3351 * have unhashed the old state_owner for us, and that we can
3352 * therefore safely retry using a new one. We should still warn
3353 * the user though...
3354 */
3355 if (status == -NFS4ERR_BAD_SEQID) {
3356 pr_warn_ratelimited("NFS: v4 server %s "
3357 " returned a bad sequence-id error!\n",
3358 NFS_SERVER(dir)->nfs_client->cl_hostname);
3359 exception.retry = 1;
3360 continue;
3361 }
3362 /*
3363 * BAD_STATEID on OPEN means that the server cancelled our
3364 * state before it received the OPEN_CONFIRM.
3365 * Recover by retrying the request as per the discussion
3366 * on Page 181 of RFC3530.
3367 */
3368 if (status == -NFS4ERR_BAD_STATEID) {
3369 exception.retry = 1;
3370 continue;
3371 }
3372 if (status == -NFS4ERR_EXPIRED) {
3373 nfs4_schedule_lease_recovery(server->nfs_client);
3374 exception.retry = 1;
3375 continue;
3376 }
3377 if (status == -EAGAIN) {
3378 /* We must have found a delegation */
3379 exception.retry = 1;
3380 continue;
3381 }
3382 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception))
3383 continue;
3384 res = ERR_PTR(nfs4_handle_exception(server,
3385 status, &exception));
3386 } while (exception.retry);
3387 return res;
3388 }
3389
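/*
 * Issue a single SETATTR RPC. Picks the most appropriate stateid for the
 * call: a write delegation stateid if one is held, otherwise the open/lock
 * stateid from @ctx, and falls back to the zero stateid when neither
 * applies (or when the SETATTR does not change the file size).
 */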
3390 static int _nfs4_do_setattr(struct inode *inode,
3391 struct nfs_setattrargs *arg,
3392 struct nfs_setattrres *res,
3393 const struct cred *cred,
3394 struct nfs_open_context *ctx)
3395 {
3396 struct nfs_server *server = NFS_SERVER(inode);
3397 struct rpc_message msg = {
3398 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
3399 .rpc_argp = arg,
3400 .rpc_resp = res,
3401 .rpc_cred = cred,
3402 };
3403 const struct cred *delegation_cred = NULL;
3404 unsigned long timestamp = jiffies;
3405 bool truncate;
3406 int status;
3407
3408 nfs_fattr_init(res->fattr);
3409
3410 /* Servers should only apply open mode checks for file size changes */
3411 truncate = (arg->iap->ia_valid & ATTR_SIZE) ? true : false;
3412 if (!truncate) {
3413 nfs4_inode_make_writeable(inode);
3414 goto zero_stateid;
3415 }
3416
3417 if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) {
3418 /* Use that stateid */
3419 } else if (ctx != NULL && ctx->state) {
3420 struct nfs_lock_context *l_ctx;
3421 if (!nfs4_valid_open_stateid(ctx->state))
3422 return -EBADF;
3423 l_ctx = nfs_get_lock_context(ctx);
3424 if (IS_ERR(l_ctx))
3425 return PTR_ERR(l_ctx);
3426 status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx,
3427 &arg->stateid, &delegation_cred);
3428 nfs_put_lock_context(l_ctx);
3429 if (status == -EIO)
3430 return -EBADF;
3431 else if (status == -EAGAIN)
3432 goto zero_stateid;
3433 } else {
3434 zero_stateid:
3435 nfs4_stateid_copy(&arg->stateid, &zero_stateid);
3436 }
3437 if (delegation_cred)
3438 msg.rpc_cred = delegation_cred;
3439
3440 status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1);
3441
3442 put_cred(delegation_cred);
3443 if (status == 0 && ctx != NULL)
3444 renew_lease(server, timestamp);
3445 trace_nfs4_setattr(inode, &arg->stateid, status);
3446 return status;
3447 }
3448
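/*
 * Retry wrapper around _nfs4_do_setattr(): builds the attribute bitmask
 * from the attributes being changed and loops on the usual NFSv4
 * exceptions. NFS4ERR_OPENMODE on a non-size-changing SETATTR is reported
 * once, and is mapped to -EBADF or -EACCES when the open state lacks
 * write access.
 */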
3449 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
3450 struct nfs_fattr *fattr, struct iattr *sattr,
3451 struct nfs_open_context *ctx, struct nfs4_label *ilabel)
3452 {
3453 struct nfs_server *server = NFS_SERVER(inode);
3454 __u32 bitmask[NFS4_BITMASK_SZ];
3455 struct nfs4_state *state = ctx ? ctx->state : NULL;
3456 struct nfs_setattrargs arg = {
3457 .fh = NFS_FH(inode),
3458 .iap = sattr,
3459 .server = server,
3460 .bitmask = bitmask,
3461 .label = ilabel,
3462 };
3463 struct nfs_setattrres res = {
3464 .fattr = fattr,
3465 .server = server,
3466 };
3467 struct nfs4_exception exception = {
3468 .state = state,
3469 .inode = inode,
3470 .stateid = &arg.stateid,
3471 };
3472 unsigned long adjust_flags = NFS_INO_INVALID_CHANGE |
3473 NFS_INO_INVALID_CTIME;
3474 int err;
3475
3476 if (sattr->ia_valid & (ATTR_MODE | ATTR_KILL_SUID | ATTR_KILL_SGID))
3477 adjust_flags |= NFS_INO_INVALID_MODE;
3478 if (sattr->ia_valid & (ATTR_UID | ATTR_GID))
3479 adjust_flags |= NFS_INO_INVALID_OTHER;
3480 if (sattr->ia_valid & ATTR_ATIME)
3481 adjust_flags |= NFS_INO_INVALID_ATIME;
3482 if (sattr->ia_valid & ATTR_MTIME)
3483 adjust_flags |= NFS_INO_INVALID_MTIME;
3484
3485 do {
3486 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label),
3487 inode, adjust_flags);
3488
3489 err = _nfs4_do_setattr(inode, &arg, &res, cred, ctx);
3490 switch (err) {
3491 case -NFS4ERR_OPENMODE:
3492 if (!(sattr->ia_valid & ATTR_SIZE)) {
3493 pr_warn_once("NFSv4: server %s is incorrectly "
3494 "applying open mode checks to "
3495 "a SETATTR that is not "
3496 "changing file size.\n",
3497 server->nfs_client->cl_hostname);
3498 }
3499 if (state && !(state->state & FMODE_WRITE)) {
3500 err = -EBADF;
3501 if (sattr->ia_valid & ATTR_OPEN)
3502 err = -EACCES;
3503 goto out;
3504 }
3505 }
3506 err = nfs4_handle_exception(server, err, &exception);
3507 } while (exception.retry);
3508 out:
3509 return err;
3510 }
3511
3512 static bool
3513 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task)
3514 {
3515 if (inode == NULL || !nfs_have_layout(inode))
3516 return false;
3517
3518 return pnfs_wait_on_layoutreturn(inode, task);
3519 }
3520
3521 /*
3522 * Update the seqid of an open stateid
3523 */
3524 static void nfs4_sync_open_stateid(nfs4_stateid *dst,
3525 struct nfs4_state *state)
3526 {
3527 __be32 seqid_open;
3528 u32 dst_seqid;
3529 int seq;
3530
3531 for (;;) {
3532 if (!nfs4_valid_open_stateid(state))
3533 break;
3534 seq = read_seqbegin(&state->seqlock);
3535 if (!nfs4_state_match_open_stateid_other(state, dst)) {
3536 nfs4_stateid_copy(dst, &state->open_stateid);
3537 if (read_seqretry(&state->seqlock, seq))
3538 continue;
3539 break;
3540 }
3541 seqid_open = state->open_stateid.seqid;
3542 if (read_seqretry(&state->seqlock, seq))
3543 continue;
3544
3545 dst_seqid = be32_to_cpu(dst->seqid);
3546 if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) < 0)
3547 dst->seqid = seqid_open;
3548 break;
3549 }
3550 }
3551
3552 /*
3553 * Update the seqid of an open stateid after receiving
3554 * NFS4ERR_OLD_STATEID
3555 */
3556 static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst,
3557 struct nfs4_state *state)
3558 {
3559 __be32 seqid_open;
3560 u32 dst_seqid;
3561 bool ret;
3562 int seq, status = -EAGAIN;
3563 DEFINE_WAIT(wait);
3564
3565 for (;;) {
3566 ret = false;
3567 if (!nfs4_valid_open_stateid(state))
3568 break;
3569 seq = read_seqbegin(&state->seqlock);
3570 if (!nfs4_state_match_open_stateid_other(state, dst)) {
3571 if (read_seqretry(&state->seqlock, seq))
3572 continue;
3573 break;
3574 }
3575
3576 write_seqlock(&state->seqlock);
3577 seqid_open = state->open_stateid.seqid;
3578
3579 dst_seqid = be32_to_cpu(dst->seqid);
3580
3581 /* Did another OPEN bump the state's seqid? try again: */
3582 if ((s32)(be32_to_cpu(seqid_open) - dst_seqid) > 0) {
3583 dst->seqid = seqid_open;
3584 write_sequnlock(&state->seqlock);
3585 ret = true;
3586 break;
3587 }
3588
3589 /* server says we're behind but we haven't seen the update yet */
3590 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
3591 prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
3592 write_sequnlock(&state->seqlock);
3593 trace_nfs4_close_stateid_update_wait(state->inode, dst, 0);
3594
3595 if (fatal_signal_pending(current) || nfs_current_task_exiting())
3596 status = -EINTR;
3597 else
3598 if (schedule_timeout(5*HZ) != 0)
3599 status = 0;
3600
3601 finish_wait(&state->waitq, &wait);
3602
3603 if (!status)
3604 continue;
3605 if (status == -EINTR)
3606 break;
3607
3608 /* we slept the whole 5 seconds, we must have lost a seqid */
3609 dst->seqid = cpu_to_be32(dst_seqid + 1);
3610 ret = true;
3611 break;
3612 }
3613
3614 return ret;
3615 }
3616
3617 struct nfs4_closedata {
3618 struct inode *inode;
3619 struct nfs4_state *state;
3620 struct nfs_closeargs arg;
3621 struct nfs_closeres res;
3622 struct {
3623 struct nfs4_layoutreturn_args arg;
3624 struct nfs4_layoutreturn_res res;
3625 struct nfs4_xdr_opaque_data ld_private;
3626 u32 roc_barrier;
3627 bool roc;
3628 } lr;
3629 struct nfs_fattr fattr;
3630 unsigned long timestamp;
3631 };
3632
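/*
 * rpc_release callback for CLOSE/OPEN_DOWNGRADE: releases any pNFS
 * return-on-close state, the open state, the seqid, the state owner and
 * the superblock reference taken in nfs4_do_close().
 */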
3633 static void nfs4_free_closedata(void *data)
3634 {
3635 struct nfs4_closedata *calldata = data;
3636 struct nfs4_state_owner *sp = calldata->state->owner;
3637 struct super_block *sb = calldata->state->inode->i_sb;
3638
3639 if (calldata->lr.roc)
3640 pnfs_roc_release(&calldata->lr.arg, &calldata->lr.res,
3641 calldata->res.lr_ret);
3642 nfs4_put_open_state(calldata->state);
3643 nfs_free_seqid(calldata->arg.seqid);
3644 nfs4_put_state_owner(sp);
3645 nfs_sb_deactive(sb);
3646 kfree(calldata);
3647 }
3648
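/*
 * rpc_call_done callback: processes the CLOSE/OPEN_DOWNGRADE reply,
 * handling layoutreturn errors, stateid races with OPEN, and revoked or
 * expired stateids, restarting the call where necessary.
 */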
3649 static void nfs4_close_done(struct rpc_task *task, void *data)
3650 {
3651 struct nfs4_closedata *calldata = data;
3652 struct nfs4_state *state = calldata->state;
3653 struct nfs_server *server = NFS_SERVER(calldata->inode);
3654 nfs4_stateid *res_stateid = NULL;
3655 struct nfs4_exception exception = {
3656 .state = state,
3657 .inode = calldata->inode,
3658 .stateid = &calldata->arg.stateid,
3659 };
3660
3661 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
3662 return;
3663 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
3664
3665 /* Handle Layoutreturn errors */
3666 if (pnfs_roc_done(task, &calldata->arg.lr_args, &calldata->res.lr_res,
3667 &calldata->res.lr_ret) == -EAGAIN)
3668 goto out_restart;
3669
3670 /* hmm. we are done with the inode, and in the process of freeing
3671 * the state_owner. we keep this around to process errors
3672 */
3673 switch (task->tk_status) {
3674 case 0:
3675 res_stateid = &calldata->res.stateid;
3676 renew_lease(server, calldata->timestamp);
3677 break;
3678 case -NFS4ERR_ACCESS:
3679 if (calldata->arg.bitmask != NULL) {
3680 calldata->arg.bitmask = NULL;
3681 calldata->res.fattr = NULL;
3682 goto out_restart;
3683
3684 }
3685 break;
3686 case -NFS4ERR_OLD_STATEID:
3687 /* Did we race with OPEN? */
3688 if (nfs4_refresh_open_old_stateid(&calldata->arg.stateid,
3689 state))
3690 goto out_restart;
3691 goto out_release;
3692 case -NFS4ERR_ADMIN_REVOKED:
3693 case -NFS4ERR_STALE_STATEID:
3694 case -NFS4ERR_EXPIRED:
3695 nfs4_free_revoked_stateid(server,
3696 &calldata->arg.stateid,
3697 task->tk_msg.rpc_cred);
3698 fallthrough;
3699 case -NFS4ERR_BAD_STATEID:
3700 if (calldata->arg.fmode == 0)
3701 break;
3702 fallthrough;
3703 default:
3704 task->tk_status = nfs4_async_handle_exception(task,
3705 server, task->tk_status, &exception);
3706 if (exception.retry)
3707 goto out_restart;
3708 }
3709 nfs_clear_open_stateid(state, &calldata->arg.stateid,
3710 res_stateid, calldata->arg.fmode);
3711 out_release:
3712 task->tk_status = 0;
3713 nfs_release_seqid(calldata->arg.seqid);
3714 nfs_refresh_inode(calldata->inode, &calldata->fattr);
3715 dprintk("%s: ret = %d\n", __func__, task->tk_status);
3716 return;
3717 out_restart:
3718 task->tk_status = 0;
3719 rpc_restart_call_prepare(task);
3720 goto out_release;
3721 }
3722
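/*
 * rpc_call_prepare callback: works out which open modes are still in use
 * and whether to send a full CLOSE, an OPEN_DOWNGRADE, or nothing at all,
 * then refreshes the stateid and the cache-consistency bitmask before the
 * RPC is transmitted.
 */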
3723 static void nfs4_close_prepare(struct rpc_task *task, void *data)
3724 {
3725 struct nfs4_closedata *calldata = data;
3726 struct nfs4_state *state = calldata->state;
3727 struct inode *inode = calldata->inode;
3728 struct nfs_server *server = NFS_SERVER(inode);
3729 struct pnfs_layout_hdr *lo;
3730 bool is_rdonly, is_wronly, is_rdwr;
3731 int call_close = 0;
3732
3733 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
3734 goto out_wait;
3735
3736 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
3737 spin_lock(&state->owner->so_lock);
3738 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
3739 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
3740 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
3741 /* Calculate the change in open mode */
3742 calldata->arg.fmode = 0;
3743 if (state->n_rdwr == 0) {
3744 if (state->n_rdonly == 0)
3745 call_close |= is_rdonly;
3746 else if (is_rdonly)
3747 calldata->arg.fmode |= FMODE_READ;
3748 if (state->n_wronly == 0)
3749 call_close |= is_wronly;
3750 else if (is_wronly)
3751 calldata->arg.fmode |= FMODE_WRITE;
3752 if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
3753 call_close |= is_rdwr;
3754 } else if (is_rdwr)
3755 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
3756
3757 nfs4_sync_open_stateid(&calldata->arg.stateid, state);
3758 if (!nfs4_valid_open_stateid(state))
3759 call_close = 0;
3760 spin_unlock(&state->owner->so_lock);
3761
3762 if (!call_close) {
3763 /* Note: exit _without_ calling nfs4_close_done */
3764 goto out_no_action;
3765 }
3766
3767 if (!calldata->lr.roc && nfs4_wait_on_layoutreturn(inode, task)) {
3768 nfs_release_seqid(calldata->arg.seqid);
3769 goto out_wait;
3770 }
3771
3772 lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL;
3773 if (lo && !pnfs_layout_is_valid(lo)) {
3774 calldata->arg.lr_args = NULL;
3775 calldata->res.lr_res = NULL;
3776 }
3777
3778 if (calldata->arg.fmode == 0)
3779 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
3780
3781 if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) {
3782 /* Close-to-open cache consistency revalidation */
3783 if (!nfs4_have_delegation(inode, FMODE_READ, 0)) {
3784 nfs4_bitmask_set(calldata->arg.bitmask_store,
3785 server->cache_consistency_bitmask,
3786 inode, 0);
3787 calldata->arg.bitmask = calldata->arg.bitmask_store;
3788 } else
3789 calldata->arg.bitmask = NULL;
3790 }
3791
3792 calldata->arg.share_access =
3793 nfs4_fmode_to_share_access(calldata->arg.fmode);
3794
3795 if (calldata->res.fattr == NULL)
3796 calldata->arg.bitmask = NULL;
3797 else if (calldata->arg.bitmask == NULL)
3798 calldata->res.fattr = NULL;
3799 calldata->timestamp = jiffies;
3800 if (nfs4_setup_sequence(NFS_SERVER(inode)->nfs_client,
3801 &calldata->arg.seq_args,
3802 &calldata->res.seq_res,
3803 task) != 0)
3804 nfs_release_seqid(calldata->arg.seqid);
3805 return;
3806 out_no_action:
3807 task->tk_action = NULL;
3808 out_wait:
3809 nfs4_sequence_done(task, &calldata->res.seq_res);
3810 }
3811
3812 static const struct rpc_call_ops nfs4_close_ops = {
3813 .rpc_call_prepare = nfs4_close_prepare,
3814 .rpc_call_done = nfs4_close_done,
3815 .rpc_release = nfs4_free_closedata,
3816 };
3817
3818 /*
3819 * It is possible for data to be read/written from a mem-mapped file
3820 * after the sys_close call (which hits the vfs layer as a flush).
3821 * This means that we can't safely call nfsv4 close on a file until
3822 * the inode is cleared. This in turn means that we are not good
3823 * NFSv4 citizens - we do not tell the server to update the file's
3824 * share state even when we are done with one of the three share
3825 * stateids in the inode.
3826 *
3827 * NOTE: Caller must be holding the sp->so_owner semaphore!
3828 */
3829 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
3830 {
3831 struct nfs_server *server = NFS_SERVER(state->inode);
3832 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
3833 struct nfs4_closedata *calldata;
3834 struct nfs4_state_owner *sp = state->owner;
3835 struct rpc_task *task;
3836 struct rpc_message msg = {
3837 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
3838 .rpc_cred = state->owner->so_cred,
3839 };
3840 struct rpc_task_setup task_setup_data = {
3841 .rpc_client = server->client,
3842 .rpc_message = &msg,
3843 .callback_ops = &nfs4_close_ops,
3844 .workqueue = nfsiod_workqueue,
3845 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
3846 };
3847 int status = -ENOMEM;
3848
3849 if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE))
3850 task_setup_data.flags |= RPC_TASK_MOVEABLE;
3851
3852 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
3853 &task_setup_data.rpc_client, &msg);
3854
3855 calldata = kzalloc(sizeof(*calldata), gfp_mask);
3856 if (calldata == NULL)
3857 goto out;
3858 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1, 0);
3859 calldata->inode = state->inode;
3860 calldata->state = state;
3861 calldata->arg.fh = NFS_FH(state->inode);
3862 if (!nfs4_copy_open_stateid(&calldata->arg.stateid, state))
3863 goto out_free_calldata;
3864 /* Serialization for the sequence id */
3865 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
3866 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
3867 if (IS_ERR(calldata->arg.seqid))
3868 goto out_free_calldata;
3869 nfs_fattr_init(&calldata->fattr);
3870 calldata->arg.fmode = 0;
3871 calldata->lr.arg.ld_private = &calldata->lr.ld_private;
3872 calldata->res.fattr = &calldata->fattr;
3873 calldata->res.seqid = calldata->arg.seqid;
3874 calldata->res.server = server;
3875 calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
3876 calldata->lr.roc = pnfs_roc(state->inode,
3877 &calldata->lr.arg, &calldata->lr.res, msg.rpc_cred);
3878 if (calldata->lr.roc) {
3879 calldata->arg.lr_args = &calldata->lr.arg;
3880 calldata->res.lr_res = &calldata->lr.res;
3881 }
3882 nfs_sb_active(calldata->inode->i_sb);
3883
3884 msg.rpc_argp = &calldata->arg;
3885 msg.rpc_resp = &calldata->res;
3886 task_setup_data.callback_data = calldata;
3887 task = rpc_run_task(&task_setup_data);
3888 if (IS_ERR(task))
3889 return PTR_ERR(task);
3890 status = 0;
3891 if (wait)
3892 status = rpc_wait_for_completion_task(task);
3893 rpc_put_task(task);
3894 return status;
3895 out_free_calldata:
3896 kfree(calldata);
3897 out:
3898 nfs4_put_open_state(state);
3899 nfs4_put_state_owner(sp);
3900 return status;
3901 }
3902
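/*
 * Open (and possibly create) a file by name as part of an atomic open.
 * Returns the inode of the opened file, or an ERR_PTR on failure.
 */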
3903 static struct inode *
3904 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
3905 int open_flags, struct iattr *attr, int *opened)
3906 {
3907 struct nfs4_state *state;
3908 struct nfs4_label l, *label;
3909
3910 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
3911
3912 /* Protect against concurrent sillydeletes */
3913 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
3914
3915 nfs4_label_release_security(label);
3916
3917 if (IS_ERR(state))
3918 return ERR_CAST(state);
3919 return state->inode;
3920 }
3921
3922 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
3923 {
3924 struct dentry *dentry = ctx->dentry;
3925 if (ctx->state == NULL)
3926 return;
3927 if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
3928 nfs4_inode_set_return_delegation_on_close(d_inode(dentry));
3929 if (is_sync)
3930 nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx));
3931 else
3932 nfs4_close_state(ctx->state, _nfs4_ctx_to_openmode(ctx));
3933 }
3934
3935 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
3936 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
3937 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_OPEN_ARGUMENTS - 1UL)
3938
3939 #define FATTR4_WORD2_NFS42_TIME_DELEG_MASK \
3940 (FATTR4_WORD2_TIME_DELEG_MODIFY|FATTR4_WORD2_TIME_DELEG_ACCESS)
3941 static bool nfs4_server_delegtime_capable(struct nfs4_server_caps_res *res)
3942 {
3943 u32 share_access_want = res->open_caps.oa_share_access_want[0];
3944 u32 attr_bitmask = res->attr_bitmask[2];
3945
3946 return (share_access_want & NFS4_SHARE_WANT_DELEG_TIMESTAMPS) &&
3947 ((attr_bitmask & FATTR4_WORD2_NFS42_TIME_DELEG_MASK) ==
3948 FATTR4_WORD2_NFS42_TIME_DELEG_MASK);
3949 }
3950
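/*
 * Query the server's supported attributes and other static capabilities
 * (ACLs, hard links, symlinks, case handling, security labels, ...) and
 * record them in the nfs_server, along with the bitmasks used for later
 * GETATTR, cache-consistency and exclusive-create requests.
 */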
3951 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3952 {
3953 u32 minorversion = server->nfs_client->cl_minorversion;
3954 u32 bitmask[3] = {
3955 [0] = FATTR4_WORD0_SUPPORTED_ATTRS,
3956 };
3957 struct nfs4_server_caps_arg args = {
3958 .fhandle = fhandle,
3959 .bitmask = bitmask,
3960 };
3961 struct nfs4_server_caps_res res = {};
3962 struct rpc_message msg = {
3963 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
3964 .rpc_argp = &args,
3965 .rpc_resp = &res,
3966 };
3967 int status;
3968 int i;
3969
3970 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS |
3971 FATTR4_WORD0_FH_EXPIRE_TYPE |
3972 FATTR4_WORD0_LINK_SUPPORT |
3973 FATTR4_WORD0_SYMLINK_SUPPORT |
3974 FATTR4_WORD0_ACLSUPPORT |
3975 FATTR4_WORD0_CASE_INSENSITIVE |
3976 FATTR4_WORD0_CASE_PRESERVING;
3977 if (minorversion)
3978 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT;
3979 if (minorversion > 1)
3980 bitmask[2] |= FATTR4_WORD2_OPEN_ARGUMENTS;
3981
3982 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3983 if (status == 0) {
3984 bitmask[0] = (FATTR4_WORD0_SUPPORTED_ATTRS |
3985 FATTR4_WORD0_FH_EXPIRE_TYPE |
3986 FATTR4_WORD0_LINK_SUPPORT |
3987 FATTR4_WORD0_SYMLINK_SUPPORT |
3988 FATTR4_WORD0_ACLSUPPORT |
3989 FATTR4_WORD0_CASE_INSENSITIVE |
3990 FATTR4_WORD0_CASE_PRESERVING) &
3991 res.attr_bitmask[0];
3992 /* Sanity check the server answers */
3993 switch (minorversion) {
3994 case 0:
3995 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK;
3996 res.attr_bitmask[2] = 0;
3997 break;
3998 case 1:
3999 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK;
4000 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT &
4001 res.attr_bitmask[2];
4002 break;
4003 case 2:
4004 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
4005 bitmask[2] = (FATTR4_WORD2_SUPPATTR_EXCLCREAT |
4006 FATTR4_WORD2_OPEN_ARGUMENTS) &
4007 res.attr_bitmask[2];
4008 }
4009 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
4010 server->caps &= ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS |
4011 NFS_CAP_SYMLINKS| NFS_CAP_SECURITY_LABEL);
4012 server->fattr_valid = NFS_ATTR_FATTR_V4;
4013 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
4014 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
4015 server->caps |= NFS_CAP_ACLS;
4016 if (res.has_links != 0)
4017 server->caps |= NFS_CAP_HARDLINKS;
4018 if (res.has_symlinks != 0)
4019 server->caps |= NFS_CAP_SYMLINKS;
4020 if (res.case_insensitive)
4021 server->caps |= NFS_CAP_CASE_INSENSITIVE;
4022 if (res.case_preserving)
4023 server->caps |= NFS_CAP_CASE_PRESERVING;
4024 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
4025 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
4026 server->caps |= NFS_CAP_SECURITY_LABEL;
4027 #endif
4028 if (res.attr_bitmask[0] & FATTR4_WORD0_FS_LOCATIONS)
4029 server->caps |= NFS_CAP_FS_LOCATIONS;
4030 if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID))
4031 server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID;
4032 if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE))
4033 server->fattr_valid &= ~NFS_ATTR_FATTR_MODE;
4034 if (!(res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS))
4035 server->fattr_valid &= ~NFS_ATTR_FATTR_NLINK;
4036 if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER))
4037 server->fattr_valid &= ~(NFS_ATTR_FATTR_OWNER |
4038 NFS_ATTR_FATTR_OWNER_NAME);
4039 if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP))
4040 server->fattr_valid &= ~(NFS_ATTR_FATTR_GROUP |
4041 NFS_ATTR_FATTR_GROUP_NAME);
4042 if (!(res.attr_bitmask[1] & FATTR4_WORD1_SPACE_USED))
4043 server->fattr_valid &= ~NFS_ATTR_FATTR_SPACE_USED;
4044 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS))
4045 server->fattr_valid &= ~NFS_ATTR_FATTR_ATIME;
4046 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA))
4047 server->fattr_valid &= ~NFS_ATTR_FATTR_CTIME;
4048 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY))
4049 server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME;
4050 memcpy(server->attr_bitmask_nl, res.attr_bitmask,
4051 sizeof(server->attr_bitmask));
4052 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
4053
4054 if (res.open_caps.oa_share_access_want[0] &
4055 NFS4_SHARE_WANT_OPEN_XOR_DELEGATION)
4056 server->caps |= NFS_CAP_OPEN_XOR;
4057 if (nfs4_server_delegtime_capable(&res))
4058 server->caps |= NFS_CAP_DELEGTIME;
4059
4060 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
4061 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
4062 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
4063 server->cache_consistency_bitmask[2] = 0;
4064
4065 /* Avoid a regression due to buggy server */
4066 for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++)
4067 res.exclcreat_bitmask[i] &= res.attr_bitmask[i];
4068 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask,
4069 sizeof(server->exclcreat_bitmask));
4070
4071 server->acl_bitmask = res.acl_bitmask;
4072 server->fh_expire_type = res.fh_expire_type;
4073 }
4074
4075 return status;
4076 }
4077
4078 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
4079 {
4080 struct nfs4_exception exception = {
4081 .interruptible = true,
4082 };
4083 int err;
4084
4085 nfs4_server_set_init_caps(server);
4086 do {
4087 err = nfs4_handle_exception(server,
4088 _nfs4_server_capabilities(server, fhandle),
4089 &exception);
4090 } while (exception.retry);
4091 return err;
4092 }
4093
4094 static void test_fs_location_for_trunking(struct nfs4_fs_location *location,
4095 struct nfs_client *clp,
4096 struct nfs_server *server)
4097 {
4098 int i;
4099
4100 for (i = 0; i < location->nservers; i++) {
4101 struct nfs4_string *srv_loc = &location->servers[i];
4102 struct sockaddr_storage addr;
4103 size_t addrlen;
4104 struct xprt_create xprt_args = {
4105 .ident = 0,
4106 .net = clp->cl_net,
4107 };
4108 struct nfs4_add_xprt_data xprtdata = {
4109 .clp = clp,
4110 };
4111 struct rpc_add_xprt_test rpcdata = {
4112 .add_xprt_test = clp->cl_mvops->session_trunk,
4113 .data = &xprtdata,
4114 };
4115 char *servername = NULL;
4116
4117 if (!srv_loc->len)
4118 continue;
4119
4120 addrlen = nfs_parse_server_name(srv_loc->data, srv_loc->len,
4121 &addr, sizeof(addr),
4122 clp->cl_net, server->port);
4123 if (!addrlen)
4124 return;
4125 xprt_args.dstaddr = (struct sockaddr *)&addr;
4126 xprt_args.addrlen = addrlen;
4127 servername = kmalloc(srv_loc->len + 1, GFP_KERNEL);
4128 if (!servername)
4129 return;
4130 memcpy(servername, srv_loc->data, srv_loc->len);
4131 servername[srv_loc->len] = '\0';
4132 xprt_args.servername = servername;
4133
4134 xprtdata.cred = nfs4_get_clid_cred(clp);
4135 rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
4136 rpc_clnt_setup_test_and_add_xprt,
4137 &rpcdata);
4138 if (xprtdata.cred)
4139 put_cred(xprtdata.cred);
4140 kfree(servername);
4141 }
4142 }
4143
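/* Compare two fs_locations pathnames component by component. */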
4144 static bool _is_same_nfs4_pathname(struct nfs4_pathname *path1,
4145 struct nfs4_pathname *path2)
4146 {
4147 int i;
4148
4149 if (path1->ncomponents != path2->ncomponents)
4150 return false;
4151 for (i = 0; i < path1->ncomponents; i++) {
4152 if (path1->components[i].len != path2->components[i].len)
4153 return false;
4154 if (memcmp(path1->components[i].data, path2->components[i].data,
4155 path1->components[i].len))
4156 return false;
4157 }
4158 return true;
4159 }
4160
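/*
 * Fetch the fs_locations list for @fhandle and test every location whose
 * root path matches our own for session trunking, adding any usable
 * transports to the client.
 */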
4161 static int _nfs4_discover_trunking(struct nfs_server *server,
4162 struct nfs_fh *fhandle)
4163 {
4164 struct nfs4_fs_locations *locations = NULL;
4165 struct page *page;
4166 const struct cred *cred;
4167 struct nfs_client *clp = server->nfs_client;
4168 const struct nfs4_state_maintenance_ops *ops =
4169 clp->cl_mvops->state_renewal_ops;
4170 int status = -ENOMEM, i;
4171
4172 cred = ops->get_state_renewal_cred(clp);
4173 if (cred == NULL) {
4174 cred = nfs4_get_clid_cred(clp);
4175 if (cred == NULL)
4176 return -ENOKEY;
4177 }
4178
4179 page = alloc_page(GFP_KERNEL);
4180 if (!page)
4181 goto out_put_cred;
4182 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
4183 if (!locations)
4184 goto out_free;
4185 locations->fattr = nfs_alloc_fattr();
4186 if (!locations->fattr)
4187 goto out_free_2;
4188
4189 status = nfs4_proc_get_locations(server, fhandle, locations, page,
4190 cred);
4191 if (status)
4192 goto out_free_3;
4193
4194 for (i = 0; i < locations->nlocations; i++) {
4195 if (!_is_same_nfs4_pathname(&locations->fs_path,
4196 &locations->locations[i].rootpath))
4197 continue;
4198 test_fs_location_for_trunking(&locations->locations[i], clp,
4199 server);
4200 }
4201 out_free_3:
4202 kfree(locations->fattr);
4203 out_free_2:
4204 kfree(locations);
4205 out_free:
4206 __free_page(page);
4207 out_put_cred:
4208 put_cred(cred);
4209 return status;
4210 }
4211
4212 static int nfs4_discover_trunking(struct nfs_server *server,
4213 struct nfs_fh *fhandle)
4214 {
4215 struct nfs4_exception exception = {
4216 .interruptible = true,
4217 };
4218 struct nfs_client *clp = server->nfs_client;
4219 int err = 0;
4220
4221 if (!nfs4_has_session(clp))
4222 goto out;
4223 do {
4224 err = nfs4_handle_exception(server,
4225 _nfs4_discover_trunking(server, fhandle),
4226 &exception);
4227 } while (exception.retry);
4228 out:
4229 return err;
4230 }
4231
4232 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
4233 struct nfs_fsinfo *info)
4234 {
4235 u32 bitmask[3];
4236 struct nfs4_lookup_root_arg args = {
4237 .bitmask = bitmask,
4238 };
4239 struct nfs4_lookup_res res = {
4240 .server = server,
4241 .fattr = info->fattr,
4242 .fh = fhandle,
4243 };
4244 struct rpc_message msg = {
4245 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
4246 .rpc_argp = &args,
4247 .rpc_resp = &res,
4248 };
4249
4250 bitmask[0] = nfs4_fattr_bitmap[0];
4251 bitmask[1] = nfs4_fattr_bitmap[1];
4252 /*
4253 * Process the label in the upcoming getfattr
4254 */
4255 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL;
4256
4257 nfs_fattr_init(info->fattr);
4258 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4259 }
4260
4261 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
4262 struct nfs_fsinfo *info)
4263 {
4264 struct nfs4_exception exception = {
4265 .interruptible = true,
4266 };
4267 int err;
4268 do {
4269 err = _nfs4_lookup_root(server, fhandle, info);
4270 trace_nfs4_lookup_root(server, fhandle, info->fattr, err);
4271 switch (err) {
4272 case 0:
4273 case -NFS4ERR_WRONGSEC:
4274 goto out;
4275 default:
4276 err = nfs4_handle_exception(server, err, &exception);
4277 }
4278 } while (exception.retry);
4279 out:
4280 return err;
4281 }
4282
4283 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
4284 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
4285 {
4286 struct rpc_auth_create_args auth_args = {
4287 .pseudoflavor = flavor,
4288 };
4289 struct rpc_auth *auth;
4290
4291 auth = rpcauth_create(&auth_args, server->client);
4292 if (IS_ERR(auth))
4293 return -EACCES;
4294 return nfs4_lookup_root(server, fhandle, info);
4295 }
4296
4297 /*
4298 * Retry pseudoroot lookup with various security flavors. We do this when:
4299 *
4300 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC
4301 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation
4302 *
4303 * Returns zero on success, or a negative NFS4ERR value, or a
4304 * negative errno value.
4305 */
4306 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
4307 struct nfs_fsinfo *info)
4308 {
4309 /* Per 3530bis 15.33.5 */
4310 static const rpc_authflavor_t flav_array[] = {
4311 RPC_AUTH_GSS_KRB5P,
4312 RPC_AUTH_GSS_KRB5I,
4313 RPC_AUTH_GSS_KRB5,
4314 RPC_AUTH_UNIX, /* courtesy */
4315 RPC_AUTH_NULL,
4316 };
4317 int status = -EPERM;
4318 size_t i;
4319
4320 if (server->auth_info.flavor_len > 0) {
4321 /* try each flavor specified by user */
4322 for (i = 0; i < server->auth_info.flavor_len; i++) {
4323 status = nfs4_lookup_root_sec(server, fhandle, info,
4324 server->auth_info.flavors[i]);
4325 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
4326 continue;
4327 break;
4328 }
4329 } else {
4330 /* no flavors specified by user, try default list */
4331 for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
4332 status = nfs4_lookup_root_sec(server, fhandle, info,
4333 flav_array[i]);
4334 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
4335 continue;
4336 break;
4337 }
4338 }
4339
4340 /*
4341 * -EACCES could mean that the user doesn't have correct permissions
4342 * to access the mount. It could also mean that we tried to mount
4343 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
4344 * existing mount programs don't handle -EACCES very well so it should
4345 * be mapped to -EPERM instead.
4346 */
4347 if (status == -EACCES)
4348 status = -EPERM;
4349 return status;
4350 }
4351
4352 /**
4353 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
4354 * @server: initialized nfs_server handle
4355 * @fhandle: we fill in the pseudo-fs root file handle
4356 * @info: we fill in an FSINFO struct
4357 * @auth_probe: probe the auth flavours
4358 *
4359 * Returns zero on success, or a negative errno.
4360 */
4361 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
4362 struct nfs_fsinfo *info,
4363 bool auth_probe)
4364 {
4365 int status = 0;
4366
4367 if (!auth_probe)
4368 status = nfs4_lookup_root(server, fhandle, info);
4369
4370 if (auth_probe || status == NFS4ERR_WRONGSEC)
4371 status = server->nfs_client->cl_mvops->find_root_sec(server,
4372 fhandle, info);
4373
4374 if (status == 0)
4375 status = nfs4_server_capabilities(server, fhandle);
4376 if (status == 0)
4377 status = nfs4_do_fsinfo(server, fhandle, info);
4378
4379 return nfs4_map_errors(status);
4380 }
4381
4382 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
4383 struct nfs_fsinfo *info)
4384 {
4385 int error;
4386 struct nfs_fattr *fattr = info->fattr;
4387
4388 error = nfs4_server_capabilities(server, mntfh);
4389 if (error < 0) {
4390 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
4391 return error;
4392 }
4393
4394 error = nfs4_proc_getattr(server, mntfh, fattr, NULL);
4395 if (error < 0) {
4396 dprintk("nfs4_get_root: getattr error = %d\n", -error);
4397 goto out;
4398 }
4399
4400 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
4401 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
4402 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
4403
4404 out:
4405 return error;
4406 }
4407
4408 /*
4409 * Get locations and (maybe) other attributes of a referral.
4410 * Note that we'll actually follow the referral later when
4411 * we detect fsid mismatch in inode revalidation
4412 */
4413 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
4414 const struct qstr *name, struct nfs_fattr *fattr,
4415 struct nfs_fh *fhandle)
4416 {
4417 int status = -ENOMEM;
4418 struct page *page = NULL;
4419 struct nfs4_fs_locations *locations = NULL;
4420
4421 page = alloc_page(GFP_KERNEL);
4422 if (page == NULL)
4423 goto out;
4424 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
4425 if (locations == NULL)
4426 goto out;
4427
4428 locations->fattr = fattr;
4429
4430 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
4431 if (status != 0)
4432 goto out;
4433
4434 /*
4435 * If the fsid didn't change, this is a migration event, not a
4436 * referral. Cause us to drop into the exception handler, which
4437 * will kick off migration recovery.
4438 */
4439 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &fattr->fsid)) {
4440 dprintk("%s: server did not return a different fsid for"
4441 " a referral at %s\n", __func__, name->name);
4442 status = -NFS4ERR_MOVED;
4443 goto out;
4444 }
4445 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
4446 nfs_fixup_referral_attributes(fattr);
4447 memset(fhandle, 0, sizeof(struct nfs_fh));
4448 out:
4449 if (page)
4450 __free_page(page);
4451 kfree(locations);
4452 return status;
4453 }
4454
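/*
 * Issue a single GETATTR RPC for @fhandle, adjusting the attribute
 * bitmask to the caches that actually need revalidating for @inode.
 */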
4455 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
4456 struct nfs_fattr *fattr, struct inode *inode)
4457 {
4458 __u32 bitmask[NFS4_BITMASK_SZ];
4459 struct nfs4_getattr_arg args = {
4460 .fh = fhandle,
4461 .bitmask = bitmask,
4462 };
4463 struct nfs4_getattr_res res = {
4464 .fattr = fattr,
4465 .server = server,
4466 };
4467 struct rpc_message msg = {
4468 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
4469 .rpc_argp = &args,
4470 .rpc_resp = &res,
4471 };
4472 unsigned short task_flags = 0;
4473
4474 if (nfs4_has_session(server->nfs_client))
4475 task_flags = RPC_TASK_MOVEABLE;
4476
4477 /* Is this an attribute revalidation, subject to softreval? */
4478 if (inode && (server->flags & NFS_MOUNT_SOFTREVAL))
4479 task_flags |= RPC_TASK_TIMEOUT;
4480
4481 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label), inode, 0);
4482 nfs_fattr_init(fattr);
4483 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
4484 return nfs4_do_call_sync(server->client, server, &msg,
4485 &args.seq_args, &res.seq_res, task_flags);
4486 }
4487
4488 int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
4489 struct nfs_fattr *fattr, struct inode *inode)
4490 {
4491 struct nfs4_exception exception = {
4492 .interruptible = true,
4493 };
4494 int err;
4495 do {
4496 err = _nfs4_proc_getattr(server, fhandle, fattr, inode);
4497 trace_nfs4_getattr(server, fhandle, fattr, err);
4498 err = nfs4_handle_exception(server, err,
4499 &exception);
4500 } while (exception.retry);
4501 return err;
4502 }
4503
4504 /*
4505 * The file is not closed if it is opened due to a request to change
4506 * the size of the file. The open call will not be needed once the
4507 * VFS layer lookup-intents are implemented.
4508 *
4509 * Close is called when the inode is destroyed.
4510 * If we haven't opened the file for O_WRONLY, we
4511 * need to in the size_change case to obtain a stateid.
4512 *
4513 * Got race?
4514 * Because OPEN is always done by name in nfsv4, it is
4515 * possible that we opened a different file by the same
4516 * name. We can recognize this race condition, but we
4517 * can't do anything about it besides returning an error.
4518 *
4519 * This will be fixed with VFS changes (lookup-intent).
4520 */
4521 static int
4522 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
4523 struct iattr *sattr)
4524 {
4525 struct inode *inode = d_inode(dentry);
4526 const struct cred *cred = NULL;
4527 struct nfs_open_context *ctx = NULL;
4528 int status;
4529
4530 if (pnfs_ld_layoutret_on_setattr(inode) &&
4531 sattr->ia_valid & ATTR_SIZE &&
4532 sattr->ia_size < i_size_read(inode))
4533 pnfs_commit_and_return_layout(inode);
4534
4535 nfs_fattr_init(fattr);
4536
4537 /* Deal with open(O_TRUNC) */
4538 if (sattr->ia_valid & ATTR_OPEN)
4539 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME);
4540
4541 /* Optimization: if the end result is no change, don't RPC */
4542 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
4543 return 0;
4544
4545 /* Search for an existing open(O_WRITE) file */
4546 if (sattr->ia_valid & ATTR_FILE) {
4547
4548 ctx = nfs_file_open_context(sattr->ia_file);
4549 if (ctx)
4550 cred = ctx->cred;
4551 }
4552
4553 /* Return any delegations if we're going to change ACLs */
4554 if ((sattr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
4555 nfs4_inode_make_writeable(inode);
4556
4557 status = nfs4_do_setattr(inode, cred, fattr, sattr, ctx, NULL);
4558 if (status == 0) {
4559 nfs_setattr_update_inode(inode, sattr, fattr);
4560 nfs_setsecurity(inode, fattr);
4561 }
4562 return status;
4563 }
4564
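/* Issue a single LOOKUP RPC for @name in directory @dir. */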
4565 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
4566 struct dentry *dentry, const struct qstr *name,
4567 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
4568 {
4569 struct nfs_server *server = NFS_SERVER(dir);
4570 int status;
4571 struct nfs4_lookup_arg args = {
4572 .bitmask = server->attr_bitmask,
4573 .dir_fh = NFS_FH(dir),
4574 .name = name,
4575 };
4576 struct nfs4_lookup_res res = {
4577 .server = server,
4578 .fattr = fattr,
4579 .fh = fhandle,
4580 };
4581 struct rpc_message msg = {
4582 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
4583 .rpc_argp = &args,
4584 .rpc_resp = &res,
4585 };
4586 unsigned short task_flags = 0;
4587
4588 if (nfs_server_capable(dir, NFS_CAP_MOVEABLE))
4589 task_flags = RPC_TASK_MOVEABLE;
4590
4591 /* Is this an attribute revalidation, subject to softreval? */
4592 if (nfs_lookup_is_soft_revalidate(dentry))
4593 task_flags |= RPC_TASK_TIMEOUT;
4594
4595 args.bitmask = nfs4_bitmask(server, fattr->label);
4596
4597 nfs_fattr_init(fattr);
4598
4599 dprintk("NFS call lookup %pd2\n", dentry);
4600 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
4601 status = nfs4_do_call_sync(clnt, server, &msg,
4602 &args.seq_args, &res.seq_res, task_flags);
4603 dprintk("NFS reply lookup: %d\n", status);
4604 return status;
4605 }
4606
4607 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
4608 {
4609 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
4610 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
4611 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
4612 fattr->nlink = 2;
4613 }
4614
4615 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
4616 struct dentry *dentry, const struct qstr *name,
4617 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
4618 {
4619 struct nfs4_exception exception = {
4620 .interruptible = true,
4621 };
4622 struct rpc_clnt *client = *clnt;
4623 int err;
4624 do {
4625 err = _nfs4_proc_lookup(client, dir, dentry, name, fhandle, fattr);
4626 trace_nfs4_lookup(dir, name, err);
4627 switch (err) {
4628 case -NFS4ERR_BADNAME:
4629 err = -ENOENT;
4630 goto out;
4631 case -NFS4ERR_MOVED:
4632 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
4633 if (err == -NFS4ERR_MOVED)
4634 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
4635 goto out;
4636 case -NFS4ERR_WRONGSEC:
4637 err = -EPERM;
4638 if (client != *clnt)
4639 goto out;
4640 client = nfs4_negotiate_security(client, dir, name);
4641 if (IS_ERR(client))
4642 return PTR_ERR(client);
4643
4644 exception.retry = 1;
4645 break;
4646 default:
4647 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
4648 }
4649 } while (exception.retry);
4650
4651 out:
4652 if (err == 0)
4653 *clnt = client;
4654 else if (client != *clnt)
4655 rpc_shutdown_client(client);
4656
4657 return err;
4658 }
4659
4660 static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry, const struct qstr *name,
4661 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
4662 {
4663 int status;
4664 struct rpc_clnt *client = NFS_CLIENT(dir);
4665
4666 status = nfs4_proc_lookup_common(&client, dir, dentry, name, fhandle, fattr);
4667 if (client != NFS_CLIENT(dir)) {
4668 rpc_shutdown_client(client);
4669 nfs_fixup_secinfo_attributes(fattr);
4670 }
4671 return status;
4672 }
4673
4674 struct rpc_clnt *
4675 nfs4_proc_lookup_mountpoint(struct inode *dir, struct dentry *dentry,
4676 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
4677 {
4678 struct rpc_clnt *client = NFS_CLIENT(dir);
4679 int status;
4680
4681 status = nfs4_proc_lookup_common(&client, dir, dentry, &dentry->d_name,
4682 fhandle, fattr);
4683 if (status < 0)
4684 return ERR_PTR(status);
4685 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
4686 }
4687
4688 static int _nfs4_proc_lookupp(struct inode *inode,
4689 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
4690 {
4691 struct rpc_clnt *clnt = NFS_CLIENT(inode);
4692 struct nfs_server *server = NFS_SERVER(inode);
4693 int status;
4694 struct nfs4_lookupp_arg args = {
4695 .bitmask = server->attr_bitmask,
4696 .fh = NFS_FH(inode),
4697 };
4698 struct nfs4_lookupp_res res = {
4699 .server = server,
4700 .fattr = fattr,
4701 .fh = fhandle,
4702 };
4703 struct rpc_message msg = {
4704 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUPP],
4705 .rpc_argp = &args,
4706 .rpc_resp = &res,
4707 };
4708 unsigned short task_flags = 0;
4709
4710 if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL)
4711 task_flags |= RPC_TASK_TIMEOUT;
4712
4713 args.bitmask = nfs4_bitmask(server, fattr->label);
4714
4715 nfs_fattr_init(fattr);
4716
4717 dprintk("NFS call lookupp ino=0x%lx\n", inode->i_ino);
4718 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
4719 &res.seq_res, task_flags);
4720 dprintk("NFS reply lookupp: %d\n", status);
4721 return status;
4722 }
4723
4724 static int nfs4_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle,
4725 struct nfs_fattr *fattr)
4726 {
4727 struct nfs4_exception exception = {
4728 .interruptible = true,
4729 };
4730 int err;
4731 do {
4732 err = _nfs4_proc_lookupp(inode, fhandle, fattr);
4733 trace_nfs4_lookupp(inode, err);
4734 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4735 &exception);
4736 } while (exception.retry);
4737 return err;
4738 }
4739
4740 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry,
4741 const struct cred *cred)
4742 {
4743 struct nfs_server *server = NFS_SERVER(inode);
4744 struct nfs4_accessargs args = {
4745 .fh = NFS_FH(inode),
4746 .access = entry->mask,
4747 };
4748 struct nfs4_accessres res = {
4749 .server = server,
4750 };
4751 struct rpc_message msg = {
4752 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
4753 .rpc_argp = &args,
4754 .rpc_resp = &res,
4755 .rpc_cred = cred,
4756 };
4757 int status = 0;
4758
4759 if (!nfs4_have_delegation(inode, FMODE_READ, 0)) {
4760 res.fattr = nfs_alloc_fattr();
4761 if (res.fattr == NULL)
4762 return -ENOMEM;
4763 args.bitmask = server->cache_consistency_bitmask;
4764 }
4765 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4766 if (!status) {
4767 nfs_access_set_mask(entry, res.access);
4768 if (res.fattr)
4769 nfs_refresh_inode(inode, res.fattr);
4770 }
4771 nfs_free_fattr(res.fattr);
4772 return status;
4773 }
4774
4775 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry,
4776 const struct cred *cred)
4777 {
4778 struct nfs4_exception exception = {
4779 .interruptible = true,
4780 };
4781 int err;
4782 do {
4783 err = _nfs4_proc_access(inode, entry, cred);
4784 trace_nfs4_access(inode, err);
4785 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4786 &exception);
4787 } while (exception.retry);
4788 return err;
4789 }
4790
4791 /*
4792 * TODO: For the time being, we don't try to get any attributes
4793 * along with any of the zero-copy operations READ, READDIR,
4794 * READLINK, WRITE.
4795 *
4796 * In the case of the first three, we want to put the GETATTR
4797 * after the read-type operation -- this is because it is hard
4798 * to predict the length of a GETATTR response in v4, and thus
4799 * align the READ data correctly. This means that the GETATTR
4800 * may end up partially falling into the page cache, and we should
4801 * shift it into the 'tail' of the xdr_buf before processing.
4802 * To do this efficiently, we need to know the total length
4803 * of data received, which doesn't seem to be available outside
4804 * of the RPC layer.
4805 *
4806 * In the case of WRITE, we also want to put the GETATTR after
4807 * the operation -- in this case because we want to make sure
4808 * we get the post-operation mtime and size.
4809 *
4810 * Both of these changes to the XDR layer would in fact be quite
4811 * minor, but I decided to leave them for a subsequent patch.
4812 */
4813 static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
4814 unsigned int pgbase, unsigned int pglen)
4815 {
4816 struct nfs4_readlink args = {
4817 .fh = NFS_FH(inode),
4818 .pgbase = pgbase,
4819 .pglen = pglen,
4820 .pages = &page,
4821 };
4822 struct nfs4_readlink_res res;
4823 struct rpc_message msg = {
4824 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
4825 .rpc_argp = &args,
4826 .rpc_resp = &res,
4827 };
4828
4829 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
4830 }
4831
4832 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
4833 unsigned int pgbase, unsigned int pglen)
4834 {
4835 struct nfs4_exception exception = {
4836 .interruptible = true,
4837 };
4838 int err;
4839 do {
4840 err = _nfs4_proc_readlink(inode, page, pgbase, pglen);
4841 trace_nfs4_readlink(inode, err);
4842 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4843 &exception);
4844 } while (exception.retry);
4845 return err;
4846 }
4847
4848 /*
4849 * This is just for mknod. open(O_CREAT) will always do ->open_context().
4850 */
4851 static int
4852 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
4853 int flags)
4854 {
4855 struct nfs_server *server = NFS_SERVER(dir);
4856 struct nfs4_label l, *ilabel;
4857 struct nfs_open_context *ctx;
4858 struct nfs4_state *state;
4859 int status = 0;
4860
4861 ctx = alloc_nfs_open_context(dentry, FMODE_READ, NULL);
4862 if (IS_ERR(ctx))
4863 return PTR_ERR(ctx);
4864
4865 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
4866
4867 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
4868 sattr->ia_mode &= ~current_umask();
4869 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL);
4870 if (IS_ERR(state)) {
4871 status = PTR_ERR(state);
4872 goto out;
4873 }
4874 out:
4875 nfs4_label_release_security(ilabel);
4876 put_nfs_open_context(ctx);
4877 return status;
4878 }
4879
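/*
 * Issue a single REMOVE RPC and update the parent directory's change
 * attribute (and link count, when removing a directory).
 */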
4880 static int
4881 _nfs4_proc_remove(struct inode *dir, const struct qstr *name, u32 ftype)
4882 {
4883 struct nfs_server *server = NFS_SERVER(dir);
4884 struct nfs_removeargs args = {
4885 .fh = NFS_FH(dir),
4886 .name = *name,
4887 };
4888 struct nfs_removeres res = {
4889 .server = server,
4890 };
4891 struct rpc_message msg = {
4892 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
4893 .rpc_argp = &args,
4894 .rpc_resp = &res,
4895 };
4896 unsigned long timestamp = jiffies;
4897 int status;
4898
4899 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
4900 if (status == 0) {
4901 spin_lock(&dir->i_lock);
4902 /* Removing a directory decrements nlink in the parent */
4903 if (ftype == NF4DIR && dir->i_nlink > 2)
4904 nfs4_dec_nlink_locked(dir);
4905 nfs4_update_changeattr_locked(dir, &res.cinfo, timestamp,
4906 NFS_INO_INVALID_DATA);
4907 spin_unlock(&dir->i_lock);
4908 }
4909 return status;
4910 }
4911
4912 static int nfs4_proc_remove(struct inode *dir, struct dentry *dentry)
4913 {
4914 struct nfs4_exception exception = {
4915 .interruptible = true,
4916 };
4917 struct inode *inode = d_inode(dentry);
4918 int err;
4919
4920 if (inode) {
4921 if (inode->i_nlink == 1)
4922 nfs4_inode_return_delegation(inode);
4923 else
4924 nfs4_inode_make_writeable(inode);
4925 }
4926 do {
4927 err = _nfs4_proc_remove(dir, &dentry->d_name, NF4REG);
4928 trace_nfs4_remove(dir, &dentry->d_name, err);
4929 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4930 &exception);
4931 } while (exception.retry);
4932 return err;
4933 }
4934
4935 static int nfs4_proc_rmdir(struct inode *dir, const struct qstr *name)
4936 {
4937 struct nfs4_exception exception = {
4938 .interruptible = true,
4939 };
4940 int err;
4941
4942 do {
4943 err = _nfs4_proc_remove(dir, name, NF4DIR);
4944 trace_nfs4_remove(dir, name, err);
4945 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4946 &exception);
4947 } while (exception.retry);
4948 return err;
4949 }
4950
4951 static void nfs4_proc_unlink_setup(struct rpc_message *msg,
4952 struct dentry *dentry,
4953 struct inode *inode)
4954 {
4955 struct nfs_removeargs *args = msg->rpc_argp;
4956 struct nfs_removeres *res = msg->rpc_resp;
4957
4958 res->server = NFS_SB(dentry->d_sb);
4959 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
4960 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1, 0);
4961
4962 nfs_fattr_init(res->dir_attr);
4963
4964 if (inode) {
4965 nfs4_inode_return_delegation(inode);
4966 nfs_d_prune_case_insensitive_aliases(inode);
4967 }
4968 }
4969
4970 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
4971 {
4972 nfs4_setup_sequence(NFS_SB(data->dentry->d_sb)->nfs_client,
4973 &data->args.seq_args,
4974 &data->res.seq_res,
4975 task);
4976 }
4977
4978 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
4979 {
4980 struct nfs_unlinkdata *data = task->tk_calldata;
4981 struct nfs_removeres *res = &data->res;
4982
4983 if (!nfs4_sequence_done(task, &res->seq_res))
4984 return 0;
4985 if (nfs4_async_handle_error(task, res->server, NULL,
4986 &data->timeout) == -EAGAIN)
4987 return 0;
4988 if (task->tk_status == 0)
4989 nfs4_update_changeattr(dir, &res->cinfo,
4990 res->dir_attr->time_start,
4991 NFS_INO_INVALID_DATA);
4992 return 1;
4993 }
4994
4995 static void nfs4_proc_rename_setup(struct rpc_message *msg,
4996 struct dentry *old_dentry,
4997 struct dentry *new_dentry)
4998 {
4999 struct nfs_renameargs *arg = msg->rpc_argp;
5000 struct nfs_renameres *res = msg->rpc_resp;
5001 struct inode *old_inode = d_inode(old_dentry);
5002 struct inode *new_inode = d_inode(new_dentry);
5003
5004 if (old_inode)
5005 nfs4_inode_make_writeable(old_inode);
5006 if (new_inode)
5007 nfs4_inode_return_delegation(new_inode);
5008 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
5009 res->server = NFS_SB(old_dentry->d_sb);
5010 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1, 0);
5011 }
5012
5013 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
5014 {
5015 nfs4_setup_sequence(NFS_SERVER(data->old_dir)->nfs_client,
5016 &data->args.seq_args,
5017 &data->res.seq_res,
5018 task);
5019 }
5020
5021 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
5022 struct inode *new_dir)
5023 {
5024 struct nfs_renamedata *data = task->tk_calldata;
5025 struct nfs_renameres *res = &data->res;
5026
5027 if (!nfs4_sequence_done(task, &res->seq_res))
5028 return 0;
5029 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
5030 return 0;
5031
5032 if (task->tk_status == 0) {
5033 nfs_d_prune_case_insensitive_aliases(d_inode(data->old_dentry));
5034 if (new_dir != old_dir) {
5035 /* Note: If we moved a directory, nlink will change */
5036 nfs4_update_changeattr(old_dir, &res->old_cinfo,
5037 res->old_fattr->time_start,
5038 NFS_INO_INVALID_NLINK |
5039 NFS_INO_INVALID_DATA);
5040 nfs4_update_changeattr(new_dir, &res->new_cinfo,
5041 res->new_fattr->time_start,
5042 NFS_INO_INVALID_NLINK |
5043 NFS_INO_INVALID_DATA);
5044 } else
5045 nfs4_update_changeattr(old_dir, &res->old_cinfo,
5046 res->old_fattr->time_start,
5047 NFS_INO_INVALID_DATA);
5048 }
5049 return 1;
5050 }
5051
5052 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
5053 {
5054 struct nfs_server *server = NFS_SERVER(inode);
5055 __u32 bitmask[NFS4_BITMASK_SZ];
5056 struct nfs4_link_arg arg = {
5057 .fh = NFS_FH(inode),
5058 .dir_fh = NFS_FH(dir),
5059 .name = name,
5060 .bitmask = bitmask,
5061 };
5062 struct nfs4_link_res res = {
5063 .server = server,
5064 };
5065 struct rpc_message msg = {
5066 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
5067 .rpc_argp = &arg,
5068 .rpc_resp = &res,
5069 };
5070 int status = -ENOMEM;
5071
5072 res.fattr = nfs_alloc_fattr_with_label(server);
5073 if (res.fattr == NULL)
5074 goto out;
5075
5076 nfs4_inode_make_writeable(inode);
5077 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, res.fattr->label),
5078 inode,
5079 NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME);
5080 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
5081 if (!status) {
5082 nfs4_update_changeattr(dir, &res.cinfo, res.fattr->time_start,
5083 NFS_INO_INVALID_DATA);
5084 nfs4_inc_nlink(inode);
5085 status = nfs_post_op_update_inode(inode, res.fattr);
5086 if (!status)
5087 nfs_setsecurity(inode, res.fattr);
5088 }
5089
5090 out:
5091 nfs_free_fattr(res.fattr);
5092 return status;
5093 }
5094
5095 static int nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
5096 {
5097 struct nfs4_exception exception = {
5098 .interruptible = true,
5099 };
5100 int err;
5101 do {
5102 err = nfs4_handle_exception(NFS_SERVER(inode),
5103 _nfs4_proc_link(inode, dir, name),
5104 &exception);
5105 } while (exception.retry);
5106 return err;
5107 }
5108
5109 struct nfs4_createdata {
5110 struct rpc_message msg;
5111 struct nfs4_create_arg arg;
5112 struct nfs4_create_res res;
5113 struct nfs_fh fh;
5114 struct nfs_fattr fattr;
5115 };
5116
5117 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
5118 const struct qstr *name, struct iattr *sattr, u32 ftype)
5119 {
5120 struct nfs4_createdata *data;
5121
5122 data = kzalloc(sizeof(*data), GFP_KERNEL);
5123 if (data != NULL) {
5124 struct nfs_server *server = NFS_SERVER(dir);
5125
5126 data->fattr.label = nfs4_label_alloc(server, GFP_KERNEL);
5127 if (IS_ERR(data->fattr.label))
5128 goto out_free;
5129
5130 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
5131 data->msg.rpc_argp = &data->arg;
5132 data->msg.rpc_resp = &data->res;
5133 data->arg.dir_fh = NFS_FH(dir);
5134 data->arg.server = server;
5135 data->arg.name = name;
5136 data->arg.attrs = sattr;
5137 data->arg.ftype = ftype;
5138 data->arg.bitmask = nfs4_bitmask(server, data->fattr.label);
5139 data->arg.umask = current_umask();
5140 data->res.server = server;
5141 data->res.fh = &data->fh;
5142 data->res.fattr = &data->fattr;
5143 nfs_fattr_init(data->res.fattr);
5144 }
5145 return data;
5146 out_free:
5147 kfree(data);
5148 return NULL;
5149 }
5150
5151 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
5152 {
5153 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
5154 &data->arg.seq_args, &data->res.seq_res, 1);
5155 if (status == 0) {
5156 spin_lock(&dir->i_lock);
5157 nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo,
5158 data->res.fattr->time_start,
5159 NFS_INO_INVALID_DATA);
5160 spin_unlock(&dir->i_lock);
5161 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
5162 }
5163 return status;
5164 }
5165
5166 static struct dentry *nfs4_do_mkdir(struct inode *dir, struct dentry *dentry,
5167 struct nfs4_createdata *data, int *statusp)
5168 {
5169 struct dentry *ret;
5170
5171 *statusp = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
5172 &data->arg.seq_args, &data->res.seq_res, 1);
5173
5174 if (*statusp)
5175 return NULL;
5176
5177 spin_lock(&dir->i_lock);
5178 /* Creating a directory bumps nlink in the parent */
5179 nfs4_inc_nlink_locked(dir);
5180 nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo,
5181 data->res.fattr->time_start,
5182 NFS_INO_INVALID_DATA);
5183 spin_unlock(&dir->i_lock);
5184 ret = nfs_add_or_obtain(dentry, data->res.fh, data->res.fattr);
5185 if (!IS_ERR(ret))
5186 return ret;
5187 *statusp = PTR_ERR(ret);
5188 return NULL;
5189 }
5190
5191 static void nfs4_free_createdata(struct nfs4_createdata *data)
5192 {
5193 nfs4_label_free(data->fattr.label);
5194 kfree(data);
5195 }
5196
5197 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
5198 struct folio *folio, unsigned int len, struct iattr *sattr,
5199 struct nfs4_label *label)
5200 {
5201 struct page *page = &folio->page;
5202 struct nfs4_createdata *data;
5203 int status = -ENAMETOOLONG;
5204
5205 if (len > NFS4_MAXPATHLEN)
5206 goto out;
5207
5208 status = -ENOMEM;
5209 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
5210 if (data == NULL)
5211 goto out;
5212
5213 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
5214 data->arg.u.symlink.pages = &page;
5215 data->arg.u.symlink.len = len;
5216 data->arg.label = label;
5217
5218 status = nfs4_do_create(dir, dentry, data);
5219
5220 nfs4_free_createdata(data);
5221 out:
5222 return status;
5223 }
5224
5225 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
5226 struct folio *folio, unsigned int len, struct iattr *sattr)
5227 {
5228 struct nfs4_exception exception = {
5229 .interruptible = true,
5230 };
5231 struct nfs4_label l, *label;
5232 int err;
5233
5234 label = nfs4_label_init_security(dir, dentry, sattr, &l);
5235
5236 do {
5237 err = _nfs4_proc_symlink(dir, dentry, folio, len, sattr, label);
5238 trace_nfs4_symlink(dir, &dentry->d_name, err);
5239 err = nfs4_handle_exception(NFS_SERVER(dir), err,
5240 &exception);
5241 } while (exception.retry);
5242
5243 nfs4_label_release_security(label);
5244 return err;
5245 }
5246
5247 static struct dentry *_nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
5248 struct iattr *sattr,
5249 struct nfs4_label *label, int *statusp)
5250 {
5251 struct nfs4_createdata *data;
5252 struct dentry *ret = NULL;
5253
5254 *statusp = -ENOMEM;
5255 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
5256 if (data == NULL)
5257 goto out;
5258
5259 data->arg.label = label;
5260 ret = nfs4_do_mkdir(dir, dentry, data, statusp);
5261
5262 nfs4_free_createdata(data);
5263 out:
5264 return ret;
5265 }
5266
5267 static struct dentry *nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
5268 struct iattr *sattr)
5269 {
5270 struct nfs_server *server = NFS_SERVER(dir);
5271 struct nfs4_exception exception = {
5272 .interruptible = true,
5273 };
5274 struct nfs4_label l, *label;
5275 struct dentry *alias;
5276 int err;
5277
5278 label = nfs4_label_init_security(dir, dentry, sattr, &l);
5279
5280 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
5281 sattr->ia_mode &= ~current_umask();
5282 do {
5283 alias = _nfs4_proc_mkdir(dir, dentry, sattr, label, &err);
5284 trace_nfs4_mkdir(dir, &dentry->d_name, err);
5285 if (err)
5286 alias = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
5287 err,
5288 &exception));
5289 } while (exception.retry);
5290 nfs4_label_release_security(label);
5291
5292 return alias;
5293 }
5294
5295 static int _nfs4_proc_readdir(struct nfs_readdir_arg *nr_arg,
5296 struct nfs_readdir_res *nr_res)
5297 {
5298 struct inode *dir = d_inode(nr_arg->dentry);
5299 struct nfs_server *server = NFS_SERVER(dir);
5300 struct nfs4_readdir_arg args = {
5301 .fh = NFS_FH(dir),
5302 .pages = nr_arg->pages,
5303 .pgbase = 0,
5304 .count = nr_arg->page_len,
5305 .plus = nr_arg->plus,
5306 };
5307 struct nfs4_readdir_res res;
5308 struct rpc_message msg = {
5309 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
5310 .rpc_argp = &args,
5311 .rpc_resp = &res,
5312 .rpc_cred = nr_arg->cred,
5313 };
5314 int status;
5315
5316 dprintk("%s: dentry = %pd2, cookie = %llu\n", __func__,
5317 nr_arg->dentry, (unsigned long long)nr_arg->cookie);
5318 if (!(server->caps & NFS_CAP_SECURITY_LABEL))
5319 args.bitmask = server->attr_bitmask_nl;
5320 else
5321 args.bitmask = server->attr_bitmask;
5322
5323 nfs4_setup_readdir(nr_arg->cookie, nr_arg->verf, nr_arg->dentry, &args);
5324 res.pgbase = args.pgbase;
5325 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
5326 &res.seq_res, 0);
5327 if (status >= 0) {
5328 memcpy(nr_res->verf, res.verifier.data, NFS4_VERIFIER_SIZE);
5329 status += args.pgbase;
5330 }
5331
5332 nfs_invalidate_atime(dir);
5333
5334 dprintk("%s: returns %d\n", __func__, status);
5335 return status;
5336 }
5337
5338 static int nfs4_proc_readdir(struct nfs_readdir_arg *arg,
5339 struct nfs_readdir_res *res)
5340 {
5341 struct nfs4_exception exception = {
5342 .interruptible = true,
5343 };
5344 int err;
5345 do {
5346 err = _nfs4_proc_readdir(arg, res);
5347 trace_nfs4_readdir(d_inode(arg->dentry), err);
5348 err = nfs4_handle_exception(NFS_SERVER(d_inode(arg->dentry)),
5349 err, &exception);
5350 } while (exception.retry);
5351 return err;
5352 }
5353
5354 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
5355 struct iattr *sattr, struct nfs4_label *label, dev_t rdev)
5356 {
5357 struct nfs4_createdata *data;
5358 int mode = sattr->ia_mode;
5359 int status = -ENOMEM;
5360
5361 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
5362 if (data == NULL)
5363 goto out;
5364
5365 if (S_ISFIFO(mode))
5366 data->arg.ftype = NF4FIFO;
5367 else if (S_ISBLK(mode)) {
5368 data->arg.ftype = NF4BLK;
5369 data->arg.u.device.specdata1 = MAJOR(rdev);
5370 data->arg.u.device.specdata2 = MINOR(rdev);
5371 }
5372 else if (S_ISCHR(mode)) {
5373 data->arg.ftype = NF4CHR;
5374 data->arg.u.device.specdata1 = MAJOR(rdev);
5375 data->arg.u.device.specdata2 = MINOR(rdev);
5376 } else if (!S_ISSOCK(mode)) {
5377 status = -EINVAL;
5378 goto out_free;
5379 }
5380
5381 data->arg.label = label;
5382 status = nfs4_do_create(dir, dentry, data);
5383 out_free:
5384 nfs4_free_createdata(data);
5385 out:
5386 return status;
5387 }
5388
5389 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
5390 struct iattr *sattr, dev_t rdev)
5391 {
5392 struct nfs_server *server = NFS_SERVER(dir);
5393 struct nfs4_exception exception = {
5394 .interruptible = true,
5395 };
5396 struct nfs4_label l, *label;
5397 int err;
5398
5399 label = nfs4_label_init_security(dir, dentry, sattr, &l);
5400
5401 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
5402 sattr->ia_mode &= ~current_umask();
5403 do {
5404 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev);
5405 trace_nfs4_mknod(dir, &dentry->d_name, err);
5406 err = nfs4_handle_exception(NFS_SERVER(dir), err,
5407 &exception);
5408 } while (exception.retry);
5409
5410 nfs4_label_release_security(label);
5411
5412 return err;
5413 }
5414
5415 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
5416 struct nfs_fsstat *fsstat)
5417 {
5418 struct nfs4_statfs_arg args = {
5419 .fh = fhandle,
5420 .bitmask = server->attr_bitmask,
5421 };
5422 struct nfs4_statfs_res res = {
5423 .fsstat = fsstat,
5424 };
5425 struct rpc_message msg = {
5426 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
5427 .rpc_argp = &args,
5428 .rpc_resp = &res,
5429 };
5430
5431 nfs_fattr_init(fsstat->fattr);
5432 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5433 }
5434
5435 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
5436 {
5437 struct nfs4_exception exception = {
5438 .interruptible = true,
5439 };
5440 int err;
5441 do {
5442 err = nfs4_handle_exception(server,
5443 _nfs4_proc_statfs(server, fhandle, fsstat),
5444 &exception);
5445 } while (exception.retry);
5446 return err;
5447 }
5448
5449 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
5450 struct nfs_fsinfo *fsinfo)
5451 {
5452 struct nfs4_fsinfo_arg args = {
5453 .fh = fhandle,
5454 .bitmask = server->attr_bitmask,
5455 };
5456 struct nfs4_fsinfo_res res = {
5457 .fsinfo = fsinfo,
5458 };
5459 struct rpc_message msg = {
5460 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
5461 .rpc_argp = &args,
5462 .rpc_resp = &res,
5463 };
5464
5465 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5466 }
5467
5468 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
5469 {
5470 struct nfs4_exception exception = {
5471 .interruptible = true,
5472 };
5473 int err;
5474
5475 do {
5476 err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
5477 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
5478 if (err == 0) {
5479 nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time * HZ);
5480 break;
5481 }
5482 err = nfs4_handle_exception(server, err, &exception);
5483 } while (exception.retry);
5484 return err;
5485 }
5486
5487 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
5488 {
5489 int error;
5490
5491 nfs_fattr_init(fsinfo->fattr);
5492 error = nfs4_do_fsinfo(server, fhandle, fsinfo);
5493 if (error == 0) {
5494 /* block layout checks this! */
5495 server->pnfs_blksize = fsinfo->blksize;
5496 set_pnfs_layoutdriver(server, fhandle, fsinfo);
5497 }
5498
5499 return error;
5500 }
5501
5502 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
5503 struct nfs_pathconf *pathconf)
5504 {
5505 struct nfs4_pathconf_arg args = {
5506 .fh = fhandle,
5507 .bitmask = server->attr_bitmask,
5508 };
5509 struct nfs4_pathconf_res res = {
5510 .pathconf = pathconf,
5511 };
5512 struct rpc_message msg = {
5513 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
5514 .rpc_argp = &args,
5515 .rpc_resp = &res,
5516 };
5517
5518 /* None of the pathconf attributes are mandatory to implement */
5519 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
5520 memset(pathconf, 0, sizeof(*pathconf));
5521 return 0;
5522 }
5523
5524 nfs_fattr_init(pathconf->fattr);
5525 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5526 }
5527
5528 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
5529 struct nfs_pathconf *pathconf)
5530 {
5531 struct nfs4_exception exception = {
5532 .interruptible = true,
5533 };
5534 int err;
5535
5536 do {
5537 err = nfs4_handle_exception(server,
5538 _nfs4_proc_pathconf(server, fhandle, pathconf),
5539 &exception);
5540 } while (exception.retry);
5541 return err;
5542 }
5543
5544 int nfs4_set_rw_stateid(nfs4_stateid *stateid,
5545 const struct nfs_open_context *ctx,
5546 const struct nfs_lock_context *l_ctx,
5547 fmode_t fmode)
5548 {
5549 return nfs4_select_rw_stateid(ctx->state, fmode, l_ctx, stateid, NULL);
5550 }
5551 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
5552
5553 static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
5554 const struct nfs_open_context *ctx,
5555 const struct nfs_lock_context *l_ctx,
5556 fmode_t fmode)
5557 {
5558 nfs4_stateid _current_stateid;
5559
5560 /* If the current stateid represents a lost lock, then exit */
5561 if (nfs4_set_rw_stateid(&_current_stateid, ctx, l_ctx, fmode) == -EIO)
5562 return true;
5563 return nfs4_stateid_match(stateid, &_current_stateid);
5564 }
5565
5566 static bool nfs4_error_stateid_expired(int err)
5567 {
5568 switch (err) {
5569 case -NFS4ERR_DELEG_REVOKED:
5570 case -NFS4ERR_ADMIN_REVOKED:
5571 case -NFS4ERR_BAD_STATEID:
5572 case -NFS4ERR_STALE_STATEID:
5573 case -NFS4ERR_OLD_STATEID:
5574 case -NFS4ERR_OPENMODE:
5575 case -NFS4ERR_EXPIRED:
5576 return true;
5577 }
5578 return false;
5579 }
5580
5581 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
5582 {
5583 struct nfs_server *server = NFS_SERVER(hdr->inode);
5584
5585 trace_nfs4_read(hdr, task->tk_status);
5586 if (task->tk_status < 0) {
5587 struct nfs4_exception exception = {
5588 .inode = hdr->inode,
5589 .state = hdr->args.context->state,
5590 .stateid = &hdr->args.stateid,
5591 };
5592 task->tk_status = nfs4_async_handle_exception(task,
5593 server, task->tk_status, &exception);
5594 if (exception.retry) {
5595 rpc_restart_call_prepare(task);
5596 return -EAGAIN;
5597 }
5598 }
5599
5600 if (task->tk_status > 0)
5601 renew_lease(server, hdr->timestamp);
5602 return 0;
5603 }
5604
5605 static bool nfs4_read_stateid_changed(struct rpc_task *task,
5606 struct nfs_pgio_args *args)
5607 {
5608
5609 if (!nfs4_error_stateid_expired(task->tk_status) ||
5610 nfs4_stateid_is_current(&args->stateid,
5611 args->context,
5612 args->lock_context,
5613 FMODE_READ))
5614 return false;
5615 rpc_restart_call_prepare(task);
5616 return true;
5617 }
5618
5619 static bool nfs4_read_plus_not_supported(struct rpc_task *task,
5620 struct nfs_pgio_header *hdr)
5621 {
5622 struct nfs_server *server = NFS_SERVER(hdr->inode);
5623 struct rpc_message *msg = &task->tk_msg;
5624
5625 if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS] &&
5626 task->tk_status == -ENOTSUPP) {
5627 server->caps &= ~NFS_CAP_READ_PLUS;
5628 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
5629 rpc_restart_call_prepare(task);
5630 return true;
5631 }
5632 return false;
5633 }
5634
5635 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
5636 {
5637 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
5638 return -EAGAIN;
5639 if (nfs4_read_stateid_changed(task, &hdr->args))
5640 return -EAGAIN;
5641 if (nfs4_read_plus_not_supported(task, hdr))
5642 return -EAGAIN;
5643 if (task->tk_status > 0)
5644 nfs_invalidate_atime(hdr->inode);
5645 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
5646 nfs4_read_done_cb(task, hdr);
5647 }
5648
5649 #if defined CONFIG_NFS_V4_2 && defined CONFIG_NFS_V4_2_READ_PLUS
5650 static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr,
5651 struct rpc_message *msg)
5652 {
5653 /* Note: We don't use READ_PLUS with pNFS yet */
5654 if (nfs_server_capable(hdr->inode, NFS_CAP_READ_PLUS) && !hdr->ds_clp) {
5655 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS];
5656 return nfs_read_alloc_scratch(hdr, READ_PLUS_SCRATCH_SIZE);
5657 }
5658 return false;
5659 }
5660 #else
5661 static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr,
5662 struct rpc_message *msg)
5663 {
5664 return false;
5665 }
5666 #endif /* CONFIG_NFS_V4_2 && CONFIG_NFS_V4_2_READ_PLUS */
5667
5668 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
5669 struct rpc_message *msg)
5670 {
5671 hdr->timestamp = jiffies;
5672 if (!hdr->pgio_done_cb)
5673 hdr->pgio_done_cb = nfs4_read_done_cb;
5674 if (!nfs42_read_plus_support(hdr, msg))
5675 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
5676 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
5677 }
5678
5679 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task,
5680 struct nfs_pgio_header *hdr)
5681 {
5682 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode)->nfs_client,
5683 &hdr->args.seq_args,
5684 &hdr->res.seq_res,
5685 task))
5686 return 0;
5687 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
5688 hdr->args.lock_context,
5689 hdr->rw_mode) == -EIO)
5690 return -EIO;
5691 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags)))
5692 return -EIO;
5693 return 0;
5694 }
5695
5696 static int nfs4_write_done_cb(struct rpc_task *task,
5697 struct nfs_pgio_header *hdr)
5698 {
5699 struct inode *inode = hdr->inode;
5700
5701 trace_nfs4_write(hdr, task->tk_status);
5702 if (task->tk_status < 0) {
5703 struct nfs4_exception exception = {
5704 .inode = hdr->inode,
5705 .state = hdr->args.context->state,
5706 .stateid = &hdr->args.stateid,
5707 };
5708 task->tk_status = nfs4_async_handle_exception(task,
5709 NFS_SERVER(inode), task->tk_status,
5710 &exception);
5711 if (exception.retry) {
5712 rpc_restart_call_prepare(task);
5713 return -EAGAIN;
5714 }
5715 }
5716 if (task->tk_status >= 0) {
5717 renew_lease(NFS_SERVER(inode), hdr->timestamp);
5718 nfs_writeback_update_inode(hdr);
5719 }
5720 return 0;
5721 }
5722
5723 static bool nfs4_write_stateid_changed(struct rpc_task *task,
5724 struct nfs_pgio_args *args)
5725 {
5726
5727 if (!nfs4_error_stateid_expired(task->tk_status) ||
5728 nfs4_stateid_is_current(&args->stateid,
5729 args->context,
5730 args->lock_context,
5731 FMODE_WRITE))
5732 return false;
5733 rpc_restart_call_prepare(task);
5734 return true;
5735 }
5736
5737 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
5738 {
5739 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
5740 return -EAGAIN;
5741 if (nfs4_write_stateid_changed(task, &hdr->args))
5742 return -EAGAIN;
5743 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
5744 nfs4_write_done_cb(task, hdr);
5745 }
5746
5747 static
5748 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
5749 {
5750 /* Don't request attributes for pNFS or O_DIRECT writes */
5751 if (hdr->ds_clp != NULL || hdr->dreq != NULL)
5752 return false;
5753 /* Otherwise, request attributes if and only if we don't hold
5754 * a delegation
5755 */
5756 return nfs4_have_delegation(hdr->inode, FMODE_READ, 0) == 0;
5757 }
5758
5759 void nfs4_bitmask_set(__u32 bitmask[], const __u32 src[],
5760 struct inode *inode, unsigned long cache_validity)
5761 {
5762 struct nfs_server *server = NFS_SERVER(inode);
5763 unsigned int i;
5764
5765 memcpy(bitmask, src, sizeof(*bitmask) * NFS4_BITMASK_SZ);
5766 cache_validity |= READ_ONCE(NFS_I(inode)->cache_validity);
5767
5768 if (cache_validity & NFS_INO_INVALID_CHANGE)
5769 bitmask[0] |= FATTR4_WORD0_CHANGE;
5770 if (cache_validity & NFS_INO_INVALID_ATIME)
5771 bitmask[1] |= FATTR4_WORD1_TIME_ACCESS;
5772 if (cache_validity & NFS_INO_INVALID_MODE)
5773 bitmask[1] |= FATTR4_WORD1_MODE;
5774 if (cache_validity & NFS_INO_INVALID_OTHER)
5775 bitmask[1] |= FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP;
5776 if (cache_validity & NFS_INO_INVALID_NLINK)
5777 bitmask[1] |= FATTR4_WORD1_NUMLINKS;
5778 if (cache_validity & NFS_INO_INVALID_CTIME)
5779 bitmask[1] |= FATTR4_WORD1_TIME_METADATA;
5780 if (cache_validity & NFS_INO_INVALID_MTIME)
5781 bitmask[1] |= FATTR4_WORD1_TIME_MODIFY;
5782 if (cache_validity & NFS_INO_INVALID_BLOCKS)
5783 bitmask[1] |= FATTR4_WORD1_SPACE_USED;
5784
5785 if (cache_validity & NFS_INO_INVALID_SIZE)
5786 bitmask[0] |= FATTR4_WORD0_SIZE;
5787
5788 for (i = 0; i < NFS4_BITMASK_SZ; i++)
5789 bitmask[i] &= server->attr_bitmask[i];
5790 }
5791
5792 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
5793 struct rpc_message *msg,
5794 struct rpc_clnt **clnt)
5795 {
5796 struct nfs_server *server = NFS_SERVER(hdr->inode);
5797
5798 if (!nfs4_write_need_cache_consistency_data(hdr)) {
5799 hdr->args.bitmask = NULL;
5800 hdr->res.fattr = NULL;
5801 } else {
5802 nfs4_bitmask_set(hdr->args.bitmask_store,
5803 server->cache_consistency_bitmask,
5804 hdr->inode, NFS_INO_INVALID_BLOCKS);
5805 hdr->args.bitmask = hdr->args.bitmask_store;
5806 }
5807
5808 if (!hdr->pgio_done_cb)
5809 hdr->pgio_done_cb = nfs4_write_done_cb;
5810 hdr->res.server = server;
5811 hdr->timestamp = jiffies;
5812
5813 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
5814 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
5815 nfs4_state_protect_write(hdr->ds_clp ? hdr->ds_clp : server->nfs_client, clnt, msg, hdr);
5816 }
5817
5818 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
5819 {
5820 nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client,
5821 &data->args.seq_args,
5822 &data->res.seq_res,
5823 task);
5824 }
5825
5826 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
5827 {
5828 struct inode *inode = data->inode;
5829
5830 trace_nfs4_commit(data, task->tk_status);
5831 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
5832 NULL, NULL) == -EAGAIN) {
5833 rpc_restart_call_prepare(task);
5834 return -EAGAIN;
5835 }
5836 return 0;
5837 }
5838
5839 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
5840 {
5841 if (!nfs4_sequence_done(task, &data->res.seq_res))
5842 return -EAGAIN;
5843 return data->commit_done_cb(task, data);
5844 }
5845
5846 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg,
5847 struct rpc_clnt **clnt)
5848 {
5849 struct nfs_server *server = NFS_SERVER(data->inode);
5850
5851 if (data->commit_done_cb == NULL)
5852 data->commit_done_cb = nfs4_commit_done_cb;
5853 data->res.server = server;
5854 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
5855 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
5856 nfs4_state_protect(data->ds_clp ? data->ds_clp : server->nfs_client,
5857 NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
5858 }
5859
5860 static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args,
5861 struct nfs_commitres *res)
5862 {
5863 struct inode *dst_inode = file_inode(dst);
5864 struct nfs_server *server = NFS_SERVER(dst_inode);
5865 struct rpc_message msg = {
5866 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT],
5867 .rpc_argp = args,
5868 .rpc_resp = res,
5869 };
5870
5871 args->fh = NFS_FH(dst_inode);
5872 return nfs4_call_sync(server->client, server, &msg,
5873 &args->seq_args, &res->seq_res, 1);
5874 }
5875
5876 int nfs4_proc_commit(struct file *dst, __u64 offset, __u32 count, struct nfs_commitres *res)
5877 {
5878 struct nfs_commitargs args = {
5879 .offset = offset,
5880 .count = count,
5881 };
5882 struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
5883 struct nfs4_exception exception = { };
5884 int status;
5885
5886 do {
5887 status = _nfs4_proc_commit(dst, &args, res);
5888 status = nfs4_handle_exception(dst_server, status, &exception);
5889 } while (exception.retry);
5890
5891 return status;
5892 }
5893
5894 struct nfs4_renewdata {
5895 struct nfs_client *client;
5896 unsigned long timestamp;
5897 };
5898
5899 /*
5900 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
5901 * standalone procedure for queueing an asynchronous RENEW.
5902 */
5903 static void nfs4_renew_release(void *calldata)
5904 {
5905 struct nfs4_renewdata *data = calldata;
5906 struct nfs_client *clp = data->client;
5907
5908 if (refcount_read(&clp->cl_count) > 1)
5909 nfs4_schedule_state_renewal(clp);
5910 nfs_put_client(clp);
5911 kfree(data);
5912 }
5913
5914 static void nfs4_renew_done(struct rpc_task *task, void *calldata)
5915 {
5916 struct nfs4_renewdata *data = calldata;
5917 struct nfs_client *clp = data->client;
5918 unsigned long timestamp = data->timestamp;
5919
5920 trace_nfs4_renew_async(clp, task->tk_status);
5921 switch (task->tk_status) {
5922 case 0:
5923 break;
5924 case -NFS4ERR_LEASE_MOVED:
5925 nfs4_schedule_lease_moved_recovery(clp);
5926 break;
5927 default:
5928 /* Unless we're shutting down, schedule state recovery! */
5929 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
5930 return;
5931 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
5932 nfs4_schedule_lease_recovery(clp);
5933 return;
5934 }
5935 nfs4_schedule_path_down_recovery(clp);
5936 }
5937 do_renew_lease(clp, timestamp);
5938 }
5939
5940 static const struct rpc_call_ops nfs4_renew_ops = {
5941 .rpc_call_done = nfs4_renew_done,
5942 .rpc_release = nfs4_renew_release,
5943 };
5944
5945 static int nfs4_proc_async_renew(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags)
5946 {
5947 struct rpc_message msg = {
5948 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
5949 .rpc_argp = clp,
5950 .rpc_cred = cred,
5951 };
5952 struct nfs4_renewdata *data;
5953
5954 if (renew_flags == 0)
5955 return 0;
5956 if (!refcount_inc_not_zero(&clp->cl_count))
5957 return -EIO;
5958 data = kmalloc(sizeof(*data), GFP_NOFS);
5959 if (data == NULL) {
5960 nfs_put_client(clp);
5961 return -ENOMEM;
5962 }
5963 data->client = clp;
5964 data->timestamp = jiffies;
5965 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
5966 &nfs4_renew_ops, data);
5967 }
5968
5969 static int nfs4_proc_renew(struct nfs_client *clp, const struct cred *cred)
5970 {
5971 struct rpc_message msg = {
5972 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
5973 .rpc_argp = clp,
5974 .rpc_cred = cred,
5975 };
5976 unsigned long now = jiffies;
5977 int status;
5978
5979 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5980 if (status < 0)
5981 return status;
5982 do_renew_lease(clp, now);
5983 return 0;
5984 }
5985
5986 static bool nfs4_server_supports_acls(const struct nfs_server *server,
5987 enum nfs4_acl_type type)
5988 {
5989 switch (type) {
5990 default:
5991 return server->attr_bitmask[0] & FATTR4_WORD0_ACL;
5992 case NFS4ACL_DACL:
5993 return server->attr_bitmask[1] & FATTR4_WORD1_DACL;
5994 case NFS4ACL_SACL:
5995 return server->attr_bitmask[1] & FATTR4_WORD1_SACL;
5996 }
5997 }
5998
5999 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
6000  * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
6001 * the stack.
6002 */
6003 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
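/*
 * For illustration (a worked example, assuming the usual XATTR_SIZE_MAX of
 * 64 KiB and a 4 KiB PAGE_SIZE): NFS4ACL_MAXPAGES is then 16, i.e. 16 page
 * pointers, or 128 bytes of stack on a 64-bit build.
 */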
6004
6005 int nfs4_buf_to_pages_noslab(const void *buf, size_t buflen,
6006 struct page **pages)
6007 {
6008 struct page *newpage, **spages;
6009 int rc = 0;
6010 size_t len;
6011 spages = pages;
6012
6013 do {
6014 len = min_t(size_t, PAGE_SIZE, buflen);
6015 newpage = alloc_page(GFP_KERNEL);
6016
6017 if (newpage == NULL)
6018 goto unwind;
6019 memcpy(page_address(newpage), buf, len);
6020 buf += len;
6021 buflen -= len;
6022 *pages++ = newpage;
6023 rc++;
6024 } while (buflen != 0);
6025
6026 return rc;
6027
6028 unwind:
6029 for(; rc > 0; rc--)
6030 __free_page(spages[rc-1]);
6031 return -ENOMEM;
6032 }
6033
6034 struct nfs4_cached_acl {
6035 enum nfs4_acl_type type;
6036 int cached;
6037 size_t len;
6038 char data[];
6039 };
6040
6041 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
6042 {
6043 struct nfs_inode *nfsi = NFS_I(inode);
6044
6045 spin_lock(&inode->i_lock);
6046 kfree(nfsi->nfs4_acl);
6047 nfsi->nfs4_acl = acl;
6048 spin_unlock(&inode->i_lock);
6049 }
6050
6051 static void nfs4_zap_acl_attr(struct inode *inode)
6052 {
6053 nfs4_set_cached_acl(inode, NULL);
6054 }
6055
6056 static ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf,
6057 size_t buflen, enum nfs4_acl_type type)
6058 {
6059 struct nfs_inode *nfsi = NFS_I(inode);
6060 struct nfs4_cached_acl *acl;
6061 int ret = -ENOENT;
6062
6063 spin_lock(&inode->i_lock);
6064 acl = nfsi->nfs4_acl;
6065 if (acl == NULL)
6066 goto out;
6067 if (acl->type != type)
6068 goto out;
6069 if (buf == NULL) /* user is just asking for length */
6070 goto out_len;
6071 if (acl->cached == 0)
6072 goto out;
6073 ret = -ERANGE; /* see getxattr(2) man page */
6074 if (acl->len > buflen)
6075 goto out;
6076 memcpy(buf, acl->data, acl->len);
6077 out_len:
6078 ret = acl->len;
6079 out:
6080 spin_unlock(&inode->i_lock);
6081 return ret;
6082 }
6083
6084 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages,
6085 size_t pgbase, size_t acl_len,
6086 enum nfs4_acl_type type)
6087 {
6088 struct nfs4_cached_acl *acl;
6089 size_t buflen = sizeof(*acl) + acl_len;
6090
6091 if (buflen <= PAGE_SIZE) {
6092 acl = kmalloc(buflen, GFP_KERNEL);
6093 if (acl == NULL)
6094 goto out;
6095 acl->cached = 1;
6096 _copy_from_pages(acl->data, pages, pgbase, acl_len);
6097 } else {
6098 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
6099 if (acl == NULL)
6100 goto out;
6101 acl->cached = 0;
6102 }
6103 acl->type = type;
6104 acl->len = acl_len;
6105 out:
6106 nfs4_set_cached_acl(inode, acl);
6107 }
6108
6109 /*
6110 * The getxattr API returns the required buffer length when called with a
6111 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
6112 * the required buf. On a NULL buf, we send a page of data to the server
6113 * guessing that the ACL request can be serviced by a page. If so, we cache
6114 * up to the page of ACL data, and the 2nd call to getxattr is serviced by
6115 * the cache. If not so, we throw away the page, and cache the required
6116 * length. The next getxattr call will then produce another round trip to
6117 * the server, this time with the input buf of the required size.
6118 */
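/*
 * For illustration (not a claim about any particular tool): a userspace
 * consumer, e.g. the nfs4-acl-tools utilities, would typically follow the
 * two-call pattern described above:
 *
 *	len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	buf = malloc(len);
 *	getxattr(path, "system.nfs4_acl", buf, len);
 *
 * so the second call is normally satisfied from the ACL data (or length)
 * cached by the first round trip below.
 */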
6119 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf,
6120 size_t buflen, enum nfs4_acl_type type)
6121 {
6122 struct page **pages;
6123 struct nfs_getaclargs args = {
6124 .fh = NFS_FH(inode),
6125 .acl_type = type,
6126 .acl_len = buflen,
6127 };
6128 struct nfs_getaclres res = {
6129 .acl_type = type,
6130 .acl_len = buflen,
6131 };
6132 struct rpc_message msg = {
6133 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
6134 .rpc_argp = &args,
6135 .rpc_resp = &res,
6136 };
6137 unsigned int npages;
6138 int ret = -ENOMEM, i;
6139 struct nfs_server *server = NFS_SERVER(inode);
6140
6141 if (buflen == 0)
6142 buflen = server->rsize;
6143
6144 npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1;
6145 pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
6146 if (!pages)
6147 return -ENOMEM;
6148
6149 args.acl_pages = pages;
6150
6151 for (i = 0; i < npages; i++) {
6152 pages[i] = alloc_page(GFP_KERNEL);
6153 if (!pages[i])
6154 goto out_free;
6155 }
6156
6157 /* for decoding across pages */
6158 res.acl_scratch = alloc_page(GFP_KERNEL);
6159 if (!res.acl_scratch)
6160 goto out_free;
6161
6162 args.acl_len = npages * PAGE_SIZE;
6163
6164 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
6165 __func__, buf, buflen, npages, args.acl_len);
6166 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
6167 &msg, &args.seq_args, &res.seq_res, 0);
6168 if (ret)
6169 goto out_free;
6170
6171 /* Handle the case where the passed-in buffer is too short */
6172 if (res.acl_flags & NFS4_ACL_TRUNC) {
6173 /* Did the user only issue a request for the acl length? */
6174 if (buf == NULL)
6175 goto out_ok;
6176 ret = -ERANGE;
6177 goto out_free;
6178 }
6179 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len,
6180 type);
6181 if (buf) {
6182 if (res.acl_len > buflen) {
6183 ret = -ERANGE;
6184 goto out_free;
6185 }
6186 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
6187 }
6188 out_ok:
6189 ret = res.acl_len;
6190 out_free:
6191 while (--i >= 0)
6192 __free_page(pages[i]);
6193 if (res.acl_scratch)
6194 __free_page(res.acl_scratch);
6195 kfree(pages);
6196 return ret;
6197 }
6198
6199 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf,
6200 size_t buflen, enum nfs4_acl_type type)
6201 {
6202 struct nfs4_exception exception = {
6203 .interruptible = true,
6204 };
6205 ssize_t ret;
6206 do {
6207 ret = __nfs4_get_acl_uncached(inode, buf, buflen, type);
6208 trace_nfs4_get_acl(inode, ret);
6209 if (ret >= 0)
6210 break;
6211 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
6212 } while (exception.retry);
6213 return ret;
6214 }
6215
6216 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen,
6217 enum nfs4_acl_type type)
6218 {
6219 struct nfs_server *server = NFS_SERVER(inode);
6220 int ret;
6221
6222 if (unlikely(NFS_FH(inode)->size == 0))
6223 return -ENODATA;
6224 if (!nfs4_server_supports_acls(server, type))
6225 return -EOPNOTSUPP;
6226 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
6227 if (ret < 0)
6228 return ret;
6229 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
6230 nfs_zap_acl_cache(inode);
6231 ret = nfs4_read_cached_acl(inode, buf, buflen, type);
6232 if (ret != -ENOENT)
6233 /* -ENOENT is returned if there is no ACL or if there is an ACL
6234 * but no cached acl data, just the acl length */
6235 return ret;
6236 return nfs4_get_acl_uncached(inode, buf, buflen, type);
6237 }
6238
6239 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf,
6240 size_t buflen, enum nfs4_acl_type type)
6241 {
6242 struct nfs_server *server = NFS_SERVER(inode);
6243 struct page *pages[NFS4ACL_MAXPAGES];
6244 struct nfs_setaclargs arg = {
6245 .fh = NFS_FH(inode),
6246 .acl_type = type,
6247 .acl_len = buflen,
6248 .acl_pages = pages,
6249 };
6250 struct nfs_setaclres res;
6251 struct rpc_message msg = {
6252 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
6253 .rpc_argp = &arg,
6254 .rpc_resp = &res,
6255 };
6256 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
6257 int ret, i;
6258
6259 /* You can't remove system.nfs4_acl: */
6260 if (buflen == 0)
6261 return -EINVAL;
6262 if (!nfs4_server_supports_acls(server, type))
6263 return -EOPNOTSUPP;
6264 if (npages > ARRAY_SIZE(pages))
6265 return -ERANGE;
6266 i = nfs4_buf_to_pages_noslab(buf, buflen, arg.acl_pages);
6267 if (i < 0)
6268 return i;
6269 nfs4_inode_make_writeable(inode);
6270 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
6271
6272 /*
6273 * Free each page after tx, so the only ref left is
6274 * held by the network stack
6275 */
6276 for (; i > 0; i--)
6277 put_page(pages[i-1]);
6278
6279 /*
6280 	 * An ACL update can result in an inode attribute update,
6281 	 * so mark the attribute cache invalid.
6282 */
6283 spin_lock(&inode->i_lock);
6284 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
6285 NFS_INO_INVALID_CTIME |
6286 NFS_INO_REVAL_FORCED);
6287 spin_unlock(&inode->i_lock);
6288 nfs_access_zap_cache(inode);
6289 nfs_zap_acl_cache(inode);
6290 return ret;
6291 }
6292
6293 static int nfs4_proc_set_acl(struct inode *inode, const void *buf,
6294 size_t buflen, enum nfs4_acl_type type)
6295 {
6296 struct nfs4_exception exception = { };
6297 int err;
6298
6299 if (unlikely(NFS_FH(inode)->size == 0))
6300 return -ENODATA;
6301 do {
6302 err = __nfs4_proc_set_acl(inode, buf, buflen, type);
6303 trace_nfs4_set_acl(inode, err);
6304 if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) {
6305 /*
6306 * no need to retry since the kernel
6307 * isn't involved in encoding the ACEs.
6308 */
6309 err = -EINVAL;
6310 break;
6311 }
6312 err = nfs4_handle_exception(NFS_SERVER(inode), err,
6313 &exception);
6314 } while (exception.retry);
6315 return err;
6316 }
6317
6318 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
6319 static int _nfs4_get_security_label(struct inode *inode, void *buf,
6320 size_t buflen)
6321 {
6322 struct nfs_server *server = NFS_SERVER(inode);
6323 struct nfs4_label label = {0, 0, 0, buflen, buf};
6324
6325 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
6326 struct nfs_fattr fattr = {
6327 .label = &label,
6328 };
6329 struct nfs4_getattr_arg arg = {
6330 .fh = NFS_FH(inode),
6331 .bitmask = bitmask,
6332 };
6333 struct nfs4_getattr_res res = {
6334 .fattr = &fattr,
6335 .server = server,
6336 };
6337 struct rpc_message msg = {
6338 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
6339 .rpc_argp = &arg,
6340 .rpc_resp = &res,
6341 };
6342 int ret;
6343
6344 nfs_fattr_init(&fattr);
6345
6346 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0);
6347 if (ret)
6348 return ret;
6349 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
6350 return -ENOENT;
6351 return label.len;
6352 }
6353
6354 static int nfs4_get_security_label(struct inode *inode, void *buf,
6355 size_t buflen)
6356 {
6357 struct nfs4_exception exception = {
6358 .interruptible = true,
6359 };
6360 int err;
6361
6362 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
6363 return -EOPNOTSUPP;
6364
6365 do {
6366 err = _nfs4_get_security_label(inode, buf, buflen);
6367 trace_nfs4_get_security_label(inode, err);
6368 err = nfs4_handle_exception(NFS_SERVER(inode), err,
6369 &exception);
6370 } while (exception.retry);
6371 return err;
6372 }
6373
6374 static int _nfs4_do_set_security_label(struct inode *inode,
6375 struct nfs4_label *ilabel,
6376 struct nfs_fattr *fattr)
6377 {
6378
6379 struct iattr sattr = {0};
6380 struct nfs_server *server = NFS_SERVER(inode);
6381 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
6382 struct nfs_setattrargs arg = {
6383 .fh = NFS_FH(inode),
6384 .iap = &sattr,
6385 .server = server,
6386 .bitmask = bitmask,
6387 .label = ilabel,
6388 };
6389 struct nfs_setattrres res = {
6390 .fattr = fattr,
6391 .server = server,
6392 };
6393 struct rpc_message msg = {
6394 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
6395 .rpc_argp = &arg,
6396 .rpc_resp = &res,
6397 };
6398 int status;
6399
6400 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
6401
6402 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
6403 if (status)
6404 dprintk("%s failed: %d\n", __func__, status);
6405
6406 return status;
6407 }
6408
6409 static int nfs4_do_set_security_label(struct inode *inode,
6410 struct nfs4_label *ilabel,
6411 struct nfs_fattr *fattr)
6412 {
6413 struct nfs4_exception exception = { };
6414 int err;
6415
6416 do {
6417 err = _nfs4_do_set_security_label(inode, ilabel, fattr);
6418 trace_nfs4_set_security_label(inode, err);
6419 err = nfs4_handle_exception(NFS_SERVER(inode), err,
6420 &exception);
6421 } while (exception.retry);
6422 return err;
6423 }
6424
6425 static int
6426 nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
6427 {
6428 struct nfs4_label ilabel = {0, 0, 0, buflen, (char *)buf };
6429 struct nfs_fattr *fattr;
6430 int status;
6431
6432 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
6433 return -EOPNOTSUPP;
6434
6435 fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode));
6436 if (fattr == NULL)
6437 return -ENOMEM;
6438
6439 status = nfs4_do_set_security_label(inode, &ilabel, fattr);
6440 if (status == 0)
6441 nfs_setsecurity(inode, fattr);
6442
6443 nfs_free_fattr(fattr);
6444 return status;
6445 }
6446 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */
6447
6448
6449 static void nfs4_init_boot_verifier(const struct nfs_client *clp,
6450 nfs4_verifier *bootverf)
6451 {
6452 __be32 verf[2];
6453
6454 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
6455 /* An impossible timestamp guarantees this value
6456 * will never match a generated boot time. */
6457 verf[0] = cpu_to_be32(U32_MAX);
6458 verf[1] = cpu_to_be32(U32_MAX);
6459 } else {
6460 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
6461 u64 ns = ktime_to_ns(nn->boot_time);
6462
6463 verf[0] = cpu_to_be32(ns >> 32);
6464 verf[1] = cpu_to_be32(ns);
6465 }
6466 memcpy(bootverf->data, verf, sizeof(bootverf->data));
6467 }
6468
6469 static size_t
6470 nfs4_get_uniquifier(struct nfs_client *clp, char *buf, size_t buflen)
6471 {
6472 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
6473 struct nfs_netns_client *nn_clp = nn->nfs_client;
6474 const char *id;
6475
6476 buf[0] = '\0';
6477
6478 if (nn_clp) {
6479 rcu_read_lock();
6480 id = rcu_dereference(nn_clp->identifier);
6481 if (id)
6482 strscpy(buf, id, buflen);
6483 rcu_read_unlock();
6484 }
6485
6486 if (nfs4_client_id_uniquifier[0] != '\0' && buf[0] == '\0')
6487 strscpy(buf, nfs4_client_id_uniquifier, buflen);
6488
6489 return strlen(buf);
6490 }
6491
6492 static int
6493 nfs4_init_nonuniform_client_string(struct nfs_client *clp)
6494 {
6495 char buf[NFS4_CLIENT_ID_UNIQ_LEN];
6496 size_t buflen;
6497 size_t len;
6498 char *str;
6499
6500 if (clp->cl_owner_id != NULL)
6501 return 0;
6502
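	/*
	 * Length budget, as a reading of the scnprintf() formats below:
	 * 14 bytes for "Linux NFSv4.0 ", plus the nodename, a '/'
	 * separator, the peer address string and the trailing NUL; the
	 * optional uniquifier and its separator are added afterwards.
	 */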
6503 rcu_read_lock();
6504 len = 14 +
6505 strlen(clp->cl_rpcclient->cl_nodename) +
6506 1 +
6507 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) +
6508 1;
6509 rcu_read_unlock();
6510
6511 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf));
6512 if (buflen)
6513 len += buflen + 1;
6514
6515 if (len > NFS4_OPAQUE_LIMIT + 1)
6516 return -EINVAL;
6517
6518 /*
6519 * Since this string is allocated at mount time, and held until the
6520 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
6521 * about a memory-reclaim deadlock.
6522 */
6523 str = kmalloc(len, GFP_KERNEL);
6524 if (!str)
6525 return -ENOMEM;
6526
6527 rcu_read_lock();
6528 if (buflen)
6529 scnprintf(str, len, "Linux NFSv4.0 %s/%s/%s",
6530 clp->cl_rpcclient->cl_nodename, buf,
6531 rpc_peeraddr2str(clp->cl_rpcclient,
6532 RPC_DISPLAY_ADDR));
6533 else
6534 scnprintf(str, len, "Linux NFSv4.0 %s/%s",
6535 clp->cl_rpcclient->cl_nodename,
6536 rpc_peeraddr2str(clp->cl_rpcclient,
6537 RPC_DISPLAY_ADDR));
6538 rcu_read_unlock();
6539
6540 clp->cl_owner_id = str;
6541 return 0;
6542 }
6543
6544 static int
6545 nfs4_init_uniform_client_string(struct nfs_client *clp)
6546 {
6547 char buf[NFS4_CLIENT_ID_UNIQ_LEN];
6548 size_t buflen;
6549 size_t len;
6550 char *str;
6551
6552 if (clp->cl_owner_id != NULL)
6553 return 0;
6554
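	/*
	 * Length budget, as a reading of the scnprintf() formats below:
	 * 10 bytes for "Linux NFSv", up to 10 decimal digits each for the
	 * major and minor version, plus the '.', the ' ', the nodename and
	 * the trailing NUL; the optional uniquifier and its '/' separator
	 * are added afterwards.
	 */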
6555 len = 10 + 10 + 1 + 10 + 1 +
6556 strlen(clp->cl_rpcclient->cl_nodename) + 1;
6557
6558 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf));
6559 if (buflen)
6560 len += buflen + 1;
6561
6562 if (len > NFS4_OPAQUE_LIMIT + 1)
6563 return -EINVAL;
6564
6565 /*
6566 * Since this string is allocated at mount time, and held until the
6567 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
6568 * about a memory-reclaim deadlock.
6569 */
6570 str = kmalloc(len, GFP_KERNEL);
6571 if (!str)
6572 return -ENOMEM;
6573
6574 if (buflen)
6575 scnprintf(str, len, "Linux NFSv%u.%u %s/%s",
6576 clp->rpc_ops->version, clp->cl_minorversion,
6577 buf, clp->cl_rpcclient->cl_nodename);
6578 else
6579 scnprintf(str, len, "Linux NFSv%u.%u %s",
6580 clp->rpc_ops->version, clp->cl_minorversion,
6581 clp->cl_rpcclient->cl_nodename);
6582 clp->cl_owner_id = str;
6583 return 0;
6584 }
6585
6586 /*
6587 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback
6588 * services. Advertise one based on the address family of the
6589 * clientaddr.
6590 */
6591 static unsigned int
6592 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len)
6593 {
6594 if (strchr(clp->cl_ipaddr, ':') != NULL)
6595 return scnprintf(buf, len, "tcp6");
6596 else
6597 return scnprintf(buf, len, "tcp");
6598 }
6599
6600 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata)
6601 {
6602 struct nfs4_setclientid *sc = calldata;
6603
6604 if (task->tk_status == 0)
6605 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred);
6606 }
6607
6608 static const struct rpc_call_ops nfs4_setclientid_ops = {
6609 .rpc_call_done = nfs4_setclientid_done,
6610 };
6611
6612 /**
6613 * nfs4_proc_setclientid - Negotiate client ID
6614 * @clp: state data structure
6615 * @program: RPC program for NFSv4 callback service
6616 * @port: IP port number for NFS4 callback service
6617 * @cred: credential to use for this call
6618 * @res: where to place the result
6619 *
6620 * Returns zero, a negative errno, or a negative NFS4ERR status code.
6621 */
6622 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
6623 unsigned short port, const struct cred *cred,
6624 struct nfs4_setclientid_res *res)
6625 {
6626 nfs4_verifier sc_verifier;
6627 struct nfs4_setclientid setclientid = {
6628 .sc_verifier = &sc_verifier,
6629 .sc_prog = program,
6630 .sc_clnt = clp,
6631 };
6632 struct rpc_message msg = {
6633 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
6634 .rpc_argp = &setclientid,
6635 .rpc_resp = res,
6636 .rpc_cred = cred,
6637 };
6638 struct rpc_task_setup task_setup_data = {
6639 .rpc_client = clp->cl_rpcclient,
6640 .rpc_message = &msg,
6641 .callback_ops = &nfs4_setclientid_ops,
6642 .callback_data = &setclientid,
6643 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
6644 };
6645 unsigned long now = jiffies;
6646 int status;
6647
6648 /* nfs_client_id4 */
6649 nfs4_init_boot_verifier(clp, &sc_verifier);
6650
6651 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
6652 status = nfs4_init_uniform_client_string(clp);
6653 else
6654 status = nfs4_init_nonuniform_client_string(clp);
6655
6656 if (status)
6657 goto out;
6658
6659 /* cb_client4 */
6660 setclientid.sc_netid_len =
6661 nfs4_init_callback_netid(clp,
6662 setclientid.sc_netid,
6663 sizeof(setclientid.sc_netid));
6664 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
6665 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
6666 clp->cl_ipaddr, port >> 8, port & 255);
6667
6668 dprintk("NFS call setclientid auth=%s, '%s'\n",
6669 clp->cl_rpcclient->cl_auth->au_ops->au_name,
6670 clp->cl_owner_id);
6671
6672 status = nfs4_call_sync_custom(&task_setup_data);
6673 if (setclientid.sc_cred) {
6674 kfree(clp->cl_acceptor);
6675 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
6676 put_rpccred(setclientid.sc_cred);
6677 }
6678
6679 if (status == 0)
6680 do_renew_lease(clp, now);
6681 out:
6682 trace_nfs4_setclientid(clp, status);
6683 dprintk("NFS reply setclientid: %d\n", status);
6684 return status;
6685 }
6686
6687 /**
6688 * nfs4_proc_setclientid_confirm - Confirm client ID
6689 * @clp: state data structure
6690 * @arg: result of a previous SETCLIENTID
6691 * @cred: credential to use for this call
6692 *
6693 * Returns zero, a negative errno, or a negative NFS4ERR status code.
6694 */
6695 int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
6696 struct nfs4_setclientid_res *arg,
6697 const struct cred *cred)
6698 {
6699 struct rpc_message msg = {
6700 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
6701 .rpc_argp = arg,
6702 .rpc_cred = cred,
6703 };
6704 int status;
6705
6706 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
6707 clp->cl_rpcclient->cl_auth->au_ops->au_name,
6708 clp->cl_clientid);
6709 status = rpc_call_sync(clp->cl_rpcclient, &msg,
6710 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
6711 trace_nfs4_setclientid_confirm(clp, status);
6712 dprintk("NFS reply setclientid_confirm: %d\n", status);
6713 return status;
6714 }
6715
6716 struct nfs4_delegreturndata {
6717 struct nfs4_delegreturnargs args;
6718 struct nfs4_delegreturnres res;
6719 struct nfs_fh fh;
6720 nfs4_stateid stateid;
6721 unsigned long timestamp;
6722 struct {
6723 struct nfs4_layoutreturn_args arg;
6724 struct nfs4_layoutreturn_res res;
6725 struct nfs4_xdr_opaque_data ld_private;
6726 u32 roc_barrier;
6727 bool roc;
6728 } lr;
6729 struct nfs4_delegattr sattr;
6730 struct nfs_fattr fattr;
6731 int rpc_status;
6732 struct inode *inode;
6733 };
6734
6735 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
6736 {
6737 struct nfs4_delegreturndata *data = calldata;
6738 struct nfs4_exception exception = {
6739 .inode = data->inode,
6740 .stateid = &data->stateid,
6741 .task_is_privileged = data->args.seq_args.sa_privileged,
6742 };
6743
6744 if (!nfs4_sequence_done(task, &data->res.seq_res))
6745 return;
6746
6747 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
6748
6749 /* Handle Layoutreturn errors */
6750 if (pnfs_roc_done(task, &data->args.lr_args, &data->res.lr_res,
6751 &data->res.lr_ret) == -EAGAIN)
6752 goto out_restart;
6753
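/* Handle errors from the optional delegated-attribute SETATTR */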
6754 if (data->args.sattr_args && task->tk_status != 0) {
6755 switch (data->res.sattr_ret) {
6756 case 0:
6757 data->args.sattr_args = NULL;
6758 data->res.sattr_res = false;
6759 break;
6760 case -NFS4ERR_ADMIN_REVOKED:
6761 case -NFS4ERR_DELEG_REVOKED:
6762 case -NFS4ERR_EXPIRED:
6763 case -NFS4ERR_BAD_STATEID:
6764 /* Let the main handler below do stateid recovery */
6765 break;
6766 case -NFS4ERR_OLD_STATEID:
6767 if (nfs4_refresh_delegation_stateid(&data->stateid,
6768 data->inode))
6769 goto out_restart;
6770 fallthrough;
6771 default:
6772 data->args.sattr_args = NULL;
6773 data->res.sattr_res = false;
6774 goto out_restart;
6775 }
6776 }
6777
6778 switch (task->tk_status) {
6779 case 0:
6780 renew_lease(data->res.server, data->timestamp);
6781 break;
6782 case -NFS4ERR_ADMIN_REVOKED:
6783 case -NFS4ERR_DELEG_REVOKED:
6784 case -NFS4ERR_EXPIRED:
6785 nfs4_free_revoked_stateid(data->res.server,
6786 data->args.stateid,
6787 task->tk_msg.rpc_cred);
6788 fallthrough;
6789 case -NFS4ERR_BAD_STATEID:
6790 case -NFS4ERR_STALE_STATEID:
6791 case -ETIMEDOUT:
6792 task->tk_status = 0;
6793 break;
6794 case -NFS4ERR_OLD_STATEID:
6795 if (!nfs4_refresh_delegation_stateid(&data->stateid, data->inode))
6796 nfs4_stateid_seqid_inc(&data->stateid);
6797 if (data->args.bitmask) {
6798 data->args.bitmask = NULL;
6799 data->res.fattr = NULL;
6800 }
6801 goto out_restart;
6802 case -NFS4ERR_ACCESS:
6803 if (data->args.bitmask) {
6804 data->args.bitmask = NULL;
6805 data->res.fattr = NULL;
6806 goto out_restart;
6807 }
6808 fallthrough;
6809 default:
6810 task->tk_status = nfs4_async_handle_exception(task,
6811 data->res.server, task->tk_status,
6812 &exception);
6813 if (exception.retry)
6814 goto out_restart;
6815 }
6816 nfs_delegation_mark_returned(data->inode, data->args.stateid);
6817 data->rpc_status = task->tk_status;
6818 return;
6819 out_restart:
6820 task->tk_status = 0;
6821 rpc_restart_call_prepare(task);
6822 }
6823
6824 static void nfs4_delegreturn_release(void *calldata)
6825 {
6826 struct nfs4_delegreturndata *data = calldata;
6827 struct inode *inode = data->inode;
6828
6829 if (data->lr.roc)
6830 pnfs_roc_release(&data->lr.arg, &data->lr.res,
6831 data->res.lr_ret);
6832 if (inode) {
6833 nfs4_fattr_set_prechange(&data->fattr,
6834 inode_peek_iversion_raw(inode));
6835 nfs_refresh_inode(inode, &data->fattr);
6836 nfs_iput_and_deactive(inode);
6837 }
6838 kfree(calldata);
6839 }
6840
6841 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
6842 {
6843 struct nfs4_delegreturndata *d_data;
6844 struct pnfs_layout_hdr *lo;
6845
6846 d_data = data;
6847
6848 if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) {
6849 nfs4_sequence_done(task, &d_data->res.seq_res);
6850 return;
6851 }
6852
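/* Don't send a LAYOUTRETURN for a layout that is no longer valid */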
6853 lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL;
6854 if (lo && !pnfs_layout_is_valid(lo)) {
6855 d_data->args.lr_args = NULL;
6856 d_data->res.lr_res = NULL;
6857 }
6858
6859 nfs4_setup_sequence(d_data->res.server->nfs_client,
6860 &d_data->args.seq_args,
6861 &d_data->res.seq_res,
6862 task);
6863 }
6864
6865 static const struct rpc_call_ops nfs4_delegreturn_ops = {
6866 .rpc_call_prepare = nfs4_delegreturn_prepare,
6867 .rpc_call_done = nfs4_delegreturn_done,
6868 .rpc_release = nfs4_delegreturn_release,
6869 };
6870
6871 static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
6872 const nfs4_stateid *stateid,
6873 struct nfs_delegation *delegation,
6874 int issync)
6875 {
6876 struct nfs4_delegreturndata *data;
6877 struct nfs_server *server = NFS_SERVER(inode);
6878 struct rpc_task *task;
6879 struct rpc_message msg = {
6880 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
6881 .rpc_cred = cred,
6882 };
6883 struct rpc_task_setup task_setup_data = {
6884 .rpc_client = server->client,
6885 .rpc_message = &msg,
6886 .callback_ops = &nfs4_delegreturn_ops,
6887 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
6888 };
6889 int status = 0;
6890
6891 if (nfs_server_capable(inode, NFS_CAP_MOVEABLE))
6892 task_setup_data.flags |= RPC_TASK_MOVEABLE;
6893
6894 data = kzalloc(sizeof(*data), GFP_KERNEL);
6895 if (data == NULL)
6896 return -ENOMEM;
6897
6898 nfs4_state_protect(server->nfs_client,
6899 NFS_SP4_MACH_CRED_CLEANUP,
6900 &task_setup_data.rpc_client, &msg);
6901
6902 data->args.fhandle = &data->fh;
6903 data->args.stateid = &data->stateid;
6904 nfs4_bitmask_set(data->args.bitmask_store,
6905 server->cache_consistency_bitmask, inode, 0);
6906 data->args.bitmask = data->args.bitmask_store;
6907 nfs_copy_fh(&data->fh, NFS_FH(inode));
6908 nfs4_stateid_copy(&data->stateid, stateid);
6909 data->res.fattr = &data->fattr;
6910 data->res.server = server;
6911 data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
6912 data->lr.arg.ld_private = &data->lr.ld_private;
6913 nfs_fattr_init(data->res.fattr);
6914 data->timestamp = jiffies;
6915 data->rpc_status = 0;
6916 data->inode = nfs_igrab_and_active(inode);
6917 if (data->inode || issync) {
6918 data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res,
6919 cred);
6920 if (data->lr.roc) {
6921 data->args.lr_args = &data->lr.arg;
6922 data->res.lr_res = &data->lr.res;
6923 }
6924 }
6925
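/*
 * If the server delegated the time attributes, return the locally
 * updated atime/mtime to it as part of the DELEGRETURN.
 */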
6926 if (delegation &&
6927 test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) {
6928 if (delegation->type & FMODE_READ) {
6929 data->sattr.atime = inode_get_atime(inode);
6930 data->sattr.atime_set = true;
6931 }
6932 if (delegation->type & FMODE_WRITE) {
6933 data->sattr.mtime = inode_get_mtime(inode);
6934 data->sattr.mtime_set = true;
6935 }
6936 data->args.sattr_args = &data->sattr;
6937 data->res.sattr_res = true;
6938 }
6939
6940 if (!data->inode)
6941 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
6942 1);
6943 else
6944 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
6945 0);
6946
6947 task_setup_data.callback_data = data;
6948 msg.rpc_argp = &data->args;
6949 msg.rpc_resp = &data->res;
6950 task = rpc_run_task(&task_setup_data);
6951 if (IS_ERR(task))
6952 return PTR_ERR(task);
6953 if (!issync)
6954 goto out;
6955 status = rpc_wait_for_completion_task(task);
6956 if (status != 0)
6957 goto out;
6958 status = data->rpc_status;
6959 out:
6960 rpc_put_task(task);
6961 return status;
6962 }
6963
6964 int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
6965 const nfs4_stateid *stateid,
6966 struct nfs_delegation *delegation, int issync)
6967 {
6968 struct nfs_server *server = NFS_SERVER(inode);
6969 struct nfs4_exception exception = { };
6970 int err;
6971 do {
6972 err = _nfs4_proc_delegreturn(inode, cred, stateid,
6973 delegation, issync);
6974 trace_nfs4_delegreturn(inode, stateid, err);
6975 switch (err) {
6976 case -NFS4ERR_STALE_STATEID:
6977 case -NFS4ERR_EXPIRED:
6978 case 0:
6979 return 0;
6980 }
6981 err = nfs4_handle_exception(server, err, &exception);
6982 } while (exception.retry);
6983 return err;
6984 }
6985
6986 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6987 {
6988 struct inode *inode = state->inode;
6989 struct nfs_server *server = NFS_SERVER(inode);
6990 struct nfs_client *clp = server->nfs_client;
6991 struct nfs_lockt_args arg = {
6992 .fh = NFS_FH(inode),
6993 .fl = request,
6994 };
6995 struct nfs_lockt_res res = {
6996 .denied = request,
6997 };
6998 struct rpc_message msg = {
6999 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
7000 .rpc_argp = &arg,
7001 .rpc_resp = &res,
7002 .rpc_cred = state->owner->so_cred,
7003 };
7004 struct nfs4_lock_state *lsp;
7005 int status;
7006
7007 arg.lock_owner.clientid = clp->cl_clientid;
7008 status = nfs4_set_lock_state(state, request);
7009 if (status != 0)
7010 goto out;
7011 lsp = request->fl_u.nfs4_fl.owner;
7012 arg.lock_owner.id = lsp->ls_seqid.owner_id;
7013 arg.lock_owner.s_dev = server->s_dev;
7014 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
7015 switch (status) {
7016 case 0:
7017 request->c.flc_type = F_UNLCK;
7018 break;
7019 case -NFS4ERR_DENIED:
7020 status = 0;
7021 }
7022 request->fl_ops->fl_release_private(request);
7023 request->fl_ops = NULL;
7024 out:
7025 return status;
7026 }
7027
7028 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7029 {
7030 struct nfs4_exception exception = {
7031 .interruptible = true,
7032 };
7033 int err;
7034
7035 do {
7036 err = _nfs4_proc_getlk(state, cmd, request);
7037 trace_nfs4_get_lock(request, state, cmd, err);
7038 err = nfs4_handle_exception(NFS_SERVER(state->inode), err,
7039 &exception);
7040 } while (exception.retry);
7041 return err;
7042 }
7043
7044 /*
7045 * Update the seqid of a lock stateid after receiving
7046 * NFS4ERR_OLD_STATEID
7047 */
7048 static bool nfs4_refresh_lock_old_stateid(nfs4_stateid *dst,
7049 struct nfs4_lock_state *lsp)
7050 {
7051 struct nfs4_state *state = lsp->ls_state;
7052 bool ret = false;
7053
7054 spin_lock(&state->state_lock);
7055 if (!nfs4_stateid_match_other(dst, &lsp->ls_stateid))
7056 goto out;
7057 if (!nfs4_stateid_is_newer(&lsp->ls_stateid, dst))
7058 nfs4_stateid_seqid_inc(dst);
7059 else
7060 dst->seqid = lsp->ls_stateid.seqid;
7061 ret = true;
7062 out:
7063 spin_unlock(&state->state_lock);
7064 return ret;
7065 }
7066
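/*
 * Resynchronise @dst with the current lock stateid.  Returns true if
 * @dst referred to a different stateid, i.e. the caller should retry.
 */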
7067 static bool nfs4_sync_lock_stateid(nfs4_stateid *dst,
7068 struct nfs4_lock_state *lsp)
7069 {
7070 struct nfs4_state *state = lsp->ls_state;
7071 bool ret;
7072
7073 spin_lock(&state->state_lock);
7074 ret = !nfs4_stateid_match_other(dst, &lsp->ls_stateid);
7075 nfs4_stateid_copy(dst, &lsp->ls_stateid);
7076 spin_unlock(&state->state_lock);
7077 return ret;
7078 }
7079
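/* Per-call state for an asynchronous LOCKU (unlock) request */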
7080 struct nfs4_unlockdata {
7081 struct nfs_locku_args arg;
7082 struct nfs_locku_res res;
7083 struct nfs4_lock_state *lsp;
7084 struct nfs_open_context *ctx;
7085 struct nfs_lock_context *l_ctx;
7086 struct file_lock fl;
7087 struct nfs_server *server;
7088 unsigned long timestamp;
7089 };
7090
7091 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
7092 struct nfs_open_context *ctx,
7093 struct nfs4_lock_state *lsp,
7094 struct nfs_seqid *seqid)
7095 {
7096 struct nfs4_unlockdata *p;
7097 struct nfs4_state *state = lsp->ls_state;
7098 struct inode *inode = state->inode;
7099 struct nfs_lock_context *l_ctx;
7100
7101 p = kzalloc(sizeof(*p), GFP_KERNEL);
7102 if (p == NULL)
7103 return NULL;
7104 l_ctx = nfs_get_lock_context(ctx);
7105 if (!IS_ERR(l_ctx)) {
7106 p->l_ctx = l_ctx;
7107 } else {
7108 kfree(p);
7109 return NULL;
7110 }
7111 p->arg.fh = NFS_FH(inode);
7112 p->arg.fl = &p->fl;
7113 p->arg.seqid = seqid;
7114 p->res.seqid = seqid;
7115 p->lsp = lsp;
7116 /* Ensure we don't close the file until we're done freeing locks! */
7117 p->ctx = get_nfs_open_context(ctx);
7118 locks_init_lock(&p->fl);
7119 locks_copy_lock(&p->fl, fl);
7120 p->server = NFS_SERVER(inode);
7121 spin_lock(&state->state_lock);
7122 nfs4_stateid_copy(&p->arg.stateid, &lsp->ls_stateid);
7123 spin_unlock(&state->state_lock);
7124 return p;
7125 }
7126
7127 static void nfs4_locku_release_calldata(void *data)
7128 {
7129 struct nfs4_unlockdata *calldata = data;
7130 nfs_free_seqid(calldata->arg.seqid);
7131 nfs4_put_lock_state(calldata->lsp);
7132 nfs_put_lock_context(calldata->l_ctx);
7133 put_nfs_open_context(calldata->ctx);
7134 kfree(calldata);
7135 }
7136
7137 static void nfs4_locku_done(struct rpc_task *task, void *data)
7138 {
7139 struct nfs4_unlockdata *calldata = data;
7140 struct nfs4_exception exception = {
7141 .inode = calldata->lsp->ls_state->inode,
7142 .stateid = &calldata->arg.stateid,
7143 };
7144
7145 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
7146 return;
7147 switch (task->tk_status) {
7148 case 0:
7149 renew_lease(calldata->server, calldata->timestamp);
7150 locks_lock_inode_wait(calldata->lsp->ls_state->inode, &calldata->fl);
7151 if (nfs4_update_lock_stateid(calldata->lsp,
7152 &calldata->res.stateid))
7153 break;
7154 fallthrough;
7155 case -NFS4ERR_ADMIN_REVOKED:
7156 case -NFS4ERR_EXPIRED:
7157 nfs4_free_revoked_stateid(calldata->server,
7158 &calldata->arg.stateid,
7159 task->tk_msg.rpc_cred);
7160 fallthrough;
7161 case -NFS4ERR_BAD_STATEID:
7162 case -NFS4ERR_STALE_STATEID:
7163 if (nfs4_sync_lock_stateid(&calldata->arg.stateid,
7164 calldata->lsp))
7165 rpc_restart_call_prepare(task);
7166 break;
7167 case -NFS4ERR_OLD_STATEID:
7168 if (nfs4_refresh_lock_old_stateid(&calldata->arg.stateid,
7169 calldata->lsp))
7170 rpc_restart_call_prepare(task);
7171 break;
7172 default:
7173 task->tk_status = nfs4_async_handle_exception(task,
7174 calldata->server, task->tk_status,
7175 &exception);
7176 if (exception.retry)
7177 rpc_restart_call_prepare(task);
7178 }
7179 nfs_release_seqid(calldata->arg.seqid);
7180 }
7181
7182 static void nfs4_locku_prepare(struct rpc_task *task, void *data)
7183 {
7184 struct nfs4_unlockdata *calldata = data;
7185
7186 if (test_bit(NFS_CONTEXT_UNLOCK, &calldata->l_ctx->open_context->flags) &&
7187 nfs_async_iocounter_wait(task, calldata->l_ctx))
7188 return;
7189
7190 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
7191 goto out_wait;
7192 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
7193 /* Note: exit _without_ running nfs4_locku_done */
7194 goto out_no_action;
7195 }
7196 calldata->timestamp = jiffies;
7197 if (nfs4_setup_sequence(calldata->server->nfs_client,
7198 &calldata->arg.seq_args,
7199 &calldata->res.seq_res,
7200 task) != 0)
7201 nfs_release_seqid(calldata->arg.seqid);
7202 return;
7203 out_no_action:
7204 task->tk_action = NULL;
7205 out_wait:
7206 nfs4_sequence_done(task, &calldata->res.seq_res);
7207 }
7208
7209 static const struct rpc_call_ops nfs4_locku_ops = {
7210 .rpc_call_prepare = nfs4_locku_prepare,
7211 .rpc_call_done = nfs4_locku_done,
7212 .rpc_release = nfs4_locku_release_calldata,
7213 };
7214
7215 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
7216 struct nfs_open_context *ctx,
7217 struct nfs4_lock_state *lsp,
7218 struct nfs_seqid *seqid)
7219 {
7220 struct nfs4_unlockdata *data;
7221 struct rpc_message msg = {
7222 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
7223 .rpc_cred = ctx->cred,
7224 };
7225 struct rpc_task_setup task_setup_data = {
7226 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
7227 .rpc_message = &msg,
7228 .callback_ops = &nfs4_locku_ops,
7229 .workqueue = nfsiod_workqueue,
7230 .flags = RPC_TASK_ASYNC,
7231 };
7232
7233 if (nfs_server_capable(lsp->ls_state->inode, NFS_CAP_MOVEABLE))
7234 task_setup_data.flags |= RPC_TASK_MOVEABLE;
7235
7236 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
7237 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg);
7238
7239 /* Ensure this is an unlock - when canceling a lock, the
7240 * canceled lock is passed in, and it won't be an unlock.
7241 */
7242 fl->c.flc_type = F_UNLCK;
7243 if (fl->c.flc_flags & FL_CLOSE)
7244 set_bit(NFS_CONTEXT_UNLOCK, &ctx->flags);
7245
7246 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
7247 if (data == NULL) {
7248 nfs_free_seqid(seqid);
7249 return ERR_PTR(-ENOMEM);
7250 }
7251
7252 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 0);
7253 msg.rpc_argp = &data->arg;
7254 msg.rpc_resp = &data->res;
7255 task_setup_data.callback_data = data;
7256 return rpc_run_task(&task_setup_data);
7257 }
7258
7259 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
7260 {
7261 struct inode *inode = state->inode;
7262 struct nfs4_state_owner *sp = state->owner;
7263 struct nfs_inode *nfsi = NFS_I(inode);
7264 struct nfs_seqid *seqid;
7265 struct nfs4_lock_state *lsp;
7266 struct rpc_task *task;
7267 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
7268 int status = 0;
7269 unsigned char saved_flags = request->c.flc_flags;
7270
7271 status = nfs4_set_lock_state(state, request);
7272 /* Unlock _before_ we do the RPC call */
7273 request->c.flc_flags |= FL_EXISTS;
7274 /* Exclude nfs_delegation_claim_locks() */
7275 mutex_lock(&sp->so_delegreturn_mutex);
7276 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
7277 down_read(&nfsi->rwsem);
7278 if (locks_lock_inode_wait(inode, request) == -ENOENT) {
7279 up_read(&nfsi->rwsem);
7280 mutex_unlock(&sp->so_delegreturn_mutex);
7281 goto out;
7282 }
7283 lsp = request->fl_u.nfs4_fl.owner;
7284 set_bit(NFS_LOCK_UNLOCKING, &lsp->ls_flags);
7285 up_read(&nfsi->rwsem);
7286 mutex_unlock(&sp->so_delegreturn_mutex);
7287 if (status != 0)
7288 goto out;
7289 /* Is this a delegated lock? */
7290 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
7291 goto out;
7292 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid;
7293 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
7294 status = -ENOMEM;
7295 if (IS_ERR(seqid))
7296 goto out;
7297 task = nfs4_do_unlck(request,
7298 nfs_file_open_context(request->c.flc_file),
7299 lsp, seqid);
7300 status = PTR_ERR(task);
7301 if (IS_ERR(task))
7302 goto out;
7303 status = rpc_wait_for_completion_task(task);
7304 rpc_put_task(task);
7305 out:
7306 request->c.flc_flags = saved_flags;
7307 trace_nfs4_unlock(request, state, F_SETLK, status);
7308 return status;
7309 }
7310
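/* Per-call state for an asynchronous LOCK request */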
7311 struct nfs4_lockdata {
7312 struct nfs_lock_args arg;
7313 struct nfs_lock_res res;
7314 struct nfs4_lock_state *lsp;
7315 struct nfs_open_context *ctx;
7316 struct file_lock fl;
7317 unsigned long timestamp;
7318 int rpc_status;
7319 int cancelled;
7320 struct nfs_server *server;
7321 };
7322
7323 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
7324 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
7325 gfp_t gfp_mask)
7326 {
7327 struct nfs4_lockdata *p;
7328 struct inode *inode = lsp->ls_state->inode;
7329 struct nfs_server *server = NFS_SERVER(inode);
7330 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
7331
7332 p = kzalloc(sizeof(*p), gfp_mask);
7333 if (p == NULL)
7334 return NULL;
7335
7336 p->arg.fh = NFS_FH(inode);
7337 p->arg.fl = &p->fl;
7338 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
7339 if (IS_ERR(p->arg.open_seqid))
7340 goto out_free;
7341 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
7342 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
7343 if (IS_ERR(p->arg.lock_seqid))
7344 goto out_free_seqid;
7345 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
7346 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
7347 p->arg.lock_owner.s_dev = server->s_dev;
7348 p->res.lock_seqid = p->arg.lock_seqid;
7349 p->lsp = lsp;
7350 p->server = server;
7351 p->ctx = get_nfs_open_context(ctx);
7352 locks_init_lock(&p->fl);
7353 locks_copy_lock(&p->fl, fl);
7354 return p;
7355 out_free_seqid:
7356 nfs_free_seqid(p->arg.open_seqid);
7357 out_free:
7358 kfree(p);
7359 return NULL;
7360 }
7361
7362 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
7363 {
7364 struct nfs4_lockdata *data = calldata;
7365 struct nfs4_state *state = data->lsp->ls_state;
7366
7367 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
7368 goto out_wait;
7369 /* Do we need to do an open_to_lock_owner? */
7370 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) {
7371 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
7372 goto out_release_lock_seqid;
7373 }
7374 nfs4_stateid_copy(&data->arg.open_stateid,
7375 &state->open_stateid);
7376 data->arg.new_lock_owner = 1;
7377 data->res.open_seqid = data->arg.open_seqid;
7378 } else {
7379 data->arg.new_lock_owner = 0;
7380 nfs4_stateid_copy(&data->arg.lock_stateid,
7381 &data->lsp->ls_stateid);
7382 }
7383 if (!nfs4_valid_open_stateid(state)) {
7384 data->rpc_status = -EBADF;
7385 task->tk_action = NULL;
7386 goto out_release_open_seqid;
7387 }
7388 data->timestamp = jiffies;
7389 if (nfs4_setup_sequence(data->server->nfs_client,
7390 &data->arg.seq_args,
7391 &data->res.seq_res,
7392 task) == 0)
7393 return;
7394 out_release_open_seqid:
7395 nfs_release_seqid(data->arg.open_seqid);
7396 out_release_lock_seqid:
7397 nfs_release_seqid(data->arg.lock_seqid);
7398 out_wait:
7399 nfs4_sequence_done(task, &data->res.seq_res);
7400 dprintk("%s: ret = %d\n", __func__, data->rpc_status);
7401 }
7402
7403 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
7404 {
7405 struct nfs4_lockdata *data = calldata;
7406 struct nfs4_lock_state *lsp = data->lsp;
7407
7408 if (!nfs4_sequence_done(task, &data->res.seq_res))
7409 return;
7410
7411 data->rpc_status = task->tk_status;
7412 switch (task->tk_status) {
7413 case 0:
7414 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
7415 data->timestamp);
7416 if (data->arg.new_lock && !data->cancelled) {
7417 data->fl.c.flc_flags &= ~(FL_SLEEP | FL_ACCESS);
7418 if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0)
7419 goto out_restart;
7420 }
7421 if (data->arg.new_lock_owner != 0) {
7422 nfs_confirm_seqid(&lsp->ls_seqid, 0);
7423 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
7424 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
7425 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
7426 goto out_restart;
7427 break;
7428 case -NFS4ERR_OLD_STATEID:
7429 if (data->arg.new_lock_owner != 0 &&
7430 nfs4_refresh_open_old_stateid(&data->arg.open_stateid,
7431 lsp->ls_state))
7432 goto out_restart;
7433 if (nfs4_refresh_lock_old_stateid(&data->arg.lock_stateid, lsp))
7434 goto out_restart;
7435 fallthrough;
7436 case -NFS4ERR_BAD_STATEID:
7437 case -NFS4ERR_STALE_STATEID:
7438 case -NFS4ERR_EXPIRED:
7439 if (data->arg.new_lock_owner != 0) {
7440 if (!nfs4_stateid_match(&data->arg.open_stateid,
7441 &lsp->ls_state->open_stateid))
7442 goto out_restart;
7443 } else if (!nfs4_stateid_match(&data->arg.lock_stateid,
7444 &lsp->ls_stateid))
7445 goto out_restart;
7446 }
7447 out_done:
7448 dprintk("%s: ret = %d!\n", __func__, data->rpc_status);
7449 return;
7450 out_restart:
7451 if (!data->cancelled)
7452 rpc_restart_call_prepare(task);
7453 goto out_done;
7454 }
7455
7456 static void nfs4_lock_release(void *calldata)
7457 {
7458 struct nfs4_lockdata *data = calldata;
7459
7460 nfs_free_seqid(data->arg.open_seqid);
7461 if (data->cancelled && data->rpc_status == 0) {
7462 struct rpc_task *task;
7463 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
7464 data->arg.lock_seqid);
7465 if (!IS_ERR(task))
7466 rpc_put_task_async(task);
7467 dprintk("%s: cancelling lock!\n", __func__);
7468 } else
7469 nfs_free_seqid(data->arg.lock_seqid);
7470 nfs4_put_lock_state(data->lsp);
7471 put_nfs_open_context(data->ctx);
7472 kfree(data);
7473 }
7474
7475 static const struct rpc_call_ops nfs4_lock_ops = {
7476 .rpc_call_prepare = nfs4_lock_prepare,
7477 .rpc_call_done = nfs4_lock_done,
7478 .rpc_release = nfs4_lock_release,
7479 };
7480
7481 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
7482 {
7483 switch (error) {
7484 case -NFS4ERR_ADMIN_REVOKED:
7485 case -NFS4ERR_EXPIRED:
7486 case -NFS4ERR_BAD_STATEID:
7487 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
7488 if (new_lock_owner != 0 ||
7489 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
7490 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
7491 break;
7492 case -NFS4ERR_STALE_STATEID:
7493 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
7494 nfs4_schedule_lease_recovery(server->nfs_client);
7495 }
7496 }
7497
7498 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
7499 {
7500 struct nfs4_lockdata *data;
7501 struct rpc_task *task;
7502 struct rpc_message msg = {
7503 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
7504 .rpc_cred = state->owner->so_cred,
7505 };
7506 struct rpc_task_setup task_setup_data = {
7507 .rpc_client = NFS_CLIENT(state->inode),
7508 .rpc_message = &msg,
7509 .callback_ops = &nfs4_lock_ops,
7510 .workqueue = nfsiod_workqueue,
7511 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
7512 };
7513 int ret;
7514
7515 if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE))
7516 task_setup_data.flags |= RPC_TASK_MOVEABLE;
7517
7518 data = nfs4_alloc_lockdata(fl,
7519 nfs_file_open_context(fl->c.flc_file),
7520 fl->fl_u.nfs4_fl.owner, GFP_KERNEL);
7521 if (data == NULL)
7522 return -ENOMEM;
7523 if (IS_SETLKW(cmd))
7524 data->arg.block = 1;
7525 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1,
7526 recovery_type > NFS_LOCK_NEW);
7527 msg.rpc_argp = &data->arg;
7528 msg.rpc_resp = &data->res;
7529 task_setup_data.callback_data = data;
7530 if (recovery_type > NFS_LOCK_NEW) {
7531 if (recovery_type == NFS_LOCK_RECLAIM)
7532 data->arg.reclaim = NFS_LOCK_RECLAIM;
7533 } else
7534 data->arg.new_lock = 1;
7535 task = rpc_run_task(&task_setup_data);
7536 if (IS_ERR(task))
7537 return PTR_ERR(task);
7538 ret = rpc_wait_for_completion_task(task);
7539 if (ret == 0) {
7540 ret = data->rpc_status;
7541 if (ret)
7542 nfs4_handle_setlk_error(data->server, data->lsp,
7543 data->arg.new_lock_owner, ret);
7544 } else
7545 data->cancelled = true;
7546 trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
7547 rpc_put_task(task);
7548 dprintk("%s: ret = %d\n", __func__, ret);
7549 return ret;
7550 }
7551
7552 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
7553 {
7554 struct nfs_server *server = NFS_SERVER(state->inode);
7555 struct nfs4_exception exception = {
7556 .inode = state->inode,
7557 };
7558 int err;
7559
7560 do {
7561 /* Cache the lock if possible... */
7562 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
7563 return 0;
7564 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
7565 if (err != -NFS4ERR_DELAY)
7566 break;
7567 nfs4_handle_exception(server, err, &exception);
7568 } while (exception.retry);
7569 return err;
7570 }
7571
7572 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
7573 {
7574 struct nfs_server *server = NFS_SERVER(state->inode);
7575 struct nfs4_exception exception = {
7576 .inode = state->inode,
7577 };
7578 int err;
7579
7580 err = nfs4_set_lock_state(state, request);
7581 if (err != 0)
7582 return err;
7583 if (!recover_lost_locks) {
7584 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags);
7585 return 0;
7586 }
7587 do {
7588 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
7589 return 0;
7590 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
7591 switch (err) {
7592 default:
7593 goto out;
7594 case -NFS4ERR_GRACE:
7595 case -NFS4ERR_DELAY:
7596 nfs4_handle_exception(server, err, &exception);
7597 err = 0;
7598 }
7599 } while (exception.retry);
7600 out:
7601 return err;
7602 }
7603
7604 #if defined(CONFIG_NFS_V4_1)
7605 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
7606 {
7607 struct nfs4_lock_state *lsp;
7608 int status;
7609
7610 status = nfs4_set_lock_state(state, request);
7611 if (status != 0)
7612 return status;
7613 lsp = request->fl_u.nfs4_fl.owner;
7614 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) ||
7615 test_bit(NFS_LOCK_LOST, &lsp->ls_flags))
7616 return 0;
7617 return nfs4_lock_expired(state, request);
7618 }
7619 #endif
7620
7621 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7622 {
7623 struct nfs_inode *nfsi = NFS_I(state->inode);
7624 struct nfs4_state_owner *sp = state->owner;
7625 unsigned char flags = request->c.flc_flags;
7626 int status;
7627
7628 request->c.flc_flags |= FL_ACCESS;
7629 status = locks_lock_inode_wait(state->inode, request);
7630 if (status < 0)
7631 goto out;
7632 mutex_lock(&sp->so_delegreturn_mutex);
7633 down_read(&nfsi->rwsem);
7634 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
7635 /* Yes: cache locks! */
7636 /* ...but avoid races with delegation recall... */
7637 request->c.flc_flags = flags & ~FL_SLEEP;
7638 status = locks_lock_inode_wait(state->inode, request);
7639 up_read(&nfsi->rwsem);
7640 mutex_unlock(&sp->so_delegreturn_mutex);
7641 goto out;
7642 }
7643 up_read(&nfsi->rwsem);
7644 mutex_unlock(&sp->so_delegreturn_mutex);
7645 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
7646 out:
7647 request->c.flc_flags = flags;
7648 return status;
7649 }
7650
7651 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7652 {
7653 struct nfs4_exception exception = {
7654 .state = state,
7655 .inode = state->inode,
7656 .interruptible = true,
7657 };
7658 int err;
7659
7660 do {
7661 err = _nfs4_proc_setlk(state, cmd, request);
7662 if (err == -NFS4ERR_DENIED)
7663 err = -EAGAIN;
7664 err = nfs4_handle_exception(NFS_SERVER(state->inode),
7665 err, &exception);
7666 } while (exception.retry);
7667 return err;
7668 }
7669
7670 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
7671 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
7672
7673 static int
7674 nfs4_retry_setlk_simple(struct nfs4_state *state, int cmd,
7675 struct file_lock *request)
7676 {
7677 int status = -ERESTARTSYS;
7678 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
7679
7680 while (!signalled()) {
7681 status = nfs4_proc_setlk(state, cmd, request);
7682 if ((status != -EAGAIN) || IS_SETLK(cmd))
7683 break;
7684 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
7685 schedule_timeout(timeout);
7686 timeout *= 2;
7687 timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout);
7688 status = -ERESTARTSYS;
7689 }
7690 return status;
7691 }
7692
7693 #ifdef CONFIG_NFS_V4_1
7694 struct nfs4_lock_waiter {
7695 struct inode *inode;
7696 struct nfs_lowner owner;
7697 wait_queue_entry_t wait;
7698 };
7699
7700 static int
7701 nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key)
7702 {
7703 struct nfs4_lock_waiter *waiter =
7704 container_of(wait, struct nfs4_lock_waiter, wait);
7705
7706 /* NULL key means to wake up everyone */
7707 if (key) {
7708 struct cb_notify_lock_args *cbnl = key;
7709 struct nfs_lowner *lowner = &cbnl->cbnl_owner,
7710 *wowner = &waiter->owner;
7711
7712 /* Only wake if the callback was for the same owner. */
7713 if (lowner->id != wowner->id || lowner->s_dev != wowner->s_dev)
7714 return 0;
7715
7716 /* Make sure it's for the right inode */
7717 if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh))
7718 return 0;
7719 }
7720
7721 return woken_wake_function(wait, mode, flags, key);
7722 }
7723
7724 static int
7725 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7726 {
7727 struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
7728 struct nfs_server *server = NFS_SERVER(state->inode);
7729 struct nfs_client *clp = server->nfs_client;
7730 wait_queue_head_t *q = &clp->cl_lock_waitq;
7731 struct nfs4_lock_waiter waiter = {
7732 .inode = state->inode,
7733 .owner = { .clientid = clp->cl_clientid,
7734 .id = lsp->ls_seqid.owner_id,
7735 .s_dev = server->s_dev },
7736 };
7737 int status;
7738
7739 /* Don't bother with waitqueue if we don't expect a callback */
7740 if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags))
7741 return nfs4_retry_setlk_simple(state, cmd, request);
7742
7743 init_wait(&waiter.wait);
7744 waiter.wait.func = nfs4_wake_lock_waiter;
7745 add_wait_queue(q, &waiter.wait);
7746
7747 do {
7748 status = nfs4_proc_setlk(state, cmd, request);
7749 if (status != -EAGAIN || IS_SETLK(cmd))
7750 break;
7751
7752 status = -ERESTARTSYS;
7753 wait_woken(&waiter.wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE,
7754 NFS4_LOCK_MAXTIMEOUT);
7755 } while (!signalled());
7756
7757 remove_wait_queue(q, &waiter.wait);
7758
7759 return status;
7760 }
7761 #else /* !CONFIG_NFS_V4_1 */
7762 static inline int
7763 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7764 {
7765 return nfs4_retry_setlk_simple(state, cmd, request);
7766 }
7767 #endif
7768
7769 static int
7770 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
7771 {
7772 struct nfs_open_context *ctx;
7773 struct nfs4_state *state;
7774 int status;
7775
7776 /* verify open state */
7777 ctx = nfs_file_open_context(filp);
7778 state = ctx->state;
7779
7780 if (IS_GETLK(cmd)) {
7781 if (state != NULL)
7782 return nfs4_proc_getlk(state, F_GETLK, request);
7783 return 0;
7784 }
7785
7786 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
7787 return -EINVAL;
7788
7789 if (lock_is_unlock(request)) {
7790 if (state != NULL)
7791 return nfs4_proc_unlck(state, cmd, request);
7792 return 0;
7793 }
7794
7795 if (state == NULL)
7796 return -ENOLCK;
7797
7798 if ((request->c.flc_flags & FL_POSIX) &&
7799 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
7800 return -ENOLCK;
7801
7802 /*
7803 * Don't rely on the VFS having checked the file open mode,
7804 * since it won't do this for flock() locks.
7805 */
7806 switch (request->c.flc_type) {
7807 case F_RDLCK:
7808 if (!(filp->f_mode & FMODE_READ))
7809 return -EBADF;
7810 break;
7811 case F_WRLCK:
7812 if (!(filp->f_mode & FMODE_WRITE))
7813 return -EBADF;
7814 }
7815
7816 status = nfs4_set_lock_state(state, request);
7817 if (status != 0)
7818 return status;
7819
7820 return nfs4_retry_setlk(state, cmd, request);
7821 }
7822
7823 static int nfs4_delete_lease(struct file *file, void **priv)
7824 {
7825 return generic_setlease(file, F_UNLCK, NULL, priv);
7826 }
7827
7828 static int nfs4_add_lease(struct file *file, int arg, struct file_lease **lease,
7829 void **priv)
7830 {
7831 struct inode *inode = file_inode(file);
7832 fmode_t type = arg == F_RDLCK ? FMODE_READ : FMODE_WRITE;
7833 int ret;
7834
7835 /* No delegation, no lease */
7836 if (!nfs4_have_delegation(inode, type, 0))
7837 return -EAGAIN;
7838 ret = generic_setlease(file, arg, lease, priv);
7839 if (ret || nfs4_have_delegation(inode, type, 0))
7840 return ret;
7841 /* We raced with a delegation return */
7842 nfs4_delete_lease(file, priv);
7843 return -EAGAIN;
7844 }
7845
7846 int nfs4_proc_setlease(struct file *file, int arg, struct file_lease **lease,
7847 void **priv)
7848 {
7849 switch (arg) {
7850 case F_RDLCK:
7851 case F_WRLCK:
7852 return nfs4_add_lease(file, arg, lease, priv);
7853 case F_UNLCK:
7854 return nfs4_delete_lease(file, priv);
7855 default:
7856 return -EINVAL;
7857 }
7858 }
7859
7860 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
7861 {
7862 struct nfs_server *server = NFS_SERVER(state->inode);
7863 int err;
7864
7865 err = nfs4_set_lock_state(state, fl);
7866 if (err != 0)
7867 return err;
7868 do {
7869 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
7870 if (err != -NFS4ERR_DELAY)
7871 break;
7872 ssleep(1);
7873 } while (err == -NFS4ERR_DELAY);
7874 return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err);
7875 }
7876
7877 struct nfs_release_lockowner_data {
7878 struct nfs4_lock_state *lsp;
7879 struct nfs_server *server;
7880 struct nfs_release_lockowner_args args;
7881 struct nfs_release_lockowner_res res;
7882 unsigned long timestamp;
7883 };
7884
7885 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
7886 {
7887 struct nfs_release_lockowner_data *data = calldata;
7888 struct nfs_server *server = data->server;
7889 nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
7890 &data->res.seq_res, task);
7891 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
7892 data->timestamp = jiffies;
7893 }
7894
7895 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
7896 {
7897 struct nfs_release_lockowner_data *data = calldata;
7898 struct nfs_server *server = data->server;
7899
7900 nfs40_sequence_done(task, &data->res.seq_res);
7901
7902 switch (task->tk_status) {
7903 case 0:
7904 renew_lease(server, data->timestamp);
7905 break;
7906 case -NFS4ERR_STALE_CLIENTID:
7907 case -NFS4ERR_EXPIRED:
7908 nfs4_schedule_lease_recovery(server->nfs_client);
7909 break;
7910 case -NFS4ERR_LEASE_MOVED:
7911 case -NFS4ERR_DELAY:
7912 if (nfs4_async_handle_error(task, server,
7913 NULL, NULL) == -EAGAIN)
7914 rpc_restart_call_prepare(task);
7915 }
7916 }
7917
7918 static void nfs4_release_lockowner_release(void *calldata)
7919 {
7920 struct nfs_release_lockowner_data *data = calldata;
7921 nfs4_free_lock_state(data->server, data->lsp);
7922 kfree(calldata);
7923 }
7924
7925 static const struct rpc_call_ops nfs4_release_lockowner_ops = {
7926 .rpc_call_prepare = nfs4_release_lockowner_prepare,
7927 .rpc_call_done = nfs4_release_lockowner_done,
7928 .rpc_release = nfs4_release_lockowner_release,
7929 };
7930
7931 static void
7932 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
7933 {
7934 struct nfs_release_lockowner_data *data;
7935 struct rpc_message msg = {
7936 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
7937 };
7938
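/* RELEASE_LOCKOWNER is only defined for NFSv4.0 */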
7939 if (server->nfs_client->cl_mvops->minor_version != 0)
7940 return;
7941
7942 data = kmalloc(sizeof(*data), GFP_KERNEL);
7943 if (!data)
7944 return;
7945 data->lsp = lsp;
7946 data->server = server;
7947 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
7948 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
7949 data->args.lock_owner.s_dev = server->s_dev;
7950
7951 msg.rpc_argp = &data->args;
7952 msg.rpc_resp = &data->res;
7953 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
7954 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
7955 }
7956
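/*
 * NFSv4 ACLs are exposed to user space as the "system.nfs4_acl"
 * extended attribute; the handlers below map xattr get/set calls onto
 * the NFSv4 ACL attribute.
 */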
7957 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
7958
7959 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler,
7960 struct mnt_idmap *idmap,
7961 struct dentry *unused, struct inode *inode,
7962 const char *key, const void *buf,
7963 size_t buflen, int flags)
7964 {
7965 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_ACL);
7966 }
7967
7968 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler,
7969 struct dentry *unused, struct inode *inode,
7970 const char *key, void *buf, size_t buflen)
7971 {
7972 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_ACL);
7973 }
7974
7975 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry)
7976 {
7977 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_ACL);
7978 }
7979
7980 #if defined(CONFIG_NFS_V4_1)
7981 #define XATTR_NAME_NFSV4_DACL "system.nfs4_dacl"
7982
7983 static int nfs4_xattr_set_nfs4_dacl(const struct xattr_handler *handler,
7984 struct mnt_idmap *idmap,
7985 struct dentry *unused, struct inode *inode,
7986 const char *key, const void *buf,
7987 size_t buflen, int flags)
7988 {
7989 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_DACL);
7990 }
7991
7992 static int nfs4_xattr_get_nfs4_dacl(const struct xattr_handler *handler,
7993 struct dentry *unused, struct inode *inode,
7994 const char *key, void *buf, size_t buflen)
7995 {
7996 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_DACL);
7997 }
7998
7999 static bool nfs4_xattr_list_nfs4_dacl(struct dentry *dentry)
8000 {
8001 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_DACL);
8002 }
8003
8004 #define XATTR_NAME_NFSV4_SACL "system.nfs4_sacl"
8005
8006 static int nfs4_xattr_set_nfs4_sacl(const struct xattr_handler *handler,
8007 struct mnt_idmap *idmap,
8008 struct dentry *unused, struct inode *inode,
8009 const char *key, const void *buf,
8010 size_t buflen, int flags)
8011 {
8012 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_SACL);
8013 }
8014
8015 static int nfs4_xattr_get_nfs4_sacl(const struct xattr_handler *handler,
8016 struct dentry *unused, struct inode *inode,
8017 const char *key, void *buf, size_t buflen)
8018 {
8019 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_SACL);
8020 }
8021
8022 static bool nfs4_xattr_list_nfs4_sacl(struct dentry *dentry)
8023 {
8024 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_SACL);
8025 }
8026
8027 #endif
8028
8029 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
8030
8031 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler,
8032 struct mnt_idmap *idmap,
8033 struct dentry *unused, struct inode *inode,
8034 const char *key, const void *buf,
8035 size_t buflen, int flags)
8036 {
8037 if (security_ismaclabel(key))
8038 return nfs4_set_security_label(inode, buf, buflen);
8039
8040 return -EOPNOTSUPP;
8041 }
8042
8043 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler,
8044 struct dentry *unused, struct inode *inode,
8045 const char *key, void *buf, size_t buflen)
8046 {
8047 if (security_ismaclabel(key))
8048 return nfs4_get_security_label(inode, buf, buflen);
8049 return -EOPNOTSUPP;
8050 }
8051
8052 static ssize_t
8053 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
8054 {
8055 int len = 0;
8056
8057 if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) {
8058 len = security_inode_listsecurity(inode, list, list_len);
8059 if (len >= 0 && list_len && len > list_len)
8060 return -ERANGE;
8061 }
8062 return len;
8063 }
8064
8065 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = {
8066 .prefix = XATTR_SECURITY_PREFIX,
8067 .get = nfs4_xattr_get_nfs4_label,
8068 .set = nfs4_xattr_set_nfs4_label,
8069 };
8070
8071 #else
8072
8073 static ssize_t
8074 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
8075 {
8076 return 0;
8077 }
8078
8079 #endif
8080
8081 #ifdef CONFIG_NFS_V4_2
8082 static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler,
8083 struct mnt_idmap *idmap,
8084 struct dentry *unused, struct inode *inode,
8085 const char *key, const void *buf,
8086 size_t buflen, int flags)
8087 {
8088 u32 mask;
8089 int ret;
8090
8091 if (!nfs_server_capable(inode, NFS_CAP_XATTR))
8092 return -EOPNOTSUPP;
8093
8094 /*
8095 * There is no mapping from the MAY_* flags to the NFS_ACCESS_XA*
8096 * flags right now. Handling of xattr operations uses the normal
8097 * file read/write permissions.
8098 *
8099 * Just in case the server has other ideas (which RFC 8276 allows),
8100 * do a cached access check for the XA* flags to possibly avoid
8101 * doing an RPC and getting EACCES back.
8102 */
8103 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
8104 if (!(mask & NFS_ACCESS_XAWRITE))
8105 return -EACCES;
8106 }
8107
8108 if (buf == NULL) {
8109 ret = nfs42_proc_removexattr(inode, key);
8110 if (!ret)
8111 nfs4_xattr_cache_remove(inode, key);
8112 } else {
8113 ret = nfs42_proc_setxattr(inode, key, buf, buflen, flags);
8114 if (!ret)
8115 nfs4_xattr_cache_add(inode, key, buf, NULL, buflen);
8116 }
8117
8118 return ret;
8119 }
8120
8121 static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler,
8122 struct dentry *unused, struct inode *inode,
8123 const char *key, void *buf, size_t buflen)
8124 {
8125 u32 mask;
8126 ssize_t ret;
8127
8128 if (!nfs_server_capable(inode, NFS_CAP_XATTR))
8129 return -EOPNOTSUPP;
8130
8131 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
8132 if (!(mask & NFS_ACCESS_XAREAD))
8133 return -EACCES;
8134 }
8135
8136 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
8137 if (ret)
8138 return ret;
8139
8140 ret = nfs4_xattr_cache_get(inode, key, buf, buflen);
8141 if (ret >= 0 || (ret < 0 && ret != -ENOENT))
8142 return ret;
8143
8144 ret = nfs42_proc_getxattr(inode, key, buf, buflen);
8145
8146 return ret;
8147 }
8148
8149 static ssize_t
8150 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len)
8151 {
8152 u64 cookie;
8153 bool eof;
8154 ssize_t ret, size;
8155 char *buf;
8156 size_t buflen;
8157 u32 mask;
8158
8159 if (!nfs_server_capable(inode, NFS_CAP_XATTR))
8160 return 0;
8161
8162 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
8163 if (!(mask & NFS_ACCESS_XALIST))
8164 return 0;
8165 }
8166
8167 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
8168 if (ret)
8169 return ret;
8170
8171 ret = nfs4_xattr_cache_list(inode, list, list_len);
8172 if (ret >= 0 || (ret < 0 && ret != -ENOENT))
8173 return ret;
8174
8175 cookie = 0;
8176 eof = false;
8177 buflen = list_len ? list_len : XATTR_LIST_MAX;
8178 buf = list_len ? list : NULL;
8179 size = 0;
8180
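/*
 * Page through LISTXATTRS replies until the server reports EOF,
 * accumulating the total length of the returned names.
 */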
8181 while (!eof) {
8182 ret = nfs42_proc_listxattrs(inode, buf, buflen,
8183 &cookie, &eof);
8184 if (ret < 0)
8185 return ret;
8186
8187 if (list_len) {
8188 buf += ret;
8189 buflen -= ret;
8190 }
8191 size += ret;
8192 }
8193
8194 if (list_len)
8195 nfs4_xattr_cache_set_list(inode, list, size);
8196
8197 return size;
8198 }
8199
8200 #else
8201
8202 static ssize_t
8203 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len)
8204 {
8205 return 0;
8206 }
8207 #endif /* CONFIG_NFS_V4_2 */
8208
8209 /*
8210 * nfs_fhget will use either the mounted_on_fileid or the fileid
8211 */
8212 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
8213 {
8214 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
8215 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
8216 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
8217 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
8218 return;
8219
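/* Present the referral point as an empty directory */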
8220 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
8221 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
8222 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
8223 fattr->nlink = 2;
8224 }
8225
8226 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
8227 const struct qstr *name,
8228 struct nfs4_fs_locations *fs_locations,
8229 struct page *page)
8230 {
8231 struct nfs_server *server = NFS_SERVER(dir);
8232 u32 bitmask[3];
8233 struct nfs4_fs_locations_arg args = {
8234 .dir_fh = NFS_FH(dir),
8235 .name = name,
8236 .page = page,
8237 .bitmask = bitmask,
8238 };
8239 struct nfs4_fs_locations_res res = {
8240 .fs_locations = fs_locations,
8241 };
8242 struct rpc_message msg = {
8243 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
8244 .rpc_argp = &args,
8245 .rpc_resp = &res,
8246 };
8247 int status;
8248
8249 dprintk("%s: start\n", __func__);
8250
8251 bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS;
8252 bitmask[1] = nfs4_fattr_bitmap[1];
8253
8254 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
8255 * is not supported */
8256 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
8257 bitmask[0] &= ~FATTR4_WORD0_FILEID;
8258 else
8259 bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
8260
8261 nfs_fattr_init(fs_locations->fattr);
8262 fs_locations->server = server;
8263 fs_locations->nlocations = 0;
8264 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
8265 dprintk("%s: returned status = %d\n", __func__, status);
8266 return status;
8267 }
8268
8269 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
8270 const struct qstr *name,
8271 struct nfs4_fs_locations *fs_locations,
8272 struct page *page)
8273 {
8274 struct nfs4_exception exception = {
8275 .interruptible = true,
8276 };
8277 int err;
8278 do {
8279 err = _nfs4_proc_fs_locations(client, dir, name,
8280 fs_locations, page);
8281 trace_nfs4_get_fs_locations(dir, name, err);
8282 err = nfs4_handle_exception(NFS_SERVER(dir), err,
8283 &exception);
8284 } while (exception.retry);
8285 return err;
8286 }
8287
8288 /*
8289 * This operation also signals the server that this client is
8290 * performing migration recovery. The server can stop returning
8291 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is
8292 * appended to this compound to identify the client ID which is
8293 * performing recovery.
8294 */
8295 static int _nfs40_proc_get_locations(struct nfs_server *server,
8296 struct nfs_fh *fhandle,
8297 struct nfs4_fs_locations *locations,
8298 struct page *page, const struct cred *cred)
8299 {
8300 struct rpc_clnt *clnt = server->client;
8301 u32 bitmask[2] = {
8302 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
8303 };
8304 struct nfs4_fs_locations_arg args = {
8305 .clientid = server->nfs_client->cl_clientid,
8306 .fh = fhandle,
8307 .page = page,
8308 .bitmask = bitmask,
8309 .migration = 1, /* skip LOOKUP */
8310 .renew = 1, /* append RENEW */
8311 };
8312 struct nfs4_fs_locations_res res = {
8313 .fs_locations = locations,
8314 .migration = 1,
8315 .renew = 1,
8316 };
8317 struct rpc_message msg = {
8318 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
8319 .rpc_argp = &args,
8320 .rpc_resp = &res,
8321 .rpc_cred = cred,
8322 };
8323 unsigned long now = jiffies;
8324 int status;
8325
8326 nfs_fattr_init(locations->fattr);
8327 locations->server = server;
8328 locations->nlocations = 0;
8329
8330 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
8331 status = nfs4_call_sync_sequence(clnt, server, &msg,
8332 &args.seq_args, &res.seq_res);
8333 if (status)
8334 return status;
8335
8336 renew_lease(server, now);
8337 return 0;
8338 }
8339
8340 #ifdef CONFIG_NFS_V4_1
8341
8342 /*
8343 * This operation also signals the server that this client is
8344 * performing migration recovery. The server can stop asserting
8345 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID
8346 * performing this operation is identified in the SEQUENCE
8347 * operation in this compound.
8348 *
8349 * When the client supports GETATTR(fs_locations_info), it can
8350 * be plumbed in here.
8351 */
8352 static int _nfs41_proc_get_locations(struct nfs_server *server,
8353 struct nfs_fh *fhandle,
8354 struct nfs4_fs_locations *locations,
8355 struct page *page, const struct cred *cred)
8356 {
8357 struct rpc_clnt *clnt = server->client;
8358 u32 bitmask[2] = {
8359 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
8360 };
8361 struct nfs4_fs_locations_arg args = {
8362 .fh = fhandle,
8363 .page = page,
8364 .bitmask = bitmask,
8365 .migration = 1, /* skip LOOKUP */
8366 };
8367 struct nfs4_fs_locations_res res = {
8368 .fs_locations = locations,
8369 .migration = 1,
8370 };
8371 struct rpc_message msg = {
8372 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
8373 .rpc_argp = &args,
8374 .rpc_resp = &res,
8375 .rpc_cred = cred,
8376 };
8377 struct nfs4_call_sync_data data = {
8378 .seq_server = server,
8379 .seq_args = &args.seq_args,
8380 .seq_res = &res.seq_res,
8381 };
8382 struct rpc_task_setup task_setup_data = {
8383 .rpc_client = clnt,
8384 .rpc_message = &msg,
8385 .callback_ops = server->nfs_client->cl_mvops->call_sync_ops,
8386 .callback_data = &data,
8387 .flags = RPC_TASK_NO_ROUND_ROBIN,
8388 };
8389 int status;
8390
8391 nfs_fattr_init(locations->fattr);
8392 locations->server = server;
8393 locations->nlocations = 0;
8394
8395 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
8396 status = nfs4_call_sync_custom(&task_setup_data);
8397 if (status == NFS4_OK &&
8398 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
8399 status = -NFS4ERR_LEASE_MOVED;
8400 return status;
8401 }
8402
8403 #endif /* CONFIG_NFS_V4_1 */
8404
8405 /**
8406 * nfs4_proc_get_locations - discover locations for a migrated FSID
8407 * @server: pointer to nfs_server to process
8408 * @fhandle: pointer to the kernel NFS client file handle
8409 * @locations: result of query
8410 * @page: buffer
8411 * @cred: credential to use for this operation
8412 *
8413 * Returns NFS4_OK on success, a negative NFS4ERR status code if the
8414 * operation failed, or a negative errno if a local error occurred.
8415 *
8416 * On success, "locations" is filled in, but if the server has
8417 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not
8418 * asserted.
8419 *
8420 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases
8421 * from this client that require migration recovery.
8422 */
8423 int nfs4_proc_get_locations(struct nfs_server *server,
8424 struct nfs_fh *fhandle,
8425 struct nfs4_fs_locations *locations,
8426 struct page *page, const struct cred *cred)
8427 {
8428 struct nfs_client *clp = server->nfs_client;
8429 const struct nfs4_mig_recovery_ops *ops =
8430 clp->cl_mvops->mig_recovery_ops;
8431 struct nfs4_exception exception = {
8432 .interruptible = true,
8433 };
8434 int status;
8435
8436 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
8437 (unsigned long long)server->fsid.major,
8438 (unsigned long long)server->fsid.minor,
8439 clp->cl_hostname);
8440 nfs_display_fhandle(fhandle, __func__);
8441
8442 do {
8443 status = ops->get_locations(server, fhandle, locations, page,
8444 cred);
8445 if (status != -NFS4ERR_DELAY)
8446 break;
8447 nfs4_handle_exception(server, status, &exception);
8448 } while (exception.retry);
8449 return status;
8450 }
8451
8452 /*
8453 * This operation also signals the server that this client is
8454 * performing "lease moved" recovery. The server can stop
8455 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation
8456 * is appended to this compound to identify the client ID which is
8457 * performing recovery.
8458 */
8459 static int _nfs40_proc_fsid_present(struct inode *inode, const struct cred *cred)
8460 {
8461 struct nfs_server *server = NFS_SERVER(inode);
8462 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
8463 struct rpc_clnt *clnt = server->client;
8464 struct nfs4_fsid_present_arg args = {
8465 .fh = NFS_FH(inode),
8466 .clientid = clp->cl_clientid,
8467 .renew = 1, /* append RENEW */
8468 };
8469 struct nfs4_fsid_present_res res = {
8470 .renew = 1,
8471 };
8472 struct rpc_message msg = {
8473 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
8474 .rpc_argp = &args,
8475 .rpc_resp = &res,
8476 .rpc_cred = cred,
8477 };
8478 unsigned long now = jiffies;
8479 int status;
8480
8481 res.fh = nfs_alloc_fhandle();
8482 if (res.fh == NULL)
8483 return -ENOMEM;
8484
8485 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
8486 status = nfs4_call_sync_sequence(clnt, server, &msg,
8487 &args.seq_args, &res.seq_res);
8488 nfs_free_fhandle(res.fh);
8489 if (status)
8490 return status;
8491
8492 do_renew_lease(clp, now);
8493 return 0;
8494 }
8495
8496 #ifdef CONFIG_NFS_V4_1
8497
8498 /*
8499 * This operation also signals the server that this client is
8500 * performing "lease moved" recovery. The server can stop asserting
8501 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing
8502 * this operation is identified in the SEQUENCE operation in this
8503 * compound.
8504 */
8505 static int _nfs41_proc_fsid_present(struct inode *inode, const struct cred *cred)
8506 {
8507 struct nfs_server *server = NFS_SERVER(inode);
8508 struct rpc_clnt *clnt = server->client;
8509 struct nfs4_fsid_present_arg args = {
8510 .fh = NFS_FH(inode),
8511 };
8512 struct nfs4_fsid_present_res res = {
8513 };
8514 struct rpc_message msg = {
8515 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
8516 .rpc_argp = &args,
8517 .rpc_resp = &res,
8518 .rpc_cred = cred,
8519 };
8520 int status;
8521
8522 res.fh = nfs_alloc_fhandle();
8523 if (res.fh == NULL)
8524 return -ENOMEM;
8525
8526 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
8527 status = nfs4_call_sync_sequence(clnt, server, &msg,
8528 &args.seq_args, &res.seq_res);
8529 nfs_free_fhandle(res.fh);
8530 if (status == NFS4_OK &&
8531 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
8532 status = -NFS4ERR_LEASE_MOVED;
8533 return status;
8534 }
8535
8536 #endif /* CONFIG_NFS_V4_1 */
8537
8538 /**
8539 * nfs4_proc_fsid_present - Is this FSID present or absent on server?
8540 * @inode: inode on FSID to check
8541 * @cred: credential to use for this operation
8542 *
8543 * Server indicates whether the FSID is present, moved, or not
8544 * recognized. This operation is necessary to clear a LEASE_MOVED
8545 * condition for this client ID.
8546 *
8547 * Returns NFS4_OK if the FSID is present on this server,
8548 * -NFS4ERR_MOVED if the FSID is no longer present, a negative
8549 * NFS4ERR code if some error occurred on the server, or a
8550 * negative errno if a local failure occurred.
8551 */
8552 int nfs4_proc_fsid_present(struct inode *inode, const struct cred *cred)
8553 {
8554 struct nfs_server *server = NFS_SERVER(inode);
8555 struct nfs_client *clp = server->nfs_client;
8556 const struct nfs4_mig_recovery_ops *ops =
8557 clp->cl_mvops->mig_recovery_ops;
8558 struct nfs4_exception exception = {
8559 .interruptible = true,
8560 };
8561 int status;
8562
8563 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
8564 (unsigned long long)server->fsid.major,
8565 (unsigned long long)server->fsid.minor,
8566 clp->cl_hostname);
8567 nfs_display_fhandle(NFS_FH(inode), __func__);
8568
8569 do {
8570 status = ops->fsid_present(inode, cred);
8571 if (status != -NFS4ERR_DELAY)
8572 break;
8573 nfs4_handle_exception(server, status, &exception);
8574 } while (exception.retry);
8575 return status;
8576 }
8577
8578 /*
8579 * If 'use_integrity' is true and the state management nfs_client
8580 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
8581 * and the machine credential as per RFC3530bis and RFC5661 Security
8582 * Considerations sections. Otherwise, just use the user cred with the
8583 * filesystem's rpc_client.
8584 */
8585 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity)
8586 {
8587 int status;
8588 struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
8589 struct nfs_client *clp = NFS_SERVER(dir)->nfs_client;
8590 struct nfs4_secinfo_arg args = {
8591 .dir_fh = NFS_FH(dir),
8592 .name = name,
8593 };
8594 struct nfs4_secinfo_res res = {
8595 .flavors = flavors,
8596 };
8597 struct rpc_message msg = {
8598 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
8599 .rpc_argp = &args,
8600 .rpc_resp = &res,
8601 };
8602 struct nfs4_call_sync_data data = {
8603 .seq_server = NFS_SERVER(dir),
8604 .seq_args = &args.seq_args,
8605 .seq_res = &res.seq_res,
8606 };
8607 struct rpc_task_setup task_setup = {
8608 .rpc_client = clnt,
8609 .rpc_message = &msg,
8610 .callback_ops = clp->cl_mvops->call_sync_ops,
8611 .callback_data = &data,
8612 .flags = RPC_TASK_NO_ROUND_ROBIN,
8613 };
8614 const struct cred *cred = NULL;
8615
8616 if (use_integrity) {
8617 clnt = clp->cl_rpcclient;
8618 task_setup.rpc_client = clnt;
8619
8620 cred = nfs4_get_clid_cred(clp);
8621 msg.rpc_cred = cred;
8622 }
8623
8624 dprintk("NFS call secinfo %s\n", name->name);
8625
8626 nfs4_state_protect(clp, NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
8627 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
8628 status = nfs4_call_sync_custom(&task_setup);
8629
8630 dprintk("NFS reply secinfo: %d\n", status);
8631
8632 put_cred(cred);
8633 return status;
8634 }
8635
8636 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
8637 struct nfs4_secinfo_flavors *flavors)
8638 {
8639 struct nfs4_exception exception = {
8640 .interruptible = true,
8641 };
8642 int err;
8643 do {
8644 err = -NFS4ERR_WRONGSEC;
8645
8646 /* try to use integrity protection with machine cred */
8647 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client))
8648 err = _nfs4_proc_secinfo(dir, name, flavors, true);
8649
8650 /*
8651 * if unable to use integrity protection, or SECINFO with
8652 * integrity protection returns NFS4ERR_WRONGSEC (which is
8653 * disallowed by spec, but exists in deployed servers) use
8654 * the current filesystem's rpc_client and the user cred.
8655 */
8656 if (err == -NFS4ERR_WRONGSEC)
8657 err = _nfs4_proc_secinfo(dir, name, flavors, false);
8658
8659 trace_nfs4_secinfo(dir, name, err);
8660 err = nfs4_handle_exception(NFS_SERVER(dir), err,
8661 &exception);
8662 } while (exception.retry);
8663 return err;
8664 }
8665
8666 #ifdef CONFIG_NFS_V4_1
8667 /*
8668 * Check the exchange flags returned by the server for invalid flags: flags
8669 * outside the allowed mask, both the PNFS and NON_PNFS flags set, or none of
8670 * the NON_PNFS, PNFS, or DS flags set.
8671 */
8672 static int nfs4_check_cl_exchange_flags(u32 flags, u32 version)
8673 {
8674 if (version >= 2 && (flags & ~EXCHGID4_2_FLAG_MASK_R))
8675 goto out_inval;
8676 else if (version < 2 && (flags & ~EXCHGID4_FLAG_MASK_R))
8677 goto out_inval;
8678 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
8679 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
8680 goto out_inval;
8681 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
8682 goto out_inval;
8683 return NFS_OK;
8684 out_inval:
8685 return -NFS4ERR_INVAL;
8686 }
8687
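/*
 * Compare two server_scope values returned by EXCHANGE_ID: they match only
 * when both the length and the contents are identical.
 */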
8688 static bool
8689 nfs41_same_server_scope(struct nfs41_server_scope *a,
8690 struct nfs41_server_scope *b)
8691 {
8692 if (a->server_scope_sz != b->server_scope_sz)
8693 return false;
8694 return memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0;
8695 }
8696
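/*
 * Completion callback for BIND_CONN_TO_SESSION: schedule session recovery
 * on a bad or dead session; if we asked to bind both directions but the
 * server bound only one, close the connection and retry a limited number
 * of times.
 */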
8697 static void
8698 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
8699 {
8700 struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp;
8701 struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp;
8702 struct nfs_client *clp = args->client;
8703
8704 switch (task->tk_status) {
8705 case -NFS4ERR_BADSESSION:
8706 case -NFS4ERR_DEADSESSION:
8707 nfs4_schedule_session_recovery(clp->cl_session,
8708 task->tk_status);
8709 return;
8710 }
8711 if (args->dir == NFS4_CDFC4_FORE_OR_BOTH &&
8712 res->dir != NFS4_CDFS4_BOTH) {
8713 rpc_task_close_connection(task);
8714 if (args->retries++ < MAX_BIND_CONN_TO_SESSION_RETRIES)
8715 rpc_restart_call(task);
8716 }
8717 }
8718
8719 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = {
8720 .rpc_call_done = nfs4_bind_one_conn_to_session_done,
8721 };
8722
8723 /*
8724 * nfs4_proc_bind_one_conn_to_session()
8725 *
8726 * The 4.1 client currently uses the same TCP connection for the
8727 * fore and backchannel.
8728 */
8729 static
8730 int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt,
8731 struct rpc_xprt *xprt,
8732 struct nfs_client *clp,
8733 const struct cred *cred)
8734 {
8735 int status;
8736 struct nfs41_bind_conn_to_session_args args = {
8737 .client = clp,
8738 .dir = NFS4_CDFC4_FORE_OR_BOTH,
8739 .retries = 0,
8740 };
8741 struct nfs41_bind_conn_to_session_res res;
8742 struct rpc_message msg = {
8743 .rpc_proc =
8744 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
8745 .rpc_argp = &args,
8746 .rpc_resp = &res,
8747 .rpc_cred = cred,
8748 };
8749 struct rpc_task_setup task_setup_data = {
8750 .rpc_client = clnt,
8751 .rpc_xprt = xprt,
8752 .callback_ops = &nfs4_bind_one_conn_to_session_ops,
8753 .rpc_message = &msg,
8754 .flags = RPC_TASK_TIMEOUT,
8755 };
8756 struct rpc_task *task;
8757
8758 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
8759 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
8760 args.dir = NFS4_CDFC4_FORE;
8761
8762 /* Do not set the backchannel flag unless this is clnt->cl_xprt */
8763 if (xprt != rcu_access_pointer(clnt->cl_xprt))
8764 args.dir = NFS4_CDFC4_FORE;
8765
8766 task = rpc_run_task(&task_setup_data);
8767 if (!IS_ERR(task)) {
8768 status = task->tk_status;
8769 rpc_put_task(task);
8770 } else
8771 status = PTR_ERR(task);
8772 trace_nfs4_bind_conn_to_session(clp, status);
8773 if (status == 0) {
8774 if (memcmp(res.sessionid.data,
8775 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
8776 dprintk("NFS: %s: Session ID mismatch\n", __func__);
8777 return -EIO;
8778 }
8779 if ((res.dir & args.dir) != res.dir || res.dir == 0) {
8780 dprintk("NFS: %s: Unexpected direction from server\n",
8781 __func__);
8782 return -EIO;
8783 }
8784 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
8785 dprintk("NFS: %s: Server returned RDMA mode = true\n",
8786 __func__);
8787 return -EIO;
8788 }
8789 }
8790
8791 return status;
8792 }
8793
8794 struct rpc_bind_conn_calldata {
8795 struct nfs_client *clp;
8796 const struct cred *cred;
8797 };
8798
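/*
 * Called once per transport by rpc_clnt_iterate_for_each_xprt() in order to
 * bind that transport to the session.
 */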
8799 static int
8800 nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt,
8801 struct rpc_xprt *xprt,
8802 void *calldata)
8803 {
8804 struct rpc_bind_conn_calldata *p = calldata;
8805
8806 return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred);
8807 }
8808
8809 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, const struct cred *cred)
8810 {
8811 struct rpc_bind_conn_calldata data = {
8812 .clp = clp,
8813 .cred = cred,
8814 };
8815 return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient,
8816 nfs4_proc_bind_conn_to_session_callback, &data);
8817 }
8818
8819 /*
8820 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map,
8821 * plus operations we would like to see in the allow map to enable certain features
8822 */
8823 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = {
8824 .how = SP4_MACH_CRED,
8825 .enforce.u.words = {
8826 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
8827 1 << (OP_EXCHANGE_ID - 32) |
8828 1 << (OP_CREATE_SESSION - 32) |
8829 1 << (OP_DESTROY_SESSION - 32) |
8830 1 << (OP_DESTROY_CLIENTID - 32)
8831 },
8832 .allow.u.words = {
8833 [0] = 1 << (OP_CLOSE) |
8834 1 << (OP_OPEN_DOWNGRADE) |
8835 1 << (OP_LOCKU) |
8836 1 << (OP_DELEGRETURN) |
8837 1 << (OP_COMMIT),
8838 [1] = 1 << (OP_SECINFO - 32) |
8839 1 << (OP_SECINFO_NO_NAME - 32) |
8840 1 << (OP_LAYOUTRETURN - 32) |
8841 1 << (OP_TEST_STATEID - 32) |
8842 1 << (OP_FREE_STATEID - 32) |
8843 1 << (OP_WRITE - 32)
8844 }
8845 };
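/*
 * Note: operations numbered below 32 are encoded in .u.words[0]; operations
 * numbered 32 and above set bit (op - 32) in .u.words[1].
 */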
8846
8847 /*
8848 * Select the state protection mode for client `clp' given the server results
8849 * from exchange_id in `sp'.
8850 *
8851 * Returns 0 on success, negative errno otherwise.
8852 */
8853 static int nfs4_sp4_select_mode(struct nfs_client *clp,
8854 struct nfs41_state_protection *sp)
8855 {
8856 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = {
8857 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
8858 1 << (OP_EXCHANGE_ID - 32) |
8859 1 << (OP_CREATE_SESSION - 32) |
8860 1 << (OP_DESTROY_SESSION - 32) |
8861 1 << (OP_DESTROY_CLIENTID - 32)
8862 };
8863 unsigned long flags = 0;
8864 unsigned int i;
8865 int ret = 0;
8866
8867 if (sp->how == SP4_MACH_CRED) {
8868 /* Print state protect result */
8869 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n");
8870 for (i = 0; i <= LAST_NFS4_OP; i++) {
8871 if (test_bit(i, sp->enforce.u.longs))
8872 dfprintk(MOUNT, " enforce op %d\n", i);
8873 if (test_bit(i, sp->allow.u.longs))
8874 dfprintk(MOUNT, " allow op %d\n", i);
8875 }
8876
8877 /* make sure nothing is on enforce list that isn't supported */
8878 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) {
8879 if (sp->enforce.u.words[i] & ~supported_enforce[i]) {
8880 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
8881 ret = -EINVAL;
8882 goto out;
8883 }
8884 }
8885
8886 /*
8887 * Minimal mode - state operations are allowed to use machine
8888 * credential. Note this already happens by default, so the
8889 * client doesn't have to do anything more than the negotiation.
8890 *
8891 * NOTE: we don't care if EXCHANGE_ID is in the list -
8892 * we're already using the machine cred for exchange_id
8893 * and will never use a different cred.
8894 */
8895 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) &&
8896 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) &&
8897 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) &&
8898 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) {
8899 dfprintk(MOUNT, "sp4_mach_cred:\n");
8900 dfprintk(MOUNT, " minimal mode enabled\n");
8901 __set_bit(NFS_SP4_MACH_CRED_MINIMAL, &flags);
8902 } else {
8903 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
8904 ret = -EINVAL;
8905 goto out;
8906 }
8907
8908 if (test_bit(OP_CLOSE, sp->allow.u.longs) &&
8909 test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) &&
8910 test_bit(OP_DELEGRETURN, sp->allow.u.longs) &&
8911 test_bit(OP_LOCKU, sp->allow.u.longs)) {
8912 dfprintk(MOUNT, " cleanup mode enabled\n");
8913 __set_bit(NFS_SP4_MACH_CRED_CLEANUP, &flags);
8914 }
8915
8916 if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) {
8917 dfprintk(MOUNT, " pnfs cleanup mode enabled\n");
8918 __set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP, &flags);
8919 }
8920
8921 if (test_bit(OP_SECINFO, sp->allow.u.longs) &&
8922 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) {
8923 dfprintk(MOUNT, " secinfo mode enabled\n");
8924 __set_bit(NFS_SP4_MACH_CRED_SECINFO, &flags);
8925 }
8926
8927 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) &&
8928 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) {
8929 dfprintk(MOUNT, " stateid mode enabled\n");
8930 __set_bit(NFS_SP4_MACH_CRED_STATEID, &flags);
8931 }
8932
8933 if (test_bit(OP_WRITE, sp->allow.u.longs)) {
8934 dfprintk(MOUNT, " write mode enabled\n");
8935 __set_bit(NFS_SP4_MACH_CRED_WRITE, &flags);
8936 }
8937
8938 if (test_bit(OP_COMMIT, sp->allow.u.longs)) {
8939 dfprintk(MOUNT, " commit mode enabled\n");
8940 __set_bit(NFS_SP4_MACH_CRED_COMMIT, &flags);
8941 }
8942 }
8943 out:
8944 clp->cl_sp4_flags = flags;
8945 return ret;
8946 }
8947
8948 struct nfs41_exchange_id_data {
8949 struct nfs41_exchange_id_res res;
8950 struct nfs41_exchange_id_args args;
8951 };
8952
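/*
 * Release callback for the EXCHANGE_ID task: drop the nfs_client reference
 * and free the result buffers allocated by nfs4_run_exchange_id().
 */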
8953 static void nfs4_exchange_id_release(void *data)
8954 {
8955 struct nfs41_exchange_id_data *cdata =
8956 (struct nfs41_exchange_id_data *)data;
8957
8958 nfs_put_client(cdata->args.client);
8959 kfree(cdata->res.impl_id);
8960 kfree(cdata->res.server_scope);
8961 kfree(cdata->res.server_owner);
8962 kfree(cdata);
8963 }
8964
8965 static const struct rpc_call_ops nfs4_exchange_id_call_ops = {
8966 .rpc_release = nfs4_exchange_id_release,
8967 };
8968
8969 /*
8970 * nfs4_run_exchange_id()
8971 *
8972 * Set up and start an asynchronous EXCHANGE_ID operation.
8973 */
8974 static struct rpc_task *
8975 nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred,
8976 u32 sp4_how, struct rpc_xprt *xprt)
8977 {
8978 struct rpc_message msg = {
8979 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
8980 .rpc_cred = cred,
8981 };
8982 struct rpc_task_setup task_setup_data = {
8983 .rpc_client = clp->cl_rpcclient,
8984 .callback_ops = &nfs4_exchange_id_call_ops,
8985 .rpc_message = &msg,
8986 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
8987 };
8988 struct nfs41_exchange_id_data *calldata;
8989 int status;
8990
8991 if (!refcount_inc_not_zero(&clp->cl_count))
8992 return ERR_PTR(-EIO);
8993
8994 status = -ENOMEM;
8995 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
8996 if (!calldata)
8997 goto out;
8998
8999 nfs4_init_boot_verifier(clp, &calldata->args.verifier);
9000
9001 status = nfs4_init_uniform_client_string(clp);
9002 if (status)
9003 goto out_calldata;
9004
9005 calldata->res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
9006 GFP_NOFS);
9007 status = -ENOMEM;
9008 if (unlikely(calldata->res.server_owner == NULL))
9009 goto out_calldata;
9010
9011 calldata->res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
9012 GFP_NOFS);
9013 if (unlikely(calldata->res.server_scope == NULL))
9014 goto out_server_owner;
9015
9016 calldata->res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
9017 if (unlikely(calldata->res.impl_id == NULL))
9018 goto out_server_scope;
9019
9020 switch (sp4_how) {
9021 case SP4_NONE:
9022 calldata->args.state_protect.how = SP4_NONE;
9023 break;
9024
9025 case SP4_MACH_CRED:
9026 calldata->args.state_protect = nfs4_sp4_mach_cred_request;
9027 break;
9028
9029 default:
9030 /* unsupported! */
9031 WARN_ON_ONCE(1);
9032 status = -EINVAL;
9033 goto out_impl_id;
9034 }
9035 if (xprt) {
9036 task_setup_data.rpc_xprt = xprt;
9037 task_setup_data.flags |= RPC_TASK_SOFTCONN;
9038 memcpy(calldata->args.verifier.data, clp->cl_confirm.data,
9039 sizeof(calldata->args.verifier.data));
9040 }
9041 calldata->args.client = clp;
9042 calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
9043 EXCHGID4_FLAG_BIND_PRINC_STATEID;
9044 #ifdef CONFIG_NFS_V4_1_MIGRATION
9045 calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR;
9046 #endif
9047 if (test_bit(NFS_CS_PNFS, &clp->cl_flags))
9048 calldata->args.flags |= EXCHGID4_FLAG_USE_PNFS_DS;
9049 msg.rpc_argp = &calldata->args;
9050 msg.rpc_resp = &calldata->res;
9051 task_setup_data.callback_data = calldata;
9052
9053 return rpc_run_task(&task_setup_data);
9054
9055 out_impl_id:
9056 kfree(calldata->res.impl_id);
9057 out_server_scope:
9058 kfree(calldata->res.server_scope);
9059 out_server_owner:
9060 kfree(calldata->res.server_owner);
9061 out_calldata:
9062 kfree(calldata);
9063 out:
9064 nfs_put_client(clp);
9065 return ERR_PTR(status);
9066 }
9067
9068 /*
9069 * _nfs4_proc_exchange_id()
9070 *
9071 * Wrapper for EXCHANGE_ID operation.
9072 */
9073 static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred,
9074 u32 sp4_how)
9075 {
9076 struct rpc_task *task;
9077 struct nfs41_exchange_id_args *argp;
9078 struct nfs41_exchange_id_res *resp;
9079 unsigned long now = jiffies;
9080 int status;
9081
9082 task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL);
9083 if (IS_ERR(task))
9084 return PTR_ERR(task);
9085
9086 argp = task->tk_msg.rpc_argp;
9087 resp = task->tk_msg.rpc_resp;
9088 status = task->tk_status;
9089 if (status != 0)
9090 goto out;
9091
9092 status = nfs4_check_cl_exchange_flags(resp->flags,
9093 clp->cl_mvops->minor_version);
9094 if (status != 0)
9095 goto out;
9096
9097 status = nfs4_sp4_select_mode(clp, &resp->state_protect);
9098 if (status != 0)
9099 goto out;
9100
9101 do_renew_lease(clp, now);
9102
9103 clp->cl_clientid = resp->clientid;
9104 clp->cl_exchange_flags = resp->flags;
9105 clp->cl_seqid = resp->seqid;
9106 /* Client ID is not confirmed */
9107 if (!(resp->flags & EXCHGID4_FLAG_CONFIRMED_R))
9108 clear_bit(NFS4_SESSION_ESTABLISHED,
9109 &clp->cl_session->session_state);
9110
9111 if (clp->cl_serverscope != NULL &&
9112 !nfs41_same_server_scope(clp->cl_serverscope,
9113 resp->server_scope)) {
9114 dprintk("%s: server_scope mismatch detected\n",
9115 __func__);
9116 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
9117 }
9118
9119 swap(clp->cl_serverowner, resp->server_owner);
9120 swap(clp->cl_serverscope, resp->server_scope);
9121 swap(clp->cl_implid, resp->impl_id);
9122
9123 /* Save the EXCHANGE_ID verifier session trunk tests */
9124 memcpy(clp->cl_confirm.data, argp->verifier.data,
9125 sizeof(clp->cl_confirm.data));
9126 out:
9127 trace_nfs4_exchange_id(clp, status);
9128 rpc_put_task(task);
9129 return status;
9130 }
9131
9132 /*
9133 * nfs4_proc_exchange_id()
9134 *
9135 * Returns zero, a negative errno, or a negative NFS4ERR status code.
9136 *
9137 * Since the clientid has expired, all compounds using sessions
9138 * associated with the stale clientid will be returning
9139 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
9140 * be in some phase of session reset.
9141 *
9142 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used.
9143 */
9144 int nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred)
9145 {
9146 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
9147 int status;
9148
9149 /* try SP4_MACH_CRED if krb5i/p */
9150 if (authflavor == RPC_AUTH_GSS_KRB5I ||
9151 authflavor == RPC_AUTH_GSS_KRB5P) {
9152 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
9153 if (!status)
9154 return 0;
9155 }
9156
9157 /* try SP4_NONE */
9158 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
9159 }
9160
9161 /**
9162 * nfs4_test_session_trunk - test an rpc_xprt for session trunking
9163 *
9164 * This is an add_xprt_test() test function called from
9165 * rpc_clnt_setup_test_and_add_xprt.
9166 *
9167 * The rpc_xprt_switch is referenced by rpc_clnt_setup_test_and_add_xprt
9168 * and is dereferenced in nfs4_exchange_id_release
9169 *
9170 * Upon success, add the new transport to the rpc_clnt
9171 *
9172 * @clnt: struct rpc_clnt to get new transport
9173 * @xprt: the rpc_xprt to test
9174 * @data: call data for _nfs4_proc_exchange_id.
9175 */
9176 void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
9177 void *data)
9178 {
9179 struct nfs4_add_xprt_data *adata = data;
9180 struct rpc_task *task;
9181 int status;
9182
9183 u32 sp4_how;
9184
9185 dprintk("--> %s try %s\n", __func__,
9186 xprt->address_strings[RPC_DISPLAY_ADDR]);
9187
9188 sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED);
9189
9190 try_again:
9191 /* Test connection for session trunking. Async exchange_id call */
9192 task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt);
9193 if (IS_ERR(task))
9194 return;
9195
9196 status = task->tk_status;
9197 if (status == 0) {
9198 status = nfs4_detect_session_trunking(adata->clp,
9199 task->tk_msg.rpc_resp, xprt);
9200 trace_nfs4_trunked_exchange_id(adata->clp,
9201 xprt->address_strings[RPC_DISPLAY_ADDR], status);
9202 }
9203 if (status == 0)
9204 rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
9205 else if (status != -NFS4ERR_DELAY && rpc_clnt_xprt_switch_has_addr(clnt,
9206 (struct sockaddr *)&xprt->addr))
9207 rpc_clnt_xprt_switch_remove_xprt(clnt, xprt);
9208
9209 rpc_put_task(task);
9210 if (status == -NFS4ERR_DELAY) {
9211 ssleep(1);
9212 goto try_again;
9213 }
9214 }
9215 EXPORT_SYMBOL_GPL(nfs4_test_session_trunk);
9216
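/*
 * Send a single synchronous DESTROY_CLIENTID operation over the
 * state-management rpc_client using the supplied credential.
 */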
9217 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
9218 const struct cred *cred)
9219 {
9220 struct rpc_message msg = {
9221 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
9222 .rpc_argp = clp,
9223 .rpc_cred = cred,
9224 };
9225 int status;
9226
9227 status = rpc_call_sync(clp->cl_rpcclient, &msg,
9228 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
9229 trace_nfs4_destroy_clientid(clp, status);
9230 if (status)
9231 dprintk("NFS: Got error %d from the server %s on "
9232 "DESTROY_CLIENTID.", status, clp->cl_hostname);
9233 return status;
9234 }
9235
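/*
 * Retry DESTROY_CLIENTID while the server returns NFS4ERR_DELAY or
 * NFS4ERR_CLIENTID_BUSY, sleeping one second between attempts, for at most
 * NFS4_MAX_LOOP_ON_RECOVER iterations.
 */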
9236 static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
9237 const struct cred *cred)
9238 {
9239 unsigned int loop;
9240 int ret;
9241
9242 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
9243 ret = _nfs4_proc_destroy_clientid(clp, cred);
9244 switch (ret) {
9245 case -NFS4ERR_DELAY:
9246 case -NFS4ERR_CLIENTID_BUSY:
9247 ssleep(1);
9248 break;
9249 default:
9250 return ret;
9251 }
9252 }
9253 return 0;
9254 }
9255
9256 int nfs4_destroy_clientid(struct nfs_client *clp)
9257 {
9258 const struct cred *cred;
9259 int ret = 0;
9260
9261 if (clp->cl_mvops->minor_version < 1)
9262 goto out;
9263 if (clp->cl_exchange_flags == 0)
9264 goto out;
9265 if (clp->cl_preserve_clid)
9266 goto out;
9267 cred = nfs4_get_clid_cred(clp);
9268 ret = nfs4_proc_destroy_clientid(clp, cred);
9269 put_cred(cred);
9270 switch (ret) {
9271 case 0:
9272 case -NFS4ERR_STALE_CLIENTID:
9273 clp->cl_exchange_flags = 0;
9274 }
9275 out:
9276 return ret;
9277 }
9278
9279 #endif /* CONFIG_NFS_V4_1 */
9280
9281 struct nfs4_get_lease_time_data {
9282 struct nfs4_get_lease_time_args *args;
9283 struct nfs4_get_lease_time_res *res;
9284 struct nfs_client *clp;
9285 };
9286
9287 static void nfs4_get_lease_time_prepare(struct rpc_task *task,
9288 void *calldata)
9289 {
9290 struct nfs4_get_lease_time_data *data =
9291 (struct nfs4_get_lease_time_data *)calldata;
9292
9293 /* just setup sequence, do not trigger session recovery
9294 since we're invoked within one */
9295 nfs4_setup_sequence(data->clp,
9296 &data->args->la_seq_args,
9297 &data->res->lr_seq_res,
9298 task);
9299 }
9300
9301 /*
9302 * Called from nfs4_state_manager thread for session setup, so don't recover
9303 * from sequence operation or clientid errors.
9304 */
9305 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
9306 {
9307 struct nfs4_get_lease_time_data *data =
9308 (struct nfs4_get_lease_time_data *)calldata;
9309
9310 if (!nfs4_sequence_done(task, &data->res->lr_seq_res))
9311 return;
9312 switch (task->tk_status) {
9313 case -NFS4ERR_DELAY:
9314 case -NFS4ERR_GRACE:
9315 rpc_delay(task, NFS4_POLL_RETRY_MIN);
9316 task->tk_status = 0;
9317 fallthrough;
9318 case -NFS4ERR_RETRY_UNCACHED_REP:
9319 rpc_restart_call_prepare(task);
9320 return;
9321 }
9322 }
9323
9324 static const struct rpc_call_ops nfs4_get_lease_time_ops = {
9325 .rpc_call_prepare = nfs4_get_lease_time_prepare,
9326 .rpc_call_done = nfs4_get_lease_time_done,
9327 };
9328
9329 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
9330 {
9331 struct nfs4_get_lease_time_args args;
9332 struct nfs4_get_lease_time_res res = {
9333 .lr_fsinfo = fsinfo,
9334 };
9335 struct nfs4_get_lease_time_data data = {
9336 .args = &args,
9337 .res = &res,
9338 .clp = clp,
9339 };
9340 struct rpc_message msg = {
9341 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
9342 .rpc_argp = &args,
9343 .rpc_resp = &res,
9344 };
9345 struct rpc_task_setup task_setup = {
9346 .rpc_client = clp->cl_rpcclient,
9347 .rpc_message = &msg,
9348 .callback_ops = &nfs4_get_lease_time_ops,
9349 .callback_data = &data,
9350 .flags = RPC_TASK_TIMEOUT,
9351 };
9352
9353 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0, 1);
9354 return nfs4_call_sync_custom(&task_setup);
9355 }
9356
9357 #ifdef CONFIG_NFS_V4_1
9358
9359 /*
9360 * Initialize the values to be used by the client in CREATE_SESSION.
9361 * If nfs4_init_session set the fore channel request and response sizes,
9362 * use them.
9363 *
9364 * Set the back channel max_resp_sz_cached to zero to force the client to
9365 * always set csa_cachethis to FALSE because the current implementation
9366 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
9367 */
9368 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args,
9369 struct rpc_clnt *clnt)
9370 {
9371 unsigned int max_rqst_sz, max_resp_sz;
9372 unsigned int max_bc_payload = rpc_max_bc_payload(clnt);
9373 unsigned int max_bc_slots = rpc_num_bc_slots(clnt);
9374
9375 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
9376 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;
9377
9378 /* Fore channel attributes */
9379 args->fc_attrs.max_rqst_sz = max_rqst_sz;
9380 args->fc_attrs.max_resp_sz = max_resp_sz;
9381 args->fc_attrs.max_ops = NFS4_MAX_OPS;
9382 args->fc_attrs.max_reqs = max_session_slots;
9383
9384 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
9385 "max_ops=%u max_reqs=%u\n",
9386 __func__,
9387 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
9388 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
9389
9390 /* Back channel attributes */
9391 args->bc_attrs.max_rqst_sz = max_bc_payload;
9392 args->bc_attrs.max_resp_sz = max_bc_payload;
9393 args->bc_attrs.max_resp_sz_cached = 0;
9394 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
9395 args->bc_attrs.max_reqs = max_t(unsigned short, max_session_cb_slots, 1);
9396 if (args->bc_attrs.max_reqs > max_bc_slots)
9397 args->bc_attrs.max_reqs = max_bc_slots;
9398
9399 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
9400 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
9401 __func__,
9402 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
9403 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
9404 args->bc_attrs.max_reqs);
9405 }
9406
9407 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
9408 struct nfs41_create_session_res *res)
9409 {
9410 struct nfs4_channel_attrs *sent = &args->fc_attrs;
9411 struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
9412
9413 if (rcvd->max_resp_sz > sent->max_resp_sz)
9414 return -EINVAL;
9415 /*
9416 * Our requested max_ops is the minimum we need; we're not
9417 * prepared to break up compounds into smaller pieces than that.
9418 * So, no point even trying to continue if the server won't
9419 * cooperate:
9420 */
9421 if (rcvd->max_ops < sent->max_ops)
9422 return -EINVAL;
9423 if (rcvd->max_reqs == 0)
9424 return -EINVAL;
9425 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
9426 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
9427 return 0;
9428 }
9429
9430 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
9431 struct nfs41_create_session_res *res)
9432 {
9433 struct nfs4_channel_attrs *sent = &args->bc_attrs;
9434 struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
9435
9436 if (!(res->flags & SESSION4_BACK_CHAN))
9437 goto out;
9438 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
9439 return -EINVAL;
9440 if (rcvd->max_resp_sz < sent->max_resp_sz)
9441 return -EINVAL;
9442 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
9443 return -EINVAL;
9444 if (rcvd->max_ops > sent->max_ops)
9445 return -EINVAL;
9446 if (rcvd->max_reqs > sent->max_reqs)
9447 return -EINVAL;
9448 out:
9449 return 0;
9450 }
9451
9452 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
9453 struct nfs41_create_session_res *res)
9454 {
9455 int ret;
9456
9457 ret = nfs4_verify_fore_channel_attrs(args, res);
9458 if (ret)
9459 return ret;
9460 return nfs4_verify_back_channel_attrs(args, res);
9461 }
9462
9463 static void nfs4_update_session(struct nfs4_session *session,
9464 struct nfs41_create_session_res *res)
9465 {
9466 nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
9467 /* Mark client id and session as being confirmed */
9468 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
9469 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state);
9470 session->flags = res->flags;
9471 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
9472 if (res->flags & SESSION4_BACK_CHAN)
9473 memcpy(&session->bc_attrs, &res->bc_attrs,
9474 sizeof(session->bc_attrs));
9475 }
9476
9477 static int _nfs4_proc_create_session(struct nfs_client *clp,
9478 const struct cred *cred)
9479 {
9480 struct nfs4_session *session = clp->cl_session;
9481 struct nfs41_create_session_args args = {
9482 .client = clp,
9483 .clientid = clp->cl_clientid,
9484 .seqid = clp->cl_seqid,
9485 .cb_program = NFS4_CALLBACK,
9486 };
9487 struct nfs41_create_session_res res;
9488
9489 struct rpc_message msg = {
9490 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
9491 .rpc_argp = &args,
9492 .rpc_resp = &res,
9493 .rpc_cred = cred,
9494 };
9495 int status;
9496
9497 nfs4_init_channel_attrs(&args, clp->cl_rpcclient);
9498 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
9499
9500 status = rpc_call_sync(session->clp->cl_rpcclient, &msg,
9501 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
9502 trace_nfs4_create_session(clp, status);
9503
9504 switch (status) {
9505 case -NFS4ERR_STALE_CLIENTID:
9506 case -NFS4ERR_DELAY:
9507 case -ETIMEDOUT:
9508 case -EACCES:
9509 case -EAGAIN:
9510 goto out;
9511 }
9512
9513 clp->cl_seqid++;
9514 if (!status) {
9515 /* Verify the session's negotiated channel_attrs values */
9516 status = nfs4_verify_channel_attrs(&args, &res);
9517 /* Increment the clientid slot sequence id */
9518 if (status)
9519 goto out;
9520 nfs4_update_session(session, &res);
9521 }
9522 out:
9523 return status;
9524 }
9525
9526 /*
9527 * Issues a CREATE_SESSION operation to the server.
9528 * It is the responsibility of the caller to verify the session is
9529 * expired before calling this routine.
9530 */
9531 int nfs4_proc_create_session(struct nfs_client *clp, const struct cred *cred)
9532 {
9533 int status;
9534 unsigned *ptr;
9535 struct nfs4_session *session = clp->cl_session;
9536 struct nfs4_add_xprt_data xprtdata = {
9537 .clp = clp,
9538 };
9539 struct rpc_add_xprt_test rpcdata = {
9540 .add_xprt_test = clp->cl_mvops->session_trunk,
9541 .data = &xprtdata,
9542 };
9543
9544 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
9545
9546 status = _nfs4_proc_create_session(clp, cred);
9547 if (status)
9548 goto out;
9549
9550 /* Init or reset the session slot tables */
9551 status = nfs4_setup_session_slot_tables(session);
9552 dprintk("slot table setup returned %d\n", status);
9553 if (status)
9554 goto out;
9555
9556 ptr = (unsigned *)&session->sess_id.data[0];
9557 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
9558 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
9559 rpc_clnt_probe_trunked_xprts(clp->cl_rpcclient, &rpcdata);
9560 out:
9561 return status;
9562 }
9563
9564 /*
9565 * Issue the over-the-wire RPC DESTROY_SESSION.
9566 * The caller must serialize access to this routine.
9567 */
9568 int nfs4_proc_destroy_session(struct nfs4_session *session,
9569 const struct cred *cred)
9570 {
9571 struct rpc_message msg = {
9572 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
9573 .rpc_argp = session,
9574 .rpc_cred = cred,
9575 };
9576 int status = 0;
9577
9578 /* session is still being setup */
9579 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
9580 return 0;
9581
9582 status = rpc_call_sync(session->clp->cl_rpcclient, &msg,
9583 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
9584 trace_nfs4_destroy_session(session->clp, status);
9585
9586 if (status)
9587 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
9588 "Session has been destroyed regardless...\n", status);
9589 rpc_clnt_manage_trunked_xprts(session->clp->cl_rpcclient);
9590 return status;
9591 }
9592
9593 /*
9594 * Renew the cl_session lease.
9595 */
9596 struct nfs4_sequence_data {
9597 struct nfs_client *clp;
9598 struct nfs4_sequence_args args;
9599 struct nfs4_sequence_res res;
9600 };
9601
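/*
 * Release callback for the SEQUENCE task: if others still hold a reference
 * to the nfs_client, schedule the next lease renewal, then drop our
 * reference and free the call data.
 */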
9602 static void nfs41_sequence_release(void *data)
9603 {
9604 struct nfs4_sequence_data *calldata = data;
9605 struct nfs_client *clp = calldata->clp;
9606
9607 if (refcount_read(&clp->cl_count) > 1)
9608 nfs4_schedule_state_renewal(clp);
9609 nfs_put_client(clp);
9610 kfree(calldata);
9611 }
9612
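/*
 * Handle SEQUENCE errors: back off and retry on NFS4ERR_DELAY, otherwise
 * schedule lease recovery.
 */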
9613 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
9614 {
9615 switch(task->tk_status) {
9616 case -NFS4ERR_DELAY:
9617 rpc_delay(task, NFS4_POLL_RETRY_MAX);
9618 return -EAGAIN;
9619 default:
9620 nfs4_schedule_lease_recovery(clp);
9621 }
9622 return 0;
9623 }
9624
9625 static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
9626 {
9627 struct nfs4_sequence_data *calldata = data;
9628 struct nfs_client *clp = calldata->clp;
9629
9630 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
9631 return;
9632
9633 trace_nfs4_sequence(clp, task->tk_status);
9634 if (task->tk_status < 0 && clp->cl_cons_state >= 0) {
9635 dprintk("%s ERROR %d\n", __func__, task->tk_status);
9636 if (refcount_read(&clp->cl_count) == 1)
9637 return;
9638
9639 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
9640 rpc_restart_call_prepare(task);
9641 return;
9642 }
9643 }
9644 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
9645 }
9646
9647 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
9648 {
9649 struct nfs4_sequence_data *calldata = data;
9650 struct nfs_client *clp = calldata->clp;
9651 struct nfs4_sequence_args *args;
9652 struct nfs4_sequence_res *res;
9653
9654 args = task->tk_msg.rpc_argp;
9655 res = task->tk_msg.rpc_resp;
9656
9657 nfs4_setup_sequence(clp, args, res, task);
9658 }
9659
9660 static const struct rpc_call_ops nfs41_sequence_ops = {
9661 .rpc_call_done = nfs41_sequence_call_done,
9662 .rpc_call_prepare = nfs41_sequence_prepare,
9663 .rpc_release = nfs41_sequence_release,
9664 };
9665
9666 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
9667 const struct cred *cred,
9668 struct nfs4_slot *slot,
9669 bool is_privileged)
9670 {
9671 struct nfs4_sequence_data *calldata;
9672 struct rpc_message msg = {
9673 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
9674 .rpc_cred = cred,
9675 };
9676 struct rpc_task_setup task_setup_data = {
9677 .rpc_client = clp->cl_rpcclient,
9678 .rpc_message = &msg,
9679 .callback_ops = &nfs41_sequence_ops,
9680 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT | RPC_TASK_MOVEABLE,
9681 };
9682 struct rpc_task *ret;
9683
9684 ret = ERR_PTR(-EIO);
9685 if (!refcount_inc_not_zero(&clp->cl_count))
9686 goto out_err;
9687
9688 ret = ERR_PTR(-ENOMEM);
9689 calldata = kzalloc(sizeof(*calldata), GFP_KERNEL);
9690 if (calldata == NULL)
9691 goto out_put_clp;
9692 nfs4_init_sequence(&calldata->args, &calldata->res, 0, is_privileged);
9693 nfs4_sequence_attach_slot(&calldata->args, &calldata->res, slot);
9694 msg.rpc_argp = &calldata->args;
9695 msg.rpc_resp = &calldata->res;
9696 calldata->clp = clp;
9697 task_setup_data.callback_data = calldata;
9698
9699 ret = rpc_run_task(&task_setup_data);
9700 if (IS_ERR(ret))
9701 goto out_err;
9702 return ret;
9703 out_put_clp:
9704 nfs_put_client(clp);
9705 out_err:
9706 nfs41_release_slot(slot);
9707 return ret;
9708 }
9709
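/*
 * Renew the lease with an asynchronous SEQUENCE call. Does nothing unless
 * the renewal timeout (NFS4_RENEW_TIMEOUT) has actually fired.
 */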
9710 static int nfs41_proc_async_sequence(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags)
9711 {
9712 struct rpc_task *task;
9713 int ret = 0;
9714
9715 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
9716 return -EAGAIN;
9717 task = _nfs41_proc_sequence(clp, cred, NULL, false);
9718 if (IS_ERR(task))
9719 ret = PTR_ERR(task);
9720 else
9721 rpc_put_task_async(task);
9722 dprintk("<-- %s status=%d\n", __func__, ret);
9723 return ret;
9724 }
9725
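/*
 * Issue a privileged SEQUENCE call on behalf of the state manager and wait
 * for it to complete.
 */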
9726 static int nfs4_proc_sequence(struct nfs_client *clp, const struct cred *cred)
9727 {
9728 struct rpc_task *task;
9729 int ret;
9730
9731 task = _nfs41_proc_sequence(clp, cred, NULL, true);
9732 if (IS_ERR(task)) {
9733 ret = PTR_ERR(task);
9734 goto out;
9735 }
9736 ret = rpc_wait_for_completion_task(task);
9737 if (!ret)
9738 ret = task->tk_status;
9739 rpc_put_task(task);
9740 out:
9741 dprintk("<-- %s status=%d\n", __func__, ret);
9742 return ret;
9743 }
9744
9745 struct nfs4_reclaim_complete_data {
9746 struct nfs_client *clp;
9747 struct nfs41_reclaim_complete_args arg;
9748 struct nfs41_reclaim_complete_res res;
9749 };
9750
9751 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
9752 {
9753 struct nfs4_reclaim_complete_data *calldata = data;
9754
9755 nfs4_setup_sequence(calldata->clp,
9756 &calldata->arg.seq_args,
9757 &calldata->res.seq_res,
9758 task);
9759 }
9760
9761 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
9762 {
9763 switch(task->tk_status) {
9764 case 0:
9765 wake_up_all(&clp->cl_lock_waitq);
9766 fallthrough;
9767 case -NFS4ERR_COMPLETE_ALREADY:
9768 case -NFS4ERR_WRONG_CRED: /* What to do here? */
9769 break;
9770 case -NFS4ERR_DELAY:
9771 rpc_delay(task, NFS4_POLL_RETRY_MAX);
9772 fallthrough;
9773 case -NFS4ERR_RETRY_UNCACHED_REP:
9774 case -EACCES:
9775 dprintk("%s: failed to reclaim complete error %d for server %s, retrying\n",
9776 __func__, task->tk_status, clp->cl_hostname);
9777 return -EAGAIN;
9778 case -NFS4ERR_BADSESSION:
9779 case -NFS4ERR_DEADSESSION:
9780 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
9781 break;
9782 default:
9783 nfs4_schedule_lease_recovery(clp);
9784 }
9785 return 0;
9786 }
9787
9788 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
9789 {
9790 struct nfs4_reclaim_complete_data *calldata = data;
9791 struct nfs_client *clp = calldata->clp;
9792 struct nfs4_sequence_res *res = &calldata->res.seq_res;
9793
9794 if (!nfs41_sequence_done(task, res))
9795 return;
9796
9797 trace_nfs4_reclaim_complete(clp, task->tk_status);
9798 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
9799 rpc_restart_call_prepare(task);
9800 return;
9801 }
9802 }
9803
9804 static void nfs4_free_reclaim_complete_data(void *data)
9805 {
9806 struct nfs4_reclaim_complete_data *calldata = data;
9807
9808 kfree(calldata);
9809 }
9810
9811 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
9812 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
9813 .rpc_call_done = nfs4_reclaim_complete_done,
9814 .rpc_release = nfs4_free_reclaim_complete_data,
9815 };
9816
9817 /*
9818 * Issue a global reclaim complete.
9819 */
9820 static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
9821 const struct cred *cred)
9822 {
9823 struct nfs4_reclaim_complete_data *calldata;
9824 struct rpc_message msg = {
9825 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
9826 .rpc_cred = cred,
9827 };
9828 struct rpc_task_setup task_setup_data = {
9829 .rpc_client = clp->cl_rpcclient,
9830 .rpc_message = &msg,
9831 .callback_ops = &nfs4_reclaim_complete_call_ops,
9832 .flags = RPC_TASK_NO_ROUND_ROBIN,
9833 };
9834 int status = -ENOMEM;
9835
9836 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
9837 if (calldata == NULL)
9838 goto out;
9839 calldata->clp = clp;
9840 calldata->arg.one_fs = 0;
9841
9842 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0, 1);
9843 msg.rpc_argp = &calldata->arg;
9844 msg.rpc_resp = &calldata->res;
9845 task_setup_data.callback_data = calldata;
9846 status = nfs4_call_sync_custom(&task_setup_data);
9847 out:
9848 dprintk("<-- %s status=%d\n", __func__, status);
9849 return status;
9850 }
9851
9852 static void
9853 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
9854 {
9855 struct nfs4_layoutget *lgp = calldata;
9856 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
9857
9858 nfs4_setup_sequence(server->nfs_client, &lgp->args.seq_args,
9859 &lgp->res.seq_res, task);
9860 }
9861
9862 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
9863 {
9864 struct nfs4_layoutget *lgp = calldata;
9865
9866 nfs41_sequence_process(task, &lgp->res.seq_res);
9867 }
9868
9869 static int
9870 nfs4_layoutget_handle_exception(struct rpc_task *task,
9871 struct nfs4_layoutget *lgp, struct nfs4_exception *exception)
9872 {
9873 struct inode *inode = lgp->args.inode;
9874 struct nfs_server *server = NFS_SERVER(inode);
9875 struct pnfs_layout_hdr *lo = lgp->lo;
9876 int nfs4err = task->tk_status;
9877 int err, status = 0;
9878 LIST_HEAD(head);
9879
9880 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
9881
9882 nfs4_sequence_free_slot(&lgp->res.seq_res);
9883
9884 exception->state = NULL;
9885 exception->stateid = NULL;
9886
9887 switch (nfs4err) {
9888 case 0:
9889 goto out;
9890
9891 /*
9892 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs
9893 * on the file. Set tk_status to -ENODATA to tell the upper layer
9894 * to retry in-band (through the MDS) instead.
9895 */
9896 case -NFS4ERR_LAYOUTUNAVAILABLE:
9897 status = -ENODATA;
9898 goto out;
9899 /*
9900 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of
9901 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3).
9902 */
9903 case -NFS4ERR_BADLAYOUT:
9904 status = -EOVERFLOW;
9905 goto out;
9906 /*
9907 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
9908 * (or clients) writing to the same RAID stripe except when
9909 * the minlength argument is 0 (see RFC5661 section 18.43.3).
9910 *
9911 * Treat it like we would RECALLCONFLICT -- we retry for a little
9912 * while, and then eventually give up.
9913 */
9914 case -NFS4ERR_LAYOUTTRYLATER:
9915 if (lgp->args.minlength == 0) {
9916 status = -EOVERFLOW;
9917 goto out;
9918 }
9919 status = -EBUSY;
9920 break;
9921 case -NFS4ERR_RECALLCONFLICT:
9922 case -NFS4ERR_RETURNCONFLICT:
9923 status = -ERECALLCONFLICT;
9924 break;
9925 case -NFS4ERR_DELEG_REVOKED:
9926 case -NFS4ERR_ADMIN_REVOKED:
9927 case -NFS4ERR_EXPIRED:
9928 case -NFS4ERR_BAD_STATEID:
9929 exception->timeout = 0;
9930 spin_lock(&inode->i_lock);
9931 /* If the open stateid was bad, then recover it. */
9932 if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
9933 !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) {
9934 spin_unlock(&inode->i_lock);
9935 exception->state = lgp->args.ctx->state;
9936 exception->stateid = &lgp->args.stateid;
9937 break;
9938 }
9939
9940 /*
9941 * Mark the bad layout state as invalid, then retry
9942 */
9943 pnfs_mark_layout_stateid_invalid(lo, &head);
9944 spin_unlock(&inode->i_lock);
9945 nfs_commit_inode(inode, 0);
9946 pnfs_free_lseg_list(&head);
9947 status = -EAGAIN;
9948 goto out;
9949 }
9950
9951 err = nfs4_handle_exception(server, nfs4err, exception);
9952 if (!status) {
9953 if (exception->retry)
9954 status = -EAGAIN;
9955 else
9956 status = err;
9957 }
9958 out:
9959 return status;
9960 }
9961
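/*
 * Number of pages needed to hold a maximum-sized reply on this session's
 * fore channel.
 */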
9962 size_t max_response_pages(struct nfs_server *server)
9963 {
9964 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
9965 return nfs_page_array_len(0, max_resp_sz);
9966 }
9967
9968 static void nfs4_layoutget_release(void *calldata)
9969 {
9970 struct nfs4_layoutget *lgp = calldata;
9971
9972 nfs4_sequence_free_slot(&lgp->res.seq_res);
9973 pnfs_layoutget_free(lgp);
9974 }
9975
9976 static const struct rpc_call_ops nfs4_layoutget_call_ops = {
9977 .rpc_call_prepare = nfs4_layoutget_prepare,
9978 .rpc_call_done = nfs4_layoutget_done,
9979 .rpc_release = nfs4_layoutget_release,
9980 };
9981
9982 struct pnfs_layout_segment *
9983 nfs4_proc_layoutget(struct nfs4_layoutget *lgp,
9984 struct nfs4_exception *exception)
9985 {
9986 struct inode *inode = lgp->args.inode;
9987 struct nfs_server *server = NFS_SERVER(inode);
9988 struct rpc_task *task;
9989 struct rpc_message msg = {
9990 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
9991 .rpc_argp = &lgp->args,
9992 .rpc_resp = &lgp->res,
9993 .rpc_cred = lgp->cred,
9994 };
9995 struct rpc_task_setup task_setup_data = {
9996 .rpc_client = server->client,
9997 .rpc_message = &msg,
9998 .callback_ops = &nfs4_layoutget_call_ops,
9999 .callback_data = lgp,
10000 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF |
10001 RPC_TASK_MOVEABLE,
10002 };
10003 struct pnfs_layout_segment *lseg = NULL;
10004 int status = 0;
10005
10006 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0);
10007 exception->retry = 0;
10008
10009 task = rpc_run_task(&task_setup_data);
10010 if (IS_ERR(task))
10011 return ERR_CAST(task);
10012
10013 status = rpc_wait_for_completion_task(task);
10014 if (status != 0)
10015 goto out;
10016
10017 if (task->tk_status < 0) {
10018 exception->retry = 1;
10019 status = nfs4_layoutget_handle_exception(task, lgp, exception);
10020 } else if (lgp->res.layoutp->len == 0) {
10021 exception->retry = 1;
10022 status = -EAGAIN;
10023 nfs4_update_delay(&exception->timeout);
10024 } else
10025 lseg = pnfs_layout_process(lgp);
10026 out:
10027 trace_nfs4_layoutget(lgp->args.ctx,
10028 &lgp->args.range,
10029 &lgp->res.range,
10030 &lgp->res.stateid,
10031 status);
10032
10033 rpc_put_task(task);
10034 dprintk("<-- %s status=%d\n", __func__, status);
10035 if (status)
10036 return ERR_PTR(status);
10037 return lseg;
10038 }
10039
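/*
 * Claim a session slot for LAYOUTRETURN, and exit the task early if the
 * layout stateid is no longer valid.
 */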
10040 static void
10041 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
10042 {
10043 struct nfs4_layoutreturn *lrp = calldata;
10044
10045 nfs4_setup_sequence(lrp->clp,
10046 &lrp->args.seq_args,
10047 &lrp->res.seq_res,
10048 task);
10049 if (!pnfs_layout_is_valid(lrp->args.layout))
10050 rpc_exit(task, 0);
10051 }
10052
10053 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
10054 {
10055 struct nfs4_layoutreturn *lrp = calldata;
10056 struct nfs_server *server;
10057
10058 if (!nfs41_sequence_process(task, &lrp->res.seq_res))
10059 return;
10060
10061 if (task->tk_rpc_status == -ETIMEDOUT) {
10062 lrp->rpc_status = -EAGAIN;
10063 lrp->res.lrs_present = 0;
10064 return;
10065 }
10066 /*
10067 * Was there an RPC level error? Assume the call succeeded,
10068 * and that we need to release the layout
10069 */
10070 if (task->tk_rpc_status != 0 && RPC_WAS_SENT(task)) {
10071 lrp->res.lrs_present = 0;
10072 return;
10073 }
10074
10075 server = NFS_SERVER(lrp->args.inode);
10076 switch (task->tk_status) {
10077 case -NFS4ERR_OLD_STATEID:
10078 if (nfs4_layout_refresh_old_stateid(&lrp->args.stateid,
10079 &lrp->args.range,
10080 lrp->args.inode))
10081 goto out_restart;
10082 fallthrough;
10083 default:
10084 task->tk_status = 0;
10085 lrp->res.lrs_present = 0;
10086 fallthrough;
10087 case 0:
10088 break;
10089 case -NFS4ERR_BADSESSION:
10090 case -NFS4ERR_DEADSESSION:
10091 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
10092 nfs4_schedule_session_recovery(server->nfs_client->cl_session,
10093 task->tk_status);
10094 lrp->res.lrs_present = 0;
10095 lrp->rpc_status = -EAGAIN;
10096 task->tk_status = 0;
10097 break;
10098 case -NFS4ERR_DELAY:
10099 if (nfs4_async_handle_error(task, server, NULL, NULL) ==
10100 -EAGAIN)
10101 goto out_restart;
10102 lrp->res.lrs_present = 0;
10103 break;
10104 }
10105 return;
10106 out_restart:
10107 task->tk_status = 0;
10108 nfs4_sequence_free_slot(&lrp->res.seq_res);
10109 rpc_restart_call_prepare(task);
10110 }
10111
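/*
 * LAYOUTRETURN release: on success (or when no inode reference is held)
 * free the returned layout segments, otherwise queue the range for a
 * later retry; then drop the slot, the layoutdriver private data, the
 * layout header, the inode and the credential.
 */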
10112 static void nfs4_layoutreturn_release(void *calldata)
10113 {
10114 struct nfs4_layoutreturn *lrp = calldata;
10115 struct pnfs_layout_hdr *lo = lrp->args.layout;
10116
10117 if (lrp->rpc_status == 0 || !lrp->inode)
10118 pnfs_layoutreturn_free_lsegs(
10119 lo, &lrp->args.stateid, &lrp->args.range,
10120 lrp->res.lrs_present ? &lrp->res.stateid : NULL);
10121 else
10122 pnfs_layoutreturn_retry_later(lo, &lrp->args.stateid,
10123 &lrp->args.range);
10124 nfs4_sequence_free_slot(&lrp->res.seq_res);
10125 if (lrp->ld_private.ops && lrp->ld_private.ops->free)
10126 lrp->ld_private.ops->free(&lrp->ld_private);
10127 pnfs_put_layout_hdr(lrp->args.layout);
10128 nfs_iput_and_deactive(lrp->inode);
10129 put_cred(lrp->cred);
10130 kfree(calldata);
10131 }
10132
10133 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
10134 .rpc_call_prepare = nfs4_layoutreturn_prepare,
10135 .rpc_call_done = nfs4_layoutreturn_done,
10136 .rpc_release = nfs4_layoutreturn_release,
10137 };
10138
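/*
 * Send a LAYOUTRETURN. An asynchronous return fails with -EAGAIN if the
 * inode cannot be pinned; a synchronous return waits for the task and
 * reports its status. Returns issued without an active inode reference
 * use the privileged sequence slot.
 */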
10139 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, unsigned int flags)
10140 {
10141 struct rpc_task *task;
10142 struct rpc_message msg = {
10143 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
10144 .rpc_argp = &lrp->args,
10145 .rpc_resp = &lrp->res,
10146 .rpc_cred = lrp->cred,
10147 };
10148 struct rpc_task_setup task_setup_data = {
10149 .rpc_client = NFS_SERVER(lrp->args.inode)->client,
10150 .rpc_message = &msg,
10151 .callback_ops = &nfs4_layoutreturn_call_ops,
10152 .callback_data = lrp,
10153 .flags = RPC_TASK_MOVEABLE,
10154 };
10155 int status = 0;
10156
10157 nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client,
10158 NFS_SP4_MACH_CRED_PNFS_CLEANUP,
10159 &task_setup_data.rpc_client, &msg);
10160
10161 lrp->inode = nfs_igrab_and_active(lrp->args.inode);
10162 if (flags & PNFS_FL_LAYOUTRETURN_ASYNC) {
10163 if (!lrp->inode) {
10164 nfs4_layoutreturn_release(lrp);
10165 return -EAGAIN;
10166 }
10167 task_setup_data.flags |= RPC_TASK_ASYNC;
10168 }
10169 if (!lrp->inode)
10170 flags |= PNFS_FL_LAYOUTRETURN_PRIVILEGED;
10171 if (flags & PNFS_FL_LAYOUTRETURN_PRIVILEGED)
10172 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
10173 1);
10174 else
10175 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
10176 0);
10177 task = rpc_run_task(&task_setup_data);
10178 if (IS_ERR(task))
10179 return PTR_ERR(task);
10180 if (!(flags & PNFS_FL_LAYOUTRETURN_ASYNC))
10181 status = task->tk_status;
10182 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status);
10183 dprintk("<-- %s status=%d\n", __func__, status);
10184 rpc_put_task(task);
10185 return status;
10186 }
10187
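/*
 * GETDEVICEINFO, asking the server to provide device-ID change and
 * delete notifications. If the server's notification bitmap differs
 * from what was requested, the device is marked uncacheable.
 */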
10188 static int
10189 _nfs4_proc_getdeviceinfo(struct nfs_server *server,
10190 struct pnfs_device *pdev,
10191 const struct cred *cred)
10192 {
10193 struct nfs4_getdeviceinfo_args args = {
10194 .pdev = pdev,
10195 .notify_types = NOTIFY_DEVICEID4_CHANGE |
10196 NOTIFY_DEVICEID4_DELETE,
10197 };
10198 struct nfs4_getdeviceinfo_res res = {
10199 .pdev = pdev,
10200 };
10201 struct rpc_message msg = {
10202 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
10203 .rpc_argp = &args,
10204 .rpc_resp = &res,
10205 .rpc_cred = cred,
10206 };
10207 int status;
10208
10209 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
10210 if (res.notification & ~args.notify_types)
10211 dprintk("%s: unsupported notification\n", __func__);
10212 if (res.notification != args.notify_types)
10213 pdev->nocache = 1;
10214
10215 trace_nfs4_getdeviceinfo(server, &pdev->dev_id, status);
10216
10217 dprintk("<-- %s status=%d\n", __func__, status);
10218
10219 return status;
10220 }
10221
10222 int nfs4_proc_getdeviceinfo(struct nfs_server *server,
10223 struct pnfs_device *pdev,
10224 const struct cred *cred)
10225 {
10226 struct nfs4_exception exception = { };
10227 int err;
10228
10229 do {
10230 err = nfs4_handle_exception(server,
10231 _nfs4_proc_getdeviceinfo(server, pdev, cred),
10232 &exception);
10233 } while (exception.retry);
10234 return err;
10235 }
10236 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
10237
10238 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
10239 {
10240 struct nfs4_layoutcommit_data *data = calldata;
10241 struct nfs_server *server = NFS_SERVER(data->args.inode);
10242
10243 nfs4_setup_sequence(server->nfs_client,
10244 &data->args.seq_args,
10245 &data->res.seq_res,
10246 task);
10247 }
10248
10249 static void
10250 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
10251 {
10252 struct nfs4_layoutcommit_data *data = calldata;
10253 struct nfs_server *server = NFS_SERVER(data->args.inode);
10254
10255 if (!nfs41_sequence_done(task, &data->res.seq_res))
10256 return;
10257
10258 switch (task->tk_status) { /* Just ignore these failures */
10259 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
10260 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
10261 case -NFS4ERR_BADLAYOUT: /* no layout */
10262 case -NFS4ERR_GRACE: /* loca_reclaim always false */
10263 task->tk_status = 0;
10264 break;
10265 case 0:
10266 break;
10267 default:
10268 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
10269 rpc_restart_call_prepare(task);
10270 return;
10271 }
10272 }
10273 }
10274
10275 static void nfs4_layoutcommit_release(void *calldata)
10276 {
10277 struct nfs4_layoutcommit_data *data = calldata;
10278
10279 pnfs_cleanup_layoutcommit(data);
10280 nfs_post_op_update_inode_force_wcc(data->args.inode,
10281 data->res.fattr);
10282 put_cred(data->cred);
10283 nfs_iput_and_deactive(data->inode);
10284 kfree(data);
10285 }
10286
10287 static const struct rpc_call_ops nfs4_layoutcommit_ops = {
10288 .rpc_call_prepare = nfs4_layoutcommit_prepare,
10289 .rpc_call_done = nfs4_layoutcommit_done,
10290 .rpc_release = nfs4_layoutcommit_release,
10291 };
10292
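/*
 * Send a LAYOUTCOMMIT. Asynchronous callers pin the inode (failing with
 * -EAGAIN if that is not possible) and return as soon as the task is
 * running; synchronous callers return the task's status.
 */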
10293 int
10294 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
10295 {
10296 struct rpc_message msg = {
10297 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
10298 .rpc_argp = &data->args,
10299 .rpc_resp = &data->res,
10300 .rpc_cred = data->cred,
10301 };
10302 struct rpc_task_setup task_setup_data = {
10303 .task = &data->task,
10304 .rpc_client = NFS_CLIENT(data->args.inode),
10305 .rpc_message = &msg,
10306 .callback_ops = &nfs4_layoutcommit_ops,
10307 .callback_data = data,
10308 .flags = RPC_TASK_MOVEABLE,
10309 };
10310 struct rpc_task *task;
10311 int status = 0;
10312
10313 dprintk("NFS: initiating layoutcommit call. sync %d "
10314 "lbw: %llu inode %lu\n", sync,
10315 data->args.lastbytewritten,
10316 data->args.inode->i_ino);
10317
10318 if (!sync) {
10319 data->inode = nfs_igrab_and_active(data->args.inode);
10320 if (data->inode == NULL) {
10321 nfs4_layoutcommit_release(data);
10322 return -EAGAIN;
10323 }
10324 task_setup_data.flags = RPC_TASK_ASYNC;
10325 }
10326 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
10327 task = rpc_run_task(&task_setup_data);
10328 if (IS_ERR(task))
10329 return PTR_ERR(task);
10330 if (sync)
10331 status = task->tk_status;
10332 trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status);
10333 dprintk("%s: status %d\n", __func__, status);
10334 rpc_put_task(task);
10335 return status;
10336 }
10337
10338 /*
10339 * Use the state management nfs_client cl_rpcclient, which uses krb5i (if
10340 * possible) as per RFC3530bis and RFC5661 Security Considerations sections
10341 */
10342 static int
10343 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
10344 struct nfs_fsinfo *info,
10345 struct nfs4_secinfo_flavors *flavors, bool use_integrity)
10346 {
10347 struct nfs41_secinfo_no_name_args args = {
10348 .style = SECINFO_STYLE_CURRENT_FH,
10349 };
10350 struct nfs4_secinfo_res res = {
10351 .flavors = flavors,
10352 };
10353 struct rpc_message msg = {
10354 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
10355 .rpc_argp = &args,
10356 .rpc_resp = &res,
10357 };
10358 struct nfs4_call_sync_data data = {
10359 .seq_server = server,
10360 .seq_args = &args.seq_args,
10361 .seq_res = &res.seq_res,
10362 };
10363 struct rpc_task_setup task_setup = {
10364 .rpc_client = server->client,
10365 .rpc_message = &msg,
10366 .callback_ops = server->nfs_client->cl_mvops->call_sync_ops,
10367 .callback_data = &data,
10368 .flags = RPC_TASK_NO_ROUND_ROBIN,
10369 };
10370 const struct cred *cred = NULL;
10371 int status;
10372
10373 if (use_integrity) {
10374 task_setup.rpc_client = server->nfs_client->cl_rpcclient;
10375
10376 cred = nfs4_get_clid_cred(server->nfs_client);
10377 msg.rpc_cred = cred;
10378 }
10379
10380 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
10381 status = nfs4_call_sync_custom(&task_setup);
10382 dprintk("<-- %s status=%d\n", __func__, status);
10383
10384 put_cred(cred);
10385
10386 return status;
10387 }
10388
10389 static int
10390 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
10391 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
10392 {
10393 struct nfs4_exception exception = {
10394 .interruptible = true,
10395 };
10396 int err;
10397 do {
10398 /* first try using integrity protection */
10399 err = -NFS4ERR_WRONGSEC;
10400
10401 /* try to use integrity protection with machine cred */
10402 if (_nfs4_is_integrity_protected(server->nfs_client))
10403 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
10404 flavors, true);
10405
10406 /*
10407 * if unable to use integrity protection, or SECINFO with
10408 * integrity protection returns NFS4ERR_WRONGSEC (which is
10409 * disallowed by spec, but exists in deployed servers) use
10410 * the current filesystem's rpc_client and the user cred.
10411 */
10412 if (err == -NFS4ERR_WRONGSEC)
10413 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
10414 flavors, false);
10415
10416 switch (err) {
10417 case 0:
10418 case -NFS4ERR_WRONGSEC:
10419 case -ENOTSUPP:
10420 goto out;
10421 default:
10422 err = nfs4_handle_exception(server, err, &exception);
10423 }
10424 } while (exception.retry);
10425 out:
10426 return err;
10427 }
10428
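/*
 * Discover which security flavor to use for the root file handle by
 * issuing SECINFO_NO_NAME. If the server does not support it, fall back
 * to the probing done by nfs4_find_root_sec(); otherwise try each flavor
 * the server offers that also matches the mount's auth_info.
 */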
10429 static int
10430 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
10431 struct nfs_fsinfo *info)
10432 {
10433 int err;
10434 struct page *page;
10435 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
10436 struct nfs4_secinfo_flavors *flavors;
10437 struct nfs4_secinfo4 *secinfo;
10438 int i;
10439
10440 page = alloc_page(GFP_KERNEL);
10441 if (!page) {
10442 err = -ENOMEM;
10443 goto out;
10444 }
10445
10446 flavors = page_address(page);
10447 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
10448
10449 /*
10450 * Fall back on the "guess and check" method if
10451 * the server doesn't support SECINFO_NO_NAME
10452 */
10453 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
10454 err = nfs4_find_root_sec(server, fhandle, info);
10455 goto out_freepage;
10456 }
10457 if (err)
10458 goto out_freepage;
10459
10460 for (i = 0; i < flavors->num_flavors; i++) {
10461 secinfo = &flavors->flavors[i];
10462
10463 switch (secinfo->flavor) {
10464 case RPC_AUTH_NULL:
10465 case RPC_AUTH_UNIX:
10466 case RPC_AUTH_GSS:
10467 flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
10468 &secinfo->flavor_info);
10469 break;
10470 default:
10471 flavor = RPC_AUTH_MAXFLAVOR;
10472 break;
10473 }
10474
10475 if (!nfs_auth_info_match(&server->auth_info, flavor))
10476 flavor = RPC_AUTH_MAXFLAVOR;
10477
10478 if (flavor != RPC_AUTH_MAXFLAVOR) {
10479 err = nfs4_lookup_root_sec(server, fhandle,
10480 info, flavor);
10481 if (!err)
10482 break;
10483 }
10484 }
10485
10486 if (flavor == RPC_AUTH_MAXFLAVOR)
10487 err = -EPERM;
10488
10489 out_freepage:
10490 put_page(page);
10491 if (err == -EACCES)
10492 return -EPERM;
10493 out:
10494 return err;
10495 }
10496
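/*
 * Issue a single TEST_STATEID, using the machine credential when
 * NFS_SP4_MACH_CRED_STATEID is in effect. If the RPC itself succeeds,
 * the value returned to the caller is the negated per-stateid status
 * from the reply.
 */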
10497 static int _nfs41_test_stateid(struct nfs_server *server,
10498 const nfs4_stateid *stateid,
10499 const struct cred *cred)
10500 {
10501 int status;
10502 struct nfs41_test_stateid_args args = {
10503 .stateid = *stateid,
10504 };
10505 struct nfs41_test_stateid_res res;
10506 struct rpc_message msg = {
10507 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
10508 .rpc_argp = &args,
10509 .rpc_resp = &res,
10510 .rpc_cred = cred,
10511 };
10512 struct rpc_clnt *rpc_client = server->client;
10513
10514 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
10515 &rpc_client, &msg);
10516
10517 dprintk("NFS call test_stateid %p\n", stateid);
10518 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
10519 status = nfs4_call_sync_sequence(rpc_client, server, &msg,
10520 &args.seq_args, &res.seq_res);
10521 if (status != NFS_OK) {
10522 dprintk("NFS reply test_stateid: failed, %d\n", status);
10523 return status;
10524 }
10525 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
10526 return -res.status;
10527 }
10528
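/*
 * Request a retry only for NFS4ERR_DELAY/RETRY_UNCACHED_REP and
 * session-level errors; any other error is passed back to the caller
 * with exception->retry left clear.
 */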
10529 static void nfs4_handle_delay_or_session_error(struct nfs_server *server,
10530 int err, struct nfs4_exception *exception)
10531 {
10532 exception->retry = 0;
10533 switch(err) {
10534 case -NFS4ERR_DELAY:
10535 case -NFS4ERR_RETRY_UNCACHED_REP:
10536 nfs4_handle_exception(server, err, exception);
10537 break;
10538 case -NFS4ERR_BADSESSION:
10539 case -NFS4ERR_BADSLOT:
10540 case -NFS4ERR_BAD_HIGH_SLOT:
10541 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
10542 case -NFS4ERR_DEADSESSION:
10543 nfs4_do_handle_exception(server, err, exception);
10544 }
10545 }
10546
10547 /**
10548 * nfs41_test_stateid - perform a TEST_STATEID operation
10549 *
10550 * @server: server / transport on which to perform the operation
10551 * @stateid: state ID to test
10552 * @cred: credential
10553 *
10554 * Returns NFS_OK if the server recognizes that "stateid" is valid.
10555 * Otherwise a negative NFS4ERR value is returned if the operation
10556 * failed or the state ID is not currently valid.
10557 */
10558 static int nfs41_test_stateid(struct nfs_server *server,
10559 const nfs4_stateid *stateid,
10560 const struct cred *cred)
10561 {
10562 struct nfs4_exception exception = {
10563 .interruptible = true,
10564 };
10565 int err;
10566 do {
10567 err = _nfs41_test_stateid(server, stateid, cred);
10568 nfs4_handle_delay_or_session_error(server, err, &exception);
10569 } while (exception.retry);
10570 return err;
10571 }
10572
10573 struct nfs_free_stateid_data {
10574 struct nfs_server *server;
10575 struct nfs41_free_stateid_args args;
10576 struct nfs41_free_stateid_res res;
10577 };
10578
10579 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
10580 {
10581 struct nfs_free_stateid_data *data = calldata;
10582 nfs4_setup_sequence(data->server->nfs_client,
10583 &data->args.seq_args,
10584 &data->res.seq_res,
10585 task);
10586 }
10587
10588 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
10589 {
10590 struct nfs_free_stateid_data *data = calldata;
10591
10592 nfs41_sequence_done(task, &data->res.seq_res);
10593
10594 switch (task->tk_status) {
10595 case -NFS4ERR_DELAY:
10596 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN)
10597 rpc_restart_call_prepare(task);
10598 }
10599 }
10600
10601 static void nfs41_free_stateid_release(void *calldata)
10602 {
10603 struct nfs_free_stateid_data *data = calldata;
10604 struct nfs_client *clp = data->server->nfs_client;
10605
10606 nfs_put_client(clp);
10607 kfree(calldata);
10608 }
10609
10610 static const struct rpc_call_ops nfs41_free_stateid_ops = {
10611 .rpc_call_prepare = nfs41_free_stateid_prepare,
10612 .rpc_call_done = nfs41_free_stateid_done,
10613 .rpc_release = nfs41_free_stateid_release,
10614 };
10615
10616 /**
10617 * nfs41_free_stateid - perform a FREE_STATEID operation
10618 *
10619 * @server: server / transport on which to perform the operation
10620 * @stateid: state ID to release
10621 * @cred: credential
10622 * @privileged: set to true if this call needs to be privileged
10623 *
10624 * Note: this function is always asynchronous.
10625 */
10626 static int nfs41_free_stateid(struct nfs_server *server,
10627 nfs4_stateid *stateid,
10628 const struct cred *cred,
10629 bool privileged)
10630 {
10631 struct rpc_message msg = {
10632 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
10633 .rpc_cred = cred,
10634 };
10635 struct rpc_task_setup task_setup = {
10636 .rpc_client = server->client,
10637 .rpc_message = &msg,
10638 .callback_ops = &nfs41_free_stateid_ops,
10639 .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
10640 };
10641 struct nfs_free_stateid_data *data;
10642 struct rpc_task *task;
10643 struct nfs_client *clp = server->nfs_client;
10644
10645 if (!refcount_inc_not_zero(&clp->cl_count))
10646 return -EIO;
10647
10648 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
10649 &task_setup.rpc_client, &msg);
10650
10651 dprintk("NFS call free_stateid %p\n", stateid);
10652 data = kmalloc(sizeof(*data), GFP_KERNEL);
10653 if (!data)
10654 return -ENOMEM;
10655 data->server = server;
10656 nfs4_stateid_copy(&data->args.stateid, stateid);
10657
10658 task_setup.callback_data = data;
10659
10660 msg.rpc_argp = &data->args;
10661 msg.rpc_resp = &data->res;
10662 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, privileged);
10663 task = rpc_run_task(&task_setup);
10664 if (IS_ERR(task))
10665 return PTR_ERR(task);
10666 rpc_put_task(task);
10667 stateid->type = NFS4_FREED_STATEID_TYPE;
10668 return 0;
10669 }
10670
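/*
 * Release a lock stateid: ask the server to free it via FREE_STATEID,
 * then free the client-side lock state.
 */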
10671 static void
10672 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
10673 {
10674 const struct cred *cred = lsp->ls_state->owner->so_cred;
10675
10676 nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
10677 nfs4_free_lock_state(server, lsp);
10678 }
10679
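/*
 * NFSv4.1 stateid comparison: the type and "other" fields must match,
 * and a seqid of zero acts as a wildcard on either side.
 */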
10680 static bool nfs41_match_stateid(const nfs4_stateid *s1,
10681 const nfs4_stateid *s2)
10682 {
10683 if (s1->type != s2->type)
10684 return false;
10685
10686 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
10687 return false;
10688
10689 if (s1->seqid == s2->seqid)
10690 return true;
10691
10692 return s1->seqid == 0 || s2->seqid == 0;
10693 }
10694
10695 #endif /* CONFIG_NFS_V4_1 */
10696
10697 static bool nfs4_match_stateid(const nfs4_stateid *s1,
10698 const nfs4_stateid *s2)
10699 {
10700 return nfs4_stateid_match(s1, s2);
10701 }
10702
10703
10704 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
10705 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
10706 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
10707 .recover_open = nfs4_open_reclaim,
10708 .recover_lock = nfs4_lock_reclaim,
10709 .establish_clid = nfs4_init_clientid,
10710 .detect_trunking = nfs40_discover_server_trunking,
10711 };
10712
10713 #if defined(CONFIG_NFS_V4_1)
10714 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
10715 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
10716 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
10717 .recover_open = nfs4_open_reclaim,
10718 .recover_lock = nfs4_lock_reclaim,
10719 .establish_clid = nfs41_init_clientid,
10720 .reclaim_complete = nfs41_proc_reclaim_complete,
10721 .detect_trunking = nfs41_discover_server_trunking,
10722 };
10723 #endif /* CONFIG_NFS_V4_1 */
10724
10725 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
10726 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
10727 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
10728 .recover_open = nfs40_open_expired,
10729 .recover_lock = nfs4_lock_expired,
10730 .establish_clid = nfs4_init_clientid,
10731 };
10732
10733 #if defined(CONFIG_NFS_V4_1)
10734 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
10735 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
10736 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
10737 .recover_open = nfs41_open_expired,
10738 .recover_lock = nfs41_lock_expired,
10739 .establish_clid = nfs41_init_clientid,
10740 };
10741 #endif /* CONFIG_NFS_V4_1 */
10742
10743 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
10744 .sched_state_renewal = nfs4_proc_async_renew,
10745 .get_state_renewal_cred = nfs4_get_renew_cred,
10746 .renew_lease = nfs4_proc_renew,
10747 };
10748
10749 #if defined(CONFIG_NFS_V4_1)
10750 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
10751 .sched_state_renewal = nfs41_proc_async_sequence,
10752 .get_state_renewal_cred = nfs4_get_machine_cred,
10753 .renew_lease = nfs4_proc_sequence,
10754 };
10755 #endif
10756
10757 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = {
10758 .get_locations = _nfs40_proc_get_locations,
10759 .fsid_present = _nfs40_proc_fsid_present,
10760 };
10761
10762 #if defined(CONFIG_NFS_V4_1)
10763 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = {
10764 .get_locations = _nfs41_proc_get_locations,
10765 .fsid_present = _nfs41_proc_fsid_present,
10766 };
10767 #endif /* CONFIG_NFS_V4_1 */
10768
10769 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
10770 .minor_version = 0,
10771 .init_caps = NFS_CAP_READDIRPLUS
10772 | NFS_CAP_ATOMIC_OPEN
10773 | NFS_CAP_POSIX_LOCK,
10774 .init_client = nfs40_init_client,
10775 .shutdown_client = nfs40_shutdown_client,
10776 .match_stateid = nfs4_match_stateid,
10777 .find_root_sec = nfs4_find_root_sec,
10778 .free_lock_state = nfs4_release_lockowner,
10779 .test_and_free_expired = nfs40_test_and_free_expired_stateid,
10780 .alloc_seqid = nfs_alloc_seqid,
10781 .call_sync_ops = &nfs40_call_sync_ops,
10782 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
10783 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
10784 .state_renewal_ops = &nfs40_state_renewal_ops,
10785 .mig_recovery_ops = &nfs40_mig_recovery_ops,
10786 };
10787
10788 #if defined(CONFIG_NFS_V4_1)
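/*
 * NFSv4.1 sessions provide exactly-once semantics, so open/lock seqids
 * are not used; the seqid allocator simply returns NULL.
 */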
10789 static struct nfs_seqid *
10790 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2)
10791 {
10792 return NULL;
10793 }
10794
10795 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
10796 .minor_version = 1,
10797 .init_caps = NFS_CAP_READDIRPLUS
10798 | NFS_CAP_ATOMIC_OPEN
10799 | NFS_CAP_POSIX_LOCK
10800 | NFS_CAP_STATEID_NFSV41
10801 | NFS_CAP_ATOMIC_OPEN_V1
10802 | NFS_CAP_LGOPEN
10803 | NFS_CAP_MOVEABLE,
10804 .init_client = nfs41_init_client,
10805 .shutdown_client = nfs41_shutdown_client,
10806 .match_stateid = nfs41_match_stateid,
10807 .find_root_sec = nfs41_find_root_sec,
10808 .free_lock_state = nfs41_free_lock_state,
10809 .test_and_free_expired = nfs41_test_and_free_expired_stateid,
10810 .alloc_seqid = nfs_alloc_no_seqid,
10811 .session_trunk = nfs4_test_session_trunk,
10812 .call_sync_ops = &nfs41_call_sync_ops,
10813 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
10814 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
10815 .state_renewal_ops = &nfs41_state_renewal_ops,
10816 .mig_recovery_ops = &nfs41_mig_recovery_ops,
10817 };
10818 #endif
10819
10820 #if defined(CONFIG_NFS_V4_2)
10821 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
10822 .minor_version = 2,
10823 .init_caps = NFS_CAP_READDIRPLUS
10824 | NFS_CAP_ATOMIC_OPEN
10825 | NFS_CAP_POSIX_LOCK
10826 | NFS_CAP_STATEID_NFSV41
10827 | NFS_CAP_ATOMIC_OPEN_V1
10828 | NFS_CAP_LGOPEN
10829 | NFS_CAP_ALLOCATE
10830 | NFS_CAP_COPY
10831 | NFS_CAP_OFFLOAD_CANCEL
10832 | NFS_CAP_COPY_NOTIFY
10833 | NFS_CAP_DEALLOCATE
10834 | NFS_CAP_ZERO_RANGE
10835 | NFS_CAP_SEEK
10836 | NFS_CAP_LAYOUTSTATS
10837 | NFS_CAP_CLONE
10838 | NFS_CAP_LAYOUTERROR
10839 | NFS_CAP_READ_PLUS
10840 | NFS_CAP_MOVEABLE
10841 | NFS_CAP_OFFLOAD_STATUS,
10842 .init_client = nfs41_init_client,
10843 .shutdown_client = nfs41_shutdown_client,
10844 .match_stateid = nfs41_match_stateid,
10845 .find_root_sec = nfs41_find_root_sec,
10846 .free_lock_state = nfs41_free_lock_state,
10847 .call_sync_ops = &nfs41_call_sync_ops,
10848 .test_and_free_expired = nfs41_test_and_free_expired_stateid,
10849 .alloc_seqid = nfs_alloc_no_seqid,
10850 .session_trunk = nfs4_test_session_trunk,
10851 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
10852 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
10853 .state_renewal_ops = &nfs41_state_renewal_ops,
10854 .mig_recovery_ops = &nfs41_mig_recovery_ops,
10855 };
10856 #endif
10857
10858 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
10859 [0] = &nfs_v4_0_minor_ops,
10860 #if defined(CONFIG_NFS_V4_1)
10861 [1] = &nfs_v4_1_minor_ops,
10862 #endif
10863 #if defined(CONFIG_NFS_V4_2)
10864 [2] = &nfs_v4_2_minor_ops,
10865 #endif
10866 };
10867
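/*
 * Concatenate the xattr names produced by generic_listxattr(), the NFSv4
 * security label, the NFSv4 "user." xattrs and the LSM security names
 * into the caller's buffer; return -ERANGE if a buffer size was given
 * and the combined listing does not fit.
 */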
10868 static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
10869 {
10870 ssize_t error, error2, error3, error4;
10871 size_t left = size;
10872
10873 error = generic_listxattr(dentry, list, left);
10874 if (error < 0)
10875 return error;
10876 if (list) {
10877 list += error;
10878 left -= error;
10879 }
10880
10881 error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, left);
10882 if (error2 < 0)
10883 return error2;
10884
10885 if (list) {
10886 list += error2;
10887 left -= error2;
10888 }
10889
10890 error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, left);
10891 if (error3 < 0)
10892 return error3;
10893 if (list) {
10894 list += error3;
10895 left -= error3;
10896 }
10897
10898 error4 = security_inode_listsecurity(d_inode(dentry), list, left);
10899 if (error4 < 0)
10900 return error4;
10901
10902 error += error2 + error3 + error4;
10903 if (size && error > size)
10904 return -ERANGE;
10905 return error;
10906 }
10907
10908 static void nfs4_enable_swap(struct inode *inode)
10909 {
10910 /* The state manager thread must always be running.
10911 * It will notice the client is a swapper, and stay put.
10912 */
10913 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
10914
10915 nfs4_schedule_state_manager(clp);
10916 }
10917
10918 static void nfs4_disable_swap(struct inode *inode)
10919 {
10920 /* The state manager thread will now exit once it is
10921 * woken.
10922 */
10923 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
10924
10925 set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
10926 clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
10927 wake_up_var(&clp->cl_state);
10928 }
10929
10930 static const struct inode_operations nfs4_dir_inode_operations = {
10931 .create = nfs_create,
10932 .lookup = nfs_lookup,
10933 .atomic_open = nfs_atomic_open,
10934 .link = nfs_link,
10935 .unlink = nfs_unlink,
10936 .symlink = nfs_symlink,
10937 .mkdir = nfs_mkdir,
10938 .rmdir = nfs_rmdir,
10939 .mknod = nfs_mknod,
10940 .rename = nfs_rename,
10941 .permission = nfs_permission,
10942 .getattr = nfs_getattr,
10943 .setattr = nfs_setattr,
10944 .listxattr = nfs4_listxattr,
10945 };
10946
10947 static const struct inode_operations nfs4_file_inode_operations = {
10948 .permission = nfs_permission,
10949 .getattr = nfs_getattr,
10950 .setattr = nfs_setattr,
10951 .listxattr = nfs4_listxattr,
10952 };
10953
10954 const struct nfs_rpc_ops nfs_v4_clientops = {
10955 .version = 4, /* protocol version */
10956 .dentry_ops = &nfs4_dentry_operations,
10957 .dir_inode_ops = &nfs4_dir_inode_operations,
10958 .file_inode_ops = &nfs4_file_inode_operations,
10959 .file_ops = &nfs4_file_operations,
10960 .getroot = nfs4_proc_get_root,
10961 .submount = nfs4_submount,
10962 .try_get_tree = nfs4_try_get_tree,
10963 .getattr = nfs4_proc_getattr,
10964 .setattr = nfs4_proc_setattr,
10965 .lookup = nfs4_proc_lookup,
10966 .lookupp = nfs4_proc_lookupp,
10967 .access = nfs4_proc_access,
10968 .readlink = nfs4_proc_readlink,
10969 .create = nfs4_proc_create,
10970 .remove = nfs4_proc_remove,
10971 .unlink_setup = nfs4_proc_unlink_setup,
10972 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
10973 .unlink_done = nfs4_proc_unlink_done,
10974 .rename_setup = nfs4_proc_rename_setup,
10975 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
10976 .rename_done = nfs4_proc_rename_done,
10977 .link = nfs4_proc_link,
10978 .symlink = nfs4_proc_symlink,
10979 .mkdir = nfs4_proc_mkdir,
10980 .rmdir = nfs4_proc_rmdir,
10981 .readdir = nfs4_proc_readdir,
10982 .mknod = nfs4_proc_mknod,
10983 .statfs = nfs4_proc_statfs,
10984 .fsinfo = nfs4_proc_fsinfo,
10985 .pathconf = nfs4_proc_pathconf,
10986 .set_capabilities = nfs4_server_capabilities,
10987 .decode_dirent = nfs4_decode_dirent,
10988 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare,
10989 .read_setup = nfs4_proc_read_setup,
10990 .read_done = nfs4_read_done,
10991 .write_setup = nfs4_proc_write_setup,
10992 .write_done = nfs4_write_done,
10993 .commit_setup = nfs4_proc_commit_setup,
10994 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
10995 .commit_done = nfs4_commit_done,
10996 .lock = nfs4_proc_lock,
10997 .clear_acl_cache = nfs4_zap_acl_attr,
10998 .close_context = nfs4_close_context,
10999 .open_context = nfs4_atomic_open,
11000 .have_delegation = nfs4_have_delegation,
11001 .return_delegation = nfs4_inode_return_delegation,
11002 .alloc_client = nfs4_alloc_client,
11003 .init_client = nfs4_init_client,
11004 .free_client = nfs4_free_client,
11005 .create_server = nfs4_create_server,
11006 .clone_server = nfs_clone_server,
11007 .discover_trunking = nfs4_discover_trunking,
11008 .enable_swap = nfs4_enable_swap,
11009 .disable_swap = nfs4_disable_swap,
11010 };
11011
11012 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
11013 .name = XATTR_NAME_NFSV4_ACL,
11014 .list = nfs4_xattr_list_nfs4_acl,
11015 .get = nfs4_xattr_get_nfs4_acl,
11016 .set = nfs4_xattr_set_nfs4_acl,
11017 };
11018
11019 #if defined(CONFIG_NFS_V4_1)
11020 static const struct xattr_handler nfs4_xattr_nfs4_dacl_handler = {
11021 .name = XATTR_NAME_NFSV4_DACL,
11022 .list = nfs4_xattr_list_nfs4_dacl,
11023 .get = nfs4_xattr_get_nfs4_dacl,
11024 .set = nfs4_xattr_set_nfs4_dacl,
11025 };
11026
11027 static const struct xattr_handler nfs4_xattr_nfs4_sacl_handler = {
11028 .name = XATTR_NAME_NFSV4_SACL,
11029 .list = nfs4_xattr_list_nfs4_sacl,
11030 .get = nfs4_xattr_get_nfs4_sacl,
11031 .set = nfs4_xattr_set_nfs4_sacl,
11032 };
11033 #endif
11034
11035 #ifdef CONFIG_NFS_V4_2
11036 static const struct xattr_handler nfs4_xattr_nfs4_user_handler = {
11037 .prefix = XATTR_USER_PREFIX,
11038 .get = nfs4_xattr_get_nfs4_user,
11039 .set = nfs4_xattr_set_nfs4_user,
11040 };
11041 #endif
11042
11043 const struct xattr_handler * const nfs4_xattr_handlers[] = {
11044 &nfs4_xattr_nfs4_acl_handler,
11045 #if defined(CONFIG_NFS_V4_1)
11046 &nfs4_xattr_nfs4_dacl_handler,
11047 &nfs4_xattr_nfs4_sacl_handler,
11048 #endif
11049 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
11050 &nfs4_xattr_nfs4_label_handler,
11051 #endif
11052 #ifdef CONFIG_NFS_V4_2
11053 &nfs4_xattr_nfs4_user_handler,
11054 #endif
11055 NULL
11056 };
11057