1 /*
2 * fs/nfs/nfs4proc.c
3 *
4 * Client-side procedure declarations for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 #include <linux/mm.h>
39 #include <linux/delay.h>
40 #include <linux/errno.h>
41 #include <linux/string.h>
42 #include <linux/ratelimit.h>
43 #include <linux/printk.h>
44 #include <linux/slab.h>
45 #include <linux/sunrpc/clnt.h>
46 #include <linux/nfs.h>
47 #include <linux/nfs4.h>
48 #include <linux/nfs_fs.h>
49 #include <linux/nfs_page.h>
50 #include <linux/nfs_mount.h>
51 #include <linux/namei.h>
52 #include <linux/mount.h>
53 #include <linux/module.h>
54 #include <linux/xattr.h>
55 #include <linux/utsname.h>
56 #include <linux/freezer.h>
57 #include <linux/iversion.h>
58
59 #include "nfs4_fs.h"
60 #include "delegation.h"
61 #include "internal.h"
62 #include "iostat.h"
63 #include "callback.h"
64 #include "pnfs.h"
65 #include "netns.h"
66 #include "sysfs.h"
67 #include "nfs4idmap.h"
68 #include "nfs4session.h"
69 #include "fscache.h"
70 #include "nfs42.h"
71
72 #include "nfs4trace.h"
73
74 #define NFSDBG_FACILITY NFSDBG_PROC
75
76 #define NFS4_BITMASK_SZ 3
77
78 #define NFS4_POLL_RETRY_MIN (HZ/10)
79 #define NFS4_POLL_RETRY_MAX (15*HZ)
80
81 /* file attributes which can be mapped to nfs attributes */
82 #define NFS4_VALID_ATTRS (ATTR_MODE \
83 | ATTR_UID \
84 | ATTR_GID \
85 | ATTR_SIZE \
86 | ATTR_ATIME \
87 | ATTR_MTIME \
88 | ATTR_CTIME \
89 | ATTR_ATIME_SET \
90 | ATTR_MTIME_SET)
91
92 struct nfs4_opendata;
93 static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
94 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
95 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
96 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
97 struct nfs_fattr *fattr, struct inode *inode);
98 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
99 struct nfs_fattr *fattr, struct iattr *sattr,
100 struct nfs_open_context *ctx, struct nfs4_label *ilabel);
101 #ifdef CONFIG_NFS_V4_1
102 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
103 const struct cred *cred,
104 struct nfs4_slot *slot,
105 bool is_privileged);
106 static int nfs41_test_stateid(struct nfs_server *, const nfs4_stateid *,
107 const struct cred *);
108 static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *,
109 const struct cred *, bool);
110 #endif
111
112 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
113 static inline struct nfs4_label *
114 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
115 struct iattr *sattr, struct nfs4_label *label)
116 {
117 struct lsm_context shim;
118 int err;
119
120 if (label == NULL)
121 return NULL;
122
123 if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
124 return NULL;
125
126 label->lfs = 0;
127 label->pi = 0;
128 label->len = 0;
129 label->label = NULL;
130
131 err = security_dentry_init_security(dentry, sattr->ia_mode,
132 &dentry->d_name, NULL, &shim);
133 if (err)
134 return NULL;
135
136 label->lsmid = shim.id;
137 label->label = shim.context;
138 label->len = shim.len;
139 return label;
140 }
141 static inline void
142 nfs4_label_release_security(struct nfs4_label *label)
143 {
144 struct lsm_context shim;
145
146 if (label) {
147 shim.context = label->label;
148 shim.len = label->len;
149 shim.id = label->lsmid;
150 security_release_secctx(&shim);
151 }
152 }
153 static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
154 {
155 if (label)
156 return server->attr_bitmask;
157
158 return server->attr_bitmask_nl;
159 }
160 #else
161 static inline struct nfs4_label *
162 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
163 struct iattr *sattr, struct nfs4_label *l)
164 { return NULL; }
165 static inline void
166 nfs4_label_release_security(struct nfs4_label *label)
167 { return; }
168 static inline u32 *
169 nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
170 { return server->attr_bitmask; }
171 #endif
172
173 /* Prevent leaks of NFSv4 errors into userland */
174 static int nfs4_map_errors(int err)
175 {
176 if (err >= -1000)
177 return err;
178 switch (err) {
179 case -NFS4ERR_RESOURCE:
180 case -NFS4ERR_LAYOUTTRYLATER:
181 case -NFS4ERR_RECALLCONFLICT:
182 case -NFS4ERR_RETURNCONFLICT:
183 return -EREMOTEIO;
184 case -NFS4ERR_WRONGSEC:
185 case -NFS4ERR_WRONG_CRED:
186 return -EPERM;
187 case -NFS4ERR_BADOWNER:
188 case -NFS4ERR_BADNAME:
189 return -EINVAL;
190 case -NFS4ERR_SHARE_DENIED:
191 return -EACCES;
192 case -NFS4ERR_MINOR_VERS_MISMATCH:
193 return -EPROTONOSUPPORT;
194 case -NFS4ERR_FILE_OPEN:
195 return -EBUSY;
196 case -NFS4ERR_NOT_SAME:
197 return -ENOTSYNC;
198 default:
199 dprintk("%s could not handle NFSv4 error %d\n",
200 __func__, -err);
201 break;
202 }
203 return -EIO;
204 }
205
206 /*
207 * This is our standard bitmap for GETATTR requests.
208 */
209 const u32 nfs4_fattr_bitmap[3] = {
210 FATTR4_WORD0_TYPE
211 | FATTR4_WORD0_CHANGE
212 | FATTR4_WORD0_SIZE
213 | FATTR4_WORD0_FSID
214 | FATTR4_WORD0_FILEID,
215 FATTR4_WORD1_MODE
216 | FATTR4_WORD1_NUMLINKS
217 | FATTR4_WORD1_OWNER
218 | FATTR4_WORD1_OWNER_GROUP
219 | FATTR4_WORD1_RAWDEV
220 | FATTR4_WORD1_SPACE_USED
221 | FATTR4_WORD1_TIME_ACCESS
222 | FATTR4_WORD1_TIME_METADATA
223 | FATTR4_WORD1_TIME_MODIFY
224 | FATTR4_WORD1_MOUNTED_ON_FILEID,
225 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
226 FATTR4_WORD2_SECURITY_LABEL
227 #endif
228 };
229
230 static const u32 nfs4_pnfs_open_bitmap[3] = {
231 FATTR4_WORD0_TYPE
232 | FATTR4_WORD0_CHANGE
233 | FATTR4_WORD0_SIZE
234 | FATTR4_WORD0_FSID
235 | FATTR4_WORD0_FILEID,
236 FATTR4_WORD1_MODE
237 | FATTR4_WORD1_NUMLINKS
238 | FATTR4_WORD1_OWNER
239 | FATTR4_WORD1_OWNER_GROUP
240 | FATTR4_WORD1_RAWDEV
241 | FATTR4_WORD1_SPACE_USED
242 | FATTR4_WORD1_TIME_ACCESS
243 | FATTR4_WORD1_TIME_METADATA
244 | FATTR4_WORD1_TIME_MODIFY,
245 FATTR4_WORD2_MDSTHRESHOLD
246 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
247 | FATTR4_WORD2_SECURITY_LABEL
248 #endif
249 };
250
251 static const u32 nfs4_open_noattr_bitmap[3] = {
252 FATTR4_WORD0_TYPE
253 | FATTR4_WORD0_FILEID,
254 };
255
256 const u32 nfs4_statfs_bitmap[3] = {
257 FATTR4_WORD0_FILES_AVAIL
258 | FATTR4_WORD0_FILES_FREE
259 | FATTR4_WORD0_FILES_TOTAL,
260 FATTR4_WORD1_SPACE_AVAIL
261 | FATTR4_WORD1_SPACE_FREE
262 | FATTR4_WORD1_SPACE_TOTAL
263 };
264
265 const u32 nfs4_pathconf_bitmap[3] = {
266 FATTR4_WORD0_MAXLINK
267 | FATTR4_WORD0_MAXNAME,
268 0
269 };
270
271 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
272 | FATTR4_WORD0_MAXREAD
273 | FATTR4_WORD0_MAXWRITE
274 | FATTR4_WORD0_LEASE_TIME,
275 FATTR4_WORD1_TIME_DELTA
276 | FATTR4_WORD1_FS_LAYOUT_TYPES,
277 FATTR4_WORD2_LAYOUT_BLKSIZE
278 | FATTR4_WORD2_CLONE_BLKSIZE
279 | FATTR4_WORD2_CHANGE_ATTR_TYPE
280 | FATTR4_WORD2_XATTR_SUPPORT
281 };
282
283 const u32 nfs4_fs_locations_bitmap[3] = {
284 FATTR4_WORD0_CHANGE
285 | FATTR4_WORD0_SIZE
286 | FATTR4_WORD0_FSID
287 | FATTR4_WORD0_FILEID
288 | FATTR4_WORD0_FS_LOCATIONS,
289 FATTR4_WORD1_OWNER
290 | FATTR4_WORD1_OWNER_GROUP
291 | FATTR4_WORD1_RAWDEV
292 | FATTR4_WORD1_SPACE_USED
293 | FATTR4_WORD1_TIME_ACCESS
294 | FATTR4_WORD1_TIME_METADATA
295 | FATTR4_WORD1_TIME_MODIFY
296 | FATTR4_WORD1_MOUNTED_ON_FILEID,
297 };
298
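/*
 * Copy a GETATTR attribute bitmask, then clear the bits for attributes
 * that this client controls while it holds a delegation (size, change,
 * mode, owner and, with a delegated mtime/atime, the timestamps), unless
 * the corresponding cache_validity flags say they must be refetched.
 */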
299 static void nfs4_bitmap_copy_adjust(__u32 *dst, const __u32 *src,
300 struct inode *inode, unsigned long flags)
301 {
302 unsigned long cache_validity;
303
304 memcpy(dst, src, NFS4_BITMASK_SZ*sizeof(*dst));
305 if (!inode || !nfs_have_read_or_write_delegation(inode))
306 return;
307
308 cache_validity = READ_ONCE(NFS_I(inode)->cache_validity) | flags;
309
310 /* Remove the attributes over which we have full control */
311 dst[1] &= ~FATTR4_WORD1_RAWDEV;
312 if (!(cache_validity & NFS_INO_INVALID_SIZE))
313 dst[0] &= ~FATTR4_WORD0_SIZE;
314
315 if (!(cache_validity & NFS_INO_INVALID_CHANGE))
316 dst[0] &= ~FATTR4_WORD0_CHANGE;
317
318 if (!(cache_validity & NFS_INO_INVALID_MODE))
319 dst[1] &= ~FATTR4_WORD1_MODE;
320 if (!(cache_validity & NFS_INO_INVALID_OTHER))
321 dst[1] &= ~(FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP);
322
323 if (nfs_have_delegated_mtime(inode)) {
324 if (!(cache_validity & NFS_INO_INVALID_ATIME))
325 dst[1] &= ~FATTR4_WORD1_TIME_ACCESS;
326 if (!(cache_validity & NFS_INO_INVALID_MTIME))
327 dst[1] &= ~FATTR4_WORD1_TIME_MODIFY;
328 if (!(cache_validity & NFS_INO_INVALID_CTIME))
329 dst[1] &= ~FATTR4_WORD1_TIME_METADATA;
330 } else if (nfs_have_delegated_atime(inode)) {
331 if (!(cache_validity & NFS_INO_INVALID_ATIME))
332 dst[1] &= ~FATTR4_WORD1_TIME_ACCESS;
333 }
334 }
335
336 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
337 struct nfs4_readdir_arg *readdir)
338 {
339 unsigned int attrs = FATTR4_WORD0_FILEID | FATTR4_WORD0_TYPE;
340 __be32 *start, *p;
341
342 if (cookie > 2) {
343 readdir->cookie = cookie;
344 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
345 return;
346 }
347
348 readdir->cookie = 0;
349 memset(&readdir->verifier, 0, sizeof(readdir->verifier));
350 if (cookie == 2)
351 return;
352
353 /*
354 	 * NFSv4 servers do not return entries for '.' and '..'.
355 * Therefore, we fake these entries here. We let '.'
356 * have cookie 0 and '..' have cookie 1. Note that
357 * when talking to the server, we always send cookie 0
358 * instead of 1 or 2.
359 */
360 start = p = kmap_atomic(*readdir->pages);
361
362 if (cookie == 0) {
363 *p++ = xdr_one; /* next */
364 *p++ = xdr_zero; /* cookie, first word */
365 *p++ = xdr_one; /* cookie, second word */
366 *p++ = xdr_one; /* entry len */
367 memcpy(p, ".\0\0\0", 4); /* entry */
368 p++;
369 *p++ = xdr_one; /* bitmap length */
370 *p++ = htonl(attrs); /* bitmap */
371 *p++ = htonl(12); /* attribute buffer length */
372 *p++ = htonl(NF4DIR);
373 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
374 }
375
376 *p++ = xdr_one; /* next */
377 *p++ = xdr_zero; /* cookie, first word */
378 *p++ = xdr_two; /* cookie, second word */
379 *p++ = xdr_two; /* entry len */
380 memcpy(p, "..\0\0", 4); /* entry */
381 p++;
382 *p++ = xdr_one; /* bitmap length */
383 *p++ = htonl(attrs); /* bitmap */
384 *p++ = htonl(12); /* attribute buffer length */
385 *p++ = htonl(NF4DIR);
386 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
387
388 readdir->pgbase = (char *)p - (char *)start;
389 readdir->count -= readdir->pgbase;
390 kunmap_atomic(start);
391 }
392
393 static void nfs4_fattr_set_prechange(struct nfs_fattr *fattr, u64 version)
394 {
395 if (!(fattr->valid & NFS_ATTR_FATTR_PRECHANGE)) {
396 fattr->pre_change_attr = version;
397 fattr->valid |= NFS_ATTR_FATTR_PRECHANGE;
398 }
399 }
400
401 static void nfs4_test_and_free_stateid(struct nfs_server *server,
402 nfs4_stateid *stateid,
403 const struct cred *cred)
404 {
405 const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;
406
407 ops->test_and_free_expired(server, stateid, cred);
408 }
409
410 static void __nfs4_free_revoked_stateid(struct nfs_server *server,
411 nfs4_stateid *stateid,
412 const struct cred *cred)
413 {
414 stateid->type = NFS4_REVOKED_STATEID_TYPE;
415 nfs4_test_and_free_stateid(server, stateid, cred);
416 }
417
418 static void nfs4_free_revoked_stateid(struct nfs_server *server,
419 const nfs4_stateid *stateid,
420 const struct cred *cred)
421 {
422 nfs4_stateid tmp;
423
424 nfs4_stateid_copy(&tmp, stateid);
425 __nfs4_free_revoked_stateid(server, &tmp, cred);
426 }
427
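/*
 * Exponential backoff helper for NFS4ERR_DELAY-style retries: returns the
 * current delay, clamped to [NFS4_POLL_RETRY_MIN, NFS4_POLL_RETRY_MAX],
 * and doubles *timeout for the next attempt.
 */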
428 static long nfs4_update_delay(long *timeout)
429 {
430 long ret;
431 if (!timeout)
432 return NFS4_POLL_RETRY_MAX;
433 if (*timeout <= 0)
434 *timeout = NFS4_POLL_RETRY_MIN;
435 if (*timeout > NFS4_POLL_RETRY_MAX)
436 *timeout = NFS4_POLL_RETRY_MAX;
437 ret = *timeout;
438 *timeout <<= 1;
439 return ret;
440 }
441
442 static int nfs4_delay_killable(long *timeout)
443 {
444 might_sleep();
445
446 __set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
447 schedule_timeout(nfs4_update_delay(timeout));
448 if (!__fatal_signal_pending(current))
449 return 0;
450 return -EINTR;
451 }
452
453 static int nfs4_delay_interruptible(long *timeout)
454 {
455 might_sleep();
456
457 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE_UNSAFE);
458 schedule_timeout(nfs4_update_delay(timeout));
459 if (!signal_pending(current))
460 return 0;
461 	return __fatal_signal_pending(current) ? -EINTR : -ERESTARTSYS;
462 }
463
464 static int nfs4_delay(long *timeout, bool interruptible)
465 {
466 if (interruptible)
467 return nfs4_delay_interruptible(timeout);
468 return nfs4_delay_killable(timeout);
469 }
470
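/*
 * Only open, lock and delegation stateids can be acted upon by state
 * recovery; return the stateid if it is one of those types, else NULL.
 */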
471 static const nfs4_stateid *
472 nfs4_recoverable_stateid(const nfs4_stateid *stateid)
473 {
474 if (!stateid)
475 return NULL;
476 switch (stateid->type) {
477 case NFS4_OPEN_STATEID_TYPE:
478 case NFS4_LOCK_STATEID_TYPE:
479 case NFS4_DELEGATION_STATEID_TYPE:
480 return stateid;
481 default:
482 break;
483 }
484 return NULL;
485 }
486
487 /* Common NFSv4 error handler, shared by the synchronous and asynchronous
488  * paths: decides whether to map the error, delay, retry, or wait for
489  * state recovery before the caller retransmits.
 */
490 static int nfs4_do_handle_exception(struct nfs_server *server,
491 int errorcode, struct nfs4_exception *exception)
492 {
493 struct nfs_client *clp = server->nfs_client;
494 struct nfs4_state *state = exception->state;
495 const nfs4_stateid *stateid;
496 struct inode *inode = exception->inode;
497 int ret = errorcode;
498
499 exception->delay = 0;
500 exception->recovering = 0;
501 exception->retry = 0;
502
503 stateid = nfs4_recoverable_stateid(exception->stateid);
504 if (stateid == NULL && state != NULL)
505 stateid = nfs4_recoverable_stateid(&state->stateid);
506
507 switch(errorcode) {
508 case 0:
509 return 0;
510 case -NFS4ERR_BADHANDLE:
511 case -ESTALE:
512 if (inode != NULL && S_ISREG(inode->i_mode))
513 pnfs_destroy_layout(NFS_I(inode));
514 break;
515 case -NFS4ERR_DELEG_REVOKED:
516 case -NFS4ERR_ADMIN_REVOKED:
517 case -NFS4ERR_EXPIRED:
518 case -NFS4ERR_BAD_STATEID:
519 case -NFS4ERR_PARTNER_NO_AUTH:
520 if (inode != NULL && stateid != NULL) {
521 nfs_inode_find_state_and_recover(inode,
522 stateid);
523 goto wait_on_recovery;
524 }
525 fallthrough;
526 case -NFS4ERR_OPENMODE:
527 if (inode) {
528 int err;
529
530 err = nfs_async_inode_return_delegation(inode,
531 stateid);
532 if (err == 0)
533 goto wait_on_recovery;
534 if (stateid != NULL && stateid->type == NFS4_DELEGATION_STATEID_TYPE) {
535 exception->retry = 1;
536 break;
537 }
538 }
539 if (state == NULL)
540 break;
541 ret = nfs4_schedule_stateid_recovery(server, state);
542 if (ret < 0)
543 break;
544 goto wait_on_recovery;
545 case -NFS4ERR_STALE_STATEID:
546 case -NFS4ERR_STALE_CLIENTID:
547 nfs4_schedule_lease_recovery(clp);
548 goto wait_on_recovery;
549 case -NFS4ERR_MOVED:
550 ret = nfs4_schedule_migration_recovery(server);
551 if (ret < 0)
552 break;
553 goto wait_on_recovery;
554 case -NFS4ERR_LEASE_MOVED:
555 nfs4_schedule_lease_moved_recovery(clp);
556 goto wait_on_recovery;
557 #if defined(CONFIG_NFS_V4_1)
558 case -NFS4ERR_BADSESSION:
559 case -NFS4ERR_BADSLOT:
560 case -NFS4ERR_BAD_HIGH_SLOT:
561 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
562 case -NFS4ERR_DEADSESSION:
563 case -NFS4ERR_SEQ_FALSE_RETRY:
564 case -NFS4ERR_SEQ_MISORDERED:
565 /* Handled in nfs41_sequence_process() */
566 goto wait_on_recovery;
567 #endif /* defined(CONFIG_NFS_V4_1) */
568 case -NFS4ERR_FILE_OPEN:
569 if (exception->timeout > HZ) {
570 /* We have retried a decent amount, time to
571 * fail
572 */
573 ret = -EBUSY;
574 break;
575 }
576 fallthrough;
577 case -NFS4ERR_DELAY:
578 nfs_inc_server_stats(server, NFSIOS_DELAY);
579 fallthrough;
580 case -NFS4ERR_GRACE:
581 case -NFS4ERR_LAYOUTTRYLATER:
582 case -NFS4ERR_RECALLCONFLICT:
583 case -NFS4ERR_RETURNCONFLICT:
584 exception->delay = 1;
585 return 0;
586
587 case -NFS4ERR_RETRY_UNCACHED_REP:
588 case -NFS4ERR_OLD_STATEID:
589 exception->retry = 1;
590 break;
591 case -NFS4ERR_BADOWNER:
592 /* The following works around a Linux server bug! */
593 case -NFS4ERR_BADNAME:
594 if (server->caps & NFS_CAP_UIDGID_NOMAP) {
595 server->caps &= ~NFS_CAP_UIDGID_NOMAP;
596 exception->retry = 1;
597 printk(KERN_WARNING "NFS: v4 server %s "
598 "does not accept raw "
599 "uid/gids. "
600 "Reenabling the idmapper.\n",
601 server->nfs_client->cl_hostname);
602 }
603 }
604 /* We failed to handle the error */
605 return nfs4_map_errors(ret);
606 wait_on_recovery:
607 exception->recovering = 1;
608 return 0;
609 }
610
611 /*
612 * Track the number of NFS4ERR_DELAY related retransmissions and return
613 * EAGAIN if the 'softerr' mount option is set, and we've exceeded the limit
614 * set by 'nfs_delay_retrans'.
615 */
616 static int nfs4_exception_should_retrans(const struct nfs_server *server,
617 struct nfs4_exception *exception)
618 {
619 if (server->flags & NFS_MOUNT_SOFTERR && nfs_delay_retrans >= 0) {
620 if (exception->retrans++ >= (unsigned short)nfs_delay_retrans)
621 return -EAGAIN;
622 }
623 return 0;
624 }
625
626 /* This is the error handling routine for processes that are allowed
627 * to sleep.
628 */
629 int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
630 {
631 struct nfs_client *clp = server->nfs_client;
632 int ret;
633
634 ret = nfs4_do_handle_exception(server, errorcode, exception);
635 if (exception->delay) {
636 int ret2 = nfs4_exception_should_retrans(server, exception);
637 if (ret2 < 0) {
638 exception->retry = 0;
639 return ret2;
640 }
641 ret = nfs4_delay(&exception->timeout,
642 exception->interruptible);
643 goto out_retry;
644 }
645 if (exception->recovering) {
646 if (exception->task_is_privileged)
647 return -EDEADLOCK;
648 ret = nfs4_wait_clnt_recover(clp);
649 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
650 return -EIO;
651 goto out_retry;
652 }
653 return ret;
654 out_retry:
655 if (ret == 0)
656 exception->retry = 1;
657 return ret;
658 }
659
660 static int
661 nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
662 int errorcode, struct nfs4_exception *exception)
663 {
664 struct nfs_client *clp = server->nfs_client;
665 int ret;
666
667 ret = nfs4_do_handle_exception(server, errorcode, exception);
668 if (exception->delay) {
669 int ret2 = nfs4_exception_should_retrans(server, exception);
670 if (ret2 < 0) {
671 exception->retry = 0;
672 return ret2;
673 }
674 rpc_delay(task, nfs4_update_delay(&exception->timeout));
675 goto out_retry;
676 }
677 if (exception->recovering) {
678 if (exception->task_is_privileged)
679 return -EDEADLOCK;
680 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
681 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
682 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
683 goto out_retry;
684 }
685 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
686 ret = -EIO;
687 return ret;
688 out_retry:
689 if (ret == 0) {
690 exception->retry = 1;
691 /*
692 * For NFS4ERR_MOVED, the client transport will need to
693 * be recomputed after migration recovery has completed.
694 */
695 if (errorcode == -NFS4ERR_MOVED)
696 rpc_task_release_transport(task);
697 }
698 return ret;
699 }
700
701 int
702 nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
703 struct nfs4_state *state, long *timeout)
704 {
705 struct nfs4_exception exception = {
706 .state = state,
707 };
708
709 if (task->tk_status >= 0)
710 return 0;
711 if (timeout)
712 exception.timeout = *timeout;
713 task->tk_status = nfs4_async_handle_exception(task, server,
714 task->tk_status,
715 &exception);
716 if (exception.delay && timeout)
717 *timeout = exception.timeout;
718 if (exception.retry)
719 return -EAGAIN;
720 return 0;
721 }
722
723 /*
724 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
725 * or 'false' otherwise.
726 */
727 static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
728 {
729 rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
730 return (flavor == RPC_AUTH_GSS_KRB5I) || (flavor == RPC_AUTH_GSS_KRB5P);
731 }
732
733 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
734 {
735 spin_lock(&clp->cl_lock);
736 	if (time_before(clp->cl_last_renewal, timestamp))
737 clp->cl_last_renewal = timestamp;
738 spin_unlock(&clp->cl_lock);
739 }
740
741 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
742 {
743 struct nfs_client *clp = server->nfs_client;
744
745 if (!nfs4_has_session(clp))
746 do_renew_lease(clp, timestamp);
747 }
748
749 struct nfs4_call_sync_data {
750 const struct nfs_server *seq_server;
751 struct nfs4_sequence_args *seq_args;
752 struct nfs4_sequence_res *seq_res;
753 };
754
755 void nfs4_init_sequence(struct nfs4_sequence_args *args,
756 struct nfs4_sequence_res *res, int cache_reply,
757 int privileged)
758 {
759 args->sa_slot = NULL;
760 args->sa_cache_this = cache_reply;
761 args->sa_privileged = privileged;
762
763 res->sr_slot = NULL;
764 }
765
766 static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res)
767 {
768 struct nfs4_slot *slot = res->sr_slot;
769 struct nfs4_slot_table *tbl;
770
771 tbl = slot->table;
772 spin_lock(&tbl->slot_tbl_lock);
773 if (!nfs41_wake_and_assign_slot(tbl, slot))
774 nfs4_free_slot(tbl, slot);
775 spin_unlock(&tbl->slot_tbl_lock);
776
777 res->sr_slot = NULL;
778 }
779
780 static int nfs40_sequence_done(struct rpc_task *task,
781 struct nfs4_sequence_res *res)
782 {
783 if (res->sr_slot != NULL)
784 nfs40_sequence_free_slot(res);
785 return 1;
786 }
787
788 #if defined(CONFIG_NFS_V4_1)
789
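/*
 * Return a session slot to the slot table: bump the sequence number if the
 * previous request completed, then either hand the slot to a waiting task
 * or free it.  If the last transmitted highest_used_slotid is now above the
 * server's target, notify the server of the new value.
 */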
790 static void nfs41_release_slot(struct nfs4_slot *slot)
791 {
792 struct nfs4_session *session;
793 struct nfs4_slot_table *tbl;
794 bool send_new_highest_used_slotid = false;
795
796 if (!slot)
797 return;
798 tbl = slot->table;
799 session = tbl->session;
800
801 /* Bump the slot sequence number */
802 if (slot->seq_done)
803 slot->seq_nr++;
804 slot->seq_done = 0;
805
806 spin_lock(&tbl->slot_tbl_lock);
807 /* Be nice to the server: try to ensure that the last transmitted
808 	 * value for highest_used_slotid <= target_highest_slotid
809 */
810 if (tbl->highest_used_slotid > tbl->target_highest_slotid)
811 send_new_highest_used_slotid = true;
812
813 if (nfs41_wake_and_assign_slot(tbl, slot)) {
814 send_new_highest_used_slotid = false;
815 goto out_unlock;
816 }
817 nfs4_free_slot(tbl, slot);
818
819 if (tbl->highest_used_slotid != NFS4_NO_SLOT)
820 send_new_highest_used_slotid = false;
821 out_unlock:
822 spin_unlock(&tbl->slot_tbl_lock);
823 if (send_new_highest_used_slotid)
824 nfs41_notify_server(session->clp);
825 if (waitqueue_active(&tbl->slot_waitq))
826 wake_up_all(&tbl->slot_waitq);
827 }
828
829 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
830 {
831 nfs41_release_slot(res->sr_slot);
832 res->sr_slot = NULL;
833 }
834
835 static void nfs4_slot_sequence_record_sent(struct nfs4_slot *slot,
836 u32 seqnr)
837 {
838 if ((s32)(seqnr - slot->seq_nr_highest_sent) > 0)
839 slot->seq_nr_highest_sent = seqnr;
840 }
841 static void nfs4_slot_sequence_acked(struct nfs4_slot *slot, u32 seqnr)
842 {
843 nfs4_slot_sequence_record_sent(slot, seqnr);
844 slot->seq_nr_last_acked = seqnr;
845 }
846
847 static void nfs4_probe_sequence(struct nfs_client *client, const struct cred *cred,
848 struct nfs4_slot *slot)
849 {
850 struct rpc_task *task = _nfs41_proc_sequence(client, cred, slot, true);
851 if (!IS_ERR(task))
852 rpc_put_task_async(task);
853 }
854
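/*
 * Process the result of the SEQUENCE operation that prefixes an NFSv4.1
 * compound: on success, renew the lease and update the slot table targets;
 * on slot or session errors, decide whether to bump the sequence number,
 * retry on another slot, delay, or trigger session recovery.
 * Returns 0 if the RPC call has been queued for a retry, 1 otherwise.
 */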
855 static int nfs41_sequence_process(struct rpc_task *task,
856 struct nfs4_sequence_res *res)
857 {
858 struct nfs4_session *session;
859 struct nfs4_slot *slot = res->sr_slot;
860 struct nfs_client *clp;
861 int status;
862 int ret = 1;
863
864 if (slot == NULL)
865 goto out_noaction;
866 /* don't increment the sequence number if the task wasn't sent */
867 if (!RPC_WAS_SENT(task) || slot->seq_done)
868 goto out;
869
870 session = slot->table->session;
871 clp = session->clp;
872
873 trace_nfs4_sequence_done(session, res);
874
875 status = res->sr_status;
876 if (task->tk_status == -NFS4ERR_DEADSESSION)
877 status = -NFS4ERR_DEADSESSION;
878
879 /* Check the SEQUENCE operation status */
880 switch (status) {
881 case 0:
882 /* Mark this sequence number as having been acked */
883 nfs4_slot_sequence_acked(slot, slot->seq_nr);
884 /* Update the slot's sequence and clientid lease timer */
885 slot->seq_done = 1;
886 do_renew_lease(clp, res->sr_timestamp);
887 /* Check sequence flags */
888 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags,
889 !!slot->privileged);
890 nfs41_update_target_slotid(slot->table, slot, res);
891 break;
892 case 1:
893 /*
894 * sr_status remains 1 if an RPC level error occurred.
895 * The server may or may not have processed the sequence
896 	 * operation.
897 */
898 nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
899 slot->seq_done = 1;
900 goto out;
901 case -NFS4ERR_DELAY:
902 /* The server detected a resend of the RPC call and
903 * returned NFS4ERR_DELAY as per Section 2.10.6.2
904 * of RFC5661.
905 */
906 dprintk("%s: slot=%u seq=%u: Operation in progress\n",
907 __func__,
908 slot->slot_nr,
909 slot->seq_nr);
910 goto out_retry;
911 case -NFS4ERR_RETRY_UNCACHED_REP:
912 case -NFS4ERR_SEQ_FALSE_RETRY:
913 /*
914 * The server thinks we tried to replay a request.
915 * Retry the call after bumping the sequence ID.
916 */
917 nfs4_slot_sequence_acked(slot, slot->seq_nr);
918 goto retry_new_seq;
919 case -NFS4ERR_BADSLOT:
920 /*
921 * The slot id we used was probably retired. Try again
922 * using a different slot id.
923 */
924 if (slot->slot_nr < slot->table->target_highest_slotid)
925 goto session_recover;
926 goto retry_nowait;
927 case -NFS4ERR_SEQ_MISORDERED:
928 nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
929 /*
930 * Were one or more calls using this slot interrupted?
931 * If the server never received the request, then our
932 * transmitted slot sequence number may be too high. However,
933 * if the server did receive the request then it might
934 * accidentally give us a reply with a mismatched operation.
935 * We can sort this out by sending a lone sequence operation
936 * to the server on the same slot.
937 */
938 if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) {
939 slot->seq_nr--;
940 if (task->tk_msg.rpc_proc != &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE]) {
941 nfs4_probe_sequence(clp, task->tk_msg.rpc_cred, slot);
942 res->sr_slot = NULL;
943 }
944 goto retry_nowait;
945 }
946 /*
947 * RFC5661:
948 * A retry might be sent while the original request is
949 * still in progress on the replier. The replier SHOULD
950 * deal with the issue by returning NFS4ERR_DELAY as the
951 * reply to SEQUENCE or CB_SEQUENCE operation, but
952 * implementations MAY return NFS4ERR_SEQ_MISORDERED.
953 *
954 * Restart the search after a delay.
955 */
956 slot->seq_nr = slot->seq_nr_highest_sent;
957 goto out_retry;
958 case -NFS4ERR_BADSESSION:
959 case -NFS4ERR_DEADSESSION:
960 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
961 goto session_recover;
962 default:
963 /* Just update the slot sequence no. */
964 slot->seq_done = 1;
965 }
966 out:
967 /* The session may be reset by one of the error handlers. */
968 	dprintk("%s: Error %d, freeing the slot\n", __func__, res->sr_status);
969 out_noaction:
970 return ret;
971 session_recover:
972 set_bit(NFS4_SLOT_TBL_DRAINING, &session->fc_slot_table.slot_tbl_state);
973 nfs4_schedule_session_recovery(session, status);
974 dprintk("%s ERROR: %d Reset session\n", __func__, status);
975 nfs41_sequence_free_slot(res);
976 goto out;
977 retry_new_seq:
978 ++slot->seq_nr;
979 retry_nowait:
980 if (rpc_restart_call_prepare(task)) {
981 nfs41_sequence_free_slot(res);
982 task->tk_status = 0;
983 ret = 0;
984 }
985 goto out;
986 out_retry:
987 if (!rpc_restart_call(task))
988 goto out;
989 rpc_delay(task, NFS4_POLL_RETRY_MAX);
990 return 0;
991 }
992
993 int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
994 {
995 if (!nfs41_sequence_process(task, res))
996 return 0;
997 if (res->sr_slot != NULL)
998 nfs41_sequence_free_slot(res);
999 return 1;
1000
1001 }
1002 EXPORT_SYMBOL_GPL(nfs41_sequence_done);
1003
1004 static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
1005 {
1006 if (res->sr_slot == NULL)
1007 return 1;
1008 if (res->sr_slot->table->session != NULL)
1009 return nfs41_sequence_process(task, res);
1010 return nfs40_sequence_done(task, res);
1011 }
1012
1013 static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
1014 {
1015 if (res->sr_slot != NULL) {
1016 if (res->sr_slot->table->session != NULL)
1017 nfs41_sequence_free_slot(res);
1018 else
1019 nfs40_sequence_free_slot(res);
1020 }
1021 }
1022
1023 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
1024 {
1025 if (res->sr_slot == NULL)
1026 return 1;
1027 if (!res->sr_slot->table->session)
1028 return nfs40_sequence_done(task, res);
1029 return nfs41_sequence_done(task, res);
1030 }
1031 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
1032
1033 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
1034 {
1035 struct nfs4_call_sync_data *data = calldata;
1036
1037 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
1038
1039 nfs4_setup_sequence(data->seq_server->nfs_client,
1040 data->seq_args, data->seq_res, task);
1041 }
1042
1043 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
1044 {
1045 struct nfs4_call_sync_data *data = calldata;
1046
1047 nfs41_sequence_done(task, data->seq_res);
1048 }
1049
1050 static const struct rpc_call_ops nfs41_call_sync_ops = {
1051 .rpc_call_prepare = nfs41_call_sync_prepare,
1052 .rpc_call_done = nfs41_call_sync_done,
1053 };
1054
1055 #else /* !CONFIG_NFS_V4_1 */
1056
1057 static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
1058 {
1059 return nfs40_sequence_done(task, res);
1060 }
1061
1062 static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
1063 {
1064 if (res->sr_slot != NULL)
1065 nfs40_sequence_free_slot(res);
1066 }
1067
1068 int nfs4_sequence_done(struct rpc_task *task,
1069 struct nfs4_sequence_res *res)
1070 {
1071 return nfs40_sequence_done(task, res);
1072 }
1073 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
1074
1075 #endif /* !CONFIG_NFS_V4_1 */
1076
1077 static void nfs41_sequence_res_init(struct nfs4_sequence_res *res)
1078 {
1079 res->sr_timestamp = jiffies;
1080 res->sr_status_flags = 0;
1081 res->sr_status = 1;
1082 }
1083
1084 static
1085 void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
1086 struct nfs4_sequence_res *res,
1087 struct nfs4_slot *slot)
1088 {
1089 if (!slot)
1090 return;
1091 slot->privileged = args->sa_privileged ? 1 : 0;
1092 args->sa_slot = slot;
1093
1094 res->sr_slot = slot;
1095 }
1096
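/*
 * Allocate a slot (from the session's fore channel table on NFSv4.1+, or
 * from the client's NFSv4.0 slot table) and attach it to the sequence
 * arguments before the RPC is transmitted.  Non-privileged tasks are put
 * to sleep while the slot table is draining or no slot is available.
 */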
1097 int nfs4_setup_sequence(struct nfs_client *client,
1098 struct nfs4_sequence_args *args,
1099 struct nfs4_sequence_res *res,
1100 struct rpc_task *task)
1101 {
1102 struct nfs4_session *session = nfs4_get_session(client);
1103 struct nfs4_slot_table *tbl = client->cl_slot_tbl;
1104 struct nfs4_slot *slot;
1105
1106 /* slot already allocated? */
1107 if (res->sr_slot != NULL)
1108 goto out_start;
1109
1110 if (session)
1111 tbl = &session->fc_slot_table;
1112
1113 spin_lock(&tbl->slot_tbl_lock);
1114 /* The state manager will wait until the slot table is empty */
1115 if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
1116 goto out_sleep;
1117
1118 slot = nfs4_alloc_slot(tbl);
1119 if (IS_ERR(slot)) {
1120 if (slot == ERR_PTR(-ENOMEM))
1121 goto out_sleep_timeout;
1122 goto out_sleep;
1123 }
1124 spin_unlock(&tbl->slot_tbl_lock);
1125
1126 nfs4_sequence_attach_slot(args, res, slot);
1127
1128 trace_nfs4_setup_sequence(session, args);
1129 out_start:
1130 nfs41_sequence_res_init(res);
1131 rpc_call_start(task);
1132 return 0;
1133 out_sleep_timeout:
1134 /* Try again in 1/4 second */
1135 if (args->sa_privileged)
1136 rpc_sleep_on_priority_timeout(&tbl->slot_tbl_waitq, task,
1137 jiffies + (HZ >> 2), RPC_PRIORITY_PRIVILEGED);
1138 else
1139 rpc_sleep_on_timeout(&tbl->slot_tbl_waitq, task,
1140 NULL, jiffies + (HZ >> 2));
1141 spin_unlock(&tbl->slot_tbl_lock);
1142 return -EAGAIN;
1143 out_sleep:
1144 if (args->sa_privileged)
1145 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
1146 RPC_PRIORITY_PRIVILEGED);
1147 else
1148 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
1149 spin_unlock(&tbl->slot_tbl_lock);
1150 return -EAGAIN;
1151 }
1152 EXPORT_SYMBOL_GPL(nfs4_setup_sequence);
1153
1154 static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
1155 {
1156 struct nfs4_call_sync_data *data = calldata;
1157 nfs4_setup_sequence(data->seq_server->nfs_client,
1158 data->seq_args, data->seq_res, task);
1159 }
1160
1161 static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
1162 {
1163 struct nfs4_call_sync_data *data = calldata;
1164 nfs4_sequence_done(task, data->seq_res);
1165 }
1166
1167 static const struct rpc_call_ops nfs40_call_sync_ops = {
1168 .rpc_call_prepare = nfs40_call_sync_prepare,
1169 .rpc_call_done = nfs40_call_sync_done,
1170 };
1171
1172 static int nfs4_call_sync_custom(struct rpc_task_setup *task_setup)
1173 {
1174 int ret;
1175 struct rpc_task *task;
1176
1177 task = rpc_run_task(task_setup);
1178 if (IS_ERR(task))
1179 return PTR_ERR(task);
1180
1181 ret = task->tk_status;
1182 rpc_put_task(task);
1183 return ret;
1184 }
1185
1186 static int nfs4_do_call_sync(struct rpc_clnt *clnt,
1187 struct nfs_server *server,
1188 struct rpc_message *msg,
1189 struct nfs4_sequence_args *args,
1190 struct nfs4_sequence_res *res,
1191 unsigned short task_flags)
1192 {
1193 struct nfs_client *clp = server->nfs_client;
1194 struct nfs4_call_sync_data data = {
1195 .seq_server = server,
1196 .seq_args = args,
1197 .seq_res = res,
1198 };
1199 struct rpc_task_setup task_setup = {
1200 .rpc_client = clnt,
1201 .rpc_message = msg,
1202 .callback_ops = clp->cl_mvops->call_sync_ops,
1203 .callback_data = &data,
1204 .flags = task_flags,
1205 };
1206
1207 return nfs4_call_sync_custom(&task_setup);
1208 }
1209
1210 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
1211 struct nfs_server *server,
1212 struct rpc_message *msg,
1213 struct nfs4_sequence_args *args,
1214 struct nfs4_sequence_res *res)
1215 {
1216 unsigned short task_flags = 0;
1217
1218 if (server->caps & NFS_CAP_MOVEABLE)
1219 task_flags = RPC_TASK_MOVEABLE;
1220 return nfs4_do_call_sync(clnt, server, msg, args, res, task_flags);
1221 }
1222
1223
1224 int nfs4_call_sync(struct rpc_clnt *clnt,
1225 struct nfs_server *server,
1226 struct rpc_message *msg,
1227 struct nfs4_sequence_args *args,
1228 struct nfs4_sequence_res *res,
1229 int cache_reply)
1230 {
1231 nfs4_init_sequence(args, res, cache_reply, 0);
1232 return nfs4_call_sync_sequence(clnt, server, msg, args, res);
1233 }
1234
1235 static void
1236 nfs4_inc_nlink_locked(struct inode *inode)
1237 {
1238 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
1239 NFS_INO_INVALID_CTIME |
1240 NFS_INO_INVALID_NLINK);
1241 inc_nlink(inode);
1242 }
1243
1244 static void
1245 nfs4_inc_nlink(struct inode *inode)
1246 {
1247 spin_lock(&inode->i_lock);
1248 nfs4_inc_nlink_locked(inode);
1249 spin_unlock(&inode->i_lock);
1250 }
1251
1252 static void
1253 nfs4_dec_nlink_locked(struct inode *inode)
1254 {
1255 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
1256 NFS_INO_INVALID_CTIME |
1257 NFS_INO_INVALID_NLINK);
1258 drop_nlink(inode);
1259 }
1260
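/*
 * Update the cached change attribute of an inode from the change_info4
 * returned by a metadata-modifying operation.  When the server's update was
 * not atomic with respect to the value we had cached, the remaining cached
 * attributes (and cached dentries for directories) are marked for
 * revalidation.
 */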
1261 static void
1262 nfs4_update_changeattr_locked(struct inode *inode,
1263 struct nfs4_change_info *cinfo,
1264 unsigned long timestamp, unsigned long cache_validity)
1265 {
1266 struct nfs_inode *nfsi = NFS_I(inode);
1267 u64 change_attr = inode_peek_iversion_raw(inode);
1268
1269 if (!nfs_have_delegated_mtime(inode))
1270 cache_validity |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME;
1271 if (S_ISDIR(inode->i_mode))
1272 cache_validity |= NFS_INO_INVALID_DATA;
1273
1274 switch (NFS_SERVER(inode)->change_attr_type) {
1275 case NFS4_CHANGE_TYPE_IS_UNDEFINED:
1276 if (cinfo->after == change_attr)
1277 goto out;
1278 break;
1279 default:
1280 if ((s64)(change_attr - cinfo->after) >= 0)
1281 goto out;
1282 }
1283
1284 inode_set_iversion_raw(inode, cinfo->after);
1285 if (!cinfo->atomic || cinfo->before != change_attr) {
1286 if (S_ISDIR(inode->i_mode))
1287 nfs_force_lookup_revalidate(inode);
1288
1289 if (!nfs_have_delegated_attributes(inode))
1290 cache_validity |=
1291 NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
1292 NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER |
1293 NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
1294 NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR;
1295 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
1296 }
1297 nfsi->attrtimeo_timestamp = jiffies;
1298 nfsi->read_cache_jiffies = timestamp;
1299 nfsi->attr_gencount = nfs_inc_attr_generation_counter();
1300 nfsi->cache_validity &= ~NFS_INO_INVALID_CHANGE;
1301 out:
1302 nfs_set_cache_invalid(inode, cache_validity);
1303 }
1304
1305 void
1306 nfs4_update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
1307 unsigned long timestamp, unsigned long cache_validity)
1308 {
1309 spin_lock(&dir->i_lock);
1310 nfs4_update_changeattr_locked(dir, cinfo, timestamp, cache_validity);
1311 spin_unlock(&dir->i_lock);
1312 }
1313
1314 struct nfs4_open_createattrs {
1315 struct nfs4_label *label;
1316 struct iattr *sattr;
1317 const __u32 verf[2];
1318 };
1319
1320 static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
1321 int err, struct nfs4_exception *exception)
1322 {
1323 if (err != -EINVAL)
1324 return false;
1325 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1326 return false;
1327 server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
1328 exception->retry = 1;
1329 return true;
1330 }
1331
1332 static fmode_t _nfs4_ctx_to_accessmode(const struct nfs_open_context *ctx)
1333 {
1334 return ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
1335 }
1336
1337 static fmode_t _nfs4_ctx_to_openmode(const struct nfs_open_context *ctx)
1338 {
1339 fmode_t ret = ctx->mode & (FMODE_READ|FMODE_WRITE);
1340
1341 return (ctx->mode & FMODE_EXEC) ? FMODE_READ | ret : ret;
1342 }
1343
1344 static u32
1345 nfs4_fmode_to_share_access(fmode_t fmode)
1346 {
1347 u32 res = 0;
1348
1349 switch (fmode & (FMODE_READ | FMODE_WRITE)) {
1350 case FMODE_READ:
1351 res = NFS4_SHARE_ACCESS_READ;
1352 break;
1353 case FMODE_WRITE:
1354 res = NFS4_SHARE_ACCESS_WRITE;
1355 break;
1356 case FMODE_READ|FMODE_WRITE:
1357 res = NFS4_SHARE_ACCESS_BOTH;
1358 }
1359 return res;
1360 }
1361
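/*
 * Convert the open fmode into OPEN4 share_access bits.  On servers that
 * support the v4.1 atomic open arguments, also set the "want" flags: no
 * delegation for O_DIRECT opens, and delegated timestamps or
 * open-xor-delegation when the server advertises those capabilities.
 */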
1362 static u32
1363 nfs4_map_atomic_open_share(struct nfs_server *server,
1364 fmode_t fmode, int openflags)
1365 {
1366 u32 res = nfs4_fmode_to_share_access(fmode);
1367
1368 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1369 goto out;
1370 /* Want no delegation if we're using O_DIRECT */
1371 if (openflags & O_DIRECT) {
1372 res |= NFS4_SHARE_WANT_NO_DELEG;
1373 goto out;
1374 }
1375 /* res |= NFS4_SHARE_WANT_NO_PREFERENCE; */
1376 if (server->caps & NFS_CAP_DELEGTIME)
1377 res |= NFS4_SHARE_WANT_DELEG_TIMESTAMPS;
1378 if (server->caps & NFS_CAP_OPEN_XOR)
1379 res |= NFS4_SHARE_WANT_OPEN_XOR_DELEGATION;
1380 out:
1381 return res;
1382 }
1383
1384 static enum open_claim_type4
1385 nfs4_map_atomic_open_claim(struct nfs_server *server,
1386 enum open_claim_type4 claim)
1387 {
1388 if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
1389 return claim;
1390 switch (claim) {
1391 default:
1392 return claim;
1393 case NFS4_OPEN_CLAIM_FH:
1394 return NFS4_OPEN_CLAIM_NULL;
1395 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1396 return NFS4_OPEN_CLAIM_DELEGATE_CUR;
1397 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1398 return NFS4_OPEN_CLAIM_DELEGATE_PREV;
1399 }
1400 }
1401
1402 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
1403 {
1404 p->o_res.f_attr = &p->f_attr;
1405 p->o_res.seqid = p->o_arg.seqid;
1406 p->c_res.seqid = p->c_arg.seqid;
1407 p->o_res.server = p->o_arg.server;
1408 p->o_res.access_request = p->o_arg.access;
1409 nfs_fattr_init(&p->f_attr);
1410 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
1411 }
1412
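/*
 * Allocate and initialise the argument/result container for an OPEN
 * compound: security labels, the open seqid, the claim type (mapped down
 * for servers without v4.1 atomic open support), share_access bits, create
 * attributes and verifier, plus the access bits to probe in the same call.
 */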
1413 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1414 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
1415 const struct nfs4_open_createattrs *c,
1416 enum open_claim_type4 claim,
1417 gfp_t gfp_mask)
1418 {
1419 struct dentry *parent = dget_parent(dentry);
1420 struct inode *dir = d_inode(parent);
1421 struct nfs_server *server = NFS_SERVER(dir);
1422 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
1423 struct nfs4_label *label = (c != NULL) ? c->label : NULL;
1424 struct nfs4_opendata *p;
1425
1426 p = kzalloc(sizeof(*p), gfp_mask);
1427 if (p == NULL)
1428 goto err;
1429
1430 p->f_attr.label = nfs4_label_alloc(server, gfp_mask);
1431 if (IS_ERR(p->f_attr.label))
1432 goto err_free_p;
1433
1434 p->a_label = nfs4_label_alloc(server, gfp_mask);
1435 if (IS_ERR(p->a_label))
1436 goto err_free_f;
1437
1438 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
1439 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
1440 if (IS_ERR(p->o_arg.seqid))
1441 goto err_free_label;
1442 nfs_sb_active(dentry->d_sb);
1443 p->dentry = dget(dentry);
1444 p->dir = parent;
1445 p->owner = sp;
1446 atomic_inc(&sp->so_count);
1447 p->o_arg.open_flags = flags;
1448 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
1449 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
1450 p->o_arg.share_access = nfs4_map_atomic_open_share(server,
1451 fmode, flags);
1452 if (flags & O_CREAT) {
1453 p->o_arg.umask = current_umask();
1454 p->o_arg.label = nfs4_label_copy(p->a_label, label);
1455 if (c->sattr != NULL && c->sattr->ia_valid != 0) {
1456 p->o_arg.u.attrs = &p->attrs;
1457 memcpy(&p->attrs, c->sattr, sizeof(p->attrs));
1458
1459 memcpy(p->o_arg.u.verifier.data, c->verf,
1460 sizeof(p->o_arg.u.verifier.data));
1461 }
1462 }
1463 	/* Ask the server to check for all possible access rights, since
1464 	 * the results are cached */
1465 switch (p->o_arg.claim) {
1466 default:
1467 break;
1468 case NFS4_OPEN_CLAIM_NULL:
1469 case NFS4_OPEN_CLAIM_FH:
1470 p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
1471 NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE |
1472 NFS4_ACCESS_EXECUTE |
1473 nfs_access_xattr_mask(server);
1474 }
1475 p->o_arg.clientid = server->nfs_client->cl_clientid;
1476 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
1477 p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
1478 p->o_arg.name = &dentry->d_name;
1479 p->o_arg.server = server;
1480 p->o_arg.bitmask = nfs4_bitmask(server, label);
1481 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
1482 switch (p->o_arg.claim) {
1483 case NFS4_OPEN_CLAIM_NULL:
1484 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1485 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
1486 p->o_arg.fh = NFS_FH(dir);
1487 break;
1488 case NFS4_OPEN_CLAIM_PREVIOUS:
1489 case NFS4_OPEN_CLAIM_FH:
1490 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1491 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1492 p->o_arg.fh = NFS_FH(d_inode(dentry));
1493 }
1494 p->c_arg.fh = &p->o_res.fh;
1495 p->c_arg.stateid = &p->o_res.stateid;
1496 p->c_arg.seqid = p->o_arg.seqid;
1497 nfs4_init_opendata_res(p);
1498 kref_init(&p->kref);
1499 return p;
1500
1501 err_free_label:
1502 nfs4_label_free(p->a_label);
1503 err_free_f:
1504 nfs4_label_free(p->f_attr.label);
1505 err_free_p:
1506 kfree(p);
1507 err:
1508 dput(parent);
1509 return NULL;
1510 }
1511
1512 static void nfs4_opendata_free(struct kref *kref)
1513 {
1514 struct nfs4_opendata *p = container_of(kref,
1515 struct nfs4_opendata, kref);
1516 struct super_block *sb = p->dentry->d_sb;
1517
1518 nfs4_lgopen_release(p->lgp);
1519 nfs_free_seqid(p->o_arg.seqid);
1520 nfs4_sequence_free_slot(&p->o_res.seq_res);
1521 if (p->state != NULL)
1522 nfs4_put_open_state(p->state);
1523 nfs4_put_state_owner(p->owner);
1524
1525 nfs4_label_free(p->a_label);
1526 nfs4_label_free(p->f_attr.label);
1527
1528 dput(p->dir);
1529 dput(p->dentry);
1530 nfs_sb_deactive(sb);
1531 nfs_fattr_free_names(&p->f_attr);
1532 kfree(p->f_attr.mdsthreshold);
1533 kfree(p);
1534 }
1535
1536 static void nfs4_opendata_put(struct nfs4_opendata *p)
1537 {
1538 if (p != NULL)
1539 kref_put(&p->kref, nfs4_opendata_free);
1540 }
1541
1542 static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
1543 fmode_t fmode)
1544 {
1545 switch(fmode & (FMODE_READ|FMODE_WRITE)) {
1546 case FMODE_READ|FMODE_WRITE:
1547 return state->n_rdwr != 0;
1548 case FMODE_WRITE:
1549 return state->n_wronly != 0;
1550 case FMODE_READ:
1551 return state->n_rdonly != 0;
1552 }
1553 WARN_ON_ONCE(1);
1554 return false;
1555 }
1556
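/*
 * Can this open be satisfied from open state we already hold, without
 * sending another OPEN to the server?  Opens by name or file handle
 * (CLAIM_NULL/CLAIM_FH) always go to the server, as do opens with
 * O_EXCL or O_TRUNC set.
 */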
1557 static int can_open_cached(struct nfs4_state *state, fmode_t mode,
1558 int open_mode, enum open_claim_type4 claim)
1559 {
1560 int ret = 0;
1561
1562 if (open_mode & (O_EXCL|O_TRUNC))
1563 goto out;
1564 switch (claim) {
1565 case NFS4_OPEN_CLAIM_NULL:
1566 case NFS4_OPEN_CLAIM_FH:
1567 goto out;
1568 default:
1569 break;
1570 }
1571 switch (mode & (FMODE_READ|FMODE_WRITE)) {
1572 case FMODE_READ:
1573 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
1574 && state->n_rdonly != 0;
1575 break;
1576 case FMODE_WRITE:
1577 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
1578 && state->n_wronly != 0;
1579 break;
1580 case FMODE_READ|FMODE_WRITE:
1581 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
1582 && state->n_rdwr != 0;
1583 }
1584 out:
1585 return ret;
1586 }
1587
1588 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
1589 enum open_claim_type4 claim)
1590 {
1591 if (delegation == NULL)
1592 return 0;
1593 if ((delegation->type & fmode) != fmode)
1594 return 0;
1595 switch (claim) {
1596 case NFS4_OPEN_CLAIM_NULL:
1597 case NFS4_OPEN_CLAIM_FH:
1598 break;
1599 case NFS4_OPEN_CLAIM_PREVIOUS:
1600 if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
1601 break;
1602 fallthrough;
1603 default:
1604 return 0;
1605 }
1606 nfs_mark_delegation_referenced(delegation);
1607 return 1;
1608 }
1609
1610 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
1611 {
1612 switch (fmode) {
1613 case FMODE_WRITE:
1614 state->n_wronly++;
1615 break;
1616 case FMODE_READ:
1617 state->n_rdonly++;
1618 break;
1619 case FMODE_READ|FMODE_WRITE:
1620 state->n_rdwr++;
1621 }
1622 nfs4_state_set_mode_locked(state, state->state | fmode);
1623 }
1624
1625 #ifdef CONFIG_NFS_V4_1
1626 static bool nfs_open_stateid_recover_openmode(struct nfs4_state *state)
1627 {
1628 if (state->n_rdonly && !test_bit(NFS_O_RDONLY_STATE, &state->flags))
1629 return true;
1630 if (state->n_wronly && !test_bit(NFS_O_WRONLY_STATE, &state->flags))
1631 return true;
1632 if (state->n_rdwr && !test_bit(NFS_O_RDWR_STATE, &state->flags))
1633 return true;
1634 return false;
1635 }
1636 #endif /* CONFIG_NFS_V4_1 */
1637
1638 static void nfs_state_log_update_open_stateid(struct nfs4_state *state)
1639 {
1640 if (test_and_clear_bit(NFS_STATE_CHANGE_WAIT, &state->flags))
1641 wake_up_all(&state->waitq);
1642 }
1643
1644 static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
1645 {
1646 struct nfs_client *clp = state->owner->so_server->nfs_client;
1647 bool need_recover = false;
1648
1649 if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
1650 need_recover = true;
1651 if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
1652 need_recover = true;
1653 if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
1654 need_recover = true;
1655 if (need_recover)
1656 nfs4_state_mark_reclaim_nograce(clp, state);
1657 }
1658
1659 /*
1660  * Check whether or not the caller may update the open stateid
1661 * to the value passed in by stateid.
1662 *
1663 * Note: This function relies heavily on the server implementing
1664 * RFC7530 Section 9.1.4.2, and RFC5661 Section 8.2.2
1665 * correctly.
1666 * i.e. The stateid seqids have to be initialised to 1, and
1667 * are then incremented on every state transition.
1668 */
1669 static bool nfs_stateid_is_sequential(struct nfs4_state *state,
1670 const nfs4_stateid *stateid)
1671 {
1672 if (test_bit(NFS_OPEN_STATE, &state->flags)) {
1673 /* The common case - we're updating to a new sequence number */
1674 if (nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1675 if (nfs4_stateid_is_next(&state->open_stateid, stateid))
1676 return true;
1677 return false;
1678 }
1679 /* The server returned a new stateid */
1680 }
1681 /* This is the first OPEN in this generation */
1682 if (stateid->seqid == cpu_to_be32(1))
1683 return true;
1684 return false;
1685 }
1686
1687 static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
1688 {
1689 if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
1690 return;
1691 if (state->n_wronly)
1692 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1693 if (state->n_rdonly)
1694 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1695 if (state->n_rdwr)
1696 set_bit(NFS_O_RDWR_STATE, &state->flags);
1697 set_bit(NFS_OPEN_STATE, &state->flags);
1698 }
1699
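/*
 * Update the open state after a CLOSE or OPEN_DOWNGRADE reply: clear the
 * share-mode bits that are no longer covered, and, if the reply carries an
 * older stateid than the one we already hold (an OPEN raced with us),
 * resynchronise the bits from the open counts instead of applying it.
 */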
1700 static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
1701 nfs4_stateid *stateid, fmode_t fmode)
1702 {
1703 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1704 switch (fmode & (FMODE_READ|FMODE_WRITE)) {
1705 case FMODE_WRITE:
1706 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1707 break;
1708 case FMODE_READ:
1709 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1710 break;
1711 case 0:
1712 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1713 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1714 clear_bit(NFS_OPEN_STATE, &state->flags);
1715 }
1716 if (stateid == NULL)
1717 return;
1718 /* Handle OPEN+OPEN_DOWNGRADE races */
1719 if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
1720 !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
1721 nfs_resync_open_stateid_locked(state);
1722 goto out;
1723 }
1724 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1725 nfs4_stateid_copy(&state->stateid, stateid);
1726 nfs4_stateid_copy(&state->open_stateid, stateid);
1727 trace_nfs4_open_stateid_update(state->inode, stateid, 0);
1728 out:
1729 nfs_state_log_update_open_stateid(state);
1730 }
1731
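/*
 * Process the result of a CLOSE or OPEN_DOWNGRADE: update the open-mode
 * flags and stateid under the seqlock, then kick the state manager if
 * recovery was requested in the meantime.
 */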
1732 static void nfs_clear_open_stateid(struct nfs4_state *state,
1733 nfs4_stateid *arg_stateid,
1734 nfs4_stateid *stateid, fmode_t fmode)
1735 {
1736 write_seqlock(&state->seqlock);
1737 /* Ignore if the CLOSE argument doesn't match the current stateid */
1738 if (nfs4_state_match_open_stateid_other(state, arg_stateid))
1739 nfs_clear_open_stateid_locked(state, stateid, fmode);
1740 write_sequnlock(&state->seqlock);
1741 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1742 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1743 }
1744
1745 static void nfs_set_open_stateid_locked(struct nfs4_state *state,
1746 const nfs4_stateid *stateid, nfs4_stateid *freeme)
1747 __must_hold(&state->owner->so_lock)
1748 __must_hold(&state->seqlock)
1749 __must_hold(RCU)
1750
1751 {
1752 DEFINE_WAIT(wait);
1753 int status = 0;
1754 for (;;) {
1755
1756 if (nfs_stateid_is_sequential(state, stateid))
1757 break;
1758
1759 if (status)
1760 break;
1761 /* Rely on seqids for serialisation with NFSv4.0 */
1762 if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client))
1763 break;
1764
1765 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
1766 prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
1767 /*
1768 * Ensure we process the state changes in the same order
1769 * in which the server processed them by delaying the
1770 * update of the stateid until we are in sequence.
1771 */
1772 write_sequnlock(&state->seqlock);
1773 spin_unlock(&state->owner->so_lock);
1774 rcu_read_unlock();
1775 trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);
1776
1777 if (!fatal_signal_pending(current)) {
1778 if (schedule_timeout(5*HZ) == 0)
1779 status = -EAGAIN;
1780 else
1781 status = 0;
1782 } else
1783 status = -EINTR;
1784 finish_wait(&state->waitq, &wait);
1785 rcu_read_lock();
1786 spin_lock(&state->owner->so_lock);
1787 write_seqlock(&state->seqlock);
1788 }
1789
1790 if (test_bit(NFS_OPEN_STATE, &state->flags) &&
1791 !nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1792 nfs4_stateid_copy(freeme, &state->open_stateid);
1793 nfs_test_and_clear_all_open_stateid(state);
1794 }
1795
1796 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1797 nfs4_stateid_copy(&state->stateid, stateid);
1798 nfs4_stateid_copy(&state->open_stateid, stateid);
1799 trace_nfs4_open_stateid_update(state->inode, stateid, status);
1800 nfs_state_log_update_open_stateid(state);
1801 }
1802
1803 static void nfs_state_set_open_stateid(struct nfs4_state *state,
1804 const nfs4_stateid *open_stateid,
1805 fmode_t fmode,
1806 nfs4_stateid *freeme)
1807 {
1808 /*
1809 * Protect the call to nfs4_state_set_mode_locked and
1810 * serialise the stateid update
1811 */
1812 write_seqlock(&state->seqlock);
1813 nfs_set_open_stateid_locked(state, open_stateid, freeme);
1814 switch (fmode) {
1815 case FMODE_READ:
1816 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1817 break;
1818 case FMODE_WRITE:
1819 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1820 break;
1821 case FMODE_READ|FMODE_WRITE:
1822 set_bit(NFS_O_RDWR_STATE, &state->flags);
1823 }
1824 set_bit(NFS_OPEN_STATE, &state->flags);
1825 write_sequnlock(&state->seqlock);
1826 }
1827
1828 static void nfs_state_clear_open_state_flags(struct nfs4_state *state)
1829 {
1830 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1831 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1832 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1833 clear_bit(NFS_OPEN_STATE, &state->flags);
1834 }
1835
1836 static void nfs_state_set_delegation(struct nfs4_state *state,
1837 const nfs4_stateid *deleg_stateid,
1838 fmode_t fmode)
1839 {
1840 /*
1841 * Protect the call to nfs4_state_set_mode_locked and
1842 * serialise the stateid update
1843 */
1844 write_seqlock(&state->seqlock);
1845 nfs4_stateid_copy(&state->stateid, deleg_stateid);
1846 set_bit(NFS_DELEGATED_STATE, &state->flags);
1847 write_sequnlock(&state->seqlock);
1848 }
1849
1850 static void nfs_state_clear_delegation(struct nfs4_state *state)
1851 {
1852 write_seqlock(&state->seqlock);
1853 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1854 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1855 write_sequnlock(&state->seqlock);
1856 }
1857
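/*
 * Update the nfs4_state with the open stateid and/or delegation stateid
 * returned by the server. Returns 1 and bumps the open-mode counters if
 * either stateid was applied, 0 otherwise. Any open stateid that was
 * superseded in the process is tested and freed on the server.
 */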
1858 int update_open_stateid(struct nfs4_state *state,
1859 const nfs4_stateid *open_stateid,
1860 const nfs4_stateid *delegation,
1861 fmode_t fmode)
1862 {
1863 struct nfs_server *server = NFS_SERVER(state->inode);
1864 struct nfs_client *clp = server->nfs_client;
1865 struct nfs_inode *nfsi = NFS_I(state->inode);
1866 struct nfs_delegation *deleg_cur;
1867 nfs4_stateid freeme = { };
1868 int ret = 0;
1869
1870 fmode &= (FMODE_READ|FMODE_WRITE);
1871
1872 rcu_read_lock();
1873 spin_lock(&state->owner->so_lock);
1874 if (open_stateid != NULL) {
1875 nfs_state_set_open_stateid(state, open_stateid, fmode, &freeme);
1876 ret = 1;
1877 }
1878
1879 deleg_cur = nfs4_get_valid_delegation(state->inode);
1880 if (deleg_cur == NULL)
1881 goto no_delegation;
1882
1883 spin_lock(&deleg_cur->lock);
1884 if (rcu_dereference(nfsi->delegation) != deleg_cur ||
1885 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
1886 (deleg_cur->type & fmode) != fmode)
1887 goto no_delegation_unlock;
1888
1889 if (delegation == NULL)
1890 delegation = &deleg_cur->stateid;
1891 else if (!nfs4_stateid_match_other(&deleg_cur->stateid, delegation))
1892 goto no_delegation_unlock;
1893
1894 nfs_mark_delegation_referenced(deleg_cur);
1895 nfs_state_set_delegation(state, &deleg_cur->stateid, fmode);
1896 ret = 1;
1897 no_delegation_unlock:
1898 spin_unlock(&deleg_cur->lock);
1899 no_delegation:
1900 if (ret)
1901 update_open_stateflags(state, fmode);
1902 spin_unlock(&state->owner->so_lock);
1903 rcu_read_unlock();
1904
1905 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1906 nfs4_schedule_state_manager(clp);
1907 if (freeme.type != 0)
1908 nfs4_test_and_free_stateid(server, &freeme,
1909 state->owner->so_cred);
1910
1911 return ret;
1912 }
1913
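/*
 * Update the cached lock stateid, but only if the incoming stateid
 * belongs to the same lock state and carries a newer seqid.
 * Returns true if the update was performed.
 */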
1914 static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
1915 const nfs4_stateid *stateid)
1916 {
1917 struct nfs4_state *state = lsp->ls_state;
1918 bool ret = false;
1919
1920 spin_lock(&state->state_lock);
1921 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
1922 goto out_noupdate;
1923 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
1924 goto out_noupdate;
1925 nfs4_stateid_copy(&lsp->ls_stateid, stateid);
1926 ret = true;
1927 out_noupdate:
1928 spin_unlock(&state->state_lock);
1929 return ret;
1930 }
1931
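/*
 * Return the delegation if its type does not cover all of the access
 * modes being requested.
 */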
1932 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1933 {
1934 struct nfs_delegation *delegation;
1935
1936 fmode &= FMODE_READ|FMODE_WRITE;
1937 rcu_read_lock();
1938 delegation = nfs4_get_valid_delegation(inode);
1939 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1940 rcu_read_unlock();
1941 return;
1942 }
1943 rcu_read_unlock();
1944 nfs4_inode_return_delegation(inode);
1945 }
1946
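/*
 * Try to satisfy the OPEN out of state we already hold: either a cached
 * open of a compatible mode, or a valid delegation covering the
 * requested access. Returns a referenced nfs4_state on success, or an
 * ERR_PTR (usually -EAGAIN) if an OPEN call to the server is required.
 */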
1947 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1948 {
1949 struct nfs4_state *state = opendata->state;
1950 struct nfs_delegation *delegation;
1951 int open_mode = opendata->o_arg.open_flags;
1952 fmode_t fmode = opendata->o_arg.fmode;
1953 enum open_claim_type4 claim = opendata->o_arg.claim;
1954 nfs4_stateid stateid;
1955 int ret = -EAGAIN;
1956
1957 for (;;) {
1958 spin_lock(&state->owner->so_lock);
1959 if (can_open_cached(state, fmode, open_mode, claim)) {
1960 update_open_stateflags(state, fmode);
1961 spin_unlock(&state->owner->so_lock);
1962 goto out_return_state;
1963 }
1964 spin_unlock(&state->owner->so_lock);
1965 rcu_read_lock();
1966 delegation = nfs4_get_valid_delegation(state->inode);
1967 if (!can_open_delegated(delegation, fmode, claim)) {
1968 rcu_read_unlock();
1969 break;
1970 }
1971 /* Save the delegation */
1972 nfs4_stateid_copy(&stateid, &delegation->stateid);
1973 rcu_read_unlock();
1974 nfs_release_seqid(opendata->o_arg.seqid);
1975 if (!opendata->is_recover) {
1976 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1977 if (ret != 0)
1978 goto out;
1979 }
1980 ret = -EAGAIN;
1981
1982 /* Try to update the stateid using the delegation */
1983 if (update_open_stateid(state, NULL, &stateid, fmode))
1984 goto out_return_state;
1985 }
1986 out:
1987 return ERR_PTR(ret);
1988 out_return_state:
1989 refcount_inc(&state->count);
1990 return state;
1991 }
1992
1993 static void
1994 nfs4_process_delegation(struct inode *inode, const struct cred *cred,
1995 enum open_claim_type4 claim,
1996 const struct nfs4_open_delegation *delegation)
1997 {
1998 switch (delegation->open_delegation_type) {
1999 case NFS4_OPEN_DELEGATE_READ:
2000 case NFS4_OPEN_DELEGATE_WRITE:
2001 case NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG:
2002 case NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG:
2003 break;
2004 default:
2005 return;
2006 }
2007 switch (claim) {
2008 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
2009 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
2010 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
2011 "returning a delegation for "
2012 "OPEN(CLAIM_DELEGATE_CUR)\n",
2013 NFS_SERVER(inode)->nfs_client->cl_hostname);
2014 break;
2015 case NFS4_OPEN_CLAIM_PREVIOUS:
2016 nfs_inode_reclaim_delegation(inode, cred, delegation->type,
2017 &delegation->stateid,
2018 delegation->pagemod_limit,
2019 delegation->open_delegation_type);
2020 break;
2021 default:
2022 nfs_inode_set_delegation(inode, cred, delegation->type,
2023 &delegation->stateid,
2024 delegation->pagemod_limit,
2025 delegation->open_delegation_type);
2026 }
2027 if (delegation->do_recall)
2028 nfs_async_inode_return_delegation(inode, &delegation->stateid);
2029 }
2030
2031 /*
2032 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
2033 * and update the nfs4_state.
2034 */
2035 static struct nfs4_state *
2036 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
2037 {
2038 struct inode *inode = data->state->inode;
2039 struct nfs4_state *state = data->state;
2040 int ret;
2041
2042 if (!data->rpc_done) {
2043 if (data->rpc_status)
2044 return ERR_PTR(data->rpc_status);
2045 return nfs4_try_open_cached(data);
2046 }
2047
2048 ret = nfs_refresh_inode(inode, &data->f_attr);
2049 if (ret)
2050 return ERR_PTR(ret);
2051
2052 nfs4_process_delegation(state->inode,
2053 data->owner->so_cred,
2054 data->o_arg.claim,
2055 &data->o_res.delegation);
2056
2057 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_NO_OPEN_STATEID)) {
2058 if (!update_open_stateid(state, &data->o_res.stateid,
2059 NULL, data->o_arg.fmode))
2060 return ERR_PTR(-EAGAIN);
2061 } else if (!update_open_stateid(state, NULL, NULL, data->o_arg.fmode))
2062 return ERR_PTR(-EAGAIN);
2063 refcount_inc(&state->count);
2064
2065 return state;
2066 }
2067
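/*
 * Find the inode described by the OPEN reply. For CLAIM_NULL and
 * delegation claims, instantiate it from the returned filehandle and
 * attributes; for filehandle-based claims, take a reference on the
 * dentry's inode and refresh its attributes.
 */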
2068 static struct inode *
2069 nfs4_opendata_get_inode(struct nfs4_opendata *data)
2070 {
2071 struct inode *inode;
2072
2073 switch (data->o_arg.claim) {
2074 case NFS4_OPEN_CLAIM_NULL:
2075 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
2076 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
2077 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
2078 return ERR_PTR(-EAGAIN);
2079 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh,
2080 &data->f_attr);
2081 break;
2082 default:
2083 inode = d_inode(data->dentry);
2084 ihold(inode);
2085 nfs_refresh_inode(inode, &data->f_attr);
2086 }
2087 return inode;
2088 }
2089
2090 static struct nfs4_state *
2091 nfs4_opendata_find_nfs4_state(struct nfs4_opendata *data)
2092 {
2093 struct nfs4_state *state;
2094 struct inode *inode;
2095
2096 inode = nfs4_opendata_get_inode(data);
2097 if (IS_ERR(inode))
2098 return ERR_CAST(inode);
2099 if (data->state != NULL && data->state->inode == inode) {
2100 state = data->state;
2101 refcount_inc(&state->count);
2102 } else
2103 state = nfs4_get_open_state(inode, data->owner);
2104 iput(inode);
2105 if (state == NULL)
2106 state = ERR_PTR(-ENOMEM);
2107 return state;
2108 }
2109
2110 static struct nfs4_state *
2111 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
2112 {
2113 struct nfs4_state *state;
2114
2115 if (!data->rpc_done) {
2116 state = nfs4_try_open_cached(data);
2117 trace_nfs4_cached_open(data->state);
2118 goto out;
2119 }
2120
2121 state = nfs4_opendata_find_nfs4_state(data);
2122 if (IS_ERR(state))
2123 goto out;
2124
2125 nfs4_process_delegation(state->inode,
2126 data->owner->so_cred,
2127 data->o_arg.claim,
2128 &data->o_res.delegation);
2129
2130 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_NO_OPEN_STATEID)) {
2131 if (!update_open_stateid(state, &data->o_res.stateid,
2132 NULL, data->o_arg.fmode)) {
2133 nfs4_put_open_state(state);
2134 state = ERR_PTR(-EAGAIN);
2135 }
2136 } else if (!update_open_stateid(state, NULL, NULL, data->o_arg.fmode)) {
2137 nfs4_put_open_state(state);
2138 state = ERR_PTR(-EAGAIN);
2139 }
2140 out:
2141 nfs_release_seqid(data->o_arg.seqid);
2142 return state;
2143 }
2144
2145 static struct nfs4_state *
2146 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
2147 {
2148 struct nfs4_state *ret;
2149
2150 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
2151 ret = _nfs4_opendata_reclaim_to_nfs4_state(data);
2152 else
2153 ret = _nfs4_opendata_to_nfs4_state(data);
2154 nfs4_sequence_free_slot(&data->o_res.seq_res);
2155 return ret;
2156 }
2157
2158 static struct nfs_open_context *
2159 nfs4_state_find_open_context_mode(struct nfs4_state *state, fmode_t mode)
2160 {
2161 struct nfs_inode *nfsi = NFS_I(state->inode);
2162 struct nfs_open_context *ctx;
2163
2164 rcu_read_lock();
2165 list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
2166 if (ctx->state != state)
2167 continue;
2168 if ((ctx->mode & mode) != mode)
2169 continue;
2170 if (!get_nfs_open_context(ctx))
2171 continue;
2172 rcu_read_unlock();
2173 return ctx;
2174 }
2175 rcu_read_unlock();
2176 return ERR_PTR(-ENOENT);
2177 }
2178
2179 static struct nfs_open_context *
2180 nfs4_state_find_open_context(struct nfs4_state *state)
2181 {
2182 struct nfs_open_context *ctx;
2183
2184 ctx = nfs4_state_find_open_context_mode(state, FMODE_READ|FMODE_WRITE);
2185 if (!IS_ERR(ctx))
2186 return ctx;
2187 ctx = nfs4_state_find_open_context_mode(state, FMODE_WRITE);
2188 if (!IS_ERR(ctx))
2189 return ctx;
2190 return nfs4_state_find_open_context_mode(state, FMODE_READ);
2191 }
2192
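/*
 * Allocate an nfs4_opendata for recovering the given open state, taking
 * an extra reference to the state for the duration of the recovery.
 */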
2193 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
2194 struct nfs4_state *state, enum open_claim_type4 claim)
2195 {
2196 struct nfs4_opendata *opendata;
2197
2198 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
2199 NULL, claim, GFP_NOFS);
2200 if (opendata == NULL)
2201 return ERR_PTR(-ENOMEM);
2202 opendata->state = state;
2203 refcount_inc(&state->count);
2204 return opendata;
2205 }
2206
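/*
 * Re-send an OPEN for a single share access mode as part of state
 * recovery. Does nothing if that mode does not need to be
 * re-established. Returns -ESTALE if the recovered state no longer
 * matches the original.
 */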
2207 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
2208 fmode_t fmode)
2209 {
2210 struct nfs4_state *newstate;
2211 struct nfs_server *server = NFS_SB(opendata->dentry->d_sb);
2212 int openflags = opendata->o_arg.open_flags;
2213 int ret;
2214
2215 if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
2216 return 0;
2217 opendata->o_arg.fmode = fmode;
2218 opendata->o_arg.share_access =
2219 nfs4_map_atomic_open_share(server, fmode, openflags);
2220 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
2221 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
2222 nfs4_init_opendata_res(opendata);
2223 ret = _nfs4_recover_proc_open(opendata);
2224 if (ret != 0)
2225 return ret;
2226 newstate = nfs4_opendata_to_nfs4_state(opendata);
2227 if (IS_ERR(newstate))
2228 return PTR_ERR(newstate);
2229 if (newstate != opendata->state)
2230 ret = -ESTALE;
2231 nfs4_close_state(newstate, fmode);
2232 return ret;
2233 }
2234
2235 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
2236 {
2237 int ret;
2238
2239 /* memory barrier prior to reading state->n_* */
2240 smp_rmb();
2241 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
2242 if (ret != 0)
2243 return ret;
2244 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
2245 if (ret != 0)
2246 return ret;
2247 ret = nfs4_open_recover_helper(opendata, FMODE_READ);
2248 if (ret != 0)
2249 return ret;
2250 /*
2251 * We may have performed cached opens for all three recoveries.
2252 * Check if we need to update the current stateid.
2253 */
2254 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
2255 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
2256 write_seqlock(&state->seqlock);
2257 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
2258 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2259 write_sequnlock(&state->seqlock);
2260 }
2261 return 0;
2262 }
2263
2264 /*
2265 * OPEN_RECLAIM:
2266 * reclaim state on the server after a reboot.
2267 */
2268 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
2269 {
2270 struct nfs_delegation *delegation;
2271 struct nfs4_opendata *opendata;
2272 u32 delegation_type = NFS4_OPEN_DELEGATE_NONE;
2273 int status;
2274
2275 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2276 NFS4_OPEN_CLAIM_PREVIOUS);
2277 if (IS_ERR(opendata))
2278 return PTR_ERR(opendata);
2279 rcu_read_lock();
2280 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2281 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0) {
2282 switch (delegation->type) {
2283 case FMODE_READ:
2284 delegation_type = NFS4_OPEN_DELEGATE_READ;
2285 if (test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags))
2286 delegation_type = NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG;
2287 break;
2288 case FMODE_WRITE:
2289 case FMODE_READ|FMODE_WRITE:
2290 delegation_type = NFS4_OPEN_DELEGATE_WRITE;
2291 if (test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags))
2292 delegation_type = NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG;
2293 }
2294 }
2295 rcu_read_unlock();
2296 opendata->o_arg.u.delegation_type = delegation_type;
2297 status = nfs4_open_recover(opendata, state);
2298 nfs4_opendata_put(opendata);
2299 return status;
2300 }
2301
2302 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
2303 {
2304 struct nfs_server *server = NFS_SERVER(state->inode);
2305 struct nfs4_exception exception = { };
2306 int err;
2307 do {
2308 err = _nfs4_do_open_reclaim(ctx, state);
2309 trace_nfs4_open_reclaim(ctx, 0, err);
2310 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2311 continue;
2312 if (err != -NFS4ERR_DELAY)
2313 break;
2314 nfs4_handle_exception(server, err, &exception);
2315 } while (exception.retry);
2316 return err;
2317 }
2318
2319 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
2320 {
2321 struct nfs_open_context *ctx;
2322 int ret;
2323
2324 ctx = nfs4_state_find_open_context(state);
2325 if (IS_ERR(ctx))
2326 return -EAGAIN;
2327 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2328 nfs_state_clear_open_state_flags(state);
2329 ret = nfs4_do_open_reclaim(ctx, state);
2330 put_nfs_open_context(ctx);
2331 return ret;
2332 }
2333
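/*
 * Decide how to proceed after an error while recalling a delegation:
 * schedule the appropriate lease, migration or stateid recovery and
 * return -EAGAIN to retry, return 0 to give up (marking any lock as
 * lost), or hand the error back to the caller.
 */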
2334 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err)
2335 {
2336 switch (err) {
2337 default:
2338 printk(KERN_ERR "NFS: %s: unhandled error "
2339 "%d.\n", __func__, err);
2340 fallthrough;
2341 case 0:
2342 case -ENOENT:
2343 case -EAGAIN:
2344 case -ESTALE:
2345 case -ETIMEDOUT:
2346 break;
2347 case -NFS4ERR_BADSESSION:
2348 case -NFS4ERR_BADSLOT:
2349 case -NFS4ERR_BAD_HIGH_SLOT:
2350 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
2351 case -NFS4ERR_DEADSESSION:
2352 return -EAGAIN;
2353 case -NFS4ERR_STALE_CLIENTID:
2354 case -NFS4ERR_STALE_STATEID:
2355 /* Don't recall a delegation if it was lost */
2356 nfs4_schedule_lease_recovery(server->nfs_client);
2357 return -EAGAIN;
2358 case -NFS4ERR_MOVED:
2359 nfs4_schedule_migration_recovery(server);
2360 return -EAGAIN;
2361 case -NFS4ERR_LEASE_MOVED:
2362 nfs4_schedule_lease_moved_recovery(server->nfs_client);
2363 return -EAGAIN;
2364 case -NFS4ERR_DELEG_REVOKED:
2365 case -NFS4ERR_ADMIN_REVOKED:
2366 case -NFS4ERR_EXPIRED:
2367 case -NFS4ERR_BAD_STATEID:
2368 case -NFS4ERR_OPENMODE:
2369 nfs_inode_find_state_and_recover(state->inode,
2370 stateid);
2371 nfs4_schedule_stateid_recovery(server, state);
2372 return -EAGAIN;
2373 case -NFS4ERR_DELAY:
2374 case -NFS4ERR_GRACE:
2375 ssleep(1);
2376 return -EAGAIN;
2377 case -ENOMEM:
2378 case -NFS4ERR_DENIED:
2379 if (fl) {
2380 struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner;
2381 if (lsp)
2382 set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
2383 }
2384 return 0;
2385 }
2386 return err;
2387 }
2388
2389 int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
2390 struct nfs4_state *state, const nfs4_stateid *stateid)
2391 {
2392 struct nfs_server *server = NFS_SERVER(state->inode);
2393 struct nfs4_opendata *opendata;
2394 int err = 0;
2395
2396 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2397 NFS4_OPEN_CLAIM_DELEG_CUR_FH);
2398 if (IS_ERR(opendata))
2399 return PTR_ERR(opendata);
2400 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
2401 if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) {
2402 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
2403 if (err)
2404 goto out;
2405 }
2406 if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) {
2407 err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
2408 if (err)
2409 goto out;
2410 }
2411 if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) {
2412 err = nfs4_open_recover_helper(opendata, FMODE_READ);
2413 if (err)
2414 goto out;
2415 }
2416 nfs_state_clear_delegation(state);
2417 out:
2418 nfs4_opendata_put(opendata);
2419 return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
2420 }
2421
2422 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
2423 {
2424 struct nfs4_opendata *data = calldata;
2425
2426 nfs4_setup_sequence(data->o_arg.server->nfs_client,
2427 &data->c_arg.seq_args, &data->c_res.seq_res, task);
2428 }
2429
2430 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
2431 {
2432 struct nfs4_opendata *data = calldata;
2433
2434 nfs40_sequence_done(task, &data->c_res.seq_res);
2435
2436 data->rpc_status = task->tk_status;
2437 if (data->rpc_status == 0) {
2438 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
2439 nfs_confirm_seqid(&data->owner->so_seqid, 0);
2440 renew_lease(data->o_res.server, data->timestamp);
2441 data->rpc_done = true;
2442 }
2443 }
2444
2445 static void nfs4_open_confirm_release(void *calldata)
2446 {
2447 struct nfs4_opendata *data = calldata;
2448 struct nfs4_state *state = NULL;
2449
2450 /* If this request hasn't been cancelled, do nothing */
2451 if (!data->cancelled)
2452 goto out_free;
2453 /* In case of error, no cleanup! */
2454 if (!data->rpc_done)
2455 goto out_free;
2456 state = nfs4_opendata_to_nfs4_state(data);
2457 if (!IS_ERR(state))
2458 nfs4_close_state(state, data->o_arg.fmode);
2459 out_free:
2460 nfs4_opendata_put(data);
2461 }
2462
2463 static const struct rpc_call_ops nfs4_open_confirm_ops = {
2464 .rpc_call_prepare = nfs4_open_confirm_prepare,
2465 .rpc_call_done = nfs4_open_confirm_done,
2466 .rpc_release = nfs4_open_confirm_release,
2467 };
2468
2469 /*
2470 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
2471 */
2472 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
2473 {
2474 struct nfs_server *server = NFS_SERVER(d_inode(data->dir));
2475 struct rpc_task *task;
2476 struct rpc_message msg = {
2477 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
2478 .rpc_argp = &data->c_arg,
2479 .rpc_resp = &data->c_res,
2480 .rpc_cred = data->owner->so_cred,
2481 };
2482 struct rpc_task_setup task_setup_data = {
2483 .rpc_client = server->client,
2484 .rpc_message = &msg,
2485 .callback_ops = &nfs4_open_confirm_ops,
2486 .callback_data = data,
2487 .workqueue = nfsiod_workqueue,
2488 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
2489 };
2490 int status;
2491
2492 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1,
2493 data->is_recover);
2494 kref_get(&data->kref);
2495 data->rpc_done = false;
2496 data->rpc_status = 0;
2497 data->timestamp = jiffies;
2498 task = rpc_run_task(&task_setup_data);
2499 if (IS_ERR(task))
2500 return PTR_ERR(task);
2501 status = rpc_wait_for_completion_task(task);
2502 if (status != 0) {
2503 data->cancelled = true;
2504 smp_wmb();
2505 } else
2506 status = data->rpc_status;
2507 rpc_put_task(task);
2508 return status;
2509 }
2510
2511 static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
2512 {
2513 struct nfs4_opendata *data = calldata;
2514 struct nfs4_state_owner *sp = data->owner;
2515 struct nfs_client *clp = sp->so_server->nfs_client;
2516 enum open_claim_type4 claim = data->o_arg.claim;
2517
2518 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
2519 goto out_wait;
2520 /*
2521 * Check if we still need to send an OPEN call, or if we can use
2522 * a delegation instead.
2523 */
2524 if (data->state != NULL) {
2525 struct nfs_delegation *delegation;
2526
2527 if (can_open_cached(data->state, data->o_arg.fmode,
2528 data->o_arg.open_flags, claim))
2529 goto out_no_action;
2530 rcu_read_lock();
2531 delegation = nfs4_get_valid_delegation(data->state->inode);
2532 if (can_open_delegated(delegation, data->o_arg.fmode, claim))
2533 goto unlock_no_action;
2534 rcu_read_unlock();
2535 }
2536 /* Update client id. */
2537 data->o_arg.clientid = clp->cl_clientid;
2538 switch (claim) {
2539 default:
2540 break;
2541 case NFS4_OPEN_CLAIM_PREVIOUS:
2542 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
2543 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
2544 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
2545 fallthrough;
2546 case NFS4_OPEN_CLAIM_FH:
2547 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
2548 }
2549 data->timestamp = jiffies;
2550 if (nfs4_setup_sequence(data->o_arg.server->nfs_client,
2551 &data->o_arg.seq_args,
2552 &data->o_res.seq_res,
2553 task) != 0)
2554 nfs_release_seqid(data->o_arg.seqid);
2555
2556 /* Set the create mode (note dependency on the session type) */
2557 data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
2558 if (data->o_arg.open_flags & O_EXCL) {
2559 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
2560 if (clp->cl_mvops->minor_version == 0) {
2561 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
2562 /* don't put an ACCESS op in OPEN compound if O_EXCL,
2563 * because ACCESS will return permission denied for
2564 * all bits until close */
2565 data->o_res.access_request = data->o_arg.access = 0;
2566 } else if (nfs4_has_persistent_session(clp))
2567 data->o_arg.createmode = NFS4_CREATE_GUARDED;
2568 }
2569 return;
2570 unlock_no_action:
2571 trace_nfs4_cached_open(data->state);
2572 rcu_read_unlock();
2573 out_no_action:
2574 task->tk_action = NULL;
2575 out_wait:
2576 nfs4_sequence_done(task, &data->o_res.seq_res);
2577 }
2578
2579 static void nfs4_open_done(struct rpc_task *task, void *calldata)
2580 {
2581 struct nfs4_opendata *data = calldata;
2582
2583 data->rpc_status = task->tk_status;
2584
2585 if (!nfs4_sequence_process(task, &data->o_res.seq_res))
2586 return;
2587
2588 if (task->tk_status == 0) {
2589 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
2590 switch (data->o_res.f_attr->mode & S_IFMT) {
2591 case S_IFREG:
2592 break;
2593 case S_IFLNK:
2594 data->rpc_status = -ELOOP;
2595 break;
2596 case S_IFDIR:
2597 data->rpc_status = -EISDIR;
2598 break;
2599 default:
2600 data->rpc_status = -ENOTDIR;
2601 }
2602 }
2603 renew_lease(data->o_res.server, data->timestamp);
2604 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
2605 nfs_confirm_seqid(&data->owner->so_seqid, 0);
2606 }
2607 data->rpc_done = true;
2608 }
2609
2610 static void nfs4_open_release(void *calldata)
2611 {
2612 struct nfs4_opendata *data = calldata;
2613 struct nfs4_state *state = NULL;
2614
2615 /* In case of error, no cleanup! */
2616 if (data->rpc_status != 0 || !data->rpc_done) {
2617 nfs_release_seqid(data->o_arg.seqid);
2618 goto out_free;
2619 }
2620 /* If this request hasn't been cancelled, do nothing */
2621 if (!data->cancelled)
2622 goto out_free;
2623 /* In case we need an open_confirm, no cleanup! */
2624 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
2625 goto out_free;
2626 state = nfs4_opendata_to_nfs4_state(data);
2627 if (!IS_ERR(state))
2628 nfs4_close_state(state, data->o_arg.fmode);
2629 out_free:
2630 nfs4_opendata_put(data);
2631 }
2632
2633 static const struct rpc_call_ops nfs4_open_ops = {
2634 .rpc_call_prepare = nfs4_open_prepare,
2635 .rpc_call_done = nfs4_open_done,
2636 .rpc_release = nfs4_open_release,
2637 };
2638
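/*
 * Issue the OPEN compound as an asynchronous RPC and wait for it to
 * complete. A NULL open context indicates a state-recovery open, which
 * is sent on a privileged session slot; otherwise a LAYOUTGET may be
 * prepared to ride along with the OPEN.
 */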
2639 static int nfs4_run_open_task(struct nfs4_opendata *data,
2640 struct nfs_open_context *ctx)
2641 {
2642 struct inode *dir = d_inode(data->dir);
2643 struct nfs_server *server = NFS_SERVER(dir);
2644 struct nfs_openargs *o_arg = &data->o_arg;
2645 struct nfs_openres *o_res = &data->o_res;
2646 struct rpc_task *task;
2647 struct rpc_message msg = {
2648 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
2649 .rpc_argp = o_arg,
2650 .rpc_resp = o_res,
2651 .rpc_cred = data->owner->so_cred,
2652 };
2653 struct rpc_task_setup task_setup_data = {
2654 .rpc_client = server->client,
2655 .rpc_message = &msg,
2656 .callback_ops = &nfs4_open_ops,
2657 .callback_data = data,
2658 .workqueue = nfsiod_workqueue,
2659 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
2660 };
2661 int status;
2662
2663 if (nfs_server_capable(dir, NFS_CAP_MOVEABLE))
2664 task_setup_data.flags |= RPC_TASK_MOVEABLE;
2665
2666 kref_get(&data->kref);
2667 data->rpc_done = false;
2668 data->rpc_status = 0;
2669 data->cancelled = false;
2670 data->is_recover = false;
2671 if (!ctx) {
2672 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 1);
2673 data->is_recover = true;
2674 task_setup_data.flags |= RPC_TASK_TIMEOUT;
2675 } else {
2676 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 0);
2677 pnfs_lgopen_prepare(data, ctx);
2678 }
2679 task = rpc_run_task(&task_setup_data);
2680 if (IS_ERR(task))
2681 return PTR_ERR(task);
2682 status = rpc_wait_for_completion_task(task);
2683 if (status != 0) {
2684 data->cancelled = true;
2685 smp_wmb();
2686 } else
2687 status = data->rpc_status;
2688 rpc_put_task(task);
2689
2690 return status;
2691 }
2692
2693 static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
2694 {
2695 struct inode *dir = d_inode(data->dir);
2696 struct nfs_openres *o_res = &data->o_res;
2697 int status;
2698
2699 status = nfs4_run_open_task(data, NULL);
2700 if (status != 0 || !data->rpc_done)
2701 return status;
2702
2703 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
2704
2705 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM)
2706 status = _nfs4_proc_open_confirm(data);
2707
2708 return status;
2709 }
2710
2711 /*
2712 * Additional permission checks in order to distinguish between an
2713 * open for read, and an open for execute. This works around the
2714 * fact that NFSv4 OPEN treats read and execute permissions as being
2715 * the same.
2716 * Note that in the non-execute case, we want to turn off permission
2717 * checking if we just created a new file (POSIX open() semantics).
2718 */
2719 static int nfs4_opendata_access(const struct cred *cred,
2720 struct nfs4_opendata *opendata,
2721 struct nfs4_state *state, fmode_t fmode)
2722 {
2723 struct nfs_access_entry cache;
2724 u32 mask, flags;
2725
2726 /* access call failed or for some reason the server doesn't
2727 * support any access modes -- defer access call until later */
2728 if (opendata->o_res.access_supported == 0)
2729 return 0;
2730
2731 mask = 0;
2732 if (fmode & FMODE_EXEC) {
2733 /* ONLY check for exec rights */
2734 if (S_ISDIR(state->inode->i_mode))
2735 mask = NFS4_ACCESS_LOOKUP;
2736 else
2737 mask = NFS4_ACCESS_EXECUTE;
2738 } else if ((fmode & FMODE_READ) && !opendata->file_created)
2739 mask = NFS4_ACCESS_READ;
2740
2741 nfs_access_set_mask(&cache, opendata->o_res.access_result);
2742 nfs_access_add_cache(state->inode, &cache, cred);
2743
2744 flags = NFS4_ACCESS_READ | NFS4_ACCESS_EXECUTE | NFS4_ACCESS_LOOKUP;
2745 if ((mask & ~cache.mask & flags) == 0)
2746 return 0;
2747
2748 return -EACCES;
2749 }
2750
2751 /*
2752 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
2753 */
2754 static int _nfs4_proc_open(struct nfs4_opendata *data,
2755 struct nfs_open_context *ctx)
2756 {
2757 struct inode *dir = d_inode(data->dir);
2758 struct nfs_server *server = NFS_SERVER(dir);
2759 struct nfs_openargs *o_arg = &data->o_arg;
2760 struct nfs_openres *o_res = &data->o_res;
2761 int status;
2762
2763 status = nfs4_run_open_task(data, ctx);
2764 if (!data->rpc_done)
2765 return status;
2766 if (status != 0) {
2767 if (status == -NFS4ERR_BADNAME &&
2768 !(o_arg->open_flags & O_CREAT))
2769 return -ENOENT;
2770 return status;
2771 }
2772
2773 nfs_fattr_map_and_free_names(server, &data->f_attr);
2774
2775 if (o_arg->open_flags & O_CREAT) {
2776 if (o_arg->open_flags & O_EXCL)
2777 data->file_created = true;
2778 else if (o_res->cinfo.before != o_res->cinfo.after)
2779 data->file_created = true;
2780 if (data->file_created ||
2781 inode_peek_iversion_raw(dir) != o_res->cinfo.after)
2782 nfs4_update_changeattr(dir, &o_res->cinfo,
2783 o_res->f_attr->time_start,
2784 NFS_INO_INVALID_DATA);
2785 }
2786 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
2787 server->caps &= ~NFS_CAP_POSIX_LOCK;
2788 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2789 status = _nfs4_proc_open_confirm(data);
2790 if (status != 0)
2791 return status;
2792 }
2793 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) {
2794 struct nfs_fh *fh = &o_res->fh;
2795
2796 nfs4_sequence_free_slot(&o_res->seq_res);
2797 if (o_arg->claim == NFS4_OPEN_CLAIM_FH)
2798 fh = NFS_FH(d_inode(data->dentry));
2799 nfs4_proc_getattr(server, fh, o_res->f_attr, NULL);
2800 }
2801 return 0;
2802 }
2803
2804 /*
2805 * OPEN_EXPIRED:
2806 * reclaim state on the server after a network partition.
2807 * Assumes caller holds the appropriate lock
2808 */
2809 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2810 {
2811 struct nfs4_opendata *opendata;
2812 int ret;
2813
2814 opendata = nfs4_open_recoverdata_alloc(ctx, state, NFS4_OPEN_CLAIM_FH);
2815 if (IS_ERR(opendata))
2816 return PTR_ERR(opendata);
2817 /*
2818 * We're not recovering a delegation, so ask for no delegation.
2819 * Otherwise the recovery thread could deadlock with an outstanding
2820 * delegation return.
2821 */
2822 opendata->o_arg.open_flags = O_DIRECT;
2823 ret = nfs4_open_recover(opendata, state);
2824 if (ret == -ESTALE)
2825 d_drop(ctx->dentry);
2826 nfs4_opendata_put(opendata);
2827 return ret;
2828 }
2829
2830 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2831 {
2832 struct nfs_server *server = NFS_SERVER(state->inode);
2833 struct nfs4_exception exception = { };
2834 int err;
2835
2836 do {
2837 err = _nfs4_open_expired(ctx, state);
2838 trace_nfs4_open_expired(ctx, 0, err);
2839 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2840 continue;
2841 switch (err) {
2842 default:
2843 goto out;
2844 case -NFS4ERR_GRACE:
2845 case -NFS4ERR_DELAY:
2846 nfs4_handle_exception(server, err, &exception);
2847 err = 0;
2848 }
2849 } while (exception.retry);
2850 out:
2851 return err;
2852 }
2853
2854 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2855 {
2856 struct nfs_open_context *ctx;
2857 int ret;
2858
2859 ctx = nfs4_state_find_open_context(state);
2860 if (IS_ERR(ctx))
2861 return -EAGAIN;
2862 ret = nfs4_do_open_expired(ctx, state);
2863 put_nfs_open_context(ctx);
2864 return ret;
2865 }
2866
2867 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state,
2868 const nfs4_stateid *stateid)
2869 {
2870 nfs_remove_bad_delegation(state->inode, stateid);
2871 nfs_state_clear_delegation(state);
2872 }
2873
2874 static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
2875 {
2876 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
2877 nfs_finish_clear_delegation_stateid(state, NULL);
2878 }
2879
2880 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2881 {
2882 /* NFSv4.0 doesn't allow for delegation recovery on open expire */
2883 nfs40_clear_delegation_stateid(state);
2884 nfs_state_clear_open_state_flags(state);
2885 return nfs4_open_expired(sp, state);
2886 }
2887
2888 static int nfs40_test_and_free_expired_stateid(struct nfs_server *server,
2889 const nfs4_stateid *stateid,
2890 const struct cred *cred)
2891 {
2892 return -NFS4ERR_BAD_STATEID;
2893 }
2894
2895 #if defined(CONFIG_NFS_V4_1)
2896 static int nfs41_test_and_free_expired_stateid(struct nfs_server *server,
2897 const nfs4_stateid *stateid,
2898 const struct cred *cred)
2899 {
2900 int status;
2901
2902 switch (stateid->type) {
2903 default:
2904 break;
2905 case NFS4_INVALID_STATEID_TYPE:
2906 case NFS4_SPECIAL_STATEID_TYPE:
2907 return -NFS4ERR_BAD_STATEID;
2908 case NFS4_REVOKED_STATEID_TYPE:
2909 goto out_free;
2910 }
2911
2912 status = nfs41_test_stateid(server, stateid, cred);
2913 switch (status) {
2914 case -NFS4ERR_EXPIRED:
2915 case -NFS4ERR_ADMIN_REVOKED:
2916 case -NFS4ERR_DELEG_REVOKED:
2917 break;
2918 default:
2919 return status;
2920 }
2921 out_free:
2922 /* Ack the revoked state to the server */
2923 nfs41_free_stateid(server, stateid, cred, true);
2924 return -NFS4ERR_EXPIRED;
2925 }
2926
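/*
 * If the delegation has been marked for testing, ask the server whether
 * its stateid has expired or been revoked, and clear the delegation if
 * so. Returns NFS_OK once no further delegation recovery is needed.
 */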
2927 static int nfs41_check_delegation_stateid(struct nfs4_state *state)
2928 {
2929 struct nfs_server *server = NFS_SERVER(state->inode);
2930 nfs4_stateid stateid;
2931 struct nfs_delegation *delegation;
2932 const struct cred *cred = NULL;
2933 int status, ret = NFS_OK;
2934
2935 /* Get the delegation credential for use by test/free_stateid */
2936 rcu_read_lock();
2937 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2938 if (delegation == NULL) {
2939 rcu_read_unlock();
2940 nfs_state_clear_delegation(state);
2941 return NFS_OK;
2942 }
2943
2944 spin_lock(&delegation->lock);
2945 nfs4_stateid_copy(&stateid, &delegation->stateid);
2946
2947 if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
2948 &delegation->flags)) {
2949 spin_unlock(&delegation->lock);
2950 rcu_read_unlock();
2951 return NFS_OK;
2952 }
2953
2954 if (delegation->cred)
2955 cred = get_cred(delegation->cred);
2956 spin_unlock(&delegation->lock);
2957 rcu_read_unlock();
2958 status = nfs41_test_and_free_expired_stateid(server, &stateid, cred);
2959 trace_nfs4_test_delegation_stateid(state, NULL, status);
2960 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
2961 nfs_finish_clear_delegation_stateid(state, &stateid);
2962 else
2963 ret = status;
2964
2965 put_cred(cred);
2966 return ret;
2967 }
2968
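/*
 * Re-validate the delegation stateid cached in the nfs4_state: keep it
 * if it still matches a delegation we hold, otherwise fall back to the
 * open stateid.
 */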
2969 static void nfs41_delegation_recover_stateid(struct nfs4_state *state)
2970 {
2971 nfs4_stateid tmp;
2972
2973 if (test_bit(NFS_DELEGATED_STATE, &state->flags) &&
2974 nfs4_copy_delegation_stateid(state->inode, state->state,
2975 &tmp, NULL) &&
2976 nfs4_stateid_match_other(&state->stateid, &tmp))
2977 nfs_state_set_delegation(state, &tmp, state->state);
2978 else
2979 nfs_state_clear_delegation(state);
2980 }
2981
2982 /**
2983 * nfs41_check_expired_locks - possibly free a lock stateid
2984 *
2985 * @state: NFSv4 state for an inode
2986 *
2987 * Returns NFS_OK if recovery for this stateid is now finished.
2988 * Otherwise a negative NFS4ERR value is returned.
2989 */
2990 static int nfs41_check_expired_locks(struct nfs4_state *state)
2991 {
2992 int status, ret = NFS_OK;
2993 struct nfs4_lock_state *lsp, *prev = NULL;
2994 struct nfs_server *server = NFS_SERVER(state->inode);
2995
2996 if (!test_bit(LK_STATE_IN_USE, &state->flags))
2997 goto out;
2998
2999 spin_lock(&state->state_lock);
3000 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
3001 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
3002 const struct cred *cred = lsp->ls_state->owner->so_cred;
3003
3004 refcount_inc(&lsp->ls_count);
3005 spin_unlock(&state->state_lock);
3006
3007 nfs4_put_lock_state(prev);
3008 prev = lsp;
3009
3010 status = nfs41_test_and_free_expired_stateid(server,
3011 &lsp->ls_stateid,
3012 cred);
3013 trace_nfs4_test_lock_stateid(state, lsp, status);
3014 if (status == -NFS4ERR_EXPIRED ||
3015 status == -NFS4ERR_BAD_STATEID) {
3016 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
3017 lsp->ls_stateid.type = NFS4_INVALID_STATEID_TYPE;
3018 if (!recover_lost_locks)
3019 set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
3020 } else if (status != NFS_OK) {
3021 ret = status;
3022 nfs4_put_lock_state(prev);
3023 goto out;
3024 }
3025 spin_lock(&state->state_lock);
3026 }
3027 }
3028 spin_unlock(&state->state_lock);
3029 nfs4_put_lock_state(prev);
3030 out:
3031 return ret;
3032 }
3033
3034 /**
3035 * nfs41_check_open_stateid - possibly free an open stateid
3036 *
3037 * @state: NFSv4 state for an inode
3038 *
3039 * Returns NFS_OK if recovery for this stateid is now finished.
3040 * Otherwise a negative NFS4ERR value is returned.
3041 */
3042 static int nfs41_check_open_stateid(struct nfs4_state *state)
3043 {
3044 struct nfs_server *server = NFS_SERVER(state->inode);
3045 nfs4_stateid *stateid = &state->open_stateid;
3046 const struct cred *cred = state->owner->so_cred;
3047 int status;
3048
3049 if (test_bit(NFS_OPEN_STATE, &state->flags) == 0)
3050 return -NFS4ERR_BAD_STATEID;
3051 status = nfs41_test_and_free_expired_stateid(server, stateid, cred);
3052 trace_nfs4_test_open_stateid(state, NULL, status);
3053 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) {
3054 nfs_state_clear_open_state_flags(state);
3055 stateid->type = NFS4_INVALID_STATEID_TYPE;
3056 return status;
3057 }
3058 if (nfs_open_stateid_recover_openmode(state))
3059 return -NFS4ERR_OPENMODE;
3060 return NFS_OK;
3061 }
3062
3063 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
3064 {
3065 int status;
3066
3067 status = nfs41_check_delegation_stateid(state);
3068 if (status != NFS_OK)
3069 return status;
3070 nfs41_delegation_recover_stateid(state);
3071
3072 status = nfs41_check_expired_locks(state);
3073 if (status != NFS_OK)
3074 return status;
3075 status = nfs41_check_open_stateid(state);
3076 if (status != NFS_OK)
3077 status = nfs4_open_expired(sp, state);
3078 return status;
3079 }
3080 #endif
3081
3082 /*
3083 * On an EXCLUSIVE create, the server should send back a bitmask with FATTR4_*
3084 * fields corresponding to the attributes that were used to store the verifier.
3085 * Make sure we clobber those fields in the later setattr call.
3086 */
3087 static unsigned nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
3088 struct iattr *sattr, struct nfs4_label **label)
3089 {
3090 const __u32 *bitmask = opendata->o_arg.server->exclcreat_bitmask;
3091 __u32 attrset[3];
3092 unsigned ret;
3093 unsigned i;
3094
3095 for (i = 0; i < ARRAY_SIZE(attrset); i++) {
3096 attrset[i] = opendata->o_res.attrset[i];
3097 if (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE4_1)
3098 attrset[i] &= ~bitmask[i];
3099 }
3100
3101 ret = (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE) ?
3102 sattr->ia_valid : 0;
3103
3104 if ((attrset[1] & (FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET))) {
3105 if (sattr->ia_valid & ATTR_ATIME_SET)
3106 ret |= ATTR_ATIME_SET;
3107 else
3108 ret |= ATTR_ATIME;
3109 }
3110
3111 if ((attrset[1] & (FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET))) {
3112 if (sattr->ia_valid & ATTR_MTIME_SET)
3113 ret |= ATTR_MTIME_SET;
3114 else
3115 ret |= ATTR_MTIME;
3116 }
3117
3118 if (!(attrset[2] & FATTR4_WORD2_SECURITY_LABEL))
3119 *label = NULL;
3120 return ret;
3121 }
3122
3123 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
3124 struct nfs_open_context *ctx)
3125 {
3126 struct nfs4_state_owner *sp = opendata->owner;
3127 struct nfs_server *server = sp->so_server;
3128 struct dentry *dentry;
3129 struct nfs4_state *state;
3130 fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx);
3131 struct inode *dir = d_inode(opendata->dir);
3132 unsigned long dir_verifier;
3133 int ret;
3134
3135 dir_verifier = nfs_save_change_attribute(dir);
3136
3137 ret = _nfs4_proc_open(opendata, ctx);
3138 if (ret != 0)
3139 goto out;
3140
3141 state = _nfs4_opendata_to_nfs4_state(opendata);
3142 ret = PTR_ERR(state);
3143 if (IS_ERR(state))
3144 goto out;
3145 ctx->state = state;
3146 if (server->caps & NFS_CAP_POSIX_LOCK)
3147 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
3148 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK)
3149 set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags);
3150 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_PRESERVE_UNLINKED)
3151 set_bit(NFS_INO_PRESERVE_UNLINKED, &NFS_I(state->inode)->flags);
3152
3153 dentry = opendata->dentry;
3154 if (d_really_is_negative(dentry)) {
3155 struct dentry *alias;
3156 d_drop(dentry);
3157 alias = d_exact_alias(dentry, state->inode);
3158 if (!alias)
3159 alias = d_splice_alias(igrab(state->inode), dentry);
3160 /* d_splice_alias() can't fail here - it's a non-directory */
3161 if (alias) {
3162 dput(ctx->dentry);
3163 ctx->dentry = dentry = alias;
3164 }
3165 }
3166
3167 switch (opendata->o_arg.claim) {
3168 default:
3169 break;
3170 case NFS4_OPEN_CLAIM_NULL:
3171 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
3172 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
3173 if (!opendata->rpc_done)
3174 break;
3175 if (opendata->o_res.delegation.type != 0)
3176 dir_verifier = nfs_save_change_attribute(dir);
3177 nfs_set_verifier(dentry, dir_verifier);
3178 }
3179
3180 /* Parse layoutget results before we check for access */
3181 pnfs_parse_lgopen(state->inode, opendata->lgp, ctx);
3182
3183 ret = nfs4_opendata_access(sp->so_cred, opendata, state, acc_mode);
3184 if (ret != 0)
3185 goto out;
3186
3187 if (d_inode(dentry) == state->inode)
3188 nfs_inode_attach_open_context(ctx);
3189
3190 out:
3191 if (!opendata->cancelled) {
3192 if (opendata->lgp) {
3193 nfs4_lgopen_release(opendata->lgp);
3194 opendata->lgp = NULL;
3195 }
3196 nfs4_sequence_free_slot(&opendata->o_res.seq_res);
3197 }
3198 return ret;
3199 }
3200
3201 /*
3202 * Returns a referenced nfs4_state
3203 */
3204 static int _nfs4_do_open(struct inode *dir,
3205 struct nfs_open_context *ctx,
3206 int flags,
3207 const struct nfs4_open_createattrs *c,
3208 int *opened)
3209 {
3210 struct nfs4_state_owner *sp;
3211 struct nfs4_state *state = NULL;
3212 struct nfs_server *server = NFS_SERVER(dir);
3213 struct nfs4_opendata *opendata;
3214 struct dentry *dentry = ctx->dentry;
3215 const struct cred *cred = ctx->cred;
3216 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
3217 fmode_t fmode = _nfs4_ctx_to_openmode(ctx);
3218 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
3219 struct iattr *sattr = c->sattr;
3220 struct nfs4_label *label = c->label;
3221 int status;
3222
3223 /* Protect against reboot recovery conflicts */
3224 status = -ENOMEM;
3225 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
3226 if (sp == NULL) {
3227 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
3228 goto out_err;
3229 }
3230 status = nfs4_client_recover_expired_lease(server->nfs_client);
3231 if (status != 0)
3232 goto err_put_state_owner;
3233 if (d_really_is_positive(dentry))
3234 nfs4_return_incompatible_delegation(d_inode(dentry), fmode);
3235 status = -ENOMEM;
3236 if (d_really_is_positive(dentry))
3237 claim = NFS4_OPEN_CLAIM_FH;
3238 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags,
3239 c, claim, GFP_KERNEL);
3240 if (opendata == NULL)
3241 goto err_put_state_owner;
3242
3243 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
3244 if (!opendata->f_attr.mdsthreshold) {
3245 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
3246 if (!opendata->f_attr.mdsthreshold)
3247 goto err_opendata_put;
3248 }
3249 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
3250 }
3251 if (d_really_is_positive(dentry))
3252 opendata->state = nfs4_get_open_state(d_inode(dentry), sp);
3253
3254 status = _nfs4_open_and_get_state(opendata, ctx);
3255 if (status != 0)
3256 goto err_opendata_put;
3257 state = ctx->state;
3258
3259 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
3260 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
3261 unsigned attrs = nfs4_exclusive_attrset(opendata, sattr, &label);
3262 /*
3263 * Send the create attributes that were not set by the OPEN
3264 * with an extra SETATTR.
3265 */
3266 if (attrs || label) {
3267 unsigned ia_old = sattr->ia_valid;
3268
3269 sattr->ia_valid = attrs;
3270 nfs_fattr_init(opendata->o_res.f_attr);
3271 status = nfs4_do_setattr(state->inode, cred,
3272 opendata->o_res.f_attr, sattr,
3273 ctx, label);
3274 if (status == 0) {
3275 nfs_setattr_update_inode(state->inode, sattr,
3276 opendata->o_res.f_attr);
3277 nfs_setsecurity(state->inode, opendata->o_res.f_attr);
3278 }
3279 sattr->ia_valid = ia_old;
3280 }
3281 }
3282 if (opened && opendata->file_created)
3283 *opened = 1;
3284
3285 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
3286 *ctx_th = opendata->f_attr.mdsthreshold;
3287 opendata->f_attr.mdsthreshold = NULL;
3288 }
3289
3290 nfs4_opendata_put(opendata);
3291 nfs4_put_state_owner(sp);
3292 return 0;
3293 err_opendata_put:
3294 nfs4_opendata_put(opendata);
3295 err_put_state_owner:
3296 nfs4_put_state_owner(sp);
3297 out_err:
3298 return status;
3299 }
3300
3301
3302 static struct nfs4_state *nfs4_do_open(struct inode *dir,
3303 struct nfs_open_context *ctx,
3304 int flags,
3305 struct iattr *sattr,
3306 struct nfs4_label *label,
3307 int *opened)
3308 {
3309 struct nfs_server *server = NFS_SERVER(dir);
3310 struct nfs4_exception exception = {
3311 .interruptible = true,
3312 };
3313 struct nfs4_state *res;
3314 struct nfs4_open_createattrs c = {
3315 .label = label,
3316 .sattr = sattr,
3317 .verf = {
3318 [0] = (__u32)jiffies,
3319 [1] = (__u32)current->pid,
3320 },
3321 };
3322 int status;
3323
3324 do {
3325 status = _nfs4_do_open(dir, ctx, flags, &c, opened);
3326 res = ctx->state;
3327 trace_nfs4_open_file(ctx, flags, status);
3328 if (status == 0)
3329 break;
3330 /* NOTE: BAD_SEQID means the server and client disagree about the
3331 * book-keeping w.r.t. state-changing operations
3332 * (OPEN/CLOSE/LOCK/LOCKU...)
3333 * It is actually a sign of a bug on the client or on the server.
3334 *
3335 * If we receive a BAD_SEQID error in the particular case of
3336 * doing an OPEN, we assume that nfs_increment_open_seqid() will
3337 * have unhashed the old state_owner for us, and that we can
3338 * therefore safely retry using a new one. We should still warn
3339 * the user though...
3340 */
3341 if (status == -NFS4ERR_BAD_SEQID) {
3342 pr_warn_ratelimited("NFS: v4 server %s "
3343 " returned a bad sequence-id error!\n",
3344 NFS_SERVER(dir)->nfs_client->cl_hostname);
3345 exception.retry = 1;
3346 continue;
3347 }
3348 /*
3349 * BAD_STATEID on OPEN means that the server cancelled our
3350 * state before it received the OPEN_CONFIRM.
3351 * Recover by retrying the request as per the discussion
3352 * on Page 181 of RFC3530.
3353 */
3354 if (status == -NFS4ERR_BAD_STATEID) {
3355 exception.retry = 1;
3356 continue;
3357 }
3358 if (status == -NFS4ERR_EXPIRED) {
3359 nfs4_schedule_lease_recovery(server->nfs_client);
3360 exception.retry = 1;
3361 continue;
3362 }
3363 if (status == -EAGAIN) {
3364 /* We must have found a delegation */
3365 exception.retry = 1;
3366 continue;
3367 }
3368 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception))
3369 continue;
3370 res = ERR_PTR(nfs4_handle_exception(server,
3371 status, &exception));
3372 } while (exception.retry);
3373 return res;
3374 }
3375
3376 static int _nfs4_do_setattr(struct inode *inode,
3377 struct nfs_setattrargs *arg,
3378 struct nfs_setattrres *res,
3379 const struct cred *cred,
3380 struct nfs_open_context *ctx)
3381 {
3382 struct nfs_server *server = NFS_SERVER(inode);
3383 struct rpc_message msg = {
3384 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
3385 .rpc_argp = arg,
3386 .rpc_resp = res,
3387 .rpc_cred = cred,
3388 };
3389 const struct cred *delegation_cred = NULL;
3390 unsigned long timestamp = jiffies;
3391 bool truncate;
3392 int status;
3393
3394 nfs_fattr_init(res->fattr);
3395
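	/* Stateid selection: a truncating SETATTR needs a write stateid, so
	 * try a write delegation stateid first, then the open/lock stateid
	 * from the caller's context; all other attribute changes are sent
	 * with the zero stateid. */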
3396 /* Servers should only apply open mode checks for file size changes */
3397 truncate = (arg->iap->ia_valid & ATTR_SIZE) ? true : false;
3398 if (!truncate) {
3399 nfs4_inode_make_writeable(inode);
3400 goto zero_stateid;
3401 }
3402
3403 if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) {
3404 /* Use that stateid */
3405 } else if (ctx != NULL && ctx->state) {
3406 struct nfs_lock_context *l_ctx;
3407 if (!nfs4_valid_open_stateid(ctx->state))
3408 return -EBADF;
3409 l_ctx = nfs_get_lock_context(ctx);
3410 if (IS_ERR(l_ctx))
3411 return PTR_ERR(l_ctx);
3412 status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx,
3413 &arg->stateid, &delegation_cred);
3414 nfs_put_lock_context(l_ctx);
3415 if (status == -EIO)
3416 return -EBADF;
3417 else if (status == -EAGAIN)
3418 goto zero_stateid;
3419 } else {
3420 zero_stateid:
3421 nfs4_stateid_copy(&arg->stateid, &zero_stateid);
3422 }
3423 if (delegation_cred)
3424 msg.rpc_cred = delegation_cred;
3425
3426 status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1);
3427
3428 put_cred(delegation_cred);
3429 if (status == 0 && ctx != NULL)
3430 renew_lease(server, timestamp);
3431 trace_nfs4_setattr(inode, &arg->stateid, status);
3432 return status;
3433 }
3434
3435 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
3436 struct nfs_fattr *fattr, struct iattr *sattr,
3437 struct nfs_open_context *ctx, struct nfs4_label *ilabel)
3438 {
3439 struct nfs_server *server = NFS_SERVER(inode);
3440 __u32 bitmask[NFS4_BITMASK_SZ];
3441 struct nfs4_state *state = ctx ? ctx->state : NULL;
3442 struct nfs_setattrargs arg = {
3443 .fh = NFS_FH(inode),
3444 .iap = sattr,
3445 .server = server,
3446 .bitmask = bitmask,
3447 .label = ilabel,
3448 };
3449 struct nfs_setattrres res = {
3450 .fattr = fattr,
3451 .server = server,
3452 };
3453 struct nfs4_exception exception = {
3454 .state = state,
3455 .inode = inode,
3456 .stateid = &arg.stateid,
3457 };
3458 unsigned long adjust_flags = NFS_INO_INVALID_CHANGE |
3459 NFS_INO_INVALID_CTIME;
3460 int err;
3461
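	/* Note which cached attributes this SETATTR is going to change, so
	 * that nfs4_bitmap_copy_adjust() below asks the server to return
	 * fresh values for them in the reply. */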
3462 if (sattr->ia_valid & (ATTR_MODE | ATTR_KILL_SUID | ATTR_KILL_SGID))
3463 adjust_flags |= NFS_INO_INVALID_MODE;
3464 if (sattr->ia_valid & (ATTR_UID | ATTR_GID))
3465 adjust_flags |= NFS_INO_INVALID_OTHER;
3466 if (sattr->ia_valid & ATTR_ATIME)
3467 adjust_flags |= NFS_INO_INVALID_ATIME;
3468 if (sattr->ia_valid & ATTR_MTIME)
3469 adjust_flags |= NFS_INO_INVALID_MTIME;
3470
3471 do {
3472 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label),
3473 inode, adjust_flags);
3474
3475 err = _nfs4_do_setattr(inode, &arg, &res, cred, ctx);
3476 switch (err) {
3477 case -NFS4ERR_OPENMODE:
3478 if (!(sattr->ia_valid & ATTR_SIZE)) {
3479 pr_warn_once("NFSv4: server %s is incorrectly "
3480 "applying open mode checks to "
3481 "a SETATTR that is not "
3482 "changing file size.\n",
3483 server->nfs_client->cl_hostname);
3484 }
3485 if (state && !(state->state & FMODE_WRITE)) {
3486 err = -EBADF;
3487 if (sattr->ia_valid & ATTR_OPEN)
3488 err = -EACCES;
3489 goto out;
3490 }
3491 }
3492 err = nfs4_handle_exception(server, err, &exception);
3493 } while (exception.retry);
3494 out:
3495 return err;
3496 }
3497
3498 static bool
3499 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task)
3500 {
3501 if (inode == NULL || !nfs_have_layout(inode))
3502 return false;
3503
3504 return pnfs_wait_on_layoutreturn(inode, task);
3505 }
3506
3507 /*
3508 * Update the seqid of an open stateid
3509 */
3510 static void nfs4_sync_open_stateid(nfs4_stateid *dst,
3511 struct nfs4_state *state)
3512 {
3513 __be32 seqid_open;
3514 u32 dst_seqid;
3515 int seq;
3516
3517 for (;;) {
3518 if (!nfs4_valid_open_stateid(state))
3519 break;
3520 seq = read_seqbegin(&state->seqlock);
3521 if (!nfs4_state_match_open_stateid_other(state, dst)) {
3522 nfs4_stateid_copy(dst, &state->open_stateid);
3523 if (read_seqretry(&state->seqlock, seq))
3524 continue;
3525 break;
3526 }
3527 seqid_open = state->open_stateid.seqid;
3528 if (read_seqretry(&state->seqlock, seq))
3529 continue;
3530
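		/* Serial-number comparison: the signed cast below keeps the
		 * test correct when the 32-bit seqid wraps around. */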
3531 dst_seqid = be32_to_cpu(dst->seqid);
3532 if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) < 0)
3533 dst->seqid = seqid_open;
3534 break;
3535 }
3536 }
3537
3538 /*
3539 * Update the seqid of an open stateid after receiving
3540 * NFS4ERR_OLD_STATEID
3541 */
3542 static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst,
3543 struct nfs4_state *state)
3544 {
3545 __be32 seqid_open;
3546 u32 dst_seqid;
3547 bool ret;
3548 int seq, status = -EAGAIN;
3549 DEFINE_WAIT(wait);
3550
3551 for (;;) {
3552 ret = false;
3553 if (!nfs4_valid_open_stateid(state))
3554 break;
3555 seq = read_seqbegin(&state->seqlock);
3556 if (!nfs4_state_match_open_stateid_other(state, dst)) {
3557 if (read_seqretry(&state->seqlock, seq))
3558 continue;
3559 break;
3560 }
3561
3562 write_seqlock(&state->seqlock);
3563 seqid_open = state->open_stateid.seqid;
3564
3565 dst_seqid = be32_to_cpu(dst->seqid);
3566
3567 /* Did another OPEN bump the state's seqid? try again: */
3568 if ((s32)(be32_to_cpu(seqid_open) - dst_seqid) > 0) {
3569 dst->seqid = seqid_open;
3570 write_sequnlock(&state->seqlock);
3571 ret = true;
3572 break;
3573 }
3574
3575 /* server says we're behind but we haven't seen the update yet */
3576 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
3577 prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
3578 write_sequnlock(&state->seqlock);
3579 trace_nfs4_close_stateid_update_wait(state->inode, dst, 0);
3580
3581 if (fatal_signal_pending(current))
3582 status = -EINTR;
3583 else
3584 if (schedule_timeout(5*HZ) != 0)
3585 status = 0;
3586
3587 finish_wait(&state->waitq, &wait);
3588
3589 if (!status)
3590 continue;
3591 if (status == -EINTR)
3592 break;
3593
3594 /* we slept the whole 5 seconds, we must have lost a seqid */
3595 dst->seqid = cpu_to_be32(dst_seqid + 1);
3596 ret = true;
3597 break;
3598 }
3599
3600 return ret;
3601 }
3602
3603 struct nfs4_closedata {
3604 struct inode *inode;
3605 struct nfs4_state *state;
3606 struct nfs_closeargs arg;
3607 struct nfs_closeres res;
3608 struct {
3609 struct nfs4_layoutreturn_args arg;
3610 struct nfs4_layoutreturn_res res;
3611 struct nfs4_xdr_opaque_data ld_private;
3612 u32 roc_barrier;
3613 bool roc;
3614 } lr;
3615 struct nfs_fattr fattr;
3616 unsigned long timestamp;
3617 };
3618
3619 static void nfs4_free_closedata(void *data)
3620 {
3621 struct nfs4_closedata *calldata = data;
3622 struct nfs4_state_owner *sp = calldata->state->owner;
3623 struct super_block *sb = calldata->state->inode->i_sb;
3624
3625 if (calldata->lr.roc)
3626 pnfs_roc_release(&calldata->lr.arg, &calldata->lr.res,
3627 calldata->res.lr_ret);
3628 nfs4_put_open_state(calldata->state);
3629 nfs_free_seqid(calldata->arg.seqid);
3630 nfs4_put_state_owner(sp);
3631 nfs_sb_deactive(sb);
3632 kfree(calldata);
3633 }
3634
3635 static void nfs4_close_done(struct rpc_task *task, void *data)
3636 {
3637 struct nfs4_closedata *calldata = data;
3638 struct nfs4_state *state = calldata->state;
3639 struct nfs_server *server = NFS_SERVER(calldata->inode);
3640 nfs4_stateid *res_stateid = NULL;
3641 struct nfs4_exception exception = {
3642 .state = state,
3643 .inode = calldata->inode,
3644 .stateid = &calldata->arg.stateid,
3645 };
3646
3647 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
3648 return;
3649 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
3650
3651 /* Handle Layoutreturn errors */
3652 if (pnfs_roc_done(task, &calldata->arg.lr_args, &calldata->res.lr_res,
3653 &calldata->res.lr_ret) == -EAGAIN)
3654 goto out_restart;
3655
3656 /* We are done with the inode, and in the process of freeing
3657 * the state_owner. Keep this around to process errors.
3658 */
3659 switch (task->tk_status) {
3660 case 0:
3661 res_stateid = &calldata->res.stateid;
3662 renew_lease(server, calldata->timestamp);
3663 break;
3664 case -NFS4ERR_ACCESS:
3665 if (calldata->arg.bitmask != NULL) {
3666 calldata->arg.bitmask = NULL;
3667 calldata->res.fattr = NULL;
3668 goto out_restart;
3669
3670 }
3671 break;
3672 case -NFS4ERR_OLD_STATEID:
3673 /* Did we race with OPEN? */
3674 if (nfs4_refresh_open_old_stateid(&calldata->arg.stateid,
3675 state))
3676 goto out_restart;
3677 goto out_release;
3678 case -NFS4ERR_ADMIN_REVOKED:
3679 case -NFS4ERR_STALE_STATEID:
3680 case -NFS4ERR_EXPIRED:
3681 nfs4_free_revoked_stateid(server,
3682 &calldata->arg.stateid,
3683 task->tk_msg.rpc_cred);
3684 fallthrough;
3685 case -NFS4ERR_BAD_STATEID:
3686 if (calldata->arg.fmode == 0)
3687 break;
3688 fallthrough;
3689 default:
3690 task->tk_status = nfs4_async_handle_exception(task,
3691 server, task->tk_status, &exception);
3692 if (exception.retry)
3693 goto out_restart;
3694 }
3695 nfs_clear_open_stateid(state, &calldata->arg.stateid,
3696 res_stateid, calldata->arg.fmode);
3697 out_release:
3698 task->tk_status = 0;
3699 nfs_release_seqid(calldata->arg.seqid);
3700 nfs_refresh_inode(calldata->inode, &calldata->fattr);
3701 dprintk("%s: ret = %d\n", __func__, task->tk_status);
3702 return;
3703 out_restart:
3704 task->tk_status = 0;
3705 rpc_restart_call_prepare(task);
3706 goto out_release;
3707 }
3708
3709 static void nfs4_close_prepare(struct rpc_task *task, void *data)
3710 {
3711 struct nfs4_closedata *calldata = data;
3712 struct nfs4_state *state = calldata->state;
3713 struct inode *inode = calldata->inode;
3714 struct nfs_server *server = NFS_SERVER(inode);
3715 struct pnfs_layout_hdr *lo;
3716 bool is_rdonly, is_wronly, is_rdwr;
3717 int call_close = 0;
3718
3719 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
3720 goto out_wait;
3721
3722 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
3723 spin_lock(&state->owner->so_lock);
3724 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
3725 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
3726 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
3727 /* Calculate the change in open mode */
3728 calldata->arg.fmode = 0;
3729 if (state->n_rdwr == 0) {
3730 if (state->n_rdonly == 0)
3731 call_close |= is_rdonly;
3732 else if (is_rdonly)
3733 calldata->arg.fmode |= FMODE_READ;
3734 if (state->n_wronly == 0)
3735 call_close |= is_wronly;
3736 else if (is_wronly)
3737 calldata->arg.fmode |= FMODE_WRITE;
3738 if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
3739 call_close |= is_rdwr;
3740 } else if (is_rdwr)
3741 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
3742
3743 nfs4_sync_open_stateid(&calldata->arg.stateid, state);
3744 if (!nfs4_valid_open_stateid(state))
3745 call_close = 0;
3746 spin_unlock(&state->owner->so_lock);
3747
3748 if (!call_close) {
3749 /* Note: exit _without_ calling nfs4_close_done */
3750 goto out_no_action;
3751 }
3752
3753 if (!calldata->lr.roc && nfs4_wait_on_layoutreturn(inode, task)) {
3754 nfs_release_seqid(calldata->arg.seqid);
3755 goto out_wait;
3756 }
3757
3758 lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL;
3759 if (lo && !pnfs_layout_is_valid(lo)) {
3760 calldata->arg.lr_args = NULL;
3761 calldata->res.lr_res = NULL;
3762 }
3763
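	/* No open modes remain, so send a full CLOSE rather than an
	 * OPEN_DOWNGRADE. */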
3764 if (calldata->arg.fmode == 0)
3765 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
3766
3767 if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) {
3768 /* Close-to-open cache consistency revalidation */
3769 if (!nfs4_have_delegation(inode, FMODE_READ, 0)) {
3770 nfs4_bitmask_set(calldata->arg.bitmask_store,
3771 server->cache_consistency_bitmask,
3772 inode, 0);
3773 calldata->arg.bitmask = calldata->arg.bitmask_store;
3774 } else
3775 calldata->arg.bitmask = NULL;
3776 }
3777
3778 calldata->arg.share_access =
3779 nfs4_fmode_to_share_access(calldata->arg.fmode);
3780
3781 if (calldata->res.fattr == NULL)
3782 calldata->arg.bitmask = NULL;
3783 else if (calldata->arg.bitmask == NULL)
3784 calldata->res.fattr = NULL;
3785 calldata->timestamp = jiffies;
3786 if (nfs4_setup_sequence(NFS_SERVER(inode)->nfs_client,
3787 &calldata->arg.seq_args,
3788 &calldata->res.seq_res,
3789 task) != 0)
3790 nfs_release_seqid(calldata->arg.seqid);
3791 return;
3792 out_no_action:
3793 task->tk_action = NULL;
3794 out_wait:
3795 nfs4_sequence_done(task, &calldata->res.seq_res);
3796 }
3797
3798 static const struct rpc_call_ops nfs4_close_ops = {
3799 .rpc_call_prepare = nfs4_close_prepare,
3800 .rpc_call_done = nfs4_close_done,
3801 .rpc_release = nfs4_free_closedata,
3802 };
3803
3804 /*
3805 * It is possible for data to be read/written from a mem-mapped file
3806 * after the sys_close call (which hits the vfs layer as a flush).
3807 * This means that we can't safely call nfsv4 close on a file until
3808 * the inode is cleared. This in turn means that we are not good
3809 * NFSv4 citizens - we do not tell the server to update the file's
3810 * share state even when we are done with one of the three share
3811 * stateids in the inode.
3812 *
3813 * NOTE: Caller must be holding the sp->so_owner semaphore!
3814 */
3815 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
3816 {
3817 struct nfs_server *server = NFS_SERVER(state->inode);
3818 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
3819 struct nfs4_closedata *calldata;
3820 struct nfs4_state_owner *sp = state->owner;
3821 struct rpc_task *task;
3822 struct rpc_message msg = {
3823 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
3824 .rpc_cred = state->owner->so_cred,
3825 };
3826 struct rpc_task_setup task_setup_data = {
3827 .rpc_client = server->client,
3828 .rpc_message = &msg,
3829 .callback_ops = &nfs4_close_ops,
3830 .workqueue = nfsiod_workqueue,
3831 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
3832 };
3833 int status = -ENOMEM;
3834
3835 if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE))
3836 task_setup_data.flags |= RPC_TASK_MOVEABLE;
3837
3838 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
3839 &task_setup_data.rpc_client, &msg);
3840
3841 calldata = kzalloc(sizeof(*calldata), gfp_mask);
3842 if (calldata == NULL)
3843 goto out;
3844 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1, 0);
3845 calldata->inode = state->inode;
3846 calldata->state = state;
3847 calldata->arg.fh = NFS_FH(state->inode);
3848 if (!nfs4_copy_open_stateid(&calldata->arg.stateid, state))
3849 goto out_free_calldata;
3850 /* Serialization for the sequence id */
3851 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
3852 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
3853 if (IS_ERR(calldata->arg.seqid))
3854 goto out_free_calldata;
3855 nfs_fattr_init(&calldata->fattr);
3856 calldata->arg.fmode = 0;
3857 calldata->lr.arg.ld_private = &calldata->lr.ld_private;
3858 calldata->res.fattr = &calldata->fattr;
3859 calldata->res.seqid = calldata->arg.seqid;
3860 calldata->res.server = server;
3861 calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
3862 calldata->lr.roc = pnfs_roc(state->inode,
3863 &calldata->lr.arg, &calldata->lr.res, msg.rpc_cred);
3864 if (calldata->lr.roc) {
3865 calldata->arg.lr_args = &calldata->lr.arg;
3866 calldata->res.lr_res = &calldata->lr.res;
3867 }
3868 nfs_sb_active(calldata->inode->i_sb);
3869
3870 msg.rpc_argp = &calldata->arg;
3871 msg.rpc_resp = &calldata->res;
3872 task_setup_data.callback_data = calldata;
3873 task = rpc_run_task(&task_setup_data);
3874 if (IS_ERR(task))
3875 return PTR_ERR(task);
3876 status = 0;
3877 if (wait)
3878 status = rpc_wait_for_completion_task(task);
3879 rpc_put_task(task);
3880 return status;
3881 out_free_calldata:
3882 kfree(calldata);
3883 out:
3884 nfs4_put_open_state(state);
3885 nfs4_put_state_owner(sp);
3886 return status;
3887 }
3888
3889 static struct inode *
3890 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
3891 int open_flags, struct iattr *attr, int *opened)
3892 {
3893 struct nfs4_state *state;
3894 struct nfs4_label l, *label;
3895
3896 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
3897
3898 /* Protect against concurrent sillydeletes */
3899 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
3900
3901 nfs4_label_release_security(label);
3902
3903 if (IS_ERR(state))
3904 return ERR_CAST(state);
3905 return state->inode;
3906 }
3907
3908 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
3909 {
3910 struct dentry *dentry = ctx->dentry;
3911 if (ctx->state == NULL)
3912 return;
3913 if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
3914 nfs4_inode_set_return_delegation_on_close(d_inode(dentry));
3915 if (is_sync)
3916 nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx));
3917 else
3918 nfs4_close_state(ctx->state, _nfs4_ctx_to_openmode(ctx));
3919 }
3920
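/* Masks of the attribute bits defined by each minor version: every bit up
 * to and including that version's last attribute. Used below to sanity
 * check the bitmask returned by the server. */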
3921 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
3922 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
3923 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_OPEN_ARGUMENTS - 1UL)
3924
3925 #define FATTR4_WORD2_NFS42_TIME_DELEG_MASK \
3926 (FATTR4_WORD2_TIME_DELEG_MODIFY|FATTR4_WORD2_TIME_DELEG_ACCESS)
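/* The server supports delegated timestamps if it both advertises the
 * TIME_DELEG_* attributes and accepts the DELEG_TIMESTAMPS share access
 * want hint. */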
3927 static bool nfs4_server_delegtime_capable(struct nfs4_server_caps_res *res)
3928 {
3929 u32 share_access_want = res->open_caps.oa_share_access_want[0];
3930 u32 attr_bitmask = res->attr_bitmask[2];
3931
3932 return (share_access_want & NFS4_SHARE_WANT_DELEG_TIMESTAMPS) &&
3933 ((attr_bitmask & FATTR4_WORD2_NFS42_TIME_DELEG_MASK) ==
3934 FATTR4_WORD2_NFS42_TIME_DELEG_MASK);
3935 }
3936
3937 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3938 {
3939 u32 minorversion = server->nfs_client->cl_minorversion;
3940 u32 bitmask[3] = {
3941 [0] = FATTR4_WORD0_SUPPORTED_ATTRS,
3942 };
3943 struct nfs4_server_caps_arg args = {
3944 .fhandle = fhandle,
3945 .bitmask = bitmask,
3946 };
3947 struct nfs4_server_caps_res res = {};
3948 struct rpc_message msg = {
3949 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
3950 .rpc_argp = &args,
3951 .rpc_resp = &res,
3952 };
3953 int status;
3954 int i;
3955
3956 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS |
3957 FATTR4_WORD0_FH_EXPIRE_TYPE |
3958 FATTR4_WORD0_LINK_SUPPORT |
3959 FATTR4_WORD0_SYMLINK_SUPPORT |
3960 FATTR4_WORD0_ACLSUPPORT |
3961 FATTR4_WORD0_CASE_INSENSITIVE |
3962 FATTR4_WORD0_CASE_PRESERVING;
3963 if (minorversion)
3964 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT |
3965 FATTR4_WORD2_OPEN_ARGUMENTS;
3966
3967 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3968 if (status == 0) {
3969 bitmask[0] = (FATTR4_WORD0_SUPPORTED_ATTRS |
3970 FATTR4_WORD0_FH_EXPIRE_TYPE |
3971 FATTR4_WORD0_LINK_SUPPORT |
3972 FATTR4_WORD0_SYMLINK_SUPPORT |
3973 FATTR4_WORD0_ACLSUPPORT |
3974 FATTR4_WORD0_CASE_INSENSITIVE |
3975 FATTR4_WORD0_CASE_PRESERVING) &
3976 res.attr_bitmask[0];
3977 /* Sanity check the server answers */
3978 switch (minorversion) {
3979 case 0:
3980 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK;
3981 res.attr_bitmask[2] = 0;
3982 break;
3983 case 1:
3984 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK;
3985 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT &
3986 res.attr_bitmask[2];
3987 break;
3988 case 2:
3989 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
3990 bitmask[2] = (FATTR4_WORD2_SUPPATTR_EXCLCREAT |
3991 FATTR4_WORD2_OPEN_ARGUMENTS) &
3992 res.attr_bitmask[2];
3993 }
3994 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
3995 server->caps &= ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS |
3996 NFS_CAP_SYMLINKS | NFS_CAP_SECURITY_LABEL);
3997 server->fattr_valid = NFS_ATTR_FATTR_V4;
3998 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
3999 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
4000 server->caps |= NFS_CAP_ACLS;
4001 if (res.has_links != 0)
4002 server->caps |= NFS_CAP_HARDLINKS;
4003 if (res.has_symlinks != 0)
4004 server->caps |= NFS_CAP_SYMLINKS;
4005 if (res.case_insensitive)
4006 server->caps |= NFS_CAP_CASE_INSENSITIVE;
4007 if (res.case_preserving)
4008 server->caps |= NFS_CAP_CASE_PRESERVING;
4009 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
4010 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
4011 server->caps |= NFS_CAP_SECURITY_LABEL;
4012 #endif
4013 if (res.attr_bitmask[0] & FATTR4_WORD0_FS_LOCATIONS)
4014 server->caps |= NFS_CAP_FS_LOCATIONS;
4015 if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID))
4016 server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID;
4017 if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE))
4018 server->fattr_valid &= ~NFS_ATTR_FATTR_MODE;
4019 if (!(res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS))
4020 server->fattr_valid &= ~NFS_ATTR_FATTR_NLINK;
4021 if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER))
4022 server->fattr_valid &= ~(NFS_ATTR_FATTR_OWNER |
4023 NFS_ATTR_FATTR_OWNER_NAME);
4024 if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP))
4025 server->fattr_valid &= ~(NFS_ATTR_FATTR_GROUP |
4026 NFS_ATTR_FATTR_GROUP_NAME);
4027 if (!(res.attr_bitmask[1] & FATTR4_WORD1_SPACE_USED))
4028 server->fattr_valid &= ~NFS_ATTR_FATTR_SPACE_USED;
4029 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS))
4030 server->fattr_valid &= ~NFS_ATTR_FATTR_ATIME;
4031 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA))
4032 server->fattr_valid &= ~NFS_ATTR_FATTR_CTIME;
4033 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY))
4034 server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME;
4035 memcpy(server->attr_bitmask_nl, res.attr_bitmask,
4036 sizeof(server->attr_bitmask));
4037 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
4038
4039 if (res.open_caps.oa_share_access_want[0] &
4040 NFS4_SHARE_WANT_OPEN_XOR_DELEGATION)
4041 server->caps |= NFS_CAP_OPEN_XOR;
4042 if (nfs4_server_delegtime_capable(&res))
4043 server->caps |= NFS_CAP_DELEGTIME;
4044
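		/* The cache consistency bitmask is the minimal set of
		 * attributes (change, size, ctime, mtime) requested on
		 * operations such as CLOSE in order to revalidate the
		 * client's cached data. */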
4045 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
4046 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
4047 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
4048 server->cache_consistency_bitmask[2] = 0;
4049
4050 /* Avoid a regression due to buggy server */
4051 for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++)
4052 res.exclcreat_bitmask[i] &= res.attr_bitmask[i];
4053 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask,
4054 sizeof(server->exclcreat_bitmask));
4055
4056 server->acl_bitmask = res.acl_bitmask;
4057 server->fh_expire_type = res.fh_expire_type;
4058 }
4059
4060 return status;
4061 }
4062
4063 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
4064 {
4065 struct nfs4_exception exception = {
4066 .interruptible = true,
4067 };
4068 int err;
4069
4070 nfs4_server_set_init_caps(server);
4071 do {
4072 err = nfs4_handle_exception(server,
4073 _nfs4_server_capabilities(server, fhandle),
4074 &exception);
4075 } while (exception.retry);
4076 return err;
4077 }
4078
4079 static void test_fs_location_for_trunking(struct nfs4_fs_location *location,
4080 struct nfs_client *clp,
4081 struct nfs_server *server)
4082 {
4083 int i;
4084
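	/* For each server name in this fs_location entry, resolve it to an
	 * address and offer it to the RPC layer as a candidate transport;
	 * the transport is only kept if the session trunking probe
	 * (add_xprt_test) succeeds. */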
4085 for (i = 0; i < location->nservers; i++) {
4086 struct nfs4_string *srv_loc = &location->servers[i];
4087 struct sockaddr_storage addr;
4088 size_t addrlen;
4089 struct xprt_create xprt_args = {
4090 .ident = 0,
4091 .net = clp->cl_net,
4092 };
4093 struct nfs4_add_xprt_data xprtdata = {
4094 .clp = clp,
4095 };
4096 struct rpc_add_xprt_test rpcdata = {
4097 .add_xprt_test = clp->cl_mvops->session_trunk,
4098 .data = &xprtdata,
4099 };
4100 char *servername = NULL;
4101
4102 if (!srv_loc->len)
4103 continue;
4104
4105 addrlen = nfs_parse_server_name(srv_loc->data, srv_loc->len,
4106 &addr, sizeof(addr),
4107 clp->cl_net, server->port);
4108 if (!addrlen)
4109 return;
4110 xprt_args.dstaddr = (struct sockaddr *)&addr;
4111 xprt_args.addrlen = addrlen;
4112 servername = kmalloc(srv_loc->len + 1, GFP_KERNEL);
4113 if (!servername)
4114 return;
4115 memcpy(servername, srv_loc->data, srv_loc->len);
4116 servername[srv_loc->len] = '\0';
4117 xprt_args.servername = servername;
4118
4119 xprtdata.cred = nfs4_get_clid_cred(clp);
4120 rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
4121 rpc_clnt_setup_test_and_add_xprt,
4122 &rpcdata);
4123 if (xprtdata.cred)
4124 put_cred(xprtdata.cred);
4125 kfree(servername);
4126 }
4127 }
4128
4129 static bool _is_same_nfs4_pathname(struct nfs4_pathname *path1,
4130 struct nfs4_pathname *path2)
4131 {
4132 int i;
4133
4134 if (path1->ncomponents != path2->ncomponents)
4135 return false;
4136 for (i = 0; i < path1->ncomponents; i++) {
4137 if (path1->components[i].len != path2->components[i].len)
4138 return false;
4139 if (memcmp(path1->components[i].data, path2->components[i].data,
4140 path1->components[i].len))
4141 return false;
4142 }
4143 return true;
4144 }
4145
4146 static int _nfs4_discover_trunking(struct nfs_server *server,
4147 struct nfs_fh *fhandle)
4148 {
4149 struct nfs4_fs_locations *locations = NULL;
4150 struct page *page;
4151 const struct cred *cred;
4152 struct nfs_client *clp = server->nfs_client;
4153 const struct nfs4_state_maintenance_ops *ops =
4154 clp->cl_mvops->state_renewal_ops;
4155 int status = -ENOMEM, i;
4156
4157 cred = ops->get_state_renewal_cred(clp);
4158 if (cred == NULL) {
4159 cred = nfs4_get_clid_cred(clp);
4160 if (cred == NULL)
4161 return -ENOKEY;
4162 }
4163
4164 page = alloc_page(GFP_KERNEL);
4165 if (!page)
4166 goto out_put_cred;
4167 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
4168 if (!locations)
4169 goto out_free;
4170 locations->fattr = nfs_alloc_fattr();
4171 if (!locations->fattr)
4172 goto out_free_2;
4173
4174 status = nfs4_proc_get_locations(server, fhandle, locations, page,
4175 cred);
4176 if (status)
4177 goto out_free_3;
4178
4179 for (i = 0; i < locations->nlocations; i++) {
4180 if (!_is_same_nfs4_pathname(&locations->fs_path,
4181 &locations->locations[i].rootpath))
4182 continue;
4183 test_fs_location_for_trunking(&locations->locations[i], clp,
4184 server);
4185 }
4186 out_free_3:
4187 kfree(locations->fattr);
4188 out_free_2:
4189 kfree(locations);
4190 out_free:
4191 __free_page(page);
4192 out_put_cred:
4193 put_cred(cred);
4194 return status;
4195 }
4196
4197 static int nfs4_discover_trunking(struct nfs_server *server,
4198 struct nfs_fh *fhandle)
4199 {
4200 struct nfs4_exception exception = {
4201 .interruptible = true,
4202 };
4203 struct nfs_client *clp = server->nfs_client;
4204 int err = 0;
4205
4206 if (!nfs4_has_session(clp))
4207 goto out;
4208 do {
4209 err = nfs4_handle_exception(server,
4210 _nfs4_discover_trunking(server, fhandle),
4211 &exception);
4212 } while (exception.retry);
4213 out:
4214 return err;
4215 }
4216
4217 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
4218 struct nfs_fsinfo *info)
4219 {
4220 u32 bitmask[3];
4221 struct nfs4_lookup_root_arg args = {
4222 .bitmask = bitmask,
4223 };
4224 struct nfs4_lookup_res res = {
4225 .server = server,
4226 .fattr = info->fattr,
4227 .fh = fhandle,
4228 };
4229 struct rpc_message msg = {
4230 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
4231 .rpc_argp = &args,
4232 .rpc_resp = &res,
4233 };
4234
4235 bitmask[0] = nfs4_fattr_bitmap[0];
4236 bitmask[1] = nfs4_fattr_bitmap[1];
4237 /*
4238 * Process the label in the upcoming getfattr
4239 */
4240 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL;
4241
4242 nfs_fattr_init(info->fattr);
4243 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4244 }
4245
4246 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
4247 struct nfs_fsinfo *info)
4248 {
4249 struct nfs4_exception exception = {
4250 .interruptible = true,
4251 };
4252 int err;
4253 do {
4254 err = _nfs4_lookup_root(server, fhandle, info);
4255 trace_nfs4_lookup_root(server, fhandle, info->fattr, err);
4256 switch (err) {
4257 case 0:
4258 case -NFS4ERR_WRONGSEC:
4259 goto out;
4260 default:
4261 err = nfs4_handle_exception(server, err, &exception);
4262 }
4263 } while (exception.retry);
4264 out:
4265 return err;
4266 }
4267
4268 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
4269 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
4270 {
4271 struct rpc_auth_create_args auth_args = {
4272 .pseudoflavor = flavor,
4273 };
4274 struct rpc_auth *auth;
4275
4276 auth = rpcauth_create(&auth_args, server->client);
4277 if (IS_ERR(auth))
4278 return -EACCES;
4279 return nfs4_lookup_root(server, fhandle, info);
4280 }
4281
4282 /*
4283 * Retry pseudoroot lookup with various security flavors. We do this when:
4284 *
4285 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC
4286 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation
4287 *
4288 * Returns zero on success, or a negative NFS4ERR value, or a
4289 * negative errno value.
4290 */
4291 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
4292 struct nfs_fsinfo *info)
4293 {
4294 /* Per 3530bis 15.33.5 */
4295 static const rpc_authflavor_t flav_array[] = {
4296 RPC_AUTH_GSS_KRB5P,
4297 RPC_AUTH_GSS_KRB5I,
4298 RPC_AUTH_GSS_KRB5,
4299 RPC_AUTH_UNIX, /* courtesy */
4300 RPC_AUTH_NULL,
4301 };
4302 int status = -EPERM;
4303 size_t i;
4304
4305 if (server->auth_info.flavor_len > 0) {
4306 /* try each flavor specified by user */
4307 for (i = 0; i < server->auth_info.flavor_len; i++) {
4308 status = nfs4_lookup_root_sec(server, fhandle, info,
4309 server->auth_info.flavors[i]);
4310 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
4311 continue;
4312 break;
4313 }
4314 } else {
4315 /* no flavors specified by user, try default list */
4316 for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
4317 status = nfs4_lookup_root_sec(server, fhandle, info,
4318 flav_array[i]);
4319 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
4320 continue;
4321 break;
4322 }
4323 }
4324
4325 /*
4326 * -EACCES could mean that the user doesn't have correct permissions
4327 * to access the mount. It could also mean that we tried to mount
4328 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
4329 * existing mount programs don't handle -EACCES very well so it should
4330 * be mapped to -EPERM instead.
4331 */
4332 if (status == -EACCES)
4333 status = -EPERM;
4334 return status;
4335 }
4336
4337 /**
4338 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
4339 * @server: initialized nfs_server handle
4340 * @fhandle: we fill in the pseudo-fs root file handle
4341 * @info: we fill in an FSINFO struct
4342 * @auth_probe: probe the auth flavours
4343 *
4344 * Returns zero on success, or a negative errno.
4345 */
4346 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
4347 struct nfs_fsinfo *info,
4348 bool auth_probe)
4349 {
4350 int status = 0;
4351
4352 if (!auth_probe)
4353 status = nfs4_lookup_root(server, fhandle, info);
4354
4355 if (auth_probe || status == -NFS4ERR_WRONGSEC)
4356 status = server->nfs_client->cl_mvops->find_root_sec(server,
4357 fhandle, info);
4358
4359 if (status == 0)
4360 status = nfs4_server_capabilities(server, fhandle);
4361 if (status == 0)
4362 status = nfs4_do_fsinfo(server, fhandle, info);
4363
4364 return nfs4_map_errors(status);
4365 }
4366
4367 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
4368 struct nfs_fsinfo *info)
4369 {
4370 int error;
4371 struct nfs_fattr *fattr = info->fattr;
4372
4373 error = nfs4_server_capabilities(server, mntfh);
4374 if (error < 0) {
4375 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
4376 return error;
4377 }
4378
4379 error = nfs4_proc_getattr(server, mntfh, fattr, NULL);
4380 if (error < 0) {
4381 dprintk("nfs4_get_root: getattr error = %d\n", -error);
4382 goto out;
4383 }
4384
4385 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
4386 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
4387 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
4388
4389 out:
4390 return error;
4391 }
4392
4393 /*
4394 * Get locations and (maybe) other attributes of a referral.
4395 * Note that we'll actually follow the referral later when
4396 * we detect fsid mismatch in inode revalidation
4397 */
4398 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
4399 const struct qstr *name, struct nfs_fattr *fattr,
4400 struct nfs_fh *fhandle)
4401 {
4402 int status = -ENOMEM;
4403 struct page *page = NULL;
4404 struct nfs4_fs_locations *locations = NULL;
4405
4406 page = alloc_page(GFP_KERNEL);
4407 if (page == NULL)
4408 goto out;
4409 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
4410 if (locations == NULL)
4411 goto out;
4412
4413 locations->fattr = fattr;
4414
4415 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
4416 if (status != 0)
4417 goto out;
4418
4419 /*
4420 * If the fsid didn't change, this is a migration event, not a
4421 * referral. Cause us to drop into the exception handler, which
4422 * will kick off migration recovery.
4423 */
4424 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &fattr->fsid)) {
4425 dprintk("%s: server did not return a different fsid for"
4426 " a referral at %s\n", __func__, name->name);
4427 status = -NFS4ERR_MOVED;
4428 goto out;
4429 }
4430 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
4431 nfs_fixup_referral_attributes(fattr);
4432 memset(fhandle, 0, sizeof(struct nfs_fh));
4433 out:
4434 if (page)
4435 __free_page(page);
4436 kfree(locations);
4437 return status;
4438 }
4439
4440 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
4441 struct nfs_fattr *fattr, struct inode *inode)
4442 {
4443 __u32 bitmask[NFS4_BITMASK_SZ];
4444 struct nfs4_getattr_arg args = {
4445 .fh = fhandle,
4446 .bitmask = bitmask,
4447 };
4448 struct nfs4_getattr_res res = {
4449 .fattr = fattr,
4450 .server = server,
4451 };
4452 struct rpc_message msg = {
4453 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
4454 .rpc_argp = &args,
4455 .rpc_resp = &res,
4456 };
4457 unsigned short task_flags = 0;
4458
4459 if (nfs4_has_session(server->nfs_client))
4460 task_flags = RPC_TASK_MOVEABLE;
4461
4462 /* Is this an attribute revalidation, subject to softreval? */
4463 if (inode && (server->flags & NFS_MOUNT_SOFTREVAL))
4464 task_flags |= RPC_TASK_TIMEOUT;
4465
4466 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label), inode, 0);
4467 nfs_fattr_init(fattr);
4468 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
4469 return nfs4_do_call_sync(server->client, server, &msg,
4470 &args.seq_args, &res.seq_res, task_flags);
4471 }
4472
4473 int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
4474 struct nfs_fattr *fattr, struct inode *inode)
4475 {
4476 struct nfs4_exception exception = {
4477 .interruptible = true,
4478 };
4479 int err;
4480 do {
4481 err = _nfs4_proc_getattr(server, fhandle, fattr, inode);
4482 trace_nfs4_getattr(server, fhandle, fattr, err);
4483 err = nfs4_handle_exception(server, err,
4484 &exception);
4485 } while (exception.retry);
4486 return err;
4487 }
4488
4489 /*
4490 * The file is not closed if it is opened due to a request to change
4491 * the size of the file. The open call will not be needed once the
4492 * VFS layer lookup-intents are implemented.
4493 *
4494 * Close is called when the inode is destroyed.
4495 * If we haven't opened the file for O_WRONLY, we
4496 * need to do so in the size_change case to obtain a stateid.
4497 *
4498 * Got race?
4499 * Because OPEN is always done by name in nfsv4, it is
4500 * possible that we opened a different file by the same
4501 * name. We can recognize this race condition, but we
4502 * can't do anything about it besides returning an error.
4503 *
4504 * This will be fixed with VFS changes (lookup-intent).
4505 */
4506 static int
4507 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
4508 struct iattr *sattr)
4509 {
4510 struct inode *inode = d_inode(dentry);
4511 const struct cred *cred = NULL;
4512 struct nfs_open_context *ctx = NULL;
4513 int status;
4514
4515 if (pnfs_ld_layoutret_on_setattr(inode) &&
4516 sattr->ia_valid & ATTR_SIZE &&
4517 sattr->ia_size < i_size_read(inode))
4518 pnfs_commit_and_return_layout(inode);
4519
4520 nfs_fattr_init(fattr);
4521
4522 /* Deal with open(O_TRUNC) */
4523 if (sattr->ia_valid & ATTR_OPEN)
4524 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME);
4525
4526 /* Optimization: if the end result is no change, don't RPC */
4527 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
4528 return 0;
4529
4530 /* Search for an existing open(O_WRITE) file */
4531 if (sattr->ia_valid & ATTR_FILE) {
4532
4533 ctx = nfs_file_open_context(sattr->ia_file);
4534 if (ctx)
4535 cred = ctx->cred;
4536 }
4537
4538 /* Return any delegations if we're going to change ACLs */
4539 if ((sattr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
4540 nfs4_inode_make_writeable(inode);
4541
4542 status = nfs4_do_setattr(inode, cred, fattr, sattr, ctx, NULL);
4543 if (status == 0) {
4544 nfs_setattr_update_inode(inode, sattr, fattr);
4545 nfs_setsecurity(inode, fattr);
4546 }
4547 return status;
4548 }
4549
4550 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
4551 struct dentry *dentry, const struct qstr *name,
4552 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
4553 {
4554 struct nfs_server *server = NFS_SERVER(dir);
4555 int status;
4556 struct nfs4_lookup_arg args = {
4557 .bitmask = server->attr_bitmask,
4558 .dir_fh = NFS_FH(dir),
4559 .name = name,
4560 };
4561 struct nfs4_lookup_res res = {
4562 .server = server,
4563 .fattr = fattr,
4564 .fh = fhandle,
4565 };
4566 struct rpc_message msg = {
4567 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
4568 .rpc_argp = &args,
4569 .rpc_resp = &res,
4570 };
4571 unsigned short task_flags = 0;
4572
4573 if (nfs_server_capable(dir, NFS_CAP_MOVEABLE))
4574 task_flags = RPC_TASK_MOVEABLE;
4575
4576 /* Is this an attribute revalidation, subject to softreval? */
4577 if (nfs_lookup_is_soft_revalidate(dentry))
4578 task_flags |= RPC_TASK_TIMEOUT;
4579
4580 args.bitmask = nfs4_bitmask(server, fattr->label);
4581
4582 nfs_fattr_init(fattr);
4583
4584 dprintk("NFS call lookup %pd2\n", dentry);
4585 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
4586 status = nfs4_do_call_sync(clnt, server, &msg,
4587 &args.seq_args, &res.seq_res, task_flags);
4588 dprintk("NFS reply lookup: %d\n", status);
4589 return status;
4590 }
4591
4592 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
4593 {
4594 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
4595 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
4596 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
4597 fattr->nlink = 2;
4598 }
4599
4600 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
4601 struct dentry *dentry, const struct qstr *name,
4602 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
4603 {
4604 struct nfs4_exception exception = {
4605 .interruptible = true,
4606 };
4607 struct rpc_clnt *client = *clnt;
4608 int err;
4609 do {
4610 err = _nfs4_proc_lookup(client, dir, dentry, name, fhandle, fattr);
4611 trace_nfs4_lookup(dir, name, err);
4612 switch (err) {
4613 case -NFS4ERR_BADNAME:
4614 err = -ENOENT;
4615 goto out;
4616 case -NFS4ERR_MOVED:
4617 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
4618 if (err == -NFS4ERR_MOVED)
4619 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
4620 goto out;
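		/* The server wants a different security flavor for this name:
		 * negotiate one via SECINFO and retry the lookup on the new
		 * client, which is either handed back through *clnt or shut
		 * down at "out". */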
4621 case -NFS4ERR_WRONGSEC:
4622 err = -EPERM;
4623 if (client != *clnt)
4624 goto out;
4625 client = nfs4_negotiate_security(client, dir, name);
4626 if (IS_ERR(client))
4627 return PTR_ERR(client);
4628
4629 exception.retry = 1;
4630 break;
4631 default:
4632 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
4633 }
4634 } while (exception.retry);
4635
4636 out:
4637 if (err == 0)
4638 *clnt = client;
4639 else if (client != *clnt)
4640 rpc_shutdown_client(client);
4641
4642 return err;
4643 }
4644
4645 static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry, const struct qstr *name,
4646 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
4647 {
4648 int status;
4649 struct rpc_clnt *client = NFS_CLIENT(dir);
4650
4651 status = nfs4_proc_lookup_common(&client, dir, dentry, name, fhandle, fattr);
4652 if (client != NFS_CLIENT(dir)) {
4653 rpc_shutdown_client(client);
4654 nfs_fixup_secinfo_attributes(fattr);
4655 }
4656 return status;
4657 }
4658
4659 struct rpc_clnt *
4660 nfs4_proc_lookup_mountpoint(struct inode *dir, struct dentry *dentry,
4661 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
4662 {
4663 struct rpc_clnt *client = NFS_CLIENT(dir);
4664 int status;
4665
4666 status = nfs4_proc_lookup_common(&client, dir, dentry, &dentry->d_name,
4667 fhandle, fattr);
4668 if (status < 0)
4669 return ERR_PTR(status);
4670 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
4671 }
4672
4673 static int _nfs4_proc_lookupp(struct inode *inode,
4674 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
4675 {
4676 struct rpc_clnt *clnt = NFS_CLIENT(inode);
4677 struct nfs_server *server = NFS_SERVER(inode);
4678 int status;
4679 struct nfs4_lookupp_arg args = {
4680 .bitmask = server->attr_bitmask,
4681 .fh = NFS_FH(inode),
4682 };
4683 struct nfs4_lookupp_res res = {
4684 .server = server,
4685 .fattr = fattr,
4686 .fh = fhandle,
4687 };
4688 struct rpc_message msg = {
4689 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUPP],
4690 .rpc_argp = &args,
4691 .rpc_resp = &res,
4692 };
4693 unsigned short task_flags = 0;
4694
4695 if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL)
4696 task_flags |= RPC_TASK_TIMEOUT;
4697
4698 args.bitmask = nfs4_bitmask(server, fattr->label);
4699
4700 nfs_fattr_init(fattr);
4701
4702 dprintk("NFS call lookupp ino=0x%lx\n", inode->i_ino);
4703 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
4704 &res.seq_res, task_flags);
4705 dprintk("NFS reply lookupp: %d\n", status);
4706 return status;
4707 }
4708
4709 static int nfs4_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle,
4710 struct nfs_fattr *fattr)
4711 {
4712 struct nfs4_exception exception = {
4713 .interruptible = true,
4714 };
4715 int err;
4716 do {
4717 err = _nfs4_proc_lookupp(inode, fhandle, fattr);
4718 trace_nfs4_lookupp(inode, err);
4719 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4720 &exception);
4721 } while (exception.retry);
4722 return err;
4723 }
4724
4725 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry,
4726 const struct cred *cred)
4727 {
4728 struct nfs_server *server = NFS_SERVER(inode);
4729 struct nfs4_accessargs args = {
4730 .fh = NFS_FH(inode),
4731 .access = entry->mask,
4732 };
4733 struct nfs4_accessres res = {
4734 .server = server,
4735 };
4736 struct rpc_message msg = {
4737 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
4738 .rpc_argp = &args,
4739 .rpc_resp = &res,
4740 .rpc_cred = cred,
4741 };
4742 int status = 0;
4743
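	/* Without a delegation, piggy-back a GETATTR for the cache
	 * consistency attributes onto the ACCESS call. */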
4744 if (!nfs4_have_delegation(inode, FMODE_READ, 0)) {
4745 res.fattr = nfs_alloc_fattr();
4746 if (res.fattr == NULL)
4747 return -ENOMEM;
4748 args.bitmask = server->cache_consistency_bitmask;
4749 }
4750 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4751 if (!status) {
4752 nfs_access_set_mask(entry, res.access);
4753 if (res.fattr)
4754 nfs_refresh_inode(inode, res.fattr);
4755 }
4756 nfs_free_fattr(res.fattr);
4757 return status;
4758 }
4759
4760 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry,
4761 const struct cred *cred)
4762 {
4763 struct nfs4_exception exception = {
4764 .interruptible = true,
4765 };
4766 int err;
4767 do {
4768 err = _nfs4_proc_access(inode, entry, cred);
4769 trace_nfs4_access(inode, err);
4770 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4771 &exception);
4772 } while (exception.retry);
4773 return err;
4774 }
4775
4776 /*
4777 * TODO: For the time being, we don't try to get any attributes
4778 * along with any of the zero-copy operations READ, READDIR,
4779 * READLINK, WRITE.
4780 *
4781 * In the case of the first three, we want to put the GETATTR
4782 * after the read-type operation -- this is because it is hard
4783 * to predict the length of a GETATTR response in v4, and thus
4784 * align the READ data correctly. This means that the GETATTR
4785 * may end up partially falling into the page cache, and we should
4786 * shift it into the 'tail' of the xdr_buf before processing.
4787 * To do this efficiently, we need to know the total length
4788 * of data received, which doesn't seem to be available outside
4789 * of the RPC layer.
4790 *
4791 * In the case of WRITE, we also want to put the GETATTR after
4792 * the operation -- in this case because we want to make sure
4793 * we get the post-operation mtime and size.
4794 *
4795 * Both of these changes to the XDR layer would in fact be quite
4796 * minor, but I decided to leave them for a subsequent patch.
4797 */
4798 static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
4799 unsigned int pgbase, unsigned int pglen)
4800 {
4801 struct nfs4_readlink args = {
4802 .fh = NFS_FH(inode),
4803 .pgbase = pgbase,
4804 .pglen = pglen,
4805 .pages = &page,
4806 };
4807 struct nfs4_readlink_res res;
4808 struct rpc_message msg = {
4809 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
4810 .rpc_argp = &args,
4811 .rpc_resp = &res,
4812 };
4813
4814 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
4815 }
4816
4817 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
4818 unsigned int pgbase, unsigned int pglen)
4819 {
4820 struct nfs4_exception exception = {
4821 .interruptible = true,
4822 };
4823 int err;
4824 do {
4825 err = _nfs4_proc_readlink(inode, page, pgbase, pglen);
4826 trace_nfs4_readlink(inode, err);
4827 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4828 &exception);
4829 } while (exception.retry);
4830 return err;
4831 }
4832
4833 /*
4834 * This is just for mknod. open(O_CREAT) will always do ->open_context().
4835 */
4836 static int
4837 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
4838 int flags)
4839 {
4840 struct nfs_server *server = NFS_SERVER(dir);
4841 struct nfs4_label l, *ilabel;
4842 struct nfs_open_context *ctx;
4843 struct nfs4_state *state;
4844 int status = 0;
4845
4846 ctx = alloc_nfs_open_context(dentry, FMODE_READ, NULL);
4847 if (IS_ERR(ctx))
4848 return PTR_ERR(ctx);
4849
4850 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
4851
4852 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
4853 sattr->ia_mode &= ~current_umask();
4854 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL);
4855 if (IS_ERR(state)) {
4856 status = PTR_ERR(state);
4857 goto out;
4858 }
4859 out:
4860 nfs4_label_release_security(ilabel);
4861 put_nfs_open_context(ctx);
4862 return status;
4863 }
4864
4865 static int
4866 _nfs4_proc_remove(struct inode *dir, const struct qstr *name, u32 ftype)
4867 {
4868 struct nfs_server *server = NFS_SERVER(dir);
4869 struct nfs_removeargs args = {
4870 .fh = NFS_FH(dir),
4871 .name = *name,
4872 };
4873 struct nfs_removeres res = {
4874 .server = server,
4875 };
4876 struct rpc_message msg = {
4877 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
4878 .rpc_argp = &args,
4879 .rpc_resp = &res,
4880 };
4881 unsigned long timestamp = jiffies;
4882 int status;
4883
4884 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
4885 if (status == 0) {
4886 spin_lock(&dir->i_lock);
4887 /* Removing a directory decrements nlink in the parent */
4888 if (ftype == NF4DIR && dir->i_nlink > 2)
4889 nfs4_dec_nlink_locked(dir);
4890 nfs4_update_changeattr_locked(dir, &res.cinfo, timestamp,
4891 NFS_INO_INVALID_DATA);
4892 spin_unlock(&dir->i_lock);
4893 }
4894 return status;
4895 }
4896
4897 static int nfs4_proc_remove(struct inode *dir, struct dentry *dentry)
4898 {
4899 struct nfs4_exception exception = {
4900 .interruptible = true,
4901 };
4902 struct inode *inode = d_inode(dentry);
4903 int err;
4904
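	/* Removing the last link: return the delegation before the REMOVE.
	 * Otherwise nfs4_inode_make_writeable() only returns it when we do
	 * not hold a write delegation. */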
4905 if (inode) {
4906 if (inode->i_nlink == 1)
4907 nfs4_inode_return_delegation(inode);
4908 else
4909 nfs4_inode_make_writeable(inode);
4910 }
4911 do {
4912 err = _nfs4_proc_remove(dir, &dentry->d_name, NF4REG);
4913 trace_nfs4_remove(dir, &dentry->d_name, err);
4914 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4915 &exception);
4916 } while (exception.retry);
4917 return err;
4918 }
4919
4920 static int nfs4_proc_rmdir(struct inode *dir, const struct qstr *name)
4921 {
4922 struct nfs4_exception exception = {
4923 .interruptible = true,
4924 };
4925 int err;
4926
4927 do {
4928 err = _nfs4_proc_remove(dir, name, NF4DIR);
4929 trace_nfs4_remove(dir, name, err);
4930 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4931 &exception);
4932 } while (exception.retry);
4933 return err;
4934 }
4935
4936 static void nfs4_proc_unlink_setup(struct rpc_message *msg,
4937 struct dentry *dentry,
4938 struct inode *inode)
4939 {
4940 struct nfs_removeargs *args = msg->rpc_argp;
4941 struct nfs_removeres *res = msg->rpc_resp;
4942
4943 res->server = NFS_SB(dentry->d_sb);
4944 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
4945 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1, 0);
4946
4947 nfs_fattr_init(res->dir_attr);
4948
4949 if (inode) {
4950 nfs4_inode_return_delegation(inode);
4951 nfs_d_prune_case_insensitive_aliases(inode);
4952 }
4953 }
4954
4955 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
4956 {
4957 nfs4_setup_sequence(NFS_SB(data->dentry->d_sb)->nfs_client,
4958 &data->args.seq_args,
4959 &data->res.seq_res,
4960 task);
4961 }
4962
4963 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
4964 {
4965 struct nfs_unlinkdata *data = task->tk_calldata;
4966 struct nfs_removeres *res = &data->res;
4967
4968 if (!nfs4_sequence_done(task, &res->seq_res))
4969 return 0;
4970 if (nfs4_async_handle_error(task, res->server, NULL,
4971 &data->timeout) == -EAGAIN)
4972 return 0;
4973 if (task->tk_status == 0)
4974 nfs4_update_changeattr(dir, &res->cinfo,
4975 res->dir_attr->time_start,
4976 NFS_INO_INVALID_DATA);
4977 return 1;
4978 }
4979
4980 static void nfs4_proc_rename_setup(struct rpc_message *msg,
4981 struct dentry *old_dentry,
4982 struct dentry *new_dentry)
4983 {
4984 struct nfs_renameargs *arg = msg->rpc_argp;
4985 struct nfs_renameres *res = msg->rpc_resp;
4986 struct inode *old_inode = d_inode(old_dentry);
4987 struct inode *new_inode = d_inode(new_dentry);
4988
4989 if (old_inode)
4990 nfs4_inode_make_writeable(old_inode);
4991 if (new_inode)
4992 nfs4_inode_return_delegation(new_inode);
4993 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
4994 res->server = NFS_SB(old_dentry->d_sb);
4995 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1, 0);
4996 }
4997
4998 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
4999 {
5000 nfs4_setup_sequence(NFS_SERVER(data->old_dir)->nfs_client,
5001 &data->args.seq_args,
5002 &data->res.seq_res,
5003 task);
5004 }
5005
5006 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
5007 struct inode *new_dir)
5008 {
5009 struct nfs_renamedata *data = task->tk_calldata;
5010 struct nfs_renameres *res = &data->res;
5011
5012 if (!nfs4_sequence_done(task, &res->seq_res))
5013 return 0;
5014 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
5015 return 0;
5016
5017 if (task->tk_status == 0) {
5018 nfs_d_prune_case_insensitive_aliases(d_inode(data->old_dentry));
5019 if (new_dir != old_dir) {
5020 /* Note: If we moved a directory, nlink will change */
5021 nfs4_update_changeattr(old_dir, &res->old_cinfo,
5022 res->old_fattr->time_start,
5023 NFS_INO_INVALID_NLINK |
5024 NFS_INO_INVALID_DATA);
5025 nfs4_update_changeattr(new_dir, &res->new_cinfo,
5026 res->new_fattr->time_start,
5027 NFS_INO_INVALID_NLINK |
5028 NFS_INO_INVALID_DATA);
5029 } else
5030 nfs4_update_changeattr(old_dir, &res->old_cinfo,
5031 res->old_fattr->time_start,
5032 NFS_INO_INVALID_DATA);
5033 }
5034 return 1;
5035 }
5036
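/*
 * LINK: make the source inode writeable (returning a read-only delegation
 * if one is held) before issuing the RPC, then update the directory's
 * change attribute and the inode's link count from the post-op attributes.
 */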
5037 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
5038 {
5039 struct nfs_server *server = NFS_SERVER(inode);
5040 __u32 bitmask[NFS4_BITMASK_SZ];
5041 struct nfs4_link_arg arg = {
5042 .fh = NFS_FH(inode),
5043 .dir_fh = NFS_FH(dir),
5044 .name = name,
5045 .bitmask = bitmask,
5046 };
5047 struct nfs4_link_res res = {
5048 .server = server,
5049 };
5050 struct rpc_message msg = {
5051 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
5052 .rpc_argp = &arg,
5053 .rpc_resp = &res,
5054 };
5055 int status = -ENOMEM;
5056
5057 res.fattr = nfs_alloc_fattr_with_label(server);
5058 if (res.fattr == NULL)
5059 goto out;
5060
5061 nfs4_inode_make_writeable(inode);
5062 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, res.fattr->label),
5063 inode,
5064 NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME);
5065 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
5066 if (!status) {
5067 nfs4_update_changeattr(dir, &res.cinfo, res.fattr->time_start,
5068 NFS_INO_INVALID_DATA);
5069 nfs4_inc_nlink(inode);
5070 status = nfs_post_op_update_inode(inode, res.fattr);
5071 if (!status)
5072 nfs_setsecurity(inode, res.fattr);
5073 }
5074
5075 out:
5076 nfs_free_fattr(res.fattr);
5077 return status;
5078 }
5079
5080 static int nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
5081 {
5082 struct nfs4_exception exception = {
5083 .interruptible = true,
5084 };
5085 int err;
5086 do {
5087 err = nfs4_handle_exception(NFS_SERVER(inode),
5088 _nfs4_proc_link(inode, dir, name),
5089 &exception);
5090 } while (exception.retry);
5091 return err;
5092 }
5093
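/*
 * Shared state for the CREATE-based operations (symlink, mkdir, mknod):
 * the RPC message, arguments and results all point at the embedded file
 * handle and fattr, so callers only fill in the type-specific fields.
 */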
5094 struct nfs4_createdata {
5095 struct rpc_message msg;
5096 struct nfs4_create_arg arg;
5097 struct nfs4_create_res res;
5098 struct nfs_fh fh;
5099 struct nfs_fattr fattr;
5100 };
5101
5102 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
5103 const struct qstr *name, struct iattr *sattr, u32 ftype)
5104 {
5105 struct nfs4_createdata *data;
5106
5107 data = kzalloc(sizeof(*data), GFP_KERNEL);
5108 if (data != NULL) {
5109 struct nfs_server *server = NFS_SERVER(dir);
5110
5111 data->fattr.label = nfs4_label_alloc(server, GFP_KERNEL);
5112 if (IS_ERR(data->fattr.label))
5113 goto out_free;
5114
5115 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
5116 data->msg.rpc_argp = &data->arg;
5117 data->msg.rpc_resp = &data->res;
5118 data->arg.dir_fh = NFS_FH(dir);
5119 data->arg.server = server;
5120 data->arg.name = name;
5121 data->arg.attrs = sattr;
5122 data->arg.ftype = ftype;
5123 data->arg.bitmask = nfs4_bitmask(server, data->fattr.label);
5124 data->arg.umask = current_umask();
5125 data->res.server = server;
5126 data->res.fh = &data->fh;
5127 data->res.fattr = &data->fattr;
5128 nfs_fattr_init(data->res.fattr);
5129 }
5130 return data;
5131 out_free:
5132 kfree(data);
5133 return NULL;
5134 }
5135
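/*
 * Issue the prepared CREATE compound. On success, update the parent's
 * change attribute under i_lock (bumping nlink for NF4DIR), then
 * instantiate the new dentry from the returned filehandle and attributes.
 */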
5136 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
5137 {
5138 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
5139 &data->arg.seq_args, &data->res.seq_res, 1);
5140 if (status == 0) {
5141 spin_lock(&dir->i_lock);
5142 /* Creating a directory bumps nlink in the parent */
5143 if (data->arg.ftype == NF4DIR)
5144 nfs4_inc_nlink_locked(dir);
5145 nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo,
5146 data->res.fattr->time_start,
5147 NFS_INO_INVALID_DATA);
5148 spin_unlock(&dir->i_lock);
5149 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
5150 }
5151 return status;
5152 }
5153
5154 static void nfs4_free_createdata(struct nfs4_createdata *data)
5155 {
5156 nfs4_label_free(data->fattr.label);
5157 kfree(data);
5158 }
5159
5160 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
5161 struct folio *folio, unsigned int len, struct iattr *sattr,
5162 struct nfs4_label *label)
5163 {
5164 struct page *page = &folio->page;
5165 struct nfs4_createdata *data;
5166 int status = -ENAMETOOLONG;
5167
5168 if (len > NFS4_MAXPATHLEN)
5169 goto out;
5170
5171 status = -ENOMEM;
5172 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
5173 if (data == NULL)
5174 goto out;
5175
5176 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
5177 data->arg.u.symlink.pages = &page;
5178 data->arg.u.symlink.len = len;
5179 data->arg.label = label;
5180
5181 status = nfs4_do_create(dir, dentry, data);
5182
5183 nfs4_free_createdata(data);
5184 out:
5185 return status;
5186 }
5187
5188 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
5189 struct folio *folio, unsigned int len, struct iattr *sattr)
5190 {
5191 struct nfs4_exception exception = {
5192 .interruptible = true,
5193 };
5194 struct nfs4_label l, *label;
5195 int err;
5196
5197 label = nfs4_label_init_security(dir, dentry, sattr, &l);
5198
5199 do {
5200 err = _nfs4_proc_symlink(dir, dentry, folio, len, sattr, label);
5201 trace_nfs4_symlink(dir, &dentry->d_name, err);
5202 err = nfs4_handle_exception(NFS_SERVER(dir), err,
5203 &exception);
5204 } while (exception.retry);
5205
5206 nfs4_label_release_security(label);
5207 return err;
5208 }
5209
5210 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
5211 struct iattr *sattr, struct nfs4_label *label)
5212 {
5213 struct nfs4_createdata *data;
5214 int status = -ENOMEM;
5215
5216 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
5217 if (data == NULL)
5218 goto out;
5219
5220 data->arg.label = label;
5221 status = nfs4_do_create(dir, dentry, data);
5222
5223 nfs4_free_createdata(data);
5224 out:
5225 return status;
5226 }
5227
5228 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
5229 struct iattr *sattr)
5230 {
5231 struct nfs_server *server = NFS_SERVER(dir);
5232 struct nfs4_exception exception = {
5233 .interruptible = true,
5234 };
5235 struct nfs4_label l, *label;
5236 int err;
5237
5238 label = nfs4_label_init_security(dir, dentry, sattr, &l);
5239
5240 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
5241 sattr->ia_mode &= ~current_umask();
5242 do {
5243 err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
5244 trace_nfs4_mkdir(dir, &dentry->d_name, err);
5245 err = nfs4_handle_exception(NFS_SERVER(dir), err,
5246 &exception);
5247 } while (exception.retry);
5248 nfs4_label_release_security(label);
5249
5250 return err;
5251 }
5252
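/*
 * READDIR: pick an attribute bitmask (with or without the security label
 * word), encode the cookie and verifier, and return the amount of
 * XDR-encoded entry data placed in the caller's pages.
 */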
5253 static int _nfs4_proc_readdir(struct nfs_readdir_arg *nr_arg,
5254 struct nfs_readdir_res *nr_res)
5255 {
5256 struct inode *dir = d_inode(nr_arg->dentry);
5257 struct nfs_server *server = NFS_SERVER(dir);
5258 struct nfs4_readdir_arg args = {
5259 .fh = NFS_FH(dir),
5260 .pages = nr_arg->pages,
5261 .pgbase = 0,
5262 .count = nr_arg->page_len,
5263 .plus = nr_arg->plus,
5264 };
5265 struct nfs4_readdir_res res;
5266 struct rpc_message msg = {
5267 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
5268 .rpc_argp = &args,
5269 .rpc_resp = &res,
5270 .rpc_cred = nr_arg->cred,
5271 };
5272 int status;
5273
5274 dprintk("%s: dentry = %pd2, cookie = %llu\n", __func__,
5275 nr_arg->dentry, (unsigned long long)nr_arg->cookie);
5276 if (!(server->caps & NFS_CAP_SECURITY_LABEL))
5277 args.bitmask = server->attr_bitmask_nl;
5278 else
5279 args.bitmask = server->attr_bitmask;
5280
5281 nfs4_setup_readdir(nr_arg->cookie, nr_arg->verf, nr_arg->dentry, &args);
5282 res.pgbase = args.pgbase;
5283 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
5284 &res.seq_res, 0);
5285 if (status >= 0) {
5286 memcpy(nr_res->verf, res.verifier.data, NFS4_VERIFIER_SIZE);
5287 status += args.pgbase;
5288 }
5289
5290 nfs_invalidate_atime(dir);
5291
5292 dprintk("%s: returns %d\n", __func__, status);
5293 return status;
5294 }
5295
5296 static int nfs4_proc_readdir(struct nfs_readdir_arg *arg,
5297 struct nfs_readdir_res *res)
5298 {
5299 struct nfs4_exception exception = {
5300 .interruptible = true,
5301 };
5302 int err;
5303 do {
5304 err = _nfs4_proc_readdir(arg, res);
5305 trace_nfs4_readdir(d_inode(arg->dentry), err);
5306 err = nfs4_handle_exception(NFS_SERVER(d_inode(arg->dentry)),
5307 err, &exception);
5308 } while (exception.retry);
5309 return err;
5310 }
5311
5312 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
5313 struct iattr *sattr, struct nfs4_label *label, dev_t rdev)
5314 {
5315 struct nfs4_createdata *data;
5316 int mode = sattr->ia_mode;
5317 int status = -ENOMEM;
5318
5319 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
5320 if (data == NULL)
5321 goto out;
5322
5323 if (S_ISFIFO(mode))
5324 data->arg.ftype = NF4FIFO;
5325 else if (S_ISBLK(mode)) {
5326 data->arg.ftype = NF4BLK;
5327 data->arg.u.device.specdata1 = MAJOR(rdev);
5328 data->arg.u.device.specdata2 = MINOR(rdev);
5329 }
5330 else if (S_ISCHR(mode)) {
5331 data->arg.ftype = NF4CHR;
5332 data->arg.u.device.specdata1 = MAJOR(rdev);
5333 data->arg.u.device.specdata2 = MINOR(rdev);
5334 } else if (!S_ISSOCK(mode)) {
5335 status = -EINVAL;
5336 goto out_free;
5337 }
5338
5339 data->arg.label = label;
5340 status = nfs4_do_create(dir, dentry, data);
5341 out_free:
5342 nfs4_free_createdata(data);
5343 out:
5344 return status;
5345 }
5346
5347 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
5348 struct iattr *sattr, dev_t rdev)
5349 {
5350 struct nfs_server *server = NFS_SERVER(dir);
5351 struct nfs4_exception exception = {
5352 .interruptible = true,
5353 };
5354 struct nfs4_label l, *label;
5355 int err;
5356
5357 label = nfs4_label_init_security(dir, dentry, sattr, &l);
5358
5359 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
5360 sattr->ia_mode &= ~current_umask();
5361 do {
5362 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev);
5363 trace_nfs4_mknod(dir, &dentry->d_name, err);
5364 err = nfs4_handle_exception(NFS_SERVER(dir), err,
5365 &exception);
5366 } while (exception.retry);
5367
5368 nfs4_label_release_security(label);
5369
5370 return err;
5371 }
5372
5373 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
5374 struct nfs_fsstat *fsstat)
5375 {
5376 struct nfs4_statfs_arg args = {
5377 .fh = fhandle,
5378 .bitmask = server->attr_bitmask,
5379 };
5380 struct nfs4_statfs_res res = {
5381 .fsstat = fsstat,
5382 };
5383 struct rpc_message msg = {
5384 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
5385 .rpc_argp = &args,
5386 .rpc_resp = &res,
5387 };
5388
5389 nfs_fattr_init(fsstat->fattr);
5390 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5391 }
5392
5393 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
5394 {
5395 struct nfs4_exception exception = {
5396 .interruptible = true,
5397 };
5398 int err;
5399 do {
5400 err = nfs4_handle_exception(server,
5401 _nfs4_proc_statfs(server, fhandle, fsstat),
5402 &exception);
5403 } while (exception.retry);
5404 return err;
5405 }
5406
5407 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
5408 struct nfs_fsinfo *fsinfo)
5409 {
5410 struct nfs4_fsinfo_arg args = {
5411 .fh = fhandle,
5412 .bitmask = server->attr_bitmask,
5413 };
5414 struct nfs4_fsinfo_res res = {
5415 .fsinfo = fsinfo,
5416 };
5417 struct rpc_message msg = {
5418 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
5419 .rpc_argp = &args,
5420 .rpc_resp = &res,
5421 };
5422
5423 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5424 }
5425
5426 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
5427 {
5428 struct nfs4_exception exception = {
5429 .interruptible = true,
5430 };
5431 int err;
5432
5433 do {
5434 err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
5435 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
5436 if (err == 0) {
5437 nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time * HZ);
5438 break;
5439 }
5440 err = nfs4_handle_exception(server, err, &exception);
5441 } while (exception.retry);
5442 return err;
5443 }
5444
5445 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
5446 {
5447 int error;
5448
5449 nfs_fattr_init(fsinfo->fattr);
5450 error = nfs4_do_fsinfo(server, fhandle, fsinfo);
5451 if (error == 0) {
5452 /* block layout checks this! */
5453 server->pnfs_blksize = fsinfo->blksize;
5454 set_pnfs_layoutdriver(server, fhandle, fsinfo);
5455 }
5456
5457 return error;
5458 }
5459
5460 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
5461 struct nfs_pathconf *pathconf)
5462 {
5463 struct nfs4_pathconf_arg args = {
5464 .fh = fhandle,
5465 .bitmask = server->attr_bitmask,
5466 };
5467 struct nfs4_pathconf_res res = {
5468 .pathconf = pathconf,
5469 };
5470 struct rpc_message msg = {
5471 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
5472 .rpc_argp = &args,
5473 .rpc_resp = &res,
5474 };
5475
5476 /* None of the pathconf attributes are mandatory to implement */
5477 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
5478 memset(pathconf, 0, sizeof(*pathconf));
5479 return 0;
5480 }
5481
5482 nfs_fattr_init(pathconf->fattr);
5483 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5484 }
5485
5486 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
5487 struct nfs_pathconf *pathconf)
5488 {
5489 struct nfs4_exception exception = {
5490 .interruptible = true,
5491 };
5492 int err;
5493
5494 do {
5495 err = nfs4_handle_exception(server,
5496 _nfs4_proc_pathconf(server, fhandle, pathconf),
5497 &exception);
5498 } while (exception.retry);
5499 return err;
5500 }
5501
5502 int nfs4_set_rw_stateid(nfs4_stateid *stateid,
5503 const struct nfs_open_context *ctx,
5504 const struct nfs_lock_context *l_ctx,
5505 fmode_t fmode)
5506 {
5507 return nfs4_select_rw_stateid(ctx->state, fmode, l_ctx, stateid, NULL);
5508 }
5509 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
5510
5511 static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
5512 const struct nfs_open_context *ctx,
5513 const struct nfs_lock_context *l_ctx,
5514 fmode_t fmode)
5515 {
5516 nfs4_stateid _current_stateid;
5517
5518 /* If the current stateid represents a lost lock, then exit */
5519 if (nfs4_set_rw_stateid(&_current_stateid, ctx, l_ctx, fmode) == -EIO)
5520 return true;
5521 return nfs4_stateid_match(stateid, &_current_stateid);
5522 }
5523
5524 static bool nfs4_error_stateid_expired(int err)
5525 {
5526 switch (err) {
5527 case -NFS4ERR_DELEG_REVOKED:
5528 case -NFS4ERR_ADMIN_REVOKED:
5529 case -NFS4ERR_BAD_STATEID:
5530 case -NFS4ERR_STALE_STATEID:
5531 case -NFS4ERR_OLD_STATEID:
5532 case -NFS4ERR_OPENMODE:
5533 case -NFS4ERR_EXPIRED:
5534 return true;
5535 }
5536 return false;
5537 }
5538
5539 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
5540 {
5541 struct nfs_server *server = NFS_SERVER(hdr->inode);
5542
5543 trace_nfs4_read(hdr, task->tk_status);
5544 if (task->tk_status < 0) {
5545 struct nfs4_exception exception = {
5546 .inode = hdr->inode,
5547 .state = hdr->args.context->state,
5548 .stateid = &hdr->args.stateid,
5549 };
5550 task->tk_status = nfs4_async_handle_exception(task,
5551 server, task->tk_status, &exception);
5552 if (exception.retry) {
5553 rpc_restart_call_prepare(task);
5554 return -EAGAIN;
5555 }
5556 }
5557
5558 if (task->tk_status > 0)
5559 renew_lease(server, hdr->timestamp);
5560 return 0;
5561 }
5562
5563 static bool nfs4_read_stateid_changed(struct rpc_task *task,
5564 struct nfs_pgio_args *args)
5565 {
5566
5567 if (!nfs4_error_stateid_expired(task->tk_status) ||
5568 nfs4_stateid_is_current(&args->stateid,
5569 args->context,
5570 args->lock_context,
5571 FMODE_READ))
5572 return false;
5573 rpc_restart_call_prepare(task);
5574 return true;
5575 }
5576
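/*
 * If the server fails READ_PLUS with -ENOTSUPP, clear NFS_CAP_READ_PLUS
 * and transparently restart the request as a plain READ.
 */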
5577 static bool nfs4_read_plus_not_supported(struct rpc_task *task,
5578 struct nfs_pgio_header *hdr)
5579 {
5580 struct nfs_server *server = NFS_SERVER(hdr->inode);
5581 struct rpc_message *msg = &task->tk_msg;
5582
5583 if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS] &&
5584 task->tk_status == -ENOTSUPP) {
5585 server->caps &= ~NFS_CAP_READ_PLUS;
5586 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
5587 rpc_restart_call_prepare(task);
5588 return true;
5589 }
5590 return false;
5591 }
5592
5593 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
5594 {
5595 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
5596 return -EAGAIN;
5597 if (nfs4_read_stateid_changed(task, &hdr->args))
5598 return -EAGAIN;
5599 if (nfs4_read_plus_not_supported(task, hdr))
5600 return -EAGAIN;
5601 if (task->tk_status > 0)
5602 nfs_invalidate_atime(hdr->inode);
5603 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
5604 nfs4_read_done_cb(task, hdr);
5605 }
5606
5607 #if defined CONFIG_NFS_V4_2 && defined CONFIG_NFS_V4_2_READ_PLUS
5608 static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr,
5609 struct rpc_message *msg)
5610 {
5611 /* Note: We don't use READ_PLUS with pNFS yet */
5612 if (nfs_server_capable(hdr->inode, NFS_CAP_READ_PLUS) && !hdr->ds_clp) {
5613 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS];
5614 return nfs_read_alloc_scratch(hdr, READ_PLUS_SCRATCH_SIZE);
5615 }
5616 return false;
5617 }
5618 #else
5619 static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr,
5620 struct rpc_message *msg)
5621 {
5622 return false;
5623 }
5624 #endif /* CONFIG_NFS_V4_2 */
5625
5626 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
5627 struct rpc_message *msg)
5628 {
5629 hdr->timestamp = jiffies;
5630 if (!hdr->pgio_done_cb)
5631 hdr->pgio_done_cb = nfs4_read_done_cb;
5632 if (!nfs42_read_plus_support(hdr, msg))
5633 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
5634 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
5635 }
5636
5637 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task,
5638 struct nfs_pgio_header *hdr)
5639 {
5640 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode)->nfs_client,
5641 &hdr->args.seq_args,
5642 &hdr->res.seq_res,
5643 task))
5644 return 0;
5645 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
5646 hdr->args.lock_context,
5647 hdr->rw_mode) == -EIO)
5648 return -EIO;
5649 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags)))
5650 return -EIO;
5651 return 0;
5652 }
5653
5654 static int nfs4_write_done_cb(struct rpc_task *task,
5655 struct nfs_pgio_header *hdr)
5656 {
5657 struct inode *inode = hdr->inode;
5658
5659 trace_nfs4_write(hdr, task->tk_status);
5660 if (task->tk_status < 0) {
5661 struct nfs4_exception exception = {
5662 .inode = hdr->inode,
5663 .state = hdr->args.context->state,
5664 .stateid = &hdr->args.stateid,
5665 };
5666 task->tk_status = nfs4_async_handle_exception(task,
5667 NFS_SERVER(inode), task->tk_status,
5668 &exception);
5669 if (exception.retry) {
5670 rpc_restart_call_prepare(task);
5671 return -EAGAIN;
5672 }
5673 }
5674 if (task->tk_status >= 0) {
5675 renew_lease(NFS_SERVER(inode), hdr->timestamp);
5676 nfs_writeback_update_inode(hdr);
5677 }
5678 return 0;
5679 }
5680
5681 static bool nfs4_write_stateid_changed(struct rpc_task *task,
5682 struct nfs_pgio_args *args)
5683 {
5684
5685 if (!nfs4_error_stateid_expired(task->tk_status) ||
5686 nfs4_stateid_is_current(&args->stateid,
5687 args->context,
5688 args->lock_context,
5689 FMODE_WRITE))
5690 return false;
5691 rpc_restart_call_prepare(task);
5692 return true;
5693 }
5694
5695 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
5696 {
5697 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
5698 return -EAGAIN;
5699 if (nfs4_write_stateid_changed(task, &hdr->args))
5700 return -EAGAIN;
5701 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
5702 nfs4_write_done_cb(task, hdr);
5703 }
5704
5705 static
5706 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
5707 {
5708 /* Don't request attributes for pNFS or O_DIRECT writes */
5709 if (hdr->ds_clp != NULL || hdr->dreq != NULL)
5710 return false;
5711 /* Otherwise, request attributes if and only if we don't hold
5712 * a delegation
5713 */
5714 return nfs4_have_delegation(hdr->inode, FMODE_READ, 0) == 0;
5715 }
5716
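/*
 * Build a GETATTR bitmask requesting only those attributes whose cached
 * values are marked invalid, then clamp the result to the attributes the
 * server actually supports.
 */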
5717 void nfs4_bitmask_set(__u32 bitmask[], const __u32 src[],
5718 struct inode *inode, unsigned long cache_validity)
5719 {
5720 struct nfs_server *server = NFS_SERVER(inode);
5721 unsigned int i;
5722
5723 memcpy(bitmask, src, sizeof(*bitmask) * NFS4_BITMASK_SZ);
5724 cache_validity |= READ_ONCE(NFS_I(inode)->cache_validity);
5725
5726 if (cache_validity & NFS_INO_INVALID_CHANGE)
5727 bitmask[0] |= FATTR4_WORD0_CHANGE;
5728 if (cache_validity & NFS_INO_INVALID_ATIME)
5729 bitmask[1] |= FATTR4_WORD1_TIME_ACCESS;
5730 if (cache_validity & NFS_INO_INVALID_MODE)
5731 bitmask[1] |= FATTR4_WORD1_MODE;
5732 if (cache_validity & NFS_INO_INVALID_OTHER)
5733 bitmask[1] |= FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP;
5734 if (cache_validity & NFS_INO_INVALID_NLINK)
5735 bitmask[1] |= FATTR4_WORD1_NUMLINKS;
5736 if (cache_validity & NFS_INO_INVALID_CTIME)
5737 bitmask[1] |= FATTR4_WORD1_TIME_METADATA;
5738 if (cache_validity & NFS_INO_INVALID_MTIME)
5739 bitmask[1] |= FATTR4_WORD1_TIME_MODIFY;
5740 if (cache_validity & NFS_INO_INVALID_BLOCKS)
5741 bitmask[1] |= FATTR4_WORD1_SPACE_USED;
5742
5743 if (cache_validity & NFS_INO_INVALID_SIZE)
5744 bitmask[0] |= FATTR4_WORD0_SIZE;
5745
5746 for (i = 0; i < NFS4_BITMASK_SZ; i++)
5747 bitmask[i] &= server->attr_bitmask[i];
5748 }
5749
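/*
 * WRITE setup: post-op attributes are requested only when cache
 * consistency data is needed (not pNFS, not O_DIRECT, no delegation held).
 */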
5750 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
5751 struct rpc_message *msg,
5752 struct rpc_clnt **clnt)
5753 {
5754 struct nfs_server *server = NFS_SERVER(hdr->inode);
5755
5756 if (!nfs4_write_need_cache_consistency_data(hdr)) {
5757 hdr->args.bitmask = NULL;
5758 hdr->res.fattr = NULL;
5759 } else {
5760 nfs4_bitmask_set(hdr->args.bitmask_store,
5761 server->cache_consistency_bitmask,
5762 hdr->inode, NFS_INO_INVALID_BLOCKS);
5763 hdr->args.bitmask = hdr->args.bitmask_store;
5764 }
5765
5766 if (!hdr->pgio_done_cb)
5767 hdr->pgio_done_cb = nfs4_write_done_cb;
5768 hdr->res.server = server;
5769 hdr->timestamp = jiffies;
5770
5771 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
5772 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
5773 nfs4_state_protect_write(hdr->ds_clp ? hdr->ds_clp : server->nfs_client, clnt, msg, hdr);
5774 }
5775
5776 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
5777 {
5778 nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client,
5779 &data->args.seq_args,
5780 &data->res.seq_res,
5781 task);
5782 }
5783
5784 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
5785 {
5786 struct inode *inode = data->inode;
5787
5788 trace_nfs4_commit(data, task->tk_status);
5789 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
5790 NULL, NULL) == -EAGAIN) {
5791 rpc_restart_call_prepare(task);
5792 return -EAGAIN;
5793 }
5794 return 0;
5795 }
5796
5797 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
5798 {
5799 if (!nfs4_sequence_done(task, &data->res.seq_res))
5800 return -EAGAIN;
5801 return data->commit_done_cb(task, data);
5802 }
5803
5804 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg,
5805 struct rpc_clnt **clnt)
5806 {
5807 struct nfs_server *server = NFS_SERVER(data->inode);
5808
5809 if (data->commit_done_cb == NULL)
5810 data->commit_done_cb = nfs4_commit_done_cb;
5811 data->res.server = server;
5812 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
5813 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
5814 nfs4_state_protect(data->ds_clp ? data->ds_clp : server->nfs_client,
5815 NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
5816 }
5817
5818 static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args,
5819 struct nfs_commitres *res)
5820 {
5821 struct inode *dst_inode = file_inode(dst);
5822 struct nfs_server *server = NFS_SERVER(dst_inode);
5823 struct rpc_message msg = {
5824 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT],
5825 .rpc_argp = args,
5826 .rpc_resp = res,
5827 };
5828
5829 args->fh = NFS_FH(dst_inode);
5830 return nfs4_call_sync(server->client, server, &msg,
5831 &args->seq_args, &res->seq_res, 1);
5832 }
5833
5834 int nfs4_proc_commit(struct file *dst, __u64 offset, __u32 count, struct nfs_commitres *res)
5835 {
5836 struct nfs_commitargs args = {
5837 .offset = offset,
5838 .count = count,
5839 };
5840 struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
5841 struct nfs4_exception exception = { };
5842 int status;
5843
5844 do {
5845 status = _nfs4_proc_commit(dst, &args, res);
5846 status = nfs4_handle_exception(dst_server, status, &exception);
5847 } while (exception.retry);
5848
5849 return status;
5850 }
5851
5852 struct nfs4_renewdata {
5853 struct nfs_client *client;
5854 unsigned long timestamp;
5855 };
5856
5857 /*
5858 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
5859 * standalone procedure for queueing an asynchronous RENEW.
5860 */
5861 static void nfs4_renew_release(void *calldata)
5862 {
5863 struct nfs4_renewdata *data = calldata;
5864 struct nfs_client *clp = data->client;
5865
5866 if (refcount_read(&clp->cl_count) > 1)
5867 nfs4_schedule_state_renewal(clp);
5868 nfs_put_client(clp);
5869 kfree(data);
5870 }
5871
5872 static void nfs4_renew_done(struct rpc_task *task, void *calldata)
5873 {
5874 struct nfs4_renewdata *data = calldata;
5875 struct nfs_client *clp = data->client;
5876 unsigned long timestamp = data->timestamp;
5877
5878 trace_nfs4_renew_async(clp, task->tk_status);
5879 switch (task->tk_status) {
5880 case 0:
5881 break;
5882 case -NFS4ERR_LEASE_MOVED:
5883 nfs4_schedule_lease_moved_recovery(clp);
5884 break;
5885 default:
5886 /* Unless we're shutting down, schedule state recovery! */
5887 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
5888 return;
5889 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
5890 nfs4_schedule_lease_recovery(clp);
5891 return;
5892 }
5893 nfs4_schedule_path_down_recovery(clp);
5894 }
5895 do_renew_lease(clp, timestamp);
5896 }
5897
5898 static const struct rpc_call_ops nfs4_renew_ops = {
5899 .rpc_call_done = nfs4_renew_done,
5900 .rpc_release = nfs4_renew_release,
5901 };
5902
5903 static int nfs4_proc_async_renew(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags)
5904 {
5905 struct rpc_message msg = {
5906 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
5907 .rpc_argp = clp,
5908 .rpc_cred = cred,
5909 };
5910 struct nfs4_renewdata *data;
5911
5912 if (renew_flags == 0)
5913 return 0;
5914 if (!refcount_inc_not_zero(&clp->cl_count))
5915 return -EIO;
5916 data = kmalloc(sizeof(*data), GFP_NOFS);
5917 if (data == NULL) {
5918 nfs_put_client(clp);
5919 return -ENOMEM;
5920 }
5921 data->client = clp;
5922 data->timestamp = jiffies;
5923 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
5924 &nfs4_renew_ops, data);
5925 }
5926
5927 static int nfs4_proc_renew(struct nfs_client *clp, const struct cred *cred)
5928 {
5929 struct rpc_message msg = {
5930 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
5931 .rpc_argp = clp,
5932 .rpc_cred = cred,
5933 };
5934 unsigned long now = jiffies;
5935 int status;
5936
5937 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5938 if (status < 0)
5939 return status;
5940 do_renew_lease(clp, now);
5941 return 0;
5942 }
5943
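/*
 * The classic "acl" attribute is advertised in bitmask word 0, while the
 * NFSv4.1 dacl/sacl attributes are advertised in word 1.
 */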
5944 static bool nfs4_server_supports_acls(const struct nfs_server *server,
5945 enum nfs4_acl_type type)
5946 {
5947 switch (type) {
5948 default:
5949 return server->attr_bitmask[0] & FATTR4_WORD0_ACL;
5950 case NFS4ACL_DACL:
5951 return server->attr_bitmask[1] & FATTR4_WORD1_DACL;
5952 case NFS4ACL_SACL:
5953 return server->attr_bitmask[1] & FATTR4_WORD1_SACL;
5954 }
5955 }
5956
5957 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
5958 * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
5959 * the stack.
5960 */
5961 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
5962
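/*
 * Copy a flat buffer into freshly allocated pages. Returns the number of
 * pages filled, or -ENOMEM after releasing any pages already allocated.
 */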
5963 int nfs4_buf_to_pages_noslab(const void *buf, size_t buflen,
5964 struct page **pages)
5965 {
5966 struct page *newpage, **spages;
5967 int rc = 0;
5968 size_t len;
5969 spages = pages;
5970
5971 do {
5972 len = min_t(size_t, PAGE_SIZE, buflen);
5973 newpage = alloc_page(GFP_KERNEL);
5974
5975 if (newpage == NULL)
5976 goto unwind;
5977 memcpy(page_address(newpage), buf, len);
5978 buf += len;
5979 buflen -= len;
5980 *pages++ = newpage;
5981 rc++;
5982 } while (buflen != 0);
5983
5984 return rc;
5985
5986 unwind:
5987 for(; rc > 0; rc--)
5988 __free_page(spages[rc-1]);
5989 return -ENOMEM;
5990 }
5991
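/*
 * Cached ACL data: if the ACL is too large to cache, only its length is
 * recorded (->cached == 0) and a later read must go back to the server.
 */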
5992 struct nfs4_cached_acl {
5993 enum nfs4_acl_type type;
5994 int cached;
5995 size_t len;
5996 char data[];
5997 };
5998
5999 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
6000 {
6001 struct nfs_inode *nfsi = NFS_I(inode);
6002
6003 spin_lock(&inode->i_lock);
6004 kfree(nfsi->nfs4_acl);
6005 nfsi->nfs4_acl = acl;
6006 spin_unlock(&inode->i_lock);
6007 }
6008
6009 static void nfs4_zap_acl_attr(struct inode *inode)
6010 {
6011 nfs4_set_cached_acl(inode, NULL);
6012 }
6013
6014 static ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf,
6015 size_t buflen, enum nfs4_acl_type type)
6016 {
6017 struct nfs_inode *nfsi = NFS_I(inode);
6018 struct nfs4_cached_acl *acl;
6019 int ret = -ENOENT;
6020
6021 spin_lock(&inode->i_lock);
6022 acl = nfsi->nfs4_acl;
6023 if (acl == NULL)
6024 goto out;
6025 if (acl->type != type)
6026 goto out;
6027 if (buf == NULL) /* user is just asking for length */
6028 goto out_len;
6029 if (acl->cached == 0)
6030 goto out;
6031 ret = -ERANGE; /* see getxattr(2) man page */
6032 if (acl->len > buflen)
6033 goto out;
6034 memcpy(buf, acl->data, acl->len);
6035 out_len:
6036 ret = acl->len;
6037 out:
6038 spin_unlock(&inode->i_lock);
6039 return ret;
6040 }
6041
6042 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages,
6043 size_t pgbase, size_t acl_len,
6044 enum nfs4_acl_type type)
6045 {
6046 struct nfs4_cached_acl *acl;
6047 size_t buflen = sizeof(*acl) + acl_len;
6048
6049 if (buflen <= PAGE_SIZE) {
6050 acl = kmalloc(buflen, GFP_KERNEL);
6051 if (acl == NULL)
6052 goto out;
6053 acl->cached = 1;
6054 _copy_from_pages(acl->data, pages, pgbase, acl_len);
6055 } else {
6056 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
6057 if (acl == NULL)
6058 goto out;
6059 acl->cached = 0;
6060 }
6061 acl->type = type;
6062 acl->len = acl_len;
6063 out:
6064 nfs4_set_cached_acl(inode, acl);
6065 }
6066
6067 /*
6068 * The getxattr API returns the required buffer length when called with a
6069 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
6070 * the required buf. On a NULL buf, we send a page of data to the server
6071 * guessing that the ACL request can be serviced by a page. If so, we cache
6072 * up to the page of ACL data, and the 2nd call to getxattr is serviced by
6073 * the cache. If not so, we throw away the page, and cache the required
6074 * length. The next getxattr call will then produce another round trip to
6075 * the server, this time with the input buf of the required size.
6076 */
6077 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf,
6078 size_t buflen, enum nfs4_acl_type type)
6079 {
6080 struct page **pages;
6081 struct nfs_getaclargs args = {
6082 .fh = NFS_FH(inode),
6083 .acl_type = type,
6084 .acl_len = buflen,
6085 };
6086 struct nfs_getaclres res = {
6087 .acl_type = type,
6088 .acl_len = buflen,
6089 };
6090 struct rpc_message msg = {
6091 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
6092 .rpc_argp = &args,
6093 .rpc_resp = &res,
6094 };
6095 unsigned int npages;
6096 int ret = -ENOMEM, i;
6097 struct nfs_server *server = NFS_SERVER(inode);
6098
6099 if (buflen == 0)
6100 buflen = server->rsize;
6101
6102 npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1;
6103 pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
6104 if (!pages)
6105 return -ENOMEM;
6106
6107 args.acl_pages = pages;
6108
6109 for (i = 0; i < npages; i++) {
6110 pages[i] = alloc_page(GFP_KERNEL);
6111 if (!pages[i])
6112 goto out_free;
6113 }
6114
6115 /* for decoding across pages */
6116 res.acl_scratch = alloc_page(GFP_KERNEL);
6117 if (!res.acl_scratch)
6118 goto out_free;
6119
6120 args.acl_len = npages * PAGE_SIZE;
6121
6122 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
6123 __func__, buf, buflen, npages, args.acl_len);
6124 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
6125 &msg, &args.seq_args, &res.seq_res, 0);
6126 if (ret)
6127 goto out_free;
6128
6129 /* Handle the case where the passed-in buffer is too short */
6130 if (res.acl_flags & NFS4_ACL_TRUNC) {
6131 /* Did the user only issue a request for the acl length? */
6132 if (buf == NULL)
6133 goto out_ok;
6134 ret = -ERANGE;
6135 goto out_free;
6136 }
6137 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len,
6138 type);
6139 if (buf) {
6140 if (res.acl_len > buflen) {
6141 ret = -ERANGE;
6142 goto out_free;
6143 }
6144 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
6145 }
6146 out_ok:
6147 ret = res.acl_len;
6148 out_free:
6149 while (--i >= 0)
6150 __free_page(pages[i]);
6151 if (res.acl_scratch)
6152 __free_page(res.acl_scratch);
6153 kfree(pages);
6154 return ret;
6155 }
6156
6157 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf,
6158 size_t buflen, enum nfs4_acl_type type)
6159 {
6160 struct nfs4_exception exception = {
6161 .interruptible = true,
6162 };
6163 ssize_t ret;
6164 do {
6165 ret = __nfs4_get_acl_uncached(inode, buf, buflen, type);
6166 trace_nfs4_get_acl(inode, ret);
6167 if (ret >= 0)
6168 break;
6169 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
6170 } while (exception.retry);
6171 return ret;
6172 }
6173
6174 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen,
6175 enum nfs4_acl_type type)
6176 {
6177 struct nfs_server *server = NFS_SERVER(inode);
6178 int ret;
6179
6180 if (!nfs4_server_supports_acls(server, type))
6181 return -EOPNOTSUPP;
6182 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
6183 if (ret < 0)
6184 return ret;
6185 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
6186 nfs_zap_acl_cache(inode);
6187 ret = nfs4_read_cached_acl(inode, buf, buflen, type);
6188 if (ret != -ENOENT)
6189 /* -ENOENT is returned if there is no ACL or if there is an ACL
6190 * but no cached acl data, just the acl length */
6191 return ret;
6192 return nfs4_get_acl_uncached(inode, buf, buflen, type);
6193 }
6194
6195 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf,
6196 size_t buflen, enum nfs4_acl_type type)
6197 {
6198 struct nfs_server *server = NFS_SERVER(inode);
6199 struct page *pages[NFS4ACL_MAXPAGES];
6200 struct nfs_setaclargs arg = {
6201 .fh = NFS_FH(inode),
6202 .acl_type = type,
6203 .acl_len = buflen,
6204 .acl_pages = pages,
6205 };
6206 struct nfs_setaclres res;
6207 struct rpc_message msg = {
6208 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
6209 .rpc_argp = &arg,
6210 .rpc_resp = &res,
6211 };
6212 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
6213 int ret, i;
6214
6215 /* You can't remove system.nfs4_acl: */
6216 if (buflen == 0)
6217 return -EINVAL;
6218 if (!nfs4_server_supports_acls(server, type))
6219 return -EOPNOTSUPP;
6220 if (npages > ARRAY_SIZE(pages))
6221 return -ERANGE;
6222 i = nfs4_buf_to_pages_noslab(buf, buflen, arg.acl_pages);
6223 if (i < 0)
6224 return i;
6225 nfs4_inode_make_writeable(inode);
6226 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
6227
6228 /*
6229 * Free each page after tx, so the only ref left is
6230 * held by the network stack
6231 */
6232 for (; i > 0; i--)
6233 put_page(pages[i-1]);
6234
6235 /*
6236 * ACL update can result in inode attribute update,
6237 * so mark the attribute cache invalid.
6238 */
6239 spin_lock(&inode->i_lock);
6240 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
6241 NFS_INO_INVALID_CTIME |
6242 NFS_INO_REVAL_FORCED);
6243 spin_unlock(&inode->i_lock);
6244 nfs_access_zap_cache(inode);
6245 nfs_zap_acl_cache(inode);
6246 return ret;
6247 }
6248
6249 static int nfs4_proc_set_acl(struct inode *inode, const void *buf,
6250 size_t buflen, enum nfs4_acl_type type)
6251 {
6252 struct nfs4_exception exception = { };
6253 int err;
6254 do {
6255 err = __nfs4_proc_set_acl(inode, buf, buflen, type);
6256 trace_nfs4_set_acl(inode, err);
6257 if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) {
6258 /*
6259 * no need to retry since the kernel
6260 * isn't involved in encoding the ACEs.
6261 */
6262 err = -EINVAL;
6263 break;
6264 }
6265 err = nfs4_handle_exception(NFS_SERVER(inode), err,
6266 &exception);
6267 } while (exception.retry);
6268 return err;
6269 }
6270
6271 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
6272 static int _nfs4_get_security_label(struct inode *inode, void *buf,
6273 size_t buflen)
6274 {
6275 struct nfs_server *server = NFS_SERVER(inode);
6276 struct nfs4_label label = {0, 0, 0, buflen, buf};
6277
6278 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
6279 struct nfs_fattr fattr = {
6280 .label = &label,
6281 };
6282 struct nfs4_getattr_arg arg = {
6283 .fh = NFS_FH(inode),
6284 .bitmask = bitmask,
6285 };
6286 struct nfs4_getattr_res res = {
6287 .fattr = &fattr,
6288 .server = server,
6289 };
6290 struct rpc_message msg = {
6291 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
6292 .rpc_argp = &arg,
6293 .rpc_resp = &res,
6294 };
6295 int ret;
6296
6297 nfs_fattr_init(&fattr);
6298
6299 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0);
6300 if (ret)
6301 return ret;
6302 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
6303 return -ENOENT;
6304 return label.len;
6305 }
6306
6307 static int nfs4_get_security_label(struct inode *inode, void *buf,
6308 size_t buflen)
6309 {
6310 struct nfs4_exception exception = {
6311 .interruptible = true,
6312 };
6313 int err;
6314
6315 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
6316 return -EOPNOTSUPP;
6317
6318 do {
6319 err = _nfs4_get_security_label(inode, buf, buflen);
6320 trace_nfs4_get_security_label(inode, err);
6321 err = nfs4_handle_exception(NFS_SERVER(inode), err,
6322 &exception);
6323 } while (exception.retry);
6324 return err;
6325 }
6326
6327 static int _nfs4_do_set_security_label(struct inode *inode,
6328 struct nfs4_label *ilabel,
6329 struct nfs_fattr *fattr)
6330 {
6331
6332 struct iattr sattr = {0};
6333 struct nfs_server *server = NFS_SERVER(inode);
6334 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
6335 struct nfs_setattrargs arg = {
6336 .fh = NFS_FH(inode),
6337 .iap = &sattr,
6338 .server = server,
6339 .bitmask = bitmask,
6340 .label = ilabel,
6341 };
6342 struct nfs_setattrres res = {
6343 .fattr = fattr,
6344 .server = server,
6345 };
6346 struct rpc_message msg = {
6347 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
6348 .rpc_argp = &arg,
6349 .rpc_resp = &res,
6350 };
6351 int status;
6352
6353 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
6354
6355 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
6356 if (status)
6357 dprintk("%s failed: %d\n", __func__, status);
6358
6359 return status;
6360 }
6361
6362 static int nfs4_do_set_security_label(struct inode *inode,
6363 struct nfs4_label *ilabel,
6364 struct nfs_fattr *fattr)
6365 {
6366 struct nfs4_exception exception = { };
6367 int err;
6368
6369 do {
6370 err = _nfs4_do_set_security_label(inode, ilabel, fattr);
6371 trace_nfs4_set_security_label(inode, err);
6372 err = nfs4_handle_exception(NFS_SERVER(inode), err,
6373 &exception);
6374 } while (exception.retry);
6375 return err;
6376 }
6377
6378 static int
6379 nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
6380 {
6381 struct nfs4_label ilabel = {0, 0, 0, buflen, (char *)buf };
6382 struct nfs_fattr *fattr;
6383 int status;
6384
6385 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
6386 return -EOPNOTSUPP;
6387
6388 fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode));
6389 if (fattr == NULL)
6390 return -ENOMEM;
6391
6392 status = nfs4_do_set_security_label(inode, &ilabel, fattr);
6393 if (status == 0)
6394 nfs_setsecurity(inode, fattr);
6395
6396 nfs_free_fattr(fattr);
6397 return status;
6398 }
6399 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */
6400
6401
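/*
 * The boot verifier normally encodes the per-net boot time in nanoseconds;
 * while state is being purged an all-ones value is substituted instead.
 */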
6402 static void nfs4_init_boot_verifier(const struct nfs_client *clp,
6403 nfs4_verifier *bootverf)
6404 {
6405 __be32 verf[2];
6406
6407 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
6408 /* An impossible timestamp guarantees this value
6409 * will never match a generated boot time. */
6410 verf[0] = cpu_to_be32(U32_MAX);
6411 verf[1] = cpu_to_be32(U32_MAX);
6412 } else {
6413 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
6414 u64 ns = ktime_to_ns(nn->boot_time);
6415
6416 verf[0] = cpu_to_be32(ns >> 32);
6417 verf[1] = cpu_to_be32(ns);
6418 }
6419 memcpy(bootverf->data, verf, sizeof(bootverf->data));
6420 }
6421
6422 static size_t
6423 nfs4_get_uniquifier(struct nfs_client *clp, char *buf, size_t buflen)
6424 {
6425 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
6426 struct nfs_netns_client *nn_clp = nn->nfs_client;
6427 const char *id;
6428
6429 buf[0] = '\0';
6430
6431 if (nn_clp) {
6432 rcu_read_lock();
6433 id = rcu_dereference(nn_clp->identifier);
6434 if (id)
6435 strscpy(buf, id, buflen);
6436 rcu_read_unlock();
6437 }
6438
6439 if (nfs4_client_id_uniquifier[0] != '\0' && buf[0] == '\0')
6440 strscpy(buf, nfs4_client_id_uniquifier, buflen);
6441
6442 return strlen(buf);
6443 }
6444
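/*
 * Non-uniform co_ownerid:
 *   "Linux NFSv4.0 <hostname>[/<uniquifier>]/<address>"
 * The constant 14 below is strlen("Linux NFSv4.0 ").
 */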
6445 static int
6446 nfs4_init_nonuniform_client_string(struct nfs_client *clp)
6447 {
6448 char buf[NFS4_CLIENT_ID_UNIQ_LEN];
6449 size_t buflen;
6450 size_t len;
6451 char *str;
6452
6453 if (clp->cl_owner_id != NULL)
6454 return 0;
6455
6456 rcu_read_lock();
6457 len = 14 +
6458 strlen(clp->cl_rpcclient->cl_nodename) +
6459 1 +
6460 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) +
6461 1;
6462 rcu_read_unlock();
6463
6464 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf));
6465 if (buflen)
6466 len += buflen + 1;
6467
6468 if (len > NFS4_OPAQUE_LIMIT + 1)
6469 return -EINVAL;
6470
6471 /*
6472 * Since this string is allocated at mount time, and held until the
6473 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
6474 * about a memory-reclaim deadlock.
6475 */
6476 str = kmalloc(len, GFP_KERNEL);
6477 if (!str)
6478 return -ENOMEM;
6479
6480 rcu_read_lock();
6481 if (buflen)
6482 scnprintf(str, len, "Linux NFSv4.0 %s/%s/%s",
6483 clp->cl_rpcclient->cl_nodename, buf,
6484 rpc_peeraddr2str(clp->cl_rpcclient,
6485 RPC_DISPLAY_ADDR));
6486 else
6487 scnprintf(str, len, "Linux NFSv4.0 %s/%s",
6488 clp->cl_rpcclient->cl_nodename,
6489 rpc_peeraddr2str(clp->cl_rpcclient,
6490 RPC_DISPLAY_ADDR));
6491 rcu_read_unlock();
6492
6493 clp->cl_owner_id = str;
6494 return 0;
6495 }
6496
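/*
 * Uniform co_ownerid:
 *   "Linux NFSv<major>.<minor> [<uniquifier>/]<hostname>"
 * The 10 + 10 + 1 + 10 + 1 term below leaves room for "Linux NFSv", two
 * version numbers and their separators.
 */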
6497 static int
6498 nfs4_init_uniform_client_string(struct nfs_client *clp)
6499 {
6500 char buf[NFS4_CLIENT_ID_UNIQ_LEN];
6501 size_t buflen;
6502 size_t len;
6503 char *str;
6504
6505 if (clp->cl_owner_id != NULL)
6506 return 0;
6507
6508 len = 10 + 10 + 1 + 10 + 1 +
6509 strlen(clp->cl_rpcclient->cl_nodename) + 1;
6510
6511 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf));
6512 if (buflen)
6513 len += buflen + 1;
6514
6515 if (len > NFS4_OPAQUE_LIMIT + 1)
6516 return -EINVAL;
6517
6518 /*
6519 * Since this string is allocated at mount time, and held until the
6520 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
6521 * about a memory-reclaim deadlock.
6522 */
6523 str = kmalloc(len, GFP_KERNEL);
6524 if (!str)
6525 return -ENOMEM;
6526
6527 if (buflen)
6528 scnprintf(str, len, "Linux NFSv%u.%u %s/%s",
6529 clp->rpc_ops->version, clp->cl_minorversion,
6530 buf, clp->cl_rpcclient->cl_nodename);
6531 else
6532 scnprintf(str, len, "Linux NFSv%u.%u %s",
6533 clp->rpc_ops->version, clp->cl_minorversion,
6534 clp->cl_rpcclient->cl_nodename);
6535 clp->cl_owner_id = str;
6536 return 0;
6537 }
6538
6539 /*
6540 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback
6541 * services. Advertise one based on the address family of the
6542 * clientaddr.
6543 */
6544 static unsigned int
6545 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len)
6546 {
6547 if (strchr(clp->cl_ipaddr, ':') != NULL)
6548 return scnprintf(buf, len, "tcp6");
6549 else
6550 return scnprintf(buf, len, "tcp");
6551 }
6552
6553 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata)
6554 {
6555 struct nfs4_setclientid *sc = calldata;
6556
6557 if (task->tk_status == 0)
6558 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred);
6559 }
6560
6561 static const struct rpc_call_ops nfs4_setclientid_ops = {
6562 .rpc_call_done = nfs4_setclientid_done,
6563 };
6564
6565 /**
6566 * nfs4_proc_setclientid - Negotiate client ID
6567 * @clp: state data structure
6568 * @program: RPC program for NFSv4 callback service
6569 * @port: IP port number for NFS4 callback service
6570 * @cred: credential to use for this call
6571 * @res: where to place the result
6572 *
6573 * Returns zero, a negative errno, or a negative NFS4ERR status code.
6574 */
6575 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
6576 unsigned short port, const struct cred *cred,
6577 struct nfs4_setclientid_res *res)
6578 {
6579 nfs4_verifier sc_verifier;
6580 struct nfs4_setclientid setclientid = {
6581 .sc_verifier = &sc_verifier,
6582 .sc_prog = program,
6583 .sc_clnt = clp,
6584 };
6585 struct rpc_message msg = {
6586 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
6587 .rpc_argp = &setclientid,
6588 .rpc_resp = res,
6589 .rpc_cred = cred,
6590 };
6591 struct rpc_task_setup task_setup_data = {
6592 .rpc_client = clp->cl_rpcclient,
6593 .rpc_message = &msg,
6594 .callback_ops = &nfs4_setclientid_ops,
6595 .callback_data = &setclientid,
6596 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
6597 };
6598 unsigned long now = jiffies;
6599 int status;
6600
6601 /* nfs_client_id4 */
6602 nfs4_init_boot_verifier(clp, &sc_verifier);
6603
6604 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
6605 status = nfs4_init_uniform_client_string(clp);
6606 else
6607 status = nfs4_init_nonuniform_client_string(clp);
6608
6609 if (status)
6610 goto out;
6611
6612 /* cb_client4 */
6613 setclientid.sc_netid_len =
6614 nfs4_init_callback_netid(clp,
6615 setclientid.sc_netid,
6616 sizeof(setclientid.sc_netid));
6617 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
6618 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
6619 clp->cl_ipaddr, port >> 8, port & 255);
6620
6621 dprintk("NFS call setclientid auth=%s, '%s'\n",
6622 clp->cl_rpcclient->cl_auth->au_ops->au_name,
6623 clp->cl_owner_id);
6624
6625 status = nfs4_call_sync_custom(&task_setup_data);
6626 if (setclientid.sc_cred) {
6627 kfree(clp->cl_acceptor);
6628 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
6629 put_rpccred(setclientid.sc_cred);
6630 }
6631
6632 if (status == 0)
6633 do_renew_lease(clp, now);
6634 out:
6635 trace_nfs4_setclientid(clp, status);
6636 dprintk("NFS reply setclientid: %d\n", status);
6637 return status;
6638 }
6639
6640 /**
6641 * nfs4_proc_setclientid_confirm - Confirm client ID
6642 * @clp: state data structure
6643 * @arg: result of a previous SETCLIENTID
6644 * @cred: credential to use for this call
6645 *
6646 * Returns zero, a negative errno, or a negative NFS4ERR status code.
6647 */
6648 int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
6649 struct nfs4_setclientid_res *arg,
6650 const struct cred *cred)
6651 {
6652 struct rpc_message msg = {
6653 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
6654 .rpc_argp = arg,
6655 .rpc_cred = cred,
6656 };
6657 int status;
6658
6659 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
6660 clp->cl_rpcclient->cl_auth->au_ops->au_name,
6661 clp->cl_clientid);
6662 status = rpc_call_sync(clp->cl_rpcclient, &msg,
6663 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
6664 trace_nfs4_setclientid_confirm(clp, status);
6665 dprintk("NFS reply setclientid_confirm: %d\n", status);
6666 return status;
6667 }
6668
6669 struct nfs4_delegreturndata {
6670 struct nfs4_delegreturnargs args;
6671 struct nfs4_delegreturnres res;
6672 struct nfs_fh fh;
6673 nfs4_stateid stateid;
6674 unsigned long timestamp;
6675 struct {
6676 struct nfs4_layoutreturn_args arg;
6677 struct nfs4_layoutreturn_res res;
6678 struct nfs4_xdr_opaque_data ld_private;
6679 u32 roc_barrier;
6680 bool roc;
6681 } lr;
6682 struct nfs4_delegattr sattr;
6683 struct nfs_fattr fattr;
6684 int rpc_status;
6685 struct inode *inode;
6686 };
6687
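/*
 * DELEGRETURN completion: layoutreturn and delegated-attribute errors are
 * dealt with first; revoked or expired stateids are ignored, an old
 * stateid triggers a refresh and restart, and everything else goes
 * through the generic async exception handler.
 */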
6688 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
6689 {
6690 struct nfs4_delegreturndata *data = calldata;
6691 struct nfs4_exception exception = {
6692 .inode = data->inode,
6693 .stateid = &data->stateid,
6694 .task_is_privileged = data->args.seq_args.sa_privileged,
6695 };
6696
6697 if (!nfs4_sequence_done(task, &data->res.seq_res))
6698 return;
6699
6700 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
6701
6702 /* Handle Layoutreturn errors */
6703 if (pnfs_roc_done(task, &data->args.lr_args, &data->res.lr_res,
6704 &data->res.lr_ret) == -EAGAIN)
6705 goto out_restart;
6706
6707 if (data->args.sattr_args && task->tk_status != 0) {
6708 switch(data->res.sattr_ret) {
6709 case 0:
6710 data->args.sattr_args = NULL;
6711 data->res.sattr_res = false;
6712 break;
6713 case -NFS4ERR_ADMIN_REVOKED:
6714 case -NFS4ERR_DELEG_REVOKED:
6715 case -NFS4ERR_EXPIRED:
6716 case -NFS4ERR_BAD_STATEID:
6717 /* Let the main handler below do stateid recovery */
6718 break;
6719 case -NFS4ERR_OLD_STATEID:
6720 if (nfs4_refresh_delegation_stateid(&data->stateid,
6721 data->inode))
6722 goto out_restart;
6723 fallthrough;
6724 default:
6725 data->args.sattr_args = NULL;
6726 data->res.sattr_res = false;
6727 goto out_restart;
6728 }
6729 }
6730
6731 switch (task->tk_status) {
6732 case 0:
6733 renew_lease(data->res.server, data->timestamp);
6734 break;
6735 case -NFS4ERR_ADMIN_REVOKED:
6736 case -NFS4ERR_DELEG_REVOKED:
6737 case -NFS4ERR_EXPIRED:
6738 nfs4_free_revoked_stateid(data->res.server,
6739 data->args.stateid,
6740 task->tk_msg.rpc_cred);
6741 fallthrough;
6742 case -NFS4ERR_BAD_STATEID:
6743 case -NFS4ERR_STALE_STATEID:
6744 case -ETIMEDOUT:
6745 task->tk_status = 0;
6746 break;
6747 case -NFS4ERR_OLD_STATEID:
6748 if (!nfs4_refresh_delegation_stateid(&data->stateid, data->inode))
6749 nfs4_stateid_seqid_inc(&data->stateid);
6750 if (data->args.bitmask) {
6751 data->args.bitmask = NULL;
6752 data->res.fattr = NULL;
6753 }
6754 goto out_restart;
6755 case -NFS4ERR_ACCESS:
6756 if (data->args.bitmask) {
6757 data->args.bitmask = NULL;
6758 data->res.fattr = NULL;
6759 goto out_restart;
6760 }
6761 fallthrough;
6762 default:
6763 task->tk_status = nfs4_async_handle_exception(task,
6764 data->res.server, task->tk_status,
6765 &exception);
6766 if (exception.retry)
6767 goto out_restart;
6768 }
6769 nfs_delegation_mark_returned(data->inode, data->args.stateid);
6770 data->rpc_status = task->tk_status;
6771 return;
6772 out_restart:
6773 task->tk_status = 0;
6774 rpc_restart_call_prepare(task);
6775 }
6776
6777 static void nfs4_delegreturn_release(void *calldata)
6778 {
6779 struct nfs4_delegreturndata *data = calldata;
6780 struct inode *inode = data->inode;
6781
6782 if (data->lr.roc)
6783 pnfs_roc_release(&data->lr.arg, &data->lr.res,
6784 data->res.lr_ret);
6785 if (inode) {
6786 nfs4_fattr_set_prechange(&data->fattr,
6787 inode_peek_iversion_raw(inode));
6788 nfs_refresh_inode(inode, &data->fattr);
6789 nfs_iput_and_deactive(inode);
6790 }
6791 kfree(calldata);
6792 }
6793
6794 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
6795 {
6796 struct nfs4_delegreturndata *d_data;
6797 struct pnfs_layout_hdr *lo;
6798
6799 d_data = data;
6800
6801 if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) {
6802 nfs4_sequence_done(task, &d_data->res.seq_res);
6803 return;
6804 }
6805
6806 lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL;
6807 if (lo && !pnfs_layout_is_valid(lo)) {
6808 d_data->args.lr_args = NULL;
6809 d_data->res.lr_res = NULL;
6810 }
6811
6812 nfs4_setup_sequence(d_data->res.server->nfs_client,
6813 &d_data->args.seq_args,
6814 &d_data->res.seq_res,
6815 task);
6816 }
6817
6818 static const struct rpc_call_ops nfs4_delegreturn_ops = {
6819 .rpc_call_prepare = nfs4_delegreturn_prepare,
6820 .rpc_call_done = nfs4_delegreturn_done,
6821 .rpc_release = nfs4_delegreturn_release,
6822 };
6823
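/*
 * Set up and run a DELEGRETURN compound. When possible this bundles a
 * return-on-close LAYOUTRETURN and, for delegations with delegated time
 * attributes, the cached atime/mtime. If @issync is set, wait for the
 * RPC to complete and return its status; otherwise fire and forget.
 */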
6824 static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
6825 const nfs4_stateid *stateid,
6826 struct nfs_delegation *delegation,
6827 int issync)
6828 {
6829 struct nfs4_delegreturndata *data;
6830 struct nfs_server *server = NFS_SERVER(inode);
6831 struct rpc_task *task;
6832 struct rpc_message msg = {
6833 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
6834 .rpc_cred = cred,
6835 };
6836 struct rpc_task_setup task_setup_data = {
6837 .rpc_client = server->client,
6838 .rpc_message = &msg,
6839 .callback_ops = &nfs4_delegreturn_ops,
6840 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
6841 };
6842 int status = 0;
6843
6844 if (nfs_server_capable(inode, NFS_CAP_MOVEABLE))
6845 task_setup_data.flags |= RPC_TASK_MOVEABLE;
6846
6847 data = kzalloc(sizeof(*data), GFP_KERNEL);
6848 if (data == NULL)
6849 return -ENOMEM;
6850
6851 nfs4_state_protect(server->nfs_client,
6852 NFS_SP4_MACH_CRED_CLEANUP,
6853 &task_setup_data.rpc_client, &msg);
6854
6855 data->args.fhandle = &data->fh;
6856 data->args.stateid = &data->stateid;
6857 nfs4_bitmask_set(data->args.bitmask_store,
6858 server->cache_consistency_bitmask, inode, 0);
6859 data->args.bitmask = data->args.bitmask_store;
6860 nfs_copy_fh(&data->fh, NFS_FH(inode));
6861 nfs4_stateid_copy(&data->stateid, stateid);
6862 data->res.fattr = &data->fattr;
6863 data->res.server = server;
6864 data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
6865 data->lr.arg.ld_private = &data->lr.ld_private;
6866 nfs_fattr_init(data->res.fattr);
6867 data->timestamp = jiffies;
6868 data->rpc_status = 0;
6869 data->inode = nfs_igrab_and_active(inode);
6870 if (data->inode || issync) {
6871 data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res,
6872 cred);
6873 if (data->lr.roc) {
6874 data->args.lr_args = &data->lr.arg;
6875 data->res.lr_res = &data->lr.res;
6876 }
6877 }
6878
6879 if (delegation &&
6880 test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) {
6881 if (delegation->type & FMODE_READ) {
6882 data->sattr.atime = inode_get_atime(inode);
6883 data->sattr.atime_set = true;
6884 }
6885 if (delegation->type & FMODE_WRITE) {
6886 data->sattr.mtime = inode_get_mtime(inode);
6887 data->sattr.mtime_set = true;
6888 }
6889 data->args.sattr_args = &data->sattr;
6890 data->res.sattr_res = true;
6891 }
6892
6893 if (!data->inode)
6894 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
6895 1);
6896 else
6897 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
6898 0);
6899
6900 task_setup_data.callback_data = data;
6901 msg.rpc_argp = &data->args;
6902 msg.rpc_resp = &data->res;
6903 task = rpc_run_task(&task_setup_data);
6904 if (IS_ERR(task))
6905 return PTR_ERR(task);
6906 if (!issync)
6907 goto out;
6908 status = rpc_wait_for_completion_task(task);
6909 if (status != 0)
6910 goto out;
6911 status = data->rpc_status;
6912 out:
6913 rpc_put_task(task);
6914 return status;
6915 }
6916
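/**
 * nfs4_proc_delegreturn - return a delegation to the server
 * @inode: inode covered by the delegation
 * @cred: credential to use for this call
 * @stateid: delegation stateid being returned
 * @delegation: delegation record, used to return delegated timestamps
 * @issync: if non-zero, wait for the DELEGRETURN to complete
 *
 * Returns zero on success (a stale or expired stateid also counts as
 * success), or a negative errno/NFS4ERR status code.
 */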
6917 int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
6918 const nfs4_stateid *stateid,
6919 struct nfs_delegation *delegation, int issync)
6920 {
6921 struct nfs_server *server = NFS_SERVER(inode);
6922 struct nfs4_exception exception = { };
6923 int err;
6924 do {
6925 err = _nfs4_proc_delegreturn(inode, cred, stateid,
6926 delegation, issync);
6927 trace_nfs4_delegreturn(inode, stateid, err);
6928 switch (err) {
6929 case -NFS4ERR_STALE_STATEID:
6930 case -NFS4ERR_EXPIRED:
6931 case 0:
6932 return 0;
6933 }
6934 err = nfs4_handle_exception(server, err, &exception);
6935 } while (exception.retry);
6936 return err;
6937 }
6938
6939 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6940 {
6941 struct inode *inode = state->inode;
6942 struct nfs_server *server = NFS_SERVER(inode);
6943 struct nfs_client *clp = server->nfs_client;
6944 struct nfs_lockt_args arg = {
6945 .fh = NFS_FH(inode),
6946 .fl = request,
6947 };
6948 struct nfs_lockt_res res = {
6949 .denied = request,
6950 };
6951 struct rpc_message msg = {
6952 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
6953 .rpc_argp = &arg,
6954 .rpc_resp = &res,
6955 .rpc_cred = state->owner->so_cred,
6956 };
6957 struct nfs4_lock_state *lsp;
6958 int status;
6959
6960 arg.lock_owner.clientid = clp->cl_clientid;
6961 status = nfs4_set_lock_state(state, request);
6962 if (status != 0)
6963 goto out;
6964 lsp = request->fl_u.nfs4_fl.owner;
6965 arg.lock_owner.id = lsp->ls_seqid.owner_id;
6966 arg.lock_owner.s_dev = server->s_dev;
6967 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
6968 switch (status) {
6969 case 0:
6970 request->c.flc_type = F_UNLCK;
6971 break;
6972 case -NFS4ERR_DENIED:
6973 status = 0;
6974 }
6975 request->fl_ops->fl_release_private(request);
6976 request->fl_ops = NULL;
6977 out:
6978 return status;
6979 }
6980
6981 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6982 {
6983 struct nfs4_exception exception = {
6984 .interruptible = true,
6985 };
6986 int err;
6987
6988 do {
6989 err = _nfs4_proc_getlk(state, cmd, request);
6990 trace_nfs4_get_lock(request, state, cmd, err);
6991 err = nfs4_handle_exception(NFS_SERVER(state->inode), err,
6992 &exception);
6993 } while (exception.retry);
6994 return err;
6995 }
6996
6997 /*
6998 * Update the seqid of a lock stateid after receiving
6999 * NFS4ERR_OLD_STATEID
7000 */
7001 static bool nfs4_refresh_lock_old_stateid(nfs4_stateid *dst,
7002 struct nfs4_lock_state *lsp)
7003 {
7004 struct nfs4_state *state = lsp->ls_state;
7005 bool ret = false;
7006
7007 spin_lock(&state->state_lock);
7008 if (!nfs4_stateid_match_other(dst, &lsp->ls_stateid))
7009 goto out;
7010 if (!nfs4_stateid_is_newer(&lsp->ls_stateid, dst))
7011 nfs4_stateid_seqid_inc(dst);
7012 else
7013 dst->seqid = lsp->ls_stateid.seqid;
7014 ret = true;
7015 out:
7016 spin_unlock(&state->state_lock);
7017 return ret;
7018 }
7019
7020 static bool nfs4_sync_lock_stateid(nfs4_stateid *dst,
7021 struct nfs4_lock_state *lsp)
7022 {
7023 struct nfs4_state *state = lsp->ls_state;
7024 bool ret;
7025
7026 spin_lock(&state->state_lock);
7027 ret = !nfs4_stateid_match_other(dst, &lsp->ls_stateid);
7028 nfs4_stateid_copy(dst, &lsp->ls_stateid);
7029 spin_unlock(&state->state_lock);
7030 return ret;
7031 }
7032
7033 struct nfs4_unlockdata {
7034 struct nfs_locku_args arg;
7035 struct nfs_locku_res res;
7036 struct nfs4_lock_state *lsp;
7037 struct nfs_open_context *ctx;
7038 struct nfs_lock_context *l_ctx;
7039 struct file_lock fl;
7040 struct nfs_server *server;
7041 unsigned long timestamp;
7042 };
7043
7044 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
7045 struct nfs_open_context *ctx,
7046 struct nfs4_lock_state *lsp,
7047 struct nfs_seqid *seqid)
7048 {
7049 struct nfs4_unlockdata *p;
7050 struct nfs4_state *state = lsp->ls_state;
7051 struct inode *inode = state->inode;
7052
7053 p = kzalloc(sizeof(*p), GFP_KERNEL);
7054 if (p == NULL)
7055 return NULL;
7056 p->arg.fh = NFS_FH(inode);
7057 p->arg.fl = &p->fl;
7058 p->arg.seqid = seqid;
7059 p->res.seqid = seqid;
7060 p->lsp = lsp;
7061 /* Ensure we don't close file until we're done freeing locks! */
7062 p->ctx = get_nfs_open_context(ctx);
7063 p->l_ctx = nfs_get_lock_context(ctx);
7064 locks_init_lock(&p->fl);
7065 locks_copy_lock(&p->fl, fl);
7066 p->server = NFS_SERVER(inode);
7067 spin_lock(&state->state_lock);
7068 nfs4_stateid_copy(&p->arg.stateid, &lsp->ls_stateid);
7069 spin_unlock(&state->state_lock);
7070 return p;
7071 }
7072
7073 static void nfs4_locku_release_calldata(void *data)
7074 {
7075 struct nfs4_unlockdata *calldata = data;
7076 nfs_free_seqid(calldata->arg.seqid);
7077 nfs4_put_lock_state(calldata->lsp);
7078 nfs_put_lock_context(calldata->l_ctx);
7079 put_nfs_open_context(calldata->ctx);
7080 kfree(calldata);
7081 }
7082
7083 static void nfs4_locku_done(struct rpc_task *task, void *data)
7084 {
7085 struct nfs4_unlockdata *calldata = data;
7086 struct nfs4_exception exception = {
7087 .inode = calldata->lsp->ls_state->inode,
7088 .stateid = &calldata->arg.stateid,
7089 };
7090
7091 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
7092 return;
7093 switch (task->tk_status) {
7094 case 0:
7095 renew_lease(calldata->server, calldata->timestamp);
7096 locks_lock_inode_wait(calldata->lsp->ls_state->inode, &calldata->fl);
7097 if (nfs4_update_lock_stateid(calldata->lsp,
7098 &calldata->res.stateid))
7099 break;
7100 fallthrough;
7101 case -NFS4ERR_ADMIN_REVOKED:
7102 case -NFS4ERR_EXPIRED:
7103 nfs4_free_revoked_stateid(calldata->server,
7104 &calldata->arg.stateid,
7105 task->tk_msg.rpc_cred);
7106 fallthrough;
7107 case -NFS4ERR_BAD_STATEID:
7108 case -NFS4ERR_STALE_STATEID:
7109 if (nfs4_sync_lock_stateid(&calldata->arg.stateid,
7110 calldata->lsp))
7111 rpc_restart_call_prepare(task);
7112 break;
7113 case -NFS4ERR_OLD_STATEID:
7114 if (nfs4_refresh_lock_old_stateid(&calldata->arg.stateid,
7115 calldata->lsp))
7116 rpc_restart_call_prepare(task);
7117 break;
7118 default:
7119 task->tk_status = nfs4_async_handle_exception(task,
7120 calldata->server, task->tk_status,
7121 &exception);
7122 if (exception.retry)
7123 rpc_restart_call_prepare(task);
7124 }
7125 nfs_release_seqid(calldata->arg.seqid);
7126 }
7127
7128 static void nfs4_locku_prepare(struct rpc_task *task, void *data)
7129 {
7130 struct nfs4_unlockdata *calldata = data;
7131
7132 if (test_bit(NFS_CONTEXT_UNLOCK, &calldata->l_ctx->open_context->flags) &&
7133 nfs_async_iocounter_wait(task, calldata->l_ctx))
7134 return;
7135
7136 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
7137 goto out_wait;
7138 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
7139 /* Note: exit _without_ running nfs4_locku_done */
7140 goto out_no_action;
7141 }
7142 calldata->timestamp = jiffies;
7143 if (nfs4_setup_sequence(calldata->server->nfs_client,
7144 &calldata->arg.seq_args,
7145 &calldata->res.seq_res,
7146 task) != 0)
7147 nfs_release_seqid(calldata->arg.seqid);
7148 return;
7149 out_no_action:
7150 task->tk_action = NULL;
7151 out_wait:
7152 nfs4_sequence_done(task, &calldata->res.seq_res);
7153 }
7154
7155 static const struct rpc_call_ops nfs4_locku_ops = {
7156 .rpc_call_prepare = nfs4_locku_prepare,
7157 .rpc_call_done = nfs4_locku_done,
7158 .rpc_release = nfs4_locku_release_calldata,
7159 };
7160
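/*
 * Build and dispatch an asynchronous LOCKU request for @fl. Ownership of
 * @seqid passes to the unlock data; it is freed here if allocation fails.
 */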
7161 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
7162 struct nfs_open_context *ctx,
7163 struct nfs4_lock_state *lsp,
7164 struct nfs_seqid *seqid)
7165 {
7166 struct nfs4_unlockdata *data;
7167 struct rpc_message msg = {
7168 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
7169 .rpc_cred = ctx->cred,
7170 };
7171 struct rpc_task_setup task_setup_data = {
7172 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
7173 .rpc_message = &msg,
7174 .callback_ops = &nfs4_locku_ops,
7175 .workqueue = nfsiod_workqueue,
7176 .flags = RPC_TASK_ASYNC,
7177 };
7178
7179 if (nfs_server_capable(lsp->ls_state->inode, NFS_CAP_MOVEABLE))
7180 task_setup_data.flags |= RPC_TASK_MOVEABLE;
7181
7182 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
7183 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg);
7184
7185 /* Ensure this is an unlock - when canceling a lock, the
7186 * canceled lock is passed in, and it won't be an unlock.
7187 */
7188 fl->c.flc_type = F_UNLCK;
7189 if (fl->c.flc_flags & FL_CLOSE)
7190 set_bit(NFS_CONTEXT_UNLOCK, &ctx->flags);
7191
7192 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
7193 if (data == NULL) {
7194 nfs_free_seqid(seqid);
7195 return ERR_PTR(-ENOMEM);
7196 }
7197
7198 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 0);
7199 msg.rpc_argp = &data->arg;
7200 msg.rpc_resp = &data->res;
7201 task_setup_data.callback_data = data;
7202 return rpc_run_task(&task_setup_data);
7203 }
7204
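/*
 * Unlock path: drop the lock locally first, then send LOCKU to the
 * server unless the lock was never established there (i.e. it was only
 * held locally under a delegation).
 */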
7205 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
7206 {
7207 struct inode *inode = state->inode;
7208 struct nfs4_state_owner *sp = state->owner;
7209 struct nfs_inode *nfsi = NFS_I(inode);
7210 struct nfs_seqid *seqid;
7211 struct nfs4_lock_state *lsp;
7212 struct rpc_task *task;
7213 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
7214 int status = 0;
7215 unsigned char saved_flags = request->c.flc_flags;
7216
7217 status = nfs4_set_lock_state(state, request);
7218 /* Unlock _before_ we do the RPC call */
7219 request->c.flc_flags |= FL_EXISTS;
7220 /* Exclude nfs_delegation_claim_locks() */
7221 mutex_lock(&sp->so_delegreturn_mutex);
7222 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
7223 down_read(&nfsi->rwsem);
7224 if (locks_lock_inode_wait(inode, request) == -ENOENT) {
7225 up_read(&nfsi->rwsem);
7226 mutex_unlock(&sp->so_delegreturn_mutex);
7227 goto out;
7228 }
7229 lsp = request->fl_u.nfs4_fl.owner;
7230 set_bit(NFS_LOCK_UNLOCKING, &lsp->ls_flags);
7231 up_read(&nfsi->rwsem);
7232 mutex_unlock(&sp->so_delegreturn_mutex);
7233 if (status != 0)
7234 goto out;
7235 /* Is this a delegated lock? */
7236 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
7237 goto out;
7238 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid;
7239 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
7240 status = -ENOMEM;
7241 if (IS_ERR(seqid))
7242 goto out;
7243 task = nfs4_do_unlck(request,
7244 nfs_file_open_context(request->c.flc_file),
7245 lsp, seqid);
7246 status = PTR_ERR(task);
7247 if (IS_ERR(task))
7248 goto out;
7249 status = rpc_wait_for_completion_task(task);
7250 rpc_put_task(task);
7251 out:
7252 request->c.flc_flags = saved_flags;
7253 trace_nfs4_unlock(request, state, F_SETLK, status);
7254 return status;
7255 }
7256
7257 struct nfs4_lockdata {
7258 struct nfs_lock_args arg;
7259 struct nfs_lock_res res;
7260 struct nfs4_lock_state *lsp;
7261 struct nfs_open_context *ctx;
7262 struct file_lock fl;
7263 unsigned long timestamp;
7264 int rpc_status;
7265 int cancelled;
7266 struct nfs_server *server;
7267 };
7268
7269 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
7270 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
7271 gfp_t gfp_mask)
7272 {
7273 struct nfs4_lockdata *p;
7274 struct inode *inode = lsp->ls_state->inode;
7275 struct nfs_server *server = NFS_SERVER(inode);
7276 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
7277
7278 p = kzalloc(sizeof(*p), gfp_mask);
7279 if (p == NULL)
7280 return NULL;
7281
7282 p->arg.fh = NFS_FH(inode);
7283 p->arg.fl = &p->fl;
7284 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
7285 if (IS_ERR(p->arg.open_seqid))
7286 goto out_free;
7287 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
7288 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
7289 if (IS_ERR(p->arg.lock_seqid))
7290 goto out_free_seqid;
7291 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
7292 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
7293 p->arg.lock_owner.s_dev = server->s_dev;
7294 p->res.lock_seqid = p->arg.lock_seqid;
7295 p->lsp = lsp;
7296 p->server = server;
7297 p->ctx = get_nfs_open_context(ctx);
7298 locks_init_lock(&p->fl);
7299 locks_copy_lock(&p->fl, fl);
7300 return p;
7301 out_free_seqid:
7302 nfs_free_seqid(p->arg.open_seqid);
7303 out_free:
7304 kfree(p);
7305 return NULL;
7306 }
7307
7308 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
7309 {
7310 struct nfs4_lockdata *data = calldata;
7311 struct nfs4_state *state = data->lsp->ls_state;
7312
7313 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
7314 goto out_wait;
7315 /* Do we need to do an open_to_lock_owner? */
7316 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) {
7317 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
7318 goto out_release_lock_seqid;
7319 }
7320 nfs4_stateid_copy(&data->arg.open_stateid,
7321 &state->open_stateid);
7322 data->arg.new_lock_owner = 1;
7323 data->res.open_seqid = data->arg.open_seqid;
7324 } else {
7325 data->arg.new_lock_owner = 0;
7326 nfs4_stateid_copy(&data->arg.lock_stateid,
7327 &data->lsp->ls_stateid);
7328 }
7329 if (!nfs4_valid_open_stateid(state)) {
7330 data->rpc_status = -EBADF;
7331 task->tk_action = NULL;
7332 goto out_release_open_seqid;
7333 }
7334 data->timestamp = jiffies;
7335 if (nfs4_setup_sequence(data->server->nfs_client,
7336 &data->arg.seq_args,
7337 &data->res.seq_res,
7338 task) == 0)
7339 return;
7340 out_release_open_seqid:
7341 nfs_release_seqid(data->arg.open_seqid);
7342 out_release_lock_seqid:
7343 nfs_release_seqid(data->arg.lock_seqid);
7344 out_wait:
7345 nfs4_sequence_done(task, &data->res.seq_res);
7346 dprintk("%s: ret = %d\n", __func__, data->rpc_status);
7347 }
7348
7349 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
7350 {
7351 struct nfs4_lockdata *data = calldata;
7352 struct nfs4_lock_state *lsp = data->lsp;
7353
7354 if (!nfs4_sequence_done(task, &data->res.seq_res))
7355 return;
7356
7357 data->rpc_status = task->tk_status;
7358 switch (task->tk_status) {
7359 case 0:
7360 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
7361 data->timestamp);
7362 if (data->arg.new_lock && !data->cancelled) {
7363 data->fl.c.flc_flags &= ~(FL_SLEEP | FL_ACCESS);
7364 if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0)
7365 goto out_restart;
7366 }
7367 if (data->arg.new_lock_owner != 0) {
7368 nfs_confirm_seqid(&lsp->ls_seqid, 0);
7369 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
7370 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
7371 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
7372 goto out_restart;
7373 break;
7374 case -NFS4ERR_OLD_STATEID:
7375 if (data->arg.new_lock_owner != 0 &&
7376 nfs4_refresh_open_old_stateid(&data->arg.open_stateid,
7377 lsp->ls_state))
7378 goto out_restart;
7379 if (nfs4_refresh_lock_old_stateid(&data->arg.lock_stateid, lsp))
7380 goto out_restart;
7381 fallthrough;
7382 case -NFS4ERR_BAD_STATEID:
7383 case -NFS4ERR_STALE_STATEID:
7384 case -NFS4ERR_EXPIRED:
7385 if (data->arg.new_lock_owner != 0) {
7386 if (!nfs4_stateid_match(&data->arg.open_stateid,
7387 &lsp->ls_state->open_stateid))
7388 goto out_restart;
7389 } else if (!nfs4_stateid_match(&data->arg.lock_stateid,
7390 &lsp->ls_stateid))
7391 goto out_restart;
7392 }
7393 out_done:
7394 dprintk("%s: ret = %d!\n", __func__, data->rpc_status);
7395 return;
7396 out_restart:
7397 if (!data->cancelled)
7398 rpc_restart_call_prepare(task);
7399 goto out_done;
7400 }
7401
7402 static void nfs4_lock_release(void *calldata)
7403 {
7404 struct nfs4_lockdata *data = calldata;
7405
7406 nfs_free_seqid(data->arg.open_seqid);
7407 if (data->cancelled && data->rpc_status == 0) {
7408 struct rpc_task *task;
7409 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
7410 data->arg.lock_seqid);
7411 if (!IS_ERR(task))
7412 rpc_put_task_async(task);
7413 dprintk("%s: cancelling lock!\n", __func__);
7414 } else
7415 nfs_free_seqid(data->arg.lock_seqid);
7416 nfs4_put_lock_state(data->lsp);
7417 put_nfs_open_context(data->ctx);
7418 kfree(data);
7419 }
7420
7421 static const struct rpc_call_ops nfs4_lock_ops = {
7422 .rpc_call_prepare = nfs4_lock_prepare,
7423 .rpc_call_done = nfs4_lock_done,
7424 .rpc_release = nfs4_lock_release,
7425 };
7426
7427 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
7428 {
7429 switch (error) {
7430 case -NFS4ERR_ADMIN_REVOKED:
7431 case -NFS4ERR_EXPIRED:
7432 case -NFS4ERR_BAD_STATEID:
7433 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
7434 if (new_lock_owner != 0 ||
7435 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
7436 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
7437 break;
7438 case -NFS4ERR_STALE_STATEID:
7439 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
7440 nfs4_schedule_lease_recovery(server->nfs_client);
7441 }
7442 }
7443
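/*
 * Send a LOCK request and wait for it to complete. @recovery_type selects
 * a new lock, a post-reboot reclaim, or recovery of an expired lock;
 * recovery requests run as privileged sequence operations.
 */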
7444 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
7445 {
7446 struct nfs4_lockdata *data;
7447 struct rpc_task *task;
7448 struct rpc_message msg = {
7449 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
7450 .rpc_cred = state->owner->so_cred,
7451 };
7452 struct rpc_task_setup task_setup_data = {
7453 .rpc_client = NFS_CLIENT(state->inode),
7454 .rpc_message = &msg,
7455 .callback_ops = &nfs4_lock_ops,
7456 .workqueue = nfsiod_workqueue,
7457 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
7458 };
7459 int ret;
7460
7461 if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE))
7462 task_setup_data.flags |= RPC_TASK_MOVEABLE;
7463
7464 data = nfs4_alloc_lockdata(fl,
7465 nfs_file_open_context(fl->c.flc_file),
7466 fl->fl_u.nfs4_fl.owner, GFP_KERNEL);
7467 if (data == NULL)
7468 return -ENOMEM;
7469 if (IS_SETLKW(cmd))
7470 data->arg.block = 1;
7471 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1,
7472 recovery_type > NFS_LOCK_NEW);
7473 msg.rpc_argp = &data->arg;
7474 msg.rpc_resp = &data->res;
7475 task_setup_data.callback_data = data;
7476 if (recovery_type > NFS_LOCK_NEW) {
7477 if (recovery_type == NFS_LOCK_RECLAIM)
7478 data->arg.reclaim = NFS_LOCK_RECLAIM;
7479 } else
7480 data->arg.new_lock = 1;
7481 task = rpc_run_task(&task_setup_data);
7482 if (IS_ERR(task))
7483 return PTR_ERR(task);
7484 ret = rpc_wait_for_completion_task(task);
7485 if (ret == 0) {
7486 ret = data->rpc_status;
7487 if (ret)
7488 nfs4_handle_setlk_error(data->server, data->lsp,
7489 data->arg.new_lock_owner, ret);
7490 } else
7491 data->cancelled = true;
7492 trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
7493 rpc_put_task(task);
7494 dprintk("%s: ret = %d\n", __func__, ret);
7495 return ret;
7496 }
7497
7498 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
7499 {
7500 struct nfs_server *server = NFS_SERVER(state->inode);
7501 struct nfs4_exception exception = {
7502 .inode = state->inode,
7503 };
7504 int err;
7505
7506 do {
7507 /* Cache the lock if possible... */
7508 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
7509 return 0;
7510 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
7511 if (err != -NFS4ERR_DELAY)
7512 break;
7513 nfs4_handle_exception(server, err, &exception);
7514 } while (exception.retry);
7515 return err;
7516 }
7517
7518 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
7519 {
7520 struct nfs_server *server = NFS_SERVER(state->inode);
7521 struct nfs4_exception exception = {
7522 .inode = state->inode,
7523 };
7524 int err;
7525
7526 err = nfs4_set_lock_state(state, request);
7527 if (err != 0)
7528 return err;
7529 if (!recover_lost_locks) {
7530 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags);
7531 return 0;
7532 }
7533 do {
7534 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
7535 return 0;
7536 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
7537 switch (err) {
7538 default:
7539 goto out;
7540 case -NFS4ERR_GRACE:
7541 case -NFS4ERR_DELAY:
7542 nfs4_handle_exception(server, err, &exception);
7543 err = 0;
7544 }
7545 } while (exception.retry);
7546 out:
7547 return err;
7548 }
7549
7550 #if defined(CONFIG_NFS_V4_1)
7551 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
7552 {
7553 struct nfs4_lock_state *lsp;
7554 int status;
7555
7556 status = nfs4_set_lock_state(state, request);
7557 if (status != 0)
7558 return status;
7559 lsp = request->fl_u.nfs4_fl.owner;
7560 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) ||
7561 test_bit(NFS_LOCK_LOST, &lsp->ls_flags))
7562 return 0;
7563 return nfs4_lock_expired(state, request);
7564 }
7565 #endif
7566
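/*
 * Set a lock: probe for local conflicts with FL_ACCESS first, then cache
 * the lock locally if we hold a delegation, otherwise request it from the
 * server with a LOCK RPC.
 */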
7567 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7568 {
7569 struct nfs_inode *nfsi = NFS_I(state->inode);
7570 struct nfs4_state_owner *sp = state->owner;
7571 unsigned char flags = request->c.flc_flags;
7572 int status;
7573
7574 request->c.flc_flags |= FL_ACCESS;
7575 status = locks_lock_inode_wait(state->inode, request);
7576 if (status < 0)
7577 goto out;
7578 mutex_lock(&sp->so_delegreturn_mutex);
7579 down_read(&nfsi->rwsem);
7580 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
7581 /* Yes: cache locks! */
7582 /* ...but avoid races with delegation recall... */
7583 request->c.flc_flags = flags & ~FL_SLEEP;
7584 status = locks_lock_inode_wait(state->inode, request);
7585 up_read(&nfsi->rwsem);
7586 mutex_unlock(&sp->so_delegreturn_mutex);
7587 goto out;
7588 }
7589 up_read(&nfsi->rwsem);
7590 mutex_unlock(&sp->so_delegreturn_mutex);
7591 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
7592 out:
7593 request->c.flc_flags = flags;
7594 return status;
7595 }
7596
7597 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7598 {
7599 struct nfs4_exception exception = {
7600 .state = state,
7601 .inode = state->inode,
7602 .interruptible = true,
7603 };
7604 int err;
7605
7606 do {
7607 err = _nfs4_proc_setlk(state, cmd, request);
7608 if (err == -NFS4ERR_DENIED)
7609 err = -EAGAIN;
7610 err = nfs4_handle_exception(NFS_SERVER(state->inode),
7611 err, &exception);
7612 } while (exception.retry);
7613 return err;
7614 }
7615
7616 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
7617 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
7618
7619 static int
7620 nfs4_retry_setlk_simple(struct nfs4_state *state, int cmd,
7621 struct file_lock *request)
7622 {
7623 int status = -ERESTARTSYS;
7624 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
7625
7626 while(!signalled()) {
7627 status = nfs4_proc_setlk(state, cmd, request);
7628 if ((status != -EAGAIN) || IS_SETLK(cmd))
7629 break;
7630 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
7631 schedule_timeout(timeout);
7632 timeout *= 2;
7633 timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout);
7634 status = -ERESTARTSYS;
7635 }
7636 return status;
7637 }
7638
7639 #ifdef CONFIG_NFS_V4_1
7640 struct nfs4_lock_waiter {
7641 struct inode *inode;
7642 struct nfs_lowner owner;
7643 wait_queue_entry_t wait;
7644 };
7645
7646 static int
7647 nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key)
7648 {
7649 struct nfs4_lock_waiter *waiter =
7650 container_of(wait, struct nfs4_lock_waiter, wait);
7651
7652 /* NULL key means to wake up everyone */
7653 if (key) {
7654 struct cb_notify_lock_args *cbnl = key;
7655 struct nfs_lowner *lowner = &cbnl->cbnl_owner,
7656 *wowner = &waiter->owner;
7657
7658 /* Only wake if the callback was for the same owner. */
7659 if (lowner->id != wowner->id || lowner->s_dev != wowner->s_dev)
7660 return 0;
7661
7662 /* Make sure it's for the right inode */
7663 if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh))
7664 return 0;
7665 }
7666
7667 return woken_wake_function(wait, mode, flags, key);
7668 }
7669
7670 static int
7671 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7672 {
7673 struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
7674 struct nfs_server *server = NFS_SERVER(state->inode);
7675 struct nfs_client *clp = server->nfs_client;
7676 wait_queue_head_t *q = &clp->cl_lock_waitq;
7677 struct nfs4_lock_waiter waiter = {
7678 .inode = state->inode,
7679 .owner = { .clientid = clp->cl_clientid,
7680 .id = lsp->ls_seqid.owner_id,
7681 .s_dev = server->s_dev },
7682 };
7683 int status;
7684
7685 /* Don't bother with waitqueue if we don't expect a callback */
7686 if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags))
7687 return nfs4_retry_setlk_simple(state, cmd, request);
7688
7689 init_wait(&waiter.wait);
7690 waiter.wait.func = nfs4_wake_lock_waiter;
7691 add_wait_queue(q, &waiter.wait);
7692
7693 do {
7694 status = nfs4_proc_setlk(state, cmd, request);
7695 if (status != -EAGAIN || IS_SETLK(cmd))
7696 break;
7697
7698 status = -ERESTARTSYS;
7699 wait_woken(&waiter.wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE,
7700 NFS4_LOCK_MAXTIMEOUT);
7701 } while (!signalled());
7702
7703 remove_wait_queue(q, &waiter.wait);
7704
7705 return status;
7706 }
7707 #else /* !CONFIG_NFS_V4_1 */
7708 static inline int
7709 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7710 {
7711 return nfs4_retry_setlk_simple(state, cmd, request);
7712 }
7713 #endif
7714
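/*
 * VFS ->lock() entry point for NFSv4 files: dispatches F_GETLK queries,
 * unlocks and set-lock requests, checking the open mode and lock type
 * before any lock is taken.
 */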
7715 static int
7716 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
7717 {
7718 struct nfs_open_context *ctx;
7719 struct nfs4_state *state;
7720 int status;
7721
7722 /* verify open state */
7723 ctx = nfs_file_open_context(filp);
7724 state = ctx->state;
7725
7726 if (IS_GETLK(cmd)) {
7727 if (state != NULL)
7728 return nfs4_proc_getlk(state, F_GETLK, request);
7729 return 0;
7730 }
7731
7732 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
7733 return -EINVAL;
7734
7735 if (lock_is_unlock(request)) {
7736 if (state != NULL)
7737 return nfs4_proc_unlck(state, cmd, request);
7738 return 0;
7739 }
7740
7741 if (state == NULL)
7742 return -ENOLCK;
7743
7744 if ((request->c.flc_flags & FL_POSIX) &&
7745 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
7746 return -ENOLCK;
7747
7748 /*
7749 * Don't rely on the VFS having checked the file open mode,
7750 * since it won't do this for flock() locks.
7751 */
7752 switch (request->c.flc_type) {
7753 case F_RDLCK:
7754 if (!(filp->f_mode & FMODE_READ))
7755 return -EBADF;
7756 break;
7757 case F_WRLCK:
7758 if (!(filp->f_mode & FMODE_WRITE))
7759 return -EBADF;
7760 }
7761
7762 status = nfs4_set_lock_state(state, request);
7763 if (status != 0)
7764 return status;
7765
7766 return nfs4_retry_setlk(state, cmd, request);
7767 }
7768
7769 static int nfs4_delete_lease(struct file *file, void **priv)
7770 {
7771 return generic_setlease(file, F_UNLCK, NULL, priv);
7772 }
7773
7774 static int nfs4_add_lease(struct file *file, int arg, struct file_lease **lease,
7775 void **priv)
7776 {
7777 struct inode *inode = file_inode(file);
7778 fmode_t type = arg == F_RDLCK ? FMODE_READ : FMODE_WRITE;
7779 int ret;
7780
7781 /* No delegation, no lease */
7782 if (!nfs4_have_delegation(inode, type, 0))
7783 return -EAGAIN;
7784 ret = generic_setlease(file, arg, lease, priv);
7785 if (ret || nfs4_have_delegation(inode, type, 0))
7786 return ret;
7787 /* We raced with a delegation return */
7788 nfs4_delete_lease(file, priv);
7789 return -EAGAIN;
7790 }
7791
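/**
 * nfs4_proc_setlease - add or remove a lease on an NFSv4 file
 * @file: file on which the lease is to be set
 * @arg: F_RDLCK, F_WRLCK or F_UNLCK
 * @lease: lease to be installed
 * @priv: private data passed through to generic_setlease()
 *
 * A lease is only granted while a matching delegation is held; if the
 * delegation is returned while the lease is being set up, the lease is
 * torn down again and -EAGAIN is returned.
 */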
7792 int nfs4_proc_setlease(struct file *file, int arg, struct file_lease **lease,
7793 void **priv)
7794 {
7795 switch (arg) {
7796 case F_RDLCK:
7797 case F_WRLCK:
7798 return nfs4_add_lease(file, arg, lease, priv);
7799 case F_UNLCK:
7800 return nfs4_delete_lease(file, priv);
7801 default:
7802 return -EINVAL;
7803 }
7804 }
7805
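/**
 * nfs4_lock_delegation_recall - claim a locally cached lock on the server
 * @fl: lock to be established on the server
 * @state: open state that the lock belongs to
 * @stateid: stateid of the delegation being recalled
 *
 * Used when returning a delegation: locks that were only cached locally
 * under the delegation must be set on the server before the delegation
 * goes away. NFS4ERR_DELAY is retried after a one second sleep.
 */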
7806 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
7807 {
7808 struct nfs_server *server = NFS_SERVER(state->inode);
7809 int err;
7810
7811 err = nfs4_set_lock_state(state, fl);
7812 if (err != 0)
7813 return err;
7814 do {
7815 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
7816 if (err != -NFS4ERR_DELAY)
7817 break;
7818 ssleep(1);
7819 } while (err == -NFS4ERR_DELAY);
7820 return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err);
7821 }
7822
7823 struct nfs_release_lockowner_data {
7824 struct nfs4_lock_state *lsp;
7825 struct nfs_server *server;
7826 struct nfs_release_lockowner_args args;
7827 struct nfs_release_lockowner_res res;
7828 unsigned long timestamp;
7829 };
7830
7831 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
7832 {
7833 struct nfs_release_lockowner_data *data = calldata;
7834 struct nfs_server *server = data->server;
7835 nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
7836 &data->res.seq_res, task);
7837 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
7838 data->timestamp = jiffies;
7839 }
7840
7841 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
7842 {
7843 struct nfs_release_lockowner_data *data = calldata;
7844 struct nfs_server *server = data->server;
7845
7846 nfs40_sequence_done(task, &data->res.seq_res);
7847
7848 switch (task->tk_status) {
7849 case 0:
7850 renew_lease(server, data->timestamp);
7851 break;
7852 case -NFS4ERR_STALE_CLIENTID:
7853 case -NFS4ERR_EXPIRED:
7854 nfs4_schedule_lease_recovery(server->nfs_client);
7855 break;
7856 case -NFS4ERR_LEASE_MOVED:
7857 case -NFS4ERR_DELAY:
7858 if (nfs4_async_handle_error(task, server,
7859 NULL, NULL) == -EAGAIN)
7860 rpc_restart_call_prepare(task);
7861 }
7862 }
7863
7864 static void nfs4_release_lockowner_release(void *calldata)
7865 {
7866 struct nfs_release_lockowner_data *data = calldata;
7867 nfs4_free_lock_state(data->server, data->lsp);
7868 kfree(calldata);
7869 }
7870
7871 static const struct rpc_call_ops nfs4_release_lockowner_ops = {
7872 .rpc_call_prepare = nfs4_release_lockowner_prepare,
7873 .rpc_call_done = nfs4_release_lockowner_done,
7874 .rpc_release = nfs4_release_lockowner_release,
7875 };
7876
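/*
 * Tell an NFSv4.0 server that a lock owner is no longer in use, so it can
 * free the associated state. Minor versions other than zero do not need
 * this call. The request is asynchronous and best-effort; no status is
 * returned to the caller.
 */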
7877 static void
7878 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
7879 {
7880 struct nfs_release_lockowner_data *data;
7881 struct rpc_message msg = {
7882 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
7883 };
7884
7885 if (server->nfs_client->cl_mvops->minor_version != 0)
7886 return;
7887
7888 data = kmalloc(sizeof(*data), GFP_KERNEL);
7889 if (!data)
7890 return;
7891 data->lsp = lsp;
7892 data->server = server;
7893 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
7894 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
7895 data->args.lock_owner.s_dev = server->s_dev;
7896
7897 msg.rpc_argp = &data->args;
7898 msg.rpc_resp = &data->res;
7899 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
7900 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
7901 }
7902
7903 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
7904
7905 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler,
7906 struct mnt_idmap *idmap,
7907 struct dentry *unused, struct inode *inode,
7908 const char *key, const void *buf,
7909 size_t buflen, int flags)
7910 {
7911 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_ACL);
7912 }
7913
7914 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler,
7915 struct dentry *unused, struct inode *inode,
7916 const char *key, void *buf, size_t buflen)
7917 {
7918 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_ACL);
7919 }
7920
7921 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry)
7922 {
7923 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_ACL);
7924 }
7925
7926 #if defined(CONFIG_NFS_V4_1)
7927 #define XATTR_NAME_NFSV4_DACL "system.nfs4_dacl"
7928
7929 static int nfs4_xattr_set_nfs4_dacl(const struct xattr_handler *handler,
7930 struct mnt_idmap *idmap,
7931 struct dentry *unused, struct inode *inode,
7932 const char *key, const void *buf,
7933 size_t buflen, int flags)
7934 {
7935 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_DACL);
7936 }
7937
7938 static int nfs4_xattr_get_nfs4_dacl(const struct xattr_handler *handler,
7939 struct dentry *unused, struct inode *inode,
7940 const char *key, void *buf, size_t buflen)
7941 {
7942 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_DACL);
7943 }
7944
7945 static bool nfs4_xattr_list_nfs4_dacl(struct dentry *dentry)
7946 {
7947 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_DACL);
7948 }
7949
7950 #define XATTR_NAME_NFSV4_SACL "system.nfs4_sacl"
7951
7952 static int nfs4_xattr_set_nfs4_sacl(const struct xattr_handler *handler,
7953 struct mnt_idmap *idmap,
7954 struct dentry *unused, struct inode *inode,
7955 const char *key, const void *buf,
7956 size_t buflen, int flags)
7957 {
7958 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_SACL);
7959 }
7960
7961 static int nfs4_xattr_get_nfs4_sacl(const struct xattr_handler *handler,
7962 struct dentry *unused, struct inode *inode,
7963 const char *key, void *buf, size_t buflen)
7964 {
7965 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_SACL);
7966 }
7967
7968 static bool nfs4_xattr_list_nfs4_sacl(struct dentry *dentry)
7969 {
7970 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_SACL);
7971 }
7972
7973 #endif
7974
7975 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
7976
7977 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler,
7978 struct mnt_idmap *idmap,
7979 struct dentry *unused, struct inode *inode,
7980 const char *key, const void *buf,
7981 size_t buflen, int flags)
7982 {
7983 if (security_ismaclabel(key))
7984 return nfs4_set_security_label(inode, buf, buflen);
7985
7986 return -EOPNOTSUPP;
7987 }
7988
7989 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler,
7990 struct dentry *unused, struct inode *inode,
7991 const char *key, void *buf, size_t buflen)
7992 {
7993 if (security_ismaclabel(key))
7994 return nfs4_get_security_label(inode, buf, buflen);
7995 return -EOPNOTSUPP;
7996 }
7997
7998 static ssize_t
7999 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
8000 {
8001 int len = 0;
8002
8003 if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) {
8004 len = security_inode_listsecurity(inode, list, list_len);
8005 if (len >= 0 && list_len && len > list_len)
8006 return -ERANGE;
8007 }
8008 return len;
8009 }
8010
8011 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = {
8012 .prefix = XATTR_SECURITY_PREFIX,
8013 .get = nfs4_xattr_get_nfs4_label,
8014 .set = nfs4_xattr_set_nfs4_label,
8015 };
8016
8017 #else
8018
8019 static ssize_t
8020 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
8021 {
8022 return 0;
8023 }
8024
8025 #endif
8026
8027 #ifdef CONFIG_NFS_V4_2
8028 static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler,
8029 struct mnt_idmap *idmap,
8030 struct dentry *unused, struct inode *inode,
8031 const char *key, const void *buf,
8032 size_t buflen, int flags)
8033 {
8034 u32 mask;
8035 int ret;
8036
8037 if (!nfs_server_capable(inode, NFS_CAP_XATTR))
8038 return -EOPNOTSUPP;
8039
8040 /*
8041 * There is no mapping from the MAY_* flags to the NFS_ACCESS_XA*
8042 * flags right now. Handling of xattr operations uses the normal
8043 * file read/write permissions.
8044 *
8045 * Just in case the server has other ideas (which RFC 8276 allows),
8046 * do a cached access check for the XA* flags to possibly avoid
8047 * doing an RPC and getting EACCES back.
8048 */
8049 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
8050 if (!(mask & NFS_ACCESS_XAWRITE))
8051 return -EACCES;
8052 }
8053
8054 if (buf == NULL) {
8055 ret = nfs42_proc_removexattr(inode, key);
8056 if (!ret)
8057 nfs4_xattr_cache_remove(inode, key);
8058 } else {
8059 ret = nfs42_proc_setxattr(inode, key, buf, buflen, flags);
8060 if (!ret)
8061 nfs4_xattr_cache_add(inode, key, buf, NULL, buflen);
8062 }
8063
8064 return ret;
8065 }
8066
8067 static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler,
8068 struct dentry *unused, struct inode *inode,
8069 const char *key, void *buf, size_t buflen)
8070 {
8071 u32 mask;
8072 ssize_t ret;
8073
8074 if (!nfs_server_capable(inode, NFS_CAP_XATTR))
8075 return -EOPNOTSUPP;
8076
8077 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
8078 if (!(mask & NFS_ACCESS_XAREAD))
8079 return -EACCES;
8080 }
8081
8082 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
8083 if (ret)
8084 return ret;
8085
8086 ret = nfs4_xattr_cache_get(inode, key, buf, buflen);
8087 if (ret >= 0 || (ret < 0 && ret != -ENOENT))
8088 return ret;
8089
8090 ret = nfs42_proc_getxattr(inode, key, buf, buflen);
8091
8092 return ret;
8093 }
8094
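/*
 * List user extended attributes: answer from the xattr cache when it is
 * valid, otherwise page through LISTXATTRS requests until the server
 * reports EOF, and prime the cache with the result if a buffer was given.
 */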
8095 static ssize_t
8096 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len)
8097 {
8098 u64 cookie;
8099 bool eof;
8100 ssize_t ret, size;
8101 char *buf;
8102 size_t buflen;
8103 u32 mask;
8104
8105 if (!nfs_server_capable(inode, NFS_CAP_XATTR))
8106 return 0;
8107
8108 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
8109 if (!(mask & NFS_ACCESS_XALIST))
8110 return 0;
8111 }
8112
8113 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
8114 if (ret)
8115 return ret;
8116
8117 ret = nfs4_xattr_cache_list(inode, list, list_len);
8118 if (ret >= 0 || (ret < 0 && ret != -ENOENT))
8119 return ret;
8120
8121 cookie = 0;
8122 eof = false;
8123 buflen = list_len ? list_len : XATTR_LIST_MAX;
8124 buf = list_len ? list : NULL;
8125 size = 0;
8126
8127 while (!eof) {
8128 ret = nfs42_proc_listxattrs(inode, buf, buflen,
8129 &cookie, &eof);
8130 if (ret < 0)
8131 return ret;
8132
8133 if (list_len) {
8134 buf += ret;
8135 buflen -= ret;
8136 }
8137 size += ret;
8138 }
8139
8140 if (list_len)
8141 nfs4_xattr_cache_set_list(inode, list, size);
8142
8143 return size;
8144 }
8145
8146 #else
8147
8148 static ssize_t
8149 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len)
8150 {
8151 return 0;
8152 }
8153 #endif /* CONFIG_NFS_V4_2 */
8154
8155 /*
8156 * nfs_fhget will use either the mounted_on_fileid or the fileid
8157 */
8158 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
8159 {
8160 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
8161 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
8162 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
8163 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
8164 return;
8165
8166 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
8167 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
8168 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
8169 fattr->nlink = 2;
8170 }
8171
8172 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
8173 const struct qstr *name,
8174 struct nfs4_fs_locations *fs_locations,
8175 struct page *page)
8176 {
8177 struct nfs_server *server = NFS_SERVER(dir);
8178 u32 bitmask[3];
8179 struct nfs4_fs_locations_arg args = {
8180 .dir_fh = NFS_FH(dir),
8181 .name = name,
8182 .page = page,
8183 .bitmask = bitmask,
8184 };
8185 struct nfs4_fs_locations_res res = {
8186 .fs_locations = fs_locations,
8187 };
8188 struct rpc_message msg = {
8189 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
8190 .rpc_argp = &args,
8191 .rpc_resp = &res,
8192 };
8193 int status;
8194
8195 dprintk("%s: start\n", __func__);
8196
8197 bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS;
8198 bitmask[1] = nfs4_fattr_bitmap[1];
8199
8200 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
8201 * is not supported */
8202 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
8203 bitmask[0] &= ~FATTR4_WORD0_FILEID;
8204 else
8205 bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
8206
8207 nfs_fattr_init(fs_locations->fattr);
8208 fs_locations->server = server;
8209 fs_locations->nlocations = 0;
8210 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
8211 dprintk("%s: returned status = %d\n", __func__, status);
8212 return status;
8213 }
8214
8215 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
8216 const struct qstr *name,
8217 struct nfs4_fs_locations *fs_locations,
8218 struct page *page)
8219 {
8220 struct nfs4_exception exception = {
8221 .interruptible = true,
8222 };
8223 int err;
8224 do {
8225 err = _nfs4_proc_fs_locations(client, dir, name,
8226 fs_locations, page);
8227 trace_nfs4_get_fs_locations(dir, name, err);
8228 err = nfs4_handle_exception(NFS_SERVER(dir), err,
8229 &exception);
8230 } while (exception.retry);
8231 return err;
8232 }
8233
8234 /*
8235 * This operation also signals the server that this client is
8236 * performing migration recovery. The server can stop returning
8237 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is
8238 * appended to this compound to identify the client ID which is
8239 * performing recovery.
8240 */
8241 static int _nfs40_proc_get_locations(struct nfs_server *server,
8242 struct nfs_fh *fhandle,
8243 struct nfs4_fs_locations *locations,
8244 struct page *page, const struct cred *cred)
8245 {
8246 struct rpc_clnt *clnt = server->client;
8247 u32 bitmask[2] = {
8248 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
8249 };
8250 struct nfs4_fs_locations_arg args = {
8251 .clientid = server->nfs_client->cl_clientid,
8252 .fh = fhandle,
8253 .page = page,
8254 .bitmask = bitmask,
8255 .migration = 1, /* skip LOOKUP */
8256 .renew = 1, /* append RENEW */
8257 };
8258 struct nfs4_fs_locations_res res = {
8259 .fs_locations = locations,
8260 .migration = 1,
8261 .renew = 1,
8262 };
8263 struct rpc_message msg = {
8264 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
8265 .rpc_argp = &args,
8266 .rpc_resp = &res,
8267 .rpc_cred = cred,
8268 };
8269 unsigned long now = jiffies;
8270 int status;
8271
8272 nfs_fattr_init(locations->fattr);
8273 locations->server = server;
8274 locations->nlocations = 0;
8275
8276 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
8277 status = nfs4_call_sync_sequence(clnt, server, &msg,
8278 &args.seq_args, &res.seq_res);
8279 if (status)
8280 return status;
8281
8282 renew_lease(server, now);
8283 return 0;
8284 }
8285
8286 #ifdef CONFIG_NFS_V4_1
8287
8288 /*
8289 * This operation also signals the server that this client is
8290 * performing migration recovery. The server can stop asserting
8291 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID
8292 * performing this operation is identified in the SEQUENCE
8293 * operation in this compound.
8294 *
8295 * When the client supports GETATTR(fs_locations_info), it can
8296 * be plumbed in here.
8297 */
8298 static int _nfs41_proc_get_locations(struct nfs_server *server,
8299 struct nfs_fh *fhandle,
8300 struct nfs4_fs_locations *locations,
8301 struct page *page, const struct cred *cred)
8302 {
8303 struct rpc_clnt *clnt = server->client;
8304 u32 bitmask[2] = {
8305 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
8306 };
8307 struct nfs4_fs_locations_arg args = {
8308 .fh = fhandle,
8309 .page = page,
8310 .bitmask = bitmask,
8311 .migration = 1, /* skip LOOKUP */
8312 };
8313 struct nfs4_fs_locations_res res = {
8314 .fs_locations = locations,
8315 .migration = 1,
8316 };
8317 struct rpc_message msg = {
8318 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
8319 .rpc_argp = &args,
8320 .rpc_resp = &res,
8321 .rpc_cred = cred,
8322 };
8323 struct nfs4_call_sync_data data = {
8324 .seq_server = server,
8325 .seq_args = &args.seq_args,
8326 .seq_res = &res.seq_res,
8327 };
8328 struct rpc_task_setup task_setup_data = {
8329 .rpc_client = clnt,
8330 .rpc_message = &msg,
8331 .callback_ops = server->nfs_client->cl_mvops->call_sync_ops,
8332 .callback_data = &data,
8333 .flags = RPC_TASK_NO_ROUND_ROBIN,
8334 };
8335 int status;
8336
8337 nfs_fattr_init(locations->fattr);
8338 locations->server = server;
8339 locations->nlocations = 0;
8340
8341 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
8342 status = nfs4_call_sync_custom(&task_setup_data);
8343 if (status == NFS4_OK &&
8344 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
8345 status = -NFS4ERR_LEASE_MOVED;
8346 return status;
8347 }
8348
8349 #endif /* CONFIG_NFS_V4_1 */
8350
8351 /**
8352 * nfs4_proc_get_locations - discover locations for a migrated FSID
8353 * @server: pointer to nfs_server to process
8354 * @fhandle: pointer to the kernel NFS client file handle
8355 * @locations: result of query
8356 * @page: buffer
8357 * @cred: credential to use for this operation
8358 *
8359 * Returns NFS4_OK on success, a negative NFS4ERR status code if the
8360 * operation failed, or a negative errno if a local error occurred.
8361 *
8362 * On success, "locations" is filled in, but if the server has
8363 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not
8364 * asserted.
8365 *
8366 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases
8367 * from this client that require migration recovery.
8368 */
8369 int nfs4_proc_get_locations(struct nfs_server *server,
8370 struct nfs_fh *fhandle,
8371 struct nfs4_fs_locations *locations,
8372 struct page *page, const struct cred *cred)
8373 {
8374 struct nfs_client *clp = server->nfs_client;
8375 const struct nfs4_mig_recovery_ops *ops =
8376 clp->cl_mvops->mig_recovery_ops;
8377 struct nfs4_exception exception = {
8378 .interruptible = true,
8379 };
8380 int status;
8381
8382 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
8383 (unsigned long long)server->fsid.major,
8384 (unsigned long long)server->fsid.minor,
8385 clp->cl_hostname);
8386 nfs_display_fhandle(fhandle, __func__);
8387
8388 do {
8389 status = ops->get_locations(server, fhandle, locations, page,
8390 cred);
8391 if (status != -NFS4ERR_DELAY)
8392 break;
8393 nfs4_handle_exception(server, status, &exception);
8394 } while (exception.retry);
8395 return status;
8396 }
8397
8398 /*
8399 * This operation also signals the server that this client is
8400 * performing "lease moved" recovery. The server can stop
8401 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation
8402 * is appended to this compound to identify the client ID which is
8403 * performing recovery.
8404 */
8405 static int _nfs40_proc_fsid_present(struct inode *inode, const struct cred *cred)
8406 {
8407 struct nfs_server *server = NFS_SERVER(inode);
8408 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
8409 struct rpc_clnt *clnt = server->client;
8410 struct nfs4_fsid_present_arg args = {
8411 .fh = NFS_FH(inode),
8412 .clientid = clp->cl_clientid,
8413 .renew = 1, /* append RENEW */
8414 };
8415 struct nfs4_fsid_present_res res = {
8416 .renew = 1,
8417 };
8418 struct rpc_message msg = {
8419 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
8420 .rpc_argp = &args,
8421 .rpc_resp = &res,
8422 .rpc_cred = cred,
8423 };
8424 unsigned long now = jiffies;
8425 int status;
8426
8427 res.fh = nfs_alloc_fhandle();
8428 if (res.fh == NULL)
8429 return -ENOMEM;
8430
8431 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
8432 status = nfs4_call_sync_sequence(clnt, server, &msg,
8433 &args.seq_args, &res.seq_res);
8434 nfs_free_fhandle(res.fh);
8435 if (status)
8436 return status;
8437
8438 do_renew_lease(clp, now);
8439 return 0;
8440 }
8441
8442 #ifdef CONFIG_NFS_V4_1
8443
8444 /*
8445 * This operation also signals the server that this client is
8446 * performing "lease moved" recovery. The server can stop asserting
8447 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing
8448 * this operation is identified in the SEQUENCE operation in this
8449 * compound.
8450 */
8451 static int _nfs41_proc_fsid_present(struct inode *inode, const struct cred *cred)
8452 {
8453 struct nfs_server *server = NFS_SERVER(inode);
8454 struct rpc_clnt *clnt = server->client;
8455 struct nfs4_fsid_present_arg args = {
8456 .fh = NFS_FH(inode),
8457 };
8458 struct nfs4_fsid_present_res res = {
8459 };
8460 struct rpc_message msg = {
8461 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
8462 .rpc_argp = &args,
8463 .rpc_resp = &res,
8464 .rpc_cred = cred,
8465 };
8466 int status;
8467
8468 res.fh = nfs_alloc_fhandle();
8469 if (res.fh == NULL)
8470 return -ENOMEM;
8471
8472 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
8473 status = nfs4_call_sync_sequence(clnt, server, &msg,
8474 &args.seq_args, &res.seq_res);
8475 nfs_free_fhandle(res.fh);
8476 if (status == NFS4_OK &&
8477 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
8478 status = -NFS4ERR_LEASE_MOVED;
8479 return status;
8480 }
8481
8482 #endif /* CONFIG_NFS_V4_1 */
8483
8484 /**
8485 * nfs4_proc_fsid_present - Is this FSID present or absent on server?
8486 * @inode: inode on FSID to check
8487 * @cred: credential to use for this operation
8488 *
8489 * Server indicates whether the FSID is present, moved, or not
8490 * recognized. This operation is necessary to clear a LEASE_MOVED
8491 * condition for this client ID.
8492 *
8493 * Returns NFS4_OK if the FSID is present on this server,
8494 * -NFS4ERR_MOVED if the FSID is no longer present, a negative
8495 * NFS4ERR code if some error occurred on the server, or a
8496 * negative errno if a local failure occurred.
8497 */
8498 int nfs4_proc_fsid_present(struct inode *inode, const struct cred *cred)
8499 {
8500 struct nfs_server *server = NFS_SERVER(inode);
8501 struct nfs_client *clp = server->nfs_client;
8502 const struct nfs4_mig_recovery_ops *ops =
8503 clp->cl_mvops->mig_recovery_ops;
8504 struct nfs4_exception exception = {
8505 .interruptible = true,
8506 };
8507 int status;
8508
8509 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
8510 (unsigned long long)server->fsid.major,
8511 (unsigned long long)server->fsid.minor,
8512 clp->cl_hostname);
8513 nfs_display_fhandle(NFS_FH(inode), __func__);
8514
8515 do {
8516 status = ops->fsid_present(inode, cred);
8517 if (status != -NFS4ERR_DELAY)
8518 break;
8519 nfs4_handle_exception(server, status, &exception);
8520 } while (exception.retry);
8521 return status;
8522 }
8523
8524 /*
8525 * If 'use_integrity' is true and the state management nfs_client
8526 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
8527 * and the machine credential as per RFC3530bis and RFC5661 Security
8528 * Considerations sections. Otherwise, just use the user cred with the
8529 * filesystem's rpc_client.
8530 */
8531 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity)
8532 {
8533 int status;
8534 struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
8535 struct nfs_client *clp = NFS_SERVER(dir)->nfs_client;
8536 struct nfs4_secinfo_arg args = {
8537 .dir_fh = NFS_FH(dir),
8538 .name = name,
8539 };
8540 struct nfs4_secinfo_res res = {
8541 .flavors = flavors,
8542 };
8543 struct rpc_message msg = {
8544 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
8545 .rpc_argp = &args,
8546 .rpc_resp = &res,
8547 };
8548 struct nfs4_call_sync_data data = {
8549 .seq_server = NFS_SERVER(dir),
8550 .seq_args = &args.seq_args,
8551 .seq_res = &res.seq_res,
8552 };
8553 struct rpc_task_setup task_setup = {
8554 .rpc_client = clnt,
8555 .rpc_message = &msg,
8556 .callback_ops = clp->cl_mvops->call_sync_ops,
8557 .callback_data = &data,
8558 .flags = RPC_TASK_NO_ROUND_ROBIN,
8559 };
8560 const struct cred *cred = NULL;
8561
8562 if (use_integrity) {
8563 clnt = clp->cl_rpcclient;
8564 task_setup.rpc_client = clnt;
8565
8566 cred = nfs4_get_clid_cred(clp);
8567 msg.rpc_cred = cred;
8568 }
8569
8570 dprintk("NFS call secinfo %s\n", name->name);
8571
8572 nfs4_state_protect(clp, NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
8573 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
8574 status = nfs4_call_sync_custom(&task_setup);
8575
8576 dprintk("NFS reply secinfo: %d\n", status);
8577
8578 put_cred(cred);
8579 return status;
8580 }
8581
8582 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
8583 struct nfs4_secinfo_flavors *flavors)
8584 {
8585 struct nfs4_exception exception = {
8586 .interruptible = true,
8587 };
8588 int err;
8589 do {
8590 err = -NFS4ERR_WRONGSEC;
8591
8592 /* try to use integrity protection with machine cred */
8593 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client))
8594 err = _nfs4_proc_secinfo(dir, name, flavors, true);
8595
8596 /*
8597 * if unable to use integrity protection, or SECINFO with
8598 * integrity protection returns NFS4ERR_WRONGSEC (which is
8599 * disallowed by spec, but exists in deployed servers) use
8600 * the current filesystem's rpc_client and the user cred.
8601 */
8602 if (err == -NFS4ERR_WRONGSEC)
8603 err = _nfs4_proc_secinfo(dir, name, flavors, false);
8604
8605 trace_nfs4_secinfo(dir, name, err);
8606 err = nfs4_handle_exception(NFS_SERVER(dir), err,
8607 &exception);
8608 } while (exception.retry);
8609 return err;
8610 }
8611
8612 #ifdef CONFIG_NFS_V4_1
8613 /*
8614 * Check the exchange flags returned by the server for invalid conditions:
8615 * unknown flag bits, both the PNFS and NON_PNFS flags set, or none of the
8616 * NON_PNFS, PNFS, or DS flags set.
8617 */
8618 static int nfs4_check_cl_exchange_flags(u32 flags, u32 version)
8619 {
8620 if (version >= 2 && (flags & ~EXCHGID4_2_FLAG_MASK_R))
8621 goto out_inval;
8622 else if (version < 2 && (flags & ~EXCHGID4_FLAG_MASK_R))
8623 goto out_inval;
8624 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
8625 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
8626 goto out_inval;
8627 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
8628 goto out_inval;
8629 return NFS_OK;
8630 out_inval:
8631 return -NFS4ERR_INVAL;
8632 }
8633
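/*
 * Compare two server_scope values returned by EXCHANGE_ID. They match
 * only when both the length and the opaque scope data are identical.
 */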
8634 static bool
8635 nfs41_same_server_scope(struct nfs41_server_scope *a,
8636 struct nfs41_server_scope *b)
8637 {
8638 if (a->server_scope_sz != b->server_scope_sz)
8639 return false;
8640 return memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0;
8641 }
8642
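/*
 * Completion callback for BIND_CONN_TO_SESSION. On BADSESSION/DEADSESSION
 * errors, session recovery is scheduled. If we asked to bind both the fore
 * and back channels but the server bound only one direction, the connection
 * is closed and the call is retried a bounded number of times.
 */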
8643 static void
8644 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
8645 {
8646 struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp;
8647 struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp;
8648 struct nfs_client *clp = args->client;
8649
8650 switch (task->tk_status) {
8651 case -NFS4ERR_BADSESSION:
8652 case -NFS4ERR_DEADSESSION:
8653 nfs4_schedule_session_recovery(clp->cl_session,
8654 task->tk_status);
8655 return;
8656 }
8657 if (args->dir == NFS4_CDFC4_FORE_OR_BOTH &&
8658 res->dir != NFS4_CDFS4_BOTH) {
8659 rpc_task_close_connection(task);
8660 if (args->retries++ < MAX_BIND_CONN_TO_SESSION_RETRIES)
8661 rpc_restart_call(task);
8662 }
8663 }
8664
8665 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = {
8666 .rpc_call_done = nfs4_bind_one_conn_to_session_done,
8667 };
8668
8669 /*
8670 * nfs4_proc_bind_one_conn_to_session()
8671 *
8672 * The 4.1 client currently uses the same TCP connection for the
8673 * fore and backchannel.
8674 */
8675 static
8676 int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt,
8677 struct rpc_xprt *xprt,
8678 struct nfs_client *clp,
8679 const struct cred *cred)
8680 {
8681 int status;
8682 struct nfs41_bind_conn_to_session_args args = {
8683 .client = clp,
8684 .dir = NFS4_CDFC4_FORE_OR_BOTH,
8685 .retries = 0,
8686 };
8687 struct nfs41_bind_conn_to_session_res res;
8688 struct rpc_message msg = {
8689 .rpc_proc =
8690 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
8691 .rpc_argp = &args,
8692 .rpc_resp = &res,
8693 .rpc_cred = cred,
8694 };
8695 struct rpc_task_setup task_setup_data = {
8696 .rpc_client = clnt,
8697 .rpc_xprt = xprt,
8698 .callback_ops = &nfs4_bind_one_conn_to_session_ops,
8699 .rpc_message = &msg,
8700 .flags = RPC_TASK_TIMEOUT,
8701 };
8702 struct rpc_task *task;
8703
8704 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
8705 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
8706 args.dir = NFS4_CDFC4_FORE;
8707
8708 /* Do not set the backchannel flag unless this is clnt->cl_xprt */
8709 if (xprt != rcu_access_pointer(clnt->cl_xprt))
8710 args.dir = NFS4_CDFC4_FORE;
8711
8712 task = rpc_run_task(&task_setup_data);
8713 if (!IS_ERR(task)) {
8714 status = task->tk_status;
8715 rpc_put_task(task);
8716 } else
8717 status = PTR_ERR(task);
8718 trace_nfs4_bind_conn_to_session(clp, status);
8719 if (status == 0) {
8720 if (memcmp(res.sessionid.data,
8721 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
8722 dprintk("NFS: %s: Session ID mismatch\n", __func__);
8723 return -EIO;
8724 }
8725 if ((res.dir & args.dir) != res.dir || res.dir == 0) {
8726 dprintk("NFS: %s: Unexpected direction from server\n",
8727 __func__);
8728 return -EIO;
8729 }
8730 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
8731 dprintk("NFS: %s: Server returned RDMA mode = true\n",
8732 __func__);
8733 return -EIO;
8734 }
8735 }
8736
8737 return status;
8738 }
8739
8740 struct rpc_bind_conn_calldata {
8741 struct nfs_client *clp;
8742 const struct cred *cred;
8743 };
8744
8745 static int
8746 nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt,
8747 struct rpc_xprt *xprt,
8748 void *calldata)
8749 {
8750 struct rpc_bind_conn_calldata *p = calldata;
8751
8752 return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred);
8753 }
8754
8755 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, const struct cred *cred)
8756 {
8757 struct rpc_bind_conn_calldata data = {
8758 .clp = clp,
8759 .cred = cred,
8760 };
8761 return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient,
8762 nfs4_proc_bind_conn_to_session_callback, &data);
8763 }
8764
8765 /*
8766 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map,
8767 * plus the operations we'd like to see in the allow map to enable certain features.
8768 */
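/*
 * Each op map is an array of 32-bit words: operations numbered 0-31 are
 * encoded in word 0 as (1 << op), and operations numbered 32-63 in word 1
 * as (1 << (op - 32)), matching the bitmap layout checked with test_bit()
 * in nfs4_sp4_select_mode() below.
 */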
8769 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = {
8770 .how = SP4_MACH_CRED,
8771 .enforce.u.words = {
8772 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
8773 1 << (OP_EXCHANGE_ID - 32) |
8774 1 << (OP_CREATE_SESSION - 32) |
8775 1 << (OP_DESTROY_SESSION - 32) |
8776 1 << (OP_DESTROY_CLIENTID - 32)
8777 },
8778 .allow.u.words = {
8779 [0] = 1 << (OP_CLOSE) |
8780 1 << (OP_OPEN_DOWNGRADE) |
8781 1 << (OP_LOCKU) |
8782 1 << (OP_DELEGRETURN) |
8783 1 << (OP_COMMIT),
8784 [1] = 1 << (OP_SECINFO - 32) |
8785 1 << (OP_SECINFO_NO_NAME - 32) |
8786 1 << (OP_LAYOUTRETURN - 32) |
8787 1 << (OP_TEST_STATEID - 32) |
8788 1 << (OP_FREE_STATEID - 32) |
8789 1 << (OP_WRITE - 32)
8790 }
8791 };
8792
8793 /*
8794 * Select the state protection mode for client `clp' given the server results
8795 * from exchange_id in `sp'.
8796 *
8797 * Returns 0 on success, negative errno otherwise.
8798 */
8799 static int nfs4_sp4_select_mode(struct nfs_client *clp,
8800 struct nfs41_state_protection *sp)
8801 {
8802 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = {
8803 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
8804 1 << (OP_EXCHANGE_ID - 32) |
8805 1 << (OP_CREATE_SESSION - 32) |
8806 1 << (OP_DESTROY_SESSION - 32) |
8807 1 << (OP_DESTROY_CLIENTID - 32)
8808 };
8809 unsigned long flags = 0;
8810 unsigned int i;
8811 int ret = 0;
8812
8813 if (sp->how == SP4_MACH_CRED) {
8814 /* Print state protect result */
8815 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n");
8816 for (i = 0; i <= LAST_NFS4_OP; i++) {
8817 if (test_bit(i, sp->enforce.u.longs))
8818 dfprintk(MOUNT, " enforce op %d\n", i);
8819 if (test_bit(i, sp->allow.u.longs))
8820 dfprintk(MOUNT, " allow op %d\n", i);
8821 }
8822
8823 /* make sure nothing is on enforce list that isn't supported */
8824 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) {
8825 if (sp->enforce.u.words[i] & ~supported_enforce[i]) {
8826 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
8827 ret = -EINVAL;
8828 goto out;
8829 }
8830 }
8831
8832 /*
8833 * Minimal mode - state operations are allowed to use machine
8834 * credential. Note this already happens by default, so the
8835 * client doesn't have to do anything more than the negotiation.
8836 *
8837 * NOTE: we don't care if EXCHANGE_ID is in the list -
8838 * we're already using the machine cred for exchange_id
8839 * and will never use a different cred.
8840 */
8841 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) &&
8842 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) &&
8843 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) &&
8844 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) {
8845 dfprintk(MOUNT, "sp4_mach_cred:\n");
8846 dfprintk(MOUNT, " minimal mode enabled\n");
8847 __set_bit(NFS_SP4_MACH_CRED_MINIMAL, &flags);
8848 } else {
8849 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
8850 ret = -EINVAL;
8851 goto out;
8852 }
8853
8854 if (test_bit(OP_CLOSE, sp->allow.u.longs) &&
8855 test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) &&
8856 test_bit(OP_DELEGRETURN, sp->allow.u.longs) &&
8857 test_bit(OP_LOCKU, sp->allow.u.longs)) {
8858 dfprintk(MOUNT, " cleanup mode enabled\n");
8859 __set_bit(NFS_SP4_MACH_CRED_CLEANUP, &flags);
8860 }
8861
8862 if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) {
8863 dfprintk(MOUNT, " pnfs cleanup mode enabled\n");
8864 __set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP, &flags);
8865 }
8866
8867 if (test_bit(OP_SECINFO, sp->allow.u.longs) &&
8868 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) {
8869 dfprintk(MOUNT, " secinfo mode enabled\n");
8870 __set_bit(NFS_SP4_MACH_CRED_SECINFO, &flags);
8871 }
8872
8873 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) &&
8874 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) {
8875 dfprintk(MOUNT, " stateid mode enabled\n");
8876 __set_bit(NFS_SP4_MACH_CRED_STATEID, &flags);
8877 }
8878
8879 if (test_bit(OP_WRITE, sp->allow.u.longs)) {
8880 dfprintk(MOUNT, " write mode enabled\n");
8881 __set_bit(NFS_SP4_MACH_CRED_WRITE, &flags);
8882 }
8883
8884 if (test_bit(OP_COMMIT, sp->allow.u.longs)) {
8885 dfprintk(MOUNT, " commit mode enabled\n");
8886 __set_bit(NFS_SP4_MACH_CRED_COMMIT, &flags);
8887 }
8888 }
8889 out:
8890 clp->cl_sp4_flags = flags;
8891 return ret;
8892 }
8893
8894 struct nfs41_exchange_id_data {
8895 struct nfs41_exchange_id_res res;
8896 struct nfs41_exchange_id_args args;
8897 };
8898
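/*
 * Free the EXCHANGE_ID call data allocated in nfs4_run_exchange_id(),
 * dropping the nfs_client reference taken when the task was set up.
 */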
8899 static void nfs4_exchange_id_release(void *data)
8900 {
8901 struct nfs41_exchange_id_data *cdata =
8902 (struct nfs41_exchange_id_data *)data;
8903
8904 nfs_put_client(cdata->args.client);
8905 kfree(cdata->res.impl_id);
8906 kfree(cdata->res.server_scope);
8907 kfree(cdata->res.server_owner);
8908 kfree(cdata);
8909 }
8910
8911 static const struct rpc_call_ops nfs4_exchange_id_call_ops = {
8912 .rpc_release = nfs4_exchange_id_release,
8913 };
8914
8915 /*
8916 * nfs4_run_exchange_id()
8917 *
8918 * Set up and start an asynchronous EXCHANGE_ID task.
8919 */
8920 static struct rpc_task *
8921 nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred,
8922 u32 sp4_how, struct rpc_xprt *xprt)
8923 {
8924 struct rpc_message msg = {
8925 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
8926 .rpc_cred = cred,
8927 };
8928 struct rpc_task_setup task_setup_data = {
8929 .rpc_client = clp->cl_rpcclient,
8930 .callback_ops = &nfs4_exchange_id_call_ops,
8931 .rpc_message = &msg,
8932 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
8933 };
8934 struct nfs41_exchange_id_data *calldata;
8935 int status;
8936
8937 if (!refcount_inc_not_zero(&clp->cl_count))
8938 return ERR_PTR(-EIO);
8939
8940 status = -ENOMEM;
8941 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
8942 if (!calldata)
8943 goto out;
8944
8945 nfs4_init_boot_verifier(clp, &calldata->args.verifier);
8946
8947 status = nfs4_init_uniform_client_string(clp);
8948 if (status)
8949 goto out_calldata;
8950
8951 calldata->res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
8952 GFP_NOFS);
8953 status = -ENOMEM;
8954 if (unlikely(calldata->res.server_owner == NULL))
8955 goto out_calldata;
8956
8957 calldata->res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
8958 GFP_NOFS);
8959 if (unlikely(calldata->res.server_scope == NULL))
8960 goto out_server_owner;
8961
8962 calldata->res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
8963 if (unlikely(calldata->res.impl_id == NULL))
8964 goto out_server_scope;
8965
8966 switch (sp4_how) {
8967 case SP4_NONE:
8968 calldata->args.state_protect.how = SP4_NONE;
8969 break;
8970
8971 case SP4_MACH_CRED:
8972 calldata->args.state_protect = nfs4_sp4_mach_cred_request;
8973 break;
8974
8975 default:
8976 /* unsupported! */
8977 WARN_ON_ONCE(1);
8978 status = -EINVAL;
8979 goto out_impl_id;
8980 }
8981 if (xprt) {
8982 task_setup_data.rpc_xprt = xprt;
8983 task_setup_data.flags |= RPC_TASK_SOFTCONN;
8984 memcpy(calldata->args.verifier.data, clp->cl_confirm.data,
8985 sizeof(calldata->args.verifier.data));
8986 }
8987 calldata->args.client = clp;
8988 calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
8989 EXCHGID4_FLAG_BIND_PRINC_STATEID;
8990 #ifdef CONFIG_NFS_V4_1_MIGRATION
8991 calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR;
8992 #endif
8993 if (test_bit(NFS_CS_PNFS, &clp->cl_flags))
8994 calldata->args.flags |= EXCHGID4_FLAG_USE_PNFS_DS;
8995 msg.rpc_argp = &calldata->args;
8996 msg.rpc_resp = &calldata->res;
8997 task_setup_data.callback_data = calldata;
8998
8999 return rpc_run_task(&task_setup_data);
9000
9001 out_impl_id:
9002 kfree(calldata->res.impl_id);
9003 out_server_scope:
9004 kfree(calldata->res.server_scope);
9005 out_server_owner:
9006 kfree(calldata->res.server_owner);
9007 out_calldata:
9008 kfree(calldata);
9009 out:
9010 nfs_put_client(clp);
9011 return ERR_PTR(status);
9012 }
9013
9014 /*
9015 * _nfs4_proc_exchange_id()
9016 *
9017 * Wrapper for EXCHANGE_ID operation.
9018 */
9019 static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred,
9020 u32 sp4_how)
9021 {
9022 struct rpc_task *task;
9023 struct nfs41_exchange_id_args *argp;
9024 struct nfs41_exchange_id_res *resp;
9025 unsigned long now = jiffies;
9026 int status;
9027
9028 task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL);
9029 if (IS_ERR(task))
9030 return PTR_ERR(task);
9031
9032 argp = task->tk_msg.rpc_argp;
9033 resp = task->tk_msg.rpc_resp;
9034 status = task->tk_status;
9035 if (status != 0)
9036 goto out;
9037
9038 status = nfs4_check_cl_exchange_flags(resp->flags,
9039 clp->cl_mvops->minor_version);
9040 if (status != 0)
9041 goto out;
9042
9043 status = nfs4_sp4_select_mode(clp, &resp->state_protect);
9044 if (status != 0)
9045 goto out;
9046
9047 do_renew_lease(clp, now);
9048
9049 clp->cl_clientid = resp->clientid;
9050 clp->cl_exchange_flags = resp->flags;
9051 clp->cl_seqid = resp->seqid;
9052 /* Client ID is not confirmed */
9053 if (!(resp->flags & EXCHGID4_FLAG_CONFIRMED_R))
9054 clear_bit(NFS4_SESSION_ESTABLISHED,
9055 &clp->cl_session->session_state);
9056
9057 if (clp->cl_serverscope != NULL &&
9058 !nfs41_same_server_scope(clp->cl_serverscope,
9059 resp->server_scope)) {
9060 dprintk("%s: server_scope mismatch detected\n",
9061 __func__);
9062 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
9063 }
9064
9065 swap(clp->cl_serverowner, resp->server_owner);
9066 swap(clp->cl_serverscope, resp->server_scope);
9067 swap(clp->cl_implid, resp->impl_id);
9068
9069 /* Save the EXCHANGE_ID verifier for session trunking tests */
9070 memcpy(clp->cl_confirm.data, argp->verifier.data,
9071 sizeof(clp->cl_confirm.data));
9072 out:
9073 trace_nfs4_exchange_id(clp, status);
9074 rpc_put_task(task);
9075 return status;
9076 }
9077
9078 /*
9079 * nfs4_proc_exchange_id()
9080 *
9081 * Returns zero, a negative errno, or a negative NFS4ERR status code.
9082 *
9083 * Since the clientid has expired, all compounds using sessions
9084 * associated with the stale clientid will be returning
9085 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
9086 * be in some phase of session reset.
9087 *
9088 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used.
9089 */
9090 int nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred)
9091 {
9092 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
9093 int status;
9094
9095 /* try SP4_MACH_CRED if krb5i/p */
9096 if (authflavor == RPC_AUTH_GSS_KRB5I ||
9097 authflavor == RPC_AUTH_GSS_KRB5P) {
9098 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
9099 if (!status)
9100 return 0;
9101 }
9102
9103 /* try SP4_NONE */
9104 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
9105 }
9106
9107 /**
9108 * nfs4_test_session_trunk - test an rpc_xprt for session trunking
9109 *
9110 * This is an add_xprt_test() test function called from
9111 * rpc_clnt_setup_test_and_add_xprt.
9112 *
9113 * The rpc_xprt_switch is referenced by rpc_clnt_setup_test_and_add_xprt
9114 * and is dereferenced in nfs4_exchange_id_release.
9115 *
9116 * Upon success, add the new transport to the rpc_clnt
9117 *
9118 * @clnt: struct rpc_clnt to get new transport
9119 * @xprt: the rpc_xprt to test
9120 * @data: call data for _nfs4_proc_exchange_id.
9121 */
9122 void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
9123 void *data)
9124 {
9125 struct nfs4_add_xprt_data *adata = data;
9126 struct rpc_task *task;
9127 int status;
9128
9129 u32 sp4_how;
9130
9131 dprintk("--> %s try %s\n", __func__,
9132 xprt->address_strings[RPC_DISPLAY_ADDR]);
9133
9134 sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED);
9135
9136 try_again:
9137 /* Test connection for session trunking. Async exchange_id call */
9138 task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt);
9139 if (IS_ERR(task))
9140 return;
9141
9142 status = task->tk_status;
9143 if (status == 0) {
9144 status = nfs4_detect_session_trunking(adata->clp,
9145 task->tk_msg.rpc_resp, xprt);
9146 trace_nfs4_trunked_exchange_id(adata->clp,
9147 xprt->address_strings[RPC_DISPLAY_ADDR], status);
9148 }
9149 if (status == 0)
9150 rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
9151 else if (status != -NFS4ERR_DELAY && rpc_clnt_xprt_switch_has_addr(clnt,
9152 (struct sockaddr *)&xprt->addr))
9153 rpc_clnt_xprt_switch_remove_xprt(clnt, xprt);
9154
9155 rpc_put_task(task);
9156 if (status == -NFS4ERR_DELAY) {
9157 ssleep(1);
9158 goto try_again;
9159 }
9160 }
9161 EXPORT_SYMBOL_GPL(nfs4_test_session_trunk);
9162
9163 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
9164 const struct cred *cred)
9165 {
9166 struct rpc_message msg = {
9167 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
9168 .rpc_argp = clp,
9169 .rpc_cred = cred,
9170 };
9171 int status;
9172
9173 status = rpc_call_sync(clp->cl_rpcclient, &msg,
9174 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
9175 trace_nfs4_destroy_clientid(clp, status);
9176 if (status)
9177 dprintk("NFS: Got error %d from the server %s on "
9178 "DESTROY_CLIENTID.", status, clp->cl_hostname);
9179 return status;
9180 }
9181
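/*
 * Retry DESTROY_CLIENTID while the server returns NFS4ERR_DELAY or
 * NFS4ERR_CLIENTID_BUSY, sleeping one second between attempts, up to
 * NFS4_MAX_LOOP_ON_RECOVER tries.
 */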
9182 static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
9183 const struct cred *cred)
9184 {
9185 unsigned int loop;
9186 int ret;
9187
9188 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
9189 ret = _nfs4_proc_destroy_clientid(clp, cred);
9190 switch (ret) {
9191 case -NFS4ERR_DELAY:
9192 case -NFS4ERR_CLIENTID_BUSY:
9193 ssleep(1);
9194 break;
9195 default:
9196 return ret;
9197 }
9198 }
9199 return 0;
9200 }
9201
9202 int nfs4_destroy_clientid(struct nfs_client *clp)
9203 {
9204 const struct cred *cred;
9205 int ret = 0;
9206
9207 if (clp->cl_mvops->minor_version < 1)
9208 goto out;
9209 if (clp->cl_exchange_flags == 0)
9210 goto out;
9211 if (clp->cl_preserve_clid)
9212 goto out;
9213 cred = nfs4_get_clid_cred(clp);
9214 ret = nfs4_proc_destroy_clientid(clp, cred);
9215 put_cred(cred);
9216 switch (ret) {
9217 case 0:
9218 case -NFS4ERR_STALE_CLIENTID:
9219 clp->cl_exchange_flags = 0;
9220 }
9221 out:
9222 return ret;
9223 }
9224
9225 #endif /* CONFIG_NFS_V4_1 */
9226
9227 struct nfs4_get_lease_time_data {
9228 struct nfs4_get_lease_time_args *args;
9229 struct nfs4_get_lease_time_res *res;
9230 struct nfs_client *clp;
9231 };
9232
9233 static void nfs4_get_lease_time_prepare(struct rpc_task *task,
9234 void *calldata)
9235 {
9236 struct nfs4_get_lease_time_data *data =
9237 (struct nfs4_get_lease_time_data *)calldata;
9238
9239 /* just set up the sequence; do not trigger session recovery,
9240 since we're invoked from within one */
9241 nfs4_setup_sequence(data->clp,
9242 &data->args->la_seq_args,
9243 &data->res->lr_seq_res,
9244 task);
9245 }
9246
9247 /*
9248 * Called from nfs4_state_manager thread for session setup, so don't recover
9249 * from sequence operation or clientid errors.
9250 */
9251 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
9252 {
9253 struct nfs4_get_lease_time_data *data =
9254 (struct nfs4_get_lease_time_data *)calldata;
9255
9256 if (!nfs4_sequence_done(task, &data->res->lr_seq_res))
9257 return;
9258 switch (task->tk_status) {
9259 case -NFS4ERR_DELAY:
9260 case -NFS4ERR_GRACE:
9261 rpc_delay(task, NFS4_POLL_RETRY_MIN);
9262 task->tk_status = 0;
9263 fallthrough;
9264 case -NFS4ERR_RETRY_UNCACHED_REP:
9265 rpc_restart_call_prepare(task);
9266 return;
9267 }
9268 }
9269
9270 static const struct rpc_call_ops nfs4_get_lease_time_ops = {
9271 .rpc_call_prepare = nfs4_get_lease_time_prepare,
9272 .rpc_call_done = nfs4_get_lease_time_done,
9273 };
9274
9275 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
9276 {
9277 struct nfs4_get_lease_time_args args;
9278 struct nfs4_get_lease_time_res res = {
9279 .lr_fsinfo = fsinfo,
9280 };
9281 struct nfs4_get_lease_time_data data = {
9282 .args = &args,
9283 .res = &res,
9284 .clp = clp,
9285 };
9286 struct rpc_message msg = {
9287 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
9288 .rpc_argp = &args,
9289 .rpc_resp = &res,
9290 };
9291 struct rpc_task_setup task_setup = {
9292 .rpc_client = clp->cl_rpcclient,
9293 .rpc_message = &msg,
9294 .callback_ops = &nfs4_get_lease_time_ops,
9295 .callback_data = &data,
9296 .flags = RPC_TASK_TIMEOUT,
9297 };
9298
9299 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0, 1);
9300 return nfs4_call_sync_custom(&task_setup);
9301 }
9302
9303 #ifdef CONFIG_NFS_V4_1
9304
9305 /*
9306 * Initialize the values to be used by the client in CREATE_SESSION.
9307 * If nfs4_init_session set the fore channel request and response sizes,
9308 * use them.
9309 *
9310 * Set the back channel max_resp_sz_cached to zero to force the client to
9311 * always set csa_cachethis to FALSE because the current implementation
9312 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
9313 */
9314 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args,
9315 struct rpc_clnt *clnt)
9316 {
9317 unsigned int max_rqst_sz, max_resp_sz;
9318 unsigned int max_bc_payload = rpc_max_bc_payload(clnt);
9319 unsigned int max_bc_slots = rpc_num_bc_slots(clnt);
9320
9321 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
9322 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;
9323
9324 /* Fore channel attributes */
9325 args->fc_attrs.max_rqst_sz = max_rqst_sz;
9326 args->fc_attrs.max_resp_sz = max_resp_sz;
9327 args->fc_attrs.max_ops = NFS4_MAX_OPS;
9328 args->fc_attrs.max_reqs = max_session_slots;
9329
9330 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
9331 "max_ops=%u max_reqs=%u\n",
9332 __func__,
9333 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
9334 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
9335
9336 /* Back channel attributes */
9337 args->bc_attrs.max_rqst_sz = max_bc_payload;
9338 args->bc_attrs.max_resp_sz = max_bc_payload;
9339 args->bc_attrs.max_resp_sz_cached = 0;
9340 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
9341 args->bc_attrs.max_reqs = max_t(unsigned short, max_session_cb_slots, 1);
9342 if (args->bc_attrs.max_reqs > max_bc_slots)
9343 args->bc_attrs.max_reqs = max_bc_slots;
9344
9345 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
9346 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
9347 __func__,
9348 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
9349 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
9350 args->bc_attrs.max_reqs);
9351 }
9352
9353 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
9354 struct nfs41_create_session_res *res)
9355 {
9356 struct nfs4_channel_attrs *sent = &args->fc_attrs;
9357 struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
9358
9359 if (rcvd->max_resp_sz > sent->max_resp_sz)
9360 return -EINVAL;
9361 /*
9362 * Our requested max_ops is the minimum we need; we're not
9363 * prepared to break up compounds into smaller pieces than that.
9364 * So, no point even trying to continue if the server won't
9365 * cooperate:
9366 */
9367 if (rcvd->max_ops < sent->max_ops)
9368 return -EINVAL;
9369 if (rcvd->max_reqs == 0)
9370 return -EINVAL;
9371 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
9372 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
9373 return 0;
9374 }
9375
9376 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
9377 struct nfs41_create_session_res *res)
9378 {
9379 struct nfs4_channel_attrs *sent = &args->bc_attrs;
9380 struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
9381
9382 if (!(res->flags & SESSION4_BACK_CHAN))
9383 goto out;
9384 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
9385 return -EINVAL;
9386 if (rcvd->max_resp_sz < sent->max_resp_sz)
9387 return -EINVAL;
9388 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
9389 return -EINVAL;
9390 if (rcvd->max_ops > sent->max_ops)
9391 return -EINVAL;
9392 if (rcvd->max_reqs > sent->max_reqs)
9393 return -EINVAL;
9394 out:
9395 return 0;
9396 }
9397
9398 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
9399 struct nfs41_create_session_res *res)
9400 {
9401 int ret;
9402
9403 ret = nfs4_verify_fore_channel_attrs(args, res);
9404 if (ret)
9405 return ret;
9406 return nfs4_verify_back_channel_attrs(args, res);
9407 }
9408
9409 static void nfs4_update_session(struct nfs4_session *session,
9410 struct nfs41_create_session_res *res)
9411 {
9412 nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
9413 /* Mark client id and session as being confirmed */
9414 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
9415 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state);
9416 session->flags = res->flags;
9417 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
9418 if (res->flags & SESSION4_BACK_CHAN)
9419 memcpy(&session->bc_attrs, &res->bc_attrs,
9420 sizeof(session->bc_attrs));
9421 }
9422
9423 static int _nfs4_proc_create_session(struct nfs_client *clp,
9424 const struct cred *cred)
9425 {
9426 struct nfs4_session *session = clp->cl_session;
9427 struct nfs41_create_session_args args = {
9428 .client = clp,
9429 .clientid = clp->cl_clientid,
9430 .seqid = clp->cl_seqid,
9431 .cb_program = NFS4_CALLBACK,
9432 };
9433 struct nfs41_create_session_res res;
9434
9435 struct rpc_message msg = {
9436 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
9437 .rpc_argp = &args,
9438 .rpc_resp = &res,
9439 .rpc_cred = cred,
9440 };
9441 int status;
9442
9443 nfs4_init_channel_attrs(&args, clp->cl_rpcclient);
9444 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
9445
9446 status = rpc_call_sync(session->clp->cl_rpcclient, &msg,
9447 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
9448 trace_nfs4_create_session(clp, status);
9449
9450 switch (status) {
9451 case -NFS4ERR_STALE_CLIENTID:
9452 case -NFS4ERR_DELAY:
9453 case -ETIMEDOUT:
9454 case -EACCES:
9455 case -EAGAIN:
9456 goto out;
9457 }
9458
9459 clp->cl_seqid++;
9460 if (!status) {
9461 /* Verify the session's negotiated channel_attrs values */
9462 status = nfs4_verify_channel_attrs(&args, &res);
9463 /* Increment the clientid slot sequence id */
9464 if (status)
9465 goto out;
9466 nfs4_update_session(session, &res);
9467 }
9468 out:
9469 return status;
9470 }
9471
9472 /*
9473 * Issues a CREATE_SESSION operation to the server.
9474 * It is the responsibility of the caller to verify the session is
9475 * expired before calling this routine.
9476 */
9477 int nfs4_proc_create_session(struct nfs_client *clp, const struct cred *cred)
9478 {
9479 int status;
9480 unsigned *ptr;
9481 struct nfs4_session *session = clp->cl_session;
9482 struct nfs4_add_xprt_data xprtdata = {
9483 .clp = clp,
9484 };
9485 struct rpc_add_xprt_test rpcdata = {
9486 .add_xprt_test = clp->cl_mvops->session_trunk,
9487 .data = &xprtdata,
9488 };
9489
9490 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
9491
9492 status = _nfs4_proc_create_session(clp, cred);
9493 if (status)
9494 goto out;
9495
9496 /* Init or reset the session slot tables */
9497 status = nfs4_setup_session_slot_tables(session);
9498 dprintk("slot table setup returned %d\n", status);
9499 if (status)
9500 goto out;
9501
9502 ptr = (unsigned *)&session->sess_id.data[0];
9503 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
9504 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
9505 rpc_clnt_probe_trunked_xprts(clp->cl_rpcclient, &rpcdata);
9506 out:
9507 return status;
9508 }
9509
9510 /*
9511 * Issue the over-the-wire RPC DESTROY_SESSION.
9512 * The caller must serialize access to this routine.
9513 */
9514 int nfs4_proc_destroy_session(struct nfs4_session *session,
9515 const struct cred *cred)
9516 {
9517 struct rpc_message msg = {
9518 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
9519 .rpc_argp = session,
9520 .rpc_cred = cred,
9521 };
9522 int status = 0;
9523
9524 /* session is still being set up */
9525 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
9526 return 0;
9527
9528 status = rpc_call_sync(session->clp->cl_rpcclient, &msg,
9529 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
9530 trace_nfs4_destroy_session(session->clp, status);
9531
9532 if (status)
9533 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
9534 "Session has been destroyed regardless...\n", status);
9535 rpc_clnt_manage_trunked_xprts(session->clp->cl_rpcclient);
9536 return status;
9537 }
9538
9539 /*
9540 * Renew the cl_session lease.
9541 */
9542 struct nfs4_sequence_data {
9543 struct nfs_client *clp;
9544 struct nfs4_sequence_args args;
9545 struct nfs4_sequence_res res;
9546 };
9547
9548 static void nfs41_sequence_release(void *data)
9549 {
9550 struct nfs4_sequence_data *calldata = data;
9551 struct nfs_client *clp = calldata->clp;
9552
9553 if (refcount_read(&clp->cl_count) > 1)
9554 nfs4_schedule_state_renewal(clp);
9555 nfs_put_client(clp);
9556 kfree(calldata);
9557 }
9558
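/*
 * Handle errors from the lease-renewing SEQUENCE call: NFS4ERR_DELAY is
 * retried after a delay; any other error triggers lease recovery.
 */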
9559 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
9560 {
9561 switch(task->tk_status) {
9562 case -NFS4ERR_DELAY:
9563 rpc_delay(task, NFS4_POLL_RETRY_MAX);
9564 return -EAGAIN;
9565 default:
9566 nfs4_schedule_lease_recovery(clp);
9567 }
9568 return 0;
9569 }
9570
9571 static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
9572 {
9573 struct nfs4_sequence_data *calldata = data;
9574 struct nfs_client *clp = calldata->clp;
9575
9576 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
9577 return;
9578
9579 trace_nfs4_sequence(clp, task->tk_status);
9580 if (task->tk_status < 0 && !task->tk_client->cl_shutdown) {
9581 dprintk("%s ERROR %d\n", __func__, task->tk_status);
9582 if (refcount_read(&clp->cl_count) == 1)
9583 return;
9584
9585 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
9586 rpc_restart_call_prepare(task);
9587 return;
9588 }
9589 }
9590 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
9591 }
9592
9593 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
9594 {
9595 struct nfs4_sequence_data *calldata = data;
9596 struct nfs_client *clp = calldata->clp;
9597 struct nfs4_sequence_args *args;
9598 struct nfs4_sequence_res *res;
9599
9600 args = task->tk_msg.rpc_argp;
9601 res = task->tk_msg.rpc_resp;
9602
9603 nfs4_setup_sequence(clp, args, res, task);
9604 }
9605
9606 static const struct rpc_call_ops nfs41_sequence_ops = {
9607 .rpc_call_done = nfs41_sequence_call_done,
9608 .rpc_call_prepare = nfs41_sequence_prepare,
9609 .rpc_release = nfs41_sequence_release,
9610 };
9611
9612 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
9613 const struct cred *cred,
9614 struct nfs4_slot *slot,
9615 bool is_privileged)
9616 {
9617 struct nfs4_sequence_data *calldata;
9618 struct rpc_message msg = {
9619 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
9620 .rpc_cred = cred,
9621 };
9622 struct rpc_task_setup task_setup_data = {
9623 .rpc_client = clp->cl_rpcclient,
9624 .rpc_message = &msg,
9625 .callback_ops = &nfs41_sequence_ops,
9626 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT | RPC_TASK_MOVEABLE,
9627 };
9628 struct rpc_task *ret;
9629
9630 ret = ERR_PTR(-EIO);
9631 if (!refcount_inc_not_zero(&clp->cl_count))
9632 goto out_err;
9633
9634 ret = ERR_PTR(-ENOMEM);
9635 calldata = kzalloc(sizeof(*calldata), GFP_KERNEL);
9636 if (calldata == NULL)
9637 goto out_put_clp;
9638 nfs4_init_sequence(&calldata->args, &calldata->res, 0, is_privileged);
9639 nfs4_sequence_attach_slot(&calldata->args, &calldata->res, slot);
9640 msg.rpc_argp = &calldata->args;
9641 msg.rpc_resp = &calldata->res;
9642 calldata->clp = clp;
9643 task_setup_data.callback_data = calldata;
9644
9645 ret = rpc_run_task(&task_setup_data);
9646 if (IS_ERR(ret))
9647 goto out_err;
9648 return ret;
9649 out_put_clp:
9650 nfs_put_client(clp);
9651 out_err:
9652 nfs41_release_slot(slot);
9653 return ret;
9654 }
9655
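/*
 * Asynchronous lease renewal via SEQUENCE; only issued when the renewal
 * timer fired (NFS4_RENEW_TIMEOUT), otherwise -EAGAIN is returned.
 */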
9656 static int nfs41_proc_async_sequence(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags)
9657 {
9658 struct rpc_task *task;
9659 int ret = 0;
9660
9661 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
9662 return -EAGAIN;
9663 task = _nfs41_proc_sequence(clp, cred, NULL, false);
9664 if (IS_ERR(task))
9665 ret = PTR_ERR(task);
9666 else
9667 rpc_put_task_async(task);
9668 dprintk("<-- %s status=%d\n", __func__, ret);
9669 return ret;
9670 }
9671
9672 static int nfs4_proc_sequence(struct nfs_client *clp, const struct cred *cred)
9673 {
9674 struct rpc_task *task;
9675 int ret;
9676
9677 task = _nfs41_proc_sequence(clp, cred, NULL, true);
9678 if (IS_ERR(task)) {
9679 ret = PTR_ERR(task);
9680 goto out;
9681 }
9682 ret = rpc_wait_for_completion_task(task);
9683 if (!ret)
9684 ret = task->tk_status;
9685 rpc_put_task(task);
9686 out:
9687 dprintk("<-- %s status=%d\n", __func__, ret);
9688 return ret;
9689 }
9690
9691 struct nfs4_reclaim_complete_data {
9692 struct nfs_client *clp;
9693 struct nfs41_reclaim_complete_args arg;
9694 struct nfs41_reclaim_complete_res res;
9695 };
9696
9697 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
9698 {
9699 struct nfs4_reclaim_complete_data *calldata = data;
9700
9701 nfs4_setup_sequence(calldata->clp,
9702 &calldata->arg.seq_args,
9703 &calldata->res.seq_res,
9704 task);
9705 }
9706
9707 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
9708 {
9709 switch(task->tk_status) {
9710 case 0:
9711 wake_up_all(&clp->cl_lock_waitq);
9712 fallthrough;
9713 case -NFS4ERR_COMPLETE_ALREADY:
9714 case -NFS4ERR_WRONG_CRED: /* What to do here? */
9715 break;
9716 case -NFS4ERR_DELAY:
9717 rpc_delay(task, NFS4_POLL_RETRY_MAX);
9718 fallthrough;
9719 case -NFS4ERR_RETRY_UNCACHED_REP:
9720 case -EACCES:
9721 dprintk("%s: failed to reclaim complete error %d for server %s, retrying\n",
9722 __func__, task->tk_status, clp->cl_hostname);
9723 return -EAGAIN;
9724 case -NFS4ERR_BADSESSION:
9725 case -NFS4ERR_DEADSESSION:
9726 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
9727 break;
9728 default:
9729 nfs4_schedule_lease_recovery(clp);
9730 }
9731 return 0;
9732 }
9733
9734 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
9735 {
9736 struct nfs4_reclaim_complete_data *calldata = data;
9737 struct nfs_client *clp = calldata->clp;
9738 struct nfs4_sequence_res *res = &calldata->res.seq_res;
9739
9740 if (!nfs41_sequence_done(task, res))
9741 return;
9742
9743 trace_nfs4_reclaim_complete(clp, task->tk_status);
9744 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
9745 rpc_restart_call_prepare(task);
9746 return;
9747 }
9748 }
9749
9750 static void nfs4_free_reclaim_complete_data(void *data)
9751 {
9752 struct nfs4_reclaim_complete_data *calldata = data;
9753
9754 kfree(calldata);
9755 }
9756
9757 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
9758 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
9759 .rpc_call_done = nfs4_reclaim_complete_done,
9760 .rpc_release = nfs4_free_reclaim_complete_data,
9761 };
9762
9763 /*
9764 * Issue a global reclaim complete.
9765 */
9766 static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
9767 const struct cred *cred)
9768 {
9769 struct nfs4_reclaim_complete_data *calldata;
9770 struct rpc_message msg = {
9771 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
9772 .rpc_cred = cred,
9773 };
9774 struct rpc_task_setup task_setup_data = {
9775 .rpc_client = clp->cl_rpcclient,
9776 .rpc_message = &msg,
9777 .callback_ops = &nfs4_reclaim_complete_call_ops,
9778 .flags = RPC_TASK_NO_ROUND_ROBIN,
9779 };
9780 int status = -ENOMEM;
9781
9782 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
9783 if (calldata == NULL)
9784 goto out;
9785 calldata->clp = clp;
9786 calldata->arg.one_fs = 0;
9787
9788 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0, 1);
9789 msg.rpc_argp = &calldata->arg;
9790 msg.rpc_resp = &calldata->res;
9791 task_setup_data.callback_data = calldata;
9792 status = nfs4_call_sync_custom(&task_setup_data);
9793 out:
9794 dprintk("<-- %s status=%d\n", __func__, status);
9795 return status;
9796 }
9797
9798 static void
9799 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
9800 {
9801 struct nfs4_layoutget *lgp = calldata;
9802 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
9803
9804 nfs4_setup_sequence(server->nfs_client, &lgp->args.seq_args,
9805 &lgp->res.seq_res, task);
9806 }
9807
9808 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
9809 {
9810 struct nfs4_layoutget *lgp = calldata;
9811
9812 nfs41_sequence_process(task, &lgp->res.seq_res);
9813 }
9814
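/*
 * Translate an NFS4ERR status from LAYOUTGET into a local status for the
 * caller: -ENODATA (fall back to MDS I/O), -EOVERFLOW, -EBUSY,
 * -ERECALLCONFLICT, or -EAGAIN when the layout state was invalidated and
 * the request should simply be retried.
 */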
9815 static int
9816 nfs4_layoutget_handle_exception(struct rpc_task *task,
9817 struct nfs4_layoutget *lgp, struct nfs4_exception *exception)
9818 {
9819 struct inode *inode = lgp->args.inode;
9820 struct nfs_server *server = NFS_SERVER(inode);
9821 struct pnfs_layout_hdr *lo = lgp->lo;
9822 int nfs4err = task->tk_status;
9823 int err, status = 0;
9824 LIST_HEAD(head);
9825
9826 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
9827
9828 nfs4_sequence_free_slot(&lgp->res.seq_res);
9829
9830 exception->state = NULL;
9831 exception->stateid = NULL;
9832
9833 switch (nfs4err) {
9834 case 0:
9835 goto out;
9836
9837 /*
9838 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pNFS
9839 * on the file. Set tk_status to -ENODATA to tell the upper layer to
9840 * retry the I/O through the MDS (in-band).
9841 */
9842 case -NFS4ERR_LAYOUTUNAVAILABLE:
9843 status = -ENODATA;
9844 goto out;
9845 /*
9846 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of
9847 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3).
9848 */
9849 case -NFS4ERR_BADLAYOUT:
9850 status = -EOVERFLOW;
9851 goto out;
9852 /*
9853 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
9854 * (or clients) writing to the same RAID stripe except when
9855 * the minlength argument is 0 (see RFC5661 section 18.43.3).
9856 *
9857 * Treat it like we would RECALLCONFLICT -- we retry for a little
9858 * while, and then eventually give up.
9859 */
9860 case -NFS4ERR_LAYOUTTRYLATER:
9861 if (lgp->args.minlength == 0) {
9862 status = -EOVERFLOW;
9863 goto out;
9864 }
9865 status = -EBUSY;
9866 break;
9867 case -NFS4ERR_RECALLCONFLICT:
9868 case -NFS4ERR_RETURNCONFLICT:
9869 status = -ERECALLCONFLICT;
9870 break;
9871 case -NFS4ERR_DELEG_REVOKED:
9872 case -NFS4ERR_ADMIN_REVOKED:
9873 case -NFS4ERR_EXPIRED:
9874 case -NFS4ERR_BAD_STATEID:
9875 exception->timeout = 0;
9876 spin_lock(&inode->i_lock);
9877 /* If the open stateid was bad, then recover it. */
9878 if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
9879 !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) {
9880 spin_unlock(&inode->i_lock);
9881 exception->state = lgp->args.ctx->state;
9882 exception->stateid = &lgp->args.stateid;
9883 break;
9884 }
9885
9886 /*
9887 * Mark the bad layout state as invalid, then retry
9888 */
9889 pnfs_mark_layout_stateid_invalid(lo, &head);
9890 spin_unlock(&inode->i_lock);
9891 nfs_commit_inode(inode, 0);
9892 pnfs_free_lseg_list(&head);
9893 status = -EAGAIN;
9894 goto out;
9895 }
9896
9897 err = nfs4_handle_exception(server, nfs4err, exception);
9898 if (!status) {
9899 if (exception->retry)
9900 status = -EAGAIN;
9901 else
9902 status = err;
9903 }
9904 out:
9905 return status;
9906 }
9907
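/*
 * Number of pages needed to hold the largest reply permitted by the
 * session's fore channel (fc_attrs.max_resp_sz).
 */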
9908 size_t max_response_pages(struct nfs_server *server)
9909 {
9910 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
9911 return nfs_page_array_len(0, max_resp_sz);
9912 }
9913
9914 static void nfs4_layoutget_release(void *calldata)
9915 {
9916 struct nfs4_layoutget *lgp = calldata;
9917
9918 nfs4_sequence_free_slot(&lgp->res.seq_res);
9919 pnfs_layoutget_free(lgp);
9920 }
9921
9922 static const struct rpc_call_ops nfs4_layoutget_call_ops = {
9923 .rpc_call_prepare = nfs4_layoutget_prepare,
9924 .rpc_call_done = nfs4_layoutget_done,
9925 .rpc_release = nfs4_layoutget_release,
9926 };
9927
9928 struct pnfs_layout_segment *
9929 nfs4_proc_layoutget(struct nfs4_layoutget *lgp,
9930 struct nfs4_exception *exception)
9931 {
9932 struct inode *inode = lgp->args.inode;
9933 struct nfs_server *server = NFS_SERVER(inode);
9934 struct rpc_task *task;
9935 struct rpc_message msg = {
9936 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
9937 .rpc_argp = &lgp->args,
9938 .rpc_resp = &lgp->res,
9939 .rpc_cred = lgp->cred,
9940 };
9941 struct rpc_task_setup task_setup_data = {
9942 .rpc_client = server->client,
9943 .rpc_message = &msg,
9944 .callback_ops = &nfs4_layoutget_call_ops,
9945 .callback_data = lgp,
9946 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF |
9947 RPC_TASK_MOVEABLE,
9948 };
9949 struct pnfs_layout_segment *lseg = NULL;
9950 int status = 0;
9951
9952 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0);
9953 exception->retry = 0;
9954
9955 task = rpc_run_task(&task_setup_data);
9956 if (IS_ERR(task))
9957 return ERR_CAST(task);
9958
9959 status = rpc_wait_for_completion_task(task);
9960 if (status != 0)
9961 goto out;
9962
9963 if (task->tk_status < 0) {
9964 exception->retry = 1;
9965 status = nfs4_layoutget_handle_exception(task, lgp, exception);
9966 } else if (lgp->res.layoutp->len == 0) {
9967 exception->retry = 1;
9968 status = -EAGAIN;
9969 nfs4_update_delay(&exception->timeout);
9970 } else
9971 lseg = pnfs_layout_process(lgp);
9972 out:
9973 trace_nfs4_layoutget(lgp->args.ctx,
9974 &lgp->args.range,
9975 &lgp->res.range,
9976 &lgp->res.stateid,
9977 status);
9978
9979 rpc_put_task(task);
9980 dprintk("<-- %s status=%d\n", __func__, status);
9981 if (status)
9982 return ERR_PTR(status);
9983 return lseg;
9984 }
9985
9986 static void
9987 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
9988 {
9989 struct nfs4_layoutreturn *lrp = calldata;
9990
9991 nfs4_setup_sequence(lrp->clp,
9992 &lrp->args.seq_args,
9993 &lrp->res.seq_res,
9994 task);
9995 if (!pnfs_layout_is_valid(lrp->args.layout))
9996 rpc_exit(task, 0);
9997 }
9998
9999 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
10000 {
10001 struct nfs4_layoutreturn *lrp = calldata;
10002 struct nfs_server *server;
10003
10004 if (!nfs41_sequence_process(task, &lrp->res.seq_res))
10005 return;
10006
10007 if (task->tk_rpc_status == -ETIMEDOUT) {
10008 lrp->rpc_status = -EAGAIN;
10009 lrp->res.lrs_present = 0;
10010 return;
10011 }
10012 /*
10013 * Was there an RPC level error? Assume the call succeeded,
10014 * and that we need to release the layout
10015 */
10016 if (task->tk_rpc_status != 0 && RPC_WAS_SENT(task)) {
10017 lrp->res.lrs_present = 0;
10018 return;
10019 }
10020
10021 server = NFS_SERVER(lrp->args.inode);
10022 switch (task->tk_status) {
10023 case -NFS4ERR_OLD_STATEID:
10024 if (nfs4_layout_refresh_old_stateid(&lrp->args.stateid,
10025 &lrp->args.range,
10026 lrp->args.inode))
10027 goto out_restart;
10028 fallthrough;
10029 default:
10030 task->tk_status = 0;
10031 lrp->res.lrs_present = 0;
10032 fallthrough;
10033 case 0:
10034 break;
10035 case -NFS4ERR_BADSESSION:
10036 case -NFS4ERR_DEADSESSION:
10037 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
10038 nfs4_schedule_session_recovery(server->nfs_client->cl_session,
10039 task->tk_status);
10040 lrp->res.lrs_present = 0;
10041 lrp->rpc_status = -EAGAIN;
10042 task->tk_status = 0;
10043 break;
10044 case -NFS4ERR_DELAY:
10045 if (nfs4_async_handle_error(task, server, NULL, NULL) ==
10046 -EAGAIN)
10047 goto out_restart;
10048 lrp->res.lrs_present = 0;
10049 break;
10050 }
10051 return;
10052 out_restart:
10053 task->tk_status = 0;
10054 nfs4_sequence_free_slot(&lrp->res.seq_res);
10055 rpc_restart_call_prepare(task);
10056 }
10057
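/*
 * Release callback for LAYOUTRETURN: free the returned layout segments when
 * the RPC completed (or no inode reference is held), otherwise arrange for
 * the layoutreturn to be retried later, then drop all references taken for
 * the call.
 */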
10058 static void nfs4_layoutreturn_release(void *calldata)
10059 {
10060 struct nfs4_layoutreturn *lrp = calldata;
10061 struct pnfs_layout_hdr *lo = lrp->args.layout;
10062
10063 if (lrp->rpc_status == 0 || !lrp->inode)
10064 pnfs_layoutreturn_free_lsegs(
10065 lo, &lrp->args.stateid, &lrp->args.range,
10066 lrp->res.lrs_present ? &lrp->res.stateid : NULL);
10067 else
10068 pnfs_layoutreturn_retry_later(lo, &lrp->args.stateid,
10069 &lrp->args.range);
10070 nfs4_sequence_free_slot(&lrp->res.seq_res);
10071 if (lrp->ld_private.ops && lrp->ld_private.ops->free)
10072 lrp->ld_private.ops->free(&lrp->ld_private);
10073 pnfs_put_layout_hdr(lrp->args.layout);
10074 nfs_iput_and_deactive(lrp->inode);
10075 put_cred(lrp->cred);
10076 kfree(calldata);
10077 }
10078
10079 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
10080 .rpc_call_prepare = nfs4_layoutreturn_prepare,
10081 .rpc_call_done = nfs4_layoutreturn_done,
10082 .rpc_release = nfs4_layoutreturn_release,
10083 };
10084
10085 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, unsigned int flags)
10086 {
10087 struct rpc_task *task;
10088 struct rpc_message msg = {
10089 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
10090 .rpc_argp = &lrp->args,
10091 .rpc_resp = &lrp->res,
10092 .rpc_cred = lrp->cred,
10093 };
10094 struct rpc_task_setup task_setup_data = {
10095 .rpc_client = NFS_SERVER(lrp->args.inode)->client,
10096 .rpc_message = &msg,
10097 .callback_ops = &nfs4_layoutreturn_call_ops,
10098 .callback_data = lrp,
10099 .flags = RPC_TASK_MOVEABLE,
10100 };
10101 int status = 0;
10102
10103 nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client,
10104 NFS_SP4_MACH_CRED_PNFS_CLEANUP,
10105 &task_setup_data.rpc_client, &msg);
10106
10107 lrp->inode = nfs_igrab_and_active(lrp->args.inode);
10108 if (flags & PNFS_FL_LAYOUTRETURN_ASYNC) {
10109 if (!lrp->inode) {
10110 nfs4_layoutreturn_release(lrp);
10111 return -EAGAIN;
10112 }
10113 task_setup_data.flags |= RPC_TASK_ASYNC;
10114 }
10115 if (!lrp->inode)
10116 flags |= PNFS_FL_LAYOUTRETURN_PRIVILEGED;
10117 if (flags & PNFS_FL_LAYOUTRETURN_PRIVILEGED)
10118 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
10119 1);
10120 else
10121 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
10122 0);
10123 task = rpc_run_task(&task_setup_data);
10124 if (IS_ERR(task))
10125 return PTR_ERR(task);
10126 if (!(flags & PNFS_FL_LAYOUTRETURN_ASYNC))
10127 status = task->tk_status;
10128 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status);
10129 dprintk("<-- %s status=%d\n", __func__, status);
10130 rpc_put_task(task);
10131 return status;
10132 }
10133
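/*
 * GETDEVICEINFO: request device-id change and delete notifications; if
 * the server does not grant exactly what was asked for, mark the device
 * as uncacheable.
 */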
10134 static int
10135 _nfs4_proc_getdeviceinfo(struct nfs_server *server,
10136 struct pnfs_device *pdev,
10137 const struct cred *cred)
10138 {
10139 struct nfs4_getdeviceinfo_args args = {
10140 .pdev = pdev,
10141 .notify_types = NOTIFY_DEVICEID4_CHANGE |
10142 NOTIFY_DEVICEID4_DELETE,
10143 };
10144 struct nfs4_getdeviceinfo_res res = {
10145 .pdev = pdev,
10146 };
10147 struct rpc_message msg = {
10148 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
10149 .rpc_argp = &args,
10150 .rpc_resp = &res,
10151 .rpc_cred = cred,
10152 };
10153 int status;
10154
10155 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
10156 if (res.notification & ~args.notify_types)
10157 dprintk("%s: unsupported notification\n", __func__);
10158 if (res.notification != args.notify_types)
10159 pdev->nocache = 1;
10160
10161 trace_nfs4_getdeviceinfo(server, &pdev->dev_id, status);
10162
10163 dprintk("<-- %s status=%d\n", __func__, status);
10164
10165 return status;
10166 }
10167
10168 int nfs4_proc_getdeviceinfo(struct nfs_server *server,
10169 struct pnfs_device *pdev,
10170 const struct cred *cred)
10171 {
10172 struct nfs4_exception exception = { };
10173 int err;
10174
10175 do {
10176 err = nfs4_handle_exception(server,
10177 _nfs4_proc_getdeviceinfo(server, pdev, cred),
10178 &exception);
10179 } while (exception.retry);
10180 return err;
10181 }
10182 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
10183
10184 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
10185 {
10186 struct nfs4_layoutcommit_data *data = calldata;
10187 struct nfs_server *server = NFS_SERVER(data->args.inode);
10188
10189 nfs4_setup_sequence(server->nfs_client,
10190 &data->args.seq_args,
10191 &data->res.seq_res,
10192 task);
10193 }
10194
10195 static void
10196 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
10197 {
10198 struct nfs4_layoutcommit_data *data = calldata;
10199 struct nfs_server *server = NFS_SERVER(data->args.inode);
10200
10201 if (!nfs41_sequence_done(task, &data->res.seq_res))
10202 return;
10203
10204 switch (task->tk_status) { /* Just ignore these failures */
10205 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
10206 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
10207 case -NFS4ERR_BADLAYOUT: /* no layout */
10208 case -NFS4ERR_GRACE: /* loca_reclaim always false */
10209 task->tk_status = 0;
10210 break;
10211 case 0:
10212 break;
10213 default:
10214 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
10215 rpc_restart_call_prepare(task);
10216 return;
10217 }
10218 }
10219 }
10220
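/*
 * LAYOUTCOMMIT release: let pNFS clean up the layoutcommit data, fold
 * the post-op attributes into the inode, then drop the cred and inode
 * references taken for the call.
 */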
10221 static void nfs4_layoutcommit_release(void *calldata)
10222 {
10223 struct nfs4_layoutcommit_data *data = calldata;
10224
10225 pnfs_cleanup_layoutcommit(data);
10226 nfs_post_op_update_inode_force_wcc(data->args.inode,
10227 data->res.fattr);
10228 put_cred(data->cred);
10229 nfs_iput_and_deactive(data->inode);
10230 kfree(data);
10231 }
10232
10233 static const struct rpc_call_ops nfs4_layoutcommit_ops = {
10234 .rpc_call_prepare = nfs4_layoutcommit_prepare,
10235 .rpc_call_done = nfs4_layoutcommit_done,
10236 .rpc_release = nfs4_layoutcommit_release,
10237 };
10238
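/*
 * Send a LAYOUTCOMMIT. Asynchronous calls take an active inode
 * reference for the lifetime of the RPC; synchronous calls return the
 * task status to the caller.
 */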
10239 int
10240 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
10241 {
10242 struct rpc_message msg = {
10243 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
10244 .rpc_argp = &data->args,
10245 .rpc_resp = &data->res,
10246 .rpc_cred = data->cred,
10247 };
10248 struct rpc_task_setup task_setup_data = {
10249 .task = &data->task,
10250 .rpc_client = NFS_CLIENT(data->args.inode),
10251 .rpc_message = &msg,
10252 .callback_ops = &nfs4_layoutcommit_ops,
10253 .callback_data = data,
10254 .flags = RPC_TASK_MOVEABLE,
10255 };
10256 struct rpc_task *task;
10257 int status = 0;
10258
10259 dprintk("NFS: initiating layoutcommit call. sync %d "
10260 "lbw: %llu inode %lu\n", sync,
10261 data->args.lastbytewritten,
10262 data->args.inode->i_ino);
10263
10264 if (!sync) {
10265 data->inode = nfs_igrab_and_active(data->args.inode);
10266 if (data->inode == NULL) {
10267 nfs4_layoutcommit_release(data);
10268 return -EAGAIN;
10269 }
10270 task_setup_data.flags = RPC_TASK_ASYNC;
10271 }
10272 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
10273 task = rpc_run_task(&task_setup_data);
10274 if (IS_ERR(task))
10275 return PTR_ERR(task);
10276 if (sync)
10277 status = task->tk_status;
10278 trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status);
10279 dprintk("%s: status %d\n", __func__, status);
10280 rpc_put_task(task);
10281 return status;
10282 }
10283
10284 /*
10285 * Use the state management nfs_client cl_rpcclient, which uses krb5i (if
10286 * possible) as per RFC3530bis and RFC5661 Security Considerations sections
10287 */
10288 static int
10289 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
10290 struct nfs_fsinfo *info,
10291 struct nfs4_secinfo_flavors *flavors, bool use_integrity)
10292 {
10293 struct nfs41_secinfo_no_name_args args = {
10294 .style = SECINFO_STYLE_CURRENT_FH,
10295 };
10296 struct nfs4_secinfo_res res = {
10297 .flavors = flavors,
10298 };
10299 struct rpc_message msg = {
10300 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
10301 .rpc_argp = &args,
10302 .rpc_resp = &res,
10303 };
10304 struct nfs4_call_sync_data data = {
10305 .seq_server = server,
10306 .seq_args = &args.seq_args,
10307 .seq_res = &res.seq_res,
10308 };
10309 struct rpc_task_setup task_setup = {
10310 .rpc_client = server->client,
10311 .rpc_message = &msg,
10312 .callback_ops = server->nfs_client->cl_mvops->call_sync_ops,
10313 .callback_data = &data,
10314 .flags = RPC_TASK_NO_ROUND_ROBIN,
10315 };
10316 const struct cred *cred = NULL;
10317 int status;
10318
10319 if (use_integrity) {
10320 task_setup.rpc_client = server->nfs_client->cl_rpcclient;
10321
10322 cred = nfs4_get_clid_cred(server->nfs_client);
10323 msg.rpc_cred = cred;
10324 }
10325
10326 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
10327 status = nfs4_call_sync_custom(&task_setup);
10328 dprintk("<-- %s status=%d\n", __func__, status);
10329
10330 put_cred(cred);
10331
10332 return status;
10333 }
10334
10335 static int
10336 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
10337 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
10338 {
10339 struct nfs4_exception exception = {
10340 .interruptible = true,
10341 };
10342 int err;
10343 do {
10344 /* first try using integrity protection */
10345 err = -NFS4ERR_WRONGSEC;
10346
10347 /* try to use integrity protection with machine cred */
10348 if (_nfs4_is_integrity_protected(server->nfs_client))
10349 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
10350 flavors, true);
10351
10352 /*
10353 * If unable to use integrity protection, or if SECINFO with
10354 * integrity protection returns NFS4ERR_WRONGSEC (which is
10355 * disallowed by the spec, but exists in deployed servers), use
10356 * the current filesystem's rpc_client and the user cred.
10357 */
10358 if (err == -NFS4ERR_WRONGSEC)
10359 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
10360 flavors, false);
10361
10362 switch (err) {
10363 case 0:
10364 case -NFS4ERR_WRONGSEC:
10365 case -ENOTSUPP:
10366 goto out;
10367 default:
10368 err = nfs4_handle_exception(server, err, &exception);
10369 }
10370 } while (exception.retry);
10371 out:
10372 return err;
10373 }
10374
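/*
 * Determine a usable security flavor for the pseudo-fs root: ask the
 * server via SECINFO_NO_NAME and probe each acceptable flavor with a
 * root lookup. Servers that lack SECINFO_NO_NAME support fall back to
 * the NFSv4.0 "guess and check" method.
 */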
10375 static int
10376 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
10377 struct nfs_fsinfo *info)
10378 {
10379 int err;
10380 struct page *page;
10381 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
10382 struct nfs4_secinfo_flavors *flavors;
10383 struct nfs4_secinfo4 *secinfo;
10384 int i;
10385
10386 page = alloc_page(GFP_KERNEL);
10387 if (!page) {
10388 err = -ENOMEM;
10389 goto out;
10390 }
10391
10392 flavors = page_address(page);
10393 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
10394
10395 /*
10396 * Fall back on the "guess and check" method if
10397 * the server doesn't support SECINFO_NO_NAME
10398 */
10399 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
10400 err = nfs4_find_root_sec(server, fhandle, info);
10401 goto out_freepage;
10402 }
10403 if (err)
10404 goto out_freepage;
10405
10406 for (i = 0; i < flavors->num_flavors; i++) {
10407 secinfo = &flavors->flavors[i];
10408
10409 switch (secinfo->flavor) {
10410 case RPC_AUTH_NULL:
10411 case RPC_AUTH_UNIX:
10412 case RPC_AUTH_GSS:
10413 flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
10414 &secinfo->flavor_info);
10415 break;
10416 default:
10417 flavor = RPC_AUTH_MAXFLAVOR;
10418 break;
10419 }
10420
10421 if (!nfs_auth_info_match(&server->auth_info, flavor))
10422 flavor = RPC_AUTH_MAXFLAVOR;
10423
10424 if (flavor != RPC_AUTH_MAXFLAVOR) {
10425 err = nfs4_lookup_root_sec(server, fhandle,
10426 info, flavor);
10427 if (!err)
10428 break;
10429 }
10430 }
10431
10432 if (flavor == RPC_AUTH_MAXFLAVOR)
10433 err = -EPERM;
10434
10435 out_freepage:
10436 put_page(page);
10437 if (err == -EACCES)
10438 return -EPERM;
10439 out:
10440 return err;
10441 }
10442
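/*
 * TEST_STATEID: ask the server whether it still considers @stateid
 * valid. The call may be switched to the machine credential by
 * nfs4_state_protect() when SP4_MACH_CRED stateid protection is in
 * effect.
 */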
10443 static int _nfs41_test_stateid(struct nfs_server *server,
10444 const nfs4_stateid *stateid,
10445 const struct cred *cred)
10446 {
10447 int status;
10448 struct nfs41_test_stateid_args args = {
10449 .stateid = *stateid,
10450 };
10451 struct nfs41_test_stateid_res res;
10452 struct rpc_message msg = {
10453 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
10454 .rpc_argp = &args,
10455 .rpc_resp = &res,
10456 .rpc_cred = cred,
10457 };
10458 struct rpc_clnt *rpc_client = server->client;
10459
10460 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
10461 &rpc_client, &msg);
10462
10463 dprintk("NFS call test_stateid %p\n", stateid);
10464 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
10465 status = nfs4_call_sync_sequence(rpc_client, server, &msg,
10466 &args.seq_args, &res.seq_res);
10467 if (status != NFS_OK) {
10468 dprintk("NFS reply test_stateid: failed, %d\n", status);
10469 return status;
10470 }
10471 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
10472 return -res.status;
10473 }
10474
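/*
 * Minimal exception handling for TEST_STATEID: only delays and
 * session-level errors trigger a retry; any other error is returned to
 * the caller unchanged.
 */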
10475 static void nfs4_handle_delay_or_session_error(struct nfs_server *server,
10476 int err, struct nfs4_exception *exception)
10477 {
10478 exception->retry = 0;
10479 switch(err) {
10480 case -NFS4ERR_DELAY:
10481 case -NFS4ERR_RETRY_UNCACHED_REP:
10482 nfs4_handle_exception(server, err, exception);
10483 break;
10484 case -NFS4ERR_BADSESSION:
10485 case -NFS4ERR_BADSLOT:
10486 case -NFS4ERR_BAD_HIGH_SLOT:
10487 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
10488 case -NFS4ERR_DEADSESSION:
10489 nfs4_do_handle_exception(server, err, exception);
10490 }
10491 }
10492
10493 /**
10494 * nfs41_test_stateid - perform a TEST_STATEID operation
10495 *
10496 * @server: server / transport on which to perform the operation
10497 * @stateid: state ID to test
10498 * @cred: credential
10499 *
10500 * Returns NFS_OK if the server recognizes that "stateid" is valid.
10501 * Otherwise a negative NFS4ERR value is returned if the operation
10502 * failed or the state ID is not currently valid.
10503 */
10504 static int nfs41_test_stateid(struct nfs_server *server,
10505 const nfs4_stateid *stateid,
10506 const struct cred *cred)
10507 {
10508 struct nfs4_exception exception = {
10509 .interruptible = true,
10510 };
10511 int err;
10512 do {
10513 err = _nfs41_test_stateid(server, stateid, cred);
10514 nfs4_handle_delay_or_session_error(server, err, &exception);
10515 } while (exception.retry);
10516 return err;
10517 }
10518
10519 struct nfs_free_stateid_data {
10520 struct nfs_server *server;
10521 struct nfs41_free_stateid_args args;
10522 struct nfs41_free_stateid_res res;
10523 };
10524
10525 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
10526 {
10527 struct nfs_free_stateid_data *data = calldata;
10528 nfs4_setup_sequence(data->server->nfs_client,
10529 &data->args.seq_args,
10530 &data->res.seq_res,
10531 task);
10532 }
10533
10534 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
10535 {
10536 struct nfs_free_stateid_data *data = calldata;
10537
10538 nfs41_sequence_done(task, &data->res.seq_res);
10539
10540 switch (task->tk_status) {
10541 case -NFS4ERR_DELAY:
10542 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN)
10543 rpc_restart_call_prepare(task);
10544 }
10545 }
10546
10547 static void nfs41_free_stateid_release(void *calldata)
10548 {
10549 struct nfs_free_stateid_data *data = calldata;
10550 struct nfs_client *clp = data->server->nfs_client;
10551
10552 nfs_put_client(clp);
10553 kfree(calldata);
10554 }
10555
10556 static const struct rpc_call_ops nfs41_free_stateid_ops = {
10557 .rpc_call_prepare = nfs41_free_stateid_prepare,
10558 .rpc_call_done = nfs41_free_stateid_done,
10559 .rpc_release = nfs41_free_stateid_release,
10560 };
10561
10562 /**
10563 * nfs41_free_stateid - perform a FREE_STATEID operation
10564 *
10565 * @server: server / transport on which to perform the operation
10566 * @stateid: state ID to release
10567 * @cred: credential
10568 * @privileged: set to true if this call needs to be privileged
10569 *
10570 * Note: this function is always asynchronous.
10571 */
10572 static int nfs41_free_stateid(struct nfs_server *server,
10573 const nfs4_stateid *stateid,
10574 const struct cred *cred,
10575 bool privileged)
10576 {
10577 struct rpc_message msg = {
10578 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
10579 .rpc_cred = cred,
10580 };
10581 struct rpc_task_setup task_setup = {
10582 .rpc_client = server->client,
10583 .rpc_message = &msg,
10584 .callback_ops = &nfs41_free_stateid_ops,
10585 .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
10586 };
10587 struct nfs_free_stateid_data *data;
10588 struct rpc_task *task;
10589 struct nfs_client *clp = server->nfs_client;
10590
10591 if (!refcount_inc_not_zero(&clp->cl_count))
10592 return -EIO;
10593
10594 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
10595 &task_setup.rpc_client, &msg);
10596
10597 dprintk("NFS call free_stateid %p\n", stateid);
10598 data = kmalloc(sizeof(*data), GFP_KERNEL);
10599 if (!data)
10600 return -ENOMEM;
10601 data->server = server;
10602 nfs4_stateid_copy(&data->args.stateid, stateid);
10603
10604 task_setup.callback_data = data;
10605
10606 msg.rpc_argp = &data->args;
10607 msg.rpc_resp = &data->res;
10608 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, privileged);
10609 task = rpc_run_task(&task_setup);
10610 if (IS_ERR(task))
10611 return PTR_ERR(task);
10612 rpc_put_task(task);
10613 return 0;
10614 }
10615
10616 static void
10617 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
10618 {
10619 const struct cred *cred = lsp->ls_state->owner->so_cred;
10620
10621 nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
10622 nfs4_free_lock_state(server, lsp);
10623 }
10624
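/*
 * NFSv4.1 stateid comparison: the stateids match when the type and
 * "other" fields are equal and either the seqids are equal or one of
 * them is the wildcard value zero.
 */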
10625 static bool nfs41_match_stateid(const nfs4_stateid *s1,
10626 const nfs4_stateid *s2)
10627 {
10628 if (s1->type != s2->type)
10629 return false;
10630
10631 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
10632 return false;
10633
10634 if (s1->seqid == s2->seqid)
10635 return true;
10636
10637 return s1->seqid == 0 || s2->seqid == 0;
10638 }
10639
10640 #endif /* CONFIG_NFS_V4_1 */
10641
10642 static bool nfs4_match_stateid(const nfs4_stateid *s1,
10643 const nfs4_stateid *s2)
10644 {
10645 return nfs4_stateid_match(s1, s2);
10646 }
10647
10648
10649 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
10650 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
10651 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
10652 .recover_open = nfs4_open_reclaim,
10653 .recover_lock = nfs4_lock_reclaim,
10654 .establish_clid = nfs4_init_clientid,
10655 .detect_trunking = nfs40_discover_server_trunking,
10656 };
10657
10658 #if defined(CONFIG_NFS_V4_1)
10659 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
10660 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
10661 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
10662 .recover_open = nfs4_open_reclaim,
10663 .recover_lock = nfs4_lock_reclaim,
10664 .establish_clid = nfs41_init_clientid,
10665 .reclaim_complete = nfs41_proc_reclaim_complete,
10666 .detect_trunking = nfs41_discover_server_trunking,
10667 };
10668 #endif /* CONFIG_NFS_V4_1 */
10669
10670 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
10671 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
10672 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
10673 .recover_open = nfs40_open_expired,
10674 .recover_lock = nfs4_lock_expired,
10675 .establish_clid = nfs4_init_clientid,
10676 };
10677
10678 #if defined(CONFIG_NFS_V4_1)
10679 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
10680 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
10681 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
10682 .recover_open = nfs41_open_expired,
10683 .recover_lock = nfs41_lock_expired,
10684 .establish_clid = nfs41_init_clientid,
10685 };
10686 #endif /* CONFIG_NFS_V4_1 */
10687
10688 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
10689 .sched_state_renewal = nfs4_proc_async_renew,
10690 .get_state_renewal_cred = nfs4_get_renew_cred,
10691 .renew_lease = nfs4_proc_renew,
10692 };
10693
10694 #if defined(CONFIG_NFS_V4_1)
10695 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
10696 .sched_state_renewal = nfs41_proc_async_sequence,
10697 .get_state_renewal_cred = nfs4_get_machine_cred,
10698 .renew_lease = nfs4_proc_sequence,
10699 };
10700 #endif
10701
10702 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = {
10703 .get_locations = _nfs40_proc_get_locations,
10704 .fsid_present = _nfs40_proc_fsid_present,
10705 };
10706
10707 #if defined(CONFIG_NFS_V4_1)
10708 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = {
10709 .get_locations = _nfs41_proc_get_locations,
10710 .fsid_present = _nfs41_proc_fsid_present,
10711 };
10712 #endif /* CONFIG_NFS_V4_1 */
10713
10714 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
10715 .minor_version = 0,
10716 .init_caps = NFS_CAP_READDIRPLUS
10717 | NFS_CAP_ATOMIC_OPEN
10718 | NFS_CAP_POSIX_LOCK,
10719 .init_client = nfs40_init_client,
10720 .shutdown_client = nfs40_shutdown_client,
10721 .match_stateid = nfs4_match_stateid,
10722 .find_root_sec = nfs4_find_root_sec,
10723 .free_lock_state = nfs4_release_lockowner,
10724 .test_and_free_expired = nfs40_test_and_free_expired_stateid,
10725 .alloc_seqid = nfs_alloc_seqid,
10726 .call_sync_ops = &nfs40_call_sync_ops,
10727 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
10728 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
10729 .state_renewal_ops = &nfs40_state_renewal_ops,
10730 .mig_recovery_ops = &nfs40_mig_recovery_ops,
10731 };
10732
10733 #if defined(CONFIG_NFS_V4_1)
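/*
 * NFSv4.1 sessions provide exactly-once semantics, so open/lock seqids
 * are unused; allocation simply returns NULL.
 */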
10734 static struct nfs_seqid *
10735 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2)
10736 {
10737 return NULL;
10738 }
10739
10740 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
10741 .minor_version = 1,
10742 .init_caps = NFS_CAP_READDIRPLUS
10743 | NFS_CAP_ATOMIC_OPEN
10744 | NFS_CAP_POSIX_LOCK
10745 | NFS_CAP_STATEID_NFSV41
10746 | NFS_CAP_ATOMIC_OPEN_V1
10747 | NFS_CAP_LGOPEN
10748 | NFS_CAP_MOVEABLE,
10749 .init_client = nfs41_init_client,
10750 .shutdown_client = nfs41_shutdown_client,
10751 .match_stateid = nfs41_match_stateid,
10752 .find_root_sec = nfs41_find_root_sec,
10753 .free_lock_state = nfs41_free_lock_state,
10754 .test_and_free_expired = nfs41_test_and_free_expired_stateid,
10755 .alloc_seqid = nfs_alloc_no_seqid,
10756 .session_trunk = nfs4_test_session_trunk,
10757 .call_sync_ops = &nfs41_call_sync_ops,
10758 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
10759 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
10760 .state_renewal_ops = &nfs41_state_renewal_ops,
10761 .mig_recovery_ops = &nfs41_mig_recovery_ops,
10762 };
10763 #endif
10764
10765 #if defined(CONFIG_NFS_V4_2)
10766 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
10767 .minor_version = 2,
10768 .init_caps = NFS_CAP_READDIRPLUS
10769 | NFS_CAP_ATOMIC_OPEN
10770 | NFS_CAP_POSIX_LOCK
10771 | NFS_CAP_STATEID_NFSV41
10772 | NFS_CAP_ATOMIC_OPEN_V1
10773 | NFS_CAP_LGOPEN
10774 | NFS_CAP_ALLOCATE
10775 | NFS_CAP_COPY
10776 | NFS_CAP_OFFLOAD_CANCEL
10777 | NFS_CAP_COPY_NOTIFY
10778 | NFS_CAP_DEALLOCATE
10779 | NFS_CAP_SEEK
10780 | NFS_CAP_LAYOUTSTATS
10781 | NFS_CAP_CLONE
10782 | NFS_CAP_LAYOUTERROR
10783 | NFS_CAP_READ_PLUS
10784 | NFS_CAP_MOVEABLE,
10785 .init_client = nfs41_init_client,
10786 .shutdown_client = nfs41_shutdown_client,
10787 .match_stateid = nfs41_match_stateid,
10788 .find_root_sec = nfs41_find_root_sec,
10789 .free_lock_state = nfs41_free_lock_state,
10790 .call_sync_ops = &nfs41_call_sync_ops,
10791 .test_and_free_expired = nfs41_test_and_free_expired_stateid,
10792 .alloc_seqid = nfs_alloc_no_seqid,
10793 .session_trunk = nfs4_test_session_trunk,
10794 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
10795 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
10796 .state_renewal_ops = &nfs41_state_renewal_ops,
10797 .mig_recovery_ops = &nfs41_mig_recovery_ops,
10798 };
10799 #endif
10800
10801 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
10802 [0] = &nfs_v4_0_minor_ops,
10803 #if defined(CONFIG_NFS_V4_1)
10804 [1] = &nfs_v4_1_minor_ops,
10805 #endif
10806 #if defined(CONFIG_NFS_V4_2)
10807 [2] = &nfs_v4_2_minor_ops,
10808 #endif
10809 };
10810
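/*
 * listxattr for NFSv4: concatenate the generic handler names, the NFSv4
 * security label name and the user extended attribute names, returning
 * -ERANGE if the combined listing would overflow the supplied buffer.
 */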
10811 static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
10812 {
10813 ssize_t error, error2, error3;
10814 size_t left = size;
10815
10816 error = generic_listxattr(dentry, list, left);
10817 if (error < 0)
10818 return error;
10819 if (list) {
10820 list += error;
10821 left -= error;
10822 }
10823
10824 error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, left);
10825 if (error2 < 0)
10826 return error2;
10827
10828 if (list) {
10829 list += error2;
10830 left -= error2;
10831 }
10832
10833 error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, left);
10834 if (error3 < 0)
10835 return error3;
10836
10837 error += error2 + error3;
10838 if (size && error > size)
10839 return -ERANGE;
10840 return error;
10841 }
10842
10843 static void nfs4_enable_swap(struct inode *inode)
10844 {
10845 /* The state manager thread must always be running.
10846 * It will notice the client is a swapper, and stay put.
10847 */
10848 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
10849
10850 nfs4_schedule_state_manager(clp);
10851 }
10852
10853 static void nfs4_disable_swap(struct inode *inode)
10854 {
10855 /* The state manager thread will now exit once it is
10856 * woken.
10857 */
10858 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
10859
10860 set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
10861 clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
10862 wake_up_var(&clp->cl_state);
10863 }
10864
10865 static const struct inode_operations nfs4_dir_inode_operations = {
10866 .create = nfs_create,
10867 .lookup = nfs_lookup,
10868 .atomic_open = nfs_atomic_open,
10869 .link = nfs_link,
10870 .unlink = nfs_unlink,
10871 .symlink = nfs_symlink,
10872 .mkdir = nfs_mkdir,
10873 .rmdir = nfs_rmdir,
10874 .mknod = nfs_mknod,
10875 .rename = nfs_rename,
10876 .permission = nfs_permission,
10877 .getattr = nfs_getattr,
10878 .setattr = nfs_setattr,
10879 .listxattr = nfs4_listxattr,
10880 };
10881
10882 static const struct inode_operations nfs4_file_inode_operations = {
10883 .permission = nfs_permission,
10884 .getattr = nfs_getattr,
10885 .setattr = nfs_setattr,
10886 .listxattr = nfs4_listxattr,
10887 };
10888
10889 const struct nfs_rpc_ops nfs_v4_clientops = {
10890 .version = 4, /* protocol version */
10891 .dentry_ops = &nfs4_dentry_operations,
10892 .dir_inode_ops = &nfs4_dir_inode_operations,
10893 .file_inode_ops = &nfs4_file_inode_operations,
10894 .file_ops = &nfs4_file_operations,
10895 .getroot = nfs4_proc_get_root,
10896 .submount = nfs4_submount,
10897 .try_get_tree = nfs4_try_get_tree,
10898 .getattr = nfs4_proc_getattr,
10899 .setattr = nfs4_proc_setattr,
10900 .lookup = nfs4_proc_lookup,
10901 .lookupp = nfs4_proc_lookupp,
10902 .access = nfs4_proc_access,
10903 .readlink = nfs4_proc_readlink,
10904 .create = nfs4_proc_create,
10905 .remove = nfs4_proc_remove,
10906 .unlink_setup = nfs4_proc_unlink_setup,
10907 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
10908 .unlink_done = nfs4_proc_unlink_done,
10909 .rename_setup = nfs4_proc_rename_setup,
10910 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
10911 .rename_done = nfs4_proc_rename_done,
10912 .link = nfs4_proc_link,
10913 .symlink = nfs4_proc_symlink,
10914 .mkdir = nfs4_proc_mkdir,
10915 .rmdir = nfs4_proc_rmdir,
10916 .readdir = nfs4_proc_readdir,
10917 .mknod = nfs4_proc_mknod,
10918 .statfs = nfs4_proc_statfs,
10919 .fsinfo = nfs4_proc_fsinfo,
10920 .pathconf = nfs4_proc_pathconf,
10921 .set_capabilities = nfs4_server_capabilities,
10922 .decode_dirent = nfs4_decode_dirent,
10923 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare,
10924 .read_setup = nfs4_proc_read_setup,
10925 .read_done = nfs4_read_done,
10926 .write_setup = nfs4_proc_write_setup,
10927 .write_done = nfs4_write_done,
10928 .commit_setup = nfs4_proc_commit_setup,
10929 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
10930 .commit_done = nfs4_commit_done,
10931 .lock = nfs4_proc_lock,
10932 .clear_acl_cache = nfs4_zap_acl_attr,
10933 .close_context = nfs4_close_context,
10934 .open_context = nfs4_atomic_open,
10935 .have_delegation = nfs4_have_delegation,
10936 .return_delegation = nfs4_inode_return_delegation,
10937 .alloc_client = nfs4_alloc_client,
10938 .init_client = nfs4_init_client,
10939 .free_client = nfs4_free_client,
10940 .create_server = nfs4_create_server,
10941 .clone_server = nfs_clone_server,
10942 .discover_trunking = nfs4_discover_trunking,
10943 .enable_swap = nfs4_enable_swap,
10944 .disable_swap = nfs4_disable_swap,
10945 };
10946
10947 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
10948 .name = XATTR_NAME_NFSV4_ACL,
10949 .list = nfs4_xattr_list_nfs4_acl,
10950 .get = nfs4_xattr_get_nfs4_acl,
10951 .set = nfs4_xattr_set_nfs4_acl,
10952 };
10953
10954 #if defined(CONFIG_NFS_V4_1)
10955 static const struct xattr_handler nfs4_xattr_nfs4_dacl_handler = {
10956 .name = XATTR_NAME_NFSV4_DACL,
10957 .list = nfs4_xattr_list_nfs4_dacl,
10958 .get = nfs4_xattr_get_nfs4_dacl,
10959 .set = nfs4_xattr_set_nfs4_dacl,
10960 };
10961
10962 static const struct xattr_handler nfs4_xattr_nfs4_sacl_handler = {
10963 .name = XATTR_NAME_NFSV4_SACL,
10964 .list = nfs4_xattr_list_nfs4_sacl,
10965 .get = nfs4_xattr_get_nfs4_sacl,
10966 .set = nfs4_xattr_set_nfs4_sacl,
10967 };
10968 #endif
10969
10970 #ifdef CONFIG_NFS_V4_2
10971 static const struct xattr_handler nfs4_xattr_nfs4_user_handler = {
10972 .prefix = XATTR_USER_PREFIX,
10973 .get = nfs4_xattr_get_nfs4_user,
10974 .set = nfs4_xattr_set_nfs4_user,
10975 };
10976 #endif
10977
10978 const struct xattr_handler * const nfs4_xattr_handlers[] = {
10979 &nfs4_xattr_nfs4_acl_handler,
10980 #if defined(CONFIG_NFS_V4_1)
10981 &nfs4_xattr_nfs4_dacl_handler,
10982 &nfs4_xattr_nfs4_sacl_handler,
10983 #endif
10984 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
10985 &nfs4_xattr_nfs4_label_handler,
10986 #endif
10987 #ifdef CONFIG_NFS_V4_2
10988 &nfs4_xattr_nfs4_user_handler,
10989 #endif
10990 NULL
10991 };
10992