/*
 * fs/nfs/nfs4proc.c
 *
 * Client-side procedure declarations for NFSv4.
 *
 * Copyright (c) 2002 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson   <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/iversion.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "callback.h"
#include "pnfs.h"
#include "netns.h"
#include "sysfs.h"
#include "nfs4idmap.h"
#include "nfs4session.h"
#include "fscache.h"
#include "nfs42.h"

#include "nfs4trace.h"

#define NFSDBG_FACILITY		NFSDBG_PROC

#define NFS4_BITMASK_SZ		3

#define NFS4_POLL_RETRY_MIN	(HZ/10)
#define NFS4_POLL_RETRY_MAX	(15*HZ)

/* file attributes which can be mapped to nfs attributes */
#define NFS4_VALID_ATTRS (ATTR_MODE \
	| ATTR_UID \
	| ATTR_GID \
	| ATTR_SIZE \
	| ATTR_ATIME \
	| ATTR_MTIME \
	| ATTR_CTIME \
	| ATTR_ATIME_SET \
	| ATTR_MTIME_SET)

struct nfs4_opendata;
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
			      struct nfs_fattr *fattr, struct inode *inode);
static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
			   struct nfs_fattr *fattr, struct iattr *sattr,
			   struct nfs_open_context *ctx, struct nfs4_label *ilabel);
#ifdef CONFIG_NFS_V4_1
static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
					     const struct cred *cred,
					     struct nfs4_slot *slot,
					     bool is_privileged);
static int nfs41_test_stateid(struct nfs_server *, const nfs4_stateid *,
			      const struct cred *);
static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *,
			      const struct cred *, bool);
#endif
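/*
 * Security label support: when CONFIG_NFS_V4_SECURITY_LABEL is enabled, the
 * helpers below ask the LSM for a label to attach to newly created files and
 * pick the attribute bitmask that includes (or omits) the security label
 * attribute.  The !CONFIG stubs keep the callers unconditional.
 */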
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
			 struct iattr *sattr, struct nfs4_label *label)
{
	struct lsm_context shim;
	int err;

	if (label == NULL)
		return NULL;

	if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
		return NULL;

	label->lfs = 0;
	label->pi = 0;
	label->len = 0;
	label->label = NULL;

	err = security_dentry_init_security(dentry, sattr->ia_mode,
					    &dentry->d_name, NULL, &shim);
	if (err)
		return NULL;

	label->lsmid = shim.id;
	label->label = shim.context;
	label->len = shim.len;
	return label;
}
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{
	struct lsm_context shim;

	if (label) {
		shim.context = label->label;
		shim.len = label->len;
		shim.id = label->lsmid;
		security_release_secctx(&shim);
	}
}
static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{
	if (label)
		return server->attr_bitmask;

	return server->attr_bitmask_nl;
}
#else
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
			 struct iattr *sattr, struct nfs4_label *l)
{ return NULL; }
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{ return; }
static inline u32 *
nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{ return server->attr_bitmask; }
#endif

/* Prevent leaks of NFSv4 errors into userland */
static int nfs4_map_errors(int err)
{
	if (err >= -1000)
		return err;
	switch (err) {
	case -NFS4ERR_RESOURCE:
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
	case -NFS4ERR_RETURNCONFLICT:
		return -EREMOTEIO;
	case -NFS4ERR_WRONGSEC:
	case -NFS4ERR_WRONG_CRED:
		return -EPERM;
	case -NFS4ERR_BADOWNER:
	case -NFS4ERR_BADNAME:
		return -EINVAL;
	case -NFS4ERR_SHARE_DENIED:
		return -EACCES;
	case -NFS4ERR_MINOR_VERS_MISMATCH:
		return -EPROTONOSUPPORT;
	case -NFS4ERR_FILE_OPEN:
		return -EBUSY;
	case -NFS4ERR_NOT_SAME:
		return -ENOTSYNC;
	case -ENETDOWN:
	case -ENETUNREACH:
		break;
	default:
		dprintk("%s could not handle NFSv4 error %d\n",
			__func__, -err);
		break;
	}
	return -EIO;
}
/*
 * This is our standard bitmap for GETATTR requests.
 */
const u32 nfs4_fattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_CREATE
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	FATTR4_WORD2_SECURITY_LABEL
#endif
};

static const u32 nfs4_pnfs_open_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_CREATE
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY,
	FATTR4_WORD2_MDSTHRESHOLD
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	| FATTR4_WORD2_SECURITY_LABEL
#endif
};

static const u32 nfs4_open_noattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_FILEID,
};

const u32 nfs4_statfs_bitmap[3] = {
	FATTR4_WORD0_FILES_AVAIL
	| FATTR4_WORD0_FILES_FREE
	| FATTR4_WORD0_FILES_TOTAL,
	FATTR4_WORD1_SPACE_AVAIL
	| FATTR4_WORD1_SPACE_FREE
	| FATTR4_WORD1_SPACE_TOTAL
};

const u32 nfs4_pathconf_bitmap[3] = {
	FATTR4_WORD0_MAXLINK
	| FATTR4_WORD0_MAXNAME,
	0
};

const u32 nfs4_fsinfo_bitmap[3] = {
	FATTR4_WORD0_MAXFILESIZE
	| FATTR4_WORD0_MAXREAD
	| FATTR4_WORD0_MAXWRITE
	| FATTR4_WORD0_LEASE_TIME,
	FATTR4_WORD1_TIME_DELTA
	| FATTR4_WORD1_FS_LAYOUT_TYPES,
	FATTR4_WORD2_LAYOUT_BLKSIZE
	| FATTR4_WORD2_CLONE_BLKSIZE
	| FATTR4_WORD2_CHANGE_ATTR_TYPE
	| FATTR4_WORD2_XATTR_SUPPORT
};

const u32 nfs4_fs_locations_bitmap[3] = {
	FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID
	| FATTR4_WORD0_FS_LOCATIONS,
	FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
};
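/*
 * Copy a GETATTR attribute bitmask, then trim away the attributes that the
 * client already controls because it holds a delegation (size, change
 * attribute, mode, owner, timestamps, ...), unless the corresponding
 * cache_validity flag says the cached value still needs revalidation.
 */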
static void nfs4_bitmap_copy_adjust(__u32 *dst, const __u32 *src,
				    struct inode *inode, unsigned long flags)
{
	unsigned long cache_validity;

	memcpy(dst, src, NFS4_BITMASK_SZ*sizeof(*dst));
	if (!inode || !nfs_have_read_or_write_delegation(inode))
		return;

	cache_validity = READ_ONCE(NFS_I(inode)->cache_validity) | flags;

	/* Remove the attributes over which we have full control */
	dst[1] &= ~FATTR4_WORD1_RAWDEV;
	if (!(cache_validity & NFS_INO_INVALID_SIZE))
		dst[0] &= ~FATTR4_WORD0_SIZE;

	if (!(cache_validity & NFS_INO_INVALID_CHANGE))
		dst[0] &= ~FATTR4_WORD0_CHANGE;

	if (!(cache_validity & NFS_INO_INVALID_MODE))
		dst[1] &= ~FATTR4_WORD1_MODE;
	if (!(cache_validity & NFS_INO_INVALID_OTHER))
		dst[1] &= ~(FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP);

	if (!(cache_validity & NFS_INO_INVALID_BTIME))
		dst[1] &= ~FATTR4_WORD1_TIME_CREATE;

	if (nfs_have_delegated_mtime(inode)) {
		if (!(cache_validity & NFS_INO_INVALID_ATIME))
			dst[1] &= ~(FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET);
		if (!(cache_validity & NFS_INO_INVALID_MTIME))
			dst[1] &= ~(FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET);
		if (!(cache_validity & NFS_INO_INVALID_CTIME))
			dst[1] &= ~(FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY_SET);
	} else if (nfs_have_delegated_atime(inode)) {
		if (!(cache_validity & NFS_INO_INVALID_ATIME))
			dst[1] &= ~(FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET);
	}
}
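/*
 * Prepare a READDIR request.  Cookies greater than 2 are passed through with
 * their verifier; cookies 0-2 are mapped to an on-the-wire cookie of 0, with
 * the '.' and '..' entries (which NFSv4 servers do not return) synthesized
 * locally as needed.
 */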
static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
		struct nfs4_readdir_arg *readdir)
{
	unsigned int attrs = FATTR4_WORD0_FILEID | FATTR4_WORD0_TYPE;
	__be32 *start, *p;

	if (cookie > 2) {
		readdir->cookie = cookie;
		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
		return;
	}

	readdir->cookie = 0;
	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
	if (cookie == 2)
		return;

	/*
	 * NFSv4 servers do not return entries for '.' and '..'
	 * Therefore, we fake these entries here.  We let '.'
	 * have cookie 0 and '..' have cookie 1.  Note that
	 * when talking to the server, we always send cookie 0
	 * instead of 1 or 2.
	 */
	start = p = kmap_atomic(*readdir->pages);

	if (cookie == 0) {
		*p++ = xdr_one;				/* next */
		*p++ = xdr_zero;			/* cookie, first word */
		*p++ = xdr_one;				/* cookie, second word */
		*p++ = xdr_one;				/* entry len */
		memcpy(p, ".\0\0\0", 4);		/* entry */
		p++;
		*p++ = xdr_one;				/* bitmap length */
		*p++ = htonl(attrs);			/* bitmap */
		*p++ = htonl(12);			/* attribute buffer length */
		*p++ = htonl(NF4DIR);
		p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
	}

	*p++ = xdr_one;					/* next */
	*p++ = xdr_zero;				/* cookie, first word */
	*p++ = xdr_two;					/* cookie, second word */
	*p++ = xdr_two;					/* entry len */
	memcpy(p, "..\0\0", 4);				/* entry */
	p++;
	*p++ = xdr_one;					/* bitmap length */
	*p++ = htonl(attrs);				/* bitmap */
	*p++ = htonl(12);				/* attribute buffer length */
	*p++ = htonl(NF4DIR);
	spin_lock(&dentry->d_lock);
	p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
	spin_unlock(&dentry->d_lock);

	readdir->pgbase = (char *)p - (char *)start;
	readdir->count -= readdir->pgbase;
	kunmap_atomic(start);
}

static void nfs4_fattr_set_prechange(struct nfs_fattr *fattr, u64 version)
{
	if (!(fattr->valid & NFS_ATTR_FATTR_PRECHANGE)) {
		fattr->pre_change_attr = version;
		fattr->valid |= NFS_ATTR_FATTR_PRECHANGE;
	}
}

static void nfs4_test_and_free_stateid(struct nfs_server *server,
		nfs4_stateid *stateid,
		const struct cred *cred)
{
	const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;

	ops->test_and_free_expired(server, stateid, cred);
}

static void __nfs4_free_revoked_stateid(struct nfs_server *server,
		nfs4_stateid *stateid,
		const struct cred *cred)
{
	stateid->type = NFS4_REVOKED_STATEID_TYPE;
	nfs4_test_and_free_stateid(server, stateid, cred);
}

static void nfs4_free_revoked_stateid(struct nfs_server *server,
		const nfs4_stateid *stateid,
		const struct cred *cred)
{
	nfs4_stateid tmp;

	nfs4_stateid_copy(&tmp, stateid);
	__nfs4_free_revoked_stateid(server, &tmp, cred);
}
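/*
 * Exponential back-off for NFS4ERR_DELAY-style retries: nfs4_update_delay()
 * returns the current delay and doubles *timeout for the next attempt,
 * clamped to [NFS4_POLL_RETRY_MIN, NFS4_POLL_RETRY_MAX].  The killable and
 * interruptible variants below sleep for that long unless a (fatal) signal
 * is pending or the task is exiting.
 */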
static long nfs4_update_delay(long *timeout)
{
	long ret;
	if (!timeout)
		return NFS4_POLL_RETRY_MAX;
	if (*timeout <= 0)
		*timeout = NFS4_POLL_RETRY_MIN;
	if (*timeout > NFS4_POLL_RETRY_MAX)
		*timeout = NFS4_POLL_RETRY_MAX;
	ret = *timeout;
	*timeout <<= 1;
	return ret;
}

static int nfs4_delay_killable(long *timeout)
{
	might_sleep();

	if (unlikely(nfs_current_task_exiting()))
		return -EINTR;
	__set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
	schedule_timeout(nfs4_update_delay(timeout));
	if (!__fatal_signal_pending(current))
		return 0;
	return -EINTR;
}

static int nfs4_delay_interruptible(long *timeout)
{
	might_sleep();

	if (unlikely(nfs_current_task_exiting()))
		return -EINTR;
	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE_UNSAFE);
	schedule_timeout(nfs4_update_delay(timeout));
	if (!signal_pending(current))
		return 0;
	return __fatal_signal_pending(current) ? -EINTR : -ERESTARTSYS;
}

static int nfs4_delay(long *timeout, bool interruptible)
{
	if (interruptible)
		return nfs4_delay_interruptible(timeout);
	return nfs4_delay_killable(timeout);
}

static const nfs4_stateid *
nfs4_recoverable_stateid(const nfs4_stateid *stateid)
{
	if (!stateid)
		return NULL;
	switch (stateid->type) {
	case NFS4_OPEN_STATEID_TYPE:
	case NFS4_LOCK_STATEID_TYPE:
	case NFS4_DELEGATION_STATEID_TYPE:
		return stateid;
	default:
		break;
	}
	return NULL;
}

/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
static int nfs4_do_handle_exception(struct nfs_server *server,
		int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state *state = exception->state;
	const nfs4_stateid *stateid;
	struct inode *inode = exception->inode;
	int ret = errorcode;

	exception->delay = 0;
	exception->recovering = 0;
	exception->retry = 0;

	stateid = nfs4_recoverable_stateid(exception->stateid);
	if (stateid == NULL && state != NULL)
		stateid = nfs4_recoverable_stateid(&state->stateid);

	switch (errorcode) {
	case 0:
		return 0;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		if (inode != NULL && S_ISREG(inode->i_mode))
			pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_BAD_STATEID:
	case -NFS4ERR_PARTNER_NO_AUTH:
		if (inode != NULL && stateid != NULL) {
			nfs_inode_find_state_and_recover(inode,
					stateid);
			goto wait_on_recovery;
		}
		fallthrough;
	case -NFS4ERR_OPENMODE:
		if (inode) {
			int err;

			err = nfs_async_inode_return_delegation(inode,
					stateid);
			if (err == 0)
				goto wait_on_recovery;
			if (stateid != NULL && stateid->type == NFS4_DELEGATION_STATEID_TYPE) {
				exception->retry = 1;
				break;
			}
		}
		if (state == NULL)
			break;
		ret = nfs4_schedule_stateid_recovery(server, state);
		if (ret < 0)
			break;
		goto wait_on_recovery;
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_STALE_CLIENTID:
		nfs4_schedule_lease_recovery(clp);
		goto wait_on_recovery;
	case -NFS4ERR_MOVED:
		ret = nfs4_schedule_migration_recovery(server);
		if (ret < 0)
			break;
		goto wait_on_recovery;
	case -NFS4ERR_LEASE_MOVED:
		nfs4_schedule_lease_moved_recovery(clp);
		goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		/* Handled in nfs41_sequence_process() */
		goto wait_on_recovery;
#endif /* defined(CONFIG_NFS_V4_1) */
	case -NFS4ERR_FILE_OPEN:
		if (exception->timeout > HZ) {
			/* We have retried a decent amount, time to
			 * fail
			 */
			ret = -EBUSY;
			break;
		}
		fallthrough;
	case -NFS4ERR_DELAY:
		nfs_inc_server_stats(server, NFSIOS_DELAY);
		fallthrough;
	case -NFS4ERR_GRACE:
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
	case -NFS4ERR_RETURNCONFLICT:
		exception->delay = 1;
		return 0;

	case -NFS4ERR_RETRY_UNCACHED_REP:
	case -NFS4ERR_OLD_STATEID:
		exception->retry = 1;
		break;
	case -NFS4ERR_BADOWNER:
		/* The following works around a Linux server bug! */
	case -NFS4ERR_BADNAME:
		if (server->caps & NFS_CAP_UIDGID_NOMAP) {
			server->caps &= ~NFS_CAP_UIDGID_NOMAP;
			exception->retry = 1;
			printk(KERN_WARNING "NFS: v4 server %s "
					"does not accept raw "
					"uid/gids. "
					"Reenabling the idmapper.\n",
					server->nfs_client->cl_hostname);
		}
	}
	/* We failed to handle the error */
	return nfs4_map_errors(ret);
wait_on_recovery:
	exception->recovering = 1;
	return 0;
}
/*
 * Track the number of NFS4ERR_DELAY related retransmissions and return
 * EAGAIN if the 'softerr' mount option is set, and we've exceeded the limit
 * set by 'nfs_delay_retrans'.
 */
static int nfs4_exception_should_retrans(const struct nfs_server *server,
					 struct nfs4_exception *exception)
{
	if (server->flags & NFS_MOUNT_SOFTERR && nfs_delay_retrans >= 0) {
		if (exception->retrans++ >= (unsigned short)nfs_delay_retrans)
			return -EAGAIN;
	}
	return 0;
}

/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	int ret;

	ret = nfs4_do_handle_exception(server, errorcode, exception);
	if (exception->delay) {
		int ret2 = nfs4_exception_should_retrans(server, exception);
		if (ret2 < 0) {
			exception->retry = 0;
			return ret2;
		}
		ret = nfs4_delay(&exception->timeout,
				 exception->interruptible);
		goto out_retry;
	}
	if (exception->recovering) {
		if (exception->task_is_privileged)
			return -EDEADLOCK;
		ret = nfs4_wait_clnt_recover(clp);
		if (test_bit(NFS_MIG_FAILED, &server->mig_status))
			return -EIO;
		goto out_retry;
	}
	return ret;
out_retry:
	if (ret == 0)
		exception->retry = 1;
	return ret;
}
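/*
 * Typical synchronous callers in this file wrap an operation in a retry loop
 * along the lines of (illustrative sketch only):
 *
 *	struct nfs4_exception exception = { .interruptible = true };
 *	int err;
 *	do {
 *		err = _nfs4_proc_foo(server, ...);
 *		err = nfs4_handle_exception(server, err, &exception);
 *	} while (exception.retry);
 *
 * where _nfs4_proc_foo() stands in for the raw RPC helper.  The async
 * variant below performs the equivalent handling from rpc_call_done context.
 */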
static int
nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
		int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	int ret;

	if ((task->tk_rpc_status == -ENETDOWN ||
	     task->tk_rpc_status == -ENETUNREACH) &&
	    task->tk_flags & RPC_TASK_NETUNREACH_FATAL) {
		exception->delay = 0;
		exception->recovering = 0;
		exception->retry = 0;
		return -EIO;
	}

	ret = nfs4_do_handle_exception(server, errorcode, exception);
	if (exception->delay) {
		int ret2 = nfs4_exception_should_retrans(server, exception);
		if (ret2 < 0) {
			exception->retry = 0;
			return ret2;
		}
		rpc_delay(task, nfs4_update_delay(&exception->timeout));
		goto out_retry;
	}
	if (exception->recovering) {
		if (exception->task_is_privileged)
			return -EDEADLOCK;
		rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
		if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
			rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
		goto out_retry;
	}
	if (test_bit(NFS_MIG_FAILED, &server->mig_status))
		ret = -EIO;
	return ret;
out_retry:
	if (ret == 0) {
		exception->retry = 1;
		/*
		 * For NFS4ERR_MOVED, the client transport will need to
		 * be recomputed after migration recovery has completed.
		 */
		if (errorcode == -NFS4ERR_MOVED)
			rpc_task_release_transport(task);
	}
	return ret;
}

int
nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
			struct nfs4_state *state, long *timeout)
{
	struct nfs4_exception exception = {
		.state = state,
	};

	if (task->tk_status >= 0)
		return 0;
	if (timeout)
		exception.timeout = *timeout;
	task->tk_status = nfs4_async_handle_exception(task, server,
			task->tk_status,
			&exception);
	if (exception.delay && timeout)
		*timeout = exception.timeout;
	if (exception.retry)
		return -EAGAIN;
	return 0;
}

/*
 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
 * or 'false' otherwise.
 */
static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
{
	rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
	return (flavor == RPC_AUTH_GSS_KRB5I) || (flavor == RPC_AUTH_GSS_KRB5P);
}

static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
{
	spin_lock(&clp->cl_lock);
	if (time_before(clp->cl_last_renewal, timestamp))
		clp->cl_last_renewal = timestamp;
	spin_unlock(&clp->cl_lock);
}

static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
{
	struct nfs_client *clp = server->nfs_client;

	if (!nfs4_has_session(clp))
		do_renew_lease(clp, timestamp);
}

struct nfs4_call_sync_data {
	const struct nfs_server *seq_server;
	struct nfs4_sequence_args *seq_args;
	struct nfs4_sequence_res *seq_res;
};

void nfs4_init_sequence(struct nfs4_sequence_args *args,
			struct nfs4_sequence_res *res, int cache_reply,
			int privileged)
{
	args->sa_slot = NULL;
	args->sa_cache_this = cache_reply;
	args->sa_privileged = privileged;

	res->sr_slot = NULL;
}

static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res)
{
	struct nfs4_slot *slot = res->sr_slot;
	struct nfs4_slot_table *tbl;

	tbl = slot->table;
	spin_lock(&tbl->slot_tbl_lock);
	if (!nfs41_wake_and_assign_slot(tbl, slot))
		nfs4_free_slot(tbl, slot);
	spin_unlock(&tbl->slot_tbl_lock);

	res->sr_slot = NULL;
}

static int nfs40_sequence_done(struct rpc_task *task,
			       struct nfs4_sequence_res *res)
{
	if (res->sr_slot != NULL)
		nfs40_sequence_free_slot(res);
	return 1;
}

#if defined(CONFIG_NFS_V4_1)

static void nfs41_release_slot(struct nfs4_slot *slot)
{
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	bool send_new_highest_used_slotid = false;

	if (!slot)
		return;
	tbl = slot->table;
	session = tbl->session;

	/* Bump the slot sequence number */
	if (slot->seq_done)
		slot->seq_nr++;
	slot->seq_done = 0;

	spin_lock(&tbl->slot_tbl_lock);
	/* Be nice to the server: try to ensure that the last transmitted
	 * value for highest_user_slotid <= target_highest_slotid
	 */
	if (tbl->highest_used_slotid > tbl->target_highest_slotid)
		send_new_highest_used_slotid = true;

	if (nfs41_wake_and_assign_slot(tbl, slot)) {
		send_new_highest_used_slotid = false;
		goto out_unlock;
	}
	nfs4_free_slot(tbl, slot);

	if (tbl->highest_used_slotid != NFS4_NO_SLOT)
		send_new_highest_used_slotid = false;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);
	if (send_new_highest_used_slotid)
		nfs41_notify_server(session->clp);
	if (waitqueue_active(&tbl->slot_waitq))
		wake_up_all(&tbl->slot_waitq);
}

static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
{
	nfs41_release_slot(res->sr_slot);
	res->sr_slot = NULL;
}

static void nfs4_slot_sequence_record_sent(struct nfs4_slot *slot,
		u32 seqnr)
{
	if ((s32)(seqnr - slot->seq_nr_highest_sent) > 0)
		slot->seq_nr_highest_sent = seqnr;
}
static void nfs4_slot_sequence_acked(struct nfs4_slot *slot, u32 seqnr)
{
	nfs4_slot_sequence_record_sent(slot, seqnr);
	slot->seq_nr_last_acked = seqnr;
}
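/*
 * Send a lone, privileged SEQUENCE operation on the given slot to find out
 * what the server last saw on it; used below when a slot's sequence number
 * may have raced with an interrupted request.
 */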
static void nfs4_probe_sequence(struct nfs_client *client, const struct cred *cred,
				struct nfs4_slot *slot)
{
	struct rpc_task *task = _nfs41_proc_sequence(client, cred, slot, true);
	if (!IS_ERR(task))
		rpc_put_task_async(task);
}

static int nfs41_sequence_process(struct rpc_task *task,
		struct nfs4_sequence_res *res)
{
	struct nfs4_session *session;
	struct nfs4_slot *slot = res->sr_slot;
	struct nfs_client *clp;
	int status;
	int ret = 1;

	if (slot == NULL)
		goto out_noaction;
	/* don't increment the sequence number if the task wasn't sent */
	if (!RPC_WAS_SENT(task) || slot->seq_done)
		goto out;

	session = slot->table->session;
	clp = session->clp;

	trace_nfs4_sequence_done(session, res);

	status = res->sr_status;
	if (task->tk_status == -NFS4ERR_DEADSESSION)
		status = -NFS4ERR_DEADSESSION;

	/* Check the SEQUENCE operation status */
	switch (status) {
	case 0:
		/* Mark this sequence number as having been acked */
		nfs4_slot_sequence_acked(slot, slot->seq_nr);
		/* Update the slot's sequence and clientid lease timer */
		slot->seq_done = 1;
		do_renew_lease(clp, res->sr_timestamp);
		/* Check sequence flags */
		nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags,
				!!slot->privileged);
		nfs41_update_target_slotid(slot->table, slot, res);
		break;
	case 1:
		/*
		 * sr_status remains 1 if an RPC level error occurred.
		 * The server may or may not have processed the sequence
		 * operation.
		 */
		nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
		slot->seq_done = 1;
		goto out;
	case -NFS4ERR_DELAY:
		/* The server detected a resend of the RPC call and
		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
		 * of RFC5661.
		 */
		dprintk("%s: slot=%u seq=%u: Operation in progress\n",
			__func__,
			slot->slot_nr,
			slot->seq_nr);
		goto out_retry;
	case -NFS4ERR_RETRY_UNCACHED_REP:
	case -NFS4ERR_SEQ_FALSE_RETRY:
		/*
		 * The server thinks we tried to replay a request.
		 * Retry the call after bumping the sequence ID.
		 */
		nfs4_slot_sequence_acked(slot, slot->seq_nr);
		goto retry_new_seq;
	case -NFS4ERR_BADSLOT:
		/*
		 * The slot id we used was probably retired. Try again
		 * using a different slot id.
		 */
		if (slot->slot_nr < slot->table->target_highest_slotid)
			goto session_recover;
		goto retry_nowait;
	case -NFS4ERR_SEQ_MISORDERED:
		nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
		/*
		 * Were one or more calls using this slot interrupted?
		 * If the server never received the request, then our
		 * transmitted slot sequence number may be too high. However,
		 * if the server did receive the request then it might
		 * accidentally give us a reply with a mismatched operation.
		 * We can sort this out by sending a lone sequence operation
		 * to the server on the same slot.
		 */
		if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) {
			slot->seq_nr--;
			if (task->tk_msg.rpc_proc != &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE]) {
				nfs4_probe_sequence(clp, task->tk_msg.rpc_cred, slot);
				res->sr_slot = NULL;
			}
			goto retry_nowait;
		}
		/*
		 * RFC5661:
		 * A retry might be sent while the original request is
		 * still in progress on the replier. The replier SHOULD
		 * deal with the issue by returning NFS4ERR_DELAY as the
		 * reply to SEQUENCE or CB_SEQUENCE operation, but
		 * implementations MAY return NFS4ERR_SEQ_MISORDERED.
		 *
		 * Restart the search after a delay.
		 */
		slot->seq_nr = slot->seq_nr_highest_sent;
		goto out_retry;
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		goto session_recover;
	default:
		/* Just update the slot sequence no. */
		slot->seq_done = 1;
	}
out:
	/* The session may be reset by one of the error handlers. */
	dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
out_noaction:
	return ret;
session_recover:
	set_bit(NFS4_SLOT_TBL_DRAINING, &session->fc_slot_table.slot_tbl_state);
	nfs4_schedule_session_recovery(session, status);
	dprintk("%s ERROR: %d Reset session\n", __func__, status);
	nfs41_sequence_free_slot(res);
	goto out;
retry_new_seq:
	++slot->seq_nr;
retry_nowait:
	if (rpc_restart_call_prepare(task)) {
		nfs41_sequence_free_slot(res);
		task->tk_status = 0;
		ret = 0;
	}
	goto out;
out_retry:
	if (!rpc_restart_call(task))
		goto out;
	rpc_delay(task, NFS4_POLL_RETRY_MAX);
	return 0;
}
int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (!nfs41_sequence_process(task, res))
		return 0;
	if (res->sr_slot != NULL)
		nfs41_sequence_free_slot(res);
	return 1;
}
EXPORT_SYMBOL_GPL(nfs41_sequence_done);

static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (res->sr_slot == NULL)
		return 1;
	if (res->sr_slot->table->session != NULL)
		return nfs41_sequence_process(task, res);
	return nfs40_sequence_done(task, res);
}

static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
{
	if (res->sr_slot != NULL) {
		if (res->sr_slot->table->session != NULL)
			nfs41_sequence_free_slot(res);
		else
			nfs40_sequence_free_slot(res);
	}
}

int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (res->sr_slot == NULL)
		return 1;
	if (!res->sr_slot->table->session)
		return nfs40_sequence_done(task, res);
	return nfs41_sequence_done(task, res);
}
EXPORT_SYMBOL_GPL(nfs4_sequence_done);

static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;

	dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);

	nfs4_setup_sequence(data->seq_server->nfs_client,
			    data->seq_args, data->seq_res, task);
}

static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;

	nfs41_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs41_call_sync_ops = {
	.rpc_call_prepare = nfs41_call_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
};

#else	/* !CONFIG_NFS_V4_1 */

static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	return nfs40_sequence_done(task, res);
}

static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
{
	if (res->sr_slot != NULL)
		nfs40_sequence_free_slot(res);
}

int nfs4_sequence_done(struct rpc_task *task,
		       struct nfs4_sequence_res *res)
{
	return nfs40_sequence_done(task, res);
}
EXPORT_SYMBOL_GPL(nfs4_sequence_done);

#endif	/* !CONFIG_NFS_V4_1 */
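/*
 * Helpers for attaching a session slot to a request: the result structure is
 * (re)initialised, the slot is recorded in both the argument and result
 * structures, and nfs4_setup_sequence() either starts the RPC or puts the
 * task to sleep on the slot table wait queue when no slot is available or
 * the table is draining for recovery.
 */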
static void nfs41_sequence_res_init(struct nfs4_sequence_res *res)
{
	res->sr_timestamp = jiffies;
	res->sr_status_flags = 0;
	res->sr_status = 1;
}

static void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
				      struct nfs4_sequence_res *res,
				      struct nfs4_slot *slot)
{
	if (!slot)
		return;
	slot->privileged = args->sa_privileged ? 1 : 0;
	args->sa_slot = slot;

	res->sr_slot = slot;
}

int nfs4_setup_sequence(struct nfs_client *client,
			struct nfs4_sequence_args *args,
			struct nfs4_sequence_res *res,
			struct rpc_task *task)
{
	struct nfs4_session *session = nfs4_get_session(client);
	struct nfs4_slot_table *tbl = client->cl_slot_tbl;
	struct nfs4_slot *slot;

	/* slot already allocated? */
	if (res->sr_slot != NULL)
		goto out_start;

	if (session)
		tbl = &session->fc_slot_table;

	spin_lock(&tbl->slot_tbl_lock);
	/* The state manager will wait until the slot table is empty */
	if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
		goto out_sleep;

	slot = nfs4_alloc_slot(tbl);
	if (IS_ERR(slot)) {
		if (slot == ERR_PTR(-ENOMEM))
			goto out_sleep_timeout;
		goto out_sleep;
	}
	spin_unlock(&tbl->slot_tbl_lock);

	nfs4_sequence_attach_slot(args, res, slot);

	trace_nfs4_setup_sequence(session, args);
out_start:
	nfs41_sequence_res_init(res);
	rpc_call_start(task);
	return 0;
out_sleep_timeout:
	/* Try again in 1/4 second */
	if (args->sa_privileged)
		rpc_sleep_on_priority_timeout(&tbl->slot_tbl_waitq, task,
				jiffies + (HZ >> 2), RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on_timeout(&tbl->slot_tbl_waitq, task,
				NULL, jiffies + (HZ >> 2));
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
out_sleep:
	if (args->sa_privileged)
		rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
				RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
}
EXPORT_SYMBOL_GPL(nfs4_setup_sequence);

static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	nfs4_setup_sequence(data->seq_server->nfs_client,
			    data->seq_args, data->seq_res, task);
}

static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	nfs4_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs40_call_sync_ops = {
	.rpc_call_prepare = nfs40_call_sync_prepare,
	.rpc_call_done = nfs40_call_sync_done,
};

static int nfs4_call_sync_custom(struct rpc_task_setup *task_setup)
{
	int ret;
	struct rpc_task *task;

	task = rpc_run_task(task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);

	ret = task->tk_status;
	rpc_put_task(task);
	return ret;
}

static int nfs4_do_call_sync(struct rpc_clnt *clnt,
			     struct nfs_server *server,
			     struct rpc_message *msg,
			     struct nfs4_sequence_args *args,
			     struct nfs4_sequence_res *res,
			     unsigned short task_flags)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_call_sync_data data = {
		.seq_server = server,
		.seq_args = args,
		.seq_res = res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = clp->cl_mvops->call_sync_ops,
		.callback_data = &data,
		.flags = task_flags,
	};

	return nfs4_call_sync_custom(&task_setup);
}

static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
				   struct nfs_server *server,
				   struct rpc_message *msg,
				   struct nfs4_sequence_args *args,
				   struct nfs4_sequence_res *res)
{
	unsigned short task_flags = 0;

	if (server->caps & NFS_CAP_MOVEABLE)
		task_flags = RPC_TASK_MOVEABLE;
	return nfs4_do_call_sync(clnt, server, msg, args, res, task_flags);
}

int nfs4_call_sync(struct rpc_clnt *clnt,
		   struct nfs_server *server,
		   struct rpc_message *msg,
		   struct nfs4_sequence_args *args,
		   struct nfs4_sequence_res *res,
		   int cache_reply)
{
	nfs4_init_sequence(args, res, cache_reply, 0);
	return nfs4_call_sync_sequence(clnt, server, msg, args, res);
}

static void
nfs4_inc_nlink_locked(struct inode *inode)
{
	nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
				     NFS_INO_INVALID_CTIME |
				     NFS_INO_INVALID_NLINK);
	inc_nlink(inode);
}

static void
nfs4_inc_nlink(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	nfs4_inc_nlink_locked(inode);
	spin_unlock(&inode->i_lock);
}

static void
nfs4_dec_nlink_locked(struct inode *inode)
{
	nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
				     NFS_INO_INVALID_CTIME |
				     NFS_INO_INVALID_NLINK);
	drop_nlink(inode);
}
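/*
 * Apply the change_info4 returned by a directory-modifying operation: bump
 * the cached change attribute, and if the update was not atomic (or raced
 * with another writer) invalidate whatever cached state the client cannot
 * prove is still current.
 */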
static void
nfs4_update_changeattr_locked(struct inode *inode,
			      struct nfs4_change_info *cinfo,
			      unsigned long timestamp, unsigned long cache_validity)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	u64 change_attr = inode_peek_iversion_raw(inode);

	if (!nfs_have_delegated_mtime(inode))
		cache_validity |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME;
	if (S_ISDIR(inode->i_mode))
		cache_validity |= NFS_INO_INVALID_DATA;

	switch (NFS_SERVER(inode)->change_attr_type) {
	case NFS4_CHANGE_TYPE_IS_UNDEFINED:
		if (cinfo->after == change_attr)
			goto out;
		break;
	default:
		if ((s64)(change_attr - cinfo->after) >= 0)
			goto out;
	}

	inode_set_iversion_raw(inode, cinfo->after);
	if (!cinfo->atomic || cinfo->before != change_attr) {
		if (S_ISDIR(inode->i_mode))
			nfs_force_lookup_revalidate(inode);

		if (!nfs_have_delegated_attributes(inode))
			cache_validity |=
				NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
				NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER |
				NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
				NFS_INO_INVALID_MODE | NFS_INO_INVALID_BTIME |
				NFS_INO_INVALID_XATTR;
		nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
	}
	nfsi->attrtimeo_timestamp = jiffies;
	nfsi->read_cache_jiffies = timestamp;
	nfsi->attr_gencount = nfs_inc_attr_generation_counter();
	nfsi->cache_validity &= ~NFS_INO_INVALID_CHANGE;
out:
	nfs_set_cache_invalid(inode, cache_validity);
}

void
nfs4_update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
		       unsigned long timestamp, unsigned long cache_validity)
{
	spin_lock(&dir->i_lock);
	nfs4_update_changeattr_locked(dir, cinfo, timestamp, cache_validity);
	spin_unlock(&dir->i_lock);
}

struct nfs4_open_createattrs {
	struct nfs4_label *label;
	struct iattr *sattr;
	const __u32 verf[2];
};

static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
		int err, struct nfs4_exception *exception)
{
	if (err != -EINVAL)
		return false;
	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
		return false;
	server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
	exception->retry = 1;
	return true;
}

static fmode_t _nfs4_ctx_to_accessmode(const struct nfs_open_context *ctx)
{
	return ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
}

static fmode_t _nfs4_ctx_to_openmode(const struct nfs_open_context *ctx)
{
	fmode_t ret = ctx->mode & (FMODE_READ|FMODE_WRITE);

	return (ctx->mode & FMODE_EXEC) ? FMODE_READ | ret : ret;
}

static u32
nfs4_fmode_to_share_access(fmode_t fmode)
{
	u32 res = 0;

	switch (fmode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
		res = NFS4_SHARE_ACCESS_READ;
		break;
	case FMODE_WRITE:
		res = NFS4_SHARE_ACCESS_WRITE;
		break;
	case FMODE_READ|FMODE_WRITE:
		res = NFS4_SHARE_ACCESS_BOTH;
	}
	return res;
}

static u32
nfs4_map_atomic_open_share(struct nfs_server *server,
		fmode_t fmode, int openflags)
{
	u32 res = nfs4_fmode_to_share_access(fmode);

	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
		goto out;
	/* Want no delegation if we're using O_DIRECT */
	if (openflags & O_DIRECT) {
		res |= NFS4_SHARE_WANT_NO_DELEG;
		goto out;
	}
	/* res |= NFS4_SHARE_WANT_NO_PREFERENCE; */
	if (server->caps & NFS_CAP_DELEGTIME)
		res |= NFS4_SHARE_WANT_DELEG_TIMESTAMPS;
	if (server->caps & NFS_CAP_OPEN_XOR)
		res |= NFS4_SHARE_WANT_OPEN_XOR_DELEGATION;
out:
	return res;
}

static enum open_claim_type4
nfs4_map_atomic_open_claim(struct nfs_server *server,
		enum open_claim_type4 claim)
{
	if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
		return claim;
	switch (claim) {
	default:
		return claim;
	case NFS4_OPEN_CLAIM_FH:
		return NFS4_OPEN_CLAIM_NULL;
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
		return NFS4_OPEN_CLAIM_DELEGATE_CUR;
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		return NFS4_OPEN_CLAIM_DELEGATE_PREV;
	}
}

static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
	p->o_res.f_attr = &p->f_attr;
	p->o_res.seqid = p->o_arg.seqid;
	p->c_res.seqid = p->c_arg.seqid;
	p->o_res.server = p->o_arg.server;
	p->o_res.access_request = p->o_arg.access;
	nfs_fattr_init(&p->f_attr);
	nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}
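/*
 * Allocate and initialise the nfs4_opendata that carries an OPEN (and any
 * subsequent OPEN_CONFIRM / CLOSE) through the state machine: it pins the
 * dentry, parent directory and state owner, sets up the OPEN arguments for
 * the requested claim type, and pre-initialises the result structures.
 */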
static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
		const struct nfs4_open_createattrs *c,
		enum open_claim_type4 claim,
		gfp_t gfp_mask)
{
	struct dentry *parent = dget_parent(dentry);
	struct inode *dir = d_inode(parent);
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
	struct nfs4_label *label = (c != NULL) ? c->label : NULL;
	struct nfs4_opendata *p;

	p = kzalloc(sizeof(*p), gfp_mask);
	if (p == NULL)
		goto err;

	p->f_attr.label = nfs4_label_alloc(server, gfp_mask);
	if (IS_ERR(p->f_attr.label))
		goto err_free_p;

	p->a_label = nfs4_label_alloc(server, gfp_mask);
	if (IS_ERR(p->a_label))
		goto err_free_f;

	alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
	p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
	if (IS_ERR(p->o_arg.seqid))
		goto err_free_label;
	nfs_sb_active(dentry->d_sb);
	p->dentry = dget(dentry);
	p->dir = parent;
	p->owner = sp;
	atomic_inc(&sp->so_count);
	p->o_arg.open_flags = flags;
	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
	p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
	p->o_arg.share_access = nfs4_map_atomic_open_share(server,
							   fmode, flags);
	if (flags & O_CREAT) {
		p->o_arg.umask = current_umask();
		p->o_arg.label = nfs4_label_copy(p->a_label, label);
		if (c->sattr != NULL && c->sattr->ia_valid != 0) {
			p->o_arg.u.attrs = &p->attrs;
			memcpy(&p->attrs, c->sattr, sizeof(p->attrs));

			memcpy(p->o_arg.u.verifier.data, c->verf,
					sizeof(p->o_arg.u.verifier.data));
		}
	}
	/* ask server to check for all possible rights as results
	 * are cached */
	switch (p->o_arg.claim) {
	default:
		break;
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
				  NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE |
				  NFS4_ACCESS_EXECUTE |
				  nfs_access_xattr_mask(server);
	}
	p->o_arg.clientid = server->nfs_client->cl_clientid;
	p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
	p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
	p->o_arg.name = &dentry->d_name;
	p->o_arg.server = server;
	p->o_arg.bitmask = nfs4_bitmask(server, label);
	p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
	switch (p->o_arg.claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
	case NFS4_OPEN_CLAIM_DELEGATE_PREV:
		p->o_arg.fh = NFS_FH(dir);
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
	case NFS4_OPEN_CLAIM_FH:
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		p->o_arg.fh = NFS_FH(d_inode(dentry));
	}
	p->c_arg.fh = &p->o_res.fh;
	p->c_arg.stateid = &p->o_res.stateid;
	p->c_arg.seqid = p->o_arg.seqid;
	nfs4_init_opendata_res(p);
	kref_init(&p->kref);
	return p;

err_free_label:
	nfs4_label_free(p->a_label);
err_free_f:
	nfs4_label_free(p->f_attr.label);
err_free_p:
	kfree(p);
err:
	dput(parent);
	return NULL;
}

static void nfs4_opendata_free(struct kref *kref)
{
	struct nfs4_opendata *p = container_of(kref,
			struct nfs4_opendata, kref);
	struct super_block *sb = p->dentry->d_sb;

	nfs4_lgopen_release(p->lgp);
	nfs_free_seqid(p->o_arg.seqid);
	nfs4_sequence_free_slot(&p->o_res.seq_res);
	if (p->state != NULL)
		nfs4_put_open_state(p->state);
	nfs4_put_state_owner(p->owner);

	nfs4_label_free(p->a_label);
	nfs4_label_free(p->f_attr.label);

	dput(p->dir);
	dput(p->dentry);
	nfs_sb_deactive(sb);
	nfs_fattr_free_names(&p->f_attr);
	kfree(p->f_attr.mdsthreshold);
	kfree(p);
}

static void nfs4_opendata_put(struct nfs4_opendata *p)
{
	if (p != NULL)
		kref_put(&p->kref, nfs4_opendata_free);
}

static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
		fmode_t fmode)
{
	switch (fmode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_READ|FMODE_WRITE:
		return state->n_rdwr != 0;
	case FMODE_WRITE:
		return state->n_wronly != 0;
	case FMODE_READ:
		return state->n_rdonly != 0;
	}
	WARN_ON_ONCE(1);
	return false;
}
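/*
 * can_open_cached() and can_open_delegated() decide whether an OPEN can be
 * satisfied without going to the server: either because an existing open
 * stateid already covers the requested share mode, or because a valid
 * delegation of the right type is held.
 */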
static int can_open_cached(struct nfs4_state *state, fmode_t mode,
		int open_mode, enum open_claim_type4 claim)
{
	int ret = 0;

	if (open_mode & (O_EXCL|O_TRUNC))
		goto out;
	switch (claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		goto out;
	default:
		break;
	}
	switch (mode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_READ:
		ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
			&& state->n_rdonly != 0;
		break;
	case FMODE_WRITE:
		ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
			&& state->n_wronly != 0;
		break;
	case FMODE_READ|FMODE_WRITE:
		ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
			&& state->n_rdwr != 0;
	}
out:
	return ret;
}

static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
		enum open_claim_type4 claim)
{
	if (delegation == NULL)
		return 0;
	if ((delegation->type & fmode) != fmode)
		return 0;
	switch (claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
		if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
			break;
		fallthrough;
	default:
		return 0;
	}
	nfs_mark_delegation_referenced(delegation);
	return 1;
}

static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
{
	switch (fmode) {
	case FMODE_WRITE:
		state->n_wronly++;
		break;
	case FMODE_READ:
		state->n_rdonly++;
		break;
	case FMODE_READ|FMODE_WRITE:
		state->n_rdwr++;
	}
	nfs4_state_set_mode_locked(state, state->state | fmode);
}

#ifdef CONFIG_NFS_V4_1
static bool nfs_open_stateid_recover_openmode(struct nfs4_state *state)
{
	if (state->n_rdonly && !test_bit(NFS_O_RDONLY_STATE, &state->flags))
		return true;
	if (state->n_wronly && !test_bit(NFS_O_WRONLY_STATE, &state->flags))
		return true;
	if (state->n_rdwr && !test_bit(NFS_O_RDWR_STATE, &state->flags))
		return true;
	return false;
}
#endif /* CONFIG_NFS_V4_1 */

static void nfs_state_log_update_open_stateid(struct nfs4_state *state)
{
	if (test_and_clear_bit(NFS_STATE_CHANGE_WAIT, &state->flags))
		wake_up_all(&state->waitq);
}
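/*
 * Clear every open-mode flag on the state; if any open references were still
 * relying on one of them, mark the state for "no grace" reclaim so the state
 * manager re-establishes it with the server.
 */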
static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
{
	struct nfs_client *clp = state->owner->so_server->nfs_client;
	bool need_recover = false;

	if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
		need_recover = true;
	if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
		need_recover = true;
	if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
		need_recover = true;
	if (need_recover)
		nfs4_state_mark_reclaim_nograce(clp, state);
}

/*
 * Check for whether or not the caller may update the open stateid
 * to the value passed in by stateid.
 *
 * Note: This function relies heavily on the server implementing
 * RFC7530 Section 9.1.4.2, and RFC5661 Section 8.2.2
 * correctly.
 * i.e. The stateid seqids have to be initialised to 1, and
 * are then incremented on every state transition.
 */
static bool nfs_stateid_is_sequential(struct nfs4_state *state,
		const nfs4_stateid *stateid)
{
	if (test_bit(NFS_OPEN_STATE, &state->flags)) {
		/* The common case - we're updating to a new sequence number */
		if (nfs4_stateid_match_other(stateid, &state->open_stateid)) {
			if (nfs4_stateid_is_next(&state->open_stateid, stateid))
				return true;
			return false;
		}
		/* The server returned a new stateid */
	}
	/* This is the first OPEN in this generation */
	if (stateid->seqid == cpu_to_be32(1))
		return true;
	return false;
}

static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
{
	if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
		return;
	if (state->n_wronly)
		set_bit(NFS_O_WRONLY_STATE, &state->flags);
	if (state->n_rdonly)
		set_bit(NFS_O_RDONLY_STATE, &state->flags);
	if (state->n_rdwr)
		set_bit(NFS_O_RDWR_STATE, &state->flags);
	set_bit(NFS_OPEN_STATE, &state->flags);
}
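/*
 * Handle the result of a CLOSE or OPEN_DOWNGRADE: drop the share-mode flags
 * that the operation gave up and, unless an OPEN raced with it and already
 * moved the stateid forward, record the stateid returned by the server.
 */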
static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
		nfs4_stateid *stateid, fmode_t fmode)
{
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	switch (fmode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_WRITE:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		break;
	case FMODE_READ:
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		break;
	case 0:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		clear_bit(NFS_OPEN_STATE, &state->flags);
	}
	if (stateid == NULL)
		return;
	/* Handle OPEN+OPEN_DOWNGRADE races */
	if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
	    !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
		nfs_resync_open_stateid_locked(state);
		goto out;
	}
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
	trace_nfs4_open_stateid_update(state->inode, stateid, 0);
out:
	nfs_state_log_update_open_stateid(state);
}

static void nfs_clear_open_stateid(struct nfs4_state *state,
	nfs4_stateid *arg_stateid,
	nfs4_stateid *stateid, fmode_t fmode)
{
	write_seqlock(&state->seqlock);
	/* Ignore if the CLOSE argument doesn't match the current stateid */
	if (nfs4_state_match_open_stateid_other(state, arg_stateid))
		nfs_clear_open_stateid_locked(state, stateid, fmode);
	write_sequnlock(&state->seqlock);
	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
}
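/*
 * Update the open stateid while making sure that updates are applied in the
 * order the server issued them: if the incoming stateid is not the next in
 * sequence, wait (briefly, and killably) for the intervening updates to
 * arrive before applying this one.
 */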
static void nfs_set_open_stateid_locked(struct nfs4_state *state,
		const nfs4_stateid *stateid, nfs4_stateid *freeme)
	__must_hold(&state->owner->so_lock)
	__must_hold(&state->seqlock)
	__must_hold(RCU)
{
	DEFINE_WAIT(wait);
	int status = 0;
	for (;;) {

		if (nfs_stateid_is_sequential(state, stateid))
			break;

		if (status) {
			if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
			    !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
				trace_nfs4_open_stateid_update_skip(state->inode,
						stateid, status);
				return;
			} else {
				break;
			}
		}

		/* Rely on seqids for serialisation with NFSv4.0 */
		if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client))
			break;

		set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
		prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
		/*
		 * Ensure we process the state changes in the same order
		 * in which the server processed them by delaying the
		 * update of the stateid until we are in sequence.
		 */
		write_sequnlock(&state->seqlock);
		spin_unlock(&state->owner->so_lock);
		rcu_read_unlock();
		trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);

		if (!fatal_signal_pending(current) &&
		    !nfs_current_task_exiting()) {
			if (schedule_timeout(5*HZ) == 0)
				status = -EAGAIN;
			else
				status = 0;
		} else
			status = -EINTR;
		finish_wait(&state->waitq, &wait);
		rcu_read_lock();
		spin_lock(&state->owner->so_lock);
		write_seqlock(&state->seqlock);
	}

	if (test_bit(NFS_OPEN_STATE, &state->flags) &&
	    !nfs4_stateid_match_other(stateid, &state->open_stateid)) {
		nfs4_stateid_copy(freeme, &state->open_stateid);
		nfs_test_and_clear_all_open_stateid(state);
	}

	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
	trace_nfs4_open_stateid_update(state->inode, stateid, status);
	nfs_state_log_update_open_stateid(state);
}

static void nfs_state_set_open_stateid(struct nfs4_state *state,
		const nfs4_stateid *open_stateid,
		fmode_t fmode,
		nfs4_stateid *freeme)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	write_seqlock(&state->seqlock);
	nfs_set_open_stateid_locked(state, open_stateid, freeme);
	switch (fmode) {
	case FMODE_READ:
		set_bit(NFS_O_RDONLY_STATE, &state->flags);
		break;
	case FMODE_WRITE:
		set_bit(NFS_O_WRONLY_STATE, &state->flags);
		break;
	case FMODE_READ|FMODE_WRITE:
		set_bit(NFS_O_RDWR_STATE, &state->flags);
	}
	set_bit(NFS_OPEN_STATE, &state->flags);
	write_sequnlock(&state->seqlock);
}

static void nfs_state_clear_open_state_flags(struct nfs4_state *state)
{
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	clear_bit(NFS_O_WRONLY_STATE, &state->flags);
	clear_bit(NFS_O_RDONLY_STATE, &state->flags);
	clear_bit(NFS_OPEN_STATE, &state->flags);
}

static void nfs_state_set_delegation(struct nfs4_state *state,
		const nfs4_stateid *deleg_stateid,
		fmode_t fmode)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	write_seqlock(&state->seqlock);
	nfs4_stateid_copy(&state->stateid, deleg_stateid);
	set_bit(NFS_DELEGATED_STATE, &state->flags);
	write_sequnlock(&state->seqlock);
}

static void nfs_state_clear_delegation(struct nfs4_state *state)
{
	write_seqlock(&state->seqlock);
	nfs4_stateid_copy(&state->stateid, &state->open_stateid);
	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	write_sequnlock(&state->seqlock);
}
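/*
 * Record the result of an OPEN: update the open stateid (if one was
 * returned), and if a matching delegation is still valid, switch the state
 * over to the delegation stateid.  Returns 1 if the state was updated.
 */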
struct nfs_server *server = NFS_SERVER(state->inode); 1898 struct nfs_client *clp = server->nfs_client; 1899 struct nfs_inode *nfsi = NFS_I(state->inode); 1900 struct nfs_delegation *deleg_cur; 1901 nfs4_stateid freeme = { }; 1902 int ret = 0; 1903 1904 fmode &= (FMODE_READ|FMODE_WRITE); 1905 1906 rcu_read_lock(); 1907 spin_lock(&state->owner->so_lock); 1908 if (open_stateid != NULL) { 1909 nfs_state_set_open_stateid(state, open_stateid, fmode, &freeme); 1910 ret = 1; 1911 } 1912 1913 deleg_cur = nfs4_get_valid_delegation(state->inode); 1914 if (deleg_cur == NULL) 1915 goto no_delegation; 1916 1917 spin_lock(&deleg_cur->lock); 1918 if (rcu_dereference(nfsi->delegation) != deleg_cur || 1919 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) || 1920 (deleg_cur->type & fmode) != fmode) 1921 goto no_delegation_unlock; 1922 1923 if (delegation == NULL) 1924 delegation = &deleg_cur->stateid; 1925 else if (!nfs4_stateid_match_other(&deleg_cur->stateid, delegation)) 1926 goto no_delegation_unlock; 1927 1928 nfs_mark_delegation_referenced(deleg_cur); 1929 nfs_state_set_delegation(state, &deleg_cur->stateid, fmode); 1930 ret = 1; 1931 no_delegation_unlock: 1932 spin_unlock(&deleg_cur->lock); 1933 no_delegation: 1934 if (ret) 1935 update_open_stateflags(state, fmode); 1936 spin_unlock(&state->owner->so_lock); 1937 rcu_read_unlock(); 1938 1939 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) 1940 nfs4_schedule_state_manager(clp); 1941 if (freeme.type != 0) 1942 nfs4_test_and_free_stateid(server, &freeme, 1943 state->owner->so_cred); 1944 1945 return ret; 1946 } 1947 1948 static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp, 1949 const nfs4_stateid *stateid) 1950 { 1951 struct nfs4_state *state = lsp->ls_state; 1952 bool ret = false; 1953 1954 spin_lock(&state->state_lock); 1955 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid)) 1956 goto out_noupdate; 1957 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid)) 1958 goto out_noupdate; 1959 nfs4_stateid_copy(&lsp->ls_stateid, stateid); 1960 ret = true; 1961 out_noupdate: 1962 spin_unlock(&state->state_lock); 1963 return ret; 1964 } 1965 1966 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode) 1967 { 1968 struct nfs_delegation *delegation; 1969 1970 fmode &= FMODE_READ|FMODE_WRITE; 1971 rcu_read_lock(); 1972 delegation = nfs4_get_valid_delegation(inode); 1973 if (delegation == NULL || (delegation->type & fmode) == fmode) { 1974 rcu_read_unlock(); 1975 return; 1976 } 1977 rcu_read_unlock(); 1978 nfs4_inode_return_delegation(inode); 1979 } 1980 1981 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) 1982 { 1983 struct nfs4_state *state = opendata->state; 1984 struct nfs_delegation *delegation; 1985 int open_mode = opendata->o_arg.open_flags; 1986 fmode_t fmode = opendata->o_arg.fmode; 1987 enum open_claim_type4 claim = opendata->o_arg.claim; 1988 nfs4_stateid stateid; 1989 int ret = -EAGAIN; 1990 1991 for (;;) { 1992 spin_lock(&state->owner->so_lock); 1993 if (can_open_cached(state, fmode, open_mode, claim)) { 1994 update_open_stateflags(state, fmode); 1995 spin_unlock(&state->owner->so_lock); 1996 goto out_return_state; 1997 } 1998 spin_unlock(&state->owner->so_lock); 1999 rcu_read_lock(); 2000 delegation = nfs4_get_valid_delegation(state->inode); 2001 if (!can_open_delegated(delegation, fmode, claim)) { 2002 rcu_read_unlock(); 2003 break; 2004 } 2005 /* Save the delegation */ 2006 nfs4_stateid_copy(&stateid, &delegation->stateid); 2007 rcu_read_unlock(); 2008 
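	/*
	 * We hold a delegation that covers the requested open mode, so try to
	 * satisfy the open without sending an OPEN RPC: release the unused
	 * OPEN seqid, check permissions locally (unless this is recovery),
	 * then record the open against the delegation stateid copied above.
	 */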
nfs_release_seqid(opendata->o_arg.seqid); 2009 if (!opendata->is_recover) { 2010 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode); 2011 if (ret != 0) 2012 goto out; 2013 } 2014 ret = -EAGAIN; 2015 2016 /* Try to update the stateid using the delegation */ 2017 if (update_open_stateid(state, NULL, &stateid, fmode)) 2018 goto out_return_state; 2019 } 2020 out: 2021 return ERR_PTR(ret); 2022 out_return_state: 2023 refcount_inc(&state->count); 2024 return state; 2025 } 2026 2027 static void 2028 nfs4_process_delegation(struct inode *inode, const struct cred *cred, 2029 enum open_claim_type4 claim, 2030 const struct nfs4_open_delegation *delegation) 2031 { 2032 switch (delegation->open_delegation_type) { 2033 case NFS4_OPEN_DELEGATE_READ: 2034 case NFS4_OPEN_DELEGATE_WRITE: 2035 case NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG: 2036 case NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG: 2037 break; 2038 default: 2039 return; 2040 } 2041 switch (claim) { 2042 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 2043 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 2044 pr_err_ratelimited("NFS: Broken NFSv4 server %s is " 2045 "returning a delegation for " 2046 "OPEN(CLAIM_DELEGATE_CUR)\n", 2047 NFS_SERVER(inode)->nfs_client->cl_hostname); 2048 break; 2049 case NFS4_OPEN_CLAIM_PREVIOUS: 2050 nfs_inode_reclaim_delegation(inode, cred, delegation->type, 2051 &delegation->stateid, 2052 delegation->pagemod_limit, 2053 delegation->open_delegation_type); 2054 break; 2055 default: 2056 nfs_inode_set_delegation(inode, cred, delegation->type, 2057 &delegation->stateid, 2058 delegation->pagemod_limit, 2059 delegation->open_delegation_type); 2060 } 2061 if (delegation->do_recall) 2062 nfs_async_inode_return_delegation(inode, &delegation->stateid); 2063 } 2064 2065 /* 2066 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes 2067 * and update the nfs4_state. 
2068 */ 2069 static struct nfs4_state * 2070 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data) 2071 { 2072 struct inode *inode = data->state->inode; 2073 struct nfs4_state *state = data->state; 2074 int ret; 2075 2076 if (!data->rpc_done) { 2077 if (data->rpc_status) 2078 return ERR_PTR(data->rpc_status); 2079 return nfs4_try_open_cached(data); 2080 } 2081 2082 ret = nfs_refresh_inode(inode, &data->f_attr); 2083 if (ret) 2084 return ERR_PTR(ret); 2085 2086 nfs4_process_delegation(state->inode, 2087 data->owner->so_cred, 2088 data->o_arg.claim, 2089 &data->o_res.delegation); 2090 2091 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_NO_OPEN_STATEID)) { 2092 if (!update_open_stateid(state, &data->o_res.stateid, 2093 NULL, data->o_arg.fmode)) 2094 return ERR_PTR(-EAGAIN); 2095 } else if (!update_open_stateid(state, NULL, NULL, data->o_arg.fmode)) 2096 return ERR_PTR(-EAGAIN); 2097 refcount_inc(&state->count); 2098 2099 return state; 2100 } 2101 2102 static struct inode * 2103 nfs4_opendata_get_inode(struct nfs4_opendata *data) 2104 { 2105 struct inode *inode; 2106 2107 switch (data->o_arg.claim) { 2108 case NFS4_OPEN_CLAIM_NULL: 2109 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 2110 case NFS4_OPEN_CLAIM_DELEGATE_PREV: 2111 if (!(data->f_attr.valid & NFS_ATTR_FATTR)) 2112 return ERR_PTR(-EAGAIN); 2113 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, 2114 &data->f_attr); 2115 break; 2116 default: 2117 inode = d_inode(data->dentry); 2118 ihold(inode); 2119 nfs_refresh_inode(inode, &data->f_attr); 2120 } 2121 return inode; 2122 } 2123 2124 static struct nfs4_state * 2125 nfs4_opendata_find_nfs4_state(struct nfs4_opendata *data) 2126 { 2127 struct nfs4_state *state; 2128 struct inode *inode; 2129 2130 inode = nfs4_opendata_get_inode(data); 2131 if (IS_ERR(inode)) 2132 return ERR_CAST(inode); 2133 if (data->state != NULL && data->state->inode == inode) { 2134 state = data->state; 2135 refcount_inc(&state->count); 2136 } else 2137 state = nfs4_get_open_state(inode, data->owner); 2138 iput(inode); 2139 if (state == NULL) 2140 state = ERR_PTR(-ENOMEM); 2141 return state; 2142 } 2143 2144 static struct nfs4_state * 2145 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 2146 { 2147 struct nfs4_state *state; 2148 2149 if (!data->rpc_done) { 2150 state = nfs4_try_open_cached(data); 2151 trace_nfs4_cached_open(data->state); 2152 goto out; 2153 } 2154 2155 state = nfs4_opendata_find_nfs4_state(data); 2156 if (IS_ERR(state)) 2157 goto out; 2158 2159 nfs4_process_delegation(state->inode, 2160 data->owner->so_cred, 2161 data->o_arg.claim, 2162 &data->o_res.delegation); 2163 2164 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_NO_OPEN_STATEID)) { 2165 if (!update_open_stateid(state, &data->o_res.stateid, 2166 NULL, data->o_arg.fmode)) { 2167 nfs4_put_open_state(state); 2168 state = ERR_PTR(-EAGAIN); 2169 } 2170 } else if (!update_open_stateid(state, NULL, NULL, data->o_arg.fmode)) { 2171 nfs4_put_open_state(state); 2172 state = ERR_PTR(-EAGAIN); 2173 } 2174 out: 2175 nfs_release_seqid(data->o_arg.seqid); 2176 return state; 2177 } 2178 2179 static struct nfs4_state * 2180 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 2181 { 2182 struct nfs4_state *ret; 2183 2184 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) 2185 ret =_nfs4_opendata_reclaim_to_nfs4_state(data); 2186 else 2187 ret = _nfs4_opendata_to_nfs4_state(data); 2188 nfs4_sequence_free_slot(&data->o_res.seq_res); 2189 return ret; 2190 } 2191 2192 static struct nfs_open_context * 2193 nfs4_state_find_open_context_mode(struct nfs4_state 
*state, fmode_t mode) 2194 { 2195 struct nfs_inode *nfsi = NFS_I(state->inode); 2196 struct nfs_open_context *ctx; 2197 2198 rcu_read_lock(); 2199 list_for_each_entry_rcu(ctx, &nfsi->open_files, list) { 2200 if (ctx->state != state) 2201 continue; 2202 if ((ctx->mode & mode) != mode) 2203 continue; 2204 if (!get_nfs_open_context(ctx)) 2205 continue; 2206 rcu_read_unlock(); 2207 return ctx; 2208 } 2209 rcu_read_unlock(); 2210 return ERR_PTR(-ENOENT); 2211 } 2212 2213 static struct nfs_open_context * 2214 nfs4_state_find_open_context(struct nfs4_state *state) 2215 { 2216 struct nfs_open_context *ctx; 2217 2218 ctx = nfs4_state_find_open_context_mode(state, FMODE_READ|FMODE_WRITE); 2219 if (!IS_ERR(ctx)) 2220 return ctx; 2221 ctx = nfs4_state_find_open_context_mode(state, FMODE_WRITE); 2222 if (!IS_ERR(ctx)) 2223 return ctx; 2224 return nfs4_state_find_open_context_mode(state, FMODE_READ); 2225 } 2226 2227 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, 2228 struct nfs4_state *state, enum open_claim_type4 claim) 2229 { 2230 struct nfs4_opendata *opendata; 2231 2232 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, 2233 NULL, claim, GFP_NOFS); 2234 if (opendata == NULL) 2235 return ERR_PTR(-ENOMEM); 2236 opendata->state = state; 2237 refcount_inc(&state->count); 2238 return opendata; 2239 } 2240 2241 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, 2242 fmode_t fmode) 2243 { 2244 struct nfs4_state *newstate; 2245 struct nfs_server *server = NFS_SB(opendata->dentry->d_sb); 2246 int openflags = opendata->o_arg.open_flags; 2247 int ret; 2248 2249 if (!nfs4_mode_match_open_stateid(opendata->state, fmode)) 2250 return 0; 2251 opendata->o_arg.fmode = fmode; 2252 opendata->o_arg.share_access = 2253 nfs4_map_atomic_open_share(server, fmode, openflags); 2254 memset(&opendata->o_res, 0, sizeof(opendata->o_res)); 2255 memset(&opendata->c_res, 0, sizeof(opendata->c_res)); 2256 nfs4_init_opendata_res(opendata); 2257 ret = _nfs4_recover_proc_open(opendata); 2258 if (ret != 0) 2259 return ret; 2260 newstate = nfs4_opendata_to_nfs4_state(opendata); 2261 if (IS_ERR(newstate)) 2262 return PTR_ERR(newstate); 2263 if (newstate != opendata->state) 2264 ret = -ESTALE; 2265 nfs4_close_state(newstate, fmode); 2266 return ret; 2267 } 2268 2269 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state) 2270 { 2271 int ret; 2272 2273 /* memory barrier prior to reading state->n_* */ 2274 smp_rmb(); 2275 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); 2276 if (ret != 0) 2277 return ret; 2278 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE); 2279 if (ret != 0) 2280 return ret; 2281 ret = nfs4_open_recover_helper(opendata, FMODE_READ); 2282 if (ret != 0) 2283 return ret; 2284 /* 2285 * We may have performed cached opens for all three recoveries. 2286 * Check if we need to update the current stateid. 2287 */ 2288 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 && 2289 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) { 2290 write_seqlock(&state->seqlock); 2291 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 2292 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 2293 write_sequnlock(&state->seqlock); 2294 } 2295 return 0; 2296 } 2297 2298 /* 2299 * OPEN_RECLAIM: 2300 * reclaim state on the server after a reboot. 
2301 */ 2302 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 2303 { 2304 struct nfs_delegation *delegation; 2305 struct nfs4_opendata *opendata; 2306 u32 delegation_type = NFS4_OPEN_DELEGATE_NONE; 2307 int status; 2308 2309 opendata = nfs4_open_recoverdata_alloc(ctx, state, 2310 NFS4_OPEN_CLAIM_PREVIOUS); 2311 if (IS_ERR(opendata)) 2312 return PTR_ERR(opendata); 2313 rcu_read_lock(); 2314 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2315 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0) { 2316 switch(delegation->type) { 2317 case FMODE_READ: 2318 delegation_type = NFS4_OPEN_DELEGATE_READ; 2319 if (test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) 2320 delegation_type = NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG; 2321 break; 2322 case FMODE_WRITE: 2323 case FMODE_READ|FMODE_WRITE: 2324 delegation_type = NFS4_OPEN_DELEGATE_WRITE; 2325 if (test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) 2326 delegation_type = NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG; 2327 } 2328 } 2329 rcu_read_unlock(); 2330 opendata->o_arg.u.delegation_type = delegation_type; 2331 status = nfs4_open_recover(opendata, state); 2332 nfs4_opendata_put(opendata); 2333 return status; 2334 } 2335 2336 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 2337 { 2338 struct nfs_server *server = NFS_SERVER(state->inode); 2339 struct nfs4_exception exception = { }; 2340 int err; 2341 do { 2342 err = _nfs4_do_open_reclaim(ctx, state); 2343 trace_nfs4_open_reclaim(ctx, 0, err); 2344 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 2345 continue; 2346 if (err != -NFS4ERR_DELAY) 2347 break; 2348 nfs4_handle_exception(server, err, &exception); 2349 } while (exception.retry); 2350 return err; 2351 } 2352 2353 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state) 2354 { 2355 struct nfs_open_context *ctx; 2356 int ret; 2357 2358 ctx = nfs4_state_find_open_context(state); 2359 if (IS_ERR(ctx)) 2360 return -EAGAIN; 2361 clear_bit(NFS_DELEGATED_STATE, &state->flags); 2362 nfs_state_clear_open_state_flags(state); 2363 ret = nfs4_do_open_reclaim(ctx, state); 2364 put_nfs_open_context(ctx); 2365 return ret; 2366 } 2367 2368 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err) 2369 { 2370 switch (err) { 2371 default: 2372 printk(KERN_ERR "NFS: %s: unhandled error " 2373 "%d.\n", __func__, err); 2374 fallthrough; 2375 case 0: 2376 case -ENOENT: 2377 case -EAGAIN: 2378 case -ESTALE: 2379 case -ETIMEDOUT: 2380 break; 2381 case -NFS4ERR_BADSESSION: 2382 case -NFS4ERR_BADSLOT: 2383 case -NFS4ERR_BAD_HIGH_SLOT: 2384 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 2385 case -NFS4ERR_DEADSESSION: 2386 return -EAGAIN; 2387 case -NFS4ERR_STALE_CLIENTID: 2388 case -NFS4ERR_STALE_STATEID: 2389 /* Don't recall a delegation if it was lost */ 2390 nfs4_schedule_lease_recovery(server->nfs_client); 2391 return -EAGAIN; 2392 case -NFS4ERR_MOVED: 2393 nfs4_schedule_migration_recovery(server); 2394 return -EAGAIN; 2395 case -NFS4ERR_LEASE_MOVED: 2396 nfs4_schedule_lease_moved_recovery(server->nfs_client); 2397 return -EAGAIN; 2398 case -NFS4ERR_DELEG_REVOKED: 2399 case -NFS4ERR_ADMIN_REVOKED: 2400 case -NFS4ERR_EXPIRED: 2401 case -NFS4ERR_BAD_STATEID: 2402 case -NFS4ERR_OPENMODE: 2403 nfs_inode_find_state_and_recover(state->inode, 2404 stateid); 2405 nfs4_schedule_stateid_recovery(server, state); 
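		/* Recovery is now scheduled; tell the caller to retry */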
2406 return -EAGAIN; 2407 case -NFS4ERR_DELAY: 2408 case -NFS4ERR_GRACE: 2409 ssleep(1); 2410 return -EAGAIN; 2411 case -ENOMEM: 2412 case -NFS4ERR_DENIED: 2413 if (fl) { 2414 struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner; 2415 if (lsp) 2416 set_bit(NFS_LOCK_LOST, &lsp->ls_flags); 2417 } 2418 return 0; 2419 } 2420 return err; 2421 } 2422 2423 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, 2424 struct nfs4_state *state, const nfs4_stateid *stateid) 2425 { 2426 struct nfs_server *server = NFS_SERVER(state->inode); 2427 struct nfs4_opendata *opendata; 2428 int err = 0; 2429 2430 opendata = nfs4_open_recoverdata_alloc(ctx, state, 2431 NFS4_OPEN_CLAIM_DELEG_CUR_FH); 2432 if (IS_ERR(opendata)) 2433 return PTR_ERR(opendata); 2434 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); 2435 if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) { 2436 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); 2437 if (err) 2438 goto out; 2439 } 2440 if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) { 2441 err = nfs4_open_recover_helper(opendata, FMODE_WRITE); 2442 if (err) 2443 goto out; 2444 } 2445 if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) { 2446 err = nfs4_open_recover_helper(opendata, FMODE_READ); 2447 if (err) 2448 goto out; 2449 } 2450 nfs_state_clear_delegation(state); 2451 out: 2452 nfs4_opendata_put(opendata); 2453 return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err); 2454 } 2455 2456 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata) 2457 { 2458 struct nfs4_opendata *data = calldata; 2459 2460 nfs4_setup_sequence(data->o_arg.server->nfs_client, 2461 &data->c_arg.seq_args, &data->c_res.seq_res, task); 2462 } 2463 2464 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata) 2465 { 2466 struct nfs4_opendata *data = calldata; 2467 2468 nfs40_sequence_done(task, &data->c_res.seq_res); 2469 2470 data->rpc_status = task->tk_status; 2471 if (data->rpc_status == 0) { 2472 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid); 2473 nfs_confirm_seqid(&data->owner->so_seqid, 0); 2474 renew_lease(data->o_res.server, data->timestamp); 2475 data->rpc_done = true; 2476 } 2477 } 2478 2479 static void nfs4_open_confirm_release(void *calldata) 2480 { 2481 struct nfs4_opendata *data = calldata; 2482 struct nfs4_state *state = NULL; 2483 2484 /* If this request hasn't been cancelled, do nothing */ 2485 if (!data->cancelled) 2486 goto out_free; 2487 /* In case of error, no cleanup! 
*/ 2488 if (!data->rpc_done) 2489 goto out_free; 2490 state = nfs4_opendata_to_nfs4_state(data); 2491 if (!IS_ERR(state)) 2492 nfs4_close_state(state, data->o_arg.fmode); 2493 out_free: 2494 nfs4_opendata_put(data); 2495 } 2496 2497 static const struct rpc_call_ops nfs4_open_confirm_ops = { 2498 .rpc_call_prepare = nfs4_open_confirm_prepare, 2499 .rpc_call_done = nfs4_open_confirm_done, 2500 .rpc_release = nfs4_open_confirm_release, 2501 }; 2502 2503 /* 2504 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata 2505 */ 2506 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) 2507 { 2508 struct nfs_server *server = NFS_SERVER(d_inode(data->dir)); 2509 struct rpc_task *task; 2510 struct rpc_message msg = { 2511 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM], 2512 .rpc_argp = &data->c_arg, 2513 .rpc_resp = &data->c_res, 2514 .rpc_cred = data->owner->so_cred, 2515 }; 2516 struct rpc_task_setup task_setup_data = { 2517 .rpc_client = server->client, 2518 .rpc_message = &msg, 2519 .callback_ops = &nfs4_open_confirm_ops, 2520 .callback_data = data, 2521 .workqueue = nfsiod_workqueue, 2522 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 2523 }; 2524 int status; 2525 2526 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1, 2527 data->is_recover); 2528 kref_get(&data->kref); 2529 data->rpc_done = false; 2530 data->rpc_status = 0; 2531 data->timestamp = jiffies; 2532 task = rpc_run_task(&task_setup_data); 2533 if (IS_ERR(task)) 2534 return PTR_ERR(task); 2535 status = rpc_wait_for_completion_task(task); 2536 if (status != 0) { 2537 data->cancelled = true; 2538 smp_wmb(); 2539 } else 2540 status = data->rpc_status; 2541 rpc_put_task(task); 2542 return status; 2543 } 2544 2545 static void nfs4_open_prepare(struct rpc_task *task, void *calldata) 2546 { 2547 struct nfs4_opendata *data = calldata; 2548 struct nfs4_state_owner *sp = data->owner; 2549 struct nfs_client *clp = sp->so_server->nfs_client; 2550 enum open_claim_type4 claim = data->o_arg.claim; 2551 2552 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) 2553 goto out_wait; 2554 /* 2555 * Check if we still need to send an OPEN call, or if we can use 2556 * a delegation instead. 2557 */ 2558 if (data->state != NULL) { 2559 struct nfs_delegation *delegation; 2560 2561 if (can_open_cached(data->state, data->o_arg.fmode, 2562 data->o_arg.open_flags, claim)) 2563 goto out_no_action; 2564 rcu_read_lock(); 2565 delegation = nfs4_get_valid_delegation(data->state->inode); 2566 if (can_open_delegated(delegation, data->o_arg.fmode, claim)) 2567 goto unlock_no_action; 2568 rcu_read_unlock(); 2569 } 2570 /* Update client id. 
*/ 2571 data->o_arg.clientid = clp->cl_clientid; 2572 switch (claim) { 2573 default: 2574 break; 2575 case NFS4_OPEN_CLAIM_PREVIOUS: 2576 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 2577 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 2578 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0]; 2579 fallthrough; 2580 case NFS4_OPEN_CLAIM_FH: 2581 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; 2582 } 2583 data->timestamp = jiffies; 2584 if (nfs4_setup_sequence(data->o_arg.server->nfs_client, 2585 &data->o_arg.seq_args, 2586 &data->o_res.seq_res, 2587 task) != 0) 2588 nfs_release_seqid(data->o_arg.seqid); 2589 2590 /* Set the create mode (note dependency on the session type) */ 2591 data->o_arg.createmode = NFS4_CREATE_UNCHECKED; 2592 if (data->o_arg.open_flags & O_EXCL) { 2593 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1; 2594 if (clp->cl_mvops->minor_version == 0) { 2595 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE; 2596 /* don't put an ACCESS op in OPEN compound if O_EXCL, 2597 * because ACCESS will return permission denied for 2598 * all bits until close */ 2599 data->o_res.access_request = data->o_arg.access = 0; 2600 } else if (nfs4_has_persistent_session(clp)) 2601 data->o_arg.createmode = NFS4_CREATE_GUARDED; 2602 } 2603 return; 2604 unlock_no_action: 2605 trace_nfs4_cached_open(data->state); 2606 rcu_read_unlock(); 2607 out_no_action: 2608 task->tk_action = NULL; 2609 out_wait: 2610 nfs4_sequence_done(task, &data->o_res.seq_res); 2611 } 2612 2613 static void nfs4_open_done(struct rpc_task *task, void *calldata) 2614 { 2615 struct nfs4_opendata *data = calldata; 2616 2617 data->rpc_status = task->tk_status; 2618 2619 if (!nfs4_sequence_process(task, &data->o_res.seq_res)) 2620 return; 2621 2622 if (task->tk_status == 0) { 2623 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) { 2624 switch (data->o_res.f_attr->mode & S_IFMT) { 2625 case S_IFREG: 2626 break; 2627 case S_IFLNK: 2628 data->rpc_status = -ELOOP; 2629 break; 2630 case S_IFDIR: 2631 data->rpc_status = -EISDIR; 2632 break; 2633 default: 2634 data->rpc_status = -ENOTDIR; 2635 } 2636 } 2637 renew_lease(data->o_res.server, data->timestamp); 2638 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)) 2639 nfs_confirm_seqid(&data->owner->so_seqid, 0); 2640 } 2641 data->rpc_done = true; 2642 } 2643 2644 static void nfs4_open_release(void *calldata) 2645 { 2646 struct nfs4_opendata *data = calldata; 2647 struct nfs4_state *state = NULL; 2648 2649 /* In case of error, no cleanup! */ 2650 if (data->rpc_status != 0 || !data->rpc_done) { 2651 nfs_release_seqid(data->o_arg.seqid); 2652 goto out_free; 2653 } 2654 /* If this request hasn't been cancelled, do nothing */ 2655 if (!data->cancelled) 2656 goto out_free; 2657 /* In case we need an open_confirm, no cleanup! 
*/ 2658 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) 2659 goto out_free; 2660 state = nfs4_opendata_to_nfs4_state(data); 2661 if (!IS_ERR(state)) 2662 nfs4_close_state(state, data->o_arg.fmode); 2663 out_free: 2664 nfs4_opendata_put(data); 2665 } 2666 2667 static const struct rpc_call_ops nfs4_open_ops = { 2668 .rpc_call_prepare = nfs4_open_prepare, 2669 .rpc_call_done = nfs4_open_done, 2670 .rpc_release = nfs4_open_release, 2671 }; 2672 2673 static int nfs4_run_open_task(struct nfs4_opendata *data, 2674 struct nfs_open_context *ctx) 2675 { 2676 struct inode *dir = d_inode(data->dir); 2677 struct nfs_server *server = NFS_SERVER(dir); 2678 struct nfs_openargs *o_arg = &data->o_arg; 2679 struct nfs_openres *o_res = &data->o_res; 2680 struct rpc_task *task; 2681 struct rpc_message msg = { 2682 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN], 2683 .rpc_argp = o_arg, 2684 .rpc_resp = o_res, 2685 .rpc_cred = data->owner->so_cred, 2686 }; 2687 struct rpc_task_setup task_setup_data = { 2688 .rpc_client = server->client, 2689 .rpc_message = &msg, 2690 .callback_ops = &nfs4_open_ops, 2691 .callback_data = data, 2692 .workqueue = nfsiod_workqueue, 2693 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 2694 }; 2695 int status; 2696 2697 if (nfs_server_capable(dir, NFS_CAP_MOVEABLE)) 2698 task_setup_data.flags |= RPC_TASK_MOVEABLE; 2699 2700 kref_get(&data->kref); 2701 data->rpc_done = false; 2702 data->rpc_status = 0; 2703 data->cancelled = false; 2704 data->is_recover = false; 2705 if (!ctx) { 2706 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 1); 2707 data->is_recover = true; 2708 task_setup_data.flags |= RPC_TASK_TIMEOUT; 2709 } else { 2710 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 0); 2711 pnfs_lgopen_prepare(data, ctx); 2712 } 2713 task = rpc_run_task(&task_setup_data); 2714 if (IS_ERR(task)) 2715 return PTR_ERR(task); 2716 status = rpc_wait_for_completion_task(task); 2717 if (status != 0) { 2718 data->cancelled = true; 2719 smp_wmb(); 2720 } else 2721 status = data->rpc_status; 2722 rpc_put_task(task); 2723 2724 return status; 2725 } 2726 2727 static int _nfs4_recover_proc_open(struct nfs4_opendata *data) 2728 { 2729 struct inode *dir = d_inode(data->dir); 2730 struct nfs_openres *o_res = &data->o_res; 2731 int status; 2732 2733 status = nfs4_run_open_task(data, NULL); 2734 if (status != 0 || !data->rpc_done) 2735 return status; 2736 2737 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr); 2738 2739 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) 2740 status = _nfs4_proc_open_confirm(data); 2741 2742 return status; 2743 } 2744 2745 /* 2746 * Additional permission checks in order to distinguish between an 2747 * open for read, and an open for execute. This works around the 2748 * fact that NFSv4 OPEN treats read and execute permissions as being 2749 * the same. 2750 * Note that in the non-execute case, we want to turn off permission 2751 * checking if we just created a new file (POSIX open() semantics). 
2752 */ 2753 static int nfs4_opendata_access(const struct cred *cred, 2754 struct nfs4_opendata *opendata, 2755 struct nfs4_state *state, fmode_t fmode) 2756 { 2757 struct nfs_access_entry cache; 2758 u32 mask, flags; 2759 2760 /* access call failed or for some reason the server doesn't 2761 * support any access modes -- defer access call until later */ 2762 if (opendata->o_res.access_supported == 0) 2763 return 0; 2764 2765 mask = 0; 2766 if (fmode & FMODE_EXEC) { 2767 /* ONLY check for exec rights */ 2768 if (S_ISDIR(state->inode->i_mode)) 2769 mask = NFS4_ACCESS_LOOKUP; 2770 else 2771 mask = NFS4_ACCESS_EXECUTE; 2772 } else if ((fmode & FMODE_READ) && !opendata->file_created) 2773 mask = NFS4_ACCESS_READ; 2774 2775 nfs_access_set_mask(&cache, opendata->o_res.access_result); 2776 nfs_access_add_cache(state->inode, &cache, cred); 2777 2778 flags = NFS4_ACCESS_READ | NFS4_ACCESS_EXECUTE | NFS4_ACCESS_LOOKUP; 2779 if ((mask & ~cache.mask & flags) == 0) 2780 return 0; 2781 2782 return -EACCES; 2783 } 2784 2785 /* 2786 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata 2787 */ 2788 static int _nfs4_proc_open(struct nfs4_opendata *data, 2789 struct nfs_open_context *ctx) 2790 { 2791 struct inode *dir = d_inode(data->dir); 2792 struct nfs_server *server = NFS_SERVER(dir); 2793 struct nfs_openargs *o_arg = &data->o_arg; 2794 struct nfs_openres *o_res = &data->o_res; 2795 int status; 2796 2797 status = nfs4_run_open_task(data, ctx); 2798 if (!data->rpc_done) 2799 return status; 2800 if (status != 0) { 2801 if (status == -NFS4ERR_BADNAME && 2802 !(o_arg->open_flags & O_CREAT)) 2803 return -ENOENT; 2804 return status; 2805 } 2806 2807 nfs_fattr_map_and_free_names(server, &data->f_attr); 2808 2809 if (o_arg->open_flags & O_CREAT) { 2810 if (o_arg->open_flags & O_EXCL) 2811 data->file_created = true; 2812 else if (o_res->cinfo.before != o_res->cinfo.after) 2813 data->file_created = true; 2814 if (data->file_created || 2815 inode_peek_iversion_raw(dir) != o_res->cinfo.after) 2816 nfs4_update_changeattr(dir, &o_res->cinfo, 2817 o_res->f_attr->time_start, 2818 NFS_INO_INVALID_DATA); 2819 } 2820 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) 2821 server->caps &= ~NFS_CAP_POSIX_LOCK; 2822 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 2823 status = _nfs4_proc_open_confirm(data); 2824 if (status != 0) 2825 return status; 2826 } 2827 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) { 2828 struct nfs_fh *fh = &o_res->fh; 2829 2830 nfs4_sequence_free_slot(&o_res->seq_res); 2831 if (o_arg->claim == NFS4_OPEN_CLAIM_FH) 2832 fh = NFS_FH(d_inode(data->dentry)); 2833 nfs4_proc_getattr(server, fh, o_res->f_attr, NULL); 2834 } 2835 return 0; 2836 } 2837 2838 /* 2839 * OPEN_EXPIRED: 2840 * reclaim state on the server after a network partition. 2841 * Assumes caller holds the appropriate lock 2842 */ 2843 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2844 { 2845 struct nfs4_opendata *opendata; 2846 int ret; 2847 2848 opendata = nfs4_open_recoverdata_alloc(ctx, state, NFS4_OPEN_CLAIM_FH); 2849 if (IS_ERR(opendata)) 2850 return PTR_ERR(opendata); 2851 /* 2852 * We're not recovering a delegation, so ask for no delegation. 2853 * Otherwise the recovery thread could deadlock with an outstanding 2854 * delegation return. 
2855 */ 2856 opendata->o_arg.open_flags = O_DIRECT; 2857 ret = nfs4_open_recover(opendata, state); 2858 if (ret == -ESTALE) 2859 d_drop(ctx->dentry); 2860 nfs4_opendata_put(opendata); 2861 return ret; 2862 } 2863 2864 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2865 { 2866 struct nfs_server *server = NFS_SERVER(state->inode); 2867 struct nfs4_exception exception = { }; 2868 int err; 2869 2870 do { 2871 err = _nfs4_open_expired(ctx, state); 2872 trace_nfs4_open_expired(ctx, 0, err); 2873 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 2874 continue; 2875 switch (err) { 2876 default: 2877 goto out; 2878 case -NFS4ERR_GRACE: 2879 case -NFS4ERR_DELAY: 2880 nfs4_handle_exception(server, err, &exception); 2881 err = 0; 2882 } 2883 } while (exception.retry); 2884 out: 2885 return err; 2886 } 2887 2888 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2889 { 2890 struct nfs_open_context *ctx; 2891 int ret; 2892 2893 ctx = nfs4_state_find_open_context(state); 2894 if (IS_ERR(ctx)) 2895 return -EAGAIN; 2896 ret = nfs4_do_open_expired(ctx, state); 2897 put_nfs_open_context(ctx); 2898 return ret; 2899 } 2900 2901 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state, 2902 const nfs4_stateid *stateid) 2903 { 2904 nfs_remove_bad_delegation(state->inode, stateid); 2905 nfs_state_clear_delegation(state); 2906 } 2907 2908 static void nfs40_clear_delegation_stateid(struct nfs4_state *state) 2909 { 2910 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL) 2911 nfs_finish_clear_delegation_stateid(state, NULL); 2912 } 2913 2914 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2915 { 2916 /* NFSv4.0 doesn't allow for delegation recovery on open expire */ 2917 nfs40_clear_delegation_stateid(state); 2918 nfs_state_clear_open_state_flags(state); 2919 return nfs4_open_expired(sp, state); 2920 } 2921 2922 static int nfs40_test_and_free_expired_stateid(struct nfs_server *server, 2923 nfs4_stateid *stateid, const struct cred *cred) 2924 { 2925 return -NFS4ERR_BAD_STATEID; 2926 } 2927 2928 #if defined(CONFIG_NFS_V4_1) 2929 static int nfs41_test_and_free_expired_stateid(struct nfs_server *server, 2930 nfs4_stateid *stateid, const struct cred *cred) 2931 { 2932 int status; 2933 2934 switch (stateid->type) { 2935 default: 2936 break; 2937 case NFS4_INVALID_STATEID_TYPE: 2938 case NFS4_SPECIAL_STATEID_TYPE: 2939 case NFS4_FREED_STATEID_TYPE: 2940 return -NFS4ERR_BAD_STATEID; 2941 case NFS4_REVOKED_STATEID_TYPE: 2942 goto out_free; 2943 } 2944 2945 status = nfs41_test_stateid(server, stateid, cred); 2946 switch (status) { 2947 case -NFS4ERR_EXPIRED: 2948 case -NFS4ERR_ADMIN_REVOKED: 2949 case -NFS4ERR_DELEG_REVOKED: 2950 break; 2951 default: 2952 return status; 2953 } 2954 out_free: 2955 /* Ack the revoked state to the server */ 2956 nfs41_free_stateid(server, stateid, cred, true); 2957 return -NFS4ERR_EXPIRED; 2958 } 2959 2960 static int nfs41_check_delegation_stateid(struct nfs4_state *state) 2961 { 2962 struct nfs_server *server = NFS_SERVER(state->inode); 2963 nfs4_stateid stateid; 2964 struct nfs_delegation *delegation; 2965 const struct cred *cred = NULL; 2966 int status, ret = NFS_OK; 2967 2968 /* Get the delegation credential for use by test/free_stateid */ 2969 rcu_read_lock(); 2970 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2971 if (delegation == NULL) { 2972 rcu_read_unlock(); 2973 nfs_state_clear_delegation(state); 2974 return NFS_OK; 
2975 } 2976 2977 spin_lock(&delegation->lock); 2978 nfs4_stateid_copy(&stateid, &delegation->stateid); 2979 2980 if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, 2981 &delegation->flags)) { 2982 spin_unlock(&delegation->lock); 2983 rcu_read_unlock(); 2984 return NFS_OK; 2985 } 2986 2987 if (delegation->cred) 2988 cred = get_cred(delegation->cred); 2989 spin_unlock(&delegation->lock); 2990 rcu_read_unlock(); 2991 status = nfs41_test_and_free_expired_stateid(server, &stateid, cred); 2992 trace_nfs4_test_delegation_stateid(state, NULL, status); 2993 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) 2994 nfs_finish_clear_delegation_stateid(state, &stateid); 2995 else 2996 ret = status; 2997 2998 put_cred(cred); 2999 return ret; 3000 } 3001 3002 static void nfs41_delegation_recover_stateid(struct nfs4_state *state) 3003 { 3004 nfs4_stateid tmp; 3005 3006 if (test_bit(NFS_DELEGATED_STATE, &state->flags) && 3007 nfs4_copy_delegation_stateid(state->inode, state->state, 3008 &tmp, NULL) && 3009 nfs4_stateid_match_other(&state->stateid, &tmp)) 3010 nfs_state_set_delegation(state, &tmp, state->state); 3011 else 3012 nfs_state_clear_delegation(state); 3013 } 3014 3015 /** 3016 * nfs41_check_expired_locks - possibly free a lock stateid 3017 * 3018 * @state: NFSv4 state for an inode 3019 * 3020 * Returns NFS_OK if recovery for this stateid is now finished. 3021 * Otherwise a negative NFS4ERR value is returned. 3022 */ 3023 static int nfs41_check_expired_locks(struct nfs4_state *state) 3024 { 3025 int status, ret = NFS_OK; 3026 struct nfs4_lock_state *lsp, *prev = NULL; 3027 struct nfs_server *server = NFS_SERVER(state->inode); 3028 3029 if (!test_bit(LK_STATE_IN_USE, &state->flags)) 3030 goto out; 3031 3032 spin_lock(&state->state_lock); 3033 list_for_each_entry(lsp, &state->lock_states, ls_locks) { 3034 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { 3035 const struct cred *cred = lsp->ls_state->owner->so_cred; 3036 3037 refcount_inc(&lsp->ls_count); 3038 spin_unlock(&state->state_lock); 3039 3040 nfs4_put_lock_state(prev); 3041 prev = lsp; 3042 3043 status = nfs41_test_and_free_expired_stateid(server, 3044 &lsp->ls_stateid, 3045 cred); 3046 trace_nfs4_test_lock_stateid(state, lsp, status); 3047 if (status == -NFS4ERR_EXPIRED || 3048 status == -NFS4ERR_BAD_STATEID) { 3049 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 3050 lsp->ls_stateid.type = NFS4_INVALID_STATEID_TYPE; 3051 if (!recover_lost_locks) 3052 set_bit(NFS_LOCK_LOST, &lsp->ls_flags); 3053 } else if (status != NFS_OK) { 3054 ret = status; 3055 nfs4_put_lock_state(prev); 3056 goto out; 3057 } 3058 spin_lock(&state->state_lock); 3059 } 3060 } 3061 spin_unlock(&state->state_lock); 3062 nfs4_put_lock_state(prev); 3063 out: 3064 return ret; 3065 } 3066 3067 /** 3068 * nfs41_check_open_stateid - possibly free an open stateid 3069 * 3070 * @state: NFSv4 state for an inode 3071 * 3072 * Returns NFS_OK if recovery for this stateid is now finished. 3073 * Otherwise a negative NFS4ERR value is returned. 
3074 */ 3075 static int nfs41_check_open_stateid(struct nfs4_state *state) 3076 { 3077 struct nfs_server *server = NFS_SERVER(state->inode); 3078 nfs4_stateid *stateid = &state->open_stateid; 3079 const struct cred *cred = state->owner->so_cred; 3080 int status; 3081 3082 if (test_bit(NFS_OPEN_STATE, &state->flags) == 0) 3083 return -NFS4ERR_BAD_STATEID; 3084 status = nfs41_test_and_free_expired_stateid(server, stateid, cred); 3085 trace_nfs4_test_open_stateid(state, NULL, status); 3086 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) { 3087 nfs_state_clear_open_state_flags(state); 3088 stateid->type = NFS4_INVALID_STATEID_TYPE; 3089 return status; 3090 } 3091 if (nfs_open_stateid_recover_openmode(state)) 3092 return -NFS4ERR_OPENMODE; 3093 return NFS_OK; 3094 } 3095 3096 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 3097 { 3098 int status; 3099 3100 status = nfs41_check_delegation_stateid(state); 3101 if (status != NFS_OK) 3102 return status; 3103 nfs41_delegation_recover_stateid(state); 3104 3105 status = nfs41_check_expired_locks(state); 3106 if (status != NFS_OK) 3107 return status; 3108 status = nfs41_check_open_stateid(state); 3109 if (status != NFS_OK) 3110 status = nfs4_open_expired(sp, state); 3111 return status; 3112 } 3113 #endif 3114 3115 /* 3116 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-* 3117 * fields corresponding to attributes that were used to store the verifier. 3118 * Make sure we clobber those fields in the later setattr call 3119 */ 3120 static unsigned nfs4_exclusive_attrset(struct nfs4_opendata *opendata, 3121 struct iattr *sattr, struct nfs4_label **label) 3122 { 3123 const __u32 *bitmask = opendata->o_arg.server->exclcreat_bitmask; 3124 __u32 attrset[3]; 3125 unsigned ret; 3126 unsigned i; 3127 3128 for (i = 0; i < ARRAY_SIZE(attrset); i++) { 3129 attrset[i] = opendata->o_res.attrset[i]; 3130 if (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE4_1) 3131 attrset[i] &= ~bitmask[i]; 3132 } 3133 3134 ret = (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE) ? 
3135 sattr->ia_valid : 0; 3136 3137 if ((attrset[1] & (FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET))) { 3138 if (sattr->ia_valid & ATTR_ATIME_SET) 3139 ret |= ATTR_ATIME_SET; 3140 else 3141 ret |= ATTR_ATIME; 3142 } 3143 3144 if ((attrset[1] & (FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET))) { 3145 if (sattr->ia_valid & ATTR_MTIME_SET) 3146 ret |= ATTR_MTIME_SET; 3147 else 3148 ret |= ATTR_MTIME; 3149 } 3150 3151 if (!(attrset[2] & FATTR4_WORD2_SECURITY_LABEL)) 3152 *label = NULL; 3153 return ret; 3154 } 3155 3156 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, 3157 struct nfs_open_context *ctx) 3158 { 3159 struct nfs4_state_owner *sp = opendata->owner; 3160 struct nfs_server *server = sp->so_server; 3161 struct dentry *dentry; 3162 struct nfs4_state *state; 3163 fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx); 3164 struct inode *dir = d_inode(opendata->dir); 3165 unsigned long dir_verifier; 3166 int ret; 3167 3168 dir_verifier = nfs_save_change_attribute(dir); 3169 3170 ret = _nfs4_proc_open(opendata, ctx); 3171 if (ret != 0) 3172 goto out; 3173 3174 state = _nfs4_opendata_to_nfs4_state(opendata); 3175 ret = PTR_ERR(state); 3176 if (IS_ERR(state)) 3177 goto out; 3178 ctx->state = state; 3179 if (server->caps & NFS_CAP_POSIX_LOCK) 3180 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); 3181 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK) 3182 set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags); 3183 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_PRESERVE_UNLINKED) 3184 set_bit(NFS_INO_PRESERVE_UNLINKED, &NFS_I(state->inode)->flags); 3185 3186 switch(opendata->o_arg.claim) { 3187 default: 3188 break; 3189 case NFS4_OPEN_CLAIM_NULL: 3190 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 3191 case NFS4_OPEN_CLAIM_DELEGATE_PREV: 3192 if (!opendata->rpc_done) 3193 break; 3194 if (opendata->o_res.delegation.type != 0) 3195 dir_verifier = nfs_save_change_attribute(dir); 3196 } 3197 3198 dentry = opendata->dentry; 3199 nfs_set_verifier(dentry, dir_verifier); 3200 if (d_really_is_negative(dentry)) { 3201 struct dentry *alias; 3202 d_drop(dentry); 3203 alias = d_splice_alias(igrab(state->inode), dentry); 3204 /* d_splice_alias() can't fail here - it's a non-directory */ 3205 if (alias) { 3206 dput(ctx->dentry); 3207 nfs_set_verifier(alias, dir_verifier); 3208 ctx->dentry = dentry = alias; 3209 } 3210 } 3211 3212 /* Parse layoutget results before we check for access */ 3213 pnfs_parse_lgopen(state->inode, opendata->lgp, ctx); 3214 3215 ret = nfs4_opendata_access(sp->so_cred, opendata, state, acc_mode); 3216 if (ret != 0) 3217 goto out; 3218 3219 if (d_inode(dentry) == state->inode) 3220 nfs_inode_attach_open_context(ctx); 3221 3222 out: 3223 if (!opendata->cancelled) { 3224 if (opendata->lgp) { 3225 nfs4_lgopen_release(opendata->lgp); 3226 opendata->lgp = NULL; 3227 } 3228 nfs4_sequence_free_slot(&opendata->o_res.seq_res); 3229 } 3230 return ret; 3231 } 3232 3233 /* 3234 * Returns a referenced nfs4_state 3235 */ 3236 static int _nfs4_do_open(struct inode *dir, 3237 struct nfs_open_context *ctx, 3238 int flags, 3239 const struct nfs4_open_createattrs *c, 3240 int *opened) 3241 { 3242 struct nfs4_state_owner *sp; 3243 struct nfs4_state *state = NULL; 3244 struct nfs_server *server = NFS_SERVER(dir); 3245 struct nfs4_opendata *opendata; 3246 struct dentry *dentry = ctx->dentry; 3247 const struct cred *cred = ctx->cred; 3248 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold; 3249 fmode_t fmode = _nfs4_ctx_to_openmode(ctx); 3250 enum open_claim_type4 claim = 
NFS4_OPEN_CLAIM_NULL; 3251 struct iattr *sattr = c->sattr; 3252 struct nfs4_label *label = c->label; 3253 int status; 3254 3255 /* Protect against reboot recovery conflicts */ 3256 status = -ENOMEM; 3257 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL); 3258 if (sp == NULL) { 3259 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n"); 3260 goto out_err; 3261 } 3262 status = nfs4_client_recover_expired_lease(server->nfs_client); 3263 if (status != 0) 3264 goto err_put_state_owner; 3265 if (d_really_is_positive(dentry)) 3266 nfs4_return_incompatible_delegation(d_inode(dentry), fmode); 3267 status = -ENOMEM; 3268 if (d_really_is_positive(dentry)) 3269 claim = NFS4_OPEN_CLAIM_FH; 3270 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, 3271 c, claim, GFP_KERNEL); 3272 if (opendata == NULL) 3273 goto err_put_state_owner; 3274 3275 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) { 3276 if (!opendata->f_attr.mdsthreshold) { 3277 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); 3278 if (!opendata->f_attr.mdsthreshold) 3279 goto err_opendata_put; 3280 } 3281 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; 3282 } 3283 if (d_really_is_positive(dentry)) 3284 opendata->state = nfs4_get_open_state(d_inode(dentry), sp); 3285 3286 status = _nfs4_open_and_get_state(opendata, ctx); 3287 if (status != 0) 3288 goto err_opendata_put; 3289 state = ctx->state; 3290 3291 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) && 3292 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) { 3293 unsigned attrs = nfs4_exclusive_attrset(opendata, sattr, &label); 3294 /* 3295 * send create attributes which was not set by open 3296 * with an extra setattr. 3297 */ 3298 if (attrs || label) { 3299 unsigned ia_old = sattr->ia_valid; 3300 3301 sattr->ia_valid = attrs; 3302 nfs_fattr_init(opendata->o_res.f_attr); 3303 status = nfs4_do_setattr(state->inode, cred, 3304 opendata->o_res.f_attr, sattr, 3305 ctx, label); 3306 if (status == 0) { 3307 nfs_setattr_update_inode(state->inode, sattr, 3308 opendata->o_res.f_attr); 3309 nfs_setsecurity(state->inode, opendata->o_res.f_attr); 3310 } 3311 sattr->ia_valid = ia_old; 3312 } 3313 } 3314 if (opened && opendata->file_created) 3315 *opened = 1; 3316 3317 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) { 3318 *ctx_th = opendata->f_attr.mdsthreshold; 3319 opendata->f_attr.mdsthreshold = NULL; 3320 } 3321 3322 nfs4_opendata_put(opendata); 3323 nfs4_put_state_owner(sp); 3324 return 0; 3325 err_opendata_put: 3326 nfs4_opendata_put(opendata); 3327 err_put_state_owner: 3328 nfs4_put_state_owner(sp); 3329 out_err: 3330 return status; 3331 } 3332 3333 3334 static struct nfs4_state *nfs4_do_open(struct inode *dir, 3335 struct nfs_open_context *ctx, 3336 int flags, 3337 struct iattr *sattr, 3338 struct nfs4_label *label, 3339 int *opened) 3340 { 3341 struct nfs_server *server = NFS_SERVER(dir); 3342 struct nfs4_exception exception = { 3343 .interruptible = true, 3344 }; 3345 struct nfs4_state *res; 3346 struct nfs4_open_createattrs c = { 3347 .label = label, 3348 .sattr = sattr, 3349 .verf = { 3350 [0] = (__u32)jiffies, 3351 [1] = (__u32)current->pid, 3352 }, 3353 }; 3354 int status; 3355 3356 do { 3357 status = _nfs4_do_open(dir, ctx, flags, &c, opened); 3358 res = ctx->state; 3359 trace_nfs4_open_file(ctx, flags, status); 3360 if (status == 0) 3361 break; 3362 /* NOTE: BAD_SEQID means the server and client disagree about the 3363 * book-keeping w.r.t. state-changing operations 3364 * (OPEN/CLOSE/LOCK/LOCKU...) 
3365 * It is actually a sign of a bug on the client or on the server. 3366 * 3367 * If we receive a BAD_SEQID error in the particular case of 3368 * doing an OPEN, we assume that nfs_increment_open_seqid() will 3369 * have unhashed the old state_owner for us, and that we can 3370 * therefore safely retry using a new one. We should still warn 3371 * the user though... 3372 */ 3373 if (status == -NFS4ERR_BAD_SEQID) { 3374 pr_warn_ratelimited("NFS: v4 server %s " 3375 " returned a bad sequence-id error!\n", 3376 NFS_SERVER(dir)->nfs_client->cl_hostname); 3377 exception.retry = 1; 3378 continue; 3379 } 3380 /* 3381 * BAD_STATEID on OPEN means that the server cancelled our 3382 * state before it received the OPEN_CONFIRM. 3383 * Recover by retrying the request as per the discussion 3384 * on Page 181 of RFC3530. 3385 */ 3386 if (status == -NFS4ERR_BAD_STATEID) { 3387 exception.retry = 1; 3388 continue; 3389 } 3390 if (status == -NFS4ERR_EXPIRED) { 3391 nfs4_schedule_lease_recovery(server->nfs_client); 3392 exception.retry = 1; 3393 continue; 3394 } 3395 if (status == -EAGAIN) { 3396 /* We must have found a delegation */ 3397 exception.retry = 1; 3398 continue; 3399 } 3400 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception)) 3401 continue; 3402 res = ERR_PTR(nfs4_handle_exception(server, 3403 status, &exception)); 3404 } while (exception.retry); 3405 return res; 3406 } 3407 3408 static int _nfs4_do_setattr(struct inode *inode, 3409 struct nfs_setattrargs *arg, 3410 struct nfs_setattrres *res, 3411 const struct cred *cred, 3412 struct nfs_open_context *ctx) 3413 { 3414 struct nfs_server *server = NFS_SERVER(inode); 3415 struct rpc_message msg = { 3416 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 3417 .rpc_argp = arg, 3418 .rpc_resp = res, 3419 .rpc_cred = cred, 3420 }; 3421 const struct cred *delegation_cred = NULL; 3422 unsigned long timestamp = jiffies; 3423 bool truncate; 3424 int status; 3425 3426 nfs_fattr_init(res->fattr); 3427 3428 /* Servers should only apply open mode checks for file size changes */ 3429 truncate = (arg->iap->ia_valid & ATTR_SIZE) ? 
true : false; 3430 if (!truncate) { 3431 nfs4_inode_make_writeable(inode); 3432 goto zero_stateid; 3433 } 3434 3435 if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) { 3436 /* Use that stateid */ 3437 } else if (ctx != NULL && ctx->state) { 3438 struct nfs_lock_context *l_ctx; 3439 if (!nfs4_valid_open_stateid(ctx->state)) 3440 return -EBADF; 3441 l_ctx = nfs_get_lock_context(ctx); 3442 if (IS_ERR(l_ctx)) 3443 return PTR_ERR(l_ctx); 3444 status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx, 3445 &arg->stateid, &delegation_cred); 3446 nfs_put_lock_context(l_ctx); 3447 if (status == -EIO) 3448 return -EBADF; 3449 else if (status == -EAGAIN) 3450 goto zero_stateid; 3451 } else { 3452 zero_stateid: 3453 nfs4_stateid_copy(&arg->stateid, &zero_stateid); 3454 } 3455 if (delegation_cred) 3456 msg.rpc_cred = delegation_cred; 3457 3458 status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1); 3459 3460 put_cred(delegation_cred); 3461 if (status == 0 && ctx != NULL) 3462 renew_lease(server, timestamp); 3463 trace_nfs4_setattr(inode, &arg->stateid, status); 3464 return status; 3465 } 3466 3467 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred, 3468 struct nfs_fattr *fattr, struct iattr *sattr, 3469 struct nfs_open_context *ctx, struct nfs4_label *ilabel) 3470 { 3471 struct nfs_server *server = NFS_SERVER(inode); 3472 __u32 bitmask[NFS4_BITMASK_SZ]; 3473 struct nfs4_state *state = ctx ? ctx->state : NULL; 3474 struct nfs_setattrargs arg = { 3475 .fh = NFS_FH(inode), 3476 .iap = sattr, 3477 .server = server, 3478 .bitmask = bitmask, 3479 .label = ilabel, 3480 }; 3481 struct nfs_setattrres res = { 3482 .fattr = fattr, 3483 .server = server, 3484 }; 3485 struct nfs4_exception exception = { 3486 .state = state, 3487 .inode = inode, 3488 .stateid = &arg.stateid, 3489 }; 3490 unsigned long adjust_flags = NFS_INO_INVALID_CHANGE | 3491 NFS_INO_INVALID_CTIME; 3492 int err; 3493 3494 if (sattr->ia_valid & (ATTR_MODE | ATTR_KILL_SUID | ATTR_KILL_SGID)) 3495 adjust_flags |= NFS_INO_INVALID_MODE; 3496 if (sattr->ia_valid & (ATTR_UID | ATTR_GID)) 3497 adjust_flags |= NFS_INO_INVALID_OTHER; 3498 if (sattr->ia_valid & ATTR_ATIME) 3499 adjust_flags |= NFS_INO_INVALID_ATIME; 3500 if (sattr->ia_valid & ATTR_MTIME) 3501 adjust_flags |= NFS_INO_INVALID_MTIME; 3502 3503 do { 3504 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label), 3505 inode, adjust_flags); 3506 3507 err = _nfs4_do_setattr(inode, &arg, &res, cred, ctx); 3508 switch (err) { 3509 case -NFS4ERR_OPENMODE: 3510 if (!(sattr->ia_valid & ATTR_SIZE)) { 3511 pr_warn_once("NFSv4: server %s is incorrectly " 3512 "applying open mode checks to " 3513 "a SETATTR that is not " 3514 "changing file size.\n", 3515 server->nfs_client->cl_hostname); 3516 } 3517 if (state && !(state->state & FMODE_WRITE)) { 3518 err = -EBADF; 3519 if (sattr->ia_valid & ATTR_OPEN) 3520 err = -EACCES; 3521 goto out; 3522 } 3523 } 3524 err = nfs4_handle_exception(server, err, &exception); 3525 } while (exception.retry); 3526 out: 3527 return err; 3528 } 3529 3530 static bool 3531 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task) 3532 { 3533 if (inode == NULL || !nfs_have_layout(inode)) 3534 return false; 3535 3536 return pnfs_wait_on_layoutreturn(inode, task); 3537 } 3538 3539 /* 3540 * Update the seqid of an open stateid 3541 */ 3542 static void nfs4_sync_open_stateid(nfs4_stateid *dst, 3543 struct nfs4_state *state) 3544 { 3545 __be32 seqid_open; 3546 u32 
dst_seqid; 3547 int seq; 3548 3549 for (;;) { 3550 if (!nfs4_valid_open_stateid(state)) 3551 break; 3552 seq = read_seqbegin(&state->seqlock); 3553 if (!nfs4_state_match_open_stateid_other(state, dst)) { 3554 nfs4_stateid_copy(dst, &state->open_stateid); 3555 if (read_seqretry(&state->seqlock, seq)) 3556 continue; 3557 break; 3558 } 3559 seqid_open = state->open_stateid.seqid; 3560 if (read_seqretry(&state->seqlock, seq)) 3561 continue; 3562 3563 dst_seqid = be32_to_cpu(dst->seqid); 3564 if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) < 0) 3565 dst->seqid = seqid_open; 3566 break; 3567 } 3568 } 3569 3570 /* 3571 * Update the seqid of an open stateid after receiving 3572 * NFS4ERR_OLD_STATEID 3573 */ 3574 static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst, 3575 struct nfs4_state *state) 3576 { 3577 __be32 seqid_open; 3578 u32 dst_seqid; 3579 bool ret; 3580 int seq, status = -EAGAIN; 3581 DEFINE_WAIT(wait); 3582 3583 for (;;) { 3584 ret = false; 3585 if (!nfs4_valid_open_stateid(state)) 3586 break; 3587 seq = read_seqbegin(&state->seqlock); 3588 if (!nfs4_state_match_open_stateid_other(state, dst)) { 3589 if (read_seqretry(&state->seqlock, seq)) 3590 continue; 3591 break; 3592 } 3593 3594 write_seqlock(&state->seqlock); 3595 seqid_open = state->open_stateid.seqid; 3596 3597 dst_seqid = be32_to_cpu(dst->seqid); 3598 3599 /* Did another OPEN bump the state's seqid? try again: */ 3600 if ((s32)(be32_to_cpu(seqid_open) - dst_seqid) > 0) { 3601 dst->seqid = seqid_open; 3602 write_sequnlock(&state->seqlock); 3603 ret = true; 3604 break; 3605 } 3606 3607 /* server says we're behind but we haven't seen the update yet */ 3608 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags); 3609 prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE); 3610 write_sequnlock(&state->seqlock); 3611 trace_nfs4_close_stateid_update_wait(state->inode, dst, 0); 3612 3613 if (fatal_signal_pending(current) || nfs_current_task_exiting()) 3614 status = -EINTR; 3615 else 3616 if (schedule_timeout(5*HZ) != 0) 3617 status = 0; 3618 3619 finish_wait(&state->waitq, &wait); 3620 3621 if (!status) 3622 continue; 3623 if (status == -EINTR) 3624 break; 3625 3626 /* we slept the whole 5 seconds, we must have lost a seqid */ 3627 dst->seqid = cpu_to_be32(dst_seqid + 1); 3628 ret = true; 3629 break; 3630 } 3631 3632 return ret; 3633 } 3634 3635 struct nfs4_closedata { 3636 struct inode *inode; 3637 struct nfs4_state *state; 3638 struct nfs_closeargs arg; 3639 struct nfs_closeres res; 3640 struct { 3641 struct nfs4_layoutreturn_args arg; 3642 struct nfs4_layoutreturn_res res; 3643 struct nfs4_xdr_opaque_data ld_private; 3644 u32 roc_barrier; 3645 bool roc; 3646 } lr; 3647 struct nfs_fattr fattr; 3648 unsigned long timestamp; 3649 unsigned short retrans; 3650 }; 3651 3652 static void nfs4_free_closedata(void *data) 3653 { 3654 struct nfs4_closedata *calldata = data; 3655 struct nfs4_state_owner *sp = calldata->state->owner; 3656 struct super_block *sb = calldata->state->inode->i_sb; 3657 3658 if (calldata->lr.roc) 3659 pnfs_roc_release(&calldata->lr.arg, &calldata->lr.res, 3660 calldata->res.lr_ret); 3661 nfs4_put_open_state(calldata->state); 3662 nfs_free_seqid(calldata->arg.seqid); 3663 nfs4_put_state_owner(sp); 3664 nfs_sb_deactive(sb); 3665 kfree(calldata); 3666 } 3667 3668 static void nfs4_close_done(struct rpc_task *task, void *data) 3669 { 3670 struct nfs4_closedata *calldata = data; 3671 struct nfs4_state *state = calldata->state; 3672 struct nfs_server *server = NFS_SERVER(calldata->inode); 3673 nfs4_stateid *res_stateid = NULL; 
3674 struct nfs4_exception exception = { 3675 .state = state, 3676 .inode = calldata->inode, 3677 .stateid = &calldata->arg.stateid, 3678 .retrans = calldata->retrans, 3679 }; 3680 3681 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 3682 return; 3683 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status); 3684 3685 /* Handle Layoutreturn errors */ 3686 if (pnfs_roc_done(task, &calldata->arg.lr_args, &calldata->res.lr_res, 3687 &calldata->res.lr_ret) == -EAGAIN) 3688 goto out_restart; 3689 3690 /* hmm. we are done with the inode, and in the process of freeing 3691 * the state_owner. we keep this around to process errors 3692 */ 3693 switch (task->tk_status) { 3694 case 0: 3695 res_stateid = &calldata->res.stateid; 3696 renew_lease(server, calldata->timestamp); 3697 break; 3698 case -NFS4ERR_ACCESS: 3699 if (calldata->arg.bitmask != NULL) { 3700 calldata->arg.bitmask = NULL; 3701 calldata->res.fattr = NULL; 3702 goto out_restart; 3703 3704 } 3705 break; 3706 case -NFS4ERR_OLD_STATEID: 3707 /* Did we race with OPEN? */ 3708 if (nfs4_refresh_open_old_stateid(&calldata->arg.stateid, 3709 state)) 3710 goto out_restart; 3711 goto out_release; 3712 case -NFS4ERR_ADMIN_REVOKED: 3713 case -NFS4ERR_STALE_STATEID: 3714 case -NFS4ERR_EXPIRED: 3715 nfs4_free_revoked_stateid(server, 3716 &calldata->arg.stateid, 3717 task->tk_msg.rpc_cred); 3718 fallthrough; 3719 case -NFS4ERR_BAD_STATEID: 3720 if (calldata->arg.fmode == 0) 3721 break; 3722 fallthrough; 3723 default: 3724 task->tk_status = nfs4_async_handle_exception(task, 3725 server, task->tk_status, &exception); 3726 calldata->retrans = exception.retrans; 3727 if (exception.retry) 3728 goto out_restart; 3729 } 3730 nfs_clear_open_stateid(state, &calldata->arg.stateid, 3731 res_stateid, calldata->arg.fmode); 3732 out_release: 3733 task->tk_status = 0; 3734 nfs_release_seqid(calldata->arg.seqid); 3735 nfs_refresh_inode(calldata->inode, &calldata->fattr); 3736 dprintk("%s: ret = %d\n", __func__, task->tk_status); 3737 return; 3738 out_restart: 3739 task->tk_status = 0; 3740 rpc_restart_call_prepare(task); 3741 goto out_release; 3742 } 3743 3744 static void nfs4_close_prepare(struct rpc_task *task, void *data) 3745 { 3746 struct nfs4_closedata *calldata = data; 3747 struct nfs4_state *state = calldata->state; 3748 struct inode *inode = calldata->inode; 3749 struct nfs_server *server = NFS_SERVER(inode); 3750 struct pnfs_layout_hdr *lo; 3751 bool is_rdonly, is_wronly, is_rdwr; 3752 int call_close = 0; 3753 3754 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 3755 goto out_wait; 3756 3757 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; 3758 spin_lock(&state->owner->so_lock); 3759 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags); 3760 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags); 3761 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags); 3762 /* Calculate the change in open mode */ 3763 calldata->arg.fmode = 0; 3764 if (state->n_rdwr == 0) { 3765 if (state->n_rdonly == 0) 3766 call_close |= is_rdonly; 3767 else if (is_rdonly) 3768 calldata->arg.fmode |= FMODE_READ; 3769 if (state->n_wronly == 0) 3770 call_close |= is_wronly; 3771 else if (is_wronly) 3772 calldata->arg.fmode |= FMODE_WRITE; 3773 if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE)) 3774 call_close |= is_rdwr; 3775 } else if (is_rdwr) 3776 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE; 3777 3778 nfs4_sync_open_stateid(&calldata->arg.stateid, state); 3779 if (!nfs4_valid_open_stateid(state)) 3780 call_close = 0; 3781 
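	/*
	 * call_close is now non-zero only if some share access still held on
	 * the server needs to be dropped; an invalid open stateid forces it
	 * to zero so that no CLOSE/OPEN_DOWNGRADE is sent at all.
	 */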
spin_unlock(&state->owner->so_lock); 3782 3783 if (!call_close) { 3784 /* Note: exit _without_ calling nfs4_close_done */ 3785 goto out_no_action; 3786 } 3787 3788 if (!calldata->lr.roc && nfs4_wait_on_layoutreturn(inode, task)) { 3789 nfs_release_seqid(calldata->arg.seqid); 3790 goto out_wait; 3791 } 3792 3793 lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL; 3794 if (lo && !pnfs_layout_is_valid(lo)) { 3795 calldata->arg.lr_args = NULL; 3796 calldata->res.lr_res = NULL; 3797 } 3798 3799 if (calldata->arg.fmode == 0) 3800 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; 3801 3802 if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) { 3803 /* Close-to-open cache consistency revalidation */ 3804 if (!nfs4_have_delegation(inode, FMODE_READ, 0)) { 3805 nfs4_bitmask_set(calldata->arg.bitmask_store, 3806 server->cache_consistency_bitmask, 3807 inode, 0); 3808 calldata->arg.bitmask = calldata->arg.bitmask_store; 3809 } else 3810 calldata->arg.bitmask = NULL; 3811 } 3812 3813 calldata->arg.share_access = 3814 nfs4_fmode_to_share_access(calldata->arg.fmode); 3815 3816 if (calldata->res.fattr == NULL) 3817 calldata->arg.bitmask = NULL; 3818 else if (calldata->arg.bitmask == NULL) 3819 calldata->res.fattr = NULL; 3820 calldata->timestamp = jiffies; 3821 if (nfs4_setup_sequence(NFS_SERVER(inode)->nfs_client, 3822 &calldata->arg.seq_args, 3823 &calldata->res.seq_res, 3824 task) != 0) 3825 nfs_release_seqid(calldata->arg.seqid); 3826 return; 3827 out_no_action: 3828 task->tk_action = NULL; 3829 out_wait: 3830 nfs4_sequence_done(task, &calldata->res.seq_res); 3831 } 3832 3833 static const struct rpc_call_ops nfs4_close_ops = { 3834 .rpc_call_prepare = nfs4_close_prepare, 3835 .rpc_call_done = nfs4_close_done, 3836 .rpc_release = nfs4_free_closedata, 3837 }; 3838 3839 /* 3840 * It is possible for data to be read/written from a mem-mapped file 3841 * after the sys_close call (which hits the vfs layer as a flush). 3842 * This means that we can't safely call nfsv4 close on a file until 3843 * the inode is cleared. This in turn means that we are not good 3844 * NFSv4 citizens - we do not indicate to the server to update the file's 3845 * share state even when we are done with one of the three share 3846 * stateid's in the inode. 3847 * 3848 * NOTE: Caller must be holding the sp->so_owner semaphore! 
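 *
 * nfs4_do_close() below issues the CLOSE (or OPEN_DOWNGRADE, when other
 * share modes remain in use) as an asynchronous RPC and only waits for
 * its completion when @wait is set.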
3849 */ 3850 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) 3851 { 3852 struct nfs_server *server = NFS_SERVER(state->inode); 3853 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 3854 struct nfs4_closedata *calldata; 3855 struct nfs4_state_owner *sp = state->owner; 3856 struct rpc_task *task; 3857 struct rpc_message msg = { 3858 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE], 3859 .rpc_cred = state->owner->so_cred, 3860 }; 3861 struct rpc_task_setup task_setup_data = { 3862 .rpc_client = server->client, 3863 .rpc_message = &msg, 3864 .callback_ops = &nfs4_close_ops, 3865 .workqueue = nfsiod_workqueue, 3866 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 3867 }; 3868 int status = -ENOMEM; 3869 3870 if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE)) 3871 task_setup_data.flags |= RPC_TASK_MOVEABLE; 3872 3873 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP, 3874 &task_setup_data.rpc_client, &msg); 3875 3876 calldata = kzalloc(sizeof(*calldata), gfp_mask); 3877 if (calldata == NULL) 3878 goto out; 3879 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1, 0); 3880 calldata->inode = state->inode; 3881 calldata->state = state; 3882 calldata->arg.fh = NFS_FH(state->inode); 3883 if (!nfs4_copy_open_stateid(&calldata->arg.stateid, state)) 3884 goto out_free_calldata; 3885 /* Serialization for the sequence id */ 3886 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 3887 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask); 3888 if (IS_ERR(calldata->arg.seqid)) 3889 goto out_free_calldata; 3890 nfs_fattr_init(&calldata->fattr); 3891 calldata->arg.fmode = 0; 3892 calldata->lr.arg.ld_private = &calldata->lr.ld_private; 3893 calldata->res.fattr = &calldata->fattr; 3894 calldata->res.seqid = calldata->arg.seqid; 3895 calldata->res.server = server; 3896 calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT; 3897 calldata->lr.roc = pnfs_roc(state->inode, &calldata->lr.arg, 3898 &calldata->lr.res, msg.rpc_cred, wait); 3899 if (calldata->lr.roc) { 3900 calldata->arg.lr_args = &calldata->lr.arg; 3901 calldata->res.lr_res = &calldata->lr.res; 3902 } 3903 nfs_sb_active(calldata->inode->i_sb); 3904 3905 msg.rpc_argp = &calldata->arg; 3906 msg.rpc_resp = &calldata->res; 3907 task_setup_data.callback_data = calldata; 3908 task = rpc_run_task(&task_setup_data); 3909 if (IS_ERR(task)) 3910 return PTR_ERR(task); 3911 status = 0; 3912 if (wait) 3913 status = rpc_wait_for_completion_task(task); 3914 rpc_put_task(task); 3915 return status; 3916 out_free_calldata: 3917 kfree(calldata); 3918 out: 3919 nfs4_put_open_state(state); 3920 nfs4_put_state_owner(sp); 3921 return status; 3922 } 3923 3924 static struct inode * 3925 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, 3926 int open_flags, struct iattr *attr, int *opened) 3927 { 3928 struct nfs4_state *state; 3929 struct nfs4_label l, *label; 3930 3931 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l); 3932 3933 /* Protect against concurrent sillydeletes */ 3934 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened); 3935 3936 nfs4_label_release_security(label); 3937 3938 if (IS_ERR(state)) 3939 return ERR_CAST(state); 3940 return state->inode; 3941 } 3942 3943 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) 3944 { 3945 struct dentry *dentry = ctx->dentry; 3946 if (ctx->state == NULL) 3947 return; 3948 if (dentry->d_flags & DCACHE_NFSFS_RENAMED) 3949 
nfs4_inode_set_return_delegation_on_close(d_inode(dentry)); 3950 if (is_sync) 3951 nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx)); 3952 else 3953 nfs4_close_state(ctx->state, _nfs4_ctx_to_openmode(ctx)); 3954 } 3955 3956 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL) 3957 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL) 3958 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_OPEN_ARGUMENTS - 1UL) 3959 3960 #define FATTR4_WORD2_NFS42_TIME_DELEG_MASK \ 3961 (FATTR4_WORD2_TIME_DELEG_MODIFY|FATTR4_WORD2_TIME_DELEG_ACCESS) 3962 static bool nfs4_server_delegtime_capable(struct nfs4_server_caps_res *res) 3963 { 3964 u32 share_access_want = res->open_caps.oa_share_access_want[0]; 3965 u32 attr_bitmask = res->attr_bitmask[2]; 3966 3967 return (share_access_want & NFS4_SHARE_WANT_DELEG_TIMESTAMPS) && 3968 ((attr_bitmask & FATTR4_WORD2_NFS42_TIME_DELEG_MASK) == 3969 FATTR4_WORD2_NFS42_TIME_DELEG_MASK); 3970 } 3971 3972 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 3973 { 3974 u32 minorversion = server->nfs_client->cl_minorversion; 3975 u32 bitmask[3] = { 3976 [0] = FATTR4_WORD0_SUPPORTED_ATTRS, 3977 }; 3978 struct nfs4_server_caps_arg args = { 3979 .fhandle = fhandle, 3980 .bitmask = bitmask, 3981 }; 3982 struct nfs4_server_caps_res res = {}; 3983 struct rpc_message msg = { 3984 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS], 3985 .rpc_argp = &args, 3986 .rpc_resp = &res, 3987 }; 3988 int status; 3989 int i; 3990 3991 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS | 3992 FATTR4_WORD0_FH_EXPIRE_TYPE | 3993 FATTR4_WORD0_LINK_SUPPORT | 3994 FATTR4_WORD0_SYMLINK_SUPPORT | 3995 FATTR4_WORD0_ACLSUPPORT | 3996 FATTR4_WORD0_CASE_INSENSITIVE | 3997 FATTR4_WORD0_CASE_PRESERVING; 3998 if (minorversion) 3999 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT; 4000 if (minorversion > 1) 4001 bitmask[2] |= FATTR4_WORD2_OPEN_ARGUMENTS; 4002 4003 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4004 if (status == 0) { 4005 bitmask[0] = (FATTR4_WORD0_SUPPORTED_ATTRS | 4006 FATTR4_WORD0_FH_EXPIRE_TYPE | 4007 FATTR4_WORD0_LINK_SUPPORT | 4008 FATTR4_WORD0_SYMLINK_SUPPORT | 4009 FATTR4_WORD0_ACLSUPPORT | 4010 FATTR4_WORD0_CASE_INSENSITIVE | 4011 FATTR4_WORD0_CASE_PRESERVING) & 4012 res.attr_bitmask[0]; 4013 /* Sanity check the server answers */ 4014 switch (minorversion) { 4015 case 0: 4016 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK; 4017 res.attr_bitmask[2] = 0; 4018 break; 4019 case 1: 4020 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK; 4021 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT & 4022 res.attr_bitmask[2]; 4023 break; 4024 case 2: 4025 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK; 4026 bitmask[2] = (FATTR4_WORD2_SUPPATTR_EXCLCREAT | 4027 FATTR4_WORD2_OPEN_ARGUMENTS) & 4028 res.attr_bitmask[2]; 4029 } 4030 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); 4031 server->caps &= 4032 ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS | 4033 NFS_CAP_SECURITY_LABEL | NFS_CAP_FS_LOCATIONS | 4034 NFS_CAP_OPEN_XOR | NFS_CAP_DELEGTIME); 4035 server->fattr_valid = NFS_ATTR_FATTR_V4; 4036 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL && 4037 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) 4038 server->caps |= NFS_CAP_ACLS; 4039 if (res.has_links != 0) 4040 server->caps |= NFS_CAP_HARDLINKS; 4041 if (res.has_symlinks != 0) 4042 server->caps |= NFS_CAP_SYMLINKS; 4043 if (res.case_insensitive) 4044 server->caps |= NFS_CAP_CASE_INSENSITIVE; 4045 if 
(res.case_preserving)
4046 server->caps |= NFS_CAP_CASE_PRESERVING;
4047 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
4048 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
4049 server->caps |= NFS_CAP_SECURITY_LABEL;
4050 #endif
4051 if (res.attr_bitmask[0] & FATTR4_WORD0_FS_LOCATIONS)
4052 server->caps |= NFS_CAP_FS_LOCATIONS;
4053 if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID))
4054 server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID;
4055 if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE))
4056 server->fattr_valid &= ~NFS_ATTR_FATTR_MODE;
4057 if (!(res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS))
4058 server->fattr_valid &= ~NFS_ATTR_FATTR_NLINK;
4059 if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER))
4060 server->fattr_valid &= ~(NFS_ATTR_FATTR_OWNER |
4061 NFS_ATTR_FATTR_OWNER_NAME);
4062 if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP))
4063 server->fattr_valid &= ~(NFS_ATTR_FATTR_GROUP |
4064 NFS_ATTR_FATTR_GROUP_NAME);
4065 if (!(res.attr_bitmask[1] & FATTR4_WORD1_SPACE_USED))
4066 server->fattr_valid &= ~NFS_ATTR_FATTR_SPACE_USED;
4067 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS))
4068 server->fattr_valid &= ~NFS_ATTR_FATTR_ATIME;
4069 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA))
4070 server->fattr_valid &= ~NFS_ATTR_FATTR_CTIME;
4071 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY))
4072 server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME;
4075 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_CREATE))
4076 server->fattr_valid &= ~NFS_ATTR_FATTR_BTIME;
4077 memcpy(server->attr_bitmask_nl, res.attr_bitmask,
4078 sizeof(server->attr_bitmask));
4079 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
4080
4081 if (res.open_caps.oa_share_access_want[0] &
4082 NFS4_SHARE_WANT_OPEN_XOR_DELEGATION)
4083 server->caps |= NFS_CAP_OPEN_XOR;
4084 if (nfs4_server_delegtime_capable(&res))
4085 server->caps |= NFS_CAP_DELEGTIME;
4086
4087 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
4088 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
4089 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
4090 server->cache_consistency_bitmask[2] = 0;
4091
4092 /* Avoid a regression due to buggy server */
4093 for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++)
4094 res.exclcreat_bitmask[i] &= res.attr_bitmask[i];
4095 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask,
4096 sizeof(server->exclcreat_bitmask));
4097
4098 server->acl_bitmask = res.acl_bitmask;
4099 server->fh_expire_type = res.fh_expire_type;
4100 }
4101
4102 return status;
4103 }
4104
4105 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
4106 {
4107 struct nfs4_exception exception = {
4108 .interruptible = true,
4109 };
4110 int err;
4111
4112 do {
4113 err = nfs4_handle_exception(server,
4114 _nfs4_server_capabilities(server, fhandle),
4115 &exception);
4116 } while (exception.retry);
4117 return err;
4118 }
4119
4120 static void test_fs_location_for_trunking(struct nfs4_fs_location *location,
4121 struct nfs_client *clp,
4122 struct nfs_server *server)
4123 {
4124 int i;
4125
4126 for (i = 0; i < location->nservers; i++) {
4127 struct nfs4_string *srv_loc = &location->servers[i];
4128 struct sockaddr_storage addr;
4129 size_t addrlen;
4130 struct xprt_create xprt_args = {
4131 .ident = 0,
4132 .net = clp->cl_net,
4133 };
4134 struct nfs4_add_xprt_data
xprtdata = { 4135 .clp = clp, 4136 }; 4137 struct rpc_add_xprt_test rpcdata = { 4138 .add_xprt_test = clp->cl_mvops->session_trunk, 4139 .data = &xprtdata, 4140 }; 4141 char *servername = NULL; 4142 4143 if (!srv_loc->len) 4144 continue; 4145 4146 addrlen = nfs_parse_server_name(srv_loc->data, srv_loc->len, 4147 &addr, sizeof(addr), 4148 clp->cl_net, server->port); 4149 if (!addrlen) 4150 return; 4151 xprt_args.dstaddr = (struct sockaddr *)&addr; 4152 xprt_args.addrlen = addrlen; 4153 servername = kmalloc(srv_loc->len + 1, GFP_KERNEL); 4154 if (!servername) 4155 return; 4156 memcpy(servername, srv_loc->data, srv_loc->len); 4157 servername[srv_loc->len] = '\0'; 4158 xprt_args.servername = servername; 4159 4160 xprtdata.cred = nfs4_get_clid_cred(clp); 4161 rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args, 4162 rpc_clnt_setup_test_and_add_xprt, 4163 &rpcdata); 4164 if (xprtdata.cred) 4165 put_cred(xprtdata.cred); 4166 kfree(servername); 4167 } 4168 } 4169 4170 static bool _is_same_nfs4_pathname(struct nfs4_pathname *path1, 4171 struct nfs4_pathname *path2) 4172 { 4173 int i; 4174 4175 if (path1->ncomponents != path2->ncomponents) 4176 return false; 4177 for (i = 0; i < path1->ncomponents; i++) { 4178 if (path1->components[i].len != path2->components[i].len) 4179 return false; 4180 if (memcmp(path1->components[i].data, path2->components[i].data, 4181 path1->components[i].len)) 4182 return false; 4183 } 4184 return true; 4185 } 4186 4187 static int _nfs4_discover_trunking(struct nfs_server *server, 4188 struct nfs_fh *fhandle) 4189 { 4190 struct nfs4_fs_locations *locations = NULL; 4191 struct page *page; 4192 const struct cred *cred; 4193 struct nfs_client *clp = server->nfs_client; 4194 const struct nfs4_state_maintenance_ops *ops = 4195 clp->cl_mvops->state_renewal_ops; 4196 int status = -ENOMEM, i; 4197 4198 cred = ops->get_state_renewal_cred(clp); 4199 if (cred == NULL) { 4200 cred = nfs4_get_clid_cred(clp); 4201 if (cred == NULL) 4202 return -ENOKEY; 4203 } 4204 4205 page = alloc_page(GFP_KERNEL); 4206 if (!page) 4207 goto out_put_cred; 4208 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 4209 if (!locations) 4210 goto out_free; 4211 locations->fattr = nfs_alloc_fattr(); 4212 if (!locations->fattr) 4213 goto out_free_2; 4214 4215 status = nfs4_proc_get_locations(server, fhandle, locations, page, 4216 cred); 4217 if (status) 4218 goto out_free_3; 4219 4220 for (i = 0; i < locations->nlocations; i++) { 4221 if (!_is_same_nfs4_pathname(&locations->fs_path, 4222 &locations->locations[i].rootpath)) 4223 continue; 4224 test_fs_location_for_trunking(&locations->locations[i], clp, 4225 server); 4226 } 4227 out_free_3: 4228 kfree(locations->fattr); 4229 out_free_2: 4230 kfree(locations); 4231 out_free: 4232 __free_page(page); 4233 out_put_cred: 4234 put_cred(cred); 4235 return status; 4236 } 4237 4238 static int nfs4_discover_trunking(struct nfs_server *server, 4239 struct nfs_fh *fhandle) 4240 { 4241 struct nfs4_exception exception = { 4242 .interruptible = true, 4243 }; 4244 struct nfs_client *clp = server->nfs_client; 4245 int err = 0; 4246 4247 if (!nfs4_has_session(clp)) 4248 goto out; 4249 do { 4250 err = nfs4_handle_exception(server, 4251 _nfs4_discover_trunking(server, fhandle), 4252 &exception); 4253 } while (exception.retry); 4254 out: 4255 return err; 4256 } 4257 4258 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 4259 struct nfs_fattr *fattr) 4260 { 4261 u32 bitmask[3] = { 4262 [0] = FATTR4_WORD0_TYPE | FATTR4_WORD0_CHANGE | 4263 
FATTR4_WORD0_SIZE | FATTR4_WORD0_FSID, 4264 }; 4265 struct nfs4_lookup_root_arg args = { 4266 .bitmask = bitmask, 4267 }; 4268 struct nfs4_lookup_res res = { 4269 .server = server, 4270 .fattr = fattr, 4271 .fh = fhandle, 4272 }; 4273 struct rpc_message msg = { 4274 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT], 4275 .rpc_argp = &args, 4276 .rpc_resp = &res, 4277 }; 4278 4279 nfs_fattr_init(fattr); 4280 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4281 } 4282 4283 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 4284 struct nfs_fattr *fattr) 4285 { 4286 struct nfs4_exception exception = { 4287 .interruptible = true, 4288 }; 4289 int err; 4290 do { 4291 err = _nfs4_lookup_root(server, fhandle, fattr); 4292 trace_nfs4_lookup_root(server, fhandle, fattr, err); 4293 switch (err) { 4294 case 0: 4295 case -NFS4ERR_WRONGSEC: 4296 goto out; 4297 default: 4298 err = nfs4_handle_exception(server, err, &exception); 4299 } 4300 } while (exception.retry); 4301 out: 4302 return err; 4303 } 4304 4305 static int nfs4_lookup_root_sec(struct nfs_server *server, 4306 struct nfs_fh *fhandle, struct nfs_fattr *fattr, 4307 rpc_authflavor_t flavor) 4308 { 4309 struct rpc_auth_create_args auth_args = { 4310 .pseudoflavor = flavor, 4311 }; 4312 struct rpc_auth *auth; 4313 4314 auth = rpcauth_create(&auth_args, server->client); 4315 if (IS_ERR(auth)) 4316 return -EACCES; 4317 return nfs4_lookup_root(server, fhandle, fattr); 4318 } 4319 4320 /* 4321 * Retry pseudoroot lookup with various security flavors. We do this when: 4322 * 4323 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC 4324 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation 4325 * 4326 * Returns zero on success, or a negative NFS4ERR value, or a 4327 * negative errno value. 4328 */ 4329 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 4330 struct nfs_fattr *fattr) 4331 { 4332 /* Per 3530bis 15.33.5 */ 4333 static const rpc_authflavor_t flav_array[] = { 4334 RPC_AUTH_GSS_KRB5P, 4335 RPC_AUTH_GSS_KRB5I, 4336 RPC_AUTH_GSS_KRB5, 4337 RPC_AUTH_UNIX, /* courtesy */ 4338 RPC_AUTH_NULL, 4339 }; 4340 int status = -EPERM; 4341 size_t i; 4342 4343 if (server->auth_info.flavor_len > 0) { 4344 /* try each flavor specified by user */ 4345 for (i = 0; i < server->auth_info.flavor_len; i++) { 4346 status = nfs4_lookup_root_sec( 4347 server, fhandle, fattr, 4348 server->auth_info.flavors[i]); 4349 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 4350 continue; 4351 break; 4352 } 4353 } else { 4354 /* no flavors specified by user, try default list */ 4355 for (i = 0; i < ARRAY_SIZE(flav_array); i++) { 4356 status = nfs4_lookup_root_sec(server, fhandle, fattr, 4357 flav_array[i]); 4358 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 4359 continue; 4360 break; 4361 } 4362 } 4363 4364 /* 4365 * -EACCES could mean that the user doesn't have correct permissions 4366 * to access the mount. It could also mean that we tried to mount 4367 * with a gss auth flavor, but rpc.gssd isn't running. Either way, 4368 * existing mount programs don't handle -EACCES very well so it should 4369 * be mapped to -EPERM instead. 
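 *
 * Note that both loops above treat -NFS4ERR_WRONGSEC and -EACCES as
 * "try the next flavor"; any other status ends the scan immediately.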
4370 */ 4371 if (status == -EACCES) 4372 status = -EPERM; 4373 return status; 4374 } 4375 4376 /** 4377 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot 4378 * @server: initialized nfs_server handle 4379 * @fhandle: we fill in the pseudo-fs root file handle 4380 * @fattr: we fill in a bare bones struct fattr 4381 * @auth_probe: probe the auth flavours 4382 * 4383 * Returns zero on success, or a negative errno. 4384 */ 4385 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle, 4386 struct nfs_fattr *fattr, bool auth_probe) 4387 { 4388 int status = 0; 4389 4390 if (!auth_probe) 4391 status = nfs4_lookup_root(server, fhandle, fattr); 4392 4393 if (auth_probe || status == NFS4ERR_WRONGSEC) 4394 status = server->nfs_client->cl_mvops->find_root_sec( 4395 server, fhandle, fattr); 4396 4397 return nfs4_map_errors(status); 4398 } 4399 4400 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh, 4401 struct nfs_fsinfo *info) 4402 { 4403 int error; 4404 struct nfs_fattr *fattr = info->fattr; 4405 4406 error = nfs4_server_capabilities(server, mntfh); 4407 if (error < 0) { 4408 dprintk("nfs4_get_root: getcaps error = %d\n", -error); 4409 return error; 4410 } 4411 4412 error = nfs4_proc_getattr(server, mntfh, fattr, NULL); 4413 if (error < 0) { 4414 dprintk("nfs4_get_root: getattr error = %d\n", -error); 4415 goto out; 4416 } 4417 4418 if (fattr->valid & NFS_ATTR_FATTR_FSID && 4419 !nfs_fsid_equal(&server->fsid, &fattr->fsid)) 4420 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid)); 4421 4422 out: 4423 return error; 4424 } 4425 4426 /* 4427 * Get locations and (maybe) other attributes of a referral. 4428 * Note that we'll actually follow the referral later when 4429 * we detect fsid mismatch in inode revalidation 4430 */ 4431 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir, 4432 const struct qstr *name, struct nfs_fattr *fattr, 4433 struct nfs_fh *fhandle) 4434 { 4435 int status = -ENOMEM; 4436 struct page *page = NULL; 4437 struct nfs4_fs_locations *locations = NULL; 4438 4439 page = alloc_page(GFP_KERNEL); 4440 if (page == NULL) 4441 goto out; 4442 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 4443 if (locations == NULL) 4444 goto out; 4445 4446 locations->fattr = fattr; 4447 4448 status = nfs4_proc_fs_locations(client, dir, name, locations, page); 4449 if (status != 0) 4450 goto out; 4451 4452 /* 4453 * If the fsid didn't change, this is a migration event, not a 4454 * referral. Cause us to drop into the exception handler, which 4455 * will kick off migration recovery. 
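 *
 * (The -NFS4ERR_MOVED returned here is passed by nfs4_proc_lookup_common()
 * to nfs4_handle_exception(), which starts that recovery.)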
4456 */
4457 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &fattr->fsid)) {
4458 dprintk("%s: server did not return a different fsid for"
4459 " a referral at %s\n", __func__, name->name);
4460 status = -NFS4ERR_MOVED;
4461 goto out;
4462 }
4463 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
4464 nfs_fixup_referral_attributes(fattr);
4465 memset(fhandle, 0, sizeof(struct nfs_fh));
4466 out:
4467 if (page)
4468 __free_page(page);
4469 kfree(locations);
4470 return status;
4471 }
4472
4473 #if IS_ENABLED(CONFIG_NFS_V4_1)
4474 static bool should_request_dir_deleg(struct inode *inode)
4475 {
4476 if (!directory_delegations)
4477 return false;
4478 if (!inode)
4479 return false;
4480 if (!S_ISDIR(inode->i_mode))
4481 return false;
4482 if (!nfs_server_capable(inode, NFS_CAP_DIR_DELEG))
4483 return false;
4484 if (!test_and_clear_bit(NFS_INO_REQ_DIR_DELEG, &(NFS_I(inode)->flags)))
4485 return false;
4486 if (nfs4_have_delegation(inode, FMODE_READ, 0))
4487 return false;
4488 return true;
4489 }
4490 #else
4491 static bool should_request_dir_deleg(struct inode *inode)
4492 {
4493 return false;
4494 }
4495 #endif /* CONFIG_NFS_V4_1 */
4496
4497 static void nfs4_call_getattr_prepare(struct rpc_task *task, void *calldata)
4498 {
4499 struct nfs4_call_sync_data *data = calldata;
4500 nfs4_setup_sequence(data->seq_server->nfs_client, data->seq_args,
4501 data->seq_res, task);
4502 }
4503
4504 static void nfs4_call_getattr_done(struct rpc_task *task, void *calldata)
4505 {
4506 struct nfs4_call_sync_data *data = calldata;
4507
4508 nfs4_sequence_process(task, data->seq_res);
4509 }
4510
4511 static const struct rpc_call_ops nfs4_call_getattr_ops = {
4512 .rpc_call_prepare = nfs4_call_getattr_prepare,
4513 .rpc_call_done = nfs4_call_getattr_done,
4514 };
4515
4516 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
4517 struct nfs_fattr *fattr, struct inode *inode)
4518 {
4519 __u32 bitmask[NFS4_BITMASK_SZ];
4520 struct nfs4_getattr_arg args = {
4521 .fh = fhandle,
4522 .bitmask = bitmask,
4523 };
4524 struct nfs4_getattr_res res = {
4525 .fattr = fattr,
4526 .server = server,
4527 };
4528 struct rpc_message msg = {
4529 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
4530 .rpc_argp = &args,
4531 .rpc_resp = &res,
4532 };
4533 struct nfs4_call_sync_data data = {
4534 .seq_server = server,
4535 .seq_args = &args.seq_args,
4536 .seq_res = &res.seq_res,
4537 };
4538 struct rpc_task_setup task_setup = {
4539 .rpc_client = server->client,
4540 .rpc_message = &msg,
4541 .callback_ops = &nfs4_call_getattr_ops,
4542 .callback_data = &data,
4543 };
4544 struct nfs4_gdd_res gdd_res;
4545 int status;
4546
4547 if (nfs4_has_session(server->nfs_client))
4548 task_setup.flags = RPC_TASK_MOVEABLE;
4549
4550 /* Is this an attribute revalidation, subject to softreval?
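 *
 * Setting RPC_TASK_TIMEOUT here makes the GETATTR behave like a "soft"
 * RPC, so a revalidation against an unresponsive server fails with an
 * error instead of retrying forever.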
*/
4551 if (inode && (server->flags & NFS_MOUNT_SOFTREVAL))
4552 task_setup.flags |= RPC_TASK_TIMEOUT;
4553
4554 args.get_dir_deleg = should_request_dir_deleg(inode);
4555 if (args.get_dir_deleg)
4556 res.gdd_res = &gdd_res;
4557
4558 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label), inode, 0);
4559 nfs_fattr_init(fattr);
4560 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
4561
4562 status = nfs4_call_sync_custom(&task_setup);
4563
4564 if (args.get_dir_deleg) {
4565 switch (status) {
4566 case 0:
4567 if (gdd_res.status != GDD4_OK)
4568 break;
4569 nfs_inode_set_delegation(inode, current_cred(),
4570 FMODE_READ, &gdd_res.deleg, 0,
4571 NFS4_OPEN_DELEGATE_READ);
4572 break;
4573 case -ENOTSUPP:
4574 case -EOPNOTSUPP:
4575 server->caps &= ~NFS_CAP_DIR_DELEG;
4576 }
4577 }
4578
4579 nfs4_sequence_free_slot(&res.seq_res);
4580 return status;
4581 }
4582
4583 int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
4584 struct nfs_fattr *fattr, struct inode *inode)
4585 {
4586 struct nfs4_exception exception = {
4587 .interruptible = true,
4588 };
4589 int err;
4590 do {
4591 err = _nfs4_proc_getattr(server, fhandle, fattr, inode);
4592 trace_nfs4_getattr(server, fhandle, fattr, err);
4593 switch (err) {
4594 default:
4595 err = nfs4_handle_exception(server, err, &exception);
4596 break;
4597 case -ENOTSUPP:
4598 case -EOPNOTSUPP:
4599 exception.retry = true;
4600 }
4601 } while (exception.retry);
4602 return err;
4603 }
4604
4605 /*
4606 * The file is not closed if it is opened due to a request to change
4607 * the size of the file. The open call will not be needed once the
4608 * VFS layer lookup-intents are implemented.
4609 *
4610 * Close is called when the inode is destroyed.
4611 * If we haven't opened the file for O_WRONLY, we
4612 * need to do so in the size_change case to obtain a stateid.
4613 *
4614 * Got race?
4615 * Because OPEN is always done by name in nfsv4, it is
4616 * possible that we opened a different file by the same
4617 * name. We can recognize this race condition, but we
4618 * can't do anything about it besides returning an error.
4619 *
4620 * This will be fixed with VFS changes (lookup-intent).
4621 */
4622 static int
4623 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
4624 struct iattr *sattr)
4625 {
4626 struct inode *inode = d_inode(dentry);
4627 const struct cred *cred = NULL;
4628 struct nfs_open_context *ctx = NULL;
4629 int status;
4630
4631 if (pnfs_ld_layoutret_on_setattr(inode) &&
4632 sattr->ia_valid & ATTR_SIZE &&
4633 sattr->ia_size < i_size_read(inode))
4634 pnfs_commit_and_return_layout(inode);
4635
4636 nfs_fattr_init(fattr);
4637
4638 /* Deal with open(O_TRUNC) */
4639 if (sattr->ia_valid & ATTR_OPEN)
4640 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME);
4641
4642 /* Optimization: if the end result is no change, don't RPC */
4643 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
4644 return 0;
4645
4646 /* Search for an existing open(O_WRITE) file */
4647 if (sattr->ia_valid & ATTR_FILE) {
4648
4649 ctx = nfs_file_open_context(sattr->ia_file);
4650 if (ctx)
4651 cred = ctx->cred;
4652 }
4653
4654 /* Return any delegations if we're going to change ACLs */
4655 if ((sattr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
4656 nfs4_inode_make_writeable(inode);
4657
4658 status = nfs4_do_setattr(inode, cred, fattr, sattr, ctx, NULL);
4659 if (status == 0) {
4660 nfs_setattr_update_inode(inode, sattr, fattr);
4661 nfs_setsecurity(inode, fattr);
4662 }
4663 return status;
4664 }
4665
4666 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
4667 struct dentry *dentry, const struct qstr *name,
4668 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
4669 {
4670 struct nfs_server *server = NFS_SERVER(dir);
4671 int status;
4672 struct nfs4_lookup_arg args = {
4673 .bitmask = server->attr_bitmask,
4674 .dir_fh = NFS_FH(dir),
4675 .name = name,
4676 };
4677 struct nfs4_lookup_res res = {
4678 .server = server,
4679 .fattr = fattr,
4680 .fh = fhandle,
4681 };
4682 struct rpc_message msg = {
4683 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
4684 .rpc_argp = &args,
4685 .rpc_resp = &res,
4686 };
4687 unsigned short task_flags = 0;
4688
4689 if (nfs_server_capable(dir, NFS_CAP_MOVEABLE))
4690 task_flags = RPC_TASK_MOVEABLE;
4691
4692 /* Is this an attribute revalidation, subject to softreval?
*/ 4693 if (nfs_lookup_is_soft_revalidate(dentry)) 4694 task_flags |= RPC_TASK_TIMEOUT; 4695 4696 args.bitmask = nfs4_bitmask(server, fattr->label); 4697 4698 nfs_fattr_init(fattr); 4699 4700 dprintk("NFS call lookup %pd2\n", dentry); 4701 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 4702 status = nfs4_do_call_sync(clnt, server, &msg, 4703 &args.seq_args, &res.seq_res, task_flags); 4704 dprintk("NFS reply lookup: %d\n", status); 4705 return status; 4706 } 4707 4708 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr) 4709 { 4710 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 4711 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT; 4712 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 4713 fattr->nlink = 2; 4714 } 4715 4716 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir, 4717 struct dentry *dentry, const struct qstr *name, 4718 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4719 { 4720 struct nfs4_exception exception = { 4721 .interruptible = true, 4722 }; 4723 struct rpc_clnt *client = *clnt; 4724 int err; 4725 do { 4726 err = _nfs4_proc_lookup(client, dir, dentry, name, fhandle, fattr); 4727 trace_nfs4_lookup(dir, name, err); 4728 switch (err) { 4729 case -NFS4ERR_BADNAME: 4730 err = -ENOENT; 4731 goto out; 4732 case -NFS4ERR_MOVED: 4733 err = nfs4_get_referral(client, dir, name, fattr, fhandle); 4734 if (err == -NFS4ERR_MOVED) 4735 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 4736 goto out; 4737 case -NFS4ERR_WRONGSEC: 4738 err = -EPERM; 4739 if (client != *clnt) 4740 goto out; 4741 client = nfs4_negotiate_security(client, dir, name); 4742 if (IS_ERR(client)) 4743 return PTR_ERR(client); 4744 4745 exception.retry = 1; 4746 break; 4747 default: 4748 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 4749 } 4750 } while (exception.retry); 4751 4752 out: 4753 if (err == 0) 4754 *clnt = client; 4755 else if (client != *clnt) 4756 rpc_shutdown_client(client); 4757 4758 return err; 4759 } 4760 4761 static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry, const struct qstr *name, 4762 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4763 { 4764 int status; 4765 struct rpc_clnt *client = NFS_CLIENT(dir); 4766 4767 status = nfs4_proc_lookup_common(&client, dir, dentry, name, fhandle, fattr); 4768 if (client != NFS_CLIENT(dir)) { 4769 rpc_shutdown_client(client); 4770 nfs_fixup_secinfo_attributes(fattr); 4771 } 4772 return status; 4773 } 4774 4775 struct rpc_clnt * 4776 nfs4_proc_lookup_mountpoint(struct inode *dir, struct dentry *dentry, 4777 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4778 { 4779 struct rpc_clnt *client = NFS_CLIENT(dir); 4780 int status; 4781 4782 status = nfs4_proc_lookup_common(&client, dir, dentry, &dentry->d_name, 4783 fhandle, fattr); 4784 if (status < 0) 4785 return ERR_PTR(status); 4786 return (client == NFS_CLIENT(dir)) ? 
rpc_clone_client(client) : client; 4787 } 4788 4789 static int _nfs4_proc_lookupp(struct inode *inode, 4790 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4791 { 4792 struct rpc_clnt *clnt = NFS_CLIENT(inode); 4793 struct nfs_server *server = NFS_SERVER(inode); 4794 int status; 4795 struct nfs4_lookupp_arg args = { 4796 .bitmask = server->attr_bitmask, 4797 .fh = NFS_FH(inode), 4798 }; 4799 struct nfs4_lookupp_res res = { 4800 .server = server, 4801 .fattr = fattr, 4802 .fh = fhandle, 4803 }; 4804 struct rpc_message msg = { 4805 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUPP], 4806 .rpc_argp = &args, 4807 .rpc_resp = &res, 4808 }; 4809 unsigned short task_flags = 0; 4810 4811 if (server->flags & NFS_MOUNT_SOFTREVAL) 4812 task_flags |= RPC_TASK_TIMEOUT; 4813 if (server->caps & NFS_CAP_MOVEABLE) 4814 task_flags |= RPC_TASK_MOVEABLE; 4815 4816 args.bitmask = nfs4_bitmask(server, fattr->label); 4817 4818 nfs_fattr_init(fattr); 4819 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 4820 4821 dprintk("NFS call lookupp ino=0x%lx\n", inode->i_ino); 4822 status = nfs4_do_call_sync(clnt, server, &msg, &args.seq_args, 4823 &res.seq_res, task_flags); 4824 dprintk("NFS reply lookupp: %d\n", status); 4825 return status; 4826 } 4827 4828 static int nfs4_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle, 4829 struct nfs_fattr *fattr) 4830 { 4831 struct nfs4_exception exception = { 4832 .interruptible = true, 4833 }; 4834 int err; 4835 do { 4836 err = _nfs4_proc_lookupp(inode, fhandle, fattr); 4837 trace_nfs4_lookupp(inode, err); 4838 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4839 &exception); 4840 } while (exception.retry); 4841 return err; 4842 } 4843 4844 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry, 4845 const struct cred *cred) 4846 { 4847 struct nfs_server *server = NFS_SERVER(inode); 4848 struct nfs4_accessargs args = { 4849 .fh = NFS_FH(inode), 4850 .access = entry->mask, 4851 }; 4852 struct nfs4_accessres res = { 4853 .server = server, 4854 }; 4855 struct rpc_message msg = { 4856 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], 4857 .rpc_argp = &args, 4858 .rpc_resp = &res, 4859 .rpc_cred = cred, 4860 }; 4861 int status = 0; 4862 4863 if (!nfs4_have_delegation(inode, FMODE_READ, 0)) { 4864 nfs_request_directory_delegation(inode); 4865 res.fattr = nfs_alloc_fattr(); 4866 if (res.fattr == NULL) 4867 return -ENOMEM; 4868 args.bitmask = server->cache_consistency_bitmask; 4869 } 4870 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4871 if (!status) { 4872 nfs_access_set_mask(entry, res.access); 4873 if (res.fattr) 4874 nfs_refresh_inode(inode, res.fattr); 4875 } 4876 nfs_free_fattr(res.fattr); 4877 return status; 4878 } 4879 4880 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry, 4881 const struct cred *cred) 4882 { 4883 struct nfs4_exception exception = { 4884 .interruptible = true, 4885 }; 4886 int err; 4887 do { 4888 err = _nfs4_proc_access(inode, entry, cred); 4889 trace_nfs4_access(inode, err); 4890 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4891 &exception); 4892 } while (exception.retry); 4893 return err; 4894 } 4895 4896 /* 4897 * TODO: For the time being, we don't try to get any attributes 4898 * along with any of the zero-copy operations READ, READDIR, 4899 * READLINK, WRITE. 
4900 * 4901 * In the case of the first three, we want to put the GETATTR 4902 * after the read-type operation -- this is because it is hard 4903 * to predict the length of a GETATTR response in v4, and thus 4904 * align the READ data correctly. This means that the GETATTR 4905 * may end up partially falling into the page cache, and we should 4906 * shift it into the 'tail' of the xdr_buf before processing. 4907 * To do this efficiently, we need to know the total length 4908 * of data received, which doesn't seem to be available outside 4909 * of the RPC layer. 4910 * 4911 * In the case of WRITE, we also want to put the GETATTR after 4912 * the operation -- in this case because we want to make sure 4913 * we get the post-operation mtime and size. 4914 * 4915 * Both of these changes to the XDR layer would in fact be quite 4916 * minor, but I decided to leave them for a subsequent patch. 4917 */ 4918 static int _nfs4_proc_readlink(struct inode *inode, struct page *page, 4919 unsigned int pgbase, unsigned int pglen) 4920 { 4921 struct nfs4_readlink args = { 4922 .fh = NFS_FH(inode), 4923 .pgbase = pgbase, 4924 .pglen = pglen, 4925 .pages = &page, 4926 }; 4927 struct nfs4_readlink_res res; 4928 struct rpc_message msg = { 4929 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK], 4930 .rpc_argp = &args, 4931 .rpc_resp = &res, 4932 }; 4933 4934 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0); 4935 } 4936 4937 static int nfs4_proc_readlink(struct inode *inode, struct page *page, 4938 unsigned int pgbase, unsigned int pglen) 4939 { 4940 struct nfs4_exception exception = { 4941 .interruptible = true, 4942 }; 4943 int err; 4944 do { 4945 err = _nfs4_proc_readlink(inode, page, pgbase, pglen); 4946 trace_nfs4_readlink(inode, err); 4947 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4948 &exception); 4949 } while (exception.retry); 4950 return err; 4951 } 4952 4953 /* 4954 * This is just for mknod. open(O_CREAT) will always do ->open_context(). 
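 *
 * The open context allocated here is put again before returning, so this
 * helper only creates the file; it does not leave it open.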
4955 */ 4956 static int 4957 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, 4958 int flags) 4959 { 4960 struct nfs_server *server = NFS_SERVER(dir); 4961 struct nfs4_label l, *ilabel; 4962 struct nfs_open_context *ctx; 4963 struct nfs4_state *state; 4964 int status = 0; 4965 4966 ctx = alloc_nfs_open_context(dentry, FMODE_READ, NULL); 4967 if (IS_ERR(ctx)) 4968 return PTR_ERR(ctx); 4969 4970 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l); 4971 4972 nfs_request_directory_delegation(dir); 4973 4974 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 4975 sattr->ia_mode &= ~current_umask(); 4976 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL); 4977 if (IS_ERR(state)) { 4978 status = PTR_ERR(state); 4979 goto out; 4980 } 4981 out: 4982 nfs4_label_release_security(ilabel); 4983 put_nfs_open_context(ctx); 4984 return status; 4985 } 4986 4987 static int 4988 _nfs4_proc_remove(struct inode *dir, const struct qstr *name, u32 ftype) 4989 { 4990 struct nfs_server *server = NFS_SERVER(dir); 4991 struct nfs_removeargs args = { 4992 .fh = NFS_FH(dir), 4993 .name = *name, 4994 }; 4995 struct nfs_removeres res = { 4996 .server = server, 4997 }; 4998 struct rpc_message msg = { 4999 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE], 5000 .rpc_argp = &args, 5001 .rpc_resp = &res, 5002 }; 5003 unsigned long timestamp = jiffies; 5004 int status; 5005 5006 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 5007 if (status == 0) { 5008 spin_lock(&dir->i_lock); 5009 /* Removing a directory decrements nlink in the parent */ 5010 if (ftype == NF4DIR && dir->i_nlink > 2) 5011 nfs4_dec_nlink_locked(dir); 5012 nfs4_update_changeattr_locked(dir, &res.cinfo, timestamp, 5013 NFS_INO_INVALID_DATA); 5014 spin_unlock(&dir->i_lock); 5015 } 5016 return status; 5017 } 5018 5019 static int nfs4_proc_remove(struct inode *dir, struct dentry *dentry) 5020 { 5021 struct nfs4_exception exception = { 5022 .interruptible = true, 5023 }; 5024 struct inode *inode = d_inode(dentry); 5025 int err; 5026 5027 if (inode) { 5028 if (inode->i_nlink == 1) 5029 nfs4_inode_return_delegation(inode); 5030 else 5031 nfs4_inode_make_writeable(inode); 5032 } 5033 do { 5034 err = _nfs4_proc_remove(dir, &dentry->d_name, NF4REG); 5035 trace_nfs4_remove(dir, &dentry->d_name, err); 5036 err = nfs4_handle_exception(NFS_SERVER(dir), err, 5037 &exception); 5038 } while (exception.retry); 5039 return err; 5040 } 5041 5042 static int nfs4_proc_rmdir(struct inode *dir, const struct qstr *name) 5043 { 5044 struct nfs4_exception exception = { 5045 .interruptible = true, 5046 }; 5047 int err; 5048 5049 do { 5050 err = _nfs4_proc_remove(dir, name, NF4DIR); 5051 trace_nfs4_remove(dir, name, err); 5052 err = nfs4_handle_exception(NFS_SERVER(dir), err, 5053 &exception); 5054 } while (exception.retry); 5055 return err; 5056 } 5057 5058 static void nfs4_proc_unlink_setup(struct rpc_message *msg, 5059 struct dentry *dentry, 5060 struct inode *inode) 5061 { 5062 struct nfs_removeargs *args = msg->rpc_argp; 5063 struct nfs_removeres *res = msg->rpc_resp; 5064 5065 res->server = NFS_SB(dentry->d_sb); 5066 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE]; 5067 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1, 0); 5068 5069 nfs_fattr_init(res->dir_attr); 5070 nfs_request_directory_delegation(d_inode(dentry->d_parent)); 5071 5072 if (inode) { 5073 nfs4_inode_return_delegation(inode); 5074 nfs_d_prune_case_insensitive_aliases(inode); 5075 } 5076 } 5077 5078 static void 
nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) 5079 { 5080 nfs4_setup_sequence(NFS_SB(data->dentry->d_sb)->nfs_client, 5081 &data->args.seq_args, 5082 &data->res.seq_res, 5083 task); 5084 } 5085 5086 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) 5087 { 5088 struct nfs_unlinkdata *data = task->tk_calldata; 5089 struct nfs_removeres *res = &data->res; 5090 5091 if (!nfs4_sequence_done(task, &res->seq_res)) 5092 return 0; 5093 if (nfs4_async_handle_error(task, res->server, NULL, 5094 &data->timeout) == -EAGAIN) 5095 return 0; 5096 if (task->tk_status == 0) 5097 nfs4_update_changeattr(dir, &res->cinfo, 5098 res->dir_attr->time_start, 5099 NFS_INO_INVALID_DATA); 5100 return 1; 5101 } 5102 5103 static void nfs4_proc_rename_setup(struct rpc_message *msg, 5104 struct dentry *old_dentry, 5105 struct dentry *new_dentry, 5106 struct inode *same_parent) 5107 { 5108 struct nfs_renameargs *arg = msg->rpc_argp; 5109 struct nfs_renameres *res = msg->rpc_resp; 5110 struct inode *old_inode = d_inode(old_dentry); 5111 struct inode *new_inode = d_inode(new_dentry); 5112 5113 if (old_inode) 5114 nfs4_inode_make_writeable(old_inode); 5115 if (new_inode) 5116 nfs4_inode_return_delegation(new_inode); 5117 if (same_parent) 5118 nfs_request_directory_delegation(same_parent); 5119 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; 5120 res->server = NFS_SB(old_dentry->d_sb); 5121 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1, 0); 5122 } 5123 5124 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) 5125 { 5126 nfs4_setup_sequence(NFS_SERVER(data->old_dir)->nfs_client, 5127 &data->args.seq_args, 5128 &data->res.seq_res, 5129 task); 5130 } 5131 5132 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, 5133 struct inode *new_dir) 5134 { 5135 struct nfs_renamedata *data = task->tk_calldata; 5136 struct nfs_renameres *res = &data->res; 5137 5138 if (!nfs4_sequence_done(task, &res->seq_res)) 5139 return 0; 5140 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN) 5141 return 0; 5142 5143 if (task->tk_status == 0) { 5144 nfs_d_prune_case_insensitive_aliases(d_inode(data->old_dentry)); 5145 if (new_dir != old_dir) { 5146 /* Note: If we moved a directory, nlink will change */ 5147 nfs4_update_changeattr(old_dir, &res->old_cinfo, 5148 res->old_fattr->time_start, 5149 NFS_INO_INVALID_NLINK | 5150 NFS_INO_INVALID_DATA); 5151 nfs4_update_changeattr(new_dir, &res->new_cinfo, 5152 res->new_fattr->time_start, 5153 NFS_INO_INVALID_NLINK | 5154 NFS_INO_INVALID_DATA); 5155 } else 5156 nfs4_update_changeattr(old_dir, &res->old_cinfo, 5157 res->old_fattr->time_start, 5158 NFS_INO_INVALID_DATA); 5159 } 5160 return 1; 5161 } 5162 5163 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name) 5164 { 5165 struct nfs_server *server = NFS_SERVER(inode); 5166 __u32 bitmask[NFS4_BITMASK_SZ]; 5167 struct nfs4_link_arg arg = { 5168 .fh = NFS_FH(inode), 5169 .dir_fh = NFS_FH(dir), 5170 .name = name, 5171 .bitmask = bitmask, 5172 }; 5173 struct nfs4_link_res res = { 5174 .server = server, 5175 }; 5176 struct rpc_message msg = { 5177 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK], 5178 .rpc_argp = &arg, 5179 .rpc_resp = &res, 5180 }; 5181 int status = -ENOMEM; 5182 5183 res.fattr = nfs_alloc_fattr_with_label(server); 5184 if (res.fattr == NULL) 5185 goto out; 5186 5187 nfs4_inode_make_writeable(inode); 5188 nfs4_bitmap_copy_adjust(bitmask, 
nfs4_bitmask(server, res.fattr->label), 5189 inode, 5190 NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME); 5191 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 5192 if (!status) { 5193 nfs4_update_changeattr(dir, &res.cinfo, res.fattr->time_start, 5194 NFS_INO_INVALID_DATA); 5195 nfs4_inc_nlink(inode); 5196 status = nfs_post_op_update_inode(inode, res.fattr); 5197 if (!status) 5198 nfs_setsecurity(inode, res.fattr); 5199 } 5200 5201 out: 5202 nfs_free_fattr(res.fattr); 5203 return status; 5204 } 5205 5206 static int nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name) 5207 { 5208 struct nfs4_exception exception = { 5209 .interruptible = true, 5210 }; 5211 int err; 5212 do { 5213 err = nfs4_handle_exception(NFS_SERVER(inode), 5214 _nfs4_proc_link(inode, dir, name), 5215 &exception); 5216 } while (exception.retry); 5217 return err; 5218 } 5219 5220 struct nfs4_createdata { 5221 struct rpc_message msg; 5222 struct nfs4_create_arg arg; 5223 struct nfs4_create_res res; 5224 struct nfs_fh fh; 5225 struct nfs_fattr fattr; 5226 }; 5227 5228 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir, 5229 const struct qstr *name, struct iattr *sattr, u32 ftype) 5230 { 5231 struct nfs4_createdata *data; 5232 5233 data = kzalloc(sizeof(*data), GFP_KERNEL); 5234 if (data != NULL) { 5235 struct nfs_server *server = NFS_SERVER(dir); 5236 5237 data->fattr.label = nfs4_label_alloc(server, GFP_KERNEL); 5238 if (IS_ERR(data->fattr.label)) 5239 goto out_free; 5240 5241 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE]; 5242 data->msg.rpc_argp = &data->arg; 5243 data->msg.rpc_resp = &data->res; 5244 data->arg.dir_fh = NFS_FH(dir); 5245 data->arg.server = server; 5246 data->arg.name = name; 5247 data->arg.attrs = sattr; 5248 data->arg.ftype = ftype; 5249 data->arg.bitmask = nfs4_bitmask(server, data->fattr.label); 5250 data->arg.umask = current_umask(); 5251 data->res.server = server; 5252 data->res.fh = &data->fh; 5253 data->res.fattr = &data->fattr; 5254 nfs_fattr_init(data->res.fattr); 5255 } 5256 return data; 5257 out_free: 5258 kfree(data); 5259 return NULL; 5260 } 5261 5262 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data) 5263 { 5264 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 5265 &data->arg.seq_args, &data->res.seq_res, 1); 5266 if (status == 0) { 5267 spin_lock(&dir->i_lock); 5268 nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo, 5269 data->res.fattr->time_start, 5270 NFS_INO_INVALID_DATA); 5271 spin_unlock(&dir->i_lock); 5272 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr); 5273 } 5274 return status; 5275 } 5276 5277 static struct dentry *nfs4_do_mkdir(struct inode *dir, struct dentry *dentry, 5278 struct nfs4_createdata *data, int *statusp) 5279 { 5280 struct dentry *ret; 5281 5282 *statusp = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 5283 &data->arg.seq_args, &data->res.seq_res, 1); 5284 5285 if (*statusp) 5286 return NULL; 5287 5288 spin_lock(&dir->i_lock); 5289 /* Creating a directory bumps nlink in the parent */ 5290 nfs4_inc_nlink_locked(dir); 5291 nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo, 5292 data->res.fattr->time_start, 5293 NFS_INO_INVALID_DATA); 5294 spin_unlock(&dir->i_lock); 5295 ret = nfs_add_or_obtain(dentry, data->res.fh, data->res.fattr); 5296 if (!IS_ERR(ret)) 5297 return ret; 5298 *statusp = PTR_ERR(ret); 5299 return NULL; 5300 } 5301 5302 static 
void nfs4_free_createdata(struct nfs4_createdata *data) 5303 { 5304 nfs4_label_free(data->fattr.label); 5305 kfree(data); 5306 } 5307 5308 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 5309 struct folio *folio, unsigned int len, struct iattr *sattr, 5310 struct nfs4_label *label) 5311 { 5312 struct page *page = &folio->page; 5313 struct nfs4_createdata *data; 5314 int status = -ENAMETOOLONG; 5315 5316 if (len > NFS4_MAXPATHLEN) 5317 goto out; 5318 5319 status = -ENOMEM; 5320 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK); 5321 if (data == NULL) 5322 goto out; 5323 5324 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK]; 5325 data->arg.u.symlink.pages = &page; 5326 data->arg.u.symlink.len = len; 5327 data->arg.label = label; 5328 5329 status = nfs4_do_create(dir, dentry, data); 5330 5331 nfs4_free_createdata(data); 5332 out: 5333 return status; 5334 } 5335 5336 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 5337 struct folio *folio, unsigned int len, struct iattr *sattr) 5338 { 5339 struct nfs4_exception exception = { 5340 .interruptible = true, 5341 }; 5342 struct nfs4_label l, *label; 5343 int err; 5344 5345 label = nfs4_label_init_security(dir, dentry, sattr, &l); 5346 5347 do { 5348 err = _nfs4_proc_symlink(dir, dentry, folio, len, sattr, label); 5349 trace_nfs4_symlink(dir, &dentry->d_name, err); 5350 err = nfs4_handle_exception(NFS_SERVER(dir), err, 5351 &exception); 5352 } while (exception.retry); 5353 5354 nfs4_label_release_security(label); 5355 return err; 5356 } 5357 5358 static struct dentry *_nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 5359 struct iattr *sattr, 5360 struct nfs4_label *label, int *statusp) 5361 { 5362 struct nfs4_createdata *data; 5363 struct dentry *ret = NULL; 5364 5365 *statusp = -ENOMEM; 5366 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR); 5367 if (data == NULL) 5368 goto out; 5369 5370 data->arg.label = label; 5371 ret = nfs4_do_mkdir(dir, dentry, data, statusp); 5372 5373 nfs4_free_createdata(data); 5374 out: 5375 return ret; 5376 } 5377 5378 static struct dentry *nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 5379 struct iattr *sattr) 5380 { 5381 struct nfs_server *server = NFS_SERVER(dir); 5382 struct nfs4_exception exception = { 5383 .interruptible = true, 5384 }; 5385 struct nfs4_label l, *label; 5386 struct dentry *alias; 5387 int err; 5388 5389 label = nfs4_label_init_security(dir, dentry, sattr, &l); 5390 5391 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 5392 sattr->ia_mode &= ~current_umask(); 5393 do { 5394 alias = _nfs4_proc_mkdir(dir, dentry, sattr, label, &err); 5395 trace_nfs4_mkdir(dir, &dentry->d_name, err); 5396 if (err) 5397 alias = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir), 5398 err, 5399 &exception)); 5400 } while (exception.retry); 5401 nfs4_label_release_security(label); 5402 5403 return alias; 5404 } 5405 5406 static int _nfs4_proc_readdir(struct nfs_readdir_arg *nr_arg, 5407 struct nfs_readdir_res *nr_res) 5408 { 5409 struct inode *dir = d_inode(nr_arg->dentry); 5410 struct nfs_server *server = NFS_SERVER(dir); 5411 struct nfs4_readdir_arg args = { 5412 .fh = NFS_FH(dir), 5413 .pages = nr_arg->pages, 5414 .pgbase = 0, 5415 .count = nr_arg->page_len, 5416 .plus = nr_arg->plus, 5417 }; 5418 struct nfs4_readdir_res res; 5419 struct rpc_message msg = { 5420 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR], 5421 .rpc_argp = &args, 5422 .rpc_resp = &res, 5423 .rpc_cred = nr_arg->cred, 5424 }; 5425 
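/*
 * Note: the bitmask selection below leaves out the security label
 * attribute unless the server has advertised NFS_CAP_SECURITY_LABEL.
 */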
int status; 5426 5427 dprintk("%s: dentry = %pd2, cookie = %llu\n", __func__, 5428 nr_arg->dentry, (unsigned long long)nr_arg->cookie); 5429 if (!(server->caps & NFS_CAP_SECURITY_LABEL)) 5430 args.bitmask = server->attr_bitmask_nl; 5431 else 5432 args.bitmask = server->attr_bitmask; 5433 5434 nfs4_setup_readdir(nr_arg->cookie, nr_arg->verf, nr_arg->dentry, &args); 5435 res.pgbase = args.pgbase; 5436 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, 5437 &res.seq_res, 0); 5438 if (status >= 0) { 5439 memcpy(nr_res->verf, res.verifier.data, NFS4_VERIFIER_SIZE); 5440 status += args.pgbase; 5441 } 5442 5443 nfs_invalidate_atime(dir); 5444 5445 dprintk("%s: returns %d\n", __func__, status); 5446 return status; 5447 } 5448 5449 static int nfs4_proc_readdir(struct nfs_readdir_arg *arg, 5450 struct nfs_readdir_res *res) 5451 { 5452 struct nfs4_exception exception = { 5453 .interruptible = true, 5454 }; 5455 int err; 5456 do { 5457 err = _nfs4_proc_readdir(arg, res); 5458 trace_nfs4_readdir(d_inode(arg->dentry), err); 5459 err = nfs4_handle_exception(NFS_SERVER(d_inode(arg->dentry)), 5460 err, &exception); 5461 } while (exception.retry); 5462 return err; 5463 } 5464 5465 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 5466 struct iattr *sattr, struct nfs4_label *label, dev_t rdev) 5467 { 5468 struct nfs4_createdata *data; 5469 int mode = sattr->ia_mode; 5470 int status = -ENOMEM; 5471 5472 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK); 5473 if (data == NULL) 5474 goto out; 5475 5476 if (S_ISFIFO(mode)) 5477 data->arg.ftype = NF4FIFO; 5478 else if (S_ISBLK(mode)) { 5479 data->arg.ftype = NF4BLK; 5480 data->arg.u.device.specdata1 = MAJOR(rdev); 5481 data->arg.u.device.specdata2 = MINOR(rdev); 5482 } 5483 else if (S_ISCHR(mode)) { 5484 data->arg.ftype = NF4CHR; 5485 data->arg.u.device.specdata1 = MAJOR(rdev); 5486 data->arg.u.device.specdata2 = MINOR(rdev); 5487 } else if (!S_ISSOCK(mode)) { 5488 status = -EINVAL; 5489 goto out_free; 5490 } 5491 5492 data->arg.label = label; 5493 status = nfs4_do_create(dir, dentry, data); 5494 out_free: 5495 nfs4_free_createdata(data); 5496 out: 5497 return status; 5498 } 5499 5500 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 5501 struct iattr *sattr, dev_t rdev) 5502 { 5503 struct nfs_server *server = NFS_SERVER(dir); 5504 struct nfs4_exception exception = { 5505 .interruptible = true, 5506 }; 5507 struct nfs4_label l, *label; 5508 int err; 5509 5510 label = nfs4_label_init_security(dir, dentry, sattr, &l); 5511 5512 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 5513 sattr->ia_mode &= ~current_umask(); 5514 do { 5515 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev); 5516 trace_nfs4_mknod(dir, &dentry->d_name, err); 5517 err = nfs4_handle_exception(NFS_SERVER(dir), err, 5518 &exception); 5519 } while (exception.retry); 5520 5521 nfs4_label_release_security(label); 5522 5523 return err; 5524 } 5525 5526 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, 5527 struct nfs_fsstat *fsstat) 5528 { 5529 struct nfs4_statfs_arg args = { 5530 .fh = fhandle, 5531 .bitmask = server->attr_bitmask, 5532 }; 5533 struct nfs4_statfs_res res = { 5534 .fsstat = fsstat, 5535 }; 5536 struct rpc_message msg = { 5537 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS], 5538 .rpc_argp = &args, 5539 .rpc_resp = &res, 5540 }; 5541 5542 nfs_fattr_init(fsstat->fattr); 5543 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5544 } 
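/*
 * As with most procedures in this file, the wrapper below just retries the
 * one-shot _nfs4_proc_statfs() call for as long as nfs4_handle_exception()
 * asks for another attempt.
 */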
5545 5546 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) 5547 { 5548 struct nfs4_exception exception = { 5549 .interruptible = true, 5550 }; 5551 int err; 5552 do { 5553 err = nfs4_handle_exception(server, 5554 _nfs4_proc_statfs(server, fhandle, fsstat), 5555 &exception); 5556 } while (exception.retry); 5557 return err; 5558 } 5559 5560 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, 5561 struct nfs_fsinfo *fsinfo) 5562 { 5563 struct nfs4_fsinfo_arg args = { 5564 .fh = fhandle, 5565 .bitmask = server->attr_bitmask, 5566 }; 5567 struct nfs4_fsinfo_res res = { 5568 .fsinfo = fsinfo, 5569 }; 5570 struct rpc_message msg = { 5571 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO], 5572 .rpc_argp = &args, 5573 .rpc_resp = &res, 5574 }; 5575 5576 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5577 } 5578 5579 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 5580 { 5581 struct nfs4_exception exception = { 5582 .interruptible = true, 5583 }; 5584 int err; 5585 5586 do { 5587 err = _nfs4_do_fsinfo(server, fhandle, fsinfo); 5588 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err); 5589 if (err == 0) { 5590 nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time * HZ); 5591 break; 5592 } 5593 err = nfs4_handle_exception(server, err, &exception); 5594 } while (exception.retry); 5595 return err; 5596 } 5597 5598 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 5599 { 5600 int error; 5601 5602 nfs_fattr_init(fsinfo->fattr); 5603 error = nfs4_do_fsinfo(server, fhandle, fsinfo); 5604 if (error == 0) { 5605 /* block layout checks this! 
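 *
 * (record the block size before calling set_pnfs_layoutdriver(): the pNFS
 * block layout driver refuses to load while server->pnfs_blksize is zero)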
*/ 5606 server->pnfs_blksize = fsinfo->blksize; 5607 set_pnfs_layoutdriver(server, fhandle, fsinfo); 5608 } 5609 5610 return error; 5611 } 5612 5613 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 5614 struct nfs_pathconf *pathconf) 5615 { 5616 struct nfs4_pathconf_arg args = { 5617 .fh = fhandle, 5618 .bitmask = server->attr_bitmask, 5619 }; 5620 struct nfs4_pathconf_res res = { 5621 .pathconf = pathconf, 5622 }; 5623 struct rpc_message msg = { 5624 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF], 5625 .rpc_argp = &args, 5626 .rpc_resp = &res, 5627 }; 5628 5629 /* None of the pathconf attributes are mandatory to implement */ 5630 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) { 5631 memset(pathconf, 0, sizeof(*pathconf)); 5632 return 0; 5633 } 5634 5635 nfs_fattr_init(pathconf->fattr); 5636 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5637 } 5638 5639 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 5640 struct nfs_pathconf *pathconf) 5641 { 5642 struct nfs4_exception exception = { 5643 .interruptible = true, 5644 }; 5645 int err; 5646 5647 do { 5648 err = nfs4_handle_exception(server, 5649 _nfs4_proc_pathconf(server, fhandle, pathconf), 5650 &exception); 5651 } while (exception.retry); 5652 return err; 5653 } 5654 5655 int nfs4_set_rw_stateid(nfs4_stateid *stateid, 5656 const struct nfs_open_context *ctx, 5657 const struct nfs_lock_context *l_ctx, 5658 fmode_t fmode) 5659 { 5660 return nfs4_select_rw_stateid(ctx->state, fmode, l_ctx, stateid, NULL); 5661 } 5662 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid); 5663 5664 static bool nfs4_stateid_is_current(nfs4_stateid *stateid, 5665 const struct nfs_open_context *ctx, 5666 const struct nfs_lock_context *l_ctx, 5667 fmode_t fmode) 5668 { 5669 nfs4_stateid _current_stateid; 5670 5671 /* If the current stateid represents a lost lock, then exit */ 5672 if (nfs4_set_rw_stateid(&_current_stateid, ctx, l_ctx, fmode) == -EIO) 5673 return true; 5674 return nfs4_stateid_match(stateid, &_current_stateid); 5675 } 5676 5677 static bool nfs4_error_stateid_expired(int err) 5678 { 5679 switch (err) { 5680 case -NFS4ERR_DELEG_REVOKED: 5681 case -NFS4ERR_ADMIN_REVOKED: 5682 case -NFS4ERR_BAD_STATEID: 5683 case -NFS4ERR_STALE_STATEID: 5684 case -NFS4ERR_OLD_STATEID: 5685 case -NFS4ERR_OPENMODE: 5686 case -NFS4ERR_EXPIRED: 5687 return true; 5688 } 5689 return false; 5690 } 5691 5692 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr) 5693 { 5694 struct nfs_server *server = NFS_SERVER(hdr->inode); 5695 5696 trace_nfs4_read(hdr, task->tk_status); 5697 if (task->tk_status < 0) { 5698 struct nfs4_exception exception = { 5699 .inode = hdr->inode, 5700 .state = hdr->args.context->state, 5701 .stateid = &hdr->args.stateid, 5702 .retrans = hdr->retrans, 5703 }; 5704 task->tk_status = nfs4_async_handle_exception(task, 5705 server, task->tk_status, &exception); 5706 hdr->retrans = exception.retrans; 5707 if (exception.retry) { 5708 rpc_restart_call_prepare(task); 5709 return -EAGAIN; 5710 } 5711 } 5712 5713 if (task->tk_status > 0) 5714 renew_lease(server, hdr->timestamp); 5715 return 0; 5716 } 5717 5718 static bool nfs4_read_stateid_changed(struct rpc_task *task, 5719 struct nfs_pgio_args *args) 5720 { 5721 5722 if (!nfs4_error_stateid_expired(task->tk_status) || 5723 nfs4_stateid_is_current(&args->stateid, 5724 args->context, 5725 args->lock_context, 5726 FMODE_READ)) 5727 return false; 5728 rpc_restart_call_prepare(task); 5729 
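	/* The restarted READ picks up a fresh stateid when the request is
	 * re-prepared (nfs4_proc_pgio_rpc_prepare() -> nfs4_set_rw_stateid()). */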
return true; 5730 } 5731 5732 static bool nfs4_read_plus_not_supported(struct rpc_task *task, 5733 struct nfs_pgio_header *hdr) 5734 { 5735 struct nfs_server *server = NFS_SERVER(hdr->inode); 5736 struct rpc_message *msg = &task->tk_msg; 5737 5738 if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS] && 5739 task->tk_status == -ENOTSUPP) { 5740 server->caps &= ~NFS_CAP_READ_PLUS; 5741 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 5742 rpc_restart_call_prepare(task); 5743 return true; 5744 } 5745 return false; 5746 } 5747 5748 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 5749 { 5750 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 5751 return -EAGAIN; 5752 if (nfs4_read_stateid_changed(task, &hdr->args)) 5753 return -EAGAIN; 5754 if (nfs4_read_plus_not_supported(task, hdr)) 5755 return -EAGAIN; 5756 if (task->tk_status > 0) 5757 nfs_invalidate_atime(hdr->inode); 5758 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) : 5759 nfs4_read_done_cb(task, hdr); 5760 } 5761 5762 #if defined CONFIG_NFS_V4_2 && defined CONFIG_NFS_V4_2_READ_PLUS 5763 static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr, 5764 struct rpc_message *msg) 5765 { 5766 /* Note: We don't use READ_PLUS with pNFS yet */ 5767 if (nfs_server_capable(hdr->inode, NFS_CAP_READ_PLUS) && !hdr->ds_clp) { 5768 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS]; 5769 return nfs_read_alloc_scratch(hdr, READ_PLUS_SCRATCH_SIZE); 5770 } 5771 return false; 5772 } 5773 #else 5774 static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr, 5775 struct rpc_message *msg) 5776 { 5777 return false; 5778 } 5779 #endif /* CONFIG_NFS_V4_2 */ 5780 5781 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr, 5782 struct rpc_message *msg) 5783 { 5784 hdr->timestamp = jiffies; 5785 if (!hdr->pgio_done_cb) 5786 hdr->pgio_done_cb = nfs4_read_done_cb; 5787 if (!nfs42_read_plus_support(hdr, msg)) 5788 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 5789 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0); 5790 } 5791 5792 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task, 5793 struct nfs_pgio_header *hdr) 5794 { 5795 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode)->nfs_client, 5796 &hdr->args.seq_args, 5797 &hdr->res.seq_res, 5798 task)) 5799 return 0; 5800 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context, 5801 hdr->args.lock_context, 5802 hdr->rw_mode) == -EIO) 5803 return -EIO; 5804 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) 5805 return -EIO; 5806 return 0; 5807 } 5808 5809 static int nfs4_write_done_cb(struct rpc_task *task, 5810 struct nfs_pgio_header *hdr) 5811 { 5812 struct inode *inode = hdr->inode; 5813 5814 trace_nfs4_write(hdr, task->tk_status); 5815 if (task->tk_status < 0) { 5816 struct nfs4_exception exception = { 5817 .inode = hdr->inode, 5818 .state = hdr->args.context->state, 5819 .stateid = &hdr->args.stateid, 5820 .retrans = hdr->retrans, 5821 }; 5822 task->tk_status = nfs4_async_handle_exception(task, 5823 NFS_SERVER(inode), task->tk_status, 5824 &exception); 5825 hdr->retrans = exception.retrans; 5826 if (exception.retry) { 5827 rpc_restart_call_prepare(task); 5828 return -EAGAIN; 5829 } 5830 } 5831 if (task->tk_status >= 0) { 5832 renew_lease(NFS_SERVER(inode), hdr->timestamp); 5833 nfs_writeback_update_inode(hdr); 5834 } 5835 return 0; 5836 } 5837 5838 static bool nfs4_write_stateid_changed(struct rpc_task *task, 5839 struct nfs_pgio_args *args) 5840 { 5841 5842 if 
(!nfs4_error_stateid_expired(task->tk_status) || 5843 nfs4_stateid_is_current(&args->stateid, 5844 args->context, 5845 args->lock_context, 5846 FMODE_WRITE)) 5847 return false; 5848 rpc_restart_call_prepare(task); 5849 return true; 5850 } 5851 5852 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 5853 { 5854 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 5855 return -EAGAIN; 5856 if (nfs4_write_stateid_changed(task, &hdr->args)) 5857 return -EAGAIN; 5858 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) : 5859 nfs4_write_done_cb(task, hdr); 5860 } 5861 5862 static 5863 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr) 5864 { 5865 /* Don't request attributes for pNFS or O_DIRECT writes */ 5866 if (hdr->ds_clp != NULL || hdr->dreq != NULL) 5867 return false; 5868 /* Otherwise, request attributes if and only if we don't hold 5869 * a delegation 5870 */ 5871 return nfs4_have_delegation(hdr->inode, FMODE_READ, 0) == 0; 5872 } 5873 5874 void nfs4_bitmask_set(__u32 bitmask[], const __u32 src[], 5875 struct inode *inode, unsigned long cache_validity) 5876 { 5877 struct nfs_server *server = NFS_SERVER(inode); 5878 unsigned int i; 5879 5880 memcpy(bitmask, src, sizeof(*bitmask) * NFS4_BITMASK_SZ); 5881 cache_validity |= READ_ONCE(NFS_I(inode)->cache_validity); 5882 5883 if (cache_validity & NFS_INO_INVALID_CHANGE) 5884 bitmask[0] |= FATTR4_WORD0_CHANGE; 5885 if (cache_validity & NFS_INO_INVALID_ATIME) 5886 bitmask[1] |= FATTR4_WORD1_TIME_ACCESS; 5887 if (cache_validity & NFS_INO_INVALID_MODE) 5888 bitmask[1] |= FATTR4_WORD1_MODE; 5889 if (cache_validity & NFS_INO_INVALID_OTHER) 5890 bitmask[1] |= FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP; 5891 if (cache_validity & NFS_INO_INVALID_NLINK) 5892 bitmask[1] |= FATTR4_WORD1_NUMLINKS; 5893 if (cache_validity & NFS_INO_INVALID_CTIME) 5894 bitmask[1] |= FATTR4_WORD1_TIME_METADATA; 5895 if (cache_validity & NFS_INO_INVALID_MTIME) 5896 bitmask[1] |= FATTR4_WORD1_TIME_MODIFY; 5897 if (cache_validity & NFS_INO_INVALID_BLOCKS) 5898 bitmask[1] |= FATTR4_WORD1_SPACE_USED; 5899 if (cache_validity & NFS_INO_INVALID_BTIME) 5900 bitmask[1] |= FATTR4_WORD1_TIME_CREATE; 5901 5902 if (cache_validity & NFS_INO_INVALID_SIZE) 5903 bitmask[0] |= FATTR4_WORD0_SIZE; 5904 5905 for (i = 0; i < NFS4_BITMASK_SZ; i++) 5906 bitmask[i] &= server->attr_bitmask[i]; 5907 } 5908 5909 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr, 5910 struct rpc_message *msg, 5911 struct rpc_clnt **clnt) 5912 { 5913 struct nfs_server *server = NFS_SERVER(hdr->inode); 5914 5915 if (!nfs4_write_need_cache_consistency_data(hdr)) { 5916 hdr->args.bitmask = NULL; 5917 hdr->res.fattr = NULL; 5918 } else { 5919 nfs4_bitmask_set(hdr->args.bitmask_store, 5920 server->cache_consistency_bitmask, 5921 hdr->inode, NFS_INO_INVALID_BLOCKS); 5922 hdr->args.bitmask = hdr->args.bitmask_store; 5923 } 5924 5925 if (!hdr->pgio_done_cb) 5926 hdr->pgio_done_cb = nfs4_write_done_cb; 5927 hdr->res.server = server; 5928 hdr->timestamp = jiffies; 5929 5930 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; 5931 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0); 5932 nfs4_state_protect_write(hdr->ds_clp ? 
hdr->ds_clp : server->nfs_client, clnt, msg, hdr); 5933 } 5934 5935 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) 5936 { 5937 nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client, 5938 &data->args.seq_args, 5939 &data->res.seq_res, 5940 task); 5941 } 5942 5943 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data) 5944 { 5945 struct inode *inode = data->inode; 5946 5947 trace_nfs4_commit(data, task->tk_status); 5948 if (nfs4_async_handle_error(task, NFS_SERVER(inode), 5949 NULL, NULL) == -EAGAIN) { 5950 rpc_restart_call_prepare(task); 5951 return -EAGAIN; 5952 } 5953 return 0; 5954 } 5955 5956 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data) 5957 { 5958 if (!nfs4_sequence_done(task, &data->res.seq_res)) 5959 return -EAGAIN; 5960 return data->commit_done_cb(task, data); 5961 } 5962 5963 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg, 5964 struct rpc_clnt **clnt) 5965 { 5966 struct nfs_server *server = NFS_SERVER(data->inode); 5967 5968 if (data->commit_done_cb == NULL) 5969 data->commit_done_cb = nfs4_commit_done_cb; 5970 data->res.server = server; 5971 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT]; 5972 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0); 5973 nfs4_state_protect(data->ds_clp ? data->ds_clp : server->nfs_client, 5974 NFS_SP4_MACH_CRED_COMMIT, clnt, msg); 5975 } 5976 5977 static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args, 5978 struct nfs_commitres *res) 5979 { 5980 struct inode *dst_inode = file_inode(dst); 5981 struct nfs_server *server = NFS_SERVER(dst_inode); 5982 struct rpc_message msg = { 5983 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT], 5984 .rpc_argp = args, 5985 .rpc_resp = res, 5986 }; 5987 5988 args->fh = NFS_FH(dst_inode); 5989 return nfs4_call_sync(server->client, server, &msg, 5990 &args->seq_args, &res->seq_res, 1); 5991 } 5992 5993 int nfs4_proc_commit(struct file *dst, __u64 offset, __u32 count, struct nfs_commitres *res) 5994 { 5995 struct nfs_commitargs args = { 5996 .offset = offset, 5997 .count = count, 5998 }; 5999 struct nfs_server *dst_server = NFS_SERVER(file_inode(dst)); 6000 struct nfs4_exception exception = { }; 6001 int status; 6002 6003 do { 6004 status = _nfs4_proc_commit(dst, &args, res); 6005 status = nfs4_handle_exception(dst_server, status, &exception); 6006 } while (exception.retry); 6007 6008 return status; 6009 } 6010 6011 struct nfs4_renewdata { 6012 struct nfs_client *client; 6013 unsigned long timestamp; 6014 }; 6015 6016 /* 6017 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special 6018 * standalone procedure for queueing an asynchronous RENEW. 
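 * It is driven from the client's lease renewal machinery rather than from
 * ordinary per-inode RPC traffic, and renews the lease for the whole
 * nfs_client.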
 */
static void nfs4_renew_release(void *calldata)
{
	struct nfs4_renewdata *data = calldata;
	struct nfs_client *clp = data->client;

	if (refcount_read(&clp->cl_count) > 1)
		nfs4_schedule_state_renewal(clp);
	nfs_put_client(clp);
	kfree(data);
}

static void nfs4_renew_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_renewdata *data = calldata;
	struct nfs_client *clp = data->client;
	unsigned long timestamp = data->timestamp;

	trace_nfs4_renew_async(clp, task->tk_status);
	switch (task->tk_status) {
	case 0:
		break;
	case -NFS4ERR_LEASE_MOVED:
		nfs4_schedule_lease_moved_recovery(clp);
		break;
	default:
		/* Unless we're shutting down, schedule state recovery! */
		if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
			return;
		/* Note: tk_status carries negated NFS4ERR values */
		if (task->tk_status != -NFS4ERR_CB_PATH_DOWN) {
			nfs4_schedule_lease_recovery(clp);
			return;
		}
		nfs4_schedule_path_down_recovery(clp);
	}
	do_renew_lease(clp, timestamp);
}

static const struct rpc_call_ops nfs4_renew_ops = {
	.rpc_call_done = nfs4_renew_done,
	.rpc_release = nfs4_renew_release,
};

static int nfs4_proc_async_renew(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
		.rpc_argp = clp,
		.rpc_cred = cred,
	};
	struct nfs4_renewdata *data;

	if (renew_flags == 0)
		return 0;
	if (!refcount_inc_not_zero(&clp->cl_count))
		return -EIO;
	data = kmalloc(sizeof(*data), GFP_NOFS);
	if (data == NULL) {
		nfs_put_client(clp);
		return -ENOMEM;
	}
	data->client = clp;
	data->timestamp = jiffies;
	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
			&nfs4_renew_ops, data);
}

static int nfs4_proc_renew(struct nfs_client *clp, const struct cred *cred)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
		.rpc_argp = clp,
		.rpc_cred = cred,
	};
	unsigned long now = jiffies;
	int status;

	status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
	if (status < 0)
		return status;
	do_renew_lease(clp, now);
	return 0;
}

static bool nfs4_server_supports_acls(const struct nfs_server *server,
				      enum nfs4_acl_type type)
{
	switch (type) {
	default:
		return server->attr_bitmask[0] & FATTR4_WORD0_ACL;
	case NFS4ACL_DACL:
		return server->attr_bitmask[1] & FATTR4_WORD1_DACL;
	case NFS4ACL_SACL:
		return server->attr_bitmask[1] & FATTR4_WORD1_SACL;
	}
}

/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
 * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
 * the stack.
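 * For illustration: with 4KB pages and the usual 64KB XATTR_SIZE_MAX
 * that is 16 page pointers, i.e. 128 bytes on a 64-bit build (both
 * figures depend on the configuration).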
6119 */ 6120 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE) 6121 6122 int nfs4_buf_to_pages_noslab(const void *buf, size_t buflen, 6123 struct page **pages) 6124 { 6125 struct page *newpage, **spages; 6126 int rc = 0; 6127 size_t len; 6128 spages = pages; 6129 6130 do { 6131 len = min_t(size_t, PAGE_SIZE, buflen); 6132 newpage = alloc_page(GFP_KERNEL); 6133 6134 if (newpage == NULL) 6135 goto unwind; 6136 memcpy(page_address(newpage), buf, len); 6137 buf += len; 6138 buflen -= len; 6139 *pages++ = newpage; 6140 rc++; 6141 } while (buflen != 0); 6142 6143 return rc; 6144 6145 unwind: 6146 for(; rc > 0; rc--) 6147 __free_page(spages[rc-1]); 6148 return -ENOMEM; 6149 } 6150 6151 struct nfs4_cached_acl { 6152 enum nfs4_acl_type type; 6153 int cached; 6154 size_t len; 6155 char data[]; 6156 }; 6157 6158 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl) 6159 { 6160 struct nfs_inode *nfsi = NFS_I(inode); 6161 6162 spin_lock(&inode->i_lock); 6163 kfree(nfsi->nfs4_acl); 6164 nfsi->nfs4_acl = acl; 6165 spin_unlock(&inode->i_lock); 6166 } 6167 6168 static void nfs4_zap_acl_attr(struct inode *inode) 6169 { 6170 nfs4_set_cached_acl(inode, NULL); 6171 } 6172 6173 static ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, 6174 size_t buflen, enum nfs4_acl_type type) 6175 { 6176 struct nfs_inode *nfsi = NFS_I(inode); 6177 struct nfs4_cached_acl *acl; 6178 int ret = -ENOENT; 6179 6180 spin_lock(&inode->i_lock); 6181 acl = nfsi->nfs4_acl; 6182 if (acl == NULL) 6183 goto out; 6184 if (acl->type != type) 6185 goto out; 6186 if (buf == NULL) /* user is just asking for length */ 6187 goto out_len; 6188 if (acl->cached == 0) 6189 goto out; 6190 ret = -ERANGE; /* see getxattr(2) man page */ 6191 if (acl->len > buflen) 6192 goto out; 6193 memcpy(buf, acl->data, acl->len); 6194 out_len: 6195 ret = acl->len; 6196 out: 6197 spin_unlock(&inode->i_lock); 6198 return ret; 6199 } 6200 6201 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, 6202 size_t pgbase, size_t acl_len, 6203 enum nfs4_acl_type type) 6204 { 6205 struct nfs4_cached_acl *acl; 6206 size_t buflen = sizeof(*acl) + acl_len; 6207 6208 if (buflen <= PAGE_SIZE) { 6209 acl = kmalloc(buflen, GFP_KERNEL); 6210 if (acl == NULL) 6211 goto out; 6212 acl->cached = 1; 6213 _copy_from_pages(acl->data, pages, pgbase, acl_len); 6214 } else { 6215 acl = kmalloc(sizeof(*acl), GFP_KERNEL); 6216 if (acl == NULL) 6217 goto out; 6218 acl->cached = 0; 6219 } 6220 acl->type = type; 6221 acl->len = acl_len; 6222 out: 6223 nfs4_set_cached_acl(inode, acl); 6224 } 6225 6226 /* 6227 * The getxattr API returns the required buffer length when called with a 6228 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating 6229 * the required buf. On a NULL buf, we send a page of data to the server 6230 * guessing that the ACL request can be serviced by a page. If so, we cache 6231 * up to the page of ACL data, and the 2nd call to getxattr is serviced by 6232 * the cache. If not so, we throw away the page, and cache the required 6233 * length. The next getxattr call will then produce another round trip to 6234 * the server, this time with the input buf of the required size. 
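 * An illustrative sequence (sizes are hypothetical):
 *
 *	getxattr(f, "system.nfs4_acl", NULL, 0)     -> GETACL; returns the length (say 12k)
 *	getxattr(f, "system.nfs4_acl", buf, 12k)    -> served from the cache if the ACL fit
 *	                                               in a page, otherwise a second GETACL
 *	                                               with a large enough buffer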
6235 */ 6236 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, 6237 size_t buflen, enum nfs4_acl_type type) 6238 { 6239 struct page **pages; 6240 struct nfs_getaclargs args = { 6241 .fh = NFS_FH(inode), 6242 .acl_type = type, 6243 .acl_len = buflen, 6244 }; 6245 struct nfs_getaclres res = { 6246 .acl_type = type, 6247 .acl_len = buflen, 6248 }; 6249 struct rpc_message msg = { 6250 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL], 6251 .rpc_argp = &args, 6252 .rpc_resp = &res, 6253 }; 6254 unsigned int npages; 6255 int ret = -ENOMEM, i; 6256 struct nfs_server *server = NFS_SERVER(inode); 6257 6258 if (buflen == 0) 6259 buflen = server->rsize; 6260 6261 npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1; 6262 pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); 6263 if (!pages) 6264 return -ENOMEM; 6265 6266 args.acl_pages = pages; 6267 6268 for (i = 0; i < npages; i++) { 6269 pages[i] = alloc_page(GFP_KERNEL); 6270 if (!pages[i]) 6271 goto out_free; 6272 } 6273 6274 /* for decoding across pages */ 6275 res.acl_scratch = folio_alloc(GFP_KERNEL, 0); 6276 if (!res.acl_scratch) 6277 goto out_free; 6278 6279 args.acl_len = npages * PAGE_SIZE; 6280 6281 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", 6282 __func__, buf, buflen, npages, args.acl_len); 6283 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), 6284 &msg, &args.seq_args, &res.seq_res, 0); 6285 if (ret) 6286 goto out_free; 6287 6288 /* Handle the case where the passed-in buffer is too short */ 6289 if (res.acl_flags & NFS4_ACL_TRUNC) { 6290 /* Did the user only issue a request for the acl length? */ 6291 if (buf == NULL) 6292 goto out_ok; 6293 ret = -ERANGE; 6294 goto out_free; 6295 } 6296 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len, 6297 type); 6298 if (buf) { 6299 if (res.acl_len > buflen) { 6300 ret = -ERANGE; 6301 goto out_free; 6302 } 6303 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len); 6304 } 6305 out_ok: 6306 ret = res.acl_len; 6307 out_free: 6308 while (--i >= 0) 6309 __free_page(pages[i]); 6310 if (res.acl_scratch) 6311 folio_put(res.acl_scratch); 6312 kfree(pages); 6313 return ret; 6314 } 6315 6316 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, 6317 size_t buflen, enum nfs4_acl_type type) 6318 { 6319 struct nfs4_exception exception = { 6320 .interruptible = true, 6321 }; 6322 ssize_t ret; 6323 do { 6324 ret = __nfs4_get_acl_uncached(inode, buf, buflen, type); 6325 trace_nfs4_get_acl(inode, ret); 6326 if (ret >= 0) 6327 break; 6328 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); 6329 } while (exception.retry); 6330 return ret; 6331 } 6332 6333 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen, 6334 enum nfs4_acl_type type) 6335 { 6336 struct nfs_server *server = NFS_SERVER(inode); 6337 int ret; 6338 6339 if (unlikely(NFS_FH(inode)->size == 0)) 6340 return -ENODATA; 6341 if (!nfs4_server_supports_acls(server, type)) 6342 return -EOPNOTSUPP; 6343 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 6344 if (ret < 0) 6345 return ret; 6346 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) 6347 nfs_zap_acl_cache(inode); 6348 ret = nfs4_read_cached_acl(inode, buf, buflen, type); 6349 if (ret != -ENOENT) 6350 /* -ENOENT is returned if there is no ACL or if there is an ACL 6351 * but no cached acl data, just the acl length */ 6352 return ret; 6353 return nfs4_get_acl_uncached(inode, buf, buflen, type); 6354 } 6355 6356 static int __nfs4_proc_set_acl(struct 
inode *inode, const void *buf, 6357 size_t buflen, enum nfs4_acl_type type) 6358 { 6359 struct nfs_server *server = NFS_SERVER(inode); 6360 struct page *pages[NFS4ACL_MAXPAGES]; 6361 struct nfs_setaclargs arg = { 6362 .fh = NFS_FH(inode), 6363 .acl_type = type, 6364 .acl_len = buflen, 6365 .acl_pages = pages, 6366 }; 6367 struct nfs_setaclres res; 6368 struct rpc_message msg = { 6369 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL], 6370 .rpc_argp = &arg, 6371 .rpc_resp = &res, 6372 }; 6373 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 6374 int ret, i; 6375 6376 /* You can't remove system.nfs4_acl: */ 6377 if (buflen == 0) 6378 return -EINVAL; 6379 if (!nfs4_server_supports_acls(server, type)) 6380 return -EOPNOTSUPP; 6381 if (npages > ARRAY_SIZE(pages)) 6382 return -ERANGE; 6383 i = nfs4_buf_to_pages_noslab(buf, buflen, arg.acl_pages); 6384 if (i < 0) 6385 return i; 6386 nfs4_inode_make_writeable(inode); 6387 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 6388 6389 /* 6390 * Free each page after tx, so the only ref left is 6391 * held by the network stack 6392 */ 6393 for (; i > 0; i--) 6394 put_page(pages[i-1]); 6395 6396 /* 6397 * Acl update can result in inode attribute update. 6398 * so mark the attribute cache invalid. 6399 */ 6400 spin_lock(&inode->i_lock); 6401 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE | 6402 NFS_INO_INVALID_CTIME | 6403 NFS_INO_REVAL_FORCED); 6404 spin_unlock(&inode->i_lock); 6405 nfs_access_zap_cache(inode); 6406 nfs_zap_acl_cache(inode); 6407 return ret; 6408 } 6409 6410 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, 6411 size_t buflen, enum nfs4_acl_type type) 6412 { 6413 struct nfs4_exception exception = { }; 6414 int err; 6415 6416 if (unlikely(NFS_FH(inode)->size == 0)) 6417 return -ENODATA; 6418 do { 6419 err = __nfs4_proc_set_acl(inode, buf, buflen, type); 6420 trace_nfs4_set_acl(inode, err); 6421 if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) { 6422 /* 6423 * no need to retry since the kernel 6424 * isn't involved in encoding the ACEs. 
6425 */ 6426 err = -EINVAL; 6427 break; 6428 } 6429 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6430 &exception); 6431 } while (exception.retry); 6432 return err; 6433 } 6434 6435 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 6436 static int _nfs4_get_security_label(struct inode *inode, void *buf, 6437 size_t buflen) 6438 { 6439 struct nfs_server *server = NFS_SERVER(inode); 6440 struct nfs4_label label = {0, 0, 0, buflen, buf}; 6441 6442 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 6443 struct nfs_fattr fattr = { 6444 .label = &label, 6445 }; 6446 struct nfs4_getattr_arg arg = { 6447 .fh = NFS_FH(inode), 6448 .bitmask = bitmask, 6449 }; 6450 struct nfs4_getattr_res res = { 6451 .fattr = &fattr, 6452 .server = server, 6453 }; 6454 struct rpc_message msg = { 6455 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 6456 .rpc_argp = &arg, 6457 .rpc_resp = &res, 6458 }; 6459 int ret; 6460 6461 nfs_fattr_init(&fattr); 6462 6463 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0); 6464 if (ret) 6465 return ret; 6466 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL)) 6467 return -ENOENT; 6468 return label.len; 6469 } 6470 6471 static int nfs4_get_security_label(struct inode *inode, void *buf, 6472 size_t buflen) 6473 { 6474 struct nfs4_exception exception = { 6475 .interruptible = true, 6476 }; 6477 int err; 6478 6479 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 6480 return -EOPNOTSUPP; 6481 6482 do { 6483 err = _nfs4_get_security_label(inode, buf, buflen); 6484 trace_nfs4_get_security_label(inode, err); 6485 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6486 &exception); 6487 } while (exception.retry); 6488 return err; 6489 } 6490 6491 static int _nfs4_do_set_security_label(struct inode *inode, 6492 struct nfs4_label *ilabel, 6493 struct nfs_fattr *fattr) 6494 { 6495 6496 struct iattr sattr = {0}; 6497 struct nfs_server *server = NFS_SERVER(inode); 6498 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 6499 struct nfs_setattrargs arg = { 6500 .fh = NFS_FH(inode), 6501 .iap = &sattr, 6502 .server = server, 6503 .bitmask = bitmask, 6504 .label = ilabel, 6505 }; 6506 struct nfs_setattrres res = { 6507 .fattr = fattr, 6508 .server = server, 6509 }; 6510 struct rpc_message msg = { 6511 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 6512 .rpc_argp = &arg, 6513 .rpc_resp = &res, 6514 }; 6515 int status; 6516 6517 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 6518 6519 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 6520 if (status) 6521 dprintk("%s failed: %d\n", __func__, status); 6522 6523 return status; 6524 } 6525 6526 static int nfs4_do_set_security_label(struct inode *inode, 6527 struct nfs4_label *ilabel, 6528 struct nfs_fattr *fattr) 6529 { 6530 struct nfs4_exception exception = { }; 6531 int err; 6532 6533 do { 6534 err = _nfs4_do_set_security_label(inode, ilabel, fattr); 6535 trace_nfs4_set_security_label(inode, err); 6536 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6537 &exception); 6538 } while (exception.retry); 6539 return err; 6540 } 6541 6542 static int 6543 nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen) 6544 { 6545 struct nfs4_label ilabel = {0, 0, 0, buflen, (char *)buf }; 6546 struct nfs_fattr *fattr; 6547 int status; 6548 6549 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 6550 return -EOPNOTSUPP; 6551 6552 fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode)); 6553 if (fattr == NULL) 6554 return -ENOMEM; 6555 6556 
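	/* Push the label to the server; on success the returned attributes
	 * are used to refresh the local inode's security state. */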
status = nfs4_do_set_security_label(inode, &ilabel, fattr); 6557 if (status == 0) 6558 nfs_setsecurity(inode, fattr); 6559 6560 nfs_free_fattr(fattr); 6561 return status; 6562 } 6563 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */ 6564 6565 6566 static void nfs4_init_boot_verifier(const struct nfs_client *clp, 6567 nfs4_verifier *bootverf) 6568 { 6569 __be32 verf[2]; 6570 6571 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) { 6572 /* An impossible timestamp guarantees this value 6573 * will never match a generated boot time. */ 6574 verf[0] = cpu_to_be32(U32_MAX); 6575 verf[1] = cpu_to_be32(U32_MAX); 6576 } else { 6577 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 6578 u64 ns = ktime_to_ns(nn->boot_time); 6579 6580 verf[0] = cpu_to_be32(ns >> 32); 6581 verf[1] = cpu_to_be32(ns); 6582 } 6583 memcpy(bootverf->data, verf, sizeof(bootverf->data)); 6584 } 6585 6586 static size_t 6587 nfs4_get_uniquifier(struct nfs_client *clp, char *buf, size_t buflen) 6588 { 6589 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 6590 struct nfs_netns_client *nn_clp = nn->nfs_client; 6591 const char *id; 6592 6593 buf[0] = '\0'; 6594 6595 if (nn_clp) { 6596 rcu_read_lock(); 6597 id = rcu_dereference(nn_clp->identifier); 6598 if (id) 6599 strscpy(buf, id, buflen); 6600 rcu_read_unlock(); 6601 } 6602 6603 if (nfs4_client_id_uniquifier[0] != '\0' && buf[0] == '\0') 6604 strscpy(buf, nfs4_client_id_uniquifier, buflen); 6605 6606 return strlen(buf); 6607 } 6608 6609 static int 6610 nfs4_init_nonuniform_client_string(struct nfs_client *clp) 6611 { 6612 char buf[NFS4_CLIENT_ID_UNIQ_LEN]; 6613 size_t buflen; 6614 size_t len; 6615 char *str; 6616 6617 if (clp->cl_owner_id != NULL) 6618 return 0; 6619 6620 rcu_read_lock(); 6621 len = 14 + 6622 strlen(clp->cl_rpcclient->cl_nodename) + 6623 1 + 6624 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) + 6625 1; 6626 rcu_read_unlock(); 6627 6628 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf)); 6629 if (buflen) 6630 len += buflen + 1; 6631 6632 if (len > NFS4_OPAQUE_LIMIT + 1) 6633 return -EINVAL; 6634 6635 /* 6636 * Since this string is allocated at mount time, and held until the 6637 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 6638 * about a memory-reclaim deadlock. 6639 */ 6640 str = kmalloc(len, GFP_KERNEL); 6641 if (!str) 6642 return -ENOMEM; 6643 6644 rcu_read_lock(); 6645 if (buflen) 6646 scnprintf(str, len, "Linux NFSv4.0 %s/%s/%s", 6647 clp->cl_rpcclient->cl_nodename, buf, 6648 rpc_peeraddr2str(clp->cl_rpcclient, 6649 RPC_DISPLAY_ADDR)); 6650 else 6651 scnprintf(str, len, "Linux NFSv4.0 %s/%s", 6652 clp->cl_rpcclient->cl_nodename, 6653 rpc_peeraddr2str(clp->cl_rpcclient, 6654 RPC_DISPLAY_ADDR)); 6655 rcu_read_unlock(); 6656 6657 clp->cl_owner_id = str; 6658 return 0; 6659 } 6660 6661 static int 6662 nfs4_init_uniform_client_string(struct nfs_client *clp) 6663 { 6664 char buf[NFS4_CLIENT_ID_UNIQ_LEN]; 6665 size_t buflen; 6666 size_t len; 6667 char *str; 6668 6669 if (clp->cl_owner_id != NULL) 6670 return 0; 6671 6672 len = 10 + 10 + 1 + 10 + 1 + 6673 strlen(clp->cl_rpcclient->cl_nodename) + 1; 6674 6675 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf)); 6676 if (buflen) 6677 len += buflen + 1; 6678 6679 if (len > NFS4_OPAQUE_LIMIT + 1) 6680 return -EINVAL; 6681 6682 /* 6683 * Since this string is allocated at mount time, and held until the 6684 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 6685 * about a memory-reclaim deadlock. 
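 *
 * The resulting client owner string has the form
 * "Linux NFSv<maj>.<min> [<uniquifier>/]<nodename>",
 * e.g. "Linux NFSv4.2 nfs-client-01" (hostname shown is a made-up example).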
6686 */ 6687 str = kmalloc(len, GFP_KERNEL); 6688 if (!str) 6689 return -ENOMEM; 6690 6691 if (buflen) 6692 scnprintf(str, len, "Linux NFSv%u.%u %s/%s", 6693 clp->rpc_ops->version, clp->cl_minorversion, 6694 buf, clp->cl_rpcclient->cl_nodename); 6695 else 6696 scnprintf(str, len, "Linux NFSv%u.%u %s", 6697 clp->rpc_ops->version, clp->cl_minorversion, 6698 clp->cl_rpcclient->cl_nodename); 6699 clp->cl_owner_id = str; 6700 return 0; 6701 } 6702 6703 /* 6704 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback 6705 * services. Advertise one based on the address family of the 6706 * clientaddr. 6707 */ 6708 static unsigned int 6709 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len) 6710 { 6711 if (strchr(clp->cl_ipaddr, ':') != NULL) 6712 return scnprintf(buf, len, "tcp6"); 6713 else 6714 return scnprintf(buf, len, "tcp"); 6715 } 6716 6717 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata) 6718 { 6719 struct nfs4_setclientid *sc = calldata; 6720 6721 if (task->tk_status == 0) 6722 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred); 6723 } 6724 6725 static const struct rpc_call_ops nfs4_setclientid_ops = { 6726 .rpc_call_done = nfs4_setclientid_done, 6727 }; 6728 6729 /** 6730 * nfs4_proc_setclientid - Negotiate client ID 6731 * @clp: state data structure 6732 * @program: RPC program for NFSv4 callback service 6733 * @port: IP port number for NFS4 callback service 6734 * @cred: credential to use for this call 6735 * @res: where to place the result 6736 * 6737 * Returns zero, a negative errno, or a negative NFS4ERR status code. 6738 */ 6739 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, 6740 unsigned short port, const struct cred *cred, 6741 struct nfs4_setclientid_res *res) 6742 { 6743 nfs4_verifier sc_verifier; 6744 struct nfs4_setclientid setclientid = { 6745 .sc_verifier = &sc_verifier, 6746 .sc_prog = program, 6747 .sc_clnt = clp, 6748 }; 6749 struct rpc_message msg = { 6750 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID], 6751 .rpc_argp = &setclientid, 6752 .rpc_resp = res, 6753 .rpc_cred = cred, 6754 }; 6755 struct rpc_task_setup task_setup_data = { 6756 .rpc_client = clp->cl_rpcclient, 6757 .rpc_message = &msg, 6758 .callback_ops = &nfs4_setclientid_ops, 6759 .callback_data = &setclientid, 6760 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN, 6761 }; 6762 unsigned long now = jiffies; 6763 int status; 6764 6765 /* nfs_client_id4 */ 6766 nfs4_init_boot_verifier(clp, &sc_verifier); 6767 6768 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags)) 6769 status = nfs4_init_uniform_client_string(clp); 6770 else 6771 status = nfs4_init_nonuniform_client_string(clp); 6772 6773 if (status) 6774 goto out; 6775 6776 /* cb_client4 */ 6777 setclientid.sc_netid_len = 6778 nfs4_init_callback_netid(clp, 6779 setclientid.sc_netid, 6780 sizeof(setclientid.sc_netid)); 6781 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr, 6782 sizeof(setclientid.sc_uaddr), "%s.%u.%u", 6783 clp->cl_ipaddr, port >> 8, port & 255); 6784 6785 dprintk("NFS call setclientid auth=%s, '%s'\n", 6786 clp->cl_rpcclient->cl_auth->au_ops->au_name, 6787 clp->cl_owner_id); 6788 6789 status = nfs4_call_sync_custom(&task_setup_data); 6790 if (setclientid.sc_cred) { 6791 kfree(clp->cl_acceptor); 6792 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred); 6793 put_rpccred(setclientid.sc_cred); 6794 } 6795 6796 if (status == 0) 6797 do_renew_lease(clp, now); 6798 out: 6799 trace_nfs4_setclientid(clp, status); 6800 dprintk("NFS reply 
setclientid: %d\n", status); 6801 return status; 6802 } 6803 6804 /** 6805 * nfs4_proc_setclientid_confirm - Confirm client ID 6806 * @clp: state data structure 6807 * @arg: result of a previous SETCLIENTID 6808 * @cred: credential to use for this call 6809 * 6810 * Returns zero, a negative errno, or a negative NFS4ERR status code. 6811 */ 6812 int nfs4_proc_setclientid_confirm(struct nfs_client *clp, 6813 struct nfs4_setclientid_res *arg, 6814 const struct cred *cred) 6815 { 6816 struct rpc_message msg = { 6817 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM], 6818 .rpc_argp = arg, 6819 .rpc_cred = cred, 6820 }; 6821 int status; 6822 6823 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n", 6824 clp->cl_rpcclient->cl_auth->au_ops->au_name, 6825 clp->cl_clientid); 6826 status = rpc_call_sync(clp->cl_rpcclient, &msg, 6827 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 6828 trace_nfs4_setclientid_confirm(clp, status); 6829 dprintk("NFS reply setclientid_confirm: %d\n", status); 6830 return status; 6831 } 6832 6833 struct nfs4_delegreturndata { 6834 struct nfs4_delegreturnargs args; 6835 struct nfs4_delegreturnres res; 6836 struct nfs_fh fh; 6837 nfs4_stateid stateid; 6838 unsigned long timestamp; 6839 unsigned short retrans; 6840 struct { 6841 struct nfs4_layoutreturn_args arg; 6842 struct nfs4_layoutreturn_res res; 6843 struct nfs4_xdr_opaque_data ld_private; 6844 u32 roc_barrier; 6845 bool roc; 6846 } lr; 6847 struct nfs4_delegattr sattr; 6848 struct nfs_fattr fattr; 6849 int rpc_status; 6850 struct inode *inode; 6851 }; 6852 6853 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) 6854 { 6855 struct nfs4_delegreturndata *data = calldata; 6856 struct nfs4_exception exception = { 6857 .inode = data->inode, 6858 .stateid = &data->stateid, 6859 .task_is_privileged = data->args.seq_args.sa_privileged, 6860 .retrans = data->retrans, 6861 }; 6862 6863 if (!nfs4_sequence_done(task, &data->res.seq_res)) 6864 return; 6865 6866 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status); 6867 6868 /* Handle Layoutreturn errors */ 6869 if (pnfs_roc_done(task, &data->args.lr_args, &data->res.lr_res, 6870 &data->res.lr_ret) == -EAGAIN) 6871 goto out_restart; 6872 6873 if (data->args.sattr_args && task->tk_status != 0) { 6874 switch(data->res.sattr_ret) { 6875 case 0: 6876 data->args.sattr_args = NULL; 6877 data->res.sattr_res = false; 6878 break; 6879 case -NFS4ERR_ADMIN_REVOKED: 6880 case -NFS4ERR_DELEG_REVOKED: 6881 case -NFS4ERR_EXPIRED: 6882 case -NFS4ERR_BAD_STATEID: 6883 /* Let the main handler below do stateid recovery */ 6884 break; 6885 case -NFS4ERR_OLD_STATEID: 6886 if (nfs4_refresh_delegation_stateid(&data->stateid, 6887 data->inode)) 6888 goto out_restart; 6889 fallthrough; 6890 default: 6891 data->args.sattr_args = NULL; 6892 data->res.sattr_res = false; 6893 goto out_restart; 6894 } 6895 } 6896 6897 switch (task->tk_status) { 6898 case 0: 6899 renew_lease(data->res.server, data->timestamp); 6900 break; 6901 case -NFS4ERR_ADMIN_REVOKED: 6902 case -NFS4ERR_DELEG_REVOKED: 6903 case -NFS4ERR_EXPIRED: 6904 nfs4_free_revoked_stateid(data->res.server, 6905 data->args.stateid, 6906 task->tk_msg.rpc_cred); 6907 fallthrough; 6908 case -NFS4ERR_BAD_STATEID: 6909 case -NFS4ERR_STALE_STATEID: 6910 case -ETIMEDOUT: 6911 task->tk_status = 0; 6912 break; 6913 case -NFS4ERR_OLD_STATEID: 6914 if (!nfs4_refresh_delegation_stateid(&data->stateid, data->inode)) 6915 nfs4_stateid_seqid_inc(&data->stateid); 6916 if (data->args.bitmask) { 6917 
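			/* Drop the attribute request from the compound
			 * before retrying the DELEGRETURN. */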
data->args.bitmask = NULL; 6918 data->res.fattr = NULL; 6919 } 6920 goto out_restart; 6921 case -NFS4ERR_ACCESS: 6922 if (data->args.bitmask) { 6923 data->args.bitmask = NULL; 6924 data->res.fattr = NULL; 6925 goto out_restart; 6926 } 6927 fallthrough; 6928 default: 6929 task->tk_status = nfs4_async_handle_exception(task, 6930 data->res.server, task->tk_status, 6931 &exception); 6932 data->retrans = exception.retrans; 6933 if (exception.retry) 6934 goto out_restart; 6935 } 6936 nfs_delegation_mark_returned(data->inode, data->args.stateid); 6937 data->rpc_status = task->tk_status; 6938 return; 6939 out_restart: 6940 task->tk_status = 0; 6941 rpc_restart_call_prepare(task); 6942 } 6943 6944 static void nfs4_delegreturn_release(void *calldata) 6945 { 6946 struct nfs4_delegreturndata *data = calldata; 6947 struct inode *inode = data->inode; 6948 6949 if (data->lr.roc) 6950 pnfs_roc_release(&data->lr.arg, &data->lr.res, 6951 data->res.lr_ret); 6952 if (inode) { 6953 nfs4_fattr_set_prechange(&data->fattr, 6954 inode_peek_iversion_raw(inode)); 6955 nfs_refresh_inode(inode, &data->fattr); 6956 nfs_iput_and_deactive(inode); 6957 } 6958 kfree(calldata); 6959 } 6960 6961 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) 6962 { 6963 struct nfs4_delegreturndata *d_data; 6964 struct pnfs_layout_hdr *lo; 6965 6966 d_data = data; 6967 6968 if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) { 6969 nfs4_sequence_done(task, &d_data->res.seq_res); 6970 return; 6971 } 6972 6973 lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL; 6974 if (lo && !pnfs_layout_is_valid(lo)) { 6975 d_data->args.lr_args = NULL; 6976 d_data->res.lr_res = NULL; 6977 } 6978 6979 nfs4_setup_sequence(d_data->res.server->nfs_client, 6980 &d_data->args.seq_args, 6981 &d_data->res.seq_res, 6982 task); 6983 } 6984 6985 static const struct rpc_call_ops nfs4_delegreturn_ops = { 6986 .rpc_call_prepare = nfs4_delegreturn_prepare, 6987 .rpc_call_done = nfs4_delegreturn_done, 6988 .rpc_release = nfs4_delegreturn_release, 6989 }; 6990 6991 static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, 6992 const nfs4_stateid *stateid, 6993 struct nfs_delegation *delegation, 6994 int issync) 6995 { 6996 struct nfs4_delegreturndata *data; 6997 struct nfs_server *server = NFS_SERVER(inode); 6998 struct rpc_task *task; 6999 struct rpc_message msg = { 7000 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN], 7001 .rpc_cred = cred, 7002 }; 7003 struct rpc_task_setup task_setup_data = { 7004 .rpc_client = server->client, 7005 .rpc_message = &msg, 7006 .callback_ops = &nfs4_delegreturn_ops, 7007 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT, 7008 }; 7009 int status = 0; 7010 7011 if (nfs_server_capable(inode, NFS_CAP_MOVEABLE)) 7012 task_setup_data.flags |= RPC_TASK_MOVEABLE; 7013 7014 data = kzalloc(sizeof(*data), GFP_KERNEL); 7015 if (data == NULL) 7016 return -ENOMEM; 7017 7018 nfs4_state_protect(server->nfs_client, 7019 NFS_SP4_MACH_CRED_CLEANUP, 7020 &task_setup_data.rpc_client, &msg); 7021 7022 data->args.fhandle = &data->fh; 7023 data->args.stateid = &data->stateid; 7024 nfs4_bitmask_set(data->args.bitmask_store, 7025 server->cache_consistency_bitmask, inode, 0); 7026 data->args.bitmask = data->args.bitmask_store; 7027 nfs_copy_fh(&data->fh, NFS_FH(inode)); 7028 nfs4_stateid_copy(&data->stateid, stateid); 7029 data->res.fattr = &data->fattr; 7030 data->res.server = server; 7031 data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT; 7032 data->lr.arg.ld_private = &data->lr.ld_private; 
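	/* A LAYOUTRETURN is piggy-backed on this DELEGRETURN below only if
	 * pnfs_roc() reports a return-on-close candidate. */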
7033 nfs_fattr_init(data->res.fattr); 7034 data->timestamp = jiffies; 7035 data->rpc_status = 0; 7036 data->inode = nfs_igrab_and_active(inode); 7037 if (data->inode || issync) { 7038 data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res, 7039 cred, issync); 7040 if (data->lr.roc) { 7041 data->args.lr_args = &data->lr.arg; 7042 data->res.lr_res = &data->lr.res; 7043 } 7044 } 7045 7046 if (delegation && 7047 test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) { 7048 if (delegation->type & FMODE_READ) { 7049 data->sattr.atime = inode_get_atime(inode); 7050 data->sattr.atime_set = true; 7051 } 7052 if (delegation->type & FMODE_WRITE) { 7053 data->sattr.mtime = inode_get_mtime(inode); 7054 data->sattr.mtime_set = true; 7055 } 7056 data->args.sattr_args = &data->sattr; 7057 data->res.sattr_res = true; 7058 } 7059 7060 if (!data->inode) 7061 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 7062 1); 7063 else 7064 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 7065 0); 7066 7067 task_setup_data.callback_data = data; 7068 msg.rpc_argp = &data->args; 7069 msg.rpc_resp = &data->res; 7070 task = rpc_run_task(&task_setup_data); 7071 if (IS_ERR(task)) 7072 return PTR_ERR(task); 7073 if (!issync) 7074 goto out; 7075 status = rpc_wait_for_completion_task(task); 7076 if (status != 0) 7077 goto out; 7078 status = data->rpc_status; 7079 out: 7080 rpc_put_task(task); 7081 return status; 7082 } 7083 7084 int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, 7085 const nfs4_stateid *stateid, 7086 struct nfs_delegation *delegation, int issync) 7087 { 7088 struct nfs_server *server = NFS_SERVER(inode); 7089 struct nfs4_exception exception = { }; 7090 int err; 7091 do { 7092 err = _nfs4_proc_delegreturn(inode, cred, stateid, 7093 delegation, issync); 7094 trace_nfs4_delegreturn(inode, stateid, err); 7095 switch (err) { 7096 case -NFS4ERR_STALE_STATEID: 7097 case -NFS4ERR_EXPIRED: 7098 case 0: 7099 return 0; 7100 } 7101 err = nfs4_handle_exception(server, err, &exception); 7102 } while (exception.retry); 7103 return err; 7104 } 7105 7106 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7107 { 7108 struct inode *inode = state->inode; 7109 struct nfs_server *server = NFS_SERVER(inode); 7110 struct nfs_client *clp = server->nfs_client; 7111 struct nfs_lockt_args arg = { 7112 .fh = NFS_FH(inode), 7113 .fl = request, 7114 }; 7115 struct nfs_lockt_res res = { 7116 .denied = request, 7117 }; 7118 struct rpc_message msg = { 7119 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT], 7120 .rpc_argp = &arg, 7121 .rpc_resp = &res, 7122 .rpc_cred = state->owner->so_cred, 7123 }; 7124 struct nfs4_lock_state *lsp; 7125 int status; 7126 7127 arg.lock_owner.clientid = clp->cl_clientid; 7128 status = nfs4_set_lock_state(state, request); 7129 if (status != 0) 7130 goto out; 7131 lsp = request->fl_u.nfs4_fl.owner; 7132 arg.lock_owner.id = lsp->ls_seqid.owner_id; 7133 arg.lock_owner.s_dev = server->s_dev; 7134 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 7135 switch (status) { 7136 case 0: 7137 request->c.flc_type = F_UNLCK; 7138 break; 7139 case -NFS4ERR_DENIED: 7140 status = 0; 7141 } 7142 request->fl_ops->fl_release_private(request); 7143 request->fl_ops = NULL; 7144 out: 7145 return status; 7146 } 7147 7148 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7149 { 7150 struct nfs4_exception exception = { 7151 .interruptible = true, 7152 }; 7153 int err; 
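	/* A conflicting lock reported by the server (NFS4ERR_DENIED) is
	 * decoded back into *request; "no conflict" is signalled by setting
	 * the lock type to F_UNLCK. */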
7154 7155 do { 7156 err = _nfs4_proc_getlk(state, cmd, request); 7157 trace_nfs4_get_lock(request, state, cmd, err); 7158 err = nfs4_handle_exception(NFS_SERVER(state->inode), err, 7159 &exception); 7160 } while (exception.retry); 7161 return err; 7162 } 7163 7164 /* 7165 * Update the seqid of a lock stateid after receiving 7166 * NFS4ERR_OLD_STATEID 7167 */ 7168 static bool nfs4_refresh_lock_old_stateid(nfs4_stateid *dst, 7169 struct nfs4_lock_state *lsp) 7170 { 7171 struct nfs4_state *state = lsp->ls_state; 7172 bool ret = false; 7173 7174 spin_lock(&state->state_lock); 7175 if (!nfs4_stateid_match_other(dst, &lsp->ls_stateid)) 7176 goto out; 7177 if (!nfs4_stateid_is_newer(&lsp->ls_stateid, dst)) 7178 nfs4_stateid_seqid_inc(dst); 7179 else 7180 dst->seqid = lsp->ls_stateid.seqid; 7181 ret = true; 7182 out: 7183 spin_unlock(&state->state_lock); 7184 return ret; 7185 } 7186 7187 static bool nfs4_sync_lock_stateid(nfs4_stateid *dst, 7188 struct nfs4_lock_state *lsp) 7189 { 7190 struct nfs4_state *state = lsp->ls_state; 7191 bool ret; 7192 7193 spin_lock(&state->state_lock); 7194 ret = !nfs4_stateid_match_other(dst, &lsp->ls_stateid); 7195 nfs4_stateid_copy(dst, &lsp->ls_stateid); 7196 spin_unlock(&state->state_lock); 7197 return ret; 7198 } 7199 7200 struct nfs4_unlockdata { 7201 struct nfs_locku_args arg; 7202 struct nfs_locku_res res; 7203 struct nfs4_lock_state *lsp; 7204 struct nfs_open_context *ctx; 7205 struct nfs_lock_context *l_ctx; 7206 struct file_lock fl; 7207 struct nfs_server *server; 7208 unsigned long timestamp; 7209 unsigned short retrans; 7210 }; 7211 7212 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, 7213 struct nfs_open_context *ctx, 7214 struct nfs4_lock_state *lsp, 7215 struct nfs_seqid *seqid) 7216 { 7217 struct nfs4_unlockdata *p; 7218 struct nfs4_state *state = lsp->ls_state; 7219 struct inode *inode = state->inode; 7220 struct nfs_lock_context *l_ctx; 7221 7222 p = kzalloc(sizeof(*p), GFP_KERNEL); 7223 if (p == NULL) 7224 return NULL; 7225 l_ctx = nfs_get_lock_context(ctx); 7226 if (!IS_ERR(l_ctx)) { 7227 p->l_ctx = l_ctx; 7228 } else { 7229 kfree(p); 7230 return NULL; 7231 } 7232 p->arg.fh = NFS_FH(inode); 7233 p->arg.fl = &p->fl; 7234 p->arg.seqid = seqid; 7235 p->res.seqid = seqid; 7236 p->lsp = lsp; 7237 /* Ensure we don't close file until we're done freeing locks! 
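	 * Holding the open context keeps the open state alive until
	 * nfs4_locku_release_calldata() drops it again.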
*/ 7238 p->ctx = get_nfs_open_context(ctx); 7239 locks_init_lock(&p->fl); 7240 locks_copy_lock(&p->fl, fl); 7241 p->server = NFS_SERVER(inode); 7242 spin_lock(&state->state_lock); 7243 nfs4_stateid_copy(&p->arg.stateid, &lsp->ls_stateid); 7244 spin_unlock(&state->state_lock); 7245 return p; 7246 } 7247 7248 static void nfs4_locku_release_calldata(void *data) 7249 { 7250 struct nfs4_unlockdata *calldata = data; 7251 nfs_free_seqid(calldata->arg.seqid); 7252 nfs4_put_lock_state(calldata->lsp); 7253 nfs_put_lock_context(calldata->l_ctx); 7254 put_nfs_open_context(calldata->ctx); 7255 kfree(calldata); 7256 } 7257 7258 static void nfs4_locku_done(struct rpc_task *task, void *data) 7259 { 7260 struct nfs4_unlockdata *calldata = data; 7261 struct nfs4_exception exception = { 7262 .inode = calldata->lsp->ls_state->inode, 7263 .stateid = &calldata->arg.stateid, 7264 .retrans = calldata->retrans, 7265 }; 7266 7267 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 7268 return; 7269 switch (task->tk_status) { 7270 case 0: 7271 renew_lease(calldata->server, calldata->timestamp); 7272 locks_lock_inode_wait(calldata->lsp->ls_state->inode, &calldata->fl); 7273 if (nfs4_update_lock_stateid(calldata->lsp, 7274 &calldata->res.stateid)) 7275 break; 7276 fallthrough; 7277 case -NFS4ERR_ADMIN_REVOKED: 7278 case -NFS4ERR_EXPIRED: 7279 nfs4_free_revoked_stateid(calldata->server, 7280 &calldata->arg.stateid, 7281 task->tk_msg.rpc_cred); 7282 fallthrough; 7283 case -NFS4ERR_BAD_STATEID: 7284 case -NFS4ERR_STALE_STATEID: 7285 if (nfs4_sync_lock_stateid(&calldata->arg.stateid, 7286 calldata->lsp)) 7287 rpc_restart_call_prepare(task); 7288 break; 7289 case -NFS4ERR_OLD_STATEID: 7290 if (nfs4_refresh_lock_old_stateid(&calldata->arg.stateid, 7291 calldata->lsp)) 7292 rpc_restart_call_prepare(task); 7293 break; 7294 default: 7295 task->tk_status = nfs4_async_handle_exception(task, 7296 calldata->server, task->tk_status, 7297 &exception); 7298 calldata->retrans = exception.retrans; 7299 if (exception.retry) 7300 rpc_restart_call_prepare(task); 7301 } 7302 nfs_release_seqid(calldata->arg.seqid); 7303 } 7304 7305 static void nfs4_locku_prepare(struct rpc_task *task, void *data) 7306 { 7307 struct nfs4_unlockdata *calldata = data; 7308 7309 if (test_bit(NFS_CONTEXT_UNLOCK, &calldata->l_ctx->open_context->flags) && 7310 nfs_async_iocounter_wait(task, calldata->l_ctx)) 7311 return; 7312 7313 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 7314 goto out_wait; 7315 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) { 7316 /* Note: exit _without_ running nfs4_locku_done */ 7317 goto out_no_action; 7318 } 7319 calldata->timestamp = jiffies; 7320 if (nfs4_setup_sequence(calldata->server->nfs_client, 7321 &calldata->arg.seq_args, 7322 &calldata->res.seq_res, 7323 task) != 0) 7324 nfs_release_seqid(calldata->arg.seqid); 7325 return; 7326 out_no_action: 7327 task->tk_action = NULL; 7328 out_wait: 7329 nfs4_sequence_done(task, &calldata->res.seq_res); 7330 } 7331 7332 static const struct rpc_call_ops nfs4_locku_ops = { 7333 .rpc_call_prepare = nfs4_locku_prepare, 7334 .rpc_call_done = nfs4_locku_done, 7335 .rpc_release = nfs4_locku_release_calldata, 7336 }; 7337 7338 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, 7339 struct nfs_open_context *ctx, 7340 struct nfs4_lock_state *lsp, 7341 struct nfs_seqid *seqid) 7342 { 7343 struct nfs4_unlockdata *data; 7344 struct rpc_message msg = { 7345 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU], 7346 .rpc_cred = ctx->cred, 7347 }; 7348 struct 
rpc_task_setup task_setup_data = { 7349 .rpc_client = NFS_CLIENT(lsp->ls_state->inode), 7350 .rpc_message = &msg, 7351 .callback_ops = &nfs4_locku_ops, 7352 .workqueue = nfsiod_workqueue, 7353 .flags = RPC_TASK_ASYNC, 7354 }; 7355 7356 if (nfs_server_capable(lsp->ls_state->inode, NFS_CAP_MOVEABLE)) 7357 task_setup_data.flags |= RPC_TASK_MOVEABLE; 7358 7359 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client, 7360 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg); 7361 7362 /* Ensure this is an unlock - when canceling a lock, the 7363 * canceled lock is passed in, and it won't be an unlock. 7364 */ 7365 fl->c.flc_type = F_UNLCK; 7366 if (fl->c.flc_flags & FL_CLOSE) 7367 set_bit(NFS_CONTEXT_UNLOCK, &ctx->flags); 7368 7369 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); 7370 if (data == NULL) { 7371 nfs_free_seqid(seqid); 7372 return ERR_PTR(-ENOMEM); 7373 } 7374 7375 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 0); 7376 msg.rpc_argp = &data->arg; 7377 msg.rpc_resp = &data->res; 7378 task_setup_data.callback_data = data; 7379 return rpc_run_task(&task_setup_data); 7380 } 7381 7382 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) 7383 { 7384 struct inode *inode = state->inode; 7385 struct nfs4_state_owner *sp = state->owner; 7386 struct nfs_inode *nfsi = NFS_I(inode); 7387 struct nfs_seqid *seqid; 7388 struct nfs4_lock_state *lsp; 7389 struct rpc_task *task; 7390 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 7391 int status = 0; 7392 unsigned char saved_flags = request->c.flc_flags; 7393 7394 status = nfs4_set_lock_state(state, request); 7395 /* Unlock _before_ we do the RPC call */ 7396 request->c.flc_flags |= FL_EXISTS; 7397 /* Exclude nfs_delegation_claim_locks() */ 7398 mutex_lock(&sp->so_delegreturn_mutex); 7399 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */ 7400 down_read(&nfsi->rwsem); 7401 if (locks_lock_inode_wait(inode, request) == -ENOENT) { 7402 up_read(&nfsi->rwsem); 7403 mutex_unlock(&sp->so_delegreturn_mutex); 7404 goto out; 7405 } 7406 lsp = request->fl_u.nfs4_fl.owner; 7407 set_bit(NFS_LOCK_UNLOCKING, &lsp->ls_flags); 7408 up_read(&nfsi->rwsem); 7409 mutex_unlock(&sp->so_delegreturn_mutex); 7410 if (status != 0) 7411 goto out; 7412 /* Is this a delegated lock? 
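	 * If the lock was only ever held locally under a delegation
	 * (NFS_LOCK_INITIALIZED never set), there is no lock state on the
	 * server to release, so no LOCKU needs to be sent.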
*/ 7413 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0) 7414 goto out; 7415 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid; 7416 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); 7417 status = -ENOMEM; 7418 if (IS_ERR(seqid)) 7419 goto out; 7420 task = nfs4_do_unlck(request, 7421 nfs_file_open_context(request->c.flc_file), 7422 lsp, seqid); 7423 status = PTR_ERR(task); 7424 if (IS_ERR(task)) 7425 goto out; 7426 status = rpc_wait_for_completion_task(task); 7427 rpc_put_task(task); 7428 out: 7429 request->c.flc_flags = saved_flags; 7430 trace_nfs4_unlock(request, state, F_SETLK, status); 7431 return status; 7432 } 7433 7434 struct nfs4_lockdata { 7435 struct nfs_lock_args arg; 7436 struct nfs_lock_res res; 7437 struct nfs4_lock_state *lsp; 7438 struct nfs_open_context *ctx; 7439 struct file_lock fl; 7440 unsigned long timestamp; 7441 int rpc_status; 7442 int cancelled; 7443 struct nfs_server *server; 7444 }; 7445 7446 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, 7447 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp, 7448 gfp_t gfp_mask) 7449 { 7450 struct nfs4_lockdata *p; 7451 struct inode *inode = lsp->ls_state->inode; 7452 struct nfs_server *server = NFS_SERVER(inode); 7453 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 7454 7455 p = kzalloc(sizeof(*p), gfp_mask); 7456 if (p == NULL) 7457 return NULL; 7458 7459 p->arg.fh = NFS_FH(inode); 7460 p->arg.fl = &p->fl; 7461 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask); 7462 if (IS_ERR(p->arg.open_seqid)) 7463 goto out_free; 7464 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 7465 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask); 7466 if (IS_ERR(p->arg.lock_seqid)) 7467 goto out_free_seqid; 7468 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; 7469 p->arg.lock_owner.id = lsp->ls_seqid.owner_id; 7470 p->arg.lock_owner.s_dev = server->s_dev; 7471 p->res.lock_seqid = p->arg.lock_seqid; 7472 p->lsp = lsp; 7473 p->server = server; 7474 p->ctx = get_nfs_open_context(ctx); 7475 locks_init_lock(&p->fl); 7476 locks_copy_lock(&p->fl, fl); 7477 return p; 7478 out_free_seqid: 7479 nfs_free_seqid(p->arg.open_seqid); 7480 out_free: 7481 kfree(p); 7482 return NULL; 7483 } 7484 7485 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) 7486 { 7487 struct nfs4_lockdata *data = calldata; 7488 struct nfs4_state *state = data->lsp->ls_state; 7489 7490 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) 7491 goto out_wait; 7492 /* Do we need to do an open_to_lock_owner? 
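	 * The first LOCK sent for a lock owner carries the open stateid and
	 * open seqid (open_to_lock_owner4); subsequent LOCKs for the same
	 * owner reference the existing lock stateid instead.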
*/ 7493 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) { 7494 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) { 7495 goto out_release_lock_seqid; 7496 } 7497 nfs4_stateid_copy(&data->arg.open_stateid, 7498 &state->open_stateid); 7499 data->arg.new_lock_owner = 1; 7500 data->res.open_seqid = data->arg.open_seqid; 7501 } else { 7502 data->arg.new_lock_owner = 0; 7503 nfs4_stateid_copy(&data->arg.lock_stateid, 7504 &data->lsp->ls_stateid); 7505 } 7506 if (!nfs4_valid_open_stateid(state)) { 7507 data->rpc_status = -EBADF; 7508 task->tk_action = NULL; 7509 goto out_release_open_seqid; 7510 } 7511 data->timestamp = jiffies; 7512 if (nfs4_setup_sequence(data->server->nfs_client, 7513 &data->arg.seq_args, 7514 &data->res.seq_res, 7515 task) == 0) 7516 return; 7517 out_release_open_seqid: 7518 nfs_release_seqid(data->arg.open_seqid); 7519 out_release_lock_seqid: 7520 nfs_release_seqid(data->arg.lock_seqid); 7521 out_wait: 7522 nfs4_sequence_done(task, &data->res.seq_res); 7523 dprintk("%s: ret = %d\n", __func__, data->rpc_status); 7524 } 7525 7526 static void nfs4_lock_done(struct rpc_task *task, void *calldata) 7527 { 7528 struct nfs4_lockdata *data = calldata; 7529 struct nfs4_lock_state *lsp = data->lsp; 7530 7531 if (!nfs4_sequence_done(task, &data->res.seq_res)) 7532 return; 7533 7534 data->rpc_status = task->tk_status; 7535 switch (task->tk_status) { 7536 case 0: 7537 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)), 7538 data->timestamp); 7539 if (data->arg.new_lock && !data->cancelled) { 7540 data->fl.c.flc_flags &= ~(FL_SLEEP | FL_ACCESS); 7541 if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0) 7542 goto out_restart; 7543 } 7544 if (data->arg.new_lock_owner != 0) { 7545 nfs_confirm_seqid(&lsp->ls_seqid, 0); 7546 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid); 7547 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 7548 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid)) 7549 goto out_restart; 7550 break; 7551 case -NFS4ERR_OLD_STATEID: 7552 if (data->arg.new_lock_owner != 0 && 7553 nfs4_refresh_open_old_stateid(&data->arg.open_stateid, 7554 lsp->ls_state)) 7555 goto out_restart; 7556 if (nfs4_refresh_lock_old_stateid(&data->arg.lock_stateid, lsp)) 7557 goto out_restart; 7558 fallthrough; 7559 case -NFS4ERR_BAD_STATEID: 7560 case -NFS4ERR_STALE_STATEID: 7561 case -NFS4ERR_EXPIRED: 7562 if (data->arg.new_lock_owner != 0) { 7563 if (!nfs4_stateid_match(&data->arg.open_stateid, 7564 &lsp->ls_state->open_stateid)) 7565 goto out_restart; 7566 } else if (!nfs4_stateid_match(&data->arg.lock_stateid, 7567 &lsp->ls_stateid)) 7568 goto out_restart; 7569 } 7570 out_done: 7571 dprintk("%s: ret = %d!\n", __func__, data->rpc_status); 7572 return; 7573 out_restart: 7574 if (!data->cancelled) 7575 rpc_restart_call_prepare(task); 7576 goto out_done; 7577 } 7578 7579 static void nfs4_lock_release(void *calldata) 7580 { 7581 struct nfs4_lockdata *data = calldata; 7582 7583 nfs_free_seqid(data->arg.open_seqid); 7584 if (data->cancelled && data->rpc_status == 0) { 7585 struct rpc_task *task; 7586 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, 7587 data->arg.lock_seqid); 7588 if (!IS_ERR(task)) 7589 rpc_put_task_async(task); 7590 dprintk("%s: cancelling lock!\n", __func__); 7591 } else 7592 nfs_free_seqid(data->arg.lock_seqid); 7593 nfs4_put_lock_state(data->lsp); 7594 put_nfs_open_context(data->ctx); 7595 kfree(data); 7596 } 7597 7598 static const struct rpc_call_ops nfs4_lock_ops = { 7599 .rpc_call_prepare = nfs4_lock_prepare, 7600 .rpc_call_done = 
nfs4_lock_done, 7601 .rpc_release = nfs4_lock_release, 7602 }; 7603 7604 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) 7605 { 7606 switch (error) { 7607 case -NFS4ERR_ADMIN_REVOKED: 7608 case -NFS4ERR_EXPIRED: 7609 case -NFS4ERR_BAD_STATEID: 7610 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 7611 if (new_lock_owner != 0 || 7612 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) 7613 nfs4_schedule_stateid_recovery(server, lsp->ls_state); 7614 break; 7615 case -NFS4ERR_STALE_STATEID: 7616 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 7617 nfs4_schedule_lease_recovery(server->nfs_client); 7618 } 7619 } 7620 7621 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) 7622 { 7623 struct nfs4_lockdata *data; 7624 struct rpc_task *task; 7625 struct rpc_message msg = { 7626 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK], 7627 .rpc_cred = state->owner->so_cred, 7628 }; 7629 struct rpc_task_setup task_setup_data = { 7630 .rpc_client = NFS_CLIENT(state->inode), 7631 .rpc_message = &msg, 7632 .callback_ops = &nfs4_lock_ops, 7633 .workqueue = nfsiod_workqueue, 7634 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 7635 }; 7636 int ret; 7637 7638 if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE)) 7639 task_setup_data.flags |= RPC_TASK_MOVEABLE; 7640 7641 data = nfs4_alloc_lockdata(fl, 7642 nfs_file_open_context(fl->c.flc_file), 7643 fl->fl_u.nfs4_fl.owner, GFP_KERNEL); 7644 if (data == NULL) 7645 return -ENOMEM; 7646 if (IS_SETLKW(cmd)) 7647 data->arg.block = 1; 7648 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 7649 recovery_type > NFS_LOCK_NEW); 7650 msg.rpc_argp = &data->arg; 7651 msg.rpc_resp = &data->res; 7652 task_setup_data.callback_data = data; 7653 if (recovery_type > NFS_LOCK_NEW) { 7654 if (recovery_type == NFS_LOCK_RECLAIM) 7655 data->arg.reclaim = NFS_LOCK_RECLAIM; 7656 } else 7657 data->arg.new_lock = 1; 7658 task = rpc_run_task(&task_setup_data); 7659 if (IS_ERR(task)) 7660 return PTR_ERR(task); 7661 ret = rpc_wait_for_completion_task(task); 7662 if (ret == 0) { 7663 ret = data->rpc_status; 7664 if (ret) 7665 nfs4_handle_setlk_error(data->server, data->lsp, 7666 data->arg.new_lock_owner, ret); 7667 } else 7668 data->cancelled = true; 7669 trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret); 7670 rpc_put_task(task); 7671 dprintk("%s: ret = %d\n", __func__, ret); 7672 return ret; 7673 } 7674 7675 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) 7676 { 7677 struct nfs_server *server = NFS_SERVER(state->inode); 7678 struct nfs4_exception exception = { 7679 .inode = state->inode, 7680 }; 7681 int err; 7682 7683 do { 7684 /* Cache the lock if possible... 
*/ 7685 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 7686 return 0; 7687 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM); 7688 if (err != -NFS4ERR_DELAY) 7689 break; 7690 nfs4_handle_exception(server, err, &exception); 7691 } while (exception.retry); 7692 return err; 7693 } 7694 7695 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) 7696 { 7697 struct nfs_server *server = NFS_SERVER(state->inode); 7698 struct nfs4_exception exception = { 7699 .inode = state->inode, 7700 }; 7701 int err; 7702 7703 err = nfs4_set_lock_state(state, request); 7704 if (err != 0) 7705 return err; 7706 if (!recover_lost_locks) { 7707 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags); 7708 return 0; 7709 } 7710 do { 7711 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 7712 return 0; 7713 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED); 7714 switch (err) { 7715 default: 7716 goto out; 7717 case -NFS4ERR_GRACE: 7718 case -NFS4ERR_DELAY: 7719 nfs4_handle_exception(server, err, &exception); 7720 err = 0; 7721 } 7722 } while (exception.retry); 7723 out: 7724 return err; 7725 } 7726 7727 #if defined(CONFIG_NFS_V4_1) 7728 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request) 7729 { 7730 struct nfs4_lock_state *lsp; 7731 int status; 7732 7733 status = nfs4_set_lock_state(state, request); 7734 if (status != 0) 7735 return status; 7736 lsp = request->fl_u.nfs4_fl.owner; 7737 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) || 7738 test_bit(NFS_LOCK_LOST, &lsp->ls_flags)) 7739 return 0; 7740 return nfs4_lock_expired(state, request); 7741 } 7742 #endif 7743 7744 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7745 { 7746 struct nfs_inode *nfsi = NFS_I(state->inode); 7747 struct nfs4_state_owner *sp = state->owner; 7748 unsigned char flags = request->c.flc_flags; 7749 int status; 7750 7751 request->c.flc_flags |= FL_ACCESS; 7752 status = locks_lock_inode_wait(state->inode, request); 7753 if (status < 0) 7754 goto out; 7755 mutex_lock(&sp->so_delegreturn_mutex); 7756 down_read(&nfsi->rwsem); 7757 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { 7758 /* Yes: cache locks! */ 7759 /* ...but avoid races with delegation recall... 
*/ 7760 request->c.flc_flags = flags & ~FL_SLEEP; 7761 status = locks_lock_inode_wait(state->inode, request); 7762 up_read(&nfsi->rwsem); 7763 mutex_unlock(&sp->so_delegreturn_mutex); 7764 goto out; 7765 } 7766 up_read(&nfsi->rwsem); 7767 mutex_unlock(&sp->so_delegreturn_mutex); 7768 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); 7769 out: 7770 request->c.flc_flags = flags; 7771 return status; 7772 } 7773 7774 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7775 { 7776 struct nfs4_exception exception = { 7777 .state = state, 7778 .inode = state->inode, 7779 .interruptible = true, 7780 }; 7781 int err; 7782 7783 do { 7784 err = _nfs4_proc_setlk(state, cmd, request); 7785 if (err == -NFS4ERR_DENIED) 7786 err = -EAGAIN; 7787 err = nfs4_handle_exception(NFS_SERVER(state->inode), 7788 err, &exception); 7789 } while (exception.retry); 7790 return err; 7791 } 7792 7793 #define NFS4_LOCK_MINTIMEOUT (1 * HZ) 7794 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) 7795 7796 static int 7797 nfs4_retry_setlk_simple(struct nfs4_state *state, int cmd, 7798 struct file_lock *request) 7799 { 7800 int status = -ERESTARTSYS; 7801 unsigned long timeout = NFS4_LOCK_MINTIMEOUT; 7802 7803 while(!signalled()) { 7804 status = nfs4_proc_setlk(state, cmd, request); 7805 if ((status != -EAGAIN) || IS_SETLK(cmd)) 7806 break; 7807 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE); 7808 schedule_timeout(timeout); 7809 timeout *= 2; 7810 timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout); 7811 status = -ERESTARTSYS; 7812 } 7813 return status; 7814 } 7815 7816 #ifdef CONFIG_NFS_V4_1 7817 struct nfs4_lock_waiter { 7818 struct inode *inode; 7819 struct nfs_lowner owner; 7820 wait_queue_entry_t wait; 7821 }; 7822 7823 static int 7824 nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key) 7825 { 7826 struct nfs4_lock_waiter *waiter = 7827 container_of(wait, struct nfs4_lock_waiter, wait); 7828 7829 /* NULL key means to wake up everyone */ 7830 if (key) { 7831 struct cb_notify_lock_args *cbnl = key; 7832 struct nfs_lowner *lowner = &cbnl->cbnl_owner, 7833 *wowner = &waiter->owner; 7834 7835 /* Only wake if the callback was for the same owner. 
*/ 7836 if (lowner->id != wowner->id || lowner->s_dev != wowner->s_dev) 7837 return 0; 7838 7839 /* Make sure it's for the right inode */ 7840 if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh)) 7841 return 0; 7842 } 7843 7844 return woken_wake_function(wait, mode, flags, key); 7845 } 7846 7847 static int 7848 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7849 { 7850 struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner; 7851 struct nfs_server *server = NFS_SERVER(state->inode); 7852 struct nfs_client *clp = server->nfs_client; 7853 wait_queue_head_t *q = &clp->cl_lock_waitq; 7854 struct nfs4_lock_waiter waiter = { 7855 .inode = state->inode, 7856 .owner = { .clientid = clp->cl_clientid, 7857 .id = lsp->ls_seqid.owner_id, 7858 .s_dev = server->s_dev }, 7859 }; 7860 int status; 7861 7862 /* Don't bother with waitqueue if we don't expect a callback */ 7863 if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags)) 7864 return nfs4_retry_setlk_simple(state, cmd, request); 7865 7866 init_wait(&waiter.wait); 7867 waiter.wait.func = nfs4_wake_lock_waiter; 7868 add_wait_queue(q, &waiter.wait); 7869 7870 do { 7871 status = nfs4_proc_setlk(state, cmd, request); 7872 if (status != -EAGAIN || IS_SETLK(cmd)) 7873 break; 7874 7875 status = -ERESTARTSYS; 7876 wait_woken(&waiter.wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE, 7877 NFS4_LOCK_MAXTIMEOUT); 7878 } while (!signalled()); 7879 7880 remove_wait_queue(q, &waiter.wait); 7881 7882 return status; 7883 } 7884 #else /* !CONFIG_NFS_V4_1 */ 7885 static inline int 7886 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7887 { 7888 return nfs4_retry_setlk_simple(state, cmd, request); 7889 } 7890 #endif 7891 7892 static int 7893 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) 7894 { 7895 struct nfs_open_context *ctx; 7896 struct nfs4_state *state; 7897 int status; 7898 7899 /* verify open state */ 7900 ctx = nfs_file_open_context(filp); 7901 state = ctx->state; 7902 7903 if (IS_GETLK(cmd)) { 7904 if (state != NULL) 7905 return nfs4_proc_getlk(state, F_GETLK, request); 7906 return 0; 7907 } 7908 7909 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd))) 7910 return -EINVAL; 7911 7912 if (lock_is_unlock(request)) { 7913 if (state != NULL) 7914 return nfs4_proc_unlck(state, cmd, request); 7915 return 0; 7916 } 7917 7918 if (state == NULL) 7919 return -ENOLCK; 7920 7921 if ((request->c.flc_flags & FL_POSIX) && 7922 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) 7923 return -ENOLCK; 7924 7925 /* 7926 * Don't rely on the VFS having checked the file open mode, 7927 * since it won't do this for flock() locks. 7928 */ 7929 switch (request->c.flc_type) { 7930 case F_RDLCK: 7931 if (!(filp->f_mode & FMODE_READ)) 7932 return -EBADF; 7933 break; 7934 case F_WRLCK: 7935 if (!(filp->f_mode & FMODE_WRITE)) 7936 return -EBADF; 7937 } 7938 7939 status = nfs4_set_lock_state(state, request); 7940 if (status != 0) 7941 return status; 7942 7943 return nfs4_retry_setlk(state, cmd, request); 7944 } 7945 7946 static int nfs4_delete_lease(struct file *file, void **priv) 7947 { 7948 return generic_setlease(file, F_UNLCK, NULL, priv); 7949 } 7950 7951 static int nfs4_add_lease(struct file *file, int arg, struct file_lease **lease, 7952 void **priv) 7953 { 7954 struct inode *inode = file_inode(file); 7955 fmode_t type = arg == F_RDLCK ? 
FMODE_READ : FMODE_WRITE;
	int ret;

	/* No delegation, no lease */
	if (!nfs4_have_delegation(inode, type, 0))
		return -EAGAIN;
	ret = generic_setlease(file, arg, lease, priv);
	if (ret || nfs4_have_delegation(inode, type, 0))
		return ret;
	/* We raced with a delegation return */
	nfs4_delete_lease(file, priv);
	return -EAGAIN;
}

int nfs4_proc_setlease(struct file *file, int arg, struct file_lease **lease,
		void **priv)
{
	switch (arg) {
	case F_RDLCK:
	case F_WRLCK:
		return nfs4_add_lease(file, arg, lease, priv);
	case F_UNLCK:
		return nfs4_delete_lease(file, priv);
	default:
		return -EINVAL;
	}
}

int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	int err;

	err = nfs4_set_lock_state(state, fl);
	if (err != 0)
		return err;
	do {
		err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
		if (err != -NFS4ERR_DELAY && err != -NFS4ERR_GRACE)
			break;
		ssleep(1);
	} while (err == -NFS4ERR_DELAY || err == -NFS4ERR_GRACE);
	return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err);
}

struct nfs_release_lockowner_data {
	struct nfs4_lock_state *lsp;
	struct nfs_server *server;
	struct nfs_release_lockowner_args args;
	struct nfs_release_lockowner_res res;
	unsigned long timestamp;
};

static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_release_lockowner_data *data = calldata;
	struct nfs_server *server = data->server;
	nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
			    &data->res.seq_res, task);
	data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
	data->timestamp = jiffies;
}

static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
{
	struct nfs_release_lockowner_data *data = calldata;
	struct nfs_server *server = data->server;

	nfs40_sequence_done(task, &data->res.seq_res);

	switch (task->tk_status) {
	case 0:
		renew_lease(server, data->timestamp);
		break;
	case -NFS4ERR_STALE_CLIENTID:
	case -NFS4ERR_EXPIRED:
		nfs4_schedule_lease_recovery(server->nfs_client);
		break;
	case -NFS4ERR_LEASE_MOVED:
	case -NFS4ERR_DELAY:
		if (nfs4_async_handle_error(task, server,
					    NULL, NULL) == -EAGAIN)
			rpc_restart_call_prepare(task);
	}
}

static void nfs4_release_lockowner_release(void *calldata)
{
	struct nfs_release_lockowner_data *data = calldata;
	nfs4_free_lock_state(data->server, data->lsp);
	kfree(calldata);
}

static const struct rpc_call_ops nfs4_release_lockowner_ops = {
	.rpc_call_prepare = nfs4_release_lockowner_prepare,
	.rpc_call_done = nfs4_release_lockowner_done,
	.rpc_release = nfs4_release_lockowner_release,
};

static void
nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
{
	struct nfs_release_lockowner_data *data;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
	};

	if (server->nfs_client->cl_mvops->minor_version != 0)
		return;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return;
	data->lsp = lsp;
	data->server
= server; 8070 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 8071 data->args.lock_owner.id = lsp->ls_seqid.owner_id; 8072 data->args.lock_owner.s_dev = server->s_dev; 8073 8074 msg.rpc_argp = &data->args; 8075 msg.rpc_resp = &data->res; 8076 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0); 8077 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); 8078 } 8079 8080 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" 8081 8082 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler, 8083 struct mnt_idmap *idmap, 8084 struct dentry *unused, struct inode *inode, 8085 const char *key, const void *buf, 8086 size_t buflen, int flags) 8087 { 8088 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_ACL); 8089 } 8090 8091 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler, 8092 struct dentry *unused, struct inode *inode, 8093 const char *key, void *buf, size_t buflen) 8094 { 8095 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_ACL); 8096 } 8097 8098 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry) 8099 { 8100 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_ACL); 8101 } 8102 8103 #if defined(CONFIG_NFS_V4_1) 8104 #define XATTR_NAME_NFSV4_DACL "system.nfs4_dacl" 8105 8106 static int nfs4_xattr_set_nfs4_dacl(const struct xattr_handler *handler, 8107 struct mnt_idmap *idmap, 8108 struct dentry *unused, struct inode *inode, 8109 const char *key, const void *buf, 8110 size_t buflen, int flags) 8111 { 8112 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_DACL); 8113 } 8114 8115 static int nfs4_xattr_get_nfs4_dacl(const struct xattr_handler *handler, 8116 struct dentry *unused, struct inode *inode, 8117 const char *key, void *buf, size_t buflen) 8118 { 8119 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_DACL); 8120 } 8121 8122 static bool nfs4_xattr_list_nfs4_dacl(struct dentry *dentry) 8123 { 8124 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_DACL); 8125 } 8126 8127 #define XATTR_NAME_NFSV4_SACL "system.nfs4_sacl" 8128 8129 static int nfs4_xattr_set_nfs4_sacl(const struct xattr_handler *handler, 8130 struct mnt_idmap *idmap, 8131 struct dentry *unused, struct inode *inode, 8132 const char *key, const void *buf, 8133 size_t buflen, int flags) 8134 { 8135 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_SACL); 8136 } 8137 8138 static int nfs4_xattr_get_nfs4_sacl(const struct xattr_handler *handler, 8139 struct dentry *unused, struct inode *inode, 8140 const char *key, void *buf, size_t buflen) 8141 { 8142 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_SACL); 8143 } 8144 8145 static bool nfs4_xattr_list_nfs4_sacl(struct dentry *dentry) 8146 { 8147 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_SACL); 8148 } 8149 8150 #endif 8151 8152 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 8153 8154 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler, 8155 struct mnt_idmap *idmap, 8156 struct dentry *unused, struct inode *inode, 8157 const char *key, const void *buf, 8158 size_t buflen, int flags) 8159 { 8160 if (security_ismaclabel(key)) 8161 return nfs4_set_security_label(inode, buf, buflen); 8162 8163 return -EOPNOTSUPP; 8164 } 8165 8166 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler, 8167 struct dentry *unused, struct inode *inode, 8168 const char *key, void *buf, size_t buflen) 8169 { 8170 if (security_ismaclabel(key)) 8171 return nfs4_get_security_label(inode, buf, buflen); 8172 return -EOPNOTSUPP; 8173 } 
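/*
 * Illustrative userspace sketch (assumed paths; not part of this file):
 * the "system.nfs4_acl" and security-label handlers above are reached
 * through the generic xattr system calls, so a client application can
 * fetch and store the raw, XDR-encoded NFSv4 ACL with getxattr(2) and
 * setxattr(2). The 4096-byte buffer is an arbitrary example size.
 *
 *	#include <sys/xattr.h>
 *
 *	char acl[4096];
 *	ssize_t len = getxattr("/mnt/nfs/file", "system.nfs4_acl",
 *			       acl, sizeof(acl));
 *	if (len >= 0)
 *		setxattr("/mnt/nfs/copy", "system.nfs4_acl", acl, len, 0);
 */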
8174 8175 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = { 8176 .prefix = XATTR_SECURITY_PREFIX, 8177 .get = nfs4_xattr_get_nfs4_label, 8178 .set = nfs4_xattr_set_nfs4_label, 8179 }; 8180 8181 #endif 8182 8183 #ifdef CONFIG_NFS_V4_2 8184 static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler, 8185 struct mnt_idmap *idmap, 8186 struct dentry *unused, struct inode *inode, 8187 const char *key, const void *buf, 8188 size_t buflen, int flags) 8189 { 8190 u32 mask; 8191 int ret; 8192 8193 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 8194 return -EOPNOTSUPP; 8195 8196 /* 8197 * There is no mapping from the MAY_* flags to the NFS_ACCESS_XA* 8198 * flags right now. Handling of xattr operations use the normal 8199 * file read/write permissions. 8200 * 8201 * Just in case the server has other ideas (which RFC 8276 allows), 8202 * do a cached access check for the XA* flags to possibly avoid 8203 * doing an RPC and getting EACCES back. 8204 */ 8205 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { 8206 if (!(mask & NFS_ACCESS_XAWRITE)) 8207 return -EACCES; 8208 } 8209 8210 if (buf == NULL) { 8211 ret = nfs42_proc_removexattr(inode, key); 8212 if (!ret) 8213 nfs4_xattr_cache_remove(inode, key); 8214 } else { 8215 ret = nfs42_proc_setxattr(inode, key, buf, buflen, flags); 8216 if (!ret) 8217 nfs4_xattr_cache_add(inode, key, buf, NULL, buflen); 8218 } 8219 8220 return ret; 8221 } 8222 8223 static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler, 8224 struct dentry *unused, struct inode *inode, 8225 const char *key, void *buf, size_t buflen) 8226 { 8227 u32 mask; 8228 ssize_t ret; 8229 8230 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 8231 return -EOPNOTSUPP; 8232 8233 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { 8234 if (!(mask & NFS_ACCESS_XAREAD)) 8235 return -EACCES; 8236 } 8237 8238 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 8239 if (ret) 8240 return ret; 8241 8242 ret = nfs4_xattr_cache_get(inode, key, buf, buflen); 8243 if (ret >= 0 || (ret < 0 && ret != -ENOENT)) 8244 return ret; 8245 8246 ret = nfs42_proc_getxattr(inode, key, buf, buflen); 8247 8248 return ret; 8249 } 8250 8251 static ssize_t 8252 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) 8253 { 8254 u64 cookie; 8255 bool eof; 8256 ssize_t ret, size; 8257 char *buf; 8258 size_t buflen; 8259 u32 mask; 8260 8261 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 8262 return 0; 8263 8264 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { 8265 if (!(mask & NFS_ACCESS_XALIST)) 8266 return 0; 8267 } 8268 8269 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 8270 if (ret) 8271 return ret; 8272 8273 ret = nfs4_xattr_cache_list(inode, list, list_len); 8274 if (ret >= 0 || (ret < 0 && ret != -ENOENT)) 8275 return ret; 8276 8277 cookie = 0; 8278 eof = false; 8279 buflen = list_len ? list_len : XATTR_LIST_MAX; 8280 buf = list_len ? 
list : NULL; 8281 size = 0; 8282 8283 while (!eof) { 8284 ret = nfs42_proc_listxattrs(inode, buf, buflen, 8285 &cookie, &eof); 8286 if (ret < 0) 8287 return ret; 8288 8289 if (list_len) { 8290 buf += ret; 8291 buflen -= ret; 8292 } 8293 size += ret; 8294 } 8295 8296 if (list_len) 8297 nfs4_xattr_cache_set_list(inode, list, size); 8298 8299 return size; 8300 } 8301 8302 #else 8303 8304 static ssize_t 8305 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) 8306 { 8307 return 0; 8308 } 8309 #endif /* CONFIG_NFS_V4_2 */ 8310 8311 /* 8312 * nfs_fhget will use either the mounted_on_fileid or the fileid 8313 */ 8314 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) 8315 { 8316 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || 8317 (fattr->valid & NFS_ATTR_FATTR_FILEID)) && 8318 (fattr->valid & NFS_ATTR_FATTR_FSID) && 8319 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS))) 8320 return; 8321 8322 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 8323 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL; 8324 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 8325 fattr->nlink = 2; 8326 } 8327 8328 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 8329 const struct qstr *name, 8330 struct nfs4_fs_locations *fs_locations, 8331 struct page *page) 8332 { 8333 struct nfs_server *server = NFS_SERVER(dir); 8334 u32 bitmask[3]; 8335 struct nfs4_fs_locations_arg args = { 8336 .dir_fh = NFS_FH(dir), 8337 .name = name, 8338 .page = page, 8339 .bitmask = bitmask, 8340 }; 8341 struct nfs4_fs_locations_res res = { 8342 .fs_locations = fs_locations, 8343 }; 8344 struct rpc_message msg = { 8345 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 8346 .rpc_argp = &args, 8347 .rpc_resp = &res, 8348 }; 8349 int status; 8350 8351 dprintk("%s: start\n", __func__); 8352 8353 bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS; 8354 bitmask[1] = nfs4_fattr_bitmap[1]; 8355 8356 /* Ask for the fileid of the absent filesystem if mounted_on_fileid 8357 * is not supported */ 8358 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) 8359 bitmask[0] &= ~FATTR4_WORD0_FILEID; 8360 else 8361 bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID; 8362 8363 nfs_fattr_init(fs_locations->fattr); 8364 fs_locations->server = server; 8365 fs_locations->nlocations = 0; 8366 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0); 8367 dprintk("%s: returned status = %d\n", __func__, status); 8368 return status; 8369 } 8370 8371 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 8372 const struct qstr *name, 8373 struct nfs4_fs_locations *fs_locations, 8374 struct page *page) 8375 { 8376 struct nfs4_exception exception = { 8377 .interruptible = true, 8378 }; 8379 int err; 8380 do { 8381 err = _nfs4_proc_fs_locations(client, dir, name, 8382 fs_locations, page); 8383 trace_nfs4_get_fs_locations(dir, name, err); 8384 err = nfs4_handle_exception(NFS_SERVER(dir), err, 8385 &exception); 8386 } while (exception.retry); 8387 return err; 8388 } 8389 8390 /* 8391 * This operation also signals the server that this client is 8392 * performing migration recovery. The server can stop returning 8393 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is 8394 * appended to this compound to identify the client ID which is 8395 * performing recovery. 
8396 */ 8397 static int _nfs40_proc_get_locations(struct nfs_server *server, 8398 struct nfs_fh *fhandle, 8399 struct nfs4_fs_locations *locations, 8400 struct page *page, const struct cred *cred) 8401 { 8402 struct rpc_clnt *clnt = server->client; 8403 u32 bitmask[2] = { 8404 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 8405 }; 8406 struct nfs4_fs_locations_arg args = { 8407 .clientid = server->nfs_client->cl_clientid, 8408 .fh = fhandle, 8409 .page = page, 8410 .bitmask = bitmask, 8411 .migration = 1, /* skip LOOKUP */ 8412 .renew = 1, /* append RENEW */ 8413 }; 8414 struct nfs4_fs_locations_res res = { 8415 .fs_locations = locations, 8416 .migration = 1, 8417 .renew = 1, 8418 }; 8419 struct rpc_message msg = { 8420 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 8421 .rpc_argp = &args, 8422 .rpc_resp = &res, 8423 .rpc_cred = cred, 8424 }; 8425 unsigned long now = jiffies; 8426 int status; 8427 8428 nfs_fattr_init(locations->fattr); 8429 locations->server = server; 8430 locations->nlocations = 0; 8431 8432 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8433 status = nfs4_call_sync_sequence(clnt, server, &msg, 8434 &args.seq_args, &res.seq_res); 8435 if (status) 8436 return status; 8437 8438 renew_lease(server, now); 8439 return 0; 8440 } 8441 8442 #ifdef CONFIG_NFS_V4_1 8443 8444 /* 8445 * This operation also signals the server that this client is 8446 * performing migration recovery. The server can stop asserting 8447 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID 8448 * performing this operation is identified in the SEQUENCE 8449 * operation in this compound. 8450 * 8451 * When the client supports GETATTR(fs_locations_info), it can 8452 * be plumbed in here. 8453 */ 8454 static int _nfs41_proc_get_locations(struct nfs_server *server, 8455 struct nfs_fh *fhandle, 8456 struct nfs4_fs_locations *locations, 8457 struct page *page, const struct cred *cred) 8458 { 8459 struct rpc_clnt *clnt = server->client; 8460 u32 bitmask[2] = { 8461 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 8462 }; 8463 struct nfs4_fs_locations_arg args = { 8464 .fh = fhandle, 8465 .page = page, 8466 .bitmask = bitmask, 8467 .migration = 1, /* skip LOOKUP */ 8468 }; 8469 struct nfs4_fs_locations_res res = { 8470 .fs_locations = locations, 8471 .migration = 1, 8472 }; 8473 struct rpc_message msg = { 8474 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 8475 .rpc_argp = &args, 8476 .rpc_resp = &res, 8477 .rpc_cred = cred, 8478 }; 8479 struct nfs4_call_sync_data data = { 8480 .seq_server = server, 8481 .seq_args = &args.seq_args, 8482 .seq_res = &res.seq_res, 8483 }; 8484 struct rpc_task_setup task_setup_data = { 8485 .rpc_client = clnt, 8486 .rpc_message = &msg, 8487 .callback_ops = server->nfs_client->cl_mvops->call_sync_ops, 8488 .callback_data = &data, 8489 .flags = RPC_TASK_NO_ROUND_ROBIN, 8490 }; 8491 int status; 8492 8493 nfs_fattr_init(locations->fattr); 8494 locations->server = server; 8495 locations->nlocations = 0; 8496 8497 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8498 status = nfs4_call_sync_custom(&task_setup_data); 8499 if (status == NFS4_OK && 8500 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 8501 status = -NFS4ERR_LEASE_MOVED; 8502 return status; 8503 } 8504 8505 #endif /* CONFIG_NFS_V4_1 */ 8506 8507 /** 8508 * nfs4_proc_get_locations - discover locations for a migrated FSID 8509 * @server: pointer to nfs_server to process 8510 * @fhandle: pointer to the kernel NFS client file handle 8511 * @locations: result of query 8512 * 
@page: buffer 8513 * @cred: credential to use for this operation 8514 * 8515 * Returns NFS4_OK on success, a negative NFS4ERR status code if the 8516 * operation failed, or a negative errno if a local error occurred. 8517 * 8518 * On success, "locations" is filled in, but if the server has 8519 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not 8520 * asserted. 8521 * 8522 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases 8523 * from this client that require migration recovery. 8524 */ 8525 int nfs4_proc_get_locations(struct nfs_server *server, 8526 struct nfs_fh *fhandle, 8527 struct nfs4_fs_locations *locations, 8528 struct page *page, const struct cred *cred) 8529 { 8530 struct nfs_client *clp = server->nfs_client; 8531 const struct nfs4_mig_recovery_ops *ops = 8532 clp->cl_mvops->mig_recovery_ops; 8533 struct nfs4_exception exception = { 8534 .interruptible = true, 8535 }; 8536 int status; 8537 8538 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 8539 (unsigned long long)server->fsid.major, 8540 (unsigned long long)server->fsid.minor, 8541 clp->cl_hostname); 8542 nfs_display_fhandle(fhandle, __func__); 8543 8544 do { 8545 status = ops->get_locations(server, fhandle, locations, page, 8546 cred); 8547 if (status != -NFS4ERR_DELAY) 8548 break; 8549 nfs4_handle_exception(server, status, &exception); 8550 } while (exception.retry); 8551 return status; 8552 } 8553 8554 /* 8555 * This operation also signals the server that this client is 8556 * performing "lease moved" recovery. The server can stop 8557 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation 8558 * is appended to this compound to identify the client ID which is 8559 * performing recovery. 8560 */ 8561 static int _nfs40_proc_fsid_present(struct inode *inode, const struct cred *cred) 8562 { 8563 struct nfs_server *server = NFS_SERVER(inode); 8564 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 8565 struct rpc_clnt *clnt = server->client; 8566 struct nfs4_fsid_present_arg args = { 8567 .fh = NFS_FH(inode), 8568 .clientid = clp->cl_clientid, 8569 .renew = 1, /* append RENEW */ 8570 }; 8571 struct nfs4_fsid_present_res res = { 8572 .renew = 1, 8573 }; 8574 struct rpc_message msg = { 8575 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 8576 .rpc_argp = &args, 8577 .rpc_resp = &res, 8578 .rpc_cred = cred, 8579 }; 8580 unsigned long now = jiffies; 8581 int status; 8582 8583 res.fh = nfs_alloc_fhandle(); 8584 if (res.fh == NULL) 8585 return -ENOMEM; 8586 8587 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8588 status = nfs4_call_sync_sequence(clnt, server, &msg, 8589 &args.seq_args, &res.seq_res); 8590 nfs_free_fhandle(res.fh); 8591 if (status) 8592 return status; 8593 8594 do_renew_lease(clp, now); 8595 return 0; 8596 } 8597 8598 #ifdef CONFIG_NFS_V4_1 8599 8600 /* 8601 * This operation also signals the server that this client is 8602 * performing "lease moved" recovery. The server can stop asserting 8603 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing 8604 * this operation is identified in the SEQUENCE operation in this 8605 * compound. 
 */
static int _nfs41_proc_fsid_present(struct inode *inode, const struct cred *cred)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct rpc_clnt *clnt = server->client;
	struct nfs4_fsid_present_arg args = {
		.fh = NFS_FH(inode),
	};
	struct nfs4_fsid_present_res res = {
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
		.rpc_argp = &args,
		.rpc_resp = &res,
		.rpc_cred = cred,
	};
	int status;

	res.fh = nfs_alloc_fhandle();
	if (res.fh == NULL)
		return -ENOMEM;

	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
	status = nfs4_call_sync_sequence(clnt, server, &msg,
					 &args.seq_args, &res.seq_res);
	nfs_free_fhandle(res.fh);
	if (status == NFS4_OK &&
	    res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
		status = -NFS4ERR_LEASE_MOVED;
	return status;
}

#endif /* CONFIG_NFS_V4_1 */

/**
 * nfs4_proc_fsid_present - Is this FSID present or absent on server?
 * @inode: inode on FSID to check
 * @cred: credential to use for this operation
 *
 * Server indicates whether the FSID is present, moved, or not
 * recognized. This operation is necessary to clear a LEASE_MOVED
 * condition for this client ID.
 *
 * Returns NFS4_OK if the FSID is present on this server,
 * -NFS4ERR_MOVED if the FSID is no longer present, a negative
 * NFS4ERR code if some error occurred on the server, or a
 * negative errno if a local failure occurred.
 */
int nfs4_proc_fsid_present(struct inode *inode, const struct cred *cred)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_client *clp = server->nfs_client;
	const struct nfs4_mig_recovery_ops *ops =
		clp->cl_mvops->mig_recovery_ops;
	struct nfs4_exception exception = {
		.interruptible = true,
	};
	int status;

	dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
		(unsigned long long)server->fsid.major,
		(unsigned long long)server->fsid.minor,
		clp->cl_hostname);
	nfs_display_fhandle(NFS_FH(inode), __func__);

	do {
		status = ops->fsid_present(inode, cred);
		if (status != -NFS4ERR_DELAY)
			break;
		nfs4_handle_exception(server, status, &exception);
	} while (exception.retry);
	return status;
}

/*
 * If 'use_integrity' is true and the state management nfs_client
 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
 * and the machine credential as per RFC3530bis and RFC5661 Security
 * Considerations sections. Otherwise, just use the user cred with the
 * filesystem's rpc_client.
8686 */ 8687 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity) 8688 { 8689 int status; 8690 struct rpc_clnt *clnt = NFS_SERVER(dir)->client; 8691 struct nfs_client *clp = NFS_SERVER(dir)->nfs_client; 8692 struct nfs4_secinfo_arg args = { 8693 .dir_fh = NFS_FH(dir), 8694 .name = name, 8695 }; 8696 struct nfs4_secinfo_res res = { 8697 .flavors = flavors, 8698 }; 8699 struct rpc_message msg = { 8700 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO], 8701 .rpc_argp = &args, 8702 .rpc_resp = &res, 8703 }; 8704 struct nfs4_call_sync_data data = { 8705 .seq_server = NFS_SERVER(dir), 8706 .seq_args = &args.seq_args, 8707 .seq_res = &res.seq_res, 8708 }; 8709 struct rpc_task_setup task_setup = { 8710 .rpc_client = clnt, 8711 .rpc_message = &msg, 8712 .callback_ops = clp->cl_mvops->call_sync_ops, 8713 .callback_data = &data, 8714 .flags = RPC_TASK_NO_ROUND_ROBIN, 8715 }; 8716 const struct cred *cred = NULL; 8717 8718 if (use_integrity) { 8719 clnt = clp->cl_rpcclient; 8720 task_setup.rpc_client = clnt; 8721 8722 cred = nfs4_get_clid_cred(clp); 8723 msg.rpc_cred = cred; 8724 } 8725 8726 dprintk("NFS call secinfo %s\n", name->name); 8727 8728 nfs4_state_protect(clp, NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg); 8729 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 8730 status = nfs4_call_sync_custom(&task_setup); 8731 8732 dprintk("NFS reply secinfo: %d\n", status); 8733 8734 put_cred(cred); 8735 return status; 8736 } 8737 8738 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, 8739 struct nfs4_secinfo_flavors *flavors) 8740 { 8741 struct nfs4_exception exception = { 8742 .interruptible = true, 8743 }; 8744 int err; 8745 do { 8746 err = -NFS4ERR_WRONGSEC; 8747 8748 /* try to use integrity protection with machine cred */ 8749 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client)) 8750 err = _nfs4_proc_secinfo(dir, name, flavors, true); 8751 8752 /* 8753 * if unable to use integrity protection, or SECINFO with 8754 * integrity protection returns NFS4ERR_WRONGSEC (which is 8755 * disallowed by spec, but exists in deployed servers) use 8756 * the current filesystem's rpc_client and the user cred. 8757 */ 8758 if (err == -NFS4ERR_WRONGSEC) 8759 err = _nfs4_proc_secinfo(dir, name, flavors, false); 8760 8761 trace_nfs4_secinfo(dir, name, err); 8762 err = nfs4_handle_exception(NFS_SERVER(dir), err, 8763 &exception); 8764 } while (exception.retry); 8765 return err; 8766 } 8767 8768 #ifdef CONFIG_NFS_V4_1 8769 /* 8770 * Check the exchange flags returned by the server for invalid flags, having 8771 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or 8772 * DS flags set. 
8773 */ 8774 static int nfs4_check_cl_exchange_flags(u32 flags, u32 version) 8775 { 8776 if (version >= 2 && (flags & ~EXCHGID4_2_FLAG_MASK_R)) 8777 goto out_inval; 8778 else if (version < 2 && (flags & ~EXCHGID4_FLAG_MASK_R)) 8779 goto out_inval; 8780 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && 8781 (flags & EXCHGID4_FLAG_USE_NON_PNFS)) 8782 goto out_inval; 8783 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) 8784 goto out_inval; 8785 return NFS_OK; 8786 out_inval: 8787 return -NFS4ERR_INVAL; 8788 } 8789 8790 static bool 8791 nfs41_same_server_scope(struct nfs41_server_scope *a, 8792 struct nfs41_server_scope *b) 8793 { 8794 if (a->server_scope_sz != b->server_scope_sz) 8795 return false; 8796 return memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0; 8797 } 8798 8799 static void 8800 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) 8801 { 8802 struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp; 8803 struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp; 8804 struct nfs_client *clp = args->client; 8805 8806 switch (task->tk_status) { 8807 case -NFS4ERR_BADSESSION: 8808 case -NFS4ERR_DEADSESSION: 8809 nfs4_schedule_session_recovery(clp->cl_session, 8810 task->tk_status); 8811 return; 8812 } 8813 if (args->dir == NFS4_CDFC4_FORE_OR_BOTH && 8814 res->dir != NFS4_CDFS4_BOTH) { 8815 rpc_task_close_connection(task); 8816 if (args->retries++ < MAX_BIND_CONN_TO_SESSION_RETRIES) 8817 rpc_restart_call(task); 8818 } 8819 } 8820 8821 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = { 8822 .rpc_call_done = nfs4_bind_one_conn_to_session_done, 8823 }; 8824 8825 /* 8826 * nfs4_proc_bind_one_conn_to_session() 8827 * 8828 * The 4.1 client currently uses the same TCP connection for the 8829 * fore and backchannel. 
8830 */ 8831 static 8832 int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt, 8833 struct rpc_xprt *xprt, 8834 struct nfs_client *clp, 8835 const struct cred *cred) 8836 { 8837 int status; 8838 struct nfs41_bind_conn_to_session_args args = { 8839 .client = clp, 8840 .dir = NFS4_CDFC4_FORE_OR_BOTH, 8841 .retries = 0, 8842 }; 8843 struct nfs41_bind_conn_to_session_res res; 8844 struct rpc_message msg = { 8845 .rpc_proc = 8846 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION], 8847 .rpc_argp = &args, 8848 .rpc_resp = &res, 8849 .rpc_cred = cred, 8850 }; 8851 struct rpc_task_setup task_setup_data = { 8852 .rpc_client = clnt, 8853 .rpc_xprt = xprt, 8854 .callback_ops = &nfs4_bind_one_conn_to_session_ops, 8855 .rpc_message = &msg, 8856 .flags = RPC_TASK_TIMEOUT, 8857 }; 8858 struct rpc_task *task; 8859 8860 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id); 8861 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN)) 8862 args.dir = NFS4_CDFC4_FORE; 8863 8864 /* Do not set the backchannel flag unless this is clnt->cl_xprt */ 8865 if (xprt != rcu_access_pointer(clnt->cl_xprt)) 8866 args.dir = NFS4_CDFC4_FORE; 8867 8868 task = rpc_run_task(&task_setup_data); 8869 if (!IS_ERR(task)) { 8870 status = task->tk_status; 8871 rpc_put_task(task); 8872 } else 8873 status = PTR_ERR(task); 8874 trace_nfs4_bind_conn_to_session(clp, status); 8875 if (status == 0) { 8876 if (memcmp(res.sessionid.data, 8877 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { 8878 dprintk("NFS: %s: Session ID mismatch\n", __func__); 8879 return -EIO; 8880 } 8881 if ((res.dir & args.dir) != res.dir || res.dir == 0) { 8882 dprintk("NFS: %s: Unexpected direction from server\n", 8883 __func__); 8884 return -EIO; 8885 } 8886 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) { 8887 dprintk("NFS: %s: Server returned RDMA mode = true\n", 8888 __func__); 8889 return -EIO; 8890 } 8891 } 8892 8893 return status; 8894 } 8895 8896 struct rpc_bind_conn_calldata { 8897 struct nfs_client *clp; 8898 const struct cred *cred; 8899 }; 8900 8901 static int 8902 nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt, 8903 struct rpc_xprt *xprt, 8904 void *calldata) 8905 { 8906 struct rpc_bind_conn_calldata *p = calldata; 8907 8908 return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred); 8909 } 8910 8911 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, const struct cred *cred) 8912 { 8913 struct rpc_bind_conn_calldata data = { 8914 .clp = clp, 8915 .cred = cred, 8916 }; 8917 return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient, 8918 nfs4_proc_bind_conn_to_session_callback, &data); 8919 } 8920 8921 /* 8922 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map 8923 * and operations we'd like to see to enable certain features in the allow map 8924 */ 8925 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = { 8926 .how = SP4_MACH_CRED, 8927 .enforce.u.words = { 8928 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 8929 1 << (OP_EXCHANGE_ID - 32) | 8930 1 << (OP_CREATE_SESSION - 32) | 8931 1 << (OP_DESTROY_SESSION - 32) | 8932 1 << (OP_DESTROY_CLIENTID - 32) 8933 }, 8934 .allow.u.words = { 8935 [0] = 1 << (OP_CLOSE) | 8936 1 << (OP_OPEN_DOWNGRADE) | 8937 1 << (OP_LOCKU) | 8938 1 << (OP_DELEGRETURN) | 8939 1 << (OP_COMMIT), 8940 [1] = 1 << (OP_SECINFO - 32) | 8941 1 << (OP_SECINFO_NO_NAME - 32) | 8942 1 << (OP_LAYOUTRETURN - 32) | 8943 1 << (OP_TEST_STATEID - 32) | 8944 1 << (OP_FREE_STATEID - 32) | 8945 1 << (OP_WRITE - 32) 8946 } 8947 }; 8948 8949 
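/*
 * Minimal illustration of the op-map encoding used above (the helper
 * name is hypothetical, not an existing NFS client interface):
 * operation numbers 0-31 land in u.words[0] and 32-63 in u.words[1],
 * so OP_EXCHANGE_ID (operation 42) becomes bit (42 - 32) of words[1].
 * The checks in nfs4_sp4_select_mode() below read the same storage
 * through the u.longs view with test_bit().
 */
static inline bool nfs4_example_op_map_test(const struct nfs4_op_map *map,
					    unsigned int op)
{
	return (map->u.words[op / 32] >> (op % 32)) & 1;
}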
/* 8950 * Select the state protection mode for client `clp' given the server results 8951 * from exchange_id in `sp'. 8952 * 8953 * Returns 0 on success, negative errno otherwise. 8954 */ 8955 static int nfs4_sp4_select_mode(struct nfs_client *clp, 8956 struct nfs41_state_protection *sp) 8957 { 8958 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = { 8959 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 8960 1 << (OP_EXCHANGE_ID - 32) | 8961 1 << (OP_CREATE_SESSION - 32) | 8962 1 << (OP_DESTROY_SESSION - 32) | 8963 1 << (OP_DESTROY_CLIENTID - 32) 8964 }; 8965 unsigned long flags = 0; 8966 unsigned int i; 8967 int ret = 0; 8968 8969 if (sp->how == SP4_MACH_CRED) { 8970 /* Print state protect result */ 8971 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n"); 8972 for (i = 0; i <= LAST_NFS4_OP; i++) { 8973 if (test_bit(i, sp->enforce.u.longs)) 8974 dfprintk(MOUNT, " enforce op %d\n", i); 8975 if (test_bit(i, sp->allow.u.longs)) 8976 dfprintk(MOUNT, " allow op %d\n", i); 8977 } 8978 8979 /* make sure nothing is on enforce list that isn't supported */ 8980 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) { 8981 if (sp->enforce.u.words[i] & ~supported_enforce[i]) { 8982 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 8983 ret = -EINVAL; 8984 goto out; 8985 } 8986 } 8987 8988 /* 8989 * Minimal mode - state operations are allowed to use machine 8990 * credential. Note this already happens by default, so the 8991 * client doesn't have to do anything more than the negotiation. 8992 * 8993 * NOTE: we don't care if EXCHANGE_ID is in the list - 8994 * we're already using the machine cred for exchange_id 8995 * and will never use a different cred. 8996 */ 8997 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) && 8998 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) && 8999 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) && 9000 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) { 9001 dfprintk(MOUNT, "sp4_mach_cred:\n"); 9002 dfprintk(MOUNT, " minimal mode enabled\n"); 9003 __set_bit(NFS_SP4_MACH_CRED_MINIMAL, &flags); 9004 } else { 9005 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 9006 ret = -EINVAL; 9007 goto out; 9008 } 9009 9010 if (test_bit(OP_CLOSE, sp->allow.u.longs) && 9011 test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) && 9012 test_bit(OP_DELEGRETURN, sp->allow.u.longs) && 9013 test_bit(OP_LOCKU, sp->allow.u.longs)) { 9014 dfprintk(MOUNT, " cleanup mode enabled\n"); 9015 __set_bit(NFS_SP4_MACH_CRED_CLEANUP, &flags); 9016 } 9017 9018 if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) { 9019 dfprintk(MOUNT, " pnfs cleanup mode enabled\n"); 9020 __set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP, &flags); 9021 } 9022 9023 if (test_bit(OP_SECINFO, sp->allow.u.longs) && 9024 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) { 9025 dfprintk(MOUNT, " secinfo mode enabled\n"); 9026 __set_bit(NFS_SP4_MACH_CRED_SECINFO, &flags); 9027 } 9028 9029 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) && 9030 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) { 9031 dfprintk(MOUNT, " stateid mode enabled\n"); 9032 __set_bit(NFS_SP4_MACH_CRED_STATEID, &flags); 9033 } 9034 9035 if (test_bit(OP_WRITE, sp->allow.u.longs)) { 9036 dfprintk(MOUNT, " write mode enabled\n"); 9037 __set_bit(NFS_SP4_MACH_CRED_WRITE, &flags); 9038 } 9039 9040 if (test_bit(OP_COMMIT, sp->allow.u.longs)) { 9041 dfprintk(MOUNT, " commit mode enabled\n"); 9042 __set_bit(NFS_SP4_MACH_CRED_COMMIT, &flags); 9043 } 9044 } 9045 out: 9046 clp->cl_sp4_flags = flags; 9047 return ret; 9048 } 9049 9050 struct nfs41_exchange_id_data { 9051 struct 
nfs41_exchange_id_res res; 9052 struct nfs41_exchange_id_args args; 9053 }; 9054 9055 static void nfs4_exchange_id_release(void *data) 9056 { 9057 struct nfs41_exchange_id_data *cdata = 9058 (struct nfs41_exchange_id_data *)data; 9059 9060 nfs_put_client(cdata->args.client); 9061 kfree(cdata->res.impl_id); 9062 kfree(cdata->res.server_scope); 9063 kfree(cdata->res.server_owner); 9064 kfree(cdata); 9065 } 9066 9067 static const struct rpc_call_ops nfs4_exchange_id_call_ops = { 9068 .rpc_release = nfs4_exchange_id_release, 9069 }; 9070 9071 /* 9072 * _nfs4_proc_exchange_id() 9073 * 9074 * Wrapper for EXCHANGE_ID operation. 9075 */ 9076 static struct rpc_task * 9077 nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred, 9078 u32 sp4_how, struct rpc_xprt *xprt) 9079 { 9080 struct rpc_message msg = { 9081 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], 9082 .rpc_cred = cred, 9083 }; 9084 struct rpc_task_setup task_setup_data = { 9085 .rpc_client = clp->cl_rpcclient, 9086 .callback_ops = &nfs4_exchange_id_call_ops, 9087 .rpc_message = &msg, 9088 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN, 9089 }; 9090 struct nfs41_exchange_id_data *calldata; 9091 int status; 9092 9093 if (!refcount_inc_not_zero(&clp->cl_count)) 9094 return ERR_PTR(-EIO); 9095 9096 status = -ENOMEM; 9097 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 9098 if (!calldata) 9099 goto out; 9100 9101 nfs4_init_boot_verifier(clp, &calldata->args.verifier); 9102 9103 status = nfs4_init_uniform_client_string(clp); 9104 if (status) 9105 goto out_calldata; 9106 9107 calldata->res.server_owner = kzalloc(sizeof(struct nfs41_server_owner), 9108 GFP_NOFS); 9109 status = -ENOMEM; 9110 if (unlikely(calldata->res.server_owner == NULL)) 9111 goto out_calldata; 9112 9113 calldata->res.server_scope = kzalloc(sizeof(struct nfs41_server_scope), 9114 GFP_NOFS); 9115 if (unlikely(calldata->res.server_scope == NULL)) 9116 goto out_server_owner; 9117 9118 calldata->res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS); 9119 if (unlikely(calldata->res.impl_id == NULL)) 9120 goto out_server_scope; 9121 9122 switch (sp4_how) { 9123 case SP4_NONE: 9124 calldata->args.state_protect.how = SP4_NONE; 9125 break; 9126 9127 case SP4_MACH_CRED: 9128 calldata->args.state_protect = nfs4_sp4_mach_cred_request; 9129 break; 9130 9131 default: 9132 /* unsupported! 
*/ 9133 WARN_ON_ONCE(1); 9134 status = -EINVAL; 9135 goto out_impl_id; 9136 } 9137 if (xprt) { 9138 task_setup_data.rpc_xprt = xprt; 9139 task_setup_data.flags |= RPC_TASK_SOFTCONN; 9140 memcpy(calldata->args.verifier.data, clp->cl_confirm.data, 9141 sizeof(calldata->args.verifier.data)); 9142 } 9143 calldata->args.client = clp; 9144 calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 9145 EXCHGID4_FLAG_BIND_PRINC_STATEID; 9146 #ifdef CONFIG_NFS_V4_1_MIGRATION 9147 calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR; 9148 #endif 9149 if (test_bit(NFS_CS_PNFS, &clp->cl_flags)) 9150 calldata->args.flags |= EXCHGID4_FLAG_USE_PNFS_DS; 9151 msg.rpc_argp = &calldata->args; 9152 msg.rpc_resp = &calldata->res; 9153 task_setup_data.callback_data = calldata; 9154 9155 return rpc_run_task(&task_setup_data); 9156 9157 out_impl_id: 9158 kfree(calldata->res.impl_id); 9159 out_server_scope: 9160 kfree(calldata->res.server_scope); 9161 out_server_owner: 9162 kfree(calldata->res.server_owner); 9163 out_calldata: 9164 kfree(calldata); 9165 out: 9166 nfs_put_client(clp); 9167 return ERR_PTR(status); 9168 } 9169 9170 /* 9171 * _nfs4_proc_exchange_id() 9172 * 9173 * Wrapper for EXCHANGE_ID operation. 9174 */ 9175 static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred, 9176 u32 sp4_how) 9177 { 9178 struct rpc_task *task; 9179 struct nfs41_exchange_id_args *argp; 9180 struct nfs41_exchange_id_res *resp; 9181 unsigned long now = jiffies; 9182 int status; 9183 9184 task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL); 9185 if (IS_ERR(task)) 9186 return PTR_ERR(task); 9187 9188 argp = task->tk_msg.rpc_argp; 9189 resp = task->tk_msg.rpc_resp; 9190 status = task->tk_status; 9191 if (status != 0) 9192 goto out; 9193 9194 status = nfs4_check_cl_exchange_flags(resp->flags, 9195 clp->cl_mvops->minor_version); 9196 if (status != 0) 9197 goto out; 9198 9199 status = nfs4_sp4_select_mode(clp, &resp->state_protect); 9200 if (status != 0) 9201 goto out; 9202 9203 do_renew_lease(clp, now); 9204 9205 clp->cl_clientid = resp->clientid; 9206 clp->cl_exchange_flags = resp->flags; 9207 clp->cl_seqid = resp->seqid; 9208 /* Client ID is not confirmed */ 9209 if (!(resp->flags & EXCHGID4_FLAG_CONFIRMED_R)) 9210 clear_bit(NFS4_SESSION_ESTABLISHED, 9211 &clp->cl_session->session_state); 9212 9213 if (clp->cl_serverscope != NULL && 9214 !nfs41_same_server_scope(clp->cl_serverscope, 9215 resp->server_scope)) { 9216 dprintk("%s: server_scope mismatch detected\n", 9217 __func__); 9218 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); 9219 } 9220 9221 swap(clp->cl_serverowner, resp->server_owner); 9222 swap(clp->cl_serverscope, resp->server_scope); 9223 swap(clp->cl_implid, resp->impl_id); 9224 9225 /* Save the EXCHANGE_ID verifier session trunk tests */ 9226 memcpy(clp->cl_confirm.data, argp->verifier.data, 9227 sizeof(clp->cl_confirm.data)); 9228 out: 9229 trace_nfs4_exchange_id(clp, status); 9230 rpc_put_task(task); 9231 return status; 9232 } 9233 9234 /* 9235 * nfs4_proc_exchange_id() 9236 * 9237 * Returns zero, a negative errno, or a negative NFS4ERR status code. 9238 * 9239 * Since the clientid has expired, all compounds using sessions 9240 * associated with the stale clientid will be returning 9241 * NFS4ERR_BADSESSION in the sequence operation, and will therefore 9242 * be in some phase of session reset. 9243 * 9244 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used. 
 */
int nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred)
{
	rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
	int status;

	/* try SP4_MACH_CRED if krb5i/p */
	if (authflavor == RPC_AUTH_GSS_KRB5I ||
	    authflavor == RPC_AUTH_GSS_KRB5P) {
		status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
		if (!status)
			return 0;
	}

	/* try SP4_NONE */
	return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
}

/**
 * nfs4_test_session_trunk
 *
 * This is an add_xprt_test() test function called from
 * rpc_clnt_setup_test_and_add_xprt.
 *
 * The rpc_xprt_switch is referenced by rpc_clnt_setup_test_and_add_xprt
 * and is dereferenced in nfs4_exchange_id_release.
 *
 * Upon success, add the new transport to the rpc_clnt
 *
 * @clnt: struct rpc_clnt to get new transport
 * @xprt: the rpc_xprt to test
 * @data: call data for _nfs4_proc_exchange_id.
 */
void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
			     void *data)
{
	struct nfs4_add_xprt_data *adata = data;
	struct rpc_task *task;
	int status;

	u32 sp4_how;

	dprintk("--> %s try %s\n", __func__,
		xprt->address_strings[RPC_DISPLAY_ADDR]);

	sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED);

try_again:
	/* Test connection for session trunking. Async exchange_id call */
	task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt);
	if (IS_ERR(task))
		return;

	status = task->tk_status;
	if (status == 0) {
		status = nfs4_detect_session_trunking(adata->clp,
				task->tk_msg.rpc_resp, xprt);
		trace_nfs4_trunked_exchange_id(adata->clp,
			xprt->address_strings[RPC_DISPLAY_ADDR], status);
	}
	if (status == 0)
		rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
	else if (status != -NFS4ERR_DELAY && rpc_clnt_xprt_switch_has_addr(clnt,
				(struct sockaddr *)&xprt->addr))
		rpc_clnt_xprt_switch_remove_xprt(clnt, xprt);

	rpc_put_task(task);
	if (status == -NFS4ERR_DELAY) {
		ssleep(1);
		goto try_again;
	}
}
EXPORT_SYMBOL_GPL(nfs4_test_session_trunk);

static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
				       const struct cred *cred)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
		.rpc_argp = clp,
		.rpc_cred = cred,
	};
	int status;

	status = rpc_call_sync(clp->cl_rpcclient, &msg,
			       RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
	trace_nfs4_destroy_clientid(clp, status);
	if (status)
		dprintk("NFS: Got error %d from the server %s on "
			"DESTROY_CLIENTID.", status, clp->cl_hostname);
	return status;
}

static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
				      const struct cred *cred)
{
	unsigned int loop;
	int ret;

	for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
		ret = _nfs4_proc_destroy_clientid(clp, cred);
		switch (ret) {
		case -NFS4ERR_DELAY:
		case -NFS4ERR_CLIENTID_BUSY:
			ssleep(1);
			break;
		default:
			return ret;
		}
	}
	return 0;
}

int nfs4_destroy_clientid(struct nfs_client *clp)
{
	const struct cred *cred;
	int ret = 0;

	if (clp->cl_mvops->minor_version < 1)
		goto out;
	if (clp->cl_exchange_flags == 0)
		goto out;
	if
(clp->cl_preserve_clid) 9368 goto out; 9369 cred = nfs4_get_clid_cred(clp); 9370 ret = nfs4_proc_destroy_clientid(clp, cred); 9371 put_cred(cred); 9372 switch (ret) { 9373 case 0: 9374 case -NFS4ERR_STALE_CLIENTID: 9375 clp->cl_exchange_flags = 0; 9376 } 9377 out: 9378 return ret; 9379 } 9380 9381 #endif /* CONFIG_NFS_V4_1 */ 9382 9383 struct nfs4_get_lease_time_data { 9384 struct nfs4_get_lease_time_args *args; 9385 struct nfs4_get_lease_time_res *res; 9386 struct nfs_client *clp; 9387 }; 9388 9389 static void nfs4_get_lease_time_prepare(struct rpc_task *task, 9390 void *calldata) 9391 { 9392 struct nfs4_get_lease_time_data *data = 9393 (struct nfs4_get_lease_time_data *)calldata; 9394 9395 /* just setup sequence, do not trigger session recovery 9396 since we're invoked within one */ 9397 nfs4_setup_sequence(data->clp, 9398 &data->args->la_seq_args, 9399 &data->res->lr_seq_res, 9400 task); 9401 } 9402 9403 /* 9404 * Called from nfs4_state_manager thread for session setup, so don't recover 9405 * from sequence operation or clientid errors. 9406 */ 9407 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) 9408 { 9409 struct nfs4_get_lease_time_data *data = 9410 (struct nfs4_get_lease_time_data *)calldata; 9411 9412 if (!nfs4_sequence_done(task, &data->res->lr_seq_res)) 9413 return; 9414 switch (task->tk_status) { 9415 case -NFS4ERR_DELAY: 9416 case -NFS4ERR_GRACE: 9417 rpc_delay(task, NFS4_POLL_RETRY_MIN); 9418 task->tk_status = 0; 9419 fallthrough; 9420 case -NFS4ERR_RETRY_UNCACHED_REP: 9421 rpc_restart_call_prepare(task); 9422 return; 9423 } 9424 } 9425 9426 static const struct rpc_call_ops nfs4_get_lease_time_ops = { 9427 .rpc_call_prepare = nfs4_get_lease_time_prepare, 9428 .rpc_call_done = nfs4_get_lease_time_done, 9429 }; 9430 9431 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) 9432 { 9433 struct nfs4_get_lease_time_args args; 9434 struct nfs4_get_lease_time_res res = { 9435 .lr_fsinfo = fsinfo, 9436 }; 9437 struct nfs4_get_lease_time_data data = { 9438 .args = &args, 9439 .res = &res, 9440 .clp = clp, 9441 }; 9442 struct rpc_message msg = { 9443 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME], 9444 .rpc_argp = &args, 9445 .rpc_resp = &res, 9446 }; 9447 struct rpc_task_setup task_setup = { 9448 .rpc_client = clp->cl_rpcclient, 9449 .rpc_message = &msg, 9450 .callback_ops = &nfs4_get_lease_time_ops, 9451 .callback_data = &data, 9452 .flags = RPC_TASK_TIMEOUT, 9453 }; 9454 9455 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0, 1); 9456 return nfs4_call_sync_custom(&task_setup); 9457 } 9458 9459 #ifdef CONFIG_NFS_V4_1 9460 9461 /* 9462 * Initialize the values to be used by the client in CREATE_SESSION 9463 * If nfs4_init_session set the fore channel request and response sizes, 9464 * use them. 9465 * 9466 * Set the back channel max_resp_sz_cached to zero to force the client to 9467 * always set csa_cachethis to FALSE because the current implementation 9468 * of the back channel DRC only supports caching the CB_SEQUENCE operation. 
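 *
 * The fore channel sizes below are the maximum I/O size plus the encoded
 * compound overhead (nfs41_maxwrite_overhead / nfs41_maxread_overhead), and
 * max_reqs comes from the max_session_slots tunable.  The back channel is
 * sized to what the RPC transport can actually carry (rpc_max_bc_payload())
 * and is capped at the number of preallocated backchannel slots
 * (rpc_num_bc_slots()).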
9469 */ 9470 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args, 9471 struct rpc_clnt *clnt) 9472 { 9473 unsigned int max_rqst_sz, max_resp_sz; 9474 unsigned int max_bc_payload = rpc_max_bc_payload(clnt); 9475 unsigned int max_bc_slots = rpc_num_bc_slots(clnt); 9476 9477 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead; 9478 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead; 9479 9480 /* Fore channel attributes */ 9481 args->fc_attrs.max_rqst_sz = max_rqst_sz; 9482 args->fc_attrs.max_resp_sz = max_resp_sz; 9483 args->fc_attrs.max_ops = NFS4_MAX_OPS; 9484 args->fc_attrs.max_reqs = max_session_slots; 9485 9486 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u " 9487 "max_ops=%u max_reqs=%u\n", 9488 __func__, 9489 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz, 9490 args->fc_attrs.max_ops, args->fc_attrs.max_reqs); 9491 9492 /* Back channel attributes */ 9493 args->bc_attrs.max_rqst_sz = max_bc_payload; 9494 args->bc_attrs.max_resp_sz = max_bc_payload; 9495 args->bc_attrs.max_resp_sz_cached = 0; 9496 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS; 9497 args->bc_attrs.max_reqs = max_t(unsigned short, max_session_cb_slots, 1); 9498 if (args->bc_attrs.max_reqs > max_bc_slots) 9499 args->bc_attrs.max_reqs = max_bc_slots; 9500 9501 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u " 9502 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", 9503 __func__, 9504 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz, 9505 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops, 9506 args->bc_attrs.max_reqs); 9507 } 9508 9509 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, 9510 struct nfs41_create_session_res *res) 9511 { 9512 struct nfs4_channel_attrs *sent = &args->fc_attrs; 9513 struct nfs4_channel_attrs *rcvd = &res->fc_attrs; 9514 9515 if (rcvd->max_resp_sz > sent->max_resp_sz) 9516 return -EINVAL; 9517 /* 9518 * Our requested max_ops is the minimum we need; we're not 9519 * prepared to break up compounds into smaller pieces than that. 
9520 * So, no point even trying to continue if the server won't 9521 * cooperate: 9522 */ 9523 if (rcvd->max_ops < sent->max_ops) 9524 return -EINVAL; 9525 if (rcvd->max_reqs == 0) 9526 return -EINVAL; 9527 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE) 9528 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE; 9529 return 0; 9530 } 9531 9532 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, 9533 struct nfs41_create_session_res *res) 9534 { 9535 struct nfs4_channel_attrs *sent = &args->bc_attrs; 9536 struct nfs4_channel_attrs *rcvd = &res->bc_attrs; 9537 9538 if (!(res->flags & SESSION4_BACK_CHAN)) 9539 goto out; 9540 if (rcvd->max_rqst_sz > sent->max_rqst_sz) 9541 return -EINVAL; 9542 if (rcvd->max_resp_sz > sent->max_resp_sz) 9543 return -EINVAL; 9544 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) 9545 return -EINVAL; 9546 if (rcvd->max_ops > sent->max_ops) 9547 return -EINVAL; 9548 if (rcvd->max_reqs > sent->max_reqs) 9549 return -EINVAL; 9550 out: 9551 return 0; 9552 } 9553 9554 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args, 9555 struct nfs41_create_session_res *res) 9556 { 9557 int ret; 9558 9559 ret = nfs4_verify_fore_channel_attrs(args, res); 9560 if (ret) 9561 return ret; 9562 return nfs4_verify_back_channel_attrs(args, res); 9563 } 9564 9565 static void nfs4_update_session(struct nfs4_session *session, 9566 struct nfs41_create_session_res *res) 9567 { 9568 nfs4_copy_sessionid(&session->sess_id, &res->sessionid); 9569 /* Mark client id and session as being confirmed */ 9570 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; 9571 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state); 9572 session->flags = res->flags; 9573 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs)); 9574 if (res->flags & SESSION4_BACK_CHAN) 9575 memcpy(&session->bc_attrs, &res->bc_attrs, 9576 sizeof(session->bc_attrs)); 9577 } 9578 9579 static int _nfs4_proc_create_session(struct nfs_client *clp, 9580 const struct cred *cred) 9581 { 9582 struct nfs4_session *session = clp->cl_session; 9583 struct nfs41_create_session_args args = { 9584 .client = clp, 9585 .clientid = clp->cl_clientid, 9586 .seqid = clp->cl_seqid, 9587 .cb_program = NFS4_CALLBACK, 9588 }; 9589 struct nfs41_create_session_res res; 9590 9591 struct rpc_message msg = { 9592 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION], 9593 .rpc_argp = &args, 9594 .rpc_resp = &res, 9595 .rpc_cred = cred, 9596 }; 9597 int status; 9598 9599 nfs4_init_channel_attrs(&args, clp->cl_rpcclient); 9600 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN); 9601 9602 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 9603 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 9604 trace_nfs4_create_session(clp, status); 9605 9606 switch (status) { 9607 case -NFS4ERR_STALE_CLIENTID: 9608 case -NFS4ERR_DELAY: 9609 case -ETIMEDOUT: 9610 case -EACCES: 9611 case -EAGAIN: 9612 goto out; 9613 } 9614 9615 clp->cl_seqid++; 9616 if (!status) { 9617 /* Verify the session's negotiated channel_attrs values */ 9618 status = nfs4_verify_channel_attrs(&args, &res); 9619 /* Increment the clientid slot sequence id */ 9620 if (status) 9621 goto out; 9622 nfs4_update_session(session, &res); 9623 } 9624 out: 9625 return status; 9626 } 9627 9628 /* 9629 * Issues a CREATE_SESSION operation to the server. 9630 * It is the responsibility of the caller to verify the session is 9631 * expired before calling this routine. 
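 *
 * On success the session slot tables are (re)initialized and the other
 * transports of the rpc_clnt are probed for session trunking.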
9632 */ 9633 int nfs4_proc_create_session(struct nfs_client *clp, const struct cred *cred) 9634 { 9635 int status; 9636 unsigned *ptr; 9637 struct nfs4_session *session = clp->cl_session; 9638 struct nfs4_add_xprt_data xprtdata = { 9639 .clp = clp, 9640 }; 9641 struct rpc_add_xprt_test rpcdata = { 9642 .add_xprt_test = clp->cl_mvops->session_trunk, 9643 .data = &xprtdata, 9644 }; 9645 9646 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); 9647 9648 status = _nfs4_proc_create_session(clp, cred); 9649 if (status) 9650 goto out; 9651 9652 /* Init or reset the session slot tables */ 9653 status = nfs4_setup_session_slot_tables(session); 9654 dprintk("slot table setup returned %d\n", status); 9655 if (status) 9656 goto out; 9657 9658 ptr = (unsigned *)&session->sess_id.data[0]; 9659 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__, 9660 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]); 9661 rpc_clnt_probe_trunked_xprts(clp->cl_rpcclient, &rpcdata); 9662 out: 9663 return status; 9664 } 9665 9666 /* 9667 * Issue the over-the-wire RPC DESTROY_SESSION. 9668 * The caller must serialize access to this routine. 9669 */ 9670 int nfs4_proc_destroy_session(struct nfs4_session *session, 9671 const struct cred *cred) 9672 { 9673 struct rpc_message msg = { 9674 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION], 9675 .rpc_argp = session, 9676 .rpc_cred = cred, 9677 }; 9678 int status = 0; 9679 9680 /* session is still being setup */ 9681 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state)) 9682 return 0; 9683 9684 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 9685 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 9686 trace_nfs4_destroy_session(session->clp, status); 9687 9688 if (status) 9689 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. " 9690 "Session has been destroyed regardless...\n", status); 9691 rpc_clnt_manage_trunked_xprts(session->clp->cl_rpcclient); 9692 return status; 9693 } 9694 9695 /* 9696 * Renew the cl_session lease. 
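 *
 * On NFSv4.1 the lease is renewed by sending a SEQUENCE operation; the
 * nfs41_sequence_* helpers below implement the asynchronous RPC that the
 * state renewal machinery schedules for this purpose.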
9697 */ 9698 struct nfs4_sequence_data { 9699 struct nfs_client *clp; 9700 struct nfs4_sequence_args args; 9701 struct nfs4_sequence_res res; 9702 }; 9703 9704 static void nfs41_sequence_release(void *data) 9705 { 9706 struct nfs4_sequence_data *calldata = data; 9707 struct nfs_client *clp = calldata->clp; 9708 9709 if (refcount_read(&clp->cl_count) > 1) 9710 nfs4_schedule_state_renewal(clp); 9711 nfs_put_client(clp); 9712 kfree(calldata); 9713 } 9714 9715 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp) 9716 { 9717 switch(task->tk_status) { 9718 case -NFS4ERR_DELAY: 9719 rpc_delay(task, NFS4_POLL_RETRY_MAX); 9720 return -EAGAIN; 9721 default: 9722 nfs4_schedule_lease_recovery(clp); 9723 } 9724 return 0; 9725 } 9726 9727 static void nfs41_sequence_call_done(struct rpc_task *task, void *data) 9728 { 9729 struct nfs4_sequence_data *calldata = data; 9730 struct nfs_client *clp = calldata->clp; 9731 9732 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp)) 9733 return; 9734 9735 trace_nfs4_sequence(clp, task->tk_status); 9736 if (task->tk_status < 0 && clp->cl_cons_state >= 0) { 9737 dprintk("%s ERROR %d\n", __func__, task->tk_status); 9738 if (refcount_read(&clp->cl_count) == 1) 9739 return; 9740 9741 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) { 9742 rpc_restart_call_prepare(task); 9743 return; 9744 } 9745 } 9746 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); 9747 } 9748 9749 static void nfs41_sequence_prepare(struct rpc_task *task, void *data) 9750 { 9751 struct nfs4_sequence_data *calldata = data; 9752 struct nfs_client *clp = calldata->clp; 9753 struct nfs4_sequence_args *args; 9754 struct nfs4_sequence_res *res; 9755 9756 args = task->tk_msg.rpc_argp; 9757 res = task->tk_msg.rpc_resp; 9758 9759 nfs4_setup_sequence(clp, args, res, task); 9760 } 9761 9762 static const struct rpc_call_ops nfs41_sequence_ops = { 9763 .rpc_call_done = nfs41_sequence_call_done, 9764 .rpc_call_prepare = nfs41_sequence_prepare, 9765 .rpc_release = nfs41_sequence_release, 9766 }; 9767 9768 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, 9769 const struct cred *cred, 9770 struct nfs4_slot *slot, 9771 bool is_privileged) 9772 { 9773 struct nfs4_sequence_data *calldata; 9774 struct rpc_message msg = { 9775 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE], 9776 .rpc_cred = cred, 9777 }; 9778 struct rpc_task_setup task_setup_data = { 9779 .rpc_client = clp->cl_rpcclient, 9780 .rpc_message = &msg, 9781 .callback_ops = &nfs41_sequence_ops, 9782 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT | RPC_TASK_MOVEABLE, 9783 }; 9784 struct rpc_task *ret; 9785 9786 ret = ERR_PTR(-EIO); 9787 if (!refcount_inc_not_zero(&clp->cl_count)) 9788 goto out_err; 9789 9790 ret = ERR_PTR(-ENOMEM); 9791 calldata = kzalloc(sizeof(*calldata), GFP_KERNEL); 9792 if (calldata == NULL) 9793 goto out_put_clp; 9794 nfs4_init_sequence(&calldata->args, &calldata->res, 0, is_privileged); 9795 nfs4_sequence_attach_slot(&calldata->args, &calldata->res, slot); 9796 msg.rpc_argp = &calldata->args; 9797 msg.rpc_resp = &calldata->res; 9798 calldata->clp = clp; 9799 task_setup_data.callback_data = calldata; 9800 9801 ret = rpc_run_task(&task_setup_data); 9802 if (IS_ERR(ret)) 9803 goto out_err; 9804 return ret; 9805 out_put_clp: 9806 nfs_put_client(clp); 9807 out_err: 9808 nfs41_release_slot(slot); 9809 return ret; 9810 } 9811 9812 static int nfs41_proc_async_sequence(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags) 9813 { 9814 struct rpc_task *task; 
9815 int ret = 0; 9816 9817 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) 9818 return -EAGAIN; 9819 task = _nfs41_proc_sequence(clp, cred, NULL, false); 9820 if (IS_ERR(task)) 9821 ret = PTR_ERR(task); 9822 else 9823 rpc_put_task_async(task); 9824 dprintk("<-- %s status=%d\n", __func__, ret); 9825 return ret; 9826 } 9827 9828 static int nfs4_proc_sequence(struct nfs_client *clp, const struct cred *cred) 9829 { 9830 struct rpc_task *task; 9831 int ret; 9832 9833 task = _nfs41_proc_sequence(clp, cred, NULL, true); 9834 if (IS_ERR(task)) { 9835 ret = PTR_ERR(task); 9836 goto out; 9837 } 9838 ret = rpc_wait_for_completion_task(task); 9839 if (!ret) 9840 ret = task->tk_status; 9841 rpc_put_task(task); 9842 out: 9843 dprintk("<-- %s status=%d\n", __func__, ret); 9844 return ret; 9845 } 9846 9847 struct nfs4_reclaim_complete_data { 9848 struct nfs_client *clp; 9849 struct nfs41_reclaim_complete_args arg; 9850 struct nfs41_reclaim_complete_res res; 9851 }; 9852 9853 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data) 9854 { 9855 struct nfs4_reclaim_complete_data *calldata = data; 9856 9857 nfs4_setup_sequence(calldata->clp, 9858 &calldata->arg.seq_args, 9859 &calldata->res.seq_res, 9860 task); 9861 } 9862 9863 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp) 9864 { 9865 switch(task->tk_status) { 9866 case 0: 9867 wake_up_all(&clp->cl_lock_waitq); 9868 fallthrough; 9869 case -NFS4ERR_COMPLETE_ALREADY: 9870 case -NFS4ERR_WRONG_CRED: /* What to do here? */ 9871 break; 9872 case -NFS4ERR_DELAY: 9873 rpc_delay(task, NFS4_POLL_RETRY_MAX); 9874 fallthrough; 9875 case -NFS4ERR_RETRY_UNCACHED_REP: 9876 case -EACCES: 9877 dprintk("%s: failed to reclaim complete error %d for server %s, retrying\n", 9878 __func__, task->tk_status, clp->cl_hostname); 9879 return -EAGAIN; 9880 case -NFS4ERR_BADSESSION: 9881 case -NFS4ERR_DEADSESSION: 9882 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 9883 break; 9884 default: 9885 nfs4_schedule_lease_recovery(clp); 9886 } 9887 return 0; 9888 } 9889 9890 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data) 9891 { 9892 struct nfs4_reclaim_complete_data *calldata = data; 9893 struct nfs_client *clp = calldata->clp; 9894 struct nfs4_sequence_res *res = &calldata->res.seq_res; 9895 9896 if (!nfs41_sequence_done(task, res)) 9897 return; 9898 9899 trace_nfs4_reclaim_complete(clp, task->tk_status); 9900 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) { 9901 rpc_restart_call_prepare(task); 9902 return; 9903 } 9904 } 9905 9906 static void nfs4_free_reclaim_complete_data(void *data) 9907 { 9908 struct nfs4_reclaim_complete_data *calldata = data; 9909 9910 kfree(calldata); 9911 } 9912 9913 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = { 9914 .rpc_call_prepare = nfs4_reclaim_complete_prepare, 9915 .rpc_call_done = nfs4_reclaim_complete_done, 9916 .rpc_release = nfs4_free_reclaim_complete_data, 9917 }; 9918 9919 /* 9920 * Issue a global reclaim complete. 
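 *
 * RECLAIM_COMPLETE tells the server that the client has finished reclaiming
 * state.  one_fs is always zero here, so the completion covers the whole
 * client rather than a single filesystem.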
9921 */ 9922 static int nfs41_proc_reclaim_complete(struct nfs_client *clp, 9923 const struct cred *cred) 9924 { 9925 struct nfs4_reclaim_complete_data *calldata; 9926 struct rpc_message msg = { 9927 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE], 9928 .rpc_cred = cred, 9929 }; 9930 struct rpc_task_setup task_setup_data = { 9931 .rpc_client = clp->cl_rpcclient, 9932 .rpc_message = &msg, 9933 .callback_ops = &nfs4_reclaim_complete_call_ops, 9934 .flags = RPC_TASK_NO_ROUND_ROBIN, 9935 }; 9936 int status = -ENOMEM; 9937 9938 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 9939 if (calldata == NULL) 9940 goto out; 9941 calldata->clp = clp; 9942 calldata->arg.one_fs = 0; 9943 9944 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0, 1); 9945 msg.rpc_argp = &calldata->arg; 9946 msg.rpc_resp = &calldata->res; 9947 task_setup_data.callback_data = calldata; 9948 status = nfs4_call_sync_custom(&task_setup_data); 9949 out: 9950 dprintk("<-- %s status=%d\n", __func__, status); 9951 return status; 9952 } 9953 9954 static void 9955 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata) 9956 { 9957 struct nfs4_layoutget *lgp = calldata; 9958 struct nfs_server *server = NFS_SERVER(lgp->args.inode); 9959 9960 nfs4_setup_sequence(server->nfs_client, &lgp->args.seq_args, 9961 &lgp->res.seq_res, task); 9962 } 9963 9964 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) 9965 { 9966 struct nfs4_layoutget *lgp = calldata; 9967 9968 nfs41_sequence_process(task, &lgp->res.seq_res); 9969 } 9970 9971 static int 9972 nfs4_layoutget_handle_exception(struct rpc_task *task, 9973 struct nfs4_layoutget *lgp, struct nfs4_exception *exception) 9974 { 9975 struct inode *inode = lgp->args.inode; 9976 struct nfs_server *server = NFS_SERVER(inode); 9977 struct pnfs_layout_hdr *lo = lgp->lo; 9978 int nfs4err = task->tk_status; 9979 int err, status = 0; 9980 LIST_HEAD(head); 9981 9982 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status); 9983 9984 nfs4_sequence_free_slot(&lgp->res.seq_res); 9985 9986 exception->state = NULL; 9987 exception->stateid = NULL; 9988 9989 switch (nfs4err) { 9990 case 0: 9991 goto out; 9992 9993 /* 9994 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs 9995 * on the file. set tk_status to -ENODATA to tell upper layer to 9996 * retry go inband. 9997 */ 9998 case -NFS4ERR_LAYOUTUNAVAILABLE: 9999 status = -ENODATA; 10000 goto out; 10001 /* 10002 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of 10003 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3). 10004 */ 10005 case -NFS4ERR_BADLAYOUT: 10006 status = -EOVERFLOW; 10007 goto out; 10008 /* 10009 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client 10010 * (or clients) writing to the same RAID stripe except when 10011 * the minlength argument is 0 (see RFC5661 section 18.43.3). 10012 * 10013 * Treat it like we would RECALLCONFLICT -- we retry for a little 10014 * while, and then eventually give up. 10015 */ 10016 case -NFS4ERR_LAYOUTTRYLATER: 10017 if (lgp->args.minlength == 0) { 10018 status = -EOVERFLOW; 10019 goto out; 10020 } 10021 status = -EBUSY; 10022 break; 10023 case -NFS4ERR_RECALLCONFLICT: 10024 case -NFS4ERR_RETURNCONFLICT: 10025 status = -ERECALLCONFLICT; 10026 break; 10027 case -NFS4ERR_DELEG_REVOKED: 10028 case -NFS4ERR_ADMIN_REVOKED: 10029 case -NFS4ERR_EXPIRED: 10030 case -NFS4ERR_BAD_STATEID: 10031 exception->timeout = 0; 10032 spin_lock(&inode->i_lock); 10033 /* If the open stateid was bad, then recover it. 
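 * Otherwise the layout stateid itself has gone stale: it is marked invalid
 * below and the caller is asked to retry the LAYOUTGET.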
*/ 10034 if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) || 10035 !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) { 10036 spin_unlock(&inode->i_lock); 10037 exception->state = lgp->args.ctx->state; 10038 exception->stateid = &lgp->args.stateid; 10039 break; 10040 } 10041 10042 /* 10043 * Mark the bad layout state as invalid, then retry 10044 */ 10045 pnfs_mark_layout_stateid_invalid(lo, &head); 10046 spin_unlock(&inode->i_lock); 10047 nfs_commit_inode(inode, 0); 10048 pnfs_free_lseg_list(&head); 10049 status = -EAGAIN; 10050 goto out; 10051 } 10052 10053 err = nfs4_handle_exception(server, nfs4err, exception); 10054 if (!status) { 10055 if (exception->retry) 10056 status = -EAGAIN; 10057 else 10058 status = err; 10059 } 10060 out: 10061 return status; 10062 } 10063 10064 size_t max_response_pages(struct nfs_server *server) 10065 { 10066 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 10067 return nfs_page_array_len(0, max_resp_sz); 10068 } 10069 10070 static void nfs4_layoutget_release(void *calldata) 10071 { 10072 struct nfs4_layoutget *lgp = calldata; 10073 10074 nfs4_sequence_free_slot(&lgp->res.seq_res); 10075 pnfs_layoutget_free(lgp); 10076 } 10077 10078 static const struct rpc_call_ops nfs4_layoutget_call_ops = { 10079 .rpc_call_prepare = nfs4_layoutget_prepare, 10080 .rpc_call_done = nfs4_layoutget_done, 10081 .rpc_release = nfs4_layoutget_release, 10082 }; 10083 10084 struct pnfs_layout_segment * 10085 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, 10086 struct nfs4_exception *exception) 10087 { 10088 struct inode *inode = lgp->args.inode; 10089 struct nfs_server *server = NFS_SERVER(inode); 10090 struct rpc_task *task; 10091 struct rpc_message msg = { 10092 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET], 10093 .rpc_argp = &lgp->args, 10094 .rpc_resp = &lgp->res, 10095 .rpc_cred = lgp->cred, 10096 }; 10097 struct rpc_task_setup task_setup_data = { 10098 .rpc_client = server->client, 10099 .rpc_message = &msg, 10100 .callback_ops = &nfs4_layoutget_call_ops, 10101 .callback_data = lgp, 10102 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF | 10103 RPC_TASK_MOVEABLE, 10104 }; 10105 struct pnfs_layout_segment *lseg = NULL; 10106 int status = 0; 10107 10108 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0); 10109 exception->retry = 0; 10110 10111 task = rpc_run_task(&task_setup_data); 10112 if (IS_ERR(task)) 10113 return ERR_CAST(task); 10114 10115 status = rpc_wait_for_completion_task(task); 10116 if (status != 0) 10117 goto out; 10118 10119 if (task->tk_status < 0) { 10120 exception->retry = 1; 10121 status = nfs4_layoutget_handle_exception(task, lgp, exception); 10122 } else if (lgp->res.layoutp->len == 0) { 10123 exception->retry = 1; 10124 status = -EAGAIN; 10125 nfs4_update_delay(&exception->timeout); 10126 } else 10127 lseg = pnfs_layout_process(lgp); 10128 out: 10129 trace_nfs4_layoutget(lgp->args.ctx, 10130 &lgp->args.range, 10131 &lgp->res.range, 10132 &lgp->res.stateid, 10133 status); 10134 10135 rpc_put_task(task); 10136 dprintk("<-- %s status=%d\n", __func__, status); 10137 if (status) 10138 return ERR_PTR(status); 10139 return lseg; 10140 } 10141 10142 static void 10143 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) 10144 { 10145 struct nfs4_layoutreturn *lrp = calldata; 10146 10147 nfs4_setup_sequence(lrp->clp, 10148 &lrp->args.seq_args, 10149 &lrp->res.seq_res, 10150 task); 10151 if (!pnfs_layout_is_valid(lrp->args.layout)) 10152 rpc_exit(task, 0); 10153 } 10154 10155 static void 
nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) 10156 { 10157 struct nfs4_layoutreturn *lrp = calldata; 10158 struct nfs_server *server; 10159 10160 if (!nfs41_sequence_process(task, &lrp->res.seq_res)) 10161 return; 10162 10163 if (task->tk_rpc_status == -ETIMEDOUT) { 10164 lrp->rpc_status = -EAGAIN; 10165 lrp->res.lrs_present = 0; 10166 return; 10167 } 10168 /* 10169 * Was there an RPC level error? Assume the call succeeded, 10170 * and that we need to release the layout 10171 */ 10172 if (task->tk_rpc_status != 0 && RPC_WAS_SENT(task)) { 10173 lrp->res.lrs_present = 0; 10174 return; 10175 } 10176 10177 server = NFS_SERVER(lrp->args.inode); 10178 switch (task->tk_status) { 10179 case -NFS4ERR_OLD_STATEID: 10180 if (nfs4_layout_refresh_old_stateid(&lrp->args.stateid, 10181 &lrp->args.range, 10182 lrp->args.inode)) 10183 goto out_restart; 10184 fallthrough; 10185 default: 10186 task->tk_status = 0; 10187 lrp->res.lrs_present = 0; 10188 fallthrough; 10189 case 0: 10190 break; 10191 case -NFS4ERR_BADSESSION: 10192 case -NFS4ERR_DEADSESSION: 10193 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 10194 nfs4_schedule_session_recovery(server->nfs_client->cl_session, 10195 task->tk_status); 10196 lrp->res.lrs_present = 0; 10197 lrp->rpc_status = -EAGAIN; 10198 task->tk_status = 0; 10199 break; 10200 case -NFS4ERR_DELAY: 10201 if (nfs4_async_handle_error(task, server, NULL, NULL) == 10202 -EAGAIN) 10203 goto out_restart; 10204 lrp->res.lrs_present = 0; 10205 break; 10206 } 10207 return; 10208 out_restart: 10209 task->tk_status = 0; 10210 nfs4_sequence_free_slot(&lrp->res.seq_res); 10211 rpc_restart_call_prepare(task); 10212 } 10213 10214 static void nfs4_layoutreturn_release(void *calldata) 10215 { 10216 struct nfs4_layoutreturn *lrp = calldata; 10217 struct pnfs_layout_hdr *lo = lrp->args.layout; 10218 10219 if (lrp->rpc_status == 0 || !lrp->inode) 10220 pnfs_layoutreturn_free_lsegs( 10221 lo, &lrp->args.stateid, &lrp->args.range, 10222 lrp->res.lrs_present ? 
&lrp->res.stateid : NULL); 10223 else 10224 pnfs_layoutreturn_retry_later(lo, &lrp->args.stateid, 10225 &lrp->args.range); 10226 nfs4_sequence_free_slot(&lrp->res.seq_res); 10227 if (lrp->ld_private.ops && lrp->ld_private.ops->free) 10228 lrp->ld_private.ops->free(&lrp->ld_private); 10229 pnfs_put_layout_hdr(lrp->args.layout); 10230 nfs_iput_and_deactive(lrp->inode); 10231 put_cred(lrp->cred); 10232 kfree(calldata); 10233 } 10234 10235 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = { 10236 .rpc_call_prepare = nfs4_layoutreturn_prepare, 10237 .rpc_call_done = nfs4_layoutreturn_done, 10238 .rpc_release = nfs4_layoutreturn_release, 10239 }; 10240 10241 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, unsigned int flags) 10242 { 10243 struct rpc_task *task; 10244 struct rpc_message msg = { 10245 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN], 10246 .rpc_argp = &lrp->args, 10247 .rpc_resp = &lrp->res, 10248 .rpc_cred = lrp->cred, 10249 }; 10250 struct rpc_task_setup task_setup_data = { 10251 .rpc_client = NFS_SERVER(lrp->args.inode)->client, 10252 .rpc_message = &msg, 10253 .callback_ops = &nfs4_layoutreturn_call_ops, 10254 .callback_data = lrp, 10255 .flags = RPC_TASK_MOVEABLE, 10256 }; 10257 int status = 0; 10258 10259 nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client, 10260 NFS_SP4_MACH_CRED_PNFS_CLEANUP, 10261 &task_setup_data.rpc_client, &msg); 10262 10263 lrp->inode = nfs_igrab_and_active(lrp->args.inode); 10264 if (flags & PNFS_FL_LAYOUTRETURN_ASYNC) { 10265 if (!lrp->inode) { 10266 nfs4_layoutreturn_release(lrp); 10267 return -EAGAIN; 10268 } 10269 task_setup_data.flags |= RPC_TASK_ASYNC; 10270 } 10271 if (!lrp->inode) 10272 flags |= PNFS_FL_LAYOUTRETURN_PRIVILEGED; 10273 if (flags & PNFS_FL_LAYOUTRETURN_PRIVILEGED) 10274 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1, 10275 1); 10276 else 10277 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1, 10278 0); 10279 task = rpc_run_task(&task_setup_data); 10280 if (IS_ERR(task)) 10281 return PTR_ERR(task); 10282 if (!(flags & PNFS_FL_LAYOUTRETURN_ASYNC)) 10283 status = task->tk_status; 10284 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status); 10285 dprintk("<-- %s status=%d\n", __func__, status); 10286 rpc_put_task(task); 10287 return status; 10288 } 10289 10290 static int 10291 _nfs4_proc_getdeviceinfo(struct nfs_server *server, 10292 struct pnfs_device *pdev, 10293 const struct cred *cred) 10294 { 10295 struct nfs4_getdeviceinfo_args args = { 10296 .pdev = pdev, 10297 .notify_types = NOTIFY_DEVICEID4_CHANGE | 10298 NOTIFY_DEVICEID4_DELETE, 10299 }; 10300 struct nfs4_getdeviceinfo_res res = { 10301 .pdev = pdev, 10302 }; 10303 struct rpc_message msg = { 10304 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO], 10305 .rpc_argp = &args, 10306 .rpc_resp = &res, 10307 .rpc_cred = cred, 10308 }; 10309 int status; 10310 10311 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 10312 if (res.notification & ~args.notify_types) 10313 dprintk("%s: unsupported notification\n", __func__); 10314 if (res.notification != args.notify_types) 10315 pdev->nocache = 1; 10316 10317 trace_nfs4_getdeviceinfo(server, &pdev->dev_id, status); 10318 10319 dprintk("<-- %s status=%d\n", __func__, status); 10320 10321 return status; 10322 } 10323 10324 int nfs4_proc_getdeviceinfo(struct nfs_server *server, 10325 struct pnfs_device *pdev, 10326 const struct cred *cred) 10327 { 10328 struct nfs4_exception exception = { }; 10329 int err; 10330 
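	/* Retry GETDEVICEINFO until nfs4_handle_exception() stops asking for a retry */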
10331 do { 10332 err = nfs4_handle_exception(server, 10333 _nfs4_proc_getdeviceinfo(server, pdev, cred), 10334 &exception); 10335 } while (exception.retry); 10336 return err; 10337 } 10338 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo); 10339 10340 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata) 10341 { 10342 struct nfs4_layoutcommit_data *data = calldata; 10343 struct nfs_server *server = NFS_SERVER(data->args.inode); 10344 10345 nfs4_setup_sequence(server->nfs_client, 10346 &data->args.seq_args, 10347 &data->res.seq_res, 10348 task); 10349 } 10350 10351 static void 10352 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata) 10353 { 10354 struct nfs4_layoutcommit_data *data = calldata; 10355 struct nfs_server *server = NFS_SERVER(data->args.inode); 10356 10357 if (!nfs41_sequence_done(task, &data->res.seq_res)) 10358 return; 10359 10360 switch (task->tk_status) { /* Just ignore these failures */ 10361 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */ 10362 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */ 10363 case -NFS4ERR_BADLAYOUT: /* no layout */ 10364 case -NFS4ERR_GRACE: /* loca_recalim always false */ 10365 task->tk_status = 0; 10366 break; 10367 case 0: 10368 break; 10369 default: 10370 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) { 10371 rpc_restart_call_prepare(task); 10372 return; 10373 } 10374 } 10375 } 10376 10377 static void nfs4_layoutcommit_release(void *calldata) 10378 { 10379 struct nfs4_layoutcommit_data *data = calldata; 10380 10381 pnfs_cleanup_layoutcommit(data); 10382 nfs_post_op_update_inode_force_wcc(data->args.inode, 10383 data->res.fattr); 10384 put_cred(data->cred); 10385 nfs_iput_and_deactive(data->inode); 10386 kfree(data); 10387 } 10388 10389 static const struct rpc_call_ops nfs4_layoutcommit_ops = { 10390 .rpc_call_prepare = nfs4_layoutcommit_prepare, 10391 .rpc_call_done = nfs4_layoutcommit_done, 10392 .rpc_release = nfs4_layoutcommit_release, 10393 }; 10394 10395 int 10396 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync) 10397 { 10398 struct rpc_message msg = { 10399 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT], 10400 .rpc_argp = &data->args, 10401 .rpc_resp = &data->res, 10402 .rpc_cred = data->cred, 10403 }; 10404 struct rpc_task_setup task_setup_data = { 10405 .task = &data->task, 10406 .rpc_client = NFS_CLIENT(data->args.inode), 10407 .rpc_message = &msg, 10408 .callback_ops = &nfs4_layoutcommit_ops, 10409 .callback_data = data, 10410 .flags = RPC_TASK_MOVEABLE, 10411 }; 10412 struct rpc_task *task; 10413 int status = 0; 10414 10415 dprintk("NFS: initiating layoutcommit call. 
sync %d " 10416 "lbw: %llu inode %lu\n", sync, 10417 data->args.lastbytewritten, 10418 data->args.inode->i_ino); 10419 10420 if (!sync) { 10421 data->inode = nfs_igrab_and_active(data->args.inode); 10422 if (data->inode == NULL) { 10423 nfs4_layoutcommit_release(data); 10424 return -EAGAIN; 10425 } 10426 task_setup_data.flags = RPC_TASK_ASYNC; 10427 } 10428 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0); 10429 task = rpc_run_task(&task_setup_data); 10430 if (IS_ERR(task)) 10431 return PTR_ERR(task); 10432 if (sync) 10433 status = task->tk_status; 10434 trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status); 10435 dprintk("%s: status %d\n", __func__, status); 10436 rpc_put_task(task); 10437 return status; 10438 } 10439 10440 /* 10441 * Use the state management nfs_client cl_rpcclient, which uses krb5i (if 10442 * possible) as per RFC3530bis and RFC5661 Security Considerations sections 10443 */ 10444 static int _nfs41_proc_secinfo_no_name(struct nfs_server *server, 10445 struct nfs_fh *fhandle, 10446 struct nfs4_secinfo_flavors *flavors, 10447 bool use_integrity) 10448 { 10449 struct nfs41_secinfo_no_name_args args = { 10450 .style = SECINFO_STYLE_CURRENT_FH, 10451 }; 10452 struct nfs4_secinfo_res res = { 10453 .flavors = flavors, 10454 }; 10455 struct rpc_message msg = { 10456 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME], 10457 .rpc_argp = &args, 10458 .rpc_resp = &res, 10459 }; 10460 struct nfs4_call_sync_data data = { 10461 .seq_server = server, 10462 .seq_args = &args.seq_args, 10463 .seq_res = &res.seq_res, 10464 }; 10465 struct rpc_task_setup task_setup = { 10466 .rpc_client = server->client, 10467 .rpc_message = &msg, 10468 .callback_ops = server->nfs_client->cl_mvops->call_sync_ops, 10469 .callback_data = &data, 10470 .flags = RPC_TASK_NO_ROUND_ROBIN, 10471 }; 10472 const struct cred *cred = NULL; 10473 int status; 10474 10475 if (use_integrity) { 10476 task_setup.rpc_client = server->nfs_client->cl_rpcclient; 10477 10478 cred = nfs4_get_clid_cred(server->nfs_client); 10479 msg.rpc_cred = cred; 10480 } 10481 10482 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 10483 status = nfs4_call_sync_custom(&task_setup); 10484 dprintk("<-- %s status=%d\n", __func__, status); 10485 10486 put_cred(cred); 10487 10488 return status; 10489 } 10490 10491 static int nfs41_proc_secinfo_no_name(struct nfs_server *server, 10492 struct nfs_fh *fhandle, 10493 struct nfs4_secinfo_flavors *flavors) 10494 { 10495 struct nfs4_exception exception = { 10496 .interruptible = true, 10497 }; 10498 int err; 10499 do { 10500 /* first try using integrity protection */ 10501 err = -NFS4ERR_WRONGSEC; 10502 10503 /* try to use integrity protection with machine cred */ 10504 if (_nfs4_is_integrity_protected(server->nfs_client)) 10505 err = _nfs41_proc_secinfo_no_name(server, fhandle, 10506 flavors, true); 10507 10508 /* 10509 * if unable to use integrity protection, or SECINFO with 10510 * integrity protection returns NFS4ERR_WRONGSEC (which is 10511 * disallowed by spec, but exists in deployed servers) use 10512 * the current filesystem's rpc_client and the user cred.
10513 */ 10514 if (err == -NFS4ERR_WRONGSEC) 10515 err = _nfs41_proc_secinfo_no_name(server, fhandle, 10516 flavors, false); 10517 10518 switch (err) { 10519 case 0: 10520 case -NFS4ERR_WRONGSEC: 10521 case -ENOTSUPP: 10522 goto out; 10523 default: 10524 err = nfs4_handle_exception(server, err, &exception); 10525 } 10526 } while (exception.retry); 10527 out: 10528 return err; 10529 } 10530 10531 static int nfs41_find_root_sec(struct nfs_server *server, 10532 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 10533 { 10534 int err; 10535 struct page *page; 10536 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR; 10537 struct nfs4_secinfo_flavors *flavors; 10538 struct nfs4_secinfo4 *secinfo; 10539 int i; 10540 10541 page = alloc_page(GFP_KERNEL); 10542 if (!page) { 10543 err = -ENOMEM; 10544 goto out; 10545 } 10546 10547 flavors = page_address(page); 10548 err = nfs41_proc_secinfo_no_name(server, fhandle, flavors); 10549 10550 /* 10551 * Fall back on "guess and check" method if 10552 * the server doesn't support SECINFO_NO_NAME 10553 */ 10554 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) { 10555 err = nfs4_find_root_sec(server, fhandle, fattr); 10556 goto out_freepage; 10557 } 10558 if (err) 10559 goto out_freepage; 10560 10561 for (i = 0; i < flavors->num_flavors; i++) { 10562 secinfo = &flavors->flavors[i]; 10563 10564 switch (secinfo->flavor) { 10565 case RPC_AUTH_NULL: 10566 case RPC_AUTH_UNIX: 10567 case RPC_AUTH_GSS: 10568 flavor = rpcauth_get_pseudoflavor(secinfo->flavor, 10569 &secinfo->flavor_info); 10570 break; 10571 default: 10572 flavor = RPC_AUTH_MAXFLAVOR; 10573 break; 10574 } 10575 10576 if (!nfs_auth_info_match(&server->auth_info, flavor)) 10577 flavor = RPC_AUTH_MAXFLAVOR; 10578 10579 if (flavor != RPC_AUTH_MAXFLAVOR) { 10580 err = nfs4_lookup_root_sec(server, fhandle, fattr, 10581 flavor); 10582 if (!err) 10583 break; 10584 } 10585 } 10586 10587 if (flavor == RPC_AUTH_MAXFLAVOR) 10588 err = -EPERM; 10589 10590 out_freepage: 10591 put_page(page); 10592 if (err == -EACCES) 10593 return -EPERM; 10594 out: 10595 return err; 10596 } 10597 10598 static int _nfs41_test_stateid(struct nfs_server *server, 10599 const nfs4_stateid *stateid, 10600 const struct cred *cred) 10601 { 10602 int status; 10603 struct nfs41_test_stateid_args args = { 10604 .stateid = *stateid, 10605 }; 10606 struct nfs41_test_stateid_res res; 10607 struct rpc_message msg = { 10608 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID], 10609 .rpc_argp = &args, 10610 .rpc_resp = &res, 10611 .rpc_cred = cred, 10612 }; 10613 struct rpc_clnt *rpc_client = server->client; 10614 10615 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 10616 &rpc_client, &msg); 10617 10618 dprintk("NFS call test_stateid %p\n", stateid); 10619 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 10620 status = nfs4_call_sync_sequence(rpc_client, server, &msg, 10621 &args.seq_args, &res.seq_res); 10622 if (status != NFS_OK) { 10623 dprintk("NFS reply test_stateid: failed, %d\n", status); 10624 return status; 10625 } 10626 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status); 10627 return -res.status; 10628 } 10629 10630 static void nfs4_handle_delay_or_session_error(struct nfs_server *server, 10631 int err, struct nfs4_exception *exception) 10632 { 10633 exception->retry = 0; 10634 switch(err) { 10635 case -NFS4ERR_DELAY: 10636 case -NFS4ERR_RETRY_UNCACHED_REP: 10637 nfs4_handle_exception(server, err, exception); 10638 break; 10639 case -NFS4ERR_BADSESSION: 10640 case -NFS4ERR_BADSLOT: 10641 case 
-NFS4ERR_BAD_HIGH_SLOT: 10642 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 10643 case -NFS4ERR_DEADSESSION: 10644 nfs4_do_handle_exception(server, err, exception); 10645 } 10646 } 10647 10648 /** 10649 * nfs41_test_stateid - perform a TEST_STATEID operation 10650 * 10651 * @server: server / transport on which to perform the operation 10652 * @stateid: state ID to test 10653 * @cred: credential 10654 * 10655 * Returns NFS_OK if the server recognizes that "stateid" is valid. 10656 * Otherwise a negative NFS4ERR value is returned if the operation 10657 * failed or the state ID is not currently valid. 10658 */ 10659 static int nfs41_test_stateid(struct nfs_server *server, 10660 const nfs4_stateid *stateid, 10661 const struct cred *cred) 10662 { 10663 struct nfs4_exception exception = { 10664 .interruptible = true, 10665 }; 10666 int err; 10667 do { 10668 err = _nfs41_test_stateid(server, stateid, cred); 10669 nfs4_handle_delay_or_session_error(server, err, &exception); 10670 } while (exception.retry); 10671 return err; 10672 } 10673 10674 struct nfs_free_stateid_data { 10675 struct nfs_server *server; 10676 struct nfs41_free_stateid_args args; 10677 struct nfs41_free_stateid_res res; 10678 }; 10679 10680 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata) 10681 { 10682 struct nfs_free_stateid_data *data = calldata; 10683 nfs4_setup_sequence(data->server->nfs_client, 10684 &data->args.seq_args, 10685 &data->res.seq_res, 10686 task); 10687 } 10688 10689 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata) 10690 { 10691 struct nfs_free_stateid_data *data = calldata; 10692 10693 nfs41_sequence_done(task, &data->res.seq_res); 10694 10695 switch (task->tk_status) { 10696 case -NFS4ERR_DELAY: 10697 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN) 10698 rpc_restart_call_prepare(task); 10699 } 10700 } 10701 10702 static void nfs41_free_stateid_release(void *calldata) 10703 { 10704 struct nfs_free_stateid_data *data = calldata; 10705 struct nfs_client *clp = data->server->nfs_client; 10706 10707 nfs_put_client(clp); 10708 kfree(calldata); 10709 } 10710 10711 static const struct rpc_call_ops nfs41_free_stateid_ops = { 10712 .rpc_call_prepare = nfs41_free_stateid_prepare, 10713 .rpc_call_done = nfs41_free_stateid_done, 10714 .rpc_release = nfs41_free_stateid_release, 10715 }; 10716 10717 /** 10718 * nfs41_free_stateid - perform a FREE_STATEID operation 10719 * 10720 * @server: server / transport on which to perform the operation 10721 * @stateid: state ID to release 10722 * @cred: credential 10723 * @privileged: set to true if this call needs to be privileged 10724 * 10725 * Note: this function is always asynchronous. 
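 * The reply is not waited for: the stateid is marked
 * NFS4_FREED_STATEID_TYPE as soon as the task has been launched.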
10726 */ 10727 static int nfs41_free_stateid(struct nfs_server *server, 10728 nfs4_stateid *stateid, 10729 const struct cred *cred, 10730 bool privileged) 10731 { 10732 struct rpc_message msg = { 10733 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID], 10734 .rpc_cred = cred, 10735 }; 10736 struct rpc_task_setup task_setup = { 10737 .rpc_client = server->client, 10738 .rpc_message = &msg, 10739 .callback_ops = &nfs41_free_stateid_ops, 10740 .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE, 10741 }; 10742 struct nfs_free_stateid_data *data; 10743 struct rpc_task *task; 10744 struct nfs_client *clp = server->nfs_client; 10745 10746 if (!refcount_inc_not_zero(&clp->cl_count)) 10747 return -EIO; 10748 10749 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 10750 &task_setup.rpc_client, &msg); 10751 10752 dprintk("NFS call free_stateid %p\n", stateid); 10753 data = kmalloc(sizeof(*data), GFP_KERNEL); 10754 if (!data) 10755 return -ENOMEM; 10756 data->server = server; 10757 nfs4_stateid_copy(&data->args.stateid, stateid); 10758 10759 task_setup.callback_data = data; 10760 10761 msg.rpc_argp = &data->args; 10762 msg.rpc_resp = &data->res; 10763 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, privileged); 10764 task = rpc_run_task(&task_setup); 10765 if (IS_ERR(task)) 10766 return PTR_ERR(task); 10767 rpc_put_task(task); 10768 stateid->type = NFS4_FREED_STATEID_TYPE; 10769 return 0; 10770 } 10771 10772 static void 10773 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp) 10774 { 10775 const struct cred *cred = lsp->ls_state->owner->so_cred; 10776 10777 nfs41_free_stateid(server, &lsp->ls_stateid, cred, false); 10778 nfs4_free_lock_state(server, lsp); 10779 } 10780 10781 static bool nfs41_match_stateid(const nfs4_stateid *s1, 10782 const nfs4_stateid *s2) 10783 { 10784 trace_nfs41_match_stateid(s1, s2); 10785 10786 if (s1->type != s2->type) 10787 return false; 10788 10789 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0) 10790 return false; 10791 10792 if (s1->seqid == s2->seqid) 10793 return true; 10794 10795 return s1->seqid == 0 || s2->seqid == 0; 10796 } 10797 10798 #endif /* CONFIG_NFS_V4_1 */ 10799 10800 static bool nfs4_match_stateid(const nfs4_stateid *s1, 10801 const nfs4_stateid *s2) 10802 { 10803 trace_nfs4_match_stateid(s1, s2); 10804 10805 return nfs4_stateid_match(s1, s2); 10806 } 10807 10808 10809 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = { 10810 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 10811 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 10812 .recover_open = nfs4_open_reclaim, 10813 .recover_lock = nfs4_lock_reclaim, 10814 .establish_clid = nfs4_init_clientid, 10815 .detect_trunking = nfs40_discover_server_trunking, 10816 }; 10817 10818 #if defined(CONFIG_NFS_V4_1) 10819 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { 10820 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 10821 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 10822 .recover_open = nfs4_open_reclaim, 10823 .recover_lock = nfs4_lock_reclaim, 10824 .establish_clid = nfs41_init_clientid, 10825 .reclaim_complete = nfs41_proc_reclaim_complete, 10826 .detect_trunking = nfs41_discover_server_trunking, 10827 }; 10828 #endif /* CONFIG_NFS_V4_1 */ 10829 10830 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { 10831 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 10832 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 10833 .recover_open = nfs40_open_expired, 10834 .recover_lock = nfs4_lock_expired, 
10835 .establish_clid = nfs4_init_clientid, 10836 }; 10837 10838 #if defined(CONFIG_NFS_V4_1) 10839 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = { 10840 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 10841 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 10842 .recover_open = nfs41_open_expired, 10843 .recover_lock = nfs41_lock_expired, 10844 .establish_clid = nfs41_init_clientid, 10845 }; 10846 #endif /* CONFIG_NFS_V4_1 */ 10847 10848 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = { 10849 .sched_state_renewal = nfs4_proc_async_renew, 10850 .get_state_renewal_cred = nfs4_get_renew_cred, 10851 .renew_lease = nfs4_proc_renew, 10852 }; 10853 10854 #if defined(CONFIG_NFS_V4_1) 10855 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = { 10856 .sched_state_renewal = nfs41_proc_async_sequence, 10857 .get_state_renewal_cred = nfs4_get_machine_cred, 10858 .renew_lease = nfs4_proc_sequence, 10859 }; 10860 #endif 10861 10862 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = { 10863 .get_locations = _nfs40_proc_get_locations, 10864 .fsid_present = _nfs40_proc_fsid_present, 10865 }; 10866 10867 #if defined(CONFIG_NFS_V4_1) 10868 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = { 10869 .get_locations = _nfs41_proc_get_locations, 10870 .fsid_present = _nfs41_proc_fsid_present, 10871 }; 10872 #endif /* CONFIG_NFS_V4_1 */ 10873 10874 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = { 10875 .minor_version = 0, 10876 .init_caps = NFS_CAP_READDIRPLUS 10877 | NFS_CAP_ATOMIC_OPEN 10878 | NFS_CAP_POSIX_LOCK, 10879 .init_client = nfs40_init_client, 10880 .shutdown_client = nfs40_shutdown_client, 10881 .match_stateid = nfs4_match_stateid, 10882 .find_root_sec = nfs4_find_root_sec, 10883 .free_lock_state = nfs4_release_lockowner, 10884 .test_and_free_expired = nfs40_test_and_free_expired_stateid, 10885 .alloc_seqid = nfs_alloc_seqid, 10886 .call_sync_ops = &nfs40_call_sync_ops, 10887 .reboot_recovery_ops = &nfs40_reboot_recovery_ops, 10888 .nograce_recovery_ops = &nfs40_nograce_recovery_ops, 10889 .state_renewal_ops = &nfs40_state_renewal_ops, 10890 .mig_recovery_ops = &nfs40_mig_recovery_ops, 10891 }; 10892 10893 #if defined(CONFIG_NFS_V4_1) 10894 static struct nfs_seqid * 10895 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2) 10896 { 10897 return NULL; 10898 } 10899 10900 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { 10901 .minor_version = 1, 10902 .init_caps = NFS_CAP_READDIRPLUS 10903 | NFS_CAP_ATOMIC_OPEN 10904 | NFS_CAP_DIR_DELEG 10905 | NFS_CAP_POSIX_LOCK 10906 | NFS_CAP_STATEID_NFSV41 10907 | NFS_CAP_ATOMIC_OPEN_V1 10908 | NFS_CAP_LGOPEN 10909 | NFS_CAP_MOVEABLE, 10910 .init_client = nfs41_init_client, 10911 .shutdown_client = nfs41_shutdown_client, 10912 .match_stateid = nfs41_match_stateid, 10913 .find_root_sec = nfs41_find_root_sec, 10914 .free_lock_state = nfs41_free_lock_state, 10915 .test_and_free_expired = nfs41_test_and_free_expired_stateid, 10916 .alloc_seqid = nfs_alloc_no_seqid, 10917 .session_trunk = nfs4_test_session_trunk, 10918 .call_sync_ops = &nfs41_call_sync_ops, 10919 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 10920 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 10921 .state_renewal_ops = &nfs41_state_renewal_ops, 10922 .mig_recovery_ops = &nfs41_mig_recovery_ops, 10923 }; 10924 #endif 10925 10926 #if defined(CONFIG_NFS_V4_2) 10927 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = { 10928 .minor_version = 2, 
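	/* NFSv4.2 reuses the v4.1 session and state machinery; the capability
	 * bits below add the optional v4.2 operations on top of it. */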
10929 .init_caps = NFS_CAP_READDIRPLUS 10930 | NFS_CAP_ATOMIC_OPEN 10931 | NFS_CAP_DIR_DELEG 10932 | NFS_CAP_POSIX_LOCK 10933 | NFS_CAP_STATEID_NFSV41 10934 | NFS_CAP_ATOMIC_OPEN_V1 10935 | NFS_CAP_LGOPEN 10936 | NFS_CAP_ALLOCATE 10937 | NFS_CAP_COPY 10938 | NFS_CAP_OFFLOAD_CANCEL 10939 | NFS_CAP_COPY_NOTIFY 10940 | NFS_CAP_DEALLOCATE 10941 | NFS_CAP_ZERO_RANGE 10942 | NFS_CAP_SEEK 10943 | NFS_CAP_LAYOUTSTATS 10944 | NFS_CAP_CLONE 10945 | NFS_CAP_LAYOUTERROR 10946 | NFS_CAP_READ_PLUS 10947 | NFS_CAP_MOVEABLE 10948 | NFS_CAP_OFFLOAD_STATUS, 10949 .init_client = nfs41_init_client, 10950 .shutdown_client = nfs41_shutdown_client, 10951 .match_stateid = nfs41_match_stateid, 10952 .find_root_sec = nfs41_find_root_sec, 10953 .free_lock_state = nfs41_free_lock_state, 10954 .call_sync_ops = &nfs41_call_sync_ops, 10955 .test_and_free_expired = nfs41_test_and_free_expired_stateid, 10956 .alloc_seqid = nfs_alloc_no_seqid, 10957 .session_trunk = nfs4_test_session_trunk, 10958 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 10959 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 10960 .state_renewal_ops = &nfs41_state_renewal_ops, 10961 .mig_recovery_ops = &nfs41_mig_recovery_ops, 10962 }; 10963 #endif 10964 10965 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = { 10966 [0] = &nfs_v4_0_minor_ops, 10967 #if defined(CONFIG_NFS_V4_1) 10968 [1] = &nfs_v4_1_minor_ops, 10969 #endif 10970 #if defined(CONFIG_NFS_V4_2) 10971 [2] = &nfs_v4_2_minor_ops, 10972 #endif 10973 }; 10974 10975 static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size) 10976 { 10977 ssize_t error, error2, error3; 10978 size_t left = size; 10979 10980 error = generic_listxattr(dentry, list, left); 10981 if (error < 0) 10982 return error; 10983 if (list) { 10984 list += error; 10985 left -= error; 10986 } 10987 10988 error2 = security_inode_listsecurity(d_inode(dentry), list, left); 10989 if (error2 < 0) 10990 return error2; 10991 if (list) { 10992 list += error2; 10993 left -= error2; 10994 } 10995 10996 error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, left); 10997 if (error3 < 0) 10998 return error3; 10999 11000 error += error2 + error3; 11001 if (size && error > size) 11002 return -ERANGE; 11003 return error; 11004 } 11005 11006 static void nfs4_enable_swap(struct inode *inode) 11007 { 11008 /* The state manager thread must always be running. 11009 * It will notice the client is a swapper, and stay put. 11010 */ 11011 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 11012 11013 nfs4_schedule_state_manager(clp); 11014 } 11015 11016 static void nfs4_disable_swap(struct inode *inode) 11017 { 11018 /* The state manager thread will now exit once it is 11019 * woken. 
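	 * Clearing NFS4CLNT_MANAGER_AVAILABLE below is what allows the thread
	 * to terminate instead of staying resident.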
11020 */ 11021 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 11022 11023 set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state); 11024 clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state); 11025 wake_up_var(&clp->cl_state); 11026 } 11027 11028 static const struct inode_operations nfs4_dir_inode_operations = { 11029 .create = nfs_create, 11030 .lookup = nfs_lookup, 11031 .atomic_open = nfs_atomic_open, 11032 .link = nfs_link, 11033 .unlink = nfs_unlink, 11034 .symlink = nfs_symlink, 11035 .mkdir = nfs_mkdir, 11036 .rmdir = nfs_rmdir, 11037 .mknod = nfs_mknod, 11038 .rename = nfs_rename, 11039 .permission = nfs_permission, 11040 .getattr = nfs_getattr, 11041 .setattr = nfs_setattr, 11042 .listxattr = nfs4_listxattr, 11043 }; 11044 11045 static const struct inode_operations nfs4_file_inode_operations = { 11046 .permission = nfs_permission, 11047 .getattr = nfs_getattr, 11048 .setattr = nfs_setattr, 11049 .listxattr = nfs4_listxattr, 11050 }; 11051 11052 static struct nfs_server *nfs4_clone_server(struct nfs_server *source, 11053 struct nfs_fh *fh, struct nfs_fattr *fattr, 11054 rpc_authflavor_t flavor) 11055 { 11056 struct nfs_server *server; 11057 int error; 11058 11059 server = nfs_clone_server(source, fh, fattr, flavor); 11060 if (IS_ERR(server)) 11061 return server; 11062 11063 error = nfs4_delegation_hash_alloc(server); 11064 if (error) { 11065 nfs_free_server(server); 11066 return ERR_PTR(error); 11067 } 11068 11069 return server; 11070 } 11071 11072 const struct nfs_rpc_ops nfs_v4_clientops = { 11073 .version = 4, /* protocol version */ 11074 .dentry_ops = &nfs4_dentry_operations, 11075 .dir_inode_ops = &nfs4_dir_inode_operations, 11076 .file_inode_ops = &nfs4_file_inode_operations, 11077 .file_ops = &nfs4_file_operations, 11078 .getroot = nfs4_proc_get_root, 11079 .submount = nfs4_submount, 11080 .try_get_tree = nfs4_try_get_tree, 11081 .getattr = nfs4_proc_getattr, 11082 .setattr = nfs4_proc_setattr, 11083 .lookup = nfs4_proc_lookup, 11084 .lookupp = nfs4_proc_lookupp, 11085 .access = nfs4_proc_access, 11086 .readlink = nfs4_proc_readlink, 11087 .create = nfs4_proc_create, 11088 .remove = nfs4_proc_remove, 11089 .unlink_setup = nfs4_proc_unlink_setup, 11090 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare, 11091 .unlink_done = nfs4_proc_unlink_done, 11092 .rename_setup = nfs4_proc_rename_setup, 11093 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare, 11094 .rename_done = nfs4_proc_rename_done, 11095 .link = nfs4_proc_link, 11096 .symlink = nfs4_proc_symlink, 11097 .mkdir = nfs4_proc_mkdir, 11098 .rmdir = nfs4_proc_rmdir, 11099 .readdir = nfs4_proc_readdir, 11100 .mknod = nfs4_proc_mknod, 11101 .statfs = nfs4_proc_statfs, 11102 .fsinfo = nfs4_proc_fsinfo, 11103 .pathconf = nfs4_proc_pathconf, 11104 .set_capabilities = nfs4_server_capabilities, 11105 .decode_dirent = nfs4_decode_dirent, 11106 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare, 11107 .read_setup = nfs4_proc_read_setup, 11108 .read_done = nfs4_read_done, 11109 .write_setup = nfs4_proc_write_setup, 11110 .write_done = nfs4_write_done, 11111 .commit_setup = nfs4_proc_commit_setup, 11112 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare, 11113 .commit_done = nfs4_commit_done, 11114 .lock = nfs4_proc_lock, 11115 .clear_acl_cache = nfs4_zap_acl_attr, 11116 .close_context = nfs4_close_context, 11117 .open_context = nfs4_atomic_open, 11118 .have_delegation = nfs4_have_delegation, 11119 .return_delegation = nfs4_inode_return_delegation, 11120 .alloc_client = nfs4_alloc_client, 11121 .init_client = nfs4_init_client, 11122 
.free_client = nfs4_free_client, 11123 .create_server = nfs4_create_server, 11124 .clone_server = nfs4_clone_server, 11125 .discover_trunking = nfs4_discover_trunking, 11126 .enable_swap = nfs4_enable_swap, 11127 .disable_swap = nfs4_disable_swap, 11128 }; 11129 11130 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { 11131 .name = XATTR_NAME_NFSV4_ACL, 11132 .list = nfs4_xattr_list_nfs4_acl, 11133 .get = nfs4_xattr_get_nfs4_acl, 11134 .set = nfs4_xattr_set_nfs4_acl, 11135 }; 11136 11137 #if defined(CONFIG_NFS_V4_1) 11138 static const struct xattr_handler nfs4_xattr_nfs4_dacl_handler = { 11139 .name = XATTR_NAME_NFSV4_DACL, 11140 .list = nfs4_xattr_list_nfs4_dacl, 11141 .get = nfs4_xattr_get_nfs4_dacl, 11142 .set = nfs4_xattr_set_nfs4_dacl, 11143 }; 11144 11145 static const struct xattr_handler nfs4_xattr_nfs4_sacl_handler = { 11146 .name = XATTR_NAME_NFSV4_SACL, 11147 .list = nfs4_xattr_list_nfs4_sacl, 11148 .get = nfs4_xattr_get_nfs4_sacl, 11149 .set = nfs4_xattr_set_nfs4_sacl, 11150 }; 11151 #endif 11152 11153 #ifdef CONFIG_NFS_V4_2 11154 static const struct xattr_handler nfs4_xattr_nfs4_user_handler = { 11155 .prefix = XATTR_USER_PREFIX, 11156 .get = nfs4_xattr_get_nfs4_user, 11157 .set = nfs4_xattr_set_nfs4_user, 11158 }; 11159 #endif 11160 11161 const struct xattr_handler * const nfs4_xattr_handlers[] = { 11162 &nfs4_xattr_nfs4_acl_handler, 11163 #if defined(CONFIG_NFS_V4_1) 11164 &nfs4_xattr_nfs4_dacl_handler, 11165 &nfs4_xattr_nfs4_sacl_handler, 11166 #endif 11167 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 11168 &nfs4_xattr_nfs4_label_handler, 11169 #endif 11170 #ifdef CONFIG_NFS_V4_2 11171 &nfs4_xattr_nfs4_user_handler, 11172 #endif 11173 NULL 11174 }; 11175