/*
 * fs/nfs/nfs4proc.c
 *
 * Client-side procedure declarations for NFSv4.
 *
 * Copyright (c) 2002 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/iversion.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "callback.h"
#include "pnfs.h"
#include "netns.h"
#include "sysfs.h"
#include "nfs4idmap.h"
#include "nfs4session.h"
#include "fscache.h"
#include "nfs42.h"

#include "nfs4trace.h"

#define NFSDBG_FACILITY		NFSDBG_PROC

#define NFS4_BITMASK_SZ		3

#define NFS4_POLL_RETRY_MIN	(HZ/10)
#define NFS4_POLL_RETRY_MAX	(15*HZ)

/* file attributes which can be mapped to nfs attributes */
#define NFS4_VALID_ATTRS (ATTR_MODE \
	| ATTR_UID \
	| ATTR_GID \
	| ATTR_SIZE \
	| ATTR_ATIME \
	| ATTR_MTIME \
	| ATTR_CTIME \
	| ATTR_ATIME_SET \
	| ATTR_MTIME_SET)

struct nfs4_opendata;
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
			      struct nfs_fattr *fattr, struct inode *inode);
static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
			   struct nfs_fattr *fattr, struct iattr *sattr,
			   struct nfs_open_context *ctx, struct nfs4_label *ilabel);
#ifdef CONFIG_NFS_V4_1
static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
		const struct cred *cred,
		struct nfs4_slot *slot,
		bool is_privileged);
static int nfs41_test_stateid(struct nfs_server *, const nfs4_stateid *,
		const struct cred *);
static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *,
		const struct cred *, bool);
#endif

#ifdef CONFIG_NFS_V4_SECURITY_LABEL
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
	struct iattr *sattr, struct nfs4_label *label)
{
	struct lsm_context shim;
	int err;

	if (label == NULL)
		return NULL;

	if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
		return NULL;

	label->lfs = 0;
	label->pi = 0;
	label->len = 0;
	label->label = NULL;

	err = security_dentry_init_security(dentry, sattr->ia_mode,
				&dentry->d_name, NULL, &shim);
	if (err)
		return NULL;

	label->lsmid = shim.id;
	label->label = shim.context;
	label->len = shim.len;
	return label;
}
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{
	struct lsm_context shim;

	if (label) {
		shim.context = label->label;
		shim.len = label->len;
		shim.id = label->lsmid;
		security_release_secctx(&shim);
	}
}
static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{
	if (label)
		return server->attr_bitmask;

	return server->attr_bitmask_nl;
}
#else
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
	struct iattr *sattr, struct nfs4_label *l)
{ return NULL; }
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{ return; }
static inline u32 *
nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{ return server->attr_bitmask; }
#endif

/* Prevent leaks of NFSv4 errors into userland */
static int nfs4_map_errors(int err)
{
	if (err >= -1000)
		return err;
	switch (err) {
	case -NFS4ERR_RESOURCE:
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
	case -NFS4ERR_RETURNCONFLICT:
		return -EREMOTEIO;
	case -NFS4ERR_WRONGSEC:
	case -NFS4ERR_WRONG_CRED:
		return -EPERM;
	case -NFS4ERR_BADOWNER:
	case -NFS4ERR_BADNAME:
		return -EINVAL;
	case -NFS4ERR_SHARE_DENIED:
		return -EACCES;
	case -NFS4ERR_MINOR_VERS_MISMATCH:
		return -EPROTONOSUPPORT;
	case -NFS4ERR_FILE_OPEN:
		return -EBUSY;
	case -NFS4ERR_NOT_SAME:
		return -ENOTSYNC;
	case -ENETDOWN:
	case -ENETUNREACH:
		break;
	default:
		dprintk("%s could not handle NFSv4 error %d\n",
				__func__, -err);
		break;
	}
	return -EIO;
}

/*
 * This is our standard bitmap for GETATTR requests.
 */
const u32 nfs4_fattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_CREATE
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	FATTR4_WORD2_SECURITY_LABEL
#endif
};

static const u32 nfs4_pnfs_open_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_CREATE
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY,
	FATTR4_WORD2_MDSTHRESHOLD
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	| FATTR4_WORD2_SECURITY_LABEL
#endif
};

static const u32 nfs4_open_noattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_FILEID,
};

const u32 nfs4_statfs_bitmap[3] = {
	FATTR4_WORD0_FILES_AVAIL
	| FATTR4_WORD0_FILES_FREE
	| FATTR4_WORD0_FILES_TOTAL,
	FATTR4_WORD1_SPACE_AVAIL
	| FATTR4_WORD1_SPACE_FREE
	| FATTR4_WORD1_SPACE_TOTAL
};

const u32 nfs4_pathconf_bitmap[3] = {
	FATTR4_WORD0_MAXLINK
	| FATTR4_WORD0_MAXNAME,
	0
};

const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
			| FATTR4_WORD0_MAXREAD
			| FATTR4_WORD0_MAXWRITE
			| FATTR4_WORD0_LEASE_TIME,
			FATTR4_WORD1_TIME_DELTA
			| FATTR4_WORD1_FS_LAYOUT_TYPES,
			FATTR4_WORD2_LAYOUT_BLKSIZE
			| FATTR4_WORD2_CLONE_BLKSIZE
			| FATTR4_WORD2_CHANGE_ATTR_TYPE
			| FATTR4_WORD2_XATTR_SUPPORT
};

const u32 nfs4_fs_locations_bitmap[3] = {
	FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID
	| FATTR4_WORD0_FS_LOCATIONS,
	FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
};

static void nfs4_bitmap_copy_adjust(__u32 *dst, const __u32 *src,
				    struct inode *inode, unsigned long flags)
{
	unsigned long cache_validity;

	memcpy(dst, src, NFS4_BITMASK_SZ*sizeof(*dst));
	if (!inode || !nfs_have_read_or_write_delegation(inode))
		return;

	cache_validity = READ_ONCE(NFS_I(inode)->cache_validity) | flags;

	/* Remove the attributes over which we have full control */
	dst[1] &= ~FATTR4_WORD1_RAWDEV;
	if (!(cache_validity & NFS_INO_INVALID_SIZE))
		dst[0] &= ~FATTR4_WORD0_SIZE;

	if (!(cache_validity & NFS_INO_INVALID_CHANGE))
		dst[0] &= ~FATTR4_WORD0_CHANGE;

	if (!(cache_validity & NFS_INO_INVALID_MODE))
		dst[1] &= ~FATTR4_WORD1_MODE;
	if (!(cache_validity & NFS_INO_INVALID_OTHER))
		dst[1] &= ~(FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP);

	if (!(cache_validity & NFS_INO_INVALID_BTIME))
		dst[1] &= ~FATTR4_WORD1_TIME_CREATE;

	if (nfs_have_delegated_mtime(inode)) {
		if (!(cache_validity & NFS_INO_INVALID_ATIME))
			dst[1] &= ~(FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET);
		if (!(cache_validity & NFS_INO_INVALID_MTIME))
			dst[1] &= ~(FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET);
		if (!(cache_validity & NFS_INO_INVALID_CTIME))
			dst[1] &= ~(FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY_SET);
	} else if (nfs_have_delegated_atime(inode)) {
		if (!(cache_validity & NFS_INO_INVALID_ATIME))
			dst[1] &= ~(FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET);
	}
}

static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
		struct nfs4_readdir_arg *readdir)
{
	unsigned int attrs = FATTR4_WORD0_FILEID | FATTR4_WORD0_TYPE;
	__be32 *start, *p;

	if (cookie > 2) {
		readdir->cookie = cookie;
		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
		return;
	}

	readdir->cookie = 0;
	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
	if (cookie == 2)
		return;

	/*
	 * NFSv4 servers do not return entries for '.' and '..'
	 * Therefore, we fake these entries here. We let '.'
	 * have cookie 0 and '..' have cookie 1. Note that
	 * when talking to the server, we always send cookie 0
	 * instead of 1 or 2.
	 */
	start = p = kmap_atomic(*readdir->pages);

	if (cookie == 0) {
		*p++ = xdr_one;			/* next */
		*p++ = xdr_zero;		/* cookie, first word */
		*p++ = xdr_one;			/* cookie, second word */
		*p++ = xdr_one;			/* entry len */
		memcpy(p, ".\0\0\0", 4);	/* entry */
		p++;
		*p++ = xdr_one;			/* bitmap length */
		*p++ = htonl(attrs);		/* bitmap */
		*p++ = htonl(12);		/* attribute buffer length */
		*p++ = htonl(NF4DIR);
		p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
	}

	*p++ = xdr_one;				/* next */
	*p++ = xdr_zero;			/* cookie, first word */
	*p++ = xdr_two;				/* cookie, second word */
	*p++ = xdr_two;				/* entry len */
	memcpy(p, "..\0\0", 4);			/* entry */
	p++;
	*p++ = xdr_one;				/* bitmap length */
	*p++ = htonl(attrs);			/* bitmap */
	*p++ = htonl(12);			/* attribute buffer length */
	*p++ = htonl(NF4DIR);
	spin_lock(&dentry->d_lock);
	p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
	spin_unlock(&dentry->d_lock);

	readdir->pgbase = (char *)p - (char *)start;
	readdir->count -= readdir->pgbase;
	kunmap_atomic(start);
}

static void nfs4_fattr_set_prechange(struct nfs_fattr *fattr, u64 version)
{
	if (!(fattr->valid & NFS_ATTR_FATTR_PRECHANGE)) {
		fattr->pre_change_attr = version;
		fattr->valid |= NFS_ATTR_FATTR_PRECHANGE;
	}
}

static void nfs4_test_and_free_stateid(struct nfs_server *server,
		nfs4_stateid *stateid,
		const struct cred *cred)
{
	const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;

	ops->test_and_free_expired(server, stateid, cred);
}

static void __nfs4_free_revoked_stateid(struct nfs_server *server,
		nfs4_stateid *stateid,
		const struct cred *cred)
{
	stateid->type = NFS4_REVOKED_STATEID_TYPE;
	nfs4_test_and_free_stateid(server, stateid, cred);
}

static void nfs4_free_revoked_stateid(struct nfs_server *server,
		const nfs4_stateid *stateid,
		const struct cred *cred)
{
	nfs4_stateid tmp;

	nfs4_stateid_copy(&tmp, stateid);
	__nfs4_free_revoked_stateid(server, &tmp, cred);
}

static long nfs4_update_delay(long *timeout)
{
	long ret;
	if (!timeout)
		return NFS4_POLL_RETRY_MAX;
	if (*timeout <= 0)
		*timeout = NFS4_POLL_RETRY_MIN;
	if (*timeout > NFS4_POLL_RETRY_MAX)
		*timeout = NFS4_POLL_RETRY_MAX;
	ret = *timeout;
	*timeout <<= 1;
	return ret;
}

static int nfs4_delay_killable(long *timeout)
{
	might_sleep();

	if (unlikely(nfs_current_task_exiting()))
		return -EINTR;
	__set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
	schedule_timeout(nfs4_update_delay(timeout));
	if (!__fatal_signal_pending(current))
		return 0;
	return -EINTR;
}

static int nfs4_delay_interruptible(long *timeout)
{
	might_sleep();

	if (unlikely(nfs_current_task_exiting()))
		return -EINTR;
	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE_UNSAFE);
	schedule_timeout(nfs4_update_delay(timeout));
	if (!signal_pending(current))
		return 0;
	return __fatal_signal_pending(current) ? -EINTR : -ERESTARTSYS;
}

static int nfs4_delay(long *timeout, bool interruptible)
{
	if (interruptible)
		return nfs4_delay_interruptible(timeout);
	return nfs4_delay_killable(timeout);
}

static const nfs4_stateid *
nfs4_recoverable_stateid(const nfs4_stateid *stateid)
{
	if (!stateid)
		return NULL;
	switch (stateid->type) {
	case NFS4_OPEN_STATEID_TYPE:
	case NFS4_LOCK_STATEID_TYPE:
	case NFS4_DELEGATION_STATEID_TYPE:
		return stateid;
	default:
		break;
	}
	return NULL;
}

/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
static int nfs4_do_handle_exception(struct nfs_server *server,
		int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state *state = exception->state;
	const nfs4_stateid *stateid;
	struct inode *inode = exception->inode;
	int ret = errorcode;

	exception->delay = 0;
	exception->recovering = 0;
	exception->retry = 0;

	stateid = nfs4_recoverable_stateid(exception->stateid);
	if (stateid == NULL && state != NULL)
		stateid = nfs4_recoverable_stateid(&state->stateid);

	switch (errorcode) {
	case 0:
		return 0;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		if (inode != NULL && S_ISREG(inode->i_mode))
			pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_BAD_STATEID:
	case -NFS4ERR_PARTNER_NO_AUTH:
		if (inode != NULL && stateid != NULL) {
			nfs_inode_find_state_and_recover(inode,
					stateid);
			goto wait_on_recovery;
		}
		fallthrough;
	case -NFS4ERR_OPENMODE:
		if (inode) {
			int err;

			err = nfs_async_inode_return_delegation(inode,
					stateid);
			if (err == 0)
				goto wait_on_recovery;
			if (stateid != NULL && stateid->type == NFS4_DELEGATION_STATEID_TYPE) {
				exception->retry = 1;
				break;
			}
		}
		if (state == NULL)
			break;
		ret = nfs4_schedule_stateid_recovery(server, state);
		if (ret < 0)
			break;
		goto wait_on_recovery;
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_STALE_CLIENTID:
		nfs4_schedule_lease_recovery(clp);
		goto wait_on_recovery;
	case -NFS4ERR_MOVED:
		ret = nfs4_schedule_migration_recovery(server);
		if (ret < 0)
			break;
		goto wait_on_recovery;
	case -NFS4ERR_LEASE_MOVED:
		nfs4_schedule_lease_moved_recovery(clp);
		goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		/* Handled in nfs41_sequence_process() */
		goto wait_on_recovery;
#endif /* defined(CONFIG_NFS_V4_1) */
	case -NFS4ERR_FILE_OPEN:
		if (exception->timeout > HZ) {
			/* We have retried a decent amount, time to
			 * fail
			 */
			ret = -EBUSY;
			break;
		}
		fallthrough;
	case -NFS4ERR_DELAY:
		nfs_inc_server_stats(server, NFSIOS_DELAY);
		fallthrough;
	case -NFS4ERR_GRACE:
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
	case -NFS4ERR_RETURNCONFLICT:
		exception->delay = 1;
		return 0;

	case -NFS4ERR_RETRY_UNCACHED_REP:
	case -NFS4ERR_OLD_STATEID:
		exception->retry = 1;
		break;
	case -NFS4ERR_BADOWNER:
		/* The following works around a Linux server bug! */
	case -NFS4ERR_BADNAME:
		if (server->caps & NFS_CAP_UIDGID_NOMAP) {
			server->caps &= ~NFS_CAP_UIDGID_NOMAP;
			exception->retry = 1;
			printk(KERN_WARNING "NFS: v4 server %s "
					"does not accept raw "
					"uid/gids. "
					"Reenabling the idmapper.\n",
					server->nfs_client->cl_hostname);
		}
	}
	/* We failed to handle the error */
	return nfs4_map_errors(ret);
wait_on_recovery:
	exception->recovering = 1;
	return 0;
}

/*
 * Track the number of NFS4ERR_DELAY related retransmissions and return
 * EAGAIN if the 'softerr' mount option is set, and we've exceeded the limit
 * set by 'nfs_delay_retrans'.
 */
static int nfs4_exception_should_retrans(const struct nfs_server *server,
					 struct nfs4_exception *exception)
{
	if (server->flags & NFS_MOUNT_SOFTERR && nfs_delay_retrans >= 0) {
		if (exception->retrans++ >= (unsigned short)nfs_delay_retrans)
			return -EAGAIN;
	}
	return 0;
}

/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	int ret;

	ret = nfs4_do_handle_exception(server, errorcode, exception);
	if (exception->delay) {
		int ret2 = nfs4_exception_should_retrans(server, exception);
		if (ret2 < 0) {
			exception->retry = 0;
			return ret2;
		}
		ret = nfs4_delay(&exception->timeout,
				exception->interruptible);
		goto out_retry;
	}
	if (exception->recovering) {
		if (exception->task_is_privileged)
			return -EDEADLOCK;
		ret = nfs4_wait_clnt_recover(clp);
		if (test_bit(NFS_MIG_FAILED, &server->mig_status))
			return -EIO;
		goto out_retry;
	}
	return ret;
out_retry:
	if (ret == 0)
		exception->retry = 1;
	return ret;
}

static int
nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
		int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	int ret;

	if ((task->tk_rpc_status == -ENETDOWN ||
	     task->tk_rpc_status == -ENETUNREACH) &&
	    task->tk_flags & RPC_TASK_NETUNREACH_FATAL) {
		exception->delay = 0;
		exception->recovering = 0;
		exception->retry = 0;
		return -EIO;
	}

	ret = nfs4_do_handle_exception(server, errorcode, exception);
	if (exception->delay) {
		int ret2 = nfs4_exception_should_retrans(server, exception);
		if (ret2 < 0) {
			exception->retry = 0;
			return ret2;
		}
		rpc_delay(task, nfs4_update_delay(&exception->timeout));
		goto out_retry;
	}
	if (exception->recovering) {
		if (exception->task_is_privileged)
			return -EDEADLOCK;
		rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
		if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
			rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
		goto out_retry;
	}
	if (test_bit(NFS_MIG_FAILED, &server->mig_status))
		ret = -EIO;
	return ret;
out_retry:
	if (ret == 0) {
		exception->retry = 1;
		/*
		 * For NFS4ERR_MOVED, the client transport will need to
		 * be recomputed after migration recovery has completed.
		 */
		if (errorcode == -NFS4ERR_MOVED)
			rpc_task_release_transport(task);
	}
	return ret;
}

int
nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
			struct nfs4_state *state, long *timeout)
{
	struct nfs4_exception exception = {
		.state = state,
	};

	if (task->tk_status >= 0)
		return 0;
	if (timeout)
		exception.timeout = *timeout;
	task->tk_status = nfs4_async_handle_exception(task, server,
			task->tk_status,
			&exception);
	if (exception.delay && timeout)
		*timeout = exception.timeout;
	if (exception.retry)
		return -EAGAIN;
	return 0;
}

/*
 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
 * or 'false' otherwise.
 */
static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
{
	rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
	return (flavor == RPC_AUTH_GSS_KRB5I) || (flavor == RPC_AUTH_GSS_KRB5P);
}

static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
{
	spin_lock(&clp->cl_lock);
	if (time_before(clp->cl_last_renewal, timestamp))
		clp->cl_last_renewal = timestamp;
	spin_unlock(&clp->cl_lock);
}

static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
{
	struct nfs_client *clp = server->nfs_client;

	if (!nfs4_has_session(clp))
		do_renew_lease(clp, timestamp);
}

struct nfs4_call_sync_data {
	const struct nfs_server *seq_server;
	struct nfs4_sequence_args *seq_args;
	struct nfs4_sequence_res *seq_res;
};

void nfs4_init_sequence(struct nfs4_sequence_args *args,
			struct nfs4_sequence_res *res, int cache_reply,
			int privileged)
{
	args->sa_slot = NULL;
	args->sa_cache_this = cache_reply;
	args->sa_privileged = privileged;

	res->sr_slot = NULL;
}

static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res)
{
	struct nfs4_slot *slot = res->sr_slot;
	struct nfs4_slot_table *tbl;

	tbl = slot->table;
	spin_lock(&tbl->slot_tbl_lock);
	if (!nfs41_wake_and_assign_slot(tbl, slot))
		nfs4_free_slot(tbl, slot);
	spin_unlock(&tbl->slot_tbl_lock);

	res->sr_slot = NULL;
}

static int nfs40_sequence_done(struct rpc_task *task,
			       struct nfs4_sequence_res *res)
{
	if (res->sr_slot != NULL)
		nfs40_sequence_free_slot(res);
	return 1;
}

#if defined(CONFIG_NFS_V4_1)

static void nfs41_release_slot(struct nfs4_slot *slot)
{
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	bool send_new_highest_used_slotid = false;

	if (!slot)
		return;
	tbl = slot->table;
	session = tbl->session;

	/* Bump the slot sequence number */
	if (slot->seq_done)
		slot->seq_nr++;
	slot->seq_done = 0;

	spin_lock(&tbl->slot_tbl_lock);
	/* Be nice to the server: try to ensure that the last transmitted
	 * value for highest_used_slotid <= target_highest_slotid
	 */
	if (tbl->highest_used_slotid > tbl->target_highest_slotid)
		send_new_highest_used_slotid = true;

	if (nfs41_wake_and_assign_slot(tbl, slot)) {
		send_new_highest_used_slotid = false;
		goto out_unlock;
	}
	nfs4_free_slot(tbl, slot);

	if (tbl->highest_used_slotid != NFS4_NO_SLOT)
		send_new_highest_used_slotid = false;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);
	if (send_new_highest_used_slotid)
		nfs41_notify_server(session->clp);
	if (waitqueue_active(&tbl->slot_waitq))
		wake_up_all(&tbl->slot_waitq);
}

static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
{
	nfs41_release_slot(res->sr_slot);
	res->sr_slot = NULL;
}

static void nfs4_slot_sequence_record_sent(struct nfs4_slot *slot,
		u32 seqnr)
{
	if ((s32)(seqnr - slot->seq_nr_highest_sent) > 0)
		slot->seq_nr_highest_sent = seqnr;
}
static void nfs4_slot_sequence_acked(struct nfs4_slot *slot, u32 seqnr)
{
	nfs4_slot_sequence_record_sent(slot, seqnr);
	slot->seq_nr_last_acked = seqnr;
}

static void nfs4_probe_sequence(struct nfs_client *client, const struct cred *cred,
				struct nfs4_slot *slot)
{
	struct rpc_task *task = _nfs41_proc_sequence(client, cred, slot, true);
	if (!IS_ERR(task))
		rpc_put_task_async(task);
}

static int nfs41_sequence_process(struct rpc_task *task,
		struct nfs4_sequence_res *res)
{
	struct nfs4_session *session;
	struct nfs4_slot *slot = res->sr_slot;
	struct nfs_client *clp;
	int status;
	int ret = 1;

	if (slot == NULL)
		goto out_noaction;
	/* don't increment the sequence number if the task wasn't sent */
	if (!RPC_WAS_SENT(task) || slot->seq_done)
		goto out;

	session = slot->table->session;
	clp = session->clp;

	trace_nfs4_sequence_done(session, res);

	status = res->sr_status;
	if (task->tk_status == -NFS4ERR_DEADSESSION)
		status = -NFS4ERR_DEADSESSION;

	/* Check the SEQUENCE operation status */
	switch (status) {
	case 0:
		/* Mark this sequence number as having been acked */
		nfs4_slot_sequence_acked(slot, slot->seq_nr);
		/* Update the slot's sequence and clientid lease timer */
		slot->seq_done = 1;
		do_renew_lease(clp, res->sr_timestamp);
		/* Check sequence flags */
		nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags,
				!!slot->privileged);
		nfs41_update_target_slotid(slot->table, slot, res);
		break;
	case 1:
		/*
		 * sr_status remains 1 if an RPC level error occurred.
		 * The server may or may not have processed the sequence
		 * operation.
		 */
		nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
		slot->seq_done = 1;
		goto out;
	case -NFS4ERR_DELAY:
		/* The server detected a resend of the RPC call and
		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
		 * of RFC5661.
		 */
		dprintk("%s: slot=%u seq=%u: Operation in progress\n",
			__func__,
			slot->slot_nr,
			slot->seq_nr);
		goto out_retry;
	case -NFS4ERR_RETRY_UNCACHED_REP:
	case -NFS4ERR_SEQ_FALSE_RETRY:
		/*
		 * The server thinks we tried to replay a request.
		 * Retry the call after bumping the sequence ID.
		 */
		nfs4_slot_sequence_acked(slot, slot->seq_nr);
		goto retry_new_seq;
	case -NFS4ERR_BADSLOT:
		/*
		 * The slot id we used was probably retired. Try again
		 * using a different slot id.
		 */
		if (slot->slot_nr < slot->table->target_highest_slotid)
			goto session_recover;
		goto retry_nowait;
	case -NFS4ERR_SEQ_MISORDERED:
		nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
		/*
		 * Were one or more calls using this slot interrupted?
		 * If the server never received the request, then our
		 * transmitted slot sequence number may be too high. However,
		 * if the server did receive the request then it might
		 * accidentally give us a reply with a mismatched operation.
		 * We can sort this out by sending a lone sequence operation
		 * to the server on the same slot.
		 */
		if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) {
			slot->seq_nr--;
			if (task->tk_msg.rpc_proc != &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE]) {
				nfs4_probe_sequence(clp, task->tk_msg.rpc_cred, slot);
				res->sr_slot = NULL;
			}
			goto retry_nowait;
		}
		/*
		 * RFC5661:
		 * A retry might be sent while the original request is
		 * still in progress on the replier. The replier SHOULD
		 * deal with the issue by returning NFS4ERR_DELAY as the
		 * reply to SEQUENCE or CB_SEQUENCE operation, but
		 * implementations MAY return NFS4ERR_SEQ_MISORDERED.
		 *
		 * Restart the search after a delay.
		 */
		slot->seq_nr = slot->seq_nr_highest_sent;
		goto out_retry;
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		goto session_recover;
	default:
		/* Just update the slot sequence no. */
		slot->seq_done = 1;
	}
out:
	/* The session may be reset by one of the error handlers. */
	dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
out_noaction:
	return ret;
session_recover:
	set_bit(NFS4_SLOT_TBL_DRAINING, &session->fc_slot_table.slot_tbl_state);
	nfs4_schedule_session_recovery(session, status);
	dprintk("%s ERROR: %d Reset session\n", __func__, status);
	nfs41_sequence_free_slot(res);
	goto out;
retry_new_seq:
	++slot->seq_nr;
retry_nowait:
	if (rpc_restart_call_prepare(task)) {
		nfs41_sequence_free_slot(res);
		task->tk_status = 0;
		ret = 0;
	}
	goto out;
out_retry:
	if (!rpc_restart_call(task))
		goto out;
	rpc_delay(task, NFS4_POLL_RETRY_MAX);
	return 0;
}

int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (!nfs41_sequence_process(task, res))
		return 0;
	if (res->sr_slot != NULL)
		nfs41_sequence_free_slot(res);
	return 1;
}
EXPORT_SYMBOL_GPL(nfs41_sequence_done);

static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (res->sr_slot == NULL)
		return 1;
	if (res->sr_slot->table->session != NULL)
		return nfs41_sequence_process(task, res);
	return nfs40_sequence_done(task, res);
}

static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
{
	if (res->sr_slot != NULL) {
		if (res->sr_slot->table->session != NULL)
			nfs41_sequence_free_slot(res);
		else
			nfs40_sequence_free_slot(res);
	}
}

int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (res->sr_slot == NULL)
		return 1;
	if (!res->sr_slot->table->session)
		return nfs40_sequence_done(task, res);
	return nfs41_sequence_done(task, res);
}
EXPORT_SYMBOL_GPL(nfs4_sequence_done);

static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;

	dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);

	nfs4_setup_sequence(data->seq_server->nfs_client,
			    data->seq_args, data->seq_res, task);
}

static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;

	nfs41_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs41_call_sync_ops = {
	.rpc_call_prepare = nfs41_call_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
};

#else /* !CONFIG_NFS_V4_1 */

static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	return nfs40_sequence_done(task, res);
}

static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
{
	if (res->sr_slot != NULL)
		nfs40_sequence_free_slot(res);
}

int nfs4_sequence_done(struct rpc_task *task,
		       struct nfs4_sequence_res *res)
{
	return nfs40_sequence_done(task, res);
}
EXPORT_SYMBOL_GPL(nfs4_sequence_done);

#endif /* !CONFIG_NFS_V4_1 */

static void nfs41_sequence_res_init(struct nfs4_sequence_res *res)
{
	res->sr_timestamp = jiffies;
	res->sr_status_flags = 0;
	res->sr_status = 1;
}

static
void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
		struct nfs4_sequence_res *res,
		struct nfs4_slot *slot)
{
	if (!slot)
		return;
	slot->privileged = args->sa_privileged ? 1 : 0;
	args->sa_slot = slot;

	res->sr_slot = slot;
}

int nfs4_setup_sequence(struct nfs_client *client,
			struct nfs4_sequence_args *args,
			struct nfs4_sequence_res *res,
			struct rpc_task *task)
{
	struct nfs4_session *session = nfs4_get_session(client);
	struct nfs4_slot_table *tbl = client->cl_slot_tbl;
	struct nfs4_slot *slot;

	/* slot already allocated? */
	if (res->sr_slot != NULL)
		goto out_start;

	if (session)
		tbl = &session->fc_slot_table;

	spin_lock(&tbl->slot_tbl_lock);
	/* The state manager will wait until the slot table is empty */
	if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
		goto out_sleep;

	slot = nfs4_alloc_slot(tbl);
	if (IS_ERR(slot)) {
		if (slot == ERR_PTR(-ENOMEM))
			goto out_sleep_timeout;
		goto out_sleep;
	}
	spin_unlock(&tbl->slot_tbl_lock);

	nfs4_sequence_attach_slot(args, res, slot);

	trace_nfs4_setup_sequence(session, args);
out_start:
	nfs41_sequence_res_init(res);
	rpc_call_start(task);
	return 0;
out_sleep_timeout:
	/* Try again in 1/4 second */
	if (args->sa_privileged)
		rpc_sleep_on_priority_timeout(&tbl->slot_tbl_waitq, task,
				jiffies + (HZ >> 2), RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on_timeout(&tbl->slot_tbl_waitq, task,
				NULL, jiffies + (HZ >> 2));
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
out_sleep:
	if (args->sa_privileged)
		rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
				RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
}
EXPORT_SYMBOL_GPL(nfs4_setup_sequence);

static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	nfs4_setup_sequence(data->seq_server->nfs_client,
			    data->seq_args, data->seq_res, task);
}

static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	nfs4_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs40_call_sync_ops = {
	.rpc_call_prepare = nfs40_call_sync_prepare,
	.rpc_call_done = nfs40_call_sync_done,
};

static int nfs4_call_sync_custom(struct rpc_task_setup *task_setup)
{
	int ret;
	struct rpc_task *task;

	task = rpc_run_task(task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);

	ret = task->tk_status;
	rpc_put_task(task);
	return ret;
}

static int nfs4_do_call_sync(struct rpc_clnt *clnt,
			     struct nfs_server *server,
			     struct rpc_message *msg,
			     struct nfs4_sequence_args *args,
			     struct nfs4_sequence_res *res,
			     unsigned short task_flags)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_call_sync_data data = {
		.seq_server = server,
		.seq_args = args,
		.seq_res = res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = clp->cl_mvops->call_sync_ops,
		.callback_data = &data,
		.flags = task_flags,
	};

	return nfs4_call_sync_custom(&task_setup);
}

static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
				   struct nfs_server *server,
				   struct rpc_message *msg,
				   struct nfs4_sequence_args *args,
				   struct nfs4_sequence_res *res)
{
	unsigned short task_flags = 0;

	if (server->caps & NFS_CAP_MOVEABLE)
		task_flags = RPC_TASK_MOVEABLE;
	return nfs4_do_call_sync(clnt, server, msg, args, res, task_flags);
}


int nfs4_call_sync(struct rpc_clnt *clnt,
		   struct nfs_server *server,
		   struct rpc_message *msg,
		   struct nfs4_sequence_args *args,
		   struct nfs4_sequence_res *res,
		   int cache_reply)
{
	nfs4_init_sequence(args, res, cache_reply, 0);
	return nfs4_call_sync_sequence(clnt, server, msg, args, res);
}

static void
nfs4_inc_nlink_locked(struct inode *inode)
{
	nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
				     NFS_INO_INVALID_CTIME |
				     NFS_INO_INVALID_NLINK);
	inc_nlink(inode);
}

static void
nfs4_inc_nlink(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	nfs4_inc_nlink_locked(inode);
	spin_unlock(&inode->i_lock);
}

static void
nfs4_dec_nlink_locked(struct inode *inode)
{
	nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
				     NFS_INO_INVALID_CTIME |
				     NFS_INO_INVALID_NLINK);
	drop_nlink(inode);
}

static void
nfs4_update_changeattr_locked(struct inode *inode,
		struct nfs4_change_info *cinfo,
		unsigned long timestamp, unsigned long cache_validity)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	u64 change_attr = inode_peek_iversion_raw(inode);

	if (!nfs_have_delegated_mtime(inode))
		cache_validity |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME;
	if (S_ISDIR(inode->i_mode))
		cache_validity |= NFS_INO_INVALID_DATA;

	switch (NFS_SERVER(inode)->change_attr_type) {
	case NFS4_CHANGE_TYPE_IS_UNDEFINED:
		if (cinfo->after == change_attr)
			goto out;
		break;
	default:
		if ((s64)(change_attr - cinfo->after) >= 0)
			goto out;
	}

	inode_set_iversion_raw(inode, cinfo->after);
	if (!cinfo->atomic || cinfo->before != change_attr) {
		if (S_ISDIR(inode->i_mode))
			nfs_force_lookup_revalidate(inode);

		if (!nfs_have_delegated_attributes(inode))
			cache_validity |=
				NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
				NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER |
				NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
				NFS_INO_INVALID_MODE | NFS_INO_INVALID_BTIME |
				NFS_INO_INVALID_XATTR;
		nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
	}
	nfsi->attrtimeo_timestamp = jiffies;
	nfsi->read_cache_jiffies = timestamp;
	nfsi->attr_gencount = nfs_inc_attr_generation_counter();
	nfsi->cache_validity &= ~NFS_INO_INVALID_CHANGE;
out:
	nfs_set_cache_invalid(inode, cache_validity);
}

void
nfs4_update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
		unsigned long timestamp, unsigned long cache_validity)
{
	spin_lock(&dir->i_lock);
	nfs4_update_changeattr_locked(dir, cinfo, timestamp, cache_validity);
	spin_unlock(&dir->i_lock);
}

struct nfs4_open_createattrs {
	struct nfs4_label *label;
	struct iattr *sattr;
	const __u32 verf[2];
};

static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
		int err, struct nfs4_exception *exception)
{
	if (err != -EINVAL)
		return false;
	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
		return false;
	server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
	exception->retry = 1;
	return true;
}

static fmode_t _nfs4_ctx_to_accessmode(const struct nfs_open_context *ctx)
{
	return ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
}

static fmode_t _nfs4_ctx_to_openmode(const struct nfs_open_context *ctx)
{
	fmode_t ret = ctx->mode & (FMODE_READ|FMODE_WRITE);

	return (ctx->mode & FMODE_EXEC) ? FMODE_READ | ret : ret;
}

static u32
nfs4_fmode_to_share_access(fmode_t fmode)
{
	u32 res = 0;

	switch (fmode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
		res = NFS4_SHARE_ACCESS_READ;
		break;
	case FMODE_WRITE:
		res = NFS4_SHARE_ACCESS_WRITE;
		break;
	case FMODE_READ|FMODE_WRITE:
		res = NFS4_SHARE_ACCESS_BOTH;
	}
	return res;
}

static u32
nfs4_map_atomic_open_share(struct nfs_server *server,
		fmode_t fmode, int openflags)
{
	u32 res = nfs4_fmode_to_share_access(fmode);

	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
		goto out;
	/* Want no delegation if we're using O_DIRECT */
	if (openflags & O_DIRECT) {
		res |= NFS4_SHARE_WANT_NO_DELEG;
		goto out;
	}
	/* res |= NFS4_SHARE_WANT_NO_PREFERENCE; */
	if (server->caps & NFS_CAP_DELEGTIME)
		res |= NFS4_SHARE_WANT_DELEG_TIMESTAMPS;
	if (server->caps & NFS_CAP_OPEN_XOR)
		res |= NFS4_SHARE_WANT_OPEN_XOR_DELEGATION;
out:
	return res;
}

static enum open_claim_type4
nfs4_map_atomic_open_claim(struct nfs_server *server,
		enum open_claim_type4 claim)
{
	if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
		return claim;
	switch (claim) {
	default:
		return claim;
	case NFS4_OPEN_CLAIM_FH:
		return NFS4_OPEN_CLAIM_NULL;
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
		return NFS4_OPEN_CLAIM_DELEGATE_CUR;
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		return NFS4_OPEN_CLAIM_DELEGATE_PREV;
	}
}

static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
	p->o_res.f_attr = &p->f_attr;
	p->o_res.seqid = p->o_arg.seqid;
	p->c_res.seqid = p->c_arg.seqid;
	p->o_res.server = p->o_arg.server;
	p->o_res.access_request = p->o_arg.access;
	nfs_fattr_init(&p->f_attr);
	nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}

static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
		const struct nfs4_open_createattrs *c,
		enum open_claim_type4 claim,
		gfp_t gfp_mask)
{
	struct dentry *parent = dget_parent(dentry);
	struct inode *dir = d_inode(parent);
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
	struct nfs4_label *label = (c != NULL) ? c->label : NULL;
	struct nfs4_opendata *p;

	p = kzalloc(sizeof(*p), gfp_mask);
	if (p == NULL)
		goto err;

	p->f_attr.label = nfs4_label_alloc(server, gfp_mask);
	if (IS_ERR(p->f_attr.label))
		goto err_free_p;

	p->a_label = nfs4_label_alloc(server, gfp_mask);
	if (IS_ERR(p->a_label))
		goto err_free_f;

	alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
	p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
	if (IS_ERR(p->o_arg.seqid))
		goto err_free_label;
	nfs_sb_active(dentry->d_sb);
	p->dentry = dget(dentry);
	p->dir = parent;
	p->owner = sp;
	atomic_inc(&sp->so_count);
	p->o_arg.open_flags = flags;
	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
	p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
	p->o_arg.share_access = nfs4_map_atomic_open_share(server,
							   fmode, flags);
	if (flags & O_CREAT) {
		p->o_arg.umask = current_umask();
		p->o_arg.label = nfs4_label_copy(p->a_label, label);
		if (c->sattr != NULL && c->sattr->ia_valid != 0) {
			p->o_arg.u.attrs = &p->attrs;
			memcpy(&p->attrs, c->sattr, sizeof(p->attrs));

			memcpy(p->o_arg.u.verifier.data, c->verf,
			       sizeof(p->o_arg.u.verifier.data));
		}
	}
	/* ask server to check for all possible rights as results
	 * are cached */
	switch (p->o_arg.claim) {
	default:
		break;
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
				  NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE |
				  NFS4_ACCESS_EXECUTE |
				  nfs_access_xattr_mask(server);
	}
	p->o_arg.clientid = server->nfs_client->cl_clientid;
	p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
	p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
	p->o_arg.name = &dentry->d_name;
	p->o_arg.server = server;
	p->o_arg.bitmask = nfs4_bitmask(server, label);
	p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
	switch (p->o_arg.claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
	case NFS4_OPEN_CLAIM_DELEGATE_PREV:
		p->o_arg.fh = NFS_FH(dir);
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
	case NFS4_OPEN_CLAIM_FH:
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		p->o_arg.fh = NFS_FH(d_inode(dentry));
	}
	p->c_arg.fh = &p->o_res.fh;
	p->c_arg.stateid = &p->o_res.stateid;
	p->c_arg.seqid = p->o_arg.seqid;
	nfs4_init_opendata_res(p);
	kref_init(&p->kref);
	return p;

err_free_label:
	nfs4_label_free(p->a_label);
err_free_f:
	nfs4_label_free(p->f_attr.label);
err_free_p:
	kfree(p);
err:
	dput(parent);
	return NULL;
}

static void nfs4_opendata_free(struct kref *kref)
{
	struct nfs4_opendata *p = container_of(kref,
			struct nfs4_opendata, kref);
	struct super_block *sb = p->dentry->d_sb;

	nfs4_lgopen_release(p->lgp);
	nfs_free_seqid(p->o_arg.seqid);
	nfs4_sequence_free_slot(&p->o_res.seq_res);
	if (p->state != NULL)
		nfs4_put_open_state(p->state);
	nfs4_put_state_owner(p->owner);

	nfs4_label_free(p->a_label);
	nfs4_label_free(p->f_attr.label);

	dput(p->dir);
	dput(p->dentry);
	nfs_sb_deactive(sb);
	nfs_fattr_free_names(&p->f_attr);
	kfree(p->f_attr.mdsthreshold);
	kfree(p);
}

static void nfs4_opendata_put(struct nfs4_opendata *p)
{
	if (p != NULL)
		kref_put(&p->kref, nfs4_opendata_free);
}

static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
		fmode_t fmode)
{
	switch (fmode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_READ|FMODE_WRITE:
		return state->n_rdwr != 0;
	case FMODE_WRITE:
		return state->n_wronly != 0;
	case FMODE_READ:
		return state->n_rdonly != 0;
	}
	WARN_ON_ONCE(1);
	return false;
}

static int can_open_cached(struct nfs4_state *state, fmode_t mode,
		int open_mode, enum open_claim_type4 claim)
{
	int ret = 0;

	if (open_mode & (O_EXCL|O_TRUNC))
		goto out;
	switch (claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		goto out;
	default:
		break;
	}
	switch (mode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_READ:
		ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
			&& state->n_rdonly != 0;
		break;
	case FMODE_WRITE:
		ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
			&& state->n_wronly != 0;
		break;
	case FMODE_READ|FMODE_WRITE:
		ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
			&& state->n_rdwr != 0;
	}
out:
	return ret;
}

static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
		enum open_claim_type4 claim)
{
	if (delegation == NULL)
		return 0;
	if ((delegation->type & fmode) != fmode)
		return 0;
	switch (claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
		if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
			break;
		fallthrough;
	default:
		return 0;
	}
	nfs_mark_delegation_referenced(delegation);
	return 1;
}

static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
{
	switch (fmode) {
	case FMODE_WRITE:
		state->n_wronly++;
		break;
	case FMODE_READ:
		state->n_rdonly++;
		break;
	case FMODE_READ|FMODE_WRITE:
		state->n_rdwr++;
	}
	nfs4_state_set_mode_locked(state, state->state | fmode);
}

#ifdef CONFIG_NFS_V4_1
static bool nfs_open_stateid_recover_openmode(struct nfs4_state *state)
{
	if (state->n_rdonly && !test_bit(NFS_O_RDONLY_STATE, &state->flags))
		return true;
	if (state->n_wronly && !test_bit(NFS_O_WRONLY_STATE, &state->flags))
		return true;
	if (state->n_rdwr && !test_bit(NFS_O_RDWR_STATE, &state->flags))
		return true;
	return false;
}
#endif /* CONFIG_NFS_V4_1 */

static void nfs_state_log_update_open_stateid(struct nfs4_state *state)
{
	if (test_and_clear_bit(NFS_STATE_CHANGE_WAIT, &state->flags))
		wake_up_all(&state->waitq);
}

static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
{
	struct nfs_client *clp = state->owner->so_server->nfs_client;
	bool need_recover = false;

	if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
		need_recover = true;
	if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
		need_recover = true;
	if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
		need_recover = true;
	if (need_recover)
		nfs4_state_mark_reclaim_nograce(clp, state);
}

/*
 * Check whether the caller may update the open stateid
 * to the value passed in by stateid.
 *
 * Note: This function relies heavily on the server implementing
 * RFC7530 Section 9.1.4.2, and RFC5661 Section 8.2.2
 * correctly.
 * i.e. The stateid seqids have to be initialised to 1, and
 * are then incremented on every state transition.
 */
static bool nfs_stateid_is_sequential(struct nfs4_state *state,
		const nfs4_stateid *stateid)
{
	if (test_bit(NFS_OPEN_STATE, &state->flags)) {
		/* The common case - we're updating to a new sequence number */
		if (nfs4_stateid_match_other(stateid, &state->open_stateid)) {
			if (nfs4_stateid_is_next(&state->open_stateid, stateid))
				return true;
			return false;
		}
		/* The server returned a new stateid */
	}
	/* This is the first OPEN in this generation */
	if (stateid->seqid == cpu_to_be32(1))
		return true;
	return false;
}

static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
{
	if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
		return;
	if (state->n_wronly)
		set_bit(NFS_O_WRONLY_STATE, &state->flags);
	if (state->n_rdonly)
		set_bit(NFS_O_RDONLY_STATE, &state->flags);
	if (state->n_rdwr)
		set_bit(NFS_O_RDWR_STATE, &state->flags);
	set_bit(NFS_OPEN_STATE, &state->flags);
}

static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
		nfs4_stateid *stateid, fmode_t fmode)
{
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	switch (fmode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_WRITE:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		break;
	case FMODE_READ:
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		break;
	case 0:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		clear_bit(NFS_OPEN_STATE, &state->flags);
	}
	if (stateid == NULL)
		return;
	/* Handle OPEN+OPEN_DOWNGRADE races */
	if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
	    !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
		nfs_resync_open_stateid_locked(state);
		goto out;
	}
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
	trace_nfs4_open_stateid_update(state->inode, stateid, 0);
out:
	nfs_state_log_update_open_stateid(state);
}

static void nfs_clear_open_stateid(struct nfs4_state *state,
	nfs4_stateid *arg_stateid,
	nfs4_stateid *stateid, fmode_t fmode)
{
	write_seqlock(&state->seqlock);
	/* Ignore, if the CLOSE argument doesn't match the current stateid */
	if (nfs4_state_match_open_stateid_other(state, arg_stateid))
		nfs_clear_open_stateid_locked(state, stateid, fmode);
	write_sequnlock(&state->seqlock);
	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
}

static void nfs_set_open_stateid_locked(struct nfs4_state *state,
		const nfs4_stateid *stateid, nfs4_stateid *freeme)
	__must_hold(&state->owner->so_lock)
	__must_hold(&state->seqlock)
	__must_hold(RCU)
{
	DEFINE_WAIT(wait);
	int status = 0;
	for (;;) {

		if (nfs_stateid_is_sequential(state, stateid))
			break;

		if (status)
			break;
		/* Rely on seqids for serialisation with NFSv4.0 */
		if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client))
			break;

		set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
		prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
		/*
		 * Ensure we process the state changes in the same order
		 * in which the server processed them by delaying the
		 * update of the stateid until we are in sequence.
		 */
		write_sequnlock(&state->seqlock);
		spin_unlock(&state->owner->so_lock);
		rcu_read_unlock();
		trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);

		if (!fatal_signal_pending(current) &&
		    !nfs_current_task_exiting()) {
			if (schedule_timeout(5*HZ) == 0)
				status = -EAGAIN;
			else
				status = 0;
		} else
			status = -EINTR;
		finish_wait(&state->waitq, &wait);
		rcu_read_lock();
		spin_lock(&state->owner->so_lock);
		write_seqlock(&state->seqlock);
	}

	if (test_bit(NFS_OPEN_STATE, &state->flags) &&
	    !nfs4_stateid_match_other(stateid, &state->open_stateid)) {
		nfs4_stateid_copy(freeme, &state->open_stateid);
		nfs_test_and_clear_all_open_stateid(state);
	}

	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
	trace_nfs4_open_stateid_update(state->inode, stateid, status);
	nfs_state_log_update_open_stateid(state);
}

static void nfs_state_set_open_stateid(struct nfs4_state *state,
		const nfs4_stateid *open_stateid,
		fmode_t fmode,
		nfs4_stateid *freeme)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	write_seqlock(&state->seqlock);
	nfs_set_open_stateid_locked(state, open_stateid, freeme);
	switch (fmode) {
	case FMODE_READ:
		set_bit(NFS_O_RDONLY_STATE, &state->flags);
		break;
	case FMODE_WRITE:
		set_bit(NFS_O_WRONLY_STATE, &state->flags);
		break;
	case FMODE_READ|FMODE_WRITE:
		set_bit(NFS_O_RDWR_STATE, &state->flags);
	}
	set_bit(NFS_OPEN_STATE, &state->flags);
	write_sequnlock(&state->seqlock);
}

static void nfs_state_clear_open_state_flags(struct nfs4_state *state)
{
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	clear_bit(NFS_O_WRONLY_STATE, &state->flags);
	clear_bit(NFS_O_RDONLY_STATE, &state->flags);
	clear_bit(NFS_OPEN_STATE, &state->flags);
}

static void nfs_state_set_delegation(struct nfs4_state *state,
				     const nfs4_stateid *deleg_stateid,
				     fmode_t fmode)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	write_seqlock(&state->seqlock);
	nfs4_stateid_copy(&state->stateid, deleg_stateid);
	set_bit(NFS_DELEGATED_STATE, &state->flags);
	write_sequnlock(&state->seqlock);
}

static void nfs_state_clear_delegation(struct nfs4_state *state)
{
	write_seqlock(&state->seqlock);
	nfs4_stateid_copy(&state->stateid, &state->open_stateid);
	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	write_sequnlock(&state->seqlock);
}

int update_open_stateid(struct nfs4_state *state,
			const nfs4_stateid *open_stateid,
			const nfs4_stateid *delegation,
			fmode_t fmode)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs_client *clp = server->nfs_client;
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *deleg_cur;
	nfs4_stateid freeme = { };
	int ret = 0;

	fmode &= (FMODE_READ|FMODE_WRITE);
&= (FMODE_READ|FMODE_WRITE); 1896 1897 rcu_read_lock(); 1898 spin_lock(&state->owner->so_lock); 1899 if (open_stateid != NULL) { 1900 nfs_state_set_open_stateid(state, open_stateid, fmode, &freeme); 1901 ret = 1; 1902 } 1903 1904 deleg_cur = nfs4_get_valid_delegation(state->inode); 1905 if (deleg_cur == NULL) 1906 goto no_delegation; 1907 1908 spin_lock(&deleg_cur->lock); 1909 if (rcu_dereference(nfsi->delegation) != deleg_cur || 1910 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) || 1911 (deleg_cur->type & fmode) != fmode) 1912 goto no_delegation_unlock; 1913 1914 if (delegation == NULL) 1915 delegation = &deleg_cur->stateid; 1916 else if (!nfs4_stateid_match_other(&deleg_cur->stateid, delegation)) 1917 goto no_delegation_unlock; 1918 1919 nfs_mark_delegation_referenced(deleg_cur); 1920 nfs_state_set_delegation(state, &deleg_cur->stateid, fmode); 1921 ret = 1; 1922 no_delegation_unlock: 1923 spin_unlock(&deleg_cur->lock); 1924 no_delegation: 1925 if (ret) 1926 update_open_stateflags(state, fmode); 1927 spin_unlock(&state->owner->so_lock); 1928 rcu_read_unlock(); 1929 1930 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) 1931 nfs4_schedule_state_manager(clp); 1932 if (freeme.type != 0) 1933 nfs4_test_and_free_stateid(server, &freeme, 1934 state->owner->so_cred); 1935 1936 return ret; 1937 } 1938 1939 static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp, 1940 const nfs4_stateid *stateid) 1941 { 1942 struct nfs4_state *state = lsp->ls_state; 1943 bool ret = false; 1944 1945 spin_lock(&state->state_lock); 1946 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid)) 1947 goto out_noupdate; 1948 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid)) 1949 goto out_noupdate; 1950 nfs4_stateid_copy(&lsp->ls_stateid, stateid); 1951 ret = true; 1952 out_noupdate: 1953 spin_unlock(&state->state_lock); 1954 return ret; 1955 } 1956 1957 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode) 1958 { 1959 struct nfs_delegation *delegation; 1960 1961 fmode &= FMODE_READ|FMODE_WRITE; 1962 rcu_read_lock(); 1963 delegation = nfs4_get_valid_delegation(inode); 1964 if (delegation == NULL || (delegation->type & fmode) == fmode) { 1965 rcu_read_unlock(); 1966 return; 1967 } 1968 rcu_read_unlock(); 1969 nfs4_inode_return_delegation(inode); 1970 } 1971 1972 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) 1973 { 1974 struct nfs4_state *state = opendata->state; 1975 struct nfs_delegation *delegation; 1976 int open_mode = opendata->o_arg.open_flags; 1977 fmode_t fmode = opendata->o_arg.fmode; 1978 enum open_claim_type4 claim = opendata->o_arg.claim; 1979 nfs4_stateid stateid; 1980 int ret = -EAGAIN; 1981 1982 for (;;) { 1983 spin_lock(&state->owner->so_lock); 1984 if (can_open_cached(state, fmode, open_mode, claim)) { 1985 update_open_stateflags(state, fmode); 1986 spin_unlock(&state->owner->so_lock); 1987 goto out_return_state; 1988 } 1989 spin_unlock(&state->owner->so_lock); 1990 rcu_read_lock(); 1991 delegation = nfs4_get_valid_delegation(state->inode); 1992 if (!can_open_delegated(delegation, fmode, claim)) { 1993 rcu_read_unlock(); 1994 break; 1995 } 1996 /* Save the delegation */ 1997 nfs4_stateid_copy(&stateid, &delegation->stateid); 1998 rcu_read_unlock(); 1999 nfs_release_seqid(opendata->o_arg.seqid); 2000 if (!opendata->is_recover) { 2001 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode); 2002 if (ret != 0) 2003 goto out; 2004 } 2005 ret = -EAGAIN; 2006 2007 /* Try to update the stateid using the 
delegation */ 2008 if (update_open_stateid(state, NULL, &stateid, fmode)) 2009 goto out_return_state; 2010 } 2011 out: 2012 return ERR_PTR(ret); 2013 out_return_state: 2014 refcount_inc(&state->count); 2015 return state; 2016 } 2017 2018 static void 2019 nfs4_process_delegation(struct inode *inode, const struct cred *cred, 2020 enum open_claim_type4 claim, 2021 const struct nfs4_open_delegation *delegation) 2022 { 2023 switch (delegation->open_delegation_type) { 2024 case NFS4_OPEN_DELEGATE_READ: 2025 case NFS4_OPEN_DELEGATE_WRITE: 2026 case NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG: 2027 case NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG: 2028 break; 2029 default: 2030 return; 2031 } 2032 switch (claim) { 2033 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 2034 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 2035 pr_err_ratelimited("NFS: Broken NFSv4 server %s is " 2036 "returning a delegation for " 2037 "OPEN(CLAIM_DELEGATE_CUR)\n", 2038 NFS_SERVER(inode)->nfs_client->cl_hostname); 2039 break; 2040 case NFS4_OPEN_CLAIM_PREVIOUS: 2041 nfs_inode_reclaim_delegation(inode, cred, delegation->type, 2042 &delegation->stateid, 2043 delegation->pagemod_limit, 2044 delegation->open_delegation_type); 2045 break; 2046 default: 2047 nfs_inode_set_delegation(inode, cred, delegation->type, 2048 &delegation->stateid, 2049 delegation->pagemod_limit, 2050 delegation->open_delegation_type); 2051 } 2052 if (delegation->do_recall) 2053 nfs_async_inode_return_delegation(inode, &delegation->stateid); 2054 } 2055 2056 /* 2057 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes 2058 * and update the nfs4_state. 2059 */ 2060 static struct nfs4_state * 2061 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data) 2062 { 2063 struct inode *inode = data->state->inode; 2064 struct nfs4_state *state = data->state; 2065 int ret; 2066 2067 if (!data->rpc_done) { 2068 if (data->rpc_status) 2069 return ERR_PTR(data->rpc_status); 2070 return nfs4_try_open_cached(data); 2071 } 2072 2073 ret = nfs_refresh_inode(inode, &data->f_attr); 2074 if (ret) 2075 return ERR_PTR(ret); 2076 2077 nfs4_process_delegation(state->inode, 2078 data->owner->so_cred, 2079 data->o_arg.claim, 2080 &data->o_res.delegation); 2081 2082 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_NO_OPEN_STATEID)) { 2083 if (!update_open_stateid(state, &data->o_res.stateid, 2084 NULL, data->o_arg.fmode)) 2085 return ERR_PTR(-EAGAIN); 2086 } else if (!update_open_stateid(state, NULL, NULL, data->o_arg.fmode)) 2087 return ERR_PTR(-EAGAIN); 2088 refcount_inc(&state->count); 2089 2090 return state; 2091 } 2092 2093 static struct inode * 2094 nfs4_opendata_get_inode(struct nfs4_opendata *data) 2095 { 2096 struct inode *inode; 2097 2098 switch (data->o_arg.claim) { 2099 case NFS4_OPEN_CLAIM_NULL: 2100 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 2101 case NFS4_OPEN_CLAIM_DELEGATE_PREV: 2102 if (!(data->f_attr.valid & NFS_ATTR_FATTR)) 2103 return ERR_PTR(-EAGAIN); 2104 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, 2105 &data->f_attr); 2106 break; 2107 default: 2108 inode = d_inode(data->dentry); 2109 ihold(inode); 2110 nfs_refresh_inode(inode, &data->f_attr); 2111 } 2112 return inode; 2113 } 2114 2115 static struct nfs4_state * 2116 nfs4_opendata_find_nfs4_state(struct nfs4_opendata *data) 2117 { 2118 struct nfs4_state *state; 2119 struct inode *inode; 2120 2121 inode = nfs4_opendata_get_inode(data); 2122 if (IS_ERR(inode)) 2123 return ERR_CAST(inode); 2124 if (data->state != NULL && data->state->inode == inode) { 2125 state = data->state; 2126 refcount_inc(&state->count); 2127 } else 
2128 state = nfs4_get_open_state(inode, data->owner); 2129 iput(inode); 2130 if (state == NULL) 2131 state = ERR_PTR(-ENOMEM); 2132 return state; 2133 } 2134 2135 static struct nfs4_state * 2136 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 2137 { 2138 struct nfs4_state *state; 2139 2140 if (!data->rpc_done) { 2141 state = nfs4_try_open_cached(data); 2142 trace_nfs4_cached_open(data->state); 2143 goto out; 2144 } 2145 2146 state = nfs4_opendata_find_nfs4_state(data); 2147 if (IS_ERR(state)) 2148 goto out; 2149 2150 nfs4_process_delegation(state->inode, 2151 data->owner->so_cred, 2152 data->o_arg.claim, 2153 &data->o_res.delegation); 2154 2155 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_NO_OPEN_STATEID)) { 2156 if (!update_open_stateid(state, &data->o_res.stateid, 2157 NULL, data->o_arg.fmode)) { 2158 nfs4_put_open_state(state); 2159 state = ERR_PTR(-EAGAIN); 2160 } 2161 } else if (!update_open_stateid(state, NULL, NULL, data->o_arg.fmode)) { 2162 nfs4_put_open_state(state); 2163 state = ERR_PTR(-EAGAIN); 2164 } 2165 out: 2166 nfs_release_seqid(data->o_arg.seqid); 2167 return state; 2168 } 2169 2170 static struct nfs4_state * 2171 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 2172 { 2173 struct nfs4_state *ret; 2174 2175 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) 2176 ret =_nfs4_opendata_reclaim_to_nfs4_state(data); 2177 else 2178 ret = _nfs4_opendata_to_nfs4_state(data); 2179 nfs4_sequence_free_slot(&data->o_res.seq_res); 2180 return ret; 2181 } 2182 2183 static struct nfs_open_context * 2184 nfs4_state_find_open_context_mode(struct nfs4_state *state, fmode_t mode) 2185 { 2186 struct nfs_inode *nfsi = NFS_I(state->inode); 2187 struct nfs_open_context *ctx; 2188 2189 rcu_read_lock(); 2190 list_for_each_entry_rcu(ctx, &nfsi->open_files, list) { 2191 if (ctx->state != state) 2192 continue; 2193 if ((ctx->mode & mode) != mode) 2194 continue; 2195 if (!get_nfs_open_context(ctx)) 2196 continue; 2197 rcu_read_unlock(); 2198 return ctx; 2199 } 2200 rcu_read_unlock(); 2201 return ERR_PTR(-ENOENT); 2202 } 2203 2204 static struct nfs_open_context * 2205 nfs4_state_find_open_context(struct nfs4_state *state) 2206 { 2207 struct nfs_open_context *ctx; 2208 2209 ctx = nfs4_state_find_open_context_mode(state, FMODE_READ|FMODE_WRITE); 2210 if (!IS_ERR(ctx)) 2211 return ctx; 2212 ctx = nfs4_state_find_open_context_mode(state, FMODE_WRITE); 2213 if (!IS_ERR(ctx)) 2214 return ctx; 2215 return nfs4_state_find_open_context_mode(state, FMODE_READ); 2216 } 2217 2218 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, 2219 struct nfs4_state *state, enum open_claim_type4 claim) 2220 { 2221 struct nfs4_opendata *opendata; 2222 2223 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, 2224 NULL, claim, GFP_NOFS); 2225 if (opendata == NULL) 2226 return ERR_PTR(-ENOMEM); 2227 opendata->state = state; 2228 refcount_inc(&state->count); 2229 return opendata; 2230 } 2231 2232 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, 2233 fmode_t fmode) 2234 { 2235 struct nfs4_state *newstate; 2236 struct nfs_server *server = NFS_SB(opendata->dentry->d_sb); 2237 int openflags = opendata->o_arg.open_flags; 2238 int ret; 2239 2240 if (!nfs4_mode_match_open_stateid(opendata->state, fmode)) 2241 return 0; 2242 opendata->o_arg.fmode = fmode; 2243 opendata->o_arg.share_access = 2244 nfs4_map_atomic_open_share(server, fmode, openflags); 2245 memset(&opendata->o_res, 0, sizeof(opendata->o_res)); 2246 memset(&opendata->c_res, 0, 
sizeof(opendata->c_res)); 2247 nfs4_init_opendata_res(opendata); 2248 ret = _nfs4_recover_proc_open(opendata); 2249 if (ret != 0) 2250 return ret; 2251 newstate = nfs4_opendata_to_nfs4_state(opendata); 2252 if (IS_ERR(newstate)) 2253 return PTR_ERR(newstate); 2254 if (newstate != opendata->state) 2255 ret = -ESTALE; 2256 nfs4_close_state(newstate, fmode); 2257 return ret; 2258 } 2259 2260 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state) 2261 { 2262 int ret; 2263 2264 /* memory barrier prior to reading state->n_* */ 2265 smp_rmb(); 2266 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); 2267 if (ret != 0) 2268 return ret; 2269 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE); 2270 if (ret != 0) 2271 return ret; 2272 ret = nfs4_open_recover_helper(opendata, FMODE_READ); 2273 if (ret != 0) 2274 return ret; 2275 /* 2276 * We may have performed cached opens for all three recoveries. 2277 * Check if we need to update the current stateid. 2278 */ 2279 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 && 2280 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) { 2281 write_seqlock(&state->seqlock); 2282 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 2283 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 2284 write_sequnlock(&state->seqlock); 2285 } 2286 return 0; 2287 } 2288 2289 /* 2290 * OPEN_RECLAIM: 2291 * reclaim state on the server after a reboot. 2292 */ 2293 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 2294 { 2295 struct nfs_delegation *delegation; 2296 struct nfs4_opendata *opendata; 2297 u32 delegation_type = NFS4_OPEN_DELEGATE_NONE; 2298 int status; 2299 2300 opendata = nfs4_open_recoverdata_alloc(ctx, state, 2301 NFS4_OPEN_CLAIM_PREVIOUS); 2302 if (IS_ERR(opendata)) 2303 return PTR_ERR(opendata); 2304 rcu_read_lock(); 2305 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2306 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0) { 2307 switch(delegation->type) { 2308 case FMODE_READ: 2309 delegation_type = NFS4_OPEN_DELEGATE_READ; 2310 if (test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) 2311 delegation_type = NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG; 2312 break; 2313 case FMODE_WRITE: 2314 case FMODE_READ|FMODE_WRITE: 2315 delegation_type = NFS4_OPEN_DELEGATE_WRITE; 2316 if (test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) 2317 delegation_type = NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG; 2318 } 2319 } 2320 rcu_read_unlock(); 2321 opendata->o_arg.u.delegation_type = delegation_type; 2322 status = nfs4_open_recover(opendata, state); 2323 nfs4_opendata_put(opendata); 2324 return status; 2325 } 2326 2327 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 2328 { 2329 struct nfs_server *server = NFS_SERVER(state->inode); 2330 struct nfs4_exception exception = { }; 2331 int err; 2332 do { 2333 err = _nfs4_do_open_reclaim(ctx, state); 2334 trace_nfs4_open_reclaim(ctx, 0, err); 2335 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 2336 continue; 2337 if (err != -NFS4ERR_DELAY) 2338 break; 2339 nfs4_handle_exception(server, err, &exception); 2340 } while (exception.retry); 2341 return err; 2342 } 2343 2344 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state) 2345 { 2346 struct nfs_open_context *ctx; 2347 int ret; 2348 2349 ctx = nfs4_state_find_open_context(state); 2350 if (IS_ERR(ctx)) 2351 return -EAGAIN; 2352 
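/*
 * We have a valid open context for this state. Clear the delegation
 * and open-mode flags first, so that the reclaim below re-establishes
 * the open state with the server from a clean slate.
 */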
clear_bit(NFS_DELEGATED_STATE, &state->flags); 2353 nfs_state_clear_open_state_flags(state); 2354 ret = nfs4_do_open_reclaim(ctx, state); 2355 put_nfs_open_context(ctx); 2356 return ret; 2357 } 2358 2359 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err) 2360 { 2361 switch (err) { 2362 default: 2363 printk(KERN_ERR "NFS: %s: unhandled error " 2364 "%d.\n", __func__, err); 2365 fallthrough; 2366 case 0: 2367 case -ENOENT: 2368 case -EAGAIN: 2369 case -ESTALE: 2370 case -ETIMEDOUT: 2371 break; 2372 case -NFS4ERR_BADSESSION: 2373 case -NFS4ERR_BADSLOT: 2374 case -NFS4ERR_BAD_HIGH_SLOT: 2375 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 2376 case -NFS4ERR_DEADSESSION: 2377 return -EAGAIN; 2378 case -NFS4ERR_STALE_CLIENTID: 2379 case -NFS4ERR_STALE_STATEID: 2380 /* Don't recall a delegation if it was lost */ 2381 nfs4_schedule_lease_recovery(server->nfs_client); 2382 return -EAGAIN; 2383 case -NFS4ERR_MOVED: 2384 nfs4_schedule_migration_recovery(server); 2385 return -EAGAIN; 2386 case -NFS4ERR_LEASE_MOVED: 2387 nfs4_schedule_lease_moved_recovery(server->nfs_client); 2388 return -EAGAIN; 2389 case -NFS4ERR_DELEG_REVOKED: 2390 case -NFS4ERR_ADMIN_REVOKED: 2391 case -NFS4ERR_EXPIRED: 2392 case -NFS4ERR_BAD_STATEID: 2393 case -NFS4ERR_OPENMODE: 2394 nfs_inode_find_state_and_recover(state->inode, 2395 stateid); 2396 nfs4_schedule_stateid_recovery(server, state); 2397 return -EAGAIN; 2398 case -NFS4ERR_DELAY: 2399 case -NFS4ERR_GRACE: 2400 ssleep(1); 2401 return -EAGAIN; 2402 case -ENOMEM: 2403 case -NFS4ERR_DENIED: 2404 if (fl) { 2405 struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner; 2406 if (lsp) 2407 set_bit(NFS_LOCK_LOST, &lsp->ls_flags); 2408 } 2409 return 0; 2410 } 2411 return err; 2412 } 2413 2414 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, 2415 struct nfs4_state *state, const nfs4_stateid *stateid) 2416 { 2417 struct nfs_server *server = NFS_SERVER(state->inode); 2418 struct nfs4_opendata *opendata; 2419 int err = 0; 2420 2421 opendata = nfs4_open_recoverdata_alloc(ctx, state, 2422 NFS4_OPEN_CLAIM_DELEG_CUR_FH); 2423 if (IS_ERR(opendata)) 2424 return PTR_ERR(opendata); 2425 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); 2426 if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) { 2427 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); 2428 if (err) 2429 goto out; 2430 } 2431 if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) { 2432 err = nfs4_open_recover_helper(opendata, FMODE_WRITE); 2433 if (err) 2434 goto out; 2435 } 2436 if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) { 2437 err = nfs4_open_recover_helper(opendata, FMODE_READ); 2438 if (err) 2439 goto out; 2440 } 2441 nfs_state_clear_delegation(state); 2442 out: 2443 nfs4_opendata_put(opendata); 2444 return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err); 2445 } 2446 2447 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata) 2448 { 2449 struct nfs4_opendata *data = calldata; 2450 2451 nfs4_setup_sequence(data->o_arg.server->nfs_client, 2452 &data->c_arg.seq_args, &data->c_res.seq_res, task); 2453 } 2454 2455 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata) 2456 { 2457 struct nfs4_opendata *data = calldata; 2458 2459 nfs40_sequence_done(task, &data->c_res.seq_res); 2460 2461 data->rpc_status = task->tk_status; 2462 if (data->rpc_status == 0) { 2463 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid); 2464 
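/*
 * The confirmed stateid was copied into the OPEN result above; now
 * mark the open-owner seqid as confirmed and renew the lease using
 * the timestamp sampled before the RPC was sent.
 */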
nfs_confirm_seqid(&data->owner->so_seqid, 0); 2465 renew_lease(data->o_res.server, data->timestamp); 2466 data->rpc_done = true; 2467 } 2468 } 2469 2470 static void nfs4_open_confirm_release(void *calldata) 2471 { 2472 struct nfs4_opendata *data = calldata; 2473 struct nfs4_state *state = NULL; 2474 2475 /* If this request hasn't been cancelled, do nothing */ 2476 if (!data->cancelled) 2477 goto out_free; 2478 /* In case of error, no cleanup! */ 2479 if (!data->rpc_done) 2480 goto out_free; 2481 state = nfs4_opendata_to_nfs4_state(data); 2482 if (!IS_ERR(state)) 2483 nfs4_close_state(state, data->o_arg.fmode); 2484 out_free: 2485 nfs4_opendata_put(data); 2486 } 2487 2488 static const struct rpc_call_ops nfs4_open_confirm_ops = { 2489 .rpc_call_prepare = nfs4_open_confirm_prepare, 2490 .rpc_call_done = nfs4_open_confirm_done, 2491 .rpc_release = nfs4_open_confirm_release, 2492 }; 2493 2494 /* 2495 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata 2496 */ 2497 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) 2498 { 2499 struct nfs_server *server = NFS_SERVER(d_inode(data->dir)); 2500 struct rpc_task *task; 2501 struct rpc_message msg = { 2502 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM], 2503 .rpc_argp = &data->c_arg, 2504 .rpc_resp = &data->c_res, 2505 .rpc_cred = data->owner->so_cred, 2506 }; 2507 struct rpc_task_setup task_setup_data = { 2508 .rpc_client = server->client, 2509 .rpc_message = &msg, 2510 .callback_ops = &nfs4_open_confirm_ops, 2511 .callback_data = data, 2512 .workqueue = nfsiod_workqueue, 2513 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 2514 }; 2515 int status; 2516 2517 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1, 2518 data->is_recover); 2519 kref_get(&data->kref); 2520 data->rpc_done = false; 2521 data->rpc_status = 0; 2522 data->timestamp = jiffies; 2523 task = rpc_run_task(&task_setup_data); 2524 if (IS_ERR(task)) 2525 return PTR_ERR(task); 2526 status = rpc_wait_for_completion_task(task); 2527 if (status != 0) { 2528 data->cancelled = true; 2529 smp_wmb(); 2530 } else 2531 status = data->rpc_status; 2532 rpc_put_task(task); 2533 return status; 2534 } 2535 2536 static void nfs4_open_prepare(struct rpc_task *task, void *calldata) 2537 { 2538 struct nfs4_opendata *data = calldata; 2539 struct nfs4_state_owner *sp = data->owner; 2540 struct nfs_client *clp = sp->so_server->nfs_client; 2541 enum open_claim_type4 claim = data->o_arg.claim; 2542 2543 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) 2544 goto out_wait; 2545 /* 2546 * Check if we still need to send an OPEN call, or if we can use 2547 * a delegation instead. 2548 */ 2549 if (data->state != NULL) { 2550 struct nfs_delegation *delegation; 2551 2552 if (can_open_cached(data->state, data->o_arg.fmode, 2553 data->o_arg.open_flags, claim)) 2554 goto out_no_action; 2555 rcu_read_lock(); 2556 delegation = nfs4_get_valid_delegation(data->state->inode); 2557 if (can_open_delegated(delegation, data->o_arg.fmode, claim)) 2558 goto unlock_no_action; 2559 rcu_read_unlock(); 2560 } 2561 /* Update client id. 
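The value in clp->cl_clientid was established earlier by SETCLIENTID (NFSv4.0) or EXCHANGE_ID (NFSv4.1+).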
*/ 2562 data->o_arg.clientid = clp->cl_clientid; 2563 switch (claim) { 2564 default: 2565 break; 2566 case NFS4_OPEN_CLAIM_PREVIOUS: 2567 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 2568 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 2569 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0]; 2570 fallthrough; 2571 case NFS4_OPEN_CLAIM_FH: 2572 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; 2573 } 2574 data->timestamp = jiffies; 2575 if (nfs4_setup_sequence(data->o_arg.server->nfs_client, 2576 &data->o_arg.seq_args, 2577 &data->o_res.seq_res, 2578 task) != 0) 2579 nfs_release_seqid(data->o_arg.seqid); 2580 2581 /* Set the create mode (note dependency on the session type) */ 2582 data->o_arg.createmode = NFS4_CREATE_UNCHECKED; 2583 if (data->o_arg.open_flags & O_EXCL) { 2584 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1; 2585 if (clp->cl_mvops->minor_version == 0) { 2586 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE; 2587 /* don't put an ACCESS op in OPEN compound if O_EXCL, 2588 * because ACCESS will return permission denied for 2589 * all bits until close */ 2590 data->o_res.access_request = data->o_arg.access = 0; 2591 } else if (nfs4_has_persistent_session(clp)) 2592 data->o_arg.createmode = NFS4_CREATE_GUARDED; 2593 } 2594 return; 2595 unlock_no_action: 2596 trace_nfs4_cached_open(data->state); 2597 rcu_read_unlock(); 2598 out_no_action: 2599 task->tk_action = NULL; 2600 out_wait: 2601 nfs4_sequence_done(task, &data->o_res.seq_res); 2602 } 2603 2604 static void nfs4_open_done(struct rpc_task *task, void *calldata) 2605 { 2606 struct nfs4_opendata *data = calldata; 2607 2608 data->rpc_status = task->tk_status; 2609 2610 if (!nfs4_sequence_process(task, &data->o_res.seq_res)) 2611 return; 2612 2613 if (task->tk_status == 0) { 2614 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) { 2615 switch (data->o_res.f_attr->mode & S_IFMT) { 2616 case S_IFREG: 2617 break; 2618 case S_IFLNK: 2619 data->rpc_status = -ELOOP; 2620 break; 2621 case S_IFDIR: 2622 data->rpc_status = -EISDIR; 2623 break; 2624 default: 2625 data->rpc_status = -ENOTDIR; 2626 } 2627 } 2628 renew_lease(data->o_res.server, data->timestamp); 2629 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)) 2630 nfs_confirm_seqid(&data->owner->so_seqid, 0); 2631 } 2632 data->rpc_done = true; 2633 } 2634 2635 static void nfs4_open_release(void *calldata) 2636 { 2637 struct nfs4_opendata *data = calldata; 2638 struct nfs4_state *state = NULL; 2639 2640 /* In case of error, no cleanup! */ 2641 if (data->rpc_status != 0 || !data->rpc_done) { 2642 nfs_release_seqid(data->o_arg.seqid); 2643 goto out_free; 2644 } 2645 /* If this request hasn't been cancelled, do nothing */ 2646 if (!data->cancelled) 2647 goto out_free; 2648 /* In case we need an open_confirm, no cleanup! 
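An unconfirmed NFSv4.0 open establishes no state that would need a CLOSE, so it is safe to skip the usual cleanup here.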
*/ 2649 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) 2650 goto out_free; 2651 state = nfs4_opendata_to_nfs4_state(data); 2652 if (!IS_ERR(state)) 2653 nfs4_close_state(state, data->o_arg.fmode); 2654 out_free: 2655 nfs4_opendata_put(data); 2656 } 2657 2658 static const struct rpc_call_ops nfs4_open_ops = { 2659 .rpc_call_prepare = nfs4_open_prepare, 2660 .rpc_call_done = nfs4_open_done, 2661 .rpc_release = nfs4_open_release, 2662 }; 2663 2664 static int nfs4_run_open_task(struct nfs4_opendata *data, 2665 struct nfs_open_context *ctx) 2666 { 2667 struct inode *dir = d_inode(data->dir); 2668 struct nfs_server *server = NFS_SERVER(dir); 2669 struct nfs_openargs *o_arg = &data->o_arg; 2670 struct nfs_openres *o_res = &data->o_res; 2671 struct rpc_task *task; 2672 struct rpc_message msg = { 2673 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN], 2674 .rpc_argp = o_arg, 2675 .rpc_resp = o_res, 2676 .rpc_cred = data->owner->so_cred, 2677 }; 2678 struct rpc_task_setup task_setup_data = { 2679 .rpc_client = server->client, 2680 .rpc_message = &msg, 2681 .callback_ops = &nfs4_open_ops, 2682 .callback_data = data, 2683 .workqueue = nfsiod_workqueue, 2684 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 2685 }; 2686 int status; 2687 2688 if (nfs_server_capable(dir, NFS_CAP_MOVEABLE)) 2689 task_setup_data.flags |= RPC_TASK_MOVEABLE; 2690 2691 kref_get(&data->kref); 2692 data->rpc_done = false; 2693 data->rpc_status = 0; 2694 data->cancelled = false; 2695 data->is_recover = false; 2696 if (!ctx) { 2697 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 1); 2698 data->is_recover = true; 2699 task_setup_data.flags |= RPC_TASK_TIMEOUT; 2700 } else { 2701 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 0); 2702 pnfs_lgopen_prepare(data, ctx); 2703 } 2704 task = rpc_run_task(&task_setup_data); 2705 if (IS_ERR(task)) 2706 return PTR_ERR(task); 2707 status = rpc_wait_for_completion_task(task); 2708 if (status != 0) { 2709 data->cancelled = true; 2710 smp_wmb(); 2711 } else 2712 status = data->rpc_status; 2713 rpc_put_task(task); 2714 2715 return status; 2716 } 2717 2718 static int _nfs4_recover_proc_open(struct nfs4_opendata *data) 2719 { 2720 struct inode *dir = d_inode(data->dir); 2721 struct nfs_openres *o_res = &data->o_res; 2722 int status; 2723 2724 status = nfs4_run_open_task(data, NULL); 2725 if (status != 0 || !data->rpc_done) 2726 return status; 2727 2728 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr); 2729 2730 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) 2731 status = _nfs4_proc_open_confirm(data); 2732 2733 return status; 2734 } 2735 2736 /* 2737 * Additional permission checks in order to distinguish between an 2738 * open for read, and an open for execute. This works around the 2739 * fact that NFSv4 OPEN treats read and execute permissions as being 2740 * the same. 2741 * Note that in the non-execute case, we want to turn off permission 2742 * checking if we just created a new file (POSIX open() semantics). 
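* Concretely: an open with FMODE_EXEC checks only NFS4_ACCESS_EXECUTE
* (or NFS4_ACCESS_LOOKUP for a directory), while a plain read open of
* a pre-existing file checks NFS4_ACCESS_READ.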
2743 */ 2744 static int nfs4_opendata_access(const struct cred *cred, 2745 struct nfs4_opendata *opendata, 2746 struct nfs4_state *state, fmode_t fmode) 2747 { 2748 struct nfs_access_entry cache; 2749 u32 mask, flags; 2750 2751 /* access call failed or for some reason the server doesn't 2752 * support any access modes -- defer access call until later */ 2753 if (opendata->o_res.access_supported == 0) 2754 return 0; 2755 2756 mask = 0; 2757 if (fmode & FMODE_EXEC) { 2758 /* ONLY check for exec rights */ 2759 if (S_ISDIR(state->inode->i_mode)) 2760 mask = NFS4_ACCESS_LOOKUP; 2761 else 2762 mask = NFS4_ACCESS_EXECUTE; 2763 } else if ((fmode & FMODE_READ) && !opendata->file_created) 2764 mask = NFS4_ACCESS_READ; 2765 2766 nfs_access_set_mask(&cache, opendata->o_res.access_result); 2767 nfs_access_add_cache(state->inode, &cache, cred); 2768 2769 flags = NFS4_ACCESS_READ | NFS4_ACCESS_EXECUTE | NFS4_ACCESS_LOOKUP; 2770 if ((mask & ~cache.mask & flags) == 0) 2771 return 0; 2772 2773 return -EACCES; 2774 } 2775 2776 /* 2777 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata 2778 */ 2779 static int _nfs4_proc_open(struct nfs4_opendata *data, 2780 struct nfs_open_context *ctx) 2781 { 2782 struct inode *dir = d_inode(data->dir); 2783 struct nfs_server *server = NFS_SERVER(dir); 2784 struct nfs_openargs *o_arg = &data->o_arg; 2785 struct nfs_openres *o_res = &data->o_res; 2786 int status; 2787 2788 status = nfs4_run_open_task(data, ctx); 2789 if (!data->rpc_done) 2790 return status; 2791 if (status != 0) { 2792 if (status == -NFS4ERR_BADNAME && 2793 !(o_arg->open_flags & O_CREAT)) 2794 return -ENOENT; 2795 return status; 2796 } 2797 2798 nfs_fattr_map_and_free_names(server, &data->f_attr); 2799 2800 if (o_arg->open_flags & O_CREAT) { 2801 if (o_arg->open_flags & O_EXCL) 2802 data->file_created = true; 2803 else if (o_res->cinfo.before != o_res->cinfo.after) 2804 data->file_created = true; 2805 if (data->file_created || 2806 inode_peek_iversion_raw(dir) != o_res->cinfo.after) 2807 nfs4_update_changeattr(dir, &o_res->cinfo, 2808 o_res->f_attr->time_start, 2809 NFS_INO_INVALID_DATA); 2810 } 2811 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) 2812 server->caps &= ~NFS_CAP_POSIX_LOCK; 2813 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 2814 status = _nfs4_proc_open_confirm(data); 2815 if (status != 0) 2816 return status; 2817 } 2818 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) { 2819 struct nfs_fh *fh = &o_res->fh; 2820 2821 nfs4_sequence_free_slot(&o_res->seq_res); 2822 if (o_arg->claim == NFS4_OPEN_CLAIM_FH) 2823 fh = NFS_FH(d_inode(data->dentry)); 2824 nfs4_proc_getattr(server, fh, o_res->f_attr, NULL); 2825 } 2826 return 0; 2827 } 2828 2829 /* 2830 * OPEN_EXPIRED: 2831 * reclaim state on the server after a network partition. 2832 * Assumes caller holds the appropriate lock 2833 */ 2834 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2835 { 2836 struct nfs4_opendata *opendata; 2837 int ret; 2838 2839 opendata = nfs4_open_recoverdata_alloc(ctx, state, NFS4_OPEN_CLAIM_FH); 2840 if (IS_ERR(opendata)) 2841 return PTR_ERR(opendata); 2842 /* 2843 * We're not recovering a delegation, so ask for no delegation. 2844 * Otherwise the recovery thread could deadlock with an outstanding 2845 * delegation return. 
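* (O_DIRECT is set below as the internal hint that this OPEN should
* not request a delegation.)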
2846 */ 2847 opendata->o_arg.open_flags = O_DIRECT; 2848 ret = nfs4_open_recover(opendata, state); 2849 if (ret == -ESTALE) 2850 d_drop(ctx->dentry); 2851 nfs4_opendata_put(opendata); 2852 return ret; 2853 } 2854 2855 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2856 { 2857 struct nfs_server *server = NFS_SERVER(state->inode); 2858 struct nfs4_exception exception = { }; 2859 int err; 2860 2861 do { 2862 err = _nfs4_open_expired(ctx, state); 2863 trace_nfs4_open_expired(ctx, 0, err); 2864 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 2865 continue; 2866 switch (err) { 2867 default: 2868 goto out; 2869 case -NFS4ERR_GRACE: 2870 case -NFS4ERR_DELAY: 2871 nfs4_handle_exception(server, err, &exception); 2872 err = 0; 2873 } 2874 } while (exception.retry); 2875 out: 2876 return err; 2877 } 2878 2879 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2880 { 2881 struct nfs_open_context *ctx; 2882 int ret; 2883 2884 ctx = nfs4_state_find_open_context(state); 2885 if (IS_ERR(ctx)) 2886 return -EAGAIN; 2887 ret = nfs4_do_open_expired(ctx, state); 2888 put_nfs_open_context(ctx); 2889 return ret; 2890 } 2891 2892 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state, 2893 const nfs4_stateid *stateid) 2894 { 2895 nfs_remove_bad_delegation(state->inode, stateid); 2896 nfs_state_clear_delegation(state); 2897 } 2898 2899 static void nfs40_clear_delegation_stateid(struct nfs4_state *state) 2900 { 2901 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL) 2902 nfs_finish_clear_delegation_stateid(state, NULL); 2903 } 2904 2905 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2906 { 2907 /* NFSv4.0 doesn't allow for delegation recovery on open expire */ 2908 nfs40_clear_delegation_stateid(state); 2909 nfs_state_clear_open_state_flags(state); 2910 return nfs4_open_expired(sp, state); 2911 } 2912 2913 static int nfs40_test_and_free_expired_stateid(struct nfs_server *server, 2914 nfs4_stateid *stateid, const struct cred *cred) 2915 { 2916 return -NFS4ERR_BAD_STATEID; 2917 } 2918 2919 #if defined(CONFIG_NFS_V4_1) 2920 static int nfs41_test_and_free_expired_stateid(struct nfs_server *server, 2921 nfs4_stateid *stateid, const struct cred *cred) 2922 { 2923 int status; 2924 2925 switch (stateid->type) { 2926 default: 2927 break; 2928 case NFS4_INVALID_STATEID_TYPE: 2929 case NFS4_SPECIAL_STATEID_TYPE: 2930 case NFS4_FREED_STATEID_TYPE: 2931 return -NFS4ERR_BAD_STATEID; 2932 case NFS4_REVOKED_STATEID_TYPE: 2933 goto out_free; 2934 } 2935 2936 status = nfs41_test_stateid(server, stateid, cred); 2937 switch (status) { 2938 case -NFS4ERR_EXPIRED: 2939 case -NFS4ERR_ADMIN_REVOKED: 2940 case -NFS4ERR_DELEG_REVOKED: 2941 break; 2942 default: 2943 return status; 2944 } 2945 out_free: 2946 /* Ack the revoked state to the server */ 2947 nfs41_free_stateid(server, stateid, cred, true); 2948 return -NFS4ERR_EXPIRED; 2949 } 2950 2951 static int nfs41_check_delegation_stateid(struct nfs4_state *state) 2952 { 2953 struct nfs_server *server = NFS_SERVER(state->inode); 2954 nfs4_stateid stateid; 2955 struct nfs_delegation *delegation; 2956 const struct cred *cred = NULL; 2957 int status, ret = NFS_OK; 2958 2959 /* Get the delegation credential for use by test/free_stateid */ 2960 rcu_read_lock(); 2961 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2962 if (delegation == NULL) { 2963 rcu_read_unlock(); 2964 nfs_state_clear_delegation(state); 2965 return NFS_OK; 
2966 } 2967 2968 spin_lock(&delegation->lock); 2969 nfs4_stateid_copy(&stateid, &delegation->stateid); 2970 2971 if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, 2972 &delegation->flags)) { 2973 spin_unlock(&delegation->lock); 2974 rcu_read_unlock(); 2975 return NFS_OK; 2976 } 2977 2978 if (delegation->cred) 2979 cred = get_cred(delegation->cred); 2980 spin_unlock(&delegation->lock); 2981 rcu_read_unlock(); 2982 status = nfs41_test_and_free_expired_stateid(server, &stateid, cred); 2983 trace_nfs4_test_delegation_stateid(state, NULL, status); 2984 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) 2985 nfs_finish_clear_delegation_stateid(state, &stateid); 2986 else 2987 ret = status; 2988 2989 put_cred(cred); 2990 return ret; 2991 } 2992 2993 static void nfs41_delegation_recover_stateid(struct nfs4_state *state) 2994 { 2995 nfs4_stateid tmp; 2996 2997 if (test_bit(NFS_DELEGATED_STATE, &state->flags) && 2998 nfs4_copy_delegation_stateid(state->inode, state->state, 2999 &tmp, NULL) && 3000 nfs4_stateid_match_other(&state->stateid, &tmp)) 3001 nfs_state_set_delegation(state, &tmp, state->state); 3002 else 3003 nfs_state_clear_delegation(state); 3004 } 3005 3006 /** 3007 * nfs41_check_expired_locks - possibly free a lock stateid 3008 * 3009 * @state: NFSv4 state for an inode 3010 * 3011 * Returns NFS_OK if recovery for this stateid is now finished. 3012 * Otherwise a negative NFS4ERR value is returned. 3013 */ 3014 static int nfs41_check_expired_locks(struct nfs4_state *state) 3015 { 3016 int status, ret = NFS_OK; 3017 struct nfs4_lock_state *lsp, *prev = NULL; 3018 struct nfs_server *server = NFS_SERVER(state->inode); 3019 3020 if (!test_bit(LK_STATE_IN_USE, &state->flags)) 3021 goto out; 3022 3023 spin_lock(&state->state_lock); 3024 list_for_each_entry(lsp, &state->lock_states, ls_locks) { 3025 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { 3026 const struct cred *cred = lsp->ls_state->owner->so_cred; 3027 3028 refcount_inc(&lsp->ls_count); 3029 spin_unlock(&state->state_lock); 3030 3031 nfs4_put_lock_state(prev); 3032 prev = lsp; 3033 3034 status = nfs41_test_and_free_expired_stateid(server, 3035 &lsp->ls_stateid, 3036 cred); 3037 trace_nfs4_test_lock_stateid(state, lsp, status); 3038 if (status == -NFS4ERR_EXPIRED || 3039 status == -NFS4ERR_BAD_STATEID) { 3040 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 3041 lsp->ls_stateid.type = NFS4_INVALID_STATEID_TYPE; 3042 if (!recover_lost_locks) 3043 set_bit(NFS_LOCK_LOST, &lsp->ls_flags); 3044 } else if (status != NFS_OK) { 3045 ret = status; 3046 nfs4_put_lock_state(prev); 3047 goto out; 3048 } 3049 spin_lock(&state->state_lock); 3050 } 3051 } 3052 spin_unlock(&state->state_lock); 3053 nfs4_put_lock_state(prev); 3054 out: 3055 return ret; 3056 } 3057 3058 /** 3059 * nfs41_check_open_stateid - possibly free an open stateid 3060 * 3061 * @state: NFSv4 state for an inode 3062 * 3063 * Returns NFS_OK if recovery for this stateid is now finished. 3064 * Otherwise a negative NFS4ERR value is returned. 
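* If the stateid proves to be expired, revoked or otherwise invalid,
* the open state flags are cleared so that the caller can fall back
* to full OPEN recovery.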
3065 */ 3066 static int nfs41_check_open_stateid(struct nfs4_state *state) 3067 { 3068 struct nfs_server *server = NFS_SERVER(state->inode); 3069 nfs4_stateid *stateid = &state->open_stateid; 3070 const struct cred *cred = state->owner->so_cred; 3071 int status; 3072 3073 if (test_bit(NFS_OPEN_STATE, &state->flags) == 0) 3074 return -NFS4ERR_BAD_STATEID; 3075 status = nfs41_test_and_free_expired_stateid(server, stateid, cred); 3076 trace_nfs4_test_open_stateid(state, NULL, status); 3077 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) { 3078 nfs_state_clear_open_state_flags(state); 3079 stateid->type = NFS4_INVALID_STATEID_TYPE; 3080 return status; 3081 } 3082 if (nfs_open_stateid_recover_openmode(state)) 3083 return -NFS4ERR_OPENMODE; 3084 return NFS_OK; 3085 } 3086 3087 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 3088 { 3089 int status; 3090 3091 status = nfs41_check_delegation_stateid(state); 3092 if (status != NFS_OK) 3093 return status; 3094 nfs41_delegation_recover_stateid(state); 3095 3096 status = nfs41_check_expired_locks(state); 3097 if (status != NFS_OK) 3098 return status; 3099 status = nfs41_check_open_stateid(state); 3100 if (status != NFS_OK) 3101 status = nfs4_open_expired(sp, state); 3102 return status; 3103 } 3104 #endif 3105 3106 /* 3107 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-* 3108 * fields corresponding to attributes that were used to store the verifier. 3109 * Make sure we clobber those fields in the later setattr call 3110 */ 3111 static unsigned nfs4_exclusive_attrset(struct nfs4_opendata *opendata, 3112 struct iattr *sattr, struct nfs4_label **label) 3113 { 3114 const __u32 *bitmask = opendata->o_arg.server->exclcreat_bitmask; 3115 __u32 attrset[3]; 3116 unsigned ret; 3117 unsigned i; 3118 3119 for (i = 0; i < ARRAY_SIZE(attrset); i++) { 3120 attrset[i] = opendata->o_res.attrset[i]; 3121 if (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE4_1) 3122 attrset[i] &= ~bitmask[i]; 3123 } 3124 3125 ret = (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE) ? 
3126 sattr->ia_valid : 0; 3127 3128 if ((attrset[1] & (FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET))) { 3129 if (sattr->ia_valid & ATTR_ATIME_SET) 3130 ret |= ATTR_ATIME_SET; 3131 else 3132 ret |= ATTR_ATIME; 3133 } 3134 3135 if ((attrset[1] & (FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET))) { 3136 if (sattr->ia_valid & ATTR_MTIME_SET) 3137 ret |= ATTR_MTIME_SET; 3138 else 3139 ret |= ATTR_MTIME; 3140 } 3141 3142 if (!(attrset[2] & FATTR4_WORD2_SECURITY_LABEL)) 3143 *label = NULL; 3144 return ret; 3145 } 3146 3147 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, 3148 struct nfs_open_context *ctx) 3149 { 3150 struct nfs4_state_owner *sp = opendata->owner; 3151 struct nfs_server *server = sp->so_server; 3152 struct dentry *dentry; 3153 struct nfs4_state *state; 3154 fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx); 3155 struct inode *dir = d_inode(opendata->dir); 3156 unsigned long dir_verifier; 3157 int ret; 3158 3159 dir_verifier = nfs_save_change_attribute(dir); 3160 3161 ret = _nfs4_proc_open(opendata, ctx); 3162 if (ret != 0) 3163 goto out; 3164 3165 state = _nfs4_opendata_to_nfs4_state(opendata); 3166 ret = PTR_ERR(state); 3167 if (IS_ERR(state)) 3168 goto out; 3169 ctx->state = state; 3170 if (server->caps & NFS_CAP_POSIX_LOCK) 3171 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); 3172 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK) 3173 set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags); 3174 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_PRESERVE_UNLINKED) 3175 set_bit(NFS_INO_PRESERVE_UNLINKED, &NFS_I(state->inode)->flags); 3176 3177 dentry = opendata->dentry; 3178 if (d_really_is_negative(dentry)) { 3179 struct dentry *alias; 3180 d_drop(dentry); 3181 alias = d_splice_alias(igrab(state->inode), dentry); 3182 /* d_splice_alias() can't fail here - it's a non-directory */ 3183 if (alias) { 3184 dput(ctx->dentry); 3185 ctx->dentry = dentry = alias; 3186 } 3187 } 3188 3189 switch(opendata->o_arg.claim) { 3190 default: 3191 break; 3192 case NFS4_OPEN_CLAIM_NULL: 3193 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 3194 case NFS4_OPEN_CLAIM_DELEGATE_PREV: 3195 if (!opendata->rpc_done) 3196 break; 3197 if (opendata->o_res.delegation.type != 0) 3198 dir_verifier = nfs_save_change_attribute(dir); 3199 nfs_set_verifier(dentry, dir_verifier); 3200 } 3201 3202 /* Parse layoutget results before we check for access */ 3203 pnfs_parse_lgopen(state->inode, opendata->lgp, ctx); 3204 3205 ret = nfs4_opendata_access(sp->so_cred, opendata, state, acc_mode); 3206 if (ret != 0) 3207 goto out; 3208 3209 if (d_inode(dentry) == state->inode) 3210 nfs_inode_attach_open_context(ctx); 3211 3212 out: 3213 if (!opendata->cancelled) { 3214 if (opendata->lgp) { 3215 nfs4_lgopen_release(opendata->lgp); 3216 opendata->lgp = NULL; 3217 } 3218 nfs4_sequence_free_slot(&opendata->o_res.seq_res); 3219 } 3220 return ret; 3221 } 3222 3223 /* 3224 * Returns a referenced nfs4_state 3225 */ 3226 static int _nfs4_do_open(struct inode *dir, 3227 struct nfs_open_context *ctx, 3228 int flags, 3229 const struct nfs4_open_createattrs *c, 3230 int *opened) 3231 { 3232 struct nfs4_state_owner *sp; 3233 struct nfs4_state *state = NULL; 3234 struct nfs_server *server = NFS_SERVER(dir); 3235 struct nfs4_opendata *opendata; 3236 struct dentry *dentry = ctx->dentry; 3237 const struct cred *cred = ctx->cred; 3238 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold; 3239 fmode_t fmode = _nfs4_ctx_to_openmode(ctx); 3240 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL; 3241 struct iattr *sattr = 
c->sattr; 3242 struct nfs4_label *label = c->label; 3243 int status; 3244 3245 /* Protect against reboot recovery conflicts */ 3246 status = -ENOMEM; 3247 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL); 3248 if (sp == NULL) { 3249 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n"); 3250 goto out_err; 3251 } 3252 status = nfs4_client_recover_expired_lease(server->nfs_client); 3253 if (status != 0) 3254 goto err_put_state_owner; 3255 if (d_really_is_positive(dentry)) 3256 nfs4_return_incompatible_delegation(d_inode(dentry), fmode); 3257 status = -ENOMEM; 3258 if (d_really_is_positive(dentry)) 3259 claim = NFS4_OPEN_CLAIM_FH; 3260 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, 3261 c, claim, GFP_KERNEL); 3262 if (opendata == NULL) 3263 goto err_put_state_owner; 3264 3265 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) { 3266 if (!opendata->f_attr.mdsthreshold) { 3267 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); 3268 if (!opendata->f_attr.mdsthreshold) 3269 goto err_opendata_put; 3270 } 3271 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; 3272 } 3273 if (d_really_is_positive(dentry)) 3274 opendata->state = nfs4_get_open_state(d_inode(dentry), sp); 3275 3276 status = _nfs4_open_and_get_state(opendata, ctx); 3277 if (status != 0) 3278 goto err_opendata_put; 3279 state = ctx->state; 3280 3281 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) && 3282 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) { 3283 unsigned attrs = nfs4_exclusive_attrset(opendata, sattr, &label); 3284 /* 3285 * send create attributes which was not set by open 3286 * with an extra setattr. 3287 */ 3288 if (attrs || label) { 3289 unsigned ia_old = sattr->ia_valid; 3290 3291 sattr->ia_valid = attrs; 3292 nfs_fattr_init(opendata->o_res.f_attr); 3293 status = nfs4_do_setattr(state->inode, cred, 3294 opendata->o_res.f_attr, sattr, 3295 ctx, label); 3296 if (status == 0) { 3297 nfs_setattr_update_inode(state->inode, sattr, 3298 opendata->o_res.f_attr); 3299 nfs_setsecurity(state->inode, opendata->o_res.f_attr); 3300 } 3301 sattr->ia_valid = ia_old; 3302 } 3303 } 3304 if (opened && opendata->file_created) 3305 *opened = 1; 3306 3307 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) { 3308 *ctx_th = opendata->f_attr.mdsthreshold; 3309 opendata->f_attr.mdsthreshold = NULL; 3310 } 3311 3312 nfs4_opendata_put(opendata); 3313 nfs4_put_state_owner(sp); 3314 return 0; 3315 err_opendata_put: 3316 nfs4_opendata_put(opendata); 3317 err_put_state_owner: 3318 nfs4_put_state_owner(sp); 3319 out_err: 3320 return status; 3321 } 3322 3323 3324 static struct nfs4_state *nfs4_do_open(struct inode *dir, 3325 struct nfs_open_context *ctx, 3326 int flags, 3327 struct iattr *sattr, 3328 struct nfs4_label *label, 3329 int *opened) 3330 { 3331 struct nfs_server *server = NFS_SERVER(dir); 3332 struct nfs4_exception exception = { 3333 .interruptible = true, 3334 }; 3335 struct nfs4_state *res; 3336 struct nfs4_open_createattrs c = { 3337 .label = label, 3338 .sattr = sattr, 3339 .verf = { 3340 [0] = (__u32)jiffies, 3341 [1] = (__u32)current->pid, 3342 }, 3343 }; 3344 int status; 3345 3346 do { 3347 status = _nfs4_do_open(dir, ctx, flags, &c, opened); 3348 res = ctx->state; 3349 trace_nfs4_open_file(ctx, flags, status); 3350 if (status == 0) 3351 break; 3352 /* NOTE: BAD_SEQID means the server and client disagree about the 3353 * book-keeping w.r.t. state-changing operations 3354 * (OPEN/CLOSE/LOCK/LOCKU...) 
3355 * It is actually a sign of a bug on the client or on the server. 3356 * 3357 * If we receive a BAD_SEQID error in the particular case of 3358 * doing an OPEN, we assume that nfs_increment_open_seqid() will 3359 * have unhashed the old state_owner for us, and that we can 3360 * therefore safely retry using a new one. We should still warn 3361 * the user though... 3362 */ 3363 if (status == -NFS4ERR_BAD_SEQID) { 3364 pr_warn_ratelimited("NFS: v4 server %s " 3365 " returned a bad sequence-id error!\n", 3366 NFS_SERVER(dir)->nfs_client->cl_hostname); 3367 exception.retry = 1; 3368 continue; 3369 } 3370 /* 3371 * BAD_STATEID on OPEN means that the server cancelled our 3372 * state before it received the OPEN_CONFIRM. 3373 * Recover by retrying the request as per the discussion 3374 * on Page 181 of RFC3530. 3375 */ 3376 if (status == -NFS4ERR_BAD_STATEID) { 3377 exception.retry = 1; 3378 continue; 3379 } 3380 if (status == -NFS4ERR_EXPIRED) { 3381 nfs4_schedule_lease_recovery(server->nfs_client); 3382 exception.retry = 1; 3383 continue; 3384 } 3385 if (status == -EAGAIN) { 3386 /* We must have found a delegation */ 3387 exception.retry = 1; 3388 continue; 3389 } 3390 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception)) 3391 continue; 3392 res = ERR_PTR(nfs4_handle_exception(server, 3393 status, &exception)); 3394 } while (exception.retry); 3395 return res; 3396 } 3397 3398 static int _nfs4_do_setattr(struct inode *inode, 3399 struct nfs_setattrargs *arg, 3400 struct nfs_setattrres *res, 3401 const struct cred *cred, 3402 struct nfs_open_context *ctx) 3403 { 3404 struct nfs_server *server = NFS_SERVER(inode); 3405 struct rpc_message msg = { 3406 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 3407 .rpc_argp = arg, 3408 .rpc_resp = res, 3409 .rpc_cred = cred, 3410 }; 3411 const struct cred *delegation_cred = NULL; 3412 unsigned long timestamp = jiffies; 3413 bool truncate; 3414 int status; 3415 3416 nfs_fattr_init(res->fattr); 3417 3418 /* Servers should only apply open mode checks for file size changes */ 3419 truncate = (arg->iap->ia_valid & ATTR_SIZE) ? 
true : false; 3420 if (!truncate) { 3421 nfs4_inode_make_writeable(inode); 3422 goto zero_stateid; 3423 } 3424 3425 if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) { 3426 /* Use that stateid */ 3427 } else if (ctx != NULL && ctx->state) { 3428 struct nfs_lock_context *l_ctx; 3429 if (!nfs4_valid_open_stateid(ctx->state)) 3430 return -EBADF; 3431 l_ctx = nfs_get_lock_context(ctx); 3432 if (IS_ERR(l_ctx)) 3433 return PTR_ERR(l_ctx); 3434 status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx, 3435 &arg->stateid, &delegation_cred); 3436 nfs_put_lock_context(l_ctx); 3437 if (status == -EIO) 3438 return -EBADF; 3439 else if (status == -EAGAIN) 3440 goto zero_stateid; 3441 } else { 3442 zero_stateid: 3443 nfs4_stateid_copy(&arg->stateid, &zero_stateid); 3444 } 3445 if (delegation_cred) 3446 msg.rpc_cred = delegation_cred; 3447 3448 status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1); 3449 3450 put_cred(delegation_cred); 3451 if (status == 0 && ctx != NULL) 3452 renew_lease(server, timestamp); 3453 trace_nfs4_setattr(inode, &arg->stateid, status); 3454 return status; 3455 } 3456 3457 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred, 3458 struct nfs_fattr *fattr, struct iattr *sattr, 3459 struct nfs_open_context *ctx, struct nfs4_label *ilabel) 3460 { 3461 struct nfs_server *server = NFS_SERVER(inode); 3462 __u32 bitmask[NFS4_BITMASK_SZ]; 3463 struct nfs4_state *state = ctx ? ctx->state : NULL; 3464 struct nfs_setattrargs arg = { 3465 .fh = NFS_FH(inode), 3466 .iap = sattr, 3467 .server = server, 3468 .bitmask = bitmask, 3469 .label = ilabel, 3470 }; 3471 struct nfs_setattrres res = { 3472 .fattr = fattr, 3473 .server = server, 3474 }; 3475 struct nfs4_exception exception = { 3476 .state = state, 3477 .inode = inode, 3478 .stateid = &arg.stateid, 3479 }; 3480 unsigned long adjust_flags = NFS_INO_INVALID_CHANGE | 3481 NFS_INO_INVALID_CTIME; 3482 int err; 3483 3484 if (sattr->ia_valid & (ATTR_MODE | ATTR_KILL_SUID | ATTR_KILL_SGID)) 3485 adjust_flags |= NFS_INO_INVALID_MODE; 3486 if (sattr->ia_valid & (ATTR_UID | ATTR_GID)) 3487 adjust_flags |= NFS_INO_INVALID_OTHER; 3488 if (sattr->ia_valid & ATTR_ATIME) 3489 adjust_flags |= NFS_INO_INVALID_ATIME; 3490 if (sattr->ia_valid & ATTR_MTIME) 3491 adjust_flags |= NFS_INO_INVALID_MTIME; 3492 3493 do { 3494 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label), 3495 inode, adjust_flags); 3496 3497 err = _nfs4_do_setattr(inode, &arg, &res, cred, ctx); 3498 switch (err) { 3499 case -NFS4ERR_OPENMODE: 3500 if (!(sattr->ia_valid & ATTR_SIZE)) { 3501 pr_warn_once("NFSv4: server %s is incorrectly " 3502 "applying open mode checks to " 3503 "a SETATTR that is not " 3504 "changing file size.\n", 3505 server->nfs_client->cl_hostname); 3506 } 3507 if (state && !(state->state & FMODE_WRITE)) { 3508 err = -EBADF; 3509 if (sattr->ia_valid & ATTR_OPEN) 3510 err = -EACCES; 3511 goto out; 3512 } 3513 } 3514 err = nfs4_handle_exception(server, err, &exception); 3515 } while (exception.retry); 3516 out: 3517 return err; 3518 } 3519 3520 static bool 3521 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task) 3522 { 3523 if (inode == NULL || !nfs_have_layout(inode)) 3524 return false; 3525 3526 return pnfs_wait_on_layoutreturn(inode, task); 3527 } 3528 3529 /* 3530 * Update the seqid of an open stateid 3531 */ 3532 static void nfs4_sync_open_stateid(nfs4_stateid *dst, 3533 struct nfs4_state *state) 3534 { 3535 __be32 seqid_open; 3536 u32 
dst_seqid; 3537 int seq; 3538 3539 for (;;) { 3540 if (!nfs4_valid_open_stateid(state)) 3541 break; 3542 seq = read_seqbegin(&state->seqlock); 3543 if (!nfs4_state_match_open_stateid_other(state, dst)) { 3544 nfs4_stateid_copy(dst, &state->open_stateid); 3545 if (read_seqretry(&state->seqlock, seq)) 3546 continue; 3547 break; 3548 } 3549 seqid_open = state->open_stateid.seqid; 3550 if (read_seqretry(&state->seqlock, seq)) 3551 continue; 3552 3553 dst_seqid = be32_to_cpu(dst->seqid); 3554 if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) < 0) 3555 dst->seqid = seqid_open; 3556 break; 3557 } 3558 } 3559 3560 /* 3561 * Update the seqid of an open stateid after receiving 3562 * NFS4ERR_OLD_STATEID 3563 */ 3564 static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst, 3565 struct nfs4_state *state) 3566 { 3567 __be32 seqid_open; 3568 u32 dst_seqid; 3569 bool ret; 3570 int seq, status = -EAGAIN; 3571 DEFINE_WAIT(wait); 3572 3573 for (;;) { 3574 ret = false; 3575 if (!nfs4_valid_open_stateid(state)) 3576 break; 3577 seq = read_seqbegin(&state->seqlock); 3578 if (!nfs4_state_match_open_stateid_other(state, dst)) { 3579 if (read_seqretry(&state->seqlock, seq)) 3580 continue; 3581 break; 3582 } 3583 3584 write_seqlock(&state->seqlock); 3585 seqid_open = state->open_stateid.seqid; 3586 3587 dst_seqid = be32_to_cpu(dst->seqid); 3588 3589 /* Did another OPEN bump the state's seqid? try again: */ 3590 if ((s32)(be32_to_cpu(seqid_open) - dst_seqid) > 0) { 3591 dst->seqid = seqid_open; 3592 write_sequnlock(&state->seqlock); 3593 ret = true; 3594 break; 3595 } 3596 3597 /* server says we're behind but we haven't seen the update yet */ 3598 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags); 3599 prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE); 3600 write_sequnlock(&state->seqlock); 3601 trace_nfs4_close_stateid_update_wait(state->inode, dst, 0); 3602 3603 if (fatal_signal_pending(current) || nfs_current_task_exiting()) 3604 status = -EINTR; 3605 else 3606 if (schedule_timeout(5*HZ) != 0) 3607 status = 0; 3608 3609 finish_wait(&state->waitq, &wait); 3610 3611 if (!status) 3612 continue; 3613 if (status == -EINTR) 3614 break; 3615 3616 /* we slept the whole 5 seconds, we must have lost a seqid */ 3617 dst->seqid = cpu_to_be32(dst_seqid + 1); 3618 ret = true; 3619 break; 3620 } 3621 3622 return ret; 3623 } 3624 3625 struct nfs4_closedata { 3626 struct inode *inode; 3627 struct nfs4_state *state; 3628 struct nfs_closeargs arg; 3629 struct nfs_closeres res; 3630 struct { 3631 struct nfs4_layoutreturn_args arg; 3632 struct nfs4_layoutreturn_res res; 3633 struct nfs4_xdr_opaque_data ld_private; 3634 u32 roc_barrier; 3635 bool roc; 3636 } lr; 3637 struct nfs_fattr fattr; 3638 unsigned long timestamp; 3639 unsigned short retrans; 3640 }; 3641 3642 static void nfs4_free_closedata(void *data) 3643 { 3644 struct nfs4_closedata *calldata = data; 3645 struct nfs4_state_owner *sp = calldata->state->owner; 3646 struct super_block *sb = calldata->state->inode->i_sb; 3647 3648 if (calldata->lr.roc) 3649 pnfs_roc_release(&calldata->lr.arg, &calldata->lr.res, 3650 calldata->res.lr_ret); 3651 nfs4_put_open_state(calldata->state); 3652 nfs_free_seqid(calldata->arg.seqid); 3653 nfs4_put_state_owner(sp); 3654 nfs_sb_deactive(sb); 3655 kfree(calldata); 3656 } 3657 3658 static void nfs4_close_done(struct rpc_task *task, void *data) 3659 { 3660 struct nfs4_closedata *calldata = data; 3661 struct nfs4_state *state = calldata->state; 3662 struct nfs_server *server = NFS_SERVER(calldata->inode); 3663 nfs4_stateid *res_stateid = NULL; 
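/*
 * Pre-load the exception tracking below with the CLOSE stateid and
 * the current retransmit count, so that nfs4_async_handle_exception()
 * can make the correct retry decision.
 */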
3664 struct nfs4_exception exception = { 3665 .state = state, 3666 .inode = calldata->inode, 3667 .stateid = &calldata->arg.stateid, 3668 .retrans = calldata->retrans, 3669 }; 3670 3671 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 3672 return; 3673 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status); 3674 3675 /* Handle Layoutreturn errors */ 3676 if (pnfs_roc_done(task, &calldata->arg.lr_args, &calldata->res.lr_res, 3677 &calldata->res.lr_ret) == -EAGAIN) 3678 goto out_restart; 3679 3680 /* hmm. we are done with the inode, and in the process of freeing 3681 * the state_owner. we keep this around to process errors 3682 */ 3683 switch (task->tk_status) { 3684 case 0: 3685 res_stateid = &calldata->res.stateid; 3686 renew_lease(server, calldata->timestamp); 3687 break; 3688 case -NFS4ERR_ACCESS: 3689 if (calldata->arg.bitmask != NULL) { 3690 calldata->arg.bitmask = NULL; 3691 calldata->res.fattr = NULL; 3692 goto out_restart; 3693 3694 } 3695 break; 3696 case -NFS4ERR_OLD_STATEID: 3697 /* Did we race with OPEN? */ 3698 if (nfs4_refresh_open_old_stateid(&calldata->arg.stateid, 3699 state)) 3700 goto out_restart; 3701 goto out_release; 3702 case -NFS4ERR_ADMIN_REVOKED: 3703 case -NFS4ERR_STALE_STATEID: 3704 case -NFS4ERR_EXPIRED: 3705 nfs4_free_revoked_stateid(server, 3706 &calldata->arg.stateid, 3707 task->tk_msg.rpc_cred); 3708 fallthrough; 3709 case -NFS4ERR_BAD_STATEID: 3710 if (calldata->arg.fmode == 0) 3711 break; 3712 fallthrough; 3713 default: 3714 task->tk_status = nfs4_async_handle_exception(task, 3715 server, task->tk_status, &exception); 3716 calldata->retrans = exception.retrans; 3717 if (exception.retry) 3718 goto out_restart; 3719 } 3720 nfs_clear_open_stateid(state, &calldata->arg.stateid, 3721 res_stateid, calldata->arg.fmode); 3722 out_release: 3723 task->tk_status = 0; 3724 nfs_release_seqid(calldata->arg.seqid); 3725 nfs_refresh_inode(calldata->inode, &calldata->fattr); 3726 dprintk("%s: ret = %d\n", __func__, task->tk_status); 3727 return; 3728 out_restart: 3729 task->tk_status = 0; 3730 rpc_restart_call_prepare(task); 3731 goto out_release; 3732 } 3733 3734 static void nfs4_close_prepare(struct rpc_task *task, void *data) 3735 { 3736 struct nfs4_closedata *calldata = data; 3737 struct nfs4_state *state = calldata->state; 3738 struct inode *inode = calldata->inode; 3739 struct nfs_server *server = NFS_SERVER(inode); 3740 struct pnfs_layout_hdr *lo; 3741 bool is_rdonly, is_wronly, is_rdwr; 3742 int call_close = 0; 3743 3744 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 3745 goto out_wait; 3746 3747 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; 3748 spin_lock(&state->owner->so_lock); 3749 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags); 3750 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags); 3751 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags); 3752 /* Calculate the change in open mode */ 3753 calldata->arg.fmode = 0; 3754 if (state->n_rdwr == 0) { 3755 if (state->n_rdonly == 0) 3756 call_close |= is_rdonly; 3757 else if (is_rdonly) 3758 calldata->arg.fmode |= FMODE_READ; 3759 if (state->n_wronly == 0) 3760 call_close |= is_wronly; 3761 else if (is_wronly) 3762 calldata->arg.fmode |= FMODE_WRITE; 3763 if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE)) 3764 call_close |= is_rdwr; 3765 } else if (is_rdwr) 3766 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE; 3767 3768 nfs4_sync_open_stateid(&calldata->arg.stateid, state); 3769 if (!nfs4_valid_open_stateid(state)) 3770 call_close = 0; 3771 
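/*
 * At this point call_close is non-zero only if some share access
 * actually needs to be dropped on the server; the calculation above
 * is protected by the state owner's so_lock.
 */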
spin_unlock(&state->owner->so_lock); 3772 3773 if (!call_close) { 3774 /* Note: exit _without_ calling nfs4_close_done */ 3775 goto out_no_action; 3776 } 3777 3778 if (!calldata->lr.roc && nfs4_wait_on_layoutreturn(inode, task)) { 3779 nfs_release_seqid(calldata->arg.seqid); 3780 goto out_wait; 3781 } 3782 3783 lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL; 3784 if (lo && !pnfs_layout_is_valid(lo)) { 3785 calldata->arg.lr_args = NULL; 3786 calldata->res.lr_res = NULL; 3787 } 3788 3789 if (calldata->arg.fmode == 0) 3790 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; 3791 3792 if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) { 3793 /* Close-to-open cache consistency revalidation */ 3794 if (!nfs4_have_delegation(inode, FMODE_READ, 0)) { 3795 nfs4_bitmask_set(calldata->arg.bitmask_store, 3796 server->cache_consistency_bitmask, 3797 inode, 0); 3798 calldata->arg.bitmask = calldata->arg.bitmask_store; 3799 } else 3800 calldata->arg.bitmask = NULL; 3801 } 3802 3803 calldata->arg.share_access = 3804 nfs4_fmode_to_share_access(calldata->arg.fmode); 3805 3806 if (calldata->res.fattr == NULL) 3807 calldata->arg.bitmask = NULL; 3808 else if (calldata->arg.bitmask == NULL) 3809 calldata->res.fattr = NULL; 3810 calldata->timestamp = jiffies; 3811 if (nfs4_setup_sequence(NFS_SERVER(inode)->nfs_client, 3812 &calldata->arg.seq_args, 3813 &calldata->res.seq_res, 3814 task) != 0) 3815 nfs_release_seqid(calldata->arg.seqid); 3816 return; 3817 out_no_action: 3818 task->tk_action = NULL; 3819 out_wait: 3820 nfs4_sequence_done(task, &calldata->res.seq_res); 3821 } 3822 3823 static const struct rpc_call_ops nfs4_close_ops = { 3824 .rpc_call_prepare = nfs4_close_prepare, 3825 .rpc_call_done = nfs4_close_done, 3826 .rpc_release = nfs4_free_closedata, 3827 }; 3828 3829 /* 3830 * It is possible for data to be read/written from a mem-mapped file 3831 * after the sys_close call (which hits the vfs layer as a flush). 3832 * This means that we can't safely call nfsv4 close on a file until 3833 * the inode is cleared. This in turn means that we are not good 3834 * NFSv4 citizens - we do not indicate to the server to update the file's 3835 * share state even when we are done with one of the three share 3836 * stateid's in the inode. 3837 * 3838 * NOTE: Caller must be holding the sp->so_owner semaphore! 
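 *
 * Note that nfs4_do_close() below always issues the CLOSE (or
 * OPEN_DOWNGRADE) as an asynchronous RPC task; callers that need to
 * block pass a non-zero 'wait' and wait for the task to complete.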
3839 */ 3840 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) 3841 { 3842 struct nfs_server *server = NFS_SERVER(state->inode); 3843 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 3844 struct nfs4_closedata *calldata; 3845 struct nfs4_state_owner *sp = state->owner; 3846 struct rpc_task *task; 3847 struct rpc_message msg = { 3848 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE], 3849 .rpc_cred = state->owner->so_cred, 3850 }; 3851 struct rpc_task_setup task_setup_data = { 3852 .rpc_client = server->client, 3853 .rpc_message = &msg, 3854 .callback_ops = &nfs4_close_ops, 3855 .workqueue = nfsiod_workqueue, 3856 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 3857 }; 3858 int status = -ENOMEM; 3859 3860 if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE)) 3861 task_setup_data.flags |= RPC_TASK_MOVEABLE; 3862 3863 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP, 3864 &task_setup_data.rpc_client, &msg); 3865 3866 calldata = kzalloc(sizeof(*calldata), gfp_mask); 3867 if (calldata == NULL) 3868 goto out; 3869 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1, 0); 3870 calldata->inode = state->inode; 3871 calldata->state = state; 3872 calldata->arg.fh = NFS_FH(state->inode); 3873 if (!nfs4_copy_open_stateid(&calldata->arg.stateid, state)) 3874 goto out_free_calldata; 3875 /* Serialization for the sequence id */ 3876 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 3877 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask); 3878 if (IS_ERR(calldata->arg.seqid)) 3879 goto out_free_calldata; 3880 nfs_fattr_init(&calldata->fattr); 3881 calldata->arg.fmode = 0; 3882 calldata->lr.arg.ld_private = &calldata->lr.ld_private; 3883 calldata->res.fattr = &calldata->fattr; 3884 calldata->res.seqid = calldata->arg.seqid; 3885 calldata->res.server = server; 3886 calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT; 3887 calldata->lr.roc = pnfs_roc(state->inode, 3888 &calldata->lr.arg, &calldata->lr.res, msg.rpc_cred); 3889 if (calldata->lr.roc) { 3890 calldata->arg.lr_args = &calldata->lr.arg; 3891 calldata->res.lr_res = &calldata->lr.res; 3892 } 3893 nfs_sb_active(calldata->inode->i_sb); 3894 3895 msg.rpc_argp = &calldata->arg; 3896 msg.rpc_resp = &calldata->res; 3897 task_setup_data.callback_data = calldata; 3898 task = rpc_run_task(&task_setup_data); 3899 if (IS_ERR(task)) 3900 return PTR_ERR(task); 3901 status = 0; 3902 if (wait) 3903 status = rpc_wait_for_completion_task(task); 3904 rpc_put_task(task); 3905 return status; 3906 out_free_calldata: 3907 kfree(calldata); 3908 out: 3909 nfs4_put_open_state(state); 3910 nfs4_put_state_owner(sp); 3911 return status; 3912 } 3913 3914 static struct inode * 3915 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, 3916 int open_flags, struct iattr *attr, int *opened) 3917 { 3918 struct nfs4_state *state; 3919 struct nfs4_label l, *label; 3920 3921 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l); 3922 3923 /* Protect against concurrent sillydeletes */ 3924 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened); 3925 3926 nfs4_label_release_security(label); 3927 3928 if (IS_ERR(state)) 3929 return ERR_CAST(state); 3930 return state->inode; 3931 } 3932 3933 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) 3934 { 3935 struct dentry *dentry = ctx->dentry; 3936 if (ctx->state == NULL) 3937 return; 3938 if (dentry->d_flags & DCACHE_NFSFS_RENAMED) 3939 nfs4_inode_set_return_delegation_on_close(d_inode(dentry)); 
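	/*
	 * A sillyrenamed dentry means the file will be unlinked as soon as
	 * the last user goes away, so the call above flags any delegation
	 * to be returned when the file is closed.
	 */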
3940 if (is_sync) 3941 nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx)); 3942 else 3943 nfs4_close_state(ctx->state, _nfs4_ctx_to_openmode(ctx)); 3944 } 3945 3946 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL) 3947 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL) 3948 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_OPEN_ARGUMENTS - 1UL) 3949 3950 #define FATTR4_WORD2_NFS42_TIME_DELEG_MASK \ 3951 (FATTR4_WORD2_TIME_DELEG_MODIFY|FATTR4_WORD2_TIME_DELEG_ACCESS) 3952 static bool nfs4_server_delegtime_capable(struct nfs4_server_caps_res *res) 3953 { 3954 u32 share_access_want = res->open_caps.oa_share_access_want[0]; 3955 u32 attr_bitmask = res->attr_bitmask[2]; 3956 3957 return (share_access_want & NFS4_SHARE_WANT_DELEG_TIMESTAMPS) && 3958 ((attr_bitmask & FATTR4_WORD2_NFS42_TIME_DELEG_MASK) == 3959 FATTR4_WORD2_NFS42_TIME_DELEG_MASK); 3960 } 3961 3962 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 3963 { 3964 u32 minorversion = server->nfs_client->cl_minorversion; 3965 u32 bitmask[3] = { 3966 [0] = FATTR4_WORD0_SUPPORTED_ATTRS, 3967 }; 3968 struct nfs4_server_caps_arg args = { 3969 .fhandle = fhandle, 3970 .bitmask = bitmask, 3971 }; 3972 struct nfs4_server_caps_res res = {}; 3973 struct rpc_message msg = { 3974 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS], 3975 .rpc_argp = &args, 3976 .rpc_resp = &res, 3977 }; 3978 int status; 3979 int i; 3980 3981 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS | 3982 FATTR4_WORD0_FH_EXPIRE_TYPE | 3983 FATTR4_WORD0_LINK_SUPPORT | 3984 FATTR4_WORD0_SYMLINK_SUPPORT | 3985 FATTR4_WORD0_ACLSUPPORT | 3986 FATTR4_WORD0_CASE_INSENSITIVE | 3987 FATTR4_WORD0_CASE_PRESERVING; 3988 if (minorversion) 3989 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT; 3990 if (minorversion > 1) 3991 bitmask[2] |= FATTR4_WORD2_OPEN_ARGUMENTS; 3992 3993 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3994 if (status == 0) { 3995 bitmask[0] = (FATTR4_WORD0_SUPPORTED_ATTRS | 3996 FATTR4_WORD0_FH_EXPIRE_TYPE | 3997 FATTR4_WORD0_LINK_SUPPORT | 3998 FATTR4_WORD0_SYMLINK_SUPPORT | 3999 FATTR4_WORD0_ACLSUPPORT | 4000 FATTR4_WORD0_CASE_INSENSITIVE | 4001 FATTR4_WORD0_CASE_PRESERVING) & 4002 res.attr_bitmask[0]; 4003 /* Sanity check the server answers */ 4004 switch (minorversion) { 4005 case 0: 4006 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK; 4007 res.attr_bitmask[2] = 0; 4008 break; 4009 case 1: 4010 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK; 4011 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT & 4012 res.attr_bitmask[2]; 4013 break; 4014 case 2: 4015 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK; 4016 bitmask[2] = (FATTR4_WORD2_SUPPATTR_EXCLCREAT | 4017 FATTR4_WORD2_OPEN_ARGUMENTS) & 4018 res.attr_bitmask[2]; 4019 } 4020 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); 4021 server->caps &= 4022 ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS | 4023 NFS_CAP_SECURITY_LABEL | NFS_CAP_FS_LOCATIONS | 4024 NFS_CAP_OPEN_XOR | NFS_CAP_DELEGTIME); 4025 server->fattr_valid = NFS_ATTR_FATTR_V4; 4026 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL && 4027 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) 4028 server->caps |= NFS_CAP_ACLS; 4029 if (res.has_links != 0) 4030 server->caps |= NFS_CAP_HARDLINKS; 4031 if (res.has_symlinks != 0) 4032 server->caps |= NFS_CAP_SYMLINKS; 4033 if (res.case_insensitive) 4034 server->caps |= NFS_CAP_CASE_INSENSITIVE; 4035 if (res.case_preserving) 4036 server->caps |= NFS_CAP_CASE_PRESERVING; 4037 
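		/*
		 * The checks below turn the server's supported-attribute
		 * masks into client capability flags and clear the
		 * NFS_ATTR_FATTR_* bits for attributes this server will
		 * never return.
		 */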
#ifdef CONFIG_NFS_V4_SECURITY_LABEL 4038 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL) 4039 server->caps |= NFS_CAP_SECURITY_LABEL; 4040 #endif 4041 if (res.attr_bitmask[0] & FATTR4_WORD0_FS_LOCATIONS) 4042 server->caps |= NFS_CAP_FS_LOCATIONS; 4043 if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID)) 4044 server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID; 4045 if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE)) 4046 server->fattr_valid &= ~NFS_ATTR_FATTR_MODE; 4047 if (!(res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)) 4048 server->fattr_valid &= ~NFS_ATTR_FATTR_NLINK; 4049 if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER)) 4050 server->fattr_valid &= ~(NFS_ATTR_FATTR_OWNER | 4051 NFS_ATTR_FATTR_OWNER_NAME); 4052 if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)) 4053 server->fattr_valid &= ~(NFS_ATTR_FATTR_GROUP | 4054 NFS_ATTR_FATTR_GROUP_NAME); 4055 if (!(res.attr_bitmask[1] & FATTR4_WORD1_SPACE_USED)) 4056 server->fattr_valid &= ~NFS_ATTR_FATTR_SPACE_USED; 4057 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)) 4058 server->fattr_valid &= ~NFS_ATTR_FATTR_ATIME; 4059 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)) 4060 server->fattr_valid &= ~NFS_ATTR_FATTR_CTIME; 4061 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)) 4062 server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME; 4065 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_CREATE)) 4066 server->fattr_valid &= ~NFS_ATTR_FATTR_BTIME; 4067 memcpy(server->attr_bitmask_nl, res.attr_bitmask, 4068 sizeof(server->attr_bitmask)); 4069 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL; 4070 4071 if (res.open_caps.oa_share_access_want[0] & 4072 NFS4_SHARE_WANT_OPEN_XOR_DELEGATION) 4073 server->caps |= NFS_CAP_OPEN_XOR; 4074 if (nfs4_server_delegtime_capable(&res)) 4075 server->caps |= NFS_CAP_DELEGTIME; 4076 4077 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask)); 4078 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE; 4079 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; 4080 server->cache_consistency_bitmask[2] = 0; 4081 4082 /* Avoid a regression due to buggy server */ 4083 for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++) 4084 res.exclcreat_bitmask[i] &= res.attr_bitmask[i]; 4085 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask, 4086 sizeof(server->exclcreat_bitmask)); 4087 4088 server->acl_bitmask = res.acl_bitmask; 4089 server->fh_expire_type = res.fh_expire_type; 4090 } 4091 4092 return status; 4093 } 4094 4095 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 4096 { 4097 struct nfs4_exception exception = { 4098 .interruptible = true, 4099 }; 4100 int err; 4101 4102 do { 4103 err = nfs4_handle_exception(server, 4104 _nfs4_server_capabilities(server, fhandle), 4105 &exception); 4106 } while (exception.retry); 4107 return err; 4108 } 4109 4110 static void test_fs_location_for_trunking(struct nfs4_fs_location *location, 4111 struct nfs_client *clp, 4112 struct nfs_server *server) 4113 { 4114 int i; 4115 4116 for (i = 0; i < location->nservers; i++) { 4117 struct nfs4_string *srv_loc = &location->servers[i]; 4118 struct sockaddr_storage addr; 4119 size_t addrlen; 4120 struct xprt_create xprt_args = { 4121 .ident = 0, 4122 .net = clp->cl_net, 4123 }; 4124 struct nfs4_add_xprt_data xprtdata = { 4125 .clp = clp, 4126 }; 4127 struct rpc_add_xprt_test
rpcdata = { 4128 .add_xprt_test = clp->cl_mvops->session_trunk, 4129 .data = &xprtdata, 4130 }; 4131 char *servername = NULL; 4132 4133 if (!srv_loc->len) 4134 continue; 4135 4136 addrlen = nfs_parse_server_name(srv_loc->data, srv_loc->len, 4137 &addr, sizeof(addr), 4138 clp->cl_net, server->port); 4139 if (!addrlen) 4140 return; 4141 xprt_args.dstaddr = (struct sockaddr *)&addr; 4142 xprt_args.addrlen = addrlen; 4143 servername = kmalloc(srv_loc->len + 1, GFP_KERNEL); 4144 if (!servername) 4145 return; 4146 memcpy(servername, srv_loc->data, srv_loc->len); 4147 servername[srv_loc->len] = '\0'; 4148 xprt_args.servername = servername; 4149 4150 xprtdata.cred = nfs4_get_clid_cred(clp); 4151 rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args, 4152 rpc_clnt_setup_test_and_add_xprt, 4153 &rpcdata); 4154 if (xprtdata.cred) 4155 put_cred(xprtdata.cred); 4156 kfree(servername); 4157 } 4158 } 4159 4160 static bool _is_same_nfs4_pathname(struct nfs4_pathname *path1, 4161 struct nfs4_pathname *path2) 4162 { 4163 int i; 4164 4165 if (path1->ncomponents != path2->ncomponents) 4166 return false; 4167 for (i = 0; i < path1->ncomponents; i++) { 4168 if (path1->components[i].len != path2->components[i].len) 4169 return false; 4170 if (memcmp(path1->components[i].data, path2->components[i].data, 4171 path1->components[i].len)) 4172 return false; 4173 } 4174 return true; 4175 } 4176 4177 static int _nfs4_discover_trunking(struct nfs_server *server, 4178 struct nfs_fh *fhandle) 4179 { 4180 struct nfs4_fs_locations *locations = NULL; 4181 struct page *page; 4182 const struct cred *cred; 4183 struct nfs_client *clp = server->nfs_client; 4184 const struct nfs4_state_maintenance_ops *ops = 4185 clp->cl_mvops->state_renewal_ops; 4186 int status = -ENOMEM, i; 4187 4188 cred = ops->get_state_renewal_cred(clp); 4189 if (cred == NULL) { 4190 cred = nfs4_get_clid_cred(clp); 4191 if (cred == NULL) 4192 return -ENOKEY; 4193 } 4194 4195 page = alloc_page(GFP_KERNEL); 4196 if (!page) 4197 goto out_put_cred; 4198 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 4199 if (!locations) 4200 goto out_free; 4201 locations->fattr = nfs_alloc_fattr(); 4202 if (!locations->fattr) 4203 goto out_free_2; 4204 4205 status = nfs4_proc_get_locations(server, fhandle, locations, page, 4206 cred); 4207 if (status) 4208 goto out_free_3; 4209 4210 for (i = 0; i < locations->nlocations; i++) { 4211 if (!_is_same_nfs4_pathname(&locations->fs_path, 4212 &locations->locations[i].rootpath)) 4213 continue; 4214 test_fs_location_for_trunking(&locations->locations[i], clp, 4215 server); 4216 } 4217 out_free_3: 4218 kfree(locations->fattr); 4219 out_free_2: 4220 kfree(locations); 4221 out_free: 4222 __free_page(page); 4223 out_put_cred: 4224 put_cred(cred); 4225 return status; 4226 } 4227 4228 static int nfs4_discover_trunking(struct nfs_server *server, 4229 struct nfs_fh *fhandle) 4230 { 4231 struct nfs4_exception exception = { 4232 .interruptible = true, 4233 }; 4234 struct nfs_client *clp = server->nfs_client; 4235 int err = 0; 4236 4237 if (!nfs4_has_session(clp)) 4238 goto out; 4239 do { 4240 err = nfs4_handle_exception(server, 4241 _nfs4_discover_trunking(server, fhandle), 4242 &exception); 4243 } while (exception.retry); 4244 out: 4245 return err; 4246 } 4247 4248 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 4249 struct nfs_fattr *fattr) 4250 { 4251 u32 bitmask[3] = { 4252 [0] = FATTR4_WORD0_TYPE | FATTR4_WORD0_CHANGE | 4253 FATTR4_WORD0_SIZE | FATTR4_WORD0_FSID, 4254 }; 4255 struct 
nfs4_lookup_root_arg args = { 4256 .bitmask = bitmask, 4257 }; 4258 struct nfs4_lookup_res res = { 4259 .server = server, 4260 .fattr = fattr, 4261 .fh = fhandle, 4262 }; 4263 struct rpc_message msg = { 4264 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT], 4265 .rpc_argp = &args, 4266 .rpc_resp = &res, 4267 }; 4268 4269 nfs_fattr_init(fattr); 4270 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4271 } 4272 4273 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 4274 struct nfs_fattr *fattr) 4275 { 4276 struct nfs4_exception exception = { 4277 .interruptible = true, 4278 }; 4279 int err; 4280 do { 4281 err = _nfs4_lookup_root(server, fhandle, fattr); 4282 trace_nfs4_lookup_root(server, fhandle, fattr, err); 4283 switch (err) { 4284 case 0: 4285 case -NFS4ERR_WRONGSEC: 4286 goto out; 4287 default: 4288 err = nfs4_handle_exception(server, err, &exception); 4289 } 4290 } while (exception.retry); 4291 out: 4292 return err; 4293 } 4294 4295 static int nfs4_lookup_root_sec(struct nfs_server *server, 4296 struct nfs_fh *fhandle, struct nfs_fattr *fattr, 4297 rpc_authflavor_t flavor) 4298 { 4299 struct rpc_auth_create_args auth_args = { 4300 .pseudoflavor = flavor, 4301 }; 4302 struct rpc_auth *auth; 4303 4304 auth = rpcauth_create(&auth_args, server->client); 4305 if (IS_ERR(auth)) 4306 return -EACCES; 4307 return nfs4_lookup_root(server, fhandle, fattr); 4308 } 4309 4310 /* 4311 * Retry pseudoroot lookup with various security flavors. We do this when: 4312 * 4313 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC 4314 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation 4315 * 4316 * Returns zero on success, or a negative NFS4ERR value, or a 4317 * negative errno value. 4318 */ 4319 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 4320 struct nfs_fattr *fattr) 4321 { 4322 /* Per 3530bis 15.33.5 */ 4323 static const rpc_authflavor_t flav_array[] = { 4324 RPC_AUTH_GSS_KRB5P, 4325 RPC_AUTH_GSS_KRB5I, 4326 RPC_AUTH_GSS_KRB5, 4327 RPC_AUTH_UNIX, /* courtesy */ 4328 RPC_AUTH_NULL, 4329 }; 4330 int status = -EPERM; 4331 size_t i; 4332 4333 if (server->auth_info.flavor_len > 0) { 4334 /* try each flavor specified by user */ 4335 for (i = 0; i < server->auth_info.flavor_len; i++) { 4336 status = nfs4_lookup_root_sec( 4337 server, fhandle, fattr, 4338 server->auth_info.flavors[i]); 4339 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 4340 continue; 4341 break; 4342 } 4343 } else { 4344 /* no flavors specified by user, try default list */ 4345 for (i = 0; i < ARRAY_SIZE(flav_array); i++) { 4346 status = nfs4_lookup_root_sec(server, fhandle, fattr, 4347 flav_array[i]); 4348 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 4349 continue; 4350 break; 4351 } 4352 } 4353 4354 /* 4355 * -EACCES could mean that the user doesn't have correct permissions 4356 * to access the mount. It could also mean that we tried to mount 4357 * with a gss auth flavor, but rpc.gssd isn't running. Either way, 4358 * existing mount programs don't handle -EACCES very well so it should 4359 * be mapped to -EPERM instead. 
4360 */ 4361 if (status == -EACCES) 4362 status = -EPERM; 4363 return status; 4364 } 4365 4366 /** 4367 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot 4368 * @server: initialized nfs_server handle 4369 * @fhandle: we fill in the pseudo-fs root file handle 4370 * @fattr: we fill in a bare bones struct fattr 4371 * @auth_probe: probe the auth flavours 4372 * 4373 * Returns zero on success, or a negative errno. 4374 */ 4375 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle, 4376 struct nfs_fattr *fattr, bool auth_probe) 4377 { 4378 int status = 0; 4379 4380 if (!auth_probe) 4381 status = nfs4_lookup_root(server, fhandle, fattr); 4382 4383 if (auth_probe || status == NFS4ERR_WRONGSEC) 4384 status = server->nfs_client->cl_mvops->find_root_sec( 4385 server, fhandle, fattr); 4386 4387 return nfs4_map_errors(status); 4388 } 4389 4390 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh, 4391 struct nfs_fsinfo *info) 4392 { 4393 int error; 4394 struct nfs_fattr *fattr = info->fattr; 4395 4396 error = nfs4_server_capabilities(server, mntfh); 4397 if (error < 0) { 4398 dprintk("nfs4_get_root: getcaps error = %d\n", -error); 4399 return error; 4400 } 4401 4402 error = nfs4_proc_getattr(server, mntfh, fattr, NULL); 4403 if (error < 0) { 4404 dprintk("nfs4_get_root: getattr error = %d\n", -error); 4405 goto out; 4406 } 4407 4408 if (fattr->valid & NFS_ATTR_FATTR_FSID && 4409 !nfs_fsid_equal(&server->fsid, &fattr->fsid)) 4410 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid)); 4411 4412 out: 4413 return error; 4414 } 4415 4416 /* 4417 * Get locations and (maybe) other attributes of a referral. 4418 * Note that we'll actually follow the referral later when 4419 * we detect fsid mismatch in inode revalidation 4420 */ 4421 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir, 4422 const struct qstr *name, struct nfs_fattr *fattr, 4423 struct nfs_fh *fhandle) 4424 { 4425 int status = -ENOMEM; 4426 struct page *page = NULL; 4427 struct nfs4_fs_locations *locations = NULL; 4428 4429 page = alloc_page(GFP_KERNEL); 4430 if (page == NULL) 4431 goto out; 4432 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 4433 if (locations == NULL) 4434 goto out; 4435 4436 locations->fattr = fattr; 4437 4438 status = nfs4_proc_fs_locations(client, dir, name, locations, page); 4439 if (status != 0) 4440 goto out; 4441 4442 /* 4443 * If the fsid didn't change, this is a migration event, not a 4444 * referral. Cause us to drop into the exception handler, which 4445 * will kick off migration recovery. 
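 * (nfs4_proc_lookup_common() below does exactly that: when this
 * function hands back -NFS4ERR_MOVED it passes the error on to
 * nfs4_handle_exception(), which drives the migration recovery.)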
4446 */ 4447 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &fattr->fsid)) { 4448 dprintk("%s: server did not return a different fsid for" 4449 " a referral at %s\n", __func__, name->name); 4450 status = -NFS4ERR_MOVED; 4451 goto out; 4452 } 4453 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */ 4454 nfs_fixup_referral_attributes(fattr); 4455 memset(fhandle, 0, sizeof(struct nfs_fh)); 4456 out: 4457 if (page) 4458 __free_page(page); 4459 kfree(locations); 4460 return status; 4461 } 4462 4463 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, 4464 struct nfs_fattr *fattr, struct inode *inode) 4465 { 4466 __u32 bitmask[NFS4_BITMASK_SZ]; 4467 struct nfs4_getattr_arg args = { 4468 .fh = fhandle, 4469 .bitmask = bitmask, 4470 }; 4471 struct nfs4_getattr_res res = { 4472 .fattr = fattr, 4473 .server = server, 4474 }; 4475 struct rpc_message msg = { 4476 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 4477 .rpc_argp = &args, 4478 .rpc_resp = &res, 4479 }; 4480 unsigned short task_flags = 0; 4481 4482 if (nfs4_has_session(server->nfs_client)) 4483 task_flags = RPC_TASK_MOVEABLE; 4484 4485 /* Is this an attribute revalidation, subject to softreval? */ 4486 if (inode && (server->flags & NFS_MOUNT_SOFTREVAL)) 4487 task_flags |= RPC_TASK_TIMEOUT; 4488 4489 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label), inode, 0); 4490 nfs_fattr_init(fattr); 4491 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 4492 return nfs4_do_call_sync(server->client, server, &msg, 4493 &args.seq_args, &res.seq_res, task_flags); 4494 } 4495 4496 int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, 4497 struct nfs_fattr *fattr, struct inode *inode) 4498 { 4499 struct nfs4_exception exception = { 4500 .interruptible = true, 4501 }; 4502 int err; 4503 do { 4504 err = _nfs4_proc_getattr(server, fhandle, fattr, inode); 4505 trace_nfs4_getattr(server, fhandle, fattr, err); 4506 err = nfs4_handle_exception(server, err, 4507 &exception); 4508 } while (exception.retry); 4509 return err; 4510 } 4511 4512 /* 4513 * The file is not closed if it is opened due to a request to change 4514 * the size of the file. The open call will not be needed once the 4515 * VFS layer lookup-intents are implemented. 4516 * 4517 * Close is called when the inode is destroyed. 4518 * If we haven't opened the file for O_WRONLY, we 4519 * need to in the size_change case to obtain a stateid. 4520 * 4521 * Got race? 4522 * Because OPEN is always done by name in nfsv4, it is 4523 * possible that we opened a different file by the same 4524 * name. We can recognize this race condition, but we 4525 * can't do anything about it besides returning an error. 4526 * 4527 * This will be fixed with VFS changes (lookup-intent).
4528 */ 4529 static int 4530 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, 4531 struct iattr *sattr) 4532 { 4533 struct inode *inode = d_inode(dentry); 4534 const struct cred *cred = NULL; 4535 struct nfs_open_context *ctx = NULL; 4536 int status; 4537 4538 if (pnfs_ld_layoutret_on_setattr(inode) && 4539 sattr->ia_valid & ATTR_SIZE && 4540 sattr->ia_size < i_size_read(inode)) 4541 pnfs_commit_and_return_layout(inode); 4542 4543 nfs_fattr_init(fattr); 4544 4545 /* Deal with open(O_TRUNC) */ 4546 if (sattr->ia_valid & ATTR_OPEN) 4547 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME); 4548 4549 /* Optimization: if the end result is no change, don't RPC */ 4550 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0) 4551 return 0; 4552 4553 /* Search for an existing open(O_WRITE) file */ 4554 if (sattr->ia_valid & ATTR_FILE) { 4555 4556 ctx = nfs_file_open_context(sattr->ia_file); 4557 if (ctx) 4558 cred = ctx->cred; 4559 } 4560 4561 /* Return any delegations if we're going to change ACLs */ 4562 if ((sattr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0) 4563 nfs4_inode_make_writeable(inode); 4564 4565 status = nfs4_do_setattr(inode, cred, fattr, sattr, ctx, NULL); 4566 if (status == 0) { 4567 nfs_setattr_update_inode(inode, sattr, fattr); 4568 nfs_setsecurity(inode, fattr); 4569 } 4570 return status; 4571 } 4572 4573 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, 4574 struct dentry *dentry, const struct qstr *name, 4575 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4576 { 4577 struct nfs_server *server = NFS_SERVER(dir); 4578 int status; 4579 struct nfs4_lookup_arg args = { 4580 .bitmask = server->attr_bitmask, 4581 .dir_fh = NFS_FH(dir), 4582 .name = name, 4583 }; 4584 struct nfs4_lookup_res res = { 4585 .server = server, 4586 .fattr = fattr, 4587 .fh = fhandle, 4588 }; 4589 struct rpc_message msg = { 4590 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP], 4591 .rpc_argp = &args, 4592 .rpc_resp = &res, 4593 }; 4594 unsigned short task_flags = 0; 4595 4596 if (nfs_server_capable(dir, NFS_CAP_MOVEABLE)) 4597 task_flags = RPC_TASK_MOVEABLE; 4598 4599 /* Is this an attribute revalidation, subject to softreval?
*/ 4600 if (nfs_lookup_is_soft_revalidate(dentry)) 4601 task_flags |= RPC_TASK_TIMEOUT; 4602 4603 args.bitmask = nfs4_bitmask(server, fattr->label); 4604 4605 nfs_fattr_init(fattr); 4606 4607 dprintk("NFS call lookup %pd2\n", dentry); 4608 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 4609 status = nfs4_do_call_sync(clnt, server, &msg, 4610 &args.seq_args, &res.seq_res, task_flags); 4611 dprintk("NFS reply lookup: %d\n", status); 4612 return status; 4613 } 4614 4615 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr) 4616 { 4617 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 4618 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT; 4619 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 4620 fattr->nlink = 2; 4621 } 4622 4623 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir, 4624 struct dentry *dentry, const struct qstr *name, 4625 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4626 { 4627 struct nfs4_exception exception = { 4628 .interruptible = true, 4629 }; 4630 struct rpc_clnt *client = *clnt; 4631 int err; 4632 do { 4633 err = _nfs4_proc_lookup(client, dir, dentry, name, fhandle, fattr); 4634 trace_nfs4_lookup(dir, name, err); 4635 switch (err) { 4636 case -NFS4ERR_BADNAME: 4637 err = -ENOENT; 4638 goto out; 4639 case -NFS4ERR_MOVED: 4640 err = nfs4_get_referral(client, dir, name, fattr, fhandle); 4641 if (err == -NFS4ERR_MOVED) 4642 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 4643 goto out; 4644 case -NFS4ERR_WRONGSEC: 4645 err = -EPERM; 4646 if (client != *clnt) 4647 goto out; 4648 client = nfs4_negotiate_security(client, dir, name); 4649 if (IS_ERR(client)) 4650 return PTR_ERR(client); 4651 4652 exception.retry = 1; 4653 break; 4654 default: 4655 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 4656 } 4657 } while (exception.retry); 4658 4659 out: 4660 if (err == 0) 4661 *clnt = client; 4662 else if (client != *clnt) 4663 rpc_shutdown_client(client); 4664 4665 return err; 4666 } 4667 4668 static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry, const struct qstr *name, 4669 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4670 { 4671 int status; 4672 struct rpc_clnt *client = NFS_CLIENT(dir); 4673 4674 status = nfs4_proc_lookup_common(&client, dir, dentry, name, fhandle, fattr); 4675 if (client != NFS_CLIENT(dir)) { 4676 rpc_shutdown_client(client); 4677 nfs_fixup_secinfo_attributes(fattr); 4678 } 4679 return status; 4680 } 4681 4682 struct rpc_clnt * 4683 nfs4_proc_lookup_mountpoint(struct inode *dir, struct dentry *dentry, 4684 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4685 { 4686 struct rpc_clnt *client = NFS_CLIENT(dir); 4687 int status; 4688 4689 status = nfs4_proc_lookup_common(&client, dir, dentry, &dentry->d_name, 4690 fhandle, fattr); 4691 if (status < 0) 4692 return ERR_PTR(status); 4693 return (client == NFS_CLIENT(dir)) ? 
rpc_clone_client(client) : client; 4694 } 4695 4696 static int _nfs4_proc_lookupp(struct inode *inode, 4697 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4698 { 4699 struct rpc_clnt *clnt = NFS_CLIENT(inode); 4700 struct nfs_server *server = NFS_SERVER(inode); 4701 int status; 4702 struct nfs4_lookupp_arg args = { 4703 .bitmask = server->attr_bitmask, 4704 .fh = NFS_FH(inode), 4705 }; 4706 struct nfs4_lookupp_res res = { 4707 .server = server, 4708 .fattr = fattr, 4709 .fh = fhandle, 4710 }; 4711 struct rpc_message msg = { 4712 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUPP], 4713 .rpc_argp = &args, 4714 .rpc_resp = &res, 4715 }; 4716 unsigned short task_flags = 0; 4717 4718 if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL) 4719 task_flags |= RPC_TASK_TIMEOUT; 4720 4721 args.bitmask = nfs4_bitmask(server, fattr->label); 4722 4723 nfs_fattr_init(fattr); 4724 4725 dprintk("NFS call lookupp ino=0x%lx\n", inode->i_ino); 4726 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, 4727 &res.seq_res, task_flags); 4728 dprintk("NFS reply lookupp: %d\n", status); 4729 return status; 4730 } 4731 4732 static int nfs4_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle, 4733 struct nfs_fattr *fattr) 4734 { 4735 struct nfs4_exception exception = { 4736 .interruptible = true, 4737 }; 4738 int err; 4739 do { 4740 err = _nfs4_proc_lookupp(inode, fhandle, fattr); 4741 trace_nfs4_lookupp(inode, err); 4742 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4743 &exception); 4744 } while (exception.retry); 4745 return err; 4746 } 4747 4748 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry, 4749 const struct cred *cred) 4750 { 4751 struct nfs_server *server = NFS_SERVER(inode); 4752 struct nfs4_accessargs args = { 4753 .fh = NFS_FH(inode), 4754 .access = entry->mask, 4755 }; 4756 struct nfs4_accessres res = { 4757 .server = server, 4758 }; 4759 struct rpc_message msg = { 4760 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], 4761 .rpc_argp = &args, 4762 .rpc_resp = &res, 4763 .rpc_cred = cred, 4764 }; 4765 int status = 0; 4766 4767 if (!nfs4_have_delegation(inode, FMODE_READ, 0)) { 4768 res.fattr = nfs_alloc_fattr(); 4769 if (res.fattr == NULL) 4770 return -ENOMEM; 4771 args.bitmask = server->cache_consistency_bitmask; 4772 } 4773 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4774 if (!status) { 4775 nfs_access_set_mask(entry, res.access); 4776 if (res.fattr) 4777 nfs_refresh_inode(inode, res.fattr); 4778 } 4779 nfs_free_fattr(res.fattr); 4780 return status; 4781 } 4782 4783 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry, 4784 const struct cred *cred) 4785 { 4786 struct nfs4_exception exception = { 4787 .interruptible = true, 4788 }; 4789 int err; 4790 do { 4791 err = _nfs4_proc_access(inode, entry, cred); 4792 trace_nfs4_access(inode, err); 4793 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4794 &exception); 4795 } while (exception.retry); 4796 return err; 4797 } 4798 4799 /* 4800 * TODO: For the time being, we don't try to get any attributes 4801 * along with any of the zero-copy operations READ, READDIR, 4802 * READLINK, WRITE. 4803 * 4804 * In the case of the first three, we want to put the GETATTR 4805 * after the read-type operation -- this is because it is hard 4806 * to predict the length of a GETATTR response in v4, and thus 4807 * align the READ data correctly. 
This means that the GETATTR 4808 * may end up partially falling into the page cache, and we should 4809 * shift it into the 'tail' of the xdr_buf before processing. 4810 * To do this efficiently, we need to know the total length 4811 * of data received, which doesn't seem to be available outside 4812 * of the RPC layer. 4813 * 4814 * In the case of WRITE, we also want to put the GETATTR after 4815 * the operation -- in this case because we want to make sure 4816 * we get the post-operation mtime and size. 4817 * 4818 * Both of these changes to the XDR layer would in fact be quite 4819 * minor, but I decided to leave them for a subsequent patch. 4820 */ 4821 static int _nfs4_proc_readlink(struct inode *inode, struct page *page, 4822 unsigned int pgbase, unsigned int pglen) 4823 { 4824 struct nfs4_readlink args = { 4825 .fh = NFS_FH(inode), 4826 .pgbase = pgbase, 4827 .pglen = pglen, 4828 .pages = &page, 4829 }; 4830 struct nfs4_readlink_res res; 4831 struct rpc_message msg = { 4832 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK], 4833 .rpc_argp = &args, 4834 .rpc_resp = &res, 4835 }; 4836 4837 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0); 4838 } 4839 4840 static int nfs4_proc_readlink(struct inode *inode, struct page *page, 4841 unsigned int pgbase, unsigned int pglen) 4842 { 4843 struct nfs4_exception exception = { 4844 .interruptible = true, 4845 }; 4846 int err; 4847 do { 4848 err = _nfs4_proc_readlink(inode, page, pgbase, pglen); 4849 trace_nfs4_readlink(inode, err); 4850 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4851 &exception); 4852 } while (exception.retry); 4853 return err; 4854 } 4855 4856 /* 4857 * This is just for mknod. open(O_CREAT) will always do ->open_context(). 
4858 */ 4859 static int 4860 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, 4861 int flags) 4862 { 4863 struct nfs_server *server = NFS_SERVER(dir); 4864 struct nfs4_label l, *ilabel; 4865 struct nfs_open_context *ctx; 4866 struct nfs4_state *state; 4867 int status = 0; 4868 4869 ctx = alloc_nfs_open_context(dentry, FMODE_READ, NULL); 4870 if (IS_ERR(ctx)) 4871 return PTR_ERR(ctx); 4872 4873 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l); 4874 4875 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 4876 sattr->ia_mode &= ~current_umask(); 4877 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL); 4878 if (IS_ERR(state)) { 4879 status = PTR_ERR(state); 4880 goto out; 4881 } 4882 out: 4883 nfs4_label_release_security(ilabel); 4884 put_nfs_open_context(ctx); 4885 return status; 4886 } 4887 4888 static int 4889 _nfs4_proc_remove(struct inode *dir, const struct qstr *name, u32 ftype) 4890 { 4891 struct nfs_server *server = NFS_SERVER(dir); 4892 struct nfs_removeargs args = { 4893 .fh = NFS_FH(dir), 4894 .name = *name, 4895 }; 4896 struct nfs_removeres res = { 4897 .server = server, 4898 }; 4899 struct rpc_message msg = { 4900 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE], 4901 .rpc_argp = &args, 4902 .rpc_resp = &res, 4903 }; 4904 unsigned long timestamp = jiffies; 4905 int status; 4906 4907 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 4908 if (status == 0) { 4909 spin_lock(&dir->i_lock); 4910 /* Removing a directory decrements nlink in the parent */ 4911 if (ftype == NF4DIR && dir->i_nlink > 2) 4912 nfs4_dec_nlink_locked(dir); 4913 nfs4_update_changeattr_locked(dir, &res.cinfo, timestamp, 4914 NFS_INO_INVALID_DATA); 4915 spin_unlock(&dir->i_lock); 4916 } 4917 return status; 4918 } 4919 4920 static int nfs4_proc_remove(struct inode *dir, struct dentry *dentry) 4921 { 4922 struct nfs4_exception exception = { 4923 .interruptible = true, 4924 }; 4925 struct inode *inode = d_inode(dentry); 4926 int err; 4927 4928 if (inode) { 4929 if (inode->i_nlink == 1) 4930 nfs4_inode_return_delegation(inode); 4931 else 4932 nfs4_inode_make_writeable(inode); 4933 } 4934 do { 4935 err = _nfs4_proc_remove(dir, &dentry->d_name, NF4REG); 4936 trace_nfs4_remove(dir, &dentry->d_name, err); 4937 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4938 &exception); 4939 } while (exception.retry); 4940 return err; 4941 } 4942 4943 static int nfs4_proc_rmdir(struct inode *dir, const struct qstr *name) 4944 { 4945 struct nfs4_exception exception = { 4946 .interruptible = true, 4947 }; 4948 int err; 4949 4950 do { 4951 err = _nfs4_proc_remove(dir, name, NF4DIR); 4952 trace_nfs4_remove(dir, name, err); 4953 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4954 &exception); 4955 } while (exception.retry); 4956 return err; 4957 } 4958 4959 static void nfs4_proc_unlink_setup(struct rpc_message *msg, 4960 struct dentry *dentry, 4961 struct inode *inode) 4962 { 4963 struct nfs_removeargs *args = msg->rpc_argp; 4964 struct nfs_removeres *res = msg->rpc_resp; 4965 4966 res->server = NFS_SB(dentry->d_sb); 4967 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE]; 4968 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1, 0); 4969 4970 nfs_fattr_init(res->dir_attr); 4971 4972 if (inode) { 4973 nfs4_inode_return_delegation(inode); 4974 nfs_d_prune_case_insensitive_aliases(inode); 4975 } 4976 } 4977 4978 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) 4979 { 4980 
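	/*
	 * Nothing to do here beyond claiming a session slot; the reply and
	 * any retry are handled in nfs4_proc_unlink_done() below.
	 */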
nfs4_setup_sequence(NFS_SB(data->dentry->d_sb)->nfs_client, 4981 &data->args.seq_args, 4982 &data->res.seq_res, 4983 task); 4984 } 4985 4986 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) 4987 { 4988 struct nfs_unlinkdata *data = task->tk_calldata; 4989 struct nfs_removeres *res = &data->res; 4990 4991 if (!nfs4_sequence_done(task, &res->seq_res)) 4992 return 0; 4993 if (nfs4_async_handle_error(task, res->server, NULL, 4994 &data->timeout) == -EAGAIN) 4995 return 0; 4996 if (task->tk_status == 0) 4997 nfs4_update_changeattr(dir, &res->cinfo, 4998 res->dir_attr->time_start, 4999 NFS_INO_INVALID_DATA); 5000 return 1; 5001 } 5002 5003 static void nfs4_proc_rename_setup(struct rpc_message *msg, 5004 struct dentry *old_dentry, 5005 struct dentry *new_dentry) 5006 { 5007 struct nfs_renameargs *arg = msg->rpc_argp; 5008 struct nfs_renameres *res = msg->rpc_resp; 5009 struct inode *old_inode = d_inode(old_dentry); 5010 struct inode *new_inode = d_inode(new_dentry); 5011 5012 if (old_inode) 5013 nfs4_inode_make_writeable(old_inode); 5014 if (new_inode) 5015 nfs4_inode_return_delegation(new_inode); 5016 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; 5017 res->server = NFS_SB(old_dentry->d_sb); 5018 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1, 0); 5019 } 5020 5021 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) 5022 { 5023 nfs4_setup_sequence(NFS_SERVER(data->old_dir)->nfs_client, 5024 &data->args.seq_args, 5025 &data->res.seq_res, 5026 task); 5027 } 5028 5029 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, 5030 struct inode *new_dir) 5031 { 5032 struct nfs_renamedata *data = task->tk_calldata; 5033 struct nfs_renameres *res = &data->res; 5034 5035 if (!nfs4_sequence_done(task, &res->seq_res)) 5036 return 0; 5037 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN) 5038 return 0; 5039 5040 if (task->tk_status == 0) { 5041 nfs_d_prune_case_insensitive_aliases(d_inode(data->old_dentry)); 5042 if (new_dir != old_dir) { 5043 /* Note: If we moved a directory, nlink will change */ 5044 nfs4_update_changeattr(old_dir, &res->old_cinfo, 5045 res->old_fattr->time_start, 5046 NFS_INO_INVALID_NLINK | 5047 NFS_INO_INVALID_DATA); 5048 nfs4_update_changeattr(new_dir, &res->new_cinfo, 5049 res->new_fattr->time_start, 5050 NFS_INO_INVALID_NLINK | 5051 NFS_INO_INVALID_DATA); 5052 } else 5053 nfs4_update_changeattr(old_dir, &res->old_cinfo, 5054 res->old_fattr->time_start, 5055 NFS_INO_INVALID_DATA); 5056 } 5057 return 1; 5058 } 5059 5060 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name) 5061 { 5062 struct nfs_server *server = NFS_SERVER(inode); 5063 __u32 bitmask[NFS4_BITMASK_SZ]; 5064 struct nfs4_link_arg arg = { 5065 .fh = NFS_FH(inode), 5066 .dir_fh = NFS_FH(dir), 5067 .name = name, 5068 .bitmask = bitmask, 5069 }; 5070 struct nfs4_link_res res = { 5071 .server = server, 5072 }; 5073 struct rpc_message msg = { 5074 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK], 5075 .rpc_argp = &arg, 5076 .rpc_resp = &res, 5077 }; 5078 int status = -ENOMEM; 5079 5080 res.fattr = nfs_alloc_fattr_with_label(server); 5081 if (res.fattr == NULL) 5082 goto out; 5083 5084 nfs4_inode_make_writeable(inode); 5085 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, res.fattr->label), 5086 inode, 5087 NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME); 5088 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 5089 if 
(!status) { 5090 nfs4_update_changeattr(dir, &res.cinfo, res.fattr->time_start, 5091 NFS_INO_INVALID_DATA); 5092 nfs4_inc_nlink(inode); 5093 status = nfs_post_op_update_inode(inode, res.fattr); 5094 if (!status) 5095 nfs_setsecurity(inode, res.fattr); 5096 } 5097 5098 out: 5099 nfs_free_fattr(res.fattr); 5100 return status; 5101 } 5102 5103 static int nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name) 5104 { 5105 struct nfs4_exception exception = { 5106 .interruptible = true, 5107 }; 5108 int err; 5109 do { 5110 err = nfs4_handle_exception(NFS_SERVER(inode), 5111 _nfs4_proc_link(inode, dir, name), 5112 &exception); 5113 } while (exception.retry); 5114 return err; 5115 } 5116 5117 struct nfs4_createdata { 5118 struct rpc_message msg; 5119 struct nfs4_create_arg arg; 5120 struct nfs4_create_res res; 5121 struct nfs_fh fh; 5122 struct nfs_fattr fattr; 5123 }; 5124 5125 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir, 5126 const struct qstr *name, struct iattr *sattr, u32 ftype) 5127 { 5128 struct nfs4_createdata *data; 5129 5130 data = kzalloc(sizeof(*data), GFP_KERNEL); 5131 if (data != NULL) { 5132 struct nfs_server *server = NFS_SERVER(dir); 5133 5134 data->fattr.label = nfs4_label_alloc(server, GFP_KERNEL); 5135 if (IS_ERR(data->fattr.label)) 5136 goto out_free; 5137 5138 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE]; 5139 data->msg.rpc_argp = &data->arg; 5140 data->msg.rpc_resp = &data->res; 5141 data->arg.dir_fh = NFS_FH(dir); 5142 data->arg.server = server; 5143 data->arg.name = name; 5144 data->arg.attrs = sattr; 5145 data->arg.ftype = ftype; 5146 data->arg.bitmask = nfs4_bitmask(server, data->fattr.label); 5147 data->arg.umask = current_umask(); 5148 data->res.server = server; 5149 data->res.fh = &data->fh; 5150 data->res.fattr = &data->fattr; 5151 nfs_fattr_init(data->res.fattr); 5152 } 5153 return data; 5154 out_free: 5155 kfree(data); 5156 return NULL; 5157 } 5158 5159 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data) 5160 { 5161 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 5162 &data->arg.seq_args, &data->res.seq_res, 1); 5163 if (status == 0) { 5164 spin_lock(&dir->i_lock); 5165 nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo, 5166 data->res.fattr->time_start, 5167 NFS_INO_INVALID_DATA); 5168 spin_unlock(&dir->i_lock); 5169 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr); 5170 } 5171 return status; 5172 } 5173 5174 static struct dentry *nfs4_do_mkdir(struct inode *dir, struct dentry *dentry, 5175 struct nfs4_createdata *data, int *statusp) 5176 { 5177 struct dentry *ret; 5178 5179 *statusp = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 5180 &data->arg.seq_args, &data->res.seq_res, 1); 5181 5182 if (*statusp) 5183 return NULL; 5184 5185 spin_lock(&dir->i_lock); 5186 /* Creating a directory bumps nlink in the parent */ 5187 nfs4_inc_nlink_locked(dir); 5188 nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo, 5189 data->res.fattr->time_start, 5190 NFS_INO_INVALID_DATA); 5191 spin_unlock(&dir->i_lock); 5192 ret = nfs_add_or_obtain(dentry, data->res.fh, data->res.fattr); 5193 if (!IS_ERR(ret)) 5194 return ret; 5195 *statusp = PTR_ERR(ret); 5196 return NULL; 5197 } 5198 5199 static void nfs4_free_createdata(struct nfs4_createdata *data) 5200 { 5201 nfs4_label_free(data->fattr.label); 5202 kfree(data); 5203 } 5204 5205 static int _nfs4_proc_symlink(struct inode *dir, struct dentry 
*dentry, 5206 struct folio *folio, unsigned int len, struct iattr *sattr, 5207 struct nfs4_label *label) 5208 { 5209 struct page *page = &folio->page; 5210 struct nfs4_createdata *data; 5211 int status = -ENAMETOOLONG; 5212 5213 if (len > NFS4_MAXPATHLEN) 5214 goto out; 5215 5216 status = -ENOMEM; 5217 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK); 5218 if (data == NULL) 5219 goto out; 5220 5221 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK]; 5222 data->arg.u.symlink.pages = &page; 5223 data->arg.u.symlink.len = len; 5224 data->arg.label = label; 5225 5226 status = nfs4_do_create(dir, dentry, data); 5227 5228 nfs4_free_createdata(data); 5229 out: 5230 return status; 5231 } 5232 5233 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 5234 struct folio *folio, unsigned int len, struct iattr *sattr) 5235 { 5236 struct nfs4_exception exception = { 5237 .interruptible = true, 5238 }; 5239 struct nfs4_label l, *label; 5240 int err; 5241 5242 label = nfs4_label_init_security(dir, dentry, sattr, &l); 5243 5244 do { 5245 err = _nfs4_proc_symlink(dir, dentry, folio, len, sattr, label); 5246 trace_nfs4_symlink(dir, &dentry->d_name, err); 5247 err = nfs4_handle_exception(NFS_SERVER(dir), err, 5248 &exception); 5249 } while (exception.retry); 5250 5251 nfs4_label_release_security(label); 5252 return err; 5253 } 5254 5255 static struct dentry *_nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 5256 struct iattr *sattr, 5257 struct nfs4_label *label, int *statusp) 5258 { 5259 struct nfs4_createdata *data; 5260 struct dentry *ret = NULL; 5261 5262 *statusp = -ENOMEM; 5263 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR); 5264 if (data == NULL) 5265 goto out; 5266 5267 data->arg.label = label; 5268 ret = nfs4_do_mkdir(dir, dentry, data, statusp); 5269 5270 nfs4_free_createdata(data); 5271 out: 5272 return ret; 5273 } 5274 5275 static struct dentry *nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 5276 struct iattr *sattr) 5277 { 5278 struct nfs_server *server = NFS_SERVER(dir); 5279 struct nfs4_exception exception = { 5280 .interruptible = true, 5281 }; 5282 struct nfs4_label l, *label; 5283 struct dentry *alias; 5284 int err; 5285 5286 label = nfs4_label_init_security(dir, dentry, sattr, &l); 5287 5288 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 5289 sattr->ia_mode &= ~current_umask(); 5290 do { 5291 alias = _nfs4_proc_mkdir(dir, dentry, sattr, label, &err); 5292 trace_nfs4_mkdir(dir, &dentry->d_name, err); 5293 if (err) 5294 alias = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir), 5295 err, 5296 &exception)); 5297 } while (exception.retry); 5298 nfs4_label_release_security(label); 5299 5300 return alias; 5301 } 5302 5303 static int _nfs4_proc_readdir(struct nfs_readdir_arg *nr_arg, 5304 struct nfs_readdir_res *nr_res) 5305 { 5306 struct inode *dir = d_inode(nr_arg->dentry); 5307 struct nfs_server *server = NFS_SERVER(dir); 5308 struct nfs4_readdir_arg args = { 5309 .fh = NFS_FH(dir), 5310 .pages = nr_arg->pages, 5311 .pgbase = 0, 5312 .count = nr_arg->page_len, 5313 .plus = nr_arg->plus, 5314 }; 5315 struct nfs4_readdir_res res; 5316 struct rpc_message msg = { 5317 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR], 5318 .rpc_argp = &args, 5319 .rpc_resp = &res, 5320 .rpc_cred = nr_arg->cred, 5321 }; 5322 int status; 5323 5324 dprintk("%s: dentry = %pd2, cookie = %llu\n", __func__, 5325 nr_arg->dentry, (unsigned long long)nr_arg->cookie); 5326 if (!(server->caps & NFS_CAP_SECURITY_LABEL)) 5327 
args.bitmask = server->attr_bitmask_nl; 5328 else 5329 args.bitmask = server->attr_bitmask; 5330 5331 nfs4_setup_readdir(nr_arg->cookie, nr_arg->verf, nr_arg->dentry, &args); 5332 res.pgbase = args.pgbase; 5333 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, 5334 &res.seq_res, 0); 5335 if (status >= 0) { 5336 memcpy(nr_res->verf, res.verifier.data, NFS4_VERIFIER_SIZE); 5337 status += args.pgbase; 5338 } 5339 5340 nfs_invalidate_atime(dir); 5341 5342 dprintk("%s: returns %d\n", __func__, status); 5343 return status; 5344 } 5345 5346 static int nfs4_proc_readdir(struct nfs_readdir_arg *arg, 5347 struct nfs_readdir_res *res) 5348 { 5349 struct nfs4_exception exception = { 5350 .interruptible = true, 5351 }; 5352 int err; 5353 do { 5354 err = _nfs4_proc_readdir(arg, res); 5355 trace_nfs4_readdir(d_inode(arg->dentry), err); 5356 err = nfs4_handle_exception(NFS_SERVER(d_inode(arg->dentry)), 5357 err, &exception); 5358 } while (exception.retry); 5359 return err; 5360 } 5361 5362 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 5363 struct iattr *sattr, struct nfs4_label *label, dev_t rdev) 5364 { 5365 struct nfs4_createdata *data; 5366 int mode = sattr->ia_mode; 5367 int status = -ENOMEM; 5368 5369 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK); 5370 if (data == NULL) 5371 goto out; 5372 5373 if (S_ISFIFO(mode)) 5374 data->arg.ftype = NF4FIFO; 5375 else if (S_ISBLK(mode)) { 5376 data->arg.ftype = NF4BLK; 5377 data->arg.u.device.specdata1 = MAJOR(rdev); 5378 data->arg.u.device.specdata2 = MINOR(rdev); 5379 } 5380 else if (S_ISCHR(mode)) { 5381 data->arg.ftype = NF4CHR; 5382 data->arg.u.device.specdata1 = MAJOR(rdev); 5383 data->arg.u.device.specdata2 = MINOR(rdev); 5384 } else if (!S_ISSOCK(mode)) { 5385 status = -EINVAL; 5386 goto out_free; 5387 } 5388 5389 data->arg.label = label; 5390 status = nfs4_do_create(dir, dentry, data); 5391 out_free: 5392 nfs4_free_createdata(data); 5393 out: 5394 return status; 5395 } 5396 5397 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 5398 struct iattr *sattr, dev_t rdev) 5399 { 5400 struct nfs_server *server = NFS_SERVER(dir); 5401 struct nfs4_exception exception = { 5402 .interruptible = true, 5403 }; 5404 struct nfs4_label l, *label; 5405 int err; 5406 5407 label = nfs4_label_init_security(dir, dentry, sattr, &l); 5408 5409 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 5410 sattr->ia_mode &= ~current_umask(); 5411 do { 5412 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev); 5413 trace_nfs4_mknod(dir, &dentry->d_name, err); 5414 err = nfs4_handle_exception(NFS_SERVER(dir), err, 5415 &exception); 5416 } while (exception.retry); 5417 5418 nfs4_label_release_security(label); 5419 5420 return err; 5421 } 5422 5423 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, 5424 struct nfs_fsstat *fsstat) 5425 { 5426 struct nfs4_statfs_arg args = { 5427 .fh = fhandle, 5428 .bitmask = server->attr_bitmask, 5429 }; 5430 struct nfs4_statfs_res res = { 5431 .fsstat = fsstat, 5432 }; 5433 struct rpc_message msg = { 5434 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS], 5435 .rpc_argp = &args, 5436 .rpc_resp = &res, 5437 }; 5438 5439 nfs_fattr_init(fsstat->fattr); 5440 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5441 } 5442 5443 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) 5444 { 5445 struct nfs4_exception exception = { 5446 .interruptible = true, 
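		/* let a fatal signal interrupt the retry delay */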
5447 }; 5448 int err; 5449 do { 5450 err = nfs4_handle_exception(server, 5451 _nfs4_proc_statfs(server, fhandle, fsstat), 5452 &exception); 5453 } while (exception.retry); 5454 return err; 5455 } 5456 5457 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, 5458 struct nfs_fsinfo *fsinfo) 5459 { 5460 struct nfs4_fsinfo_arg args = { 5461 .fh = fhandle, 5462 .bitmask = server->attr_bitmask, 5463 }; 5464 struct nfs4_fsinfo_res res = { 5465 .fsinfo = fsinfo, 5466 }; 5467 struct rpc_message msg = { 5468 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO], 5469 .rpc_argp = &args, 5470 .rpc_resp = &res, 5471 }; 5472 5473 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5474 } 5475 5476 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 5477 { 5478 struct nfs4_exception exception = { 5479 .interruptible = true, 5480 }; 5481 int err; 5482 5483 do { 5484 err = _nfs4_do_fsinfo(server, fhandle, fsinfo); 5485 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err); 5486 if (err == 0) { 5487 nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time * HZ); 5488 break; 5489 } 5490 err = nfs4_handle_exception(server, err, &exception); 5491 } while (exception.retry); 5492 return err; 5493 } 5494 5495 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 5496 { 5497 int error; 5498 5499 nfs_fattr_init(fsinfo->fattr); 5500 error = nfs4_do_fsinfo(server, fhandle, fsinfo); 5501 if (error == 0) { 5502 /* block layout checks this! */ 5503 server->pnfs_blksize = fsinfo->blksize; 5504 set_pnfs_layoutdriver(server, fhandle, fsinfo); 5505 } 5506 5507 return error; 5508 } 5509 5510 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 5511 struct nfs_pathconf *pathconf) 5512 { 5513 struct nfs4_pathconf_arg args = { 5514 .fh = fhandle, 5515 .bitmask = server->attr_bitmask, 5516 }; 5517 struct nfs4_pathconf_res res = { 5518 .pathconf = pathconf, 5519 }; 5520 struct rpc_message msg = { 5521 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF], 5522 .rpc_argp = &args, 5523 .rpc_resp = &res, 5524 }; 5525 5526 /* None of the pathconf attributes are mandatory to implement */ 5527 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) { 5528 memset(pathconf, 0, sizeof(*pathconf)); 5529 return 0; 5530 } 5531 5532 nfs_fattr_init(pathconf->fattr); 5533 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5534 } 5535 5536 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 5537 struct nfs_pathconf *pathconf) 5538 { 5539 struct nfs4_exception exception = { 5540 .interruptible = true, 5541 }; 5542 int err; 5543 5544 do { 5545 err = nfs4_handle_exception(server, 5546 _nfs4_proc_pathconf(server, fhandle, pathconf), 5547 &exception); 5548 } while (exception.retry); 5549 return err; 5550 } 5551 5552 int nfs4_set_rw_stateid(nfs4_stateid *stateid, 5553 const struct nfs_open_context *ctx, 5554 const struct nfs_lock_context *l_ctx, 5555 fmode_t fmode) 5556 { 5557 return nfs4_select_rw_stateid(ctx->state, fmode, l_ctx, stateid, NULL); 5558 } 5559 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid); 5560 5561 static bool nfs4_stateid_is_current(nfs4_stateid *stateid, 5562 const struct nfs_open_context *ctx, 5563 const struct nfs_lock_context *l_ctx, 5564 fmode_t fmode) 5565 { 5566 nfs4_stateid _current_stateid; 5567 5568 /* If the current stateid represents a lost lock, then exit */ 5569 if 
(nfs4_set_rw_stateid(&_current_stateid, ctx, l_ctx, fmode) == -EIO) 5570 return true; 5571 return nfs4_stateid_match(stateid, &_current_stateid); 5572 } 5573 5574 static bool nfs4_error_stateid_expired(int err) 5575 { 5576 switch (err) { 5577 case -NFS4ERR_DELEG_REVOKED: 5578 case -NFS4ERR_ADMIN_REVOKED: 5579 case -NFS4ERR_BAD_STATEID: 5580 case -NFS4ERR_STALE_STATEID: 5581 case -NFS4ERR_OLD_STATEID: 5582 case -NFS4ERR_OPENMODE: 5583 case -NFS4ERR_EXPIRED: 5584 return true; 5585 } 5586 return false; 5587 } 5588 5589 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr) 5590 { 5591 struct nfs_server *server = NFS_SERVER(hdr->inode); 5592 5593 trace_nfs4_read(hdr, task->tk_status); 5594 if (task->tk_status < 0) { 5595 struct nfs4_exception exception = { 5596 .inode = hdr->inode, 5597 .state = hdr->args.context->state, 5598 .stateid = &hdr->args.stateid, 5599 .retrans = hdr->retrans, 5600 }; 5601 task->tk_status = nfs4_async_handle_exception(task, 5602 server, task->tk_status, &exception); 5603 hdr->retrans = exception.retrans; 5604 if (exception.retry) { 5605 rpc_restart_call_prepare(task); 5606 return -EAGAIN; 5607 } 5608 } 5609 5610 if (task->tk_status > 0) 5611 renew_lease(server, hdr->timestamp); 5612 return 0; 5613 } 5614 5615 static bool nfs4_read_stateid_changed(struct rpc_task *task, 5616 struct nfs_pgio_args *args) 5617 { 5618 5619 if (!nfs4_error_stateid_expired(task->tk_status) || 5620 nfs4_stateid_is_current(&args->stateid, 5621 args->context, 5622 args->lock_context, 5623 FMODE_READ)) 5624 return false; 5625 rpc_restart_call_prepare(task); 5626 return true; 5627 } 5628 5629 static bool nfs4_read_plus_not_supported(struct rpc_task *task, 5630 struct nfs_pgio_header *hdr) 5631 { 5632 struct nfs_server *server = NFS_SERVER(hdr->inode); 5633 struct rpc_message *msg = &task->tk_msg; 5634 5635 if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS] && 5636 task->tk_status == -ENOTSUPP) { 5637 server->caps &= ~NFS_CAP_READ_PLUS; 5638 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 5639 rpc_restart_call_prepare(task); 5640 return true; 5641 } 5642 return false; 5643 } 5644 5645 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 5646 { 5647 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 5648 return -EAGAIN; 5649 if (nfs4_read_stateid_changed(task, &hdr->args)) 5650 return -EAGAIN; 5651 if (nfs4_read_plus_not_supported(task, hdr)) 5652 return -EAGAIN; 5653 if (task->tk_status > 0) 5654 nfs_invalidate_atime(hdr->inode); 5655 return hdr->pgio_done_cb ? 
hdr->pgio_done_cb(task, hdr) : 5656 nfs4_read_done_cb(task, hdr); 5657 } 5658 5659 #if defined CONFIG_NFS_V4_2 && defined CONFIG_NFS_V4_2_READ_PLUS 5660 static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr, 5661 struct rpc_message *msg) 5662 { 5663 /* Note: We don't use READ_PLUS with pNFS yet */ 5664 if (nfs_server_capable(hdr->inode, NFS_CAP_READ_PLUS) && !hdr->ds_clp) { 5665 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS]; 5666 return nfs_read_alloc_scratch(hdr, READ_PLUS_SCRATCH_SIZE); 5667 } 5668 return false; 5669 } 5670 #else 5671 static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr, 5672 struct rpc_message *msg) 5673 { 5674 return false; 5675 } 5676 #endif /* CONFIG_NFS_V4_2 */ 5677 5678 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr, 5679 struct rpc_message *msg) 5680 { 5681 hdr->timestamp = jiffies; 5682 if (!hdr->pgio_done_cb) 5683 hdr->pgio_done_cb = nfs4_read_done_cb; 5684 if (!nfs42_read_plus_support(hdr, msg)) 5685 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 5686 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0); 5687 } 5688 5689 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task, 5690 struct nfs_pgio_header *hdr) 5691 { 5692 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode)->nfs_client, 5693 &hdr->args.seq_args, 5694 &hdr->res.seq_res, 5695 task)) 5696 return 0; 5697 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context, 5698 hdr->args.lock_context, 5699 hdr->rw_mode) == -EIO) 5700 return -EIO; 5701 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) 5702 return -EIO; 5703 return 0; 5704 } 5705 5706 static int nfs4_write_done_cb(struct rpc_task *task, 5707 struct nfs_pgio_header *hdr) 5708 { 5709 struct inode *inode = hdr->inode; 5710 5711 trace_nfs4_write(hdr, task->tk_status); 5712 if (task->tk_status < 0) { 5713 struct nfs4_exception exception = { 5714 .inode = hdr->inode, 5715 .state = hdr->args.context->state, 5716 .stateid = &hdr->args.stateid, 5717 .retrans = hdr->retrans, 5718 }; 5719 task->tk_status = nfs4_async_handle_exception(task, 5720 NFS_SERVER(inode), task->tk_status, 5721 &exception); 5722 hdr->retrans = exception.retrans; 5723 if (exception.retry) { 5724 rpc_restart_call_prepare(task); 5725 return -EAGAIN; 5726 } 5727 } 5728 if (task->tk_status >= 0) { 5729 renew_lease(NFS_SERVER(inode), hdr->timestamp); 5730 nfs_writeback_update_inode(hdr); 5731 } 5732 return 0; 5733 } 5734 5735 static bool nfs4_write_stateid_changed(struct rpc_task *task, 5736 struct nfs_pgio_args *args) 5737 { 5738 5739 if (!nfs4_error_stateid_expired(task->tk_status) || 5740 nfs4_stateid_is_current(&args->stateid, 5741 args->context, 5742 args->lock_context, 5743 FMODE_WRITE)) 5744 return false; 5745 rpc_restart_call_prepare(task); 5746 return true; 5747 } 5748 5749 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 5750 { 5751 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 5752 return -EAGAIN; 5753 if (nfs4_write_stateid_changed(task, &hdr->args)) 5754 return -EAGAIN; 5755 return hdr->pgio_done_cb ? 
hdr->pgio_done_cb(task, hdr) : 5756 nfs4_write_done_cb(task, hdr); 5757 } 5758 5759 static 5760 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr) 5761 { 5762 /* Don't request attributes for pNFS or O_DIRECT writes */ 5763 if (hdr->ds_clp != NULL || hdr->dreq != NULL) 5764 return false; 5765 /* Otherwise, request attributes if and only if we don't hold 5766 * a delegation 5767 */ 5768 return nfs4_have_delegation(hdr->inode, FMODE_READ, 0) == 0; 5769 } 5770 5771 void nfs4_bitmask_set(__u32 bitmask[], const __u32 src[], 5772 struct inode *inode, unsigned long cache_validity) 5773 { 5774 struct nfs_server *server = NFS_SERVER(inode); 5775 unsigned int i; 5776 5777 memcpy(bitmask, src, sizeof(*bitmask) * NFS4_BITMASK_SZ); 5778 cache_validity |= READ_ONCE(NFS_I(inode)->cache_validity); 5779 5780 if (cache_validity & NFS_INO_INVALID_CHANGE) 5781 bitmask[0] |= FATTR4_WORD0_CHANGE; 5782 if (cache_validity & NFS_INO_INVALID_ATIME) 5783 bitmask[1] |= FATTR4_WORD1_TIME_ACCESS; 5784 if (cache_validity & NFS_INO_INVALID_MODE) 5785 bitmask[1] |= FATTR4_WORD1_MODE; 5786 if (cache_validity & NFS_INO_INVALID_OTHER) 5787 bitmask[1] |= FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP; 5788 if (cache_validity & NFS_INO_INVALID_NLINK) 5789 bitmask[1] |= FATTR4_WORD1_NUMLINKS; 5790 if (cache_validity & NFS_INO_INVALID_CTIME) 5791 bitmask[1] |= FATTR4_WORD1_TIME_METADATA; 5792 if (cache_validity & NFS_INO_INVALID_MTIME) 5793 bitmask[1] |= FATTR4_WORD1_TIME_MODIFY; 5794 if (cache_validity & NFS_INO_INVALID_BLOCKS) 5795 bitmask[1] |= FATTR4_WORD1_SPACE_USED; 5796 if (cache_validity & NFS_INO_INVALID_BTIME) 5797 bitmask[1] |= FATTR4_WORD1_TIME_CREATE; 5798 5799 if (cache_validity & NFS_INO_INVALID_SIZE) 5800 bitmask[0] |= FATTR4_WORD0_SIZE; 5801 5802 for (i = 0; i < NFS4_BITMASK_SZ; i++) 5803 bitmask[i] &= server->attr_bitmask[i]; 5804 } 5805 5806 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr, 5807 struct rpc_message *msg, 5808 struct rpc_clnt **clnt) 5809 { 5810 struct nfs_server *server = NFS_SERVER(hdr->inode); 5811 5812 if (!nfs4_write_need_cache_consistency_data(hdr)) { 5813 hdr->args.bitmask = NULL; 5814 hdr->res.fattr = NULL; 5815 } else { 5816 nfs4_bitmask_set(hdr->args.bitmask_store, 5817 server->cache_consistency_bitmask, 5818 hdr->inode, NFS_INO_INVALID_BLOCKS); 5819 hdr->args.bitmask = hdr->args.bitmask_store; 5820 } 5821 5822 if (!hdr->pgio_done_cb) 5823 hdr->pgio_done_cb = nfs4_write_done_cb; 5824 hdr->res.server = server; 5825 hdr->timestamp = jiffies; 5826 5827 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; 5828 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0); 5829 nfs4_state_protect_write(hdr->ds_clp ? 
hdr->ds_clp : server->nfs_client, clnt, msg, hdr); 5830 } 5831 5832 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) 5833 { 5834 nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client, 5835 &data->args.seq_args, 5836 &data->res.seq_res, 5837 task); 5838 } 5839 5840 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data) 5841 { 5842 struct inode *inode = data->inode; 5843 5844 trace_nfs4_commit(data, task->tk_status); 5845 if (nfs4_async_handle_error(task, NFS_SERVER(inode), 5846 NULL, NULL) == -EAGAIN) { 5847 rpc_restart_call_prepare(task); 5848 return -EAGAIN; 5849 } 5850 return 0; 5851 } 5852 5853 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data) 5854 { 5855 if (!nfs4_sequence_done(task, &data->res.seq_res)) 5856 return -EAGAIN; 5857 return data->commit_done_cb(task, data); 5858 } 5859 5860 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg, 5861 struct rpc_clnt **clnt) 5862 { 5863 struct nfs_server *server = NFS_SERVER(data->inode); 5864 5865 if (data->commit_done_cb == NULL) 5866 data->commit_done_cb = nfs4_commit_done_cb; 5867 data->res.server = server; 5868 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT]; 5869 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0); 5870 nfs4_state_protect(data->ds_clp ? data->ds_clp : server->nfs_client, 5871 NFS_SP4_MACH_CRED_COMMIT, clnt, msg); 5872 } 5873 5874 static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args, 5875 struct nfs_commitres *res) 5876 { 5877 struct inode *dst_inode = file_inode(dst); 5878 struct nfs_server *server = NFS_SERVER(dst_inode); 5879 struct rpc_message msg = { 5880 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT], 5881 .rpc_argp = args, 5882 .rpc_resp = res, 5883 }; 5884 5885 args->fh = NFS_FH(dst_inode); 5886 return nfs4_call_sync(server->client, server, &msg, 5887 &args->seq_args, &res->seq_res, 1); 5888 } 5889 5890 int nfs4_proc_commit(struct file *dst, __u64 offset, __u32 count, struct nfs_commitres *res) 5891 { 5892 struct nfs_commitargs args = { 5893 .offset = offset, 5894 .count = count, 5895 }; 5896 struct nfs_server *dst_server = NFS_SERVER(file_inode(dst)); 5897 struct nfs4_exception exception = { }; 5898 int status; 5899 5900 do { 5901 status = _nfs4_proc_commit(dst, &args, res); 5902 status = nfs4_handle_exception(dst_server, status, &exception); 5903 } while (exception.retry); 5904 5905 return status; 5906 } 5907 5908 struct nfs4_renewdata { 5909 struct nfs_client *client; 5910 unsigned long timestamp; 5911 }; 5912 5913 /* 5914 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special 5915 * standalone procedure for queueing an asynchronous RENEW. 
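 * The rpc_call_ops below update the client's lease timestamp on completion
 * and schedule lease or callback-path recovery if the RENEW failed.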
5916 */ 5917 static void nfs4_renew_release(void *calldata) 5918 { 5919 struct nfs4_renewdata *data = calldata; 5920 struct nfs_client *clp = data->client; 5921 5922 if (refcount_read(&clp->cl_count) > 1) 5923 nfs4_schedule_state_renewal(clp); 5924 nfs_put_client(clp); 5925 kfree(data); 5926 } 5927 5928 static void nfs4_renew_done(struct rpc_task *task, void *calldata) 5929 { 5930 struct nfs4_renewdata *data = calldata; 5931 struct nfs_client *clp = data->client; 5932 unsigned long timestamp = data->timestamp; 5933 5934 trace_nfs4_renew_async(clp, task->tk_status); 5935 switch (task->tk_status) { 5936 case 0: 5937 break; 5938 case -NFS4ERR_LEASE_MOVED: 5939 nfs4_schedule_lease_moved_recovery(clp); 5940 break; 5941 default: 5942 /* Unless we're shutting down, schedule state recovery! */ 5943 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) 5944 return; 5945 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { 5946 nfs4_schedule_lease_recovery(clp); 5947 return; 5948 } 5949 nfs4_schedule_path_down_recovery(clp); 5950 } 5951 do_renew_lease(clp, timestamp); 5952 } 5953 5954 static const struct rpc_call_ops nfs4_renew_ops = { 5955 .rpc_call_done = nfs4_renew_done, 5956 .rpc_release = nfs4_renew_release, 5957 }; 5958 5959 static int nfs4_proc_async_renew(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags) 5960 { 5961 struct rpc_message msg = { 5962 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 5963 .rpc_argp = clp, 5964 .rpc_cred = cred, 5965 }; 5966 struct nfs4_renewdata *data; 5967 5968 if (renew_flags == 0) 5969 return 0; 5970 if (!refcount_inc_not_zero(&clp->cl_count)) 5971 return -EIO; 5972 data = kmalloc(sizeof(*data), GFP_NOFS); 5973 if (data == NULL) { 5974 nfs_put_client(clp); 5975 return -ENOMEM; 5976 } 5977 data->client = clp; 5978 data->timestamp = jiffies; 5979 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT, 5980 &nfs4_renew_ops, data); 5981 } 5982 5983 static int nfs4_proc_renew(struct nfs_client *clp, const struct cred *cred) 5984 { 5985 struct rpc_message msg = { 5986 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 5987 .rpc_argp = clp, 5988 .rpc_cred = cred, 5989 }; 5990 unsigned long now = jiffies; 5991 int status; 5992 5993 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5994 if (status < 0) 5995 return status; 5996 do_renew_lease(clp, now); 5997 return 0; 5998 } 5999 6000 static bool nfs4_server_supports_acls(const struct nfs_server *server, 6001 enum nfs4_acl_type type) 6002 { 6003 switch (type) { 6004 default: 6005 return server->attr_bitmask[0] & FATTR4_WORD0_ACL; 6006 case NFS4ACL_DACL: 6007 return server->attr_bitmask[1] & FATTR4_WORD1_DACL; 6008 case NFS4ACL_SACL: 6009 return server->attr_bitmask[1] & FATTR4_WORD1_SACL; 6010 } 6011 } 6012 6013 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that 6014 * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on 6015 * the stack. 
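 * (strictly speaking the cost is sizeof(struct page *) *
 * (XATTR_SIZE_MAX/PAGE_SIZE) bytes; see the pages[] array in
 * __nfs4_proc_set_acl() below)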
6016 */ 6017 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE) 6018 6019 int nfs4_buf_to_pages_noslab(const void *buf, size_t buflen, 6020 struct page **pages) 6021 { 6022 struct page *newpage, **spages; 6023 int rc = 0; 6024 size_t len; 6025 spages = pages; 6026 6027 do { 6028 len = min_t(size_t, PAGE_SIZE, buflen); 6029 newpage = alloc_page(GFP_KERNEL); 6030 6031 if (newpage == NULL) 6032 goto unwind; 6033 memcpy(page_address(newpage), buf, len); 6034 buf += len; 6035 buflen -= len; 6036 *pages++ = newpage; 6037 rc++; 6038 } while (buflen != 0); 6039 6040 return rc; 6041 6042 unwind: 6043 for(; rc > 0; rc--) 6044 __free_page(spages[rc-1]); 6045 return -ENOMEM; 6046 } 6047 6048 struct nfs4_cached_acl { 6049 enum nfs4_acl_type type; 6050 int cached; 6051 size_t len; 6052 char data[]; 6053 }; 6054 6055 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl) 6056 { 6057 struct nfs_inode *nfsi = NFS_I(inode); 6058 6059 spin_lock(&inode->i_lock); 6060 kfree(nfsi->nfs4_acl); 6061 nfsi->nfs4_acl = acl; 6062 spin_unlock(&inode->i_lock); 6063 } 6064 6065 static void nfs4_zap_acl_attr(struct inode *inode) 6066 { 6067 nfs4_set_cached_acl(inode, NULL); 6068 } 6069 6070 static ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, 6071 size_t buflen, enum nfs4_acl_type type) 6072 { 6073 struct nfs_inode *nfsi = NFS_I(inode); 6074 struct nfs4_cached_acl *acl; 6075 int ret = -ENOENT; 6076 6077 spin_lock(&inode->i_lock); 6078 acl = nfsi->nfs4_acl; 6079 if (acl == NULL) 6080 goto out; 6081 if (acl->type != type) 6082 goto out; 6083 if (buf == NULL) /* user is just asking for length */ 6084 goto out_len; 6085 if (acl->cached == 0) 6086 goto out; 6087 ret = -ERANGE; /* see getxattr(2) man page */ 6088 if (acl->len > buflen) 6089 goto out; 6090 memcpy(buf, acl->data, acl->len); 6091 out_len: 6092 ret = acl->len; 6093 out: 6094 spin_unlock(&inode->i_lock); 6095 return ret; 6096 } 6097 6098 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, 6099 size_t pgbase, size_t acl_len, 6100 enum nfs4_acl_type type) 6101 { 6102 struct nfs4_cached_acl *acl; 6103 size_t buflen = sizeof(*acl) + acl_len; 6104 6105 if (buflen <= PAGE_SIZE) { 6106 acl = kmalloc(buflen, GFP_KERNEL); 6107 if (acl == NULL) 6108 goto out; 6109 acl->cached = 1; 6110 _copy_from_pages(acl->data, pages, pgbase, acl_len); 6111 } else { 6112 acl = kmalloc(sizeof(*acl), GFP_KERNEL); 6113 if (acl == NULL) 6114 goto out; 6115 acl->cached = 0; 6116 } 6117 acl->type = type; 6118 acl->len = acl_len; 6119 out: 6120 nfs4_set_cached_acl(inode, acl); 6121 } 6122 6123 /* 6124 * The getxattr API returns the required buffer length when called with a 6125 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating 6126 * the required buf. On a NULL buf, we send a page of data to the server 6127 * guessing that the ACL request can be serviced by a page. If so, we cache 6128 * up to the page of ACL data, and the 2nd call to getxattr is serviced by 6129 * the cache. If not so, we throw away the page, and cache the required 6130 * length. The next getxattr call will then produce another round trip to 6131 * the server, this time with the input buf of the required size. 
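 * A typical caller therefore ends up issuing two calls, e.g.
 * (illustrative only, error handling omitted):
 *
 *	len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	buf = malloc(len);
 *	getxattr(path, "system.nfs4_acl", buf, len);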
6132 */ 6133 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, 6134 size_t buflen, enum nfs4_acl_type type) 6135 { 6136 struct page **pages; 6137 struct nfs_getaclargs args = { 6138 .fh = NFS_FH(inode), 6139 .acl_type = type, 6140 .acl_len = buflen, 6141 }; 6142 struct nfs_getaclres res = { 6143 .acl_type = type, 6144 .acl_len = buflen, 6145 }; 6146 struct rpc_message msg = { 6147 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL], 6148 .rpc_argp = &args, 6149 .rpc_resp = &res, 6150 }; 6151 unsigned int npages; 6152 int ret = -ENOMEM, i; 6153 struct nfs_server *server = NFS_SERVER(inode); 6154 6155 if (buflen == 0) 6156 buflen = server->rsize; 6157 6158 npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1; 6159 pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); 6160 if (!pages) 6161 return -ENOMEM; 6162 6163 args.acl_pages = pages; 6164 6165 for (i = 0; i < npages; i++) { 6166 pages[i] = alloc_page(GFP_KERNEL); 6167 if (!pages[i]) 6168 goto out_free; 6169 } 6170 6171 /* for decoding across pages */ 6172 res.acl_scratch = folio_alloc(GFP_KERNEL, 0); 6173 if (!res.acl_scratch) 6174 goto out_free; 6175 6176 args.acl_len = npages * PAGE_SIZE; 6177 6178 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", 6179 __func__, buf, buflen, npages, args.acl_len); 6180 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), 6181 &msg, &args.seq_args, &res.seq_res, 0); 6182 if (ret) 6183 goto out_free; 6184 6185 /* Handle the case where the passed-in buffer is too short */ 6186 if (res.acl_flags & NFS4_ACL_TRUNC) { 6187 /* Did the user only issue a request for the acl length? */ 6188 if (buf == NULL) 6189 goto out_ok; 6190 ret = -ERANGE; 6191 goto out_free; 6192 } 6193 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len, 6194 type); 6195 if (buf) { 6196 if (res.acl_len > buflen) { 6197 ret = -ERANGE; 6198 goto out_free; 6199 } 6200 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len); 6201 } 6202 out_ok: 6203 ret = res.acl_len; 6204 out_free: 6205 while (--i >= 0) 6206 __free_page(pages[i]); 6207 if (res.acl_scratch) 6208 folio_put(res.acl_scratch); 6209 kfree(pages); 6210 return ret; 6211 } 6212 6213 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, 6214 size_t buflen, enum nfs4_acl_type type) 6215 { 6216 struct nfs4_exception exception = { 6217 .interruptible = true, 6218 }; 6219 ssize_t ret; 6220 do { 6221 ret = __nfs4_get_acl_uncached(inode, buf, buflen, type); 6222 trace_nfs4_get_acl(inode, ret); 6223 if (ret >= 0) 6224 break; 6225 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); 6226 } while (exception.retry); 6227 return ret; 6228 } 6229 6230 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen, 6231 enum nfs4_acl_type type) 6232 { 6233 struct nfs_server *server = NFS_SERVER(inode); 6234 int ret; 6235 6236 if (unlikely(NFS_FH(inode)->size == 0)) 6237 return -ENODATA; 6238 if (!nfs4_server_supports_acls(server, type)) 6239 return -EOPNOTSUPP; 6240 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 6241 if (ret < 0) 6242 return ret; 6243 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) 6244 nfs_zap_acl_cache(inode); 6245 ret = nfs4_read_cached_acl(inode, buf, buflen, type); 6246 if (ret != -ENOENT) 6247 /* -ENOENT is returned if there is no ACL or if there is an ACL 6248 * but no cached acl data, just the acl length */ 6249 return ret; 6250 return nfs4_get_acl_uncached(inode, buf, buflen, type); 6251 } 6252 6253 static int __nfs4_proc_set_acl(struct 
inode *inode, const void *buf, 6254 size_t buflen, enum nfs4_acl_type type) 6255 { 6256 struct nfs_server *server = NFS_SERVER(inode); 6257 struct page *pages[NFS4ACL_MAXPAGES]; 6258 struct nfs_setaclargs arg = { 6259 .fh = NFS_FH(inode), 6260 .acl_type = type, 6261 .acl_len = buflen, 6262 .acl_pages = pages, 6263 }; 6264 struct nfs_setaclres res; 6265 struct rpc_message msg = { 6266 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL], 6267 .rpc_argp = &arg, 6268 .rpc_resp = &res, 6269 }; 6270 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 6271 int ret, i; 6272 6273 /* You can't remove system.nfs4_acl: */ 6274 if (buflen == 0) 6275 return -EINVAL; 6276 if (!nfs4_server_supports_acls(server, type)) 6277 return -EOPNOTSUPP; 6278 if (npages > ARRAY_SIZE(pages)) 6279 return -ERANGE; 6280 i = nfs4_buf_to_pages_noslab(buf, buflen, arg.acl_pages); 6281 if (i < 0) 6282 return i; 6283 nfs4_inode_make_writeable(inode); 6284 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 6285 6286 /* 6287 * Free each page after tx, so the only ref left is 6288 * held by the network stack 6289 */ 6290 for (; i > 0; i--) 6291 put_page(pages[i-1]); 6292 6293 /* 6294 * Acl update can result in inode attribute update. 6295 * so mark the attribute cache invalid. 6296 */ 6297 spin_lock(&inode->i_lock); 6298 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE | 6299 NFS_INO_INVALID_CTIME | 6300 NFS_INO_REVAL_FORCED); 6301 spin_unlock(&inode->i_lock); 6302 nfs_access_zap_cache(inode); 6303 nfs_zap_acl_cache(inode); 6304 return ret; 6305 } 6306 6307 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, 6308 size_t buflen, enum nfs4_acl_type type) 6309 { 6310 struct nfs4_exception exception = { }; 6311 int err; 6312 6313 if (unlikely(NFS_FH(inode)->size == 0)) 6314 return -ENODATA; 6315 do { 6316 err = __nfs4_proc_set_acl(inode, buf, buflen, type); 6317 trace_nfs4_set_acl(inode, err); 6318 if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) { 6319 /* 6320 * no need to retry since the kernel 6321 * isn't involved in encoding the ACEs. 
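 * (the ACL is passed through from userspace as an opaque blob, so a bad
 * owner or group name can only be corrected by the caller)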
6322 */ 6323 err = -EINVAL; 6324 break; 6325 } 6326 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6327 &exception); 6328 } while (exception.retry); 6329 return err; 6330 } 6331 6332 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 6333 static int _nfs4_get_security_label(struct inode *inode, void *buf, 6334 size_t buflen) 6335 { 6336 struct nfs_server *server = NFS_SERVER(inode); 6337 struct nfs4_label label = {0, 0, 0, buflen, buf}; 6338 6339 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 6340 struct nfs_fattr fattr = { 6341 .label = &label, 6342 }; 6343 struct nfs4_getattr_arg arg = { 6344 .fh = NFS_FH(inode), 6345 .bitmask = bitmask, 6346 }; 6347 struct nfs4_getattr_res res = { 6348 .fattr = &fattr, 6349 .server = server, 6350 }; 6351 struct rpc_message msg = { 6352 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 6353 .rpc_argp = &arg, 6354 .rpc_resp = &res, 6355 }; 6356 int ret; 6357 6358 nfs_fattr_init(&fattr); 6359 6360 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0); 6361 if (ret) 6362 return ret; 6363 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL)) 6364 return -ENOENT; 6365 return label.len; 6366 } 6367 6368 static int nfs4_get_security_label(struct inode *inode, void *buf, 6369 size_t buflen) 6370 { 6371 struct nfs4_exception exception = { 6372 .interruptible = true, 6373 }; 6374 int err; 6375 6376 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 6377 return -EOPNOTSUPP; 6378 6379 do { 6380 err = _nfs4_get_security_label(inode, buf, buflen); 6381 trace_nfs4_get_security_label(inode, err); 6382 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6383 &exception); 6384 } while (exception.retry); 6385 return err; 6386 } 6387 6388 static int _nfs4_do_set_security_label(struct inode *inode, 6389 struct nfs4_label *ilabel, 6390 struct nfs_fattr *fattr) 6391 { 6392 6393 struct iattr sattr = {0}; 6394 struct nfs_server *server = NFS_SERVER(inode); 6395 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 6396 struct nfs_setattrargs arg = { 6397 .fh = NFS_FH(inode), 6398 .iap = &sattr, 6399 .server = server, 6400 .bitmask = bitmask, 6401 .label = ilabel, 6402 }; 6403 struct nfs_setattrres res = { 6404 .fattr = fattr, 6405 .server = server, 6406 }; 6407 struct rpc_message msg = { 6408 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 6409 .rpc_argp = &arg, 6410 .rpc_resp = &res, 6411 }; 6412 int status; 6413 6414 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 6415 6416 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 6417 if (status) 6418 dprintk("%s failed: %d\n", __func__, status); 6419 6420 return status; 6421 } 6422 6423 static int nfs4_do_set_security_label(struct inode *inode, 6424 struct nfs4_label *ilabel, 6425 struct nfs_fattr *fattr) 6426 { 6427 struct nfs4_exception exception = { }; 6428 int err; 6429 6430 do { 6431 err = _nfs4_do_set_security_label(inode, ilabel, fattr); 6432 trace_nfs4_set_security_label(inode, err); 6433 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6434 &exception); 6435 } while (exception.retry); 6436 return err; 6437 } 6438 6439 static int 6440 nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen) 6441 { 6442 struct nfs4_label ilabel = {0, 0, 0, buflen, (char *)buf }; 6443 struct nfs_fattr *fattr; 6444 int status; 6445 6446 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 6447 return -EOPNOTSUPP; 6448 6449 fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode)); 6450 if (fattr == NULL) 6451 return -ENOMEM; 6452 6453 
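	/* Send the new label to the server; on success update the local
	 * inode's security state from the returned attributes. */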
status = nfs4_do_set_security_label(inode, &ilabel, fattr); 6454 if (status == 0) 6455 nfs_setsecurity(inode, fattr); 6456 6457 nfs_free_fattr(fattr); 6458 return status; 6459 } 6460 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */ 6461 6462 6463 static void nfs4_init_boot_verifier(const struct nfs_client *clp, 6464 nfs4_verifier *bootverf) 6465 { 6466 __be32 verf[2]; 6467 6468 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) { 6469 /* An impossible timestamp guarantees this value 6470 * will never match a generated boot time. */ 6471 verf[0] = cpu_to_be32(U32_MAX); 6472 verf[1] = cpu_to_be32(U32_MAX); 6473 } else { 6474 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 6475 u64 ns = ktime_to_ns(nn->boot_time); 6476 6477 verf[0] = cpu_to_be32(ns >> 32); 6478 verf[1] = cpu_to_be32(ns); 6479 } 6480 memcpy(bootverf->data, verf, sizeof(bootverf->data)); 6481 } 6482 6483 static size_t 6484 nfs4_get_uniquifier(struct nfs_client *clp, char *buf, size_t buflen) 6485 { 6486 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 6487 struct nfs_netns_client *nn_clp = nn->nfs_client; 6488 const char *id; 6489 6490 buf[0] = '\0'; 6491 6492 if (nn_clp) { 6493 rcu_read_lock(); 6494 id = rcu_dereference(nn_clp->identifier); 6495 if (id) 6496 strscpy(buf, id, buflen); 6497 rcu_read_unlock(); 6498 } 6499 6500 if (nfs4_client_id_uniquifier[0] != '\0' && buf[0] == '\0') 6501 strscpy(buf, nfs4_client_id_uniquifier, buflen); 6502 6503 return strlen(buf); 6504 } 6505 6506 static int 6507 nfs4_init_nonuniform_client_string(struct nfs_client *clp) 6508 { 6509 char buf[NFS4_CLIENT_ID_UNIQ_LEN]; 6510 size_t buflen; 6511 size_t len; 6512 char *str; 6513 6514 if (clp->cl_owner_id != NULL) 6515 return 0; 6516 6517 rcu_read_lock(); 6518 len = 14 + 6519 strlen(clp->cl_rpcclient->cl_nodename) + 6520 1 + 6521 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) + 6522 1; 6523 rcu_read_unlock(); 6524 6525 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf)); 6526 if (buflen) 6527 len += buflen + 1; 6528 6529 if (len > NFS4_OPAQUE_LIMIT + 1) 6530 return -EINVAL; 6531 6532 /* 6533 * Since this string is allocated at mount time, and held until the 6534 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 6535 * about a memory-reclaim deadlock. 6536 */ 6537 str = kmalloc(len, GFP_KERNEL); 6538 if (!str) 6539 return -ENOMEM; 6540 6541 rcu_read_lock(); 6542 if (buflen) 6543 scnprintf(str, len, "Linux NFSv4.0 %s/%s/%s", 6544 clp->cl_rpcclient->cl_nodename, buf, 6545 rpc_peeraddr2str(clp->cl_rpcclient, 6546 RPC_DISPLAY_ADDR)); 6547 else 6548 scnprintf(str, len, "Linux NFSv4.0 %s/%s", 6549 clp->cl_rpcclient->cl_nodename, 6550 rpc_peeraddr2str(clp->cl_rpcclient, 6551 RPC_DISPLAY_ADDR)); 6552 rcu_read_unlock(); 6553 6554 clp->cl_owner_id = str; 6555 return 0; 6556 } 6557 6558 static int 6559 nfs4_init_uniform_client_string(struct nfs_client *clp) 6560 { 6561 char buf[NFS4_CLIENT_ID_UNIQ_LEN]; 6562 size_t buflen; 6563 size_t len; 6564 char *str; 6565 6566 if (clp->cl_owner_id != NULL) 6567 return 0; 6568 6569 len = 10 + 10 + 1 + 10 + 1 + 6570 strlen(clp->cl_rpcclient->cl_nodename) + 1; 6571 6572 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf)); 6573 if (buflen) 6574 len += buflen + 1; 6575 6576 if (len > NFS4_OPAQUE_LIMIT + 1) 6577 return -EINVAL; 6578 6579 /* 6580 * Since this string is allocated at mount time, and held until the 6581 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 6582 * about a memory-reclaim deadlock. 
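 * The resulting client identifier string has the form
 * "Linux NFSv<major>.<minor> [<uniquifier>/]<nodename>".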
6583 */ 6584 str = kmalloc(len, GFP_KERNEL); 6585 if (!str) 6586 return -ENOMEM; 6587 6588 if (buflen) 6589 scnprintf(str, len, "Linux NFSv%u.%u %s/%s", 6590 clp->rpc_ops->version, clp->cl_minorversion, 6591 buf, clp->cl_rpcclient->cl_nodename); 6592 else 6593 scnprintf(str, len, "Linux NFSv%u.%u %s", 6594 clp->rpc_ops->version, clp->cl_minorversion, 6595 clp->cl_rpcclient->cl_nodename); 6596 clp->cl_owner_id = str; 6597 return 0; 6598 } 6599 6600 /* 6601 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback 6602 * services. Advertise one based on the address family of the 6603 * clientaddr. 6604 */ 6605 static unsigned int 6606 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len) 6607 { 6608 if (strchr(clp->cl_ipaddr, ':') != NULL) 6609 return scnprintf(buf, len, "tcp6"); 6610 else 6611 return scnprintf(buf, len, "tcp"); 6612 } 6613 6614 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata) 6615 { 6616 struct nfs4_setclientid *sc = calldata; 6617 6618 if (task->tk_status == 0) 6619 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred); 6620 } 6621 6622 static const struct rpc_call_ops nfs4_setclientid_ops = { 6623 .rpc_call_done = nfs4_setclientid_done, 6624 }; 6625 6626 /** 6627 * nfs4_proc_setclientid - Negotiate client ID 6628 * @clp: state data structure 6629 * @program: RPC program for NFSv4 callback service 6630 * @port: IP port number for NFS4 callback service 6631 * @cred: credential to use for this call 6632 * @res: where to place the result 6633 * 6634 * Returns zero, a negative errno, or a negative NFS4ERR status code. 6635 */ 6636 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, 6637 unsigned short port, const struct cred *cred, 6638 struct nfs4_setclientid_res *res) 6639 { 6640 nfs4_verifier sc_verifier; 6641 struct nfs4_setclientid setclientid = { 6642 .sc_verifier = &sc_verifier, 6643 .sc_prog = program, 6644 .sc_clnt = clp, 6645 }; 6646 struct rpc_message msg = { 6647 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID], 6648 .rpc_argp = &setclientid, 6649 .rpc_resp = res, 6650 .rpc_cred = cred, 6651 }; 6652 struct rpc_task_setup task_setup_data = { 6653 .rpc_client = clp->cl_rpcclient, 6654 .rpc_message = &msg, 6655 .callback_ops = &nfs4_setclientid_ops, 6656 .callback_data = &setclientid, 6657 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN, 6658 }; 6659 unsigned long now = jiffies; 6660 int status; 6661 6662 /* nfs_client_id4 */ 6663 nfs4_init_boot_verifier(clp, &sc_verifier); 6664 6665 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags)) 6666 status = nfs4_init_uniform_client_string(clp); 6667 else 6668 status = nfs4_init_nonuniform_client_string(clp); 6669 6670 if (status) 6671 goto out; 6672 6673 /* cb_client4 */ 6674 setclientid.sc_netid_len = 6675 nfs4_init_callback_netid(clp, 6676 setclientid.sc_netid, 6677 sizeof(setclientid.sc_netid)); 6678 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr, 6679 sizeof(setclientid.sc_uaddr), "%s.%u.%u", 6680 clp->cl_ipaddr, port >> 8, port & 255); 6681 6682 dprintk("NFS call setclientid auth=%s, '%s'\n", 6683 clp->cl_rpcclient->cl_auth->au_ops->au_name, 6684 clp->cl_owner_id); 6685 6686 status = nfs4_call_sync_custom(&task_setup_data); 6687 if (setclientid.sc_cred) { 6688 kfree(clp->cl_acceptor); 6689 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred); 6690 put_rpccred(setclientid.sc_cred); 6691 } 6692 6693 if (status == 0) 6694 do_renew_lease(clp, now); 6695 out: 6696 trace_nfs4_setclientid(clp, status); 6697 dprintk("NFS reply 
setclientid: %d\n", status); 6698 return status; 6699 } 6700 6701 /** 6702 * nfs4_proc_setclientid_confirm - Confirm client ID 6703 * @clp: state data structure 6704 * @arg: result of a previous SETCLIENTID 6705 * @cred: credential to use for this call 6706 * 6707 * Returns zero, a negative errno, or a negative NFS4ERR status code. 6708 */ 6709 int nfs4_proc_setclientid_confirm(struct nfs_client *clp, 6710 struct nfs4_setclientid_res *arg, 6711 const struct cred *cred) 6712 { 6713 struct rpc_message msg = { 6714 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM], 6715 .rpc_argp = arg, 6716 .rpc_cred = cred, 6717 }; 6718 int status; 6719 6720 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n", 6721 clp->cl_rpcclient->cl_auth->au_ops->au_name, 6722 clp->cl_clientid); 6723 status = rpc_call_sync(clp->cl_rpcclient, &msg, 6724 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 6725 trace_nfs4_setclientid_confirm(clp, status); 6726 dprintk("NFS reply setclientid_confirm: %d\n", status); 6727 return status; 6728 } 6729 6730 struct nfs4_delegreturndata { 6731 struct nfs4_delegreturnargs args; 6732 struct nfs4_delegreturnres res; 6733 struct nfs_fh fh; 6734 nfs4_stateid stateid; 6735 unsigned long timestamp; 6736 unsigned short retrans; 6737 struct { 6738 struct nfs4_layoutreturn_args arg; 6739 struct nfs4_layoutreturn_res res; 6740 struct nfs4_xdr_opaque_data ld_private; 6741 u32 roc_barrier; 6742 bool roc; 6743 } lr; 6744 struct nfs4_delegattr sattr; 6745 struct nfs_fattr fattr; 6746 int rpc_status; 6747 struct inode *inode; 6748 }; 6749 6750 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) 6751 { 6752 struct nfs4_delegreturndata *data = calldata; 6753 struct nfs4_exception exception = { 6754 .inode = data->inode, 6755 .stateid = &data->stateid, 6756 .task_is_privileged = data->args.seq_args.sa_privileged, 6757 .retrans = data->retrans, 6758 }; 6759 6760 if (!nfs4_sequence_done(task, &data->res.seq_res)) 6761 return; 6762 6763 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status); 6764 6765 /* Handle Layoutreturn errors */ 6766 if (pnfs_roc_done(task, &data->args.lr_args, &data->res.lr_res, 6767 &data->res.lr_ret) == -EAGAIN) 6768 goto out_restart; 6769 6770 if (data->args.sattr_args && task->tk_status != 0) { 6771 switch(data->res.sattr_ret) { 6772 case 0: 6773 data->args.sattr_args = NULL; 6774 data->res.sattr_res = false; 6775 break; 6776 case -NFS4ERR_ADMIN_REVOKED: 6777 case -NFS4ERR_DELEG_REVOKED: 6778 case -NFS4ERR_EXPIRED: 6779 case -NFS4ERR_BAD_STATEID: 6780 /* Let the main handler below do stateid recovery */ 6781 break; 6782 case -NFS4ERR_OLD_STATEID: 6783 if (nfs4_refresh_delegation_stateid(&data->stateid, 6784 data->inode)) 6785 goto out_restart; 6786 fallthrough; 6787 default: 6788 data->args.sattr_args = NULL; 6789 data->res.sattr_res = false; 6790 goto out_restart; 6791 } 6792 } 6793 6794 switch (task->tk_status) { 6795 case 0: 6796 renew_lease(data->res.server, data->timestamp); 6797 break; 6798 case -NFS4ERR_ADMIN_REVOKED: 6799 case -NFS4ERR_DELEG_REVOKED: 6800 case -NFS4ERR_EXPIRED: 6801 nfs4_free_revoked_stateid(data->res.server, 6802 data->args.stateid, 6803 task->tk_msg.rpc_cred); 6804 fallthrough; 6805 case -NFS4ERR_BAD_STATEID: 6806 case -NFS4ERR_STALE_STATEID: 6807 case -ETIMEDOUT: 6808 task->tk_status = 0; 6809 break; 6810 case -NFS4ERR_OLD_STATEID: 6811 if (!nfs4_refresh_delegation_stateid(&data->stateid, data->inode)) 6812 nfs4_stateid_seqid_inc(&data->stateid); 6813 if (data->args.bitmask) { 6814 
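			/* Retry the DELEGRETURN without requesting attributes */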
data->args.bitmask = NULL; 6815 data->res.fattr = NULL; 6816 } 6817 goto out_restart; 6818 case -NFS4ERR_ACCESS: 6819 if (data->args.bitmask) { 6820 data->args.bitmask = NULL; 6821 data->res.fattr = NULL; 6822 goto out_restart; 6823 } 6824 fallthrough; 6825 default: 6826 task->tk_status = nfs4_async_handle_exception(task, 6827 data->res.server, task->tk_status, 6828 &exception); 6829 data->retrans = exception.retrans; 6830 if (exception.retry) 6831 goto out_restart; 6832 } 6833 nfs_delegation_mark_returned(data->inode, data->args.stateid); 6834 data->rpc_status = task->tk_status; 6835 return; 6836 out_restart: 6837 task->tk_status = 0; 6838 rpc_restart_call_prepare(task); 6839 } 6840 6841 static void nfs4_delegreturn_release(void *calldata) 6842 { 6843 struct nfs4_delegreturndata *data = calldata; 6844 struct inode *inode = data->inode; 6845 6846 if (data->lr.roc) 6847 pnfs_roc_release(&data->lr.arg, &data->lr.res, 6848 data->res.lr_ret); 6849 if (inode) { 6850 nfs4_fattr_set_prechange(&data->fattr, 6851 inode_peek_iversion_raw(inode)); 6852 nfs_refresh_inode(inode, &data->fattr); 6853 nfs_iput_and_deactive(inode); 6854 } 6855 kfree(calldata); 6856 } 6857 6858 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) 6859 { 6860 struct nfs4_delegreturndata *d_data; 6861 struct pnfs_layout_hdr *lo; 6862 6863 d_data = data; 6864 6865 if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) { 6866 nfs4_sequence_done(task, &d_data->res.seq_res); 6867 return; 6868 } 6869 6870 lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL; 6871 if (lo && !pnfs_layout_is_valid(lo)) { 6872 d_data->args.lr_args = NULL; 6873 d_data->res.lr_res = NULL; 6874 } 6875 6876 nfs4_setup_sequence(d_data->res.server->nfs_client, 6877 &d_data->args.seq_args, 6878 &d_data->res.seq_res, 6879 task); 6880 } 6881 6882 static const struct rpc_call_ops nfs4_delegreturn_ops = { 6883 .rpc_call_prepare = nfs4_delegreturn_prepare, 6884 .rpc_call_done = nfs4_delegreturn_done, 6885 .rpc_release = nfs4_delegreturn_release, 6886 }; 6887 6888 static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, 6889 const nfs4_stateid *stateid, 6890 struct nfs_delegation *delegation, 6891 int issync) 6892 { 6893 struct nfs4_delegreturndata *data; 6894 struct nfs_server *server = NFS_SERVER(inode); 6895 struct rpc_task *task; 6896 struct rpc_message msg = { 6897 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN], 6898 .rpc_cred = cred, 6899 }; 6900 struct rpc_task_setup task_setup_data = { 6901 .rpc_client = server->client, 6902 .rpc_message = &msg, 6903 .callback_ops = &nfs4_delegreturn_ops, 6904 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT, 6905 }; 6906 int status = 0; 6907 6908 if (nfs_server_capable(inode, NFS_CAP_MOVEABLE)) 6909 task_setup_data.flags |= RPC_TASK_MOVEABLE; 6910 6911 data = kzalloc(sizeof(*data), GFP_KERNEL); 6912 if (data == NULL) 6913 return -ENOMEM; 6914 6915 nfs4_state_protect(server->nfs_client, 6916 NFS_SP4_MACH_CRED_CLEANUP, 6917 &task_setup_data.rpc_client, &msg); 6918 6919 data->args.fhandle = &data->fh; 6920 data->args.stateid = &data->stateid; 6921 nfs4_bitmask_set(data->args.bitmask_store, 6922 server->cache_consistency_bitmask, inode, 0); 6923 data->args.bitmask = data->args.bitmask_store; 6924 nfs_copy_fh(&data->fh, NFS_FH(inode)); 6925 nfs4_stateid_copy(&data->stateid, stateid); 6926 data->res.fattr = &data->fattr; 6927 data->res.server = server; 6928 data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT; 6929 data->lr.arg.ld_private = &data->lr.ld_private; 
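	/* Initialise the reply attributes and try to pin the inode for the
	 * duration of the asynchronous call. */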
6930 nfs_fattr_init(data->res.fattr); 6931 data->timestamp = jiffies; 6932 data->rpc_status = 0; 6933 data->inode = nfs_igrab_and_active(inode); 6934 if (data->inode || issync) { 6935 data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res, 6936 cred); 6937 if (data->lr.roc) { 6938 data->args.lr_args = &data->lr.arg; 6939 data->res.lr_res = &data->lr.res; 6940 } 6941 } 6942 6943 if (delegation && 6944 test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) { 6945 if (delegation->type & FMODE_READ) { 6946 data->sattr.atime = inode_get_atime(inode); 6947 data->sattr.atime_set = true; 6948 } 6949 if (delegation->type & FMODE_WRITE) { 6950 data->sattr.mtime = inode_get_mtime(inode); 6951 data->sattr.mtime_set = true; 6952 } 6953 data->args.sattr_args = &data->sattr; 6954 data->res.sattr_res = true; 6955 } 6956 6957 if (!data->inode) 6958 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 6959 1); 6960 else 6961 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 6962 0); 6963 6964 task_setup_data.callback_data = data; 6965 msg.rpc_argp = &data->args; 6966 msg.rpc_resp = &data->res; 6967 task = rpc_run_task(&task_setup_data); 6968 if (IS_ERR(task)) 6969 return PTR_ERR(task); 6970 if (!issync) 6971 goto out; 6972 status = rpc_wait_for_completion_task(task); 6973 if (status != 0) 6974 goto out; 6975 status = data->rpc_status; 6976 out: 6977 rpc_put_task(task); 6978 return status; 6979 } 6980 6981 int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, 6982 const nfs4_stateid *stateid, 6983 struct nfs_delegation *delegation, int issync) 6984 { 6985 struct nfs_server *server = NFS_SERVER(inode); 6986 struct nfs4_exception exception = { }; 6987 int err; 6988 do { 6989 err = _nfs4_proc_delegreturn(inode, cred, stateid, 6990 delegation, issync); 6991 trace_nfs4_delegreturn(inode, stateid, err); 6992 switch (err) { 6993 case -NFS4ERR_STALE_STATEID: 6994 case -NFS4ERR_EXPIRED: 6995 case 0: 6996 return 0; 6997 } 6998 err = nfs4_handle_exception(server, err, &exception); 6999 } while (exception.retry); 7000 return err; 7001 } 7002 7003 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7004 { 7005 struct inode *inode = state->inode; 7006 struct nfs_server *server = NFS_SERVER(inode); 7007 struct nfs_client *clp = server->nfs_client; 7008 struct nfs_lockt_args arg = { 7009 .fh = NFS_FH(inode), 7010 .fl = request, 7011 }; 7012 struct nfs_lockt_res res = { 7013 .denied = request, 7014 }; 7015 struct rpc_message msg = { 7016 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT], 7017 .rpc_argp = &arg, 7018 .rpc_resp = &res, 7019 .rpc_cred = state->owner->so_cred, 7020 }; 7021 struct nfs4_lock_state *lsp; 7022 int status; 7023 7024 arg.lock_owner.clientid = clp->cl_clientid; 7025 status = nfs4_set_lock_state(state, request); 7026 if (status != 0) 7027 goto out; 7028 lsp = request->fl_u.nfs4_fl.owner; 7029 arg.lock_owner.id = lsp->ls_seqid.owner_id; 7030 arg.lock_owner.s_dev = server->s_dev; 7031 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 7032 switch (status) { 7033 case 0: 7034 request->c.flc_type = F_UNLCK; 7035 break; 7036 case -NFS4ERR_DENIED: 7037 status = 0; 7038 } 7039 request->fl_ops->fl_release_private(request); 7040 request->fl_ops = NULL; 7041 out: 7042 return status; 7043 } 7044 7045 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7046 { 7047 struct nfs4_exception exception = { 7048 .interruptible = true, 7049 }; 7050 int err; 7051 7052 
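	/* Retry the LOCKT call until the exception handler stops asking
	 * for a retry. */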
do { 7053 err = _nfs4_proc_getlk(state, cmd, request); 7054 trace_nfs4_get_lock(request, state, cmd, err); 7055 err = nfs4_handle_exception(NFS_SERVER(state->inode), err, 7056 &exception); 7057 } while (exception.retry); 7058 return err; 7059 } 7060 7061 /* 7062 * Update the seqid of a lock stateid after receiving 7063 * NFS4ERR_OLD_STATEID 7064 */ 7065 static bool nfs4_refresh_lock_old_stateid(nfs4_stateid *dst, 7066 struct nfs4_lock_state *lsp) 7067 { 7068 struct nfs4_state *state = lsp->ls_state; 7069 bool ret = false; 7070 7071 spin_lock(&state->state_lock); 7072 if (!nfs4_stateid_match_other(dst, &lsp->ls_stateid)) 7073 goto out; 7074 if (!nfs4_stateid_is_newer(&lsp->ls_stateid, dst)) 7075 nfs4_stateid_seqid_inc(dst); 7076 else 7077 dst->seqid = lsp->ls_stateid.seqid; 7078 ret = true; 7079 out: 7080 spin_unlock(&state->state_lock); 7081 return ret; 7082 } 7083 7084 static bool nfs4_sync_lock_stateid(nfs4_stateid *dst, 7085 struct nfs4_lock_state *lsp) 7086 { 7087 struct nfs4_state *state = lsp->ls_state; 7088 bool ret; 7089 7090 spin_lock(&state->state_lock); 7091 ret = !nfs4_stateid_match_other(dst, &lsp->ls_stateid); 7092 nfs4_stateid_copy(dst, &lsp->ls_stateid); 7093 spin_unlock(&state->state_lock); 7094 return ret; 7095 } 7096 7097 struct nfs4_unlockdata { 7098 struct nfs_locku_args arg; 7099 struct nfs_locku_res res; 7100 struct nfs4_lock_state *lsp; 7101 struct nfs_open_context *ctx; 7102 struct nfs_lock_context *l_ctx; 7103 struct file_lock fl; 7104 struct nfs_server *server; 7105 unsigned long timestamp; 7106 unsigned short retrans; 7107 }; 7108 7109 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, 7110 struct nfs_open_context *ctx, 7111 struct nfs4_lock_state *lsp, 7112 struct nfs_seqid *seqid) 7113 { 7114 struct nfs4_unlockdata *p; 7115 struct nfs4_state *state = lsp->ls_state; 7116 struct inode *inode = state->inode; 7117 struct nfs_lock_context *l_ctx; 7118 7119 p = kzalloc(sizeof(*p), GFP_KERNEL); 7120 if (p == NULL) 7121 return NULL; 7122 l_ctx = nfs_get_lock_context(ctx); 7123 if (!IS_ERR(l_ctx)) { 7124 p->l_ctx = l_ctx; 7125 } else { 7126 kfree(p); 7127 return NULL; 7128 } 7129 p->arg.fh = NFS_FH(inode); 7130 p->arg.fl = &p->fl; 7131 p->arg.seqid = seqid; 7132 p->res.seqid = seqid; 7133 p->lsp = lsp; 7134 /* Ensure we don't close file until we're done freeing locks! 
*/ 7135 p->ctx = get_nfs_open_context(ctx); 7136 locks_init_lock(&p->fl); 7137 locks_copy_lock(&p->fl, fl); 7138 p->server = NFS_SERVER(inode); 7139 spin_lock(&state->state_lock); 7140 nfs4_stateid_copy(&p->arg.stateid, &lsp->ls_stateid); 7141 spin_unlock(&state->state_lock); 7142 return p; 7143 } 7144 7145 static void nfs4_locku_release_calldata(void *data) 7146 { 7147 struct nfs4_unlockdata *calldata = data; 7148 nfs_free_seqid(calldata->arg.seqid); 7149 nfs4_put_lock_state(calldata->lsp); 7150 nfs_put_lock_context(calldata->l_ctx); 7151 put_nfs_open_context(calldata->ctx); 7152 kfree(calldata); 7153 } 7154 7155 static void nfs4_locku_done(struct rpc_task *task, void *data) 7156 { 7157 struct nfs4_unlockdata *calldata = data; 7158 struct nfs4_exception exception = { 7159 .inode = calldata->lsp->ls_state->inode, 7160 .stateid = &calldata->arg.stateid, 7161 .retrans = calldata->retrans, 7162 }; 7163 7164 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 7165 return; 7166 switch (task->tk_status) { 7167 case 0: 7168 renew_lease(calldata->server, calldata->timestamp); 7169 locks_lock_inode_wait(calldata->lsp->ls_state->inode, &calldata->fl); 7170 if (nfs4_update_lock_stateid(calldata->lsp, 7171 &calldata->res.stateid)) 7172 break; 7173 fallthrough; 7174 case -NFS4ERR_ADMIN_REVOKED: 7175 case -NFS4ERR_EXPIRED: 7176 nfs4_free_revoked_stateid(calldata->server, 7177 &calldata->arg.stateid, 7178 task->tk_msg.rpc_cred); 7179 fallthrough; 7180 case -NFS4ERR_BAD_STATEID: 7181 case -NFS4ERR_STALE_STATEID: 7182 if (nfs4_sync_lock_stateid(&calldata->arg.stateid, 7183 calldata->lsp)) 7184 rpc_restart_call_prepare(task); 7185 break; 7186 case -NFS4ERR_OLD_STATEID: 7187 if (nfs4_refresh_lock_old_stateid(&calldata->arg.stateid, 7188 calldata->lsp)) 7189 rpc_restart_call_prepare(task); 7190 break; 7191 default: 7192 task->tk_status = nfs4_async_handle_exception(task, 7193 calldata->server, task->tk_status, 7194 &exception); 7195 calldata->retrans = exception.retrans; 7196 if (exception.retry) 7197 rpc_restart_call_prepare(task); 7198 } 7199 nfs_release_seqid(calldata->arg.seqid); 7200 } 7201 7202 static void nfs4_locku_prepare(struct rpc_task *task, void *data) 7203 { 7204 struct nfs4_unlockdata *calldata = data; 7205 7206 if (test_bit(NFS_CONTEXT_UNLOCK, &calldata->l_ctx->open_context->flags) && 7207 nfs_async_iocounter_wait(task, calldata->l_ctx)) 7208 return; 7209 7210 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 7211 goto out_wait; 7212 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) { 7213 /* Note: exit _without_ running nfs4_locku_done */ 7214 goto out_no_action; 7215 } 7216 calldata->timestamp = jiffies; 7217 if (nfs4_setup_sequence(calldata->server->nfs_client, 7218 &calldata->arg.seq_args, 7219 &calldata->res.seq_res, 7220 task) != 0) 7221 nfs_release_seqid(calldata->arg.seqid); 7222 return; 7223 out_no_action: 7224 task->tk_action = NULL; 7225 out_wait: 7226 nfs4_sequence_done(task, &calldata->res.seq_res); 7227 } 7228 7229 static const struct rpc_call_ops nfs4_locku_ops = { 7230 .rpc_call_prepare = nfs4_locku_prepare, 7231 .rpc_call_done = nfs4_locku_done, 7232 .rpc_release = nfs4_locku_release_calldata, 7233 }; 7234 7235 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, 7236 struct nfs_open_context *ctx, 7237 struct nfs4_lock_state *lsp, 7238 struct nfs_seqid *seqid) 7239 { 7240 struct nfs4_unlockdata *data; 7241 struct rpc_message msg = { 7242 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU], 7243 .rpc_cred = ctx->cred, 7244 }; 7245 struct 
rpc_task_setup task_setup_data = { 7246 .rpc_client = NFS_CLIENT(lsp->ls_state->inode), 7247 .rpc_message = &msg, 7248 .callback_ops = &nfs4_locku_ops, 7249 .workqueue = nfsiod_workqueue, 7250 .flags = RPC_TASK_ASYNC, 7251 }; 7252 7253 if (nfs_server_capable(lsp->ls_state->inode, NFS_CAP_MOVEABLE)) 7254 task_setup_data.flags |= RPC_TASK_MOVEABLE; 7255 7256 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client, 7257 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg); 7258 7259 /* Ensure this is an unlock - when canceling a lock, the 7260 * canceled lock is passed in, and it won't be an unlock. 7261 */ 7262 fl->c.flc_type = F_UNLCK; 7263 if (fl->c.flc_flags & FL_CLOSE) 7264 set_bit(NFS_CONTEXT_UNLOCK, &ctx->flags); 7265 7266 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); 7267 if (data == NULL) { 7268 nfs_free_seqid(seqid); 7269 return ERR_PTR(-ENOMEM); 7270 } 7271 7272 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 0); 7273 msg.rpc_argp = &data->arg; 7274 msg.rpc_resp = &data->res; 7275 task_setup_data.callback_data = data; 7276 return rpc_run_task(&task_setup_data); 7277 } 7278 7279 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) 7280 { 7281 struct inode *inode = state->inode; 7282 struct nfs4_state_owner *sp = state->owner; 7283 struct nfs_inode *nfsi = NFS_I(inode); 7284 struct nfs_seqid *seqid; 7285 struct nfs4_lock_state *lsp; 7286 struct rpc_task *task; 7287 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 7288 int status = 0; 7289 unsigned char saved_flags = request->c.flc_flags; 7290 7291 status = nfs4_set_lock_state(state, request); 7292 /* Unlock _before_ we do the RPC call */ 7293 request->c.flc_flags |= FL_EXISTS; 7294 /* Exclude nfs_delegation_claim_locks() */ 7295 mutex_lock(&sp->so_delegreturn_mutex); 7296 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */ 7297 down_read(&nfsi->rwsem); 7298 if (locks_lock_inode_wait(inode, request) == -ENOENT) { 7299 up_read(&nfsi->rwsem); 7300 mutex_unlock(&sp->so_delegreturn_mutex); 7301 goto out; 7302 } 7303 lsp = request->fl_u.nfs4_fl.owner; 7304 set_bit(NFS_LOCK_UNLOCKING, &lsp->ls_flags); 7305 up_read(&nfsi->rwsem); 7306 mutex_unlock(&sp->so_delegreturn_mutex); 7307 if (status != 0) 7308 goto out; 7309 /* Is this a delegated lock? 
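 * If the lock was never registered with the server, there is no LOCKU
 * to send.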
*/ 7310 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0) 7311 goto out; 7312 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid; 7313 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); 7314 status = -ENOMEM; 7315 if (IS_ERR(seqid)) 7316 goto out; 7317 task = nfs4_do_unlck(request, 7318 nfs_file_open_context(request->c.flc_file), 7319 lsp, seqid); 7320 status = PTR_ERR(task); 7321 if (IS_ERR(task)) 7322 goto out; 7323 status = rpc_wait_for_completion_task(task); 7324 rpc_put_task(task); 7325 out: 7326 request->c.flc_flags = saved_flags; 7327 trace_nfs4_unlock(request, state, F_SETLK, status); 7328 return status; 7329 } 7330 7331 struct nfs4_lockdata { 7332 struct nfs_lock_args arg; 7333 struct nfs_lock_res res; 7334 struct nfs4_lock_state *lsp; 7335 struct nfs_open_context *ctx; 7336 struct file_lock fl; 7337 unsigned long timestamp; 7338 int rpc_status; 7339 int cancelled; 7340 struct nfs_server *server; 7341 }; 7342 7343 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, 7344 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp, 7345 gfp_t gfp_mask) 7346 { 7347 struct nfs4_lockdata *p; 7348 struct inode *inode = lsp->ls_state->inode; 7349 struct nfs_server *server = NFS_SERVER(inode); 7350 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 7351 7352 p = kzalloc(sizeof(*p), gfp_mask); 7353 if (p == NULL) 7354 return NULL; 7355 7356 p->arg.fh = NFS_FH(inode); 7357 p->arg.fl = &p->fl; 7358 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask); 7359 if (IS_ERR(p->arg.open_seqid)) 7360 goto out_free; 7361 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 7362 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask); 7363 if (IS_ERR(p->arg.lock_seqid)) 7364 goto out_free_seqid; 7365 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; 7366 p->arg.lock_owner.id = lsp->ls_seqid.owner_id; 7367 p->arg.lock_owner.s_dev = server->s_dev; 7368 p->res.lock_seqid = p->arg.lock_seqid; 7369 p->lsp = lsp; 7370 p->server = server; 7371 p->ctx = get_nfs_open_context(ctx); 7372 locks_init_lock(&p->fl); 7373 locks_copy_lock(&p->fl, fl); 7374 return p; 7375 out_free_seqid: 7376 nfs_free_seqid(p->arg.open_seqid); 7377 out_free: 7378 kfree(p); 7379 return NULL; 7380 } 7381 7382 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) 7383 { 7384 struct nfs4_lockdata *data = calldata; 7385 struct nfs4_state *state = data->lsp->ls_state; 7386 7387 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) 7388 goto out_wait; 7389 /* Do we need to do an open_to_lock_owner? 
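 * A new lock owner must present the open stateid and an open seqid;
 * an existing lock owner just reuses its current lock stateid.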
*/ 7390 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) { 7391 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) { 7392 goto out_release_lock_seqid; 7393 } 7394 nfs4_stateid_copy(&data->arg.open_stateid, 7395 &state->open_stateid); 7396 data->arg.new_lock_owner = 1; 7397 data->res.open_seqid = data->arg.open_seqid; 7398 } else { 7399 data->arg.new_lock_owner = 0; 7400 nfs4_stateid_copy(&data->arg.lock_stateid, 7401 &data->lsp->ls_stateid); 7402 } 7403 if (!nfs4_valid_open_stateid(state)) { 7404 data->rpc_status = -EBADF; 7405 task->tk_action = NULL; 7406 goto out_release_open_seqid; 7407 } 7408 data->timestamp = jiffies; 7409 if (nfs4_setup_sequence(data->server->nfs_client, 7410 &data->arg.seq_args, 7411 &data->res.seq_res, 7412 task) == 0) 7413 return; 7414 out_release_open_seqid: 7415 nfs_release_seqid(data->arg.open_seqid); 7416 out_release_lock_seqid: 7417 nfs_release_seqid(data->arg.lock_seqid); 7418 out_wait: 7419 nfs4_sequence_done(task, &data->res.seq_res); 7420 dprintk("%s: ret = %d\n", __func__, data->rpc_status); 7421 } 7422 7423 static void nfs4_lock_done(struct rpc_task *task, void *calldata) 7424 { 7425 struct nfs4_lockdata *data = calldata; 7426 struct nfs4_lock_state *lsp = data->lsp; 7427 7428 if (!nfs4_sequence_done(task, &data->res.seq_res)) 7429 return; 7430 7431 data->rpc_status = task->tk_status; 7432 switch (task->tk_status) { 7433 case 0: 7434 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)), 7435 data->timestamp); 7436 if (data->arg.new_lock && !data->cancelled) { 7437 data->fl.c.flc_flags &= ~(FL_SLEEP | FL_ACCESS); 7438 if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0) 7439 goto out_restart; 7440 } 7441 if (data->arg.new_lock_owner != 0) { 7442 nfs_confirm_seqid(&lsp->ls_seqid, 0); 7443 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid); 7444 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 7445 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid)) 7446 goto out_restart; 7447 break; 7448 case -NFS4ERR_OLD_STATEID: 7449 if (data->arg.new_lock_owner != 0 && 7450 nfs4_refresh_open_old_stateid(&data->arg.open_stateid, 7451 lsp->ls_state)) 7452 goto out_restart; 7453 if (nfs4_refresh_lock_old_stateid(&data->arg.lock_stateid, lsp)) 7454 goto out_restart; 7455 fallthrough; 7456 case -NFS4ERR_BAD_STATEID: 7457 case -NFS4ERR_STALE_STATEID: 7458 case -NFS4ERR_EXPIRED: 7459 if (data->arg.new_lock_owner != 0) { 7460 if (!nfs4_stateid_match(&data->arg.open_stateid, 7461 &lsp->ls_state->open_stateid)) 7462 goto out_restart; 7463 } else if (!nfs4_stateid_match(&data->arg.lock_stateid, 7464 &lsp->ls_stateid)) 7465 goto out_restart; 7466 } 7467 out_done: 7468 dprintk("%s: ret = %d!\n", __func__, data->rpc_status); 7469 return; 7470 out_restart: 7471 if (!data->cancelled) 7472 rpc_restart_call_prepare(task); 7473 goto out_done; 7474 } 7475 7476 static void nfs4_lock_release(void *calldata) 7477 { 7478 struct nfs4_lockdata *data = calldata; 7479 7480 nfs_free_seqid(data->arg.open_seqid); 7481 if (data->cancelled && data->rpc_status == 0) { 7482 struct rpc_task *task; 7483 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, 7484 data->arg.lock_seqid); 7485 if (!IS_ERR(task)) 7486 rpc_put_task_async(task); 7487 dprintk("%s: cancelling lock!\n", __func__); 7488 } else 7489 nfs_free_seqid(data->arg.lock_seqid); 7490 nfs4_put_lock_state(data->lsp); 7491 put_nfs_open_context(data->ctx); 7492 kfree(data); 7493 } 7494 7495 static const struct rpc_call_ops nfs4_lock_ops = { 7496 .rpc_call_prepare = nfs4_lock_prepare, 7497 .rpc_call_done = 
nfs4_lock_done, 7498 .rpc_release = nfs4_lock_release, 7499 }; 7500 7501 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) 7502 { 7503 switch (error) { 7504 case -NFS4ERR_ADMIN_REVOKED: 7505 case -NFS4ERR_EXPIRED: 7506 case -NFS4ERR_BAD_STATEID: 7507 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 7508 if (new_lock_owner != 0 || 7509 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) 7510 nfs4_schedule_stateid_recovery(server, lsp->ls_state); 7511 break; 7512 case -NFS4ERR_STALE_STATEID: 7513 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 7514 nfs4_schedule_lease_recovery(server->nfs_client); 7515 } 7516 } 7517 7518 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) 7519 { 7520 struct nfs4_lockdata *data; 7521 struct rpc_task *task; 7522 struct rpc_message msg = { 7523 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK], 7524 .rpc_cred = state->owner->so_cred, 7525 }; 7526 struct rpc_task_setup task_setup_data = { 7527 .rpc_client = NFS_CLIENT(state->inode), 7528 .rpc_message = &msg, 7529 .callback_ops = &nfs4_lock_ops, 7530 .workqueue = nfsiod_workqueue, 7531 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 7532 }; 7533 int ret; 7534 7535 if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE)) 7536 task_setup_data.flags |= RPC_TASK_MOVEABLE; 7537 7538 data = nfs4_alloc_lockdata(fl, 7539 nfs_file_open_context(fl->c.flc_file), 7540 fl->fl_u.nfs4_fl.owner, GFP_KERNEL); 7541 if (data == NULL) 7542 return -ENOMEM; 7543 if (IS_SETLKW(cmd)) 7544 data->arg.block = 1; 7545 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 7546 recovery_type > NFS_LOCK_NEW); 7547 msg.rpc_argp = &data->arg; 7548 msg.rpc_resp = &data->res; 7549 task_setup_data.callback_data = data; 7550 if (recovery_type > NFS_LOCK_NEW) { 7551 if (recovery_type == NFS_LOCK_RECLAIM) 7552 data->arg.reclaim = NFS_LOCK_RECLAIM; 7553 } else 7554 data->arg.new_lock = 1; 7555 task = rpc_run_task(&task_setup_data); 7556 if (IS_ERR(task)) 7557 return PTR_ERR(task); 7558 ret = rpc_wait_for_completion_task(task); 7559 if (ret == 0) { 7560 ret = data->rpc_status; 7561 if (ret) 7562 nfs4_handle_setlk_error(data->server, data->lsp, 7563 data->arg.new_lock_owner, ret); 7564 } else 7565 data->cancelled = true; 7566 trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret); 7567 rpc_put_task(task); 7568 dprintk("%s: ret = %d\n", __func__, ret); 7569 return ret; 7570 } 7571 7572 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) 7573 { 7574 struct nfs_server *server = NFS_SERVER(state->inode); 7575 struct nfs4_exception exception = { 7576 .inode = state->inode, 7577 }; 7578 int err; 7579 7580 do { 7581 /* Cache the lock if possible... 
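 * (while we hold a delegation the server will recall it before granting
 * a conflicting lock, so the reclaim can be satisfied locally)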
*/ 7582 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 7583 return 0; 7584 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM); 7585 if (err != -NFS4ERR_DELAY) 7586 break; 7587 nfs4_handle_exception(server, err, &exception); 7588 } while (exception.retry); 7589 return err; 7590 } 7591 7592 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) 7593 { 7594 struct nfs_server *server = NFS_SERVER(state->inode); 7595 struct nfs4_exception exception = { 7596 .inode = state->inode, 7597 }; 7598 int err; 7599 7600 err = nfs4_set_lock_state(state, request); 7601 if (err != 0) 7602 return err; 7603 if (!recover_lost_locks) { 7604 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags); 7605 return 0; 7606 } 7607 do { 7608 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 7609 return 0; 7610 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED); 7611 switch (err) { 7612 default: 7613 goto out; 7614 case -NFS4ERR_GRACE: 7615 case -NFS4ERR_DELAY: 7616 nfs4_handle_exception(server, err, &exception); 7617 err = 0; 7618 } 7619 } while (exception.retry); 7620 out: 7621 return err; 7622 } 7623 7624 #if defined(CONFIG_NFS_V4_1) 7625 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request) 7626 { 7627 struct nfs4_lock_state *lsp; 7628 int status; 7629 7630 status = nfs4_set_lock_state(state, request); 7631 if (status != 0) 7632 return status; 7633 lsp = request->fl_u.nfs4_fl.owner; 7634 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) || 7635 test_bit(NFS_LOCK_LOST, &lsp->ls_flags)) 7636 return 0; 7637 return nfs4_lock_expired(state, request); 7638 } 7639 #endif 7640 7641 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7642 { 7643 struct nfs_inode *nfsi = NFS_I(state->inode); 7644 struct nfs4_state_owner *sp = state->owner; 7645 unsigned char flags = request->c.flc_flags; 7646 int status; 7647 7648 request->c.flc_flags |= FL_ACCESS; 7649 status = locks_lock_inode_wait(state->inode, request); 7650 if (status < 0) 7651 goto out; 7652 mutex_lock(&sp->so_delegreturn_mutex); 7653 down_read(&nfsi->rwsem); 7654 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { 7655 /* Yes: cache locks! */ 7656 /* ...but avoid races with delegation recall... 
*/ 7657 request->c.flc_flags = flags & ~FL_SLEEP; 7658 status = locks_lock_inode_wait(state->inode, request); 7659 up_read(&nfsi->rwsem); 7660 mutex_unlock(&sp->so_delegreturn_mutex); 7661 goto out; 7662 } 7663 up_read(&nfsi->rwsem); 7664 mutex_unlock(&sp->so_delegreturn_mutex); 7665 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); 7666 out: 7667 request->c.flc_flags = flags; 7668 return status; 7669 } 7670 7671 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7672 { 7673 struct nfs4_exception exception = { 7674 .state = state, 7675 .inode = state->inode, 7676 .interruptible = true, 7677 }; 7678 int err; 7679 7680 do { 7681 err = _nfs4_proc_setlk(state, cmd, request); 7682 if (err == -NFS4ERR_DENIED) 7683 err = -EAGAIN; 7684 err = nfs4_handle_exception(NFS_SERVER(state->inode), 7685 err, &exception); 7686 } while (exception.retry); 7687 return err; 7688 } 7689 7690 #define NFS4_LOCK_MINTIMEOUT (1 * HZ) 7691 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) 7692 7693 static int 7694 nfs4_retry_setlk_simple(struct nfs4_state *state, int cmd, 7695 struct file_lock *request) 7696 { 7697 int status = -ERESTARTSYS; 7698 unsigned long timeout = NFS4_LOCK_MINTIMEOUT; 7699 7700 while(!signalled()) { 7701 status = nfs4_proc_setlk(state, cmd, request); 7702 if ((status != -EAGAIN) || IS_SETLK(cmd)) 7703 break; 7704 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE); 7705 schedule_timeout(timeout); 7706 timeout *= 2; 7707 timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout); 7708 status = -ERESTARTSYS; 7709 } 7710 return status; 7711 } 7712 7713 #ifdef CONFIG_NFS_V4_1 7714 struct nfs4_lock_waiter { 7715 struct inode *inode; 7716 struct nfs_lowner owner; 7717 wait_queue_entry_t wait; 7718 }; 7719 7720 static int 7721 nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key) 7722 { 7723 struct nfs4_lock_waiter *waiter = 7724 container_of(wait, struct nfs4_lock_waiter, wait); 7725 7726 /* NULL key means to wake up everyone */ 7727 if (key) { 7728 struct cb_notify_lock_args *cbnl = key; 7729 struct nfs_lowner *lowner = &cbnl->cbnl_owner, 7730 *wowner = &waiter->owner; 7731 7732 /* Only wake if the callback was for the same owner. 
*/ 7733 if (lowner->id != wowner->id || lowner->s_dev != wowner->s_dev) 7734 return 0; 7735 7736 /* Make sure it's for the right inode */ 7737 if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh)) 7738 return 0; 7739 } 7740 7741 return woken_wake_function(wait, mode, flags, key); 7742 } 7743 7744 static int 7745 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7746 { 7747 struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner; 7748 struct nfs_server *server = NFS_SERVER(state->inode); 7749 struct nfs_client *clp = server->nfs_client; 7750 wait_queue_head_t *q = &clp->cl_lock_waitq; 7751 struct nfs4_lock_waiter waiter = { 7752 .inode = state->inode, 7753 .owner = { .clientid = clp->cl_clientid, 7754 .id = lsp->ls_seqid.owner_id, 7755 .s_dev = server->s_dev }, 7756 }; 7757 int status; 7758 7759 /* Don't bother with waitqueue if we don't expect a callback */ 7760 if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags)) 7761 return nfs4_retry_setlk_simple(state, cmd, request); 7762 7763 init_wait(&waiter.wait); 7764 waiter.wait.func = nfs4_wake_lock_waiter; 7765 add_wait_queue(q, &waiter.wait); 7766 7767 do { 7768 status = nfs4_proc_setlk(state, cmd, request); 7769 if (status != -EAGAIN || IS_SETLK(cmd)) 7770 break; 7771 7772 status = -ERESTARTSYS; 7773 wait_woken(&waiter.wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE, 7774 NFS4_LOCK_MAXTIMEOUT); 7775 } while (!signalled()); 7776 7777 remove_wait_queue(q, &waiter.wait); 7778 7779 return status; 7780 } 7781 #else /* !CONFIG_NFS_V4_1 */ 7782 static inline int 7783 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7784 { 7785 return nfs4_retry_setlk_simple(state, cmd, request); 7786 } 7787 #endif 7788 7789 static int 7790 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) 7791 { 7792 struct nfs_open_context *ctx; 7793 struct nfs4_state *state; 7794 int status; 7795 7796 /* verify open state */ 7797 ctx = nfs_file_open_context(filp); 7798 state = ctx->state; 7799 7800 if (IS_GETLK(cmd)) { 7801 if (state != NULL) 7802 return nfs4_proc_getlk(state, F_GETLK, request); 7803 return 0; 7804 } 7805 7806 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd))) 7807 return -EINVAL; 7808 7809 if (lock_is_unlock(request)) { 7810 if (state != NULL) 7811 return nfs4_proc_unlck(state, cmd, request); 7812 return 0; 7813 } 7814 7815 if (state == NULL) 7816 return -ENOLCK; 7817 7818 if ((request->c.flc_flags & FL_POSIX) && 7819 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) 7820 return -ENOLCK; 7821 7822 /* 7823 * Don't rely on the VFS having checked the file open mode, 7824 * since it won't do this for flock() locks. 7825 */ 7826 switch (request->c.flc_type) { 7827 case F_RDLCK: 7828 if (!(filp->f_mode & FMODE_READ)) 7829 return -EBADF; 7830 break; 7831 case F_WRLCK: 7832 if (!(filp->f_mode & FMODE_WRITE)) 7833 return -EBADF; 7834 } 7835 7836 status = nfs4_set_lock_state(state, request); 7837 if (status != 0) 7838 return status; 7839 7840 return nfs4_retry_setlk(state, cmd, request); 7841 } 7842 7843 static int nfs4_delete_lease(struct file *file, void **priv) 7844 { 7845 return generic_setlease(file, F_UNLCK, NULL, priv); 7846 } 7847 7848 static int nfs4_add_lease(struct file *file, int arg, struct file_lease **lease, 7849 void **priv) 7850 { 7851 struct inode *inode = file_inode(file); 7852 fmode_t type = arg == F_RDLCK ? 
FMODE_READ : FMODE_WRITE; 7853 int ret; 7854 7855 /* No delegation, no lease */ 7856 if (!nfs4_have_delegation(inode, type, 0)) 7857 return -EAGAIN; 7858 ret = generic_setlease(file, arg, lease, priv); 7859 if (ret || nfs4_have_delegation(inode, type, 0)) 7860 return ret; 7861 /* We raced with a delegation return */ 7862 nfs4_delete_lease(file, priv); 7863 return -EAGAIN; 7864 } 7865 7866 int nfs4_proc_setlease(struct file *file, int arg, struct file_lease **lease, 7867 void **priv) 7868 { 7869 switch (arg) { 7870 case F_RDLCK: 7871 case F_WRLCK: 7872 return nfs4_add_lease(file, arg, lease, priv); 7873 case F_UNLCK: 7874 return nfs4_delete_lease(file, priv); 7875 default: 7876 return -EINVAL; 7877 } 7878 } 7879 7880 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid) 7881 { 7882 struct nfs_server *server = NFS_SERVER(state->inode); 7883 int err; 7884 7885 err = nfs4_set_lock_state(state, fl); 7886 if (err != 0) 7887 return err; 7888 do { 7889 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); 7890 if (err != -NFS4ERR_DELAY && err != -NFS4ERR_GRACE) 7891 break; 7892 ssleep(1); 7893 } while (err == -NFS4ERR_DELAY || err == -NFS4ERR_GRACE); 7894 return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err); 7895 } 7896 7897 struct nfs_release_lockowner_data { 7898 struct nfs4_lock_state *lsp; 7899 struct nfs_server *server; 7900 struct nfs_release_lockowner_args args; 7901 struct nfs_release_lockowner_res res; 7902 unsigned long timestamp; 7903 }; 7904 7905 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata) 7906 { 7907 struct nfs_release_lockowner_data *data = calldata; 7908 struct nfs_server *server = data->server; 7909 nfs4_setup_sequence(server->nfs_client, &data->args.seq_args, 7910 &data->res.seq_res, task); 7911 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 7912 data->timestamp = jiffies; 7913 } 7914 7915 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata) 7916 { 7917 struct nfs_release_lockowner_data *data = calldata; 7918 struct nfs_server *server = data->server; 7919 7920 nfs40_sequence_done(task, &data->res.seq_res); 7921 7922 switch (task->tk_status) { 7923 case 0: 7924 renew_lease(server, data->timestamp); 7925 break; 7926 case -NFS4ERR_STALE_CLIENTID: 7927 case -NFS4ERR_EXPIRED: 7928 nfs4_schedule_lease_recovery(server->nfs_client); 7929 break; 7930 case -NFS4ERR_LEASE_MOVED: 7931 case -NFS4ERR_DELAY: 7932 if (nfs4_async_handle_error(task, server, 7933 NULL, NULL) == -EAGAIN) 7934 rpc_restart_call_prepare(task); 7935 } 7936 } 7937 7938 static void nfs4_release_lockowner_release(void *calldata) 7939 { 7940 struct nfs_release_lockowner_data *data = calldata; 7941 nfs4_free_lock_state(data->server, data->lsp); 7942 kfree(calldata); 7943 } 7944 7945 static const struct rpc_call_ops nfs4_release_lockowner_ops = { 7946 .rpc_call_prepare = nfs4_release_lockowner_prepare, 7947 .rpc_call_done = nfs4_release_lockowner_done, 7948 .rpc_release = nfs4_release_lockowner_release, 7949 }; 7950 7951 static void 7952 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp) 7953 { 7954 struct nfs_release_lockowner_data *data; 7955 struct rpc_message msg = { 7956 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER], 7957 }; 7958 7959 if (server->nfs_client->cl_mvops->minor_version != 0) 7960 return; 7961 7962 data = kmalloc(sizeof(*data), GFP_KERNEL); 7963 if (!data) 7964 return; 7965 data->lsp = lsp; 7966 data->server
= server; 7967 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 7968 data->args.lock_owner.id = lsp->ls_seqid.owner_id; 7969 data->args.lock_owner.s_dev = server->s_dev; 7970 7971 msg.rpc_argp = &data->args; 7972 msg.rpc_resp = &data->res; 7973 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0); 7974 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); 7975 } 7976 7977 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" 7978 7979 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler, 7980 struct mnt_idmap *idmap, 7981 struct dentry *unused, struct inode *inode, 7982 const char *key, const void *buf, 7983 size_t buflen, int flags) 7984 { 7985 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_ACL); 7986 } 7987 7988 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler, 7989 struct dentry *unused, struct inode *inode, 7990 const char *key, void *buf, size_t buflen) 7991 { 7992 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_ACL); 7993 } 7994 7995 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry) 7996 { 7997 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_ACL); 7998 } 7999 8000 #if defined(CONFIG_NFS_V4_1) 8001 #define XATTR_NAME_NFSV4_DACL "system.nfs4_dacl" 8002 8003 static int nfs4_xattr_set_nfs4_dacl(const struct xattr_handler *handler, 8004 struct mnt_idmap *idmap, 8005 struct dentry *unused, struct inode *inode, 8006 const char *key, const void *buf, 8007 size_t buflen, int flags) 8008 { 8009 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_DACL); 8010 } 8011 8012 static int nfs4_xattr_get_nfs4_dacl(const struct xattr_handler *handler, 8013 struct dentry *unused, struct inode *inode, 8014 const char *key, void *buf, size_t buflen) 8015 { 8016 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_DACL); 8017 } 8018 8019 static bool nfs4_xattr_list_nfs4_dacl(struct dentry *dentry) 8020 { 8021 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_DACL); 8022 } 8023 8024 #define XATTR_NAME_NFSV4_SACL "system.nfs4_sacl" 8025 8026 static int nfs4_xattr_set_nfs4_sacl(const struct xattr_handler *handler, 8027 struct mnt_idmap *idmap, 8028 struct dentry *unused, struct inode *inode, 8029 const char *key, const void *buf, 8030 size_t buflen, int flags) 8031 { 8032 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_SACL); 8033 } 8034 8035 static int nfs4_xattr_get_nfs4_sacl(const struct xattr_handler *handler, 8036 struct dentry *unused, struct inode *inode, 8037 const char *key, void *buf, size_t buflen) 8038 { 8039 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_SACL); 8040 } 8041 8042 static bool nfs4_xattr_list_nfs4_sacl(struct dentry *dentry) 8043 { 8044 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_SACL); 8045 } 8046 8047 #endif 8048 8049 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 8050 8051 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler, 8052 struct mnt_idmap *idmap, 8053 struct dentry *unused, struct inode *inode, 8054 const char *key, const void *buf, 8055 size_t buflen, int flags) 8056 { 8057 if (security_ismaclabel(key)) 8058 return nfs4_set_security_label(inode, buf, buflen); 8059 8060 return -EOPNOTSUPP; 8061 } 8062 8063 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler, 8064 struct dentry *unused, struct inode *inode, 8065 const char *key, void *buf, size_t buflen) 8066 { 8067 if (security_ismaclabel(key)) 8068 return nfs4_get_security_label(inode, buf, buflen); 8069 return -EOPNOTSUPP; 8070 } 
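/*
 * The xattr handlers above expose NFSv4 ACLs and the MAC security label
 * through the regular xattr system calls.  As an illustrative sketch
 * (hypothetical mount path, error handling trimmed), userspace fetches
 * the raw ACL with:
 *
 *	char buf[4096];
 *	ssize_t len = getxattr("/mnt/nfs/file", "system.nfs4_acl",
 *			       buf, sizeof(buf));
 */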
8071 8072 static ssize_t 8073 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len) 8074 { 8075 int len = 0; 8076 8077 if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) { 8078 len = security_inode_listsecurity(inode, list, list_len); 8079 if (len >= 0 && list_len && len > list_len) 8080 return -ERANGE; 8081 } 8082 return len; 8083 } 8084 8085 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = { 8086 .prefix = XATTR_SECURITY_PREFIX, 8087 .get = nfs4_xattr_get_nfs4_label, 8088 .set = nfs4_xattr_set_nfs4_label, 8089 }; 8090 8091 #else 8092 8093 static ssize_t 8094 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len) 8095 { 8096 return 0; 8097 } 8098 8099 #endif 8100 8101 #ifdef CONFIG_NFS_V4_2 8102 static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler, 8103 struct mnt_idmap *idmap, 8104 struct dentry *unused, struct inode *inode, 8105 const char *key, const void *buf, 8106 size_t buflen, int flags) 8107 { 8108 u32 mask; 8109 int ret; 8110 8111 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 8112 return -EOPNOTSUPP; 8113 8114 /* 8115 * There is no mapping from the MAY_* flags to the NFS_ACCESS_XA* 8116 * flags right now. Handling of xattr operations use the normal 8117 * file read/write permissions. 8118 * 8119 * Just in case the server has other ideas (which RFC 8276 allows), 8120 * do a cached access check for the XA* flags to possibly avoid 8121 * doing an RPC and getting EACCES back. 8122 */ 8123 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { 8124 if (!(mask & NFS_ACCESS_XAWRITE)) 8125 return -EACCES; 8126 } 8127 8128 if (buf == NULL) { 8129 ret = nfs42_proc_removexattr(inode, key); 8130 if (!ret) 8131 nfs4_xattr_cache_remove(inode, key); 8132 } else { 8133 ret = nfs42_proc_setxattr(inode, key, buf, buflen, flags); 8134 if (!ret) 8135 nfs4_xattr_cache_add(inode, key, buf, NULL, buflen); 8136 } 8137 8138 return ret; 8139 } 8140 8141 static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler, 8142 struct dentry *unused, struct inode *inode, 8143 const char *key, void *buf, size_t buflen) 8144 { 8145 u32 mask; 8146 ssize_t ret; 8147 8148 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 8149 return -EOPNOTSUPP; 8150 8151 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { 8152 if (!(mask & NFS_ACCESS_XAREAD)) 8153 return -EACCES; 8154 } 8155 8156 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 8157 if (ret) 8158 return ret; 8159 8160 ret = nfs4_xattr_cache_get(inode, key, buf, buflen); 8161 if (ret >= 0 || (ret < 0 && ret != -ENOENT)) 8162 return ret; 8163 8164 ret = nfs42_proc_getxattr(inode, key, buf, buflen); 8165 8166 return ret; 8167 } 8168 8169 static ssize_t 8170 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) 8171 { 8172 u64 cookie; 8173 bool eof; 8174 ssize_t ret, size; 8175 char *buf; 8176 size_t buflen; 8177 u32 mask; 8178 8179 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 8180 return 0; 8181 8182 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { 8183 if (!(mask & NFS_ACCESS_XALIST)) 8184 return 0; 8185 } 8186 8187 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 8188 if (ret) 8189 return ret; 8190 8191 ret = nfs4_xattr_cache_list(inode, list, list_len); 8192 if (ret >= 0 || (ret < 0 && ret != -ENOENT)) 8193 return ret; 8194 8195 cookie = 0; 8196 eof = false; 8197 buflen = list_len ? list_len : XATTR_LIST_MAX; 8198 buf = list_len ? 
list : NULL; 8199 size = 0; 8200 8201 while (!eof) { 8202 ret = nfs42_proc_listxattrs(inode, buf, buflen, 8203 &cookie, &eof); 8204 if (ret < 0) 8205 return ret; 8206 8207 if (list_len) { 8208 buf += ret; 8209 buflen -= ret; 8210 } 8211 size += ret; 8212 } 8213 8214 if (list_len) 8215 nfs4_xattr_cache_set_list(inode, list, size); 8216 8217 return size; 8218 } 8219 8220 #else 8221 8222 static ssize_t 8223 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) 8224 { 8225 return 0; 8226 } 8227 #endif /* CONFIG_NFS_V4_2 */ 8228 8229 /* 8230 * nfs_fhget will use either the mounted_on_fileid or the fileid 8231 */ 8232 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) 8233 { 8234 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || 8235 (fattr->valid & NFS_ATTR_FATTR_FILEID)) && 8236 (fattr->valid & NFS_ATTR_FATTR_FSID) && 8237 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS))) 8238 return; 8239 8240 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 8241 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL; 8242 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 8243 fattr->nlink = 2; 8244 } 8245 8246 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 8247 const struct qstr *name, 8248 struct nfs4_fs_locations *fs_locations, 8249 struct page *page) 8250 { 8251 struct nfs_server *server = NFS_SERVER(dir); 8252 u32 bitmask[3]; 8253 struct nfs4_fs_locations_arg args = { 8254 .dir_fh = NFS_FH(dir), 8255 .name = name, 8256 .page = page, 8257 .bitmask = bitmask, 8258 }; 8259 struct nfs4_fs_locations_res res = { 8260 .fs_locations = fs_locations, 8261 }; 8262 struct rpc_message msg = { 8263 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 8264 .rpc_argp = &args, 8265 .rpc_resp = &res, 8266 }; 8267 int status; 8268 8269 dprintk("%s: start\n", __func__); 8270 8271 bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS; 8272 bitmask[1] = nfs4_fattr_bitmap[1]; 8273 8274 /* Ask for the fileid of the absent filesystem if mounted_on_fileid 8275 * is not supported */ 8276 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) 8277 bitmask[0] &= ~FATTR4_WORD0_FILEID; 8278 else 8279 bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID; 8280 8281 nfs_fattr_init(fs_locations->fattr); 8282 fs_locations->server = server; 8283 fs_locations->nlocations = 0; 8284 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0); 8285 dprintk("%s: returned status = %d\n", __func__, status); 8286 return status; 8287 } 8288 8289 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 8290 const struct qstr *name, 8291 struct nfs4_fs_locations *fs_locations, 8292 struct page *page) 8293 { 8294 struct nfs4_exception exception = { 8295 .interruptible = true, 8296 }; 8297 int err; 8298 do { 8299 err = _nfs4_proc_fs_locations(client, dir, name, 8300 fs_locations, page); 8301 trace_nfs4_get_fs_locations(dir, name, err); 8302 err = nfs4_handle_exception(NFS_SERVER(dir), err, 8303 &exception); 8304 } while (exception.retry); 8305 return err; 8306 } 8307 8308 /* 8309 * This operation also signals the server that this client is 8310 * performing migration recovery. The server can stop returning 8311 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is 8312 * appended to this compound to identify the client ID which is 8313 * performing recovery. 
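 *
 * With args.migration and args.renew both set, the encoded compound is
 * roughly (a sketch of the layout produced by the fs_locations encoder
 * in nfs4xdr.c):
 *
 *	PUTFH(fh) ; GETATTR(fsid, fs_locations) ; RENEW(clientid)
 *
 * whereas the ordinary fs_locations path would encode a LOOKUP and no
 * RENEW.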
8314 */ 8315 static int _nfs40_proc_get_locations(struct nfs_server *server, 8316 struct nfs_fh *fhandle, 8317 struct nfs4_fs_locations *locations, 8318 struct page *page, const struct cred *cred) 8319 { 8320 struct rpc_clnt *clnt = server->client; 8321 u32 bitmask[2] = { 8322 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 8323 }; 8324 struct nfs4_fs_locations_arg args = { 8325 .clientid = server->nfs_client->cl_clientid, 8326 .fh = fhandle, 8327 .page = page, 8328 .bitmask = bitmask, 8329 .migration = 1, /* skip LOOKUP */ 8330 .renew = 1, /* append RENEW */ 8331 }; 8332 struct nfs4_fs_locations_res res = { 8333 .fs_locations = locations, 8334 .migration = 1, 8335 .renew = 1, 8336 }; 8337 struct rpc_message msg = { 8338 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 8339 .rpc_argp = &args, 8340 .rpc_resp = &res, 8341 .rpc_cred = cred, 8342 }; 8343 unsigned long now = jiffies; 8344 int status; 8345 8346 nfs_fattr_init(locations->fattr); 8347 locations->server = server; 8348 locations->nlocations = 0; 8349 8350 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8351 status = nfs4_call_sync_sequence(clnt, server, &msg, 8352 &args.seq_args, &res.seq_res); 8353 if (status) 8354 return status; 8355 8356 renew_lease(server, now); 8357 return 0; 8358 } 8359 8360 #ifdef CONFIG_NFS_V4_1 8361 8362 /* 8363 * This operation also signals the server that this client is 8364 * performing migration recovery. The server can stop asserting 8365 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID 8366 * performing this operation is identified in the SEQUENCE 8367 * operation in this compound. 8368 * 8369 * When the client supports GETATTR(fs_locations_info), it can 8370 * be plumbed in here. 8371 */ 8372 static int _nfs41_proc_get_locations(struct nfs_server *server, 8373 struct nfs_fh *fhandle, 8374 struct nfs4_fs_locations *locations, 8375 struct page *page, const struct cred *cred) 8376 { 8377 struct rpc_clnt *clnt = server->client; 8378 u32 bitmask[2] = { 8379 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 8380 }; 8381 struct nfs4_fs_locations_arg args = { 8382 .fh = fhandle, 8383 .page = page, 8384 .bitmask = bitmask, 8385 .migration = 1, /* skip LOOKUP */ 8386 }; 8387 struct nfs4_fs_locations_res res = { 8388 .fs_locations = locations, 8389 .migration = 1, 8390 }; 8391 struct rpc_message msg = { 8392 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 8393 .rpc_argp = &args, 8394 .rpc_resp = &res, 8395 .rpc_cred = cred, 8396 }; 8397 struct nfs4_call_sync_data data = { 8398 .seq_server = server, 8399 .seq_args = &args.seq_args, 8400 .seq_res = &res.seq_res, 8401 }; 8402 struct rpc_task_setup task_setup_data = { 8403 .rpc_client = clnt, 8404 .rpc_message = &msg, 8405 .callback_ops = server->nfs_client->cl_mvops->call_sync_ops, 8406 .callback_data = &data, 8407 .flags = RPC_TASK_NO_ROUND_ROBIN, 8408 }; 8409 int status; 8410 8411 nfs_fattr_init(locations->fattr); 8412 locations->server = server; 8413 locations->nlocations = 0; 8414 8415 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8416 status = nfs4_call_sync_custom(&task_setup_data); 8417 if (status == NFS4_OK && 8418 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 8419 status = -NFS4ERR_LEASE_MOVED; 8420 return status; 8421 } 8422 8423 #endif /* CONFIG_NFS_V4_1 */ 8424 8425 /** 8426 * nfs4_proc_get_locations - discover locations for a migrated FSID 8427 * @server: pointer to nfs_server to process 8428 * @fhandle: pointer to the kernel NFS client file handle 8429 * @locations: result of query 8430 * 
@page: buffer 8431 * @cred: credential to use for this operation 8432 * 8433 * Returns NFS4_OK on success, a negative NFS4ERR status code if the 8434 * operation failed, or a negative errno if a local error occurred. 8435 * 8436 * On success, "locations" is filled in, but if the server has 8437 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not 8438 * asserted. 8439 * 8440 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases 8441 * from this client that require migration recovery. 8442 */ 8443 int nfs4_proc_get_locations(struct nfs_server *server, 8444 struct nfs_fh *fhandle, 8445 struct nfs4_fs_locations *locations, 8446 struct page *page, const struct cred *cred) 8447 { 8448 struct nfs_client *clp = server->nfs_client; 8449 const struct nfs4_mig_recovery_ops *ops = 8450 clp->cl_mvops->mig_recovery_ops; 8451 struct nfs4_exception exception = { 8452 .interruptible = true, 8453 }; 8454 int status; 8455 8456 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 8457 (unsigned long long)server->fsid.major, 8458 (unsigned long long)server->fsid.minor, 8459 clp->cl_hostname); 8460 nfs_display_fhandle(fhandle, __func__); 8461 8462 do { 8463 status = ops->get_locations(server, fhandle, locations, page, 8464 cred); 8465 if (status != -NFS4ERR_DELAY) 8466 break; 8467 nfs4_handle_exception(server, status, &exception); 8468 } while (exception.retry); 8469 return status; 8470 } 8471 8472 /* 8473 * This operation also signals the server that this client is 8474 * performing "lease moved" recovery. The server can stop 8475 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation 8476 * is appended to this compound to identify the client ID which is 8477 * performing recovery. 8478 */ 8479 static int _nfs40_proc_fsid_present(struct inode *inode, const struct cred *cred) 8480 { 8481 struct nfs_server *server = NFS_SERVER(inode); 8482 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 8483 struct rpc_clnt *clnt = server->client; 8484 struct nfs4_fsid_present_arg args = { 8485 .fh = NFS_FH(inode), 8486 .clientid = clp->cl_clientid, 8487 .renew = 1, /* append RENEW */ 8488 }; 8489 struct nfs4_fsid_present_res res = { 8490 .renew = 1, 8491 }; 8492 struct rpc_message msg = { 8493 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 8494 .rpc_argp = &args, 8495 .rpc_resp = &res, 8496 .rpc_cred = cred, 8497 }; 8498 unsigned long now = jiffies; 8499 int status; 8500 8501 res.fh = nfs_alloc_fhandle(); 8502 if (res.fh == NULL) 8503 return -ENOMEM; 8504 8505 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8506 status = nfs4_call_sync_sequence(clnt, server, &msg, 8507 &args.seq_args, &res.seq_res); 8508 nfs_free_fhandle(res.fh); 8509 if (status) 8510 return status; 8511 8512 do_renew_lease(clp, now); 8513 return 0; 8514 } 8515 8516 #ifdef CONFIG_NFS_V4_1 8517 8518 /* 8519 * This operation also signals the server that this client is 8520 * performing "lease moved" recovery. The server can stop asserting 8521 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing 8522 * this operation is identified in the SEQUENCE operation in this 8523 * compound. 
8524 */ 8525 static int _nfs41_proc_fsid_present(struct inode *inode, const struct cred *cred) 8526 { 8527 struct nfs_server *server = NFS_SERVER(inode); 8528 struct rpc_clnt *clnt = server->client; 8529 struct nfs4_fsid_present_arg args = { 8530 .fh = NFS_FH(inode), 8531 }; 8532 struct nfs4_fsid_present_res res = { 8533 }; 8534 struct rpc_message msg = { 8535 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 8536 .rpc_argp = &args, 8537 .rpc_resp = &res, 8538 .rpc_cred = cred, 8539 }; 8540 int status; 8541 8542 res.fh = nfs_alloc_fhandle(); 8543 if (res.fh == NULL) 8544 return -ENOMEM; 8545 8546 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8547 status = nfs4_call_sync_sequence(clnt, server, &msg, 8548 &args.seq_args, &res.seq_res); 8549 nfs_free_fhandle(res.fh); 8550 if (status == NFS4_OK && 8551 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 8552 status = -NFS4ERR_LEASE_MOVED; 8553 return status; 8554 } 8555 8556 #endif /* CONFIG_NFS_V4_1 */ 8557 8558 /** 8559 * nfs4_proc_fsid_present - Is this FSID present or absent on server? 8560 * @inode: inode on FSID to check 8561 * @cred: credential to use for this operation 8562 * 8563 * Server indicates whether the FSID is present, moved, or not 8564 * recognized. This operation is necessary to clear a LEASE_MOVED 8565 * condition for this client ID. 8566 * 8567 * Returns NFS4_OK if the FSID is present on this server, 8568 * -NFS4ERR_MOVED if the FSID is no longer present, a negative 8569 * NFS4ERR code if some error occurred on the server, or a 8570 * negative errno if a local failure occurred. 8571 */ 8572 int nfs4_proc_fsid_present(struct inode *inode, const struct cred *cred) 8573 { 8574 struct nfs_server *server = NFS_SERVER(inode); 8575 struct nfs_client *clp = server->nfs_client; 8576 const struct nfs4_mig_recovery_ops *ops = 8577 clp->cl_mvops->mig_recovery_ops; 8578 struct nfs4_exception exception = { 8579 .interruptible = true, 8580 }; 8581 int status; 8582 8583 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 8584 (unsigned long long)server->fsid.major, 8585 (unsigned long long)server->fsid.minor, 8586 clp->cl_hostname); 8587 nfs_display_fhandle(NFS_FH(inode), __func__); 8588 8589 do { 8590 status = ops->fsid_present(inode, cred); 8591 if (status != -NFS4ERR_DELAY) 8592 break; 8593 nfs4_handle_exception(server, status, &exception); 8594 } while (exception.retry); 8595 return status; 8596 } 8597 8598 /* 8599 * If 'use_integrity' is true and the state management nfs_client 8600 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient 8601 * and the machine credential as per RFC3530bis and RFC5661 Security 8602 * Considerations sections. Otherwise, just use the user cred with the 8603 * filesystem's rpc_client.
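 *
 * Concretely, the integrity-protected attempt below switches to
 *
 *	clnt = clp->cl_rpcclient;
 *	msg.rpc_cred = nfs4_get_clid_cred(clp);
 *
 * while the fallback keeps NFS_SERVER(dir)->client and leaves
 * msg.rpc_cred unset so the user's credential is used.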
8604 */ 8605 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity) 8606 { 8607 int status; 8608 struct rpc_clnt *clnt = NFS_SERVER(dir)->client; 8609 struct nfs_client *clp = NFS_SERVER(dir)->nfs_client; 8610 struct nfs4_secinfo_arg args = { 8611 .dir_fh = NFS_FH(dir), 8612 .name = name, 8613 }; 8614 struct nfs4_secinfo_res res = { 8615 .flavors = flavors, 8616 }; 8617 struct rpc_message msg = { 8618 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO], 8619 .rpc_argp = &args, 8620 .rpc_resp = &res, 8621 }; 8622 struct nfs4_call_sync_data data = { 8623 .seq_server = NFS_SERVER(dir), 8624 .seq_args = &args.seq_args, 8625 .seq_res = &res.seq_res, 8626 }; 8627 struct rpc_task_setup task_setup = { 8628 .rpc_client = clnt, 8629 .rpc_message = &msg, 8630 .callback_ops = clp->cl_mvops->call_sync_ops, 8631 .callback_data = &data, 8632 .flags = RPC_TASK_NO_ROUND_ROBIN, 8633 }; 8634 const struct cred *cred = NULL; 8635 8636 if (use_integrity) { 8637 clnt = clp->cl_rpcclient; 8638 task_setup.rpc_client = clnt; 8639 8640 cred = nfs4_get_clid_cred(clp); 8641 msg.rpc_cred = cred; 8642 } 8643 8644 dprintk("NFS call secinfo %s\n", name->name); 8645 8646 nfs4_state_protect(clp, NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg); 8647 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 8648 status = nfs4_call_sync_custom(&task_setup); 8649 8650 dprintk("NFS reply secinfo: %d\n", status); 8651 8652 put_cred(cred); 8653 return status; 8654 } 8655 8656 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, 8657 struct nfs4_secinfo_flavors *flavors) 8658 { 8659 struct nfs4_exception exception = { 8660 .interruptible = true, 8661 }; 8662 int err; 8663 do { 8664 err = -NFS4ERR_WRONGSEC; 8665 8666 /* try to use integrity protection with machine cred */ 8667 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client)) 8668 err = _nfs4_proc_secinfo(dir, name, flavors, true); 8669 8670 /* 8671 * if unable to use integrity protection, or SECINFO with 8672 * integrity protection returns NFS4ERR_WRONGSEC (which is 8673 * disallowed by spec, but exists in deployed servers) use 8674 * the current filesystem's rpc_client and the user cred. 8675 */ 8676 if (err == -NFS4ERR_WRONGSEC) 8677 err = _nfs4_proc_secinfo(dir, name, flavors, false); 8678 8679 trace_nfs4_secinfo(dir, name, err); 8680 err = nfs4_handle_exception(NFS_SERVER(dir), err, 8681 &exception); 8682 } while (exception.retry); 8683 return err; 8684 } 8685 8686 #ifdef CONFIG_NFS_V4_1 8687 /* 8688 * Check the exchange flags returned by the server for invalid flags, having 8689 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or 8690 * DS flags set. 
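 *
 * For example, a reply carrying both EXCHGID4_FLAG_USE_PNFS_MDS and
 * EXCHGID4_FLAG_USE_NON_PNFS is rejected, while EXCHGID4_FLAG_USE_PNFS_MDS
 * alone (optionally combined with EXCHGID4_FLAG_USE_PNFS_DS) is accepted.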
8691 */ 8692 static int nfs4_check_cl_exchange_flags(u32 flags, u32 version) 8693 { 8694 if (version >= 2 && (flags & ~EXCHGID4_2_FLAG_MASK_R)) 8695 goto out_inval; 8696 else if (version < 2 && (flags & ~EXCHGID4_FLAG_MASK_R)) 8697 goto out_inval; 8698 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && 8699 (flags & EXCHGID4_FLAG_USE_NON_PNFS)) 8700 goto out_inval; 8701 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) 8702 goto out_inval; 8703 return NFS_OK; 8704 out_inval: 8705 return -NFS4ERR_INVAL; 8706 } 8707 8708 static bool 8709 nfs41_same_server_scope(struct nfs41_server_scope *a, 8710 struct nfs41_server_scope *b) 8711 { 8712 if (a->server_scope_sz != b->server_scope_sz) 8713 return false; 8714 return memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0; 8715 } 8716 8717 static void 8718 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) 8719 { 8720 struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp; 8721 struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp; 8722 struct nfs_client *clp = args->client; 8723 8724 switch (task->tk_status) { 8725 case -NFS4ERR_BADSESSION: 8726 case -NFS4ERR_DEADSESSION: 8727 nfs4_schedule_session_recovery(clp->cl_session, 8728 task->tk_status); 8729 return; 8730 } 8731 if (args->dir == NFS4_CDFC4_FORE_OR_BOTH && 8732 res->dir != NFS4_CDFS4_BOTH) { 8733 rpc_task_close_connection(task); 8734 if (args->retries++ < MAX_BIND_CONN_TO_SESSION_RETRIES) 8735 rpc_restart_call(task); 8736 } 8737 } 8738 8739 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = { 8740 .rpc_call_done = nfs4_bind_one_conn_to_session_done, 8741 }; 8742 8743 /* 8744 * nfs4_proc_bind_one_conn_to_session() 8745 * 8746 * The 4.1 client currently uses the same TCP connection for the 8747 * fore and backchannel. 
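 *
 * The request below therefore asks for NFS4_CDFC4_FORE_OR_BOTH and, when
 * the server does not grant NFS4_CDFS4_BOTH, closes the connection and
 * retries a bounded number of times.  Transports other than
 * clnt->cl_xprt, or sessions created without SESSION4_BACK_CHAN, are
 * bound fore-channel only (NFS4_CDFC4_FORE).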
8748 */ 8749 static 8750 int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt, 8751 struct rpc_xprt *xprt, 8752 struct nfs_client *clp, 8753 const struct cred *cred) 8754 { 8755 int status; 8756 struct nfs41_bind_conn_to_session_args args = { 8757 .client = clp, 8758 .dir = NFS4_CDFC4_FORE_OR_BOTH, 8759 .retries = 0, 8760 }; 8761 struct nfs41_bind_conn_to_session_res res; 8762 struct rpc_message msg = { 8763 .rpc_proc = 8764 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION], 8765 .rpc_argp = &args, 8766 .rpc_resp = &res, 8767 .rpc_cred = cred, 8768 }; 8769 struct rpc_task_setup task_setup_data = { 8770 .rpc_client = clnt, 8771 .rpc_xprt = xprt, 8772 .callback_ops = &nfs4_bind_one_conn_to_session_ops, 8773 .rpc_message = &msg, 8774 .flags = RPC_TASK_TIMEOUT, 8775 }; 8776 struct rpc_task *task; 8777 8778 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id); 8779 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN)) 8780 args.dir = NFS4_CDFC4_FORE; 8781 8782 /* Do not set the backchannel flag unless this is clnt->cl_xprt */ 8783 if (xprt != rcu_access_pointer(clnt->cl_xprt)) 8784 args.dir = NFS4_CDFC4_FORE; 8785 8786 task = rpc_run_task(&task_setup_data); 8787 if (!IS_ERR(task)) { 8788 status = task->tk_status; 8789 rpc_put_task(task); 8790 } else 8791 status = PTR_ERR(task); 8792 trace_nfs4_bind_conn_to_session(clp, status); 8793 if (status == 0) { 8794 if (memcmp(res.sessionid.data, 8795 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { 8796 dprintk("NFS: %s: Session ID mismatch\n", __func__); 8797 return -EIO; 8798 } 8799 if ((res.dir & args.dir) != res.dir || res.dir == 0) { 8800 dprintk("NFS: %s: Unexpected direction from server\n", 8801 __func__); 8802 return -EIO; 8803 } 8804 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) { 8805 dprintk("NFS: %s: Server returned RDMA mode = true\n", 8806 __func__); 8807 return -EIO; 8808 } 8809 } 8810 8811 return status; 8812 } 8813 8814 struct rpc_bind_conn_calldata { 8815 struct nfs_client *clp; 8816 const struct cred *cred; 8817 }; 8818 8819 static int 8820 nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt, 8821 struct rpc_xprt *xprt, 8822 void *calldata) 8823 { 8824 struct rpc_bind_conn_calldata *p = calldata; 8825 8826 return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred); 8827 } 8828 8829 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, const struct cred *cred) 8830 { 8831 struct rpc_bind_conn_calldata data = { 8832 .clp = clp, 8833 .cred = cred, 8834 }; 8835 return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient, 8836 nfs4_proc_bind_conn_to_session_callback, &data); 8837 } 8838 8839 /* 8840 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map 8841 * and operations we'd like to see to enable certain features in the allow map 8842 */ 8843 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = { 8844 .how = SP4_MACH_CRED, 8845 .enforce.u.words = { 8846 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 8847 1 << (OP_EXCHANGE_ID - 32) | 8848 1 << (OP_CREATE_SESSION - 32) | 8849 1 << (OP_DESTROY_SESSION - 32) | 8850 1 << (OP_DESTROY_CLIENTID - 32) 8851 }, 8852 .allow.u.words = { 8853 [0] = 1 << (OP_CLOSE) | 8854 1 << (OP_OPEN_DOWNGRADE) | 8855 1 << (OP_LOCKU) | 8856 1 << (OP_DELEGRETURN) | 8857 1 << (OP_COMMIT), 8858 [1] = 1 << (OP_SECINFO - 32) | 8859 1 << (OP_SECINFO_NO_NAME - 32) | 8860 1 << (OP_LAYOUTRETURN - 32) | 8861 1 << (OP_TEST_STATEID - 32) | 8862 1 << (OP_FREE_STATEID - 32) | 8863 1 << (OP_WRITE - 32) 8864 } 8865 }; 8866 8867 
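/*
 * The op map words above are indexed by NFSv4 operation number: word[0]
 * covers operations 0..31 and word[1] operations 32..63, so, for example,
 * OP_EXCHANGE_ID (42) lands in word[1] as 1 << (42 - 32).
 * nfs4_sp4_select_mode() below reads the same maps back with test_bit()
 * on the .u.longs view of the bitmap.
 */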
/* 8868 * Select the state protection mode for client `clp' given the server results 8869 * from exchange_id in `sp'. 8870 * 8871 * Returns 0 on success, negative errno otherwise. 8872 */ 8873 static int nfs4_sp4_select_mode(struct nfs_client *clp, 8874 struct nfs41_state_protection *sp) 8875 { 8876 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = { 8877 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 8878 1 << (OP_EXCHANGE_ID - 32) | 8879 1 << (OP_CREATE_SESSION - 32) | 8880 1 << (OP_DESTROY_SESSION - 32) | 8881 1 << (OP_DESTROY_CLIENTID - 32) 8882 }; 8883 unsigned long flags = 0; 8884 unsigned int i; 8885 int ret = 0; 8886 8887 if (sp->how == SP4_MACH_CRED) { 8888 /* Print state protect result */ 8889 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n"); 8890 for (i = 0; i <= LAST_NFS4_OP; i++) { 8891 if (test_bit(i, sp->enforce.u.longs)) 8892 dfprintk(MOUNT, " enforce op %d\n", i); 8893 if (test_bit(i, sp->allow.u.longs)) 8894 dfprintk(MOUNT, " allow op %d\n", i); 8895 } 8896 8897 /* make sure nothing is on enforce list that isn't supported */ 8898 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) { 8899 if (sp->enforce.u.words[i] & ~supported_enforce[i]) { 8900 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 8901 ret = -EINVAL; 8902 goto out; 8903 } 8904 } 8905 8906 /* 8907 * Minimal mode - state operations are allowed to use machine 8908 * credential. Note this already happens by default, so the 8909 * client doesn't have to do anything more than the negotiation. 8910 * 8911 * NOTE: we don't care if EXCHANGE_ID is in the list - 8912 * we're already using the machine cred for exchange_id 8913 * and will never use a different cred. 8914 */ 8915 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) && 8916 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) && 8917 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) && 8918 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) { 8919 dfprintk(MOUNT, "sp4_mach_cred:\n"); 8920 dfprintk(MOUNT, " minimal mode enabled\n"); 8921 __set_bit(NFS_SP4_MACH_CRED_MINIMAL, &flags); 8922 } else { 8923 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 8924 ret = -EINVAL; 8925 goto out; 8926 } 8927 8928 if (test_bit(OP_CLOSE, sp->allow.u.longs) && 8929 test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) && 8930 test_bit(OP_DELEGRETURN, sp->allow.u.longs) && 8931 test_bit(OP_LOCKU, sp->allow.u.longs)) { 8932 dfprintk(MOUNT, " cleanup mode enabled\n"); 8933 __set_bit(NFS_SP4_MACH_CRED_CLEANUP, &flags); 8934 } 8935 8936 if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) { 8937 dfprintk(MOUNT, " pnfs cleanup mode enabled\n"); 8938 __set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP, &flags); 8939 } 8940 8941 if (test_bit(OP_SECINFO, sp->allow.u.longs) && 8942 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) { 8943 dfprintk(MOUNT, " secinfo mode enabled\n"); 8944 __set_bit(NFS_SP4_MACH_CRED_SECINFO, &flags); 8945 } 8946 8947 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) && 8948 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) { 8949 dfprintk(MOUNT, " stateid mode enabled\n"); 8950 __set_bit(NFS_SP4_MACH_CRED_STATEID, &flags); 8951 } 8952 8953 if (test_bit(OP_WRITE, sp->allow.u.longs)) { 8954 dfprintk(MOUNT, " write mode enabled\n"); 8955 __set_bit(NFS_SP4_MACH_CRED_WRITE, &flags); 8956 } 8957 8958 if (test_bit(OP_COMMIT, sp->allow.u.longs)) { 8959 dfprintk(MOUNT, " commit mode enabled\n"); 8960 __set_bit(NFS_SP4_MACH_CRED_COMMIT, &flags); 8961 } 8962 } 8963 out: 8964 clp->cl_sp4_flags = flags; 8965 return ret; 8966 } 8967 8968 struct nfs41_exchange_id_data { 8969 struct 
nfs41_exchange_id_res res; 8970 struct nfs41_exchange_id_args args; 8971 }; 8972 8973 static void nfs4_exchange_id_release(void *data) 8974 { 8975 struct nfs41_exchange_id_data *cdata = 8976 (struct nfs41_exchange_id_data *)data; 8977 8978 nfs_put_client(cdata->args.client); 8979 kfree(cdata->res.impl_id); 8980 kfree(cdata->res.server_scope); 8981 kfree(cdata->res.server_owner); 8982 kfree(cdata); 8983 } 8984 8985 static const struct rpc_call_ops nfs4_exchange_id_call_ops = { 8986 .rpc_release = nfs4_exchange_id_release, 8987 }; 8988 8989 /* 8990 * _nfs4_proc_exchange_id() 8991 * 8992 * Wrapper for EXCHANGE_ID operation. 8993 */ 8994 static struct rpc_task * 8995 nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred, 8996 u32 sp4_how, struct rpc_xprt *xprt) 8997 { 8998 struct rpc_message msg = { 8999 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], 9000 .rpc_cred = cred, 9001 }; 9002 struct rpc_task_setup task_setup_data = { 9003 .rpc_client = clp->cl_rpcclient, 9004 .callback_ops = &nfs4_exchange_id_call_ops, 9005 .rpc_message = &msg, 9006 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN, 9007 }; 9008 struct nfs41_exchange_id_data *calldata; 9009 int status; 9010 9011 if (!refcount_inc_not_zero(&clp->cl_count)) 9012 return ERR_PTR(-EIO); 9013 9014 status = -ENOMEM; 9015 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 9016 if (!calldata) 9017 goto out; 9018 9019 nfs4_init_boot_verifier(clp, &calldata->args.verifier); 9020 9021 status = nfs4_init_uniform_client_string(clp); 9022 if (status) 9023 goto out_calldata; 9024 9025 calldata->res.server_owner = kzalloc(sizeof(struct nfs41_server_owner), 9026 GFP_NOFS); 9027 status = -ENOMEM; 9028 if (unlikely(calldata->res.server_owner == NULL)) 9029 goto out_calldata; 9030 9031 calldata->res.server_scope = kzalloc(sizeof(struct nfs41_server_scope), 9032 GFP_NOFS); 9033 if (unlikely(calldata->res.server_scope == NULL)) 9034 goto out_server_owner; 9035 9036 calldata->res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS); 9037 if (unlikely(calldata->res.impl_id == NULL)) 9038 goto out_server_scope; 9039 9040 switch (sp4_how) { 9041 case SP4_NONE: 9042 calldata->args.state_protect.how = SP4_NONE; 9043 break; 9044 9045 case SP4_MACH_CRED: 9046 calldata->args.state_protect = nfs4_sp4_mach_cred_request; 9047 break; 9048 9049 default: 9050 /* unsupported! 
*/ 9051 WARN_ON_ONCE(1); 9052 status = -EINVAL; 9053 goto out_impl_id; 9054 } 9055 if (xprt) { 9056 task_setup_data.rpc_xprt = xprt; 9057 task_setup_data.flags |= RPC_TASK_SOFTCONN; 9058 memcpy(calldata->args.verifier.data, clp->cl_confirm.data, 9059 sizeof(calldata->args.verifier.data)); 9060 } 9061 calldata->args.client = clp; 9062 calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 9063 EXCHGID4_FLAG_BIND_PRINC_STATEID; 9064 #ifdef CONFIG_NFS_V4_1_MIGRATION 9065 calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR; 9066 #endif 9067 if (test_bit(NFS_CS_PNFS, &clp->cl_flags)) 9068 calldata->args.flags |= EXCHGID4_FLAG_USE_PNFS_DS; 9069 msg.rpc_argp = &calldata->args; 9070 msg.rpc_resp = &calldata->res; 9071 task_setup_data.callback_data = calldata; 9072 9073 return rpc_run_task(&task_setup_data); 9074 9075 out_impl_id: 9076 kfree(calldata->res.impl_id); 9077 out_server_scope: 9078 kfree(calldata->res.server_scope); 9079 out_server_owner: 9080 kfree(calldata->res.server_owner); 9081 out_calldata: 9082 kfree(calldata); 9083 out: 9084 nfs_put_client(clp); 9085 return ERR_PTR(status); 9086 } 9087 9088 /* 9089 * _nfs4_proc_exchange_id() 9090 * 9091 * Wrapper for EXCHANGE_ID operation. 9092 */ 9093 static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred, 9094 u32 sp4_how) 9095 { 9096 struct rpc_task *task; 9097 struct nfs41_exchange_id_args *argp; 9098 struct nfs41_exchange_id_res *resp; 9099 unsigned long now = jiffies; 9100 int status; 9101 9102 task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL); 9103 if (IS_ERR(task)) 9104 return PTR_ERR(task); 9105 9106 argp = task->tk_msg.rpc_argp; 9107 resp = task->tk_msg.rpc_resp; 9108 status = task->tk_status; 9109 if (status != 0) 9110 goto out; 9111 9112 status = nfs4_check_cl_exchange_flags(resp->flags, 9113 clp->cl_mvops->minor_version); 9114 if (status != 0) 9115 goto out; 9116 9117 status = nfs4_sp4_select_mode(clp, &resp->state_protect); 9118 if (status != 0) 9119 goto out; 9120 9121 do_renew_lease(clp, now); 9122 9123 clp->cl_clientid = resp->clientid; 9124 clp->cl_exchange_flags = resp->flags; 9125 clp->cl_seqid = resp->seqid; 9126 /* Client ID is not confirmed */ 9127 if (!(resp->flags & EXCHGID4_FLAG_CONFIRMED_R)) 9128 clear_bit(NFS4_SESSION_ESTABLISHED, 9129 &clp->cl_session->session_state); 9130 9131 if (clp->cl_serverscope != NULL && 9132 !nfs41_same_server_scope(clp->cl_serverscope, 9133 resp->server_scope)) { 9134 dprintk("%s: server_scope mismatch detected\n", 9135 __func__); 9136 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); 9137 } 9138 9139 swap(clp->cl_serverowner, resp->server_owner); 9140 swap(clp->cl_serverscope, resp->server_scope); 9141 swap(clp->cl_implid, resp->impl_id); 9142 9143 /* Save the EXCHANGE_ID verifier session trunk tests */ 9144 memcpy(clp->cl_confirm.data, argp->verifier.data, 9145 sizeof(clp->cl_confirm.data)); 9146 out: 9147 trace_nfs4_exchange_id(clp, status); 9148 rpc_put_task(task); 9149 return status; 9150 } 9151 9152 /* 9153 * nfs4_proc_exchange_id() 9154 * 9155 * Returns zero, a negative errno, or a negative NFS4ERR status code. 9156 * 9157 * Since the clientid has expired, all compounds using sessions 9158 * associated with the stale clientid will be returning 9159 * NFS4ERR_BADSESSION in the sequence operation, and will therefore 9160 * be in some phase of session reset. 9161 * 9162 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used. 
9163 */ 9164 int nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred) 9165 { 9166 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor; 9167 int status; 9168 9169 /* try SP4_MACH_CRED if krb5i/p */ 9170 if (authflavor == RPC_AUTH_GSS_KRB5I || 9171 authflavor == RPC_AUTH_GSS_KRB5P) { 9172 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED); 9173 if (!status) 9174 return 0; 9175 } 9176 9177 /* try SP4_NONE */ 9178 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE); 9179 } 9180 9181 /** 9182 * nfs4_test_session_trunk 9183 * 9184 * This is an add_xprt_test() test function called from 9185 * rpc_clnt_setup_test_and_add_xprt. 9186 * 9187 * The rpc_xprt_switch is referenced by rpc_clnt_setup_test_and_add_xprt 9188 * and is dereferenced in nfs4_exchange_id_release 9189 * 9190 * Upon success, the new transport is added to the rpc_clnt 9191 * 9192 * @clnt: struct rpc_clnt to get new transport 9193 * @xprt: the rpc_xprt to test 9194 * @data: call data for _nfs4_proc_exchange_id. 9195 */ 9196 void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt, 9197 void *data) 9198 { 9199 struct nfs4_add_xprt_data *adata = data; 9200 struct rpc_task *task; 9201 int status; 9202 9203 u32 sp4_how; 9204 9205 dprintk("--> %s try %s\n", __func__, 9206 xprt->address_strings[RPC_DISPLAY_ADDR]); 9207 9208 sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED); 9209 9210 try_again: 9211 /* Test connection for session trunking. Async exchange_id call */ 9212 task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt); 9213 if (IS_ERR(task)) 9214 return; 9215 9216 status = task->tk_status; 9217 if (status == 0) { 9218 status = nfs4_detect_session_trunking(adata->clp, 9219 task->tk_msg.rpc_resp, xprt); 9220 trace_nfs4_trunked_exchange_id(adata->clp, 9221 xprt->address_strings[RPC_DISPLAY_ADDR], status); 9222 } 9223 if (status == 0) 9224 rpc_clnt_xprt_switch_add_xprt(clnt, xprt); 9225 else if (status != -NFS4ERR_DELAY && rpc_clnt_xprt_switch_has_addr(clnt, 9226 (struct sockaddr *)&xprt->addr)) 9227 rpc_clnt_xprt_switch_remove_xprt(clnt, xprt); 9228 9229 rpc_put_task(task); 9230 if (status == -NFS4ERR_DELAY) { 9231 ssleep(1); 9232 goto try_again; 9233 } 9234 } 9235 EXPORT_SYMBOL_GPL(nfs4_test_session_trunk); 9236 9237 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp, 9238 const struct cred *cred) 9239 { 9240 struct rpc_message msg = { 9241 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID], 9242 .rpc_argp = clp, 9243 .rpc_cred = cred, 9244 }; 9245 int status; 9246 9247 status = rpc_call_sync(clp->cl_rpcclient, &msg, 9248 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 9249 trace_nfs4_destroy_clientid(clp, status); 9250 if (status) 9251 dprintk("NFS: Got error %d from the server %s on " 9252 "DESTROY_CLIENTID.", status, clp->cl_hostname); 9253 return status; 9254 } 9255 9256 static int nfs4_proc_destroy_clientid(struct nfs_client *clp, 9257 const struct cred *cred) 9258 { 9259 unsigned int loop; 9260 int ret; 9261 9262 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) { 9263 ret = _nfs4_proc_destroy_clientid(clp, cred); 9264 switch (ret) { 9265 case -NFS4ERR_DELAY: 9266 case -NFS4ERR_CLIENTID_BUSY: 9267 ssleep(1); 9268 break; 9269 default: 9270 return ret; 9271 } 9272 } 9273 return 0; 9274 } 9275 9276 int nfs4_destroy_clientid(struct nfs_client *clp) 9277 { 9278 const struct cred *cred; 9279 int ret = 0; 9280 9281 if (clp->cl_mvops->minor_version < 1) 9282 goto out; 9283 if (clp->cl_exchange_flags == 0) 9284 goto out; 9285 if
(clp->cl_preserve_clid) 9286 goto out; 9287 cred = nfs4_get_clid_cred(clp); 9288 ret = nfs4_proc_destroy_clientid(clp, cred); 9289 put_cred(cred); 9290 switch (ret) { 9291 case 0: 9292 case -NFS4ERR_STALE_CLIENTID: 9293 clp->cl_exchange_flags = 0; 9294 } 9295 out: 9296 return ret; 9297 } 9298 9299 #endif /* CONFIG_NFS_V4_1 */ 9300 9301 struct nfs4_get_lease_time_data { 9302 struct nfs4_get_lease_time_args *args; 9303 struct nfs4_get_lease_time_res *res; 9304 struct nfs_client *clp; 9305 }; 9306 9307 static void nfs4_get_lease_time_prepare(struct rpc_task *task, 9308 void *calldata) 9309 { 9310 struct nfs4_get_lease_time_data *data = 9311 (struct nfs4_get_lease_time_data *)calldata; 9312 9313 /* just setup sequence, do not trigger session recovery 9314 since we're invoked within one */ 9315 nfs4_setup_sequence(data->clp, 9316 &data->args->la_seq_args, 9317 &data->res->lr_seq_res, 9318 task); 9319 } 9320 9321 /* 9322 * Called from nfs4_state_manager thread for session setup, so don't recover 9323 * from sequence operation or clientid errors. 9324 */ 9325 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) 9326 { 9327 struct nfs4_get_lease_time_data *data = 9328 (struct nfs4_get_lease_time_data *)calldata; 9329 9330 if (!nfs4_sequence_done(task, &data->res->lr_seq_res)) 9331 return; 9332 switch (task->tk_status) { 9333 case -NFS4ERR_DELAY: 9334 case -NFS4ERR_GRACE: 9335 rpc_delay(task, NFS4_POLL_RETRY_MIN); 9336 task->tk_status = 0; 9337 fallthrough; 9338 case -NFS4ERR_RETRY_UNCACHED_REP: 9339 rpc_restart_call_prepare(task); 9340 return; 9341 } 9342 } 9343 9344 static const struct rpc_call_ops nfs4_get_lease_time_ops = { 9345 .rpc_call_prepare = nfs4_get_lease_time_prepare, 9346 .rpc_call_done = nfs4_get_lease_time_done, 9347 }; 9348 9349 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) 9350 { 9351 struct nfs4_get_lease_time_args args; 9352 struct nfs4_get_lease_time_res res = { 9353 .lr_fsinfo = fsinfo, 9354 }; 9355 struct nfs4_get_lease_time_data data = { 9356 .args = &args, 9357 .res = &res, 9358 .clp = clp, 9359 }; 9360 struct rpc_message msg = { 9361 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME], 9362 .rpc_argp = &args, 9363 .rpc_resp = &res, 9364 }; 9365 struct rpc_task_setup task_setup = { 9366 .rpc_client = clp->cl_rpcclient, 9367 .rpc_message = &msg, 9368 .callback_ops = &nfs4_get_lease_time_ops, 9369 .callback_data = &data, 9370 .flags = RPC_TASK_TIMEOUT, 9371 }; 9372 9373 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0, 1); 9374 return nfs4_call_sync_custom(&task_setup); 9375 } 9376 9377 #ifdef CONFIG_NFS_V4_1 9378 9379 /* 9380 * Initialize the values to be used by the client in CREATE_SESSION 9381 * If nfs4_init_session set the fore channel request and response sizes, 9382 * use them. 9383 * 9384 * Set the back channel max_resp_sz_cached to zero to force the client to 9385 * always set csa_cachethis to FALSE because the current implementation 9386 * of the back channel DRC only supports caching the CB_SEQUENCE operation. 
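 *
 * As a worked example, with NFS_MAX_FILE_IO_SIZE of 1 MB the requested
 * fore channel limits come out to roughly
 *
 *	max_rqst_sz = 1 MB + nfs41_maxwrite_overhead
 *	max_resp_sz = 1 MB + nfs41_maxread_overhead
 *
 * while both back channel sizes are taken from rpc_max_bc_payload(clnt).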
9387 */ 9388 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args, 9389 struct rpc_clnt *clnt) 9390 { 9391 unsigned int max_rqst_sz, max_resp_sz; 9392 unsigned int max_bc_payload = rpc_max_bc_payload(clnt); 9393 unsigned int max_bc_slots = rpc_num_bc_slots(clnt); 9394 9395 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead; 9396 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead; 9397 9398 /* Fore channel attributes */ 9399 args->fc_attrs.max_rqst_sz = max_rqst_sz; 9400 args->fc_attrs.max_resp_sz = max_resp_sz; 9401 args->fc_attrs.max_ops = NFS4_MAX_OPS; 9402 args->fc_attrs.max_reqs = max_session_slots; 9403 9404 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u " 9405 "max_ops=%u max_reqs=%u\n", 9406 __func__, 9407 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz, 9408 args->fc_attrs.max_ops, args->fc_attrs.max_reqs); 9409 9410 /* Back channel attributes */ 9411 args->bc_attrs.max_rqst_sz = max_bc_payload; 9412 args->bc_attrs.max_resp_sz = max_bc_payload; 9413 args->bc_attrs.max_resp_sz_cached = 0; 9414 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS; 9415 args->bc_attrs.max_reqs = max_t(unsigned short, max_session_cb_slots, 1); 9416 if (args->bc_attrs.max_reqs > max_bc_slots) 9417 args->bc_attrs.max_reqs = max_bc_slots; 9418 9419 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u " 9420 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", 9421 __func__, 9422 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz, 9423 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops, 9424 args->bc_attrs.max_reqs); 9425 } 9426 9427 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, 9428 struct nfs41_create_session_res *res) 9429 { 9430 struct nfs4_channel_attrs *sent = &args->fc_attrs; 9431 struct nfs4_channel_attrs *rcvd = &res->fc_attrs; 9432 9433 if (rcvd->max_resp_sz > sent->max_resp_sz) 9434 return -EINVAL; 9435 /* 9436 * Our requested max_ops is the minimum we need; we're not 9437 * prepared to break up compounds into smaller pieces than that. 
9438 * So, no point even trying to continue if the server won't 9439 * cooperate: 9440 */ 9441 if (rcvd->max_ops < sent->max_ops) 9442 return -EINVAL; 9443 if (rcvd->max_reqs == 0) 9444 return -EINVAL; 9445 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE) 9446 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE; 9447 return 0; 9448 } 9449 9450 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, 9451 struct nfs41_create_session_res *res) 9452 { 9453 struct nfs4_channel_attrs *sent = &args->bc_attrs; 9454 struct nfs4_channel_attrs *rcvd = &res->bc_attrs; 9455 9456 if (!(res->flags & SESSION4_BACK_CHAN)) 9457 goto out; 9458 if (rcvd->max_rqst_sz > sent->max_rqst_sz) 9459 return -EINVAL; 9460 if (rcvd->max_resp_sz > sent->max_resp_sz) 9461 return -EINVAL; 9462 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) 9463 return -EINVAL; 9464 if (rcvd->max_ops > sent->max_ops) 9465 return -EINVAL; 9466 if (rcvd->max_reqs > sent->max_reqs) 9467 return -EINVAL; 9468 out: 9469 return 0; 9470 } 9471 9472 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args, 9473 struct nfs41_create_session_res *res) 9474 { 9475 int ret; 9476 9477 ret = nfs4_verify_fore_channel_attrs(args, res); 9478 if (ret) 9479 return ret; 9480 return nfs4_verify_back_channel_attrs(args, res); 9481 } 9482 9483 static void nfs4_update_session(struct nfs4_session *session, 9484 struct nfs41_create_session_res *res) 9485 { 9486 nfs4_copy_sessionid(&session->sess_id, &res->sessionid); 9487 /* Mark client id and session as being confirmed */ 9488 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; 9489 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state); 9490 session->flags = res->flags; 9491 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs)); 9492 if (res->flags & SESSION4_BACK_CHAN) 9493 memcpy(&session->bc_attrs, &res->bc_attrs, 9494 sizeof(session->bc_attrs)); 9495 } 9496 9497 static int _nfs4_proc_create_session(struct nfs_client *clp, 9498 const struct cred *cred) 9499 { 9500 struct nfs4_session *session = clp->cl_session; 9501 struct nfs41_create_session_args args = { 9502 .client = clp, 9503 .clientid = clp->cl_clientid, 9504 .seqid = clp->cl_seqid, 9505 .cb_program = NFS4_CALLBACK, 9506 }; 9507 struct nfs41_create_session_res res; 9508 9509 struct rpc_message msg = { 9510 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION], 9511 .rpc_argp = &args, 9512 .rpc_resp = &res, 9513 .rpc_cred = cred, 9514 }; 9515 int status; 9516 9517 nfs4_init_channel_attrs(&args, clp->cl_rpcclient); 9518 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN); 9519 9520 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 9521 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 9522 trace_nfs4_create_session(clp, status); 9523 9524 switch (status) { 9525 case -NFS4ERR_STALE_CLIENTID: 9526 case -NFS4ERR_DELAY: 9527 case -ETIMEDOUT: 9528 case -EACCES: 9529 case -EAGAIN: 9530 goto out; 9531 } 9532 9533 clp->cl_seqid++; 9534 if (!status) { 9535 /* Verify the session's negotiated channel_attrs values */ 9536 status = nfs4_verify_channel_attrs(&args, &res); 9537 /* Increment the clientid slot sequence id */ 9538 if (status) 9539 goto out; 9540 nfs4_update_session(session, &res); 9541 } 9542 out: 9543 return status; 9544 } 9545 9546 /* 9547 * Issues a CREATE_SESSION operation to the server. 9548 * It is the responsibility of the caller to verify the session is 9549 * expired before calling this routine. 
9550 */ 9551 int nfs4_proc_create_session(struct nfs_client *clp, const struct cred *cred) 9552 { 9553 int status; 9554 unsigned *ptr; 9555 struct nfs4_session *session = clp->cl_session; 9556 struct nfs4_add_xprt_data xprtdata = { 9557 .clp = clp, 9558 }; 9559 struct rpc_add_xprt_test rpcdata = { 9560 .add_xprt_test = clp->cl_mvops->session_trunk, 9561 .data = &xprtdata, 9562 }; 9563 9564 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); 9565 9566 status = _nfs4_proc_create_session(clp, cred); 9567 if (status) 9568 goto out; 9569 9570 /* Init or reset the session slot tables */ 9571 status = nfs4_setup_session_slot_tables(session); 9572 dprintk("slot table setup returned %d\n", status); 9573 if (status) 9574 goto out; 9575 9576 ptr = (unsigned *)&session->sess_id.data[0]; 9577 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__, 9578 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]); 9579 rpc_clnt_probe_trunked_xprts(clp->cl_rpcclient, &rpcdata); 9580 out: 9581 return status; 9582 } 9583 9584 /* 9585 * Issue the over-the-wire RPC DESTROY_SESSION. 9586 * The caller must serialize access to this routine. 9587 */ 9588 int nfs4_proc_destroy_session(struct nfs4_session *session, 9589 const struct cred *cred) 9590 { 9591 struct rpc_message msg = { 9592 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION], 9593 .rpc_argp = session, 9594 .rpc_cred = cred, 9595 }; 9596 int status = 0; 9597 9598 /* session is still being setup */ 9599 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state)) 9600 return 0; 9601 9602 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 9603 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 9604 trace_nfs4_destroy_session(session->clp, status); 9605 9606 if (status) 9607 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. " 9608 "Session has been destroyed regardless...\n", status); 9609 rpc_clnt_manage_trunked_xprts(session->clp->cl_rpcclient); 9610 return status; 9611 } 9612 9613 /* 9614 * Renew the cl_session lease. 
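 *
 * On NFSv4.1 the lease is kept alive by issuing SEQUENCE operations:
 * nfs41_proc_async_sequence() below is wired up as the v4.1
 * sched_state_renewal callback (see nfs41_state_renewal_ops) and sends
 * an asynchronous SEQUENCE, while nfs4_proc_sequence() is the
 * synchronous variant used for renew_lease.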
9615 */ 9616 struct nfs4_sequence_data { 9617 struct nfs_client *clp; 9618 struct nfs4_sequence_args args; 9619 struct nfs4_sequence_res res; 9620 }; 9621 9622 static void nfs41_sequence_release(void *data) 9623 { 9624 struct nfs4_sequence_data *calldata = data; 9625 struct nfs_client *clp = calldata->clp; 9626 9627 if (refcount_read(&clp->cl_count) > 1) 9628 nfs4_schedule_state_renewal(clp); 9629 nfs_put_client(clp); 9630 kfree(calldata); 9631 } 9632 9633 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp) 9634 { 9635 switch(task->tk_status) { 9636 case -NFS4ERR_DELAY: 9637 rpc_delay(task, NFS4_POLL_RETRY_MAX); 9638 return -EAGAIN; 9639 default: 9640 nfs4_schedule_lease_recovery(clp); 9641 } 9642 return 0; 9643 } 9644 9645 static void nfs41_sequence_call_done(struct rpc_task *task, void *data) 9646 { 9647 struct nfs4_sequence_data *calldata = data; 9648 struct nfs_client *clp = calldata->clp; 9649 9650 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp)) 9651 return; 9652 9653 trace_nfs4_sequence(clp, task->tk_status); 9654 if (task->tk_status < 0 && clp->cl_cons_state >= 0) { 9655 dprintk("%s ERROR %d\n", __func__, task->tk_status); 9656 if (refcount_read(&clp->cl_count) == 1) 9657 return; 9658 9659 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) { 9660 rpc_restart_call_prepare(task); 9661 return; 9662 } 9663 } 9664 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); 9665 } 9666 9667 static void nfs41_sequence_prepare(struct rpc_task *task, void *data) 9668 { 9669 struct nfs4_sequence_data *calldata = data; 9670 struct nfs_client *clp = calldata->clp; 9671 struct nfs4_sequence_args *args; 9672 struct nfs4_sequence_res *res; 9673 9674 args = task->tk_msg.rpc_argp; 9675 res = task->tk_msg.rpc_resp; 9676 9677 nfs4_setup_sequence(clp, args, res, task); 9678 } 9679 9680 static const struct rpc_call_ops nfs41_sequence_ops = { 9681 .rpc_call_done = nfs41_sequence_call_done, 9682 .rpc_call_prepare = nfs41_sequence_prepare, 9683 .rpc_release = nfs41_sequence_release, 9684 }; 9685 9686 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, 9687 const struct cred *cred, 9688 struct nfs4_slot *slot, 9689 bool is_privileged) 9690 { 9691 struct nfs4_sequence_data *calldata; 9692 struct rpc_message msg = { 9693 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE], 9694 .rpc_cred = cred, 9695 }; 9696 struct rpc_task_setup task_setup_data = { 9697 .rpc_client = clp->cl_rpcclient, 9698 .rpc_message = &msg, 9699 .callback_ops = &nfs41_sequence_ops, 9700 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT | RPC_TASK_MOVEABLE, 9701 }; 9702 struct rpc_task *ret; 9703 9704 ret = ERR_PTR(-EIO); 9705 if (!refcount_inc_not_zero(&clp->cl_count)) 9706 goto out_err; 9707 9708 ret = ERR_PTR(-ENOMEM); 9709 calldata = kzalloc(sizeof(*calldata), GFP_KERNEL); 9710 if (calldata == NULL) 9711 goto out_put_clp; 9712 nfs4_init_sequence(&calldata->args, &calldata->res, 0, is_privileged); 9713 nfs4_sequence_attach_slot(&calldata->args, &calldata->res, slot); 9714 msg.rpc_argp = &calldata->args; 9715 msg.rpc_resp = &calldata->res; 9716 calldata->clp = clp; 9717 task_setup_data.callback_data = calldata; 9718 9719 ret = rpc_run_task(&task_setup_data); 9720 if (IS_ERR(ret)) 9721 goto out_err; 9722 return ret; 9723 out_put_clp: 9724 nfs_put_client(clp); 9725 out_err: 9726 nfs41_release_slot(slot); 9727 return ret; 9728 } 9729 9730 static int nfs41_proc_async_sequence(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags) 9731 { 9732 struct rpc_task *task; 
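	/*
	 * Only send a SEQUENCE when the lease renew timer has actually
	 * expired (NFS4_RENEW_TIMEOUT); for any other renew_flags just
	 * report -EAGAIN without sending anything.
	 */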
9733 int ret = 0; 9734 9735 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) 9736 return -EAGAIN; 9737 task = _nfs41_proc_sequence(clp, cred, NULL, false); 9738 if (IS_ERR(task)) 9739 ret = PTR_ERR(task); 9740 else 9741 rpc_put_task_async(task); 9742 dprintk("<-- %s status=%d\n", __func__, ret); 9743 return ret; 9744 } 9745 9746 static int nfs4_proc_sequence(struct nfs_client *clp, const struct cred *cred) 9747 { 9748 struct rpc_task *task; 9749 int ret; 9750 9751 task = _nfs41_proc_sequence(clp, cred, NULL, true); 9752 if (IS_ERR(task)) { 9753 ret = PTR_ERR(task); 9754 goto out; 9755 } 9756 ret = rpc_wait_for_completion_task(task); 9757 if (!ret) 9758 ret = task->tk_status; 9759 rpc_put_task(task); 9760 out: 9761 dprintk("<-- %s status=%d\n", __func__, ret); 9762 return ret; 9763 } 9764 9765 struct nfs4_reclaim_complete_data { 9766 struct nfs_client *clp; 9767 struct nfs41_reclaim_complete_args arg; 9768 struct nfs41_reclaim_complete_res res; 9769 }; 9770 9771 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data) 9772 { 9773 struct nfs4_reclaim_complete_data *calldata = data; 9774 9775 nfs4_setup_sequence(calldata->clp, 9776 &calldata->arg.seq_args, 9777 &calldata->res.seq_res, 9778 task); 9779 } 9780 9781 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp) 9782 { 9783 switch(task->tk_status) { 9784 case 0: 9785 wake_up_all(&clp->cl_lock_waitq); 9786 fallthrough; 9787 case -NFS4ERR_COMPLETE_ALREADY: 9788 case -NFS4ERR_WRONG_CRED: /* What to do here? */ 9789 break; 9790 case -NFS4ERR_DELAY: 9791 rpc_delay(task, NFS4_POLL_RETRY_MAX); 9792 fallthrough; 9793 case -NFS4ERR_RETRY_UNCACHED_REP: 9794 case -EACCES: 9795 dprintk("%s: failed to reclaim complete error %d for server %s, retrying\n", 9796 __func__, task->tk_status, clp->cl_hostname); 9797 return -EAGAIN; 9798 case -NFS4ERR_BADSESSION: 9799 case -NFS4ERR_DEADSESSION: 9800 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 9801 break; 9802 default: 9803 nfs4_schedule_lease_recovery(clp); 9804 } 9805 return 0; 9806 } 9807 9808 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data) 9809 { 9810 struct nfs4_reclaim_complete_data *calldata = data; 9811 struct nfs_client *clp = calldata->clp; 9812 struct nfs4_sequence_res *res = &calldata->res.seq_res; 9813 9814 if (!nfs41_sequence_done(task, res)) 9815 return; 9816 9817 trace_nfs4_reclaim_complete(clp, task->tk_status); 9818 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) { 9819 rpc_restart_call_prepare(task); 9820 return; 9821 } 9822 } 9823 9824 static void nfs4_free_reclaim_complete_data(void *data) 9825 { 9826 struct nfs4_reclaim_complete_data *calldata = data; 9827 9828 kfree(calldata); 9829 } 9830 9831 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = { 9832 .rpc_call_prepare = nfs4_reclaim_complete_prepare, 9833 .rpc_call_done = nfs4_reclaim_complete_done, 9834 .rpc_release = nfs4_free_reclaim_complete_data, 9835 }; 9836 9837 /* 9838 * Issue a global reclaim complete. 
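 *
 * This sends RECLAIM_COMPLETE with one_fs = 0, i.e. reclaim is complete
 * for the client as a whole rather than for a single filesystem.  The
 * call is issued synchronously through nfs4_call_sync_custom(), and
 * transient failures (NFS4ERR_DELAY and friends) are retried by
 * nfs4_reclaim_complete_done() restarting the RPC.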
9839 */ 9840 static int nfs41_proc_reclaim_complete(struct nfs_client *clp, 9841 const struct cred *cred) 9842 { 9843 struct nfs4_reclaim_complete_data *calldata; 9844 struct rpc_message msg = { 9845 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE], 9846 .rpc_cred = cred, 9847 }; 9848 struct rpc_task_setup task_setup_data = { 9849 .rpc_client = clp->cl_rpcclient, 9850 .rpc_message = &msg, 9851 .callback_ops = &nfs4_reclaim_complete_call_ops, 9852 .flags = RPC_TASK_NO_ROUND_ROBIN, 9853 }; 9854 int status = -ENOMEM; 9855 9856 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 9857 if (calldata == NULL) 9858 goto out; 9859 calldata->clp = clp; 9860 calldata->arg.one_fs = 0; 9861 9862 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0, 1); 9863 msg.rpc_argp = &calldata->arg; 9864 msg.rpc_resp = &calldata->res; 9865 task_setup_data.callback_data = calldata; 9866 status = nfs4_call_sync_custom(&task_setup_data); 9867 out: 9868 dprintk("<-- %s status=%d\n", __func__, status); 9869 return status; 9870 } 9871 9872 static void 9873 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata) 9874 { 9875 struct nfs4_layoutget *lgp = calldata; 9876 struct nfs_server *server = NFS_SERVER(lgp->args.inode); 9877 9878 nfs4_setup_sequence(server->nfs_client, &lgp->args.seq_args, 9879 &lgp->res.seq_res, task); 9880 } 9881 9882 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) 9883 { 9884 struct nfs4_layoutget *lgp = calldata; 9885 9886 nfs41_sequence_process(task, &lgp->res.seq_res); 9887 } 9888 9889 static int 9890 nfs4_layoutget_handle_exception(struct rpc_task *task, 9891 struct nfs4_layoutget *lgp, struct nfs4_exception *exception) 9892 { 9893 struct inode *inode = lgp->args.inode; 9894 struct nfs_server *server = NFS_SERVER(inode); 9895 struct pnfs_layout_hdr *lo = lgp->lo; 9896 int nfs4err = task->tk_status; 9897 int err, status = 0; 9898 LIST_HEAD(head); 9899 9900 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status); 9901 9902 nfs4_sequence_free_slot(&lgp->res.seq_res); 9903 9904 exception->state = NULL; 9905 exception->stateid = NULL; 9906 9907 switch (nfs4err) { 9908 case 0: 9909 goto out; 9910 9911 /* 9912 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs 9913 * on the file. set tk_status to -ENODATA to tell upper layer to 9914 * retry go inband. 9915 */ 9916 case -NFS4ERR_LAYOUTUNAVAILABLE: 9917 status = -ENODATA; 9918 goto out; 9919 /* 9920 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of 9921 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3). 9922 */ 9923 case -NFS4ERR_BADLAYOUT: 9924 status = -EOVERFLOW; 9925 goto out; 9926 /* 9927 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client 9928 * (or clients) writing to the same RAID stripe except when 9929 * the minlength argument is 0 (see RFC5661 section 18.43.3). 9930 * 9931 * Treat it like we would RECALLCONFLICT -- we retry for a little 9932 * while, and then eventually give up. 9933 */ 9934 case -NFS4ERR_LAYOUTTRYLATER: 9935 if (lgp->args.minlength == 0) { 9936 status = -EOVERFLOW; 9937 goto out; 9938 } 9939 status = -EBUSY; 9940 break; 9941 case -NFS4ERR_RECALLCONFLICT: 9942 case -NFS4ERR_RETURNCONFLICT: 9943 status = -ERECALLCONFLICT; 9944 break; 9945 case -NFS4ERR_DELEG_REVOKED: 9946 case -NFS4ERR_ADMIN_REVOKED: 9947 case -NFS4ERR_EXPIRED: 9948 case -NFS4ERR_BAD_STATEID: 9949 exception->timeout = 0; 9950 spin_lock(&inode->i_lock); 9951 /* If the open stateid was bad, then recover it. 
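		 * We do that by handing the open context's state and the
		 * layout stateid back to the caller through the exception,
		 * so that nfs4_handle_exception() can drive the appropriate
		 * stateid recovery before LAYOUTGET is retried.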
*/ 9952 if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) || 9953 !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) { 9954 spin_unlock(&inode->i_lock); 9955 exception->state = lgp->args.ctx->state; 9956 exception->stateid = &lgp->args.stateid; 9957 break; 9958 } 9959 9960 /* 9961 * Mark the bad layout state as invalid, then retry 9962 */ 9963 pnfs_mark_layout_stateid_invalid(lo, &head); 9964 spin_unlock(&inode->i_lock); 9965 nfs_commit_inode(inode, 0); 9966 pnfs_free_lseg_list(&head); 9967 status = -EAGAIN; 9968 goto out; 9969 } 9970 9971 err = nfs4_handle_exception(server, nfs4err, exception); 9972 if (!status) { 9973 if (exception->retry) 9974 status = -EAGAIN; 9975 else 9976 status = err; 9977 } 9978 out: 9979 return status; 9980 } 9981 9982 size_t max_response_pages(struct nfs_server *server) 9983 { 9984 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 9985 return nfs_page_array_len(0, max_resp_sz); 9986 } 9987 9988 static void nfs4_layoutget_release(void *calldata) 9989 { 9990 struct nfs4_layoutget *lgp = calldata; 9991 9992 nfs4_sequence_free_slot(&lgp->res.seq_res); 9993 pnfs_layoutget_free(lgp); 9994 } 9995 9996 static const struct rpc_call_ops nfs4_layoutget_call_ops = { 9997 .rpc_call_prepare = nfs4_layoutget_prepare, 9998 .rpc_call_done = nfs4_layoutget_done, 9999 .rpc_release = nfs4_layoutget_release, 10000 }; 10001 10002 struct pnfs_layout_segment * 10003 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, 10004 struct nfs4_exception *exception) 10005 { 10006 struct inode *inode = lgp->args.inode; 10007 struct nfs_server *server = NFS_SERVER(inode); 10008 struct rpc_task *task; 10009 struct rpc_message msg = { 10010 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET], 10011 .rpc_argp = &lgp->args, 10012 .rpc_resp = &lgp->res, 10013 .rpc_cred = lgp->cred, 10014 }; 10015 struct rpc_task_setup task_setup_data = { 10016 .rpc_client = server->client, 10017 .rpc_message = &msg, 10018 .callback_ops = &nfs4_layoutget_call_ops, 10019 .callback_data = lgp, 10020 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF | 10021 RPC_TASK_MOVEABLE, 10022 }; 10023 struct pnfs_layout_segment *lseg = NULL; 10024 int status = 0; 10025 10026 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0); 10027 exception->retry = 0; 10028 10029 task = rpc_run_task(&task_setup_data); 10030 if (IS_ERR(task)) 10031 return ERR_CAST(task); 10032 10033 status = rpc_wait_for_completion_task(task); 10034 if (status != 0) 10035 goto out; 10036 10037 if (task->tk_status < 0) { 10038 exception->retry = 1; 10039 status = nfs4_layoutget_handle_exception(task, lgp, exception); 10040 } else if (lgp->res.layoutp->len == 0) { 10041 exception->retry = 1; 10042 status = -EAGAIN; 10043 nfs4_update_delay(&exception->timeout); 10044 } else 10045 lseg = pnfs_layout_process(lgp); 10046 out: 10047 trace_nfs4_layoutget(lgp->args.ctx, 10048 &lgp->args.range, 10049 &lgp->res.range, 10050 &lgp->res.stateid, 10051 status); 10052 10053 rpc_put_task(task); 10054 dprintk("<-- %s status=%d\n", __func__, status); 10055 if (status) 10056 return ERR_PTR(status); 10057 return lseg; 10058 } 10059 10060 static void 10061 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) 10062 { 10063 struct nfs4_layoutreturn *lrp = calldata; 10064 10065 nfs4_setup_sequence(lrp->clp, 10066 &lrp->args.seq_args, 10067 &lrp->res.seq_res, 10068 task); 10069 if (!pnfs_layout_is_valid(lrp->args.layout)) 10070 rpc_exit(task, 0); 10071 } 10072 10073 static void nfs4_layoutreturn_done(struct rpc_task *task, 
void *calldata) 10074 { 10075 struct nfs4_layoutreturn *lrp = calldata; 10076 struct nfs_server *server; 10077 10078 if (!nfs41_sequence_process(task, &lrp->res.seq_res)) 10079 return; 10080 10081 if (task->tk_rpc_status == -ETIMEDOUT) { 10082 lrp->rpc_status = -EAGAIN; 10083 lrp->res.lrs_present = 0; 10084 return; 10085 } 10086 /* 10087 * Was there an RPC level error? Assume the call succeeded, 10088 * and that we need to release the layout 10089 */ 10090 if (task->tk_rpc_status != 0 && RPC_WAS_SENT(task)) { 10091 lrp->res.lrs_present = 0; 10092 return; 10093 } 10094 10095 server = NFS_SERVER(lrp->args.inode); 10096 switch (task->tk_status) { 10097 case -NFS4ERR_OLD_STATEID: 10098 if (nfs4_layout_refresh_old_stateid(&lrp->args.stateid, 10099 &lrp->args.range, 10100 lrp->args.inode)) 10101 goto out_restart; 10102 fallthrough; 10103 default: 10104 task->tk_status = 0; 10105 lrp->res.lrs_present = 0; 10106 fallthrough; 10107 case 0: 10108 break; 10109 case -NFS4ERR_BADSESSION: 10110 case -NFS4ERR_DEADSESSION: 10111 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 10112 nfs4_schedule_session_recovery(server->nfs_client->cl_session, 10113 task->tk_status); 10114 lrp->res.lrs_present = 0; 10115 lrp->rpc_status = -EAGAIN; 10116 task->tk_status = 0; 10117 break; 10118 case -NFS4ERR_DELAY: 10119 if (nfs4_async_handle_error(task, server, NULL, NULL) == 10120 -EAGAIN) 10121 goto out_restart; 10122 lrp->res.lrs_present = 0; 10123 break; 10124 } 10125 return; 10126 out_restart: 10127 task->tk_status = 0; 10128 nfs4_sequence_free_slot(&lrp->res.seq_res); 10129 rpc_restart_call_prepare(task); 10130 } 10131 10132 static void nfs4_layoutreturn_release(void *calldata) 10133 { 10134 struct nfs4_layoutreturn *lrp = calldata; 10135 struct pnfs_layout_hdr *lo = lrp->args.layout; 10136 10137 if (lrp->rpc_status == 0 || !lrp->inode) 10138 pnfs_layoutreturn_free_lsegs( 10139 lo, &lrp->args.stateid, &lrp->args.range, 10140 lrp->res.lrs_present ? 
&lrp->res.stateid : NULL); 10141 else 10142 pnfs_layoutreturn_retry_later(lo, &lrp->args.stateid, 10143 &lrp->args.range); 10144 nfs4_sequence_free_slot(&lrp->res.seq_res); 10145 if (lrp->ld_private.ops && lrp->ld_private.ops->free) 10146 lrp->ld_private.ops->free(&lrp->ld_private); 10147 pnfs_put_layout_hdr(lrp->args.layout); 10148 nfs_iput_and_deactive(lrp->inode); 10149 put_cred(lrp->cred); 10150 kfree(calldata); 10151 } 10152 10153 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = { 10154 .rpc_call_prepare = nfs4_layoutreturn_prepare, 10155 .rpc_call_done = nfs4_layoutreturn_done, 10156 .rpc_release = nfs4_layoutreturn_release, 10157 }; 10158 10159 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, unsigned int flags) 10160 { 10161 struct rpc_task *task; 10162 struct rpc_message msg = { 10163 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN], 10164 .rpc_argp = &lrp->args, 10165 .rpc_resp = &lrp->res, 10166 .rpc_cred = lrp->cred, 10167 }; 10168 struct rpc_task_setup task_setup_data = { 10169 .rpc_client = NFS_SERVER(lrp->args.inode)->client, 10170 .rpc_message = &msg, 10171 .callback_ops = &nfs4_layoutreturn_call_ops, 10172 .callback_data = lrp, 10173 .flags = RPC_TASK_MOVEABLE, 10174 }; 10175 int status = 0; 10176 10177 nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client, 10178 NFS_SP4_MACH_CRED_PNFS_CLEANUP, 10179 &task_setup_data.rpc_client, &msg); 10180 10181 lrp->inode = nfs_igrab_and_active(lrp->args.inode); 10182 if (flags & PNFS_FL_LAYOUTRETURN_ASYNC) { 10183 if (!lrp->inode) { 10184 nfs4_layoutreturn_release(lrp); 10185 return -EAGAIN; 10186 } 10187 task_setup_data.flags |= RPC_TASK_ASYNC; 10188 } 10189 if (!lrp->inode) 10190 flags |= PNFS_FL_LAYOUTRETURN_PRIVILEGED; 10191 if (flags & PNFS_FL_LAYOUTRETURN_PRIVILEGED) 10192 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1, 10193 1); 10194 else 10195 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1, 10196 0); 10197 task = rpc_run_task(&task_setup_data); 10198 if (IS_ERR(task)) 10199 return PTR_ERR(task); 10200 if (!(flags & PNFS_FL_LAYOUTRETURN_ASYNC)) 10201 status = task->tk_status; 10202 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status); 10203 dprintk("<-- %s status=%d\n", __func__, status); 10204 rpc_put_task(task); 10205 return status; 10206 } 10207 10208 static int 10209 _nfs4_proc_getdeviceinfo(struct nfs_server *server, 10210 struct pnfs_device *pdev, 10211 const struct cred *cred) 10212 { 10213 struct nfs4_getdeviceinfo_args args = { 10214 .pdev = pdev, 10215 .notify_types = NOTIFY_DEVICEID4_CHANGE | 10216 NOTIFY_DEVICEID4_DELETE, 10217 }; 10218 struct nfs4_getdeviceinfo_res res = { 10219 .pdev = pdev, 10220 }; 10221 struct rpc_message msg = { 10222 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO], 10223 .rpc_argp = &args, 10224 .rpc_resp = &res, 10225 .rpc_cred = cred, 10226 }; 10227 int status; 10228 10229 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 10230 if (res.notification & ~args.notify_types) 10231 dprintk("%s: unsupported notification\n", __func__); 10232 if (res.notification != args.notify_types) 10233 pdev->nocache = 1; 10234 10235 trace_nfs4_getdeviceinfo(server, &pdev->dev_id, status); 10236 10237 dprintk("<-- %s status=%d\n", __func__, status); 10238 10239 return status; 10240 } 10241 10242 int nfs4_proc_getdeviceinfo(struct nfs_server *server, 10243 struct pnfs_device *pdev, 10244 const struct cred *cred) 10245 { 10246 struct nfs4_exception exception = { }; 10247 int err; 10248 
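	/*
	 * Plain retry loop: _nfs4_proc_getdeviceinfo() issues the actual
	 * GETDEVICEINFO call and nfs4_handle_exception() decides, via
	 * exception.retry, whether the failure was transient enough to
	 * try again.
	 */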
10249 do { 10250 err = nfs4_handle_exception(server, 10251 _nfs4_proc_getdeviceinfo(server, pdev, cred), 10252 &exception); 10253 } while (exception.retry); 10254 return err; 10255 } 10256 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo); 10257 10258 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata) 10259 { 10260 struct nfs4_layoutcommit_data *data = calldata; 10261 struct nfs_server *server = NFS_SERVER(data->args.inode); 10262 10263 nfs4_setup_sequence(server->nfs_client, 10264 &data->args.seq_args, 10265 &data->res.seq_res, 10266 task); 10267 } 10268 10269 static void 10270 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata) 10271 { 10272 struct nfs4_layoutcommit_data *data = calldata; 10273 struct nfs_server *server = NFS_SERVER(data->args.inode); 10274 10275 if (!nfs41_sequence_done(task, &data->res.seq_res)) 10276 return; 10277 10278 switch (task->tk_status) { /* Just ignore these failures */ 10279 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */ 10280 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */ 10281 case -NFS4ERR_BADLAYOUT: /* no layout */ 10282 case -NFS4ERR_GRACE: /* loca_recalim always false */ 10283 task->tk_status = 0; 10284 break; 10285 case 0: 10286 break; 10287 default: 10288 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) { 10289 rpc_restart_call_prepare(task); 10290 return; 10291 } 10292 } 10293 } 10294 10295 static void nfs4_layoutcommit_release(void *calldata) 10296 { 10297 struct nfs4_layoutcommit_data *data = calldata; 10298 10299 pnfs_cleanup_layoutcommit(data); 10300 nfs_post_op_update_inode_force_wcc(data->args.inode, 10301 data->res.fattr); 10302 put_cred(data->cred); 10303 nfs_iput_and_deactive(data->inode); 10304 kfree(data); 10305 } 10306 10307 static const struct rpc_call_ops nfs4_layoutcommit_ops = { 10308 .rpc_call_prepare = nfs4_layoutcommit_prepare, 10309 .rpc_call_done = nfs4_layoutcommit_done, 10310 .rpc_release = nfs4_layoutcommit_release, 10311 }; 10312 10313 int 10314 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync) 10315 { 10316 struct rpc_message msg = { 10317 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT], 10318 .rpc_argp = &data->args, 10319 .rpc_resp = &data->res, 10320 .rpc_cred = data->cred, 10321 }; 10322 struct rpc_task_setup task_setup_data = { 10323 .task = &data->task, 10324 .rpc_client = NFS_CLIENT(data->args.inode), 10325 .rpc_message = &msg, 10326 .callback_ops = &nfs4_layoutcommit_ops, 10327 .callback_data = data, 10328 .flags = RPC_TASK_MOVEABLE, 10329 }; 10330 struct rpc_task *task; 10331 int status = 0; 10332 10333 dprintk("NFS: initiating layoutcommit call. 
sync %d " 10334 "lbw: %llu inode %lu\n", sync, 10335 data->args.lastbytewritten, 10336 data->args.inode->i_ino); 10337 10338 if (!sync) { 10339 data->inode = nfs_igrab_and_active(data->args.inode); 10340 if (data->inode == NULL) { 10341 nfs4_layoutcommit_release(data); 10342 return -EAGAIN; 10343 } 10344 task_setup_data.flags = RPC_TASK_ASYNC; 10345 } 10346 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0); 10347 task = rpc_run_task(&task_setup_data); 10348 if (IS_ERR(task)) 10349 return PTR_ERR(task); 10350 if (sync) 10351 status = task->tk_status; 10352 trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status); 10353 dprintk("%s: status %d\n", __func__, status); 10354 rpc_put_task(task); 10355 return status; 10356 } 10357 10358 /* 10359 * Use the state managment nfs_client cl_rpcclient, which uses krb5i (if 10360 * possible) as per RFC3530bis and RFC5661 Security Considerations sections 10361 */ 10362 static int _nfs41_proc_secinfo_no_name(struct nfs_server *server, 10363 struct nfs_fh *fhandle, 10364 struct nfs4_secinfo_flavors *flavors, 10365 bool use_integrity) 10366 { 10367 struct nfs41_secinfo_no_name_args args = { 10368 .style = SECINFO_STYLE_CURRENT_FH, 10369 }; 10370 struct nfs4_secinfo_res res = { 10371 .flavors = flavors, 10372 }; 10373 struct rpc_message msg = { 10374 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME], 10375 .rpc_argp = &args, 10376 .rpc_resp = &res, 10377 }; 10378 struct nfs4_call_sync_data data = { 10379 .seq_server = server, 10380 .seq_args = &args.seq_args, 10381 .seq_res = &res.seq_res, 10382 }; 10383 struct rpc_task_setup task_setup = { 10384 .rpc_client = server->client, 10385 .rpc_message = &msg, 10386 .callback_ops = server->nfs_client->cl_mvops->call_sync_ops, 10387 .callback_data = &data, 10388 .flags = RPC_TASK_NO_ROUND_ROBIN, 10389 }; 10390 const struct cred *cred = NULL; 10391 int status; 10392 10393 if (use_integrity) { 10394 task_setup.rpc_client = server->nfs_client->cl_rpcclient; 10395 10396 cred = nfs4_get_clid_cred(server->nfs_client); 10397 msg.rpc_cred = cred; 10398 } 10399 10400 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 10401 status = nfs4_call_sync_custom(&task_setup); 10402 dprintk("<-- %s status=%d\n", __func__, status); 10403 10404 put_cred(cred); 10405 10406 return status; 10407 } 10408 10409 static int nfs41_proc_secinfo_no_name(struct nfs_server *server, 10410 struct nfs_fh *fhandle, 10411 struct nfs4_secinfo_flavors *flavors) 10412 { 10413 struct nfs4_exception exception = { 10414 .interruptible = true, 10415 }; 10416 int err; 10417 do { 10418 /* first try using integrity protection */ 10419 err = -NFS4ERR_WRONGSEC; 10420 10421 /* try to use integrity protection with machine cred */ 10422 if (_nfs4_is_integrity_protected(server->nfs_client)) 10423 err = _nfs41_proc_secinfo_no_name(server, fhandle, 10424 flavors, true); 10425 10426 /* 10427 * if unable to use integrity protection, or SECINFO with 10428 * integrity protection returns NFS4ERR_WRONGSEC (which is 10429 * disallowed by spec, but exists in deployed servers) use 10430 * the current filesystem's rpc_client and the user cred. 
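		 *
		 * The resulting ladder is: krb5i with the machine cred first
		 * (when the state management client is integrity protected),
		 * then the filesystem's own rpc_client with the user cred,
		 * and finally nfs4_handle_exception() for anything other
		 * than success, WRONGSEC or ENOTSUPP.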
10431 */ 10432 if (err == -NFS4ERR_WRONGSEC) 10433 err = _nfs41_proc_secinfo_no_name(server, fhandle, 10434 flavors, false); 10435 10436 switch (err) { 10437 case 0: 10438 case -NFS4ERR_WRONGSEC: 10439 case -ENOTSUPP: 10440 goto out; 10441 default: 10442 err = nfs4_handle_exception(server, err, &exception); 10443 } 10444 } while (exception.retry); 10445 out: 10446 return err; 10447 } 10448 10449 static int nfs41_find_root_sec(struct nfs_server *server, 10450 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 10451 { 10452 int err; 10453 struct page *page; 10454 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR; 10455 struct nfs4_secinfo_flavors *flavors; 10456 struct nfs4_secinfo4 *secinfo; 10457 int i; 10458 10459 page = alloc_page(GFP_KERNEL); 10460 if (!page) { 10461 err = -ENOMEM; 10462 goto out; 10463 } 10464 10465 flavors = page_address(page); 10466 err = nfs41_proc_secinfo_no_name(server, fhandle, flavors); 10467 10468 /* 10469 * Fall back on "guess and check" method if 10470 * the server doesn't support SECINFO_NO_NAME 10471 */ 10472 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) { 10473 err = nfs4_find_root_sec(server, fhandle, fattr); 10474 goto out_freepage; 10475 } 10476 if (err) 10477 goto out_freepage; 10478 10479 for (i = 0; i < flavors->num_flavors; i++) { 10480 secinfo = &flavors->flavors[i]; 10481 10482 switch (secinfo->flavor) { 10483 case RPC_AUTH_NULL: 10484 case RPC_AUTH_UNIX: 10485 case RPC_AUTH_GSS: 10486 flavor = rpcauth_get_pseudoflavor(secinfo->flavor, 10487 &secinfo->flavor_info); 10488 break; 10489 default: 10490 flavor = RPC_AUTH_MAXFLAVOR; 10491 break; 10492 } 10493 10494 if (!nfs_auth_info_match(&server->auth_info, flavor)) 10495 flavor = RPC_AUTH_MAXFLAVOR; 10496 10497 if (flavor != RPC_AUTH_MAXFLAVOR) { 10498 err = nfs4_lookup_root_sec(server, fhandle, fattr, 10499 flavor); 10500 if (!err) 10501 break; 10502 } 10503 } 10504 10505 if (flavor == RPC_AUTH_MAXFLAVOR) 10506 err = -EPERM; 10507 10508 out_freepage: 10509 put_page(page); 10510 if (err == -EACCES) 10511 return -EPERM; 10512 out: 10513 return err; 10514 } 10515 10516 static int _nfs41_test_stateid(struct nfs_server *server, 10517 const nfs4_stateid *stateid, 10518 const struct cred *cred) 10519 { 10520 int status; 10521 struct nfs41_test_stateid_args args = { 10522 .stateid = *stateid, 10523 }; 10524 struct nfs41_test_stateid_res res; 10525 struct rpc_message msg = { 10526 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID], 10527 .rpc_argp = &args, 10528 .rpc_resp = &res, 10529 .rpc_cred = cred, 10530 }; 10531 struct rpc_clnt *rpc_client = server->client; 10532 10533 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 10534 &rpc_client, &msg); 10535 10536 dprintk("NFS call test_stateid %p\n", stateid); 10537 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 10538 status = nfs4_call_sync_sequence(rpc_client, server, &msg, 10539 &args.seq_args, &res.seq_res); 10540 if (status != NFS_OK) { 10541 dprintk("NFS reply test_stateid: failed, %d\n", status); 10542 return status; 10543 } 10544 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status); 10545 return -res.status; 10546 } 10547 10548 static void nfs4_handle_delay_or_session_error(struct nfs_server *server, 10549 int err, struct nfs4_exception *exception) 10550 { 10551 exception->retry = 0; 10552 switch(err) { 10553 case -NFS4ERR_DELAY: 10554 case -NFS4ERR_RETRY_UNCACHED_REP: 10555 nfs4_handle_exception(server, err, exception); 10556 break; 10557 case -NFS4ERR_BADSESSION: 10558 case -NFS4ERR_BADSLOT: 10559 case 
-NFS4ERR_BAD_HIGH_SLOT: 10560 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 10561 case -NFS4ERR_DEADSESSION: 10562 nfs4_do_handle_exception(server, err, exception); 10563 } 10564 } 10565 10566 /** 10567 * nfs41_test_stateid - perform a TEST_STATEID operation 10568 * 10569 * @server: server / transport on which to perform the operation 10570 * @stateid: state ID to test 10571 * @cred: credential 10572 * 10573 * Returns NFS_OK if the server recognizes that "stateid" is valid. 10574 * Otherwise a negative NFS4ERR value is returned if the operation 10575 * failed or the state ID is not currently valid. 10576 */ 10577 static int nfs41_test_stateid(struct nfs_server *server, 10578 const nfs4_stateid *stateid, 10579 const struct cred *cred) 10580 { 10581 struct nfs4_exception exception = { 10582 .interruptible = true, 10583 }; 10584 int err; 10585 do { 10586 err = _nfs41_test_stateid(server, stateid, cred); 10587 nfs4_handle_delay_or_session_error(server, err, &exception); 10588 } while (exception.retry); 10589 return err; 10590 } 10591 10592 struct nfs_free_stateid_data { 10593 struct nfs_server *server; 10594 struct nfs41_free_stateid_args args; 10595 struct nfs41_free_stateid_res res; 10596 }; 10597 10598 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata) 10599 { 10600 struct nfs_free_stateid_data *data = calldata; 10601 nfs4_setup_sequence(data->server->nfs_client, 10602 &data->args.seq_args, 10603 &data->res.seq_res, 10604 task); 10605 } 10606 10607 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata) 10608 { 10609 struct nfs_free_stateid_data *data = calldata; 10610 10611 nfs41_sequence_done(task, &data->res.seq_res); 10612 10613 switch (task->tk_status) { 10614 case -NFS4ERR_DELAY: 10615 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN) 10616 rpc_restart_call_prepare(task); 10617 } 10618 } 10619 10620 static void nfs41_free_stateid_release(void *calldata) 10621 { 10622 struct nfs_free_stateid_data *data = calldata; 10623 struct nfs_client *clp = data->server->nfs_client; 10624 10625 nfs_put_client(clp); 10626 kfree(calldata); 10627 } 10628 10629 static const struct rpc_call_ops nfs41_free_stateid_ops = { 10630 .rpc_call_prepare = nfs41_free_stateid_prepare, 10631 .rpc_call_done = nfs41_free_stateid_done, 10632 .rpc_release = nfs41_free_stateid_release, 10633 }; 10634 10635 /** 10636 * nfs41_free_stateid - perform a FREE_STATEID operation 10637 * 10638 * @server: server / transport on which to perform the operation 10639 * @stateid: state ID to release 10640 * @cred: credential 10641 * @privileged: set to true if this call needs to be privileged 10642 * 10643 * Note: this function is always asynchronous. 
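 * The RPC is fire-and-forget: once the task has been submitted it is
 * released immediately and the stateid's type is set to
 * NFS4_FREED_STATEID_TYPE so later code can tell it has been freed.
 * nfs41_free_lock_state() below is a typical caller, freeing a lock
 * stateid with the lock owner's credential.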
10644 */ 10645 static int nfs41_free_stateid(struct nfs_server *server, 10646 nfs4_stateid *stateid, 10647 const struct cred *cred, 10648 bool privileged) 10649 { 10650 struct rpc_message msg = { 10651 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID], 10652 .rpc_cred = cred, 10653 }; 10654 struct rpc_task_setup task_setup = { 10655 .rpc_client = server->client, 10656 .rpc_message = &msg, 10657 .callback_ops = &nfs41_free_stateid_ops, 10658 .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE, 10659 }; 10660 struct nfs_free_stateid_data *data; 10661 struct rpc_task *task; 10662 struct nfs_client *clp = server->nfs_client; 10663 10664 if (!refcount_inc_not_zero(&clp->cl_count)) 10665 return -EIO; 10666 10667 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 10668 &task_setup.rpc_client, &msg); 10669 10670 dprintk("NFS call free_stateid %p\n", stateid); 10671 data = kmalloc(sizeof(*data), GFP_KERNEL); 10672 if (!data) 10673 return -ENOMEM; 10674 data->server = server; 10675 nfs4_stateid_copy(&data->args.stateid, stateid); 10676 10677 task_setup.callback_data = data; 10678 10679 msg.rpc_argp = &data->args; 10680 msg.rpc_resp = &data->res; 10681 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, privileged); 10682 task = rpc_run_task(&task_setup); 10683 if (IS_ERR(task)) 10684 return PTR_ERR(task); 10685 rpc_put_task(task); 10686 stateid->type = NFS4_FREED_STATEID_TYPE; 10687 return 0; 10688 } 10689 10690 static void 10691 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp) 10692 { 10693 const struct cred *cred = lsp->ls_state->owner->so_cred; 10694 10695 nfs41_free_stateid(server, &lsp->ls_stateid, cred, false); 10696 nfs4_free_lock_state(server, lsp); 10697 } 10698 10699 static bool nfs41_match_stateid(const nfs4_stateid *s1, 10700 const nfs4_stateid *s2) 10701 { 10702 trace_nfs41_match_stateid(s1, s2); 10703 10704 if (s1->type != s2->type) 10705 return false; 10706 10707 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0) 10708 return false; 10709 10710 if (s1->seqid == s2->seqid) 10711 return true; 10712 10713 return s1->seqid == 0 || s2->seqid == 0; 10714 } 10715 10716 #endif /* CONFIG_NFS_V4_1 */ 10717 10718 static bool nfs4_match_stateid(const nfs4_stateid *s1, 10719 const nfs4_stateid *s2) 10720 { 10721 trace_nfs4_match_stateid(s1, s2); 10722 10723 return nfs4_stateid_match(s1, s2); 10724 } 10725 10726 10727 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = { 10728 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 10729 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 10730 .recover_open = nfs4_open_reclaim, 10731 .recover_lock = nfs4_lock_reclaim, 10732 .establish_clid = nfs4_init_clientid, 10733 .detect_trunking = nfs40_discover_server_trunking, 10734 }; 10735 10736 #if defined(CONFIG_NFS_V4_1) 10737 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { 10738 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 10739 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 10740 .recover_open = nfs4_open_reclaim, 10741 .recover_lock = nfs4_lock_reclaim, 10742 .establish_clid = nfs41_init_clientid, 10743 .reclaim_complete = nfs41_proc_reclaim_complete, 10744 .detect_trunking = nfs41_discover_server_trunking, 10745 }; 10746 #endif /* CONFIG_NFS_V4_1 */ 10747 10748 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { 10749 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 10750 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 10751 .recover_open = nfs40_open_expired, 10752 .recover_lock = nfs4_lock_expired, 
10753 .establish_clid = nfs4_init_clientid, 10754 }; 10755 10756 #if defined(CONFIG_NFS_V4_1) 10757 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = { 10758 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 10759 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 10760 .recover_open = nfs41_open_expired, 10761 .recover_lock = nfs41_lock_expired, 10762 .establish_clid = nfs41_init_clientid, 10763 }; 10764 #endif /* CONFIG_NFS_V4_1 */ 10765 10766 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = { 10767 .sched_state_renewal = nfs4_proc_async_renew, 10768 .get_state_renewal_cred = nfs4_get_renew_cred, 10769 .renew_lease = nfs4_proc_renew, 10770 }; 10771 10772 #if defined(CONFIG_NFS_V4_1) 10773 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = { 10774 .sched_state_renewal = nfs41_proc_async_sequence, 10775 .get_state_renewal_cred = nfs4_get_machine_cred, 10776 .renew_lease = nfs4_proc_sequence, 10777 }; 10778 #endif 10779 10780 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = { 10781 .get_locations = _nfs40_proc_get_locations, 10782 .fsid_present = _nfs40_proc_fsid_present, 10783 }; 10784 10785 #if defined(CONFIG_NFS_V4_1) 10786 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = { 10787 .get_locations = _nfs41_proc_get_locations, 10788 .fsid_present = _nfs41_proc_fsid_present, 10789 }; 10790 #endif /* CONFIG_NFS_V4_1 */ 10791 10792 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = { 10793 .minor_version = 0, 10794 .init_caps = NFS_CAP_READDIRPLUS 10795 | NFS_CAP_ATOMIC_OPEN 10796 | NFS_CAP_POSIX_LOCK, 10797 .init_client = nfs40_init_client, 10798 .shutdown_client = nfs40_shutdown_client, 10799 .match_stateid = nfs4_match_stateid, 10800 .find_root_sec = nfs4_find_root_sec, 10801 .free_lock_state = nfs4_release_lockowner, 10802 .test_and_free_expired = nfs40_test_and_free_expired_stateid, 10803 .alloc_seqid = nfs_alloc_seqid, 10804 .call_sync_ops = &nfs40_call_sync_ops, 10805 .reboot_recovery_ops = &nfs40_reboot_recovery_ops, 10806 .nograce_recovery_ops = &nfs40_nograce_recovery_ops, 10807 .state_renewal_ops = &nfs40_state_renewal_ops, 10808 .mig_recovery_ops = &nfs40_mig_recovery_ops, 10809 }; 10810 10811 #if defined(CONFIG_NFS_V4_1) 10812 static struct nfs_seqid * 10813 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2) 10814 { 10815 return NULL; 10816 } 10817 10818 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { 10819 .minor_version = 1, 10820 .init_caps = NFS_CAP_READDIRPLUS 10821 | NFS_CAP_ATOMIC_OPEN 10822 | NFS_CAP_POSIX_LOCK 10823 | NFS_CAP_STATEID_NFSV41 10824 | NFS_CAP_ATOMIC_OPEN_V1 10825 | NFS_CAP_LGOPEN 10826 | NFS_CAP_MOVEABLE, 10827 .init_client = nfs41_init_client, 10828 .shutdown_client = nfs41_shutdown_client, 10829 .match_stateid = nfs41_match_stateid, 10830 .find_root_sec = nfs41_find_root_sec, 10831 .free_lock_state = nfs41_free_lock_state, 10832 .test_and_free_expired = nfs41_test_and_free_expired_stateid, 10833 .alloc_seqid = nfs_alloc_no_seqid, 10834 .session_trunk = nfs4_test_session_trunk, 10835 .call_sync_ops = &nfs41_call_sync_ops, 10836 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 10837 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 10838 .state_renewal_ops = &nfs41_state_renewal_ops, 10839 .mig_recovery_ops = &nfs41_mig_recovery_ops, 10840 }; 10841 #endif 10842 10843 #if defined(CONFIG_NFS_V4_2) 10844 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = { 10845 .minor_version = 2, 10846 .init_caps = 
NFS_CAP_READDIRPLUS 10847 | NFS_CAP_ATOMIC_OPEN 10848 | NFS_CAP_POSIX_LOCK 10849 | NFS_CAP_STATEID_NFSV41 10850 | NFS_CAP_ATOMIC_OPEN_V1 10851 | NFS_CAP_LGOPEN 10852 | NFS_CAP_ALLOCATE 10853 | NFS_CAP_COPY 10854 | NFS_CAP_OFFLOAD_CANCEL 10855 | NFS_CAP_COPY_NOTIFY 10856 | NFS_CAP_DEALLOCATE 10857 | NFS_CAP_ZERO_RANGE 10858 | NFS_CAP_SEEK 10859 | NFS_CAP_LAYOUTSTATS 10860 | NFS_CAP_CLONE 10861 | NFS_CAP_LAYOUTERROR 10862 | NFS_CAP_READ_PLUS 10863 | NFS_CAP_MOVEABLE 10864 | NFS_CAP_OFFLOAD_STATUS, 10865 .init_client = nfs41_init_client, 10866 .shutdown_client = nfs41_shutdown_client, 10867 .match_stateid = nfs41_match_stateid, 10868 .find_root_sec = nfs41_find_root_sec, 10869 .free_lock_state = nfs41_free_lock_state, 10870 .call_sync_ops = &nfs41_call_sync_ops, 10871 .test_and_free_expired = nfs41_test_and_free_expired_stateid, 10872 .alloc_seqid = nfs_alloc_no_seqid, 10873 .session_trunk = nfs4_test_session_trunk, 10874 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 10875 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 10876 .state_renewal_ops = &nfs41_state_renewal_ops, 10877 .mig_recovery_ops = &nfs41_mig_recovery_ops, 10878 }; 10879 #endif 10880 10881 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = { 10882 [0] = &nfs_v4_0_minor_ops, 10883 #if defined(CONFIG_NFS_V4_1) 10884 [1] = &nfs_v4_1_minor_ops, 10885 #endif 10886 #if defined(CONFIG_NFS_V4_2) 10887 [2] = &nfs_v4_2_minor_ops, 10888 #endif 10889 }; 10890 10891 static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size) 10892 { 10893 ssize_t error, error2, error3, error4 = 0; 10894 size_t left = size; 10895 10896 error = generic_listxattr(dentry, list, left); 10897 if (error < 0) 10898 return error; 10899 if (list) { 10900 list += error; 10901 left -= error; 10902 } 10903 10904 error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, left); 10905 if (error2 < 0) 10906 return error2; 10907 10908 if (list) { 10909 list += error2; 10910 left -= error2; 10911 } 10912 10913 error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, left); 10914 if (error3 < 0) 10915 return error3; 10916 if (list) { 10917 list += error3; 10918 left -= error3; 10919 } 10920 10921 if (!nfs_server_capable(d_inode(dentry), NFS_CAP_SECURITY_LABEL)) { 10922 error4 = security_inode_listsecurity(d_inode(dentry), list, left); 10923 if (error4 < 0) 10924 return error4; 10925 } 10926 10927 error += error2 + error3 + error4; 10928 if (size && error > size) 10929 return -ERANGE; 10930 return error; 10931 } 10932 10933 static void nfs4_enable_swap(struct inode *inode) 10934 { 10935 /* The state manager thread must always be running. 10936 * It will notice the client is a swapper, and stay put. 10937 */ 10938 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 10939 10940 nfs4_schedule_state_manager(clp); 10941 } 10942 10943 static void nfs4_disable_swap(struct inode *inode) 10944 { 10945 /* The state manager thread will now exit once it is 10946 * woken. 
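	 * Setting NFS4CLNT_RUN_MANAGER while clearing
	 * NFS4CLNT_MANAGER_AVAILABLE below wakes the manager for one more
	 * pass and stops it from being kept resident for swap.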
10947 */ 10948 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 10949 10950 set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state); 10951 clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state); 10952 wake_up_var(&clp->cl_state); 10953 } 10954 10955 static const struct inode_operations nfs4_dir_inode_operations = { 10956 .create = nfs_create, 10957 .lookup = nfs_lookup, 10958 .atomic_open = nfs_atomic_open, 10959 .link = nfs_link, 10960 .unlink = nfs_unlink, 10961 .symlink = nfs_symlink, 10962 .mkdir = nfs_mkdir, 10963 .rmdir = nfs_rmdir, 10964 .mknod = nfs_mknod, 10965 .rename = nfs_rename, 10966 .permission = nfs_permission, 10967 .getattr = nfs_getattr, 10968 .setattr = nfs_setattr, 10969 .listxattr = nfs4_listxattr, 10970 }; 10971 10972 static const struct inode_operations nfs4_file_inode_operations = { 10973 .permission = nfs_permission, 10974 .getattr = nfs_getattr, 10975 .setattr = nfs_setattr, 10976 .listxattr = nfs4_listxattr, 10977 }; 10978 10979 static struct nfs_server *nfs4_clone_server(struct nfs_server *source, 10980 struct nfs_fh *fh, struct nfs_fattr *fattr, 10981 rpc_authflavor_t flavor) 10982 { 10983 struct nfs_server *server; 10984 int error; 10985 10986 server = nfs_clone_server(source, fh, fattr, flavor); 10987 if (IS_ERR(server)) 10988 return server; 10989 10990 error = nfs4_delegation_hash_alloc(server); 10991 if (error) { 10992 nfs_free_server(server); 10993 return ERR_PTR(error); 10994 } 10995 10996 return server; 10997 } 10998 10999 const struct nfs_rpc_ops nfs_v4_clientops = { 11000 .version = 4, /* protocol version */ 11001 .dentry_ops = &nfs4_dentry_operations, 11002 .dir_inode_ops = &nfs4_dir_inode_operations, 11003 .file_inode_ops = &nfs4_file_inode_operations, 11004 .file_ops = &nfs4_file_operations, 11005 .getroot = nfs4_proc_get_root, 11006 .submount = nfs4_submount, 11007 .try_get_tree = nfs4_try_get_tree, 11008 .getattr = nfs4_proc_getattr, 11009 .setattr = nfs4_proc_setattr, 11010 .lookup = nfs4_proc_lookup, 11011 .lookupp = nfs4_proc_lookupp, 11012 .access = nfs4_proc_access, 11013 .readlink = nfs4_proc_readlink, 11014 .create = nfs4_proc_create, 11015 .remove = nfs4_proc_remove, 11016 .unlink_setup = nfs4_proc_unlink_setup, 11017 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare, 11018 .unlink_done = nfs4_proc_unlink_done, 11019 .rename_setup = nfs4_proc_rename_setup, 11020 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare, 11021 .rename_done = nfs4_proc_rename_done, 11022 .link = nfs4_proc_link, 11023 .symlink = nfs4_proc_symlink, 11024 .mkdir = nfs4_proc_mkdir, 11025 .rmdir = nfs4_proc_rmdir, 11026 .readdir = nfs4_proc_readdir, 11027 .mknod = nfs4_proc_mknod, 11028 .statfs = nfs4_proc_statfs, 11029 .fsinfo = nfs4_proc_fsinfo, 11030 .pathconf = nfs4_proc_pathconf, 11031 .set_capabilities = nfs4_server_capabilities, 11032 .decode_dirent = nfs4_decode_dirent, 11033 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare, 11034 .read_setup = nfs4_proc_read_setup, 11035 .read_done = nfs4_read_done, 11036 .write_setup = nfs4_proc_write_setup, 11037 .write_done = nfs4_write_done, 11038 .commit_setup = nfs4_proc_commit_setup, 11039 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare, 11040 .commit_done = nfs4_commit_done, 11041 .lock = nfs4_proc_lock, 11042 .clear_acl_cache = nfs4_zap_acl_attr, 11043 .close_context = nfs4_close_context, 11044 .open_context = nfs4_atomic_open, 11045 .have_delegation = nfs4_have_delegation, 11046 .return_delegation = nfs4_inode_return_delegation, 11047 .alloc_client = nfs4_alloc_client, 11048 .init_client = nfs4_init_client, 11049 
.free_client = nfs4_free_client, 11050 .create_server = nfs4_create_server, 11051 .clone_server = nfs4_clone_server, 11052 .discover_trunking = nfs4_discover_trunking, 11053 .enable_swap = nfs4_enable_swap, 11054 .disable_swap = nfs4_disable_swap, 11055 }; 11056 11057 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { 11058 .name = XATTR_NAME_NFSV4_ACL, 11059 .list = nfs4_xattr_list_nfs4_acl, 11060 .get = nfs4_xattr_get_nfs4_acl, 11061 .set = nfs4_xattr_set_nfs4_acl, 11062 }; 11063 11064 #if defined(CONFIG_NFS_V4_1) 11065 static const struct xattr_handler nfs4_xattr_nfs4_dacl_handler = { 11066 .name = XATTR_NAME_NFSV4_DACL, 11067 .list = nfs4_xattr_list_nfs4_dacl, 11068 .get = nfs4_xattr_get_nfs4_dacl, 11069 .set = nfs4_xattr_set_nfs4_dacl, 11070 }; 11071 11072 static const struct xattr_handler nfs4_xattr_nfs4_sacl_handler = { 11073 .name = XATTR_NAME_NFSV4_SACL, 11074 .list = nfs4_xattr_list_nfs4_sacl, 11075 .get = nfs4_xattr_get_nfs4_sacl, 11076 .set = nfs4_xattr_set_nfs4_sacl, 11077 }; 11078 #endif 11079 11080 #ifdef CONFIG_NFS_V4_2 11081 static const struct xattr_handler nfs4_xattr_nfs4_user_handler = { 11082 .prefix = XATTR_USER_PREFIX, 11083 .get = nfs4_xattr_get_nfs4_user, 11084 .set = nfs4_xattr_set_nfs4_user, 11085 }; 11086 #endif 11087 11088 const struct xattr_handler * const nfs4_xattr_handlers[] = { 11089 &nfs4_xattr_nfs4_acl_handler, 11090 #if defined(CONFIG_NFS_V4_1) 11091 &nfs4_xattr_nfs4_dacl_handler, 11092 &nfs4_xattr_nfs4_sacl_handler, 11093 #endif 11094 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 11095 &nfs4_xattr_nfs4_label_handler, 11096 #endif 11097 #ifdef CONFIG_NFS_V4_2 11098 &nfs4_xattr_nfs4_user_handler, 11099 #endif 11100 NULL 11101 }; 11102