/*
 *  fs/nfs/nfs4proc.c
 *
 *  Client-side procedure declarations for NFSv4.
 *
 *  Copyright (c) 2002 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *  Andy Adamson   <andros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/iversion.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "callback.h"
#include "pnfs.h"
#include "netns.h"
#include "sysfs.h"
#include "nfs4idmap.h"
#include "nfs4session.h"
#include "fscache.h"
#include "nfs40.h"
#include "nfs42.h"

#include "nfs4trace.h"

#define NFSDBG_FACILITY		NFSDBG_PROC

#define NFS4_BITMASK_SZ		3

#define NFS4_POLL_RETRY_MIN	(HZ/10)
#define NFS4_POLL_RETRY_MAX	(15*HZ)

/* file attributes which can be mapped to nfs attributes */
#define NFS4_VALID_ATTRS (ATTR_MODE \
	| ATTR_UID \
	| ATTR_GID \
	| ATTR_SIZE \
	| ATTR_ATIME \
	| ATTR_MTIME \
	| ATTR_CTIME \
	| ATTR_ATIME_SET \
	| ATTR_MTIME_SET)

struct nfs4_opendata;
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
			      struct nfs_fattr *fattr, struct inode *inode);
static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
			   struct nfs_fattr *fattr, struct iattr *sattr,
			   struct nfs_open_context *ctx, struct nfs4_label *ilabel);
static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
		const struct cred *cred,
		struct nfs4_slot *slot,
		bool is_privileged);
static int nfs41_test_stateid(struct nfs_server *, const nfs4_stateid *,
		const struct cred *);
static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *,
		const struct cred *, bool);

#ifdef CONFIG_NFS_V4_SECURITY_LABEL
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
	struct iattr *sattr, struct nfs4_label *label)
{
	struct lsm_context shim;
	int err;

	if (label == NULL)
		return NULL;

	if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
		return NULL;

	label->lfs = 0;
	label->pi = 0;
	label->len = 0;
	label->label = NULL;

	err = security_dentry_init_security(dentry, sattr->ia_mode,
				&dentry->d_name, NULL, &shim);
	if (err)
		return NULL;

	label->lsmid = shim.id;
	label->label = shim.context;
	label->len = shim.len;
	return label;
}
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{
	struct lsm_context shim;

	if (label) {
		shim.context = label->label;
		shim.len = label->len;
		shim.id = label->lsmid;
		security_release_secctx(&shim);
	}
}
static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{
	if (label)
		return server->attr_bitmask;

	return server->attr_bitmask_nl;
}
#else
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
	struct iattr *sattr, struct nfs4_label *l)
{ return NULL; }
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{ return; }
static inline u32 *
nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{ return server->attr_bitmask; }
#endif

/* Prevent leaks of NFSv4 errors into userland */
static int nfs4_map_errors(int err)
{
	if (err >= -1000)
		return err;
	switch (err) {
	case -NFS4ERR_RESOURCE:
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
	case -NFS4ERR_RETURNCONFLICT:
		return -EREMOTEIO;
	case -NFS4ERR_WRONGSEC:
	case -NFS4ERR_WRONG_CRED:
		return -EPERM;
	case -NFS4ERR_BADOWNER:
	case -NFS4ERR_BADNAME:
		return -EINVAL;
	case -NFS4ERR_SHARE_DENIED:
		return -EACCES;
	case -NFS4ERR_MINOR_VERS_MISMATCH:
		return -EPROTONOSUPPORT;
	case -NFS4ERR_FILE_OPEN:
		return -EBUSY;
	case -NFS4ERR_NOT_SAME:
		return -ENOTSYNC;
	case -ENETDOWN:
	case -ENETUNREACH:
		break;
	default:
		dprintk("%s could not handle NFSv4 error %d\n",
				__func__, -err);
		break;
	}
	return -EIO;
}

/*
 * This is our standard bitmap for GETATTR requests.
 */
const u32 nfs4_fattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_CREATE
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	FATTR4_WORD2_SECURITY_LABEL
#endif
};

static const u32 nfs4_pnfs_open_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_CREATE
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY,
	FATTR4_WORD2_MDSTHRESHOLD
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	| FATTR4_WORD2_SECURITY_LABEL
#endif
};

static const u32 nfs4_open_noattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_FILEID,
};

const u32 nfs4_statfs_bitmap[3] = {
	FATTR4_WORD0_FILES_AVAIL
	| FATTR4_WORD0_FILES_FREE
	| FATTR4_WORD0_FILES_TOTAL,
	FATTR4_WORD1_SPACE_AVAIL
	| FATTR4_WORD1_SPACE_FREE
	| FATTR4_WORD1_SPACE_TOTAL
};

const u32 nfs4_pathconf_bitmap[3] = {
	FATTR4_WORD0_MAXLINK
	| FATTR4_WORD0_MAXNAME,
	0
};

const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
			| FATTR4_WORD0_MAXREAD
			| FATTR4_WORD0_MAXWRITE
			| FATTR4_WORD0_LEASE_TIME,
			FATTR4_WORD1_TIME_DELTA
			| FATTR4_WORD1_FS_LAYOUT_TYPES,
			FATTR4_WORD2_LAYOUT_BLKSIZE
			| FATTR4_WORD2_CLONE_BLKSIZE
			| FATTR4_WORD2_CHANGE_ATTR_TYPE
			| FATTR4_WORD2_XATTR_SUPPORT
};

const u32 nfs4_fs_locations_bitmap[3] = {
	FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID
	| FATTR4_WORD0_FS_LOCATIONS,
	FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
};

static void nfs4_bitmap_copy_adjust(__u32 *dst, const __u32 *src,
				    struct inode *inode, unsigned long flags)
{
	unsigned long cache_validity;

	memcpy(dst, src, NFS4_BITMASK_SZ*sizeof(*dst));
	if (!inode || !nfs_have_read_or_write_delegation(inode))
		return;

	cache_validity = READ_ONCE(NFS_I(inode)->cache_validity) | flags;

	/* Remove the attributes over which we have full control */
	dst[1] &= ~FATTR4_WORD1_RAWDEV;
	if (!(cache_validity & NFS_INO_INVALID_SIZE))
		dst[0] &= ~FATTR4_WORD0_SIZE;

	if (!(cache_validity & NFS_INO_INVALID_CHANGE))
		dst[0] &= ~FATTR4_WORD0_CHANGE;

	if (!(cache_validity & NFS_INO_INVALID_MODE))
		dst[1] &= ~FATTR4_WORD1_MODE;
	if (!(cache_validity & NFS_INO_INVALID_OTHER))
		dst[1] &= ~(FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP);

	if (!(cache_validity & NFS_INO_INVALID_BTIME))
		dst[1] &= ~FATTR4_WORD1_TIME_CREATE;

	if (nfs_have_delegated_mtime(inode)) {
		if (!(cache_validity & NFS_INO_INVALID_ATIME))
			dst[1] &= ~(FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET);
		if (!(cache_validity & NFS_INO_INVALID_MTIME))
			dst[1] &= ~(FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET);
		if (!(cache_validity & NFS_INO_INVALID_CTIME))
			dst[1] &= ~(FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY_SET);
	} else if (nfs_have_delegated_atime(inode)) {
		if (!(cache_validity & NFS_INO_INVALID_ATIME))
			dst[1] &= ~(FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET);
	}
}

static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
		struct nfs4_readdir_arg *readdir)
{
	unsigned int attrs = FATTR4_WORD0_FILEID | FATTR4_WORD0_TYPE;
	__be32 *start, *p;

	if (cookie > 2) {
		readdir->cookie = cookie;
		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
		return;
	}

	readdir->cookie = 0;
	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
	if (cookie == 2)
		return;

	/*
	 * NFSv4 servers do not return entries for '.' and '..'
	 * Therefore, we fake these entries here.  We let '.'
	 * have cookie 0 and '..' have cookie 1.  Note that
	 * when talking to the server, we always send cookie 0
	 * instead of 1 or 2.
	 */
	start = p = kmap_atomic(*readdir->pages);

	if (cookie == 0) {
		*p++ = xdr_one;				/* next */
		*p++ = xdr_zero;			/* cookie, first word */
		*p++ = xdr_one;				/* cookie, second word */
		*p++ = xdr_one;				/* entry len */
		memcpy(p, ".\0\0\0", 4);		/* entry */
		p++;
		*p++ = xdr_one;				/* bitmap length */
		*p++ = htonl(attrs);			/* bitmap */
		*p++ = htonl(12);			/* attribute buffer length */
		*p++ = htonl(NF4DIR);
		p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
	}

	*p++ = xdr_one;					/* next */
	*p++ = xdr_zero;				/* cookie, first word */
	*p++ = xdr_two;					/* cookie, second word */
	*p++ = xdr_two;					/* entry len */
	memcpy(p, "..\0\0", 4);				/* entry */
	p++;
	*p++ = xdr_one;					/* bitmap length */
	*p++ = htonl(attrs);				/* bitmap */
	*p++ = htonl(12);				/* attribute buffer length */
	*p++ = htonl(NF4DIR);
	spin_lock(&dentry->d_lock);
	p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
	spin_unlock(&dentry->d_lock);

	readdir->pgbase = (char *)p - (char *)start;
	readdir->count -= readdir->pgbase;
	kunmap_atomic(start);
}

static void nfs4_fattr_set_prechange(struct nfs_fattr *fattr, u64 version)
{
	if (!(fattr->valid & NFS_ATTR_FATTR_PRECHANGE)) {
		fattr->pre_change_attr = version;
		fattr->valid |= NFS_ATTR_FATTR_PRECHANGE;
	}
}

static void nfs4_test_and_free_stateid(struct nfs_server *server,
		nfs4_stateid *stateid,
		const struct cred *cred)
{
	const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;

	ops->test_and_free_expired(server, stateid, cred);
}

static void __nfs4_free_revoked_stateid(struct nfs_server *server,
		nfs4_stateid *stateid,
		const struct cred *cred)
{
	stateid->type = NFS4_REVOKED_STATEID_TYPE;
	nfs4_test_and_free_stateid(server, stateid, cred);
}

static void nfs4_free_revoked_stateid(struct nfs_server *server,
		const nfs4_stateid *stateid,
		const struct cred *cred)
{
	nfs4_stateid tmp;

	nfs4_stateid_copy(&tmp, stateid);
	__nfs4_free_revoked_stateid(server, &tmp, cred);
}

static long nfs4_update_delay(long *timeout)
{
	long ret;
	if (!timeout)
		return NFS4_POLL_RETRY_MAX;
	if (*timeout <= 0)
		*timeout = NFS4_POLL_RETRY_MIN;
	if (*timeout > NFS4_POLL_RETRY_MAX)
		*timeout = NFS4_POLL_RETRY_MAX;
	ret = *timeout;
	*timeout <<= 1;
	return ret;
}

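/*
 * Sleep helpers used when retrying NFSv4 requests. Each call sleeps for
 * the current value of *timeout as returned by nfs4_update_delay(), which
 * doubles the delay on every call, starting at NFS4_POLL_RETRY_MIN and
 * capped at NFS4_POLL_RETRY_MAX. The "killable" variant only reacts to
 * fatal signals, while the "interruptible" variant also returns
 * -ERESTARTSYS on ordinary signals.
 */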
static int nfs4_delay_killable(long *timeout)
{
	might_sleep();

	if (unlikely(nfs_current_task_exiting()))
		return -EINTR;
	__set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
	schedule_timeout(nfs4_update_delay(timeout));
	if (!__fatal_signal_pending(current))
		return 0;
	return -EINTR;
}

static int nfs4_delay_interruptible(long *timeout)
{
	might_sleep();

	if (unlikely(nfs_current_task_exiting()))
		return -EINTR;
	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE_UNSAFE);
	schedule_timeout(nfs4_update_delay(timeout));
	if (!signal_pending(current))
		return 0;
	return __fatal_signal_pending(current) ? -EINTR : -ERESTARTSYS;
}

static int nfs4_delay(long *timeout, bool interruptible)
{
	if (interruptible)
		return nfs4_delay_interruptible(timeout);
	return nfs4_delay_killable(timeout);
}

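/*
 * Filter out stateids that state recovery cannot act on: only open, lock
 * and delegation stateids are returned; anything else maps to NULL.
 */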
static const nfs4_stateid *
nfs4_recoverable_stateid(const nfs4_stateid *stateid)
{
	if (!stateid)
		return NULL;
	switch (stateid->type) {
	case NFS4_OPEN_STATEID_TYPE:
	case NFS4_LOCK_STATEID_TYPE:
	case NFS4_DELEGATION_STATEID_TYPE:
		return stateid;
	default:
		break;
	}
	return NULL;
}

/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
static int nfs4_do_handle_exception(struct nfs_server *server,
		int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state *state = exception->state;
	const nfs4_stateid *stateid;
	struct inode *inode = exception->inode;
	int ret = errorcode;

	exception->delay = 0;
	exception->recovering = 0;
	exception->retry = 0;

	stateid = nfs4_recoverable_stateid(exception->stateid);
	if (stateid == NULL && state != NULL)
		stateid = nfs4_recoverable_stateid(&state->stateid);

	switch (errorcode) {
	case 0:
		return 0;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		if (inode != NULL && S_ISREG(inode->i_mode))
			pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_BAD_STATEID:
	case -NFS4ERR_PARTNER_NO_AUTH:
		if (inode != NULL && stateid != NULL) {
			nfs_inode_find_state_and_recover(inode,
					stateid);
			goto wait_on_recovery;
		}
		fallthrough;
	case -NFS4ERR_OPENMODE:
		if (inode) {
			int err;

			err = nfs_async_inode_return_delegation(inode,
					stateid);
			if (err == 0)
				goto wait_on_recovery;
			if (stateid != NULL && stateid->type == NFS4_DELEGATION_STATEID_TYPE) {
				exception->retry = 1;
				break;
			}
		}
		if (state == NULL)
			break;
		ret = nfs4_schedule_stateid_recovery(server, state);
		if (ret < 0)
			break;
		goto wait_on_recovery;
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_STALE_CLIENTID:
		nfs4_schedule_lease_recovery(clp);
		goto wait_on_recovery;
	case -NFS4ERR_MOVED:
		ret = nfs4_schedule_migration_recovery(server);
		if (ret < 0)
			break;
		goto wait_on_recovery;
	case -NFS4ERR_LEASE_MOVED:
		nfs4_schedule_lease_moved_recovery(clp);
		goto wait_on_recovery;
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		/* Handled in nfs41_sequence_process() */
		goto wait_on_recovery;
	case -NFS4ERR_FILE_OPEN:
		if (exception->timeout > HZ) {
			/* We have retried a decent amount, time to
			 * fail
			 */
			ret = -EBUSY;
			break;
		}
		fallthrough;
	case -NFS4ERR_DELAY:
		nfs_inc_server_stats(server, NFSIOS_DELAY);
		fallthrough;
	case -NFS4ERR_GRACE:
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
	case -NFS4ERR_RETURNCONFLICT:
		exception->delay = 1;
		return 0;

	case -NFS4ERR_RETRY_UNCACHED_REP:
	case -NFS4ERR_OLD_STATEID:
		exception->retry = 1;
		break;
	case -NFS4ERR_BADOWNER:
		/* The following works around a Linux server bug! */
	case -NFS4ERR_BADNAME:
		if (server->caps & NFS_CAP_UIDGID_NOMAP) {
			server->caps &= ~NFS_CAP_UIDGID_NOMAP;
			exception->retry = 1;
			printk(KERN_WARNING "NFS: v4 server %s "
					"does not accept raw "
					"uid/gids. "
					"Reenabling the idmapper.\n",
					server->nfs_client->cl_hostname);
		}
	}
	/* We failed to handle the error */
	return nfs4_map_errors(ret);
wait_on_recovery:
	exception->recovering = 1;
	return 0;
}

/*
 * Track the number of NFS4ERR_DELAY related retransmissions and return
 * EAGAIN if the 'softerr' mount option is set, and we've exceeded the limit
 * set by 'nfs_delay_retrans'.
 */
static int nfs4_exception_should_retrans(const struct nfs_server *server,
					 struct nfs4_exception *exception)
{
	if (server->flags & NFS_MOUNT_SOFTERR && nfs_delay_retrans >= 0) {
		if (exception->retrans++ >= (unsigned short)nfs_delay_retrans)
			return -EAGAIN;
	}
	return 0;
}

/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	int ret;

	ret = nfs4_do_handle_exception(server, errorcode, exception);
	if (exception->delay) {
		int ret2 = nfs4_exception_should_retrans(server, exception);
		if (ret2 < 0) {
			exception->retry = 0;
			return ret2;
		}
		ret = nfs4_delay(&exception->timeout,
				exception->interruptible);
		goto out_retry;
	}
	if (exception->recovering) {
		if (exception->task_is_privileged)
			return -EDEADLOCK;
		ret = nfs4_wait_clnt_recover(clp);
		if (test_bit(NFS_MIG_FAILED, &server->mig_status))
			return -EIO;
		goto out_retry;
	}
	return ret;
out_retry:
	if (ret == 0)
		exception->retry = 1;
	return ret;
}

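/*
 * Asynchronous (rpciod) counterpart of nfs4_handle_exception(): instead of
 * sleeping, it queues the rpc_task on the client's recovery waitqueue or
 * arms an rpc_delay() before asking the caller to retry the task.
 */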
static int
nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
		int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	int ret;

	if ((task->tk_rpc_status == -ENETDOWN ||
	     task->tk_rpc_status == -ENETUNREACH) &&
	    task->tk_flags & RPC_TASK_NETUNREACH_FATAL) {
		exception->delay = 0;
		exception->recovering = 0;
		exception->retry = 0;
		return -EIO;
	}

	ret = nfs4_do_handle_exception(server, errorcode, exception);
	if (exception->delay) {
		int ret2 = nfs4_exception_should_retrans(server, exception);
		if (ret2 < 0) {
			exception->retry = 0;
			return ret2;
		}
		rpc_delay(task, nfs4_update_delay(&exception->timeout));
		goto out_retry;
	}
	if (exception->recovering) {
		if (exception->task_is_privileged)
			return -EDEADLOCK;
		rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
		if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
			rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
		goto out_retry;
	}
	if (test_bit(NFS_MIG_FAILED, &server->mig_status))
		ret = -EIO;
	return ret;
out_retry:
	if (ret == 0) {
		exception->retry = 1;
		/*
		 * For NFS4ERR_MOVED, the client transport will need to
		 * be recomputed after migration recovery has completed.
		 */
		if (errorcode == -NFS4ERR_MOVED)
			rpc_task_release_transport(task);
	}
	return ret;
}

int
nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
			struct nfs4_state *state, long *timeout)
{
	struct nfs4_exception exception = {
		.state = state,
	};

	if (task->tk_status >= 0)
		return 0;
	if (timeout)
		exception.timeout = *timeout;
	task->tk_status = nfs4_async_handle_exception(task, server,
			task->tk_status,
			&exception);
	if (exception.delay && timeout)
		*timeout = exception.timeout;
	if (exception.retry)
		return -EAGAIN;
	return 0;
}

/*
 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
 * or 'false' otherwise.
 */
static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
{
	rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
	return (flavor == RPC_AUTH_GSS_KRB5I) || (flavor == RPC_AUTH_GSS_KRB5P);
}

void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
{
	spin_lock(&clp->cl_lock);
	if (time_before(clp->cl_last_renewal, timestamp))
		clp->cl_last_renewal = timestamp;
	spin_unlock(&clp->cl_lock);
}

void renew_lease(const struct nfs_server *server, unsigned long timestamp)
{
	struct nfs_client *clp = server->nfs_client;

	if (!nfs4_has_session(clp))
		do_renew_lease(clp, timestamp);
}

void nfs4_init_sequence(struct nfs_client *clp,
			struct nfs4_sequence_args *args,
			struct nfs4_sequence_res *res, int cache_reply,
			int privileged)
{
	args->sa_slot = NULL;
	args->sa_cache_this = cache_reply;
	args->sa_privileged = privileged;

	res->sr_slot = NULL;
	res->sr_slot_ops = clp->cl_mvops->sequence_slot_ops;
}

static void nfs41_release_slot(struct nfs4_slot *slot)
{
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	bool send_new_highest_used_slotid = false;

	if (!slot)
		return;
	tbl = slot->table;
	session = tbl->session;

	/* Bump the slot sequence number */
	if (slot->seq_done)
		slot->seq_nr++;
	slot->seq_done = 0;

	spin_lock(&tbl->slot_tbl_lock);
	/* Be nice to the server: try to ensure that the last transmitted
	 * value for highest_user_slotid <= target_highest_slotid
	 */
	if (tbl->highest_used_slotid > tbl->target_highest_slotid)
		send_new_highest_used_slotid = true;

	if (nfs41_wake_and_assign_slot(tbl, slot)) {
		send_new_highest_used_slotid = false;
		goto out_unlock;
	}
	nfs4_free_slot(tbl, slot);

	if (tbl->highest_used_slotid != NFS4_NO_SLOT)
		send_new_highest_used_slotid = false;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);
	if (send_new_highest_used_slotid)
		nfs41_notify_server(session->clp);
	if (waitqueue_active(&tbl->slot_waitq))
		wake_up_all(&tbl->slot_waitq);
}

static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
{
	nfs41_release_slot(res->sr_slot);
	res->sr_slot = NULL;
}

static void nfs4_slot_sequence_record_sent(struct nfs4_slot *slot,
		u32 seqnr)
{
	if ((s32)(seqnr - slot->seq_nr_highest_sent) > 0)
		slot->seq_nr_highest_sent = seqnr;
}
static void nfs4_slot_sequence_acked(struct nfs4_slot *slot, u32 seqnr)
{
	nfs4_slot_sequence_record_sent(slot, seqnr);
	slot->seq_nr_last_acked = seqnr;
}

static void nfs4_probe_sequence(struct nfs_client *client, const struct cred *cred,
				struct nfs4_slot *slot)
{
	struct rpc_task *task = _nfs41_proc_sequence(client, cred, slot, true);
	if (!IS_ERR(task))
		rpc_put_task_async(task);
}

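/*
 * Process the result of the SEQUENCE operation attached to an NFSv4.1
 * compound: on success the slot sequence number is acknowledged and the
 * clientid lease is renewed; SEQ_MISORDERED, BADSLOT, false-retry and
 * session errors are handled by retrying, probing the slot with a lone
 * SEQUENCE, or scheduling session recovery. Returns 0 if the task is
 * being restarted, 1 otherwise.
 */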
static int nfs41_sequence_process(struct rpc_task *task,
		struct nfs4_sequence_res *res)
{
	struct nfs4_session *session;
	struct nfs4_slot *slot = res->sr_slot;
	struct nfs_client *clp;
	int status;
	int ret = 1;

	if (slot == NULL)
		goto out_noaction;
	/* don't increment the sequence number if the task wasn't sent */
	if (!RPC_WAS_SENT(task) || slot->seq_done)
		goto out;

	session = slot->table->session;
	clp = session->clp;

	trace_nfs4_sequence_done(session, res);

	status = res->sr_status;
	if (task->tk_status == -NFS4ERR_DEADSESSION)
		status = -NFS4ERR_DEADSESSION;

	/* Check the SEQUENCE operation status */
	switch (status) {
	case 0:
		/* Mark this sequence number as having been acked */
		nfs4_slot_sequence_acked(slot, slot->seq_nr);
		/* Update the slot's sequence and clientid lease timer */
		slot->seq_done = 1;
		do_renew_lease(clp, res->sr_timestamp);
		/* Check sequence flags */
		nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags,
				!!slot->privileged);
		nfs41_update_target_slotid(slot->table, slot, res);
		break;
	case 1:
		/*
		 * sr_status remains 1 if an RPC level error occurred.
		 * The server may or may not have processed the sequence
		 * operation.
		 */
		nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
		slot->seq_done = 1;
		goto out;
	case -NFS4ERR_DELAY:
		/* The server detected a resend of the RPC call and
		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
		 * of RFC5661.
		 */
		dprintk("%s: slot=%u seq=%u: Operation in progress\n",
			__func__,
			slot->slot_nr,
			slot->seq_nr);
		goto out_retry;
	case -NFS4ERR_RETRY_UNCACHED_REP:
	case -NFS4ERR_SEQ_FALSE_RETRY:
		/*
		 * The server thinks we tried to replay a request.
		 * Retry the call after bumping the sequence ID.
		 */
		nfs4_slot_sequence_acked(slot, slot->seq_nr);
		goto retry_new_seq;
	case -NFS4ERR_BADSLOT:
		/*
		 * The slot id we used was probably retired. Try again
		 * using a different slot id.
		 */
		if (slot->slot_nr < slot->table->target_highest_slotid)
			goto session_recover;
		goto retry_nowait;
	case -NFS4ERR_SEQ_MISORDERED:
		nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
		/*
		 * Were one or more calls using this slot interrupted?
		 * If the server never received the request, then our
		 * transmitted slot sequence number may be too high. However,
		 * if the server did receive the request then it might
		 * accidentally give us a reply with a mismatched operation.
		 * We can sort this out by sending a lone sequence operation
		 * to the server on the same slot.
		 */
		if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) {
			slot->seq_nr--;
			if (task->tk_msg.rpc_proc != &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE]) {
				nfs4_probe_sequence(clp, task->tk_msg.rpc_cred, slot);
				res->sr_slot = NULL;
			}
			goto retry_nowait;
		}
		/*
		 * RFC5661:
		 * A retry might be sent while the original request is
		 * still in progress on the replier. The replier SHOULD
		 * deal with the issue by returning NFS4ERR_DELAY as the
		 * reply to SEQUENCE or CB_SEQUENCE operation, but
		 * implementations MAY return NFS4ERR_SEQ_MISORDERED.
		 *
		 * Restart the search after a delay.
		 */
		slot->seq_nr = slot->seq_nr_highest_sent;
		goto out_retry;
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		goto session_recover;
	default:
		/* Just update the slot sequence no. */
		slot->seq_done = 1;
	}
out:
	/* The session may be reset by one of the error handlers. */
	dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
out_noaction:
	return ret;
session_recover:
	set_bit(NFS4_SLOT_TBL_DRAINING, &session->fc_slot_table.slot_tbl_state);
	nfs4_schedule_session_recovery(session, status);
	dprintk("%s ERROR: %d Reset session\n", __func__, status);
	nfs41_sequence_free_slot(res);
	goto out;
retry_new_seq:
	++slot->seq_nr;
retry_nowait:
	if (rpc_restart_call_prepare(task)) {
		nfs41_sequence_free_slot(res);
		task->tk_status = 0;
		ret = 0;
	}
	goto out;
out_retry:
	if (!rpc_restart_call(task))
		goto out;
	rpc_delay(task, NFS4_POLL_RETRY_MAX);
	return 0;
}

int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (!nfs41_sequence_process(task, res))
		return 0;
	if (res->sr_slot != NULL)
		nfs41_sequence_free_slot(res);
	return 1;
}
EXPORT_SYMBOL_GPL(nfs41_sequence_done);

static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;

	dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);

	nfs4_setup_sequence(data->seq_server->nfs_client,
			    data->seq_args, data->seq_res, task);
}

static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;

	nfs41_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs41_call_sync_ops = {
	.rpc_call_prepare = nfs41_call_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
};

static void nfs41_sequence_res_init(struct nfs4_sequence_res *res)
{
	res->sr_timestamp = jiffies;
	res->sr_status_flags = 0;
	res->sr_status = 1;
}

static
void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
		struct nfs4_sequence_res *res,
		struct nfs4_slot *slot)
{
	if (!slot)
		return;
	slot->privileged = args->sa_privileged ? 1 : 0;
	args->sa_slot = slot;

	res->sr_slot = slot;
}

static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
{
	if (res->sr_slot != NULL)
		res->sr_slot_ops->free_slot(res);
}

static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (res->sr_slot == NULL)
		return 1;
	return res->sr_slot_ops->process(task, res);
}

int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (res->sr_slot == NULL)
		return 1;
	return res->sr_slot_ops->done(task, res);
}
EXPORT_SYMBOL_GPL(nfs4_sequence_done);

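/*
 * Reserve a session slot for the compound described by args/res and start
 * the RPC call. If the slot table is draining (and the request is not
 * privileged) or no slot is currently available, the task is put to sleep
 * on the slot table waitqueue and -EAGAIN is returned.
 */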
int nfs4_setup_sequence(struct nfs_client *client,
			struct nfs4_sequence_args *args,
			struct nfs4_sequence_res *res,
			struct rpc_task *task)
{
	struct nfs4_session *session = nfs4_get_session(client);
	struct nfs4_slot_table *tbl = client->cl_slot_tbl;
	struct nfs4_slot *slot;

	/* slot already allocated? */
	if (res->sr_slot != NULL)
		goto out_start;

	if (session)
		tbl = &session->fc_slot_table;

	spin_lock(&tbl->slot_tbl_lock);
	/* The state manager will wait until the slot table is empty */
	if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
		goto out_sleep;

	slot = nfs4_alloc_slot(tbl);
	if (IS_ERR(slot)) {
		if (slot == ERR_PTR(-ENOMEM))
			goto out_sleep_timeout;
		goto out_sleep;
	}
	spin_unlock(&tbl->slot_tbl_lock);

	nfs4_sequence_attach_slot(args, res, slot);

	trace_nfs4_setup_sequence(session, args);
out_start:
	nfs41_sequence_res_init(res);
	rpc_call_start(task);
	return 0;
out_sleep_timeout:
	/* Try again in 1/4 second */
	if (args->sa_privileged)
		rpc_sleep_on_priority_timeout(&tbl->slot_tbl_waitq, task,
				jiffies + (HZ >> 2), RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on_timeout(&tbl->slot_tbl_waitq, task,
				NULL, jiffies + (HZ >> 2));
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
out_sleep:
	if (args->sa_privileged)
		rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
				RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
}
EXPORT_SYMBOL_GPL(nfs4_setup_sequence);

static int nfs4_call_sync_custom(struct rpc_task_setup *task_setup)
{
	int ret;
	struct rpc_task *task;

	task = rpc_run_task(task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);

	ret = task->tk_status;
	rpc_put_task(task);
	return ret;
}

static int nfs4_do_call_sync(struct rpc_clnt *clnt,
			     struct nfs_server *server,
			     struct rpc_message *msg,
			     struct nfs4_sequence_args *args,
			     struct nfs4_sequence_res *res,
			     unsigned short task_flags)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_call_sync_data data = {
		.seq_server = server,
		.seq_args = args,
		.seq_res = res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = clp->cl_mvops->call_sync_ops,
		.callback_data = &data,
		.flags = task_flags,
	};

	return nfs4_call_sync_custom(&task_setup);
}

int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
			    struct nfs_server *server,
			    struct rpc_message *msg,
			    struct nfs4_sequence_args *args,
			    struct nfs4_sequence_res *res)
{
	unsigned short task_flags = 0;

	if (server->caps & NFS_CAP_MOVEABLE)
		task_flags = RPC_TASK_MOVEABLE;
	return nfs4_do_call_sync(clnt, server, msg, args, res, task_flags);
}

int nfs4_call_sync(struct rpc_clnt *clnt,
		   struct nfs_server *server,
		   struct rpc_message *msg,
		   struct nfs4_sequence_args *args,
		   struct nfs4_sequence_res *res,
		   int cache_reply)
{
	nfs4_init_sequence(server->nfs_client, args, res, cache_reply, 0);
	return nfs4_call_sync_sequence(clnt, server, msg, args, res);
}

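/*
 * Link count helpers: adjust the cached nlink value and mark the change,
 * ctime and nlink attributes as needing revalidation.
 */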
static void
nfs4_inc_nlink_locked(struct inode *inode)
{
	nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
				     NFS_INO_INVALID_CTIME |
				     NFS_INO_INVALID_NLINK);
	inc_nlink(inode);
}

static void
nfs4_inc_nlink(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	nfs4_inc_nlink_locked(inode);
	spin_unlock(&inode->i_lock);
}

static void
nfs4_dec_nlink_locked(struct inode *inode)
{
	nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
				     NFS_INO_INVALID_CTIME |
				     NFS_INO_INVALID_NLINK);
	drop_nlink(inode);
}

static void
nfs4_update_changeattr_locked(struct inode *inode,
		struct nfs4_change_info *cinfo,
		unsigned long timestamp, unsigned long cache_validity)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	u64 change_attr = inode_peek_iversion_raw(inode);

	if (!nfs_have_delegated_mtime(inode))
		cache_validity |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME;
	if (S_ISDIR(inode->i_mode))
		cache_validity |= NFS_INO_INVALID_DATA;

	switch (NFS_SERVER(inode)->change_attr_type) {
	case NFS4_CHANGE_TYPE_IS_UNDEFINED:
		if (cinfo->after == change_attr)
			goto out;
		break;
	default:
		if ((s64)(change_attr - cinfo->after) >= 0)
			goto out;
	}

	inode_set_iversion_raw(inode, cinfo->after);
	if (!cinfo->atomic || cinfo->before != change_attr) {
		if (S_ISDIR(inode->i_mode))
			nfs_force_lookup_revalidate(inode);

		if (!nfs_have_delegated_attributes(inode))
			cache_validity |=
				NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
				NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER |
				NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
				NFS_INO_INVALID_MODE | NFS_INO_INVALID_BTIME |
				NFS_INO_INVALID_XATTR;
		nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
	}
	nfsi->attrtimeo_timestamp = jiffies;
	nfsi->read_cache_jiffies = timestamp;
	nfsi->attr_gencount = nfs_inc_attr_generation_counter();
	nfsi->cache_validity &= ~NFS_INO_INVALID_CHANGE;
out:
	nfs_set_cache_invalid(inode, cache_validity);
}

void
nfs4_update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
		unsigned long timestamp, unsigned long cache_validity)
{
	spin_lock(&dir->i_lock);
	nfs4_update_changeattr_locked(dir, cinfo, timestamp, cache_validity);
	spin_unlock(&dir->i_lock);
}

struct nfs4_open_createattrs {
	struct nfs4_label *label;
	struct iattr *sattr;
	const __u32 verf[2];
};

static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
		int err, struct nfs4_exception *exception)
{
	if (err != -EINVAL)
		return false;
	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
		return false;
	server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
	exception->retry = 1;
	return true;
}

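/*
 * Map an open context's fmode to what we request from the server: the
 * ACCESS check covers read/write/exec, but for the OPEN share mode an
 * FMODE_EXEC request is expressed as a read open.
 */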
static fmode_t _nfs4_ctx_to_accessmode(const struct nfs_open_context *ctx)
{
	return ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
}

static fmode_t _nfs4_ctx_to_openmode(const struct nfs_open_context *ctx)
{
	fmode_t ret = ctx->mode & (FMODE_READ|FMODE_WRITE);

	return (ctx->mode & FMODE_EXEC) ? FMODE_READ | ret : ret;
}

static u32
nfs4_fmode_to_share_access(fmode_t fmode)
{
	u32 res = 0;

	switch (fmode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
		res = NFS4_SHARE_ACCESS_READ;
		break;
	case FMODE_WRITE:
		res = NFS4_SHARE_ACCESS_WRITE;
		break;
	case FMODE_READ|FMODE_WRITE:
		res = NFS4_SHARE_ACCESS_BOTH;
	}
	return res;
}

static u32
nfs4_map_atomic_open_share(struct nfs_server *server,
		fmode_t fmode, int openflags)
{
	u32 res = nfs4_fmode_to_share_access(fmode);

	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
		goto out;
	/* Want no delegation if we're using O_DIRECT */
	if (openflags & O_DIRECT) {
		res |= NFS4_SHARE_WANT_NO_DELEG;
		goto out;
	}
	/* res |= NFS4_SHARE_WANT_NO_PREFERENCE; */
	if (server->caps & NFS_CAP_DELEGTIME)
		res |= NFS4_SHARE_WANT_DELEG_TIMESTAMPS;
	if (server->caps & NFS_CAP_OPEN_XOR)
		res |= NFS4_SHARE_WANT_OPEN_XOR_DELEGATION;
out:
	return res;
}

static enum open_claim_type4
nfs4_map_atomic_open_claim(struct nfs_server *server,
		enum open_claim_type4 claim)
{
	if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
		return claim;
	switch (claim) {
	default:
		return claim;
	case NFS4_OPEN_CLAIM_FH:
		return NFS4_OPEN_CLAIM_NULL;
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
		return NFS4_OPEN_CLAIM_DELEGATE_CUR;
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		return NFS4_OPEN_CLAIM_DELEGATE_PREV;
	}
}

static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
	p->o_res.f_attr = &p->f_attr;
	p->o_res.seqid = p->o_arg.seqid;
	p->c_res.seqid = p->c_arg.seqid;
	p->o_res.server = p->o_arg.server;
	p->o_res.access_request = p->o_arg.access;
	nfs_fattr_init(&p->f_attr);
	nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}

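/*
 * Allocate and initialise the nfs4_opendata that carries a single OPEN
 * (and OPEN_CONFIRM) compound: it pins the dentry, its parent directory
 * and the state owner, allocates the open seqid and labels, and fills in
 * the OPEN arguments according to the claim type and create attributes.
 */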
static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
		const struct nfs4_open_createattrs *c,
		enum open_claim_type4 claim,
		gfp_t gfp_mask)
{
	struct dentry *parent = dget_parent(dentry);
	struct inode *dir = d_inode(parent);
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
	struct nfs4_label *label = (c != NULL) ? c->label : NULL;
	struct nfs4_opendata *p;

	p = kzalloc(sizeof(*p), gfp_mask);
	if (p == NULL)
		goto err;

	p->f_attr.label = nfs4_label_alloc(server, gfp_mask);
	if (IS_ERR(p->f_attr.label))
		goto err_free_p;

	p->a_label = nfs4_label_alloc(server, gfp_mask);
	if (IS_ERR(p->a_label))
		goto err_free_f;

	alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
	p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
	if (IS_ERR(p->o_arg.seqid))
		goto err_free_label;
	nfs_sb_active(dentry->d_sb);
	p->dentry = dget(dentry);
	p->dir = parent;
	p->owner = sp;
	atomic_inc(&sp->so_count);
	p->o_arg.open_flags = flags;
	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
	p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
	p->o_arg.share_access = nfs4_map_atomic_open_share(server,
							   fmode, flags);
	if (flags & O_CREAT) {
		p->o_arg.umask = current_umask();
		p->o_arg.label = nfs4_label_copy(p->a_label, label);
		if (c->sattr != NULL && c->sattr->ia_valid != 0) {
			p->o_arg.u.attrs = &p->attrs;
			memcpy(&p->attrs, c->sattr, sizeof(p->attrs));

			memcpy(p->o_arg.u.verifier.data, c->verf,
					sizeof(p->o_arg.u.verifier.data));
		}
	}
	/* ask server to check for all possible rights as results
	 * are cached */
	switch (p->o_arg.claim) {
	default:
		break;
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
				  NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE |
				  NFS4_ACCESS_EXECUTE |
				  nfs_access_xattr_mask(server);
	}
	p->o_arg.clientid = server->nfs_client->cl_clientid;
	p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
	p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
	p->o_arg.name = &dentry->d_name;
	p->o_arg.server = server;
	p->o_arg.bitmask = nfs4_bitmask(server, label);
	p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
	switch (p->o_arg.claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
	case NFS4_OPEN_CLAIM_DELEGATE_PREV:
		p->o_arg.fh = NFS_FH(dir);
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
	case NFS4_OPEN_CLAIM_FH:
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		p->o_arg.fh = NFS_FH(d_inode(dentry));
	}
	p->c_arg.fh = &p->o_res.fh;
	p->c_arg.stateid = &p->o_res.stateid;
	p->c_arg.seqid = p->o_arg.seqid;
	nfs4_init_opendata_res(p);
	kref_init(&p->kref);
	return p;

err_free_label:
	nfs4_label_free(p->a_label);
err_free_f:
	nfs4_label_free(p->f_attr.label);
err_free_p:
	kfree(p);
err:
	dput(parent);
	return NULL;
}

static void nfs4_opendata_free(struct kref *kref)
{
	struct nfs4_opendata *p = container_of(kref,
			struct nfs4_opendata, kref);
	struct super_block *sb = p->dentry->d_sb;

	nfs4_lgopen_release(p->lgp);
	nfs_free_seqid(p->o_arg.seqid);
	nfs4_sequence_free_slot(&p->o_res.seq_res);
	if (p->state != NULL)
		nfs4_put_open_state(p->state);
	nfs4_put_state_owner(p->owner);

	nfs4_label_free(p->a_label);
	nfs4_label_free(p->f_attr.label);

	dput(p->dir);
	dput(p->dentry);
	nfs_sb_deactive(sb);
	nfs_fattr_free_names(&p->f_attr);
	kfree(p->f_attr.mdsthreshold);
	kfree(p);
}

static void nfs4_opendata_put(struct nfs4_opendata *p)
{
	if (p != NULL)
		kref_put(&p->kref, nfs4_opendata_free);
}

static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
		fmode_t fmode)
{
	switch(fmode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_READ|FMODE_WRITE:
		return state->n_rdwr != 0;
	case FMODE_WRITE:
		return state->n_wronly != 0;
	case FMODE_READ:
		return state->n_rdonly != 0;
	}
	WARN_ON_ONCE(1);
	return false;
}

static int can_open_cached(struct nfs4_state *state, fmode_t mode,
		int open_mode, enum open_claim_type4 claim)
{
	int ret = 0;

	if (open_mode & (O_EXCL|O_TRUNC))
		goto out;
	switch (claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		goto out;
	default:
		break;
	}
	switch (mode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_READ:
		ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
			&& state->n_rdonly != 0;
		break;
	case FMODE_WRITE:
		ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
			&& state->n_wronly != 0;
		break;
	case FMODE_READ|FMODE_WRITE:
		ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
			&& state->n_rdwr != 0;
	}
out:
	return ret;
}

static bool can_open_delegated(const struct inode *inode, fmode_t fmode,
		enum open_claim_type4 claim, nfs4_stateid *stateid)
{
	struct nfs_delegation *delegation;
	bool ret = false;

	delegation = nfs4_get_valid_delegation(inode);
	if (!delegation)
		return false;
	if ((delegation->type & fmode) != fmode)
		goto out_put_delegation;

	switch (claim) {
	case NFS4_OPEN_CLAIM_PREVIOUS:
		if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
			break;
		fallthrough;
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		nfs_mark_delegation_referenced(delegation);
		/* Save the delegation stateid */
		if (stateid)
			nfs4_stateid_copy(stateid, &delegation->stateid);
		ret = true;
		break;
	default:
		break;
	}

out_put_delegation:
	nfs_put_delegation(delegation);
	return ret;
}

static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
{
	switch (fmode) {
	case FMODE_WRITE:
		state->n_wronly++;
		break;
	case FMODE_READ:
		state->n_rdonly++;
		break;
	case FMODE_READ|FMODE_WRITE:
		state->n_rdwr++;
	}
	nfs4_state_set_mode_locked(state, state->state | fmode);
}

static bool nfs_open_stateid_recover_openmode(struct nfs4_state *state)
{
	if (state->n_rdonly && !test_bit(NFS_O_RDONLY_STATE, &state->flags))
		return true;
	if (state->n_wronly && !test_bit(NFS_O_WRONLY_STATE, &state->flags))
		return true;
	if (state->n_rdwr && !test_bit(NFS_O_RDWR_STATE, &state->flags))
		return true;
	return false;
}

static void nfs_state_log_update_open_stateid(struct nfs4_state *state)
{
	if (test_and_clear_bit(NFS_STATE_CHANGE_WAIT, &state->flags))
		wake_up_all(&state->waitq);
}

static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
{
	struct nfs_client *clp = state->owner->so_server->nfs_client;
	bool need_recover = false;

	if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
		need_recover = true;
	if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
		need_recover = true;
	if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
		need_recover = true;
	if (need_recover)
		nfs4_state_mark_reclaim_nograce(clp, state);
}

/*
 * Check for whether or not the caller may update the open stateid
 * to the value passed in by stateid.
 *
 * Note: This function relies heavily on the server implementing
 * RFC7530 Section 9.1.4.2, and RFC5661 Section 8.2.2
 * correctly.
 * i.e. The stateid seqids have to be initialised to 1, and
 * are then incremented on every state transition.
 */
static bool nfs_stateid_is_sequential(struct nfs4_state *state,
		const nfs4_stateid *stateid)
{
	if (test_bit(NFS_OPEN_STATE, &state->flags)) {
		/* The common case - we're updating to a new sequence number */
		if (nfs4_stateid_match_other(stateid, &state->open_stateid)) {
			if (nfs4_stateid_is_next(&state->open_stateid, stateid))
				return true;
			return false;
		}
		/* The server returned a new stateid */
	}
	/* This is the first OPEN in this generation */
	if (stateid->seqid == cpu_to_be32(1))
		return true;
	return false;
}

static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
{
	if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
		return;
	if (state->n_wronly)
		set_bit(NFS_O_WRONLY_STATE, &state->flags);
	if (state->n_rdonly)
		set_bit(NFS_O_RDONLY_STATE, &state->flags);
	if (state->n_rdwr)
		set_bit(NFS_O_RDWR_STATE, &state->flags);
	set_bit(NFS_OPEN_STATE, &state->flags);
}

static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
		nfs4_stateid *stateid, fmode_t fmode)
{
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	switch (fmode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_WRITE:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		break;
	case FMODE_READ:
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		break;
	case 0:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		clear_bit(NFS_OPEN_STATE, &state->flags);
	}
	if (stateid == NULL)
		return;
	/* Handle OPEN+OPEN_DOWNGRADE races */
	if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
	    !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
		nfs_resync_open_stateid_locked(state);
		goto out;
	}
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
	trace_nfs4_open_stateid_update(state->inode, stateid, 0);
out:
	nfs_state_log_update_open_stateid(state);
}

static void nfs_clear_open_stateid(struct nfs4_state *state,
	nfs4_stateid *arg_stateid,
	nfs4_stateid *stateid, fmode_t fmode)
{
	write_seqlock(&state->seqlock);
	/* Ignore, if the CLOSE argument doesn't match the current stateid */
	if (nfs4_state_match_open_stateid_other(state, arg_stateid))
		nfs_clear_open_stateid_locked(state, stateid, fmode);
	write_sequnlock(&state->seqlock);
	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
}

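/*
 * Update state->open_stateid from an OPEN reply. On NFSv4.1 the update is
 * deferred (waiting on state->waitq) until the incoming stateid is the
 * next one in sequence, so that replies the server processed in order are
 * also applied in order here; NFSv4.0 relies on open seqids instead.
 */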
static void nfs_set_open_stateid_locked(struct nfs4_state *state,
		const nfs4_stateid *stateid, nfs4_stateid *freeme)
	__must_hold(&state->owner->so_lock)
	__must_hold(&state->seqlock)
	__must_hold(RCU)

{
	DEFINE_WAIT(wait);
	int status = 0;
	for (;;) {

		if (nfs_stateid_is_sequential(state, stateid))
			break;

		if (status) {
			if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
			    !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
				trace_nfs4_open_stateid_update_skip(state->inode,
						stateid, status);
				return;
			} else {
				break;
			}
		}

		/* Rely on seqids for serialisation with NFSv4.0 */
		if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client))
			break;

		set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
		prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
		/*
		 * Ensure we process the state changes in the same order
		 * in which the server processed them by delaying the
		 * update of the stateid until we are in sequence.
		 */
		write_sequnlock(&state->seqlock);
		spin_unlock(&state->owner->so_lock);
		rcu_read_unlock();
		trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);

		if (!fatal_signal_pending(current) &&
		    !nfs_current_task_exiting()) {
			if (schedule_timeout(5*HZ) == 0)
				status = -EAGAIN;
			else
				status = 0;
		} else
			status = -EINTR;
		finish_wait(&state->waitq, &wait);
		rcu_read_lock();
		spin_lock(&state->owner->so_lock);
		write_seqlock(&state->seqlock);
	}

	if (test_bit(NFS_OPEN_STATE, &state->flags) &&
	    !nfs4_stateid_match_other(stateid, &state->open_stateid)) {
		nfs4_stateid_copy(freeme, &state->open_stateid);
		nfs_test_and_clear_all_open_stateid(state);
	}

	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
	trace_nfs4_open_stateid_update(state->inode, stateid, status);
	nfs_state_log_update_open_stateid(state);
}

static void nfs_state_set_open_stateid(struct nfs4_state *state,
		const nfs4_stateid *open_stateid,
		fmode_t fmode,
		nfs4_stateid *freeme)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	write_seqlock(&state->seqlock);
	nfs_set_open_stateid_locked(state, open_stateid, freeme);
	switch (fmode) {
	case FMODE_READ:
		set_bit(NFS_O_RDONLY_STATE, &state->flags);
		break;
	case FMODE_WRITE:
		set_bit(NFS_O_WRONLY_STATE, &state->flags);
		break;
	case FMODE_READ|FMODE_WRITE:
		set_bit(NFS_O_RDWR_STATE, &state->flags);
	}
	set_bit(NFS_OPEN_STATE, &state->flags);
	write_sequnlock(&state->seqlock);
}

void nfs_state_clear_open_state_flags(struct nfs4_state *state)
{
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	clear_bit(NFS_O_WRONLY_STATE, &state->flags);
	clear_bit(NFS_O_RDONLY_STATE, &state->flags);
	clear_bit(NFS_OPEN_STATE, &state->flags);
}

static void nfs_state_set_delegation(struct nfs4_state *state,
				     const nfs4_stateid *deleg_stateid,
				     fmode_t fmode)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	write_seqlock(&state->seqlock);
	nfs4_stateid_copy(&state->stateid, deleg_stateid);
	set_bit(NFS_DELEGATED_STATE, &state->flags);
	write_sequnlock(&state->seqlock);
}

void nfs_state_clear_delegation(struct nfs4_state *state)
{
	write_seqlock(&state->seqlock);
	nfs4_stateid_copy(&state->stateid, &state->open_stateid);
	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	write_sequnlock(&state->seqlock);
}

int update_open_stateid(struct nfs4_state *state,
		const nfs4_stateid *open_stateid,
		const nfs4_stateid *delegation,
		fmode_t fmode)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs_client *clp = server->nfs_client;
	struct nfs_delegation *deleg_cur;
	nfs4_stateid freeme = { };
	int ret = 0;

	fmode &= (FMODE_READ|FMODE_WRITE);

	spin_lock(&state->owner->so_lock);
	if (open_stateid != NULL) {
		rcu_read_lock();
		nfs_state_set_open_stateid(state, open_stateid, fmode, &freeme);
		rcu_read_unlock();
		ret = 1;
	}

	deleg_cur = nfs4_get_valid_delegation(state->inode);
	if (deleg_cur == NULL)
		goto no_delegation;

	spin_lock(&deleg_cur->lock);
	if (!deleg_cur->inode ||
	    test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
	    (deleg_cur->type & fmode) != fmode)
		goto no_delegation_unlock;

	if (delegation == NULL)
		delegation = &deleg_cur->stateid;
	else if (!nfs4_stateid_match_other(&deleg_cur->stateid, delegation))
		goto no_delegation_unlock;

	nfs_mark_delegation_referenced(deleg_cur);
	nfs_state_set_delegation(state, &deleg_cur->stateid, fmode);
	ret = 1;
no_delegation_unlock:
	spin_unlock(&deleg_cur->lock);
	nfs_put_delegation(deleg_cur);
no_delegation:
	if (ret)
		update_open_stateflags(state, fmode);
	spin_unlock(&state->owner->so_lock);

	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
		nfs4_schedule_state_manager(clp);
	if (freeme.type != 0)
		nfs4_test_and_free_stateid(server, &freeme,
				state->owner->so_cred);

	return ret;
}

static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
		const nfs4_stateid *stateid)
{
	struct nfs4_state *state = lsp->ls_state;
	bool ret = false;

	spin_lock(&state->state_lock);
	if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
		goto out_noupdate;
	if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
		goto out_noupdate;
	nfs4_stateid_copy(&lsp->ls_stateid, stateid);
	ret = true;
out_noupdate:
	spin_unlock(&state->state_lock);
	return ret;
}

static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
{
	struct nfs_delegation *delegation;

	fmode &= FMODE_READ|FMODE_WRITE;
	delegation = nfs4_get_valid_delegation(inode);
	if (!delegation)
		return;
	if ((delegation->type & fmode) != fmode)
		nfs4_inode_return_delegation(inode);
	nfs_put_delegation(delegation);
}

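/*
 * Try to satisfy an OPEN from state we already hold: either an existing
 * open of a compatible mode, or a delegation that covers the requested
 * access. Returns the referenced nfs4_state on success; otherwise an
 * ERR_PTR is returned and the OPEN must be sent to the server.
 */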
if (!opendata->is_recover) { 1935 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode); 1936 if (ret != 0) 1937 goto out; 1938 } 1939 ret = -EAGAIN; 1940 1941 /* Try to update the stateid using the delegation */ 1942 if (update_open_stateid(state, NULL, &stateid, fmode)) 1943 goto out_return_state; 1944 } 1945 out: 1946 return ERR_PTR(ret); 1947 out_return_state: 1948 refcount_inc(&state->count); 1949 return state; 1950 } 1951 1952 static void 1953 nfs4_process_delegation(struct inode *inode, const struct cred *cred, 1954 enum open_claim_type4 claim, 1955 const struct nfs4_open_delegation *delegation) 1956 { 1957 switch (delegation->open_delegation_type) { 1958 case NFS4_OPEN_DELEGATE_READ: 1959 case NFS4_OPEN_DELEGATE_WRITE: 1960 case NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG: 1961 case NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG: 1962 break; 1963 default: 1964 return; 1965 } 1966 switch (claim) { 1967 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 1968 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 1969 pr_err_ratelimited("NFS: Broken NFSv4 server %s is " 1970 "returning a delegation for " 1971 "OPEN(CLAIM_DELEGATE_CUR)\n", 1972 NFS_SERVER(inode)->nfs_client->cl_hostname); 1973 break; 1974 case NFS4_OPEN_CLAIM_PREVIOUS: 1975 nfs_inode_reclaim_delegation(inode, cred, delegation->type, 1976 &delegation->stateid, 1977 delegation->pagemod_limit, 1978 delegation->open_delegation_type); 1979 break; 1980 default: 1981 nfs_inode_set_delegation(inode, cred, delegation->type, 1982 &delegation->stateid, 1983 delegation->pagemod_limit, 1984 delegation->open_delegation_type); 1985 } 1986 if (delegation->do_recall) 1987 nfs_async_inode_return_delegation(inode, &delegation->stateid); 1988 } 1989 1990 /* 1991 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes 1992 * and update the nfs4_state. 
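 * A CLAIM_PREVIOUS open reclaims state after a server reboot, so the
 * inode already exists: refresh its attributes, process any delegation
 * the server handed back, and fold the returned open stateid into the
 * existing nfs4_state.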
1993 */ 1994 static struct nfs4_state * 1995 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data) 1996 { 1997 struct inode *inode = data->state->inode; 1998 struct nfs4_state *state = data->state; 1999 int ret; 2000 2001 if (!data->rpc_done) { 2002 if (data->rpc_status) 2003 return ERR_PTR(data->rpc_status); 2004 return nfs4_try_open_cached(data); 2005 } 2006 2007 ret = nfs_refresh_inode(inode, &data->f_attr); 2008 if (ret) 2009 return ERR_PTR(ret); 2010 2011 nfs4_process_delegation(state->inode, 2012 data->owner->so_cred, 2013 data->o_arg.claim, 2014 &data->o_res.delegation); 2015 2016 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_NO_OPEN_STATEID)) { 2017 if (!update_open_stateid(state, &data->o_res.stateid, 2018 NULL, data->o_arg.fmode)) 2019 return ERR_PTR(-EAGAIN); 2020 } else if (!update_open_stateid(state, NULL, NULL, data->o_arg.fmode)) 2021 return ERR_PTR(-EAGAIN); 2022 refcount_inc(&state->count); 2023 2024 return state; 2025 } 2026 2027 static struct inode * 2028 nfs4_opendata_get_inode(struct nfs4_opendata *data) 2029 { 2030 struct inode *inode; 2031 2032 switch (data->o_arg.claim) { 2033 case NFS4_OPEN_CLAIM_NULL: 2034 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 2035 case NFS4_OPEN_CLAIM_DELEGATE_PREV: 2036 if (!(data->f_attr.valid & NFS_ATTR_FATTR)) 2037 return ERR_PTR(-EAGAIN); 2038 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, 2039 &data->f_attr); 2040 break; 2041 default: 2042 inode = d_inode(data->dentry); 2043 ihold(inode); 2044 nfs_refresh_inode(inode, &data->f_attr); 2045 } 2046 return inode; 2047 } 2048 2049 static struct nfs4_state * 2050 nfs4_opendata_find_nfs4_state(struct nfs4_opendata *data) 2051 { 2052 struct nfs4_state *state; 2053 struct inode *inode; 2054 2055 inode = nfs4_opendata_get_inode(data); 2056 if (IS_ERR(inode)) 2057 return ERR_CAST(inode); 2058 if (data->state != NULL && data->state->inode == inode) { 2059 state = data->state; 2060 refcount_inc(&state->count); 2061 } else 2062 state = nfs4_get_open_state(inode, data->owner); 2063 iput(inode); 2064 if (state == NULL) 2065 state = ERR_PTR(-ENOMEM); 2066 return state; 2067 } 2068 2069 static struct nfs4_state * 2070 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 2071 { 2072 struct nfs4_state *state; 2073 2074 if (!data->rpc_done) { 2075 state = nfs4_try_open_cached(data); 2076 trace_nfs4_cached_open(data->state); 2077 goto out; 2078 } 2079 2080 state = nfs4_opendata_find_nfs4_state(data); 2081 if (IS_ERR(state)) 2082 goto out; 2083 2084 nfs4_process_delegation(state->inode, 2085 data->owner->so_cred, 2086 data->o_arg.claim, 2087 &data->o_res.delegation); 2088 2089 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_NO_OPEN_STATEID)) { 2090 if (!update_open_stateid(state, &data->o_res.stateid, 2091 NULL, data->o_arg.fmode)) { 2092 nfs4_put_open_state(state); 2093 state = ERR_PTR(-EAGAIN); 2094 } 2095 } else if (!update_open_stateid(state, NULL, NULL, data->o_arg.fmode)) { 2096 nfs4_put_open_state(state); 2097 state = ERR_PTR(-EAGAIN); 2098 } 2099 out: 2100 nfs_release_seqid(data->o_arg.seqid); 2101 return state; 2102 } 2103 2104 static struct nfs4_state * 2105 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 2106 { 2107 struct nfs4_state *ret; 2108 2109 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) 2110 ret =_nfs4_opendata_reclaim_to_nfs4_state(data); 2111 else 2112 ret = _nfs4_opendata_to_nfs4_state(data); 2113 nfs4_sequence_free_slot(&data->o_res.seq_res); 2114 return ret; 2115 } 2116 2117 static struct nfs_open_context * 2118 nfs4_state_find_open_context_mode(struct nfs4_state 
*state, fmode_t mode) 2119 { 2120 struct nfs_inode *nfsi = NFS_I(state->inode); 2121 struct nfs_open_context *ctx; 2122 2123 rcu_read_lock(); 2124 list_for_each_entry_rcu(ctx, &nfsi->open_files, list) { 2125 if (ctx->state != state) 2126 continue; 2127 if ((ctx->mode & mode) != mode) 2128 continue; 2129 if (!get_nfs_open_context(ctx)) 2130 continue; 2131 rcu_read_unlock(); 2132 return ctx; 2133 } 2134 rcu_read_unlock(); 2135 return ERR_PTR(-ENOENT); 2136 } 2137 2138 static struct nfs_open_context * 2139 nfs4_state_find_open_context(struct nfs4_state *state) 2140 { 2141 struct nfs_open_context *ctx; 2142 2143 ctx = nfs4_state_find_open_context_mode(state, FMODE_READ|FMODE_WRITE); 2144 if (!IS_ERR(ctx)) 2145 return ctx; 2146 ctx = nfs4_state_find_open_context_mode(state, FMODE_WRITE); 2147 if (!IS_ERR(ctx)) 2148 return ctx; 2149 return nfs4_state_find_open_context_mode(state, FMODE_READ); 2150 } 2151 2152 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, 2153 struct nfs4_state *state, enum open_claim_type4 claim) 2154 { 2155 struct nfs4_opendata *opendata; 2156 2157 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, 2158 NULL, claim, GFP_NOFS); 2159 if (opendata == NULL) 2160 return ERR_PTR(-ENOMEM); 2161 opendata->state = state; 2162 refcount_inc(&state->count); 2163 return opendata; 2164 } 2165 2166 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, 2167 fmode_t fmode) 2168 { 2169 struct nfs4_state *newstate; 2170 struct nfs_server *server = NFS_SB(opendata->dentry->d_sb); 2171 int openflags = opendata->o_arg.open_flags; 2172 int ret; 2173 2174 if (!nfs4_mode_match_open_stateid(opendata->state, fmode)) 2175 return 0; 2176 opendata->o_arg.fmode = fmode; 2177 opendata->o_arg.share_access = 2178 nfs4_map_atomic_open_share(server, fmode, openflags); 2179 memset(&opendata->o_res, 0, sizeof(opendata->o_res)); 2180 memset(&opendata->c_res, 0, sizeof(opendata->c_res)); 2181 nfs4_init_opendata_res(opendata); 2182 ret = _nfs4_recover_proc_open(opendata); 2183 if (ret != 0) 2184 return ret; 2185 newstate = nfs4_opendata_to_nfs4_state(opendata); 2186 if (IS_ERR(newstate)) 2187 return PTR_ERR(newstate); 2188 if (newstate != opendata->state) 2189 ret = -ESTALE; 2190 nfs4_close_state(newstate, fmode); 2191 return ret; 2192 } 2193 2194 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state) 2195 { 2196 int ret; 2197 2198 /* memory barrier prior to reading state->n_* */ 2199 smp_rmb(); 2200 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); 2201 if (ret != 0) 2202 return ret; 2203 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE); 2204 if (ret != 0) 2205 return ret; 2206 ret = nfs4_open_recover_helper(opendata, FMODE_READ); 2207 if (ret != 0) 2208 return ret; 2209 /* 2210 * We may have performed cached opens for all three recoveries. 2211 * Check if we need to update the current stateid. 2212 */ 2213 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 && 2214 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) { 2215 write_seqlock(&state->seqlock); 2216 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 2217 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 2218 write_sequnlock(&state->seqlock); 2219 } 2220 return 0; 2221 } 2222 2223 /* 2224 * OPEN_RECLAIM: 2225 * reclaim state on the server after a reboot. 
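 * The open is replayed with CLAIM_PREVIOUS for each open mode still in
 * use; if the delegation is flagged NFS_DELEGATION_NEED_RECLAIM, the
 * matching delegation type is requested as part of the same OPEN.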
2226 */ 2227 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 2228 { 2229 struct nfs_delegation *delegation; 2230 struct nfs4_opendata *opendata; 2231 u32 delegation_type = NFS4_OPEN_DELEGATE_NONE; 2232 int status; 2233 2234 opendata = nfs4_open_recoverdata_alloc(ctx, state, 2235 NFS4_OPEN_CLAIM_PREVIOUS); 2236 if (IS_ERR(opendata)) 2237 return PTR_ERR(opendata); 2238 rcu_read_lock(); 2239 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2240 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0) { 2241 switch(delegation->type) { 2242 case FMODE_READ: 2243 delegation_type = NFS4_OPEN_DELEGATE_READ; 2244 if (test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) 2245 delegation_type = NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG; 2246 break; 2247 case FMODE_WRITE: 2248 case FMODE_READ|FMODE_WRITE: 2249 delegation_type = NFS4_OPEN_DELEGATE_WRITE; 2250 if (test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) 2251 delegation_type = NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG; 2252 } 2253 } 2254 rcu_read_unlock(); 2255 opendata->o_arg.u.delegation_type = delegation_type; 2256 status = nfs4_open_recover(opendata, state); 2257 nfs4_opendata_put(opendata); 2258 return status; 2259 } 2260 2261 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 2262 { 2263 struct nfs_server *server = NFS_SERVER(state->inode); 2264 struct nfs4_exception exception = { }; 2265 int err; 2266 do { 2267 err = _nfs4_do_open_reclaim(ctx, state); 2268 trace_nfs4_open_reclaim(ctx, 0, err); 2269 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 2270 continue; 2271 if (err != -NFS4ERR_DELAY) 2272 break; 2273 nfs4_handle_exception(server, err, &exception); 2274 } while (exception.retry); 2275 return err; 2276 } 2277 2278 int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state) 2279 { 2280 struct nfs_open_context *ctx; 2281 int ret; 2282 2283 ctx = nfs4_state_find_open_context(state); 2284 if (IS_ERR(ctx)) 2285 return -EAGAIN; 2286 clear_bit(NFS_DELEGATED_STATE, &state->flags); 2287 nfs_state_clear_open_state_flags(state); 2288 ret = nfs4_do_open_reclaim(ctx, state); 2289 put_nfs_open_context(ctx); 2290 return ret; 2291 } 2292 2293 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err) 2294 { 2295 switch (err) { 2296 default: 2297 printk(KERN_ERR "NFS: %s: unhandled error " 2298 "%d.\n", __func__, err); 2299 fallthrough; 2300 case 0: 2301 case -ENOENT: 2302 case -EAGAIN: 2303 case -ESTALE: 2304 case -ETIMEDOUT: 2305 break; 2306 case -NFS4ERR_BADSESSION: 2307 case -NFS4ERR_BADSLOT: 2308 case -NFS4ERR_BAD_HIGH_SLOT: 2309 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 2310 case -NFS4ERR_DEADSESSION: 2311 return -EAGAIN; 2312 case -NFS4ERR_STALE_CLIENTID: 2313 case -NFS4ERR_STALE_STATEID: 2314 /* Don't recall a delegation if it was lost */ 2315 nfs4_schedule_lease_recovery(server->nfs_client); 2316 return -EAGAIN; 2317 case -NFS4ERR_MOVED: 2318 nfs4_schedule_migration_recovery(server); 2319 return -EAGAIN; 2320 case -NFS4ERR_LEASE_MOVED: 2321 nfs4_schedule_lease_moved_recovery(server->nfs_client); 2322 return -EAGAIN; 2323 case -NFS4ERR_DELEG_REVOKED: 2324 case -NFS4ERR_ADMIN_REVOKED: 2325 case -NFS4ERR_EXPIRED: 2326 case -NFS4ERR_BAD_STATEID: 2327 case -NFS4ERR_OPENMODE: 2328 nfs_inode_find_state_and_recover(state->inode, 2329 stateid); 2330 nfs4_schedule_stateid_recovery(server, state); 2331 
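		/*
		 * The stateid is no longer valid on the server: schedule
		 * recovery of the affected open state and ask the caller to
		 * retry once recovery has completed.
		 */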
return -EAGAIN; 2332 case -NFS4ERR_DELAY: 2333 case -NFS4ERR_GRACE: 2334 ssleep(1); 2335 return -EAGAIN; 2336 case -ENOMEM: 2337 case -NFS4ERR_DENIED: 2338 if (fl) { 2339 struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner; 2340 if (lsp) 2341 set_bit(NFS_LOCK_LOST, &lsp->ls_flags); 2342 } 2343 return 0; 2344 } 2345 return err; 2346 } 2347 2348 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, 2349 struct nfs4_state *state, const nfs4_stateid *stateid) 2350 { 2351 struct nfs_server *server = NFS_SERVER(state->inode); 2352 struct nfs4_opendata *opendata; 2353 int err = 0; 2354 2355 opendata = nfs4_open_recoverdata_alloc(ctx, state, 2356 NFS4_OPEN_CLAIM_DELEG_CUR_FH); 2357 if (IS_ERR(opendata)) 2358 return PTR_ERR(opendata); 2359 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); 2360 if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) { 2361 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); 2362 if (err) 2363 goto out; 2364 } 2365 if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) { 2366 err = nfs4_open_recover_helper(opendata, FMODE_WRITE); 2367 if (err) 2368 goto out; 2369 } 2370 if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) { 2371 err = nfs4_open_recover_helper(opendata, FMODE_READ); 2372 if (err) 2373 goto out; 2374 } 2375 nfs_state_clear_delegation(state); 2376 out: 2377 nfs4_opendata_put(opendata); 2378 return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err); 2379 } 2380 2381 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata) 2382 { 2383 struct nfs4_opendata *data = calldata; 2384 2385 nfs4_setup_sequence(data->o_arg.server->nfs_client, 2386 &data->c_arg.seq_args, &data->c_res.seq_res, task); 2387 } 2388 2389 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata) 2390 { 2391 struct nfs4_opendata *data = calldata; 2392 2393 data->c_res.seq_res.sr_slot_ops->done(task, &data->c_res.seq_res); 2394 2395 data->rpc_status = task->tk_status; 2396 if (data->rpc_status == 0) { 2397 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid); 2398 nfs_confirm_seqid(&data->owner->so_seqid, 0); 2399 renew_lease(data->o_res.server, data->timestamp); 2400 data->rpc_done = true; 2401 } 2402 } 2403 2404 static void nfs4_open_confirm_release(void *calldata) 2405 { 2406 struct nfs4_opendata *data = calldata; 2407 struct nfs4_state *state = NULL; 2408 2409 /* If this request hasn't been cancelled, do nothing */ 2410 if (!data->cancelled) 2411 goto out_free; 2412 /* In case of error, no cleanup! 
*/ 2413 if (!data->rpc_done) 2414 goto out_free; 2415 state = nfs4_opendata_to_nfs4_state(data); 2416 if (!IS_ERR(state)) 2417 nfs4_close_state(state, data->o_arg.fmode); 2418 out_free: 2419 nfs4_opendata_put(data); 2420 } 2421 2422 static const struct rpc_call_ops nfs4_open_confirm_ops = { 2423 .rpc_call_prepare = nfs4_open_confirm_prepare, 2424 .rpc_call_done = nfs4_open_confirm_done, 2425 .rpc_release = nfs4_open_confirm_release, 2426 }; 2427 2428 /* 2429 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata 2430 */ 2431 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) 2432 { 2433 struct nfs_server *server = NFS_SERVER(d_inode(data->dir)); 2434 struct rpc_task *task; 2435 struct rpc_message msg = { 2436 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM], 2437 .rpc_argp = &data->c_arg, 2438 .rpc_resp = &data->c_res, 2439 .rpc_cred = data->owner->so_cred, 2440 }; 2441 struct rpc_task_setup task_setup_data = { 2442 .rpc_client = server->client, 2443 .rpc_message = &msg, 2444 .callback_ops = &nfs4_open_confirm_ops, 2445 .callback_data = data, 2446 .workqueue = nfsiod_workqueue, 2447 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 2448 }; 2449 int status; 2450 2451 nfs4_init_sequence(server->nfs_client, &data->c_arg.seq_args, 2452 &data->c_res.seq_res, 1, data->is_recover); 2453 kref_get(&data->kref); 2454 data->rpc_done = false; 2455 data->rpc_status = 0; 2456 data->timestamp = jiffies; 2457 task = rpc_run_task(&task_setup_data); 2458 if (IS_ERR(task)) 2459 return PTR_ERR(task); 2460 status = rpc_wait_for_completion_task(task); 2461 if (status != 0) { 2462 data->cancelled = true; 2463 smp_wmb(); 2464 } else 2465 status = data->rpc_status; 2466 rpc_put_task(task); 2467 return status; 2468 } 2469 2470 static void nfs4_open_prepare(struct rpc_task *task, void *calldata) 2471 { 2472 struct nfs4_opendata *data = calldata; 2473 struct nfs4_state_owner *sp = data->owner; 2474 struct nfs_client *clp = sp->so_server->nfs_client; 2475 enum open_claim_type4 claim = data->o_arg.claim; 2476 2477 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) 2478 goto out_wait; 2479 /* 2480 * Check if we still need to send an OPEN call, or if we can use 2481 * a delegation instead. 2482 */ 2483 if (data->state != NULL) { 2484 if (can_open_cached(data->state, data->o_arg.fmode, 2485 data->o_arg.open_flags, claim)) 2486 goto out_no_action; 2487 if (can_open_delegated(data->state->inode, data->o_arg.fmode, 2488 claim, NULL)) { 2489 trace_nfs4_cached_open(data->state); 2490 goto out_no_action; 2491 } 2492 } 2493 /* Update client id. 
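 * (the lease may have been re-established since this opendata was set
 * up, in which case clp->cl_clientid will have changed)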
*/ 2494 data->o_arg.clientid = clp->cl_clientid; 2495 switch (claim) { 2496 default: 2497 break; 2498 case NFS4_OPEN_CLAIM_PREVIOUS: 2499 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 2500 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 2501 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0]; 2502 fallthrough; 2503 case NFS4_OPEN_CLAIM_FH: 2504 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; 2505 } 2506 data->timestamp = jiffies; 2507 if (nfs4_setup_sequence(data->o_arg.server->nfs_client, 2508 &data->o_arg.seq_args, 2509 &data->o_res.seq_res, 2510 task) != 0) 2511 nfs_release_seqid(data->o_arg.seqid); 2512 2513 /* Set the create mode (note dependency on the session type) */ 2514 data->o_arg.createmode = NFS4_CREATE_UNCHECKED; 2515 if (data->o_arg.open_flags & O_EXCL) { 2516 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1; 2517 if (clp->cl_mvops->minor_version == 0) { 2518 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE; 2519 /* don't put an ACCESS op in OPEN compound if O_EXCL, 2520 * because ACCESS will return permission denied for 2521 * all bits until close */ 2522 data->o_res.access_request = data->o_arg.access = 0; 2523 } else if (nfs4_has_persistent_session(clp)) 2524 data->o_arg.createmode = NFS4_CREATE_GUARDED; 2525 } 2526 return; 2527 2528 out_no_action: 2529 task->tk_action = NULL; 2530 out_wait: 2531 nfs4_sequence_done(task, &data->o_res.seq_res); 2532 } 2533 2534 static void nfs4_open_done(struct rpc_task *task, void *calldata) 2535 { 2536 struct nfs4_opendata *data = calldata; 2537 2538 data->rpc_status = task->tk_status; 2539 2540 if (!nfs4_sequence_process(task, &data->o_res.seq_res)) 2541 return; 2542 2543 if (task->tk_status == 0) { 2544 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) { 2545 switch (data->o_res.f_attr->mode & S_IFMT) { 2546 case S_IFREG: 2547 break; 2548 case S_IFLNK: 2549 data->rpc_status = -ELOOP; 2550 break; 2551 case S_IFDIR: 2552 data->rpc_status = -EISDIR; 2553 break; 2554 default: 2555 data->rpc_status = -ENOTDIR; 2556 } 2557 } 2558 renew_lease(data->o_res.server, data->timestamp); 2559 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)) 2560 nfs_confirm_seqid(&data->owner->so_seqid, 0); 2561 } 2562 data->rpc_done = true; 2563 } 2564 2565 static void nfs4_open_release(void *calldata) 2566 { 2567 struct nfs4_opendata *data = calldata; 2568 struct nfs4_state *state = NULL; 2569 2570 /* In case of error, no cleanup! */ 2571 if (data->rpc_status != 0 || !data->rpc_done) { 2572 nfs_release_seqid(data->o_arg.seqid); 2573 goto out_free; 2574 } 2575 /* If this request hasn't been cancelled, do nothing */ 2576 if (!data->cancelled) 2577 goto out_free; 2578 /* In case we need an open_confirm, no cleanup! 
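 * (the open was never confirmed, so there is no usable open stateid
 * for us to close here)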
*/ 2579 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) 2580 goto out_free; 2581 state = nfs4_opendata_to_nfs4_state(data); 2582 if (!IS_ERR(state)) 2583 nfs4_close_state(state, data->o_arg.fmode); 2584 out_free: 2585 nfs4_opendata_put(data); 2586 } 2587 2588 static const struct rpc_call_ops nfs4_open_ops = { 2589 .rpc_call_prepare = nfs4_open_prepare, 2590 .rpc_call_done = nfs4_open_done, 2591 .rpc_release = nfs4_open_release, 2592 }; 2593 2594 static int nfs4_run_open_task(struct nfs4_opendata *data, 2595 struct nfs_open_context *ctx) 2596 { 2597 struct inode *dir = d_inode(data->dir); 2598 struct nfs_server *server = NFS_SERVER(dir); 2599 struct nfs_client *clp = server->nfs_client; 2600 struct nfs_openargs *o_arg = &data->o_arg; 2601 struct nfs_openres *o_res = &data->o_res; 2602 struct rpc_task *task; 2603 struct rpc_message msg = { 2604 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN], 2605 .rpc_argp = o_arg, 2606 .rpc_resp = o_res, 2607 .rpc_cred = data->owner->so_cred, 2608 }; 2609 struct rpc_task_setup task_setup_data = { 2610 .rpc_client = server->client, 2611 .rpc_message = &msg, 2612 .callback_ops = &nfs4_open_ops, 2613 .callback_data = data, 2614 .workqueue = nfsiod_workqueue, 2615 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 2616 }; 2617 int status; 2618 2619 if (nfs_server_capable(dir, NFS_CAP_MOVEABLE)) 2620 task_setup_data.flags |= RPC_TASK_MOVEABLE; 2621 2622 kref_get(&data->kref); 2623 data->rpc_done = false; 2624 data->rpc_status = 0; 2625 data->cancelled = false; 2626 data->is_recover = false; 2627 if (!ctx) { 2628 nfs4_init_sequence(clp, &o_arg->seq_args, &o_res->seq_res, 1, 1); 2629 data->is_recover = true; 2630 task_setup_data.flags |= RPC_TASK_TIMEOUT; 2631 } else { 2632 nfs4_init_sequence(clp, &o_arg->seq_args, &o_res->seq_res, 1, 0); 2633 pnfs_lgopen_prepare(data, ctx); 2634 } 2635 task = rpc_run_task(&task_setup_data); 2636 if (IS_ERR(task)) 2637 return PTR_ERR(task); 2638 status = rpc_wait_for_completion_task(task); 2639 if (status != 0) { 2640 data->cancelled = true; 2641 smp_wmb(); 2642 } else 2643 status = data->rpc_status; 2644 rpc_put_task(task); 2645 2646 return status; 2647 } 2648 2649 static int _nfs4_recover_proc_open(struct nfs4_opendata *data) 2650 { 2651 struct inode *dir = d_inode(data->dir); 2652 struct nfs_openres *o_res = &data->o_res; 2653 int status; 2654 2655 status = nfs4_run_open_task(data, NULL); 2656 if (status != 0 || !data->rpc_done) 2657 return status; 2658 2659 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr); 2660 2661 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) 2662 status = _nfs4_proc_open_confirm(data); 2663 2664 return status; 2665 } 2666 2667 /* 2668 * Additional permission checks in order to distinguish between an 2669 * open for read, and an open for execute. This works around the 2670 * fact that NFSv4 OPEN treats read and execute permissions as being 2671 * the same. 2672 * Note that in the non-execute case, we want to turn off permission 2673 * checking if we just created a new file (POSIX open() semantics). 
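 * For example, an open for execute checks NFS4_ACCESS_EXECUTE (or
 * NFS4_ACCESS_LOOKUP for a directory), whereas a plain read open of an
 * existing file checks NFS4_ACCESS_READ.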
2674 */ 2675 static int nfs4_opendata_access(const struct cred *cred, 2676 struct nfs4_opendata *opendata, 2677 struct nfs4_state *state, fmode_t fmode) 2678 { 2679 struct nfs_access_entry cache; 2680 u32 mask, flags; 2681 2682 /* access call failed or for some reason the server doesn't 2683 * support any access modes -- defer access call until later */ 2684 if (opendata->o_res.access_supported == 0) 2685 return 0; 2686 2687 mask = 0; 2688 if (fmode & FMODE_EXEC) { 2689 /* ONLY check for exec rights */ 2690 if (S_ISDIR(state->inode->i_mode)) 2691 mask = NFS4_ACCESS_LOOKUP; 2692 else 2693 mask = NFS4_ACCESS_EXECUTE; 2694 } else if ((fmode & FMODE_READ) && !opendata->file_created) 2695 mask = NFS4_ACCESS_READ; 2696 2697 nfs_access_set_mask(&cache, opendata->o_res.access_result); 2698 nfs_access_add_cache(state->inode, &cache, cred); 2699 2700 flags = NFS4_ACCESS_READ | NFS4_ACCESS_EXECUTE | NFS4_ACCESS_LOOKUP; 2701 if ((mask & ~cache.mask & flags) == 0) 2702 return 0; 2703 2704 return -EACCES; 2705 } 2706 2707 /* 2708 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata 2709 */ 2710 static int _nfs4_proc_open(struct nfs4_opendata *data, 2711 struct nfs_open_context *ctx) 2712 { 2713 struct inode *dir = d_inode(data->dir); 2714 struct nfs_server *server = NFS_SERVER(dir); 2715 struct nfs_openargs *o_arg = &data->o_arg; 2716 struct nfs_openres *o_res = &data->o_res; 2717 int status; 2718 2719 status = nfs4_run_open_task(data, ctx); 2720 if (!data->rpc_done) 2721 return status; 2722 if (status != 0) { 2723 if (status == -NFS4ERR_BADNAME && 2724 !(o_arg->open_flags & O_CREAT)) 2725 return -ENOENT; 2726 return status; 2727 } 2728 2729 nfs_fattr_map_and_free_names(server, &data->f_attr); 2730 2731 if (o_arg->open_flags & O_CREAT) { 2732 if (o_arg->open_flags & O_EXCL) 2733 data->file_created = true; 2734 else if (o_res->cinfo.before != o_res->cinfo.after) 2735 data->file_created = true; 2736 if (data->file_created || 2737 inode_peek_iversion_raw(dir) != o_res->cinfo.after) 2738 nfs4_update_changeattr(dir, &o_res->cinfo, 2739 o_res->f_attr->time_start, 2740 NFS_INO_INVALID_DATA); 2741 } 2742 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) 2743 server->caps &= ~NFS_CAP_POSIX_LOCK; 2744 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 2745 status = _nfs4_proc_open_confirm(data); 2746 if (status != 0) 2747 return status; 2748 } 2749 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) { 2750 struct nfs_fh *fh = &o_res->fh; 2751 2752 nfs4_sequence_free_slot(&o_res->seq_res); 2753 if (o_arg->claim == NFS4_OPEN_CLAIM_FH) 2754 fh = NFS_FH(d_inode(data->dentry)); 2755 nfs4_proc_getattr(server, fh, o_res->f_attr, NULL); 2756 } 2757 return 0; 2758 } 2759 2760 /* 2761 * OPEN_EXPIRED: 2762 * reclaim state on the server after a network partition. 2763 * Assumes caller holds the appropriate lock 2764 */ 2765 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2766 { 2767 struct nfs4_opendata *opendata; 2768 int ret; 2769 2770 opendata = nfs4_open_recoverdata_alloc(ctx, state, NFS4_OPEN_CLAIM_FH); 2771 if (IS_ERR(opendata)) 2772 return PTR_ERR(opendata); 2773 /* 2774 * We're not recovering a delegation, so ask for no delegation. 2775 * Otherwise the recovery thread could deadlock with an outstanding 2776 * delegation return. 
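 * Setting O_DIRECT below makes nfs4_map_atomic_open_share() request
 * NFS4_SHARE_WANT_NO_DELEG on servers that support the NFSv4.1 share
 * access "want" hints.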
2777 */ 2778 opendata->o_arg.open_flags = O_DIRECT; 2779 ret = nfs4_open_recover(opendata, state); 2780 if (ret == -ESTALE) 2781 d_drop(ctx->dentry); 2782 nfs4_opendata_put(opendata); 2783 return ret; 2784 } 2785 2786 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2787 { 2788 struct nfs_server *server = NFS_SERVER(state->inode); 2789 struct nfs4_exception exception = { }; 2790 int err; 2791 2792 do { 2793 err = _nfs4_open_expired(ctx, state); 2794 trace_nfs4_open_expired(ctx, 0, err); 2795 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 2796 continue; 2797 switch (err) { 2798 default: 2799 goto out; 2800 case -NFS4ERR_GRACE: 2801 case -NFS4ERR_DELAY: 2802 nfs4_handle_exception(server, err, &exception); 2803 err = 0; 2804 } 2805 } while (exception.retry); 2806 out: 2807 return err; 2808 } 2809 2810 int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2811 { 2812 struct nfs_open_context *ctx; 2813 int ret; 2814 2815 ctx = nfs4_state_find_open_context(state); 2816 if (IS_ERR(ctx)) 2817 return -EAGAIN; 2818 ret = nfs4_do_open_expired(ctx, state); 2819 put_nfs_open_context(ctx); 2820 return ret; 2821 } 2822 2823 void nfs_finish_clear_delegation_stateid(struct nfs4_state *state, 2824 const nfs4_stateid *stateid) 2825 { 2826 nfs_remove_bad_delegation(state->inode, stateid); 2827 nfs_state_clear_delegation(state); 2828 } 2829 2830 static int nfs41_test_and_free_expired_stateid(struct nfs_server *server, 2831 nfs4_stateid *stateid, const struct cred *cred) 2832 { 2833 int status; 2834 2835 switch (stateid->type) { 2836 default: 2837 break; 2838 case NFS4_INVALID_STATEID_TYPE: 2839 case NFS4_SPECIAL_STATEID_TYPE: 2840 case NFS4_FREED_STATEID_TYPE: 2841 return -NFS4ERR_BAD_STATEID; 2842 case NFS4_REVOKED_STATEID_TYPE: 2843 goto out_free; 2844 } 2845 2846 status = nfs41_test_stateid(server, stateid, cred); 2847 switch (status) { 2848 case -NFS4ERR_EXPIRED: 2849 case -NFS4ERR_ADMIN_REVOKED: 2850 case -NFS4ERR_DELEG_REVOKED: 2851 break; 2852 default: 2853 return status; 2854 } 2855 out_free: 2856 /* Ack the revoked state to the server */ 2857 nfs41_free_stateid(server, stateid, cred, true); 2858 return -NFS4ERR_EXPIRED; 2859 } 2860 2861 static int nfs41_check_delegation_stateid(struct nfs4_state *state) 2862 { 2863 struct nfs_server *server = NFS_SERVER(state->inode); 2864 nfs4_stateid stateid; 2865 struct nfs_delegation *delegation; 2866 const struct cred *cred = NULL; 2867 int status, ret = NFS_OK; 2868 2869 /* Get the delegation credential for use by test/free_stateid */ 2870 rcu_read_lock(); 2871 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2872 if (delegation == NULL) { 2873 rcu_read_unlock(); 2874 nfs_state_clear_delegation(state); 2875 return NFS_OK; 2876 } 2877 2878 spin_lock(&delegation->lock); 2879 nfs4_stateid_copy(&stateid, &delegation->stateid); 2880 2881 if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, 2882 &delegation->flags)) { 2883 spin_unlock(&delegation->lock); 2884 rcu_read_unlock(); 2885 return NFS_OK; 2886 } 2887 2888 if (delegation->cred) 2889 cred = get_cred(delegation->cred); 2890 spin_unlock(&delegation->lock); 2891 rcu_read_unlock(); 2892 status = nfs41_test_and_free_expired_stateid(server, &stateid, cred); 2893 trace_nfs4_test_delegation_stateid(state, NULL, status); 2894 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) 2895 nfs_finish_clear_delegation_stateid(state, &stateid); 2896 else 2897 ret = status; 2898 2899 put_cred(cred); 2900 return ret; 2901 } 2902 
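/*
 * NFSv4.1+ expired-state recovery probes each stateid with TEST_STATEID
 * and, if the server reports it revoked or expired, acknowledges that
 * with FREE_STATEID before the state is re-established. The nfs41_*
 * helpers in this section apply that check to the delegation, lock and
 * open stateids in turn, driven by nfs41_open_expired().
 */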
2903 static void nfs41_delegation_recover_stateid(struct nfs4_state *state) 2904 { 2905 nfs4_stateid tmp; 2906 2907 if (test_bit(NFS_DELEGATED_STATE, &state->flags) && 2908 nfs4_copy_delegation_stateid(state->inode, state->state, 2909 &tmp, NULL) && 2910 nfs4_stateid_match_other(&state->stateid, &tmp)) 2911 nfs_state_set_delegation(state, &tmp, state->state); 2912 else 2913 nfs_state_clear_delegation(state); 2914 } 2915 2916 /** 2917 * nfs41_check_expired_locks - possibly free a lock stateid 2918 * 2919 * @state: NFSv4 state for an inode 2920 * 2921 * Returns NFS_OK if recovery for this stateid is now finished. 2922 * Otherwise a negative NFS4ERR value is returned. 2923 */ 2924 static int nfs41_check_expired_locks(struct nfs4_state *state) 2925 { 2926 int status, ret = NFS_OK; 2927 struct nfs4_lock_state *lsp, *prev = NULL; 2928 struct nfs_server *server = NFS_SERVER(state->inode); 2929 2930 if (!test_bit(LK_STATE_IN_USE, &state->flags)) 2931 goto out; 2932 2933 spin_lock(&state->state_lock); 2934 list_for_each_entry(lsp, &state->lock_states, ls_locks) { 2935 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { 2936 const struct cred *cred = lsp->ls_state->owner->so_cred; 2937 2938 refcount_inc(&lsp->ls_count); 2939 spin_unlock(&state->state_lock); 2940 2941 nfs4_put_lock_state(prev); 2942 prev = lsp; 2943 2944 status = nfs41_test_and_free_expired_stateid(server, 2945 &lsp->ls_stateid, 2946 cred); 2947 trace_nfs4_test_lock_stateid(state, lsp, status); 2948 if (status == -NFS4ERR_EXPIRED || 2949 status == -NFS4ERR_BAD_STATEID) { 2950 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 2951 lsp->ls_stateid.type = NFS4_INVALID_STATEID_TYPE; 2952 if (!recover_lost_locks) 2953 set_bit(NFS_LOCK_LOST, &lsp->ls_flags); 2954 } else if (status != NFS_OK) { 2955 ret = status; 2956 nfs4_put_lock_state(prev); 2957 goto out; 2958 } 2959 spin_lock(&state->state_lock); 2960 } 2961 } 2962 spin_unlock(&state->state_lock); 2963 nfs4_put_lock_state(prev); 2964 out: 2965 return ret; 2966 } 2967 2968 /** 2969 * nfs41_check_open_stateid - possibly free an open stateid 2970 * 2971 * @state: NFSv4 state for an inode 2972 * 2973 * Returns NFS_OK if recovery for this stateid is now finished. 2974 * Otherwise a negative NFS4ERR value is returned. 
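 * -NFS4ERR_OPENMODE means the stateid itself is still usable, but one
 * or more open modes must be re-established by the caller.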
2975 */ 2976 static int nfs41_check_open_stateid(struct nfs4_state *state) 2977 { 2978 struct nfs_server *server = NFS_SERVER(state->inode); 2979 nfs4_stateid *stateid = &state->open_stateid; 2980 const struct cred *cred = state->owner->so_cred; 2981 int status; 2982 2983 if (test_bit(NFS_OPEN_STATE, &state->flags) == 0) 2984 return -NFS4ERR_BAD_STATEID; 2985 status = nfs41_test_and_free_expired_stateid(server, stateid, cred); 2986 trace_nfs4_test_open_stateid(state, NULL, status); 2987 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) { 2988 nfs_state_clear_open_state_flags(state); 2989 stateid->type = NFS4_INVALID_STATEID_TYPE; 2990 return status; 2991 } 2992 if (nfs_open_stateid_recover_openmode(state)) 2993 return -NFS4ERR_OPENMODE; 2994 return NFS_OK; 2995 } 2996 2997 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2998 { 2999 int status; 3000 3001 status = nfs41_check_delegation_stateid(state); 3002 if (status != NFS_OK) 3003 return status; 3004 nfs41_delegation_recover_stateid(state); 3005 3006 status = nfs41_check_expired_locks(state); 3007 if (status != NFS_OK) 3008 return status; 3009 status = nfs41_check_open_stateid(state); 3010 if (status != NFS_OK) 3011 status = nfs4_open_expired(sp, state); 3012 return status; 3013 } 3014 3015 /* 3016 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-* 3017 * fields corresponding to attributes that were used to store the verifier. 3018 * Make sure we clobber those fields in the later setattr call 3019 */ 3020 static unsigned nfs4_exclusive_attrset(struct nfs4_opendata *opendata, 3021 struct iattr *sattr, struct nfs4_label **label) 3022 { 3023 const __u32 *bitmask = opendata->o_arg.server->exclcreat_bitmask; 3024 __u32 attrset[3]; 3025 unsigned ret; 3026 unsigned i; 3027 3028 for (i = 0; i < ARRAY_SIZE(attrset); i++) { 3029 attrset[i] = opendata->o_res.attrset[i]; 3030 if (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE4_1) 3031 attrset[i] &= ~bitmask[i]; 3032 } 3033 3034 ret = (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE) ? 
3035 sattr->ia_valid : 0; 3036 3037 if ((attrset[1] & (FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET))) { 3038 if (sattr->ia_valid & ATTR_ATIME_SET) 3039 ret |= ATTR_ATIME_SET; 3040 else 3041 ret |= ATTR_ATIME; 3042 } 3043 3044 if ((attrset[1] & (FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET))) { 3045 if (sattr->ia_valid & ATTR_MTIME_SET) 3046 ret |= ATTR_MTIME_SET; 3047 else 3048 ret |= ATTR_MTIME; 3049 } 3050 3051 if (!(attrset[2] & FATTR4_WORD2_SECURITY_LABEL)) 3052 *label = NULL; 3053 return ret; 3054 } 3055 3056 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, 3057 struct nfs_open_context *ctx) 3058 { 3059 struct nfs4_state_owner *sp = opendata->owner; 3060 struct nfs_server *server = sp->so_server; 3061 struct dentry *dentry; 3062 struct nfs4_state *state; 3063 fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx); 3064 struct inode *dir = d_inode(opendata->dir); 3065 unsigned long dir_verifier; 3066 int ret; 3067 3068 dir_verifier = nfs_save_change_attribute(dir); 3069 3070 ret = _nfs4_proc_open(opendata, ctx); 3071 if (ret != 0) 3072 goto out; 3073 3074 state = _nfs4_opendata_to_nfs4_state(opendata); 3075 ret = PTR_ERR(state); 3076 if (IS_ERR(state)) 3077 goto out; 3078 ctx->state = state; 3079 if (server->caps & NFS_CAP_POSIX_LOCK) 3080 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); 3081 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK) 3082 set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags); 3083 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_PRESERVE_UNLINKED) 3084 set_bit(NFS_INO_PRESERVE_UNLINKED, &NFS_I(state->inode)->flags); 3085 3086 switch(opendata->o_arg.claim) { 3087 default: 3088 break; 3089 case NFS4_OPEN_CLAIM_NULL: 3090 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 3091 case NFS4_OPEN_CLAIM_DELEGATE_PREV: 3092 if (!opendata->rpc_done) 3093 break; 3094 if (opendata->o_res.delegation.type != 0) 3095 dir_verifier = nfs_save_change_attribute(dir); 3096 } 3097 3098 dentry = opendata->dentry; 3099 nfs_set_verifier(dentry, dir_verifier); 3100 if (d_really_is_negative(dentry)) { 3101 struct dentry *alias; 3102 d_drop(dentry); 3103 alias = d_splice_alias(igrab(state->inode), dentry); 3104 /* d_splice_alias() can't fail here - it's a non-directory */ 3105 if (alias) { 3106 dput(ctx->dentry); 3107 nfs_set_verifier(alias, dir_verifier); 3108 ctx->dentry = dentry = alias; 3109 } 3110 } 3111 3112 /* Parse layoutget results before we check for access */ 3113 pnfs_parse_lgopen(state->inode, opendata->lgp, ctx); 3114 3115 ret = nfs4_opendata_access(sp->so_cred, opendata, state, acc_mode); 3116 if (ret != 0) 3117 goto out; 3118 3119 if (d_inode(dentry) == state->inode) 3120 nfs_inode_attach_open_context(ctx); 3121 3122 out: 3123 if (!opendata->cancelled) { 3124 if (opendata->lgp) { 3125 nfs4_lgopen_release(opendata->lgp); 3126 opendata->lgp = NULL; 3127 } 3128 nfs4_sequence_free_slot(&opendata->o_res.seq_res); 3129 } 3130 return ret; 3131 } 3132 3133 /* 3134 * Returns a referenced nfs4_state 3135 */ 3136 static int _nfs4_do_open(struct inode *dir, 3137 struct nfs_open_context *ctx, 3138 int flags, 3139 const struct nfs4_open_createattrs *c, 3140 int *opened) 3141 { 3142 struct nfs4_state_owner *sp; 3143 struct nfs4_state *state = NULL; 3144 struct nfs_server *server = NFS_SERVER(dir); 3145 struct nfs4_opendata *opendata; 3146 struct dentry *dentry = ctx->dentry; 3147 const struct cred *cred = ctx->cred; 3148 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold; 3149 fmode_t fmode = _nfs4_ctx_to_openmode(ctx); 3150 enum open_claim_type4 claim = 
NFS4_OPEN_CLAIM_NULL; 3151 struct iattr *sattr = c->sattr; 3152 struct nfs4_label *label = c->label; 3153 int status; 3154 3155 /* Protect against reboot recovery conflicts */ 3156 status = -ENOMEM; 3157 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL); 3158 if (sp == NULL) { 3159 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n"); 3160 goto out_err; 3161 } 3162 status = nfs4_client_recover_expired_lease(server->nfs_client); 3163 if (status != 0) 3164 goto err_put_state_owner; 3165 if (d_really_is_positive(dentry)) 3166 nfs4_return_incompatible_delegation(d_inode(dentry), fmode); 3167 status = -ENOMEM; 3168 if (d_really_is_positive(dentry)) 3169 claim = NFS4_OPEN_CLAIM_FH; 3170 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, 3171 c, claim, GFP_KERNEL); 3172 if (opendata == NULL) 3173 goto err_put_state_owner; 3174 3175 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) { 3176 if (!opendata->f_attr.mdsthreshold) { 3177 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); 3178 if (!opendata->f_attr.mdsthreshold) 3179 goto err_opendata_put; 3180 } 3181 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; 3182 } 3183 if (d_really_is_positive(dentry)) 3184 opendata->state = nfs4_get_open_state(d_inode(dentry), sp); 3185 3186 status = _nfs4_open_and_get_state(opendata, ctx); 3187 if (status != 0) 3188 goto err_opendata_put; 3189 state = ctx->state; 3190 3191 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) && 3192 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) { 3193 unsigned attrs = nfs4_exclusive_attrset(opendata, sattr, &label); 3194 /* 3195 * send create attributes which was not set by open 3196 * with an extra setattr. 3197 */ 3198 if (attrs || label) { 3199 unsigned ia_old = sattr->ia_valid; 3200 3201 sattr->ia_valid = attrs; 3202 nfs_fattr_init(opendata->o_res.f_attr); 3203 status = nfs4_do_setattr(state->inode, cred, 3204 opendata->o_res.f_attr, sattr, 3205 ctx, label); 3206 if (status == 0) { 3207 nfs_setattr_update_inode(state->inode, sattr, 3208 opendata->o_res.f_attr); 3209 nfs_setsecurity(state->inode, opendata->o_res.f_attr); 3210 } 3211 sattr->ia_valid = ia_old; 3212 } 3213 } 3214 if (opened && opendata->file_created) 3215 *opened = 1; 3216 3217 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) { 3218 *ctx_th = opendata->f_attr.mdsthreshold; 3219 opendata->f_attr.mdsthreshold = NULL; 3220 } 3221 3222 nfs4_opendata_put(opendata); 3223 nfs4_put_state_owner(sp); 3224 return 0; 3225 err_opendata_put: 3226 nfs4_opendata_put(opendata); 3227 err_put_state_owner: 3228 nfs4_put_state_owner(sp); 3229 out_err: 3230 return status; 3231 } 3232 3233 3234 static struct nfs4_state *nfs4_do_open(struct inode *dir, 3235 struct nfs_open_context *ctx, 3236 int flags, 3237 struct iattr *sattr, 3238 struct nfs4_label *label, 3239 int *opened) 3240 { 3241 struct nfs_server *server = NFS_SERVER(dir); 3242 struct nfs4_exception exception = { 3243 .interruptible = true, 3244 }; 3245 struct nfs4_state *res; 3246 struct nfs4_open_createattrs c = { 3247 .label = label, 3248 .sattr = sattr, 3249 .verf = { 3250 [0] = (__u32)jiffies, 3251 [1] = (__u32)current->pid, 3252 }, 3253 }; 3254 int status; 3255 3256 do { 3257 status = _nfs4_do_open(dir, ctx, flags, &c, opened); 3258 res = ctx->state; 3259 trace_nfs4_open_file(ctx, flags, status); 3260 if (status == 0) 3261 break; 3262 /* NOTE: BAD_SEQID means the server and client disagree about the 3263 * book-keeping w.r.t. state-changing operations 3264 * (OPEN/CLOSE/LOCK/LOCKU...) 
3265 * It is actually a sign of a bug on the client or on the server. 3266 * 3267 * If we receive a BAD_SEQID error in the particular case of 3268 * doing an OPEN, we assume that nfs_increment_open_seqid() will 3269 * have unhashed the old state_owner for us, and that we can 3270 * therefore safely retry using a new one. We should still warn 3271 * the user though... 3272 */ 3273 if (status == -NFS4ERR_BAD_SEQID) { 3274 pr_warn_ratelimited("NFS: v4 server %s " 3275 " returned a bad sequence-id error!\n", 3276 NFS_SERVER(dir)->nfs_client->cl_hostname); 3277 exception.retry = 1; 3278 continue; 3279 } 3280 /* 3281 * BAD_STATEID on OPEN means that the server cancelled our 3282 * state before it received the OPEN_CONFIRM. 3283 * Recover by retrying the request as per the discussion 3284 * on Page 181 of RFC3530. 3285 */ 3286 if (status == -NFS4ERR_BAD_STATEID) { 3287 exception.retry = 1; 3288 continue; 3289 } 3290 if (status == -NFS4ERR_EXPIRED) { 3291 nfs4_schedule_lease_recovery(server->nfs_client); 3292 exception.retry = 1; 3293 continue; 3294 } 3295 if (status == -EAGAIN) { 3296 /* We must have found a delegation */ 3297 exception.retry = 1; 3298 continue; 3299 } 3300 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception)) 3301 continue; 3302 res = ERR_PTR(nfs4_handle_exception(server, 3303 status, &exception)); 3304 } while (exception.retry); 3305 return res; 3306 } 3307 3308 static int _nfs4_do_setattr(struct inode *inode, 3309 struct nfs_setattrargs *arg, 3310 struct nfs_setattrres *res, 3311 const struct cred *cred, 3312 struct nfs_open_context *ctx) 3313 { 3314 struct nfs_server *server = NFS_SERVER(inode); 3315 struct rpc_message msg = { 3316 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 3317 .rpc_argp = arg, 3318 .rpc_resp = res, 3319 .rpc_cred = cred, 3320 }; 3321 const struct cred *delegation_cred = NULL; 3322 unsigned long timestamp = jiffies; 3323 bool truncate; 3324 int status; 3325 3326 nfs_fattr_init(res->fattr); 3327 3328 /* Servers should only apply open mode checks for file size changes */ 3329 truncate = (arg->iap->ia_valid & ATTR_SIZE) ? 
true : false; 3330 if (!truncate) { 3331 nfs4_inode_make_writeable(inode); 3332 goto zero_stateid; 3333 } 3334 3335 if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) { 3336 /* Use that stateid */ 3337 } else if (ctx != NULL && ctx->state) { 3338 struct nfs_lock_context *l_ctx; 3339 if (!nfs4_valid_open_stateid(ctx->state)) 3340 return -EBADF; 3341 l_ctx = nfs_get_lock_context(ctx); 3342 if (IS_ERR(l_ctx)) 3343 return PTR_ERR(l_ctx); 3344 status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx, 3345 &arg->stateid, &delegation_cred); 3346 nfs_put_lock_context(l_ctx); 3347 if (status == -EIO) 3348 return -EBADF; 3349 else if (status == -EAGAIN) 3350 goto zero_stateid; 3351 } else { 3352 zero_stateid: 3353 nfs4_stateid_copy(&arg->stateid, &zero_stateid); 3354 } 3355 if (delegation_cred) 3356 msg.rpc_cred = delegation_cred; 3357 3358 status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1); 3359 3360 put_cred(delegation_cred); 3361 if (status == 0 && ctx != NULL) 3362 renew_lease(server, timestamp); 3363 trace_nfs4_setattr(inode, &arg->stateid, status); 3364 return status; 3365 } 3366 3367 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred, 3368 struct nfs_fattr *fattr, struct iattr *sattr, 3369 struct nfs_open_context *ctx, struct nfs4_label *ilabel) 3370 { 3371 struct nfs_server *server = NFS_SERVER(inode); 3372 __u32 bitmask[NFS4_BITMASK_SZ]; 3373 struct nfs4_state *state = ctx ? ctx->state : NULL; 3374 struct nfs_setattrargs arg = { 3375 .fh = NFS_FH(inode), 3376 .iap = sattr, 3377 .server = server, 3378 .bitmask = bitmask, 3379 .label = ilabel, 3380 }; 3381 struct nfs_setattrres res = { 3382 .fattr = fattr, 3383 .server = server, 3384 }; 3385 struct nfs4_exception exception = { 3386 .state = state, 3387 .inode = inode, 3388 .stateid = &arg.stateid, 3389 }; 3390 unsigned long adjust_flags = NFS_INO_INVALID_CHANGE | 3391 NFS_INO_INVALID_CTIME; 3392 int err; 3393 3394 if (sattr->ia_valid & (ATTR_MODE | ATTR_KILL_SUID | ATTR_KILL_SGID)) 3395 adjust_flags |= NFS_INO_INVALID_MODE; 3396 if (sattr->ia_valid & (ATTR_UID | ATTR_GID)) 3397 adjust_flags |= NFS_INO_INVALID_OTHER; 3398 if (sattr->ia_valid & ATTR_ATIME) 3399 adjust_flags |= NFS_INO_INVALID_ATIME; 3400 if (sattr->ia_valid & ATTR_MTIME) 3401 adjust_flags |= NFS_INO_INVALID_MTIME; 3402 3403 do { 3404 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label), 3405 inode, adjust_flags); 3406 3407 err = _nfs4_do_setattr(inode, &arg, &res, cred, ctx); 3408 switch (err) { 3409 case -NFS4ERR_OPENMODE: 3410 if (!(sattr->ia_valid & ATTR_SIZE)) { 3411 pr_warn_once("NFSv4: server %s is incorrectly " 3412 "applying open mode checks to " 3413 "a SETATTR that is not " 3414 "changing file size.\n", 3415 server->nfs_client->cl_hostname); 3416 } 3417 if (state && !(state->state & FMODE_WRITE)) { 3418 err = -EBADF; 3419 if (sattr->ia_valid & ATTR_OPEN) 3420 err = -EACCES; 3421 goto out; 3422 } 3423 } 3424 err = nfs4_handle_exception(server, err, &exception); 3425 } while (exception.retry); 3426 out: 3427 return err; 3428 } 3429 3430 static bool 3431 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task) 3432 { 3433 if (inode == NULL || !nfs_have_layout(inode)) 3434 return false; 3435 3436 return pnfs_wait_on_layoutreturn(inode, task); 3437 } 3438 3439 /* 3440 * Update the seqid of an open stateid 3441 */ 3442 static void nfs4_sync_open_stateid(nfs4_stateid *dst, 3443 struct nfs4_state *state) 3444 { 3445 __be32 seqid_open; 3446 u32 
dst_seqid; 3447 int seq; 3448 3449 for (;;) { 3450 if (!nfs4_valid_open_stateid(state)) 3451 break; 3452 seq = read_seqbegin(&state->seqlock); 3453 if (!nfs4_state_match_open_stateid_other(state, dst)) { 3454 nfs4_stateid_copy(dst, &state->open_stateid); 3455 if (read_seqretry(&state->seqlock, seq)) 3456 continue; 3457 break; 3458 } 3459 seqid_open = state->open_stateid.seqid; 3460 if (read_seqretry(&state->seqlock, seq)) 3461 continue; 3462 3463 dst_seqid = be32_to_cpu(dst->seqid); 3464 if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) < 0) 3465 dst->seqid = seqid_open; 3466 break; 3467 } 3468 } 3469 3470 /* 3471 * Update the seqid of an open stateid after receiving 3472 * NFS4ERR_OLD_STATEID 3473 */ 3474 static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst, 3475 struct nfs4_state *state) 3476 { 3477 __be32 seqid_open; 3478 u32 dst_seqid; 3479 bool ret; 3480 int seq, status = -EAGAIN; 3481 DEFINE_WAIT(wait); 3482 3483 for (;;) { 3484 ret = false; 3485 if (!nfs4_valid_open_stateid(state)) 3486 break; 3487 seq = read_seqbegin(&state->seqlock); 3488 if (!nfs4_state_match_open_stateid_other(state, dst)) { 3489 if (read_seqretry(&state->seqlock, seq)) 3490 continue; 3491 break; 3492 } 3493 3494 write_seqlock(&state->seqlock); 3495 seqid_open = state->open_stateid.seqid; 3496 3497 dst_seqid = be32_to_cpu(dst->seqid); 3498 3499 /* Did another OPEN bump the state's seqid? try again: */ 3500 if ((s32)(be32_to_cpu(seqid_open) - dst_seqid) > 0) { 3501 dst->seqid = seqid_open; 3502 write_sequnlock(&state->seqlock); 3503 ret = true; 3504 break; 3505 } 3506 3507 /* server says we're behind but we haven't seen the update yet */ 3508 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags); 3509 prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE); 3510 write_sequnlock(&state->seqlock); 3511 trace_nfs4_close_stateid_update_wait(state->inode, dst, 0); 3512 3513 if (fatal_signal_pending(current) || nfs_current_task_exiting()) 3514 status = -EINTR; 3515 else 3516 if (schedule_timeout(5*HZ) != 0) 3517 status = 0; 3518 3519 finish_wait(&state->waitq, &wait); 3520 3521 if (!status) 3522 continue; 3523 if (status == -EINTR) 3524 break; 3525 3526 /* we slept the whole 5 seconds, we must have lost a seqid */ 3527 dst->seqid = cpu_to_be32(dst_seqid + 1); 3528 ret = true; 3529 break; 3530 } 3531 3532 return ret; 3533 } 3534 3535 struct nfs4_closedata { 3536 struct inode *inode; 3537 struct nfs4_state *state; 3538 struct nfs_closeargs arg; 3539 struct nfs_closeres res; 3540 struct { 3541 struct nfs4_layoutreturn_args arg; 3542 struct nfs4_layoutreturn_res res; 3543 struct nfs4_xdr_opaque_data ld_private; 3544 u32 roc_barrier; 3545 bool roc; 3546 } lr; 3547 struct nfs_fattr fattr; 3548 unsigned long timestamp; 3549 unsigned short retrans; 3550 }; 3551 3552 static void nfs4_free_closedata(void *data) 3553 { 3554 struct nfs4_closedata *calldata = data; 3555 struct nfs4_state_owner *sp = calldata->state->owner; 3556 struct super_block *sb = calldata->state->inode->i_sb; 3557 3558 if (calldata->lr.roc) 3559 pnfs_roc_release(&calldata->lr.arg, &calldata->lr.res, 3560 calldata->res.lr_ret); 3561 nfs4_put_open_state(calldata->state); 3562 nfs_free_seqid(calldata->arg.seqid); 3563 nfs4_put_state_owner(sp); 3564 nfs_sb_deactive(sb); 3565 kfree(calldata); 3566 } 3567 3568 static void nfs4_close_done(struct rpc_task *task, void *data) 3569 { 3570 struct nfs4_closedata *calldata = data; 3571 struct nfs4_state *state = calldata->state; 3572 struct nfs_server *server = NFS_SERVER(calldata->inode); 3573 nfs4_stateid *res_stateid = NULL; 
3574 struct nfs4_exception exception = { 3575 .state = state, 3576 .inode = calldata->inode, 3577 .stateid = &calldata->arg.stateid, 3578 .retrans = calldata->retrans, 3579 }; 3580 3581 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 3582 return; 3583 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status); 3584 3585 /* Handle Layoutreturn errors */ 3586 if (pnfs_roc_done(task, &calldata->arg.lr_args, &calldata->res.lr_res, 3587 &calldata->res.lr_ret) == -EAGAIN) 3588 goto out_restart; 3589 3590 /* hmm. we are done with the inode, and in the process of freeing 3591 * the state_owner. we keep this around to process errors 3592 */ 3593 switch (task->tk_status) { 3594 case 0: 3595 res_stateid = &calldata->res.stateid; 3596 renew_lease(server, calldata->timestamp); 3597 break; 3598 case -NFS4ERR_ACCESS: 3599 if (calldata->arg.bitmask != NULL) { 3600 calldata->arg.bitmask = NULL; 3601 calldata->res.fattr = NULL; 3602 goto out_restart; 3603 3604 } 3605 break; 3606 case -NFS4ERR_OLD_STATEID: 3607 /* Did we race with OPEN? */ 3608 if (nfs4_refresh_open_old_stateid(&calldata->arg.stateid, 3609 state)) 3610 goto out_restart; 3611 goto out_release; 3612 case -NFS4ERR_ADMIN_REVOKED: 3613 case -NFS4ERR_STALE_STATEID: 3614 case -NFS4ERR_EXPIRED: 3615 nfs4_free_revoked_stateid(server, 3616 &calldata->arg.stateid, 3617 task->tk_msg.rpc_cred); 3618 fallthrough; 3619 case -NFS4ERR_BAD_STATEID: 3620 if (calldata->arg.fmode == 0) 3621 break; 3622 fallthrough; 3623 default: 3624 task->tk_status = nfs4_async_handle_exception(task, 3625 server, task->tk_status, &exception); 3626 calldata->retrans = exception.retrans; 3627 if (exception.retry) 3628 goto out_restart; 3629 } 3630 nfs_clear_open_stateid(state, &calldata->arg.stateid, 3631 res_stateid, calldata->arg.fmode); 3632 out_release: 3633 task->tk_status = 0; 3634 nfs_release_seqid(calldata->arg.seqid); 3635 nfs_refresh_inode(calldata->inode, &calldata->fattr); 3636 dprintk("%s: ret = %d\n", __func__, task->tk_status); 3637 return; 3638 out_restart: 3639 task->tk_status = 0; 3640 rpc_restart_call_prepare(task); 3641 goto out_release; 3642 } 3643 3644 static void nfs4_close_prepare(struct rpc_task *task, void *data) 3645 { 3646 struct nfs4_closedata *calldata = data; 3647 struct nfs4_state *state = calldata->state; 3648 struct inode *inode = calldata->inode; 3649 struct nfs_server *server = NFS_SERVER(inode); 3650 struct pnfs_layout_hdr *lo; 3651 bool is_rdonly, is_wronly, is_rdwr; 3652 int call_close = 0; 3653 3654 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 3655 goto out_wait; 3656 3657 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; 3658 spin_lock(&state->owner->so_lock); 3659 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags); 3660 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags); 3661 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags); 3662 /* Calculate the change in open mode */ 3663 calldata->arg.fmode = 0; 3664 if (state->n_rdwr == 0) { 3665 if (state->n_rdonly == 0) 3666 call_close |= is_rdonly; 3667 else if (is_rdonly) 3668 calldata->arg.fmode |= FMODE_READ; 3669 if (state->n_wronly == 0) 3670 call_close |= is_wronly; 3671 else if (is_wronly) 3672 calldata->arg.fmode |= FMODE_WRITE; 3673 if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE)) 3674 call_close |= is_rdwr; 3675 } else if (is_rdwr) 3676 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE; 3677 3678 nfs4_sync_open_stateid(&calldata->arg.stateid, state); 3679 if (!nfs4_valid_open_stateid(state)) 3680 call_close = 0; 3681 
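	/*
	 * If the open stateid is no longer valid (e.g. it has been revoked),
	 * there is nothing left to CLOSE or downgrade on the server.
	 */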
spin_unlock(&state->owner->so_lock); 3682 3683 if (!call_close) { 3684 /* Note: exit _without_ calling nfs4_close_done */ 3685 goto out_no_action; 3686 } 3687 3688 if (!calldata->lr.roc && nfs4_wait_on_layoutreturn(inode, task)) { 3689 nfs_release_seqid(calldata->arg.seqid); 3690 goto out_wait; 3691 } 3692 3693 lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL; 3694 if (lo && !pnfs_layout_is_valid(lo)) { 3695 calldata->arg.lr_args = NULL; 3696 calldata->res.lr_res = NULL; 3697 } 3698 3699 if (calldata->arg.fmode == 0) 3700 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; 3701 3702 if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) { 3703 /* Close-to-open cache consistency revalidation */ 3704 if (!nfs4_have_delegation(inode, FMODE_READ, 0)) { 3705 nfs4_bitmask_set(calldata->arg.bitmask_store, 3706 server->cache_consistency_bitmask, 3707 inode, 0); 3708 calldata->arg.bitmask = calldata->arg.bitmask_store; 3709 } else 3710 calldata->arg.bitmask = NULL; 3711 } 3712 3713 calldata->arg.share_access = 3714 nfs4_fmode_to_share_access(calldata->arg.fmode); 3715 3716 if (calldata->res.fattr == NULL) 3717 calldata->arg.bitmask = NULL; 3718 else if (calldata->arg.bitmask == NULL) 3719 calldata->res.fattr = NULL; 3720 calldata->timestamp = jiffies; 3721 if (nfs4_setup_sequence(NFS_SERVER(inode)->nfs_client, 3722 &calldata->arg.seq_args, 3723 &calldata->res.seq_res, 3724 task) != 0) 3725 nfs_release_seqid(calldata->arg.seqid); 3726 return; 3727 out_no_action: 3728 task->tk_action = NULL; 3729 out_wait: 3730 nfs4_sequence_done(task, &calldata->res.seq_res); 3731 } 3732 3733 static const struct rpc_call_ops nfs4_close_ops = { 3734 .rpc_call_prepare = nfs4_close_prepare, 3735 .rpc_call_done = nfs4_close_done, 3736 .rpc_release = nfs4_free_closedata, 3737 }; 3738 3739 /* 3740 * It is possible for data to be read/written from a mem-mapped file 3741 * after the sys_close call (which hits the vfs layer as a flush). 3742 * This means that we can't safely call nfsv4 close on a file until 3743 * the inode is cleared. This in turn means that we are not good 3744 * NFSv4 citizens - we do not indicate to the server to update the file's 3745 * share state even when we are done with one of the three share 3746 * stateid's in the inode. 3747 * 3748 * NOTE: Caller must be holding the sp->so_owner semaphore! 
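 * The state and state-owner references passed in with @state are
 * dropped in nfs4_free_closedata() when the CLOSE completes, or in the
 * error paths below.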
3749 */ 3750 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) 3751 { 3752 struct nfs_server *server = NFS_SERVER(state->inode); 3753 struct nfs_client *clp = server->nfs_client; 3754 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 3755 struct nfs4_closedata *calldata; 3756 struct nfs4_state_owner *sp = state->owner; 3757 struct rpc_task *task; 3758 struct rpc_message msg = { 3759 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE], 3760 .rpc_cred = state->owner->so_cred, 3761 }; 3762 struct rpc_task_setup task_setup_data = { 3763 .rpc_client = server->client, 3764 .rpc_message = &msg, 3765 .callback_ops = &nfs4_close_ops, 3766 .workqueue = nfsiod_workqueue, 3767 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 3768 }; 3769 int status = -ENOMEM; 3770 3771 if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE)) 3772 task_setup_data.flags |= RPC_TASK_MOVEABLE; 3773 3774 nfs4_state_protect(clp, NFS_SP4_MACH_CRED_CLEANUP, 3775 &task_setup_data.rpc_client, &msg); 3776 3777 calldata = kzalloc(sizeof(*calldata), gfp_mask); 3778 if (calldata == NULL) 3779 goto out; 3780 nfs4_init_sequence(clp, &calldata->arg.seq_args, 3781 &calldata->res.seq_res, 1, 0); 3782 calldata->inode = state->inode; 3783 calldata->state = state; 3784 calldata->arg.fh = NFS_FH(state->inode); 3785 if (!nfs4_copy_open_stateid(&calldata->arg.stateid, state)) 3786 goto out_free_calldata; 3787 /* Serialization for the sequence id */ 3788 alloc_seqid = clp->cl_mvops->alloc_seqid; 3789 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask); 3790 if (IS_ERR(calldata->arg.seqid)) 3791 goto out_free_calldata; 3792 nfs_fattr_init(&calldata->fattr); 3793 calldata->arg.fmode = 0; 3794 calldata->lr.arg.ld_private = &calldata->lr.ld_private; 3795 calldata->res.fattr = &calldata->fattr; 3796 calldata->res.seqid = calldata->arg.seqid; 3797 calldata->res.server = server; 3798 calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT; 3799 calldata->lr.roc = pnfs_roc(state->inode, &calldata->lr.arg, 3800 &calldata->lr.res, msg.rpc_cred, wait); 3801 if (calldata->lr.roc) { 3802 calldata->arg.lr_args = &calldata->lr.arg; 3803 calldata->res.lr_res = &calldata->lr.res; 3804 } 3805 nfs_sb_active(calldata->inode->i_sb); 3806 3807 msg.rpc_argp = &calldata->arg; 3808 msg.rpc_resp = &calldata->res; 3809 task_setup_data.callback_data = calldata; 3810 task = rpc_run_task(&task_setup_data); 3811 if (IS_ERR(task)) 3812 return PTR_ERR(task); 3813 status = 0; 3814 if (wait) 3815 status = rpc_wait_for_completion_task(task); 3816 rpc_put_task(task); 3817 return status; 3818 out_free_calldata: 3819 kfree(calldata); 3820 out: 3821 nfs4_put_open_state(state); 3822 nfs4_put_state_owner(sp); 3823 return status; 3824 } 3825 3826 static struct inode * 3827 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, 3828 int open_flags, struct iattr *attr, int *opened) 3829 { 3830 struct nfs4_state *state; 3831 struct nfs4_label l, *label; 3832 3833 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l); 3834 3835 /* Protect against concurrent sillydeletes */ 3836 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened); 3837 3838 nfs4_label_release_security(label); 3839 3840 if (IS_ERR(state)) 3841 return ERR_CAST(state); 3842 return state->inode; 3843 } 3844 3845 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) 3846 { 3847 struct dentry *dentry = ctx->dentry; 3848 if (ctx->state == NULL) 3849 return; 3850 if (dentry->d_flags & DCACHE_NFSFS_RENAMED) 3851 
nfs4_inode_set_return_delegation_on_close(d_inode(dentry)); 3852 if (is_sync) 3853 nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx)); 3854 else 3855 nfs4_close_state(ctx->state, _nfs4_ctx_to_openmode(ctx)); 3856 } 3857 3858 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL) 3859 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL) 3860 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_OPEN_ARGUMENTS - 1UL) 3861 3862 #define FATTR4_WORD2_NFS42_TIME_DELEG_MASK \ 3863 (FATTR4_WORD2_TIME_DELEG_MODIFY|FATTR4_WORD2_TIME_DELEG_ACCESS) 3864 static bool nfs4_server_delegtime_capable(struct nfs4_server_caps_res *res) 3865 { 3866 u32 share_access_want = res->open_caps.oa_share_access_want[0]; 3867 u32 attr_bitmask = res->attr_bitmask[2]; 3868 3869 return (share_access_want & NFS4_SHARE_WANT_DELEG_TIMESTAMPS) && 3870 ((attr_bitmask & FATTR4_WORD2_NFS42_TIME_DELEG_MASK) == 3871 FATTR4_WORD2_NFS42_TIME_DELEG_MASK); 3872 } 3873 3874 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 3875 { 3876 u32 minorversion = server->nfs_client->cl_minorversion; 3877 u32 bitmask[3] = { 3878 [0] = FATTR4_WORD0_SUPPORTED_ATTRS, 3879 }; 3880 struct nfs4_server_caps_arg args = { 3881 .fhandle = fhandle, 3882 .bitmask = bitmask, 3883 }; 3884 struct nfs4_server_caps_res res = {}; 3885 struct rpc_message msg = { 3886 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS], 3887 .rpc_argp = &args, 3888 .rpc_resp = &res, 3889 }; 3890 int status; 3891 int i; 3892 3893 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS | 3894 FATTR4_WORD0_FH_EXPIRE_TYPE | 3895 FATTR4_WORD0_LINK_SUPPORT | 3896 FATTR4_WORD0_SYMLINK_SUPPORT | 3897 FATTR4_WORD0_ACLSUPPORT | 3898 FATTR4_WORD0_CASE_INSENSITIVE | 3899 FATTR4_WORD0_CASE_PRESERVING; 3900 if (minorversion) 3901 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT; 3902 if (minorversion > 1) 3903 bitmask[2] |= FATTR4_WORD2_OPEN_ARGUMENTS; 3904 3905 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3906 if (status == 0) { 3907 bitmask[0] = (FATTR4_WORD0_SUPPORTED_ATTRS | 3908 FATTR4_WORD0_FH_EXPIRE_TYPE | 3909 FATTR4_WORD0_LINK_SUPPORT | 3910 FATTR4_WORD0_SYMLINK_SUPPORT | 3911 FATTR4_WORD0_ACLSUPPORT | 3912 FATTR4_WORD0_CASE_INSENSITIVE | 3913 FATTR4_WORD0_CASE_PRESERVING) & 3914 res.attr_bitmask[0]; 3915 /* Sanity check the server answers */ 3916 switch (minorversion) { 3917 case 0: 3918 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK; 3919 res.attr_bitmask[2] = 0; 3920 break; 3921 case 1: 3922 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK; 3923 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT & 3924 res.attr_bitmask[2]; 3925 break; 3926 case 2: 3927 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK; 3928 bitmask[2] = (FATTR4_WORD2_SUPPATTR_EXCLCREAT | 3929 FATTR4_WORD2_OPEN_ARGUMENTS) & 3930 res.attr_bitmask[2]; 3931 } 3932 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); 3933 server->caps &= 3934 ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS | 3935 NFS_CAP_SECURITY_LABEL | NFS_CAP_FS_LOCATIONS | 3936 NFS_CAP_OPEN_XOR | NFS_CAP_DELEGTIME); 3937 server->fattr_valid = NFS_ATTR_FATTR_V4; 3938 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL && 3939 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) 3940 server->caps |= NFS_CAP_ACLS; 3941 if (res.has_links != 0) 3942 server->caps |= NFS_CAP_HARDLINKS; 3943 if (res.has_symlinks != 0) 3944 server->caps |= NFS_CAP_SYMLINKS; 3945 if (res.case_insensitive) 3946 server->caps |= NFS_CAP_CASE_INSENSITIVE; 3947 if 
(res.case_preserving)
			server->caps |= NFS_CAP_CASE_PRESERVING;
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
		if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
			server->caps |= NFS_CAP_SECURITY_LABEL;
#endif
		if (res.attr_bitmask[0] & FATTR4_WORD0_FS_LOCATIONS)
			server->caps |= NFS_CAP_FS_LOCATIONS;
		if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID))
			server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID;
		if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE))
			server->fattr_valid &= ~NFS_ATTR_FATTR_MODE;
		if (!(res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS))
			server->fattr_valid &= ~NFS_ATTR_FATTR_NLINK;
		if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER))
			server->fattr_valid &= ~(NFS_ATTR_FATTR_OWNER |
				NFS_ATTR_FATTR_OWNER_NAME);
		if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP))
			server->fattr_valid &= ~(NFS_ATTR_FATTR_GROUP |
				NFS_ATTR_FATTR_GROUP_NAME);
		if (!(res.attr_bitmask[1] & FATTR4_WORD1_SPACE_USED))
			server->fattr_valid &= ~NFS_ATTR_FATTR_SPACE_USED;
		if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS))
			server->fattr_valid &= ~NFS_ATTR_FATTR_ATIME;
		if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA))
			server->fattr_valid &= ~NFS_ATTR_FATTR_CTIME;
		if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY))
			server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME;
		if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_CREATE))
			server->fattr_valid &= ~NFS_ATTR_FATTR_BTIME;
		memcpy(server->attr_bitmask_nl, res.attr_bitmask,
		       sizeof(server->attr_bitmask));
		server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;

		if (res.open_caps.oa_share_access_want[0] &
		    NFS4_SHARE_WANT_OPEN_XOR_DELEGATION)
			server->caps |= NFS_CAP_OPEN_XOR;
		if (nfs4_server_delegtime_capable(&res))
			server->caps |= NFS_CAP_DELEGTIME;

		memcpy(server->cache_consistency_bitmask, res.attr_bitmask,
		       sizeof(server->cache_consistency_bitmask));
		server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
		server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
		server->cache_consistency_bitmask[2] = 0;

		/* Avoid a regression due to buggy server */
		for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++)
			res.exclcreat_bitmask[i] &= res.attr_bitmask[i];
		memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask,
		       sizeof(server->exclcreat_bitmask));

		server->acl_bitmask = res.acl_bitmask;
		server->fh_expire_type = res.fh_expire_type;
	}

	return status;
}

int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
{
	struct nfs4_exception exception = {
		.interruptible = true,
	};
	int err;

	do {
		err = nfs4_handle_exception(server,
				_nfs4_server_capabilities(server, fhandle),
				&exception);
	} while (exception.retry);
	return err;
}

static void test_fs_location_for_trunking(struct nfs4_fs_location *location,
					  struct nfs_client *clp,
					  struct nfs_server *server)
{
	int i;

	for (i = 0; i < location->nservers; i++) {
		struct nfs4_string *srv_loc = &location->servers[i];
		struct sockaddr_storage addr;
		size_t addrlen;
		struct xprt_create xprt_args = {
			.ident = 0,
			.net = clp->cl_net,
		};
		struct nfs4_add_xprt_data
xprtdata = { 4037 .clp = clp, 4038 }; 4039 struct rpc_add_xprt_test rpcdata = { 4040 .add_xprt_test = clp->cl_mvops->session_trunk, 4041 .data = &xprtdata, 4042 }; 4043 char *servername = NULL; 4044 4045 if (!srv_loc->len) 4046 continue; 4047 4048 addrlen = nfs_parse_server_name(srv_loc->data, srv_loc->len, 4049 &addr, sizeof(addr), 4050 clp->cl_net, server->port); 4051 if (!addrlen) 4052 return; 4053 xprt_args.dstaddr = (struct sockaddr *)&addr; 4054 xprt_args.addrlen = addrlen; 4055 servername = kmalloc(srv_loc->len + 1, GFP_KERNEL); 4056 if (!servername) 4057 return; 4058 memcpy(servername, srv_loc->data, srv_loc->len); 4059 servername[srv_loc->len] = '\0'; 4060 xprt_args.servername = servername; 4061 4062 xprtdata.cred = nfs4_get_clid_cred(clp); 4063 rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args, 4064 rpc_clnt_setup_test_and_add_xprt, 4065 &rpcdata); 4066 if (xprtdata.cred) 4067 put_cred(xprtdata.cred); 4068 kfree(servername); 4069 } 4070 } 4071 4072 static bool _is_same_nfs4_pathname(struct nfs4_pathname *path1, 4073 struct nfs4_pathname *path2) 4074 { 4075 int i; 4076 4077 if (path1->ncomponents != path2->ncomponents) 4078 return false; 4079 for (i = 0; i < path1->ncomponents; i++) { 4080 if (path1->components[i].len != path2->components[i].len) 4081 return false; 4082 if (memcmp(path1->components[i].data, path2->components[i].data, 4083 path1->components[i].len)) 4084 return false; 4085 } 4086 return true; 4087 } 4088 4089 static int _nfs4_discover_trunking(struct nfs_server *server, 4090 struct nfs_fh *fhandle) 4091 { 4092 struct nfs4_fs_locations *locations = NULL; 4093 struct page *page; 4094 const struct cred *cred; 4095 struct nfs_client *clp = server->nfs_client; 4096 const struct nfs4_state_maintenance_ops *ops = 4097 clp->cl_mvops->state_renewal_ops; 4098 int status = -ENOMEM, i; 4099 4100 cred = ops->get_state_renewal_cred(clp); 4101 if (cred == NULL) { 4102 cred = nfs4_get_clid_cred(clp); 4103 if (cred == NULL) 4104 return -ENOKEY; 4105 } 4106 4107 page = alloc_page(GFP_KERNEL); 4108 if (!page) 4109 goto out_put_cred; 4110 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 4111 if (!locations) 4112 goto out_free; 4113 locations->fattr = nfs_alloc_fattr(); 4114 if (!locations->fattr) 4115 goto out_free_2; 4116 4117 status = nfs4_proc_get_locations(server, fhandle, locations, page, 4118 cred); 4119 if (status) 4120 goto out_free_3; 4121 4122 for (i = 0; i < locations->nlocations; i++) { 4123 if (!_is_same_nfs4_pathname(&locations->fs_path, 4124 &locations->locations[i].rootpath)) 4125 continue; 4126 test_fs_location_for_trunking(&locations->locations[i], clp, 4127 server); 4128 } 4129 out_free_3: 4130 kfree(locations->fattr); 4131 out_free_2: 4132 kfree(locations); 4133 out_free: 4134 __free_page(page); 4135 out_put_cred: 4136 put_cred(cred); 4137 return status; 4138 } 4139 4140 static int nfs4_discover_trunking(struct nfs_server *server, 4141 struct nfs_fh *fhandle) 4142 { 4143 struct nfs4_exception exception = { 4144 .interruptible = true, 4145 }; 4146 struct nfs_client *clp = server->nfs_client; 4147 int err = 0; 4148 4149 if (!nfs4_has_session(clp)) 4150 goto out; 4151 do { 4152 err = nfs4_handle_exception(server, 4153 _nfs4_discover_trunking(server, fhandle), 4154 &exception); 4155 } while (exception.retry); 4156 out: 4157 return err; 4158 } 4159 4160 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 4161 struct nfs_fattr *fattr) 4162 { 4163 u32 bitmask[3] = { 4164 [0] = FATTR4_WORD0_TYPE | FATTR4_WORD0_CHANGE | 4165 
FATTR4_WORD0_SIZE | FATTR4_WORD0_FSID, 4166 }; 4167 struct nfs4_lookup_root_arg args = { 4168 .bitmask = bitmask, 4169 }; 4170 struct nfs4_lookup_res res = { 4171 .server = server, 4172 .fattr = fattr, 4173 .fh = fhandle, 4174 }; 4175 struct rpc_message msg = { 4176 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT], 4177 .rpc_argp = &args, 4178 .rpc_resp = &res, 4179 }; 4180 4181 nfs_fattr_init(fattr); 4182 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4183 } 4184 4185 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 4186 struct nfs_fattr *fattr) 4187 { 4188 struct nfs4_exception exception = { 4189 .interruptible = true, 4190 }; 4191 int err; 4192 do { 4193 err = _nfs4_lookup_root(server, fhandle, fattr); 4194 trace_nfs4_lookup_root(server, fhandle, fattr, err); 4195 switch (err) { 4196 case 0: 4197 case -NFS4ERR_WRONGSEC: 4198 goto out; 4199 default: 4200 err = nfs4_handle_exception(server, err, &exception); 4201 } 4202 } while (exception.retry); 4203 out: 4204 return err; 4205 } 4206 4207 static int nfs4_lookup_root_sec(struct nfs_server *server, 4208 struct nfs_fh *fhandle, struct nfs_fattr *fattr, 4209 rpc_authflavor_t flavor) 4210 { 4211 struct rpc_auth_create_args auth_args = { 4212 .pseudoflavor = flavor, 4213 }; 4214 struct rpc_auth *auth; 4215 4216 auth = rpcauth_create(&auth_args, server->client); 4217 if (IS_ERR(auth)) 4218 return -EACCES; 4219 return nfs4_lookup_root(server, fhandle, fattr); 4220 } 4221 4222 /* 4223 * Retry pseudoroot lookup with various security flavors. We do this when: 4224 * 4225 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC 4226 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation 4227 * 4228 * Returns zero on success, or a negative NFS4ERR value, or a 4229 * negative errno value. 4230 */ 4231 int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 4232 struct nfs_fattr *fattr) 4233 { 4234 /* Per 3530bis 15.33.5 */ 4235 static const rpc_authflavor_t flav_array[] = { 4236 RPC_AUTH_GSS_KRB5P, 4237 RPC_AUTH_GSS_KRB5I, 4238 RPC_AUTH_GSS_KRB5, 4239 RPC_AUTH_UNIX, /* courtesy */ 4240 RPC_AUTH_NULL, 4241 }; 4242 int status = -EPERM; 4243 size_t i; 4244 4245 if (server->auth_info.flavor_len > 0) { 4246 /* try each flavor specified by user */ 4247 for (i = 0; i < server->auth_info.flavor_len; i++) { 4248 status = nfs4_lookup_root_sec( 4249 server, fhandle, fattr, 4250 server->auth_info.flavors[i]); 4251 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 4252 continue; 4253 break; 4254 } 4255 } else { 4256 /* no flavors specified by user, try default list */ 4257 for (i = 0; i < ARRAY_SIZE(flav_array); i++) { 4258 status = nfs4_lookup_root_sec(server, fhandle, fattr, 4259 flav_array[i]); 4260 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 4261 continue; 4262 break; 4263 } 4264 } 4265 4266 /* 4267 * -EACCES could mean that the user doesn't have correct permissions 4268 * to access the mount. It could also mean that we tried to mount 4269 * with a gss auth flavor, but rpc.gssd isn't running. Either way, 4270 * existing mount programs don't handle -EACCES very well so it should 4271 * be mapped to -EPERM instead. 
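 *
 * A concrete (hypothetical) instance of the second case: a client that
 * picked RPC_AUTH_GSS_KRB5 from the flav_array above, but has no running
 * rpc.gssd, cannot establish a GSS context, and the failure surfaces here
 * as -EACCES even though the export itself might have been usable with a
 * different flavor. Reporting -EPERM keeps mount.nfs error reporting sane.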
4272 */ 4273 if (status == -EACCES) 4274 status = -EPERM; 4275 return status; 4276 } 4277 4278 /** 4279 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot 4280 * @server: initialized nfs_server handle 4281 * @fhandle: we fill in the pseudo-fs root file handle 4282 * @fattr: we fill in a bare bones struct fattr 4283 * @auth_probe: probe the auth flavours 4284 * 4285 * Returns zero on success, or a negative errno. 4286 */ 4287 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle, 4288 struct nfs_fattr *fattr, bool auth_probe) 4289 { 4290 int status = 0; 4291 4292 if (!auth_probe) 4293 status = nfs4_lookup_root(server, fhandle, fattr); 4294 4295 if (auth_probe || status == NFS4ERR_WRONGSEC) 4296 status = server->nfs_client->cl_mvops->find_root_sec( 4297 server, fhandle, fattr); 4298 4299 return nfs4_map_errors(status); 4300 } 4301 4302 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh, 4303 struct nfs_fsinfo *info) 4304 { 4305 int error; 4306 struct nfs_fattr *fattr = info->fattr; 4307 4308 error = nfs4_server_capabilities(server, mntfh); 4309 if (error < 0) { 4310 dprintk("nfs4_get_root: getcaps error = %d\n", -error); 4311 return error; 4312 } 4313 4314 error = nfs4_proc_getattr(server, mntfh, fattr, NULL); 4315 if (error < 0) { 4316 dprintk("nfs4_get_root: getattr error = %d\n", -error); 4317 goto out; 4318 } 4319 4320 if (fattr->valid & NFS_ATTR_FATTR_FSID && 4321 !nfs_fsid_equal(&server->fsid, &fattr->fsid)) 4322 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid)); 4323 4324 out: 4325 return error; 4326 } 4327 4328 /* 4329 * Get locations and (maybe) other attributes of a referral. 4330 * Note that we'll actually follow the referral later when 4331 * we detect fsid mismatch in inode revalidation 4332 */ 4333 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir, 4334 const struct qstr *name, struct nfs_fattr *fattr, 4335 struct nfs_fh *fhandle) 4336 { 4337 int status = -ENOMEM; 4338 struct page *page = NULL; 4339 struct nfs4_fs_locations *locations = NULL; 4340 4341 page = alloc_page(GFP_KERNEL); 4342 if (page == NULL) 4343 goto out; 4344 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 4345 if (locations == NULL) 4346 goto out; 4347 4348 locations->fattr = fattr; 4349 4350 status = nfs4_proc_fs_locations(client, dir, name, locations, page); 4351 if (status != 0) 4352 goto out; 4353 4354 /* 4355 * If the fsid didn't change, this is a migration event, not a 4356 * referral. Cause us to drop into the exception handler, which 4357 * will kick off migration recovery. 
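 *
 * Rough decision sketch (fsid values made up purely for illustration):
 *
 *   parent fsid = {0x1, 0x2}, returned fattr->fsid = {0x9, 0x9}
 *       - different fsid: a genuine referral; fix up the attributes below
 *   parent fsid = {0x1, 0x2}, returned fattr->fsid = {0x1, 0x2}
 *       - same fsid: treat it as migration and return -NFS4ERR_MOVED so
 *         the exception handler can start migration recovery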
 */
	if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &fattr->fsid)) {
		dprintk("%s: server did not return a different fsid for"
			" a referral at %s\n", __func__, name->name);
		status = -NFS4ERR_MOVED;
		goto out;
	}
	/* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
	nfs_fixup_referral_attributes(fattr);
	memset(fhandle, 0, sizeof(struct nfs_fh));
out:
	if (page)
		__free_page(page);
	kfree(locations);
	return status;
}

static bool should_request_dir_deleg(struct inode *inode)
{
	if (!directory_delegations)
		return false;
	if (!inode)
		return false;
	if (!S_ISDIR(inode->i_mode))
		return false;
	if (!nfs_server_capable(inode, NFS_CAP_DIR_DELEG))
		return false;
	if (!test_and_clear_bit(NFS_INO_REQ_DIR_DELEG, &(NFS_I(inode)->flags)))
		return false;
	if (nfs4_have_delegation(inode, FMODE_READ, 0))
		return false;
	return true;
}

static void nfs4_call_getattr_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	nfs4_setup_sequence(data->seq_server->nfs_client, data->seq_args,
			    data->seq_res, task);
}

static void nfs4_call_getattr_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;

	nfs4_sequence_process(task, data->seq_res);
}

static const struct rpc_call_ops nfs4_call_getattr_ops = {
	.rpc_call_prepare = nfs4_call_getattr_prepare,
	.rpc_call_done = nfs4_call_getattr_done,
};

static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
			      struct nfs_fattr *fattr, struct inode *inode)
{
	__u32 bitmask[NFS4_BITMASK_SZ];
	struct nfs4_getattr_arg args = {
		.fh = fhandle,
		.bitmask = bitmask,
	};
	struct nfs4_getattr_res res = {
		.fattr = fattr,
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	struct nfs4_call_sync_data data = {
		.seq_server = server,
		.seq_args = &args.seq_args,
		.seq_res = &res.seq_res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_call_getattr_ops,
		.callback_data = &data,
	};
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_gdd_res gdd_res;
	int status;

	if (nfs4_has_session(clp))
		task_setup.flags = RPC_TASK_MOVEABLE;

	/* Is this an attribute revalidation, subject to softreval?
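	 *
	 * (Here "softreval" means the NFS_MOUNT_SOFTREVAL mount flag checked
	 * just below: when it is set, the revalidating GETATTR is issued with
	 * RPC_TASK_TIMEOUT so it can fail fast and let the caller fall back
	 * to cached attributes instead of retrying against an unresponsive
	 * server.)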
	 */
	if (inode && (server->flags & NFS_MOUNT_SOFTREVAL))
		task_setup.flags |= RPC_TASK_TIMEOUT;

	args.get_dir_deleg = should_request_dir_deleg(inode);
	if (args.get_dir_deleg)
		res.gdd_res = &gdd_res;

	nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label), inode, 0);
	nfs_fattr_init(fattr);
	nfs4_init_sequence(clp, &args.seq_args, &res.seq_res, 0, 0);

	status = nfs4_call_sync_custom(&task_setup);

	if (args.get_dir_deleg) {
		switch (status) {
		case 0:
			if (gdd_res.status != GDD4_OK)
				break;
			nfs_inode_set_delegation(inode, current_cred(),
						 FMODE_READ, &gdd_res.deleg, 0,
						 NFS4_OPEN_DELEGATE_READ);
			break;
		case -ENOTSUPP:
		case -EOPNOTSUPP:
			server->caps &= ~NFS_CAP_DIR_DELEG;
		}
	}

	nfs4_sequence_free_slot(&res.seq_res);
	return status;
}

int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
		      struct nfs_fattr *fattr, struct inode *inode)
{
	struct nfs4_exception exception = {
		.interruptible = true,
	};
	int err;
	do {
		err = _nfs4_proc_getattr(server, fhandle, fattr, inode);
		trace_nfs4_getattr(server, fhandle, fattr, err);
		switch (err) {
		default:
			err = nfs4_handle_exception(server, err, &exception);
			break;
		case -ENOTSUPP:
		case -EOPNOTSUPP:
			exception.retry = true;
		}
	} while (exception.retry);
	return err;
}

/*
 * The file is not closed if it is opened due to a request to change
 * the size of the file. The open call will not be needed once the
 * VFS layer lookup-intents are implemented.
 *
 * Close is called when the inode is destroyed.
 * If we haven't opened the file for O_WRONLY, we
 * need to in the size_change case to obtain a stateid.
 *
 * Got race?
 * Because OPEN is always done by name in nfsv4, it is
 * possible that we opened a different file by the same
 * name. We can recognize this race condition, but we
 * can't do anything about it besides returning an error.
 *
 * This will be fixed with VFS changes (lookup-intent).
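 *
 * Illustrative sketch of the two size-change paths described above
 * (hypothetical userspace calls, not a trace):
 *
 *   ftruncate(fd, 0)  - sattr->ia_valid carries ATTR_FILE, so the existing
 *                       open context (and its stateid) is reused below;
 *   truncate("f", 0)  - no open context is available, so the SETATTR has to
 *                       be sent without one, which is exactly the case the
 *                       proposed lookup-intent change would remove.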
4517 */ 4518 static int 4519 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, 4520 struct iattr *sattr) 4521 { 4522 struct inode *inode = d_inode(dentry); 4523 const struct cred *cred = NULL; 4524 struct nfs_open_context *ctx = NULL; 4525 int status; 4526 4527 if (pnfs_ld_layoutret_on_setattr(inode) && 4528 sattr->ia_valid & ATTR_SIZE && 4529 sattr->ia_size < i_size_read(inode)) 4530 pnfs_commit_and_return_layout(inode); 4531 4532 nfs_fattr_init(fattr); 4533 4534 /* Deal with open(O_TRUNC) */ 4535 if (sattr->ia_valid & ATTR_OPEN) 4536 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME); 4537 4538 /* Optimization: if the end result is no change, don't RPC */ 4539 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0) 4540 return 0; 4541 4542 /* Search for an existing open(O_WRITE) file */ 4543 if (sattr->ia_valid & ATTR_FILE) { 4544 4545 ctx = nfs_file_open_context(sattr->ia_file); 4546 if (ctx) 4547 cred = ctx->cred; 4548 } 4549 4550 /* Return any delegations if we're going to change ACLs */ 4551 if ((sattr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0) 4552 nfs4_inode_make_writeable(inode); 4553 4554 status = nfs4_do_setattr(inode, cred, fattr, sattr, ctx, NULL); 4555 if (status == 0) { 4556 nfs_setattr_update_inode(inode, sattr, fattr); 4557 nfs_setsecurity(inode, fattr); 4558 } 4559 return status; 4560 } 4561 4562 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, 4563 struct dentry *dentry, const struct qstr *name, 4564 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4565 { 4566 struct nfs_server *server = NFS_SERVER(dir); 4567 int status; 4568 struct nfs4_lookup_arg args = { 4569 .bitmask = server->attr_bitmask, 4570 .dir_fh = NFS_FH(dir), 4571 .name = name, 4572 }; 4573 struct nfs4_lookup_res res = { 4574 .server = server, 4575 .fattr = fattr, 4576 .fh = fhandle, 4577 }; 4578 struct rpc_message msg = { 4579 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP], 4580 .rpc_argp = &args, 4581 .rpc_resp = &res, 4582 }; 4583 unsigned short task_flags = 0; 4584 4585 if (nfs_server_capable(dir, NFS_CAP_MOVEABLE)) 4586 task_flags = RPC_TASK_MOVEABLE; 4587 4588 /* Is this is an attribute revalidation, subject to softreval? 
*/ 4589 if (nfs_lookup_is_soft_revalidate(dentry)) 4590 task_flags |= RPC_TASK_TIMEOUT; 4591 4592 args.bitmask = nfs4_bitmask(server, fattr->label); 4593 4594 nfs_fattr_init(fattr); 4595 4596 dprintk("NFS call lookup %pd2\n", dentry); 4597 nfs4_init_sequence(server->nfs_client, &args.seq_args, &res.seq_res, 0, 0); 4598 status = nfs4_do_call_sync(clnt, server, &msg, 4599 &args.seq_args, &res.seq_res, task_flags); 4600 dprintk("NFS reply lookup: %d\n", status); 4601 return status; 4602 } 4603 4604 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr) 4605 { 4606 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 4607 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT; 4608 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 4609 fattr->nlink = 2; 4610 } 4611 4612 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir, 4613 struct dentry *dentry, const struct qstr *name, 4614 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4615 { 4616 struct nfs4_exception exception = { 4617 .interruptible = true, 4618 }; 4619 struct rpc_clnt *client = *clnt; 4620 int err; 4621 do { 4622 err = _nfs4_proc_lookup(client, dir, dentry, name, fhandle, fattr); 4623 trace_nfs4_lookup(dir, name, err); 4624 switch (err) { 4625 case -NFS4ERR_BADNAME: 4626 err = -ENOENT; 4627 goto out; 4628 case -NFS4ERR_MOVED: 4629 err = nfs4_get_referral(client, dir, name, fattr, fhandle); 4630 if (err == -NFS4ERR_MOVED) 4631 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 4632 goto out; 4633 case -NFS4ERR_WRONGSEC: 4634 err = -EPERM; 4635 if (client != *clnt) 4636 goto out; 4637 client = nfs4_negotiate_security(client, dir, name); 4638 if (IS_ERR(client)) 4639 return PTR_ERR(client); 4640 4641 exception.retry = 1; 4642 break; 4643 default: 4644 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 4645 } 4646 } while (exception.retry); 4647 4648 out: 4649 if (err == 0) 4650 *clnt = client; 4651 else if (client != *clnt) 4652 rpc_shutdown_client(client); 4653 4654 return err; 4655 } 4656 4657 static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry, const struct qstr *name, 4658 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4659 { 4660 int status; 4661 struct rpc_clnt *client = NFS_CLIENT(dir); 4662 4663 status = nfs4_proc_lookup_common(&client, dir, dentry, name, fhandle, fattr); 4664 if (client != NFS_CLIENT(dir)) { 4665 rpc_shutdown_client(client); 4666 nfs_fixup_secinfo_attributes(fattr); 4667 } 4668 return status; 4669 } 4670 4671 struct rpc_clnt * 4672 nfs4_proc_lookup_mountpoint(struct inode *dir, struct dentry *dentry, 4673 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4674 { 4675 struct rpc_clnt *client = NFS_CLIENT(dir); 4676 int status; 4677 4678 status = nfs4_proc_lookup_common(&client, dir, dentry, &dentry->d_name, 4679 fhandle, fattr); 4680 if (status < 0) 4681 return ERR_PTR(status); 4682 return (client == NFS_CLIENT(dir)) ? 
rpc_clone_client(client) : client; 4683 } 4684 4685 static int _nfs4_proc_lookupp(struct inode *inode, 4686 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4687 { 4688 struct rpc_clnt *clnt = NFS_CLIENT(inode); 4689 struct nfs_server *server = NFS_SERVER(inode); 4690 int status; 4691 struct nfs4_lookupp_arg args = { 4692 .bitmask = server->attr_bitmask, 4693 .fh = NFS_FH(inode), 4694 }; 4695 struct nfs4_lookupp_res res = { 4696 .server = server, 4697 .fattr = fattr, 4698 .fh = fhandle, 4699 }; 4700 struct rpc_message msg = { 4701 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUPP], 4702 .rpc_argp = &args, 4703 .rpc_resp = &res, 4704 }; 4705 unsigned short task_flags = 0; 4706 4707 if (server->flags & NFS_MOUNT_SOFTREVAL) 4708 task_flags |= RPC_TASK_TIMEOUT; 4709 if (server->caps & NFS_CAP_MOVEABLE) 4710 task_flags |= RPC_TASK_MOVEABLE; 4711 4712 args.bitmask = nfs4_bitmask(server, fattr->label); 4713 4714 nfs_fattr_init(fattr); 4715 nfs4_init_sequence(server->nfs_client, &args.seq_args, &res.seq_res, 0, 0); 4716 4717 dprintk("NFS call lookupp ino=0x%lx\n", inode->i_ino); 4718 status = nfs4_do_call_sync(clnt, server, &msg, &args.seq_args, 4719 &res.seq_res, task_flags); 4720 dprintk("NFS reply lookupp: %d\n", status); 4721 return status; 4722 } 4723 4724 static int nfs4_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle, 4725 struct nfs_fattr *fattr) 4726 { 4727 struct nfs4_exception exception = { 4728 .interruptible = true, 4729 }; 4730 int err; 4731 do { 4732 err = _nfs4_proc_lookupp(inode, fhandle, fattr); 4733 trace_nfs4_lookupp(inode, err); 4734 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4735 &exception); 4736 } while (exception.retry); 4737 return err; 4738 } 4739 4740 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry, 4741 const struct cred *cred) 4742 { 4743 struct nfs_server *server = NFS_SERVER(inode); 4744 struct nfs4_accessargs args = { 4745 .fh = NFS_FH(inode), 4746 .access = entry->mask, 4747 }; 4748 struct nfs4_accessres res = { 4749 .server = server, 4750 }; 4751 struct rpc_message msg = { 4752 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], 4753 .rpc_argp = &args, 4754 .rpc_resp = &res, 4755 .rpc_cred = cred, 4756 }; 4757 int status = 0; 4758 4759 if (!nfs4_have_delegation(inode, FMODE_READ, 0)) { 4760 nfs_request_directory_delegation(inode); 4761 res.fattr = nfs_alloc_fattr(); 4762 if (res.fattr == NULL) 4763 return -ENOMEM; 4764 args.bitmask = server->cache_consistency_bitmask; 4765 } 4766 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4767 if (!status) { 4768 nfs_access_set_mask(entry, res.access); 4769 if (res.fattr) 4770 nfs_refresh_inode(inode, res.fattr); 4771 } 4772 nfs_free_fattr(res.fattr); 4773 return status; 4774 } 4775 4776 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry, 4777 const struct cred *cred) 4778 { 4779 struct nfs4_exception exception = { 4780 .interruptible = true, 4781 }; 4782 int err; 4783 do { 4784 err = _nfs4_proc_access(inode, entry, cred); 4785 trace_nfs4_access(inode, err); 4786 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4787 &exception); 4788 } while (exception.retry); 4789 return err; 4790 } 4791 4792 /* 4793 * TODO: For the time being, we don't try to get any attributes 4794 * along with any of the zero-copy operations READ, READDIR, 4795 * READLINK, WRITE. 
4796 * 4797 * In the case of the first three, we want to put the GETATTR 4798 * after the read-type operation -- this is because it is hard 4799 * to predict the length of a GETATTR response in v4, and thus 4800 * align the READ data correctly. This means that the GETATTR 4801 * may end up partially falling into the page cache, and we should 4802 * shift it into the 'tail' of the xdr_buf before processing. 4803 * To do this efficiently, we need to know the total length 4804 * of data received, which doesn't seem to be available outside 4805 * of the RPC layer. 4806 * 4807 * In the case of WRITE, we also want to put the GETATTR after 4808 * the operation -- in this case because we want to make sure 4809 * we get the post-operation mtime and size. 4810 * 4811 * Both of these changes to the XDR layer would in fact be quite 4812 * minor, but I decided to leave them for a subsequent patch. 4813 */ 4814 static int _nfs4_proc_readlink(struct inode *inode, struct page *page, 4815 unsigned int pgbase, unsigned int pglen) 4816 { 4817 struct nfs4_readlink args = { 4818 .fh = NFS_FH(inode), 4819 .pgbase = pgbase, 4820 .pglen = pglen, 4821 .pages = &page, 4822 }; 4823 struct nfs4_readlink_res res; 4824 struct rpc_message msg = { 4825 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK], 4826 .rpc_argp = &args, 4827 .rpc_resp = &res, 4828 }; 4829 4830 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0); 4831 } 4832 4833 static int nfs4_proc_readlink(struct inode *inode, struct page *page, 4834 unsigned int pgbase, unsigned int pglen) 4835 { 4836 struct nfs4_exception exception = { 4837 .interruptible = true, 4838 }; 4839 int err; 4840 do { 4841 err = _nfs4_proc_readlink(inode, page, pgbase, pglen); 4842 trace_nfs4_readlink(inode, err); 4843 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4844 &exception); 4845 } while (exception.retry); 4846 return err; 4847 } 4848 4849 /* 4850 * This is just for mknod. open(O_CREAT) will always do ->open_context(). 
4851 */ 4852 static int 4853 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, 4854 int flags) 4855 { 4856 struct nfs_server *server = NFS_SERVER(dir); 4857 struct nfs4_label l, *ilabel; 4858 struct nfs_open_context *ctx; 4859 struct nfs4_state *state; 4860 int status = 0; 4861 4862 ctx = alloc_nfs_open_context(dentry, FMODE_READ, NULL); 4863 if (IS_ERR(ctx)) 4864 return PTR_ERR(ctx); 4865 4866 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l); 4867 4868 nfs_request_directory_delegation(dir); 4869 4870 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 4871 sattr->ia_mode &= ~current_umask(); 4872 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL); 4873 if (IS_ERR(state)) { 4874 status = PTR_ERR(state); 4875 goto out; 4876 } 4877 out: 4878 nfs4_label_release_security(ilabel); 4879 put_nfs_open_context(ctx); 4880 return status; 4881 } 4882 4883 static int 4884 _nfs4_proc_remove(struct inode *dir, const struct qstr *name, u32 ftype) 4885 { 4886 struct nfs_server *server = NFS_SERVER(dir); 4887 struct nfs_removeargs args = { 4888 .fh = NFS_FH(dir), 4889 .name = *name, 4890 }; 4891 struct nfs_removeres res = { 4892 .server = server, 4893 }; 4894 struct rpc_message msg = { 4895 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE], 4896 .rpc_argp = &args, 4897 .rpc_resp = &res, 4898 }; 4899 unsigned long timestamp = jiffies; 4900 int status; 4901 4902 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 4903 if (status == 0) { 4904 spin_lock(&dir->i_lock); 4905 /* Removing a directory decrements nlink in the parent */ 4906 if (ftype == NF4DIR && dir->i_nlink > 2) 4907 nfs4_dec_nlink_locked(dir); 4908 nfs4_update_changeattr_locked(dir, &res.cinfo, timestamp, 4909 NFS_INO_INVALID_DATA); 4910 spin_unlock(&dir->i_lock); 4911 } 4912 return status; 4913 } 4914 4915 static int nfs4_proc_remove(struct inode *dir, struct dentry *dentry) 4916 { 4917 struct nfs4_exception exception = { 4918 .interruptible = true, 4919 }; 4920 struct inode *inode = d_inode(dentry); 4921 int err; 4922 4923 if (inode) { 4924 if (inode->i_nlink == 1) 4925 nfs4_inode_return_delegation(inode); 4926 else 4927 nfs4_inode_make_writeable(inode); 4928 } 4929 do { 4930 err = _nfs4_proc_remove(dir, &dentry->d_name, NF4REG); 4931 trace_nfs4_remove(dir, &dentry->d_name, err); 4932 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4933 &exception); 4934 } while (exception.retry); 4935 return err; 4936 } 4937 4938 static int nfs4_proc_rmdir(struct inode *dir, const struct qstr *name) 4939 { 4940 struct nfs4_exception exception = { 4941 .interruptible = true, 4942 }; 4943 int err; 4944 4945 do { 4946 err = _nfs4_proc_remove(dir, name, NF4DIR); 4947 trace_nfs4_remove(dir, name, err); 4948 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4949 &exception); 4950 } while (exception.retry); 4951 return err; 4952 } 4953 4954 static void nfs4_proc_unlink_setup(struct rpc_message *msg, 4955 struct dentry *dentry, 4956 struct inode *inode) 4957 { 4958 struct nfs_removeargs *args = msg->rpc_argp; 4959 struct nfs_removeres *res = msg->rpc_resp; 4960 struct nfs_server *server = NFS_SB(dentry->d_sb); 4961 4962 res->server = server; 4963 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE]; 4964 nfs4_init_sequence(server->nfs_client, &args->seq_args, 4965 &res->seq_res, 1, 0); 4966 4967 nfs_fattr_init(res->dir_attr); 4968 nfs_request_directory_delegation(d_inode(dentry->d_parent)); 4969 4970 if (inode) { 4971 nfs4_inode_return_delegation(inode); 4972 
nfs_d_prune_case_insensitive_aliases(inode); 4973 } 4974 } 4975 4976 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) 4977 { 4978 nfs4_setup_sequence(NFS_SB(data->dentry->d_sb)->nfs_client, 4979 &data->args.seq_args, 4980 &data->res.seq_res, 4981 task); 4982 } 4983 4984 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) 4985 { 4986 struct nfs_unlinkdata *data = task->tk_calldata; 4987 struct nfs_removeres *res = &data->res; 4988 4989 if (!nfs4_sequence_done(task, &res->seq_res)) 4990 return 0; 4991 if (nfs4_async_handle_error(task, res->server, NULL, 4992 &data->timeout) == -EAGAIN) 4993 return 0; 4994 if (task->tk_status == 0) 4995 nfs4_update_changeattr(dir, &res->cinfo, 4996 res->dir_attr->time_start, 4997 NFS_INO_INVALID_DATA); 4998 return 1; 4999 } 5000 5001 static void nfs4_proc_rename_setup(struct rpc_message *msg, 5002 struct dentry *old_dentry, 5003 struct dentry *new_dentry, 5004 struct inode *same_parent) 5005 { 5006 struct nfs_server *server = NFS_SB(old_dentry->d_sb); 5007 struct nfs_renameargs *arg = msg->rpc_argp; 5008 struct nfs_renameres *res = msg->rpc_resp; 5009 struct inode *old_inode = d_inode(old_dentry); 5010 struct inode *new_inode = d_inode(new_dentry); 5011 5012 if (old_inode) 5013 nfs4_inode_make_writeable(old_inode); 5014 if (new_inode) 5015 nfs4_inode_return_delegation(new_inode); 5016 if (same_parent) 5017 nfs_request_directory_delegation(same_parent); 5018 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; 5019 res->server = server; 5020 nfs4_init_sequence(server->nfs_client, &arg->seq_args, 5021 &res->seq_res, 1, 0); 5022 } 5023 5024 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) 5025 { 5026 nfs4_setup_sequence(NFS_SERVER(data->old_dir)->nfs_client, 5027 &data->args.seq_args, 5028 &data->res.seq_res, 5029 task); 5030 } 5031 5032 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, 5033 struct inode *new_dir) 5034 { 5035 struct nfs_renamedata *data = task->tk_calldata; 5036 struct nfs_renameres *res = &data->res; 5037 5038 if (!nfs4_sequence_done(task, &res->seq_res)) 5039 return 0; 5040 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN) 5041 return 0; 5042 5043 if (task->tk_status == 0) { 5044 nfs_d_prune_case_insensitive_aliases(d_inode(data->old_dentry)); 5045 if (new_dir != old_dir) { 5046 /* Note: If we moved a directory, nlink will change */ 5047 nfs4_update_changeattr(old_dir, &res->old_cinfo, 5048 res->old_fattr->time_start, 5049 NFS_INO_INVALID_NLINK | 5050 NFS_INO_INVALID_DATA); 5051 nfs4_update_changeattr(new_dir, &res->new_cinfo, 5052 res->new_fattr->time_start, 5053 NFS_INO_INVALID_NLINK | 5054 NFS_INO_INVALID_DATA); 5055 } else 5056 nfs4_update_changeattr(old_dir, &res->old_cinfo, 5057 res->old_fattr->time_start, 5058 NFS_INO_INVALID_DATA); 5059 } 5060 return 1; 5061 } 5062 5063 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name) 5064 { 5065 struct nfs_server *server = NFS_SERVER(inode); 5066 __u32 bitmask[NFS4_BITMASK_SZ]; 5067 struct nfs4_link_arg arg = { 5068 .fh = NFS_FH(inode), 5069 .dir_fh = NFS_FH(dir), 5070 .name = name, 5071 .bitmask = bitmask, 5072 }; 5073 struct nfs4_link_res res = { 5074 .server = server, 5075 }; 5076 struct rpc_message msg = { 5077 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK], 5078 .rpc_argp = &arg, 5079 .rpc_resp = &res, 5080 }; 5081 int status = -ENOMEM; 5082 5083 res.fattr = 
nfs_alloc_fattr_with_label(server); 5084 if (res.fattr == NULL) 5085 goto out; 5086 5087 nfs4_inode_make_writeable(inode); 5088 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, res.fattr->label), 5089 inode, 5090 NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME); 5091 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 5092 if (!status) { 5093 nfs4_update_changeattr(dir, &res.cinfo, res.fattr->time_start, 5094 NFS_INO_INVALID_DATA); 5095 nfs4_inc_nlink(inode); 5096 status = nfs_post_op_update_inode(inode, res.fattr); 5097 if (!status) 5098 nfs_setsecurity(inode, res.fattr); 5099 } 5100 5101 out: 5102 nfs_free_fattr(res.fattr); 5103 return status; 5104 } 5105 5106 static int nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name) 5107 { 5108 struct nfs4_exception exception = { 5109 .interruptible = true, 5110 }; 5111 int err; 5112 do { 5113 err = nfs4_handle_exception(NFS_SERVER(inode), 5114 _nfs4_proc_link(inode, dir, name), 5115 &exception); 5116 } while (exception.retry); 5117 return err; 5118 } 5119 5120 struct nfs4_createdata { 5121 struct rpc_message msg; 5122 struct nfs4_create_arg arg; 5123 struct nfs4_create_res res; 5124 struct nfs_fh fh; 5125 struct nfs_fattr fattr; 5126 }; 5127 5128 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir, 5129 const struct qstr *name, struct iattr *sattr, u32 ftype) 5130 { 5131 struct nfs4_createdata *data; 5132 5133 data = kzalloc(sizeof(*data), GFP_KERNEL); 5134 if (data != NULL) { 5135 struct nfs_server *server = NFS_SERVER(dir); 5136 5137 data->fattr.label = nfs4_label_alloc(server, GFP_KERNEL); 5138 if (IS_ERR(data->fattr.label)) 5139 goto out_free; 5140 5141 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE]; 5142 data->msg.rpc_argp = &data->arg; 5143 data->msg.rpc_resp = &data->res; 5144 data->arg.dir_fh = NFS_FH(dir); 5145 data->arg.server = server; 5146 data->arg.name = name; 5147 data->arg.attrs = sattr; 5148 data->arg.ftype = ftype; 5149 data->arg.bitmask = nfs4_bitmask(server, data->fattr.label); 5150 data->arg.umask = current_umask(); 5151 data->res.server = server; 5152 data->res.fh = &data->fh; 5153 data->res.fattr = &data->fattr; 5154 nfs_fattr_init(data->res.fattr); 5155 } 5156 return data; 5157 out_free: 5158 kfree(data); 5159 return NULL; 5160 } 5161 5162 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data) 5163 { 5164 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 5165 &data->arg.seq_args, &data->res.seq_res, 1); 5166 if (status == 0) { 5167 spin_lock(&dir->i_lock); 5168 nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo, 5169 data->res.fattr->time_start, 5170 NFS_INO_INVALID_DATA); 5171 spin_unlock(&dir->i_lock); 5172 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr); 5173 } 5174 return status; 5175 } 5176 5177 static struct dentry *nfs4_do_mkdir(struct inode *dir, struct dentry *dentry, 5178 struct nfs4_createdata *data, int *statusp) 5179 { 5180 struct dentry *ret; 5181 5182 *statusp = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 5183 &data->arg.seq_args, &data->res.seq_res, 1); 5184 5185 if (*statusp) 5186 return NULL; 5187 5188 spin_lock(&dir->i_lock); 5189 /* Creating a directory bumps nlink in the parent */ 5190 nfs4_inc_nlink_locked(dir); 5191 nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo, 5192 data->res.fattr->time_start, 5193 NFS_INO_INVALID_DATA); 5194 spin_unlock(&dir->i_lock); 5195 ret = 
nfs_add_or_obtain(dentry, data->res.fh, data->res.fattr); 5196 if (!IS_ERR(ret)) 5197 return ret; 5198 *statusp = PTR_ERR(ret); 5199 return NULL; 5200 } 5201 5202 static void nfs4_free_createdata(struct nfs4_createdata *data) 5203 { 5204 nfs4_label_free(data->fattr.label); 5205 kfree(data); 5206 } 5207 5208 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 5209 struct folio *folio, unsigned int len, struct iattr *sattr, 5210 struct nfs4_label *label) 5211 { 5212 struct page *page = &folio->page; 5213 struct nfs4_createdata *data; 5214 int status = -ENAMETOOLONG; 5215 5216 if (len > NFS4_MAXPATHLEN) 5217 goto out; 5218 5219 status = -ENOMEM; 5220 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK); 5221 if (data == NULL) 5222 goto out; 5223 5224 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK]; 5225 data->arg.u.symlink.pages = &page; 5226 data->arg.u.symlink.len = len; 5227 data->arg.label = label; 5228 5229 status = nfs4_do_create(dir, dentry, data); 5230 5231 nfs4_free_createdata(data); 5232 out: 5233 return status; 5234 } 5235 5236 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 5237 struct folio *folio, unsigned int len, struct iattr *sattr) 5238 { 5239 struct nfs4_exception exception = { 5240 .interruptible = true, 5241 }; 5242 struct nfs4_label l, *label; 5243 int err; 5244 5245 label = nfs4_label_init_security(dir, dentry, sattr, &l); 5246 5247 do { 5248 err = _nfs4_proc_symlink(dir, dentry, folio, len, sattr, label); 5249 trace_nfs4_symlink(dir, &dentry->d_name, err); 5250 err = nfs4_handle_exception(NFS_SERVER(dir), err, 5251 &exception); 5252 } while (exception.retry); 5253 5254 nfs4_label_release_security(label); 5255 return err; 5256 } 5257 5258 static struct dentry *_nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 5259 struct iattr *sattr, 5260 struct nfs4_label *label, int *statusp) 5261 { 5262 struct nfs4_createdata *data; 5263 struct dentry *ret = NULL; 5264 5265 *statusp = -ENOMEM; 5266 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR); 5267 if (data == NULL) 5268 goto out; 5269 5270 data->arg.label = label; 5271 ret = nfs4_do_mkdir(dir, dentry, data, statusp); 5272 5273 nfs4_free_createdata(data); 5274 out: 5275 return ret; 5276 } 5277 5278 static struct dentry *nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 5279 struct iattr *sattr) 5280 { 5281 struct nfs_server *server = NFS_SERVER(dir); 5282 struct nfs4_exception exception = { 5283 .interruptible = true, 5284 }; 5285 struct nfs4_label l, *label; 5286 struct dentry *alias; 5287 int err; 5288 5289 label = nfs4_label_init_security(dir, dentry, sattr, &l); 5290 5291 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 5292 sattr->ia_mode &= ~current_umask(); 5293 do { 5294 alias = _nfs4_proc_mkdir(dir, dentry, sattr, label, &err); 5295 trace_nfs4_mkdir(dir, &dentry->d_name, err); 5296 if (err) 5297 alias = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir), 5298 err, 5299 &exception)); 5300 } while (exception.retry); 5301 nfs4_label_release_security(label); 5302 5303 return alias; 5304 } 5305 5306 static int _nfs4_proc_readdir(struct nfs_readdir_arg *nr_arg, 5307 struct nfs_readdir_res *nr_res) 5308 { 5309 struct inode *dir = d_inode(nr_arg->dentry); 5310 struct nfs_server *server = NFS_SERVER(dir); 5311 struct nfs4_readdir_arg args = { 5312 .fh = NFS_FH(dir), 5313 .pages = nr_arg->pages, 5314 .pgbase = 0, 5315 .count = nr_arg->page_len, 5316 .plus = nr_arg->plus, 5317 }; 5318 struct nfs4_readdir_res res; 5319 struct 
rpc_message msg = { 5320 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR], 5321 .rpc_argp = &args, 5322 .rpc_resp = &res, 5323 .rpc_cred = nr_arg->cred, 5324 }; 5325 int status; 5326 5327 dprintk("%s: dentry = %pd2, cookie = %llu\n", __func__, 5328 nr_arg->dentry, (unsigned long long)nr_arg->cookie); 5329 if (!(server->caps & NFS_CAP_SECURITY_LABEL)) 5330 args.bitmask = server->attr_bitmask_nl; 5331 else 5332 args.bitmask = server->attr_bitmask; 5333 5334 nfs4_setup_readdir(nr_arg->cookie, nr_arg->verf, nr_arg->dentry, &args); 5335 res.pgbase = args.pgbase; 5336 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, 5337 &res.seq_res, 0); 5338 if (status >= 0) { 5339 memcpy(nr_res->verf, res.verifier.data, NFS4_VERIFIER_SIZE); 5340 status += args.pgbase; 5341 } 5342 5343 nfs_invalidate_atime(dir); 5344 5345 dprintk("%s: returns %d\n", __func__, status); 5346 return status; 5347 } 5348 5349 static int nfs4_proc_readdir(struct nfs_readdir_arg *arg, 5350 struct nfs_readdir_res *res) 5351 { 5352 struct nfs4_exception exception = { 5353 .interruptible = true, 5354 }; 5355 int err; 5356 do { 5357 err = _nfs4_proc_readdir(arg, res); 5358 trace_nfs4_readdir(d_inode(arg->dentry), err); 5359 err = nfs4_handle_exception(NFS_SERVER(d_inode(arg->dentry)), 5360 err, &exception); 5361 } while (exception.retry); 5362 return err; 5363 } 5364 5365 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 5366 struct iattr *sattr, struct nfs4_label *label, dev_t rdev) 5367 { 5368 struct nfs4_createdata *data; 5369 int mode = sattr->ia_mode; 5370 int status = -ENOMEM; 5371 5372 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK); 5373 if (data == NULL) 5374 goto out; 5375 5376 if (S_ISFIFO(mode)) 5377 data->arg.ftype = NF4FIFO; 5378 else if (S_ISBLK(mode)) { 5379 data->arg.ftype = NF4BLK; 5380 data->arg.u.device.specdata1 = MAJOR(rdev); 5381 data->arg.u.device.specdata2 = MINOR(rdev); 5382 } 5383 else if (S_ISCHR(mode)) { 5384 data->arg.ftype = NF4CHR; 5385 data->arg.u.device.specdata1 = MAJOR(rdev); 5386 data->arg.u.device.specdata2 = MINOR(rdev); 5387 } else if (!S_ISSOCK(mode)) { 5388 status = -EINVAL; 5389 goto out_free; 5390 } 5391 5392 data->arg.label = label; 5393 status = nfs4_do_create(dir, dentry, data); 5394 out_free: 5395 nfs4_free_createdata(data); 5396 out: 5397 return status; 5398 } 5399 5400 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 5401 struct iattr *sattr, dev_t rdev) 5402 { 5403 struct nfs_server *server = NFS_SERVER(dir); 5404 struct nfs4_exception exception = { 5405 .interruptible = true, 5406 }; 5407 struct nfs4_label l, *label; 5408 int err; 5409 5410 label = nfs4_label_init_security(dir, dentry, sattr, &l); 5411 5412 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 5413 sattr->ia_mode &= ~current_umask(); 5414 do { 5415 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev); 5416 trace_nfs4_mknod(dir, &dentry->d_name, err); 5417 err = nfs4_handle_exception(NFS_SERVER(dir), err, 5418 &exception); 5419 } while (exception.retry); 5420 5421 nfs4_label_release_security(label); 5422 5423 return err; 5424 } 5425 5426 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, 5427 struct nfs_fsstat *fsstat) 5428 { 5429 struct nfs4_statfs_arg args = { 5430 .fh = fhandle, 5431 .bitmask = server->attr_bitmask, 5432 }; 5433 struct nfs4_statfs_res res = { 5434 .fsstat = fsstat, 5435 }; 5436 struct rpc_message msg = { 5437 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS], 5438 .rpc_argp = &args, 
5439 .rpc_resp = &res, 5440 }; 5441 5442 nfs_fattr_init(fsstat->fattr); 5443 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5444 } 5445 5446 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) 5447 { 5448 struct nfs4_exception exception = { 5449 .interruptible = true, 5450 }; 5451 int err; 5452 do { 5453 err = nfs4_handle_exception(server, 5454 _nfs4_proc_statfs(server, fhandle, fsstat), 5455 &exception); 5456 } while (exception.retry); 5457 return err; 5458 } 5459 5460 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, 5461 struct nfs_fsinfo *fsinfo) 5462 { 5463 struct nfs4_fsinfo_arg args = { 5464 .fh = fhandle, 5465 .bitmask = server->attr_bitmask, 5466 }; 5467 struct nfs4_fsinfo_res res = { 5468 .fsinfo = fsinfo, 5469 }; 5470 struct rpc_message msg = { 5471 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO], 5472 .rpc_argp = &args, 5473 .rpc_resp = &res, 5474 }; 5475 5476 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5477 } 5478 5479 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 5480 { 5481 struct nfs4_exception exception = { 5482 .interruptible = true, 5483 }; 5484 int err; 5485 5486 do { 5487 err = _nfs4_do_fsinfo(server, fhandle, fsinfo); 5488 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err); 5489 if (err == 0) { 5490 nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time); 5491 break; 5492 } 5493 err = nfs4_handle_exception(server, err, &exception); 5494 } while (exception.retry); 5495 return err; 5496 } 5497 5498 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 5499 { 5500 int error; 5501 5502 nfs_fattr_init(fsinfo->fattr); 5503 error = nfs4_do_fsinfo(server, fhandle, fsinfo); 5504 if (error == 0) { 5505 /* block layout checks this! 
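	 * (That is, the pNFS block layout driver consults
	 * server->pnfs_blksize when it is set up by the
	 * set_pnfs_layoutdriver() call below, so the value must be cached
	 * before that call - hence the ordering of the two statements.)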
*/ 5506 server->pnfs_blksize = fsinfo->blksize; 5507 set_pnfs_layoutdriver(server, fhandle, fsinfo); 5508 } 5509 5510 return error; 5511 } 5512 5513 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 5514 struct nfs_pathconf *pathconf) 5515 { 5516 struct nfs4_pathconf_arg args = { 5517 .fh = fhandle, 5518 .bitmask = server->attr_bitmask, 5519 }; 5520 struct nfs4_pathconf_res res = { 5521 .pathconf = pathconf, 5522 }; 5523 struct rpc_message msg = { 5524 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF], 5525 .rpc_argp = &args, 5526 .rpc_resp = &res, 5527 }; 5528 5529 /* None of the pathconf attributes are mandatory to implement */ 5530 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) { 5531 memset(pathconf, 0, sizeof(*pathconf)); 5532 return 0; 5533 } 5534 5535 nfs_fattr_init(pathconf->fattr); 5536 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5537 } 5538 5539 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 5540 struct nfs_pathconf *pathconf) 5541 { 5542 struct nfs4_exception exception = { 5543 .interruptible = true, 5544 }; 5545 int err; 5546 5547 do { 5548 err = nfs4_handle_exception(server, 5549 _nfs4_proc_pathconf(server, fhandle, pathconf), 5550 &exception); 5551 } while (exception.retry); 5552 return err; 5553 } 5554 5555 int nfs4_set_rw_stateid(nfs4_stateid *stateid, 5556 const struct nfs_open_context *ctx, 5557 const struct nfs_lock_context *l_ctx, 5558 fmode_t fmode) 5559 { 5560 return nfs4_select_rw_stateid(ctx->state, fmode, l_ctx, stateid, NULL); 5561 } 5562 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid); 5563 5564 static bool nfs4_stateid_is_current(nfs4_stateid *stateid, 5565 const struct nfs_open_context *ctx, 5566 const struct nfs_lock_context *l_ctx, 5567 fmode_t fmode) 5568 { 5569 nfs4_stateid _current_stateid; 5570 5571 /* If the current stateid represents a lost lock, then exit */ 5572 if (nfs4_set_rw_stateid(&_current_stateid, ctx, l_ctx, fmode) == -EIO) 5573 return true; 5574 return nfs4_stateid_match(stateid, &_current_stateid); 5575 } 5576 5577 static bool nfs4_error_stateid_expired(int err) 5578 { 5579 switch (err) { 5580 case -NFS4ERR_DELEG_REVOKED: 5581 case -NFS4ERR_ADMIN_REVOKED: 5582 case -NFS4ERR_BAD_STATEID: 5583 case -NFS4ERR_STALE_STATEID: 5584 case -NFS4ERR_OLD_STATEID: 5585 case -NFS4ERR_OPENMODE: 5586 case -NFS4ERR_EXPIRED: 5587 return true; 5588 } 5589 return false; 5590 } 5591 5592 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr) 5593 { 5594 struct nfs_server *server = NFS_SERVER(hdr->inode); 5595 5596 trace_nfs4_read(hdr, task->tk_status); 5597 if (task->tk_status < 0) { 5598 struct nfs4_exception exception = { 5599 .inode = hdr->inode, 5600 .state = hdr->args.context->state, 5601 .stateid = &hdr->args.stateid, 5602 .retrans = hdr->retrans, 5603 }; 5604 task->tk_status = nfs4_async_handle_exception(task, 5605 server, task->tk_status, &exception); 5606 hdr->retrans = exception.retrans; 5607 if (exception.retry) { 5608 rpc_restart_call_prepare(task); 5609 return -EAGAIN; 5610 } 5611 } 5612 5613 if (task->tk_status > 0) 5614 renew_lease(server, hdr->timestamp); 5615 return 0; 5616 } 5617 5618 static bool nfs4_read_stateid_changed(struct rpc_task *task, 5619 struct nfs_pgio_args *args) 5620 { 5621 5622 if (!nfs4_error_stateid_expired(task->tk_status) || 5623 nfs4_stateid_is_current(&args->stateid, 5624 args->context, 5625 args->lock_context, 5626 FMODE_READ)) 5627 return false; 5628 rpc_restart_call_prepare(task); 5629 
return true; 5630 } 5631 5632 static bool nfs4_read_plus_not_supported(struct rpc_task *task, 5633 struct nfs_pgio_header *hdr) 5634 { 5635 struct nfs_server *server = NFS_SERVER(hdr->inode); 5636 struct rpc_message *msg = &task->tk_msg; 5637 5638 if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS] && 5639 task->tk_status == -ENOTSUPP) { 5640 server->caps &= ~NFS_CAP_READ_PLUS; 5641 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 5642 rpc_restart_call_prepare(task); 5643 return true; 5644 } 5645 return false; 5646 } 5647 5648 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 5649 { 5650 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 5651 return -EAGAIN; 5652 if (nfs4_read_stateid_changed(task, &hdr->args)) 5653 return -EAGAIN; 5654 if (nfs4_read_plus_not_supported(task, hdr)) 5655 return -EAGAIN; 5656 if (task->tk_status > 0) 5657 nfs_invalidate_atime(hdr->inode); 5658 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) : 5659 nfs4_read_done_cb(task, hdr); 5660 } 5661 5662 #if defined CONFIG_NFS_V4_2 && defined CONFIG_NFS_V4_2_READ_PLUS 5663 static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr, 5664 struct rpc_message *msg) 5665 { 5666 /* Note: We don't use READ_PLUS with pNFS yet */ 5667 if (nfs_server_capable(hdr->inode, NFS_CAP_READ_PLUS) && !hdr->ds_clp) { 5668 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS]; 5669 return nfs_read_alloc_scratch(hdr, READ_PLUS_SCRATCH_SIZE); 5670 } 5671 return false; 5672 } 5673 #else 5674 static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr, 5675 struct rpc_message *msg) 5676 { 5677 return false; 5678 } 5679 #endif /* CONFIG_NFS_V4_2 */ 5680 5681 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr, 5682 struct rpc_message *msg) 5683 { 5684 hdr->timestamp = jiffies; 5685 if (!hdr->pgio_done_cb) 5686 hdr->pgio_done_cb = nfs4_read_done_cb; 5687 if (!nfs42_read_plus_support(hdr, msg)) 5688 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 5689 nfs4_init_sequence(NFS_SERVER(hdr->inode)->nfs_client, 5690 &hdr->args.seq_args, &hdr->res.seq_res, 0, 0); 5691 } 5692 5693 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task, 5694 struct nfs_pgio_header *hdr) 5695 { 5696 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode)->nfs_client, 5697 &hdr->args.seq_args, 5698 &hdr->res.seq_res, 5699 task)) 5700 return 0; 5701 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context, 5702 hdr->args.lock_context, 5703 hdr->rw_mode) == -EIO) 5704 return -EIO; 5705 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) 5706 return -EIO; 5707 return 0; 5708 } 5709 5710 static int nfs4_write_done_cb(struct rpc_task *task, 5711 struct nfs_pgio_header *hdr) 5712 { 5713 struct inode *inode = hdr->inode; 5714 5715 trace_nfs4_write(hdr, task->tk_status); 5716 if (task->tk_status < 0) { 5717 struct nfs4_exception exception = { 5718 .inode = hdr->inode, 5719 .state = hdr->args.context->state, 5720 .stateid = &hdr->args.stateid, 5721 .retrans = hdr->retrans, 5722 }; 5723 task->tk_status = nfs4_async_handle_exception(task, 5724 NFS_SERVER(inode), task->tk_status, 5725 &exception); 5726 hdr->retrans = exception.retrans; 5727 if (exception.retry) { 5728 rpc_restart_call_prepare(task); 5729 return -EAGAIN; 5730 } 5731 } 5732 if (task->tk_status >= 0) { 5733 renew_lease(NFS_SERVER(inode), hdr->timestamp); 5734 nfs_writeback_update_inode(hdr); 5735 } 5736 return 0; 5737 } 5738 5739 static bool nfs4_write_stateid_changed(struct rpc_task *task, 5740 struct nfs_pgio_args *args) 5741 
{ 5742 5743 if (!nfs4_error_stateid_expired(task->tk_status) || 5744 nfs4_stateid_is_current(&args->stateid, 5745 args->context, 5746 args->lock_context, 5747 FMODE_WRITE)) 5748 return false; 5749 rpc_restart_call_prepare(task); 5750 return true; 5751 } 5752 5753 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 5754 { 5755 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 5756 return -EAGAIN; 5757 if (nfs4_write_stateid_changed(task, &hdr->args)) 5758 return -EAGAIN; 5759 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) : 5760 nfs4_write_done_cb(task, hdr); 5761 } 5762 5763 static 5764 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr) 5765 { 5766 /* Don't request attributes for pNFS or O_DIRECT writes */ 5767 if (hdr->ds_clp != NULL || hdr->dreq != NULL) 5768 return false; 5769 /* Otherwise, request attributes if and only if we don't hold 5770 * a delegation 5771 */ 5772 return nfs4_have_delegation(hdr->inode, FMODE_READ, 0) == 0; 5773 } 5774 5775 void nfs4_bitmask_set(__u32 bitmask[], const __u32 src[], 5776 struct inode *inode, unsigned long cache_validity) 5777 { 5778 struct nfs_server *server = NFS_SERVER(inode); 5779 unsigned int i; 5780 5781 memcpy(bitmask, src, sizeof(*bitmask) * NFS4_BITMASK_SZ); 5782 cache_validity |= READ_ONCE(NFS_I(inode)->cache_validity); 5783 5784 if (cache_validity & NFS_INO_INVALID_CHANGE) 5785 bitmask[0] |= FATTR4_WORD0_CHANGE; 5786 if (cache_validity & NFS_INO_INVALID_ATIME) 5787 bitmask[1] |= FATTR4_WORD1_TIME_ACCESS; 5788 if (cache_validity & NFS_INO_INVALID_MODE) 5789 bitmask[1] |= FATTR4_WORD1_MODE; 5790 if (cache_validity & NFS_INO_INVALID_OTHER) 5791 bitmask[1] |= FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP; 5792 if (cache_validity & NFS_INO_INVALID_NLINK) 5793 bitmask[1] |= FATTR4_WORD1_NUMLINKS; 5794 if (cache_validity & NFS_INO_INVALID_CTIME) 5795 bitmask[1] |= FATTR4_WORD1_TIME_METADATA; 5796 if (cache_validity & NFS_INO_INVALID_MTIME) 5797 bitmask[1] |= FATTR4_WORD1_TIME_MODIFY; 5798 if (cache_validity & NFS_INO_INVALID_BLOCKS) 5799 bitmask[1] |= FATTR4_WORD1_SPACE_USED; 5800 if (cache_validity & NFS_INO_INVALID_BTIME) 5801 bitmask[1] |= FATTR4_WORD1_TIME_CREATE; 5802 5803 if (cache_validity & NFS_INO_INVALID_SIZE) 5804 bitmask[0] |= FATTR4_WORD0_SIZE; 5805 5806 for (i = 0; i < NFS4_BITMASK_SZ; i++) 5807 bitmask[i] &= server->attr_bitmask[i]; 5808 } 5809 5810 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr, 5811 struct rpc_message *msg, 5812 struct rpc_clnt **clnt) 5813 { 5814 struct nfs_server *server = NFS_SERVER(hdr->inode); 5815 5816 if (!nfs4_write_need_cache_consistency_data(hdr)) { 5817 hdr->args.bitmask = NULL; 5818 hdr->res.fattr = NULL; 5819 } else { 5820 nfs4_bitmask_set(hdr->args.bitmask_store, 5821 server->cache_consistency_bitmask, 5822 hdr->inode, NFS_INO_INVALID_BLOCKS); 5823 hdr->args.bitmask = hdr->args.bitmask_store; 5824 } 5825 5826 if (!hdr->pgio_done_cb) 5827 hdr->pgio_done_cb = nfs4_write_done_cb; 5828 hdr->res.server = server; 5829 hdr->timestamp = jiffies; 5830 5831 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; 5832 nfs4_init_sequence(server->nfs_client, &hdr->args.seq_args, 5833 &hdr->res.seq_res, 0, 0); 5834 nfs4_state_protect_write(hdr->ds_clp ? 
hdr->ds_clp : server->nfs_client, clnt, msg, hdr); 5835 } 5836 5837 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) 5838 { 5839 nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client, 5840 &data->args.seq_args, 5841 &data->res.seq_res, 5842 task); 5843 } 5844 5845 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data) 5846 { 5847 struct inode *inode = data->inode; 5848 5849 trace_nfs4_commit(data, task->tk_status); 5850 if (nfs4_async_handle_error(task, NFS_SERVER(inode), 5851 NULL, NULL) == -EAGAIN) { 5852 rpc_restart_call_prepare(task); 5853 return -EAGAIN; 5854 } 5855 return 0; 5856 } 5857 5858 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data) 5859 { 5860 if (!nfs4_sequence_done(task, &data->res.seq_res)) 5861 return -EAGAIN; 5862 return data->commit_done_cb(task, data); 5863 } 5864 5865 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg, 5866 struct rpc_clnt **clnt) 5867 { 5868 struct nfs_server *server = NFS_SERVER(data->inode); 5869 5870 if (data->commit_done_cb == NULL) 5871 data->commit_done_cb = nfs4_commit_done_cb; 5872 data->res.server = server; 5873 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT]; 5874 nfs4_init_sequence(server->nfs_client, &data->args.seq_args, 5875 &data->res.seq_res, 1, 0); 5876 nfs4_state_protect(data->ds_clp ? data->ds_clp : server->nfs_client, 5877 NFS_SP4_MACH_CRED_COMMIT, clnt, msg); 5878 } 5879 5880 static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args, 5881 struct nfs_commitres *res) 5882 { 5883 struct inode *dst_inode = file_inode(dst); 5884 struct nfs_server *server = NFS_SERVER(dst_inode); 5885 struct rpc_message msg = { 5886 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT], 5887 .rpc_argp = args, 5888 .rpc_resp = res, 5889 }; 5890 5891 args->fh = NFS_FH(dst_inode); 5892 return nfs4_call_sync(server->client, server, &msg, 5893 &args->seq_args, &res->seq_res, 1); 5894 } 5895 5896 int nfs4_proc_commit(struct file *dst, __u64 offset, __u32 count, struct nfs_commitres *res) 5897 { 5898 struct nfs_commitargs args = { 5899 .offset = offset, 5900 .count = count, 5901 }; 5902 struct nfs_server *dst_server = NFS_SERVER(file_inode(dst)); 5903 struct nfs4_exception exception = { }; 5904 int status; 5905 5906 do { 5907 status = _nfs4_proc_commit(dst, &args, res); 5908 status = nfs4_handle_exception(dst_server, status, &exception); 5909 } while (exception.retry); 5910 5911 return status; 5912 } 5913 5914 static bool nfs4_server_supports_acls(const struct nfs_server *server, 5915 enum nfs4_acl_type type) 5916 { 5917 switch (type) { 5918 default: 5919 return server->attr_bitmask[0] & FATTR4_WORD0_ACL; 5920 case NFS4ACL_DACL: 5921 return server->attr_bitmask[1] & FATTR4_WORD1_DACL; 5922 case NFS4ACL_SACL: 5923 return server->attr_bitmask[1] & FATTR4_WORD1_SACL; 5924 } 5925 } 5926 5927 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that 5928 * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on 5929 * the stack. 
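 *
 * For illustration only (the exact numbers depend on architecture and
 * config): with XATTR_SIZE_MAX = 65536 and PAGE_SIZE = 4096,
 * NFS4ACL_MAXPAGES works out to 16, so the on-stack array of page
 * pointers in __nfs4_proc_set_acl() costs 16 * sizeof(struct page *)
 * = 128 bytes on a 64-bit kernel.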
5930 */ 5931 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE) 5932 5933 int nfs4_buf_to_pages_noslab(const void *buf, size_t buflen, 5934 struct page **pages) 5935 { 5936 struct page *newpage, **spages; 5937 int rc = 0; 5938 size_t len; 5939 spages = pages; 5940 5941 do { 5942 len = min_t(size_t, PAGE_SIZE, buflen); 5943 newpage = alloc_page(GFP_KERNEL); 5944 5945 if (newpage == NULL) 5946 goto unwind; 5947 memcpy(page_address(newpage), buf, len); 5948 buf += len; 5949 buflen -= len; 5950 *pages++ = newpage; 5951 rc++; 5952 } while (buflen != 0); 5953 5954 return rc; 5955 5956 unwind: 5957 for(; rc > 0; rc--) 5958 __free_page(spages[rc-1]); 5959 return -ENOMEM; 5960 } 5961 5962 struct nfs4_cached_acl { 5963 enum nfs4_acl_type type; 5964 int cached; 5965 size_t len; 5966 char data[]; 5967 }; 5968 5969 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl) 5970 { 5971 struct nfs_inode *nfsi = NFS_I(inode); 5972 5973 spin_lock(&inode->i_lock); 5974 kfree(nfsi->nfs4_acl); 5975 nfsi->nfs4_acl = acl; 5976 spin_unlock(&inode->i_lock); 5977 } 5978 5979 static void nfs4_zap_acl_attr(struct inode *inode) 5980 { 5981 nfs4_set_cached_acl(inode, NULL); 5982 } 5983 5984 static ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, 5985 size_t buflen, enum nfs4_acl_type type) 5986 { 5987 struct nfs_inode *nfsi = NFS_I(inode); 5988 struct nfs4_cached_acl *acl; 5989 int ret = -ENOENT; 5990 5991 spin_lock(&inode->i_lock); 5992 acl = nfsi->nfs4_acl; 5993 if (acl == NULL) 5994 goto out; 5995 if (acl->type != type) 5996 goto out; 5997 if (buf == NULL) /* user is just asking for length */ 5998 goto out_len; 5999 if (acl->cached == 0) 6000 goto out; 6001 ret = -ERANGE; /* see getxattr(2) man page */ 6002 if (acl->len > buflen) 6003 goto out; 6004 memcpy(buf, acl->data, acl->len); 6005 out_len: 6006 ret = acl->len; 6007 out: 6008 spin_unlock(&inode->i_lock); 6009 return ret; 6010 } 6011 6012 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, 6013 size_t pgbase, size_t acl_len, 6014 enum nfs4_acl_type type) 6015 { 6016 struct nfs4_cached_acl *acl; 6017 size_t buflen = sizeof(*acl) + acl_len; 6018 6019 if (buflen <= PAGE_SIZE) { 6020 acl = kmalloc(buflen, GFP_KERNEL); 6021 if (acl == NULL) 6022 goto out; 6023 acl->cached = 1; 6024 _copy_from_pages(acl->data, pages, pgbase, acl_len); 6025 } else { 6026 acl = kmalloc(sizeof(*acl), GFP_KERNEL); 6027 if (acl == NULL) 6028 goto out; 6029 acl->cached = 0; 6030 } 6031 acl->type = type; 6032 acl->len = acl_len; 6033 out: 6034 nfs4_set_cached_acl(inode, acl); 6035 } 6036 6037 /* 6038 * The getxattr API returns the required buffer length when called with a 6039 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating 6040 * the required buf. On a NULL buf, we send a page of data to the server 6041 * guessing that the ACL request can be serviced by a page. If so, we cache 6042 * up to the page of ACL data, and the 2nd call to getxattr is serviced by 6043 * the cache. If not so, we throw away the page, and cache the required 6044 * length. The next getxattr call will then produce another round trip to 6045 * the server, this time with the input buf of the required size. 
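 *
 * Illustration only (userspace pseudocode, not part of this file): the
 * two-step sequence performed by a typical ACL tool looks like
 *
 *	ssize_t len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	if (len > 0) {
 *		char *acl = malloc(len);
 *		if (acl)
 *			len = getxattr(path, "system.nfs4_acl", acl, len);
 *	}
 *
 * When the ACL fits in a page, the second call is normally satisfied
 * from the cache filled in below.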
6046 */ 6047 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, 6048 size_t buflen, enum nfs4_acl_type type) 6049 { 6050 struct page **pages; 6051 struct nfs_getaclargs args = { 6052 .fh = NFS_FH(inode), 6053 .acl_type = type, 6054 .acl_len = buflen, 6055 }; 6056 struct nfs_getaclres res = { 6057 .acl_type = type, 6058 .acl_len = buflen, 6059 }; 6060 struct rpc_message msg = { 6061 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL], 6062 .rpc_argp = &args, 6063 .rpc_resp = &res, 6064 }; 6065 unsigned int npages; 6066 int ret = -ENOMEM, i; 6067 struct nfs_server *server = NFS_SERVER(inode); 6068 6069 if (buflen == 0) 6070 buflen = server->rsize; 6071 6072 npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1; 6073 pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); 6074 if (!pages) 6075 return -ENOMEM; 6076 6077 args.acl_pages = pages; 6078 6079 for (i = 0; i < npages; i++) { 6080 pages[i] = alloc_page(GFP_KERNEL); 6081 if (!pages[i]) 6082 goto out_free; 6083 } 6084 6085 /* for decoding across pages */ 6086 res.acl_scratch = folio_alloc(GFP_KERNEL, 0); 6087 if (!res.acl_scratch) 6088 goto out_free; 6089 6090 args.acl_len = npages * PAGE_SIZE; 6091 6092 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", 6093 __func__, buf, buflen, npages, args.acl_len); 6094 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), 6095 &msg, &args.seq_args, &res.seq_res, 0); 6096 if (ret) 6097 goto out_free; 6098 6099 /* Handle the case where the passed-in buffer is too short */ 6100 if (res.acl_flags & NFS4_ACL_TRUNC) { 6101 /* Did the user only issue a request for the acl length? */ 6102 if (buf == NULL) 6103 goto out_ok; 6104 ret = -ERANGE; 6105 goto out_free; 6106 } 6107 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len, 6108 type); 6109 if (buf) { 6110 if (res.acl_len > buflen) { 6111 ret = -ERANGE; 6112 goto out_free; 6113 } 6114 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len); 6115 } 6116 out_ok: 6117 ret = res.acl_len; 6118 out_free: 6119 while (--i >= 0) 6120 __free_page(pages[i]); 6121 if (res.acl_scratch) 6122 folio_put(res.acl_scratch); 6123 kfree(pages); 6124 return ret; 6125 } 6126 6127 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, 6128 size_t buflen, enum nfs4_acl_type type) 6129 { 6130 struct nfs4_exception exception = { 6131 .interruptible = true, 6132 }; 6133 ssize_t ret; 6134 do { 6135 ret = __nfs4_get_acl_uncached(inode, buf, buflen, type); 6136 trace_nfs4_get_acl(inode, ret); 6137 if (ret >= 0) 6138 break; 6139 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); 6140 } while (exception.retry); 6141 return ret; 6142 } 6143 6144 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen, 6145 enum nfs4_acl_type type) 6146 { 6147 struct nfs_server *server = NFS_SERVER(inode); 6148 int ret; 6149 6150 if (unlikely(NFS_FH(inode)->size == 0)) 6151 return -ENODATA; 6152 if (!nfs4_server_supports_acls(server, type)) 6153 return -EOPNOTSUPP; 6154 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 6155 if (ret < 0) 6156 return ret; 6157 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) 6158 nfs_zap_acl_cache(inode); 6159 ret = nfs4_read_cached_acl(inode, buf, buflen, type); 6160 if (ret != -ENOENT) 6161 /* -ENOENT is returned if there is no ACL or if there is an ACL 6162 * but no cached acl data, just the acl length */ 6163 return ret; 6164 return nfs4_get_acl_uncached(inode, buf, buflen, type); 6165 } 6166 6167 static int __nfs4_proc_set_acl(struct 
inode *inode, const void *buf, 6168 size_t buflen, enum nfs4_acl_type type) 6169 { 6170 struct nfs_server *server = NFS_SERVER(inode); 6171 struct page *pages[NFS4ACL_MAXPAGES]; 6172 struct nfs_setaclargs arg = { 6173 .fh = NFS_FH(inode), 6174 .acl_type = type, 6175 .acl_len = buflen, 6176 .acl_pages = pages, 6177 }; 6178 struct nfs_setaclres res; 6179 struct rpc_message msg = { 6180 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL], 6181 .rpc_argp = &arg, 6182 .rpc_resp = &res, 6183 }; 6184 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 6185 int ret, i; 6186 6187 /* You can't remove system.nfs4_acl: */ 6188 if (buflen == 0) 6189 return -EINVAL; 6190 if (!nfs4_server_supports_acls(server, type)) 6191 return -EOPNOTSUPP; 6192 if (npages > ARRAY_SIZE(pages)) 6193 return -ERANGE; 6194 i = nfs4_buf_to_pages_noslab(buf, buflen, arg.acl_pages); 6195 if (i < 0) 6196 return i; 6197 nfs4_inode_make_writeable(inode); 6198 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 6199 6200 /* 6201 * Free each page after tx, so the only ref left is 6202 * held by the network stack 6203 */ 6204 for (; i > 0; i--) 6205 put_page(pages[i-1]); 6206 6207 /* 6208 * Acl update can result in inode attribute update. 6209 * so mark the attribute cache invalid. 6210 */ 6211 spin_lock(&inode->i_lock); 6212 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE | 6213 NFS_INO_INVALID_CTIME | 6214 NFS_INO_REVAL_FORCED); 6215 spin_unlock(&inode->i_lock); 6216 nfs_access_zap_cache(inode); 6217 nfs_zap_acl_cache(inode); 6218 return ret; 6219 } 6220 6221 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, 6222 size_t buflen, enum nfs4_acl_type type) 6223 { 6224 struct nfs4_exception exception = { }; 6225 int err; 6226 6227 if (unlikely(NFS_FH(inode)->size == 0)) 6228 return -ENODATA; 6229 do { 6230 err = __nfs4_proc_set_acl(inode, buf, buflen, type); 6231 trace_nfs4_set_acl(inode, err); 6232 if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) { 6233 /* 6234 * no need to retry since the kernel 6235 * isn't involved in encoding the ACEs. 
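 * The owner/group strings inside the ACEs are supplied verbatim by
 * userspace, so only the caller can correct them; report -EINVAL
 * rather than looping in the exception handler.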
6236 */ 6237 err = -EINVAL; 6238 break; 6239 } 6240 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6241 &exception); 6242 } while (exception.retry); 6243 return err; 6244 } 6245 6246 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 6247 static int _nfs4_get_security_label(struct inode *inode, void *buf, 6248 size_t buflen) 6249 { 6250 struct nfs_server *server = NFS_SERVER(inode); 6251 struct nfs4_label label = {0, 0, 0, buflen, buf}; 6252 6253 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 6254 struct nfs_fattr fattr = { 6255 .label = &label, 6256 }; 6257 struct nfs4_getattr_arg arg = { 6258 .fh = NFS_FH(inode), 6259 .bitmask = bitmask, 6260 }; 6261 struct nfs4_getattr_res res = { 6262 .fattr = &fattr, 6263 .server = server, 6264 }; 6265 struct rpc_message msg = { 6266 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 6267 .rpc_argp = &arg, 6268 .rpc_resp = &res, 6269 }; 6270 int ret; 6271 6272 nfs_fattr_init(&fattr); 6273 6274 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0); 6275 if (ret) 6276 return ret; 6277 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL)) 6278 return -ENOENT; 6279 return label.len; 6280 } 6281 6282 static int nfs4_get_security_label(struct inode *inode, void *buf, 6283 size_t buflen) 6284 { 6285 struct nfs4_exception exception = { 6286 .interruptible = true, 6287 }; 6288 int err; 6289 6290 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 6291 return -EOPNOTSUPP; 6292 6293 do { 6294 err = _nfs4_get_security_label(inode, buf, buflen); 6295 trace_nfs4_get_security_label(inode, err); 6296 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6297 &exception); 6298 } while (exception.retry); 6299 return err; 6300 } 6301 6302 static int _nfs4_do_set_security_label(struct inode *inode, 6303 struct nfs4_label *ilabel, 6304 struct nfs_fattr *fattr) 6305 { 6306 6307 struct iattr sattr = {0}; 6308 struct nfs_server *server = NFS_SERVER(inode); 6309 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 6310 struct nfs_setattrargs arg = { 6311 .fh = NFS_FH(inode), 6312 .iap = &sattr, 6313 .server = server, 6314 .bitmask = bitmask, 6315 .label = ilabel, 6316 }; 6317 struct nfs_setattrres res = { 6318 .fattr = fattr, 6319 .server = server, 6320 }; 6321 struct rpc_message msg = { 6322 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 6323 .rpc_argp = &arg, 6324 .rpc_resp = &res, 6325 }; 6326 int status; 6327 6328 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 6329 6330 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 6331 if (status) 6332 dprintk("%s failed: %d\n", __func__, status); 6333 6334 return status; 6335 } 6336 6337 static int nfs4_do_set_security_label(struct inode *inode, 6338 struct nfs4_label *ilabel, 6339 struct nfs_fattr *fattr) 6340 { 6341 struct nfs4_exception exception = { }; 6342 int err; 6343 6344 do { 6345 err = _nfs4_do_set_security_label(inode, ilabel, fattr); 6346 trace_nfs4_set_security_label(inode, err); 6347 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6348 &exception); 6349 } while (exception.retry); 6350 return err; 6351 } 6352 6353 static int 6354 nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen) 6355 { 6356 struct nfs4_label ilabel = {0, 0, 0, buflen, (char *)buf }; 6357 struct nfs_fattr *fattr; 6358 int status; 6359 6360 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 6361 return -EOPNOTSUPP; 6362 6363 fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode)); 6364 if (fattr == NULL) 6365 return -ENOMEM; 6366 6367 
status = nfs4_do_set_security_label(inode, &ilabel, fattr); 6368 if (status == 0) 6369 nfs_setsecurity(inode, fattr); 6370 6371 nfs_free_fattr(fattr); 6372 return status; 6373 } 6374 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */ 6375 6376 6377 static void nfs4_init_boot_verifier(const struct nfs_client *clp, 6378 nfs4_verifier *bootverf) 6379 { 6380 __be32 verf[2]; 6381 6382 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) { 6383 /* An impossible timestamp guarantees this value 6384 * will never match a generated boot time. */ 6385 verf[0] = cpu_to_be32(U32_MAX); 6386 verf[1] = cpu_to_be32(U32_MAX); 6387 } else { 6388 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 6389 u64 ns = ktime_to_ns(nn->boot_time); 6390 6391 verf[0] = cpu_to_be32(ns >> 32); 6392 verf[1] = cpu_to_be32(ns); 6393 } 6394 memcpy(bootverf->data, verf, sizeof(bootverf->data)); 6395 } 6396 6397 static size_t 6398 nfs4_get_uniquifier(struct nfs_client *clp, char *buf, size_t buflen) 6399 { 6400 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 6401 struct nfs_netns_client *nn_clp = nn->nfs_client; 6402 const char *id; 6403 6404 buf[0] = '\0'; 6405 6406 if (nn_clp) { 6407 rcu_read_lock(); 6408 id = rcu_dereference(nn_clp->identifier); 6409 if (id) 6410 strscpy(buf, id, buflen); 6411 rcu_read_unlock(); 6412 } 6413 6414 if (nfs4_client_id_uniquifier[0] != '\0' && buf[0] == '\0') 6415 strscpy(buf, nfs4_client_id_uniquifier, buflen); 6416 6417 return strlen(buf); 6418 } 6419 6420 static int 6421 nfs4_init_nonuniform_client_string(struct nfs_client *clp) 6422 { 6423 char buf[NFS4_CLIENT_ID_UNIQ_LEN]; 6424 size_t buflen; 6425 size_t len; 6426 char *str; 6427 6428 if (clp->cl_owner_id != NULL) 6429 return 0; 6430 6431 rcu_read_lock(); 6432 len = 14 + 6433 strlen(clp->cl_rpcclient->cl_nodename) + 6434 1 + 6435 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) + 6436 1; 6437 rcu_read_unlock(); 6438 6439 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf)); 6440 if (buflen) 6441 len += buflen + 1; 6442 6443 if (len > NFS4_OPAQUE_LIMIT + 1) 6444 return -EINVAL; 6445 6446 /* 6447 * Since this string is allocated at mount time, and held until the 6448 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 6449 * about a memory-reclaim deadlock. 6450 */ 6451 str = kmalloc(len, GFP_KERNEL); 6452 if (!str) 6453 return -ENOMEM; 6454 6455 rcu_read_lock(); 6456 if (buflen) 6457 scnprintf(str, len, "Linux NFSv4.0 %s/%s/%s", 6458 clp->cl_rpcclient->cl_nodename, buf, 6459 rpc_peeraddr2str(clp->cl_rpcclient, 6460 RPC_DISPLAY_ADDR)); 6461 else 6462 scnprintf(str, len, "Linux NFSv4.0 %s/%s", 6463 clp->cl_rpcclient->cl_nodename, 6464 rpc_peeraddr2str(clp->cl_rpcclient, 6465 RPC_DISPLAY_ADDR)); 6466 rcu_read_unlock(); 6467 6468 clp->cl_owner_id = str; 6469 return 0; 6470 } 6471 6472 static int 6473 nfs4_init_uniform_client_string(struct nfs_client *clp) 6474 { 6475 char buf[NFS4_CLIENT_ID_UNIQ_LEN]; 6476 size_t buflen; 6477 size_t len; 6478 char *str; 6479 6480 if (clp->cl_owner_id != NULL) 6481 return 0; 6482 6483 len = 10 + 10 + 1 + 10 + 1 + 6484 strlen(clp->cl_rpcclient->cl_nodename) + 1; 6485 6486 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf)); 6487 if (buflen) 6488 len += buflen + 1; 6489 6490 if (len > NFS4_OPAQUE_LIMIT + 1) 6491 return -EINVAL; 6492 6493 /* 6494 * Since this string is allocated at mount time, and held until the 6495 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 6496 * about a memory-reclaim deadlock. 
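 *
 * The resulting co_ownerid built below has the form
 * "Linux NFSv<maj>.<min> [<uniquifier>/]<nodename>", for example
 * (hostname purely illustrative) "Linux NFSv4.1 client1.example.com".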
6497 */ 6498 str = kmalloc(len, GFP_KERNEL); 6499 if (!str) 6500 return -ENOMEM; 6501 6502 if (buflen) 6503 scnprintf(str, len, "Linux NFSv%u.%u %s/%s", 6504 clp->rpc_ops->version, clp->cl_minorversion, 6505 buf, clp->cl_rpcclient->cl_nodename); 6506 else 6507 scnprintf(str, len, "Linux NFSv%u.%u %s", 6508 clp->rpc_ops->version, clp->cl_minorversion, 6509 clp->cl_rpcclient->cl_nodename); 6510 clp->cl_owner_id = str; 6511 return 0; 6512 } 6513 6514 /* 6515 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback 6516 * services. Advertise one based on the address family of the 6517 * clientaddr. 6518 */ 6519 static unsigned int 6520 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len) 6521 { 6522 if (strchr(clp->cl_ipaddr, ':') != NULL) 6523 return scnprintf(buf, len, "tcp6"); 6524 else 6525 return scnprintf(buf, len, "tcp"); 6526 } 6527 6528 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata) 6529 { 6530 struct nfs4_setclientid *sc = calldata; 6531 6532 if (task->tk_status == 0) 6533 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred); 6534 } 6535 6536 static const struct rpc_call_ops nfs4_setclientid_ops = { 6537 .rpc_call_done = nfs4_setclientid_done, 6538 }; 6539 6540 /** 6541 * nfs4_proc_setclientid - Negotiate client ID 6542 * @clp: state data structure 6543 * @program: RPC program for NFSv4 callback service 6544 * @port: IP port number for NFS4 callback service 6545 * @cred: credential to use for this call 6546 * @res: where to place the result 6547 * 6548 * Returns zero, a negative errno, or a negative NFS4ERR status code. 6549 */ 6550 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, 6551 unsigned short port, const struct cred *cred, 6552 struct nfs4_setclientid_res *res) 6553 { 6554 nfs4_verifier sc_verifier; 6555 struct nfs4_setclientid setclientid = { 6556 .sc_verifier = &sc_verifier, 6557 .sc_prog = program, 6558 .sc_clnt = clp, 6559 }; 6560 struct rpc_message msg = { 6561 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID], 6562 .rpc_argp = &setclientid, 6563 .rpc_resp = res, 6564 .rpc_cred = cred, 6565 }; 6566 struct rpc_task_setup task_setup_data = { 6567 .rpc_client = clp->cl_rpcclient, 6568 .rpc_message = &msg, 6569 .callback_ops = &nfs4_setclientid_ops, 6570 .callback_data = &setclientid, 6571 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN, 6572 }; 6573 unsigned long now = jiffies; 6574 int status; 6575 6576 /* nfs_client_id4 */ 6577 nfs4_init_boot_verifier(clp, &sc_verifier); 6578 6579 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags)) 6580 status = nfs4_init_uniform_client_string(clp); 6581 else 6582 status = nfs4_init_nonuniform_client_string(clp); 6583 6584 if (status) 6585 goto out; 6586 6587 /* cb_client4 */ 6588 setclientid.sc_netid_len = 6589 nfs4_init_callback_netid(clp, 6590 setclientid.sc_netid, 6591 sizeof(setclientid.sc_netid)); 6592 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr, 6593 sizeof(setclientid.sc_uaddr), "%s.%u.%u", 6594 clp->cl_ipaddr, port >> 8, port & 255); 6595 6596 dprintk("NFS call setclientid auth=%s, '%s'\n", 6597 clp->cl_rpcclient->cl_auth->au_ops->au_name, 6598 clp->cl_owner_id); 6599 6600 status = nfs4_call_sync_custom(&task_setup_data); 6601 if (setclientid.sc_cred) { 6602 kfree(clp->cl_acceptor); 6603 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred); 6604 put_rpccred(setclientid.sc_cred); 6605 } 6606 6607 if (status == 0) 6608 do_renew_lease(clp, now); 6609 out: 6610 trace_nfs4_setclientid(clp, status); 6611 dprintk("NFS reply 
setclientid: %d\n", status); 6612 return status; 6613 } 6614 6615 /** 6616 * nfs4_proc_setclientid_confirm - Confirm client ID 6617 * @clp: state data structure 6618 * @arg: result of a previous SETCLIENTID 6619 * @cred: credential to use for this call 6620 * 6621 * Returns zero, a negative errno, or a negative NFS4ERR status code. 6622 */ 6623 int nfs4_proc_setclientid_confirm(struct nfs_client *clp, 6624 struct nfs4_setclientid_res *arg, 6625 const struct cred *cred) 6626 { 6627 struct rpc_message msg = { 6628 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM], 6629 .rpc_argp = arg, 6630 .rpc_cred = cred, 6631 }; 6632 int status; 6633 6634 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n", 6635 clp->cl_rpcclient->cl_auth->au_ops->au_name, 6636 clp->cl_clientid); 6637 status = rpc_call_sync(clp->cl_rpcclient, &msg, 6638 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 6639 trace_nfs4_setclientid_confirm(clp, status); 6640 dprintk("NFS reply setclientid_confirm: %d\n", status); 6641 return status; 6642 } 6643 6644 struct nfs4_delegreturndata { 6645 struct nfs4_delegreturnargs args; 6646 struct nfs4_delegreturnres res; 6647 struct nfs_fh fh; 6648 nfs4_stateid stateid; 6649 unsigned long timestamp; 6650 unsigned short retrans; 6651 struct { 6652 struct nfs4_layoutreturn_args arg; 6653 struct nfs4_layoutreturn_res res; 6654 struct nfs4_xdr_opaque_data ld_private; 6655 u32 roc_barrier; 6656 bool roc; 6657 } lr; 6658 struct nfs4_delegattr sattr; 6659 struct nfs_fattr fattr; 6660 int rpc_status; 6661 struct inode *inode; 6662 }; 6663 6664 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) 6665 { 6666 struct nfs4_delegreturndata *data = calldata; 6667 struct nfs4_exception exception = { 6668 .inode = data->inode, 6669 .stateid = &data->stateid, 6670 .task_is_privileged = data->args.seq_args.sa_privileged, 6671 .retrans = data->retrans, 6672 }; 6673 6674 if (!nfs4_sequence_done(task, &data->res.seq_res)) 6675 return; 6676 6677 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status); 6678 6679 /* Handle Layoutreturn errors */ 6680 if (pnfs_roc_done(task, &data->args.lr_args, &data->res.lr_res, 6681 &data->res.lr_ret) == -EAGAIN) 6682 goto out_restart; 6683 6684 if (data->args.sattr_args && task->tk_status != 0) { 6685 switch(data->res.sattr_ret) { 6686 case 0: 6687 data->args.sattr_args = NULL; 6688 data->res.sattr_res = false; 6689 break; 6690 case -NFS4ERR_ADMIN_REVOKED: 6691 case -NFS4ERR_DELEG_REVOKED: 6692 case -NFS4ERR_EXPIRED: 6693 case -NFS4ERR_BAD_STATEID: 6694 /* Let the main handler below do stateid recovery */ 6695 break; 6696 case -NFS4ERR_OLD_STATEID: 6697 if (nfs4_refresh_delegation_stateid(&data->stateid, 6698 data->inode)) 6699 goto out_restart; 6700 fallthrough; 6701 default: 6702 data->args.sattr_args = NULL; 6703 data->res.sattr_res = false; 6704 goto out_restart; 6705 } 6706 } 6707 6708 switch (task->tk_status) { 6709 case 0: 6710 renew_lease(data->res.server, data->timestamp); 6711 break; 6712 case -NFS4ERR_ADMIN_REVOKED: 6713 case -NFS4ERR_DELEG_REVOKED: 6714 case -NFS4ERR_EXPIRED: 6715 nfs4_free_revoked_stateid(data->res.server, 6716 data->args.stateid, 6717 task->tk_msg.rpc_cred); 6718 fallthrough; 6719 case -NFS4ERR_BAD_STATEID: 6720 case -NFS4ERR_STALE_STATEID: 6721 case -ETIMEDOUT: 6722 task->tk_status = 0; 6723 break; 6724 case -NFS4ERR_OLD_STATEID: 6725 if (!nfs4_refresh_delegation_stateid(&data->stateid, data->inode)) 6726 nfs4_stateid_seqid_inc(&data->stateid); 6727 if (data->args.bitmask) { 6728 
data->args.bitmask = NULL; 6729 data->res.fattr = NULL; 6730 } 6731 goto out_restart; 6732 case -NFS4ERR_ACCESS: 6733 if (data->args.bitmask) { 6734 data->args.bitmask = NULL; 6735 data->res.fattr = NULL; 6736 goto out_restart; 6737 } 6738 fallthrough; 6739 default: 6740 task->tk_status = nfs4_async_handle_exception(task, 6741 data->res.server, task->tk_status, 6742 &exception); 6743 data->retrans = exception.retrans; 6744 if (exception.retry) 6745 goto out_restart; 6746 } 6747 nfs_delegation_mark_returned(data->inode, data->args.stateid); 6748 data->rpc_status = task->tk_status; 6749 return; 6750 out_restart: 6751 task->tk_status = 0; 6752 rpc_restart_call_prepare(task); 6753 } 6754 6755 static void nfs4_delegreturn_release(void *calldata) 6756 { 6757 struct nfs4_delegreturndata *data = calldata; 6758 struct inode *inode = data->inode; 6759 6760 if (data->lr.roc) 6761 pnfs_roc_release(&data->lr.arg, &data->lr.res, 6762 data->res.lr_ret); 6763 if (inode) { 6764 nfs4_fattr_set_prechange(&data->fattr, 6765 inode_peek_iversion_raw(inode)); 6766 nfs_refresh_inode(inode, &data->fattr); 6767 nfs_iput_and_deactive(inode); 6768 } 6769 kfree(calldata); 6770 } 6771 6772 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) 6773 { 6774 struct nfs4_delegreturndata *d_data; 6775 struct pnfs_layout_hdr *lo; 6776 6777 d_data = data; 6778 6779 if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) { 6780 nfs4_sequence_done(task, &d_data->res.seq_res); 6781 return; 6782 } 6783 6784 lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL; 6785 if (lo && !pnfs_layout_is_valid(lo)) { 6786 d_data->args.lr_args = NULL; 6787 d_data->res.lr_res = NULL; 6788 } 6789 6790 nfs4_setup_sequence(d_data->res.server->nfs_client, 6791 &d_data->args.seq_args, 6792 &d_data->res.seq_res, 6793 task); 6794 } 6795 6796 static const struct rpc_call_ops nfs4_delegreturn_ops = { 6797 .rpc_call_prepare = nfs4_delegreturn_prepare, 6798 .rpc_call_done = nfs4_delegreturn_done, 6799 .rpc_release = nfs4_delegreturn_release, 6800 }; 6801 6802 static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, 6803 const nfs4_stateid *stateid, 6804 struct nfs_delegation *delegation, 6805 int issync) 6806 { 6807 struct nfs4_delegreturndata *data; 6808 struct nfs_server *server = NFS_SERVER(inode); 6809 struct rpc_task *task; 6810 struct rpc_message msg = { 6811 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN], 6812 .rpc_cred = cred, 6813 }; 6814 struct rpc_task_setup task_setup_data = { 6815 .rpc_client = server->client, 6816 .rpc_message = &msg, 6817 .callback_ops = &nfs4_delegreturn_ops, 6818 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT, 6819 }; 6820 int status = 0; 6821 6822 if (nfs_server_capable(inode, NFS_CAP_MOVEABLE)) 6823 task_setup_data.flags |= RPC_TASK_MOVEABLE; 6824 6825 data = kzalloc(sizeof(*data), GFP_KERNEL); 6826 if (data == NULL) 6827 return -ENOMEM; 6828 6829 nfs4_state_protect(server->nfs_client, 6830 NFS_SP4_MACH_CRED_CLEANUP, 6831 &task_setup_data.rpc_client, &msg); 6832 6833 data->args.fhandle = &data->fh; 6834 data->args.stateid = &data->stateid; 6835 nfs4_bitmask_set(data->args.bitmask_store, 6836 server->cache_consistency_bitmask, inode, 0); 6837 data->args.bitmask = data->args.bitmask_store; 6838 nfs_copy_fh(&data->fh, NFS_FH(inode)); 6839 nfs4_stateid_copy(&data->stateid, stateid); 6840 data->res.fattr = &data->fattr; 6841 data->res.server = server; 6842 data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT; 6843 data->lr.arg.ld_private = &data->lr.ld_private; 
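/*
 * Any post-return attributes the server sends back are applied to the
 * inode in nfs4_delegreturn_release() once the delegation is gone.
 */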
6844 nfs_fattr_init(data->res.fattr); 6845 data->timestamp = jiffies; 6846 data->rpc_status = 0; 6847 data->inode = nfs_igrab_and_active(inode); 6848 if (data->inode || issync) { 6849 data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res, 6850 cred, issync); 6851 if (data->lr.roc) { 6852 data->args.lr_args = &data->lr.arg; 6853 data->res.lr_res = &data->lr.res; 6854 } 6855 } 6856 6857 if (delegation && 6858 test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) { 6859 if (delegation->type & FMODE_READ) { 6860 data->sattr.atime = inode_get_atime(inode); 6861 data->sattr.atime_set = true; 6862 } 6863 if (delegation->type & FMODE_WRITE) { 6864 data->sattr.mtime = inode_get_mtime(inode); 6865 data->sattr.mtime_set = true; 6866 } 6867 data->args.sattr_args = &data->sattr; 6868 data->res.sattr_res = true; 6869 } 6870 6871 nfs4_init_sequence(server->nfs_client, &data->args.seq_args, 6872 &data->res.seq_res, 1, !data->inode ? 1 : 0); 6873 6874 task_setup_data.callback_data = data; 6875 msg.rpc_argp = &data->args; 6876 msg.rpc_resp = &data->res; 6877 task = rpc_run_task(&task_setup_data); 6878 if (IS_ERR(task)) 6879 return PTR_ERR(task); 6880 if (!issync) 6881 goto out; 6882 status = rpc_wait_for_completion_task(task); 6883 if (status != 0) 6884 goto out; 6885 status = data->rpc_status; 6886 out: 6887 rpc_put_task(task); 6888 return status; 6889 } 6890 6891 int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, 6892 const nfs4_stateid *stateid, 6893 struct nfs_delegation *delegation, int issync) 6894 { 6895 struct nfs_server *server = NFS_SERVER(inode); 6896 struct nfs4_exception exception = { }; 6897 int err; 6898 do { 6899 err = _nfs4_proc_delegreturn(inode, cred, stateid, 6900 delegation, issync); 6901 trace_nfs4_delegreturn(inode, stateid, err); 6902 switch (err) { 6903 case -NFS4ERR_STALE_STATEID: 6904 case -NFS4ERR_EXPIRED: 6905 case 0: 6906 return 0; 6907 } 6908 err = nfs4_handle_exception(server, err, &exception); 6909 } while (exception.retry); 6910 return err; 6911 } 6912 6913 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 6914 { 6915 struct inode *inode = state->inode; 6916 struct nfs_server *server = NFS_SERVER(inode); 6917 struct nfs_client *clp = server->nfs_client; 6918 struct nfs_lockt_args arg = { 6919 .fh = NFS_FH(inode), 6920 .fl = request, 6921 }; 6922 struct nfs_lockt_res res = { 6923 .denied = request, 6924 }; 6925 struct rpc_message msg = { 6926 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT], 6927 .rpc_argp = &arg, 6928 .rpc_resp = &res, 6929 .rpc_cred = state->owner->so_cred, 6930 }; 6931 struct nfs4_lock_state *lsp; 6932 int status; 6933 6934 arg.lock_owner.clientid = clp->cl_clientid; 6935 status = nfs4_set_lock_state(state, request); 6936 if (status != 0) 6937 goto out; 6938 lsp = request->fl_u.nfs4_fl.owner; 6939 arg.lock_owner.id = lsp->ls_seqid.owner_id; 6940 arg.lock_owner.s_dev = server->s_dev; 6941 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 6942 switch (status) { 6943 case 0: 6944 request->c.flc_type = F_UNLCK; 6945 break; 6946 case -NFS4ERR_DENIED: 6947 status = 0; 6948 } 6949 request->fl_ops->fl_release_private(request); 6950 request->fl_ops = NULL; 6951 out: 6952 return status; 6953 } 6954 6955 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 6956 { 6957 struct nfs4_exception exception = { 6958 .interruptible = true, 6959 }; 6960 int err; 6961 6962 do { 6963 err = _nfs4_proc_getlk(state, cmd, request); 6964 
trace_nfs4_get_lock(request, state, cmd, err); 6965 err = nfs4_handle_exception(NFS_SERVER(state->inode), err, 6966 &exception); 6967 } while (exception.retry); 6968 return err; 6969 } 6970 6971 /* 6972 * Update the seqid of a lock stateid after receiving 6973 * NFS4ERR_OLD_STATEID 6974 */ 6975 static bool nfs4_refresh_lock_old_stateid(nfs4_stateid *dst, 6976 struct nfs4_lock_state *lsp) 6977 { 6978 struct nfs4_state *state = lsp->ls_state; 6979 bool ret = false; 6980 6981 spin_lock(&state->state_lock); 6982 if (!nfs4_stateid_match_other(dst, &lsp->ls_stateid)) 6983 goto out; 6984 if (!nfs4_stateid_is_newer(&lsp->ls_stateid, dst)) 6985 nfs4_stateid_seqid_inc(dst); 6986 else 6987 dst->seqid = lsp->ls_stateid.seqid; 6988 ret = true; 6989 out: 6990 spin_unlock(&state->state_lock); 6991 return ret; 6992 } 6993 6994 static bool nfs4_sync_lock_stateid(nfs4_stateid *dst, 6995 struct nfs4_lock_state *lsp) 6996 { 6997 struct nfs4_state *state = lsp->ls_state; 6998 bool ret; 6999 7000 spin_lock(&state->state_lock); 7001 ret = !nfs4_stateid_match_other(dst, &lsp->ls_stateid); 7002 nfs4_stateid_copy(dst, &lsp->ls_stateid); 7003 spin_unlock(&state->state_lock); 7004 return ret; 7005 } 7006 7007 struct nfs4_unlockdata { 7008 struct nfs_locku_args arg; 7009 struct nfs_locku_res res; 7010 struct nfs4_lock_state *lsp; 7011 struct nfs_open_context *ctx; 7012 struct nfs_lock_context *l_ctx; 7013 struct file_lock fl; 7014 struct nfs_server *server; 7015 unsigned long timestamp; 7016 unsigned short retrans; 7017 }; 7018 7019 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, 7020 struct nfs_open_context *ctx, 7021 struct nfs4_lock_state *lsp, 7022 struct nfs_seqid *seqid) 7023 { 7024 struct nfs4_unlockdata *p; 7025 struct nfs4_state *state = lsp->ls_state; 7026 struct inode *inode = state->inode; 7027 struct nfs_lock_context *l_ctx; 7028 7029 p = kzalloc(sizeof(*p), GFP_KERNEL); 7030 if (p == NULL) 7031 return NULL; 7032 l_ctx = nfs_get_lock_context(ctx); 7033 if (!IS_ERR(l_ctx)) { 7034 p->l_ctx = l_ctx; 7035 } else { 7036 kfree(p); 7037 return NULL; 7038 } 7039 p->arg.fh = NFS_FH(inode); 7040 p->arg.fl = &p->fl; 7041 p->arg.seqid = seqid; 7042 p->res.seqid = seqid; 7043 p->lsp = lsp; 7044 /* Ensure we don't close file until we're done freeing locks! 
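 * The open context reference taken here is not dropped until
 * nfs4_locku_release_calldata() runs after the LOCKU completes.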
*/ 7045 p->ctx = get_nfs_open_context(ctx); 7046 locks_init_lock(&p->fl); 7047 locks_copy_lock(&p->fl, fl); 7048 p->server = NFS_SERVER(inode); 7049 spin_lock(&state->state_lock); 7050 nfs4_stateid_copy(&p->arg.stateid, &lsp->ls_stateid); 7051 spin_unlock(&state->state_lock); 7052 return p; 7053 } 7054 7055 static void nfs4_locku_release_calldata(void *data) 7056 { 7057 struct nfs4_unlockdata *calldata = data; 7058 nfs_free_seqid(calldata->arg.seqid); 7059 nfs4_put_lock_state(calldata->lsp); 7060 nfs_put_lock_context(calldata->l_ctx); 7061 put_nfs_open_context(calldata->ctx); 7062 kfree(calldata); 7063 } 7064 7065 static void nfs4_locku_done(struct rpc_task *task, void *data) 7066 { 7067 struct nfs4_unlockdata *calldata = data; 7068 struct nfs4_exception exception = { 7069 .inode = calldata->lsp->ls_state->inode, 7070 .stateid = &calldata->arg.stateid, 7071 .retrans = calldata->retrans, 7072 }; 7073 7074 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 7075 return; 7076 switch (task->tk_status) { 7077 case 0: 7078 renew_lease(calldata->server, calldata->timestamp); 7079 locks_lock_inode_wait(calldata->lsp->ls_state->inode, &calldata->fl); 7080 if (nfs4_update_lock_stateid(calldata->lsp, 7081 &calldata->res.stateid)) 7082 break; 7083 fallthrough; 7084 case -NFS4ERR_ADMIN_REVOKED: 7085 case -NFS4ERR_EXPIRED: 7086 nfs4_free_revoked_stateid(calldata->server, 7087 &calldata->arg.stateid, 7088 task->tk_msg.rpc_cred); 7089 fallthrough; 7090 case -NFS4ERR_BAD_STATEID: 7091 case -NFS4ERR_STALE_STATEID: 7092 if (nfs4_sync_lock_stateid(&calldata->arg.stateid, 7093 calldata->lsp)) 7094 rpc_restart_call_prepare(task); 7095 break; 7096 case -NFS4ERR_OLD_STATEID: 7097 if (nfs4_refresh_lock_old_stateid(&calldata->arg.stateid, 7098 calldata->lsp)) 7099 rpc_restart_call_prepare(task); 7100 break; 7101 default: 7102 task->tk_status = nfs4_async_handle_exception(task, 7103 calldata->server, task->tk_status, 7104 &exception); 7105 calldata->retrans = exception.retrans; 7106 if (exception.retry) 7107 rpc_restart_call_prepare(task); 7108 } 7109 nfs_release_seqid(calldata->arg.seqid); 7110 } 7111 7112 static void nfs4_locku_prepare(struct rpc_task *task, void *data) 7113 { 7114 struct nfs4_unlockdata *calldata = data; 7115 7116 if (test_bit(NFS_CONTEXT_UNLOCK, &calldata->l_ctx->open_context->flags) && 7117 nfs_async_iocounter_wait(task, calldata->l_ctx)) 7118 return; 7119 7120 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 7121 goto out_wait; 7122 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) { 7123 /* Note: exit _without_ running nfs4_locku_done */ 7124 goto out_no_action; 7125 } 7126 calldata->timestamp = jiffies; 7127 if (nfs4_setup_sequence(calldata->server->nfs_client, 7128 &calldata->arg.seq_args, 7129 &calldata->res.seq_res, 7130 task) != 0) 7131 nfs_release_seqid(calldata->arg.seqid); 7132 return; 7133 out_no_action: 7134 task->tk_action = NULL; 7135 out_wait: 7136 nfs4_sequence_done(task, &calldata->res.seq_res); 7137 } 7138 7139 static const struct rpc_call_ops nfs4_locku_ops = { 7140 .rpc_call_prepare = nfs4_locku_prepare, 7141 .rpc_call_done = nfs4_locku_done, 7142 .rpc_release = nfs4_locku_release_calldata, 7143 }; 7144 7145 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, 7146 struct nfs_open_context *ctx, 7147 struct nfs4_lock_state *lsp, 7148 struct nfs_seqid *seqid) 7149 { 7150 struct nfs4_unlockdata *data; 7151 struct nfs_client *clp = NFS_SERVER(lsp->ls_state->inode)->nfs_client; 7152 struct rpc_message msg = { 7153 .rpc_proc = 
&nfs4_procedures[NFSPROC4_CLNT_LOCKU], 7154 .rpc_cred = ctx->cred, 7155 }; 7156 struct rpc_task_setup task_setup_data = { 7157 .rpc_client = NFS_CLIENT(lsp->ls_state->inode), 7158 .rpc_message = &msg, 7159 .callback_ops = &nfs4_locku_ops, 7160 .workqueue = nfsiod_workqueue, 7161 .flags = RPC_TASK_ASYNC, 7162 }; 7163 7164 if (nfs_server_capable(lsp->ls_state->inode, NFS_CAP_MOVEABLE)) 7165 task_setup_data.flags |= RPC_TASK_MOVEABLE; 7166 7167 nfs4_state_protect(clp, NFS_SP4_MACH_CRED_CLEANUP, 7168 &task_setup_data.rpc_client, &msg); 7169 7170 /* Ensure this is an unlock - when canceling a lock, the 7171 * canceled lock is passed in, and it won't be an unlock. 7172 */ 7173 fl->c.flc_type = F_UNLCK; 7174 if (fl->c.flc_flags & FL_CLOSE) 7175 set_bit(NFS_CONTEXT_UNLOCK, &ctx->flags); 7176 7177 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); 7178 if (data == NULL) { 7179 nfs_free_seqid(seqid); 7180 return ERR_PTR(-ENOMEM); 7181 } 7182 7183 nfs4_init_sequence(clp, &data->arg.seq_args, &data->res.seq_res, 1, 0); 7184 msg.rpc_argp = &data->arg; 7185 msg.rpc_resp = &data->res; 7186 task_setup_data.callback_data = data; 7187 return rpc_run_task(&task_setup_data); 7188 } 7189 7190 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) 7191 { 7192 struct inode *inode = state->inode; 7193 struct nfs4_state_owner *sp = state->owner; 7194 struct nfs_inode *nfsi = NFS_I(inode); 7195 struct nfs_seqid *seqid; 7196 struct nfs4_lock_state *lsp; 7197 struct rpc_task *task; 7198 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 7199 int status = 0; 7200 unsigned char saved_flags = request->c.flc_flags; 7201 7202 status = nfs4_set_lock_state(state, request); 7203 /* Unlock _before_ we do the RPC call */ 7204 request->c.flc_flags |= FL_EXISTS; 7205 /* Exclude nfs_delegation_claim_locks() */ 7206 mutex_lock(&sp->so_delegreturn_mutex); 7207 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */ 7208 down_read(&nfsi->rwsem); 7209 if (locks_lock_inode_wait(inode, request) == -ENOENT) { 7210 up_read(&nfsi->rwsem); 7211 mutex_unlock(&sp->so_delegreturn_mutex); 7212 goto out; 7213 } 7214 lsp = request->fl_u.nfs4_fl.owner; 7215 set_bit(NFS_LOCK_UNLOCKING, &lsp->ls_flags); 7216 up_read(&nfsi->rwsem); 7217 mutex_unlock(&sp->so_delegreturn_mutex); 7218 if (status != 0) 7219 goto out; 7220 /* Is this a delegated lock? 
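 * If NFS_LOCK_INITIALIZED was never set, the lock was only recorded
 * locally under a delegation, so the server holds no lock state for
 * it and no LOCKU needs to be sent.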
*/ 7221 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0) 7222 goto out; 7223 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid; 7224 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); 7225 status = -ENOMEM; 7226 if (IS_ERR(seqid)) 7227 goto out; 7228 task = nfs4_do_unlck(request, 7229 nfs_file_open_context(request->c.flc_file), 7230 lsp, seqid); 7231 status = PTR_ERR(task); 7232 if (IS_ERR(task)) 7233 goto out; 7234 status = rpc_wait_for_completion_task(task); 7235 rpc_put_task(task); 7236 out: 7237 request->c.flc_flags = saved_flags; 7238 trace_nfs4_unlock(request, state, F_SETLK, status); 7239 return status; 7240 } 7241 7242 struct nfs4_lockdata { 7243 struct nfs_lock_args arg; 7244 struct nfs_lock_res res; 7245 struct nfs4_lock_state *lsp; 7246 struct nfs_open_context *ctx; 7247 struct file_lock fl; 7248 unsigned long timestamp; 7249 int rpc_status; 7250 int cancelled; 7251 struct nfs_server *server; 7252 }; 7253 7254 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, 7255 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp, 7256 gfp_t gfp_mask) 7257 { 7258 struct nfs4_lockdata *p; 7259 struct inode *inode = lsp->ls_state->inode; 7260 struct nfs_server *server = NFS_SERVER(inode); 7261 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 7262 7263 p = kzalloc(sizeof(*p), gfp_mask); 7264 if (p == NULL) 7265 return NULL; 7266 7267 p->arg.fh = NFS_FH(inode); 7268 p->arg.fl = &p->fl; 7269 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask); 7270 if (IS_ERR(p->arg.open_seqid)) 7271 goto out_free; 7272 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 7273 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask); 7274 if (IS_ERR(p->arg.lock_seqid)) 7275 goto out_free_seqid; 7276 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; 7277 p->arg.lock_owner.id = lsp->ls_seqid.owner_id; 7278 p->arg.lock_owner.s_dev = server->s_dev; 7279 p->res.lock_seqid = p->arg.lock_seqid; 7280 p->lsp = lsp; 7281 p->server = server; 7282 p->ctx = get_nfs_open_context(ctx); 7283 locks_init_lock(&p->fl); 7284 locks_copy_lock(&p->fl, fl); 7285 return p; 7286 out_free_seqid: 7287 nfs_free_seqid(p->arg.open_seqid); 7288 out_free: 7289 kfree(p); 7290 return NULL; 7291 } 7292 7293 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) 7294 { 7295 struct nfs4_lockdata *data = calldata; 7296 struct nfs4_state *state = data->lsp->ls_state; 7297 7298 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) 7299 goto out_wait; 7300 /* Do we need to do an open_to_lock_owner? 
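 * The first LOCK request for this lock owner must carry the open
 * stateid and open seqid (new_lock_owner = 1); once the lock state
 * is initialized, later requests use the existing lock stateid.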
*/ 7301 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) { 7302 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) { 7303 goto out_release_lock_seqid; 7304 } 7305 nfs4_stateid_copy(&data->arg.open_stateid, 7306 &state->open_stateid); 7307 data->arg.new_lock_owner = 1; 7308 data->res.open_seqid = data->arg.open_seqid; 7309 } else { 7310 data->arg.new_lock_owner = 0; 7311 nfs4_stateid_copy(&data->arg.lock_stateid, 7312 &data->lsp->ls_stateid); 7313 } 7314 if (!nfs4_valid_open_stateid(state)) { 7315 data->rpc_status = -EBADF; 7316 task->tk_action = NULL; 7317 goto out_release_open_seqid; 7318 } 7319 data->timestamp = jiffies; 7320 if (nfs4_setup_sequence(data->server->nfs_client, 7321 &data->arg.seq_args, 7322 &data->res.seq_res, 7323 task) == 0) 7324 return; 7325 out_release_open_seqid: 7326 nfs_release_seqid(data->arg.open_seqid); 7327 out_release_lock_seqid: 7328 nfs_release_seqid(data->arg.lock_seqid); 7329 out_wait: 7330 nfs4_sequence_done(task, &data->res.seq_res); 7331 dprintk("%s: ret = %d\n", __func__, data->rpc_status); 7332 } 7333 7334 static void nfs4_lock_done(struct rpc_task *task, void *calldata) 7335 { 7336 struct nfs4_lockdata *data = calldata; 7337 struct nfs4_lock_state *lsp = data->lsp; 7338 7339 if (!nfs4_sequence_done(task, &data->res.seq_res)) 7340 return; 7341 7342 data->rpc_status = task->tk_status; 7343 switch (task->tk_status) { 7344 case 0: 7345 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)), 7346 data->timestamp); 7347 if (data->arg.new_lock && !data->cancelled) { 7348 data->fl.c.flc_flags &= ~(FL_SLEEP | FL_ACCESS); 7349 if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0) 7350 goto out_restart; 7351 } 7352 if (data->arg.new_lock_owner != 0) { 7353 nfs_confirm_seqid(&lsp->ls_seqid, 0); 7354 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid); 7355 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 7356 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid)) 7357 goto out_restart; 7358 break; 7359 case -NFS4ERR_OLD_STATEID: 7360 if (data->arg.new_lock_owner != 0 && 7361 nfs4_refresh_open_old_stateid(&data->arg.open_stateid, 7362 lsp->ls_state)) 7363 goto out_restart; 7364 if (nfs4_refresh_lock_old_stateid(&data->arg.lock_stateid, lsp)) 7365 goto out_restart; 7366 fallthrough; 7367 case -NFS4ERR_BAD_STATEID: 7368 case -NFS4ERR_STALE_STATEID: 7369 case -NFS4ERR_EXPIRED: 7370 if (data->arg.new_lock_owner != 0) { 7371 if (!nfs4_stateid_match(&data->arg.open_stateid, 7372 &lsp->ls_state->open_stateid)) 7373 goto out_restart; 7374 } else if (!nfs4_stateid_match(&data->arg.lock_stateid, 7375 &lsp->ls_stateid)) 7376 goto out_restart; 7377 } 7378 out_done: 7379 dprintk("%s: ret = %d!\n", __func__, data->rpc_status); 7380 return; 7381 out_restart: 7382 if (!data->cancelled) 7383 rpc_restart_call_prepare(task); 7384 goto out_done; 7385 } 7386 7387 static void nfs4_lock_release(void *calldata) 7388 { 7389 struct nfs4_lockdata *data = calldata; 7390 7391 nfs_free_seqid(data->arg.open_seqid); 7392 if (data->cancelled && data->rpc_status == 0) { 7393 struct rpc_task *task; 7394 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, 7395 data->arg.lock_seqid); 7396 if (!IS_ERR(task)) 7397 rpc_put_task_async(task); 7398 dprintk("%s: cancelling lock!\n", __func__); 7399 } else 7400 nfs_free_seqid(data->arg.lock_seqid); 7401 nfs4_put_lock_state(data->lsp); 7402 put_nfs_open_context(data->ctx); 7403 kfree(data); 7404 } 7405 7406 static const struct rpc_call_ops nfs4_lock_ops = { 7407 .rpc_call_prepare = nfs4_lock_prepare, 7408 .rpc_call_done = 
nfs4_lock_done, 7409 .rpc_release = nfs4_lock_release, 7410 }; 7411 7412 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) 7413 { 7414 switch (error) { 7415 case -NFS4ERR_ADMIN_REVOKED: 7416 case -NFS4ERR_EXPIRED: 7417 case -NFS4ERR_BAD_STATEID: 7418 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 7419 if (new_lock_owner != 0 || 7420 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) 7421 nfs4_schedule_stateid_recovery(server, lsp->ls_state); 7422 break; 7423 case -NFS4ERR_STALE_STATEID: 7424 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 7425 nfs4_schedule_lease_recovery(server->nfs_client); 7426 } 7427 } 7428 7429 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) 7430 { 7431 struct nfs4_lockdata *data; 7432 struct rpc_task *task; 7433 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client; 7434 struct rpc_message msg = { 7435 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK], 7436 .rpc_cred = state->owner->so_cred, 7437 }; 7438 struct rpc_task_setup task_setup_data = { 7439 .rpc_client = NFS_CLIENT(state->inode), 7440 .rpc_message = &msg, 7441 .callback_ops = &nfs4_lock_ops, 7442 .workqueue = nfsiod_workqueue, 7443 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 7444 }; 7445 int ret; 7446 7447 if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE)) 7448 task_setup_data.flags |= RPC_TASK_MOVEABLE; 7449 7450 data = nfs4_alloc_lockdata(fl, 7451 nfs_file_open_context(fl->c.flc_file), 7452 fl->fl_u.nfs4_fl.owner, GFP_KERNEL); 7453 if (data == NULL) 7454 return -ENOMEM; 7455 if (IS_SETLKW(cmd)) 7456 data->arg.block = 1; 7457 nfs4_init_sequence(clp, &data->arg.seq_args, &data->res.seq_res, 1, 7458 recovery_type > NFS_LOCK_NEW); 7459 msg.rpc_argp = &data->arg; 7460 msg.rpc_resp = &data->res; 7461 task_setup_data.callback_data = data; 7462 if (recovery_type > NFS_LOCK_NEW) { 7463 if (recovery_type == NFS_LOCK_RECLAIM) 7464 data->arg.reclaim = NFS_LOCK_RECLAIM; 7465 } else 7466 data->arg.new_lock = 1; 7467 task = rpc_run_task(&task_setup_data); 7468 if (IS_ERR(task)) 7469 return PTR_ERR(task); 7470 ret = rpc_wait_for_completion_task(task); 7471 if (ret == 0) { 7472 ret = data->rpc_status; 7473 if (ret) 7474 nfs4_handle_setlk_error(data->server, data->lsp, 7475 data->arg.new_lock_owner, ret); 7476 } else 7477 data->cancelled = true; 7478 trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret); 7479 rpc_put_task(task); 7480 dprintk("%s: ret = %d\n", __func__, ret); 7481 return ret; 7482 } 7483 7484 int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) 7485 { 7486 struct nfs_server *server = NFS_SERVER(state->inode); 7487 struct nfs4_exception exception = { 7488 .inode = state->inode, 7489 }; 7490 int err; 7491 7492 do { 7493 /* Cache the lock if possible... 
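 * While the delegation is held the server has no record of our locks,
 * so there is nothing to reclaim on the wire and the locally cached
 * lock is left in place.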
*/ 7494 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 7495 return 0; 7496 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM); 7497 if (err != -NFS4ERR_DELAY) 7498 break; 7499 nfs4_handle_exception(server, err, &exception); 7500 } while (exception.retry); 7501 return err; 7502 } 7503 7504 int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) 7505 { 7506 struct nfs_server *server = NFS_SERVER(state->inode); 7507 struct nfs4_exception exception = { 7508 .inode = state->inode, 7509 }; 7510 int err; 7511 7512 err = nfs4_set_lock_state(state, request); 7513 if (err != 0) 7514 return err; 7515 if (!recover_lost_locks) { 7516 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags); 7517 return 0; 7518 } 7519 do { 7520 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 7521 return 0; 7522 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED); 7523 switch (err) { 7524 default: 7525 goto out; 7526 case -NFS4ERR_GRACE: 7527 case -NFS4ERR_DELAY: 7528 nfs4_handle_exception(server, err, &exception); 7529 err = 0; 7530 } 7531 } while (exception.retry); 7532 out: 7533 return err; 7534 } 7535 7536 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request) 7537 { 7538 struct nfs4_lock_state *lsp; 7539 int status; 7540 7541 status = nfs4_set_lock_state(state, request); 7542 if (status != 0) 7543 return status; 7544 lsp = request->fl_u.nfs4_fl.owner; 7545 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) || 7546 test_bit(NFS_LOCK_LOST, &lsp->ls_flags)) 7547 return 0; 7548 return nfs4_lock_expired(state, request); 7549 } 7550 7551 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7552 { 7553 struct nfs_inode *nfsi = NFS_I(state->inode); 7554 struct nfs4_state_owner *sp = state->owner; 7555 unsigned char flags = request->c.flc_flags; 7556 int status; 7557 7558 request->c.flc_flags |= FL_ACCESS; 7559 status = locks_lock_inode_wait(state->inode, request); 7560 if (status < 0) 7561 goto out; 7562 mutex_lock(&sp->so_delegreturn_mutex); 7563 down_read(&nfsi->rwsem); 7564 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { 7565 /* Yes: cache locks! */ 7566 /* ...but avoid races with delegation recall... 
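 * FL_SLEEP is cleared so the local lock attempt below cannot sleep
 * while so_delegreturn_mutex and nfsi->rwsem are held, which would
 * stall a concurrent delegation recall.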
*/ 7567 request->c.flc_flags = flags & ~FL_SLEEP; 7568 status = locks_lock_inode_wait(state->inode, request); 7569 up_read(&nfsi->rwsem); 7570 mutex_unlock(&sp->so_delegreturn_mutex); 7571 goto out; 7572 } 7573 up_read(&nfsi->rwsem); 7574 mutex_unlock(&sp->so_delegreturn_mutex); 7575 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); 7576 out: 7577 request->c.flc_flags = flags; 7578 return status; 7579 } 7580 7581 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7582 { 7583 struct nfs4_exception exception = { 7584 .state = state, 7585 .inode = state->inode, 7586 .interruptible = true, 7587 }; 7588 int err; 7589 7590 do { 7591 err = _nfs4_proc_setlk(state, cmd, request); 7592 if (err == -NFS4ERR_DENIED) 7593 err = -EAGAIN; 7594 err = nfs4_handle_exception(NFS_SERVER(state->inode), 7595 err, &exception); 7596 } while (exception.retry); 7597 return err; 7598 } 7599 7600 #define NFS4_LOCK_MINTIMEOUT (1 * HZ) 7601 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) 7602 7603 static int 7604 nfs4_retry_setlk_simple(struct nfs4_state *state, int cmd, 7605 struct file_lock *request) 7606 { 7607 int status = -ERESTARTSYS; 7608 unsigned long timeout = NFS4_LOCK_MINTIMEOUT; 7609 7610 while(!signalled()) { 7611 status = nfs4_proc_setlk(state, cmd, request); 7612 if ((status != -EAGAIN) || IS_SETLK(cmd)) 7613 break; 7614 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE); 7615 schedule_timeout(timeout); 7616 timeout *= 2; 7617 timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout); 7618 status = -ERESTARTSYS; 7619 } 7620 return status; 7621 } 7622 7623 struct nfs4_lock_waiter { 7624 struct inode *inode; 7625 struct nfs_lowner owner; 7626 wait_queue_entry_t wait; 7627 }; 7628 7629 static int 7630 nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key) 7631 { 7632 struct nfs4_lock_waiter *waiter = 7633 container_of(wait, struct nfs4_lock_waiter, wait); 7634 7635 /* NULL key means to wake up everyone */ 7636 if (key) { 7637 struct cb_notify_lock_args *cbnl = key; 7638 struct nfs_lowner *lowner = &cbnl->cbnl_owner, 7639 *wowner = &waiter->owner; 7640 7641 /* Only wake if the callback was for the same owner. 
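 * The CB_NOTIFY_LOCK arguments identify the lock owner and the file
 * handle of the lock that became available; both are checked against
 * this waiter before it is woken.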
*/ 7642 if (lowner->id != wowner->id || lowner->s_dev != wowner->s_dev) 7643 return 0; 7644 7645 /* Make sure it's for the right inode */ 7646 if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh)) 7647 return 0; 7648 } 7649 7650 return woken_wake_function(wait, mode, flags, key); 7651 } 7652 7653 static int 7654 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7655 { 7656 struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner; 7657 struct nfs_server *server = NFS_SERVER(state->inode); 7658 struct nfs_client *clp = server->nfs_client; 7659 wait_queue_head_t *q = &clp->cl_lock_waitq; 7660 struct nfs4_lock_waiter waiter = { 7661 .inode = state->inode, 7662 .owner = { .clientid = clp->cl_clientid, 7663 .id = lsp->ls_seqid.owner_id, 7664 .s_dev = server->s_dev }, 7665 }; 7666 int status; 7667 7668 /* Don't bother with waitqueue if we don't expect a callback */ 7669 if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags)) 7670 return nfs4_retry_setlk_simple(state, cmd, request); 7671 7672 init_wait(&waiter.wait); 7673 waiter.wait.func = nfs4_wake_lock_waiter; 7674 add_wait_queue(q, &waiter.wait); 7675 7676 do { 7677 status = nfs4_proc_setlk(state, cmd, request); 7678 if (status != -EAGAIN || IS_SETLK(cmd)) 7679 break; 7680 7681 status = -ERESTARTSYS; 7682 wait_woken(&waiter.wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE, 7683 NFS4_LOCK_MAXTIMEOUT); 7684 } while (!signalled()); 7685 7686 remove_wait_queue(q, &waiter.wait); 7687 7688 return status; 7689 } 7690 7691 static int 7692 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) 7693 { 7694 struct nfs_open_context *ctx; 7695 struct nfs4_state *state; 7696 int status; 7697 7698 /* verify open state */ 7699 ctx = nfs_file_open_context(filp); 7700 state = ctx->state; 7701 7702 if (IS_GETLK(cmd)) { 7703 if (state != NULL) 7704 return nfs4_proc_getlk(state, F_GETLK, request); 7705 return 0; 7706 } 7707 7708 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd))) 7709 return -EINVAL; 7710 7711 if (lock_is_unlock(request)) { 7712 if (state != NULL) 7713 return nfs4_proc_unlck(state, cmd, request); 7714 return 0; 7715 } 7716 7717 if (state == NULL) 7718 return -ENOLCK; 7719 7720 if ((request->c.flc_flags & FL_POSIX) && 7721 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) 7722 return -ENOLCK; 7723 7724 /* 7725 * Don't rely on the VFS having checked the file open mode, 7726 * since it won't do this for flock() locks. 7727 */ 7728 switch (request->c.flc_type) { 7729 case F_RDLCK: 7730 if (!(filp->f_mode & FMODE_READ)) 7731 return -EBADF; 7732 break; 7733 case F_WRLCK: 7734 if (!(filp->f_mode & FMODE_WRITE)) 7735 return -EBADF; 7736 } 7737 7738 status = nfs4_set_lock_state(state, request); 7739 if (status != 0) 7740 return status; 7741 7742 return nfs4_retry_setlk(state, cmd, request); 7743 } 7744 7745 static int nfs4_delete_lease(struct file *file, void **priv) 7746 { 7747 return generic_setlease(file, F_UNLCK, NULL, priv); 7748 } 7749 7750 static int nfs4_add_lease(struct file *file, int arg, struct file_lease **lease, 7751 void **priv) 7752 { 7753 struct inode *inode = file_inode(file); 7754 fmode_t type = arg == F_RDLCK ? 
FMODE_READ : FMODE_WRITE; 7755 int ret; 7756 7757 /* No delegation, no lease */ 7758 if (!nfs4_have_delegation(inode, type, 0)) 7759 return -EAGAIN; 7760 ret = generic_setlease(file, arg, lease, priv); 7761 if (ret || nfs4_have_delegation(inode, type, 0)) 7762 return ret; 7763 /* We raced with a delegation return */ 7764 nfs4_delete_lease(file, priv); 7765 return -EAGAIN; 7766 } 7767 7768 int nfs4_proc_setlease(struct file *file, int arg, struct file_lease **lease, 7769 void **priv) 7770 { 7771 switch (arg) { 7772 case F_RDLCK: 7773 case F_WRLCK: 7774 return nfs4_add_lease(file, arg, lease, priv); 7775 case F_UNLCK: 7776 return nfs4_delete_lease(file, priv); 7777 default: 7778 return -EINVAL; 7779 } 7780 } 7781 7782 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid) 7783 { 7784 struct nfs_server *server = NFS_SERVER(state->inode); 7785 int err; 7786 7787 err = nfs4_set_lock_state(state, fl); 7788 if (err != 0) 7789 return err; 7790 do { 7791 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); 7792 if (err != -NFS4ERR_DELAY && err != -NFS4ERR_GRACE) 7793 break; 7794 ssleep(1); 7795 } while (err == -NFS4ERR_DELAY || err == -NFS4ERR_GRACE); 7796 return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err); 7797 } 7798 7799 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" 7800 7801 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler, 7802 struct mnt_idmap *idmap, 7803 struct dentry *unused, struct inode *inode, 7804 const char *key, const void *buf, 7805 size_t buflen, int flags) 7806 { 7807 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_ACL); 7808 } 7809 7810 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler, 7811 struct dentry *unused, struct inode *inode, 7812 const char *key, void *buf, size_t buflen) 7813 { 7814 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_ACL); 7815 } 7816 7817 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry) 7818 { 7819 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_ACL); 7820 } 7821 7822 #define XATTR_NAME_NFSV4_DACL "system.nfs4_dacl" 7823 7824 static int nfs4_xattr_set_nfs4_dacl(const struct xattr_handler *handler, 7825 struct mnt_idmap *idmap, 7826 struct dentry *unused, struct inode *inode, 7827 const char *key, const void *buf, 7828 size_t buflen, int flags) 7829 { 7830 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_DACL); 7831 } 7832 7833 static int nfs4_xattr_get_nfs4_dacl(const struct xattr_handler *handler, 7834 struct dentry *unused, struct inode *inode, 7835 const char *key, void *buf, size_t buflen) 7836 { 7837 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_DACL); 7838 } 7839 7840 static bool nfs4_xattr_list_nfs4_dacl(struct dentry *dentry) 7841 { 7842 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_DACL); 7843 } 7844 7845 #define XATTR_NAME_NFSV4_SACL "system.nfs4_sacl" 7846 7847 static int nfs4_xattr_set_nfs4_sacl(const struct xattr_handler *handler, 7848 struct mnt_idmap *idmap, 7849 struct dentry *unused, struct inode *inode, 7850 const char *key, const void *buf, 7851 size_t buflen, int flags) 7852 { 7853 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_SACL); 7854 } 7855 7856 static int nfs4_xattr_get_nfs4_sacl(const struct xattr_handler *handler, 7857 struct dentry *unused, struct inode *inode, 7858 const char *key, void *buf, size_t buflen) 7859 { 7860 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_SACL); 7861 } 7862 7863 static bool 
nfs4_xattr_list_nfs4_sacl(struct dentry *dentry) 7864 { 7865 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_SACL); 7866 } 7867 7868 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 7869 7870 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler, 7871 struct mnt_idmap *idmap, 7872 struct dentry *unused, struct inode *inode, 7873 const char *key, const void *buf, 7874 size_t buflen, int flags) 7875 { 7876 if (security_ismaclabel(key)) 7877 return nfs4_set_security_label(inode, buf, buflen); 7878 7879 return -EOPNOTSUPP; 7880 } 7881 7882 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler, 7883 struct dentry *unused, struct inode *inode, 7884 const char *key, void *buf, size_t buflen) 7885 { 7886 if (security_ismaclabel(key)) 7887 return nfs4_get_security_label(inode, buf, buflen); 7888 return -EOPNOTSUPP; 7889 } 7890 7891 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = { 7892 .prefix = XATTR_SECURITY_PREFIX, 7893 .get = nfs4_xattr_get_nfs4_label, 7894 .set = nfs4_xattr_set_nfs4_label, 7895 }; 7896 7897 #endif 7898 7899 #ifdef CONFIG_NFS_V4_2 7900 static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler, 7901 struct mnt_idmap *idmap, 7902 struct dentry *unused, struct inode *inode, 7903 const char *key, const void *buf, 7904 size_t buflen, int flags) 7905 { 7906 u32 mask; 7907 int ret; 7908 7909 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 7910 return -EOPNOTSUPP; 7911 7912 /* 7913 * There is no mapping from the MAY_* flags to the NFS_ACCESS_XA* 7914 * flags right now. Handling of xattr operations use the normal 7915 * file read/write permissions. 7916 * 7917 * Just in case the server has other ideas (which RFC 8276 allows), 7918 * do a cached access check for the XA* flags to possibly avoid 7919 * doing an RPC and getting EACCES back. 
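 *
 * Illustrative example (editorial addition, not upstream text): this handler
 * backs the "user." xattr namespace on NFSv4.2 mounts, so it is what
 * ultimately services a userspace call such as
 *
 *	setxattr("/mnt/nfs/file", "user.comment", "hello", 5, 0);
 *
 * by sending an NFSv4.2 SETXATTR (or REMOVEXATTR when the value is NULL)
 * and updating the local xattr cache on success.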
7920 */ 7921 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { 7922 if (!(mask & NFS_ACCESS_XAWRITE)) 7923 return -EACCES; 7924 } 7925 7926 if (buf == NULL) { 7927 ret = nfs42_proc_removexattr(inode, key); 7928 if (!ret) 7929 nfs4_xattr_cache_remove(inode, key); 7930 } else { 7931 ret = nfs42_proc_setxattr(inode, key, buf, buflen, flags); 7932 if (!ret) 7933 nfs4_xattr_cache_add(inode, key, buf, NULL, buflen); 7934 } 7935 7936 return ret; 7937 } 7938 7939 static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler, 7940 struct dentry *unused, struct inode *inode, 7941 const char *key, void *buf, size_t buflen) 7942 { 7943 u32 mask; 7944 ssize_t ret; 7945 7946 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 7947 return -EOPNOTSUPP; 7948 7949 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { 7950 if (!(mask & NFS_ACCESS_XAREAD)) 7951 return -EACCES; 7952 } 7953 7954 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 7955 if (ret) 7956 return ret; 7957 7958 ret = nfs4_xattr_cache_get(inode, key, buf, buflen); 7959 if (ret >= 0 || (ret < 0 && ret != -ENOENT)) 7960 return ret; 7961 7962 ret = nfs42_proc_getxattr(inode, key, buf, buflen); 7963 7964 return ret; 7965 } 7966 7967 static ssize_t 7968 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) 7969 { 7970 u64 cookie; 7971 bool eof; 7972 ssize_t ret, size; 7973 char *buf; 7974 size_t buflen; 7975 u32 mask; 7976 7977 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 7978 return 0; 7979 7980 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { 7981 if (!(mask & NFS_ACCESS_XALIST)) 7982 return 0; 7983 } 7984 7985 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 7986 if (ret) 7987 return ret; 7988 7989 ret = nfs4_xattr_cache_list(inode, list, list_len); 7990 if (ret >= 0 || (ret < 0 && ret != -ENOENT)) 7991 return ret; 7992 7993 cookie = 0; 7994 eof = false; 7995 buflen = list_len ? list_len : XATTR_LIST_MAX; 7996 buf = list_len ? 
list : NULL; 7997 size = 0; 7998 7999 while (!eof) { 8000 ret = nfs42_proc_listxattrs(inode, buf, buflen, 8001 &cookie, &eof); 8002 if (ret < 0) 8003 return ret; 8004 8005 if (list_len) { 8006 buf += ret; 8007 buflen -= ret; 8008 } 8009 size += ret; 8010 } 8011 8012 if (list_len) 8013 nfs4_xattr_cache_set_list(inode, list, size); 8014 8015 return size; 8016 } 8017 8018 #else 8019 8020 static ssize_t 8021 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) 8022 { 8023 return 0; 8024 } 8025 #endif /* CONFIG_NFS_V4_2 */ 8026 8027 /* 8028 * nfs_fhget will use either the mounted_on_fileid or the fileid 8029 */ 8030 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) 8031 { 8032 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || 8033 (fattr->valid & NFS_ATTR_FATTR_FILEID)) && 8034 (fattr->valid & NFS_ATTR_FATTR_FSID) && 8035 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS))) 8036 return; 8037 8038 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 8039 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL; 8040 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 8041 fattr->nlink = 2; 8042 } 8043 8044 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 8045 const struct qstr *name, 8046 struct nfs4_fs_locations *fs_locations, 8047 struct page *page) 8048 { 8049 struct nfs_server *server = NFS_SERVER(dir); 8050 u32 bitmask[3]; 8051 struct nfs4_fs_locations_arg args = { 8052 .dir_fh = NFS_FH(dir), 8053 .name = name, 8054 .page = page, 8055 .bitmask = bitmask, 8056 }; 8057 struct nfs4_fs_locations_res res = { 8058 .fs_locations = fs_locations, 8059 }; 8060 struct rpc_message msg = { 8061 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 8062 .rpc_argp = &args, 8063 .rpc_resp = &res, 8064 }; 8065 int status; 8066 8067 dprintk("%s: start\n", __func__); 8068 8069 bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS; 8070 bitmask[1] = nfs4_fattr_bitmap[1]; 8071 8072 /* Ask for the fileid of the absent filesystem if mounted_on_fileid 8073 * is not supported */ 8074 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) 8075 bitmask[0] &= ~FATTR4_WORD0_FILEID; 8076 else 8077 bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID; 8078 8079 nfs_fattr_init(fs_locations->fattr); 8080 fs_locations->server = server; 8081 fs_locations->nlocations = 0; 8082 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0); 8083 dprintk("%s: returned status = %d\n", __func__, status); 8084 return status; 8085 } 8086 8087 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 8088 const struct qstr *name, 8089 struct nfs4_fs_locations *fs_locations, 8090 struct page *page) 8091 { 8092 struct nfs4_exception exception = { 8093 .interruptible = true, 8094 }; 8095 int err; 8096 do { 8097 err = _nfs4_proc_fs_locations(client, dir, name, 8098 fs_locations, page); 8099 trace_nfs4_get_fs_locations(dir, name, err); 8100 err = nfs4_handle_exception(NFS_SERVER(dir), err, 8101 &exception); 8102 } while (exception.retry); 8103 return err; 8104 } 8105 8106 /* 8107 * This operation also signals the server that this client is 8108 * performing migration recovery. The server can stop asserting 8109 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID 8110 * performing this operation is identified in the SEQUENCE 8111 * operation in this compound. 8112 * 8113 * When the client supports GETATTR(fs_locations_info), it can 8114 * be plumbed in here. 
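 *
 * (Editorial note, not upstream text: the compound built below is
 * SEQUENCE + PUTFH + GETATTR requesting fsid and fs_locations, with
 * args.migration = 1 so that the usual LOOKUP of a child name is skipped
 * and the migrated filesystem's own file handle is queried directly.)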
8115 */ 8116 static int _nfs41_proc_get_locations(struct nfs_server *server, 8117 struct nfs_fh *fhandle, 8118 struct nfs4_fs_locations *locations, 8119 struct page *page, const struct cred *cred) 8120 { 8121 struct rpc_clnt *clnt = server->client; 8122 struct nfs_client *clp = server->nfs_client; 8123 u32 bitmask[2] = { 8124 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 8125 }; 8126 struct nfs4_fs_locations_arg args = { 8127 .fh = fhandle, 8128 .page = page, 8129 .bitmask = bitmask, 8130 .migration = 1, /* skip LOOKUP */ 8131 }; 8132 struct nfs4_fs_locations_res res = { 8133 .fs_locations = locations, 8134 .migration = 1, 8135 }; 8136 struct rpc_message msg = { 8137 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 8138 .rpc_argp = &args, 8139 .rpc_resp = &res, 8140 .rpc_cred = cred, 8141 }; 8142 struct nfs4_call_sync_data data = { 8143 .seq_server = server, 8144 .seq_args = &args.seq_args, 8145 .seq_res = &res.seq_res, 8146 }; 8147 struct rpc_task_setup task_setup_data = { 8148 .rpc_client = clnt, 8149 .rpc_message = &msg, 8150 .callback_ops = clp->cl_mvops->call_sync_ops, 8151 .callback_data = &data, 8152 .flags = RPC_TASK_NO_ROUND_ROBIN, 8153 }; 8154 int status; 8155 8156 nfs_fattr_init(locations->fattr); 8157 locations->server = server; 8158 locations->nlocations = 0; 8159 8160 nfs4_init_sequence(clp, &args.seq_args, &res.seq_res, 0, 1); 8161 status = nfs4_call_sync_custom(&task_setup_data); 8162 if (status == NFS4_OK && 8163 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 8164 status = -NFS4ERR_LEASE_MOVED; 8165 return status; 8166 } 8167 8168 /** 8169 * nfs4_proc_get_locations - discover locations for a migrated FSID 8170 * @server: pointer to nfs_server to process 8171 * @fhandle: pointer to the kernel NFS client file handle 8172 * @locations: result of query 8173 * @page: buffer 8174 * @cred: credential to use for this operation 8175 * 8176 * Returns NFS4_OK on success, a negative NFS4ERR status code if the 8177 * operation failed, or a negative errno if a local error occurred. 8178 * 8179 * On success, "locations" is filled in, but if the server has 8180 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not 8181 * asserted. 8182 * 8183 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases 8184 * from this client that require migration recovery. 8185 */ 8186 int nfs4_proc_get_locations(struct nfs_server *server, 8187 struct nfs_fh *fhandle, 8188 struct nfs4_fs_locations *locations, 8189 struct page *page, const struct cred *cred) 8190 { 8191 struct nfs_client *clp = server->nfs_client; 8192 const struct nfs4_mig_recovery_ops *ops = 8193 clp->cl_mvops->mig_recovery_ops; 8194 struct nfs4_exception exception = { 8195 .interruptible = true, 8196 }; 8197 int status; 8198 8199 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 8200 (unsigned long long)server->fsid.major, 8201 (unsigned long long)server->fsid.minor, 8202 clp->cl_hostname); 8203 nfs_display_fhandle(fhandle, __func__); 8204 8205 do { 8206 status = ops->get_locations(server, fhandle, locations, page, 8207 cred); 8208 if (status != -NFS4ERR_DELAY) 8209 break; 8210 nfs4_handle_exception(server, status, &exception); 8211 } while (exception.retry); 8212 return status; 8213 } 8214 8215 /* 8216 * This operation also signals the server that this client is 8217 * performing "lease moved" recovery. The server can stop asserting 8218 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing 8219 * this operation is identified in the SEQUENCE operation in this 8220 * compound. 
8221 */ 8222 static int _nfs41_proc_fsid_present(struct inode *inode, const struct cred *cred) 8223 { 8224 struct nfs_server *server = NFS_SERVER(inode); 8225 struct rpc_clnt *clnt = server->client; 8226 struct nfs4_fsid_present_arg args = { 8227 .fh = NFS_FH(inode), 8228 }; 8229 struct nfs4_fsid_present_res res = { 8230 }; 8231 struct rpc_message msg = { 8232 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 8233 .rpc_argp = &args, 8234 .rpc_resp = &res, 8235 .rpc_cred = cred, 8236 }; 8237 int status; 8238 8239 res.fh = nfs_alloc_fhandle(); 8240 if (res.fh == NULL) 8241 return -ENOMEM; 8242 8243 nfs4_init_sequence(server->nfs_client, &args.seq_args, &res.seq_res, 0, 1); 8244 status = nfs4_call_sync_sequence(clnt, server, &msg, 8245 &args.seq_args, &res.seq_res); 8246 nfs_free_fhandle(res.fh); 8247 if (status == NFS4_OK && 8248 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 8249 status = -NFS4ERR_LEASE_MOVED; 8250 return status; 8251 } 8252 8253 /** 8254 * nfs4_proc_fsid_present - Is this FSID present or absent on server? 8255 * @inode: inode on FSID to check 8256 * @cred: credential to use for this operation 8257 * 8258 * Server indicates whether the FSID is present, moved, or not 8259 * recognized. This operation is necessary to clear a LEASE_MOVED 8260 * condition for this client ID. 8261 * 8262 * Returns NFS4_OK if the FSID is present on this server, 8263 * -NFS4ERR_MOVED if the FSID is no longer present, a negative 8264 * NFS4ERR code if some error occurred on the server, or a 8265 * negative errno if a local failure occurred. 8266 */ 8267 int nfs4_proc_fsid_present(struct inode *inode, const struct cred *cred) 8268 { 8269 struct nfs_server *server = NFS_SERVER(inode); 8270 struct nfs_client *clp = server->nfs_client; 8271 const struct nfs4_mig_recovery_ops *ops = 8272 clp->cl_mvops->mig_recovery_ops; 8273 struct nfs4_exception exception = { 8274 .interruptible = true, 8275 }; 8276 int status; 8277 8278 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 8279 (unsigned long long)server->fsid.major, 8280 (unsigned long long)server->fsid.minor, 8281 clp->cl_hostname); 8282 nfs_display_fhandle(NFS_FH(inode), __func__); 8283 8284 do { 8285 status = ops->fsid_present(inode, cred); 8286 if (status != -NFS4ERR_DELAY) 8287 break; 8288 nfs4_handle_exception(server, status, &exception); 8289 } while (exception.retry); 8290 return status; 8291 } 8292 8293 /* 8294 * If 'use_integrity' is true and the state managment nfs_client 8295 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient 8296 * and the machine credential as per RFC3530bis and RFC5661 Security 8297 * Considerations sections. Otherwise, just use the user cred with the 8298 * filesystem's rpc_client. 
8299 */ 8300 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity) 8301 { 8302 int status; 8303 struct rpc_clnt *clnt = NFS_SERVER(dir)->client; 8304 struct nfs_client *clp = NFS_SERVER(dir)->nfs_client; 8305 struct nfs4_secinfo_arg args = { 8306 .dir_fh = NFS_FH(dir), 8307 .name = name, 8308 }; 8309 struct nfs4_secinfo_res res = { 8310 .flavors = flavors, 8311 }; 8312 struct rpc_message msg = { 8313 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO], 8314 .rpc_argp = &args, 8315 .rpc_resp = &res, 8316 }; 8317 struct nfs4_call_sync_data data = { 8318 .seq_server = NFS_SERVER(dir), 8319 .seq_args = &args.seq_args, 8320 .seq_res = &res.seq_res, 8321 }; 8322 struct rpc_task_setup task_setup = { 8323 .rpc_client = clnt, 8324 .rpc_message = &msg, 8325 .callback_ops = clp->cl_mvops->call_sync_ops, 8326 .callback_data = &data, 8327 .flags = RPC_TASK_NO_ROUND_ROBIN, 8328 }; 8329 const struct cred *cred = NULL; 8330 8331 if (use_integrity) { 8332 clnt = clp->cl_rpcclient; 8333 task_setup.rpc_client = clnt; 8334 8335 cred = nfs4_get_clid_cred(clp); 8336 msg.rpc_cred = cred; 8337 } 8338 8339 dprintk("NFS call secinfo %s\n", name->name); 8340 8341 nfs4_state_protect(clp, NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg); 8342 nfs4_init_sequence(clp, &args.seq_args, &res.seq_res, 0, 0); 8343 status = nfs4_call_sync_custom(&task_setup); 8344 8345 dprintk("NFS reply secinfo: %d\n", status); 8346 8347 put_cred(cred); 8348 return status; 8349 } 8350 8351 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, 8352 struct nfs4_secinfo_flavors *flavors) 8353 { 8354 struct nfs4_exception exception = { 8355 .interruptible = true, 8356 }; 8357 int err; 8358 do { 8359 err = -NFS4ERR_WRONGSEC; 8360 8361 /* try to use integrity protection with machine cred */ 8362 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client)) 8363 err = _nfs4_proc_secinfo(dir, name, flavors, true); 8364 8365 /* 8366 * if unable to use integrity protection, or SECINFO with 8367 * integrity protection returns NFS4ERR_WRONGSEC (which is 8368 * disallowed by spec, but exists in deployed servers) use 8369 * the current filesystem's rpc_client and the user cred. 8370 */ 8371 if (err == -NFS4ERR_WRONGSEC) 8372 err = _nfs4_proc_secinfo(dir, name, flavors, false); 8373 8374 trace_nfs4_secinfo(dir, name, err); 8375 err = nfs4_handle_exception(NFS_SERVER(dir), err, 8376 &exception); 8377 } while (exception.retry); 8378 return err; 8379 } 8380 8381 /* 8382 * Check the exchange flags returned by the server for invalid flags, having 8383 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or 8384 * DS flags set. 
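 *
 * (Editorial example, not upstream text: a reply carrying both
 * EXCHGID4_FLAG_USE_PNFS_MDS and EXCHGID4_FLAG_USE_NON_PNFS, or a reply
 * carrying none of the three pNFS role flags, is rejected here with
 * -NFS4ERR_INVAL rather than silently accepted.)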
8385 */ 8386 static int nfs4_check_cl_exchange_flags(u32 flags, u32 version) 8387 { 8388 if (version >= 2 && (flags & ~EXCHGID4_2_FLAG_MASK_R)) 8389 goto out_inval; 8390 else if (version < 2 && (flags & ~EXCHGID4_FLAG_MASK_R)) 8391 goto out_inval; 8392 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && 8393 (flags & EXCHGID4_FLAG_USE_NON_PNFS)) 8394 goto out_inval; 8395 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) 8396 goto out_inval; 8397 return NFS_OK; 8398 out_inval: 8399 return -NFS4ERR_INVAL; 8400 } 8401 8402 static bool 8403 nfs41_same_server_scope(struct nfs41_server_scope *a, 8404 struct nfs41_server_scope *b) 8405 { 8406 if (a->server_scope_sz != b->server_scope_sz) 8407 return false; 8408 return memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0; 8409 } 8410 8411 static void 8412 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) 8413 { 8414 struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp; 8415 struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp; 8416 struct nfs_client *clp = args->client; 8417 8418 switch (task->tk_status) { 8419 case -NFS4ERR_BADSESSION: 8420 case -NFS4ERR_DEADSESSION: 8421 nfs4_schedule_session_recovery(clp->cl_session, 8422 task->tk_status); 8423 return; 8424 } 8425 if (args->dir == NFS4_CDFC4_FORE_OR_BOTH && 8426 res->dir != NFS4_CDFS4_BOTH) { 8427 rpc_task_close_connection(task); 8428 if (args->retries++ < MAX_BIND_CONN_TO_SESSION_RETRIES) 8429 rpc_restart_call(task); 8430 } 8431 } 8432 8433 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = { 8434 .rpc_call_done = nfs4_bind_one_conn_to_session_done, 8435 }; 8436 8437 /* 8438 * nfs4_proc_bind_one_conn_to_session() 8439 * 8440 * The 4.1 client currently uses the same TCP connection for the 8441 * fore and backchannel. 
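 *
 * (Editorial note, not upstream text: the request therefore defaults to
 * NFS4_CDFC4_FORE_OR_BOTH and is downgraded to NFS4_CDFC4_FORE when the
 * session negotiated no backchannel, or when binding a transport other
 * than the client's main rpc_xprt.)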
8442 */ 8443 static 8444 int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt, 8445 struct rpc_xprt *xprt, 8446 struct nfs_client *clp, 8447 const struct cred *cred) 8448 { 8449 int status; 8450 struct nfs41_bind_conn_to_session_args args = { 8451 .client = clp, 8452 .dir = NFS4_CDFC4_FORE_OR_BOTH, 8453 .retries = 0, 8454 }; 8455 struct nfs41_bind_conn_to_session_res res; 8456 struct rpc_message msg = { 8457 .rpc_proc = 8458 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION], 8459 .rpc_argp = &args, 8460 .rpc_resp = &res, 8461 .rpc_cred = cred, 8462 }; 8463 struct rpc_task_setup task_setup_data = { 8464 .rpc_client = clnt, 8465 .rpc_xprt = xprt, 8466 .callback_ops = &nfs4_bind_one_conn_to_session_ops, 8467 .rpc_message = &msg, 8468 .flags = RPC_TASK_TIMEOUT, 8469 }; 8470 struct rpc_task *task; 8471 8472 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id); 8473 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN)) 8474 args.dir = NFS4_CDFC4_FORE; 8475 8476 /* Do not set the backchannel flag unless this is clnt->cl_xprt */ 8477 if (xprt != rcu_access_pointer(clnt->cl_xprt)) 8478 args.dir = NFS4_CDFC4_FORE; 8479 8480 task = rpc_run_task(&task_setup_data); 8481 if (!IS_ERR(task)) { 8482 status = task->tk_status; 8483 rpc_put_task(task); 8484 } else 8485 status = PTR_ERR(task); 8486 trace_nfs4_bind_conn_to_session(clp, status); 8487 if (status == 0) { 8488 if (memcmp(res.sessionid.data, 8489 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { 8490 dprintk("NFS: %s: Session ID mismatch\n", __func__); 8491 return -EIO; 8492 } 8493 if ((res.dir & args.dir) != res.dir || res.dir == 0) { 8494 dprintk("NFS: %s: Unexpected direction from server\n", 8495 __func__); 8496 return -EIO; 8497 } 8498 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) { 8499 dprintk("NFS: %s: Server returned RDMA mode = true\n", 8500 __func__); 8501 return -EIO; 8502 } 8503 } 8504 8505 return status; 8506 } 8507 8508 struct rpc_bind_conn_calldata { 8509 struct nfs_client *clp; 8510 const struct cred *cred; 8511 }; 8512 8513 static int 8514 nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt, 8515 struct rpc_xprt *xprt, 8516 void *calldata) 8517 { 8518 struct rpc_bind_conn_calldata *p = calldata; 8519 8520 return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred); 8521 } 8522 8523 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, const struct cred *cred) 8524 { 8525 struct rpc_bind_conn_calldata data = { 8526 .clp = clp, 8527 .cred = cred, 8528 }; 8529 return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient, 8530 nfs4_proc_bind_conn_to_session_callback, &data); 8531 } 8532 8533 /* 8534 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map 8535 * and operations we'd like to see to enable certain features in the allow map 8536 */ 8537 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = { 8538 .how = SP4_MACH_CRED, 8539 .enforce.u.words = { 8540 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 8541 1 << (OP_EXCHANGE_ID - 32) | 8542 1 << (OP_CREATE_SESSION - 32) | 8543 1 << (OP_DESTROY_SESSION - 32) | 8544 1 << (OP_DESTROY_CLIENTID - 32) 8545 }, 8546 .allow.u.words = { 8547 [0] = 1 << (OP_CLOSE) | 8548 1 << (OP_OPEN_DOWNGRADE) | 8549 1 << (OP_LOCKU) | 8550 1 << (OP_DELEGRETURN) | 8551 1 << (OP_COMMIT), 8552 [1] = 1 << (OP_SECINFO - 32) | 8553 1 << (OP_SECINFO_NO_NAME - 32) | 8554 1 << (OP_LAYOUTRETURN - 32) | 8555 1 << (OP_TEST_STATEID - 32) | 8556 1 << (OP_FREE_STATEID - 32) | 8557 1 << (OP_WRITE - 32) 8558 } 8559 }; 8560 8561 
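/*
 * Editorial worked example (not part of the upstream source): the enforce and
 * allow op maps above are bitmaps of NFSv4 operation numbers, one 32-bit word
 * per 32 operations, so an operation lands in word (op / 32) at bit (op % 32).
 * OP_EXCHANGE_ID is operation 42, which is why it appears in words[1] as
 * 1 << (OP_EXCHANGE_ID - 32), i.e. bit 10 of the second word.
 */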
/* 8562 * Select the state protection mode for client `clp' given the server results 8563 * from exchange_id in `sp'. 8564 * 8565 * Returns 0 on success, negative errno otherwise. 8566 */ 8567 static int nfs4_sp4_select_mode(struct nfs_client *clp, 8568 struct nfs41_state_protection *sp) 8569 { 8570 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = { 8571 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 8572 1 << (OP_EXCHANGE_ID - 32) | 8573 1 << (OP_CREATE_SESSION - 32) | 8574 1 << (OP_DESTROY_SESSION - 32) | 8575 1 << (OP_DESTROY_CLIENTID - 32) 8576 }; 8577 unsigned long flags = 0; 8578 unsigned int i; 8579 int ret = 0; 8580 8581 if (sp->how == SP4_MACH_CRED) { 8582 /* Print state protect result */ 8583 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n"); 8584 for (i = 0; i <= LAST_NFS4_OP; i++) { 8585 if (test_bit(i, sp->enforce.u.longs)) 8586 dfprintk(MOUNT, " enforce op %d\n", i); 8587 if (test_bit(i, sp->allow.u.longs)) 8588 dfprintk(MOUNT, " allow op %d\n", i); 8589 } 8590 8591 /* make sure nothing is on enforce list that isn't supported */ 8592 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) { 8593 if (sp->enforce.u.words[i] & ~supported_enforce[i]) { 8594 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 8595 ret = -EINVAL; 8596 goto out; 8597 } 8598 } 8599 8600 /* 8601 * Minimal mode - state operations are allowed to use machine 8602 * credential. Note this already happens by default, so the 8603 * client doesn't have to do anything more than the negotiation. 8604 * 8605 * NOTE: we don't care if EXCHANGE_ID is in the list - 8606 * we're already using the machine cred for exchange_id 8607 * and will never use a different cred. 8608 */ 8609 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) && 8610 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) && 8611 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) && 8612 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) { 8613 dfprintk(MOUNT, "sp4_mach_cred:\n"); 8614 dfprintk(MOUNT, " minimal mode enabled\n"); 8615 __set_bit(NFS_SP4_MACH_CRED_MINIMAL, &flags); 8616 } else { 8617 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 8618 ret = -EINVAL; 8619 goto out; 8620 } 8621 8622 if (test_bit(OP_CLOSE, sp->allow.u.longs) && 8623 test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) && 8624 test_bit(OP_DELEGRETURN, sp->allow.u.longs) && 8625 test_bit(OP_LOCKU, sp->allow.u.longs)) { 8626 dfprintk(MOUNT, " cleanup mode enabled\n"); 8627 __set_bit(NFS_SP4_MACH_CRED_CLEANUP, &flags); 8628 } 8629 8630 if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) { 8631 dfprintk(MOUNT, " pnfs cleanup mode enabled\n"); 8632 __set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP, &flags); 8633 } 8634 8635 if (test_bit(OP_SECINFO, sp->allow.u.longs) && 8636 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) { 8637 dfprintk(MOUNT, " secinfo mode enabled\n"); 8638 __set_bit(NFS_SP4_MACH_CRED_SECINFO, &flags); 8639 } 8640 8641 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) && 8642 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) { 8643 dfprintk(MOUNT, " stateid mode enabled\n"); 8644 __set_bit(NFS_SP4_MACH_CRED_STATEID, &flags); 8645 } 8646 8647 if (test_bit(OP_WRITE, sp->allow.u.longs)) { 8648 dfprintk(MOUNT, " write mode enabled\n"); 8649 __set_bit(NFS_SP4_MACH_CRED_WRITE, &flags); 8650 } 8651 8652 if (test_bit(OP_COMMIT, sp->allow.u.longs)) { 8653 dfprintk(MOUNT, " commit mode enabled\n"); 8654 __set_bit(NFS_SP4_MACH_CRED_COMMIT, &flags); 8655 } 8656 } 8657 out: 8658 clp->cl_sp4_flags = flags; 8659 return ret; 8660 } 8661 8662 struct nfs41_exchange_id_data { 8663 struct 
nfs41_exchange_id_res res; 8664 struct nfs41_exchange_id_args args; 8665 }; 8666 8667 static void nfs4_exchange_id_release(void *data) 8668 { 8669 struct nfs41_exchange_id_data *cdata = 8670 (struct nfs41_exchange_id_data *)data; 8671 8672 nfs_put_client(cdata->args.client); 8673 kfree(cdata->res.impl_id); 8674 kfree(cdata->res.server_scope); 8675 kfree(cdata->res.server_owner); 8676 kfree(cdata); 8677 } 8678 8679 static const struct rpc_call_ops nfs4_exchange_id_call_ops = { 8680 .rpc_release = nfs4_exchange_id_release, 8681 }; 8682 8683 /* 8684 * _nfs4_proc_exchange_id() 8685 * 8686 * Wrapper for EXCHANGE_ID operation. 8687 */ 8688 static struct rpc_task * 8689 nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred, 8690 u32 sp4_how, struct rpc_xprt *xprt) 8691 { 8692 struct rpc_message msg = { 8693 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], 8694 .rpc_cred = cred, 8695 }; 8696 struct rpc_task_setup task_setup_data = { 8697 .rpc_client = clp->cl_rpcclient, 8698 .callback_ops = &nfs4_exchange_id_call_ops, 8699 .rpc_message = &msg, 8700 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN, 8701 }; 8702 struct nfs41_exchange_id_data *calldata; 8703 int status; 8704 8705 if (!refcount_inc_not_zero(&clp->cl_count)) 8706 return ERR_PTR(-EIO); 8707 8708 status = -ENOMEM; 8709 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 8710 if (!calldata) 8711 goto out; 8712 8713 nfs4_init_boot_verifier(clp, &calldata->args.verifier); 8714 8715 status = nfs4_init_uniform_client_string(clp); 8716 if (status) 8717 goto out_calldata; 8718 8719 calldata->res.server_owner = kzalloc(sizeof(struct nfs41_server_owner), 8720 GFP_NOFS); 8721 status = -ENOMEM; 8722 if (unlikely(calldata->res.server_owner == NULL)) 8723 goto out_calldata; 8724 8725 calldata->res.server_scope = kzalloc(sizeof(struct nfs41_server_scope), 8726 GFP_NOFS); 8727 if (unlikely(calldata->res.server_scope == NULL)) 8728 goto out_server_owner; 8729 8730 calldata->res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS); 8731 if (unlikely(calldata->res.impl_id == NULL)) 8732 goto out_server_scope; 8733 8734 switch (sp4_how) { 8735 case SP4_NONE: 8736 calldata->args.state_protect.how = SP4_NONE; 8737 break; 8738 8739 case SP4_MACH_CRED: 8740 calldata->args.state_protect = nfs4_sp4_mach_cred_request; 8741 break; 8742 8743 default: 8744 /* unsupported! 
*/ 8745 WARN_ON_ONCE(1); 8746 status = -EINVAL; 8747 goto out_impl_id; 8748 } 8749 if (xprt) { 8750 task_setup_data.rpc_xprt = xprt; 8751 task_setup_data.flags |= RPC_TASK_SOFTCONN; 8752 memcpy(calldata->args.verifier.data, clp->cl_confirm.data, 8753 sizeof(calldata->args.verifier.data)); 8754 } 8755 calldata->args.client = clp; 8756 calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 8757 EXCHGID4_FLAG_BIND_PRINC_STATEID; 8758 #ifdef CONFIG_NFS_V4_1_MIGRATION 8759 calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR; 8760 #endif 8761 if (test_bit(NFS_CS_PNFS, &clp->cl_flags)) 8762 calldata->args.flags |= EXCHGID4_FLAG_USE_PNFS_DS; 8763 msg.rpc_argp = &calldata->args; 8764 msg.rpc_resp = &calldata->res; 8765 task_setup_data.callback_data = calldata; 8766 8767 return rpc_run_task(&task_setup_data); 8768 8769 out_impl_id: 8770 kfree(calldata->res.impl_id); 8771 out_server_scope: 8772 kfree(calldata->res.server_scope); 8773 out_server_owner: 8774 kfree(calldata->res.server_owner); 8775 out_calldata: 8776 kfree(calldata); 8777 out: 8778 nfs_put_client(clp); 8779 return ERR_PTR(status); 8780 } 8781 8782 /* 8783 * _nfs4_proc_exchange_id() 8784 * 8785 * Wrapper for EXCHANGE_ID operation. 8786 */ 8787 static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred, 8788 u32 sp4_how) 8789 { 8790 struct rpc_task *task; 8791 struct nfs41_exchange_id_args *argp; 8792 struct nfs41_exchange_id_res *resp; 8793 unsigned long now = jiffies; 8794 int status; 8795 8796 task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL); 8797 if (IS_ERR(task)) 8798 return PTR_ERR(task); 8799 8800 argp = task->tk_msg.rpc_argp; 8801 resp = task->tk_msg.rpc_resp; 8802 status = task->tk_status; 8803 if (status != 0) 8804 goto out; 8805 8806 status = nfs4_check_cl_exchange_flags(resp->flags, 8807 clp->cl_mvops->minor_version); 8808 if (status != 0) 8809 goto out; 8810 8811 status = nfs4_sp4_select_mode(clp, &resp->state_protect); 8812 if (status != 0) 8813 goto out; 8814 8815 do_renew_lease(clp, now); 8816 8817 clp->cl_clientid = resp->clientid; 8818 clp->cl_exchange_flags = resp->flags; 8819 clp->cl_seqid = resp->seqid; 8820 /* Client ID is not confirmed */ 8821 if (!(resp->flags & EXCHGID4_FLAG_CONFIRMED_R)) 8822 clear_bit(NFS4_SESSION_ESTABLISHED, 8823 &clp->cl_session->session_state); 8824 8825 if (clp->cl_serverscope != NULL && 8826 !nfs41_same_server_scope(clp->cl_serverscope, 8827 resp->server_scope)) { 8828 dprintk("%s: server_scope mismatch detected\n", 8829 __func__); 8830 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); 8831 } 8832 8833 swap(clp->cl_serverowner, resp->server_owner); 8834 swap(clp->cl_serverscope, resp->server_scope); 8835 swap(clp->cl_implid, resp->impl_id); 8836 8837 /* Save the EXCHANGE_ID verifier session trunk tests */ 8838 memcpy(clp->cl_confirm.data, argp->verifier.data, 8839 sizeof(clp->cl_confirm.data)); 8840 out: 8841 trace_nfs4_exchange_id(clp, status); 8842 rpc_put_task(task); 8843 return status; 8844 } 8845 8846 /* 8847 * nfs4_proc_exchange_id() 8848 * 8849 * Returns zero, a negative errno, or a negative NFS4ERR status code. 8850 * 8851 * Since the clientid has expired, all compounds using sessions 8852 * associated with the stale clientid will be returning 8853 * NFS4ERR_BADSESSION in the sequence operation, and will therefore 8854 * be in some phase of session reset. 8855 * 8856 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used. 
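 *
 * (Editorial note, not upstream text: the fallback order is visible below:
 * SP4_MACH_CRED is attempted first on RPC_AUTH_GSS_KRB5I/KRB5P mounts, and
 * any failure simply results in a second EXCHANGE_ID using SP4_NONE.)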
8857 */ 8858 int nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred) 8859 { 8860 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor; 8861 int status; 8862 8863 /* try SP4_MACH_CRED if krb5i/p */ 8864 if (authflavor == RPC_AUTH_GSS_KRB5I || 8865 authflavor == RPC_AUTH_GSS_KRB5P) { 8866 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED); 8867 if (!status) 8868 return 0; 8869 } 8870 8871 /* try SP4_NONE */ 8872 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE); 8873 } 8874 8875 /** 8876 * nfs4_test_session_trunk 8877 * 8878 * This is an add_xprt_test() test function called from 8879 * rpc_clnt_setup_test_and_add_xprt. 8880 * 8881 * The rpc_xprt_switch is referrenced by rpc_clnt_setup_test_and_add_xprt 8882 * and is dereferrenced in nfs4_exchange_id_release 8883 * 8884 * Upon success, add the new transport to the rpc_clnt 8885 * 8886 * @clnt: struct rpc_clnt to get new transport 8887 * @xprt: the rpc_xprt to test 8888 * @data: call data for _nfs4_proc_exchange_id. 8889 */ 8890 void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt, 8891 void *data) 8892 { 8893 struct nfs4_add_xprt_data *adata = data; 8894 struct rpc_task *task; 8895 int status; 8896 8897 u32 sp4_how; 8898 8899 dprintk("--> %s try %s\n", __func__, 8900 xprt->address_strings[RPC_DISPLAY_ADDR]); 8901 8902 sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED); 8903 8904 try_again: 8905 /* Test connection for session trunking. Async exchange_id call */ 8906 task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt); 8907 if (IS_ERR(task)) 8908 return; 8909 8910 status = task->tk_status; 8911 if (status == 0) { 8912 status = nfs4_detect_session_trunking(adata->clp, 8913 task->tk_msg.rpc_resp, xprt); 8914 trace_nfs4_trunked_exchange_id(adata->clp, 8915 xprt->address_strings[RPC_DISPLAY_ADDR], status); 8916 } 8917 if (status == 0) 8918 rpc_clnt_xprt_switch_add_xprt(clnt, xprt); 8919 else if (status != -NFS4ERR_DELAY && rpc_clnt_xprt_switch_has_addr(clnt, 8920 (struct sockaddr *)&xprt->addr)) 8921 rpc_clnt_xprt_switch_remove_xprt(clnt, xprt); 8922 8923 rpc_put_task(task); 8924 if (status == -NFS4ERR_DELAY) { 8925 ssleep(1); 8926 goto try_again; 8927 } 8928 } 8929 EXPORT_SYMBOL_GPL(nfs4_test_session_trunk); 8930 8931 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp, 8932 const struct cred *cred) 8933 { 8934 struct rpc_message msg = { 8935 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID], 8936 .rpc_argp = clp, 8937 .rpc_cred = cred, 8938 }; 8939 int status; 8940 8941 status = rpc_call_sync(clp->cl_rpcclient, &msg, 8942 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 8943 trace_nfs4_destroy_clientid(clp, status); 8944 if (status) 8945 dprintk("NFS: Got error %d from the server %s on " 8946 "DESTROY_CLIENTID.", status, clp->cl_hostname); 8947 return status; 8948 } 8949 8950 static int nfs4_proc_destroy_clientid(struct nfs_client *clp, 8951 const struct cred *cred) 8952 { 8953 unsigned int loop; 8954 int ret; 8955 8956 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) { 8957 ret = _nfs4_proc_destroy_clientid(clp, cred); 8958 switch (ret) { 8959 case -NFS4ERR_DELAY: 8960 case -NFS4ERR_CLIENTID_BUSY: 8961 ssleep(1); 8962 break; 8963 default: 8964 return ret; 8965 } 8966 } 8967 return 0; 8968 } 8969 8970 int nfs4_destroy_clientid(struct nfs_client *clp) 8971 { 8972 const struct cred *cred; 8973 int ret = 0; 8974 8975 if (clp->cl_mvops->minor_version < 1) 8976 goto out; 8977 if (clp->cl_exchange_flags == 0) 8978 goto out; 8979 if 
(clp->cl_preserve_clid) 8980 goto out; 8981 cred = nfs4_get_clid_cred(clp); 8982 ret = nfs4_proc_destroy_clientid(clp, cred); 8983 put_cred(cred); 8984 switch (ret) { 8985 case 0: 8986 case -NFS4ERR_STALE_CLIENTID: 8987 clp->cl_exchange_flags = 0; 8988 } 8989 out: 8990 return ret; 8991 } 8992 8993 struct nfs4_get_lease_time_data { 8994 struct nfs4_get_lease_time_args *args; 8995 struct nfs4_get_lease_time_res *res; 8996 struct nfs_client *clp; 8997 }; 8998 8999 static void nfs4_get_lease_time_prepare(struct rpc_task *task, 9000 void *calldata) 9001 { 9002 struct nfs4_get_lease_time_data *data = 9003 (struct nfs4_get_lease_time_data *)calldata; 9004 9005 /* just setup sequence, do not trigger session recovery 9006 since we're invoked within one */ 9007 nfs4_setup_sequence(data->clp, 9008 &data->args->la_seq_args, 9009 &data->res->lr_seq_res, 9010 task); 9011 } 9012 9013 /* 9014 * Called from nfs4_state_manager thread for session setup, so don't recover 9015 * from sequence operation or clientid errors. 9016 */ 9017 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) 9018 { 9019 struct nfs4_get_lease_time_data *data = 9020 (struct nfs4_get_lease_time_data *)calldata; 9021 9022 if (!nfs4_sequence_done(task, &data->res->lr_seq_res)) 9023 return; 9024 switch (task->tk_status) { 9025 case -NFS4ERR_DELAY: 9026 case -NFS4ERR_GRACE: 9027 rpc_delay(task, NFS4_POLL_RETRY_MIN); 9028 task->tk_status = 0; 9029 fallthrough; 9030 case -NFS4ERR_RETRY_UNCACHED_REP: 9031 rpc_restart_call_prepare(task); 9032 return; 9033 } 9034 } 9035 9036 static const struct rpc_call_ops nfs4_get_lease_time_ops = { 9037 .rpc_call_prepare = nfs4_get_lease_time_prepare, 9038 .rpc_call_done = nfs4_get_lease_time_done, 9039 }; 9040 9041 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) 9042 { 9043 struct nfs4_get_lease_time_args args; 9044 struct nfs4_get_lease_time_res res = { 9045 .lr_fsinfo = fsinfo, 9046 }; 9047 struct nfs4_get_lease_time_data data = { 9048 .args = &args, 9049 .res = &res, 9050 .clp = clp, 9051 }; 9052 struct rpc_message msg = { 9053 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME], 9054 .rpc_argp = &args, 9055 .rpc_resp = &res, 9056 }; 9057 struct rpc_task_setup task_setup = { 9058 .rpc_client = clp->cl_rpcclient, 9059 .rpc_message = &msg, 9060 .callback_ops = &nfs4_get_lease_time_ops, 9061 .callback_data = &data, 9062 .flags = RPC_TASK_TIMEOUT, 9063 }; 9064 9065 nfs4_init_sequence(clp, &args.la_seq_args, &res.lr_seq_res, 0, 1); 9066 return nfs4_call_sync_custom(&task_setup); 9067 } 9068 9069 /* 9070 * Initialize the values to be used by the client in CREATE_SESSION 9071 * If nfs4_init_session set the fore channel request and response sizes, 9072 * use them. 9073 * 9074 * Set the back channel max_resp_sz_cached to zero to force the client to 9075 * always set csa_cachethis to FALSE because the current implementation 9076 * of the back channel DRC only supports caching the CB_SEQUENCE operation. 
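 *
 * (Editorial example, not upstream text: with the usual 1 MB
 * NFS_MAX_FILE_IO_SIZE, the fore channel max_rqst_sz and max_resp_sz
 * advertised below work out to roughly 1 MB plus the fixed
 * nfs41_maxwrite_overhead / nfs41_maxread_overhead that covers the COMPOUND
 * header and the other operations in a WRITE or READ compound.)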
9077 */ 9078 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args, 9079 struct rpc_clnt *clnt) 9080 { 9081 unsigned int max_rqst_sz, max_resp_sz; 9082 unsigned int max_bc_payload = rpc_max_bc_payload(clnt); 9083 unsigned int max_bc_slots = rpc_num_bc_slots(clnt); 9084 9085 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead; 9086 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead; 9087 9088 /* Fore channel attributes */ 9089 args->fc_attrs.max_rqst_sz = max_rqst_sz; 9090 args->fc_attrs.max_resp_sz = max_resp_sz; 9091 args->fc_attrs.max_ops = NFS4_MAX_OPS; 9092 args->fc_attrs.max_reqs = max_session_slots; 9093 9094 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u " 9095 "max_ops=%u max_reqs=%u\n", 9096 __func__, 9097 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz, 9098 args->fc_attrs.max_ops, args->fc_attrs.max_reqs); 9099 9100 /* Back channel attributes */ 9101 args->bc_attrs.max_rqst_sz = max_bc_payload; 9102 args->bc_attrs.max_resp_sz = max_bc_payload; 9103 args->bc_attrs.max_resp_sz_cached = 0; 9104 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS; 9105 args->bc_attrs.max_reqs = max_t(unsigned short, max_session_cb_slots, 1); 9106 if (args->bc_attrs.max_reqs > max_bc_slots) 9107 args->bc_attrs.max_reqs = max_bc_slots; 9108 9109 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u " 9110 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", 9111 __func__, 9112 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz, 9113 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops, 9114 args->bc_attrs.max_reqs); 9115 } 9116 9117 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, 9118 struct nfs41_create_session_res *res) 9119 { 9120 struct nfs4_channel_attrs *sent = &args->fc_attrs; 9121 struct nfs4_channel_attrs *rcvd = &res->fc_attrs; 9122 9123 if (rcvd->max_resp_sz > sent->max_resp_sz) 9124 return -EINVAL; 9125 /* 9126 * Our requested max_ops is the minimum we need; we're not 9127 * prepared to break up compounds into smaller pieces than that. 
9128 * So, no point even trying to continue if the server won't 9129 * cooperate: 9130 */ 9131 if (rcvd->max_ops < sent->max_ops) 9132 return -EINVAL; 9133 if (rcvd->max_reqs == 0) 9134 return -EINVAL; 9135 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE) 9136 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE; 9137 return 0; 9138 } 9139 9140 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, 9141 struct nfs41_create_session_res *res) 9142 { 9143 struct nfs4_channel_attrs *sent = &args->bc_attrs; 9144 struct nfs4_channel_attrs *rcvd = &res->bc_attrs; 9145 9146 if (!(res->flags & SESSION4_BACK_CHAN)) 9147 goto out; 9148 if (rcvd->max_rqst_sz > sent->max_rqst_sz) 9149 return -EINVAL; 9150 if (rcvd->max_resp_sz > sent->max_resp_sz) 9151 return -EINVAL; 9152 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) 9153 return -EINVAL; 9154 if (rcvd->max_ops > sent->max_ops) 9155 return -EINVAL; 9156 if (rcvd->max_reqs > sent->max_reqs) 9157 return -EINVAL; 9158 out: 9159 return 0; 9160 } 9161 9162 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args, 9163 struct nfs41_create_session_res *res) 9164 { 9165 int ret; 9166 9167 ret = nfs4_verify_fore_channel_attrs(args, res); 9168 if (ret) 9169 return ret; 9170 return nfs4_verify_back_channel_attrs(args, res); 9171 } 9172 9173 static void nfs4_update_session(struct nfs4_session *session, 9174 struct nfs41_create_session_res *res) 9175 { 9176 nfs4_copy_sessionid(&session->sess_id, &res->sessionid); 9177 /* Mark client id and session as being confirmed */ 9178 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; 9179 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state); 9180 session->flags = res->flags; 9181 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs)); 9182 if (res->flags & SESSION4_BACK_CHAN) 9183 memcpy(&session->bc_attrs, &res->bc_attrs, 9184 sizeof(session->bc_attrs)); 9185 } 9186 9187 static int _nfs4_proc_create_session(struct nfs_client *clp, 9188 const struct cred *cred) 9189 { 9190 struct nfs4_session *session = clp->cl_session; 9191 struct nfs41_create_session_args args = { 9192 .client = clp, 9193 .clientid = clp->cl_clientid, 9194 .seqid = clp->cl_seqid, 9195 .cb_program = NFS4_CALLBACK, 9196 }; 9197 struct nfs41_create_session_res res; 9198 9199 struct rpc_message msg = { 9200 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION], 9201 .rpc_argp = &args, 9202 .rpc_resp = &res, 9203 .rpc_cred = cred, 9204 }; 9205 int status; 9206 9207 nfs4_init_channel_attrs(&args, clp->cl_rpcclient); 9208 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN); 9209 9210 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 9211 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 9212 trace_nfs4_create_session(clp, status); 9213 9214 switch (status) { 9215 case -NFS4ERR_STALE_CLIENTID: 9216 case -NFS4ERR_DELAY: 9217 case -ETIMEDOUT: 9218 case -EACCES: 9219 case -EAGAIN: 9220 goto out; 9221 } 9222 9223 clp->cl_seqid++; 9224 if (!status) { 9225 /* Verify the session's negotiated channel_attrs values */ 9226 status = nfs4_verify_channel_attrs(&args, &res); 9227 /* Increment the clientid slot sequence id */ 9228 if (status) 9229 goto out; 9230 nfs4_update_session(session, &res); 9231 } 9232 out: 9233 return status; 9234 } 9235 9236 /* 9237 * Issues a CREATE_SESSION operation to the server. 9238 * It is the responsibility of the caller to verify the session is 9239 * expired before calling this routine. 
9240 */ 9241 int nfs4_proc_create_session(struct nfs_client *clp, const struct cred *cred) 9242 { 9243 int status; 9244 unsigned *ptr; 9245 struct nfs4_session *session = clp->cl_session; 9246 struct nfs4_add_xprt_data xprtdata = { 9247 .clp = clp, 9248 }; 9249 struct rpc_add_xprt_test rpcdata = { 9250 .add_xprt_test = clp->cl_mvops->session_trunk, 9251 .data = &xprtdata, 9252 }; 9253 9254 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); 9255 9256 status = _nfs4_proc_create_session(clp, cred); 9257 if (status) 9258 goto out; 9259 9260 /* Init or reset the session slot tables */ 9261 status = nfs4_setup_session_slot_tables(session); 9262 dprintk("slot table setup returned %d\n", status); 9263 if (status) 9264 goto out; 9265 9266 ptr = (unsigned *)&session->sess_id.data[0]; 9267 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__, 9268 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]); 9269 rpc_clnt_probe_trunked_xprts(clp->cl_rpcclient, &rpcdata); 9270 out: 9271 return status; 9272 } 9273 9274 /* 9275 * Issue the over-the-wire RPC DESTROY_SESSION. 9276 * The caller must serialize access to this routine. 9277 */ 9278 int nfs4_proc_destroy_session(struct nfs4_session *session, 9279 const struct cred *cred) 9280 { 9281 struct rpc_message msg = { 9282 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION], 9283 .rpc_argp = session, 9284 .rpc_cred = cred, 9285 }; 9286 int status = 0; 9287 9288 /* session is still being setup */ 9289 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state)) 9290 return 0; 9291 9292 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 9293 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 9294 trace_nfs4_destroy_session(session->clp, status); 9295 9296 if (status) 9297 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. " 9298 "Session has been destroyed regardless...\n", status); 9299 rpc_clnt_manage_trunked_xprts(session->clp->cl_rpcclient); 9300 return status; 9301 } 9302 9303 /* 9304 * Renew the cl_session lease. 
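 *
 * (Editorial note, not upstream text: for NFSv4.1+ a bare SEQUENCE compound
 * is what renews the lease; the state renewal machinery invokes
 * nfs41_proc_async_sequence() below on the renewal timeout, and any error
 * other than NFS4ERR_DELAY schedules lease recovery.)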
9305 */ 9306 struct nfs4_sequence_data { 9307 struct nfs_client *clp; 9308 struct nfs4_sequence_args args; 9309 struct nfs4_sequence_res res; 9310 }; 9311 9312 static void nfs41_sequence_release(void *data) 9313 { 9314 struct nfs4_sequence_data *calldata = data; 9315 struct nfs_client *clp = calldata->clp; 9316 9317 if (refcount_read(&clp->cl_count) > 1) 9318 nfs4_schedule_state_renewal(clp); 9319 nfs_put_client(clp); 9320 kfree(calldata); 9321 } 9322 9323 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp) 9324 { 9325 switch(task->tk_status) { 9326 case -NFS4ERR_DELAY: 9327 rpc_delay(task, NFS4_POLL_RETRY_MAX); 9328 return -EAGAIN; 9329 default: 9330 nfs4_schedule_lease_recovery(clp); 9331 } 9332 return 0; 9333 } 9334 9335 static void nfs41_sequence_call_done(struct rpc_task *task, void *data) 9336 { 9337 struct nfs4_sequence_data *calldata = data; 9338 struct nfs_client *clp = calldata->clp; 9339 9340 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp)) 9341 return; 9342 9343 trace_nfs4_sequence(clp, task->tk_status); 9344 if (task->tk_status < 0 && clp->cl_cons_state >= 0) { 9345 dprintk("%s ERROR %d\n", __func__, task->tk_status); 9346 if (refcount_read(&clp->cl_count) == 1) 9347 return; 9348 9349 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) { 9350 rpc_restart_call_prepare(task); 9351 return; 9352 } 9353 } 9354 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); 9355 } 9356 9357 static void nfs41_sequence_prepare(struct rpc_task *task, void *data) 9358 { 9359 struct nfs4_sequence_data *calldata = data; 9360 struct nfs_client *clp = calldata->clp; 9361 struct nfs4_sequence_args *args; 9362 struct nfs4_sequence_res *res; 9363 9364 args = task->tk_msg.rpc_argp; 9365 res = task->tk_msg.rpc_resp; 9366 9367 nfs4_setup_sequence(clp, args, res, task); 9368 } 9369 9370 static const struct rpc_call_ops nfs41_sequence_ops = { 9371 .rpc_call_done = nfs41_sequence_call_done, 9372 .rpc_call_prepare = nfs41_sequence_prepare, 9373 .rpc_release = nfs41_sequence_release, 9374 }; 9375 9376 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, 9377 const struct cred *cred, 9378 struct nfs4_slot *slot, 9379 bool is_privileged) 9380 { 9381 struct nfs4_sequence_data *calldata; 9382 struct rpc_message msg = { 9383 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE], 9384 .rpc_cred = cred, 9385 }; 9386 struct rpc_task_setup task_setup_data = { 9387 .rpc_client = clp->cl_rpcclient, 9388 .rpc_message = &msg, 9389 .callback_ops = &nfs41_sequence_ops, 9390 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT | RPC_TASK_MOVEABLE, 9391 }; 9392 struct rpc_task *ret; 9393 9394 ret = ERR_PTR(-EIO); 9395 if (!refcount_inc_not_zero(&clp->cl_count)) 9396 goto out_err; 9397 9398 ret = ERR_PTR(-ENOMEM); 9399 calldata = kzalloc(sizeof(*calldata), GFP_KERNEL); 9400 if (calldata == NULL) 9401 goto out_put_clp; 9402 nfs4_init_sequence(clp, &calldata->args, &calldata->res, 0, is_privileged); 9403 nfs4_sequence_attach_slot(&calldata->args, &calldata->res, slot); 9404 msg.rpc_argp = &calldata->args; 9405 msg.rpc_resp = &calldata->res; 9406 calldata->clp = clp; 9407 task_setup_data.callback_data = calldata; 9408 9409 ret = rpc_run_task(&task_setup_data); 9410 if (IS_ERR(ret)) 9411 goto out_err; 9412 return ret; 9413 out_put_clp: 9414 nfs_put_client(clp); 9415 out_err: 9416 nfs41_release_slot(slot); 9417 return ret; 9418 } 9419 9420 static int nfs41_proc_async_sequence(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags) 9421 { 9422 struct rpc_task 
*task; 9423 int ret = 0; 9424 9425 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) 9426 return -EAGAIN; 9427 task = _nfs41_proc_sequence(clp, cred, NULL, false); 9428 if (IS_ERR(task)) 9429 ret = PTR_ERR(task); 9430 else 9431 rpc_put_task_async(task); 9432 dprintk("<-- %s status=%d\n", __func__, ret); 9433 return ret; 9434 } 9435 9436 static int nfs4_proc_sequence(struct nfs_client *clp, const struct cred *cred) 9437 { 9438 struct rpc_task *task; 9439 int ret; 9440 9441 task = _nfs41_proc_sequence(clp, cred, NULL, true); 9442 if (IS_ERR(task)) { 9443 ret = PTR_ERR(task); 9444 goto out; 9445 } 9446 ret = rpc_wait_for_completion_task(task); 9447 if (!ret) 9448 ret = task->tk_status; 9449 rpc_put_task(task); 9450 out: 9451 dprintk("<-- %s status=%d\n", __func__, ret); 9452 return ret; 9453 } 9454 9455 struct nfs4_reclaim_complete_data { 9456 struct nfs_client *clp; 9457 struct nfs41_reclaim_complete_args arg; 9458 struct nfs41_reclaim_complete_res res; 9459 }; 9460 9461 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data) 9462 { 9463 struct nfs4_reclaim_complete_data *calldata = data; 9464 9465 nfs4_setup_sequence(calldata->clp, 9466 &calldata->arg.seq_args, 9467 &calldata->res.seq_res, 9468 task); 9469 } 9470 9471 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp) 9472 { 9473 switch(task->tk_status) { 9474 case 0: 9475 wake_up_all(&clp->cl_lock_waitq); 9476 fallthrough; 9477 case -NFS4ERR_COMPLETE_ALREADY: 9478 case -NFS4ERR_WRONG_CRED: /* What to do here? */ 9479 break; 9480 case -NFS4ERR_DELAY: 9481 rpc_delay(task, NFS4_POLL_RETRY_MAX); 9482 fallthrough; 9483 case -NFS4ERR_RETRY_UNCACHED_REP: 9484 case -EACCES: 9485 dprintk("%s: failed to reclaim complete error %d for server %s, retrying\n", 9486 __func__, task->tk_status, clp->cl_hostname); 9487 return -EAGAIN; 9488 case -NFS4ERR_BADSESSION: 9489 case -NFS4ERR_DEADSESSION: 9490 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 9491 break; 9492 default: 9493 nfs4_schedule_lease_recovery(clp); 9494 } 9495 return 0; 9496 } 9497 9498 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data) 9499 { 9500 struct nfs4_reclaim_complete_data *calldata = data; 9501 struct nfs_client *clp = calldata->clp; 9502 struct nfs4_sequence_res *res = &calldata->res.seq_res; 9503 9504 if (!nfs41_sequence_done(task, res)) 9505 return; 9506 9507 trace_nfs4_reclaim_complete(clp, task->tk_status); 9508 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) { 9509 rpc_restart_call_prepare(task); 9510 return; 9511 } 9512 } 9513 9514 static void nfs4_free_reclaim_complete_data(void *data) 9515 { 9516 struct nfs4_reclaim_complete_data *calldata = data; 9517 9518 kfree(calldata); 9519 } 9520 9521 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = { 9522 .rpc_call_prepare = nfs4_reclaim_complete_prepare, 9523 .rpc_call_done = nfs4_reclaim_complete_done, 9524 .rpc_release = nfs4_free_reclaim_complete_data, 9525 }; 9526 9527 /* 9528 * Issue a global reclaim complete. 
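 *
 * (Editorial note, not upstream text: "global" means rca_one_fs is FALSE,
 * see calldata->arg.one_fs = 0 below, i.e. the client declares state reclaim
 * finished for the whole client ID after server reboot recovery rather than
 * for a single migrated filesystem.)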
9529 */ 9530 static int nfs41_proc_reclaim_complete(struct nfs_client *clp, 9531 const struct cred *cred) 9532 { 9533 struct nfs4_reclaim_complete_data *calldata; 9534 struct rpc_message msg = { 9535 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE], 9536 .rpc_cred = cred, 9537 }; 9538 struct rpc_task_setup task_setup_data = { 9539 .rpc_client = clp->cl_rpcclient, 9540 .rpc_message = &msg, 9541 .callback_ops = &nfs4_reclaim_complete_call_ops, 9542 .flags = RPC_TASK_NO_ROUND_ROBIN, 9543 }; 9544 int status = -ENOMEM; 9545 9546 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 9547 if (calldata == NULL) 9548 goto out; 9549 calldata->clp = clp; 9550 calldata->arg.one_fs = 0; 9551 9552 nfs4_init_sequence(clp, &calldata->arg.seq_args, &calldata->res.seq_res, 0, 1); 9553 msg.rpc_argp = &calldata->arg; 9554 msg.rpc_resp = &calldata->res; 9555 task_setup_data.callback_data = calldata; 9556 status = nfs4_call_sync_custom(&task_setup_data); 9557 out: 9558 dprintk("<-- %s status=%d\n", __func__, status); 9559 return status; 9560 } 9561 9562 static void 9563 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata) 9564 { 9565 struct nfs4_layoutget *lgp = calldata; 9566 struct nfs_server *server = NFS_SERVER(lgp->args.inode); 9567 9568 nfs4_setup_sequence(server->nfs_client, &lgp->args.seq_args, 9569 &lgp->res.seq_res, task); 9570 } 9571 9572 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) 9573 { 9574 struct nfs4_layoutget *lgp = calldata; 9575 9576 nfs41_sequence_process(task, &lgp->res.seq_res); 9577 } 9578 9579 static int 9580 nfs4_layoutget_handle_exception(struct rpc_task *task, 9581 struct nfs4_layoutget *lgp, struct nfs4_exception *exception) 9582 { 9583 struct inode *inode = lgp->args.inode; 9584 struct nfs_server *server = NFS_SERVER(inode); 9585 struct pnfs_layout_hdr *lo = lgp->lo; 9586 int nfs4err = task->tk_status; 9587 int err, status = 0; 9588 LIST_HEAD(head); 9589 9590 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status); 9591 9592 nfs4_sequence_free_slot(&lgp->res.seq_res); 9593 9594 exception->state = NULL; 9595 exception->stateid = NULL; 9596 9597 switch (nfs4err) { 9598 case 0: 9599 goto out; 9600 9601 /* 9602 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs 9603 * on the file. Set tk_status to -ENODATA to tell the upper layer to 9604 * retry the I/O in-band through the MDS. 9605 */ 9606 case -NFS4ERR_LAYOUTUNAVAILABLE: 9607 status = -ENODATA; 9608 goto out; 9609 /* 9610 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of 9611 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3). 9612 */ 9613 case -NFS4ERR_BADLAYOUT: 9614 status = -EOVERFLOW; 9615 goto out; 9616 /* 9617 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client 9618 * (or clients) writing to the same RAID stripe except when 9619 * the minlength argument is 0 (see RFC5661 section 18.43.3). 9620 * 9621 * Treat it like we would RECALLCONFLICT -- we retry for a little 9622 * while, and then eventually give up. 9623 */ 9624 case -NFS4ERR_LAYOUTTRYLATER: 9625 if (lgp->args.minlength == 0) { 9626 status = -EOVERFLOW; 9627 goto out; 9628 } 9629 status = -EBUSY; 9630 break; 9631 case -NFS4ERR_RECALLCONFLICT: 9632 case -NFS4ERR_RETURNCONFLICT: 9633 status = -ERECALLCONFLICT; 9634 break; 9635 case -NFS4ERR_DELEG_REVOKED: 9636 case -NFS4ERR_ADMIN_REVOKED: 9637 case -NFS4ERR_EXPIRED: 9638 case -NFS4ERR_BAD_STATEID: 9639 exception->timeout = 0; 9640 spin_lock(&inode->i_lock); 9641 /* If the open stateid was bad, then recover it.
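 *
 * If there is no layout header, the layout stateid has already been marked
 * invalid, or the stateid we sent no longer matches the layout's current
 * stateid, hand the open stateid back to the caller through
 * exception->state / exception->stateid so that state recovery can run.
 * Otherwise the layout stateid itself is stale: invalidate it below and
 * return -EAGAIN so the LAYOUTGET is retried.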
*/ 9642 if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) || 9643 !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) { 9644 spin_unlock(&inode->i_lock); 9645 exception->state = lgp->args.ctx->state; 9646 exception->stateid = &lgp->args.stateid; 9647 break; 9648 } 9649 9650 /* 9651 * Mark the bad layout state as invalid, then retry 9652 */ 9653 pnfs_mark_layout_stateid_invalid(lo, &head); 9654 spin_unlock(&inode->i_lock); 9655 nfs_commit_inode(inode, 0); 9656 pnfs_free_lseg_list(&head); 9657 status = -EAGAIN; 9658 goto out; 9659 } 9660 9661 err = nfs4_handle_exception(server, nfs4err, exception); 9662 if (!status) { 9663 if (exception->retry) 9664 status = -EAGAIN; 9665 else 9666 status = err; 9667 } 9668 out: 9669 return status; 9670 } 9671 9672 size_t max_response_pages(struct nfs_server *server) 9673 { 9674 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 9675 return nfs_page_array_len(0, max_resp_sz); 9676 } 9677 9678 static void nfs4_layoutget_release(void *calldata) 9679 { 9680 struct nfs4_layoutget *lgp = calldata; 9681 9682 nfs4_sequence_free_slot(&lgp->res.seq_res); 9683 pnfs_layoutget_free(lgp); 9684 } 9685 9686 static const struct rpc_call_ops nfs4_layoutget_call_ops = { 9687 .rpc_call_prepare = nfs4_layoutget_prepare, 9688 .rpc_call_done = nfs4_layoutget_done, 9689 .rpc_release = nfs4_layoutget_release, 9690 }; 9691 9692 struct pnfs_layout_segment * 9693 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, 9694 struct nfs4_exception *exception) 9695 { 9696 struct inode *inode = lgp->args.inode; 9697 struct nfs_server *server = NFS_SERVER(inode); 9698 struct rpc_task *task; 9699 struct rpc_message msg = { 9700 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET], 9701 .rpc_argp = &lgp->args, 9702 .rpc_resp = &lgp->res, 9703 .rpc_cred = lgp->cred, 9704 }; 9705 struct rpc_task_setup task_setup_data = { 9706 .rpc_client = server->client, 9707 .rpc_message = &msg, 9708 .callback_ops = &nfs4_layoutget_call_ops, 9709 .callback_data = lgp, 9710 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF | 9711 RPC_TASK_MOVEABLE, 9712 }; 9713 struct pnfs_layout_segment *lseg = NULL; 9714 int status = 0; 9715 9716 nfs4_init_sequence(server->nfs_client, &lgp->args.seq_args, 9717 &lgp->res.seq_res, 0, 0); 9718 exception->retry = 0; 9719 9720 task = rpc_run_task(&task_setup_data); 9721 if (IS_ERR(task)) 9722 return ERR_CAST(task); 9723 9724 status = rpc_wait_for_completion_task(task); 9725 if (status != 0) 9726 goto out; 9727 9728 if (task->tk_status < 0) { 9729 exception->retry = 1; 9730 status = nfs4_layoutget_handle_exception(task, lgp, exception); 9731 } else if (lgp->res.layoutp->len == 0) { 9732 exception->retry = 1; 9733 status = -EAGAIN; 9734 nfs4_update_delay(&exception->timeout); 9735 } else 9736 lseg = pnfs_layout_process(lgp); 9737 out: 9738 trace_nfs4_layoutget(lgp->args.ctx, 9739 &lgp->args.range, 9740 &lgp->res.range, 9741 &lgp->res.stateid, 9742 status); 9743 9744 rpc_put_task(task); 9745 dprintk("<-- %s status=%d\n", __func__, status); 9746 if (status) 9747 return ERR_PTR(status); 9748 return lseg; 9749 } 9750 9751 static void 9752 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) 9753 { 9754 struct nfs4_layoutreturn *lrp = calldata; 9755 9756 nfs4_setup_sequence(lrp->clp, 9757 &lrp->args.seq_args, 9758 &lrp->res.seq_res, 9759 task); 9760 if (!pnfs_layout_is_valid(lrp->args.layout)) 9761 rpc_exit(task, 0); 9762 } 9763 9764 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) 9765 { 9766 struct 
nfs4_layoutreturn *lrp = calldata; 9767 struct nfs_server *server; 9768 9769 if (!nfs41_sequence_process(task, &lrp->res.seq_res)) 9770 return; 9771 9772 if (task->tk_rpc_status == -ETIMEDOUT) { 9773 lrp->rpc_status = -EAGAIN; 9774 lrp->res.lrs_present = 0; 9775 return; 9776 } 9777 /* 9778 * Was there an RPC level error? Assume the call succeeded, 9779 * and that we need to release the layout 9780 */ 9781 if (task->tk_rpc_status != 0 && RPC_WAS_SENT(task)) { 9782 lrp->res.lrs_present = 0; 9783 return; 9784 } 9785 9786 server = NFS_SERVER(lrp->args.inode); 9787 switch (task->tk_status) { 9788 case -NFS4ERR_OLD_STATEID: 9789 if (nfs4_layout_refresh_old_stateid(&lrp->args.stateid, 9790 &lrp->args.range, 9791 lrp->args.inode)) 9792 goto out_restart; 9793 fallthrough; 9794 default: 9795 task->tk_status = 0; 9796 lrp->res.lrs_present = 0; 9797 fallthrough; 9798 case 0: 9799 break; 9800 case -NFS4ERR_BADSESSION: 9801 case -NFS4ERR_DEADSESSION: 9802 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 9803 nfs4_schedule_session_recovery(server->nfs_client->cl_session, 9804 task->tk_status); 9805 lrp->res.lrs_present = 0; 9806 lrp->rpc_status = -EAGAIN; 9807 task->tk_status = 0; 9808 break; 9809 case -NFS4ERR_DELAY: 9810 if (nfs4_async_handle_error(task, server, NULL, NULL) == 9811 -EAGAIN) 9812 goto out_restart; 9813 lrp->res.lrs_present = 0; 9814 break; 9815 } 9816 return; 9817 out_restart: 9818 task->tk_status = 0; 9819 nfs4_sequence_free_slot(&lrp->res.seq_res); 9820 rpc_restart_call_prepare(task); 9821 } 9822 9823 static void nfs4_layoutreturn_release(void *calldata) 9824 { 9825 struct nfs4_layoutreturn *lrp = calldata; 9826 struct pnfs_layout_hdr *lo = lrp->args.layout; 9827 9828 if (lrp->rpc_status == 0 || !lrp->inode) 9829 pnfs_layoutreturn_free_lsegs( 9830 lo, &lrp->args.stateid, &lrp->args.range, 9831 lrp->res.lrs_present ? 
&lrp->res.stateid : NULL); 9832 else 9833 pnfs_layoutreturn_retry_later(lo, &lrp->args.stateid, 9834 &lrp->args.range); 9835 nfs4_sequence_free_slot(&lrp->res.seq_res); 9836 if (lrp->ld_private.ops && lrp->ld_private.ops->free) 9837 lrp->ld_private.ops->free(&lrp->ld_private); 9838 pnfs_put_layout_hdr(lrp->args.layout); 9839 nfs_iput_and_deactive(lrp->inode); 9840 put_cred(lrp->cred); 9841 kfree(calldata); 9842 } 9843 9844 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = { 9845 .rpc_call_prepare = nfs4_layoutreturn_prepare, 9846 .rpc_call_done = nfs4_layoutreturn_done, 9847 .rpc_release = nfs4_layoutreturn_release, 9848 }; 9849 9850 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, unsigned int flags) 9851 { 9852 struct nfs_client *clp = NFS_SERVER(lrp->args.inode)->nfs_client; 9853 struct rpc_task *task; 9854 struct rpc_message msg = { 9855 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN], 9856 .rpc_argp = &lrp->args, 9857 .rpc_resp = &lrp->res, 9858 .rpc_cred = lrp->cred, 9859 }; 9860 struct rpc_task_setup task_setup_data = { 9861 .rpc_client = NFS_SERVER(lrp->args.inode)->client, 9862 .rpc_message = &msg, 9863 .callback_ops = &nfs4_layoutreturn_call_ops, 9864 .callback_data = lrp, 9865 .flags = RPC_TASK_MOVEABLE, 9866 }; 9867 int status = 0; 9868 9869 nfs4_state_protect(clp, NFS_SP4_MACH_CRED_PNFS_CLEANUP, 9870 &task_setup_data.rpc_client, &msg); 9871 9872 lrp->inode = nfs_igrab_and_active(lrp->args.inode); 9873 if (flags & PNFS_FL_LAYOUTRETURN_ASYNC) { 9874 if (!lrp->inode) { 9875 nfs4_layoutreturn_release(lrp); 9876 return -EAGAIN; 9877 } 9878 task_setup_data.flags |= RPC_TASK_ASYNC; 9879 } 9880 if (!lrp->inode) 9881 flags |= PNFS_FL_LAYOUTRETURN_PRIVILEGED; 9882 9883 nfs4_init_sequence(clp, &lrp->args.seq_args, &lrp->res.seq_res, 1, 9884 flags & PNFS_FL_LAYOUTRETURN_PRIVILEGED ? 
1 : 0); 9885 task = rpc_run_task(&task_setup_data); 9886 if (IS_ERR(task)) 9887 return PTR_ERR(task); 9888 if (!(flags & PNFS_FL_LAYOUTRETURN_ASYNC)) 9889 status = task->tk_status; 9890 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status); 9891 dprintk("<-- %s status=%d\n", __func__, status); 9892 rpc_put_task(task); 9893 return status; 9894 } 9895 9896 static int 9897 _nfs4_proc_getdeviceinfo(struct nfs_server *server, 9898 struct pnfs_device *pdev, 9899 const struct cred *cred) 9900 { 9901 struct nfs4_getdeviceinfo_args args = { 9902 .pdev = pdev, 9903 .notify_types = NOTIFY_DEVICEID4_CHANGE | 9904 NOTIFY_DEVICEID4_DELETE, 9905 }; 9906 struct nfs4_getdeviceinfo_res res = { 9907 .pdev = pdev, 9908 }; 9909 struct rpc_message msg = { 9910 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO], 9911 .rpc_argp = &args, 9912 .rpc_resp = &res, 9913 .rpc_cred = cred, 9914 }; 9915 int status; 9916 9917 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 9918 if (res.notification & ~args.notify_types) 9919 dprintk("%s: unsupported notification\n", __func__); 9920 if (res.notification != args.notify_types) 9921 pdev->nocache = 1; 9922 9923 trace_nfs4_getdeviceinfo(server, &pdev->dev_id, status); 9924 9925 dprintk("<-- %s status=%d\n", __func__, status); 9926 9927 return status; 9928 } 9929 9930 int nfs4_proc_getdeviceinfo(struct nfs_server *server, 9931 struct pnfs_device *pdev, 9932 const struct cred *cred) 9933 { 9934 struct nfs4_exception exception = { }; 9935 int err; 9936 9937 do { 9938 err = nfs4_handle_exception(server, 9939 _nfs4_proc_getdeviceinfo(server, pdev, cred), 9940 &exception); 9941 } while (exception.retry); 9942 return err; 9943 } 9944 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo); 9945 9946 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata) 9947 { 9948 struct nfs4_layoutcommit_data *data = calldata; 9949 struct nfs_server *server = NFS_SERVER(data->args.inode); 9950 9951 nfs4_setup_sequence(server->nfs_client, 9952 &data->args.seq_args, 9953 &data->res.seq_res, 9954 task); 9955 } 9956 9957 static void 9958 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata) 9959 { 9960 struct nfs4_layoutcommit_data *data = calldata; 9961 struct nfs_server *server = NFS_SERVER(data->args.inode); 9962 9963 if (!nfs41_sequence_done(task, &data->res.seq_res)) 9964 return; 9965 9966 switch (task->tk_status) { /* Just ignore these failures */ 9967 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */ 9968 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */ 9969 case -NFS4ERR_BADLAYOUT: /* no layout */ 9970 case -NFS4ERR_GRACE: /* loca_reclaim is always false */ 9971 task->tk_status = 0; 9972 break; 9973 case 0: 9974 break; 9975 default: 9976 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) { 9977 rpc_restart_call_prepare(task); 9978 return; 9979 } 9980 } 9981 } 9982 9983 static void nfs4_layoutcommit_release(void *calldata) 9984 { 9985 struct nfs4_layoutcommit_data *data = calldata; 9986 9987 pnfs_cleanup_layoutcommit(data); 9988 nfs_post_op_update_inode_force_wcc(data->args.inode, 9989 data->res.fattr); 9990 put_cred(data->cred); 9991 nfs_iput_and_deactive(data->inode); 9992 kfree(data); 9993 } 9994 9995 static const struct rpc_call_ops nfs4_layoutcommit_ops = { 9996 .rpc_call_prepare = nfs4_layoutcommit_prepare, 9997 .rpc_call_done = nfs4_layoutcommit_done, 9998 .rpc_release = nfs4_layoutcommit_release, 9999 }; 10000 10001 int 10002 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data
*data, bool sync) 10003 { 10004 struct rpc_message msg = { 10005 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT], 10006 .rpc_argp = &data->args, 10007 .rpc_resp = &data->res, 10008 .rpc_cred = data->cred, 10009 }; 10010 struct rpc_task_setup task_setup_data = { 10011 .task = &data->task, 10012 .rpc_client = NFS_CLIENT(data->args.inode), 10013 .rpc_message = &msg, 10014 .callback_ops = &nfs4_layoutcommit_ops, 10015 .callback_data = data, 10016 .flags = RPC_TASK_MOVEABLE, 10017 }; 10018 struct rpc_task *task; 10019 int status = 0; 10020 10021 dprintk("NFS: initiating layoutcommit call. sync %d " 10022 "lbw: %llu inode %lu\n", sync, 10023 data->args.lastbytewritten, 10024 data->args.inode->i_ino); 10025 10026 if (!sync) { 10027 data->inode = nfs_igrab_and_active(data->args.inode); 10028 if (data->inode == NULL) { 10029 nfs4_layoutcommit_release(data); 10030 return -EAGAIN; 10031 } 10032 task_setup_data.flags = RPC_TASK_ASYNC; 10033 } 10034 nfs4_init_sequence(NFS_SERVER(data->args.inode)->nfs_client, 10035 &data->args.seq_args, &data->res.seq_res, 1, 0); 10036 task = rpc_run_task(&task_setup_data); 10037 if (IS_ERR(task)) 10038 return PTR_ERR(task); 10039 if (sync) 10040 status = task->tk_status; 10041 trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status); 10042 dprintk("%s: status %d\n", __func__, status); 10043 rpc_put_task(task); 10044 return status; 10045 } 10046 10047 /* 10048 * Use the state management nfs_client cl_rpcclient, which uses krb5i (if 10049 * possible) as per RFC3530bis and RFC5661 Security Considerations sections 10050 */ 10051 static int _nfs41_proc_secinfo_no_name(struct nfs_server *server, 10052 struct nfs_fh *fhandle, 10053 struct nfs4_secinfo_flavors *flavors, 10054 bool use_integrity) 10055 { 10056 struct nfs_client *clp = server->nfs_client; 10057 struct nfs41_secinfo_no_name_args args = { 10058 .style = SECINFO_STYLE_CURRENT_FH, 10059 }; 10060 struct nfs4_secinfo_res res = { 10061 .flavors = flavors, 10062 }; 10063 struct rpc_message msg = { 10064 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME], 10065 .rpc_argp = &args, 10066 .rpc_resp = &res, 10067 }; 10068 struct nfs4_call_sync_data data = { 10069 .seq_server = server, 10070 .seq_args = &args.seq_args, 10071 .seq_res = &res.seq_res, 10072 }; 10073 struct rpc_task_setup task_setup = { 10074 .rpc_client = server->client, 10075 .rpc_message = &msg, 10076 .callback_ops = clp->cl_mvops->call_sync_ops, 10077 .callback_data = &data, 10078 .flags = RPC_TASK_NO_ROUND_ROBIN, 10079 }; 10080 const struct cred *cred = NULL; 10081 int status; 10082 10083 if (use_integrity) { 10084 task_setup.rpc_client = clp->cl_rpcclient; 10085 10086 cred = nfs4_get_clid_cred(clp); 10087 msg.rpc_cred = cred; 10088 } 10089 10090 nfs4_init_sequence(clp, &args.seq_args, &res.seq_res, 0, 0); 10091 status = nfs4_call_sync_custom(&task_setup); 10092 dprintk("<-- %s status=%d\n", __func__, status); 10093 10094 put_cred(cred); 10095 10096 return status; 10097 } 10098 10099 static int nfs41_proc_secinfo_no_name(struct nfs_server *server, 10100 struct nfs_fh *fhandle, 10101 struct nfs4_secinfo_flavors *flavors) 10102 { 10103 struct nfs4_exception exception = { 10104 .interruptible = true, 10105 }; 10106 int err; 10107 do { 10108 /* first try using integrity protection */ 10109 err = -NFS4ERR_WRONGSEC; 10110 10111 /* try to use integrity protection with machine cred */ 10112 if (_nfs4_is_integrity_protected(server->nfs_client)) 10113 err = _nfs41_proc_secinfo_no_name(server, fhandle, 10114 flavors, true); 10115 10116
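		/*
		 * Note: err was primed to -NFS4ERR_WRONGSEC above, so the
		 * fallback below also runs when the client has no
		 * integrity-protected transport to try in the first place.
		 */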
/* 10117 * if unable to use integrity protection, or SECINFO with 10118 * integrity protection returns NFS4ERR_WRONGSEC (which is 10119 * disallowed by spec, but exists in deployed servers) use 10120 * the current filesystem's rpc_client and the user cred. 10121 */ 10122 if (err == -NFS4ERR_WRONGSEC) 10123 err = _nfs41_proc_secinfo_no_name(server, fhandle, 10124 flavors, false); 10125 10126 switch (err) { 10127 case 0: 10128 case -NFS4ERR_WRONGSEC: 10129 case -ENOTSUPP: 10130 goto out; 10131 default: 10132 err = nfs4_handle_exception(server, err, &exception); 10133 } 10134 } while (exception.retry); 10135 out: 10136 return err; 10137 } 10138 10139 static int nfs41_find_root_sec(struct nfs_server *server, 10140 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 10141 { 10142 int err; 10143 struct page *page; 10144 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR; 10145 struct nfs4_secinfo_flavors *flavors; 10146 struct nfs4_secinfo4 *secinfo; 10147 int i; 10148 10149 page = alloc_page(GFP_KERNEL); 10150 if (!page) { 10151 err = -ENOMEM; 10152 goto out; 10153 } 10154 10155 flavors = page_address(page); 10156 err = nfs41_proc_secinfo_no_name(server, fhandle, flavors); 10157 10158 /* 10159 * Fall back on "guess and check" method if 10160 * the server doesn't support SECINFO_NO_NAME 10161 */ 10162 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) { 10163 err = nfs4_find_root_sec(server, fhandle, fattr); 10164 goto out_freepage; 10165 } 10166 if (err) 10167 goto out_freepage; 10168 10169 for (i = 0; i < flavors->num_flavors; i++) { 10170 secinfo = &flavors->flavors[i]; 10171 10172 switch (secinfo->flavor) { 10173 case RPC_AUTH_NULL: 10174 case RPC_AUTH_UNIX: 10175 case RPC_AUTH_GSS: 10176 flavor = rpcauth_get_pseudoflavor(secinfo->flavor, 10177 &secinfo->flavor_info); 10178 break; 10179 default: 10180 flavor = RPC_AUTH_MAXFLAVOR; 10181 break; 10182 } 10183 10184 if (!nfs_auth_info_match(&server->auth_info, flavor)) 10185 flavor = RPC_AUTH_MAXFLAVOR; 10186 10187 if (flavor != RPC_AUTH_MAXFLAVOR) { 10188 err = nfs4_lookup_root_sec(server, fhandle, fattr, 10189 flavor); 10190 if (!err) 10191 break; 10192 } 10193 } 10194 10195 if (flavor == RPC_AUTH_MAXFLAVOR) 10196 err = -EPERM; 10197 10198 out_freepage: 10199 put_page(page); 10200 if (err == -EACCES) 10201 return -EPERM; 10202 out: 10203 return err; 10204 } 10205 10206 static int _nfs41_test_stateid(struct nfs_server *server, 10207 const nfs4_stateid *stateid, 10208 const struct cred *cred) 10209 { 10210 int status; 10211 struct nfs41_test_stateid_args args = { 10212 .stateid = *stateid, 10213 }; 10214 struct nfs41_test_stateid_res res; 10215 struct rpc_message msg = { 10216 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID], 10217 .rpc_argp = &args, 10218 .rpc_resp = &res, 10219 .rpc_cred = cred, 10220 }; 10221 struct rpc_clnt *rpc_client = server->client; 10222 struct nfs_client *clp = server->nfs_client; 10223 10224 nfs4_state_protect(clp, NFS_SP4_MACH_CRED_STATEID, &rpc_client, &msg); 10225 10226 dprintk("NFS call test_stateid %p\n", stateid); 10227 nfs4_init_sequence(clp, &args.seq_args, &res.seq_res, 0, 1); 10228 status = nfs4_call_sync_sequence(rpc_client, server, &msg, 10229 &args.seq_args, &res.seq_res); 10230 if (status != NFS_OK) { 10231 dprintk("NFS reply test_stateid: failed, %d\n", status); 10232 return status; 10233 } 10234 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status); 10235 return -res.status; 10236 } 10237 10238 static void nfs4_handle_delay_or_session_error(struct nfs_server *server, 10239 int err, struct 
nfs4_exception *exception) 10240 { 10241 exception->retry = 0; 10242 switch(err) { 10243 case -NFS4ERR_DELAY: 10244 case -NFS4ERR_RETRY_UNCACHED_REP: 10245 nfs4_handle_exception(server, err, exception); 10246 break; 10247 case -NFS4ERR_BADSESSION: 10248 case -NFS4ERR_BADSLOT: 10249 case -NFS4ERR_BAD_HIGH_SLOT: 10250 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 10251 case -NFS4ERR_DEADSESSION: 10252 nfs4_do_handle_exception(server, err, exception); 10253 } 10254 } 10255 10256 /** 10257 * nfs41_test_stateid - perform a TEST_STATEID operation 10258 * 10259 * @server: server / transport on which to perform the operation 10260 * @stateid: state ID to test 10261 * @cred: credential 10262 * 10263 * Returns NFS_OK if the server recognizes that "stateid" is valid. 10264 * Otherwise a negative NFS4ERR value is returned if the operation 10265 * failed or the state ID is not currently valid. 10266 */ 10267 static int nfs41_test_stateid(struct nfs_server *server, 10268 const nfs4_stateid *stateid, 10269 const struct cred *cred) 10270 { 10271 struct nfs4_exception exception = { 10272 .interruptible = true, 10273 }; 10274 int err; 10275 do { 10276 err = _nfs41_test_stateid(server, stateid, cred); 10277 nfs4_handle_delay_or_session_error(server, err, &exception); 10278 } while (exception.retry); 10279 return err; 10280 } 10281 10282 struct nfs_free_stateid_data { 10283 struct nfs_server *server; 10284 struct nfs41_free_stateid_args args; 10285 struct nfs41_free_stateid_res res; 10286 }; 10287 10288 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata) 10289 { 10290 struct nfs_free_stateid_data *data = calldata; 10291 nfs4_setup_sequence(data->server->nfs_client, 10292 &data->args.seq_args, 10293 &data->res.seq_res, 10294 task); 10295 } 10296 10297 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata) 10298 { 10299 struct nfs_free_stateid_data *data = calldata; 10300 10301 nfs41_sequence_done(task, &data->res.seq_res); 10302 10303 switch (task->tk_status) { 10304 case -NFS4ERR_DELAY: 10305 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN) 10306 rpc_restart_call_prepare(task); 10307 } 10308 } 10309 10310 static void nfs41_free_stateid_release(void *calldata) 10311 { 10312 struct nfs_free_stateid_data *data = calldata; 10313 struct nfs_client *clp = data->server->nfs_client; 10314 10315 nfs_put_client(clp); 10316 kfree(calldata); 10317 } 10318 10319 static const struct rpc_call_ops nfs41_free_stateid_ops = { 10320 .rpc_call_prepare = nfs41_free_stateid_prepare, 10321 .rpc_call_done = nfs41_free_stateid_done, 10322 .rpc_release = nfs41_free_stateid_release, 10323 }; 10324 10325 /** 10326 * nfs41_free_stateid - perform a FREE_STATEID operation 10327 * 10328 * @server: server / transport on which to perform the operation 10329 * @stateid: state ID to release 10330 * @cred: credential 10331 * @privileged: set to true if this call needs to be privileged 10332 * 10333 * Note: this function is always asynchronous. 
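 *
 * The RPC task is launched and released immediately; the reply is handled
 * entirely by the rpc_call_done/rpc_release callbacks, and the caller's
 * stateid is marked NFS4_FREED_STATEID_TYPE without waiting for the
 * server's answer.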
10334 */ 10335 static int nfs41_free_stateid(struct nfs_server *server, 10336 nfs4_stateid *stateid, 10337 const struct cred *cred, 10338 bool privileged) 10339 { 10340 struct rpc_message msg = { 10341 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID], 10342 .rpc_cred = cred, 10343 }; 10344 struct rpc_task_setup task_setup = { 10345 .rpc_client = server->client, 10346 .rpc_message = &msg, 10347 .callback_ops = &nfs41_free_stateid_ops, 10348 .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE, 10349 }; 10350 struct nfs_free_stateid_data *data; 10351 struct rpc_task *task; 10352 struct nfs_client *clp = server->nfs_client; 10353 10354 if (!refcount_inc_not_zero(&clp->cl_count)) 10355 return -EIO; 10356 10357 nfs4_state_protect(clp, NFS_SP4_MACH_CRED_STATEID, 10358 &task_setup.rpc_client, &msg); 10359 10360 dprintk("NFS call free_stateid %p\n", stateid); 10361 data = kmalloc(sizeof(*data), GFP_KERNEL); 10362 if (!data) 10363 return -ENOMEM; 10364 data->server = server; 10365 nfs4_stateid_copy(&data->args.stateid, stateid); 10366 10367 task_setup.callback_data = data; 10368 10369 msg.rpc_argp = &data->args; 10370 msg.rpc_resp = &data->res; 10371 nfs4_init_sequence(clp, &data->args.seq_args, &data->res.seq_res, 1, 10372 privileged); 10373 task = rpc_run_task(&task_setup); 10374 if (IS_ERR(task)) 10375 return PTR_ERR(task); 10376 rpc_put_task(task); 10377 stateid->type = NFS4_FREED_STATEID_TYPE; 10378 return 0; 10379 } 10380 10381 static void 10382 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp) 10383 { 10384 const struct cred *cred = lsp->ls_state->owner->so_cred; 10385 10386 nfs41_free_stateid(server, &lsp->ls_stateid, cred, false); 10387 nfs4_free_lock_state(server, lsp); 10388 } 10389 10390 static bool nfs41_match_stateid(const nfs4_stateid *s1, 10391 const nfs4_stateid *s2) 10392 { 10393 trace_nfs41_match_stateid(s1, s2); 10394 10395 if (s1->type != s2->type) 10396 return false; 10397 10398 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0) 10399 return false; 10400 10401 if (s1->seqid == s2->seqid) 10402 return true; 10403 10404 return s1->seqid == 0 || s2->seqid == 0; 10405 } 10406 10407 bool nfs4_match_stateid(const nfs4_stateid *s1, 10408 const nfs4_stateid *s2) 10409 { 10410 trace_nfs4_match_stateid(s1, s2); 10411 10412 return nfs4_stateid_match(s1, s2); 10413 } 10414 10415 10416 static const struct nfs4_sequence_slot_ops nfs41_sequence_slot_ops = { 10417 .process = nfs41_sequence_process, 10418 .done = nfs41_sequence_done, 10419 .free_slot = nfs41_sequence_free_slot, 10420 }; 10421 10422 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { 10423 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 10424 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 10425 .recover_open = nfs4_open_reclaim, 10426 .recover_lock = nfs4_lock_reclaim, 10427 .establish_clid = nfs41_init_clientid, 10428 .reclaim_complete = nfs41_proc_reclaim_complete, 10429 .detect_trunking = nfs41_discover_server_trunking, 10430 }; 10431 10432 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = { 10433 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 10434 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 10435 .recover_open = nfs41_open_expired, 10436 .recover_lock = nfs41_lock_expired, 10437 .establish_clid = nfs41_init_clientid, 10438 }; 10439 10440 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = { 10441 .sched_state_renewal = nfs41_proc_async_sequence, 10442 .get_state_renewal_cred = nfs4_get_machine_cred, 10443 .renew_lease = 
nfs4_proc_sequence, 10444 }; 10445 10446 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = { 10447 .get_locations = _nfs41_proc_get_locations, 10448 .fsid_present = _nfs41_proc_fsid_present, 10449 }; 10450 10451 static struct nfs_seqid * 10452 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2) 10453 { 10454 return NULL; 10455 } 10456 10457 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { 10458 .minor_version = 1, 10459 .init_caps = NFS_CAP_READDIRPLUS 10460 | NFS_CAP_ATOMIC_OPEN 10461 | NFS_CAP_DIR_DELEG 10462 | NFS_CAP_POSIX_LOCK 10463 | NFS_CAP_STATEID_NFSV41 10464 | NFS_CAP_ATOMIC_OPEN_V1 10465 | NFS_CAP_LGOPEN 10466 | NFS_CAP_MOVEABLE, 10467 .init_client = nfs41_init_client, 10468 .shutdown_client = nfs41_shutdown_client, 10469 .match_stateid = nfs41_match_stateid, 10470 .find_root_sec = nfs41_find_root_sec, 10471 .free_lock_state = nfs41_free_lock_state, 10472 .test_and_free_expired = nfs41_test_and_free_expired_stateid, 10473 .alloc_seqid = nfs_alloc_no_seqid, 10474 .session_trunk = nfs4_test_session_trunk, 10475 .call_sync_ops = &nfs41_call_sync_ops, 10476 .sequence_slot_ops = &nfs41_sequence_slot_ops, 10477 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 10478 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 10479 .state_renewal_ops = &nfs41_state_renewal_ops, 10480 .mig_recovery_ops = &nfs41_mig_recovery_ops, 10481 }; 10482 10483 #if defined(CONFIG_NFS_V4_2) 10484 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = { 10485 .minor_version = 2, 10486 .init_caps = NFS_CAP_READDIRPLUS 10487 | NFS_CAP_ATOMIC_OPEN 10488 | NFS_CAP_DIR_DELEG 10489 | NFS_CAP_POSIX_LOCK 10490 | NFS_CAP_STATEID_NFSV41 10491 | NFS_CAP_ATOMIC_OPEN_V1 10492 | NFS_CAP_LGOPEN 10493 | NFS_CAP_ALLOCATE 10494 | NFS_CAP_COPY 10495 | NFS_CAP_OFFLOAD_CANCEL 10496 | NFS_CAP_COPY_NOTIFY 10497 | NFS_CAP_DEALLOCATE 10498 | NFS_CAP_ZERO_RANGE 10499 | NFS_CAP_SEEK 10500 | NFS_CAP_LAYOUTSTATS 10501 | NFS_CAP_CLONE 10502 | NFS_CAP_LAYOUTERROR 10503 | NFS_CAP_READ_PLUS 10504 | NFS_CAP_MOVEABLE 10505 | NFS_CAP_OFFLOAD_STATUS, 10506 .init_client = nfs41_init_client, 10507 .shutdown_client = nfs41_shutdown_client, 10508 .match_stateid = nfs41_match_stateid, 10509 .find_root_sec = nfs41_find_root_sec, 10510 .free_lock_state = nfs41_free_lock_state, 10511 .call_sync_ops = &nfs41_call_sync_ops, 10512 .sequence_slot_ops = &nfs41_sequence_slot_ops, 10513 .test_and_free_expired = nfs41_test_and_free_expired_stateid, 10514 .alloc_seqid = nfs_alloc_no_seqid, 10515 .session_trunk = nfs4_test_session_trunk, 10516 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 10517 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 10518 .state_renewal_ops = &nfs41_state_renewal_ops, 10519 .mig_recovery_ops = &nfs41_mig_recovery_ops, 10520 }; 10521 #endif 10522 10523 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = { 10524 #if defined(CONFIG_NFS_V4_0) 10525 [0] = &nfs_v4_0_minor_ops, 10526 #endif /* CONFIG_NFS_V4_0 */ 10527 [1] = &nfs_v4_1_minor_ops, 10528 #if defined(CONFIG_NFS_V4_2) 10529 [2] = &nfs_v4_2_minor_ops, 10530 #endif 10531 }; 10532 10533 static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size) 10534 { 10535 ssize_t error, error2, error3; 10536 size_t left = size; 10537 10538 error = generic_listxattr(dentry, list, left); 10539 if (error < 0) 10540 return error; 10541 if (list) { 10542 list += error; 10543 left -= error; 10544 } 10545 10546 error2 = security_inode_listsecurity(d_inode(dentry), list, left); 10547 if (error2 < 0) 10548 return 
error2; 10549 if (list) { 10550 list += error2; 10551 left -= error2; 10552 } 10553 10554 error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, left); 10555 if (error3 < 0) 10556 return error3; 10557 10558 error += error2 + error3; 10559 if (size && error > size) 10560 return -ERANGE; 10561 return error; 10562 } 10563 10564 static void nfs4_enable_swap(struct inode *inode) 10565 { 10566 /* The state manager thread must always be running. 10567 * It will notice the client is a swapper, and stay put. 10568 */ 10569 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 10570 10571 nfs4_schedule_state_manager(clp); 10572 } 10573 10574 static void nfs4_disable_swap(struct inode *inode) 10575 { 10576 /* The state manager thread will now exit once it is 10577 * woken. 10578 */ 10579 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 10580 10581 set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state); 10582 clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state); 10583 wake_up_var(&clp->cl_state); 10584 } 10585 10586 static const struct inode_operations nfs4_dir_inode_operations = { 10587 .create = nfs_create, 10588 .lookup = nfs_lookup, 10589 .atomic_open = nfs_atomic_open, 10590 .link = nfs_link, 10591 .unlink = nfs_unlink, 10592 .symlink = nfs_symlink, 10593 .mkdir = nfs_mkdir, 10594 .rmdir = nfs_rmdir, 10595 .mknod = nfs_mknod, 10596 .rename = nfs_rename, 10597 .permission = nfs_permission, 10598 .getattr = nfs_getattr, 10599 .setattr = nfs_setattr, 10600 .listxattr = nfs4_listxattr, 10601 }; 10602 10603 static const struct inode_operations nfs4_file_inode_operations = { 10604 .permission = nfs_permission, 10605 .getattr = nfs_getattr, 10606 .setattr = nfs_setattr, 10607 .listxattr = nfs4_listxattr, 10608 }; 10609 10610 static struct nfs_server *nfs4_clone_server(struct nfs_server *source, 10611 struct nfs_fh *fh, struct nfs_fattr *fattr, 10612 rpc_authflavor_t flavor) 10613 { 10614 struct nfs_server *server; 10615 int error; 10616 10617 server = nfs_clone_server(source, fh, fattr, flavor); 10618 if (IS_ERR(server)) 10619 return server; 10620 10621 error = nfs4_delegation_hash_alloc(server); 10622 if (error) { 10623 nfs_free_server(server); 10624 return ERR_PTR(error); 10625 } 10626 10627 return server; 10628 } 10629 10630 const struct nfs_rpc_ops nfs_v4_clientops = { 10631 .version = 4, /* protocol version */ 10632 .dentry_ops = &nfs4_dentry_operations, 10633 .dir_inode_ops = &nfs4_dir_inode_operations, 10634 .file_inode_ops = &nfs4_file_inode_operations, 10635 .file_ops = &nfs4_file_operations, 10636 .getroot = nfs4_proc_get_root, 10637 .submount = nfs4_submount, 10638 .try_get_tree = nfs4_try_get_tree, 10639 .getattr = nfs4_proc_getattr, 10640 .setattr = nfs4_proc_setattr, 10641 .lookup = nfs4_proc_lookup, 10642 .lookupp = nfs4_proc_lookupp, 10643 .access = nfs4_proc_access, 10644 .readlink = nfs4_proc_readlink, 10645 .create = nfs4_proc_create, 10646 .remove = nfs4_proc_remove, 10647 .unlink_setup = nfs4_proc_unlink_setup, 10648 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare, 10649 .unlink_done = nfs4_proc_unlink_done, 10650 .rename_setup = nfs4_proc_rename_setup, 10651 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare, 10652 .rename_done = nfs4_proc_rename_done, 10653 .link = nfs4_proc_link, 10654 .symlink = nfs4_proc_symlink, 10655 .mkdir = nfs4_proc_mkdir, 10656 .rmdir = nfs4_proc_rmdir, 10657 .readdir = nfs4_proc_readdir, 10658 .mknod = nfs4_proc_mknod, 10659 .statfs = nfs4_proc_statfs, 10660 .fsinfo = nfs4_proc_fsinfo, 10661 .pathconf = nfs4_proc_pathconf, 10662 .set_capabilities = 
nfs4_server_capabilities, 10663 .decode_dirent = nfs4_decode_dirent, 10664 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare, 10665 .read_setup = nfs4_proc_read_setup, 10666 .read_done = nfs4_read_done, 10667 .write_setup = nfs4_proc_write_setup, 10668 .write_done = nfs4_write_done, 10669 .commit_setup = nfs4_proc_commit_setup, 10670 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare, 10671 .commit_done = nfs4_commit_done, 10672 .lock = nfs4_proc_lock, 10673 .clear_acl_cache = nfs4_zap_acl_attr, 10674 .close_context = nfs4_close_context, 10675 .open_context = nfs4_atomic_open, 10676 .have_delegation = nfs4_have_delegation, 10677 .return_delegation = nfs4_inode_return_delegation, 10678 .alloc_client = nfs4_alloc_client, 10679 .init_client = nfs4_init_client, 10680 .free_client = nfs4_free_client, 10681 .create_server = nfs4_create_server, 10682 .clone_server = nfs4_clone_server, 10683 .discover_trunking = nfs4_discover_trunking, 10684 .enable_swap = nfs4_enable_swap, 10685 .disable_swap = nfs4_disable_swap, 10686 }; 10687 10688 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { 10689 .name = XATTR_NAME_NFSV4_ACL, 10690 .list = nfs4_xattr_list_nfs4_acl, 10691 .get = nfs4_xattr_get_nfs4_acl, 10692 .set = nfs4_xattr_set_nfs4_acl, 10693 }; 10694 10695 static const struct xattr_handler nfs4_xattr_nfs4_dacl_handler = { 10696 .name = XATTR_NAME_NFSV4_DACL, 10697 .list = nfs4_xattr_list_nfs4_dacl, 10698 .get = nfs4_xattr_get_nfs4_dacl, 10699 .set = nfs4_xattr_set_nfs4_dacl, 10700 }; 10701 10702 static const struct xattr_handler nfs4_xattr_nfs4_sacl_handler = { 10703 .name = XATTR_NAME_NFSV4_SACL, 10704 .list = nfs4_xattr_list_nfs4_sacl, 10705 .get = nfs4_xattr_get_nfs4_sacl, 10706 .set = nfs4_xattr_set_nfs4_sacl, 10707 }; 10708 10709 #ifdef CONFIG_NFS_V4_2 10710 static const struct xattr_handler nfs4_xattr_nfs4_user_handler = { 10711 .prefix = XATTR_USER_PREFIX, 10712 .get = nfs4_xattr_get_nfs4_user, 10713 .set = nfs4_xattr_set_nfs4_user, 10714 }; 10715 #endif 10716 10717 const struct xattr_handler * const nfs4_xattr_handlers[] = { 10718 &nfs4_xattr_nfs4_acl_handler, 10719 &nfs4_xattr_nfs4_dacl_handler, 10720 &nfs4_xattr_nfs4_sacl_handler, 10721 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 10722 &nfs4_xattr_nfs4_label_handler, 10723 #endif 10724 #ifdef CONFIG_NFS_V4_2 10725 &nfs4_xattr_nfs4_user_handler, 10726 #endif 10727 NULL 10728 }; 10729
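/*
 * Illustrative sketch only (deliberately compiled out): one way a caller
 * could select the per-minor-version operations table defined above and use
 * its ->match_stateid() hook. The helper name and its use here are
 * hypothetical; the real client wires nfs_v4_minor_ops[] up while the
 * nfs_client is being initialised. Note that nfs41_match_stateid() treats a
 * seqid of zero as a wildcard, so two stateids with the same "other" field
 * match if either side carries seqid 0.
 */
#if 0
static bool example_stateids_match(unsigned int minorversion,
				   const nfs4_stateid *s1,
				   const nfs4_stateid *s2)
{
	const struct nfs4_minor_version_ops *ops;

	if (minorversion >= ARRAY_SIZE(nfs_v4_minor_ops))
		return false;
	ops = nfs_v4_minor_ops[minorversion];
	if (ops == NULL)	/* minor version compiled out */
		return false;
	return ops->match_stateid(s1, s2);
}
#endif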