/*
 * fs/nfs/nfs4proc.c
 *
 * Client-side procedure declarations for NFSv4.
 *
 * Copyright (c) 2002 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/iversion.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "callback.h"
#include "pnfs.h"
#include "netns.h"
#include "sysfs.h"
#include "nfs4idmap.h"
#include "nfs4session.h"
#include "fscache.h"
#include "nfs42.h"

#include "nfs4trace.h"

#define NFSDBG_FACILITY		NFSDBG_PROC

#define NFS4_BITMASK_SZ		3

#define NFS4_POLL_RETRY_MIN	(HZ/10)
#define NFS4_POLL_RETRY_MAX	(15*HZ)

/* file attributes which can be mapped to nfs attributes */
#define NFS4_VALID_ATTRS (ATTR_MODE \
	| ATTR_UID \
	| ATTR_GID \
	| ATTR_SIZE \
	| ATTR_ATIME \
	| ATTR_MTIME \
	| ATTR_CTIME \
	| ATTR_ATIME_SET \
	| ATTR_MTIME_SET)

struct nfs4_opendata;
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
			      struct nfs_fattr *fattr, struct inode *inode);
static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
			   struct nfs_fattr *fattr, struct iattr *sattr,
			   struct nfs_open_context *ctx, struct nfs4_label *ilabel);
#ifdef CONFIG_NFS_V4_1
static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
		const struct cred *cred,
		struct nfs4_slot *slot,
		bool is_privileged);
static int nfs41_test_stateid(struct nfs_server *, const nfs4_stateid *,
		const struct cred *);
static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *,
		const struct cred *, bool);
#endif

#ifdef CONFIG_NFS_V4_SECURITY_LABEL
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
	struct iattr *sattr, struct nfs4_label *label)
{
	struct lsm_context shim;
	int err;

	if (label == NULL)
		return NULL;

	if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
		return NULL;

	label->lfs = 0;
	label->pi = 0;
	label->len = 0;
	label->label = NULL;

	err = security_dentry_init_security(dentry, sattr->ia_mode,
				&dentry->d_name, NULL, &shim);
	if (err)
		return NULL;

	label->lsmid = shim.id;
	label->label = shim.context;
	label->len = shim.len;
	return label;
}
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{
	struct lsm_context shim;

	if (label) {
		shim.context = label->label;
		shim.len = label->len;
		shim.id = label->lsmid;
		security_release_secctx(&shim);
	}
}
static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{
	if (label)
		return server->attr_bitmask;

	return server->attr_bitmask_nl;
}
#else
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
	struct iattr *sattr, struct nfs4_label *l)
{ return NULL; }
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{ return; }
static inline u32 *
nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{ return server->attr_bitmask; }
#endif

/* Prevent leaks of NFSv4 errors into userland */
static int nfs4_map_errors(int err)
{
	if (err >= -1000)
		return err;
	switch (err) {
	case -NFS4ERR_RESOURCE:
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
	case -NFS4ERR_RETURNCONFLICT:
		return -EREMOTEIO;
	case -NFS4ERR_WRONGSEC:
	case -NFS4ERR_WRONG_CRED:
		return -EPERM;
	case -NFS4ERR_BADOWNER:
	case -NFS4ERR_BADNAME:
		return -EINVAL;
	case -NFS4ERR_SHARE_DENIED:
		return -EACCES;
	case -NFS4ERR_MINOR_VERS_MISMATCH:
		return -EPROTONOSUPPORT;
	case -NFS4ERR_FILE_OPEN:
		return -EBUSY;
	case -NFS4ERR_NOT_SAME:
		return -ENOTSYNC;
	case -ENETDOWN:
	case -ENETUNREACH:
		break;
	default:
		dprintk("%s could not handle NFSv4 error %d\n",
				__func__, -err);
		break;
	}
	return -EIO;
}

/*
 * This is our standard bitmap for GETATTR requests.
 */
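/*
 * Each bitmap below holds the three 32-bit words of an NFSv4 attribute
 * bitmask: word 0 carries the FATTR4_WORD0_* bits, word 1 the
 * FATTR4_WORD1_* bits, and word 2 the FATTR4_WORD2_* bits.
 */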
const u32 nfs4_fattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_CREATE
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	FATTR4_WORD2_SECURITY_LABEL
#endif
};

static const u32 nfs4_pnfs_open_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_CREATE
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY,
	FATTR4_WORD2_MDSTHRESHOLD
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	| FATTR4_WORD2_SECURITY_LABEL
#endif
};

static const u32 nfs4_open_noattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_FILEID,
};

const u32 nfs4_statfs_bitmap[3] = {
	FATTR4_WORD0_FILES_AVAIL
	| FATTR4_WORD0_FILES_FREE
	| FATTR4_WORD0_FILES_TOTAL,
	FATTR4_WORD1_SPACE_AVAIL
	| FATTR4_WORD1_SPACE_FREE
	| FATTR4_WORD1_SPACE_TOTAL
};

const u32 nfs4_pathconf_bitmap[3] = {
	FATTR4_WORD0_MAXLINK
	| FATTR4_WORD0_MAXNAME,
	0
};

const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
			| FATTR4_WORD0_MAXREAD
			| FATTR4_WORD0_MAXWRITE
			| FATTR4_WORD0_LEASE_TIME,
			FATTR4_WORD1_TIME_DELTA
			| FATTR4_WORD1_FS_LAYOUT_TYPES,
			FATTR4_WORD2_LAYOUT_BLKSIZE
			| FATTR4_WORD2_CLONE_BLKSIZE
			| FATTR4_WORD2_CHANGE_ATTR_TYPE
			| FATTR4_WORD2_XATTR_SUPPORT
};

const u32 nfs4_fs_locations_bitmap[3] = {
	FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID
	| FATTR4_WORD0_FS_LOCATIONS,
	FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
};

static void nfs4_bitmap_copy_adjust(__u32 *dst, const __u32 *src,
				    struct inode *inode, unsigned long flags)
{
	unsigned long cache_validity;

	memcpy(dst, src, NFS4_BITMASK_SZ*sizeof(*dst));
	if (!inode || !nfs_have_read_or_write_delegation(inode))
		return;

	cache_validity = READ_ONCE(NFS_I(inode)->cache_validity) | flags;

	/* Remove the attributes over which we have full control */
	dst[1] &= ~FATTR4_WORD1_RAWDEV;
	if (!(cache_validity & NFS_INO_INVALID_SIZE))
		dst[0] &= ~FATTR4_WORD0_SIZE;

	if (!(cache_validity & NFS_INO_INVALID_CHANGE))
		dst[0] &= ~FATTR4_WORD0_CHANGE;

	if (!(cache_validity & NFS_INO_INVALID_MODE))
		dst[1] &= ~FATTR4_WORD1_MODE;
	if (!(cache_validity & NFS_INO_INVALID_OTHER))
		dst[1] &= ~(FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP);

	if (!(cache_validity & NFS_INO_INVALID_BTIME))
		dst[1] &= ~FATTR4_WORD1_TIME_CREATE;

	if (nfs_have_delegated_mtime(inode)) {
		if (!(cache_validity & NFS_INO_INVALID_ATIME))
			dst[1] &= ~(FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET);
		if (!(cache_validity & NFS_INO_INVALID_MTIME))
			dst[1] &= ~(FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET);
		if (!(cache_validity & NFS_INO_INVALID_CTIME))
			dst[1] &= ~(FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY_SET);
	} else if (nfs_have_delegated_atime(inode)) {
		if (!(cache_validity & NFS_INO_INVALID_ATIME))
			dst[1] &= ~(FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET);
	}
}

static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
		struct nfs4_readdir_arg *readdir)
{
	unsigned int attrs = FATTR4_WORD0_FILEID | FATTR4_WORD0_TYPE;
	__be32 *start, *p;

	if (cookie > 2) {
		readdir->cookie = cookie;
		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
		return;
	}

	readdir->cookie = 0;
	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
	if (cookie == 2)
		return;

	/*
	 * NFSv4 servers do not return entries for '.' and '..'
	 * Therefore, we fake these entries here.  We let '.'
	 * have cookie 0 and '..' have cookie 1.  Note that
	 * when talking to the server, we always send cookie 0
	 * instead of 1 or 2.
	 */
	start = p = kmap_atomic(*readdir->pages);

	if (cookie == 0) {
		*p++ = xdr_one;				/* next */
		*p++ = xdr_zero;			/* cookie, first word */
		*p++ = xdr_one;				/* cookie, second word */
		*p++ = xdr_one;				/* entry len */
		memcpy(p, ".\0\0\0", 4);		/* entry */
		p++;
		*p++ = xdr_one;				/* bitmap length */
		*p++ = htonl(attrs);			/* bitmap */
		*p++ = htonl(12);			/* attribute buffer length */
		*p++ = htonl(NF4DIR);
		p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
	}

	*p++ = xdr_one;					/* next */
	*p++ = xdr_zero;				/* cookie, first word */
	*p++ = xdr_two;					/* cookie, second word */
	*p++ = xdr_two;					/* entry len */
	memcpy(p, "..\0\0", 4);				/* entry */
	p++;
	*p++ = xdr_one;					/* bitmap length */
	*p++ = htonl(attrs);				/* bitmap */
	*p++ = htonl(12);				/* attribute buffer length */
	*p++ = htonl(NF4DIR);
	spin_lock(&dentry->d_lock);
	p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
	spin_unlock(&dentry->d_lock);

	readdir->pgbase = (char *)p - (char *)start;
	readdir->count -= readdir->pgbase;
	kunmap_atomic(start);
}

static void nfs4_fattr_set_prechange(struct nfs_fattr *fattr, u64 version)
{
	if (!(fattr->valid & NFS_ATTR_FATTR_PRECHANGE)) {
		fattr->pre_change_attr = version;
		fattr->valid |= NFS_ATTR_FATTR_PRECHANGE;
	}
}

static void nfs4_test_and_free_stateid(struct nfs_server *server,
		nfs4_stateid *stateid,
		const struct cred *cred)
{
	const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;

	ops->test_and_free_expired(server, stateid, cred);
}

static void __nfs4_free_revoked_stateid(struct nfs_server *server,
		nfs4_stateid *stateid,
		const struct cred *cred)
{
	stateid->type = NFS4_REVOKED_STATEID_TYPE;
	nfs4_test_and_free_stateid(server, stateid, cred);
}

static void nfs4_free_revoked_stateid(struct nfs_server *server,
		const nfs4_stateid *stateid,
		const struct cred *cred)
{
	nfs4_stateid tmp;

	nfs4_stateid_copy(&tmp, stateid);
	__nfs4_free_revoked_stateid(server, &tmp, cred);
}
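/*
 * Compute the next retry delay: the timeout starts at NFS4_POLL_RETRY_MIN
 * and is doubled on each call, capped at NFS4_POLL_RETRY_MAX.
 */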
static long nfs4_update_delay(long *timeout)
{
	long ret;
	if (!timeout)
		return NFS4_POLL_RETRY_MAX;
	if (*timeout <= 0)
		*timeout = NFS4_POLL_RETRY_MIN;
	if (*timeout > NFS4_POLL_RETRY_MAX)
		*timeout = NFS4_POLL_RETRY_MAX;
	ret = *timeout;
	*timeout <<= 1;
	return ret;
}

static int nfs4_delay_killable(long *timeout)
{
	might_sleep();

	if (unlikely(nfs_current_task_exiting()))
		return -EINTR;
	__set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
	schedule_timeout(nfs4_update_delay(timeout));
	if (!__fatal_signal_pending(current))
		return 0;
	return -EINTR;
}

static int nfs4_delay_interruptible(long *timeout)
{
	might_sleep();

	if (unlikely(nfs_current_task_exiting()))
		return -EINTR;
	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE_UNSAFE);
	schedule_timeout(nfs4_update_delay(timeout));
	if (!signal_pending(current))
		return 0;
	return __fatal_signal_pending(current) ? -EINTR : -ERESTARTSYS;
}

static int nfs4_delay(long *timeout, bool interruptible)
{
	if (interruptible)
		return nfs4_delay_interruptible(timeout);
	return nfs4_delay_killable(timeout);
}

static const nfs4_stateid *
nfs4_recoverable_stateid(const nfs4_stateid *stateid)
{
	if (!stateid)
		return NULL;
	switch (stateid->type) {
	case NFS4_OPEN_STATEID_TYPE:
	case NFS4_LOCK_STATEID_TYPE:
	case NFS4_DELEGATION_STATEID_TYPE:
		return stateid;
	default:
		break;
	}
	return NULL;
}

/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
static int nfs4_do_handle_exception(struct nfs_server *server,
		int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state *state = exception->state;
	const nfs4_stateid *stateid;
	struct inode *inode = exception->inode;
	int ret = errorcode;

	exception->delay = 0;
	exception->recovering = 0;
	exception->retry = 0;

	stateid = nfs4_recoverable_stateid(exception->stateid);
	if (stateid == NULL && state != NULL)
		stateid = nfs4_recoverable_stateid(&state->stateid);

	switch (errorcode) {
	case 0:
		return 0;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		if (inode != NULL && S_ISREG(inode->i_mode))
			pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_BAD_STATEID:
	case -NFS4ERR_PARTNER_NO_AUTH:
		if (inode != NULL && stateid != NULL) {
			nfs_inode_find_state_and_recover(inode,
					stateid);
			goto wait_on_recovery;
		}
		fallthrough;
	case -NFS4ERR_OPENMODE:
		if (inode) {
			int err;

			err = nfs_async_inode_return_delegation(inode,
					stateid);
			if (err == 0)
				goto wait_on_recovery;
			if (stateid != NULL && stateid->type == NFS4_DELEGATION_STATEID_TYPE) {
				exception->retry = 1;
				break;
			}
		}
		if (state == NULL)
			break;
		ret = nfs4_schedule_stateid_recovery(server, state);
		if (ret < 0)
			break;
		goto wait_on_recovery;
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_STALE_CLIENTID:
		nfs4_schedule_lease_recovery(clp);
		goto wait_on_recovery;
	case -NFS4ERR_MOVED:
		ret = nfs4_schedule_migration_recovery(server);
		if (ret < 0)
			break;
		goto wait_on_recovery;
	case -NFS4ERR_LEASE_MOVED:
		nfs4_schedule_lease_moved_recovery(clp);
		goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		/* Handled in nfs41_sequence_process() */
		goto wait_on_recovery;
#endif /* defined(CONFIG_NFS_V4_1) */
	case -NFS4ERR_FILE_OPEN:
		if (exception->timeout > HZ) {
			/* We have retried a decent amount, time to
			 * fail
			 */
			ret = -EBUSY;
			break;
		}
		fallthrough;
	case -NFS4ERR_DELAY:
		nfs_inc_server_stats(server, NFSIOS_DELAY);
		fallthrough;
	case -NFS4ERR_GRACE:
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
	case -NFS4ERR_RETURNCONFLICT:
		exception->delay = 1;
		return 0;

	case -NFS4ERR_RETRY_UNCACHED_REP:
	case -NFS4ERR_OLD_STATEID:
		exception->retry = 1;
		break;
	case -NFS4ERR_BADOWNER:
		/* The following works around a Linux server bug! */
	case -NFS4ERR_BADNAME:
		if (server->caps & NFS_CAP_UIDGID_NOMAP) {
			server->caps &= ~NFS_CAP_UIDGID_NOMAP;
			exception->retry = 1;
			printk(KERN_WARNING "NFS: v4 server %s "
					"does not accept raw "
					"uid/gids. "
					"Reenabling the idmapper.\n",
					server->nfs_client->cl_hostname);
		}
	}
	/* We failed to handle the error */
	return nfs4_map_errors(ret);
wait_on_recovery:
	exception->recovering = 1;
	return 0;
}

/*
 * Track the number of NFS4ERR_DELAY related retransmissions and return
 * EAGAIN if the 'softerr' mount option is set, and we've exceeded the limit
 * set by 'nfs_delay_retrans'.
 */
static int nfs4_exception_should_retrans(const struct nfs_server *server,
					 struct nfs4_exception *exception)
{
	if (server->flags & NFS_MOUNT_SOFTERR && nfs_delay_retrans >= 0) {
		if (exception->retrans++ >= (unsigned short)nfs_delay_retrans)
			return -EAGAIN;
	}
	return 0;
}
/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	int ret;

	ret = nfs4_do_handle_exception(server, errorcode, exception);
	if (exception->delay) {
		int ret2 = nfs4_exception_should_retrans(server, exception);
		if (ret2 < 0) {
			exception->retry = 0;
			return ret2;
		}
		ret = nfs4_delay(&exception->timeout,
				exception->interruptible);
		goto out_retry;
	}
	if (exception->recovering) {
		if (exception->task_is_privileged)
			return -EDEADLOCK;
		ret = nfs4_wait_clnt_recover(clp);
		if (test_bit(NFS_MIG_FAILED, &server->mig_status))
			return -EIO;
		goto out_retry;
	}
	return ret;
out_retry:
	if (ret == 0)
		exception->retry = 1;
	return ret;
}

static int
nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
		int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	int ret;

	if ((task->tk_rpc_status == -ENETDOWN ||
	     task->tk_rpc_status == -ENETUNREACH) &&
	    task->tk_flags & RPC_TASK_NETUNREACH_FATAL) {
		exception->delay = 0;
		exception->recovering = 0;
		exception->retry = 0;
		return -EIO;
	}

	ret = nfs4_do_handle_exception(server, errorcode, exception);
	if (exception->delay) {
		int ret2 = nfs4_exception_should_retrans(server, exception);
		if (ret2 < 0) {
			exception->retry = 0;
			return ret2;
		}
		rpc_delay(task, nfs4_update_delay(&exception->timeout));
		goto out_retry;
	}
	if (exception->recovering) {
		if (exception->task_is_privileged)
			return -EDEADLOCK;
		rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
		if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
			rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
		goto out_retry;
	}
	if (test_bit(NFS_MIG_FAILED, &server->mig_status))
		ret = -EIO;
	return ret;
out_retry:
	if (ret == 0) {
		exception->retry = 1;
		/*
		 * For NFS4ERR_MOVED, the client transport will need to
		 * be recomputed after migration recovery has completed.
		 */
		if (errorcode == -NFS4ERR_MOVED)
			rpc_task_release_transport(task);
	}
	return ret;
}

int
nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
			struct nfs4_state *state, long *timeout)
{
	struct nfs4_exception exception = {
		.state = state,
	};

	if (task->tk_status >= 0)
		return 0;
	if (timeout)
		exception.timeout = *timeout;
	task->tk_status = nfs4_async_handle_exception(task, server,
			task->tk_status,
			&exception);
	if (exception.delay && timeout)
		*timeout = exception.timeout;
	if (exception.retry)
		return -EAGAIN;
	return 0;
}

/*
 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
 * or 'false' otherwise.
 */
static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
{
	rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
	return (flavor == RPC_AUTH_GSS_KRB5I) || (flavor == RPC_AUTH_GSS_KRB5P);
}

static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
{
	spin_lock(&clp->cl_lock);
	if (time_before(clp->cl_last_renewal, timestamp))
		clp->cl_last_renewal = timestamp;
	spin_unlock(&clp->cl_lock);
}

static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
{
	struct nfs_client *clp = server->nfs_client;

	if (!nfs4_has_session(clp))
		do_renew_lease(clp, timestamp);
}

struct nfs4_call_sync_data {
	const struct nfs_server *seq_server;
	struct nfs4_sequence_args *seq_args;
	struct nfs4_sequence_res *seq_res;
};

void nfs4_init_sequence(struct nfs4_sequence_args *args,
			struct nfs4_sequence_res *res, int cache_reply,
			int privileged)
{
	args->sa_slot = NULL;
	args->sa_cache_this = cache_reply;
	args->sa_privileged = privileged;

	res->sr_slot = NULL;
}

static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res)
{
	struct nfs4_slot *slot = res->sr_slot;
	struct nfs4_slot_table *tbl;

	tbl = slot->table;
	spin_lock(&tbl->slot_tbl_lock);
	if (!nfs41_wake_and_assign_slot(tbl, slot))
		nfs4_free_slot(tbl, slot);
	spin_unlock(&tbl->slot_tbl_lock);

	res->sr_slot = NULL;
}

static int nfs40_sequence_done(struct rpc_task *task,
			       struct nfs4_sequence_res *res)
{
	if (res->sr_slot != NULL)
		nfs40_sequence_free_slot(res);
	return 1;
}

#if defined(CONFIG_NFS_V4_1)

static void nfs41_release_slot(struct nfs4_slot *slot)
{
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	bool send_new_highest_used_slotid = false;

	if (!slot)
		return;
	tbl = slot->table;
	session = tbl->session;

	/* Bump the slot sequence number */
	if (slot->seq_done)
		slot->seq_nr++;
	slot->seq_done = 0;

	spin_lock(&tbl->slot_tbl_lock);
	/* Be nice to the server: try to ensure that the last transmitted
	 * value for highest_user_slotid <= target_highest_slotid
	 */
	if (tbl->highest_used_slotid > tbl->target_highest_slotid)
		send_new_highest_used_slotid = true;

	if (nfs41_wake_and_assign_slot(tbl, slot)) {
		send_new_highest_used_slotid = false;
		goto out_unlock;
	}
	nfs4_free_slot(tbl, slot);

	if (tbl->highest_used_slotid != NFS4_NO_SLOT)
		send_new_highest_used_slotid = false;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);
	if (send_new_highest_used_slotid)
		nfs41_notify_server(session->clp);
	if (waitqueue_active(&tbl->slot_waitq))
		wake_up_all(&tbl->slot_waitq);
}

static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
{
	nfs41_release_slot(res->sr_slot);
	res->sr_slot = NULL;
}

static void nfs4_slot_sequence_record_sent(struct nfs4_slot *slot,
		u32 seqnr)
{
	if ((s32)(seqnr - slot->seq_nr_highest_sent) > 0)
		slot->seq_nr_highest_sent = seqnr;
}

static void nfs4_slot_sequence_acked(struct nfs4_slot *slot, u32 seqnr)
{
	nfs4_slot_sequence_record_sent(slot, seqnr);
	slot->seq_nr_last_acked = seqnr;
}
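/*
 * Send a lone, privileged SEQUENCE operation on the given slot so that the
 * slot's sequence number can be resynchronised with the server after an
 * interrupted or misordered request.
 */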
static void nfs4_probe_sequence(struct nfs_client *client, const struct cred *cred,
				struct nfs4_slot *slot)
{
	struct rpc_task *task = _nfs41_proc_sequence(client, cred, slot, true);

	if (!IS_ERR(task))
		rpc_put_task_async(task);
}

static int nfs41_sequence_process(struct rpc_task *task,
		struct nfs4_sequence_res *res)
{
	struct nfs4_session *session;
	struct nfs4_slot *slot = res->sr_slot;
	struct nfs_client *clp;
	int status;
	int ret = 1;

	if (slot == NULL)
		goto out_noaction;
	/* don't increment the sequence number if the task wasn't sent */
	if (!RPC_WAS_SENT(task) || slot->seq_done)
		goto out;

	session = slot->table->session;
	clp = session->clp;

	trace_nfs4_sequence_done(session, res);

	status = res->sr_status;
	if (task->tk_status == -NFS4ERR_DEADSESSION)
		status = -NFS4ERR_DEADSESSION;

	/* Check the SEQUENCE operation status */
	switch (status) {
	case 0:
		/* Mark this sequence number as having been acked */
		nfs4_slot_sequence_acked(slot, slot->seq_nr);
		/* Update the slot's sequence and clientid lease timer */
		slot->seq_done = 1;
		do_renew_lease(clp, res->sr_timestamp);
		/* Check sequence flags */
		nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags,
				!!slot->privileged);
		nfs41_update_target_slotid(slot->table, slot, res);
		break;
	case 1:
		/*
		 * sr_status remains 1 if an RPC level error occurred.
		 * The server may or may not have processed the sequence
		 * operation.
		 */
		nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
		slot->seq_done = 1;
		goto out;
	case -NFS4ERR_DELAY:
		/* The server detected a resend of the RPC call and
		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
		 * of RFC5661.
		 */
		dprintk("%s: slot=%u seq=%u: Operation in progress\n",
			__func__,
			slot->slot_nr,
			slot->seq_nr);
		goto out_retry;
	case -NFS4ERR_RETRY_UNCACHED_REP:
	case -NFS4ERR_SEQ_FALSE_RETRY:
		/*
		 * The server thinks we tried to replay a request.
		 * Retry the call after bumping the sequence ID.
		 */
		nfs4_slot_sequence_acked(slot, slot->seq_nr);
		goto retry_new_seq;
	case -NFS4ERR_BADSLOT:
		/*
		 * The slot id we used was probably retired. Try again
		 * using a different slot id.
		 */
		if (slot->slot_nr < slot->table->target_highest_slotid)
			goto session_recover;
		goto retry_nowait;
	case -NFS4ERR_SEQ_MISORDERED:
		nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
		/*
		 * Were one or more calls using this slot interrupted?
		 * If the server never received the request, then our
		 * transmitted slot sequence number may be too high. However,
		 * if the server did receive the request then it might
		 * accidentally give us a reply with a mismatched operation.
		 * We can sort this out by sending a lone sequence operation
		 * to the server on the same slot.
		 */
		if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) {
			slot->seq_nr--;
			if (task->tk_msg.rpc_proc != &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE]) {
				nfs4_probe_sequence(clp, task->tk_msg.rpc_cred, slot);
				res->sr_slot = NULL;
			}
			goto retry_nowait;
		}
		/*
		 * RFC5661:
		 * A retry might be sent while the original request is
		 * still in progress on the replier. The replier SHOULD
		 * deal with the issue by returning NFS4ERR_DELAY as the
		 * reply to SEQUENCE or CB_SEQUENCE operation, but
		 * implementations MAY return NFS4ERR_SEQ_MISORDERED.
		 *
		 * Restart the search after a delay.
		 */
		slot->seq_nr = slot->seq_nr_highest_sent;
		goto out_retry;
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		goto session_recover;
	default:
		/* Just update the slot sequence no. */
		slot->seq_done = 1;
	}
out:
	/* The session may be reset by one of the error handlers. */
	dprintk("%s: Error %d, freeing slot\n", __func__, res->sr_status);
out_noaction:
	return ret;
session_recover:
	set_bit(NFS4_SLOT_TBL_DRAINING, &session->fc_slot_table.slot_tbl_state);
	nfs4_schedule_session_recovery(session, status);
	dprintk("%s ERROR: %d Reset session\n", __func__, status);
	nfs41_sequence_free_slot(res);
	goto out;
retry_new_seq:
	++slot->seq_nr;
retry_nowait:
	if (rpc_restart_call_prepare(task)) {
		nfs41_sequence_free_slot(res);
		task->tk_status = 0;
		ret = 0;
	}
	goto out;
out_retry:
	if (!rpc_restart_call(task))
		goto out;
	rpc_delay(task, NFS4_POLL_RETRY_MAX);
	return 0;
}

int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (!nfs41_sequence_process(task, res))
		return 0;
	if (res->sr_slot != NULL)
		nfs41_sequence_free_slot(res);
	return 1;
}
EXPORT_SYMBOL_GPL(nfs41_sequence_done);

static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (res->sr_slot == NULL)
		return 1;
	if (res->sr_slot->table->session != NULL)
		return nfs41_sequence_process(task, res);
	return nfs40_sequence_done(task, res);
}

static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
{
	if (res->sr_slot != NULL) {
		if (res->sr_slot->table->session != NULL)
			nfs41_sequence_free_slot(res);
		else
			nfs40_sequence_free_slot(res);
	}
}

int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (res->sr_slot == NULL)
		return 1;
	if (!res->sr_slot->table->session)
		return nfs40_sequence_done(task, res);
	return nfs41_sequence_done(task, res);
}
EXPORT_SYMBOL_GPL(nfs4_sequence_done);

static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;

	dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);

	nfs4_setup_sequence(data->seq_server->nfs_client,
			    data->seq_args, data->seq_res, task);
}

static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;

	nfs41_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs41_call_sync_ops = {
	.rpc_call_prepare = nfs41_call_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
};

#else	/* !CONFIG_NFS_V4_1 */

static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	return nfs40_sequence_done(task, res);
}

static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
{
	if (res->sr_slot != NULL)
		nfs40_sequence_free_slot(res);
}

int nfs4_sequence_done(struct rpc_task *task,
		       struct nfs4_sequence_res *res)
{
	return nfs40_sequence_done(task, res);
}
EXPORT_SYMBOL_GPL(nfs4_sequence_done);

#endif	/* !CONFIG_NFS_V4_1 */
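/*
 * Initialise the SEQUENCE result before the call is sent.  sr_status is
 * preset to 1 so that a reply that never reaches the SEQUENCE decode step
 * (an RPC level failure) can be told apart from a decoded NFS status.
 */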
static void nfs41_sequence_res_init(struct nfs4_sequence_res *res)
{
	res->sr_timestamp = jiffies;
	res->sr_status_flags = 0;
	res->sr_status = 1;
}

static void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
		struct nfs4_sequence_res *res,
		struct nfs4_slot *slot)
{
	if (!slot)
		return;
	slot->privileged = args->sa_privileged ? 1 : 0;
	args->sa_slot = slot;

	res->sr_slot = slot;
}

int nfs4_setup_sequence(struct nfs_client *client,
			struct nfs4_sequence_args *args,
			struct nfs4_sequence_res *res,
			struct rpc_task *task)
{
	struct nfs4_session *session = nfs4_get_session(client);
	struct nfs4_slot_table *tbl = client->cl_slot_tbl;
	struct nfs4_slot *slot;

	/* slot already allocated? */
	if (res->sr_slot != NULL)
		goto out_start;

	if (session)
		tbl = &session->fc_slot_table;

	spin_lock(&tbl->slot_tbl_lock);
	/* The state manager will wait until the slot table is empty */
	if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
		goto out_sleep;

	slot = nfs4_alloc_slot(tbl);
	if (IS_ERR(slot)) {
		if (slot == ERR_PTR(-ENOMEM))
			goto out_sleep_timeout;
		goto out_sleep;
	}
	spin_unlock(&tbl->slot_tbl_lock);

	nfs4_sequence_attach_slot(args, res, slot);

	trace_nfs4_setup_sequence(session, args);
out_start:
	nfs41_sequence_res_init(res);
	rpc_call_start(task);
	return 0;
out_sleep_timeout:
	/* Try again in 1/4 second */
	if (args->sa_privileged)
		rpc_sleep_on_priority_timeout(&tbl->slot_tbl_waitq, task,
				jiffies + (HZ >> 2), RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on_timeout(&tbl->slot_tbl_waitq, task,
				NULL, jiffies + (HZ >> 2));
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
out_sleep:
	if (args->sa_privileged)
		rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
				RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
}
EXPORT_SYMBOL_GPL(nfs4_setup_sequence);

static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;

	nfs4_setup_sequence(data->seq_server->nfs_client,
			    data->seq_args, data->seq_res, task);
}

static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;

	nfs4_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs40_call_sync_ops = {
	.rpc_call_prepare = nfs40_call_sync_prepare,
	.rpc_call_done = nfs40_call_sync_done,
};

static int nfs4_call_sync_custom(struct rpc_task_setup *task_setup)
{
	int ret;
	struct rpc_task *task;

	task = rpc_run_task(task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);

	ret = task->tk_status;
	rpc_put_task(task);
	return ret;
}

static int nfs4_do_call_sync(struct rpc_clnt *clnt,
			     struct nfs_server *server,
			     struct rpc_message *msg,
			     struct nfs4_sequence_args *args,
			     struct nfs4_sequence_res *res,
			     unsigned short task_flags)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_call_sync_data data = {
		.seq_server = server,
		.seq_args = args,
		.seq_res = res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = clp->cl_mvops->call_sync_ops,
		.callback_data = &data,
		.flags = task_flags,
	};

	return nfs4_call_sync_custom(&task_setup);
}

static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
				   struct nfs_server *server,
				   struct rpc_message *msg,
				   struct nfs4_sequence_args *args,
				   struct nfs4_sequence_res *res)
{
	unsigned short task_flags = 0;

	if (server->caps & NFS_CAP_MOVEABLE)
		task_flags = RPC_TASK_MOVEABLE;
	return nfs4_do_call_sync(clnt, server, msg, args, res, task_flags);
}


int nfs4_call_sync(struct rpc_clnt *clnt,
		   struct nfs_server *server,
		   struct rpc_message *msg,
		   struct nfs4_sequence_args *args,
		   struct nfs4_sequence_res *res,
		   int cache_reply)
{
	nfs4_init_sequence(args, res, cache_reply, 0);
	return nfs4_call_sync_sequence(clnt, server, msg, args, res);
}

static void
nfs4_inc_nlink_locked(struct inode *inode)
{
	nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
				     NFS_INO_INVALID_CTIME |
				     NFS_INO_INVALID_NLINK);
	inc_nlink(inode);
}

static void
nfs4_inc_nlink(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	nfs4_inc_nlink_locked(inode);
	spin_unlock(&inode->i_lock);
}

static void
nfs4_dec_nlink_locked(struct inode *inode)
{
	nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
				     NFS_INO_INVALID_CTIME |
				     NFS_INO_INVALID_NLINK);
	drop_nlink(inode);
}

static void
nfs4_update_changeattr_locked(struct inode *inode,
		struct nfs4_change_info *cinfo,
		unsigned long timestamp, unsigned long cache_validity)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	u64 change_attr = inode_peek_iversion_raw(inode);

	if (!nfs_have_delegated_mtime(inode))
		cache_validity |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME;
	if (S_ISDIR(inode->i_mode))
		cache_validity |= NFS_INO_INVALID_DATA;

	switch (NFS_SERVER(inode)->change_attr_type) {
	case NFS4_CHANGE_TYPE_IS_UNDEFINED:
		if (cinfo->after == change_attr)
			goto out;
		break;
	default:
		if ((s64)(change_attr - cinfo->after) >= 0)
			goto out;
	}

	inode_set_iversion_raw(inode, cinfo->after);
	if (!cinfo->atomic || cinfo->before != change_attr) {
		if (S_ISDIR(inode->i_mode))
			nfs_force_lookup_revalidate(inode);

		if (!nfs_have_delegated_attributes(inode))
			cache_validity |=
				NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
				NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER |
				NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
				NFS_INO_INVALID_MODE | NFS_INO_INVALID_BTIME |
				NFS_INO_INVALID_XATTR;
		nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
	}
	nfsi->attrtimeo_timestamp = jiffies;
	nfsi->read_cache_jiffies = timestamp;
	nfsi->attr_gencount = nfs_inc_attr_generation_counter();
	nfsi->cache_validity &= ~NFS_INO_INVALID_CHANGE;
out:
	nfs_set_cache_invalid(inode, cache_validity);
}

void
nfs4_update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
		unsigned long timestamp, unsigned long cache_validity)
{
	spin_lock(&dir->i_lock);
	nfs4_update_changeattr_locked(dir, cinfo, timestamp, cache_validity);
	spin_unlock(&dir->i_lock);
}
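/*
 * Attributes that accompany an OPEN with O_CREAT: the security label and
 * iattr to apply to the new file, plus the verifier used for exclusive
 * create semantics.
 */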
struct nfs4_open_createattrs {
	struct nfs4_label *label;
	struct iattr *sattr;
	const __u32 verf[2];
};

static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
		int err, struct nfs4_exception *exception)
{
	if (err != -EINVAL)
		return false;
	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
		return false;
	server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
	exception->retry = 1;
	return true;
}

static fmode_t _nfs4_ctx_to_accessmode(const struct nfs_open_context *ctx)
{
	return ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
}

static fmode_t _nfs4_ctx_to_openmode(const struct nfs_open_context *ctx)
{
	fmode_t ret = ctx->mode & (FMODE_READ|FMODE_WRITE);

	return (ctx->mode & FMODE_EXEC) ? FMODE_READ | ret : ret;
}

static u32
nfs4_fmode_to_share_access(fmode_t fmode)
{
	u32 res = 0;

	switch (fmode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
		res = NFS4_SHARE_ACCESS_READ;
		break;
	case FMODE_WRITE:
		res = NFS4_SHARE_ACCESS_WRITE;
		break;
	case FMODE_READ|FMODE_WRITE:
		res = NFS4_SHARE_ACCESS_BOTH;
	}
	return res;
}

static u32
nfs4_map_atomic_open_share(struct nfs_server *server,
		fmode_t fmode, int openflags)
{
	u32 res = nfs4_fmode_to_share_access(fmode);

	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
		goto out;
	/* Want no delegation if we're using O_DIRECT */
	if (openflags & O_DIRECT) {
		res |= NFS4_SHARE_WANT_NO_DELEG;
		goto out;
	}
	/* res |= NFS4_SHARE_WANT_NO_PREFERENCE; */
	if (server->caps & NFS_CAP_DELEGTIME)
		res |= NFS4_SHARE_WANT_DELEG_TIMESTAMPS;
	if (server->caps & NFS_CAP_OPEN_XOR)
		res |= NFS4_SHARE_WANT_OPEN_XOR_DELEGATION;
out:
	return res;
}

static enum open_claim_type4
nfs4_map_atomic_open_claim(struct nfs_server *server,
		enum open_claim_type4 claim)
{
	if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
		return claim;
	switch (claim) {
	default:
		return claim;
	case NFS4_OPEN_CLAIM_FH:
		return NFS4_OPEN_CLAIM_NULL;
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
		return NFS4_OPEN_CLAIM_DELEGATE_CUR;
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		return NFS4_OPEN_CLAIM_DELEGATE_PREV;
	}
}

static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
	p->o_res.f_attr = &p->f_attr;
	p->o_res.seqid = p->o_arg.seqid;
	p->c_res.seqid = p->c_arg.seqid;
	p->o_res.server = p->o_arg.server;
	p->o_res.access_request = p->o_arg.access;
	nfs_fattr_init(&p->f_attr);
	nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}

static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
		const struct nfs4_open_createattrs *c,
		enum open_claim_type4 claim,
		gfp_t gfp_mask)
{
	struct dentry *parent = dget_parent(dentry);
	struct inode *dir = d_inode(parent);
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
	struct nfs4_label *label = (c != NULL) ? c->label : NULL;
	struct nfs4_opendata *p;

	p = kzalloc(sizeof(*p), gfp_mask);
	if (p == NULL)
		goto err;

	p->f_attr.label = nfs4_label_alloc(server, gfp_mask);
	if (IS_ERR(p->f_attr.label))
		goto err_free_p;

	p->a_label = nfs4_label_alloc(server, gfp_mask);
	if (IS_ERR(p->a_label))
		goto err_free_f;

	alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
	p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
	if (IS_ERR(p->o_arg.seqid))
		goto err_free_label;
	nfs_sb_active(dentry->d_sb);
	p->dentry = dget(dentry);
	p->dir = parent;
	p->owner = sp;
	atomic_inc(&sp->so_count);
	p->o_arg.open_flags = flags;
	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
	p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
	p->o_arg.share_access = nfs4_map_atomic_open_share(server,
							   fmode, flags);
	if (flags & O_CREAT) {
		p->o_arg.umask = current_umask();
		p->o_arg.label = nfs4_label_copy(p->a_label, label);
		if (c->sattr != NULL && c->sattr->ia_valid != 0) {
			p->o_arg.u.attrs = &p->attrs;
			memcpy(&p->attrs, c->sattr, sizeof(p->attrs));

			memcpy(p->o_arg.u.verifier.data, c->verf,
			       sizeof(p->o_arg.u.verifier.data));
		}
	}
	/* ask server to check for all possible rights as results
	 * are cached */
	switch (p->o_arg.claim) {
	default:
		break;
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
				  NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE |
				  NFS4_ACCESS_EXECUTE |
				  nfs_access_xattr_mask(server);
	}
	p->o_arg.clientid = server->nfs_client->cl_clientid;
	p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
	p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
	p->o_arg.name = &dentry->d_name;
	p->o_arg.server = server;
	p->o_arg.bitmask = nfs4_bitmask(server, label);
	p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
	switch (p->o_arg.claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
	case NFS4_OPEN_CLAIM_DELEGATE_PREV:
		p->o_arg.fh = NFS_FH(dir);
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
	case NFS4_OPEN_CLAIM_FH:
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		p->o_arg.fh = NFS_FH(d_inode(dentry));
	}
	p->c_arg.fh = &p->o_res.fh;
	p->c_arg.stateid = &p->o_res.stateid;
	p->c_arg.seqid = p->o_arg.seqid;
	nfs4_init_opendata_res(p);
	kref_init(&p->kref);
	return p;

err_free_label:
	nfs4_label_free(p->a_label);
err_free_f:
	nfs4_label_free(p->f_attr.label);
err_free_p:
	kfree(p);
err:
	dput(parent);
	return NULL;
}

static void nfs4_opendata_free(struct kref *kref)
{
	struct nfs4_opendata *p = container_of(kref,
			struct nfs4_opendata, kref);
	struct super_block *sb = p->dentry->d_sb;

	nfs4_lgopen_release(p->lgp);
	nfs_free_seqid(p->o_arg.seqid);
	nfs4_sequence_free_slot(&p->o_res.seq_res);
	if (p->state != NULL)
		nfs4_put_open_state(p->state);
	nfs4_put_state_owner(p->owner);

	nfs4_label_free(p->a_label);
	nfs4_label_free(p->f_attr.label);

	dput(p->dir);
	dput(p->dentry);
	nfs_sb_deactive(sb);
	nfs_fattr_free_names(&p->f_attr);
	kfree(p->f_attr.mdsthreshold);
	kfree(p);
}
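/*
 * The opendata is reference counted via p->kref; dropping the last
 * reference releases all of the associated state through
 * nfs4_opendata_free().
 */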
static void nfs4_opendata_put(struct nfs4_opendata *p)
{
	if (p != NULL)
		kref_put(&p->kref, nfs4_opendata_free);
}

static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
		fmode_t fmode)
{
	switch (fmode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_READ|FMODE_WRITE:
		return state->n_rdwr != 0;
	case FMODE_WRITE:
		return state->n_wronly != 0;
	case FMODE_READ:
		return state->n_rdonly != 0;
	}
	WARN_ON_ONCE(1);
	return false;
}

static int can_open_cached(struct nfs4_state *state, fmode_t mode,
		int open_mode, enum open_claim_type4 claim)
{
	int ret = 0;

	if (open_mode & (O_EXCL|O_TRUNC))
		goto out;
	switch (claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		goto out;
	default:
		break;
	}
	switch (mode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_READ:
		ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
			&& state->n_rdonly != 0;
		break;
	case FMODE_WRITE:
		ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
			&& state->n_wronly != 0;
		break;
	case FMODE_READ|FMODE_WRITE:
		ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
			&& state->n_rdwr != 0;
	}
out:
	return ret;
}

static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
		enum open_claim_type4 claim)
{
	if (delegation == NULL)
		return 0;
	if ((delegation->type & fmode) != fmode)
		return 0;
	switch (claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
		if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
			break;
		fallthrough;
	default:
		return 0;
	}
	nfs_mark_delegation_referenced(delegation);
	return 1;
}

static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
{
	switch (fmode) {
	case FMODE_WRITE:
		state->n_wronly++;
		break;
	case FMODE_READ:
		state->n_rdonly++;
		break;
	case FMODE_READ|FMODE_WRITE:
		state->n_rdwr++;
	}
	nfs4_state_set_mode_locked(state, state->state | fmode);
}

#ifdef CONFIG_NFS_V4_1
static bool nfs_open_stateid_recover_openmode(struct nfs4_state *state)
{
	if (state->n_rdonly && !test_bit(NFS_O_RDONLY_STATE, &state->flags))
		return true;
	if (state->n_wronly && !test_bit(NFS_O_WRONLY_STATE, &state->flags))
		return true;
	if (state->n_rdwr && !test_bit(NFS_O_RDWR_STATE, &state->flags))
		return true;
	return false;
}
#endif /* CONFIG_NFS_V4_1 */

static void nfs_state_log_update_open_stateid(struct nfs4_state *state)
{
	if (test_and_clear_bit(NFS_STATE_CHANGE_WAIT, &state->flags))
		wake_up_all(&state->waitq);
}

static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
{
	struct nfs_client *clp = state->owner->so_server->nfs_client;
	bool need_recover = false;

	if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
		need_recover = true;
	if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
		need_recover = true;
	if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
		need_recover = true;
	if (need_recover)
		nfs4_state_mark_reclaim_nograce(clp, state);
}
/*
 * Check whether or not the caller may update the open stateid
 * to the value passed in by stateid.
 *
 * Note: This function relies heavily on the server implementing
 * RFC7530 Section 9.1.4.2, and RFC5661 Section 8.2.2
 * correctly.
 * i.e. The stateid seqids have to be initialised to 1, and
 * are then incremented on every state transition.
 */
static bool nfs_stateid_is_sequential(struct nfs4_state *state,
		const nfs4_stateid *stateid)
{
	if (test_bit(NFS_OPEN_STATE, &state->flags)) {
		/* The common case - we're updating to a new sequence number */
		if (nfs4_stateid_match_other(stateid, &state->open_stateid)) {
			if (nfs4_stateid_is_next(&state->open_stateid, stateid))
				return true;
			return false;
		}
		/* The server returned a new stateid */
	}
	/* This is the first OPEN in this generation */
	if (stateid->seqid == cpu_to_be32(1))
		return true;
	return false;
}

static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
{
	if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
		return;
	if (state->n_wronly)
		set_bit(NFS_O_WRONLY_STATE, &state->flags);
	if (state->n_rdonly)
		set_bit(NFS_O_RDONLY_STATE, &state->flags);
	if (state->n_rdwr)
		set_bit(NFS_O_RDWR_STATE, &state->flags);
	set_bit(NFS_OPEN_STATE, &state->flags);
}

static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
		nfs4_stateid *stateid, fmode_t fmode)
{
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	switch (fmode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_WRITE:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		break;
	case FMODE_READ:
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		break;
	case 0:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		clear_bit(NFS_OPEN_STATE, &state->flags);
	}
	if (stateid == NULL)
		return;
	/* Handle OPEN+OPEN_DOWNGRADE races */
	if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
	    !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
		nfs_resync_open_stateid_locked(state);
		goto out;
	}
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
	trace_nfs4_open_stateid_update(state->inode, stateid, 0);
out:
	nfs_state_log_update_open_stateid(state);
}

static void nfs_clear_open_stateid(struct nfs4_state *state,
	nfs4_stateid *arg_stateid,
	nfs4_stateid *stateid, fmode_t fmode)
{
	write_seqlock(&state->seqlock);
	/* Ignore, if the CLOSE argument doesn't match the current stateid */
	if (nfs4_state_match_open_stateid_other(state, arg_stateid))
		nfs_clear_open_stateid_locked(state, stateid, fmode);
	write_sequnlock(&state->seqlock);
	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
}

static void nfs_set_open_stateid_locked(struct nfs4_state *state,
		const nfs4_stateid *stateid, nfs4_stateid *freeme)
	__must_hold(&state->owner->so_lock)
	__must_hold(&state->seqlock)
	__must_hold(RCU)
{
	DEFINE_WAIT(wait);
	int status = 0;

	for (;;) {

		if (nfs_stateid_is_sequential(state, stateid))
			break;

		if (status) {
			if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
			    !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
				trace_nfs4_open_stateid_update_skip(state->inode,
						stateid, status);
				return;
			} else {
				break;
			}
		}

		/* Rely on seqids for serialisation with NFSv4.0 */
		if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client))
			break;

		set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
		prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
		/*
		 * Ensure we process the state changes in the same order
		 * in which the server processed them by delaying the
		 * update of the stateid until we are in sequence.
		 */
		write_sequnlock(&state->seqlock);
		spin_unlock(&state->owner->so_lock);
		rcu_read_unlock();
		trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);

		if (!fatal_signal_pending(current) &&
		    !nfs_current_task_exiting()) {
			if (schedule_timeout(5*HZ) == 0)
				status = -EAGAIN;
			else
				status = 0;
		} else
			status = -EINTR;
		finish_wait(&state->waitq, &wait);
		rcu_read_lock();
		spin_lock(&state->owner->so_lock);
		write_seqlock(&state->seqlock);
	}

	if (test_bit(NFS_OPEN_STATE, &state->flags) &&
	    !nfs4_stateid_match_other(stateid, &state->open_stateid)) {
		nfs4_stateid_copy(freeme, &state->open_stateid);
		nfs_test_and_clear_all_open_stateid(state);
	}

	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
	trace_nfs4_open_stateid_update(state->inode, stateid, status);
	nfs_state_log_update_open_stateid(state);
}

static void nfs_state_set_open_stateid(struct nfs4_state *state,
		const nfs4_stateid *open_stateid,
		fmode_t fmode,
		nfs4_stateid *freeme)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	write_seqlock(&state->seqlock);
	nfs_set_open_stateid_locked(state, open_stateid, freeme);
	switch (fmode) {
	case FMODE_READ:
		set_bit(NFS_O_RDONLY_STATE, &state->flags);
		break;
	case FMODE_WRITE:
		set_bit(NFS_O_WRONLY_STATE, &state->flags);
		break;
	case FMODE_READ|FMODE_WRITE:
		set_bit(NFS_O_RDWR_STATE, &state->flags);
	}
	set_bit(NFS_OPEN_STATE, &state->flags);
	write_sequnlock(&state->seqlock);
}

static void nfs_state_clear_open_state_flags(struct nfs4_state *state)
{
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	clear_bit(NFS_O_WRONLY_STATE, &state->flags);
	clear_bit(NFS_O_RDONLY_STATE, &state->flags);
	clear_bit(NFS_OPEN_STATE, &state->flags);
}

static void nfs_state_set_delegation(struct nfs4_state *state,
				     const nfs4_stateid *deleg_stateid,
				     fmode_t fmode)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	write_seqlock(&state->seqlock);
	nfs4_stateid_copy(&state->stateid, deleg_stateid);
	set_bit(NFS_DELEGATED_STATE, &state->flags);
	write_sequnlock(&state->seqlock);
}

static void nfs_state_clear_delegation(struct nfs4_state *state)
{
	write_seqlock(&state->seqlock);
	nfs4_stateid_copy(&state->stateid, &state->open_stateid);
	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	write_sequnlock(&state->seqlock);
}
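/*
 * Update the open stateid for an nfs4_state and/or record a delegation
 * that was returned along with the OPEN.  Returns 1 if the state was
 * updated, 0 otherwise.
 */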
struct nfs_server *server = NFS_SERVER(state->inode); 1898 struct nfs_client *clp = server->nfs_client; 1899 struct nfs_inode *nfsi = NFS_I(state->inode); 1900 struct nfs_delegation *deleg_cur; 1901 nfs4_stateid freeme = { }; 1902 int ret = 0; 1903 1904 fmode &= (FMODE_READ|FMODE_WRITE); 1905 1906 rcu_read_lock(); 1907 spin_lock(&state->owner->so_lock); 1908 if (open_stateid != NULL) { 1909 nfs_state_set_open_stateid(state, open_stateid, fmode, &freeme); 1910 ret = 1; 1911 } 1912 1913 deleg_cur = nfs4_get_valid_delegation(state->inode); 1914 if (deleg_cur == NULL) 1915 goto no_delegation; 1916 1917 spin_lock(&deleg_cur->lock); 1918 if (rcu_dereference(nfsi->delegation) != deleg_cur || 1919 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) || 1920 (deleg_cur->type & fmode) != fmode) 1921 goto no_delegation_unlock; 1922 1923 if (delegation == NULL) 1924 delegation = &deleg_cur->stateid; 1925 else if (!nfs4_stateid_match_other(&deleg_cur->stateid, delegation)) 1926 goto no_delegation_unlock; 1927 1928 nfs_mark_delegation_referenced(deleg_cur); 1929 nfs_state_set_delegation(state, &deleg_cur->stateid, fmode); 1930 ret = 1; 1931 no_delegation_unlock: 1932 spin_unlock(&deleg_cur->lock); 1933 no_delegation: 1934 if (ret) 1935 update_open_stateflags(state, fmode); 1936 spin_unlock(&state->owner->so_lock); 1937 rcu_read_unlock(); 1938 1939 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) 1940 nfs4_schedule_state_manager(clp); 1941 if (freeme.type != 0) 1942 nfs4_test_and_free_stateid(server, &freeme, 1943 state->owner->so_cred); 1944 1945 return ret; 1946 } 1947 1948 static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp, 1949 const nfs4_stateid *stateid) 1950 { 1951 struct nfs4_state *state = lsp->ls_state; 1952 bool ret = false; 1953 1954 spin_lock(&state->state_lock); 1955 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid)) 1956 goto out_noupdate; 1957 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid)) 1958 goto out_noupdate; 1959 nfs4_stateid_copy(&lsp->ls_stateid, stateid); 1960 ret = true; 1961 out_noupdate: 1962 spin_unlock(&state->state_lock); 1963 return ret; 1964 } 1965 1966 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode) 1967 { 1968 struct nfs_delegation *delegation; 1969 1970 fmode &= FMODE_READ|FMODE_WRITE; 1971 rcu_read_lock(); 1972 delegation = nfs4_get_valid_delegation(inode); 1973 if (delegation == NULL || (delegation->type & fmode) == fmode) { 1974 rcu_read_unlock(); 1975 return; 1976 } 1977 rcu_read_unlock(); 1978 nfs4_inode_return_delegation(inode); 1979 } 1980 1981 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) 1982 { 1983 struct nfs4_state *state = opendata->state; 1984 struct nfs_delegation *delegation; 1985 int open_mode = opendata->o_arg.open_flags; 1986 fmode_t fmode = opendata->o_arg.fmode; 1987 enum open_claim_type4 claim = opendata->o_arg.claim; 1988 nfs4_stateid stateid; 1989 int ret = -EAGAIN; 1990 1991 for (;;) { 1992 spin_lock(&state->owner->so_lock); 1993 if (can_open_cached(state, fmode, open_mode, claim)) { 1994 update_open_stateflags(state, fmode); 1995 spin_unlock(&state->owner->so_lock); 1996 goto out_return_state; 1997 } 1998 spin_unlock(&state->owner->so_lock); 1999 rcu_read_lock(); 2000 delegation = nfs4_get_valid_delegation(state->inode); 2001 if (!can_open_delegated(delegation, fmode, claim)) { 2002 rcu_read_unlock(); 2003 break; 2004 } 2005 /* Save the delegation */ 2006 nfs4_stateid_copy(&stateid, &delegation->stateid); 2007 rcu_read_unlock(); 2008 
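/* A delegation covers this open, so no OPEN call needs to go on the
 * wire: release the seqid that was reserved for it. */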
nfs_release_seqid(opendata->o_arg.seqid); 2009 if (!opendata->is_recover) { 2010 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode); 2011 if (ret != 0) 2012 goto out; 2013 } 2014 ret = -EAGAIN; 2015 2016 /* Try to update the stateid using the delegation */ 2017 if (update_open_stateid(state, NULL, &stateid, fmode)) 2018 goto out_return_state; 2019 } 2020 out: 2021 return ERR_PTR(ret); 2022 out_return_state: 2023 refcount_inc(&state->count); 2024 return state; 2025 } 2026 2027 static void 2028 nfs4_process_delegation(struct inode *inode, const struct cred *cred, 2029 enum open_claim_type4 claim, 2030 const struct nfs4_open_delegation *delegation) 2031 { 2032 switch (delegation->open_delegation_type) { 2033 case NFS4_OPEN_DELEGATE_READ: 2034 case NFS4_OPEN_DELEGATE_WRITE: 2035 case NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG: 2036 case NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG: 2037 break; 2038 default: 2039 return; 2040 } 2041 switch (claim) { 2042 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 2043 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 2044 pr_err_ratelimited("NFS: Broken NFSv4 server %s is " 2045 "returning a delegation for " 2046 "OPEN(CLAIM_DELEGATE_CUR)\n", 2047 NFS_SERVER(inode)->nfs_client->cl_hostname); 2048 break; 2049 case NFS4_OPEN_CLAIM_PREVIOUS: 2050 nfs_inode_reclaim_delegation(inode, cred, delegation->type, 2051 &delegation->stateid, 2052 delegation->pagemod_limit, 2053 delegation->open_delegation_type); 2054 break; 2055 default: 2056 nfs_inode_set_delegation(inode, cred, delegation->type, 2057 &delegation->stateid, 2058 delegation->pagemod_limit, 2059 delegation->open_delegation_type); 2060 } 2061 if (delegation->do_recall) 2062 nfs_async_inode_return_delegation(inode, &delegation->stateid); 2063 } 2064 2065 /* 2066 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes 2067 * and update the nfs4_state. 
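 * A CLAIM_PREVIOUS open is a reboot-recovery reclaim: the reply refers
 * to an inode we already hold state for, so refresh that inode rather
 * than looking up a new one.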
2068 */ 2069 static struct nfs4_state * 2070 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data) 2071 { 2072 struct inode *inode = data->state->inode; 2073 struct nfs4_state *state = data->state; 2074 int ret; 2075 2076 if (!data->rpc_done) { 2077 if (data->rpc_status) 2078 return ERR_PTR(data->rpc_status); 2079 return nfs4_try_open_cached(data); 2080 } 2081 2082 ret = nfs_refresh_inode(inode, &data->f_attr); 2083 if (ret) 2084 return ERR_PTR(ret); 2085 2086 nfs4_process_delegation(state->inode, 2087 data->owner->so_cred, 2088 data->o_arg.claim, 2089 &data->o_res.delegation); 2090 2091 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_NO_OPEN_STATEID)) { 2092 if (!update_open_stateid(state, &data->o_res.stateid, 2093 NULL, data->o_arg.fmode)) 2094 return ERR_PTR(-EAGAIN); 2095 } else if (!update_open_stateid(state, NULL, NULL, data->o_arg.fmode)) 2096 return ERR_PTR(-EAGAIN); 2097 refcount_inc(&state->count); 2098 2099 return state; 2100 } 2101 2102 static struct inode * 2103 nfs4_opendata_get_inode(struct nfs4_opendata *data) 2104 { 2105 struct inode *inode; 2106 2107 switch (data->o_arg.claim) { 2108 case NFS4_OPEN_CLAIM_NULL: 2109 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 2110 case NFS4_OPEN_CLAIM_DELEGATE_PREV: 2111 if (!(data->f_attr.valid & NFS_ATTR_FATTR)) 2112 return ERR_PTR(-EAGAIN); 2113 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, 2114 &data->f_attr); 2115 break; 2116 default: 2117 inode = d_inode(data->dentry); 2118 ihold(inode); 2119 nfs_refresh_inode(inode, &data->f_attr); 2120 } 2121 return inode; 2122 } 2123 2124 static struct nfs4_state * 2125 nfs4_opendata_find_nfs4_state(struct nfs4_opendata *data) 2126 { 2127 struct nfs4_state *state; 2128 struct inode *inode; 2129 2130 inode = nfs4_opendata_get_inode(data); 2131 if (IS_ERR(inode)) 2132 return ERR_CAST(inode); 2133 if (data->state != NULL && data->state->inode == inode) { 2134 state = data->state; 2135 refcount_inc(&state->count); 2136 } else 2137 state = nfs4_get_open_state(inode, data->owner); 2138 iput(inode); 2139 if (state == NULL) 2140 state = ERR_PTR(-ENOMEM); 2141 return state; 2142 } 2143 2144 static struct nfs4_state * 2145 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 2146 { 2147 struct nfs4_state *state; 2148 2149 if (!data->rpc_done) { 2150 state = nfs4_try_open_cached(data); 2151 trace_nfs4_cached_open(data->state); 2152 goto out; 2153 } 2154 2155 state = nfs4_opendata_find_nfs4_state(data); 2156 if (IS_ERR(state)) 2157 goto out; 2158 2159 nfs4_process_delegation(state->inode, 2160 data->owner->so_cred, 2161 data->o_arg.claim, 2162 &data->o_res.delegation); 2163 2164 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_NO_OPEN_STATEID)) { 2165 if (!update_open_stateid(state, &data->o_res.stateid, 2166 NULL, data->o_arg.fmode)) { 2167 nfs4_put_open_state(state); 2168 state = ERR_PTR(-EAGAIN); 2169 } 2170 } else if (!update_open_stateid(state, NULL, NULL, data->o_arg.fmode)) { 2171 nfs4_put_open_state(state); 2172 state = ERR_PTR(-EAGAIN); 2173 } 2174 out: 2175 nfs_release_seqid(data->o_arg.seqid); 2176 return state; 2177 } 2178 2179 static struct nfs4_state * 2180 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 2181 { 2182 struct nfs4_state *ret; 2183 2184 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) 2185 ret =_nfs4_opendata_reclaim_to_nfs4_state(data); 2186 else 2187 ret = _nfs4_opendata_to_nfs4_state(data); 2188 nfs4_sequence_free_slot(&data->o_res.seq_res); 2189 return ret; 2190 } 2191 2192 static struct nfs_open_context * 2193 nfs4_state_find_open_context_mode(struct nfs4_state 
*state, fmode_t mode) 2194 { 2195 struct nfs_inode *nfsi = NFS_I(state->inode); 2196 struct nfs_open_context *ctx; 2197 2198 rcu_read_lock(); 2199 list_for_each_entry_rcu(ctx, &nfsi->open_files, list) { 2200 if (ctx->state != state) 2201 continue; 2202 if ((ctx->mode & mode) != mode) 2203 continue; 2204 if (!get_nfs_open_context(ctx)) 2205 continue; 2206 rcu_read_unlock(); 2207 return ctx; 2208 } 2209 rcu_read_unlock(); 2210 return ERR_PTR(-ENOENT); 2211 } 2212 2213 static struct nfs_open_context * 2214 nfs4_state_find_open_context(struct nfs4_state *state) 2215 { 2216 struct nfs_open_context *ctx; 2217 2218 ctx = nfs4_state_find_open_context_mode(state, FMODE_READ|FMODE_WRITE); 2219 if (!IS_ERR(ctx)) 2220 return ctx; 2221 ctx = nfs4_state_find_open_context_mode(state, FMODE_WRITE); 2222 if (!IS_ERR(ctx)) 2223 return ctx; 2224 return nfs4_state_find_open_context_mode(state, FMODE_READ); 2225 } 2226 2227 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, 2228 struct nfs4_state *state, enum open_claim_type4 claim) 2229 { 2230 struct nfs4_opendata *opendata; 2231 2232 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, 2233 NULL, claim, GFP_NOFS); 2234 if (opendata == NULL) 2235 return ERR_PTR(-ENOMEM); 2236 opendata->state = state; 2237 refcount_inc(&state->count); 2238 return opendata; 2239 } 2240 2241 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, 2242 fmode_t fmode) 2243 { 2244 struct nfs4_state *newstate; 2245 struct nfs_server *server = NFS_SB(opendata->dentry->d_sb); 2246 int openflags = opendata->o_arg.open_flags; 2247 int ret; 2248 2249 if (!nfs4_mode_match_open_stateid(opendata->state, fmode)) 2250 return 0; 2251 opendata->o_arg.fmode = fmode; 2252 opendata->o_arg.share_access = 2253 nfs4_map_atomic_open_share(server, fmode, openflags); 2254 memset(&opendata->o_res, 0, sizeof(opendata->o_res)); 2255 memset(&opendata->c_res, 0, sizeof(opendata->c_res)); 2256 nfs4_init_opendata_res(opendata); 2257 ret = _nfs4_recover_proc_open(opendata); 2258 if (ret != 0) 2259 return ret; 2260 newstate = nfs4_opendata_to_nfs4_state(opendata); 2261 if (IS_ERR(newstate)) 2262 return PTR_ERR(newstate); 2263 if (newstate != opendata->state) 2264 ret = -ESTALE; 2265 nfs4_close_state(newstate, fmode); 2266 return ret; 2267 } 2268 2269 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state) 2270 { 2271 int ret; 2272 2273 /* memory barrier prior to reading state->n_* */ 2274 smp_rmb(); 2275 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); 2276 if (ret != 0) 2277 return ret; 2278 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE); 2279 if (ret != 0) 2280 return ret; 2281 ret = nfs4_open_recover_helper(opendata, FMODE_READ); 2282 if (ret != 0) 2283 return ret; 2284 /* 2285 * We may have performed cached opens for all three recoveries. 2286 * Check if we need to update the current stateid. 2287 */ 2288 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 && 2289 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) { 2290 write_seqlock(&state->seqlock); 2291 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 2292 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 2293 write_sequnlock(&state->seqlock); 2294 } 2295 return 0; 2296 } 2297 2298 /* 2299 * OPEN_RECLAIM: 2300 * reclaim state on the server after a reboot. 
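 * The reclaim is performed with OPEN(CLAIM_PREVIOUS); if the inode also
 * held a delegation that is flagged for reclaim, the matching delegation
 * type is requested as part of the same open.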
2301 */ 2302 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 2303 { 2304 struct nfs_delegation *delegation; 2305 struct nfs4_opendata *opendata; 2306 u32 delegation_type = NFS4_OPEN_DELEGATE_NONE; 2307 int status; 2308 2309 opendata = nfs4_open_recoverdata_alloc(ctx, state, 2310 NFS4_OPEN_CLAIM_PREVIOUS); 2311 if (IS_ERR(opendata)) 2312 return PTR_ERR(opendata); 2313 rcu_read_lock(); 2314 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2315 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0) { 2316 switch(delegation->type) { 2317 case FMODE_READ: 2318 delegation_type = NFS4_OPEN_DELEGATE_READ; 2319 if (test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) 2320 delegation_type = NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG; 2321 break; 2322 case FMODE_WRITE: 2323 case FMODE_READ|FMODE_WRITE: 2324 delegation_type = NFS4_OPEN_DELEGATE_WRITE; 2325 if (test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) 2326 delegation_type = NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG; 2327 } 2328 } 2329 rcu_read_unlock(); 2330 opendata->o_arg.u.delegation_type = delegation_type; 2331 status = nfs4_open_recover(opendata, state); 2332 nfs4_opendata_put(opendata); 2333 return status; 2334 } 2335 2336 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 2337 { 2338 struct nfs_server *server = NFS_SERVER(state->inode); 2339 struct nfs4_exception exception = { }; 2340 int err; 2341 do { 2342 err = _nfs4_do_open_reclaim(ctx, state); 2343 trace_nfs4_open_reclaim(ctx, 0, err); 2344 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 2345 continue; 2346 if (err != -NFS4ERR_DELAY) 2347 break; 2348 nfs4_handle_exception(server, err, &exception); 2349 } while (exception.retry); 2350 return err; 2351 } 2352 2353 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state) 2354 { 2355 struct nfs_open_context *ctx; 2356 int ret; 2357 2358 ctx = nfs4_state_find_open_context(state); 2359 if (IS_ERR(ctx)) 2360 return -EAGAIN; 2361 clear_bit(NFS_DELEGATED_STATE, &state->flags); 2362 nfs_state_clear_open_state_flags(state); 2363 ret = nfs4_do_open_reclaim(ctx, state); 2364 put_nfs_open_context(ctx); 2365 return ret; 2366 } 2367 2368 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err) 2369 { 2370 switch (err) { 2371 default: 2372 printk(KERN_ERR "NFS: %s: unhandled error " 2373 "%d.\n", __func__, err); 2374 fallthrough; 2375 case 0: 2376 case -ENOENT: 2377 case -EAGAIN: 2378 case -ESTALE: 2379 case -ETIMEDOUT: 2380 break; 2381 case -NFS4ERR_BADSESSION: 2382 case -NFS4ERR_BADSLOT: 2383 case -NFS4ERR_BAD_HIGH_SLOT: 2384 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 2385 case -NFS4ERR_DEADSESSION: 2386 return -EAGAIN; 2387 case -NFS4ERR_STALE_CLIENTID: 2388 case -NFS4ERR_STALE_STATEID: 2389 /* Don't recall a delegation if it was lost */ 2390 nfs4_schedule_lease_recovery(server->nfs_client); 2391 return -EAGAIN; 2392 case -NFS4ERR_MOVED: 2393 nfs4_schedule_migration_recovery(server); 2394 return -EAGAIN; 2395 case -NFS4ERR_LEASE_MOVED: 2396 nfs4_schedule_lease_moved_recovery(server->nfs_client); 2397 return -EAGAIN; 2398 case -NFS4ERR_DELEG_REVOKED: 2399 case -NFS4ERR_ADMIN_REVOKED: 2400 case -NFS4ERR_EXPIRED: 2401 case -NFS4ERR_BAD_STATEID: 2402 case -NFS4ERR_OPENMODE: 2403 nfs_inode_find_state_and_recover(state->inode, 2404 stateid); 2405 nfs4_schedule_stateid_recovery(server, state); 
2406 return -EAGAIN; 2407 case -NFS4ERR_DELAY: 2408 case -NFS4ERR_GRACE: 2409 ssleep(1); 2410 return -EAGAIN; 2411 case -ENOMEM: 2412 case -NFS4ERR_DENIED: 2413 if (fl) { 2414 struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner; 2415 if (lsp) 2416 set_bit(NFS_LOCK_LOST, &lsp->ls_flags); 2417 } 2418 return 0; 2419 } 2420 return err; 2421 } 2422 2423 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, 2424 struct nfs4_state *state, const nfs4_stateid *stateid) 2425 { 2426 struct nfs_server *server = NFS_SERVER(state->inode); 2427 struct nfs4_opendata *opendata; 2428 int err = 0; 2429 2430 opendata = nfs4_open_recoverdata_alloc(ctx, state, 2431 NFS4_OPEN_CLAIM_DELEG_CUR_FH); 2432 if (IS_ERR(opendata)) 2433 return PTR_ERR(opendata); 2434 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); 2435 if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) { 2436 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); 2437 if (err) 2438 goto out; 2439 } 2440 if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) { 2441 err = nfs4_open_recover_helper(opendata, FMODE_WRITE); 2442 if (err) 2443 goto out; 2444 } 2445 if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) { 2446 err = nfs4_open_recover_helper(opendata, FMODE_READ); 2447 if (err) 2448 goto out; 2449 } 2450 nfs_state_clear_delegation(state); 2451 out: 2452 nfs4_opendata_put(opendata); 2453 return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err); 2454 } 2455 2456 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata) 2457 { 2458 struct nfs4_opendata *data = calldata; 2459 2460 nfs4_setup_sequence(data->o_arg.server->nfs_client, 2461 &data->c_arg.seq_args, &data->c_res.seq_res, task); 2462 } 2463 2464 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata) 2465 { 2466 struct nfs4_opendata *data = calldata; 2467 2468 nfs40_sequence_done(task, &data->c_res.seq_res); 2469 2470 data->rpc_status = task->tk_status; 2471 if (data->rpc_status == 0) { 2472 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid); 2473 nfs_confirm_seqid(&data->owner->so_seqid, 0); 2474 renew_lease(data->o_res.server, data->timestamp); 2475 data->rpc_done = true; 2476 } 2477 } 2478 2479 static void nfs4_open_confirm_release(void *calldata) 2480 { 2481 struct nfs4_opendata *data = calldata; 2482 struct nfs4_state *state = NULL; 2483 2484 /* If this request hasn't been cancelled, do nothing */ 2485 if (!data->cancelled) 2486 goto out_free; 2487 /* In case of error, no cleanup! 
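 * (the OPEN_CONFIRM did not complete, so there is no confirmed open
 * state here that would need to be closed)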
*/ 2488 if (!data->rpc_done) 2489 goto out_free; 2490 state = nfs4_opendata_to_nfs4_state(data); 2491 if (!IS_ERR(state)) 2492 nfs4_close_state(state, data->o_arg.fmode); 2493 out_free: 2494 nfs4_opendata_put(data); 2495 } 2496 2497 static const struct rpc_call_ops nfs4_open_confirm_ops = { 2498 .rpc_call_prepare = nfs4_open_confirm_prepare, 2499 .rpc_call_done = nfs4_open_confirm_done, 2500 .rpc_release = nfs4_open_confirm_release, 2501 }; 2502 2503 /* 2504 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata 2505 */ 2506 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) 2507 { 2508 struct nfs_server *server = NFS_SERVER(d_inode(data->dir)); 2509 struct rpc_task *task; 2510 struct rpc_message msg = { 2511 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM], 2512 .rpc_argp = &data->c_arg, 2513 .rpc_resp = &data->c_res, 2514 .rpc_cred = data->owner->so_cred, 2515 }; 2516 struct rpc_task_setup task_setup_data = { 2517 .rpc_client = server->client, 2518 .rpc_message = &msg, 2519 .callback_ops = &nfs4_open_confirm_ops, 2520 .callback_data = data, 2521 .workqueue = nfsiod_workqueue, 2522 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 2523 }; 2524 int status; 2525 2526 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1, 2527 data->is_recover); 2528 kref_get(&data->kref); 2529 data->rpc_done = false; 2530 data->rpc_status = 0; 2531 data->timestamp = jiffies; 2532 task = rpc_run_task(&task_setup_data); 2533 if (IS_ERR(task)) 2534 return PTR_ERR(task); 2535 status = rpc_wait_for_completion_task(task); 2536 if (status != 0) { 2537 data->cancelled = true; 2538 smp_wmb(); 2539 } else 2540 status = data->rpc_status; 2541 rpc_put_task(task); 2542 return status; 2543 } 2544 2545 static void nfs4_open_prepare(struct rpc_task *task, void *calldata) 2546 { 2547 struct nfs4_opendata *data = calldata; 2548 struct nfs4_state_owner *sp = data->owner; 2549 struct nfs_client *clp = sp->so_server->nfs_client; 2550 enum open_claim_type4 claim = data->o_arg.claim; 2551 2552 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) 2553 goto out_wait; 2554 /* 2555 * Check if we still need to send an OPEN call, or if we can use 2556 * a delegation instead. 2557 */ 2558 if (data->state != NULL) { 2559 struct nfs_delegation *delegation; 2560 2561 if (can_open_cached(data->state, data->o_arg.fmode, 2562 data->o_arg.open_flags, claim)) 2563 goto out_no_action; 2564 rcu_read_lock(); 2565 delegation = nfs4_get_valid_delegation(data->state->inode); 2566 if (can_open_delegated(delegation, data->o_arg.fmode, claim)) 2567 goto unlock_no_action; 2568 rcu_read_unlock(); 2569 } 2570 /* Update client id. 
*/ 2571 data->o_arg.clientid = clp->cl_clientid; 2572 switch (claim) { 2573 default: 2574 break; 2575 case NFS4_OPEN_CLAIM_PREVIOUS: 2576 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 2577 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 2578 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0]; 2579 fallthrough; 2580 case NFS4_OPEN_CLAIM_FH: 2581 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; 2582 } 2583 data->timestamp = jiffies; 2584 if (nfs4_setup_sequence(data->o_arg.server->nfs_client, 2585 &data->o_arg.seq_args, 2586 &data->o_res.seq_res, 2587 task) != 0) 2588 nfs_release_seqid(data->o_arg.seqid); 2589 2590 /* Set the create mode (note dependency on the session type) */ 2591 data->o_arg.createmode = NFS4_CREATE_UNCHECKED; 2592 if (data->o_arg.open_flags & O_EXCL) { 2593 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1; 2594 if (clp->cl_mvops->minor_version == 0) { 2595 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE; 2596 /* don't put an ACCESS op in OPEN compound if O_EXCL, 2597 * because ACCESS will return permission denied for 2598 * all bits until close */ 2599 data->o_res.access_request = data->o_arg.access = 0; 2600 } else if (nfs4_has_persistent_session(clp)) 2601 data->o_arg.createmode = NFS4_CREATE_GUARDED; 2602 } 2603 return; 2604 unlock_no_action: 2605 trace_nfs4_cached_open(data->state); 2606 rcu_read_unlock(); 2607 out_no_action: 2608 task->tk_action = NULL; 2609 out_wait: 2610 nfs4_sequence_done(task, &data->o_res.seq_res); 2611 } 2612 2613 static void nfs4_open_done(struct rpc_task *task, void *calldata) 2614 { 2615 struct nfs4_opendata *data = calldata; 2616 2617 data->rpc_status = task->tk_status; 2618 2619 if (!nfs4_sequence_process(task, &data->o_res.seq_res)) 2620 return; 2621 2622 if (task->tk_status == 0) { 2623 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) { 2624 switch (data->o_res.f_attr->mode & S_IFMT) { 2625 case S_IFREG: 2626 break; 2627 case S_IFLNK: 2628 data->rpc_status = -ELOOP; 2629 break; 2630 case S_IFDIR: 2631 data->rpc_status = -EISDIR; 2632 break; 2633 default: 2634 data->rpc_status = -ENOTDIR; 2635 } 2636 } 2637 renew_lease(data->o_res.server, data->timestamp); 2638 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)) 2639 nfs_confirm_seqid(&data->owner->so_seqid, 0); 2640 } 2641 data->rpc_done = true; 2642 } 2643 2644 static void nfs4_open_release(void *calldata) 2645 { 2646 struct nfs4_opendata *data = calldata; 2647 struct nfs4_state *state = NULL; 2648 2649 /* In case of error, no cleanup! */ 2650 if (data->rpc_status != 0 || !data->rpc_done) { 2651 nfs_release_seqid(data->o_arg.seqid); 2652 goto out_free; 2653 } 2654 /* If this request hasn't been cancelled, do nothing */ 2655 if (!data->cancelled) 2656 goto out_free; 2657 /* In case we need an open_confirm, no cleanup! 
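 * (an open that still needs an OPEN_CONFIRM is not yet valid state on
 * the server, so there is nothing to CLOSE from here)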
*/ 2658 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) 2659 goto out_free; 2660 state = nfs4_opendata_to_nfs4_state(data); 2661 if (!IS_ERR(state)) 2662 nfs4_close_state(state, data->o_arg.fmode); 2663 out_free: 2664 nfs4_opendata_put(data); 2665 } 2666 2667 static const struct rpc_call_ops nfs4_open_ops = { 2668 .rpc_call_prepare = nfs4_open_prepare, 2669 .rpc_call_done = nfs4_open_done, 2670 .rpc_release = nfs4_open_release, 2671 }; 2672 2673 static int nfs4_run_open_task(struct nfs4_opendata *data, 2674 struct nfs_open_context *ctx) 2675 { 2676 struct inode *dir = d_inode(data->dir); 2677 struct nfs_server *server = NFS_SERVER(dir); 2678 struct nfs_openargs *o_arg = &data->o_arg; 2679 struct nfs_openres *o_res = &data->o_res; 2680 struct rpc_task *task; 2681 struct rpc_message msg = { 2682 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN], 2683 .rpc_argp = o_arg, 2684 .rpc_resp = o_res, 2685 .rpc_cred = data->owner->so_cred, 2686 }; 2687 struct rpc_task_setup task_setup_data = { 2688 .rpc_client = server->client, 2689 .rpc_message = &msg, 2690 .callback_ops = &nfs4_open_ops, 2691 .callback_data = data, 2692 .workqueue = nfsiod_workqueue, 2693 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 2694 }; 2695 int status; 2696 2697 if (nfs_server_capable(dir, NFS_CAP_MOVEABLE)) 2698 task_setup_data.flags |= RPC_TASK_MOVEABLE; 2699 2700 kref_get(&data->kref); 2701 data->rpc_done = false; 2702 data->rpc_status = 0; 2703 data->cancelled = false; 2704 data->is_recover = false; 2705 if (!ctx) { 2706 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 1); 2707 data->is_recover = true; 2708 task_setup_data.flags |= RPC_TASK_TIMEOUT; 2709 } else { 2710 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 0); 2711 pnfs_lgopen_prepare(data, ctx); 2712 } 2713 task = rpc_run_task(&task_setup_data); 2714 if (IS_ERR(task)) 2715 return PTR_ERR(task); 2716 status = rpc_wait_for_completion_task(task); 2717 if (status != 0) { 2718 data->cancelled = true; 2719 smp_wmb(); 2720 } else 2721 status = data->rpc_status; 2722 rpc_put_task(task); 2723 2724 return status; 2725 } 2726 2727 static int _nfs4_recover_proc_open(struct nfs4_opendata *data) 2728 { 2729 struct inode *dir = d_inode(data->dir); 2730 struct nfs_openres *o_res = &data->o_res; 2731 int status; 2732 2733 status = nfs4_run_open_task(data, NULL); 2734 if (status != 0 || !data->rpc_done) 2735 return status; 2736 2737 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr); 2738 2739 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) 2740 status = _nfs4_proc_open_confirm(data); 2741 2742 return status; 2743 } 2744 2745 /* 2746 * Additional permission checks in order to distinguish between an 2747 * open for read, and an open for execute. This works around the 2748 * fact that NFSv4 OPEN treats read and execute permissions as being 2749 * the same. 2750 * Note that in the non-execute case, we want to turn off permission 2751 * checking if we just created a new file (POSIX open() semantics). 
2752 */ 2753 static int nfs4_opendata_access(const struct cred *cred, 2754 struct nfs4_opendata *opendata, 2755 struct nfs4_state *state, fmode_t fmode) 2756 { 2757 struct nfs_access_entry cache; 2758 u32 mask, flags; 2759 2760 /* access call failed or for some reason the server doesn't 2761 * support any access modes -- defer access call until later */ 2762 if (opendata->o_res.access_supported == 0) 2763 return 0; 2764 2765 mask = 0; 2766 if (fmode & FMODE_EXEC) { 2767 /* ONLY check for exec rights */ 2768 if (S_ISDIR(state->inode->i_mode)) 2769 mask = NFS4_ACCESS_LOOKUP; 2770 else 2771 mask = NFS4_ACCESS_EXECUTE; 2772 } else if ((fmode & FMODE_READ) && !opendata->file_created) 2773 mask = NFS4_ACCESS_READ; 2774 2775 nfs_access_set_mask(&cache, opendata->o_res.access_result); 2776 nfs_access_add_cache(state->inode, &cache, cred); 2777 2778 flags = NFS4_ACCESS_READ | NFS4_ACCESS_EXECUTE | NFS4_ACCESS_LOOKUP; 2779 if ((mask & ~cache.mask & flags) == 0) 2780 return 0; 2781 2782 return -EACCES; 2783 } 2784 2785 /* 2786 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata 2787 */ 2788 static int _nfs4_proc_open(struct nfs4_opendata *data, 2789 struct nfs_open_context *ctx) 2790 { 2791 struct inode *dir = d_inode(data->dir); 2792 struct nfs_server *server = NFS_SERVER(dir); 2793 struct nfs_openargs *o_arg = &data->o_arg; 2794 struct nfs_openres *o_res = &data->o_res; 2795 int status; 2796 2797 status = nfs4_run_open_task(data, ctx); 2798 if (!data->rpc_done) 2799 return status; 2800 if (status != 0) { 2801 if (status == -NFS4ERR_BADNAME && 2802 !(o_arg->open_flags & O_CREAT)) 2803 return -ENOENT; 2804 return status; 2805 } 2806 2807 nfs_fattr_map_and_free_names(server, &data->f_attr); 2808 2809 if (o_arg->open_flags & O_CREAT) { 2810 if (o_arg->open_flags & O_EXCL) 2811 data->file_created = true; 2812 else if (o_res->cinfo.before != o_res->cinfo.after) 2813 data->file_created = true; 2814 if (data->file_created || 2815 inode_peek_iversion_raw(dir) != o_res->cinfo.after) 2816 nfs4_update_changeattr(dir, &o_res->cinfo, 2817 o_res->f_attr->time_start, 2818 NFS_INO_INVALID_DATA); 2819 } 2820 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) 2821 server->caps &= ~NFS_CAP_POSIX_LOCK; 2822 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 2823 status = _nfs4_proc_open_confirm(data); 2824 if (status != 0) 2825 return status; 2826 } 2827 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) { 2828 struct nfs_fh *fh = &o_res->fh; 2829 2830 nfs4_sequence_free_slot(&o_res->seq_res); 2831 if (o_arg->claim == NFS4_OPEN_CLAIM_FH) 2832 fh = NFS_FH(d_inode(data->dentry)); 2833 nfs4_proc_getattr(server, fh, o_res->f_attr, NULL); 2834 } 2835 return 0; 2836 } 2837 2838 /* 2839 * OPEN_EXPIRED: 2840 * reclaim state on the server after a network partition. 2841 * Assumes caller holds the appropriate lock 2842 */ 2843 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2844 { 2845 struct nfs4_opendata *opendata; 2846 int ret; 2847 2848 opendata = nfs4_open_recoverdata_alloc(ctx, state, NFS4_OPEN_CLAIM_FH); 2849 if (IS_ERR(opendata)) 2850 return PTR_ERR(opendata); 2851 /* 2852 * We're not recovering a delegation, so ask for no delegation. 2853 * Otherwise the recovery thread could deadlock with an outstanding 2854 * delegation return. 
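 * (setting O_DIRECT here relies on nfs4_map_atomic_open_share()
 * translating it into a "want no delegation" share_access request on
 * servers that support that)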
2855 */ 2856 opendata->o_arg.open_flags = O_DIRECT; 2857 ret = nfs4_open_recover(opendata, state); 2858 if (ret == -ESTALE) 2859 d_drop(ctx->dentry); 2860 nfs4_opendata_put(opendata); 2861 return ret; 2862 } 2863 2864 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2865 { 2866 struct nfs_server *server = NFS_SERVER(state->inode); 2867 struct nfs4_exception exception = { }; 2868 int err; 2869 2870 do { 2871 err = _nfs4_open_expired(ctx, state); 2872 trace_nfs4_open_expired(ctx, 0, err); 2873 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 2874 continue; 2875 switch (err) { 2876 default: 2877 goto out; 2878 case -NFS4ERR_GRACE: 2879 case -NFS4ERR_DELAY: 2880 nfs4_handle_exception(server, err, &exception); 2881 err = 0; 2882 } 2883 } while (exception.retry); 2884 out: 2885 return err; 2886 } 2887 2888 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2889 { 2890 struct nfs_open_context *ctx; 2891 int ret; 2892 2893 ctx = nfs4_state_find_open_context(state); 2894 if (IS_ERR(ctx)) 2895 return -EAGAIN; 2896 ret = nfs4_do_open_expired(ctx, state); 2897 put_nfs_open_context(ctx); 2898 return ret; 2899 } 2900 2901 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state, 2902 const nfs4_stateid *stateid) 2903 { 2904 nfs_remove_bad_delegation(state->inode, stateid); 2905 nfs_state_clear_delegation(state); 2906 } 2907 2908 static void nfs40_clear_delegation_stateid(struct nfs4_state *state) 2909 { 2910 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL) 2911 nfs_finish_clear_delegation_stateid(state, NULL); 2912 } 2913 2914 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2915 { 2916 /* NFSv4.0 doesn't allow for delegation recovery on open expire */ 2917 nfs40_clear_delegation_stateid(state); 2918 nfs_state_clear_open_state_flags(state); 2919 return nfs4_open_expired(sp, state); 2920 } 2921 2922 static int nfs40_test_and_free_expired_stateid(struct nfs_server *server, 2923 nfs4_stateid *stateid, const struct cred *cred) 2924 { 2925 return -NFS4ERR_BAD_STATEID; 2926 } 2927 2928 #if defined(CONFIG_NFS_V4_1) 2929 static int nfs41_test_and_free_expired_stateid(struct nfs_server *server, 2930 nfs4_stateid *stateid, const struct cred *cred) 2931 { 2932 int status; 2933 2934 switch (stateid->type) { 2935 default: 2936 break; 2937 case NFS4_INVALID_STATEID_TYPE: 2938 case NFS4_SPECIAL_STATEID_TYPE: 2939 case NFS4_FREED_STATEID_TYPE: 2940 return -NFS4ERR_BAD_STATEID; 2941 case NFS4_REVOKED_STATEID_TYPE: 2942 goto out_free; 2943 } 2944 2945 status = nfs41_test_stateid(server, stateid, cred); 2946 switch (status) { 2947 case -NFS4ERR_EXPIRED: 2948 case -NFS4ERR_ADMIN_REVOKED: 2949 case -NFS4ERR_DELEG_REVOKED: 2950 break; 2951 default: 2952 return status; 2953 } 2954 out_free: 2955 /* Ack the revoked state to the server */ 2956 nfs41_free_stateid(server, stateid, cred, true); 2957 return -NFS4ERR_EXPIRED; 2958 } 2959 2960 static int nfs41_check_delegation_stateid(struct nfs4_state *state) 2961 { 2962 struct nfs_server *server = NFS_SERVER(state->inode); 2963 nfs4_stateid stateid; 2964 struct nfs_delegation *delegation; 2965 const struct cred *cred = NULL; 2966 int status, ret = NFS_OK; 2967 2968 /* Get the delegation credential for use by test/free_stateid */ 2969 rcu_read_lock(); 2970 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2971 if (delegation == NULL) { 2972 rcu_read_unlock(); 2973 nfs_state_clear_delegation(state); 2974 return NFS_OK; 
2975 } 2976 2977 spin_lock(&delegation->lock); 2978 nfs4_stateid_copy(&stateid, &delegation->stateid); 2979 2980 if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, 2981 &delegation->flags)) { 2982 spin_unlock(&delegation->lock); 2983 rcu_read_unlock(); 2984 return NFS_OK; 2985 } 2986 2987 if (delegation->cred) 2988 cred = get_cred(delegation->cred); 2989 spin_unlock(&delegation->lock); 2990 rcu_read_unlock(); 2991 status = nfs41_test_and_free_expired_stateid(server, &stateid, cred); 2992 trace_nfs4_test_delegation_stateid(state, NULL, status); 2993 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) 2994 nfs_finish_clear_delegation_stateid(state, &stateid); 2995 else 2996 ret = status; 2997 2998 put_cred(cred); 2999 return ret; 3000 } 3001 3002 static void nfs41_delegation_recover_stateid(struct nfs4_state *state) 3003 { 3004 nfs4_stateid tmp; 3005 3006 if (test_bit(NFS_DELEGATED_STATE, &state->flags) && 3007 nfs4_copy_delegation_stateid(state->inode, state->state, 3008 &tmp, NULL) && 3009 nfs4_stateid_match_other(&state->stateid, &tmp)) 3010 nfs_state_set_delegation(state, &tmp, state->state); 3011 else 3012 nfs_state_clear_delegation(state); 3013 } 3014 3015 /** 3016 * nfs41_check_expired_locks - possibly free a lock stateid 3017 * 3018 * @state: NFSv4 state for an inode 3019 * 3020 * Returns NFS_OK if recovery for this stateid is now finished. 3021 * Otherwise a negative NFS4ERR value is returned. 3022 */ 3023 static int nfs41_check_expired_locks(struct nfs4_state *state) 3024 { 3025 int status, ret = NFS_OK; 3026 struct nfs4_lock_state *lsp, *prev = NULL; 3027 struct nfs_server *server = NFS_SERVER(state->inode); 3028 3029 if (!test_bit(LK_STATE_IN_USE, &state->flags)) 3030 goto out; 3031 3032 spin_lock(&state->state_lock); 3033 list_for_each_entry(lsp, &state->lock_states, ls_locks) { 3034 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { 3035 const struct cred *cred = lsp->ls_state->owner->so_cred; 3036 3037 refcount_inc(&lsp->ls_count); 3038 spin_unlock(&state->state_lock); 3039 3040 nfs4_put_lock_state(prev); 3041 prev = lsp; 3042 3043 status = nfs41_test_and_free_expired_stateid(server, 3044 &lsp->ls_stateid, 3045 cred); 3046 trace_nfs4_test_lock_stateid(state, lsp, status); 3047 if (status == -NFS4ERR_EXPIRED || 3048 status == -NFS4ERR_BAD_STATEID) { 3049 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 3050 lsp->ls_stateid.type = NFS4_INVALID_STATEID_TYPE; 3051 if (!recover_lost_locks) 3052 set_bit(NFS_LOCK_LOST, &lsp->ls_flags); 3053 } else if (status != NFS_OK) { 3054 ret = status; 3055 nfs4_put_lock_state(prev); 3056 goto out; 3057 } 3058 spin_lock(&state->state_lock); 3059 } 3060 } 3061 spin_unlock(&state->state_lock); 3062 nfs4_put_lock_state(prev); 3063 out: 3064 return ret; 3065 } 3066 3067 /** 3068 * nfs41_check_open_stateid - possibly free an open stateid 3069 * 3070 * @state: NFSv4 state for an inode 3071 * 3072 * Returns NFS_OK if recovery for this stateid is now finished. 3073 * Otherwise a negative NFS4ERR value is returned. 
3074 */ 3075 static int nfs41_check_open_stateid(struct nfs4_state *state) 3076 { 3077 struct nfs_server *server = NFS_SERVER(state->inode); 3078 nfs4_stateid *stateid = &state->open_stateid; 3079 const struct cred *cred = state->owner->so_cred; 3080 int status; 3081 3082 if (test_bit(NFS_OPEN_STATE, &state->flags) == 0) 3083 return -NFS4ERR_BAD_STATEID; 3084 status = nfs41_test_and_free_expired_stateid(server, stateid, cred); 3085 trace_nfs4_test_open_stateid(state, NULL, status); 3086 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) { 3087 nfs_state_clear_open_state_flags(state); 3088 stateid->type = NFS4_INVALID_STATEID_TYPE; 3089 return status; 3090 } 3091 if (nfs_open_stateid_recover_openmode(state)) 3092 return -NFS4ERR_OPENMODE; 3093 return NFS_OK; 3094 } 3095 3096 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 3097 { 3098 int status; 3099 3100 status = nfs41_check_delegation_stateid(state); 3101 if (status != NFS_OK) 3102 return status; 3103 nfs41_delegation_recover_stateid(state); 3104 3105 status = nfs41_check_expired_locks(state); 3106 if (status != NFS_OK) 3107 return status; 3108 status = nfs41_check_open_stateid(state); 3109 if (status != NFS_OK) 3110 status = nfs4_open_expired(sp, state); 3111 return status; 3112 } 3113 #endif 3114 3115 /* 3116 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-* 3117 * fields corresponding to attributes that were used to store the verifier. 3118 * Make sure we clobber those fields in the later setattr call 3119 */ 3120 static unsigned nfs4_exclusive_attrset(struct nfs4_opendata *opendata, 3121 struct iattr *sattr, struct nfs4_label **label) 3122 { 3123 const __u32 *bitmask = opendata->o_arg.server->exclcreat_bitmask; 3124 __u32 attrset[3]; 3125 unsigned ret; 3126 unsigned i; 3127 3128 for (i = 0; i < ARRAY_SIZE(attrset); i++) { 3129 attrset[i] = opendata->o_res.attrset[i]; 3130 if (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE4_1) 3131 attrset[i] &= ~bitmask[i]; 3132 } 3133 3134 ret = (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE) ? 
3135 sattr->ia_valid : 0; 3136 3137 if ((attrset[1] & (FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET))) { 3138 if (sattr->ia_valid & ATTR_ATIME_SET) 3139 ret |= ATTR_ATIME_SET; 3140 else 3141 ret |= ATTR_ATIME; 3142 } 3143 3144 if ((attrset[1] & (FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET))) { 3145 if (sattr->ia_valid & ATTR_MTIME_SET) 3146 ret |= ATTR_MTIME_SET; 3147 else 3148 ret |= ATTR_MTIME; 3149 } 3150 3151 if (!(attrset[2] & FATTR4_WORD2_SECURITY_LABEL)) 3152 *label = NULL; 3153 return ret; 3154 } 3155 3156 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, 3157 struct nfs_open_context *ctx) 3158 { 3159 struct nfs4_state_owner *sp = opendata->owner; 3160 struct nfs_server *server = sp->so_server; 3161 struct dentry *dentry; 3162 struct nfs4_state *state; 3163 fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx); 3164 struct inode *dir = d_inode(opendata->dir); 3165 unsigned long dir_verifier; 3166 int ret; 3167 3168 dir_verifier = nfs_save_change_attribute(dir); 3169 3170 ret = _nfs4_proc_open(opendata, ctx); 3171 if (ret != 0) 3172 goto out; 3173 3174 state = _nfs4_opendata_to_nfs4_state(opendata); 3175 ret = PTR_ERR(state); 3176 if (IS_ERR(state)) 3177 goto out; 3178 ctx->state = state; 3179 if (server->caps & NFS_CAP_POSIX_LOCK) 3180 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); 3181 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK) 3182 set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags); 3183 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_PRESERVE_UNLINKED) 3184 set_bit(NFS_INO_PRESERVE_UNLINKED, &NFS_I(state->inode)->flags); 3185 3186 switch(opendata->o_arg.claim) { 3187 default: 3188 break; 3189 case NFS4_OPEN_CLAIM_NULL: 3190 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 3191 case NFS4_OPEN_CLAIM_DELEGATE_PREV: 3192 if (!opendata->rpc_done) 3193 break; 3194 if (opendata->o_res.delegation.type != 0) 3195 dir_verifier = nfs_save_change_attribute(dir); 3196 } 3197 3198 dentry = opendata->dentry; 3199 nfs_set_verifier(dentry, dir_verifier); 3200 if (d_really_is_negative(dentry)) { 3201 struct dentry *alias; 3202 d_drop(dentry); 3203 alias = d_splice_alias(igrab(state->inode), dentry); 3204 /* d_splice_alias() can't fail here - it's a non-directory */ 3205 if (alias) { 3206 dput(ctx->dentry); 3207 nfs_set_verifier(alias, dir_verifier); 3208 ctx->dentry = dentry = alias; 3209 } 3210 } 3211 3212 /* Parse layoutget results before we check for access */ 3213 pnfs_parse_lgopen(state->inode, opendata->lgp, ctx); 3214 3215 ret = nfs4_opendata_access(sp->so_cred, opendata, state, acc_mode); 3216 if (ret != 0) 3217 goto out; 3218 3219 if (d_inode(dentry) == state->inode) 3220 nfs_inode_attach_open_context(ctx); 3221 3222 out: 3223 if (!opendata->cancelled) { 3224 if (opendata->lgp) { 3225 nfs4_lgopen_release(opendata->lgp); 3226 opendata->lgp = NULL; 3227 } 3228 nfs4_sequence_free_slot(&opendata->o_res.seq_res); 3229 } 3230 return ret; 3231 } 3232 3233 /* 3234 * Returns a referenced nfs4_state 3235 */ 3236 static int _nfs4_do_open(struct inode *dir, 3237 struct nfs_open_context *ctx, 3238 int flags, 3239 const struct nfs4_open_createattrs *c, 3240 int *opened) 3241 { 3242 struct nfs4_state_owner *sp; 3243 struct nfs4_state *state = NULL; 3244 struct nfs_server *server = NFS_SERVER(dir); 3245 struct nfs4_opendata *opendata; 3246 struct dentry *dentry = ctx->dentry; 3247 const struct cred *cred = ctx->cred; 3248 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold; 3249 fmode_t fmode = _nfs4_ctx_to_openmode(ctx); 3250 enum open_claim_type4 claim = 
NFS4_OPEN_CLAIM_NULL; 3251 struct iattr *sattr = c->sattr; 3252 struct nfs4_label *label = c->label; 3253 int status; 3254 3255 /* Protect against reboot recovery conflicts */ 3256 status = -ENOMEM; 3257 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL); 3258 if (sp == NULL) { 3259 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n"); 3260 goto out_err; 3261 } 3262 status = nfs4_client_recover_expired_lease(server->nfs_client); 3263 if (status != 0) 3264 goto err_put_state_owner; 3265 if (d_really_is_positive(dentry)) 3266 nfs4_return_incompatible_delegation(d_inode(dentry), fmode); 3267 status = -ENOMEM; 3268 if (d_really_is_positive(dentry)) 3269 claim = NFS4_OPEN_CLAIM_FH; 3270 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, 3271 c, claim, GFP_KERNEL); 3272 if (opendata == NULL) 3273 goto err_put_state_owner; 3274 3275 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) { 3276 if (!opendata->f_attr.mdsthreshold) { 3277 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); 3278 if (!opendata->f_attr.mdsthreshold) 3279 goto err_opendata_put; 3280 } 3281 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; 3282 } 3283 if (d_really_is_positive(dentry)) 3284 opendata->state = nfs4_get_open_state(d_inode(dentry), sp); 3285 3286 status = _nfs4_open_and_get_state(opendata, ctx); 3287 if (status != 0) 3288 goto err_opendata_put; 3289 state = ctx->state; 3290 3291 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) && 3292 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) { 3293 unsigned attrs = nfs4_exclusive_attrset(opendata, sattr, &label); 3294 /* 3295 * send create attributes which was not set by open 3296 * with an extra setattr. 3297 */ 3298 if (attrs || label) { 3299 unsigned ia_old = sattr->ia_valid; 3300 3301 sattr->ia_valid = attrs; 3302 nfs_fattr_init(opendata->o_res.f_attr); 3303 status = nfs4_do_setattr(state->inode, cred, 3304 opendata->o_res.f_attr, sattr, 3305 ctx, label); 3306 if (status == 0) { 3307 nfs_setattr_update_inode(state->inode, sattr, 3308 opendata->o_res.f_attr); 3309 nfs_setsecurity(state->inode, opendata->o_res.f_attr); 3310 } 3311 sattr->ia_valid = ia_old; 3312 } 3313 } 3314 if (opened && opendata->file_created) 3315 *opened = 1; 3316 3317 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) { 3318 *ctx_th = opendata->f_attr.mdsthreshold; 3319 opendata->f_attr.mdsthreshold = NULL; 3320 } 3321 3322 nfs4_opendata_put(opendata); 3323 nfs4_put_state_owner(sp); 3324 return 0; 3325 err_opendata_put: 3326 nfs4_opendata_put(opendata); 3327 err_put_state_owner: 3328 nfs4_put_state_owner(sp); 3329 out_err: 3330 return status; 3331 } 3332 3333 3334 static struct nfs4_state *nfs4_do_open(struct inode *dir, 3335 struct nfs_open_context *ctx, 3336 int flags, 3337 struct iattr *sattr, 3338 struct nfs4_label *label, 3339 int *opened) 3340 { 3341 struct nfs_server *server = NFS_SERVER(dir); 3342 struct nfs4_exception exception = { 3343 .interruptible = true, 3344 }; 3345 struct nfs4_state *res; 3346 struct nfs4_open_createattrs c = { 3347 .label = label, 3348 .sattr = sattr, 3349 .verf = { 3350 [0] = (__u32)jiffies, 3351 [1] = (__u32)current->pid, 3352 }, 3353 }; 3354 int status; 3355 3356 do { 3357 status = _nfs4_do_open(dir, ctx, flags, &c, opened); 3358 res = ctx->state; 3359 trace_nfs4_open_file(ctx, flags, status); 3360 if (status == 0) 3361 break; 3362 /* NOTE: BAD_SEQID means the server and client disagree about the 3363 * book-keeping w.r.t. state-changing operations 3364 * (OPEN/CLOSE/LOCK/LOCKU...) 
3365 * It is actually a sign of a bug on the client or on the server. 3366 * 3367 * If we receive a BAD_SEQID error in the particular case of 3368 * doing an OPEN, we assume that nfs_increment_open_seqid() will 3369 * have unhashed the old state_owner for us, and that we can 3370 * therefore safely retry using a new one. We should still warn 3371 * the user though... 3372 */ 3373 if (status == -NFS4ERR_BAD_SEQID) { 3374 pr_warn_ratelimited("NFS: v4 server %s " 3375 " returned a bad sequence-id error!\n", 3376 NFS_SERVER(dir)->nfs_client->cl_hostname); 3377 exception.retry = 1; 3378 continue; 3379 } 3380 /* 3381 * BAD_STATEID on OPEN means that the server cancelled our 3382 * state before it received the OPEN_CONFIRM. 3383 * Recover by retrying the request as per the discussion 3384 * on Page 181 of RFC3530. 3385 */ 3386 if (status == -NFS4ERR_BAD_STATEID) { 3387 exception.retry = 1; 3388 continue; 3389 } 3390 if (status == -NFS4ERR_EXPIRED) { 3391 nfs4_schedule_lease_recovery(server->nfs_client); 3392 exception.retry = 1; 3393 continue; 3394 } 3395 if (status == -EAGAIN) { 3396 /* We must have found a delegation */ 3397 exception.retry = 1; 3398 continue; 3399 } 3400 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception)) 3401 continue; 3402 res = ERR_PTR(nfs4_handle_exception(server, 3403 status, &exception)); 3404 } while (exception.retry); 3405 return res; 3406 } 3407 3408 static int _nfs4_do_setattr(struct inode *inode, 3409 struct nfs_setattrargs *arg, 3410 struct nfs_setattrres *res, 3411 const struct cred *cred, 3412 struct nfs_open_context *ctx) 3413 { 3414 struct nfs_server *server = NFS_SERVER(inode); 3415 struct rpc_message msg = { 3416 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 3417 .rpc_argp = arg, 3418 .rpc_resp = res, 3419 .rpc_cred = cred, 3420 }; 3421 const struct cred *delegation_cred = NULL; 3422 unsigned long timestamp = jiffies; 3423 bool truncate; 3424 int status; 3425 3426 nfs_fattr_init(res->fattr); 3427 3428 /* Servers should only apply open mode checks for file size changes */ 3429 truncate = (arg->iap->ia_valid & ATTR_SIZE) ? 
true : false; 3430 if (!truncate) { 3431 nfs4_inode_make_writeable(inode); 3432 goto zero_stateid; 3433 } 3434 3435 if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) { 3436 /* Use that stateid */ 3437 } else if (ctx != NULL && ctx->state) { 3438 struct nfs_lock_context *l_ctx; 3439 if (!nfs4_valid_open_stateid(ctx->state)) 3440 return -EBADF; 3441 l_ctx = nfs_get_lock_context(ctx); 3442 if (IS_ERR(l_ctx)) 3443 return PTR_ERR(l_ctx); 3444 status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx, 3445 &arg->stateid, &delegation_cred); 3446 nfs_put_lock_context(l_ctx); 3447 if (status == -EIO) 3448 return -EBADF; 3449 else if (status == -EAGAIN) 3450 goto zero_stateid; 3451 } else { 3452 zero_stateid: 3453 nfs4_stateid_copy(&arg->stateid, &zero_stateid); 3454 } 3455 if (delegation_cred) 3456 msg.rpc_cred = delegation_cred; 3457 3458 status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1); 3459 3460 put_cred(delegation_cred); 3461 if (status == 0 && ctx != NULL) 3462 renew_lease(server, timestamp); 3463 trace_nfs4_setattr(inode, &arg->stateid, status); 3464 return status; 3465 } 3466 3467 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred, 3468 struct nfs_fattr *fattr, struct iattr *sattr, 3469 struct nfs_open_context *ctx, struct nfs4_label *ilabel) 3470 { 3471 struct nfs_server *server = NFS_SERVER(inode); 3472 __u32 bitmask[NFS4_BITMASK_SZ]; 3473 struct nfs4_state *state = ctx ? ctx->state : NULL; 3474 struct nfs_setattrargs arg = { 3475 .fh = NFS_FH(inode), 3476 .iap = sattr, 3477 .server = server, 3478 .bitmask = bitmask, 3479 .label = ilabel, 3480 }; 3481 struct nfs_setattrres res = { 3482 .fattr = fattr, 3483 .server = server, 3484 }; 3485 struct nfs4_exception exception = { 3486 .state = state, 3487 .inode = inode, 3488 .stateid = &arg.stateid, 3489 }; 3490 unsigned long adjust_flags = NFS_INO_INVALID_CHANGE | 3491 NFS_INO_INVALID_CTIME; 3492 int err; 3493 3494 if (sattr->ia_valid & (ATTR_MODE | ATTR_KILL_SUID | ATTR_KILL_SGID)) 3495 adjust_flags |= NFS_INO_INVALID_MODE; 3496 if (sattr->ia_valid & (ATTR_UID | ATTR_GID)) 3497 adjust_flags |= NFS_INO_INVALID_OTHER; 3498 if (sattr->ia_valid & ATTR_ATIME) 3499 adjust_flags |= NFS_INO_INVALID_ATIME; 3500 if (sattr->ia_valid & ATTR_MTIME) 3501 adjust_flags |= NFS_INO_INVALID_MTIME; 3502 3503 do { 3504 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label), 3505 inode, adjust_flags); 3506 3507 err = _nfs4_do_setattr(inode, &arg, &res, cred, ctx); 3508 switch (err) { 3509 case -NFS4ERR_OPENMODE: 3510 if (!(sattr->ia_valid & ATTR_SIZE)) { 3511 pr_warn_once("NFSv4: server %s is incorrectly " 3512 "applying open mode checks to " 3513 "a SETATTR that is not " 3514 "changing file size.\n", 3515 server->nfs_client->cl_hostname); 3516 } 3517 if (state && !(state->state & FMODE_WRITE)) { 3518 err = -EBADF; 3519 if (sattr->ia_valid & ATTR_OPEN) 3520 err = -EACCES; 3521 goto out; 3522 } 3523 } 3524 err = nfs4_handle_exception(server, err, &exception); 3525 } while (exception.retry); 3526 out: 3527 return err; 3528 } 3529 3530 static bool 3531 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task) 3532 { 3533 if (inode == NULL || !nfs_have_layout(inode)) 3534 return false; 3535 3536 return pnfs_wait_on_layoutreturn(inode, task); 3537 } 3538 3539 /* 3540 * Update the seqid of an open stateid 3541 */ 3542 static void nfs4_sync_open_stateid(nfs4_stateid *dst, 3543 struct nfs4_state *state) 3544 { 3545 __be32 seqid_open; 3546 u32 
dst_seqid; 3547 int seq; 3548 3549 for (;;) { 3550 if (!nfs4_valid_open_stateid(state)) 3551 break; 3552 seq = read_seqbegin(&state->seqlock); 3553 if (!nfs4_state_match_open_stateid_other(state, dst)) { 3554 nfs4_stateid_copy(dst, &state->open_stateid); 3555 if (read_seqretry(&state->seqlock, seq)) 3556 continue; 3557 break; 3558 } 3559 seqid_open = state->open_stateid.seqid; 3560 if (read_seqretry(&state->seqlock, seq)) 3561 continue; 3562 3563 dst_seqid = be32_to_cpu(dst->seqid); 3564 if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) < 0) 3565 dst->seqid = seqid_open; 3566 break; 3567 } 3568 } 3569 3570 /* 3571 * Update the seqid of an open stateid after receiving 3572 * NFS4ERR_OLD_STATEID 3573 */ 3574 static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst, 3575 struct nfs4_state *state) 3576 { 3577 __be32 seqid_open; 3578 u32 dst_seqid; 3579 bool ret; 3580 int seq, status = -EAGAIN; 3581 DEFINE_WAIT(wait); 3582 3583 for (;;) { 3584 ret = false; 3585 if (!nfs4_valid_open_stateid(state)) 3586 break; 3587 seq = read_seqbegin(&state->seqlock); 3588 if (!nfs4_state_match_open_stateid_other(state, dst)) { 3589 if (read_seqretry(&state->seqlock, seq)) 3590 continue; 3591 break; 3592 } 3593 3594 write_seqlock(&state->seqlock); 3595 seqid_open = state->open_stateid.seqid; 3596 3597 dst_seqid = be32_to_cpu(dst->seqid); 3598 3599 /* Did another OPEN bump the state's seqid? try again: */ 3600 if ((s32)(be32_to_cpu(seqid_open) - dst_seqid) > 0) { 3601 dst->seqid = seqid_open; 3602 write_sequnlock(&state->seqlock); 3603 ret = true; 3604 break; 3605 } 3606 3607 /* server says we're behind but we haven't seen the update yet */ 3608 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags); 3609 prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE); 3610 write_sequnlock(&state->seqlock); 3611 trace_nfs4_close_stateid_update_wait(state->inode, dst, 0); 3612 3613 if (fatal_signal_pending(current) || nfs_current_task_exiting()) 3614 status = -EINTR; 3615 else 3616 if (schedule_timeout(5*HZ) != 0) 3617 status = 0; 3618 3619 finish_wait(&state->waitq, &wait); 3620 3621 if (!status) 3622 continue; 3623 if (status == -EINTR) 3624 break; 3625 3626 /* we slept the whole 5 seconds, we must have lost a seqid */ 3627 dst->seqid = cpu_to_be32(dst_seqid + 1); 3628 ret = true; 3629 break; 3630 } 3631 3632 return ret; 3633 } 3634 3635 struct nfs4_closedata { 3636 struct inode *inode; 3637 struct nfs4_state *state; 3638 struct nfs_closeargs arg; 3639 struct nfs_closeres res; 3640 struct { 3641 struct nfs4_layoutreturn_args arg; 3642 struct nfs4_layoutreturn_res res; 3643 struct nfs4_xdr_opaque_data ld_private; 3644 u32 roc_barrier; 3645 bool roc; 3646 } lr; 3647 struct nfs_fattr fattr; 3648 unsigned long timestamp; 3649 unsigned short retrans; 3650 }; 3651 3652 static void nfs4_free_closedata(void *data) 3653 { 3654 struct nfs4_closedata *calldata = data; 3655 struct nfs4_state_owner *sp = calldata->state->owner; 3656 struct super_block *sb = calldata->state->inode->i_sb; 3657 3658 if (calldata->lr.roc) 3659 pnfs_roc_release(&calldata->lr.arg, &calldata->lr.res, 3660 calldata->res.lr_ret); 3661 nfs4_put_open_state(calldata->state); 3662 nfs_free_seqid(calldata->arg.seqid); 3663 nfs4_put_state_owner(sp); 3664 nfs_sb_deactive(sb); 3665 kfree(calldata); 3666 } 3667 3668 static void nfs4_close_done(struct rpc_task *task, void *data) 3669 { 3670 struct nfs4_closedata *calldata = data; 3671 struct nfs4_state *state = calldata->state; 3672 struct nfs_server *server = NFS_SERVER(calldata->inode); 3673 nfs4_stateid *res_stateid = NULL; 
3674 struct nfs4_exception exception = { 3675 .state = state, 3676 .inode = calldata->inode, 3677 .stateid = &calldata->arg.stateid, 3678 .retrans = calldata->retrans, 3679 }; 3680 3681 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 3682 return; 3683 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status); 3684 3685 /* Handle Layoutreturn errors */ 3686 if (pnfs_roc_done(task, &calldata->arg.lr_args, &calldata->res.lr_res, 3687 &calldata->res.lr_ret) == -EAGAIN) 3688 goto out_restart; 3689 3690 /* hmm. we are done with the inode, and in the process of freeing 3691 * the state_owner. we keep this around to process errors 3692 */ 3693 switch (task->tk_status) { 3694 case 0: 3695 res_stateid = &calldata->res.stateid; 3696 renew_lease(server, calldata->timestamp); 3697 break; 3698 case -NFS4ERR_ACCESS: 3699 if (calldata->arg.bitmask != NULL) { 3700 calldata->arg.bitmask = NULL; 3701 calldata->res.fattr = NULL; 3702 goto out_restart; 3703 3704 } 3705 break; 3706 case -NFS4ERR_OLD_STATEID: 3707 /* Did we race with OPEN? */ 3708 if (nfs4_refresh_open_old_stateid(&calldata->arg.stateid, 3709 state)) 3710 goto out_restart; 3711 goto out_release; 3712 case -NFS4ERR_ADMIN_REVOKED: 3713 case -NFS4ERR_STALE_STATEID: 3714 case -NFS4ERR_EXPIRED: 3715 nfs4_free_revoked_stateid(server, 3716 &calldata->arg.stateid, 3717 task->tk_msg.rpc_cred); 3718 fallthrough; 3719 case -NFS4ERR_BAD_STATEID: 3720 if (calldata->arg.fmode == 0) 3721 break; 3722 fallthrough; 3723 default: 3724 task->tk_status = nfs4_async_handle_exception(task, 3725 server, task->tk_status, &exception); 3726 calldata->retrans = exception.retrans; 3727 if (exception.retry) 3728 goto out_restart; 3729 } 3730 nfs_clear_open_stateid(state, &calldata->arg.stateid, 3731 res_stateid, calldata->arg.fmode); 3732 out_release: 3733 task->tk_status = 0; 3734 nfs_release_seqid(calldata->arg.seqid); 3735 nfs_refresh_inode(calldata->inode, &calldata->fattr); 3736 dprintk("%s: ret = %d\n", __func__, task->tk_status); 3737 return; 3738 out_restart: 3739 task->tk_status = 0; 3740 rpc_restart_call_prepare(task); 3741 goto out_release; 3742 } 3743 3744 static void nfs4_close_prepare(struct rpc_task *task, void *data) 3745 { 3746 struct nfs4_closedata *calldata = data; 3747 struct nfs4_state *state = calldata->state; 3748 struct inode *inode = calldata->inode; 3749 struct nfs_server *server = NFS_SERVER(inode); 3750 struct pnfs_layout_hdr *lo; 3751 bool is_rdonly, is_wronly, is_rdwr; 3752 int call_close = 0; 3753 3754 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 3755 goto out_wait; 3756 3757 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; 3758 spin_lock(&state->owner->so_lock); 3759 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags); 3760 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags); 3761 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags); 3762 /* Calculate the change in open mode */ 3763 calldata->arg.fmode = 0; 3764 if (state->n_rdwr == 0) { 3765 if (state->n_rdonly == 0) 3766 call_close |= is_rdonly; 3767 else if (is_rdonly) 3768 calldata->arg.fmode |= FMODE_READ; 3769 if (state->n_wronly == 0) 3770 call_close |= is_wronly; 3771 else if (is_wronly) 3772 calldata->arg.fmode |= FMODE_WRITE; 3773 if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE)) 3774 call_close |= is_rdwr; 3775 } else if (is_rdwr) 3776 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE; 3777 3778 nfs4_sync_open_stateid(&calldata->arg.stateid, state); 3779 if (!nfs4_valid_open_stateid(state)) 3780 call_close = 0; 3781 
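/* If the open stateid is no longer valid there is nothing the server
 * can release, so skip the CLOSE/OPEN_DOWNGRADE RPC entirely. */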
spin_unlock(&state->owner->so_lock); 3782 3783 if (!call_close) { 3784 /* Note: exit _without_ calling nfs4_close_done */ 3785 goto out_no_action; 3786 } 3787 3788 if (!calldata->lr.roc && nfs4_wait_on_layoutreturn(inode, task)) { 3789 nfs_release_seqid(calldata->arg.seqid); 3790 goto out_wait; 3791 } 3792 3793 lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL; 3794 if (lo && !pnfs_layout_is_valid(lo)) { 3795 calldata->arg.lr_args = NULL; 3796 calldata->res.lr_res = NULL; 3797 } 3798 3799 if (calldata->arg.fmode == 0) 3800 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; 3801 3802 if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) { 3803 /* Close-to-open cache consistency revalidation */ 3804 if (!nfs4_have_delegation(inode, FMODE_READ, 0)) { 3805 nfs4_bitmask_set(calldata->arg.bitmask_store, 3806 server->cache_consistency_bitmask, 3807 inode, 0); 3808 calldata->arg.bitmask = calldata->arg.bitmask_store; 3809 } else 3810 calldata->arg.bitmask = NULL; 3811 } 3812 3813 calldata->arg.share_access = 3814 nfs4_fmode_to_share_access(calldata->arg.fmode); 3815 3816 if (calldata->res.fattr == NULL) 3817 calldata->arg.bitmask = NULL; 3818 else if (calldata->arg.bitmask == NULL) 3819 calldata->res.fattr = NULL; 3820 calldata->timestamp = jiffies; 3821 if (nfs4_setup_sequence(NFS_SERVER(inode)->nfs_client, 3822 &calldata->arg.seq_args, 3823 &calldata->res.seq_res, 3824 task) != 0) 3825 nfs_release_seqid(calldata->arg.seqid); 3826 return; 3827 out_no_action: 3828 task->tk_action = NULL; 3829 out_wait: 3830 nfs4_sequence_done(task, &calldata->res.seq_res); 3831 } 3832 3833 static const struct rpc_call_ops nfs4_close_ops = { 3834 .rpc_call_prepare = nfs4_close_prepare, 3835 .rpc_call_done = nfs4_close_done, 3836 .rpc_release = nfs4_free_closedata, 3837 }; 3838 3839 /* 3840 * It is possible for data to be read/written from a mem-mapped file 3841 * after the sys_close call (which hits the vfs layer as a flush). 3842 * This means that we can't safely call nfsv4 close on a file until 3843 * the inode is cleared. This in turn means that we are not good 3844 * NFSv4 citizens - we do not indicate to the server to update the file's 3845 * share state even when we are done with one of the three share 3846 * stateid's in the inode. 3847 * 3848 * NOTE: Caller must be holding the sp->so_owner semaphore! 
3849 */ 3850 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) 3851 { 3852 struct nfs_server *server = NFS_SERVER(state->inode); 3853 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 3854 struct nfs4_closedata *calldata; 3855 struct nfs4_state_owner *sp = state->owner; 3856 struct rpc_task *task; 3857 struct rpc_message msg = { 3858 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE], 3859 .rpc_cred = state->owner->so_cred, 3860 }; 3861 struct rpc_task_setup task_setup_data = { 3862 .rpc_client = server->client, 3863 .rpc_message = &msg, 3864 .callback_ops = &nfs4_close_ops, 3865 .workqueue = nfsiod_workqueue, 3866 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 3867 }; 3868 int status = -ENOMEM; 3869 3870 if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE)) 3871 task_setup_data.flags |= RPC_TASK_MOVEABLE; 3872 3873 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP, 3874 &task_setup_data.rpc_client, &msg); 3875 3876 calldata = kzalloc(sizeof(*calldata), gfp_mask); 3877 if (calldata == NULL) 3878 goto out; 3879 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1, 0); 3880 calldata->inode = state->inode; 3881 calldata->state = state; 3882 calldata->arg.fh = NFS_FH(state->inode); 3883 if (!nfs4_copy_open_stateid(&calldata->arg.stateid, state)) 3884 goto out_free_calldata; 3885 /* Serialization for the sequence id */ 3886 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 3887 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask); 3888 if (IS_ERR(calldata->arg.seqid)) 3889 goto out_free_calldata; 3890 nfs_fattr_init(&calldata->fattr); 3891 calldata->arg.fmode = 0; 3892 calldata->lr.arg.ld_private = &calldata->lr.ld_private; 3893 calldata->res.fattr = &calldata->fattr; 3894 calldata->res.seqid = calldata->arg.seqid; 3895 calldata->res.server = server; 3896 calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT; 3897 calldata->lr.roc = pnfs_roc(state->inode, 3898 &calldata->lr.arg, &calldata->lr.res, msg.rpc_cred); 3899 if (calldata->lr.roc) { 3900 calldata->arg.lr_args = &calldata->lr.arg; 3901 calldata->res.lr_res = &calldata->lr.res; 3902 } 3903 nfs_sb_active(calldata->inode->i_sb); 3904 3905 msg.rpc_argp = &calldata->arg; 3906 msg.rpc_resp = &calldata->res; 3907 task_setup_data.callback_data = calldata; 3908 task = rpc_run_task(&task_setup_data); 3909 if (IS_ERR(task)) 3910 return PTR_ERR(task); 3911 status = 0; 3912 if (wait) 3913 status = rpc_wait_for_completion_task(task); 3914 rpc_put_task(task); 3915 return status; 3916 out_free_calldata: 3917 kfree(calldata); 3918 out: 3919 nfs4_put_open_state(state); 3920 nfs4_put_state_owner(sp); 3921 return status; 3922 } 3923 3924 static struct inode * 3925 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, 3926 int open_flags, struct iattr *attr, int *opened) 3927 { 3928 struct nfs4_state *state; 3929 struct nfs4_label l, *label; 3930 3931 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l); 3932 3933 /* Protect against concurrent sillydeletes */ 3934 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened); 3935 3936 nfs4_label_release_security(label); 3937 3938 if (IS_ERR(state)) 3939 return ERR_CAST(state); 3940 return state->inode; 3941 } 3942 3943 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) 3944 { 3945 struct dentry *dentry = ctx->dentry; 3946 if (ctx->state == NULL) 3947 return; 3948 if (dentry->d_flags & DCACHE_NFSFS_RENAMED) 3949 nfs4_inode_set_return_delegation_on_close(d_inode(dentry)); 
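	/* Synchronous callers wait for the CLOSE/OPEN_DOWNGRADE to
	 * complete; otherwise it is left to run asynchronously on nfsiod.
	 */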
3950 if (is_sync) 3951 nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx)); 3952 else 3953 nfs4_close_state(ctx->state, _nfs4_ctx_to_openmode(ctx)); 3954 } 3955 3956 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL) 3957 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL) 3958 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_OPEN_ARGUMENTS - 1UL) 3959 3960 #define FATTR4_WORD2_NFS42_TIME_DELEG_MASK \ 3961 (FATTR4_WORD2_TIME_DELEG_MODIFY|FATTR4_WORD2_TIME_DELEG_ACCESS) 3962 static bool nfs4_server_delegtime_capable(struct nfs4_server_caps_res *res) 3963 { 3964 u32 share_access_want = res->open_caps.oa_share_access_want[0]; 3965 u32 attr_bitmask = res->attr_bitmask[2]; 3966 3967 return (share_access_want & NFS4_SHARE_WANT_DELEG_TIMESTAMPS) && 3968 ((attr_bitmask & FATTR4_WORD2_NFS42_TIME_DELEG_MASK) == 3969 FATTR4_WORD2_NFS42_TIME_DELEG_MASK); 3970 } 3971 3972 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 3973 { 3974 u32 minorversion = server->nfs_client->cl_minorversion; 3975 u32 bitmask[3] = { 3976 [0] = FATTR4_WORD0_SUPPORTED_ATTRS, 3977 }; 3978 struct nfs4_server_caps_arg args = { 3979 .fhandle = fhandle, 3980 .bitmask = bitmask, 3981 }; 3982 struct nfs4_server_caps_res res = {}; 3983 struct rpc_message msg = { 3984 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS], 3985 .rpc_argp = &args, 3986 .rpc_resp = &res, 3987 }; 3988 int status; 3989 int i; 3990 3991 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS | 3992 FATTR4_WORD0_FH_EXPIRE_TYPE | 3993 FATTR4_WORD0_LINK_SUPPORT | 3994 FATTR4_WORD0_SYMLINK_SUPPORT | 3995 FATTR4_WORD0_ACLSUPPORT | 3996 FATTR4_WORD0_CASE_INSENSITIVE | 3997 FATTR4_WORD0_CASE_PRESERVING; 3998 if (minorversion) 3999 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT; 4000 if (minorversion > 1) 4001 bitmask[2] |= FATTR4_WORD2_OPEN_ARGUMENTS; 4002 4003 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4004 if (status == 0) { 4005 bitmask[0] = (FATTR4_WORD0_SUPPORTED_ATTRS | 4006 FATTR4_WORD0_FH_EXPIRE_TYPE | 4007 FATTR4_WORD0_LINK_SUPPORT | 4008 FATTR4_WORD0_SYMLINK_SUPPORT | 4009 FATTR4_WORD0_ACLSUPPORT | 4010 FATTR4_WORD0_CASE_INSENSITIVE | 4011 FATTR4_WORD0_CASE_PRESERVING) & 4012 res.attr_bitmask[0]; 4013 /* Sanity check the server answers */ 4014 switch (minorversion) { 4015 case 0: 4016 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK; 4017 res.attr_bitmask[2] = 0; 4018 break; 4019 case 1: 4020 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK; 4021 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT & 4022 res.attr_bitmask[2]; 4023 break; 4024 case 2: 4025 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK; 4026 bitmask[2] = (FATTR4_WORD2_SUPPATTR_EXCLCREAT | 4027 FATTR4_WORD2_OPEN_ARGUMENTS) & 4028 res.attr_bitmask[2]; 4029 } 4030 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); 4031 server->caps &= 4032 ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS | 4033 NFS_CAP_SECURITY_LABEL | NFS_CAP_FS_LOCATIONS | 4034 NFS_CAP_OPEN_XOR | NFS_CAP_DELEGTIME); 4035 server->fattr_valid = NFS_ATTR_FATTR_V4; 4036 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL && 4037 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) 4038 server->caps |= NFS_CAP_ACLS; 4039 if (res.has_links != 0) 4040 server->caps |= NFS_CAP_HARDLINKS; 4041 if (res.has_symlinks != 0) 4042 server->caps |= NFS_CAP_SYMLINKS; 4043 if (res.case_insensitive) 4044 server->caps |= NFS_CAP_CASE_INSENSITIVE; 4045 if (res.case_preserving) 4046 server->caps |= NFS_CAP_CASE_PRESERVING; 4047 
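		/* Security labels are advertised only when both this kernel
		 * build and the server's reported attribute mask support them.
		 */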
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
4048 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
4049 server->caps |= NFS_CAP_SECURITY_LABEL;
4050 #endif
4051 if (res.attr_bitmask[0] & FATTR4_WORD0_FS_LOCATIONS)
4052 server->caps |= NFS_CAP_FS_LOCATIONS;
4053 if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID))
4054 server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID;
4055 if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE))
4056 server->fattr_valid &= ~NFS_ATTR_FATTR_MODE;
4057 if (!(res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS))
4058 server->fattr_valid &= ~NFS_ATTR_FATTR_NLINK;
4059 if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER))
4060 server->fattr_valid &= ~(NFS_ATTR_FATTR_OWNER |
4061 NFS_ATTR_FATTR_OWNER_NAME);
4062 if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP))
4063 server->fattr_valid &= ~(NFS_ATTR_FATTR_GROUP |
4064 NFS_ATTR_FATTR_GROUP_NAME);
4065 if (!(res.attr_bitmask[1] & FATTR4_WORD1_SPACE_USED))
4066 server->fattr_valid &= ~NFS_ATTR_FATTR_SPACE_USED;
4067 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS))
4068 server->fattr_valid &= ~NFS_ATTR_FATTR_ATIME;
4069 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA))
4070 server->fattr_valid &= ~NFS_ATTR_FATTR_CTIME;
4071 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY))
4072 server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME;
4075 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_CREATE))
4076 server->fattr_valid &= ~NFS_ATTR_FATTR_BTIME;
4077 memcpy(server->attr_bitmask_nl, res.attr_bitmask,
4078 sizeof(server->attr_bitmask));
4079 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
4080
4081 if (res.open_caps.oa_share_access_want[0] &
4082 NFS4_SHARE_WANT_OPEN_XOR_DELEGATION)
4083 server->caps |= NFS_CAP_OPEN_XOR;
4084 if (nfs4_server_delegtime_capable(&res))
4085 server->caps |= NFS_CAP_DELEGTIME;
4086
4087 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
4088 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
4089 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
4090 server->cache_consistency_bitmask[2] = 0;
4091
4092 /* Avoid a regression due to buggy server */
4093 for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++)
4094 res.exclcreat_bitmask[i] &= res.attr_bitmask[i];
4095 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask,
4096 sizeof(server->exclcreat_bitmask));
4097
4098 server->acl_bitmask = res.acl_bitmask;
4099 server->fh_expire_type = res.fh_expire_type;
4100 }
4101
4102 return status;
4103 }
4104
4105 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
4106 {
4107 struct nfs4_exception exception = {
4108 .interruptible = true,
4109 };
4110 int err;
4111
4112 do {
4113 err = nfs4_handle_exception(server,
4114 _nfs4_server_capabilities(server, fhandle),
4115 &exception);
4116 } while (exception.retry);
4117 return err;
4118 }
4119
4120 static void test_fs_location_for_trunking(struct nfs4_fs_location *location,
4121 struct nfs_client *clp,
4122 struct nfs_server *server)
4123 {
4124 int i;
4125
4126 for (i = 0; i < location->nservers; i++) {
4127 struct nfs4_string *srv_loc = &location->servers[i];
4128 struct sockaddr_storage addr;
4129 size_t addrlen;
4130 struct xprt_create xprt_args = {
4131 .ident = 0,
4132 .net = clp->cl_net,
4133 };
4134 struct nfs4_add_xprt_data xprtdata = {
4135 .clp = clp,
4136 };
4137 struct rpc_add_xprt_test
rpcdata = { 4138 .add_xprt_test = clp->cl_mvops->session_trunk, 4139 .data = &xprtdata, 4140 }; 4141 char *servername = NULL; 4142 4143 if (!srv_loc->len) 4144 continue; 4145 4146 addrlen = nfs_parse_server_name(srv_loc->data, srv_loc->len, 4147 &addr, sizeof(addr), 4148 clp->cl_net, server->port); 4149 if (!addrlen) 4150 return; 4151 xprt_args.dstaddr = (struct sockaddr *)&addr; 4152 xprt_args.addrlen = addrlen; 4153 servername = kmalloc(srv_loc->len + 1, GFP_KERNEL); 4154 if (!servername) 4155 return; 4156 memcpy(servername, srv_loc->data, srv_loc->len); 4157 servername[srv_loc->len] = '\0'; 4158 xprt_args.servername = servername; 4159 4160 xprtdata.cred = nfs4_get_clid_cred(clp); 4161 rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args, 4162 rpc_clnt_setup_test_and_add_xprt, 4163 &rpcdata); 4164 if (xprtdata.cred) 4165 put_cred(xprtdata.cred); 4166 kfree(servername); 4167 } 4168 } 4169 4170 static bool _is_same_nfs4_pathname(struct nfs4_pathname *path1, 4171 struct nfs4_pathname *path2) 4172 { 4173 int i; 4174 4175 if (path1->ncomponents != path2->ncomponents) 4176 return false; 4177 for (i = 0; i < path1->ncomponents; i++) { 4178 if (path1->components[i].len != path2->components[i].len) 4179 return false; 4180 if (memcmp(path1->components[i].data, path2->components[i].data, 4181 path1->components[i].len)) 4182 return false; 4183 } 4184 return true; 4185 } 4186 4187 static int _nfs4_discover_trunking(struct nfs_server *server, 4188 struct nfs_fh *fhandle) 4189 { 4190 struct nfs4_fs_locations *locations = NULL; 4191 struct page *page; 4192 const struct cred *cred; 4193 struct nfs_client *clp = server->nfs_client; 4194 const struct nfs4_state_maintenance_ops *ops = 4195 clp->cl_mvops->state_renewal_ops; 4196 int status = -ENOMEM, i; 4197 4198 cred = ops->get_state_renewal_cred(clp); 4199 if (cred == NULL) { 4200 cred = nfs4_get_clid_cred(clp); 4201 if (cred == NULL) 4202 return -ENOKEY; 4203 } 4204 4205 page = alloc_page(GFP_KERNEL); 4206 if (!page) 4207 goto out_put_cred; 4208 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 4209 if (!locations) 4210 goto out_free; 4211 locations->fattr = nfs_alloc_fattr(); 4212 if (!locations->fattr) 4213 goto out_free_2; 4214 4215 status = nfs4_proc_get_locations(server, fhandle, locations, page, 4216 cred); 4217 if (status) 4218 goto out_free_3; 4219 4220 for (i = 0; i < locations->nlocations; i++) { 4221 if (!_is_same_nfs4_pathname(&locations->fs_path, 4222 &locations->locations[i].rootpath)) 4223 continue; 4224 test_fs_location_for_trunking(&locations->locations[i], clp, 4225 server); 4226 } 4227 out_free_3: 4228 kfree(locations->fattr); 4229 out_free_2: 4230 kfree(locations); 4231 out_free: 4232 __free_page(page); 4233 out_put_cred: 4234 put_cred(cred); 4235 return status; 4236 } 4237 4238 static int nfs4_discover_trunking(struct nfs_server *server, 4239 struct nfs_fh *fhandle) 4240 { 4241 struct nfs4_exception exception = { 4242 .interruptible = true, 4243 }; 4244 struct nfs_client *clp = server->nfs_client; 4245 int err = 0; 4246 4247 if (!nfs4_has_session(clp)) 4248 goto out; 4249 do { 4250 err = nfs4_handle_exception(server, 4251 _nfs4_discover_trunking(server, fhandle), 4252 &exception); 4253 } while (exception.retry); 4254 out: 4255 return err; 4256 } 4257 4258 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 4259 struct nfs_fattr *fattr) 4260 { 4261 u32 bitmask[3] = { 4262 [0] = FATTR4_WORD0_TYPE | FATTR4_WORD0_CHANGE | 4263 FATTR4_WORD0_SIZE | FATTR4_WORD0_FSID, 4264 }; 4265 struct 
nfs4_lookup_root_arg args = { 4266 .bitmask = bitmask, 4267 }; 4268 struct nfs4_lookup_res res = { 4269 .server = server, 4270 .fattr = fattr, 4271 .fh = fhandle, 4272 }; 4273 struct rpc_message msg = { 4274 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT], 4275 .rpc_argp = &args, 4276 .rpc_resp = &res, 4277 }; 4278 4279 nfs_fattr_init(fattr); 4280 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4281 } 4282 4283 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 4284 struct nfs_fattr *fattr) 4285 { 4286 struct nfs4_exception exception = { 4287 .interruptible = true, 4288 }; 4289 int err; 4290 do { 4291 err = _nfs4_lookup_root(server, fhandle, fattr); 4292 trace_nfs4_lookup_root(server, fhandle, fattr, err); 4293 switch (err) { 4294 case 0: 4295 case -NFS4ERR_WRONGSEC: 4296 goto out; 4297 default: 4298 err = nfs4_handle_exception(server, err, &exception); 4299 } 4300 } while (exception.retry); 4301 out: 4302 return err; 4303 } 4304 4305 static int nfs4_lookup_root_sec(struct nfs_server *server, 4306 struct nfs_fh *fhandle, struct nfs_fattr *fattr, 4307 rpc_authflavor_t flavor) 4308 { 4309 struct rpc_auth_create_args auth_args = { 4310 .pseudoflavor = flavor, 4311 }; 4312 struct rpc_auth *auth; 4313 4314 auth = rpcauth_create(&auth_args, server->client); 4315 if (IS_ERR(auth)) 4316 return -EACCES; 4317 return nfs4_lookup_root(server, fhandle, fattr); 4318 } 4319 4320 /* 4321 * Retry pseudoroot lookup with various security flavors. We do this when: 4322 * 4323 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC 4324 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation 4325 * 4326 * Returns zero on success, or a negative NFS4ERR value, or a 4327 * negative errno value. 4328 */ 4329 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 4330 struct nfs_fattr *fattr) 4331 { 4332 /* Per 3530bis 15.33.5 */ 4333 static const rpc_authflavor_t flav_array[] = { 4334 RPC_AUTH_GSS_KRB5P, 4335 RPC_AUTH_GSS_KRB5I, 4336 RPC_AUTH_GSS_KRB5, 4337 RPC_AUTH_UNIX, /* courtesy */ 4338 RPC_AUTH_NULL, 4339 }; 4340 int status = -EPERM; 4341 size_t i; 4342 4343 if (server->auth_info.flavor_len > 0) { 4344 /* try each flavor specified by user */ 4345 for (i = 0; i < server->auth_info.flavor_len; i++) { 4346 status = nfs4_lookup_root_sec( 4347 server, fhandle, fattr, 4348 server->auth_info.flavors[i]); 4349 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 4350 continue; 4351 break; 4352 } 4353 } else { 4354 /* no flavors specified by user, try default list */ 4355 for (i = 0; i < ARRAY_SIZE(flav_array); i++) { 4356 status = nfs4_lookup_root_sec(server, fhandle, fattr, 4357 flav_array[i]); 4358 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 4359 continue; 4360 break; 4361 } 4362 } 4363 4364 /* 4365 * -EACCES could mean that the user doesn't have correct permissions 4366 * to access the mount. It could also mean that we tried to mount 4367 * with a gss auth flavor, but rpc.gssd isn't running. Either way, 4368 * existing mount programs don't handle -EACCES very well so it should 4369 * be mapped to -EPERM instead. 
4370 */ 4371 if (status == -EACCES) 4372 status = -EPERM; 4373 return status; 4374 } 4375 4376 /** 4377 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot 4378 * @server: initialized nfs_server handle 4379 * @fhandle: we fill in the pseudo-fs root file handle 4380 * @fattr: we fill in a bare bones struct fattr 4381 * @auth_probe: probe the auth flavours 4382 * 4383 * Returns zero on success, or a negative errno. 4384 */ 4385 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle, 4386 struct nfs_fattr *fattr, bool auth_probe) 4387 { 4388 int status = 0; 4389 4390 if (!auth_probe) 4391 status = nfs4_lookup_root(server, fhandle, fattr); 4392 4393 if (auth_probe || status == NFS4ERR_WRONGSEC) 4394 status = server->nfs_client->cl_mvops->find_root_sec( 4395 server, fhandle, fattr); 4396 4397 return nfs4_map_errors(status); 4398 } 4399 4400 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh, 4401 struct nfs_fsinfo *info) 4402 { 4403 int error; 4404 struct nfs_fattr *fattr = info->fattr; 4405 4406 error = nfs4_server_capabilities(server, mntfh); 4407 if (error < 0) { 4408 dprintk("nfs4_get_root: getcaps error = %d\n", -error); 4409 return error; 4410 } 4411 4412 error = nfs4_proc_getattr(server, mntfh, fattr, NULL); 4413 if (error < 0) { 4414 dprintk("nfs4_get_root: getattr error = %d\n", -error); 4415 goto out; 4416 } 4417 4418 if (fattr->valid & NFS_ATTR_FATTR_FSID && 4419 !nfs_fsid_equal(&server->fsid, &fattr->fsid)) 4420 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid)); 4421 4422 out: 4423 return error; 4424 } 4425 4426 /* 4427 * Get locations and (maybe) other attributes of a referral. 4428 * Note that we'll actually follow the referral later when 4429 * we detect fsid mismatch in inode revalidation 4430 */ 4431 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir, 4432 const struct qstr *name, struct nfs_fattr *fattr, 4433 struct nfs_fh *fhandle) 4434 { 4435 int status = -ENOMEM; 4436 struct page *page = NULL; 4437 struct nfs4_fs_locations *locations = NULL; 4438 4439 page = alloc_page(GFP_KERNEL); 4440 if (page == NULL) 4441 goto out; 4442 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 4443 if (locations == NULL) 4444 goto out; 4445 4446 locations->fattr = fattr; 4447 4448 status = nfs4_proc_fs_locations(client, dir, name, locations, page); 4449 if (status != 0) 4450 goto out; 4451 4452 /* 4453 * If the fsid didn't change, this is a migration event, not a 4454 * referral. Cause us to drop into the exception handler, which 4455 * will kick off migration recovery. 
4456 */ 4457 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &fattr->fsid)) { 4458 dprintk("%s: server did not return a different fsid for" 4459 " a referral at %s\n", __func__, name->name); 4460 status = -NFS4ERR_MOVED; 4461 goto out; 4462 } 4463 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */ 4464 nfs_fixup_referral_attributes(fattr); 4465 memset(fhandle, 0, sizeof(struct nfs_fh)); 4466 out: 4467 if (page) 4468 __free_page(page); 4469 kfree(locations); 4470 return status; 4471 } 4472 4473 #if IS_ENABLED(CONFIG_NFS_V4_1) 4474 static bool should_request_dir_deleg(struct inode *inode) 4475 { 4476 if (!directory_delegations) 4477 return false; 4478 if (!inode) 4479 return false; 4480 if (!S_ISDIR(inode->i_mode)) 4481 return false; 4482 if (!nfs_server_capable(inode, NFS_CAP_DIR_DELEG)) 4483 return false; 4484 if (!test_and_clear_bit(NFS_INO_REQ_DIR_DELEG, &(NFS_I(inode)->flags))) 4485 return false; 4486 if (nfs4_have_delegation(inode, FMODE_READ, 0)) 4487 return false; 4488 return true; 4489 } 4490 #else 4491 static bool should_request_dir_deleg(struct inode *inode) 4492 { 4493 return false; 4494 } 4495 #endif /* CONFIG_NFS_V4_1 */ 4496 4497 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, 4498 struct nfs_fattr *fattr, struct inode *inode) 4499 { 4500 __u32 bitmask[NFS4_BITMASK_SZ]; 4501 struct nfs4_getattr_arg args = { 4502 .fh = fhandle, 4503 .bitmask = bitmask, 4504 }; 4505 struct nfs4_getattr_res res = { 4506 .fattr = fattr, 4507 .server = server, 4508 }; 4509 struct rpc_message msg = { 4510 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 4511 .rpc_argp = &args, 4512 .rpc_resp = &res, 4513 }; 4514 struct nfs4_gdd_res gdd_res; 4515 unsigned short task_flags = 0; 4516 int status; 4517 4518 if (nfs4_has_session(server->nfs_client)) 4519 task_flags = RPC_TASK_MOVEABLE; 4520 4521 /* Is this is an attribute revalidation, subject to softreval? */ 4522 if (inode && (server->flags & NFS_MOUNT_SOFTREVAL)) 4523 task_flags |= RPC_TASK_TIMEOUT; 4524 4525 args.get_dir_deleg = should_request_dir_deleg(inode); 4526 if (args.get_dir_deleg) 4527 res.gdd_res = &gdd_res; 4528 4529 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label), inode, 0); 4530 nfs_fattr_init(fattr); 4531 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 4532 4533 status = nfs4_do_call_sync(server->client, server, &msg, 4534 &args.seq_args, &res.seq_res, task_flags); 4535 if (args.get_dir_deleg) { 4536 switch (status) { 4537 case 0: 4538 if (gdd_res.status != GDD4_OK) 4539 break; 4540 status = nfs_inode_set_delegation( 4541 inode, current_cred(), FMODE_READ, 4542 &gdd_res.deleg, 0, NFS4_OPEN_DELEGATE_READ); 4543 break; 4544 case -ENOTSUPP: 4545 case -EOPNOTSUPP: 4546 server->caps &= ~NFS_CAP_DIR_DELEG; 4547 } 4548 } 4549 return status; 4550 } 4551 4552 int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, 4553 struct nfs_fattr *fattr, struct inode *inode) 4554 { 4555 struct nfs4_exception exception = { 4556 .interruptible = true, 4557 }; 4558 int err; 4559 do { 4560 err = _nfs4_proc_getattr(server, fhandle, fattr, inode); 4561 trace_nfs4_getattr(server, fhandle, fattr, err); 4562 switch (err) { 4563 default: 4564 err = nfs4_handle_exception(server, err, &exception); 4565 break; 4566 case -ENOTSUPP: 4567 case -EOPNOTSUPP: 4568 exception.retry = true; 4569 } 4570 } while (exception.retry); 4571 return err; 4572 } 4573 4574 /* 4575 * The file is not closed if it is opened due to the a request to change 4576 * the size of the file. 
The open call will not be needed once the 4577 * VFS layer lookup-intents are implemented. 4578 * 4579 * Close is called when the inode is destroyed. 4580 * If we haven't opened the file for O_WRONLY, we 4581 * need to in the size_change case to obtain a stateid. 4582 * 4583 * Got race? 4584 * Because OPEN is always done by name in nfsv4, it is 4585 * possible that we opened a different file by the same 4586 * name. We can recognize this race condition, but we 4587 * can't do anything about it besides returning an error. 4588 * 4589 * This will be fixed with VFS changes (lookup-intent). 4590 */ 4591 static int 4592 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, 4593 struct iattr *sattr) 4594 { 4595 struct inode *inode = d_inode(dentry); 4596 const struct cred *cred = NULL; 4597 struct nfs_open_context *ctx = NULL; 4598 int status; 4599 4600 if (pnfs_ld_layoutret_on_setattr(inode) && 4601 sattr->ia_valid & ATTR_SIZE && 4602 sattr->ia_size < i_size_read(inode)) 4603 pnfs_commit_and_return_layout(inode); 4604 4605 nfs_fattr_init(fattr); 4606 4607 /* Deal with open(O_TRUNC) */ 4608 if (sattr->ia_valid & ATTR_OPEN) 4609 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME); 4610 4611 /* Optimization: if the end result is no change, don't RPC */ 4612 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0) 4613 return 0; 4614 4615 /* Search for an existing open(O_WRITE) file */ 4616 if (sattr->ia_valid & ATTR_FILE) { 4617 4618 ctx = nfs_file_open_context(sattr->ia_file); 4619 if (ctx) 4620 cred = ctx->cred; 4621 } 4622 4623 /* Return any delegations if we're going to change ACLs */ 4624 if ((sattr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0) 4625 nfs4_inode_make_writeable(inode); 4626 4627 status = nfs4_do_setattr(inode, cred, fattr, sattr, ctx, NULL); 4628 if (status == 0) { 4629 nfs_setattr_update_inode(inode, sattr, fattr); 4630 nfs_setsecurity(inode, fattr); 4631 } 4632 return status; 4633 } 4634 4635 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, 4636 struct dentry *dentry, const struct qstr *name, 4637 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4638 { 4639 struct nfs_server *server = NFS_SERVER(dir); 4640 int status; 4641 struct nfs4_lookup_arg args = { 4642 .bitmask = server->attr_bitmask, 4643 .dir_fh = NFS_FH(dir), 4644 .name = name, 4645 }; 4646 struct nfs4_lookup_res res = { 4647 .server = server, 4648 .fattr = fattr, 4649 .fh = fhandle, 4650 }; 4651 struct rpc_message msg = { 4652 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP], 4653 .rpc_argp = &args, 4654 .rpc_resp = &res, 4655 }; 4656 unsigned short task_flags = 0; 4657 4658 if (nfs_server_capable(dir, NFS_CAP_MOVEABLE)) 4659 task_flags = RPC_TASK_MOVEABLE; 4660 4661 /* Is this is an attribute revalidation, subject to softreval? 
*/ 4662 if (nfs_lookup_is_soft_revalidate(dentry)) 4663 task_flags |= RPC_TASK_TIMEOUT; 4664 4665 args.bitmask = nfs4_bitmask(server, fattr->label); 4666 4667 nfs_fattr_init(fattr); 4668 4669 dprintk("NFS call lookup %pd2\n", dentry); 4670 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 4671 status = nfs4_do_call_sync(clnt, server, &msg, 4672 &args.seq_args, &res.seq_res, task_flags); 4673 dprintk("NFS reply lookup: %d\n", status); 4674 return status; 4675 } 4676 4677 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr) 4678 { 4679 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 4680 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT; 4681 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 4682 fattr->nlink = 2; 4683 } 4684 4685 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir, 4686 struct dentry *dentry, const struct qstr *name, 4687 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4688 { 4689 struct nfs4_exception exception = { 4690 .interruptible = true, 4691 }; 4692 struct rpc_clnt *client = *clnt; 4693 int err; 4694 do { 4695 err = _nfs4_proc_lookup(client, dir, dentry, name, fhandle, fattr); 4696 trace_nfs4_lookup(dir, name, err); 4697 switch (err) { 4698 case -NFS4ERR_BADNAME: 4699 err = -ENOENT; 4700 goto out; 4701 case -NFS4ERR_MOVED: 4702 err = nfs4_get_referral(client, dir, name, fattr, fhandle); 4703 if (err == -NFS4ERR_MOVED) 4704 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 4705 goto out; 4706 case -NFS4ERR_WRONGSEC: 4707 err = -EPERM; 4708 if (client != *clnt) 4709 goto out; 4710 client = nfs4_negotiate_security(client, dir, name); 4711 if (IS_ERR(client)) 4712 return PTR_ERR(client); 4713 4714 exception.retry = 1; 4715 break; 4716 default: 4717 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 4718 } 4719 } while (exception.retry); 4720 4721 out: 4722 if (err == 0) 4723 *clnt = client; 4724 else if (client != *clnt) 4725 rpc_shutdown_client(client); 4726 4727 return err; 4728 } 4729 4730 static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry, const struct qstr *name, 4731 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4732 { 4733 int status; 4734 struct rpc_clnt *client = NFS_CLIENT(dir); 4735 4736 status = nfs4_proc_lookup_common(&client, dir, dentry, name, fhandle, fattr); 4737 if (client != NFS_CLIENT(dir)) { 4738 rpc_shutdown_client(client); 4739 nfs_fixup_secinfo_attributes(fattr); 4740 } 4741 return status; 4742 } 4743 4744 struct rpc_clnt * 4745 nfs4_proc_lookup_mountpoint(struct inode *dir, struct dentry *dentry, 4746 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4747 { 4748 struct rpc_clnt *client = NFS_CLIENT(dir); 4749 int status; 4750 4751 status = nfs4_proc_lookup_common(&client, dir, dentry, &dentry->d_name, 4752 fhandle, fattr); 4753 if (status < 0) 4754 return ERR_PTR(status); 4755 return (client == NFS_CLIENT(dir)) ? 
rpc_clone_client(client) : client; 4756 } 4757 4758 static int _nfs4_proc_lookupp(struct inode *inode, 4759 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4760 { 4761 struct rpc_clnt *clnt = NFS_CLIENT(inode); 4762 struct nfs_server *server = NFS_SERVER(inode); 4763 int status; 4764 struct nfs4_lookupp_arg args = { 4765 .bitmask = server->attr_bitmask, 4766 .fh = NFS_FH(inode), 4767 }; 4768 struct nfs4_lookupp_res res = { 4769 .server = server, 4770 .fattr = fattr, 4771 .fh = fhandle, 4772 }; 4773 struct rpc_message msg = { 4774 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUPP], 4775 .rpc_argp = &args, 4776 .rpc_resp = &res, 4777 }; 4778 unsigned short task_flags = 0; 4779 4780 if (server->flags & NFS_MOUNT_SOFTREVAL) 4781 task_flags |= RPC_TASK_TIMEOUT; 4782 if (server->caps & NFS_CAP_MOVEABLE) 4783 task_flags |= RPC_TASK_MOVEABLE; 4784 4785 args.bitmask = nfs4_bitmask(server, fattr->label); 4786 4787 nfs_fattr_init(fattr); 4788 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 4789 4790 dprintk("NFS call lookupp ino=0x%lx\n", inode->i_ino); 4791 status = nfs4_do_call_sync(clnt, server, &msg, &args.seq_args, 4792 &res.seq_res, task_flags); 4793 dprintk("NFS reply lookupp: %d\n", status); 4794 return status; 4795 } 4796 4797 static int nfs4_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle, 4798 struct nfs_fattr *fattr) 4799 { 4800 struct nfs4_exception exception = { 4801 .interruptible = true, 4802 }; 4803 int err; 4804 do { 4805 err = _nfs4_proc_lookupp(inode, fhandle, fattr); 4806 trace_nfs4_lookupp(inode, err); 4807 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4808 &exception); 4809 } while (exception.retry); 4810 return err; 4811 } 4812 4813 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry, 4814 const struct cred *cred) 4815 { 4816 struct nfs_server *server = NFS_SERVER(inode); 4817 struct nfs4_accessargs args = { 4818 .fh = NFS_FH(inode), 4819 .access = entry->mask, 4820 }; 4821 struct nfs4_accessres res = { 4822 .server = server, 4823 }; 4824 struct rpc_message msg = { 4825 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], 4826 .rpc_argp = &args, 4827 .rpc_resp = &res, 4828 .rpc_cred = cred, 4829 }; 4830 int status = 0; 4831 4832 if (!nfs4_have_delegation(inode, FMODE_READ, 0)) { 4833 nfs_request_directory_delegation(inode); 4834 res.fattr = nfs_alloc_fattr(); 4835 if (res.fattr == NULL) 4836 return -ENOMEM; 4837 args.bitmask = server->cache_consistency_bitmask; 4838 } 4839 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4840 if (!status) { 4841 nfs_access_set_mask(entry, res.access); 4842 if (res.fattr) 4843 nfs_refresh_inode(inode, res.fattr); 4844 } 4845 nfs_free_fattr(res.fattr); 4846 return status; 4847 } 4848 4849 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry, 4850 const struct cred *cred) 4851 { 4852 struct nfs4_exception exception = { 4853 .interruptible = true, 4854 }; 4855 int err; 4856 do { 4857 err = _nfs4_proc_access(inode, entry, cred); 4858 trace_nfs4_access(inode, err); 4859 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4860 &exception); 4861 } while (exception.retry); 4862 return err; 4863 } 4864 4865 /* 4866 * TODO: For the time being, we don't try to get any attributes 4867 * along with any of the zero-copy operations READ, READDIR, 4868 * READLINK, WRITE. 
4869 * 4870 * In the case of the first three, we want to put the GETATTR 4871 * after the read-type operation -- this is because it is hard 4872 * to predict the length of a GETATTR response in v4, and thus 4873 * align the READ data correctly. This means that the GETATTR 4874 * may end up partially falling into the page cache, and we should 4875 * shift it into the 'tail' of the xdr_buf before processing. 4876 * To do this efficiently, we need to know the total length 4877 * of data received, which doesn't seem to be available outside 4878 * of the RPC layer. 4879 * 4880 * In the case of WRITE, we also want to put the GETATTR after 4881 * the operation -- in this case because we want to make sure 4882 * we get the post-operation mtime and size. 4883 * 4884 * Both of these changes to the XDR layer would in fact be quite 4885 * minor, but I decided to leave them for a subsequent patch. 4886 */ 4887 static int _nfs4_proc_readlink(struct inode *inode, struct page *page, 4888 unsigned int pgbase, unsigned int pglen) 4889 { 4890 struct nfs4_readlink args = { 4891 .fh = NFS_FH(inode), 4892 .pgbase = pgbase, 4893 .pglen = pglen, 4894 .pages = &page, 4895 }; 4896 struct nfs4_readlink_res res; 4897 struct rpc_message msg = { 4898 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK], 4899 .rpc_argp = &args, 4900 .rpc_resp = &res, 4901 }; 4902 4903 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0); 4904 } 4905 4906 static int nfs4_proc_readlink(struct inode *inode, struct page *page, 4907 unsigned int pgbase, unsigned int pglen) 4908 { 4909 struct nfs4_exception exception = { 4910 .interruptible = true, 4911 }; 4912 int err; 4913 do { 4914 err = _nfs4_proc_readlink(inode, page, pgbase, pglen); 4915 trace_nfs4_readlink(inode, err); 4916 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4917 &exception); 4918 } while (exception.retry); 4919 return err; 4920 } 4921 4922 /* 4923 * This is just for mknod. open(O_CREAT) will always do ->open_context(). 
4924 */ 4925 static int 4926 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, 4927 int flags) 4928 { 4929 struct nfs_server *server = NFS_SERVER(dir); 4930 struct nfs4_label l, *ilabel; 4931 struct nfs_open_context *ctx; 4932 struct nfs4_state *state; 4933 int status = 0; 4934 4935 ctx = alloc_nfs_open_context(dentry, FMODE_READ, NULL); 4936 if (IS_ERR(ctx)) 4937 return PTR_ERR(ctx); 4938 4939 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l); 4940 4941 nfs_request_directory_delegation(dir); 4942 4943 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 4944 sattr->ia_mode &= ~current_umask(); 4945 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL); 4946 if (IS_ERR(state)) { 4947 status = PTR_ERR(state); 4948 goto out; 4949 } 4950 out: 4951 nfs4_label_release_security(ilabel); 4952 put_nfs_open_context(ctx); 4953 return status; 4954 } 4955 4956 static int 4957 _nfs4_proc_remove(struct inode *dir, const struct qstr *name, u32 ftype) 4958 { 4959 struct nfs_server *server = NFS_SERVER(dir); 4960 struct nfs_removeargs args = { 4961 .fh = NFS_FH(dir), 4962 .name = *name, 4963 }; 4964 struct nfs_removeres res = { 4965 .server = server, 4966 }; 4967 struct rpc_message msg = { 4968 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE], 4969 .rpc_argp = &args, 4970 .rpc_resp = &res, 4971 }; 4972 unsigned long timestamp = jiffies; 4973 int status; 4974 4975 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 4976 if (status == 0) { 4977 spin_lock(&dir->i_lock); 4978 /* Removing a directory decrements nlink in the parent */ 4979 if (ftype == NF4DIR && dir->i_nlink > 2) 4980 nfs4_dec_nlink_locked(dir); 4981 nfs4_update_changeattr_locked(dir, &res.cinfo, timestamp, 4982 NFS_INO_INVALID_DATA); 4983 spin_unlock(&dir->i_lock); 4984 } 4985 return status; 4986 } 4987 4988 static int nfs4_proc_remove(struct inode *dir, struct dentry *dentry) 4989 { 4990 struct nfs4_exception exception = { 4991 .interruptible = true, 4992 }; 4993 struct inode *inode = d_inode(dentry); 4994 int err; 4995 4996 if (inode) { 4997 if (inode->i_nlink == 1) 4998 nfs4_inode_return_delegation(inode); 4999 else 5000 nfs4_inode_make_writeable(inode); 5001 } 5002 do { 5003 err = _nfs4_proc_remove(dir, &dentry->d_name, NF4REG); 5004 trace_nfs4_remove(dir, &dentry->d_name, err); 5005 err = nfs4_handle_exception(NFS_SERVER(dir), err, 5006 &exception); 5007 } while (exception.retry); 5008 return err; 5009 } 5010 5011 static int nfs4_proc_rmdir(struct inode *dir, const struct qstr *name) 5012 { 5013 struct nfs4_exception exception = { 5014 .interruptible = true, 5015 }; 5016 int err; 5017 5018 do { 5019 err = _nfs4_proc_remove(dir, name, NF4DIR); 5020 trace_nfs4_remove(dir, name, err); 5021 err = nfs4_handle_exception(NFS_SERVER(dir), err, 5022 &exception); 5023 } while (exception.retry); 5024 return err; 5025 } 5026 5027 static void nfs4_proc_unlink_setup(struct rpc_message *msg, 5028 struct dentry *dentry, 5029 struct inode *inode) 5030 { 5031 struct nfs_removeargs *args = msg->rpc_argp; 5032 struct nfs_removeres *res = msg->rpc_resp; 5033 5034 res->server = NFS_SB(dentry->d_sb); 5035 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE]; 5036 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1, 0); 5037 5038 nfs_fattr_init(res->dir_attr); 5039 nfs_request_directory_delegation(d_inode(dentry->d_parent)); 5040 5041 if (inode) { 5042 nfs4_inode_return_delegation(inode); 5043 nfs_d_prune_case_insensitive_aliases(inode); 5044 } 5045 } 5046 5047 static void 
nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) 5048 { 5049 nfs4_setup_sequence(NFS_SB(data->dentry->d_sb)->nfs_client, 5050 &data->args.seq_args, 5051 &data->res.seq_res, 5052 task); 5053 } 5054 5055 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) 5056 { 5057 struct nfs_unlinkdata *data = task->tk_calldata; 5058 struct nfs_removeres *res = &data->res; 5059 5060 if (!nfs4_sequence_done(task, &res->seq_res)) 5061 return 0; 5062 if (nfs4_async_handle_error(task, res->server, NULL, 5063 &data->timeout) == -EAGAIN) 5064 return 0; 5065 if (task->tk_status == 0) 5066 nfs4_update_changeattr(dir, &res->cinfo, 5067 res->dir_attr->time_start, 5068 NFS_INO_INVALID_DATA); 5069 return 1; 5070 } 5071 5072 static void nfs4_proc_rename_setup(struct rpc_message *msg, 5073 struct dentry *old_dentry, 5074 struct dentry *new_dentry, 5075 struct inode *same_parent) 5076 { 5077 struct nfs_renameargs *arg = msg->rpc_argp; 5078 struct nfs_renameres *res = msg->rpc_resp; 5079 struct inode *old_inode = d_inode(old_dentry); 5080 struct inode *new_inode = d_inode(new_dentry); 5081 5082 if (old_inode) 5083 nfs4_inode_make_writeable(old_inode); 5084 if (new_inode) 5085 nfs4_inode_return_delegation(new_inode); 5086 if (same_parent) 5087 nfs_request_directory_delegation(same_parent); 5088 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; 5089 res->server = NFS_SB(old_dentry->d_sb); 5090 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1, 0); 5091 } 5092 5093 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) 5094 { 5095 nfs4_setup_sequence(NFS_SERVER(data->old_dir)->nfs_client, 5096 &data->args.seq_args, 5097 &data->res.seq_res, 5098 task); 5099 } 5100 5101 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, 5102 struct inode *new_dir) 5103 { 5104 struct nfs_renamedata *data = task->tk_calldata; 5105 struct nfs_renameres *res = &data->res; 5106 5107 if (!nfs4_sequence_done(task, &res->seq_res)) 5108 return 0; 5109 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN) 5110 return 0; 5111 5112 if (task->tk_status == 0) { 5113 nfs_d_prune_case_insensitive_aliases(d_inode(data->old_dentry)); 5114 if (new_dir != old_dir) { 5115 /* Note: If we moved a directory, nlink will change */ 5116 nfs4_update_changeattr(old_dir, &res->old_cinfo, 5117 res->old_fattr->time_start, 5118 NFS_INO_INVALID_NLINK | 5119 NFS_INO_INVALID_DATA); 5120 nfs4_update_changeattr(new_dir, &res->new_cinfo, 5121 res->new_fattr->time_start, 5122 NFS_INO_INVALID_NLINK | 5123 NFS_INO_INVALID_DATA); 5124 } else 5125 nfs4_update_changeattr(old_dir, &res->old_cinfo, 5126 res->old_fattr->time_start, 5127 NFS_INO_INVALID_DATA); 5128 } 5129 return 1; 5130 } 5131 5132 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name) 5133 { 5134 struct nfs_server *server = NFS_SERVER(inode); 5135 __u32 bitmask[NFS4_BITMASK_SZ]; 5136 struct nfs4_link_arg arg = { 5137 .fh = NFS_FH(inode), 5138 .dir_fh = NFS_FH(dir), 5139 .name = name, 5140 .bitmask = bitmask, 5141 }; 5142 struct nfs4_link_res res = { 5143 .server = server, 5144 }; 5145 struct rpc_message msg = { 5146 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK], 5147 .rpc_argp = &arg, 5148 .rpc_resp = &res, 5149 }; 5150 int status = -ENOMEM; 5151 5152 res.fattr = nfs_alloc_fattr_with_label(server); 5153 if (res.fattr == NULL) 5154 goto out; 5155 5156 nfs4_inode_make_writeable(inode); 5157 nfs4_bitmap_copy_adjust(bitmask, 
nfs4_bitmask(server, res.fattr->label), 5158 inode, 5159 NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME); 5160 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 5161 if (!status) { 5162 nfs4_update_changeattr(dir, &res.cinfo, res.fattr->time_start, 5163 NFS_INO_INVALID_DATA); 5164 nfs4_inc_nlink(inode); 5165 status = nfs_post_op_update_inode(inode, res.fattr); 5166 if (!status) 5167 nfs_setsecurity(inode, res.fattr); 5168 } 5169 5170 out: 5171 nfs_free_fattr(res.fattr); 5172 return status; 5173 } 5174 5175 static int nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name) 5176 { 5177 struct nfs4_exception exception = { 5178 .interruptible = true, 5179 }; 5180 int err; 5181 do { 5182 err = nfs4_handle_exception(NFS_SERVER(inode), 5183 _nfs4_proc_link(inode, dir, name), 5184 &exception); 5185 } while (exception.retry); 5186 return err; 5187 } 5188 5189 struct nfs4_createdata { 5190 struct rpc_message msg; 5191 struct nfs4_create_arg arg; 5192 struct nfs4_create_res res; 5193 struct nfs_fh fh; 5194 struct nfs_fattr fattr; 5195 }; 5196 5197 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir, 5198 const struct qstr *name, struct iattr *sattr, u32 ftype) 5199 { 5200 struct nfs4_createdata *data; 5201 5202 data = kzalloc(sizeof(*data), GFP_KERNEL); 5203 if (data != NULL) { 5204 struct nfs_server *server = NFS_SERVER(dir); 5205 5206 data->fattr.label = nfs4_label_alloc(server, GFP_KERNEL); 5207 if (IS_ERR(data->fattr.label)) 5208 goto out_free; 5209 5210 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE]; 5211 data->msg.rpc_argp = &data->arg; 5212 data->msg.rpc_resp = &data->res; 5213 data->arg.dir_fh = NFS_FH(dir); 5214 data->arg.server = server; 5215 data->arg.name = name; 5216 data->arg.attrs = sattr; 5217 data->arg.ftype = ftype; 5218 data->arg.bitmask = nfs4_bitmask(server, data->fattr.label); 5219 data->arg.umask = current_umask(); 5220 data->res.server = server; 5221 data->res.fh = &data->fh; 5222 data->res.fattr = &data->fattr; 5223 nfs_fattr_init(data->res.fattr); 5224 } 5225 return data; 5226 out_free: 5227 kfree(data); 5228 return NULL; 5229 } 5230 5231 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data) 5232 { 5233 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 5234 &data->arg.seq_args, &data->res.seq_res, 1); 5235 if (status == 0) { 5236 spin_lock(&dir->i_lock); 5237 nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo, 5238 data->res.fattr->time_start, 5239 NFS_INO_INVALID_DATA); 5240 spin_unlock(&dir->i_lock); 5241 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr); 5242 } 5243 return status; 5244 } 5245 5246 static struct dentry *nfs4_do_mkdir(struct inode *dir, struct dentry *dentry, 5247 struct nfs4_createdata *data, int *statusp) 5248 { 5249 struct dentry *ret; 5250 5251 *statusp = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 5252 &data->arg.seq_args, &data->res.seq_res, 1); 5253 5254 if (*statusp) 5255 return NULL; 5256 5257 spin_lock(&dir->i_lock); 5258 /* Creating a directory bumps nlink in the parent */ 5259 nfs4_inc_nlink_locked(dir); 5260 nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo, 5261 data->res.fattr->time_start, 5262 NFS_INO_INVALID_DATA); 5263 spin_unlock(&dir->i_lock); 5264 ret = nfs_add_or_obtain(dentry, data->res.fh, data->res.fattr); 5265 if (!IS_ERR(ret)) 5266 return ret; 5267 *statusp = PTR_ERR(ret); 5268 return NULL; 5269 } 5270 5271 static 
void nfs4_free_createdata(struct nfs4_createdata *data) 5272 { 5273 nfs4_label_free(data->fattr.label); 5274 kfree(data); 5275 } 5276 5277 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 5278 struct folio *folio, unsigned int len, struct iattr *sattr, 5279 struct nfs4_label *label) 5280 { 5281 struct page *page = &folio->page; 5282 struct nfs4_createdata *data; 5283 int status = -ENAMETOOLONG; 5284 5285 if (len > NFS4_MAXPATHLEN) 5286 goto out; 5287 5288 status = -ENOMEM; 5289 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK); 5290 if (data == NULL) 5291 goto out; 5292 5293 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK]; 5294 data->arg.u.symlink.pages = &page; 5295 data->arg.u.symlink.len = len; 5296 data->arg.label = label; 5297 5298 status = nfs4_do_create(dir, dentry, data); 5299 5300 nfs4_free_createdata(data); 5301 out: 5302 return status; 5303 } 5304 5305 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 5306 struct folio *folio, unsigned int len, struct iattr *sattr) 5307 { 5308 struct nfs4_exception exception = { 5309 .interruptible = true, 5310 }; 5311 struct nfs4_label l, *label; 5312 int err; 5313 5314 label = nfs4_label_init_security(dir, dentry, sattr, &l); 5315 5316 do { 5317 err = _nfs4_proc_symlink(dir, dentry, folio, len, sattr, label); 5318 trace_nfs4_symlink(dir, &dentry->d_name, err); 5319 err = nfs4_handle_exception(NFS_SERVER(dir), err, 5320 &exception); 5321 } while (exception.retry); 5322 5323 nfs4_label_release_security(label); 5324 return err; 5325 } 5326 5327 static struct dentry *_nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 5328 struct iattr *sattr, 5329 struct nfs4_label *label, int *statusp) 5330 { 5331 struct nfs4_createdata *data; 5332 struct dentry *ret = NULL; 5333 5334 *statusp = -ENOMEM; 5335 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR); 5336 if (data == NULL) 5337 goto out; 5338 5339 data->arg.label = label; 5340 ret = nfs4_do_mkdir(dir, dentry, data, statusp); 5341 5342 nfs4_free_createdata(data); 5343 out: 5344 return ret; 5345 } 5346 5347 static struct dentry *nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 5348 struct iattr *sattr) 5349 { 5350 struct nfs_server *server = NFS_SERVER(dir); 5351 struct nfs4_exception exception = { 5352 .interruptible = true, 5353 }; 5354 struct nfs4_label l, *label; 5355 struct dentry *alias; 5356 int err; 5357 5358 label = nfs4_label_init_security(dir, dentry, sattr, &l); 5359 5360 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 5361 sattr->ia_mode &= ~current_umask(); 5362 do { 5363 alias = _nfs4_proc_mkdir(dir, dentry, sattr, label, &err); 5364 trace_nfs4_mkdir(dir, &dentry->d_name, err); 5365 if (err) 5366 alias = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir), 5367 err, 5368 &exception)); 5369 } while (exception.retry); 5370 nfs4_label_release_security(label); 5371 5372 return alias; 5373 } 5374 5375 static int _nfs4_proc_readdir(struct nfs_readdir_arg *nr_arg, 5376 struct nfs_readdir_res *nr_res) 5377 { 5378 struct inode *dir = d_inode(nr_arg->dentry); 5379 struct nfs_server *server = NFS_SERVER(dir); 5380 struct nfs4_readdir_arg args = { 5381 .fh = NFS_FH(dir), 5382 .pages = nr_arg->pages, 5383 .pgbase = 0, 5384 .count = nr_arg->page_len, 5385 .plus = nr_arg->plus, 5386 }; 5387 struct nfs4_readdir_res res; 5388 struct rpc_message msg = { 5389 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR], 5390 .rpc_argp = &args, 5391 .rpc_resp = &res, 5392 .rpc_cred = nr_arg->cred, 5393 }; 5394 
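	/* READDIR entries are decoded straight into nr_arg->pages; only
	 * the verifier and the byte count are copied back below.
	 */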
int status; 5395 5396 dprintk("%s: dentry = %pd2, cookie = %llu\n", __func__, 5397 nr_arg->dentry, (unsigned long long)nr_arg->cookie); 5398 if (!(server->caps & NFS_CAP_SECURITY_LABEL)) 5399 args.bitmask = server->attr_bitmask_nl; 5400 else 5401 args.bitmask = server->attr_bitmask; 5402 5403 nfs4_setup_readdir(nr_arg->cookie, nr_arg->verf, nr_arg->dentry, &args); 5404 res.pgbase = args.pgbase; 5405 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, 5406 &res.seq_res, 0); 5407 if (status >= 0) { 5408 memcpy(nr_res->verf, res.verifier.data, NFS4_VERIFIER_SIZE); 5409 status += args.pgbase; 5410 } 5411 5412 nfs_invalidate_atime(dir); 5413 5414 dprintk("%s: returns %d\n", __func__, status); 5415 return status; 5416 } 5417 5418 static int nfs4_proc_readdir(struct nfs_readdir_arg *arg, 5419 struct nfs_readdir_res *res) 5420 { 5421 struct nfs4_exception exception = { 5422 .interruptible = true, 5423 }; 5424 int err; 5425 do { 5426 err = _nfs4_proc_readdir(arg, res); 5427 trace_nfs4_readdir(d_inode(arg->dentry), err); 5428 err = nfs4_handle_exception(NFS_SERVER(d_inode(arg->dentry)), 5429 err, &exception); 5430 } while (exception.retry); 5431 return err; 5432 } 5433 5434 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 5435 struct iattr *sattr, struct nfs4_label *label, dev_t rdev) 5436 { 5437 struct nfs4_createdata *data; 5438 int mode = sattr->ia_mode; 5439 int status = -ENOMEM; 5440 5441 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK); 5442 if (data == NULL) 5443 goto out; 5444 5445 if (S_ISFIFO(mode)) 5446 data->arg.ftype = NF4FIFO; 5447 else if (S_ISBLK(mode)) { 5448 data->arg.ftype = NF4BLK; 5449 data->arg.u.device.specdata1 = MAJOR(rdev); 5450 data->arg.u.device.specdata2 = MINOR(rdev); 5451 } 5452 else if (S_ISCHR(mode)) { 5453 data->arg.ftype = NF4CHR; 5454 data->arg.u.device.specdata1 = MAJOR(rdev); 5455 data->arg.u.device.specdata2 = MINOR(rdev); 5456 } else if (!S_ISSOCK(mode)) { 5457 status = -EINVAL; 5458 goto out_free; 5459 } 5460 5461 data->arg.label = label; 5462 status = nfs4_do_create(dir, dentry, data); 5463 out_free: 5464 nfs4_free_createdata(data); 5465 out: 5466 return status; 5467 } 5468 5469 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 5470 struct iattr *sattr, dev_t rdev) 5471 { 5472 struct nfs_server *server = NFS_SERVER(dir); 5473 struct nfs4_exception exception = { 5474 .interruptible = true, 5475 }; 5476 struct nfs4_label l, *label; 5477 int err; 5478 5479 label = nfs4_label_init_security(dir, dentry, sattr, &l); 5480 5481 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 5482 sattr->ia_mode &= ~current_umask(); 5483 do { 5484 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev); 5485 trace_nfs4_mknod(dir, &dentry->d_name, err); 5486 err = nfs4_handle_exception(NFS_SERVER(dir), err, 5487 &exception); 5488 } while (exception.retry); 5489 5490 nfs4_label_release_security(label); 5491 5492 return err; 5493 } 5494 5495 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, 5496 struct nfs_fsstat *fsstat) 5497 { 5498 struct nfs4_statfs_arg args = { 5499 .fh = fhandle, 5500 .bitmask = server->attr_bitmask, 5501 }; 5502 struct nfs4_statfs_res res = { 5503 .fsstat = fsstat, 5504 }; 5505 struct rpc_message msg = { 5506 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS], 5507 .rpc_argp = &args, 5508 .rpc_resp = &res, 5509 }; 5510 5511 nfs_fattr_init(fsstat->fattr); 5512 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5513 } 
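/*
 * As with most procedures in this file, the nfs4_proc_statfs() wrapper
 * below feeds the result of _nfs4_proc_statfs() through
 * nfs4_handle_exception() and retries while exception.retry is set.
 */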
5514 5515 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) 5516 { 5517 struct nfs4_exception exception = { 5518 .interruptible = true, 5519 }; 5520 int err; 5521 do { 5522 err = nfs4_handle_exception(server, 5523 _nfs4_proc_statfs(server, fhandle, fsstat), 5524 &exception); 5525 } while (exception.retry); 5526 return err; 5527 } 5528 5529 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, 5530 struct nfs_fsinfo *fsinfo) 5531 { 5532 struct nfs4_fsinfo_arg args = { 5533 .fh = fhandle, 5534 .bitmask = server->attr_bitmask, 5535 }; 5536 struct nfs4_fsinfo_res res = { 5537 .fsinfo = fsinfo, 5538 }; 5539 struct rpc_message msg = { 5540 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO], 5541 .rpc_argp = &args, 5542 .rpc_resp = &res, 5543 }; 5544 5545 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5546 } 5547 5548 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 5549 { 5550 struct nfs4_exception exception = { 5551 .interruptible = true, 5552 }; 5553 int err; 5554 5555 do { 5556 err = _nfs4_do_fsinfo(server, fhandle, fsinfo); 5557 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err); 5558 if (err == 0) { 5559 nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time * HZ); 5560 break; 5561 } 5562 err = nfs4_handle_exception(server, err, &exception); 5563 } while (exception.retry); 5564 return err; 5565 } 5566 5567 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 5568 { 5569 int error; 5570 5571 nfs_fattr_init(fsinfo->fattr); 5572 error = nfs4_do_fsinfo(server, fhandle, fsinfo); 5573 if (error == 0) { 5574 /* block layout checks this! 
*/ 5575 server->pnfs_blksize = fsinfo->blksize; 5576 set_pnfs_layoutdriver(server, fhandle, fsinfo); 5577 } 5578 5579 return error; 5580 } 5581 5582 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 5583 struct nfs_pathconf *pathconf) 5584 { 5585 struct nfs4_pathconf_arg args = { 5586 .fh = fhandle, 5587 .bitmask = server->attr_bitmask, 5588 }; 5589 struct nfs4_pathconf_res res = { 5590 .pathconf = pathconf, 5591 }; 5592 struct rpc_message msg = { 5593 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF], 5594 .rpc_argp = &args, 5595 .rpc_resp = &res, 5596 }; 5597 5598 /* None of the pathconf attributes are mandatory to implement */ 5599 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) { 5600 memset(pathconf, 0, sizeof(*pathconf)); 5601 return 0; 5602 } 5603 5604 nfs_fattr_init(pathconf->fattr); 5605 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5606 } 5607 5608 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 5609 struct nfs_pathconf *pathconf) 5610 { 5611 struct nfs4_exception exception = { 5612 .interruptible = true, 5613 }; 5614 int err; 5615 5616 do { 5617 err = nfs4_handle_exception(server, 5618 _nfs4_proc_pathconf(server, fhandle, pathconf), 5619 &exception); 5620 } while (exception.retry); 5621 return err; 5622 } 5623 5624 int nfs4_set_rw_stateid(nfs4_stateid *stateid, 5625 const struct nfs_open_context *ctx, 5626 const struct nfs_lock_context *l_ctx, 5627 fmode_t fmode) 5628 { 5629 return nfs4_select_rw_stateid(ctx->state, fmode, l_ctx, stateid, NULL); 5630 } 5631 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid); 5632 5633 static bool nfs4_stateid_is_current(nfs4_stateid *stateid, 5634 const struct nfs_open_context *ctx, 5635 const struct nfs_lock_context *l_ctx, 5636 fmode_t fmode) 5637 { 5638 nfs4_stateid _current_stateid; 5639 5640 /* If the current stateid represents a lost lock, then exit */ 5641 if (nfs4_set_rw_stateid(&_current_stateid, ctx, l_ctx, fmode) == -EIO) 5642 return true; 5643 return nfs4_stateid_match(stateid, &_current_stateid); 5644 } 5645 5646 static bool nfs4_error_stateid_expired(int err) 5647 { 5648 switch (err) { 5649 case -NFS4ERR_DELEG_REVOKED: 5650 case -NFS4ERR_ADMIN_REVOKED: 5651 case -NFS4ERR_BAD_STATEID: 5652 case -NFS4ERR_STALE_STATEID: 5653 case -NFS4ERR_OLD_STATEID: 5654 case -NFS4ERR_OPENMODE: 5655 case -NFS4ERR_EXPIRED: 5656 return true; 5657 } 5658 return false; 5659 } 5660 5661 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr) 5662 { 5663 struct nfs_server *server = NFS_SERVER(hdr->inode); 5664 5665 trace_nfs4_read(hdr, task->tk_status); 5666 if (task->tk_status < 0) { 5667 struct nfs4_exception exception = { 5668 .inode = hdr->inode, 5669 .state = hdr->args.context->state, 5670 .stateid = &hdr->args.stateid, 5671 .retrans = hdr->retrans, 5672 }; 5673 task->tk_status = nfs4_async_handle_exception(task, 5674 server, task->tk_status, &exception); 5675 hdr->retrans = exception.retrans; 5676 if (exception.retry) { 5677 rpc_restart_call_prepare(task); 5678 return -EAGAIN; 5679 } 5680 } 5681 5682 if (task->tk_status > 0) 5683 renew_lease(server, hdr->timestamp); 5684 return 0; 5685 } 5686 5687 static bool nfs4_read_stateid_changed(struct rpc_task *task, 5688 struct nfs_pgio_args *args) 5689 { 5690 5691 if (!nfs4_error_stateid_expired(task->tk_status) || 5692 nfs4_stateid_is_current(&args->stateid, 5693 args->context, 5694 args->lock_context, 5695 FMODE_READ)) 5696 return false; 5697 rpc_restart_call_prepare(task); 5698 
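	/* The READ has been requeued; the rpc_call_prepare callback will
	 * refresh args->stateid before it goes back on the wire.
	 */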
return true; 5699 } 5700 5701 static bool nfs4_read_plus_not_supported(struct rpc_task *task, 5702 struct nfs_pgio_header *hdr) 5703 { 5704 struct nfs_server *server = NFS_SERVER(hdr->inode); 5705 struct rpc_message *msg = &task->tk_msg; 5706 5707 if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS] && 5708 task->tk_status == -ENOTSUPP) { 5709 server->caps &= ~NFS_CAP_READ_PLUS; 5710 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 5711 rpc_restart_call_prepare(task); 5712 return true; 5713 } 5714 return false; 5715 } 5716 5717 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 5718 { 5719 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 5720 return -EAGAIN; 5721 if (nfs4_read_stateid_changed(task, &hdr->args)) 5722 return -EAGAIN; 5723 if (nfs4_read_plus_not_supported(task, hdr)) 5724 return -EAGAIN; 5725 if (task->tk_status > 0) 5726 nfs_invalidate_atime(hdr->inode); 5727 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) : 5728 nfs4_read_done_cb(task, hdr); 5729 } 5730 5731 #if defined CONFIG_NFS_V4_2 && defined CONFIG_NFS_V4_2_READ_PLUS 5732 static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr, 5733 struct rpc_message *msg) 5734 { 5735 /* Note: We don't use READ_PLUS with pNFS yet */ 5736 if (nfs_server_capable(hdr->inode, NFS_CAP_READ_PLUS) && !hdr->ds_clp) { 5737 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS]; 5738 return nfs_read_alloc_scratch(hdr, READ_PLUS_SCRATCH_SIZE); 5739 } 5740 return false; 5741 } 5742 #else 5743 static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr, 5744 struct rpc_message *msg) 5745 { 5746 return false; 5747 } 5748 #endif /* CONFIG_NFS_V4_2 */ 5749 5750 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr, 5751 struct rpc_message *msg) 5752 { 5753 hdr->timestamp = jiffies; 5754 if (!hdr->pgio_done_cb) 5755 hdr->pgio_done_cb = nfs4_read_done_cb; 5756 if (!nfs42_read_plus_support(hdr, msg)) 5757 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 5758 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0); 5759 } 5760 5761 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task, 5762 struct nfs_pgio_header *hdr) 5763 { 5764 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode)->nfs_client, 5765 &hdr->args.seq_args, 5766 &hdr->res.seq_res, 5767 task)) 5768 return 0; 5769 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context, 5770 hdr->args.lock_context, 5771 hdr->rw_mode) == -EIO) 5772 return -EIO; 5773 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) 5774 return -EIO; 5775 return 0; 5776 } 5777 5778 static int nfs4_write_done_cb(struct rpc_task *task, 5779 struct nfs_pgio_header *hdr) 5780 { 5781 struct inode *inode = hdr->inode; 5782 5783 trace_nfs4_write(hdr, task->tk_status); 5784 if (task->tk_status < 0) { 5785 struct nfs4_exception exception = { 5786 .inode = hdr->inode, 5787 .state = hdr->args.context->state, 5788 .stateid = &hdr->args.stateid, 5789 .retrans = hdr->retrans, 5790 }; 5791 task->tk_status = nfs4_async_handle_exception(task, 5792 NFS_SERVER(inode), task->tk_status, 5793 &exception); 5794 hdr->retrans = exception.retrans; 5795 if (exception.retry) { 5796 rpc_restart_call_prepare(task); 5797 return -EAGAIN; 5798 } 5799 } 5800 if (task->tk_status >= 0) { 5801 renew_lease(NFS_SERVER(inode), hdr->timestamp); 5802 nfs_writeback_update_inode(hdr); 5803 } 5804 return 0; 5805 } 5806 5807 static bool nfs4_write_stateid_changed(struct rpc_task *task, 5808 struct nfs_pgio_args *args) 5809 { 5810 5811 if 
(!nfs4_error_stateid_expired(task->tk_status) || 5812 nfs4_stateid_is_current(&args->stateid, 5813 args->context, 5814 args->lock_context, 5815 FMODE_WRITE)) 5816 return false; 5817 rpc_restart_call_prepare(task); 5818 return true; 5819 } 5820 5821 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 5822 { 5823 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 5824 return -EAGAIN; 5825 if (nfs4_write_stateid_changed(task, &hdr->args)) 5826 return -EAGAIN; 5827 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) : 5828 nfs4_write_done_cb(task, hdr); 5829 } 5830 5831 static 5832 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr) 5833 { 5834 /* Don't request attributes for pNFS or O_DIRECT writes */ 5835 if (hdr->ds_clp != NULL || hdr->dreq != NULL) 5836 return false; 5837 /* Otherwise, request attributes if and only if we don't hold 5838 * a delegation 5839 */ 5840 return nfs4_have_delegation(hdr->inode, FMODE_READ, 0) == 0; 5841 } 5842 5843 void nfs4_bitmask_set(__u32 bitmask[], const __u32 src[], 5844 struct inode *inode, unsigned long cache_validity) 5845 { 5846 struct nfs_server *server = NFS_SERVER(inode); 5847 unsigned int i; 5848 5849 memcpy(bitmask, src, sizeof(*bitmask) * NFS4_BITMASK_SZ); 5850 cache_validity |= READ_ONCE(NFS_I(inode)->cache_validity); 5851 5852 if (cache_validity & NFS_INO_INVALID_CHANGE) 5853 bitmask[0] |= FATTR4_WORD0_CHANGE; 5854 if (cache_validity & NFS_INO_INVALID_ATIME) 5855 bitmask[1] |= FATTR4_WORD1_TIME_ACCESS; 5856 if (cache_validity & NFS_INO_INVALID_MODE) 5857 bitmask[1] |= FATTR4_WORD1_MODE; 5858 if (cache_validity & NFS_INO_INVALID_OTHER) 5859 bitmask[1] |= FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP; 5860 if (cache_validity & NFS_INO_INVALID_NLINK) 5861 bitmask[1] |= FATTR4_WORD1_NUMLINKS; 5862 if (cache_validity & NFS_INO_INVALID_CTIME) 5863 bitmask[1] |= FATTR4_WORD1_TIME_METADATA; 5864 if (cache_validity & NFS_INO_INVALID_MTIME) 5865 bitmask[1] |= FATTR4_WORD1_TIME_MODIFY; 5866 if (cache_validity & NFS_INO_INVALID_BLOCKS) 5867 bitmask[1] |= FATTR4_WORD1_SPACE_USED; 5868 if (cache_validity & NFS_INO_INVALID_BTIME) 5869 bitmask[1] |= FATTR4_WORD1_TIME_CREATE; 5870 5871 if (cache_validity & NFS_INO_INVALID_SIZE) 5872 bitmask[0] |= FATTR4_WORD0_SIZE; 5873 5874 for (i = 0; i < NFS4_BITMASK_SZ; i++) 5875 bitmask[i] &= server->attr_bitmask[i]; 5876 } 5877 5878 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr, 5879 struct rpc_message *msg, 5880 struct rpc_clnt **clnt) 5881 { 5882 struct nfs_server *server = NFS_SERVER(hdr->inode); 5883 5884 if (!nfs4_write_need_cache_consistency_data(hdr)) { 5885 hdr->args.bitmask = NULL; 5886 hdr->res.fattr = NULL; 5887 } else { 5888 nfs4_bitmask_set(hdr->args.bitmask_store, 5889 server->cache_consistency_bitmask, 5890 hdr->inode, NFS_INO_INVALID_BLOCKS); 5891 hdr->args.bitmask = hdr->args.bitmask_store; 5892 } 5893 5894 if (!hdr->pgio_done_cb) 5895 hdr->pgio_done_cb = nfs4_write_done_cb; 5896 hdr->res.server = server; 5897 hdr->timestamp = jiffies; 5898 5899 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; 5900 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0); 5901 nfs4_state_protect_write(hdr->ds_clp ? 
hdr->ds_clp : server->nfs_client, clnt, msg, hdr); 5902 } 5903 5904 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) 5905 { 5906 nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client, 5907 &data->args.seq_args, 5908 &data->res.seq_res, 5909 task); 5910 } 5911 5912 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data) 5913 { 5914 struct inode *inode = data->inode; 5915 5916 trace_nfs4_commit(data, task->tk_status); 5917 if (nfs4_async_handle_error(task, NFS_SERVER(inode), 5918 NULL, NULL) == -EAGAIN) { 5919 rpc_restart_call_prepare(task); 5920 return -EAGAIN; 5921 } 5922 return 0; 5923 } 5924 5925 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data) 5926 { 5927 if (!nfs4_sequence_done(task, &data->res.seq_res)) 5928 return -EAGAIN; 5929 return data->commit_done_cb(task, data); 5930 } 5931 5932 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg, 5933 struct rpc_clnt **clnt) 5934 { 5935 struct nfs_server *server = NFS_SERVER(data->inode); 5936 5937 if (data->commit_done_cb == NULL) 5938 data->commit_done_cb = nfs4_commit_done_cb; 5939 data->res.server = server; 5940 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT]; 5941 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0); 5942 nfs4_state_protect(data->ds_clp ? data->ds_clp : server->nfs_client, 5943 NFS_SP4_MACH_CRED_COMMIT, clnt, msg); 5944 } 5945 5946 static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args, 5947 struct nfs_commitres *res) 5948 { 5949 struct inode *dst_inode = file_inode(dst); 5950 struct nfs_server *server = NFS_SERVER(dst_inode); 5951 struct rpc_message msg = { 5952 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT], 5953 .rpc_argp = args, 5954 .rpc_resp = res, 5955 }; 5956 5957 args->fh = NFS_FH(dst_inode); 5958 return nfs4_call_sync(server->client, server, &msg, 5959 &args->seq_args, &res->seq_res, 1); 5960 } 5961 5962 int nfs4_proc_commit(struct file *dst, __u64 offset, __u32 count, struct nfs_commitres *res) 5963 { 5964 struct nfs_commitargs args = { 5965 .offset = offset, 5966 .count = count, 5967 }; 5968 struct nfs_server *dst_server = NFS_SERVER(file_inode(dst)); 5969 struct nfs4_exception exception = { }; 5970 int status; 5971 5972 do { 5973 status = _nfs4_proc_commit(dst, &args, res); 5974 status = nfs4_handle_exception(dst_server, status, &exception); 5975 } while (exception.retry); 5976 5977 return status; 5978 } 5979 5980 struct nfs4_renewdata { 5981 struct nfs_client *client; 5982 unsigned long timestamp; 5983 }; 5984 5985 /* 5986 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special 5987 * standalone procedure for queueing an asynchronous RENEW. 
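 * The rpc_release callback (nfs4_renew_release below) schedules the next
 * renewal via nfs4_schedule_state_renewal() as long as someone other than
 * this task still holds a reference to the nfs_client.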
5988 */ 5989 static void nfs4_renew_release(void *calldata) 5990 { 5991 struct nfs4_renewdata *data = calldata; 5992 struct nfs_client *clp = data->client; 5993 5994 if (refcount_read(&clp->cl_count) > 1) 5995 nfs4_schedule_state_renewal(clp); 5996 nfs_put_client(clp); 5997 kfree(data); 5998 } 5999 6000 static void nfs4_renew_done(struct rpc_task *task, void *calldata) 6001 { 6002 struct nfs4_renewdata *data = calldata; 6003 struct nfs_client *clp = data->client; 6004 unsigned long timestamp = data->timestamp; 6005 6006 trace_nfs4_renew_async(clp, task->tk_status); 6007 switch (task->tk_status) { 6008 case 0: 6009 break; 6010 case -NFS4ERR_LEASE_MOVED: 6011 nfs4_schedule_lease_moved_recovery(clp); 6012 break; 6013 default: 6014 /* Unless we're shutting down, schedule state recovery! */ 6015 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) 6016 return; 6017 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { 6018 nfs4_schedule_lease_recovery(clp); 6019 return; 6020 } 6021 nfs4_schedule_path_down_recovery(clp); 6022 } 6023 do_renew_lease(clp, timestamp); 6024 } 6025 6026 static const struct rpc_call_ops nfs4_renew_ops = { 6027 .rpc_call_done = nfs4_renew_done, 6028 .rpc_release = nfs4_renew_release, 6029 }; 6030 6031 static int nfs4_proc_async_renew(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags) 6032 { 6033 struct rpc_message msg = { 6034 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 6035 .rpc_argp = clp, 6036 .rpc_cred = cred, 6037 }; 6038 struct nfs4_renewdata *data; 6039 6040 if (renew_flags == 0) 6041 return 0; 6042 if (!refcount_inc_not_zero(&clp->cl_count)) 6043 return -EIO; 6044 data = kmalloc(sizeof(*data), GFP_NOFS); 6045 if (data == NULL) { 6046 nfs_put_client(clp); 6047 return -ENOMEM; 6048 } 6049 data->client = clp; 6050 data->timestamp = jiffies; 6051 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT, 6052 &nfs4_renew_ops, data); 6053 } 6054 6055 static int nfs4_proc_renew(struct nfs_client *clp, const struct cred *cred) 6056 { 6057 struct rpc_message msg = { 6058 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 6059 .rpc_argp = clp, 6060 .rpc_cred = cred, 6061 }; 6062 unsigned long now = jiffies; 6063 int status; 6064 6065 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 6066 if (status < 0) 6067 return status; 6068 do_renew_lease(clp, now); 6069 return 0; 6070 } 6071 6072 static bool nfs4_server_supports_acls(const struct nfs_server *server, 6073 enum nfs4_acl_type type) 6074 { 6075 switch (type) { 6076 default: 6077 return server->attr_bitmask[0] & FATTR4_WORD0_ACL; 6078 case NFS4ACL_DACL: 6079 return server->attr_bitmask[1] & FATTR4_WORD1_DACL; 6080 case NFS4ACL_SACL: 6081 return server->attr_bitmask[1] & FATTR4_WORD1_SACL; 6082 } 6083 } 6084 6085 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that 6086 * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on 6087 * the stack. 
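 * With the usual XATTR_SIZE_MAX of 64KiB and 4KiB pages, NFS4ACL_MAXPAGES
 * below works out to 16, i.e. a 16-entry array of page pointers (128 bytes
 * on a 64-bit build) in __nfs4_proc_set_acl().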
6088 */ 6089 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE) 6090 6091 int nfs4_buf_to_pages_noslab(const void *buf, size_t buflen, 6092 struct page **pages) 6093 { 6094 struct page *newpage, **spages; 6095 int rc = 0; 6096 size_t len; 6097 spages = pages; 6098 6099 do { 6100 len = min_t(size_t, PAGE_SIZE, buflen); 6101 newpage = alloc_page(GFP_KERNEL); 6102 6103 if (newpage == NULL) 6104 goto unwind; 6105 memcpy(page_address(newpage), buf, len); 6106 buf += len; 6107 buflen -= len; 6108 *pages++ = newpage; 6109 rc++; 6110 } while (buflen != 0); 6111 6112 return rc; 6113 6114 unwind: 6115 for(; rc > 0; rc--) 6116 __free_page(spages[rc-1]); 6117 return -ENOMEM; 6118 } 6119 6120 struct nfs4_cached_acl { 6121 enum nfs4_acl_type type; 6122 int cached; 6123 size_t len; 6124 char data[]; 6125 }; 6126 6127 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl) 6128 { 6129 struct nfs_inode *nfsi = NFS_I(inode); 6130 6131 spin_lock(&inode->i_lock); 6132 kfree(nfsi->nfs4_acl); 6133 nfsi->nfs4_acl = acl; 6134 spin_unlock(&inode->i_lock); 6135 } 6136 6137 static void nfs4_zap_acl_attr(struct inode *inode) 6138 { 6139 nfs4_set_cached_acl(inode, NULL); 6140 } 6141 6142 static ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, 6143 size_t buflen, enum nfs4_acl_type type) 6144 { 6145 struct nfs_inode *nfsi = NFS_I(inode); 6146 struct nfs4_cached_acl *acl; 6147 int ret = -ENOENT; 6148 6149 spin_lock(&inode->i_lock); 6150 acl = nfsi->nfs4_acl; 6151 if (acl == NULL) 6152 goto out; 6153 if (acl->type != type) 6154 goto out; 6155 if (buf == NULL) /* user is just asking for length */ 6156 goto out_len; 6157 if (acl->cached == 0) 6158 goto out; 6159 ret = -ERANGE; /* see getxattr(2) man page */ 6160 if (acl->len > buflen) 6161 goto out; 6162 memcpy(buf, acl->data, acl->len); 6163 out_len: 6164 ret = acl->len; 6165 out: 6166 spin_unlock(&inode->i_lock); 6167 return ret; 6168 } 6169 6170 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, 6171 size_t pgbase, size_t acl_len, 6172 enum nfs4_acl_type type) 6173 { 6174 struct nfs4_cached_acl *acl; 6175 size_t buflen = sizeof(*acl) + acl_len; 6176 6177 if (buflen <= PAGE_SIZE) { 6178 acl = kmalloc(buflen, GFP_KERNEL); 6179 if (acl == NULL) 6180 goto out; 6181 acl->cached = 1; 6182 _copy_from_pages(acl->data, pages, pgbase, acl_len); 6183 } else { 6184 acl = kmalloc(sizeof(*acl), GFP_KERNEL); 6185 if (acl == NULL) 6186 goto out; 6187 acl->cached = 0; 6188 } 6189 acl->type = type; 6190 acl->len = acl_len; 6191 out: 6192 nfs4_set_cached_acl(inode, acl); 6193 } 6194 6195 /* 6196 * The getxattr API returns the required buffer length when called with a 6197 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating 6198 * the required buf. On a NULL buf, we send a page of data to the server 6199 * guessing that the ACL request can be serviced by a page. If so, we cache 6200 * up to the page of ACL data, and the 2nd call to getxattr is serviced by 6201 * the cache. If not so, we throw away the page, and cache the required 6202 * length. The next getxattr call will then produce another round trip to 6203 * the server, this time with the input buf of the required size. 
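 *
 * A typical userspace sequence (a sketch of the getxattr(2) pattern this
 * comment describes, not code from this file):
 *
 *	len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	buf = malloc(len);
 *	getxattr(path, "system.nfs4_acl", buf, len);
 *
 * The second call is then served from the cache populated here whenever
 * the ACL fit into the page we speculatively requested.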
6204 */ 6205 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, 6206 size_t buflen, enum nfs4_acl_type type) 6207 { 6208 struct page **pages; 6209 struct nfs_getaclargs args = { 6210 .fh = NFS_FH(inode), 6211 .acl_type = type, 6212 .acl_len = buflen, 6213 }; 6214 struct nfs_getaclres res = { 6215 .acl_type = type, 6216 .acl_len = buflen, 6217 }; 6218 struct rpc_message msg = { 6219 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL], 6220 .rpc_argp = &args, 6221 .rpc_resp = &res, 6222 }; 6223 unsigned int npages; 6224 int ret = -ENOMEM, i; 6225 struct nfs_server *server = NFS_SERVER(inode); 6226 6227 if (buflen == 0) 6228 buflen = server->rsize; 6229 6230 npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1; 6231 pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); 6232 if (!pages) 6233 return -ENOMEM; 6234 6235 args.acl_pages = pages; 6236 6237 for (i = 0; i < npages; i++) { 6238 pages[i] = alloc_page(GFP_KERNEL); 6239 if (!pages[i]) 6240 goto out_free; 6241 } 6242 6243 /* for decoding across pages */ 6244 res.acl_scratch = folio_alloc(GFP_KERNEL, 0); 6245 if (!res.acl_scratch) 6246 goto out_free; 6247 6248 args.acl_len = npages * PAGE_SIZE; 6249 6250 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", 6251 __func__, buf, buflen, npages, args.acl_len); 6252 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), 6253 &msg, &args.seq_args, &res.seq_res, 0); 6254 if (ret) 6255 goto out_free; 6256 6257 /* Handle the case where the passed-in buffer is too short */ 6258 if (res.acl_flags & NFS4_ACL_TRUNC) { 6259 /* Did the user only issue a request for the acl length? */ 6260 if (buf == NULL) 6261 goto out_ok; 6262 ret = -ERANGE; 6263 goto out_free; 6264 } 6265 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len, 6266 type); 6267 if (buf) { 6268 if (res.acl_len > buflen) { 6269 ret = -ERANGE; 6270 goto out_free; 6271 } 6272 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len); 6273 } 6274 out_ok: 6275 ret = res.acl_len; 6276 out_free: 6277 while (--i >= 0) 6278 __free_page(pages[i]); 6279 if (res.acl_scratch) 6280 folio_put(res.acl_scratch); 6281 kfree(pages); 6282 return ret; 6283 } 6284 6285 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, 6286 size_t buflen, enum nfs4_acl_type type) 6287 { 6288 struct nfs4_exception exception = { 6289 .interruptible = true, 6290 }; 6291 ssize_t ret; 6292 do { 6293 ret = __nfs4_get_acl_uncached(inode, buf, buflen, type); 6294 trace_nfs4_get_acl(inode, ret); 6295 if (ret >= 0) 6296 break; 6297 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); 6298 } while (exception.retry); 6299 return ret; 6300 } 6301 6302 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen, 6303 enum nfs4_acl_type type) 6304 { 6305 struct nfs_server *server = NFS_SERVER(inode); 6306 int ret; 6307 6308 if (unlikely(NFS_FH(inode)->size == 0)) 6309 return -ENODATA; 6310 if (!nfs4_server_supports_acls(server, type)) 6311 return -EOPNOTSUPP; 6312 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 6313 if (ret < 0) 6314 return ret; 6315 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) 6316 nfs_zap_acl_cache(inode); 6317 ret = nfs4_read_cached_acl(inode, buf, buflen, type); 6318 if (ret != -ENOENT) 6319 /* -ENOENT is returned if there is no ACL or if there is an ACL 6320 * but no cached acl data, just the acl length */ 6321 return ret; 6322 return nfs4_get_acl_uncached(inode, buf, buflen, type); 6323 } 6324 6325 static int __nfs4_proc_set_acl(struct 
inode *inode, const void *buf, 6326 size_t buflen, enum nfs4_acl_type type) 6327 { 6328 struct nfs_server *server = NFS_SERVER(inode); 6329 struct page *pages[NFS4ACL_MAXPAGES]; 6330 struct nfs_setaclargs arg = { 6331 .fh = NFS_FH(inode), 6332 .acl_type = type, 6333 .acl_len = buflen, 6334 .acl_pages = pages, 6335 }; 6336 struct nfs_setaclres res; 6337 struct rpc_message msg = { 6338 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL], 6339 .rpc_argp = &arg, 6340 .rpc_resp = &res, 6341 }; 6342 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 6343 int ret, i; 6344 6345 /* You can't remove system.nfs4_acl: */ 6346 if (buflen == 0) 6347 return -EINVAL; 6348 if (!nfs4_server_supports_acls(server, type)) 6349 return -EOPNOTSUPP; 6350 if (npages > ARRAY_SIZE(pages)) 6351 return -ERANGE; 6352 i = nfs4_buf_to_pages_noslab(buf, buflen, arg.acl_pages); 6353 if (i < 0) 6354 return i; 6355 nfs4_inode_make_writeable(inode); 6356 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 6357 6358 /* 6359 * Free each page after tx, so the only ref left is 6360 * held by the network stack 6361 */ 6362 for (; i > 0; i--) 6363 put_page(pages[i-1]); 6364 6365 /* 6366 * Acl update can result in inode attribute update. 6367 * so mark the attribute cache invalid. 6368 */ 6369 spin_lock(&inode->i_lock); 6370 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE | 6371 NFS_INO_INVALID_CTIME | 6372 NFS_INO_REVAL_FORCED); 6373 spin_unlock(&inode->i_lock); 6374 nfs_access_zap_cache(inode); 6375 nfs_zap_acl_cache(inode); 6376 return ret; 6377 } 6378 6379 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, 6380 size_t buflen, enum nfs4_acl_type type) 6381 { 6382 struct nfs4_exception exception = { }; 6383 int err; 6384 6385 if (unlikely(NFS_FH(inode)->size == 0)) 6386 return -ENODATA; 6387 do { 6388 err = __nfs4_proc_set_acl(inode, buf, buflen, type); 6389 trace_nfs4_set_acl(inode, err); 6390 if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) { 6391 /* 6392 * no need to retry since the kernel 6393 * isn't involved in encoding the ACEs. 
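 * NFS4ERR_BADOWNER/NFS4ERR_BADNAME mean the server could not translate an
 * owner or group name embedded in the caller-supplied ACL; the client only
 * passes that data through, so the error is reported as -EINVAL instead.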
6394 */ 6395 err = -EINVAL; 6396 break; 6397 } 6398 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6399 &exception); 6400 } while (exception.retry); 6401 return err; 6402 } 6403 6404 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 6405 static int _nfs4_get_security_label(struct inode *inode, void *buf, 6406 size_t buflen) 6407 { 6408 struct nfs_server *server = NFS_SERVER(inode); 6409 struct nfs4_label label = {0, 0, 0, buflen, buf}; 6410 6411 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 6412 struct nfs_fattr fattr = { 6413 .label = &label, 6414 }; 6415 struct nfs4_getattr_arg arg = { 6416 .fh = NFS_FH(inode), 6417 .bitmask = bitmask, 6418 }; 6419 struct nfs4_getattr_res res = { 6420 .fattr = &fattr, 6421 .server = server, 6422 }; 6423 struct rpc_message msg = { 6424 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 6425 .rpc_argp = &arg, 6426 .rpc_resp = &res, 6427 }; 6428 int ret; 6429 6430 nfs_fattr_init(&fattr); 6431 6432 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0); 6433 if (ret) 6434 return ret; 6435 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL)) 6436 return -ENOENT; 6437 return label.len; 6438 } 6439 6440 static int nfs4_get_security_label(struct inode *inode, void *buf, 6441 size_t buflen) 6442 { 6443 struct nfs4_exception exception = { 6444 .interruptible = true, 6445 }; 6446 int err; 6447 6448 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 6449 return -EOPNOTSUPP; 6450 6451 do { 6452 err = _nfs4_get_security_label(inode, buf, buflen); 6453 trace_nfs4_get_security_label(inode, err); 6454 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6455 &exception); 6456 } while (exception.retry); 6457 return err; 6458 } 6459 6460 static int _nfs4_do_set_security_label(struct inode *inode, 6461 struct nfs4_label *ilabel, 6462 struct nfs_fattr *fattr) 6463 { 6464 6465 struct iattr sattr = {0}; 6466 struct nfs_server *server = NFS_SERVER(inode); 6467 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 6468 struct nfs_setattrargs arg = { 6469 .fh = NFS_FH(inode), 6470 .iap = &sattr, 6471 .server = server, 6472 .bitmask = bitmask, 6473 .label = ilabel, 6474 }; 6475 struct nfs_setattrres res = { 6476 .fattr = fattr, 6477 .server = server, 6478 }; 6479 struct rpc_message msg = { 6480 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 6481 .rpc_argp = &arg, 6482 .rpc_resp = &res, 6483 }; 6484 int status; 6485 6486 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 6487 6488 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 6489 if (status) 6490 dprintk("%s failed: %d\n", __func__, status); 6491 6492 return status; 6493 } 6494 6495 static int nfs4_do_set_security_label(struct inode *inode, 6496 struct nfs4_label *ilabel, 6497 struct nfs_fattr *fattr) 6498 { 6499 struct nfs4_exception exception = { }; 6500 int err; 6501 6502 do { 6503 err = _nfs4_do_set_security_label(inode, ilabel, fattr); 6504 trace_nfs4_set_security_label(inode, err); 6505 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6506 &exception); 6507 } while (exception.retry); 6508 return err; 6509 } 6510 6511 static int 6512 nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen) 6513 { 6514 struct nfs4_label ilabel = {0, 0, 0, buflen, (char *)buf }; 6515 struct nfs_fattr *fattr; 6516 int status; 6517 6518 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 6519 return -EOPNOTSUPP; 6520 6521 fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode)); 6522 if (fattr == NULL) 6523 return -ENOMEM; 6524 6525 
status = nfs4_do_set_security_label(inode, &ilabel, fattr); 6526 if (status == 0) 6527 nfs_setsecurity(inode, fattr); 6528 6529 nfs_free_fattr(fattr); 6530 return status; 6531 } 6532 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */ 6533 6534 6535 static void nfs4_init_boot_verifier(const struct nfs_client *clp, 6536 nfs4_verifier *bootverf) 6537 { 6538 __be32 verf[2]; 6539 6540 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) { 6541 /* An impossible timestamp guarantees this value 6542 * will never match a generated boot time. */ 6543 verf[0] = cpu_to_be32(U32_MAX); 6544 verf[1] = cpu_to_be32(U32_MAX); 6545 } else { 6546 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 6547 u64 ns = ktime_to_ns(nn->boot_time); 6548 6549 verf[0] = cpu_to_be32(ns >> 32); 6550 verf[1] = cpu_to_be32(ns); 6551 } 6552 memcpy(bootverf->data, verf, sizeof(bootverf->data)); 6553 } 6554 6555 static size_t 6556 nfs4_get_uniquifier(struct nfs_client *clp, char *buf, size_t buflen) 6557 { 6558 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 6559 struct nfs_netns_client *nn_clp = nn->nfs_client; 6560 const char *id; 6561 6562 buf[0] = '\0'; 6563 6564 if (nn_clp) { 6565 rcu_read_lock(); 6566 id = rcu_dereference(nn_clp->identifier); 6567 if (id) 6568 strscpy(buf, id, buflen); 6569 rcu_read_unlock(); 6570 } 6571 6572 if (nfs4_client_id_uniquifier[0] != '\0' && buf[0] == '\0') 6573 strscpy(buf, nfs4_client_id_uniquifier, buflen); 6574 6575 return strlen(buf); 6576 } 6577 6578 static int 6579 nfs4_init_nonuniform_client_string(struct nfs_client *clp) 6580 { 6581 char buf[NFS4_CLIENT_ID_UNIQ_LEN]; 6582 size_t buflen; 6583 size_t len; 6584 char *str; 6585 6586 if (clp->cl_owner_id != NULL) 6587 return 0; 6588 6589 rcu_read_lock(); 6590 len = 14 + 6591 strlen(clp->cl_rpcclient->cl_nodename) + 6592 1 + 6593 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) + 6594 1; 6595 rcu_read_unlock(); 6596 6597 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf)); 6598 if (buflen) 6599 len += buflen + 1; 6600 6601 if (len > NFS4_OPAQUE_LIMIT + 1) 6602 return -EINVAL; 6603 6604 /* 6605 * Since this string is allocated at mount time, and held until the 6606 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 6607 * about a memory-reclaim deadlock. 6608 */ 6609 str = kmalloc(len, GFP_KERNEL); 6610 if (!str) 6611 return -ENOMEM; 6612 6613 rcu_read_lock(); 6614 if (buflen) 6615 scnprintf(str, len, "Linux NFSv4.0 %s/%s/%s", 6616 clp->cl_rpcclient->cl_nodename, buf, 6617 rpc_peeraddr2str(clp->cl_rpcclient, 6618 RPC_DISPLAY_ADDR)); 6619 else 6620 scnprintf(str, len, "Linux NFSv4.0 %s/%s", 6621 clp->cl_rpcclient->cl_nodename, 6622 rpc_peeraddr2str(clp->cl_rpcclient, 6623 RPC_DISPLAY_ADDR)); 6624 rcu_read_unlock(); 6625 6626 clp->cl_owner_id = str; 6627 return 0; 6628 } 6629 6630 static int 6631 nfs4_init_uniform_client_string(struct nfs_client *clp) 6632 { 6633 char buf[NFS4_CLIENT_ID_UNIQ_LEN]; 6634 size_t buflen; 6635 size_t len; 6636 char *str; 6637 6638 if (clp->cl_owner_id != NULL) 6639 return 0; 6640 6641 len = 10 + 10 + 1 + 10 + 1 + 6642 strlen(clp->cl_rpcclient->cl_nodename) + 1; 6643 6644 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf)); 6645 if (buflen) 6646 len += buflen + 1; 6647 6648 if (len > NFS4_OPAQUE_LIMIT + 1) 6649 return -EINVAL; 6650 6651 /* 6652 * Since this string is allocated at mount time, and held until the 6653 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 6654 * about a memory-reclaim deadlock. 
6655 */ 6656 str = kmalloc(len, GFP_KERNEL); 6657 if (!str) 6658 return -ENOMEM; 6659 6660 if (buflen) 6661 scnprintf(str, len, "Linux NFSv%u.%u %s/%s", 6662 clp->rpc_ops->version, clp->cl_minorversion, 6663 buf, clp->cl_rpcclient->cl_nodename); 6664 else 6665 scnprintf(str, len, "Linux NFSv%u.%u %s", 6666 clp->rpc_ops->version, clp->cl_minorversion, 6667 clp->cl_rpcclient->cl_nodename); 6668 clp->cl_owner_id = str; 6669 return 0; 6670 } 6671 6672 /* 6673 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback 6674 * services. Advertise one based on the address family of the 6675 * clientaddr. 6676 */ 6677 static unsigned int 6678 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len) 6679 { 6680 if (strchr(clp->cl_ipaddr, ':') != NULL) 6681 return scnprintf(buf, len, "tcp6"); 6682 else 6683 return scnprintf(buf, len, "tcp"); 6684 } 6685 6686 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata) 6687 { 6688 struct nfs4_setclientid *sc = calldata; 6689 6690 if (task->tk_status == 0) 6691 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred); 6692 } 6693 6694 static const struct rpc_call_ops nfs4_setclientid_ops = { 6695 .rpc_call_done = nfs4_setclientid_done, 6696 }; 6697 6698 /** 6699 * nfs4_proc_setclientid - Negotiate client ID 6700 * @clp: state data structure 6701 * @program: RPC program for NFSv4 callback service 6702 * @port: IP port number for NFS4 callback service 6703 * @cred: credential to use for this call 6704 * @res: where to place the result 6705 * 6706 * Returns zero, a negative errno, or a negative NFS4ERR status code. 6707 */ 6708 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, 6709 unsigned short port, const struct cred *cred, 6710 struct nfs4_setclientid_res *res) 6711 { 6712 nfs4_verifier sc_verifier; 6713 struct nfs4_setclientid setclientid = { 6714 .sc_verifier = &sc_verifier, 6715 .sc_prog = program, 6716 .sc_clnt = clp, 6717 }; 6718 struct rpc_message msg = { 6719 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID], 6720 .rpc_argp = &setclientid, 6721 .rpc_resp = res, 6722 .rpc_cred = cred, 6723 }; 6724 struct rpc_task_setup task_setup_data = { 6725 .rpc_client = clp->cl_rpcclient, 6726 .rpc_message = &msg, 6727 .callback_ops = &nfs4_setclientid_ops, 6728 .callback_data = &setclientid, 6729 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN, 6730 }; 6731 unsigned long now = jiffies; 6732 int status; 6733 6734 /* nfs_client_id4 */ 6735 nfs4_init_boot_verifier(clp, &sc_verifier); 6736 6737 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags)) 6738 status = nfs4_init_uniform_client_string(clp); 6739 else 6740 status = nfs4_init_nonuniform_client_string(clp); 6741 6742 if (status) 6743 goto out; 6744 6745 /* cb_client4 */ 6746 setclientid.sc_netid_len = 6747 nfs4_init_callback_netid(clp, 6748 setclientid.sc_netid, 6749 sizeof(setclientid.sc_netid)); 6750 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr, 6751 sizeof(setclientid.sc_uaddr), "%s.%u.%u", 6752 clp->cl_ipaddr, port >> 8, port & 255); 6753 6754 dprintk("NFS call setclientid auth=%s, '%s'\n", 6755 clp->cl_rpcclient->cl_auth->au_ops->au_name, 6756 clp->cl_owner_id); 6757 6758 status = nfs4_call_sync_custom(&task_setup_data); 6759 if (setclientid.sc_cred) { 6760 kfree(clp->cl_acceptor); 6761 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred); 6762 put_rpccred(setclientid.sc_cred); 6763 } 6764 6765 if (status == 0) 6766 do_renew_lease(clp, now); 6767 out: 6768 trace_nfs4_setclientid(clp, status); 6769 dprintk("NFS reply 
setclientid: %d\n", status); 6770 return status; 6771 } 6772 6773 /** 6774 * nfs4_proc_setclientid_confirm - Confirm client ID 6775 * @clp: state data structure 6776 * @arg: result of a previous SETCLIENTID 6777 * @cred: credential to use for this call 6778 * 6779 * Returns zero, a negative errno, or a negative NFS4ERR status code. 6780 */ 6781 int nfs4_proc_setclientid_confirm(struct nfs_client *clp, 6782 struct nfs4_setclientid_res *arg, 6783 const struct cred *cred) 6784 { 6785 struct rpc_message msg = { 6786 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM], 6787 .rpc_argp = arg, 6788 .rpc_cred = cred, 6789 }; 6790 int status; 6791 6792 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n", 6793 clp->cl_rpcclient->cl_auth->au_ops->au_name, 6794 clp->cl_clientid); 6795 status = rpc_call_sync(clp->cl_rpcclient, &msg, 6796 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 6797 trace_nfs4_setclientid_confirm(clp, status); 6798 dprintk("NFS reply setclientid_confirm: %d\n", status); 6799 return status; 6800 } 6801 6802 struct nfs4_delegreturndata { 6803 struct nfs4_delegreturnargs args; 6804 struct nfs4_delegreturnres res; 6805 struct nfs_fh fh; 6806 nfs4_stateid stateid; 6807 unsigned long timestamp; 6808 unsigned short retrans; 6809 struct { 6810 struct nfs4_layoutreturn_args arg; 6811 struct nfs4_layoutreturn_res res; 6812 struct nfs4_xdr_opaque_data ld_private; 6813 u32 roc_barrier; 6814 bool roc; 6815 } lr; 6816 struct nfs4_delegattr sattr; 6817 struct nfs_fattr fattr; 6818 int rpc_status; 6819 struct inode *inode; 6820 }; 6821 6822 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) 6823 { 6824 struct nfs4_delegreturndata *data = calldata; 6825 struct nfs4_exception exception = { 6826 .inode = data->inode, 6827 .stateid = &data->stateid, 6828 .task_is_privileged = data->args.seq_args.sa_privileged, 6829 .retrans = data->retrans, 6830 }; 6831 6832 if (!nfs4_sequence_done(task, &data->res.seq_res)) 6833 return; 6834 6835 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status); 6836 6837 /* Handle Layoutreturn errors */ 6838 if (pnfs_roc_done(task, &data->args.lr_args, &data->res.lr_res, 6839 &data->res.lr_ret) == -EAGAIN) 6840 goto out_restart; 6841 6842 if (data->args.sattr_args && task->tk_status != 0) { 6843 switch(data->res.sattr_ret) { 6844 case 0: 6845 data->args.sattr_args = NULL; 6846 data->res.sattr_res = false; 6847 break; 6848 case -NFS4ERR_ADMIN_REVOKED: 6849 case -NFS4ERR_DELEG_REVOKED: 6850 case -NFS4ERR_EXPIRED: 6851 case -NFS4ERR_BAD_STATEID: 6852 /* Let the main handler below do stateid recovery */ 6853 break; 6854 case -NFS4ERR_OLD_STATEID: 6855 if (nfs4_refresh_delegation_stateid(&data->stateid, 6856 data->inode)) 6857 goto out_restart; 6858 fallthrough; 6859 default: 6860 data->args.sattr_args = NULL; 6861 data->res.sattr_res = false; 6862 goto out_restart; 6863 } 6864 } 6865 6866 switch (task->tk_status) { 6867 case 0: 6868 renew_lease(data->res.server, data->timestamp); 6869 break; 6870 case -NFS4ERR_ADMIN_REVOKED: 6871 case -NFS4ERR_DELEG_REVOKED: 6872 case -NFS4ERR_EXPIRED: 6873 nfs4_free_revoked_stateid(data->res.server, 6874 data->args.stateid, 6875 task->tk_msg.rpc_cred); 6876 fallthrough; 6877 case -NFS4ERR_BAD_STATEID: 6878 case -NFS4ERR_STALE_STATEID: 6879 case -ETIMEDOUT: 6880 task->tk_status = 0; 6881 break; 6882 case -NFS4ERR_OLD_STATEID: 6883 if (!nfs4_refresh_delegation_stateid(&data->stateid, data->inode)) 6884 nfs4_stateid_seqid_inc(&data->stateid); 6885 if (data->args.bitmask) { 6886 
data->args.bitmask = NULL; 6887 data->res.fattr = NULL; 6888 } 6889 goto out_restart; 6890 case -NFS4ERR_ACCESS: 6891 if (data->args.bitmask) { 6892 data->args.bitmask = NULL; 6893 data->res.fattr = NULL; 6894 goto out_restart; 6895 } 6896 fallthrough; 6897 default: 6898 task->tk_status = nfs4_async_handle_exception(task, 6899 data->res.server, task->tk_status, 6900 &exception); 6901 data->retrans = exception.retrans; 6902 if (exception.retry) 6903 goto out_restart; 6904 } 6905 nfs_delegation_mark_returned(data->inode, data->args.stateid); 6906 data->rpc_status = task->tk_status; 6907 return; 6908 out_restart: 6909 task->tk_status = 0; 6910 rpc_restart_call_prepare(task); 6911 } 6912 6913 static void nfs4_delegreturn_release(void *calldata) 6914 { 6915 struct nfs4_delegreturndata *data = calldata; 6916 struct inode *inode = data->inode; 6917 6918 if (data->lr.roc) 6919 pnfs_roc_release(&data->lr.arg, &data->lr.res, 6920 data->res.lr_ret); 6921 if (inode) { 6922 nfs4_fattr_set_prechange(&data->fattr, 6923 inode_peek_iversion_raw(inode)); 6924 nfs_refresh_inode(inode, &data->fattr); 6925 nfs_iput_and_deactive(inode); 6926 } 6927 kfree(calldata); 6928 } 6929 6930 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) 6931 { 6932 struct nfs4_delegreturndata *d_data; 6933 struct pnfs_layout_hdr *lo; 6934 6935 d_data = data; 6936 6937 if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) { 6938 nfs4_sequence_done(task, &d_data->res.seq_res); 6939 return; 6940 } 6941 6942 lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL; 6943 if (lo && !pnfs_layout_is_valid(lo)) { 6944 d_data->args.lr_args = NULL; 6945 d_data->res.lr_res = NULL; 6946 } 6947 6948 nfs4_setup_sequence(d_data->res.server->nfs_client, 6949 &d_data->args.seq_args, 6950 &d_data->res.seq_res, 6951 task); 6952 } 6953 6954 static const struct rpc_call_ops nfs4_delegreturn_ops = { 6955 .rpc_call_prepare = nfs4_delegreturn_prepare, 6956 .rpc_call_done = nfs4_delegreturn_done, 6957 .rpc_release = nfs4_delegreturn_release, 6958 }; 6959 6960 static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, 6961 const nfs4_stateid *stateid, 6962 struct nfs_delegation *delegation, 6963 int issync) 6964 { 6965 struct nfs4_delegreturndata *data; 6966 struct nfs_server *server = NFS_SERVER(inode); 6967 struct rpc_task *task; 6968 struct rpc_message msg = { 6969 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN], 6970 .rpc_cred = cred, 6971 }; 6972 struct rpc_task_setup task_setup_data = { 6973 .rpc_client = server->client, 6974 .rpc_message = &msg, 6975 .callback_ops = &nfs4_delegreturn_ops, 6976 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT, 6977 }; 6978 int status = 0; 6979 6980 if (nfs_server_capable(inode, NFS_CAP_MOVEABLE)) 6981 task_setup_data.flags |= RPC_TASK_MOVEABLE; 6982 6983 data = kzalloc(sizeof(*data), GFP_KERNEL); 6984 if (data == NULL) 6985 return -ENOMEM; 6986 6987 nfs4_state_protect(server->nfs_client, 6988 NFS_SP4_MACH_CRED_CLEANUP, 6989 &task_setup_data.rpc_client, &msg); 6990 6991 data->args.fhandle = &data->fh; 6992 data->args.stateid = &data->stateid; 6993 nfs4_bitmask_set(data->args.bitmask_store, 6994 server->cache_consistency_bitmask, inode, 0); 6995 data->args.bitmask = data->args.bitmask_store; 6996 nfs_copy_fh(&data->fh, NFS_FH(inode)); 6997 nfs4_stateid_copy(&data->stateid, stateid); 6998 data->res.fattr = &data->fattr; 6999 data->res.server = server; 7000 data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT; 7001 data->lr.arg.ld_private = &data->lr.ld_private; 
7002 nfs_fattr_init(data->res.fattr); 7003 data->timestamp = jiffies; 7004 data->rpc_status = 0; 7005 data->inode = nfs_igrab_and_active(inode); 7006 if (data->inode || issync) { 7007 data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res, 7008 cred); 7009 if (data->lr.roc) { 7010 data->args.lr_args = &data->lr.arg; 7011 data->res.lr_res = &data->lr.res; 7012 } 7013 } 7014 7015 if (delegation && 7016 test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) { 7017 if (delegation->type & FMODE_READ) { 7018 data->sattr.atime = inode_get_atime(inode); 7019 data->sattr.atime_set = true; 7020 } 7021 if (delegation->type & FMODE_WRITE) { 7022 data->sattr.mtime = inode_get_mtime(inode); 7023 data->sattr.mtime_set = true; 7024 } 7025 data->args.sattr_args = &data->sattr; 7026 data->res.sattr_res = true; 7027 } 7028 7029 if (!data->inode) 7030 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 7031 1); 7032 else 7033 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 7034 0); 7035 7036 task_setup_data.callback_data = data; 7037 msg.rpc_argp = &data->args; 7038 msg.rpc_resp = &data->res; 7039 task = rpc_run_task(&task_setup_data); 7040 if (IS_ERR(task)) 7041 return PTR_ERR(task); 7042 if (!issync) 7043 goto out; 7044 status = rpc_wait_for_completion_task(task); 7045 if (status != 0) 7046 goto out; 7047 status = data->rpc_status; 7048 out: 7049 rpc_put_task(task); 7050 return status; 7051 } 7052 7053 int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, 7054 const nfs4_stateid *stateid, 7055 struct nfs_delegation *delegation, int issync) 7056 { 7057 struct nfs_server *server = NFS_SERVER(inode); 7058 struct nfs4_exception exception = { }; 7059 int err; 7060 do { 7061 err = _nfs4_proc_delegreturn(inode, cred, stateid, 7062 delegation, issync); 7063 trace_nfs4_delegreturn(inode, stateid, err); 7064 switch (err) { 7065 case -NFS4ERR_STALE_STATEID: 7066 case -NFS4ERR_EXPIRED: 7067 case 0: 7068 return 0; 7069 } 7070 err = nfs4_handle_exception(server, err, &exception); 7071 } while (exception.retry); 7072 return err; 7073 } 7074 7075 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7076 { 7077 struct inode *inode = state->inode; 7078 struct nfs_server *server = NFS_SERVER(inode); 7079 struct nfs_client *clp = server->nfs_client; 7080 struct nfs_lockt_args arg = { 7081 .fh = NFS_FH(inode), 7082 .fl = request, 7083 }; 7084 struct nfs_lockt_res res = { 7085 .denied = request, 7086 }; 7087 struct rpc_message msg = { 7088 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT], 7089 .rpc_argp = &arg, 7090 .rpc_resp = &res, 7091 .rpc_cred = state->owner->so_cred, 7092 }; 7093 struct nfs4_lock_state *lsp; 7094 int status; 7095 7096 arg.lock_owner.clientid = clp->cl_clientid; 7097 status = nfs4_set_lock_state(state, request); 7098 if (status != 0) 7099 goto out; 7100 lsp = request->fl_u.nfs4_fl.owner; 7101 arg.lock_owner.id = lsp->ls_seqid.owner_id; 7102 arg.lock_owner.s_dev = server->s_dev; 7103 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 7104 switch (status) { 7105 case 0: 7106 request->c.flc_type = F_UNLCK; 7107 break; 7108 case -NFS4ERR_DENIED: 7109 status = 0; 7110 } 7111 request->fl_ops->fl_release_private(request); 7112 request->fl_ops = NULL; 7113 out: 7114 return status; 7115 } 7116 7117 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7118 { 7119 struct nfs4_exception exception = { 7120 .interruptible = true, 7121 }; 7122 int err; 7123 7124 
do { 7125 err = _nfs4_proc_getlk(state, cmd, request); 7126 trace_nfs4_get_lock(request, state, cmd, err); 7127 err = nfs4_handle_exception(NFS_SERVER(state->inode), err, 7128 &exception); 7129 } while (exception.retry); 7130 return err; 7131 } 7132 7133 /* 7134 * Update the seqid of a lock stateid after receiving 7135 * NFS4ERR_OLD_STATEID 7136 */ 7137 static bool nfs4_refresh_lock_old_stateid(nfs4_stateid *dst, 7138 struct nfs4_lock_state *lsp) 7139 { 7140 struct nfs4_state *state = lsp->ls_state; 7141 bool ret = false; 7142 7143 spin_lock(&state->state_lock); 7144 if (!nfs4_stateid_match_other(dst, &lsp->ls_stateid)) 7145 goto out; 7146 if (!nfs4_stateid_is_newer(&lsp->ls_stateid, dst)) 7147 nfs4_stateid_seqid_inc(dst); 7148 else 7149 dst->seqid = lsp->ls_stateid.seqid; 7150 ret = true; 7151 out: 7152 spin_unlock(&state->state_lock); 7153 return ret; 7154 } 7155 7156 static bool nfs4_sync_lock_stateid(nfs4_stateid *dst, 7157 struct nfs4_lock_state *lsp) 7158 { 7159 struct nfs4_state *state = lsp->ls_state; 7160 bool ret; 7161 7162 spin_lock(&state->state_lock); 7163 ret = !nfs4_stateid_match_other(dst, &lsp->ls_stateid); 7164 nfs4_stateid_copy(dst, &lsp->ls_stateid); 7165 spin_unlock(&state->state_lock); 7166 return ret; 7167 } 7168 7169 struct nfs4_unlockdata { 7170 struct nfs_locku_args arg; 7171 struct nfs_locku_res res; 7172 struct nfs4_lock_state *lsp; 7173 struct nfs_open_context *ctx; 7174 struct nfs_lock_context *l_ctx; 7175 struct file_lock fl; 7176 struct nfs_server *server; 7177 unsigned long timestamp; 7178 unsigned short retrans; 7179 }; 7180 7181 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, 7182 struct nfs_open_context *ctx, 7183 struct nfs4_lock_state *lsp, 7184 struct nfs_seqid *seqid) 7185 { 7186 struct nfs4_unlockdata *p; 7187 struct nfs4_state *state = lsp->ls_state; 7188 struct inode *inode = state->inode; 7189 struct nfs_lock_context *l_ctx; 7190 7191 p = kzalloc(sizeof(*p), GFP_KERNEL); 7192 if (p == NULL) 7193 return NULL; 7194 l_ctx = nfs_get_lock_context(ctx); 7195 if (!IS_ERR(l_ctx)) { 7196 p->l_ctx = l_ctx; 7197 } else { 7198 kfree(p); 7199 return NULL; 7200 } 7201 p->arg.fh = NFS_FH(inode); 7202 p->arg.fl = &p->fl; 7203 p->arg.seqid = seqid; 7204 p->res.seqid = seqid; 7205 p->lsp = lsp; 7206 /* Ensure we don't close file until we're done freeing locks! 
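 * Holding a reference to the open context (and the lock context taken
 * above) keeps the open state pinned until the LOCKU reply has been
 * processed.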
*/ 7207 p->ctx = get_nfs_open_context(ctx); 7208 locks_init_lock(&p->fl); 7209 locks_copy_lock(&p->fl, fl); 7210 p->server = NFS_SERVER(inode); 7211 spin_lock(&state->state_lock); 7212 nfs4_stateid_copy(&p->arg.stateid, &lsp->ls_stateid); 7213 spin_unlock(&state->state_lock); 7214 return p; 7215 } 7216 7217 static void nfs4_locku_release_calldata(void *data) 7218 { 7219 struct nfs4_unlockdata *calldata = data; 7220 nfs_free_seqid(calldata->arg.seqid); 7221 nfs4_put_lock_state(calldata->lsp); 7222 nfs_put_lock_context(calldata->l_ctx); 7223 put_nfs_open_context(calldata->ctx); 7224 kfree(calldata); 7225 } 7226 7227 static void nfs4_locku_done(struct rpc_task *task, void *data) 7228 { 7229 struct nfs4_unlockdata *calldata = data; 7230 struct nfs4_exception exception = { 7231 .inode = calldata->lsp->ls_state->inode, 7232 .stateid = &calldata->arg.stateid, 7233 .retrans = calldata->retrans, 7234 }; 7235 7236 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 7237 return; 7238 switch (task->tk_status) { 7239 case 0: 7240 renew_lease(calldata->server, calldata->timestamp); 7241 locks_lock_inode_wait(calldata->lsp->ls_state->inode, &calldata->fl); 7242 if (nfs4_update_lock_stateid(calldata->lsp, 7243 &calldata->res.stateid)) 7244 break; 7245 fallthrough; 7246 case -NFS4ERR_ADMIN_REVOKED: 7247 case -NFS4ERR_EXPIRED: 7248 nfs4_free_revoked_stateid(calldata->server, 7249 &calldata->arg.stateid, 7250 task->tk_msg.rpc_cred); 7251 fallthrough; 7252 case -NFS4ERR_BAD_STATEID: 7253 case -NFS4ERR_STALE_STATEID: 7254 if (nfs4_sync_lock_stateid(&calldata->arg.stateid, 7255 calldata->lsp)) 7256 rpc_restart_call_prepare(task); 7257 break; 7258 case -NFS4ERR_OLD_STATEID: 7259 if (nfs4_refresh_lock_old_stateid(&calldata->arg.stateid, 7260 calldata->lsp)) 7261 rpc_restart_call_prepare(task); 7262 break; 7263 default: 7264 task->tk_status = nfs4_async_handle_exception(task, 7265 calldata->server, task->tk_status, 7266 &exception); 7267 calldata->retrans = exception.retrans; 7268 if (exception.retry) 7269 rpc_restart_call_prepare(task); 7270 } 7271 nfs_release_seqid(calldata->arg.seqid); 7272 } 7273 7274 static void nfs4_locku_prepare(struct rpc_task *task, void *data) 7275 { 7276 struct nfs4_unlockdata *calldata = data; 7277 7278 if (test_bit(NFS_CONTEXT_UNLOCK, &calldata->l_ctx->open_context->flags) && 7279 nfs_async_iocounter_wait(task, calldata->l_ctx)) 7280 return; 7281 7282 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 7283 goto out_wait; 7284 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) { 7285 /* Note: exit _without_ running nfs4_locku_done */ 7286 goto out_no_action; 7287 } 7288 calldata->timestamp = jiffies; 7289 if (nfs4_setup_sequence(calldata->server->nfs_client, 7290 &calldata->arg.seq_args, 7291 &calldata->res.seq_res, 7292 task) != 0) 7293 nfs_release_seqid(calldata->arg.seqid); 7294 return; 7295 out_no_action: 7296 task->tk_action = NULL; 7297 out_wait: 7298 nfs4_sequence_done(task, &calldata->res.seq_res); 7299 } 7300 7301 static const struct rpc_call_ops nfs4_locku_ops = { 7302 .rpc_call_prepare = nfs4_locku_prepare, 7303 .rpc_call_done = nfs4_locku_done, 7304 .rpc_release = nfs4_locku_release_calldata, 7305 }; 7306 7307 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, 7308 struct nfs_open_context *ctx, 7309 struct nfs4_lock_state *lsp, 7310 struct nfs_seqid *seqid) 7311 { 7312 struct nfs4_unlockdata *data; 7313 struct rpc_message msg = { 7314 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU], 7315 .rpc_cred = ctx->cred, 7316 }; 7317 struct 
rpc_task_setup task_setup_data = { 7318 .rpc_client = NFS_CLIENT(lsp->ls_state->inode), 7319 .rpc_message = &msg, 7320 .callback_ops = &nfs4_locku_ops, 7321 .workqueue = nfsiod_workqueue, 7322 .flags = RPC_TASK_ASYNC, 7323 }; 7324 7325 if (nfs_server_capable(lsp->ls_state->inode, NFS_CAP_MOVEABLE)) 7326 task_setup_data.flags |= RPC_TASK_MOVEABLE; 7327 7328 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client, 7329 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg); 7330 7331 /* Ensure this is an unlock - when canceling a lock, the 7332 * canceled lock is passed in, and it won't be an unlock. 7333 */ 7334 fl->c.flc_type = F_UNLCK; 7335 if (fl->c.flc_flags & FL_CLOSE) 7336 set_bit(NFS_CONTEXT_UNLOCK, &ctx->flags); 7337 7338 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); 7339 if (data == NULL) { 7340 nfs_free_seqid(seqid); 7341 return ERR_PTR(-ENOMEM); 7342 } 7343 7344 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 0); 7345 msg.rpc_argp = &data->arg; 7346 msg.rpc_resp = &data->res; 7347 task_setup_data.callback_data = data; 7348 return rpc_run_task(&task_setup_data); 7349 } 7350 7351 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) 7352 { 7353 struct inode *inode = state->inode; 7354 struct nfs4_state_owner *sp = state->owner; 7355 struct nfs_inode *nfsi = NFS_I(inode); 7356 struct nfs_seqid *seqid; 7357 struct nfs4_lock_state *lsp; 7358 struct rpc_task *task; 7359 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 7360 int status = 0; 7361 unsigned char saved_flags = request->c.flc_flags; 7362 7363 status = nfs4_set_lock_state(state, request); 7364 /* Unlock _before_ we do the RPC call */ 7365 request->c.flc_flags |= FL_EXISTS; 7366 /* Exclude nfs_delegation_claim_locks() */ 7367 mutex_lock(&sp->so_delegreturn_mutex); 7368 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */ 7369 down_read(&nfsi->rwsem); 7370 if (locks_lock_inode_wait(inode, request) == -ENOENT) { 7371 up_read(&nfsi->rwsem); 7372 mutex_unlock(&sp->so_delegreturn_mutex); 7373 goto out; 7374 } 7375 lsp = request->fl_u.nfs4_fl.owner; 7376 set_bit(NFS_LOCK_UNLOCKING, &lsp->ls_flags); 7377 up_read(&nfsi->rwsem); 7378 mutex_unlock(&sp->so_delegreturn_mutex); 7379 if (status != 0) 7380 goto out; 7381 /* Is this a delegated lock? 
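 * If NFS_LOCK_INITIALIZED was never set, the lock was only recorded
 * locally while a delegation was held and no LOCK ever reached the
 * server, so there is nothing to send a LOCKU for.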
*/ 7382 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0) 7383 goto out; 7384 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid; 7385 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); 7386 status = -ENOMEM; 7387 if (IS_ERR(seqid)) 7388 goto out; 7389 task = nfs4_do_unlck(request, 7390 nfs_file_open_context(request->c.flc_file), 7391 lsp, seqid); 7392 status = PTR_ERR(task); 7393 if (IS_ERR(task)) 7394 goto out; 7395 status = rpc_wait_for_completion_task(task); 7396 rpc_put_task(task); 7397 out: 7398 request->c.flc_flags = saved_flags; 7399 trace_nfs4_unlock(request, state, F_SETLK, status); 7400 return status; 7401 } 7402 7403 struct nfs4_lockdata { 7404 struct nfs_lock_args arg; 7405 struct nfs_lock_res res; 7406 struct nfs4_lock_state *lsp; 7407 struct nfs_open_context *ctx; 7408 struct file_lock fl; 7409 unsigned long timestamp; 7410 int rpc_status; 7411 int cancelled; 7412 struct nfs_server *server; 7413 }; 7414 7415 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, 7416 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp, 7417 gfp_t gfp_mask) 7418 { 7419 struct nfs4_lockdata *p; 7420 struct inode *inode = lsp->ls_state->inode; 7421 struct nfs_server *server = NFS_SERVER(inode); 7422 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 7423 7424 p = kzalloc(sizeof(*p), gfp_mask); 7425 if (p == NULL) 7426 return NULL; 7427 7428 p->arg.fh = NFS_FH(inode); 7429 p->arg.fl = &p->fl; 7430 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask); 7431 if (IS_ERR(p->arg.open_seqid)) 7432 goto out_free; 7433 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 7434 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask); 7435 if (IS_ERR(p->arg.lock_seqid)) 7436 goto out_free_seqid; 7437 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; 7438 p->arg.lock_owner.id = lsp->ls_seqid.owner_id; 7439 p->arg.lock_owner.s_dev = server->s_dev; 7440 p->res.lock_seqid = p->arg.lock_seqid; 7441 p->lsp = lsp; 7442 p->server = server; 7443 p->ctx = get_nfs_open_context(ctx); 7444 locks_init_lock(&p->fl); 7445 locks_copy_lock(&p->fl, fl); 7446 return p; 7447 out_free_seqid: 7448 nfs_free_seqid(p->arg.open_seqid); 7449 out_free: 7450 kfree(p); 7451 return NULL; 7452 } 7453 7454 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) 7455 { 7456 struct nfs4_lockdata *data = calldata; 7457 struct nfs4_state *state = data->lsp->ls_state; 7458 7459 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) 7460 goto out_wait; 7461 /* Do we need to do an open_to_lock_owner? 
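 * The first LOCK from a given lock-owner on this open must present the
 * open stateid and an open seqid (new_lock_owner = 1) so the server can
 * establish the lock stateid; subsequent LOCKs just carry the existing
 * lock stateid.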
*/ 7462 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) { 7463 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) { 7464 goto out_release_lock_seqid; 7465 } 7466 nfs4_stateid_copy(&data->arg.open_stateid, 7467 &state->open_stateid); 7468 data->arg.new_lock_owner = 1; 7469 data->res.open_seqid = data->arg.open_seqid; 7470 } else { 7471 data->arg.new_lock_owner = 0; 7472 nfs4_stateid_copy(&data->arg.lock_stateid, 7473 &data->lsp->ls_stateid); 7474 } 7475 if (!nfs4_valid_open_stateid(state)) { 7476 data->rpc_status = -EBADF; 7477 task->tk_action = NULL; 7478 goto out_release_open_seqid; 7479 } 7480 data->timestamp = jiffies; 7481 if (nfs4_setup_sequence(data->server->nfs_client, 7482 &data->arg.seq_args, 7483 &data->res.seq_res, 7484 task) == 0) 7485 return; 7486 out_release_open_seqid: 7487 nfs_release_seqid(data->arg.open_seqid); 7488 out_release_lock_seqid: 7489 nfs_release_seqid(data->arg.lock_seqid); 7490 out_wait: 7491 nfs4_sequence_done(task, &data->res.seq_res); 7492 dprintk("%s: ret = %d\n", __func__, data->rpc_status); 7493 } 7494 7495 static void nfs4_lock_done(struct rpc_task *task, void *calldata) 7496 { 7497 struct nfs4_lockdata *data = calldata; 7498 struct nfs4_lock_state *lsp = data->lsp; 7499 7500 if (!nfs4_sequence_done(task, &data->res.seq_res)) 7501 return; 7502 7503 data->rpc_status = task->tk_status; 7504 switch (task->tk_status) { 7505 case 0: 7506 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)), 7507 data->timestamp); 7508 if (data->arg.new_lock && !data->cancelled) { 7509 data->fl.c.flc_flags &= ~(FL_SLEEP | FL_ACCESS); 7510 if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0) 7511 goto out_restart; 7512 } 7513 if (data->arg.new_lock_owner != 0) { 7514 nfs_confirm_seqid(&lsp->ls_seqid, 0); 7515 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid); 7516 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 7517 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid)) 7518 goto out_restart; 7519 break; 7520 case -NFS4ERR_OLD_STATEID: 7521 if (data->arg.new_lock_owner != 0 && 7522 nfs4_refresh_open_old_stateid(&data->arg.open_stateid, 7523 lsp->ls_state)) 7524 goto out_restart; 7525 if (nfs4_refresh_lock_old_stateid(&data->arg.lock_stateid, lsp)) 7526 goto out_restart; 7527 fallthrough; 7528 case -NFS4ERR_BAD_STATEID: 7529 case -NFS4ERR_STALE_STATEID: 7530 case -NFS4ERR_EXPIRED: 7531 if (data->arg.new_lock_owner != 0) { 7532 if (!nfs4_stateid_match(&data->arg.open_stateid, 7533 &lsp->ls_state->open_stateid)) 7534 goto out_restart; 7535 } else if (!nfs4_stateid_match(&data->arg.lock_stateid, 7536 &lsp->ls_stateid)) 7537 goto out_restart; 7538 } 7539 out_done: 7540 dprintk("%s: ret = %d!\n", __func__, data->rpc_status); 7541 return; 7542 out_restart: 7543 if (!data->cancelled) 7544 rpc_restart_call_prepare(task); 7545 goto out_done; 7546 } 7547 7548 static void nfs4_lock_release(void *calldata) 7549 { 7550 struct nfs4_lockdata *data = calldata; 7551 7552 nfs_free_seqid(data->arg.open_seqid); 7553 if (data->cancelled && data->rpc_status == 0) { 7554 struct rpc_task *task; 7555 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, 7556 data->arg.lock_seqid); 7557 if (!IS_ERR(task)) 7558 rpc_put_task_async(task); 7559 dprintk("%s: cancelling lock!\n", __func__); 7560 } else 7561 nfs_free_seqid(data->arg.lock_seqid); 7562 nfs4_put_lock_state(data->lsp); 7563 put_nfs_open_context(data->ctx); 7564 kfree(data); 7565 } 7566 7567 static const struct rpc_call_ops nfs4_lock_ops = { 7568 .rpc_call_prepare = nfs4_lock_prepare, 7569 .rpc_call_done = 
nfs4_lock_done, 7570 .rpc_release = nfs4_lock_release, 7571 }; 7572 7573 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) 7574 { 7575 switch (error) { 7576 case -NFS4ERR_ADMIN_REVOKED: 7577 case -NFS4ERR_EXPIRED: 7578 case -NFS4ERR_BAD_STATEID: 7579 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 7580 if (new_lock_owner != 0 || 7581 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) 7582 nfs4_schedule_stateid_recovery(server, lsp->ls_state); 7583 break; 7584 case -NFS4ERR_STALE_STATEID: 7585 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 7586 nfs4_schedule_lease_recovery(server->nfs_client); 7587 } 7588 } 7589 7590 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) 7591 { 7592 struct nfs4_lockdata *data; 7593 struct rpc_task *task; 7594 struct rpc_message msg = { 7595 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK], 7596 .rpc_cred = state->owner->so_cred, 7597 }; 7598 struct rpc_task_setup task_setup_data = { 7599 .rpc_client = NFS_CLIENT(state->inode), 7600 .rpc_message = &msg, 7601 .callback_ops = &nfs4_lock_ops, 7602 .workqueue = nfsiod_workqueue, 7603 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 7604 }; 7605 int ret; 7606 7607 if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE)) 7608 task_setup_data.flags |= RPC_TASK_MOVEABLE; 7609 7610 data = nfs4_alloc_lockdata(fl, 7611 nfs_file_open_context(fl->c.flc_file), 7612 fl->fl_u.nfs4_fl.owner, GFP_KERNEL); 7613 if (data == NULL) 7614 return -ENOMEM; 7615 if (IS_SETLKW(cmd)) 7616 data->arg.block = 1; 7617 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 7618 recovery_type > NFS_LOCK_NEW); 7619 msg.rpc_argp = &data->arg; 7620 msg.rpc_resp = &data->res; 7621 task_setup_data.callback_data = data; 7622 if (recovery_type > NFS_LOCK_NEW) { 7623 if (recovery_type == NFS_LOCK_RECLAIM) 7624 data->arg.reclaim = NFS_LOCK_RECLAIM; 7625 } else 7626 data->arg.new_lock = 1; 7627 task = rpc_run_task(&task_setup_data); 7628 if (IS_ERR(task)) 7629 return PTR_ERR(task); 7630 ret = rpc_wait_for_completion_task(task); 7631 if (ret == 0) { 7632 ret = data->rpc_status; 7633 if (ret) 7634 nfs4_handle_setlk_error(data->server, data->lsp, 7635 data->arg.new_lock_owner, ret); 7636 } else 7637 data->cancelled = true; 7638 trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret); 7639 rpc_put_task(task); 7640 dprintk("%s: ret = %d\n", __func__, ret); 7641 return ret; 7642 } 7643 7644 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) 7645 { 7646 struct nfs_server *server = NFS_SERVER(state->inode); 7647 struct nfs4_exception exception = { 7648 .inode = state->inode, 7649 }; 7650 int err; 7651 7652 do { 7653 /* Cache the lock if possible... 
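 * While the client still holds a delegation for this open state there
 * is nothing to reclaim on the server: the lock stays cached locally
 * and is re-established with the server when the delegation is
 * returned.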
*/ 7654 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 7655 return 0; 7656 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM); 7657 if (err != -NFS4ERR_DELAY) 7658 break; 7659 nfs4_handle_exception(server, err, &exception); 7660 } while (exception.retry); 7661 return err; 7662 } 7663 7664 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) 7665 { 7666 struct nfs_server *server = NFS_SERVER(state->inode); 7667 struct nfs4_exception exception = { 7668 .inode = state->inode, 7669 }; 7670 int err; 7671 7672 err = nfs4_set_lock_state(state, request); 7673 if (err != 0) 7674 return err; 7675 if (!recover_lost_locks) { 7676 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags); 7677 return 0; 7678 } 7679 do { 7680 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 7681 return 0; 7682 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED); 7683 switch (err) { 7684 default: 7685 goto out; 7686 case -NFS4ERR_GRACE: 7687 case -NFS4ERR_DELAY: 7688 nfs4_handle_exception(server, err, &exception); 7689 err = 0; 7690 } 7691 } while (exception.retry); 7692 out: 7693 return err; 7694 } 7695 7696 #if defined(CONFIG_NFS_V4_1) 7697 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request) 7698 { 7699 struct nfs4_lock_state *lsp; 7700 int status; 7701 7702 status = nfs4_set_lock_state(state, request); 7703 if (status != 0) 7704 return status; 7705 lsp = request->fl_u.nfs4_fl.owner; 7706 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) || 7707 test_bit(NFS_LOCK_LOST, &lsp->ls_flags)) 7708 return 0; 7709 return nfs4_lock_expired(state, request); 7710 } 7711 #endif 7712 7713 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7714 { 7715 struct nfs_inode *nfsi = NFS_I(state->inode); 7716 struct nfs4_state_owner *sp = state->owner; 7717 unsigned char flags = request->c.flc_flags; 7718 int status; 7719 7720 request->c.flc_flags |= FL_ACCESS; 7721 status = locks_lock_inode_wait(state->inode, request); 7722 if (status < 0) 7723 goto out; 7724 mutex_lock(&sp->so_delegreturn_mutex); 7725 down_read(&nfsi->rwsem); 7726 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { 7727 /* Yes: cache locks! */ 7728 /* ...but avoid races with delegation recall... 
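 * FL_SLEEP is cleared so the local lock attempt cannot block while
 * so_delegreturn_mutex and the inode's rwsem are held; blocking here
 * would hold up a concurrent delegation return.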
*/ 7729 request->c.flc_flags = flags & ~FL_SLEEP; 7730 status = locks_lock_inode_wait(state->inode, request); 7731 up_read(&nfsi->rwsem); 7732 mutex_unlock(&sp->so_delegreturn_mutex); 7733 goto out; 7734 } 7735 up_read(&nfsi->rwsem); 7736 mutex_unlock(&sp->so_delegreturn_mutex); 7737 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); 7738 out: 7739 request->c.flc_flags = flags; 7740 return status; 7741 } 7742 7743 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7744 { 7745 struct nfs4_exception exception = { 7746 .state = state, 7747 .inode = state->inode, 7748 .interruptible = true, 7749 }; 7750 int err; 7751 7752 do { 7753 err = _nfs4_proc_setlk(state, cmd, request); 7754 if (err == -NFS4ERR_DENIED) 7755 err = -EAGAIN; 7756 err = nfs4_handle_exception(NFS_SERVER(state->inode), 7757 err, &exception); 7758 } while (exception.retry); 7759 return err; 7760 } 7761 7762 #define NFS4_LOCK_MINTIMEOUT (1 * HZ) 7763 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) 7764 7765 static int 7766 nfs4_retry_setlk_simple(struct nfs4_state *state, int cmd, 7767 struct file_lock *request) 7768 { 7769 int status = -ERESTARTSYS; 7770 unsigned long timeout = NFS4_LOCK_MINTIMEOUT; 7771 7772 while(!signalled()) { 7773 status = nfs4_proc_setlk(state, cmd, request); 7774 if ((status != -EAGAIN) || IS_SETLK(cmd)) 7775 break; 7776 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE); 7777 schedule_timeout(timeout); 7778 timeout *= 2; 7779 timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout); 7780 status = -ERESTARTSYS; 7781 } 7782 return status; 7783 } 7784 7785 #ifdef CONFIG_NFS_V4_1 7786 struct nfs4_lock_waiter { 7787 struct inode *inode; 7788 struct nfs_lowner owner; 7789 wait_queue_entry_t wait; 7790 }; 7791 7792 static int 7793 nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key) 7794 { 7795 struct nfs4_lock_waiter *waiter = 7796 container_of(wait, struct nfs4_lock_waiter, wait); 7797 7798 /* NULL key means to wake up everyone */ 7799 if (key) { 7800 struct cb_notify_lock_args *cbnl = key; 7801 struct nfs_lowner *lowner = &cbnl->cbnl_owner, 7802 *wowner = &waiter->owner; 7803 7804 /* Only wake if the callback was for the same owner. 
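 * The key is the cb_notify_lock_args from the server's CB_NOTIFY_LOCK
 * callback: match the lock-owner (id, s_dev) first, then the file
 * handle, before waking this waiter.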
*/ 7805 if (lowner->id != wowner->id || lowner->s_dev != wowner->s_dev) 7806 return 0; 7807 7808 /* Make sure it's for the right inode */ 7809 if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh)) 7810 return 0; 7811 } 7812 7813 return woken_wake_function(wait, mode, flags, key); 7814 } 7815 7816 static int 7817 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7818 { 7819 struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner; 7820 struct nfs_server *server = NFS_SERVER(state->inode); 7821 struct nfs_client *clp = server->nfs_client; 7822 wait_queue_head_t *q = &clp->cl_lock_waitq; 7823 struct nfs4_lock_waiter waiter = { 7824 .inode = state->inode, 7825 .owner = { .clientid = clp->cl_clientid, 7826 .id = lsp->ls_seqid.owner_id, 7827 .s_dev = server->s_dev }, 7828 }; 7829 int status; 7830 7831 /* Don't bother with waitqueue if we don't expect a callback */ 7832 if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags)) 7833 return nfs4_retry_setlk_simple(state, cmd, request); 7834 7835 init_wait(&waiter.wait); 7836 waiter.wait.func = nfs4_wake_lock_waiter; 7837 add_wait_queue(q, &waiter.wait); 7838 7839 do { 7840 status = nfs4_proc_setlk(state, cmd, request); 7841 if (status != -EAGAIN || IS_SETLK(cmd)) 7842 break; 7843 7844 status = -ERESTARTSYS; 7845 wait_woken(&waiter.wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE, 7846 NFS4_LOCK_MAXTIMEOUT); 7847 } while (!signalled()); 7848 7849 remove_wait_queue(q, &waiter.wait); 7850 7851 return status; 7852 } 7853 #else /* !CONFIG_NFS_V4_1 */ 7854 static inline int 7855 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7856 { 7857 return nfs4_retry_setlk_simple(state, cmd, request); 7858 } 7859 #endif 7860 7861 static int 7862 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) 7863 { 7864 struct nfs_open_context *ctx; 7865 struct nfs4_state *state; 7866 int status; 7867 7868 /* verify open state */ 7869 ctx = nfs_file_open_context(filp); 7870 state = ctx->state; 7871 7872 if (IS_GETLK(cmd)) { 7873 if (state != NULL) 7874 return nfs4_proc_getlk(state, F_GETLK, request); 7875 return 0; 7876 } 7877 7878 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd))) 7879 return -EINVAL; 7880 7881 if (lock_is_unlock(request)) { 7882 if (state != NULL) 7883 return nfs4_proc_unlck(state, cmd, request); 7884 return 0; 7885 } 7886 7887 if (state == NULL) 7888 return -ENOLCK; 7889 7890 if ((request->c.flc_flags & FL_POSIX) && 7891 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) 7892 return -ENOLCK; 7893 7894 /* 7895 * Don't rely on the VFS having checked the file open mode, 7896 * since it won't do this for flock() locks. 7897 */ 7898 switch (request->c.flc_type) { 7899 case F_RDLCK: 7900 if (!(filp->f_mode & FMODE_READ)) 7901 return -EBADF; 7902 break; 7903 case F_WRLCK: 7904 if (!(filp->f_mode & FMODE_WRITE)) 7905 return -EBADF; 7906 } 7907 7908 status = nfs4_set_lock_state(state, request); 7909 if (status != 0) 7910 return status; 7911 7912 return nfs4_retry_setlk(state, cmd, request); 7913 } 7914 7915 static int nfs4_delete_lease(struct file *file, void **priv) 7916 { 7917 return generic_setlease(file, F_UNLCK, NULL, priv); 7918 } 7919 7920 static int nfs4_add_lease(struct file *file, int arg, struct file_lease **lease, 7921 void **priv) 7922 { 7923 struct inode *inode = file_inode(file); 7924 fmode_t type = arg == F_RDLCK ? 
FMODE_READ : FMODE_WRITE; 7925 int ret; 7926 7927 /* No delegation, no lease */ 7928 if (!nfs4_have_delegation(inode, type, 0)) 7929 return -EAGAIN; 7930 ret = generic_setlease(file, arg, lease, priv); 7931 if (ret || nfs4_have_delegation(inode, type, 0)) 7932 return ret; 7933 /* We raced with a delegation return */ 7934 nfs4_delete_lease(file, priv); 7935 return -EAGAIN; 7936 } 7937 7938 int nfs4_proc_setlease(struct file *file, int arg, struct file_lease **lease, 7939 void **priv) 7940 { 7941 switch (arg) { 7942 case F_RDLCK: 7943 case F_WRLCK: 7944 return nfs4_add_lease(file, arg, lease, priv); 7945 case F_UNLCK: 7946 return nfs4_delete_lease(file, priv); 7947 default: 7948 return -EINVAL; 7949 } 7950 } 7951 7952 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid) 7953 { 7954 struct nfs_server *server = NFS_SERVER(state->inode); 7955 int err; 7956 7957 err = nfs4_set_lock_state(state, fl); 7958 if (err != 0) 7959 return err; 7960 do { 7961 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); 7962 if (err != -NFS4ERR_DELAY && err != -NFS4ERR_GRACE) 7963 break; 7964 ssleep(1); 7965 } while (err == -NFS4ERR_DELAY || err == -NFS4ERR_GRACE); 7966 return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err); 7967 } 7968 7969 struct nfs_release_lockowner_data { 7970 struct nfs4_lock_state *lsp; 7971 struct nfs_server *server; 7972 struct nfs_release_lockowner_args args; 7973 struct nfs_release_lockowner_res res; 7974 unsigned long timestamp; 7975 }; 7976 7977 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata) 7978 { 7979 struct nfs_release_lockowner_data *data = calldata; 7980 struct nfs_server *server = data->server; 7981 nfs4_setup_sequence(server->nfs_client, &data->args.seq_args, 7982 &data->res.seq_res, task); 7983 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 7984 data->timestamp = jiffies; 7985 } 7986 7987 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata) 7988 { 7989 struct nfs_release_lockowner_data *data = calldata; 7990 struct nfs_server *server = data->server; 7991 7992 nfs40_sequence_done(task, &data->res.seq_res); 7993 7994 switch (task->tk_status) { 7995 case 0: 7996 renew_lease(server, data->timestamp); 7997 break; 7998 case -NFS4ERR_STALE_CLIENTID: 7999 case -NFS4ERR_EXPIRED: 8000 nfs4_schedule_lease_recovery(server->nfs_client); 8001 break; 8002 case -NFS4ERR_LEASE_MOVED: 8003 case -NFS4ERR_DELAY: 8004 if (nfs4_async_handle_error(task, server, 8005 NULL, NULL) == -EAGAIN) 8006 rpc_restart_call_prepare(task); 8007 } 8008 } 8009 8010 static void nfs4_release_lockowner_release(void *calldata) 8011 { 8012 struct nfs_release_lockowner_data *data = calldata; 8013 nfs4_free_lock_state(data->server, data->lsp); 8014 kfree(calldata); 8015 } 8016 8017 static const struct rpc_call_ops nfs4_release_lockowner_ops = { 8018 .rpc_call_prepare = nfs4_release_lockowner_prepare, 8019 .rpc_call_done = nfs4_release_lockowner_done, 8020 .rpc_release = nfs4_release_lockowner_release, 8021 }; 8022 8023 static void 8024 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp) 8025 { 8026 struct nfs_release_lockowner_data *data; 8027 struct rpc_message msg = { 8028 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER], 8029 }; 8030 8031 if (server->nfs_client->cl_mvops->minor_version != 0) 8032 return; 8033 8034 data = kmalloc(sizeof(*data), GFP_KERNEL); 8035 if (!data) 8036 return; 8037 data->lsp = lsp; 8038 data->server
= server; 8039 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 8040 data->args.lock_owner.id = lsp->ls_seqid.owner_id; 8041 data->args.lock_owner.s_dev = server->s_dev; 8042 8043 msg.rpc_argp = &data->args; 8044 msg.rpc_resp = &data->res; 8045 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0); 8046 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); 8047 } 8048 8049 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" 8050 8051 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler, 8052 struct mnt_idmap *idmap, 8053 struct dentry *unused, struct inode *inode, 8054 const char *key, const void *buf, 8055 size_t buflen, int flags) 8056 { 8057 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_ACL); 8058 } 8059 8060 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler, 8061 struct dentry *unused, struct inode *inode, 8062 const char *key, void *buf, size_t buflen) 8063 { 8064 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_ACL); 8065 } 8066 8067 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry) 8068 { 8069 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_ACL); 8070 } 8071 8072 #if defined(CONFIG_NFS_V4_1) 8073 #define XATTR_NAME_NFSV4_DACL "system.nfs4_dacl" 8074 8075 static int nfs4_xattr_set_nfs4_dacl(const struct xattr_handler *handler, 8076 struct mnt_idmap *idmap, 8077 struct dentry *unused, struct inode *inode, 8078 const char *key, const void *buf, 8079 size_t buflen, int flags) 8080 { 8081 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_DACL); 8082 } 8083 8084 static int nfs4_xattr_get_nfs4_dacl(const struct xattr_handler *handler, 8085 struct dentry *unused, struct inode *inode, 8086 const char *key, void *buf, size_t buflen) 8087 { 8088 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_DACL); 8089 } 8090 8091 static bool nfs4_xattr_list_nfs4_dacl(struct dentry *dentry) 8092 { 8093 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_DACL); 8094 } 8095 8096 #define XATTR_NAME_NFSV4_SACL "system.nfs4_sacl" 8097 8098 static int nfs4_xattr_set_nfs4_sacl(const struct xattr_handler *handler, 8099 struct mnt_idmap *idmap, 8100 struct dentry *unused, struct inode *inode, 8101 const char *key, const void *buf, 8102 size_t buflen, int flags) 8103 { 8104 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_SACL); 8105 } 8106 8107 static int nfs4_xattr_get_nfs4_sacl(const struct xattr_handler *handler, 8108 struct dentry *unused, struct inode *inode, 8109 const char *key, void *buf, size_t buflen) 8110 { 8111 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_SACL); 8112 } 8113 8114 static bool nfs4_xattr_list_nfs4_sacl(struct dentry *dentry) 8115 { 8116 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_SACL); 8117 } 8118 8119 #endif 8120 8121 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 8122 8123 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler, 8124 struct mnt_idmap *idmap, 8125 struct dentry *unused, struct inode *inode, 8126 const char *key, const void *buf, 8127 size_t buflen, int flags) 8128 { 8129 if (security_ismaclabel(key)) 8130 return nfs4_set_security_label(inode, buf, buflen); 8131 8132 return -EOPNOTSUPP; 8133 } 8134 8135 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler, 8136 struct dentry *unused, struct inode *inode, 8137 const char *key, void *buf, size_t buflen) 8138 { 8139 if (security_ismaclabel(key)) 8140 return nfs4_get_security_label(inode, buf, buflen); 8141 return -EOPNOTSUPP; 8142 } 
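/*
 * Example (illustrative): the ACL, DACL, SACL and security-label
 * handlers above are reached through the ordinary xattr system calls.
 * A userspace sketch, assuming an NFSv4 mount at the hypothetical
 * path /mnt/nfs:
 *
 *	char buf[4096];
 *	ssize_t len = getxattr("/mnt/nfs/file", "system.nfs4_acl",
 *			       buf, sizeof(buf));
 *	if (len > 0)
 *		setxattr("/mnt/nfs/other", "system.nfs4_acl", buf, len, 0);
 *
 * The buffer holds the opaque XDR-encoded ACL exactly as carried by
 * the GETATTR/SETATTR requests issued by nfs4_proc_get_acl() and
 * nfs4_proc_set_acl().
 */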
8143 8144 static ssize_t 8145 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len) 8146 { 8147 int len = 0; 8148 8149 if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) { 8150 len = security_inode_listsecurity(inode, list, list_len); 8151 if (len >= 0 && list_len && len > list_len) 8152 return -ERANGE; 8153 } 8154 return len; 8155 } 8156 8157 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = { 8158 .prefix = XATTR_SECURITY_PREFIX, 8159 .get = nfs4_xattr_get_nfs4_label, 8160 .set = nfs4_xattr_set_nfs4_label, 8161 }; 8162 8163 #else 8164 8165 static ssize_t 8166 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len) 8167 { 8168 return 0; 8169 } 8170 8171 #endif 8172 8173 #ifdef CONFIG_NFS_V4_2 8174 static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler, 8175 struct mnt_idmap *idmap, 8176 struct dentry *unused, struct inode *inode, 8177 const char *key, const void *buf, 8178 size_t buflen, int flags) 8179 { 8180 u32 mask; 8181 int ret; 8182 8183 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 8184 return -EOPNOTSUPP; 8185 8186 /* 8187 * There is no mapping from the MAY_* flags to the NFS_ACCESS_XA* 8188 * flags right now. Handling of xattr operations use the normal 8189 * file read/write permissions. 8190 * 8191 * Just in case the server has other ideas (which RFC 8276 allows), 8192 * do a cached access check for the XA* flags to possibly avoid 8193 * doing an RPC and getting EACCES back. 8194 */ 8195 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { 8196 if (!(mask & NFS_ACCESS_XAWRITE)) 8197 return -EACCES; 8198 } 8199 8200 if (buf == NULL) { 8201 ret = nfs42_proc_removexattr(inode, key); 8202 if (!ret) 8203 nfs4_xattr_cache_remove(inode, key); 8204 } else { 8205 ret = nfs42_proc_setxattr(inode, key, buf, buflen, flags); 8206 if (!ret) 8207 nfs4_xattr_cache_add(inode, key, buf, NULL, buflen); 8208 } 8209 8210 return ret; 8211 } 8212 8213 static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler, 8214 struct dentry *unused, struct inode *inode, 8215 const char *key, void *buf, size_t buflen) 8216 { 8217 u32 mask; 8218 ssize_t ret; 8219 8220 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 8221 return -EOPNOTSUPP; 8222 8223 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { 8224 if (!(mask & NFS_ACCESS_XAREAD)) 8225 return -EACCES; 8226 } 8227 8228 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 8229 if (ret) 8230 return ret; 8231 8232 ret = nfs4_xattr_cache_get(inode, key, buf, buflen); 8233 if (ret >= 0 || (ret < 0 && ret != -ENOENT)) 8234 return ret; 8235 8236 ret = nfs42_proc_getxattr(inode, key, buf, buflen); 8237 8238 return ret; 8239 } 8240 8241 static ssize_t 8242 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) 8243 { 8244 u64 cookie; 8245 bool eof; 8246 ssize_t ret, size; 8247 char *buf; 8248 size_t buflen; 8249 u32 mask; 8250 8251 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 8252 return 0; 8253 8254 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { 8255 if (!(mask & NFS_ACCESS_XALIST)) 8256 return 0; 8257 } 8258 8259 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 8260 if (ret) 8261 return ret; 8262 8263 ret = nfs4_xattr_cache_list(inode, list, list_len); 8264 if (ret >= 0 || (ret < 0 && ret != -ENOENT)) 8265 return ret; 8266 8267 cookie = 0; 8268 eof = false; 8269 buflen = list_len ? list_len : XATTR_LIST_MAX; 8270 buf = list_len ? 
list : NULL; 8271 size = 0; 8272 8273 while (!eof) { 8274 ret = nfs42_proc_listxattrs(inode, buf, buflen, 8275 &cookie, &eof); 8276 if (ret < 0) 8277 return ret; 8278 8279 if (list_len) { 8280 buf += ret; 8281 buflen -= ret; 8282 } 8283 size += ret; 8284 } 8285 8286 if (list_len) 8287 nfs4_xattr_cache_set_list(inode, list, size); 8288 8289 return size; 8290 } 8291 8292 #else 8293 8294 static ssize_t 8295 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) 8296 { 8297 return 0; 8298 } 8299 #endif /* CONFIG_NFS_V4_2 */ 8300 8301 /* 8302 * nfs_fhget will use either the mounted_on_fileid or the fileid 8303 */ 8304 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) 8305 { 8306 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || 8307 (fattr->valid & NFS_ATTR_FATTR_FILEID)) && 8308 (fattr->valid & NFS_ATTR_FATTR_FSID) && 8309 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS))) 8310 return; 8311 8312 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 8313 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL; 8314 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 8315 fattr->nlink = 2; 8316 } 8317 8318 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 8319 const struct qstr *name, 8320 struct nfs4_fs_locations *fs_locations, 8321 struct page *page) 8322 { 8323 struct nfs_server *server = NFS_SERVER(dir); 8324 u32 bitmask[3]; 8325 struct nfs4_fs_locations_arg args = { 8326 .dir_fh = NFS_FH(dir), 8327 .name = name, 8328 .page = page, 8329 .bitmask = bitmask, 8330 }; 8331 struct nfs4_fs_locations_res res = { 8332 .fs_locations = fs_locations, 8333 }; 8334 struct rpc_message msg = { 8335 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 8336 .rpc_argp = &args, 8337 .rpc_resp = &res, 8338 }; 8339 int status; 8340 8341 dprintk("%s: start\n", __func__); 8342 8343 bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS; 8344 bitmask[1] = nfs4_fattr_bitmap[1]; 8345 8346 /* Ask for the fileid of the absent filesystem if mounted_on_fileid 8347 * is not supported */ 8348 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) 8349 bitmask[0] &= ~FATTR4_WORD0_FILEID; 8350 else 8351 bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID; 8352 8353 nfs_fattr_init(fs_locations->fattr); 8354 fs_locations->server = server; 8355 fs_locations->nlocations = 0; 8356 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0); 8357 dprintk("%s: returned status = %d\n", __func__, status); 8358 return status; 8359 } 8360 8361 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 8362 const struct qstr *name, 8363 struct nfs4_fs_locations *fs_locations, 8364 struct page *page) 8365 { 8366 struct nfs4_exception exception = { 8367 .interruptible = true, 8368 }; 8369 int err; 8370 do { 8371 err = _nfs4_proc_fs_locations(client, dir, name, 8372 fs_locations, page); 8373 trace_nfs4_get_fs_locations(dir, name, err); 8374 err = nfs4_handle_exception(NFS_SERVER(dir), err, 8375 &exception); 8376 } while (exception.retry); 8377 return err; 8378 } 8379 8380 /* 8381 * This operation also signals the server that this client is 8382 * performing migration recovery. The server can stop returning 8383 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is 8384 * appended to this compound to identify the client ID which is 8385 * performing recovery. 
8386 */ 8387 static int _nfs40_proc_get_locations(struct nfs_server *server, 8388 struct nfs_fh *fhandle, 8389 struct nfs4_fs_locations *locations, 8390 struct page *page, const struct cred *cred) 8391 { 8392 struct rpc_clnt *clnt = server->client; 8393 u32 bitmask[2] = { 8394 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 8395 }; 8396 struct nfs4_fs_locations_arg args = { 8397 .clientid = server->nfs_client->cl_clientid, 8398 .fh = fhandle, 8399 .page = page, 8400 .bitmask = bitmask, 8401 .migration = 1, /* skip LOOKUP */ 8402 .renew = 1, /* append RENEW */ 8403 }; 8404 struct nfs4_fs_locations_res res = { 8405 .fs_locations = locations, 8406 .migration = 1, 8407 .renew = 1, 8408 }; 8409 struct rpc_message msg = { 8410 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 8411 .rpc_argp = &args, 8412 .rpc_resp = &res, 8413 .rpc_cred = cred, 8414 }; 8415 unsigned long now = jiffies; 8416 int status; 8417 8418 nfs_fattr_init(locations->fattr); 8419 locations->server = server; 8420 locations->nlocations = 0; 8421 8422 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8423 status = nfs4_call_sync_sequence(clnt, server, &msg, 8424 &args.seq_args, &res.seq_res); 8425 if (status) 8426 return status; 8427 8428 renew_lease(server, now); 8429 return 0; 8430 } 8431 8432 #ifdef CONFIG_NFS_V4_1 8433 8434 /* 8435 * This operation also signals the server that this client is 8436 * performing migration recovery. The server can stop asserting 8437 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID 8438 * performing this operation is identified in the SEQUENCE 8439 * operation in this compound. 8440 * 8441 * When the client supports GETATTR(fs_locations_info), it can 8442 * be plumbed in here. 8443 */ 8444 static int _nfs41_proc_get_locations(struct nfs_server *server, 8445 struct nfs_fh *fhandle, 8446 struct nfs4_fs_locations *locations, 8447 struct page *page, const struct cred *cred) 8448 { 8449 struct rpc_clnt *clnt = server->client; 8450 u32 bitmask[2] = { 8451 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 8452 }; 8453 struct nfs4_fs_locations_arg args = { 8454 .fh = fhandle, 8455 .page = page, 8456 .bitmask = bitmask, 8457 .migration = 1, /* skip LOOKUP */ 8458 }; 8459 struct nfs4_fs_locations_res res = { 8460 .fs_locations = locations, 8461 .migration = 1, 8462 }; 8463 struct rpc_message msg = { 8464 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 8465 .rpc_argp = &args, 8466 .rpc_resp = &res, 8467 .rpc_cred = cred, 8468 }; 8469 struct nfs4_call_sync_data data = { 8470 .seq_server = server, 8471 .seq_args = &args.seq_args, 8472 .seq_res = &res.seq_res, 8473 }; 8474 struct rpc_task_setup task_setup_data = { 8475 .rpc_client = clnt, 8476 .rpc_message = &msg, 8477 .callback_ops = server->nfs_client->cl_mvops->call_sync_ops, 8478 .callback_data = &data, 8479 .flags = RPC_TASK_NO_ROUND_ROBIN, 8480 }; 8481 int status; 8482 8483 nfs_fattr_init(locations->fattr); 8484 locations->server = server; 8485 locations->nlocations = 0; 8486 8487 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8488 status = nfs4_call_sync_custom(&task_setup_data); 8489 if (status == NFS4_OK && 8490 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 8491 status = -NFS4ERR_LEASE_MOVED; 8492 return status; 8493 } 8494 8495 #endif /* CONFIG_NFS_V4_1 */ 8496 8497 /** 8498 * nfs4_proc_get_locations - discover locations for a migrated FSID 8499 * @server: pointer to nfs_server to process 8500 * @fhandle: pointer to the kernel NFS client file handle 8501 * @locations: result of query 8502 * 
@page: buffer 8503 * @cred: credential to use for this operation 8504 * 8505 * Returns NFS4_OK on success, a negative NFS4ERR status code if the 8506 * operation failed, or a negative errno if a local error occurred. 8507 * 8508 * On success, "locations" is filled in, but if the server has 8509 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not 8510 * asserted. 8511 * 8512 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases 8513 * from this client that require migration recovery. 8514 */ 8515 int nfs4_proc_get_locations(struct nfs_server *server, 8516 struct nfs_fh *fhandle, 8517 struct nfs4_fs_locations *locations, 8518 struct page *page, const struct cred *cred) 8519 { 8520 struct nfs_client *clp = server->nfs_client; 8521 const struct nfs4_mig_recovery_ops *ops = 8522 clp->cl_mvops->mig_recovery_ops; 8523 struct nfs4_exception exception = { 8524 .interruptible = true, 8525 }; 8526 int status; 8527 8528 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 8529 (unsigned long long)server->fsid.major, 8530 (unsigned long long)server->fsid.minor, 8531 clp->cl_hostname); 8532 nfs_display_fhandle(fhandle, __func__); 8533 8534 do { 8535 status = ops->get_locations(server, fhandle, locations, page, 8536 cred); 8537 if (status != -NFS4ERR_DELAY) 8538 break; 8539 nfs4_handle_exception(server, status, &exception); 8540 } while (exception.retry); 8541 return status; 8542 } 8543 8544 /* 8545 * This operation also signals the server that this client is 8546 * performing "lease moved" recovery. The server can stop 8547 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation 8548 * is appended to this compound to identify the client ID which is 8549 * performing recovery. 8550 */ 8551 static int _nfs40_proc_fsid_present(struct inode *inode, const struct cred *cred) 8552 { 8553 struct nfs_server *server = NFS_SERVER(inode); 8554 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 8555 struct rpc_clnt *clnt = server->client; 8556 struct nfs4_fsid_present_arg args = { 8557 .fh = NFS_FH(inode), 8558 .clientid = clp->cl_clientid, 8559 .renew = 1, /* append RENEW */ 8560 }; 8561 struct nfs4_fsid_present_res res = { 8562 .renew = 1, 8563 }; 8564 struct rpc_message msg = { 8565 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 8566 .rpc_argp = &args, 8567 .rpc_resp = &res, 8568 .rpc_cred = cred, 8569 }; 8570 unsigned long now = jiffies; 8571 int status; 8572 8573 res.fh = nfs_alloc_fhandle(); 8574 if (res.fh == NULL) 8575 return -ENOMEM; 8576 8577 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8578 status = nfs4_call_sync_sequence(clnt, server, &msg, 8579 &args.seq_args, &res.seq_res); 8580 nfs_free_fhandle(res.fh); 8581 if (status) 8582 return status; 8583 8584 do_renew_lease(clp, now); 8585 return 0; 8586 } 8587 8588 #ifdef CONFIG_NFS_V4_1 8589 8590 /* 8591 * This operation also signals the server that this client is 8592 * performing "lease moved" recovery. The server can stop asserting 8593 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing 8594 * this operation is identified in the SEQUENCE operation in this 8595 * compound. 
8596 */ 8597 static int _nfs41_proc_fsid_present(struct inode *inode, const struct cred *cred) 8598 { 8599 struct nfs_server *server = NFS_SERVER(inode); 8600 struct rpc_clnt *clnt = server->client; 8601 struct nfs4_fsid_present_arg args = { 8602 .fh = NFS_FH(inode), 8603 }; 8604 struct nfs4_fsid_present_res res = { 8605 }; 8606 struct rpc_message msg = { 8607 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 8608 .rpc_argp = &args, 8609 .rpc_resp = &res, 8610 .rpc_cred = cred, 8611 }; 8612 int status; 8613 8614 res.fh = nfs_alloc_fhandle(); 8615 if (res.fh == NULL) 8616 return -ENOMEM; 8617 8618 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8619 status = nfs4_call_sync_sequence(clnt, server, &msg, 8620 &args.seq_args, &res.seq_res); 8621 nfs_free_fhandle(res.fh); 8622 if (status == NFS4_OK && 8623 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 8624 status = -NFS4ERR_LEASE_MOVED; 8625 return status; 8626 } 8627 8628 #endif /* CONFIG_NFS_V4_1 */ 8629 8630 /** 8631 * nfs4_proc_fsid_present - Is this FSID present or absent on server? 8632 * @inode: inode on FSID to check 8633 * @cred: credential to use for this operation 8634 * 8635 * Server indicates whether the FSID is present, moved, or not 8636 * recognized. This operation is necessary to clear a LEASE_MOVED 8637 * condition for this client ID. 8638 * 8639 * Returns NFS4_OK if the FSID is present on this server, 8640 * -NFS4ERR_MOVED if the FSID is no longer present, a negative 8641 * NFS4ERR code if some error occurred on the server, or a 8642 * negative errno if a local failure occurred. 8643 */ 8644 int nfs4_proc_fsid_present(struct inode *inode, const struct cred *cred) 8645 { 8646 struct nfs_server *server = NFS_SERVER(inode); 8647 struct nfs_client *clp = server->nfs_client; 8648 const struct nfs4_mig_recovery_ops *ops = 8649 clp->cl_mvops->mig_recovery_ops; 8650 struct nfs4_exception exception = { 8651 .interruptible = true, 8652 }; 8653 int status; 8654 8655 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 8656 (unsigned long long)server->fsid.major, 8657 (unsigned long long)server->fsid.minor, 8658 clp->cl_hostname); 8659 nfs_display_fhandle(NFS_FH(inode), __func__); 8660 8661 do { 8662 status = ops->fsid_present(inode, cred); 8663 if (status != -NFS4ERR_DELAY) 8664 break; 8665 nfs4_handle_exception(server, status, &exception); 8666 } while (exception.retry); 8667 return status; 8668 } 8669 8670 /* 8671 * If 'use_integrity' is true and the state management nfs_client 8672 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient 8673 * and the machine credential as per RFC3530bis and RFC5661 Security 8674 * Considerations sections. Otherwise, just use the user cred with the 8675 * filesystem's rpc_client.
8676 */ 8677 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity) 8678 { 8679 int status; 8680 struct rpc_clnt *clnt = NFS_SERVER(dir)->client; 8681 struct nfs_client *clp = NFS_SERVER(dir)->nfs_client; 8682 struct nfs4_secinfo_arg args = { 8683 .dir_fh = NFS_FH(dir), 8684 .name = name, 8685 }; 8686 struct nfs4_secinfo_res res = { 8687 .flavors = flavors, 8688 }; 8689 struct rpc_message msg = { 8690 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO], 8691 .rpc_argp = &args, 8692 .rpc_resp = &res, 8693 }; 8694 struct nfs4_call_sync_data data = { 8695 .seq_server = NFS_SERVER(dir), 8696 .seq_args = &args.seq_args, 8697 .seq_res = &res.seq_res, 8698 }; 8699 struct rpc_task_setup task_setup = { 8700 .rpc_client = clnt, 8701 .rpc_message = &msg, 8702 .callback_ops = clp->cl_mvops->call_sync_ops, 8703 .callback_data = &data, 8704 .flags = RPC_TASK_NO_ROUND_ROBIN, 8705 }; 8706 const struct cred *cred = NULL; 8707 8708 if (use_integrity) { 8709 clnt = clp->cl_rpcclient; 8710 task_setup.rpc_client = clnt; 8711 8712 cred = nfs4_get_clid_cred(clp); 8713 msg.rpc_cred = cred; 8714 } 8715 8716 dprintk("NFS call secinfo %s\n", name->name); 8717 8718 nfs4_state_protect(clp, NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg); 8719 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 8720 status = nfs4_call_sync_custom(&task_setup); 8721 8722 dprintk("NFS reply secinfo: %d\n", status); 8723 8724 put_cred(cred); 8725 return status; 8726 } 8727 8728 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, 8729 struct nfs4_secinfo_flavors *flavors) 8730 { 8731 struct nfs4_exception exception = { 8732 .interruptible = true, 8733 }; 8734 int err; 8735 do { 8736 err = -NFS4ERR_WRONGSEC; 8737 8738 /* try to use integrity protection with machine cred */ 8739 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client)) 8740 err = _nfs4_proc_secinfo(dir, name, flavors, true); 8741 8742 /* 8743 * if unable to use integrity protection, or SECINFO with 8744 * integrity protection returns NFS4ERR_WRONGSEC (which is 8745 * disallowed by spec, but exists in deployed servers) use 8746 * the current filesystem's rpc_client and the user cred. 8747 */ 8748 if (err == -NFS4ERR_WRONGSEC) 8749 err = _nfs4_proc_secinfo(dir, name, flavors, false); 8750 8751 trace_nfs4_secinfo(dir, name, err); 8752 err = nfs4_handle_exception(NFS_SERVER(dir), err, 8753 &exception); 8754 } while (exception.retry); 8755 return err; 8756 } 8757 8758 #ifdef CONFIG_NFS_V4_1 8759 /* 8760 * Check the exchange flags returned by the server for invalid flags, having 8761 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or 8762 * DS flags set. 
8763 */ 8764 static int nfs4_check_cl_exchange_flags(u32 flags, u32 version) 8765 { 8766 if (version >= 2 && (flags & ~EXCHGID4_2_FLAG_MASK_R)) 8767 goto out_inval; 8768 else if (version < 2 && (flags & ~EXCHGID4_FLAG_MASK_R)) 8769 goto out_inval; 8770 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && 8771 (flags & EXCHGID4_FLAG_USE_NON_PNFS)) 8772 goto out_inval; 8773 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) 8774 goto out_inval; 8775 return NFS_OK; 8776 out_inval: 8777 return -NFS4ERR_INVAL; 8778 } 8779 8780 static bool 8781 nfs41_same_server_scope(struct nfs41_server_scope *a, 8782 struct nfs41_server_scope *b) 8783 { 8784 if (a->server_scope_sz != b->server_scope_sz) 8785 return false; 8786 return memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0; 8787 } 8788 8789 static void 8790 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) 8791 { 8792 struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp; 8793 struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp; 8794 struct nfs_client *clp = args->client; 8795 8796 switch (task->tk_status) { 8797 case -NFS4ERR_BADSESSION: 8798 case -NFS4ERR_DEADSESSION: 8799 nfs4_schedule_session_recovery(clp->cl_session, 8800 task->tk_status); 8801 return; 8802 } 8803 if (args->dir == NFS4_CDFC4_FORE_OR_BOTH && 8804 res->dir != NFS4_CDFS4_BOTH) { 8805 rpc_task_close_connection(task); 8806 if (args->retries++ < MAX_BIND_CONN_TO_SESSION_RETRIES) 8807 rpc_restart_call(task); 8808 } 8809 } 8810 8811 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = { 8812 .rpc_call_done = nfs4_bind_one_conn_to_session_done, 8813 }; 8814 8815 /* 8816 * nfs4_proc_bind_one_conn_to_session() 8817 * 8818 * The 4.1 client currently uses the same TCP connection for the 8819 * fore and backchannel. 
8820 */ 8821 static 8822 int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt, 8823 struct rpc_xprt *xprt, 8824 struct nfs_client *clp, 8825 const struct cred *cred) 8826 { 8827 int status; 8828 struct nfs41_bind_conn_to_session_args args = { 8829 .client = clp, 8830 .dir = NFS4_CDFC4_FORE_OR_BOTH, 8831 .retries = 0, 8832 }; 8833 struct nfs41_bind_conn_to_session_res res; 8834 struct rpc_message msg = { 8835 .rpc_proc = 8836 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION], 8837 .rpc_argp = &args, 8838 .rpc_resp = &res, 8839 .rpc_cred = cred, 8840 }; 8841 struct rpc_task_setup task_setup_data = { 8842 .rpc_client = clnt, 8843 .rpc_xprt = xprt, 8844 .callback_ops = &nfs4_bind_one_conn_to_session_ops, 8845 .rpc_message = &msg, 8846 .flags = RPC_TASK_TIMEOUT, 8847 }; 8848 struct rpc_task *task; 8849 8850 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id); 8851 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN)) 8852 args.dir = NFS4_CDFC4_FORE; 8853 8854 /* Do not set the backchannel flag unless this is clnt->cl_xprt */ 8855 if (xprt != rcu_access_pointer(clnt->cl_xprt)) 8856 args.dir = NFS4_CDFC4_FORE; 8857 8858 task = rpc_run_task(&task_setup_data); 8859 if (!IS_ERR(task)) { 8860 status = task->tk_status; 8861 rpc_put_task(task); 8862 } else 8863 status = PTR_ERR(task); 8864 trace_nfs4_bind_conn_to_session(clp, status); 8865 if (status == 0) { 8866 if (memcmp(res.sessionid.data, 8867 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { 8868 dprintk("NFS: %s: Session ID mismatch\n", __func__); 8869 return -EIO; 8870 } 8871 if ((res.dir & args.dir) != res.dir || res.dir == 0) { 8872 dprintk("NFS: %s: Unexpected direction from server\n", 8873 __func__); 8874 return -EIO; 8875 } 8876 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) { 8877 dprintk("NFS: %s: Server returned RDMA mode = true\n", 8878 __func__); 8879 return -EIO; 8880 } 8881 } 8882 8883 return status; 8884 } 8885 8886 struct rpc_bind_conn_calldata { 8887 struct nfs_client *clp; 8888 const struct cred *cred; 8889 }; 8890 8891 static int 8892 nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt, 8893 struct rpc_xprt *xprt, 8894 void *calldata) 8895 { 8896 struct rpc_bind_conn_calldata *p = calldata; 8897 8898 return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred); 8899 } 8900 8901 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, const struct cred *cred) 8902 { 8903 struct rpc_bind_conn_calldata data = { 8904 .clp = clp, 8905 .cred = cred, 8906 }; 8907 return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient, 8908 nfs4_proc_bind_conn_to_session_callback, &data); 8909 } 8910 8911 /* 8912 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map 8913 * and operations we'd like to see to enable certain features in the allow map 8914 */ 8915 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = { 8916 .how = SP4_MACH_CRED, 8917 .enforce.u.words = { 8918 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 8919 1 << (OP_EXCHANGE_ID - 32) | 8920 1 << (OP_CREATE_SESSION - 32) | 8921 1 << (OP_DESTROY_SESSION - 32) | 8922 1 << (OP_DESTROY_CLIENTID - 32) 8923 }, 8924 .allow.u.words = { 8925 [0] = 1 << (OP_CLOSE) | 8926 1 << (OP_OPEN_DOWNGRADE) | 8927 1 << (OP_LOCKU) | 8928 1 << (OP_DELEGRETURN) | 8929 1 << (OP_COMMIT), 8930 [1] = 1 << (OP_SECINFO - 32) | 8931 1 << (OP_SECINFO_NO_NAME - 32) | 8932 1 << (OP_LAYOUTRETURN - 32) | 8933 1 << (OP_TEST_STATEID - 32) | 8934 1 << (OP_FREE_STATEID - 32) | 8935 1 << (OP_WRITE - 32) 8936 } 8937 }; 8938 8939 
/* 8940 * Select the state protection mode for client `clp' given the server results 8941 * from exchange_id in `sp'. 8942 * 8943 * Returns 0 on success, negative errno otherwise. 8944 */ 8945 static int nfs4_sp4_select_mode(struct nfs_client *clp, 8946 struct nfs41_state_protection *sp) 8947 { 8948 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = { 8949 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 8950 1 << (OP_EXCHANGE_ID - 32) | 8951 1 << (OP_CREATE_SESSION - 32) | 8952 1 << (OP_DESTROY_SESSION - 32) | 8953 1 << (OP_DESTROY_CLIENTID - 32) 8954 }; 8955 unsigned long flags = 0; 8956 unsigned int i; 8957 int ret = 0; 8958 8959 if (sp->how == SP4_MACH_CRED) { 8960 /* Print state protect result */ 8961 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n"); 8962 for (i = 0; i <= LAST_NFS4_OP; i++) { 8963 if (test_bit(i, sp->enforce.u.longs)) 8964 dfprintk(MOUNT, " enforce op %d\n", i); 8965 if (test_bit(i, sp->allow.u.longs)) 8966 dfprintk(MOUNT, " allow op %d\n", i); 8967 } 8968 8969 /* make sure nothing is on enforce list that isn't supported */ 8970 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) { 8971 if (sp->enforce.u.words[i] & ~supported_enforce[i]) { 8972 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 8973 ret = -EINVAL; 8974 goto out; 8975 } 8976 } 8977 8978 /* 8979 * Minimal mode - state operations are allowed to use machine 8980 * credential. Note this already happens by default, so the 8981 * client doesn't have to do anything more than the negotiation. 8982 * 8983 * NOTE: we don't care if EXCHANGE_ID is in the list - 8984 * we're already using the machine cred for exchange_id 8985 * and will never use a different cred. 8986 */ 8987 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) && 8988 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) && 8989 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) && 8990 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) { 8991 dfprintk(MOUNT, "sp4_mach_cred:\n"); 8992 dfprintk(MOUNT, " minimal mode enabled\n"); 8993 __set_bit(NFS_SP4_MACH_CRED_MINIMAL, &flags); 8994 } else { 8995 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 8996 ret = -EINVAL; 8997 goto out; 8998 } 8999 9000 if (test_bit(OP_CLOSE, sp->allow.u.longs) && 9001 test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) && 9002 test_bit(OP_DELEGRETURN, sp->allow.u.longs) && 9003 test_bit(OP_LOCKU, sp->allow.u.longs)) { 9004 dfprintk(MOUNT, " cleanup mode enabled\n"); 9005 __set_bit(NFS_SP4_MACH_CRED_CLEANUP, &flags); 9006 } 9007 9008 if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) { 9009 dfprintk(MOUNT, " pnfs cleanup mode enabled\n"); 9010 __set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP, &flags); 9011 } 9012 9013 if (test_bit(OP_SECINFO, sp->allow.u.longs) && 9014 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) { 9015 dfprintk(MOUNT, " secinfo mode enabled\n"); 9016 __set_bit(NFS_SP4_MACH_CRED_SECINFO, &flags); 9017 } 9018 9019 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) && 9020 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) { 9021 dfprintk(MOUNT, " stateid mode enabled\n"); 9022 __set_bit(NFS_SP4_MACH_CRED_STATEID, &flags); 9023 } 9024 9025 if (test_bit(OP_WRITE, sp->allow.u.longs)) { 9026 dfprintk(MOUNT, " write mode enabled\n"); 9027 __set_bit(NFS_SP4_MACH_CRED_WRITE, &flags); 9028 } 9029 9030 if (test_bit(OP_COMMIT, sp->allow.u.longs)) { 9031 dfprintk(MOUNT, " commit mode enabled\n"); 9032 __set_bit(NFS_SP4_MACH_CRED_COMMIT, &flags); 9033 } 9034 } 9035 out: 9036 clp->cl_sp4_flags = flags; 9037 return ret; 9038 } 9039 9040 struct nfs41_exchange_id_data { 9041 struct 
nfs41_exchange_id_res res; 9042 struct nfs41_exchange_id_args args; 9043 }; 9044 9045 static void nfs4_exchange_id_release(void *data) 9046 { 9047 struct nfs41_exchange_id_data *cdata = 9048 (struct nfs41_exchange_id_data *)data; 9049 9050 nfs_put_client(cdata->args.client); 9051 kfree(cdata->res.impl_id); 9052 kfree(cdata->res.server_scope); 9053 kfree(cdata->res.server_owner); 9054 kfree(cdata); 9055 } 9056 9057 static const struct rpc_call_ops nfs4_exchange_id_call_ops = { 9058 .rpc_release = nfs4_exchange_id_release, 9059 }; 9060 9061 /* 9062 * _nfs4_proc_exchange_id() 9063 * 9064 * Wrapper for EXCHANGE_ID operation. 9065 */ 9066 static struct rpc_task * 9067 nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred, 9068 u32 sp4_how, struct rpc_xprt *xprt) 9069 { 9070 struct rpc_message msg = { 9071 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], 9072 .rpc_cred = cred, 9073 }; 9074 struct rpc_task_setup task_setup_data = { 9075 .rpc_client = clp->cl_rpcclient, 9076 .callback_ops = &nfs4_exchange_id_call_ops, 9077 .rpc_message = &msg, 9078 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN, 9079 }; 9080 struct nfs41_exchange_id_data *calldata; 9081 int status; 9082 9083 if (!refcount_inc_not_zero(&clp->cl_count)) 9084 return ERR_PTR(-EIO); 9085 9086 status = -ENOMEM; 9087 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 9088 if (!calldata) 9089 goto out; 9090 9091 nfs4_init_boot_verifier(clp, &calldata->args.verifier); 9092 9093 status = nfs4_init_uniform_client_string(clp); 9094 if (status) 9095 goto out_calldata; 9096 9097 calldata->res.server_owner = kzalloc(sizeof(struct nfs41_server_owner), 9098 GFP_NOFS); 9099 status = -ENOMEM; 9100 if (unlikely(calldata->res.server_owner == NULL)) 9101 goto out_calldata; 9102 9103 calldata->res.server_scope = kzalloc(sizeof(struct nfs41_server_scope), 9104 GFP_NOFS); 9105 if (unlikely(calldata->res.server_scope == NULL)) 9106 goto out_server_owner; 9107 9108 calldata->res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS); 9109 if (unlikely(calldata->res.impl_id == NULL)) 9110 goto out_server_scope; 9111 9112 switch (sp4_how) { 9113 case SP4_NONE: 9114 calldata->args.state_protect.how = SP4_NONE; 9115 break; 9116 9117 case SP4_MACH_CRED: 9118 calldata->args.state_protect = nfs4_sp4_mach_cred_request; 9119 break; 9120 9121 default: 9122 /* unsupported! 
*/ 9123 WARN_ON_ONCE(1); 9124 status = -EINVAL; 9125 goto out_impl_id; 9126 } 9127 if (xprt) { 9128 task_setup_data.rpc_xprt = xprt; 9129 task_setup_data.flags |= RPC_TASK_SOFTCONN; 9130 memcpy(calldata->args.verifier.data, clp->cl_confirm.data, 9131 sizeof(calldata->args.verifier.data)); 9132 } 9133 calldata->args.client = clp; 9134 calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 9135 EXCHGID4_FLAG_BIND_PRINC_STATEID; 9136 #ifdef CONFIG_NFS_V4_1_MIGRATION 9137 calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR; 9138 #endif 9139 if (test_bit(NFS_CS_PNFS, &clp->cl_flags)) 9140 calldata->args.flags |= EXCHGID4_FLAG_USE_PNFS_DS; 9141 msg.rpc_argp = &calldata->args; 9142 msg.rpc_resp = &calldata->res; 9143 task_setup_data.callback_data = calldata; 9144 9145 return rpc_run_task(&task_setup_data); 9146 9147 out_impl_id: 9148 kfree(calldata->res.impl_id); 9149 out_server_scope: 9150 kfree(calldata->res.server_scope); 9151 out_server_owner: 9152 kfree(calldata->res.server_owner); 9153 out_calldata: 9154 kfree(calldata); 9155 out: 9156 nfs_put_client(clp); 9157 return ERR_PTR(status); 9158 } 9159 9160 /* 9161 * _nfs4_proc_exchange_id() 9162 * 9163 * Wrapper for EXCHANGE_ID operation. 9164 */ 9165 static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred, 9166 u32 sp4_how) 9167 { 9168 struct rpc_task *task; 9169 struct nfs41_exchange_id_args *argp; 9170 struct nfs41_exchange_id_res *resp; 9171 unsigned long now = jiffies; 9172 int status; 9173 9174 task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL); 9175 if (IS_ERR(task)) 9176 return PTR_ERR(task); 9177 9178 argp = task->tk_msg.rpc_argp; 9179 resp = task->tk_msg.rpc_resp; 9180 status = task->tk_status; 9181 if (status != 0) 9182 goto out; 9183 9184 status = nfs4_check_cl_exchange_flags(resp->flags, 9185 clp->cl_mvops->minor_version); 9186 if (status != 0) 9187 goto out; 9188 9189 status = nfs4_sp4_select_mode(clp, &resp->state_protect); 9190 if (status != 0) 9191 goto out; 9192 9193 do_renew_lease(clp, now); 9194 9195 clp->cl_clientid = resp->clientid; 9196 clp->cl_exchange_flags = resp->flags; 9197 clp->cl_seqid = resp->seqid; 9198 /* Client ID is not confirmed */ 9199 if (!(resp->flags & EXCHGID4_FLAG_CONFIRMED_R)) 9200 clear_bit(NFS4_SESSION_ESTABLISHED, 9201 &clp->cl_session->session_state); 9202 9203 if (clp->cl_serverscope != NULL && 9204 !nfs41_same_server_scope(clp->cl_serverscope, 9205 resp->server_scope)) { 9206 dprintk("%s: server_scope mismatch detected\n", 9207 __func__); 9208 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); 9209 } 9210 9211 swap(clp->cl_serverowner, resp->server_owner); 9212 swap(clp->cl_serverscope, resp->server_scope); 9213 swap(clp->cl_implid, resp->impl_id); 9214 9215 /* Save the EXCHANGE_ID verifier session trunk tests */ 9216 memcpy(clp->cl_confirm.data, argp->verifier.data, 9217 sizeof(clp->cl_confirm.data)); 9218 out: 9219 trace_nfs4_exchange_id(clp, status); 9220 rpc_put_task(task); 9221 return status; 9222 } 9223 9224 /* 9225 * nfs4_proc_exchange_id() 9226 * 9227 * Returns zero, a negative errno, or a negative NFS4ERR status code. 9228 * 9229 * Since the clientid has expired, all compounds using sessions 9230 * associated with the stale clientid will be returning 9231 * NFS4ERR_BADSESSION in the sequence operation, and will therefore 9232 * be in some phase of session reset. 9233 * 9234 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used. 
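 * If the SP4_MACH_CRED exchange fails, the call falls back to a
 * second EXCHANGE_ID with SP4_NONE.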
9235 */ 9236 int nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred) 9237 { 9238 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor; 9239 int status; 9240 9241 /* try SP4_MACH_CRED if krb5i/p */ 9242 if (authflavor == RPC_AUTH_GSS_KRB5I || 9243 authflavor == RPC_AUTH_GSS_KRB5P) { 9244 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED); 9245 if (!status) 9246 return 0; 9247 } 9248 9249 /* try SP4_NONE */ 9250 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE); 9251 } 9252 9253 /** 9254 * nfs4_test_session_trunk 9255 * 9256 * This is an add_xprt_test() test function called from 9257 * rpc_clnt_setup_test_and_add_xprt. 9258 * 9259 * The rpc_xprt_switch is referenced by rpc_clnt_setup_test_and_add_xprt 9260 * and is dereferenced in nfs4_exchange_id_release 9261 * 9262 * Upon success, add the new transport to the rpc_clnt 9263 * 9264 * @clnt: struct rpc_clnt to get new transport 9265 * @xprt: the rpc_xprt to test 9266 * @data: call data for _nfs4_proc_exchange_id. 9267 */ 9268 void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt, 9269 void *data) 9270 { 9271 struct nfs4_add_xprt_data *adata = data; 9272 struct rpc_task *task; 9273 int status; 9274 9275 u32 sp4_how; 9276 9277 dprintk("--> %s try %s\n", __func__, 9278 xprt->address_strings[RPC_DISPLAY_ADDR]); 9279 9280 sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED); 9281 9282 try_again: 9283 /* Test connection for session trunking. Async exchange_id call */ 9284 task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt); 9285 if (IS_ERR(task)) 9286 return; 9287 9288 status = task->tk_status; 9289 if (status == 0) { 9290 status = nfs4_detect_session_trunking(adata->clp, 9291 task->tk_msg.rpc_resp, xprt); 9292 trace_nfs4_trunked_exchange_id(adata->clp, 9293 xprt->address_strings[RPC_DISPLAY_ADDR], status); 9294 } 9295 if (status == 0) 9296 rpc_clnt_xprt_switch_add_xprt(clnt, xprt); 9297 else if (status != -NFS4ERR_DELAY && rpc_clnt_xprt_switch_has_addr(clnt, 9298 (struct sockaddr *)&xprt->addr)) 9299 rpc_clnt_xprt_switch_remove_xprt(clnt, xprt); 9300 9301 rpc_put_task(task); 9302 if (status == -NFS4ERR_DELAY) { 9303 ssleep(1); 9304 goto try_again; 9305 } 9306 } 9307 EXPORT_SYMBOL_GPL(nfs4_test_session_trunk); 9308 9309 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp, 9310 const struct cred *cred) 9311 { 9312 struct rpc_message msg = { 9313 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID], 9314 .rpc_argp = clp, 9315 .rpc_cred = cred, 9316 }; 9317 int status; 9318 9319 status = rpc_call_sync(clp->cl_rpcclient, &msg, 9320 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 9321 trace_nfs4_destroy_clientid(clp, status); 9322 if (status) 9323 dprintk("NFS: Got error %d from the server %s on " 9324 "DESTROY_CLIENTID.", status, clp->cl_hostname); 9325 return status; 9326 } 9327 9328 static int nfs4_proc_destroy_clientid(struct nfs_client *clp, 9329 const struct cred *cred) 9330 { 9331 unsigned int loop; 9332 int ret; 9333 9334 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) { 9335 ret = _nfs4_proc_destroy_clientid(clp, cred); 9336 switch (ret) { 9337 case -NFS4ERR_DELAY: 9338 case -NFS4ERR_CLIENTID_BUSY: 9339 ssleep(1); 9340 break; 9341 default: 9342 return ret; 9343 } 9344 } 9345 return 0; 9346 } 9347 9348 int nfs4_destroy_clientid(struct nfs_client *clp) 9349 { 9350 const struct cred *cred; 9351 int ret = 0; 9352 9353 if (clp->cl_mvops->minor_version < 1) 9354 goto out; 9355 if (clp->cl_exchange_flags == 0) 9356 goto out; 9357 if
(clp->cl_preserve_clid) 9358 goto out; 9359 cred = nfs4_get_clid_cred(clp); 9360 ret = nfs4_proc_destroy_clientid(clp, cred); 9361 put_cred(cred); 9362 switch (ret) { 9363 case 0: 9364 case -NFS4ERR_STALE_CLIENTID: 9365 clp->cl_exchange_flags = 0; 9366 } 9367 out: 9368 return ret; 9369 } 9370 9371 #endif /* CONFIG_NFS_V4_1 */ 9372 9373 struct nfs4_get_lease_time_data { 9374 struct nfs4_get_lease_time_args *args; 9375 struct nfs4_get_lease_time_res *res; 9376 struct nfs_client *clp; 9377 }; 9378 9379 static void nfs4_get_lease_time_prepare(struct rpc_task *task, 9380 void *calldata) 9381 { 9382 struct nfs4_get_lease_time_data *data = 9383 (struct nfs4_get_lease_time_data *)calldata; 9384 9385 /* just setup sequence, do not trigger session recovery 9386 since we're invoked within one */ 9387 nfs4_setup_sequence(data->clp, 9388 &data->args->la_seq_args, 9389 &data->res->lr_seq_res, 9390 task); 9391 } 9392 9393 /* 9394 * Called from nfs4_state_manager thread for session setup, so don't recover 9395 * from sequence operation or clientid errors. 9396 */ 9397 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) 9398 { 9399 struct nfs4_get_lease_time_data *data = 9400 (struct nfs4_get_lease_time_data *)calldata; 9401 9402 if (!nfs4_sequence_done(task, &data->res->lr_seq_res)) 9403 return; 9404 switch (task->tk_status) { 9405 case -NFS4ERR_DELAY: 9406 case -NFS4ERR_GRACE: 9407 rpc_delay(task, NFS4_POLL_RETRY_MIN); 9408 task->tk_status = 0; 9409 fallthrough; 9410 case -NFS4ERR_RETRY_UNCACHED_REP: 9411 rpc_restart_call_prepare(task); 9412 return; 9413 } 9414 } 9415 9416 static const struct rpc_call_ops nfs4_get_lease_time_ops = { 9417 .rpc_call_prepare = nfs4_get_lease_time_prepare, 9418 .rpc_call_done = nfs4_get_lease_time_done, 9419 }; 9420 9421 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) 9422 { 9423 struct nfs4_get_lease_time_args args; 9424 struct nfs4_get_lease_time_res res = { 9425 .lr_fsinfo = fsinfo, 9426 }; 9427 struct nfs4_get_lease_time_data data = { 9428 .args = &args, 9429 .res = &res, 9430 .clp = clp, 9431 }; 9432 struct rpc_message msg = { 9433 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME], 9434 .rpc_argp = &args, 9435 .rpc_resp = &res, 9436 }; 9437 struct rpc_task_setup task_setup = { 9438 .rpc_client = clp->cl_rpcclient, 9439 .rpc_message = &msg, 9440 .callback_ops = &nfs4_get_lease_time_ops, 9441 .callback_data = &data, 9442 .flags = RPC_TASK_TIMEOUT, 9443 }; 9444 9445 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0, 1); 9446 return nfs4_call_sync_custom(&task_setup); 9447 } 9448 9449 #ifdef CONFIG_NFS_V4_1 9450 9451 /* 9452 * Initialize the values to be used by the client in CREATE_SESSION 9453 * If nfs4_init_session set the fore channel request and response sizes, 9454 * use them. 9455 * 9456 * Set the back channel max_resp_sz_cached to zero to force the client to 9457 * always set csa_cachethis to FALSE because the current implementation 9458 * of the back channel DRC only supports caching the CB_SEQUENCE operation. 
9459 */ 9460 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args, 9461 struct rpc_clnt *clnt) 9462 { 9463 unsigned int max_rqst_sz, max_resp_sz; 9464 unsigned int max_bc_payload = rpc_max_bc_payload(clnt); 9465 unsigned int max_bc_slots = rpc_num_bc_slots(clnt); 9466 9467 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead; 9468 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead; 9469 9470 /* Fore channel attributes */ 9471 args->fc_attrs.max_rqst_sz = max_rqst_sz; 9472 args->fc_attrs.max_resp_sz = max_resp_sz; 9473 args->fc_attrs.max_ops = NFS4_MAX_OPS; 9474 args->fc_attrs.max_reqs = max_session_slots; 9475 9476 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u " 9477 "max_ops=%u max_reqs=%u\n", 9478 __func__, 9479 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz, 9480 args->fc_attrs.max_ops, args->fc_attrs.max_reqs); 9481 9482 /* Back channel attributes */ 9483 args->bc_attrs.max_rqst_sz = max_bc_payload; 9484 args->bc_attrs.max_resp_sz = max_bc_payload; 9485 args->bc_attrs.max_resp_sz_cached = 0; 9486 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS; 9487 args->bc_attrs.max_reqs = max_t(unsigned short, max_session_cb_slots, 1); 9488 if (args->bc_attrs.max_reqs > max_bc_slots) 9489 args->bc_attrs.max_reqs = max_bc_slots; 9490 9491 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u " 9492 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", 9493 __func__, 9494 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz, 9495 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops, 9496 args->bc_attrs.max_reqs); 9497 } 9498 9499 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, 9500 struct nfs41_create_session_res *res) 9501 { 9502 struct nfs4_channel_attrs *sent = &args->fc_attrs; 9503 struct nfs4_channel_attrs *rcvd = &res->fc_attrs; 9504 9505 if (rcvd->max_resp_sz > sent->max_resp_sz) 9506 return -EINVAL; 9507 /* 9508 * Our requested max_ops is the minimum we need; we're not 9509 * prepared to break up compounds into smaller pieces than that. 
9510 * So, no point even trying to continue if the server won't 9511 * cooperate: 9512 */ 9513 if (rcvd->max_ops < sent->max_ops) 9514 return -EINVAL; 9515 if (rcvd->max_reqs == 0) 9516 return -EINVAL; 9517 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE) 9518 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE; 9519 return 0; 9520 } 9521 9522 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, 9523 struct nfs41_create_session_res *res) 9524 { 9525 struct nfs4_channel_attrs *sent = &args->bc_attrs; 9526 struct nfs4_channel_attrs *rcvd = &res->bc_attrs; 9527 9528 if (!(res->flags & SESSION4_BACK_CHAN)) 9529 goto out; 9530 if (rcvd->max_rqst_sz > sent->max_rqst_sz) 9531 return -EINVAL; 9532 if (rcvd->max_resp_sz > sent->max_resp_sz) 9533 return -EINVAL; 9534 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) 9535 return -EINVAL; 9536 if (rcvd->max_ops > sent->max_ops) 9537 return -EINVAL; 9538 if (rcvd->max_reqs > sent->max_reqs) 9539 return -EINVAL; 9540 out: 9541 return 0; 9542 } 9543 9544 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args, 9545 struct nfs41_create_session_res *res) 9546 { 9547 int ret; 9548 9549 ret = nfs4_verify_fore_channel_attrs(args, res); 9550 if (ret) 9551 return ret; 9552 return nfs4_verify_back_channel_attrs(args, res); 9553 } 9554 9555 static void nfs4_update_session(struct nfs4_session *session, 9556 struct nfs41_create_session_res *res) 9557 { 9558 nfs4_copy_sessionid(&session->sess_id, &res->sessionid); 9559 /* Mark client id and session as being confirmed */ 9560 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; 9561 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state); 9562 session->flags = res->flags; 9563 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs)); 9564 if (res->flags & SESSION4_BACK_CHAN) 9565 memcpy(&session->bc_attrs, &res->bc_attrs, 9566 sizeof(session->bc_attrs)); 9567 } 9568 9569 static int _nfs4_proc_create_session(struct nfs_client *clp, 9570 const struct cred *cred) 9571 { 9572 struct nfs4_session *session = clp->cl_session; 9573 struct nfs41_create_session_args args = { 9574 .client = clp, 9575 .clientid = clp->cl_clientid, 9576 .seqid = clp->cl_seqid, 9577 .cb_program = NFS4_CALLBACK, 9578 }; 9579 struct nfs41_create_session_res res; 9580 9581 struct rpc_message msg = { 9582 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION], 9583 .rpc_argp = &args, 9584 .rpc_resp = &res, 9585 .rpc_cred = cred, 9586 }; 9587 int status; 9588 9589 nfs4_init_channel_attrs(&args, clp->cl_rpcclient); 9590 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN); 9591 9592 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 9593 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 9594 trace_nfs4_create_session(clp, status); 9595 9596 switch (status) { 9597 case -NFS4ERR_STALE_CLIENTID: 9598 case -NFS4ERR_DELAY: 9599 case -ETIMEDOUT: 9600 case -EACCES: 9601 case -EAGAIN: 9602 goto out; 9603 } 9604 9605 clp->cl_seqid++; 9606 if (!status) { 9607 /* Verify the session's negotiated channel_attrs values */ 9608 status = nfs4_verify_channel_attrs(&args, &res); 9609 /* Increment the clientid slot sequence id */ 9610 if (status) 9611 goto out; 9612 nfs4_update_session(session, &res); 9613 } 9614 out: 9615 return status; 9616 } 9617 9618 /* 9619 * Issues a CREATE_SESSION operation to the server. 9620 * It is the responsibility of the caller to verify the session is 9621 * expired before calling this routine. 
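 *
 * On success the session slot tables are initialised (or reset) and the
 * rpc client's transports are probed for session trunking via
 * rpc_clnt_probe_trunked_xprts().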
9622 */ 9623 int nfs4_proc_create_session(struct nfs_client *clp, const struct cred *cred) 9624 { 9625 int status; 9626 unsigned *ptr; 9627 struct nfs4_session *session = clp->cl_session; 9628 struct nfs4_add_xprt_data xprtdata = { 9629 .clp = clp, 9630 }; 9631 struct rpc_add_xprt_test rpcdata = { 9632 .add_xprt_test = clp->cl_mvops->session_trunk, 9633 .data = &xprtdata, 9634 }; 9635 9636 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); 9637 9638 status = _nfs4_proc_create_session(clp, cred); 9639 if (status) 9640 goto out; 9641 9642 /* Init or reset the session slot tables */ 9643 status = nfs4_setup_session_slot_tables(session); 9644 dprintk("slot table setup returned %d\n", status); 9645 if (status) 9646 goto out; 9647 9648 ptr = (unsigned *)&session->sess_id.data[0]; 9649 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__, 9650 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]); 9651 rpc_clnt_probe_trunked_xprts(clp->cl_rpcclient, &rpcdata); 9652 out: 9653 return status; 9654 } 9655 9656 /* 9657 * Issue the over-the-wire RPC DESTROY_SESSION. 9658 * The caller must serialize access to this routine. 9659 */ 9660 int nfs4_proc_destroy_session(struct nfs4_session *session, 9661 const struct cred *cred) 9662 { 9663 struct rpc_message msg = { 9664 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION], 9665 .rpc_argp = session, 9666 .rpc_cred = cred, 9667 }; 9668 int status = 0; 9669 9670 /* session is still being setup */ 9671 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state)) 9672 return 0; 9673 9674 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 9675 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 9676 trace_nfs4_destroy_session(session->clp, status); 9677 9678 if (status) 9679 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. " 9680 "Session has been destroyed regardless...\n", status); 9681 rpc_clnt_manage_trunked_xprts(session->clp->cl_rpcclient); 9682 return status; 9683 } 9684 9685 /* 9686 * Renew the cl_session lease. 
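 * Renewal is driven by an asynchronous SEQUENCE call; the slot setup and
 * error handling live in the nfs41_sequence_* callbacks below.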
9687 */ 9688 struct nfs4_sequence_data { 9689 struct nfs_client *clp; 9690 struct nfs4_sequence_args args; 9691 struct nfs4_sequence_res res; 9692 }; 9693 9694 static void nfs41_sequence_release(void *data) 9695 { 9696 struct nfs4_sequence_data *calldata = data; 9697 struct nfs_client *clp = calldata->clp; 9698 9699 if (refcount_read(&clp->cl_count) > 1) 9700 nfs4_schedule_state_renewal(clp); 9701 nfs_put_client(clp); 9702 kfree(calldata); 9703 } 9704 9705 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp) 9706 { 9707 switch(task->tk_status) { 9708 case -NFS4ERR_DELAY: 9709 rpc_delay(task, NFS4_POLL_RETRY_MAX); 9710 return -EAGAIN; 9711 default: 9712 nfs4_schedule_lease_recovery(clp); 9713 } 9714 return 0; 9715 } 9716 9717 static void nfs41_sequence_call_done(struct rpc_task *task, void *data) 9718 { 9719 struct nfs4_sequence_data *calldata = data; 9720 struct nfs_client *clp = calldata->clp; 9721 9722 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp)) 9723 return; 9724 9725 trace_nfs4_sequence(clp, task->tk_status); 9726 if (task->tk_status < 0 && clp->cl_cons_state >= 0) { 9727 dprintk("%s ERROR %d\n", __func__, task->tk_status); 9728 if (refcount_read(&clp->cl_count) == 1) 9729 return; 9730 9731 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) { 9732 rpc_restart_call_prepare(task); 9733 return; 9734 } 9735 } 9736 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); 9737 } 9738 9739 static void nfs41_sequence_prepare(struct rpc_task *task, void *data) 9740 { 9741 struct nfs4_sequence_data *calldata = data; 9742 struct nfs_client *clp = calldata->clp; 9743 struct nfs4_sequence_args *args; 9744 struct nfs4_sequence_res *res; 9745 9746 args = task->tk_msg.rpc_argp; 9747 res = task->tk_msg.rpc_resp; 9748 9749 nfs4_setup_sequence(clp, args, res, task); 9750 } 9751 9752 static const struct rpc_call_ops nfs41_sequence_ops = { 9753 .rpc_call_done = nfs41_sequence_call_done, 9754 .rpc_call_prepare = nfs41_sequence_prepare, 9755 .rpc_release = nfs41_sequence_release, 9756 }; 9757 9758 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, 9759 const struct cred *cred, 9760 struct nfs4_slot *slot, 9761 bool is_privileged) 9762 { 9763 struct nfs4_sequence_data *calldata; 9764 struct rpc_message msg = { 9765 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE], 9766 .rpc_cred = cred, 9767 }; 9768 struct rpc_task_setup task_setup_data = { 9769 .rpc_client = clp->cl_rpcclient, 9770 .rpc_message = &msg, 9771 .callback_ops = &nfs41_sequence_ops, 9772 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT | RPC_TASK_MOVEABLE, 9773 }; 9774 struct rpc_task *ret; 9775 9776 ret = ERR_PTR(-EIO); 9777 if (!refcount_inc_not_zero(&clp->cl_count)) 9778 goto out_err; 9779 9780 ret = ERR_PTR(-ENOMEM); 9781 calldata = kzalloc(sizeof(*calldata), GFP_KERNEL); 9782 if (calldata == NULL) 9783 goto out_put_clp; 9784 nfs4_init_sequence(&calldata->args, &calldata->res, 0, is_privileged); 9785 nfs4_sequence_attach_slot(&calldata->args, &calldata->res, slot); 9786 msg.rpc_argp = &calldata->args; 9787 msg.rpc_resp = &calldata->res; 9788 calldata->clp = clp; 9789 task_setup_data.callback_data = calldata; 9790 9791 ret = rpc_run_task(&task_setup_data); 9792 if (IS_ERR(ret)) 9793 goto out_err; 9794 return ret; 9795 out_put_clp: 9796 nfs_put_client(clp); 9797 out_err: 9798 nfs41_release_slot(slot); 9799 return ret; 9800 } 9801 9802 static int nfs41_proc_async_sequence(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags) 9803 { 9804 struct rpc_task *task; 
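	/*
	 * This is the v4.1+ .sched_state_renewal callback (see
	 * nfs41_state_renewal_ops below): it only fires when called with
	 * NFS4_RENEW_TIMEOUT set, issues a non-privileged SEQUENCE
	 * asynchronously, and drops the task reference without waiting;
	 * completion is handled by nfs41_sequence_call_done().
	 */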
9805 int ret = 0; 9806 9807 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) 9808 return -EAGAIN; 9809 task = _nfs41_proc_sequence(clp, cred, NULL, false); 9810 if (IS_ERR(task)) 9811 ret = PTR_ERR(task); 9812 else 9813 rpc_put_task_async(task); 9814 dprintk("<-- %s status=%d\n", __func__, ret); 9815 return ret; 9816 } 9817 9818 static int nfs4_proc_sequence(struct nfs_client *clp, const struct cred *cred) 9819 { 9820 struct rpc_task *task; 9821 int ret; 9822 9823 task = _nfs41_proc_sequence(clp, cred, NULL, true); 9824 if (IS_ERR(task)) { 9825 ret = PTR_ERR(task); 9826 goto out; 9827 } 9828 ret = rpc_wait_for_completion_task(task); 9829 if (!ret) 9830 ret = task->tk_status; 9831 rpc_put_task(task); 9832 out: 9833 dprintk("<-- %s status=%d\n", __func__, ret); 9834 return ret; 9835 } 9836 9837 struct nfs4_reclaim_complete_data { 9838 struct nfs_client *clp; 9839 struct nfs41_reclaim_complete_args arg; 9840 struct nfs41_reclaim_complete_res res; 9841 }; 9842 9843 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data) 9844 { 9845 struct nfs4_reclaim_complete_data *calldata = data; 9846 9847 nfs4_setup_sequence(calldata->clp, 9848 &calldata->arg.seq_args, 9849 &calldata->res.seq_res, 9850 task); 9851 } 9852 9853 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp) 9854 { 9855 switch(task->tk_status) { 9856 case 0: 9857 wake_up_all(&clp->cl_lock_waitq); 9858 fallthrough; 9859 case -NFS4ERR_COMPLETE_ALREADY: 9860 case -NFS4ERR_WRONG_CRED: /* What to do here? */ 9861 break; 9862 case -NFS4ERR_DELAY: 9863 rpc_delay(task, NFS4_POLL_RETRY_MAX); 9864 fallthrough; 9865 case -NFS4ERR_RETRY_UNCACHED_REP: 9866 case -EACCES: 9867 dprintk("%s: failed to reclaim complete error %d for server %s, retrying\n", 9868 __func__, task->tk_status, clp->cl_hostname); 9869 return -EAGAIN; 9870 case -NFS4ERR_BADSESSION: 9871 case -NFS4ERR_DEADSESSION: 9872 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 9873 break; 9874 default: 9875 nfs4_schedule_lease_recovery(clp); 9876 } 9877 return 0; 9878 } 9879 9880 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data) 9881 { 9882 struct nfs4_reclaim_complete_data *calldata = data; 9883 struct nfs_client *clp = calldata->clp; 9884 struct nfs4_sequence_res *res = &calldata->res.seq_res; 9885 9886 if (!nfs41_sequence_done(task, res)) 9887 return; 9888 9889 trace_nfs4_reclaim_complete(clp, task->tk_status); 9890 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) { 9891 rpc_restart_call_prepare(task); 9892 return; 9893 } 9894 } 9895 9896 static void nfs4_free_reclaim_complete_data(void *data) 9897 { 9898 struct nfs4_reclaim_complete_data *calldata = data; 9899 9900 kfree(calldata); 9901 } 9902 9903 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = { 9904 .rpc_call_prepare = nfs4_reclaim_complete_prepare, 9905 .rpc_call_done = nfs4_reclaim_complete_done, 9906 .rpc_release = nfs4_free_reclaim_complete_data, 9907 }; 9908 9909 /* 9910 * Issue a global reclaim complete. 
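 * RECLAIM_COMPLETE with rca_one_fs set to false tells the server that this
 * client has finished reclaiming all of its state after a reboot, so the
 * server does not need to hold the grace period open on its behalf.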
9911 */ 9912 static int nfs41_proc_reclaim_complete(struct nfs_client *clp, 9913 const struct cred *cred) 9914 { 9915 struct nfs4_reclaim_complete_data *calldata; 9916 struct rpc_message msg = { 9917 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE], 9918 .rpc_cred = cred, 9919 }; 9920 struct rpc_task_setup task_setup_data = { 9921 .rpc_client = clp->cl_rpcclient, 9922 .rpc_message = &msg, 9923 .callback_ops = &nfs4_reclaim_complete_call_ops, 9924 .flags = RPC_TASK_NO_ROUND_ROBIN, 9925 }; 9926 int status = -ENOMEM; 9927 9928 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 9929 if (calldata == NULL) 9930 goto out; 9931 calldata->clp = clp; 9932 calldata->arg.one_fs = 0; 9933 9934 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0, 1); 9935 msg.rpc_argp = &calldata->arg; 9936 msg.rpc_resp = &calldata->res; 9937 task_setup_data.callback_data = calldata; 9938 status = nfs4_call_sync_custom(&task_setup_data); 9939 out: 9940 dprintk("<-- %s status=%d\n", __func__, status); 9941 return status; 9942 } 9943 9944 static void 9945 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata) 9946 { 9947 struct nfs4_layoutget *lgp = calldata; 9948 struct nfs_server *server = NFS_SERVER(lgp->args.inode); 9949 9950 nfs4_setup_sequence(server->nfs_client, &lgp->args.seq_args, 9951 &lgp->res.seq_res, task); 9952 } 9953 9954 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) 9955 { 9956 struct nfs4_layoutget *lgp = calldata; 9957 9958 nfs41_sequence_process(task, &lgp->res.seq_res); 9959 } 9960 9961 static int 9962 nfs4_layoutget_handle_exception(struct rpc_task *task, 9963 struct nfs4_layoutget *lgp, struct nfs4_exception *exception) 9964 { 9965 struct inode *inode = lgp->args.inode; 9966 struct nfs_server *server = NFS_SERVER(inode); 9967 struct pnfs_layout_hdr *lo = lgp->lo; 9968 int nfs4err = task->tk_status; 9969 int err, status = 0; 9970 LIST_HEAD(head); 9971 9972 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status); 9973 9974 nfs4_sequence_free_slot(&lgp->res.seq_res); 9975 9976 exception->state = NULL; 9977 exception->stateid = NULL; 9978 9979 switch (nfs4err) { 9980 case 0: 9981 goto out; 9982 9983 /* 9984 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pNFS 9985 * on the file. Set tk_status to -ENODATA to tell the upper layer to 9986 * retry the I/O inband (through the MDS). 9987 */ 9988 case -NFS4ERR_LAYOUTUNAVAILABLE: 9989 status = -ENODATA; 9990 goto out; 9991 /* 9992 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of 9993 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3). 9994 */ 9995 case -NFS4ERR_BADLAYOUT: 9996 status = -EOVERFLOW; 9997 goto out; 9998 /* 9999 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client 10000 * (or clients) writing to the same RAID stripe except when 10001 * the minlength argument is 0 (see RFC5661 section 18.43.3). 10002 * 10003 * Treat it like we would RECALLCONFLICT -- we retry for a little 10004 * while, and then eventually give up. 10005 */ 10006 case -NFS4ERR_LAYOUTTRYLATER: 10007 if (lgp->args.minlength == 0) { 10008 status = -EOVERFLOW; 10009 goto out; 10010 } 10011 status = -EBUSY; 10012 break; 10013 case -NFS4ERR_RECALLCONFLICT: 10014 case -NFS4ERR_RETURNCONFLICT: 10015 status = -ERECALLCONFLICT; 10016 break; 10017 case -NFS4ERR_DELEG_REVOKED: 10018 case -NFS4ERR_ADMIN_REVOKED: 10019 case -NFS4ERR_EXPIRED: 10020 case -NFS4ERR_BAD_STATEID: 10021 exception->timeout = 0; 10022 spin_lock(&inode->i_lock); 10023 /* If the open stateid was bad, then recover it.
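 * If the layout header is gone, already marked invalid, or no longer
 * carries the stateid we sent, hand the open state and stateid back to
 * the caller through the exception so the open stateid can be recovered;
 * otherwise invalidate the layout stateid locally and retry the LAYOUTGET.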
*/ 10024 if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) || 10025 !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) { 10026 spin_unlock(&inode->i_lock); 10027 exception->state = lgp->args.ctx->state; 10028 exception->stateid = &lgp->args.stateid; 10029 break; 10030 } 10031 10032 /* 10033 * Mark the bad layout state as invalid, then retry 10034 */ 10035 pnfs_mark_layout_stateid_invalid(lo, &head); 10036 spin_unlock(&inode->i_lock); 10037 nfs_commit_inode(inode, 0); 10038 pnfs_free_lseg_list(&head); 10039 status = -EAGAIN; 10040 goto out; 10041 } 10042 10043 err = nfs4_handle_exception(server, nfs4err, exception); 10044 if (!status) { 10045 if (exception->retry) 10046 status = -EAGAIN; 10047 else 10048 status = err; 10049 } 10050 out: 10051 return status; 10052 } 10053 10054 size_t max_response_pages(struct nfs_server *server) 10055 { 10056 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 10057 return nfs_page_array_len(0, max_resp_sz); 10058 } 10059 10060 static void nfs4_layoutget_release(void *calldata) 10061 { 10062 struct nfs4_layoutget *lgp = calldata; 10063 10064 nfs4_sequence_free_slot(&lgp->res.seq_res); 10065 pnfs_layoutget_free(lgp); 10066 } 10067 10068 static const struct rpc_call_ops nfs4_layoutget_call_ops = { 10069 .rpc_call_prepare = nfs4_layoutget_prepare, 10070 .rpc_call_done = nfs4_layoutget_done, 10071 .rpc_release = nfs4_layoutget_release, 10072 }; 10073 10074 struct pnfs_layout_segment * 10075 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, 10076 struct nfs4_exception *exception) 10077 { 10078 struct inode *inode = lgp->args.inode; 10079 struct nfs_server *server = NFS_SERVER(inode); 10080 struct rpc_task *task; 10081 struct rpc_message msg = { 10082 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET], 10083 .rpc_argp = &lgp->args, 10084 .rpc_resp = &lgp->res, 10085 .rpc_cred = lgp->cred, 10086 }; 10087 struct rpc_task_setup task_setup_data = { 10088 .rpc_client = server->client, 10089 .rpc_message = &msg, 10090 .callback_ops = &nfs4_layoutget_call_ops, 10091 .callback_data = lgp, 10092 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF | 10093 RPC_TASK_MOVEABLE, 10094 }; 10095 struct pnfs_layout_segment *lseg = NULL; 10096 int status = 0; 10097 10098 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0); 10099 exception->retry = 0; 10100 10101 task = rpc_run_task(&task_setup_data); 10102 if (IS_ERR(task)) 10103 return ERR_CAST(task); 10104 10105 status = rpc_wait_for_completion_task(task); 10106 if (status != 0) 10107 goto out; 10108 10109 if (task->tk_status < 0) { 10110 exception->retry = 1; 10111 status = nfs4_layoutget_handle_exception(task, lgp, exception); 10112 } else if (lgp->res.layoutp->len == 0) { 10113 exception->retry = 1; 10114 status = -EAGAIN; 10115 nfs4_update_delay(&exception->timeout); 10116 } else 10117 lseg = pnfs_layout_process(lgp); 10118 out: 10119 trace_nfs4_layoutget(lgp->args.ctx, 10120 &lgp->args.range, 10121 &lgp->res.range, 10122 &lgp->res.stateid, 10123 status); 10124 10125 rpc_put_task(task); 10126 dprintk("<-- %s status=%d\n", __func__, status); 10127 if (status) 10128 return ERR_PTR(status); 10129 return lseg; 10130 } 10131 10132 static void 10133 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) 10134 { 10135 struct nfs4_layoutreturn *lrp = calldata; 10136 10137 nfs4_setup_sequence(lrp->clp, 10138 &lrp->args.seq_args, 10139 &lrp->res.seq_res, 10140 task); 10141 if (!pnfs_layout_is_valid(lrp->args.layout)) 10142 rpc_exit(task, 0); 10143 } 10144 10145 static void 
nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) 10146 { 10147 struct nfs4_layoutreturn *lrp = calldata; 10148 struct nfs_server *server; 10149 10150 if (!nfs41_sequence_process(task, &lrp->res.seq_res)) 10151 return; 10152 10153 if (task->tk_rpc_status == -ETIMEDOUT) { 10154 lrp->rpc_status = -EAGAIN; 10155 lrp->res.lrs_present = 0; 10156 return; 10157 } 10158 /* 10159 * Was there an RPC level error? Assume the call succeeded, 10160 * and that we need to release the layout 10161 */ 10162 if (task->tk_rpc_status != 0 && RPC_WAS_SENT(task)) { 10163 lrp->res.lrs_present = 0; 10164 return; 10165 } 10166 10167 server = NFS_SERVER(lrp->args.inode); 10168 switch (task->tk_status) { 10169 case -NFS4ERR_OLD_STATEID: 10170 if (nfs4_layout_refresh_old_stateid(&lrp->args.stateid, 10171 &lrp->args.range, 10172 lrp->args.inode)) 10173 goto out_restart; 10174 fallthrough; 10175 default: 10176 task->tk_status = 0; 10177 lrp->res.lrs_present = 0; 10178 fallthrough; 10179 case 0: 10180 break; 10181 case -NFS4ERR_BADSESSION: 10182 case -NFS4ERR_DEADSESSION: 10183 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 10184 nfs4_schedule_session_recovery(server->nfs_client->cl_session, 10185 task->tk_status); 10186 lrp->res.lrs_present = 0; 10187 lrp->rpc_status = -EAGAIN; 10188 task->tk_status = 0; 10189 break; 10190 case -NFS4ERR_DELAY: 10191 if (nfs4_async_handle_error(task, server, NULL, NULL) == 10192 -EAGAIN) 10193 goto out_restart; 10194 lrp->res.lrs_present = 0; 10195 break; 10196 } 10197 return; 10198 out_restart: 10199 task->tk_status = 0; 10200 nfs4_sequence_free_slot(&lrp->res.seq_res); 10201 rpc_restart_call_prepare(task); 10202 } 10203 10204 static void nfs4_layoutreturn_release(void *calldata) 10205 { 10206 struct nfs4_layoutreturn *lrp = calldata; 10207 struct pnfs_layout_hdr *lo = lrp->args.layout; 10208 10209 if (lrp->rpc_status == 0 || !lrp->inode) 10210 pnfs_layoutreturn_free_lsegs( 10211 lo, &lrp->args.stateid, &lrp->args.range, 10212 lrp->res.lrs_present ? 
&lrp->res.stateid : NULL); 10213 else 10214 pnfs_layoutreturn_retry_later(lo, &lrp->args.stateid, 10215 &lrp->args.range); 10216 nfs4_sequence_free_slot(&lrp->res.seq_res); 10217 if (lrp->ld_private.ops && lrp->ld_private.ops->free) 10218 lrp->ld_private.ops->free(&lrp->ld_private); 10219 pnfs_put_layout_hdr(lrp->args.layout); 10220 nfs_iput_and_deactive(lrp->inode); 10221 put_cred(lrp->cred); 10222 kfree(calldata); 10223 } 10224 10225 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = { 10226 .rpc_call_prepare = nfs4_layoutreturn_prepare, 10227 .rpc_call_done = nfs4_layoutreturn_done, 10228 .rpc_release = nfs4_layoutreturn_release, 10229 }; 10230 10231 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, unsigned int flags) 10232 { 10233 struct rpc_task *task; 10234 struct rpc_message msg = { 10235 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN], 10236 .rpc_argp = &lrp->args, 10237 .rpc_resp = &lrp->res, 10238 .rpc_cred = lrp->cred, 10239 }; 10240 struct rpc_task_setup task_setup_data = { 10241 .rpc_client = NFS_SERVER(lrp->args.inode)->client, 10242 .rpc_message = &msg, 10243 .callback_ops = &nfs4_layoutreturn_call_ops, 10244 .callback_data = lrp, 10245 .flags = RPC_TASK_MOVEABLE, 10246 }; 10247 int status = 0; 10248 10249 nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client, 10250 NFS_SP4_MACH_CRED_PNFS_CLEANUP, 10251 &task_setup_data.rpc_client, &msg); 10252 10253 lrp->inode = nfs_igrab_and_active(lrp->args.inode); 10254 if (flags & PNFS_FL_LAYOUTRETURN_ASYNC) { 10255 if (!lrp->inode) { 10256 nfs4_layoutreturn_release(lrp); 10257 return -EAGAIN; 10258 } 10259 task_setup_data.flags |= RPC_TASK_ASYNC; 10260 } 10261 if (!lrp->inode) 10262 flags |= PNFS_FL_LAYOUTRETURN_PRIVILEGED; 10263 if (flags & PNFS_FL_LAYOUTRETURN_PRIVILEGED) 10264 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1, 10265 1); 10266 else 10267 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1, 10268 0); 10269 task = rpc_run_task(&task_setup_data); 10270 if (IS_ERR(task)) 10271 return PTR_ERR(task); 10272 if (!(flags & PNFS_FL_LAYOUTRETURN_ASYNC)) 10273 status = task->tk_status; 10274 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status); 10275 dprintk("<-- %s status=%d\n", __func__, status); 10276 rpc_put_task(task); 10277 return status; 10278 } 10279 10280 static int 10281 _nfs4_proc_getdeviceinfo(struct nfs_server *server, 10282 struct pnfs_device *pdev, 10283 const struct cred *cred) 10284 { 10285 struct nfs4_getdeviceinfo_args args = { 10286 .pdev = pdev, 10287 .notify_types = NOTIFY_DEVICEID4_CHANGE | 10288 NOTIFY_DEVICEID4_DELETE, 10289 }; 10290 struct nfs4_getdeviceinfo_res res = { 10291 .pdev = pdev, 10292 }; 10293 struct rpc_message msg = { 10294 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO], 10295 .rpc_argp = &args, 10296 .rpc_resp = &res, 10297 .rpc_cred = cred, 10298 }; 10299 int status; 10300 10301 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 10302 if (res.notification & ~args.notify_types) 10303 dprintk("%s: unsupported notification\n", __func__); 10304 if (res.notification != args.notify_types) 10305 pdev->nocache = 1; 10306 10307 trace_nfs4_getdeviceinfo(server, &pdev->dev_id, status); 10308 10309 dprintk("<-- %s status=%d\n", __func__, status); 10310 10311 return status; 10312 } 10313 10314 int nfs4_proc_getdeviceinfo(struct nfs_server *server, 10315 struct pnfs_device *pdev, 10316 const struct cred *cred) 10317 { 10318 struct nfs4_exception exception = { }; 10319 int err; 10320 
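	/*
	 * Standard NFSv4 retry loop: _nfs4_proc_getdeviceinfo() is reissued
	 * for as long as nfs4_handle_exception() marks the error as
	 * retryable (e.g. after backing off on NFS4ERR_DELAY); any other
	 * status is returned to the caller unchanged.
	 */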
10321 do { 10322 err = nfs4_handle_exception(server, 10323 _nfs4_proc_getdeviceinfo(server, pdev, cred), 10324 &exception); 10325 } while (exception.retry); 10326 return err; 10327 } 10328 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo); 10329 10330 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata) 10331 { 10332 struct nfs4_layoutcommit_data *data = calldata; 10333 struct nfs_server *server = NFS_SERVER(data->args.inode); 10334 10335 nfs4_setup_sequence(server->nfs_client, 10336 &data->args.seq_args, 10337 &data->res.seq_res, 10338 task); 10339 } 10340 10341 static void 10342 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata) 10343 { 10344 struct nfs4_layoutcommit_data *data = calldata; 10345 struct nfs_server *server = NFS_SERVER(data->args.inode); 10346 10347 if (!nfs41_sequence_done(task, &data->res.seq_res)) 10348 return; 10349 10350 switch (task->tk_status) { /* Just ignore these failures */ 10351 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */ 10352 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */ 10353 case -NFS4ERR_BADLAYOUT: /* no layout */ 10354 case -NFS4ERR_GRACE: /* loca_reclaim always false */ 10355 task->tk_status = 0; 10356 break; 10357 case 0: 10358 break; 10359 default: 10360 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) { 10361 rpc_restart_call_prepare(task); 10362 return; 10363 } 10364 } 10365 } 10366 10367 static void nfs4_layoutcommit_release(void *calldata) 10368 { 10369 struct nfs4_layoutcommit_data *data = calldata; 10370 10371 pnfs_cleanup_layoutcommit(data); 10372 nfs_post_op_update_inode_force_wcc(data->args.inode, 10373 data->res.fattr); 10374 put_cred(data->cred); 10375 nfs_iput_and_deactive(data->inode); 10376 kfree(data); 10377 } 10378 10379 static const struct rpc_call_ops nfs4_layoutcommit_ops = { 10380 .rpc_call_prepare = nfs4_layoutcommit_prepare, 10381 .rpc_call_done = nfs4_layoutcommit_done, 10382 .rpc_release = nfs4_layoutcommit_release, 10383 }; 10384 10385 int 10386 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync) 10387 { 10388 struct rpc_message msg = { 10389 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT], 10390 .rpc_argp = &data->args, 10391 .rpc_resp = &data->res, 10392 .rpc_cred = data->cred, 10393 }; 10394 struct rpc_task_setup task_setup_data = { 10395 .task = &data->task, 10396 .rpc_client = NFS_CLIENT(data->args.inode), 10397 .rpc_message = &msg, 10398 .callback_ops = &nfs4_layoutcommit_ops, 10399 .callback_data = data, 10400 .flags = RPC_TASK_MOVEABLE, 10401 }; 10402 struct rpc_task *task; 10403 int status = 0; 10404 10405 dprintk("NFS: initiating layoutcommit call.
sync %d " 10406 "lbw: %llu inode %lu\n", sync, 10407 data->args.lastbytewritten, 10408 data->args.inode->i_ino); 10409 10410 if (!sync) { 10411 data->inode = nfs_igrab_and_active(data->args.inode); 10412 if (data->inode == NULL) { 10413 nfs4_layoutcommit_release(data); 10414 return -EAGAIN; 10415 } 10416 task_setup_data.flags = RPC_TASK_ASYNC; 10417 } 10418 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0); 10419 task = rpc_run_task(&task_setup_data); 10420 if (IS_ERR(task)) 10421 return PTR_ERR(task); 10422 if (sync) 10423 status = task->tk_status; 10424 trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status); 10425 dprintk("%s: status %d\n", __func__, status); 10426 rpc_put_task(task); 10427 return status; 10428 } 10429 10430 /* 10431 * Use the state managment nfs_client cl_rpcclient, which uses krb5i (if 10432 * possible) as per RFC3530bis and RFC5661 Security Considerations sections 10433 */ 10434 static int _nfs41_proc_secinfo_no_name(struct nfs_server *server, 10435 struct nfs_fh *fhandle, 10436 struct nfs4_secinfo_flavors *flavors, 10437 bool use_integrity) 10438 { 10439 struct nfs41_secinfo_no_name_args args = { 10440 .style = SECINFO_STYLE_CURRENT_FH, 10441 }; 10442 struct nfs4_secinfo_res res = { 10443 .flavors = flavors, 10444 }; 10445 struct rpc_message msg = { 10446 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME], 10447 .rpc_argp = &args, 10448 .rpc_resp = &res, 10449 }; 10450 struct nfs4_call_sync_data data = { 10451 .seq_server = server, 10452 .seq_args = &args.seq_args, 10453 .seq_res = &res.seq_res, 10454 }; 10455 struct rpc_task_setup task_setup = { 10456 .rpc_client = server->client, 10457 .rpc_message = &msg, 10458 .callback_ops = server->nfs_client->cl_mvops->call_sync_ops, 10459 .callback_data = &data, 10460 .flags = RPC_TASK_NO_ROUND_ROBIN, 10461 }; 10462 const struct cred *cred = NULL; 10463 int status; 10464 10465 if (use_integrity) { 10466 task_setup.rpc_client = server->nfs_client->cl_rpcclient; 10467 10468 cred = nfs4_get_clid_cred(server->nfs_client); 10469 msg.rpc_cred = cred; 10470 } 10471 10472 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 10473 status = nfs4_call_sync_custom(&task_setup); 10474 dprintk("<-- %s status=%d\n", __func__, status); 10475 10476 put_cred(cred); 10477 10478 return status; 10479 } 10480 10481 static int nfs41_proc_secinfo_no_name(struct nfs_server *server, 10482 struct nfs_fh *fhandle, 10483 struct nfs4_secinfo_flavors *flavors) 10484 { 10485 struct nfs4_exception exception = { 10486 .interruptible = true, 10487 }; 10488 int err; 10489 do { 10490 /* first try using integrity protection */ 10491 err = -NFS4ERR_WRONGSEC; 10492 10493 /* try to use integrity protection with machine cred */ 10494 if (_nfs4_is_integrity_protected(server->nfs_client)) 10495 err = _nfs41_proc_secinfo_no_name(server, fhandle, 10496 flavors, true); 10497 10498 /* 10499 * if unable to use integrity protection, or SECINFO with 10500 * integrity protection returns NFS4ERR_WRONGSEC (which is 10501 * disallowed by spec, but exists in deployed servers) use 10502 * the current filesystem's rpc_client and the user cred. 
10503 */ 10504 if (err == -NFS4ERR_WRONGSEC) 10505 err = _nfs41_proc_secinfo_no_name(server, fhandle, 10506 flavors, false); 10507 10508 switch (err) { 10509 case 0: 10510 case -NFS4ERR_WRONGSEC: 10511 case -ENOTSUPP: 10512 goto out; 10513 default: 10514 err = nfs4_handle_exception(server, err, &exception); 10515 } 10516 } while (exception.retry); 10517 out: 10518 return err; 10519 } 10520 10521 static int nfs41_find_root_sec(struct nfs_server *server, 10522 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 10523 { 10524 int err; 10525 struct page *page; 10526 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR; 10527 struct nfs4_secinfo_flavors *flavors; 10528 struct nfs4_secinfo4 *secinfo; 10529 int i; 10530 10531 page = alloc_page(GFP_KERNEL); 10532 if (!page) { 10533 err = -ENOMEM; 10534 goto out; 10535 } 10536 10537 flavors = page_address(page); 10538 err = nfs41_proc_secinfo_no_name(server, fhandle, flavors); 10539 10540 /* 10541 * Fall back on "guess and check" method if 10542 * the server doesn't support SECINFO_NO_NAME 10543 */ 10544 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) { 10545 err = nfs4_find_root_sec(server, fhandle, fattr); 10546 goto out_freepage; 10547 } 10548 if (err) 10549 goto out_freepage; 10550 10551 for (i = 0; i < flavors->num_flavors; i++) { 10552 secinfo = &flavors->flavors[i]; 10553 10554 switch (secinfo->flavor) { 10555 case RPC_AUTH_NULL: 10556 case RPC_AUTH_UNIX: 10557 case RPC_AUTH_GSS: 10558 flavor = rpcauth_get_pseudoflavor(secinfo->flavor, 10559 &secinfo->flavor_info); 10560 break; 10561 default: 10562 flavor = RPC_AUTH_MAXFLAVOR; 10563 break; 10564 } 10565 10566 if (!nfs_auth_info_match(&server->auth_info, flavor)) 10567 flavor = RPC_AUTH_MAXFLAVOR; 10568 10569 if (flavor != RPC_AUTH_MAXFLAVOR) { 10570 err = nfs4_lookup_root_sec(server, fhandle, fattr, 10571 flavor); 10572 if (!err) 10573 break; 10574 } 10575 } 10576 10577 if (flavor == RPC_AUTH_MAXFLAVOR) 10578 err = -EPERM; 10579 10580 out_freepage: 10581 put_page(page); 10582 if (err == -EACCES) 10583 return -EPERM; 10584 out: 10585 return err; 10586 } 10587 10588 static int _nfs41_test_stateid(struct nfs_server *server, 10589 const nfs4_stateid *stateid, 10590 const struct cred *cred) 10591 { 10592 int status; 10593 struct nfs41_test_stateid_args args = { 10594 .stateid = *stateid, 10595 }; 10596 struct nfs41_test_stateid_res res; 10597 struct rpc_message msg = { 10598 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID], 10599 .rpc_argp = &args, 10600 .rpc_resp = &res, 10601 .rpc_cred = cred, 10602 }; 10603 struct rpc_clnt *rpc_client = server->client; 10604 10605 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 10606 &rpc_client, &msg); 10607 10608 dprintk("NFS call test_stateid %p\n", stateid); 10609 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 10610 status = nfs4_call_sync_sequence(rpc_client, server, &msg, 10611 &args.seq_args, &res.seq_res); 10612 if (status != NFS_OK) { 10613 dprintk("NFS reply test_stateid: failed, %d\n", status); 10614 return status; 10615 } 10616 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status); 10617 return -res.status; 10618 } 10619 10620 static void nfs4_handle_delay_or_session_error(struct nfs_server *server, 10621 int err, struct nfs4_exception *exception) 10622 { 10623 exception->retry = 0; 10624 switch(err) { 10625 case -NFS4ERR_DELAY: 10626 case -NFS4ERR_RETRY_UNCACHED_REP: 10627 nfs4_handle_exception(server, err, exception); 10628 break; 10629 case -NFS4ERR_BADSESSION: 10630 case -NFS4ERR_BADSLOT: 10631 case 
-NFS4ERR_BAD_HIGH_SLOT: 10632 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 10633 case -NFS4ERR_DEADSESSION: 10634 nfs4_do_handle_exception(server, err, exception); 10635 } 10636 } 10637 10638 /** 10639 * nfs41_test_stateid - perform a TEST_STATEID operation 10640 * 10641 * @server: server / transport on which to perform the operation 10642 * @stateid: state ID to test 10643 * @cred: credential 10644 * 10645 * Returns NFS_OK if the server recognizes that "stateid" is valid. 10646 * Otherwise a negative NFS4ERR value is returned if the operation 10647 * failed or the state ID is not currently valid. 10648 */ 10649 static int nfs41_test_stateid(struct nfs_server *server, 10650 const nfs4_stateid *stateid, 10651 const struct cred *cred) 10652 { 10653 struct nfs4_exception exception = { 10654 .interruptible = true, 10655 }; 10656 int err; 10657 do { 10658 err = _nfs41_test_stateid(server, stateid, cred); 10659 nfs4_handle_delay_or_session_error(server, err, &exception); 10660 } while (exception.retry); 10661 return err; 10662 } 10663 10664 struct nfs_free_stateid_data { 10665 struct nfs_server *server; 10666 struct nfs41_free_stateid_args args; 10667 struct nfs41_free_stateid_res res; 10668 }; 10669 10670 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata) 10671 { 10672 struct nfs_free_stateid_data *data = calldata; 10673 nfs4_setup_sequence(data->server->nfs_client, 10674 &data->args.seq_args, 10675 &data->res.seq_res, 10676 task); 10677 } 10678 10679 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata) 10680 { 10681 struct nfs_free_stateid_data *data = calldata; 10682 10683 nfs41_sequence_done(task, &data->res.seq_res); 10684 10685 switch (task->tk_status) { 10686 case -NFS4ERR_DELAY: 10687 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN) 10688 rpc_restart_call_prepare(task); 10689 } 10690 } 10691 10692 static void nfs41_free_stateid_release(void *calldata) 10693 { 10694 struct nfs_free_stateid_data *data = calldata; 10695 struct nfs_client *clp = data->server->nfs_client; 10696 10697 nfs_put_client(clp); 10698 kfree(calldata); 10699 } 10700 10701 static const struct rpc_call_ops nfs41_free_stateid_ops = { 10702 .rpc_call_prepare = nfs41_free_stateid_prepare, 10703 .rpc_call_done = nfs41_free_stateid_done, 10704 .rpc_release = nfs41_free_stateid_release, 10705 }; 10706 10707 /** 10708 * nfs41_free_stateid - perform a FREE_STATEID operation 10709 * 10710 * @server: server / transport on which to perform the operation 10711 * @stateid: state ID to release 10712 * @cred: credential 10713 * @privileged: set to true if this call needs to be privileged 10714 * 10715 * Note: this function is always asynchronous. 
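 * The task is fired and forgotten: the stateid is marked
 * NFS4_FREED_STATEID_TYPE as soon as the FREE_STATEID call has been
 * launched, so callers should treat it as freed from then on.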
10716 */ 10717 static int nfs41_free_stateid(struct nfs_server *server, 10718 nfs4_stateid *stateid, 10719 const struct cred *cred, 10720 bool privileged) 10721 { 10722 struct rpc_message msg = { 10723 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID], 10724 .rpc_cred = cred, 10725 }; 10726 struct rpc_task_setup task_setup = { 10727 .rpc_client = server->client, 10728 .rpc_message = &msg, 10729 .callback_ops = &nfs41_free_stateid_ops, 10730 .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE, 10731 }; 10732 struct nfs_free_stateid_data *data; 10733 struct rpc_task *task; 10734 struct nfs_client *clp = server->nfs_client; 10735 10736 if (!refcount_inc_not_zero(&clp->cl_count)) 10737 return -EIO; 10738 10739 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 10740 &task_setup.rpc_client, &msg); 10741 10742 dprintk("NFS call free_stateid %p\n", stateid); 10743 data = kmalloc(sizeof(*data), GFP_KERNEL); 10744 if (!data) 10745 return -ENOMEM; 10746 data->server = server; 10747 nfs4_stateid_copy(&data->args.stateid, stateid); 10748 10749 task_setup.callback_data = data; 10750 10751 msg.rpc_argp = &data->args; 10752 msg.rpc_resp = &data->res; 10753 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, privileged); 10754 task = rpc_run_task(&task_setup); 10755 if (IS_ERR(task)) 10756 return PTR_ERR(task); 10757 rpc_put_task(task); 10758 stateid->type = NFS4_FREED_STATEID_TYPE; 10759 return 0; 10760 } 10761 10762 static void 10763 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp) 10764 { 10765 const struct cred *cred = lsp->ls_state->owner->so_cred; 10766 10767 nfs41_free_stateid(server, &lsp->ls_stateid, cred, false); 10768 nfs4_free_lock_state(server, lsp); 10769 } 10770 10771 static bool nfs41_match_stateid(const nfs4_stateid *s1, 10772 const nfs4_stateid *s2) 10773 { 10774 trace_nfs41_match_stateid(s1, s2); 10775 10776 if (s1->type != s2->type) 10777 return false; 10778 10779 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0) 10780 return false; 10781 10782 if (s1->seqid == s2->seqid) 10783 return true; 10784 10785 return s1->seqid == 0 || s2->seqid == 0; 10786 } 10787 10788 #endif /* CONFIG_NFS_V4_1 */ 10789 10790 static bool nfs4_match_stateid(const nfs4_stateid *s1, 10791 const nfs4_stateid *s2) 10792 { 10793 trace_nfs4_match_stateid(s1, s2); 10794 10795 return nfs4_stateid_match(s1, s2); 10796 } 10797 10798 10799 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = { 10800 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 10801 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 10802 .recover_open = nfs4_open_reclaim, 10803 .recover_lock = nfs4_lock_reclaim, 10804 .establish_clid = nfs4_init_clientid, 10805 .detect_trunking = nfs40_discover_server_trunking, 10806 }; 10807 10808 #if defined(CONFIG_NFS_V4_1) 10809 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { 10810 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 10811 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 10812 .recover_open = nfs4_open_reclaim, 10813 .recover_lock = nfs4_lock_reclaim, 10814 .establish_clid = nfs41_init_clientid, 10815 .reclaim_complete = nfs41_proc_reclaim_complete, 10816 .detect_trunking = nfs41_discover_server_trunking, 10817 }; 10818 #endif /* CONFIG_NFS_V4_1 */ 10819 10820 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { 10821 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 10822 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 10823 .recover_open = nfs40_open_expired, 10824 .recover_lock = nfs4_lock_expired, 
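	/* v4.0 re-establishes the clientid via SETCLIENTID/SETCLIENTID_CONFIRM */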
10825 .establish_clid = nfs4_init_clientid, 10826 }; 10827 10828 #if defined(CONFIG_NFS_V4_1) 10829 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = { 10830 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 10831 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 10832 .recover_open = nfs41_open_expired, 10833 .recover_lock = nfs41_lock_expired, 10834 .establish_clid = nfs41_init_clientid, 10835 }; 10836 #endif /* CONFIG_NFS_V4_1 */ 10837 10838 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = { 10839 .sched_state_renewal = nfs4_proc_async_renew, 10840 .get_state_renewal_cred = nfs4_get_renew_cred, 10841 .renew_lease = nfs4_proc_renew, 10842 }; 10843 10844 #if defined(CONFIG_NFS_V4_1) 10845 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = { 10846 .sched_state_renewal = nfs41_proc_async_sequence, 10847 .get_state_renewal_cred = nfs4_get_machine_cred, 10848 .renew_lease = nfs4_proc_sequence, 10849 }; 10850 #endif 10851 10852 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = { 10853 .get_locations = _nfs40_proc_get_locations, 10854 .fsid_present = _nfs40_proc_fsid_present, 10855 }; 10856 10857 #if defined(CONFIG_NFS_V4_1) 10858 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = { 10859 .get_locations = _nfs41_proc_get_locations, 10860 .fsid_present = _nfs41_proc_fsid_present, 10861 }; 10862 #endif /* CONFIG_NFS_V4_1 */ 10863 10864 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = { 10865 .minor_version = 0, 10866 .init_caps = NFS_CAP_READDIRPLUS 10867 | NFS_CAP_ATOMIC_OPEN 10868 | NFS_CAP_POSIX_LOCK, 10869 .init_client = nfs40_init_client, 10870 .shutdown_client = nfs40_shutdown_client, 10871 .match_stateid = nfs4_match_stateid, 10872 .find_root_sec = nfs4_find_root_sec, 10873 .free_lock_state = nfs4_release_lockowner, 10874 .test_and_free_expired = nfs40_test_and_free_expired_stateid, 10875 .alloc_seqid = nfs_alloc_seqid, 10876 .call_sync_ops = &nfs40_call_sync_ops, 10877 .reboot_recovery_ops = &nfs40_reboot_recovery_ops, 10878 .nograce_recovery_ops = &nfs40_nograce_recovery_ops, 10879 .state_renewal_ops = &nfs40_state_renewal_ops, 10880 .mig_recovery_ops = &nfs40_mig_recovery_ops, 10881 }; 10882 10883 #if defined(CONFIG_NFS_V4_1) 10884 static struct nfs_seqid * 10885 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2) 10886 { 10887 return NULL; 10888 } 10889 10890 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { 10891 .minor_version = 1, 10892 .init_caps = NFS_CAP_READDIRPLUS 10893 | NFS_CAP_ATOMIC_OPEN 10894 | NFS_CAP_DIR_DELEG 10895 | NFS_CAP_POSIX_LOCK 10896 | NFS_CAP_STATEID_NFSV41 10897 | NFS_CAP_ATOMIC_OPEN_V1 10898 | NFS_CAP_LGOPEN 10899 | NFS_CAP_MOVEABLE, 10900 .init_client = nfs41_init_client, 10901 .shutdown_client = nfs41_shutdown_client, 10902 .match_stateid = nfs41_match_stateid, 10903 .find_root_sec = nfs41_find_root_sec, 10904 .free_lock_state = nfs41_free_lock_state, 10905 .test_and_free_expired = nfs41_test_and_free_expired_stateid, 10906 .alloc_seqid = nfs_alloc_no_seqid, 10907 .session_trunk = nfs4_test_session_trunk, 10908 .call_sync_ops = &nfs41_call_sync_ops, 10909 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 10910 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 10911 .state_renewal_ops = &nfs41_state_renewal_ops, 10912 .mig_recovery_ops = &nfs41_mig_recovery_ops, 10913 }; 10914 #endif 10915 10916 #if defined(CONFIG_NFS_V4_2) 10917 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = { 10918 .minor_version = 2, 
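	/*
	 * v4.2 reuses the v4.1 session, state-recovery and trunking
	 * machinery unchanged (same init_client and recovery/renewal ops
	 * below); only init_caps grows with the v4.2 operations such as
	 * ALLOCATE, COPY, SEEK and READ_PLUS.
	 */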
10919 .init_caps = NFS_CAP_READDIRPLUS 10920 | NFS_CAP_ATOMIC_OPEN 10921 | NFS_CAP_DIR_DELEG 10922 | NFS_CAP_POSIX_LOCK 10923 | NFS_CAP_STATEID_NFSV41 10924 | NFS_CAP_ATOMIC_OPEN_V1 10925 | NFS_CAP_LGOPEN 10926 | NFS_CAP_ALLOCATE 10927 | NFS_CAP_COPY 10928 | NFS_CAP_OFFLOAD_CANCEL 10929 | NFS_CAP_COPY_NOTIFY 10930 | NFS_CAP_DEALLOCATE 10931 | NFS_CAP_ZERO_RANGE 10932 | NFS_CAP_SEEK 10933 | NFS_CAP_LAYOUTSTATS 10934 | NFS_CAP_CLONE 10935 | NFS_CAP_LAYOUTERROR 10936 | NFS_CAP_READ_PLUS 10937 | NFS_CAP_MOVEABLE 10938 | NFS_CAP_OFFLOAD_STATUS, 10939 .init_client = nfs41_init_client, 10940 .shutdown_client = nfs41_shutdown_client, 10941 .match_stateid = nfs41_match_stateid, 10942 .find_root_sec = nfs41_find_root_sec, 10943 .free_lock_state = nfs41_free_lock_state, 10944 .call_sync_ops = &nfs41_call_sync_ops, 10945 .test_and_free_expired = nfs41_test_and_free_expired_stateid, 10946 .alloc_seqid = nfs_alloc_no_seqid, 10947 .session_trunk = nfs4_test_session_trunk, 10948 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 10949 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 10950 .state_renewal_ops = &nfs41_state_renewal_ops, 10951 .mig_recovery_ops = &nfs41_mig_recovery_ops, 10952 }; 10953 #endif 10954 10955 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = { 10956 [0] = &nfs_v4_0_minor_ops, 10957 #if defined(CONFIG_NFS_V4_1) 10958 [1] = &nfs_v4_1_minor_ops, 10959 #endif 10960 #if defined(CONFIG_NFS_V4_2) 10961 [2] = &nfs_v4_2_minor_ops, 10962 #endif 10963 }; 10964 10965 static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size) 10966 { 10967 ssize_t error, error2, error3, error4 = 0; 10968 size_t left = size; 10969 10970 error = generic_listxattr(dentry, list, left); 10971 if (error < 0) 10972 return error; 10973 if (list) { 10974 list += error; 10975 left -= error; 10976 } 10977 10978 error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, left); 10979 if (error2 < 0) 10980 return error2; 10981 10982 if (list) { 10983 list += error2; 10984 left -= error2; 10985 } 10986 10987 error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, left); 10988 if (error3 < 0) 10989 return error3; 10990 if (list) { 10991 list += error3; 10992 left -= error3; 10993 } 10994 10995 if (!nfs_server_capable(d_inode(dentry), NFS_CAP_SECURITY_LABEL)) { 10996 error4 = security_inode_listsecurity(d_inode(dentry), list, left); 10997 if (error4 < 0) 10998 return error4; 10999 } 11000 11001 error += error2 + error3 + error4; 11002 if (size && error > size) 11003 return -ERANGE; 11004 return error; 11005 } 11006 11007 static void nfs4_enable_swap(struct inode *inode) 11008 { 11009 /* The state manager thread must always be running. 11010 * It will notice the client is a swapper, and stay put. 11011 */ 11012 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 11013 11014 nfs4_schedule_state_manager(clp); 11015 } 11016 11017 static void nfs4_disable_swap(struct inode *inode) 11018 { 11019 /* The state manager thread will now exit once it is 11020 * woken. 
11021 */ 11022 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 11023 11024 set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state); 11025 clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state); 11026 wake_up_var(&clp->cl_state); 11027 } 11028 11029 static const struct inode_operations nfs4_dir_inode_operations = { 11030 .create = nfs_create, 11031 .lookup = nfs_lookup, 11032 .atomic_open = nfs_atomic_open, 11033 .link = nfs_link, 11034 .unlink = nfs_unlink, 11035 .symlink = nfs_symlink, 11036 .mkdir = nfs_mkdir, 11037 .rmdir = nfs_rmdir, 11038 .mknod = nfs_mknod, 11039 .rename = nfs_rename, 11040 .permission = nfs_permission, 11041 .getattr = nfs_getattr, 11042 .setattr = nfs_setattr, 11043 .listxattr = nfs4_listxattr, 11044 }; 11045 11046 static const struct inode_operations nfs4_file_inode_operations = { 11047 .permission = nfs_permission, 11048 .getattr = nfs_getattr, 11049 .setattr = nfs_setattr, 11050 .listxattr = nfs4_listxattr, 11051 }; 11052 11053 static struct nfs_server *nfs4_clone_server(struct nfs_server *source, 11054 struct nfs_fh *fh, struct nfs_fattr *fattr, 11055 rpc_authflavor_t flavor) 11056 { 11057 struct nfs_server *server; 11058 int error; 11059 11060 server = nfs_clone_server(source, fh, fattr, flavor); 11061 if (IS_ERR(server)) 11062 return server; 11063 11064 error = nfs4_delegation_hash_alloc(server); 11065 if (error) { 11066 nfs_free_server(server); 11067 return ERR_PTR(error); 11068 } 11069 11070 return server; 11071 } 11072 11073 const struct nfs_rpc_ops nfs_v4_clientops = { 11074 .version = 4, /* protocol version */ 11075 .dentry_ops = &nfs4_dentry_operations, 11076 .dir_inode_ops = &nfs4_dir_inode_operations, 11077 .file_inode_ops = &nfs4_file_inode_operations, 11078 .file_ops = &nfs4_file_operations, 11079 .getroot = nfs4_proc_get_root, 11080 .submount = nfs4_submount, 11081 .try_get_tree = nfs4_try_get_tree, 11082 .getattr = nfs4_proc_getattr, 11083 .setattr = nfs4_proc_setattr, 11084 .lookup = nfs4_proc_lookup, 11085 .lookupp = nfs4_proc_lookupp, 11086 .access = nfs4_proc_access, 11087 .readlink = nfs4_proc_readlink, 11088 .create = nfs4_proc_create, 11089 .remove = nfs4_proc_remove, 11090 .unlink_setup = nfs4_proc_unlink_setup, 11091 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare, 11092 .unlink_done = nfs4_proc_unlink_done, 11093 .rename_setup = nfs4_proc_rename_setup, 11094 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare, 11095 .rename_done = nfs4_proc_rename_done, 11096 .link = nfs4_proc_link, 11097 .symlink = nfs4_proc_symlink, 11098 .mkdir = nfs4_proc_mkdir, 11099 .rmdir = nfs4_proc_rmdir, 11100 .readdir = nfs4_proc_readdir, 11101 .mknod = nfs4_proc_mknod, 11102 .statfs = nfs4_proc_statfs, 11103 .fsinfo = nfs4_proc_fsinfo, 11104 .pathconf = nfs4_proc_pathconf, 11105 .set_capabilities = nfs4_server_capabilities, 11106 .decode_dirent = nfs4_decode_dirent, 11107 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare, 11108 .read_setup = nfs4_proc_read_setup, 11109 .read_done = nfs4_read_done, 11110 .write_setup = nfs4_proc_write_setup, 11111 .write_done = nfs4_write_done, 11112 .commit_setup = nfs4_proc_commit_setup, 11113 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare, 11114 .commit_done = nfs4_commit_done, 11115 .lock = nfs4_proc_lock, 11116 .clear_acl_cache = nfs4_zap_acl_attr, 11117 .close_context = nfs4_close_context, 11118 .open_context = nfs4_atomic_open, 11119 .have_delegation = nfs4_have_delegation, 11120 .return_delegation = nfs4_inode_return_delegation, 11121 .alloc_client = nfs4_alloc_client, 11122 .init_client = nfs4_init_client, 11123 
.free_client = nfs4_free_client, 11124 .create_server = nfs4_create_server, 11125 .clone_server = nfs4_clone_server, 11126 .discover_trunking = nfs4_discover_trunking, 11127 .enable_swap = nfs4_enable_swap, 11128 .disable_swap = nfs4_disable_swap, 11129 }; 11130 11131 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { 11132 .name = XATTR_NAME_NFSV4_ACL, 11133 .list = nfs4_xattr_list_nfs4_acl, 11134 .get = nfs4_xattr_get_nfs4_acl, 11135 .set = nfs4_xattr_set_nfs4_acl, 11136 }; 11137 11138 #if defined(CONFIG_NFS_V4_1) 11139 static const struct xattr_handler nfs4_xattr_nfs4_dacl_handler = { 11140 .name = XATTR_NAME_NFSV4_DACL, 11141 .list = nfs4_xattr_list_nfs4_dacl, 11142 .get = nfs4_xattr_get_nfs4_dacl, 11143 .set = nfs4_xattr_set_nfs4_dacl, 11144 }; 11145 11146 static const struct xattr_handler nfs4_xattr_nfs4_sacl_handler = { 11147 .name = XATTR_NAME_NFSV4_SACL, 11148 .list = nfs4_xattr_list_nfs4_sacl, 11149 .get = nfs4_xattr_get_nfs4_sacl, 11150 .set = nfs4_xattr_set_nfs4_sacl, 11151 }; 11152 #endif 11153 11154 #ifdef CONFIG_NFS_V4_2 11155 static const struct xattr_handler nfs4_xattr_nfs4_user_handler = { 11156 .prefix = XATTR_USER_PREFIX, 11157 .get = nfs4_xattr_get_nfs4_user, 11158 .set = nfs4_xattr_set_nfs4_user, 11159 }; 11160 #endif 11161 11162 const struct xattr_handler * const nfs4_xattr_handlers[] = { 11163 &nfs4_xattr_nfs4_acl_handler, 11164 #if defined(CONFIG_NFS_V4_1) 11165 &nfs4_xattr_nfs4_dacl_handler, 11166 &nfs4_xattr_nfs4_sacl_handler, 11167 #endif 11168 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 11169 &nfs4_xattr_nfs4_label_handler, 11170 #endif 11171 #ifdef CONFIG_NFS_V4_2 11172 &nfs4_xattr_nfs4_user_handler, 11173 #endif 11174 NULL 11175 }; 11176
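
/*
 * Illustrative sketch (not part of this file): nfs_v4_minor_ops[] above is
 * indexed by the minor version requested at mount time; a hypothetical
 * lookup helper would amount to
 *
 *	static const struct nfs4_minor_version_ops *
 *	pick_minor_version_ops(unsigned int minorversion)
 *	{
 *		if (minorversion >= ARRAY_SIZE(nfs_v4_minor_ops))
 *			return NULL;
 *		return nfs_v4_minor_ops[minorversion];
 *	}
 *
 * The actual assignment of clp->cl_mvops happens during nfs_client setup,
 * outside this file.
 */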