1 /* 2 * fs/nfs/nfs4proc.c 3 * 4 * Client-side procedure declarations for NFSv4. 5 * 6 * Copyright (c) 2002 The Regents of the University of Michigan. 7 * All rights reserved. 8 * 9 * Kendrick Smith <kmsmith@umich.edu> 10 * Andy Adamson <andros@umich.edu> 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. Neither the name of the University nor the names of its 22 * contributors may be used to endorse or promote products derived 23 * from this software without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38 #include <linux/mm.h> 39 #include <linux/delay.h> 40 #include <linux/errno.h> 41 #include <linux/string.h> 42 #include <linux/ratelimit.h> 43 #include <linux/printk.h> 44 #include <linux/slab.h> 45 #include <linux/sunrpc/clnt.h> 46 #include <linux/nfs.h> 47 #include <linux/nfs4.h> 48 #include <linux/nfs_fs.h> 49 #include <linux/nfs_page.h> 50 #include <linux/nfs_mount.h> 51 #include <linux/namei.h> 52 #include <linux/mount.h> 53 #include <linux/module.h> 54 #include <linux/xattr.h> 55 #include <linux/utsname.h> 56 #include <linux/freezer.h> 57 #include <linux/iversion.h> 58 59 #include "nfs4_fs.h" 60 #include "delegation.h" 61 #include "internal.h" 62 #include "iostat.h" 63 #include "callback.h" 64 #include "pnfs.h" 65 #include "netns.h" 66 #include "sysfs.h" 67 #include "nfs4idmap.h" 68 #include "nfs4session.h" 69 #include "fscache.h" 70 #include "nfs42.h" 71 72 #include "nfs4trace.h" 73 74 #define NFSDBG_FACILITY NFSDBG_PROC 75 76 #define NFS4_BITMASK_SZ 3 77 78 #define NFS4_POLL_RETRY_MIN (HZ/10) 79 #define NFS4_POLL_RETRY_MAX (15*HZ) 80 81 /* file attributes which can be mapped to nfs attributes */ 82 #define NFS4_VALID_ATTRS (ATTR_MODE \ 83 | ATTR_UID \ 84 | ATTR_GID \ 85 | ATTR_SIZE \ 86 | ATTR_ATIME \ 87 | ATTR_MTIME \ 88 | ATTR_CTIME \ 89 | ATTR_ATIME_SET \ 90 | ATTR_MTIME_SET) 91 92 struct nfs4_opendata; 93 static int _nfs4_recover_proc_open(struct nfs4_opendata *data); 94 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); 95 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr); 96 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, 97 struct nfs_fattr *fattr, struct inode *inode); 98 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred, 99 struct nfs_fattr 
*fattr, struct iattr *sattr, 100 struct nfs_open_context *ctx, struct nfs4_label *ilabel); 101 #ifdef CONFIG_NFS_V4_1 102 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, 103 const struct cred *cred, 104 struct nfs4_slot *slot, 105 bool is_privileged); 106 static int nfs41_test_stateid(struct nfs_server *, const nfs4_stateid *, 107 const struct cred *); 108 static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *, 109 const struct cred *, bool); 110 #endif 111 112 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 113 static inline struct nfs4_label * 114 nfs4_label_init_security(struct inode *dir, struct dentry *dentry, 115 struct iattr *sattr, struct nfs4_label *label) 116 { 117 struct lsm_context shim; 118 int err; 119 120 if (label == NULL) 121 return NULL; 122 123 if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0) 124 return NULL; 125 126 label->lfs = 0; 127 label->pi = 0; 128 label->len = 0; 129 label->label = NULL; 130 131 err = security_dentry_init_security(dentry, sattr->ia_mode, 132 &dentry->d_name, NULL, &shim); 133 if (err) 134 return NULL; 135 136 label->lsmid = shim.id; 137 label->label = shim.context; 138 label->len = shim.len; 139 return label; 140 } 141 static inline void 142 nfs4_label_release_security(struct nfs4_label *label) 143 { 144 struct lsm_context shim; 145 146 if (label) { 147 shim.context = label->label; 148 shim.len = label->len; 149 shim.id = label->lsmid; 150 security_release_secctx(&shim); 151 } 152 } 153 static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label) 154 { 155 if (label) 156 return server->attr_bitmask; 157 158 return server->attr_bitmask_nl; 159 } 160 #else 161 static inline struct nfs4_label * 162 nfs4_label_init_security(struct inode *dir, struct dentry *dentry, 163 struct iattr *sattr, struct nfs4_label *l) 164 { return NULL; } 165 static inline void 166 nfs4_label_release_security(struct nfs4_label *label) 167 { return; } 168 static inline u32 * 169 nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label) 170 { return server->attr_bitmask; } 171 #endif 172 173 /* Prevent leaks of NFSv4 errors into userland */ 174 static int nfs4_map_errors(int err) 175 { 176 if (err >= -1000) 177 return err; 178 switch (err) { 179 case -NFS4ERR_RESOURCE: 180 case -NFS4ERR_LAYOUTTRYLATER: 181 case -NFS4ERR_RECALLCONFLICT: 182 case -NFS4ERR_RETURNCONFLICT: 183 return -EREMOTEIO; 184 case -NFS4ERR_WRONGSEC: 185 case -NFS4ERR_WRONG_CRED: 186 return -EPERM; 187 case -NFS4ERR_BADOWNER: 188 case -NFS4ERR_BADNAME: 189 return -EINVAL; 190 case -NFS4ERR_SHARE_DENIED: 191 return -EACCES; 192 case -NFS4ERR_MINOR_VERS_MISMATCH: 193 return -EPROTONOSUPPORT; 194 case -NFS4ERR_FILE_OPEN: 195 return -EBUSY; 196 case -NFS4ERR_NOT_SAME: 197 return -ENOTSYNC; 198 case -ENETDOWN: 199 case -ENETUNREACH: 200 break; 201 default: 202 dprintk("%s could not handle NFSv4 error %d\n", 203 __func__, -err); 204 break; 205 } 206 return -EIO; 207 } 208 209 /* 210 * This is our standard bitmap for GETATTR requests. 
211 */ 212 const u32 nfs4_fattr_bitmap[3] = { 213 FATTR4_WORD0_TYPE 214 | FATTR4_WORD0_CHANGE 215 | FATTR4_WORD0_SIZE 216 | FATTR4_WORD0_FSID 217 | FATTR4_WORD0_FILEID, 218 FATTR4_WORD1_MODE 219 | FATTR4_WORD1_NUMLINKS 220 | FATTR4_WORD1_OWNER 221 | FATTR4_WORD1_OWNER_GROUP 222 | FATTR4_WORD1_RAWDEV 223 | FATTR4_WORD1_SPACE_USED 224 | FATTR4_WORD1_TIME_ACCESS 225 | FATTR4_WORD1_TIME_CREATE 226 | FATTR4_WORD1_TIME_METADATA 227 | FATTR4_WORD1_TIME_MODIFY 228 | FATTR4_WORD1_MOUNTED_ON_FILEID, 229 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 230 FATTR4_WORD2_SECURITY_LABEL 231 #endif 232 }; 233 234 static const u32 nfs4_pnfs_open_bitmap[3] = { 235 FATTR4_WORD0_TYPE 236 | FATTR4_WORD0_CHANGE 237 | FATTR4_WORD0_SIZE 238 | FATTR4_WORD0_FSID 239 | FATTR4_WORD0_FILEID, 240 FATTR4_WORD1_MODE 241 | FATTR4_WORD1_NUMLINKS 242 | FATTR4_WORD1_OWNER 243 | FATTR4_WORD1_OWNER_GROUP 244 | FATTR4_WORD1_RAWDEV 245 | FATTR4_WORD1_SPACE_USED 246 | FATTR4_WORD1_TIME_ACCESS 247 | FATTR4_WORD1_TIME_CREATE 248 | FATTR4_WORD1_TIME_METADATA 249 | FATTR4_WORD1_TIME_MODIFY, 250 FATTR4_WORD2_MDSTHRESHOLD 251 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 252 | FATTR4_WORD2_SECURITY_LABEL 253 #endif 254 }; 255 256 static const u32 nfs4_open_noattr_bitmap[3] = { 257 FATTR4_WORD0_TYPE 258 | FATTR4_WORD0_FILEID, 259 }; 260 261 const u32 nfs4_statfs_bitmap[3] = { 262 FATTR4_WORD0_FILES_AVAIL 263 | FATTR4_WORD0_FILES_FREE 264 | FATTR4_WORD0_FILES_TOTAL, 265 FATTR4_WORD1_SPACE_AVAIL 266 | FATTR4_WORD1_SPACE_FREE 267 | FATTR4_WORD1_SPACE_TOTAL 268 }; 269 270 const u32 nfs4_pathconf_bitmap[3] = { 271 FATTR4_WORD0_MAXLINK 272 | FATTR4_WORD0_MAXNAME, 273 0 274 }; 275 276 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE 277 | FATTR4_WORD0_MAXREAD 278 | FATTR4_WORD0_MAXWRITE 279 | FATTR4_WORD0_LEASE_TIME, 280 FATTR4_WORD1_TIME_DELTA 281 | FATTR4_WORD1_FS_LAYOUT_TYPES, 282 FATTR4_WORD2_LAYOUT_BLKSIZE 283 | FATTR4_WORD2_CLONE_BLKSIZE 284 | FATTR4_WORD2_CHANGE_ATTR_TYPE 285 | FATTR4_WORD2_XATTR_SUPPORT 286 }; 287 288 const u32 nfs4_fs_locations_bitmap[3] = { 289 FATTR4_WORD0_CHANGE 290 | FATTR4_WORD0_SIZE 291 | FATTR4_WORD0_FSID 292 | FATTR4_WORD0_FILEID 293 | FATTR4_WORD0_FS_LOCATIONS, 294 FATTR4_WORD1_OWNER 295 | FATTR4_WORD1_OWNER_GROUP 296 | FATTR4_WORD1_RAWDEV 297 | FATTR4_WORD1_SPACE_USED 298 | FATTR4_WORD1_TIME_ACCESS 299 | FATTR4_WORD1_TIME_METADATA 300 | FATTR4_WORD1_TIME_MODIFY 301 | FATTR4_WORD1_MOUNTED_ON_FILEID, 302 }; 303 304 static void nfs4_bitmap_copy_adjust(__u32 *dst, const __u32 *src, 305 struct inode *inode, unsigned long flags) 306 { 307 unsigned long cache_validity; 308 309 memcpy(dst, src, NFS4_BITMASK_SZ*sizeof(*dst)); 310 if (!inode || !nfs_have_read_or_write_delegation(inode)) 311 return; 312 313 cache_validity = READ_ONCE(NFS_I(inode)->cache_validity) | flags; 314 315 /* Remove the attributes over which we have full control */ 316 dst[1] &= ~FATTR4_WORD1_RAWDEV; 317 if (!(cache_validity & NFS_INO_INVALID_SIZE)) 318 dst[0] &= ~FATTR4_WORD0_SIZE; 319 320 if (!(cache_validity & NFS_INO_INVALID_CHANGE)) 321 dst[0] &= ~FATTR4_WORD0_CHANGE; 322 323 if (!(cache_validity & NFS_INO_INVALID_MODE)) 324 dst[1] &= ~FATTR4_WORD1_MODE; 325 if (!(cache_validity & NFS_INO_INVALID_OTHER)) 326 dst[1] &= ~(FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP); 327 328 if (!(cache_validity & NFS_INO_INVALID_BTIME)) 329 dst[1] &= ~FATTR4_WORD1_TIME_CREATE; 330 331 if (nfs_have_delegated_mtime(inode)) { 332 if (!(cache_validity & NFS_INO_INVALID_ATIME)) 333 dst[1] &= ~(FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET); 334 if 
(!(cache_validity & NFS_INO_INVALID_MTIME)) 335 dst[1] &= ~(FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET); 336 if (!(cache_validity & NFS_INO_INVALID_CTIME)) 337 dst[1] &= ~(FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY_SET); 338 } else if (nfs_have_delegated_atime(inode)) { 339 if (!(cache_validity & NFS_INO_INVALID_ATIME)) 340 dst[1] &= ~(FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET); 341 } 342 } 343 344 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry, 345 struct nfs4_readdir_arg *readdir) 346 { 347 unsigned int attrs = FATTR4_WORD0_FILEID | FATTR4_WORD0_TYPE; 348 __be32 *start, *p; 349 350 if (cookie > 2) { 351 readdir->cookie = cookie; 352 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier)); 353 return; 354 } 355 356 readdir->cookie = 0; 357 memset(&readdir->verifier, 0, sizeof(readdir->verifier)); 358 if (cookie == 2) 359 return; 360 361 /* 362 * NFSv4 servers do not return entries for '.' and '..' 363 * Therefore, we fake these entries here. We let '.' 364 * have cookie 0 and '..' have cookie 1. Note that 365 * when talking to the server, we always send cookie 0 366 * instead of 1 or 2. 367 */ 368 start = p = kmap_atomic(*readdir->pages); 369 370 if (cookie == 0) { 371 *p++ = xdr_one; /* next */ 372 *p++ = xdr_zero; /* cookie, first word */ 373 *p++ = xdr_one; /* cookie, second word */ 374 *p++ = xdr_one; /* entry len */ 375 memcpy(p, ".\0\0\0", 4); /* entry */ 376 p++; 377 *p++ = xdr_one; /* bitmap length */ 378 *p++ = htonl(attrs); /* bitmap */ 379 *p++ = htonl(12); /* attribute buffer length */ 380 *p++ = htonl(NF4DIR); 381 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry))); 382 } 383 384 *p++ = xdr_one; /* next */ 385 *p++ = xdr_zero; /* cookie, first word */ 386 *p++ = xdr_two; /* cookie, second word */ 387 *p++ = xdr_two; /* entry len */ 388 memcpy(p, "..\0\0", 4); /* entry */ 389 p++; 390 *p++ = xdr_one; /* bitmap length */ 391 *p++ = htonl(attrs); /* bitmap */ 392 *p++ = htonl(12); /* attribute buffer length */ 393 *p++ = htonl(NF4DIR); 394 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent))); 395 396 readdir->pgbase = (char *)p - (char *)start; 397 readdir->count -= readdir->pgbase; 398 kunmap_atomic(start); 399 } 400 401 static void nfs4_fattr_set_prechange(struct nfs_fattr *fattr, u64 version) 402 { 403 if (!(fattr->valid & NFS_ATTR_FATTR_PRECHANGE)) { 404 fattr->pre_change_attr = version; 405 fattr->valid |= NFS_ATTR_FATTR_PRECHANGE; 406 } 407 } 408 409 static void nfs4_test_and_free_stateid(struct nfs_server *server, 410 nfs4_stateid *stateid, 411 const struct cred *cred) 412 { 413 const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops; 414 415 ops->test_and_free_expired(server, stateid, cred); 416 } 417 418 static void __nfs4_free_revoked_stateid(struct nfs_server *server, 419 nfs4_stateid *stateid, 420 const struct cred *cred) 421 { 422 stateid->type = NFS4_REVOKED_STATEID_TYPE; 423 nfs4_test_and_free_stateid(server, stateid, cred); 424 } 425 426 static void nfs4_free_revoked_stateid(struct nfs_server *server, 427 const nfs4_stateid *stateid, 428 const struct cred *cred) 429 { 430 nfs4_stateid tmp; 431 432 nfs4_stateid_copy(&tmp, stateid); 433 __nfs4_free_revoked_stateid(server, &tmp, cred); 434 } 435 436 static long nfs4_update_delay(long *timeout) 437 { 438 long ret; 439 if (!timeout) 440 return NFS4_POLL_RETRY_MAX; 441 if (*timeout <= 0) 442 *timeout = NFS4_POLL_RETRY_MIN; 443 if (*timeout > NFS4_POLL_RETRY_MAX) 444 *timeout = NFS4_POLL_RETRY_MAX; 445 ret = *timeout; 
446 *timeout <<= 1; 447 return ret; 448 } 449 450 static int nfs4_delay_killable(long *timeout) 451 { 452 might_sleep(); 453 454 if (unlikely(nfs_current_task_exiting())) 455 return -EINTR; 456 __set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE); 457 schedule_timeout(nfs4_update_delay(timeout)); 458 if (!__fatal_signal_pending(current)) 459 return 0; 460 return -EINTR; 461 } 462 463 static int nfs4_delay_interruptible(long *timeout) 464 { 465 might_sleep(); 466 467 if (unlikely(nfs_current_task_exiting())) 468 return -EINTR; 469 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE_UNSAFE); 470 schedule_timeout(nfs4_update_delay(timeout)); 471 if (!signal_pending(current)) 472 return 0; 473 return __fatal_signal_pending(current) ? -EINTR :-ERESTARTSYS; 474 } 475 476 static int nfs4_delay(long *timeout, bool interruptible) 477 { 478 if (interruptible) 479 return nfs4_delay_interruptible(timeout); 480 return nfs4_delay_killable(timeout); 481 } 482 483 static const nfs4_stateid * 484 nfs4_recoverable_stateid(const nfs4_stateid *stateid) 485 { 486 if (!stateid) 487 return NULL; 488 switch (stateid->type) { 489 case NFS4_OPEN_STATEID_TYPE: 490 case NFS4_LOCK_STATEID_TYPE: 491 case NFS4_DELEGATION_STATEID_TYPE: 492 return stateid; 493 default: 494 break; 495 } 496 return NULL; 497 } 498 499 /* This is the error handling routine for processes that are allowed 500 * to sleep. 501 */ 502 static int nfs4_do_handle_exception(struct nfs_server *server, 503 int errorcode, struct nfs4_exception *exception) 504 { 505 struct nfs_client *clp = server->nfs_client; 506 struct nfs4_state *state = exception->state; 507 const nfs4_stateid *stateid; 508 struct inode *inode = exception->inode; 509 int ret = errorcode; 510 511 exception->delay = 0; 512 exception->recovering = 0; 513 exception->retry = 0; 514 515 stateid = nfs4_recoverable_stateid(exception->stateid); 516 if (stateid == NULL && state != NULL) 517 stateid = nfs4_recoverable_stateid(&state->stateid); 518 519 switch(errorcode) { 520 case 0: 521 return 0; 522 case -NFS4ERR_BADHANDLE: 523 case -ESTALE: 524 if (inode != NULL && S_ISREG(inode->i_mode)) 525 pnfs_destroy_layout(NFS_I(inode)); 526 break; 527 case -NFS4ERR_DELEG_REVOKED: 528 case -NFS4ERR_ADMIN_REVOKED: 529 case -NFS4ERR_EXPIRED: 530 case -NFS4ERR_BAD_STATEID: 531 case -NFS4ERR_PARTNER_NO_AUTH: 532 if (inode != NULL && stateid != NULL) { 533 nfs_inode_find_state_and_recover(inode, 534 stateid); 535 goto wait_on_recovery; 536 } 537 fallthrough; 538 case -NFS4ERR_OPENMODE: 539 if (inode) { 540 int err; 541 542 err = nfs_async_inode_return_delegation(inode, 543 stateid); 544 if (err == 0) 545 goto wait_on_recovery; 546 if (stateid != NULL && stateid->type == NFS4_DELEGATION_STATEID_TYPE) { 547 exception->retry = 1; 548 break; 549 } 550 } 551 if (state == NULL) 552 break; 553 ret = nfs4_schedule_stateid_recovery(server, state); 554 if (ret < 0) 555 break; 556 goto wait_on_recovery; 557 case -NFS4ERR_STALE_STATEID: 558 case -NFS4ERR_STALE_CLIENTID: 559 nfs4_schedule_lease_recovery(clp); 560 goto wait_on_recovery; 561 case -NFS4ERR_MOVED: 562 ret = nfs4_schedule_migration_recovery(server); 563 if (ret < 0) 564 break; 565 goto wait_on_recovery; 566 case -NFS4ERR_LEASE_MOVED: 567 nfs4_schedule_lease_moved_recovery(clp); 568 goto wait_on_recovery; 569 #if defined(CONFIG_NFS_V4_1) 570 case -NFS4ERR_BADSESSION: 571 case -NFS4ERR_BADSLOT: 572 case -NFS4ERR_BAD_HIGH_SLOT: 573 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 574 case -NFS4ERR_DEADSESSION: 575 case -NFS4ERR_SEQ_FALSE_RETRY: 576 case 
-NFS4ERR_SEQ_MISORDERED: 577 /* Handled in nfs41_sequence_process() */ 578 goto wait_on_recovery; 579 #endif /* defined(CONFIG_NFS_V4_1) */ 580 case -NFS4ERR_FILE_OPEN: 581 if (exception->timeout > HZ) { 582 /* We have retried a decent amount, time to 583 * fail 584 */ 585 ret = -EBUSY; 586 break; 587 } 588 fallthrough; 589 case -NFS4ERR_DELAY: 590 nfs_inc_server_stats(server, NFSIOS_DELAY); 591 fallthrough; 592 case -NFS4ERR_GRACE: 593 case -NFS4ERR_LAYOUTTRYLATER: 594 case -NFS4ERR_RECALLCONFLICT: 595 case -NFS4ERR_RETURNCONFLICT: 596 exception->delay = 1; 597 return 0; 598 599 case -NFS4ERR_RETRY_UNCACHED_REP: 600 case -NFS4ERR_OLD_STATEID: 601 exception->retry = 1; 602 break; 603 case -NFS4ERR_BADOWNER: 604 /* The following works around a Linux server bug! */ 605 case -NFS4ERR_BADNAME: 606 if (server->caps & NFS_CAP_UIDGID_NOMAP) { 607 server->caps &= ~NFS_CAP_UIDGID_NOMAP; 608 exception->retry = 1; 609 printk(KERN_WARNING "NFS: v4 server %s " 610 "does not accept raw " 611 "uid/gids. " 612 "Reenabling the idmapper.\n", 613 server->nfs_client->cl_hostname); 614 } 615 } 616 /* We failed to handle the error */ 617 return nfs4_map_errors(ret); 618 wait_on_recovery: 619 exception->recovering = 1; 620 return 0; 621 } 622 623 /* 624 * Track the number of NFS4ERR_DELAY related retransmissions and return 625 * EAGAIN if the 'softerr' mount option is set, and we've exceeded the limit 626 * set by 'nfs_delay_retrans'. 627 */ 628 static int nfs4_exception_should_retrans(const struct nfs_server *server, 629 struct nfs4_exception *exception) 630 { 631 if (server->flags & NFS_MOUNT_SOFTERR && nfs_delay_retrans >= 0) { 632 if (exception->retrans++ >= (unsigned short)nfs_delay_retrans) 633 return -EAGAIN; 634 } 635 return 0; 636 } 637 638 /* This is the error handling routine for processes that are allowed 639 * to sleep. 
640 */ 641 int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception) 642 { 643 struct nfs_client *clp = server->nfs_client; 644 int ret; 645 646 ret = nfs4_do_handle_exception(server, errorcode, exception); 647 if (exception->delay) { 648 int ret2 = nfs4_exception_should_retrans(server, exception); 649 if (ret2 < 0) { 650 exception->retry = 0; 651 return ret2; 652 } 653 ret = nfs4_delay(&exception->timeout, 654 exception->interruptible); 655 goto out_retry; 656 } 657 if (exception->recovering) { 658 if (exception->task_is_privileged) 659 return -EDEADLOCK; 660 ret = nfs4_wait_clnt_recover(clp); 661 if (test_bit(NFS_MIG_FAILED, &server->mig_status)) 662 return -EIO; 663 goto out_retry; 664 } 665 return ret; 666 out_retry: 667 if (ret == 0) 668 exception->retry = 1; 669 return ret; 670 } 671 672 static int 673 nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server, 674 int errorcode, struct nfs4_exception *exception) 675 { 676 struct nfs_client *clp = server->nfs_client; 677 int ret; 678 679 if ((task->tk_rpc_status == -ENETDOWN || 680 task->tk_rpc_status == -ENETUNREACH) && 681 task->tk_flags & RPC_TASK_NETUNREACH_FATAL) { 682 exception->delay = 0; 683 exception->recovering = 0; 684 exception->retry = 0; 685 return -EIO; 686 } 687 688 ret = nfs4_do_handle_exception(server, errorcode, exception); 689 if (exception->delay) { 690 int ret2 = nfs4_exception_should_retrans(server, exception); 691 if (ret2 < 0) { 692 exception->retry = 0; 693 return ret2; 694 } 695 rpc_delay(task, nfs4_update_delay(&exception->timeout)); 696 goto out_retry; 697 } 698 if (exception->recovering) { 699 if (exception->task_is_privileged) 700 return -EDEADLOCK; 701 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); 702 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) 703 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); 704 goto out_retry; 705 } 706 if (test_bit(NFS_MIG_FAILED, &server->mig_status)) 707 ret = -EIO; 708 return ret; 709 out_retry: 710 if (ret == 0) { 711 exception->retry = 1; 712 /* 713 * For NFS4ERR_MOVED, the client transport will need to 714 * be recomputed after migration recovery has completed. 715 */ 716 if (errorcode == -NFS4ERR_MOVED) 717 rpc_task_release_transport(task); 718 } 719 return ret; 720 } 721 722 int 723 nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server, 724 struct nfs4_state *state, long *timeout) 725 { 726 struct nfs4_exception exception = { 727 .state = state, 728 }; 729 730 if (task->tk_status >= 0) 731 return 0; 732 if (timeout) 733 exception.timeout = *timeout; 734 task->tk_status = nfs4_async_handle_exception(task, server, 735 task->tk_status, 736 &exception); 737 if (exception.delay && timeout) 738 *timeout = exception.timeout; 739 if (exception.retry) 740 return -EAGAIN; 741 return 0; 742 } 743 744 /* 745 * Return 'true' if 'clp' is using an rpc_client that is integrity protected 746 * or 'false' otherwise. 
747 */ 748 static bool _nfs4_is_integrity_protected(struct nfs_client *clp) 749 { 750 rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor; 751 return (flavor == RPC_AUTH_GSS_KRB5I) || (flavor == RPC_AUTH_GSS_KRB5P); 752 } 753 754 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp) 755 { 756 spin_lock(&clp->cl_lock); 757 if (time_before(clp->cl_last_renewal,timestamp)) 758 clp->cl_last_renewal = timestamp; 759 spin_unlock(&clp->cl_lock); 760 } 761 762 static void renew_lease(const struct nfs_server *server, unsigned long timestamp) 763 { 764 struct nfs_client *clp = server->nfs_client; 765 766 if (!nfs4_has_session(clp)) 767 do_renew_lease(clp, timestamp); 768 } 769 770 struct nfs4_call_sync_data { 771 const struct nfs_server *seq_server; 772 struct nfs4_sequence_args *seq_args; 773 struct nfs4_sequence_res *seq_res; 774 }; 775 776 void nfs4_init_sequence(struct nfs4_sequence_args *args, 777 struct nfs4_sequence_res *res, int cache_reply, 778 int privileged) 779 { 780 args->sa_slot = NULL; 781 args->sa_cache_this = cache_reply; 782 args->sa_privileged = privileged; 783 784 res->sr_slot = NULL; 785 } 786 787 static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res) 788 { 789 struct nfs4_slot *slot = res->sr_slot; 790 struct nfs4_slot_table *tbl; 791 792 tbl = slot->table; 793 spin_lock(&tbl->slot_tbl_lock); 794 if (!nfs41_wake_and_assign_slot(tbl, slot)) 795 nfs4_free_slot(tbl, slot); 796 spin_unlock(&tbl->slot_tbl_lock); 797 798 res->sr_slot = NULL; 799 } 800 801 static int nfs40_sequence_done(struct rpc_task *task, 802 struct nfs4_sequence_res *res) 803 { 804 if (res->sr_slot != NULL) 805 nfs40_sequence_free_slot(res); 806 return 1; 807 } 808 809 #if defined(CONFIG_NFS_V4_1) 810 811 static void nfs41_release_slot(struct nfs4_slot *slot) 812 { 813 struct nfs4_session *session; 814 struct nfs4_slot_table *tbl; 815 bool send_new_highest_used_slotid = false; 816 817 if (!slot) 818 return; 819 tbl = slot->table; 820 session = tbl->session; 821 822 /* Bump the slot sequence number */ 823 if (slot->seq_done) 824 slot->seq_nr++; 825 slot->seq_done = 0; 826 827 spin_lock(&tbl->slot_tbl_lock); 828 /* Be nice to the server: try to ensure that the last transmitted 829 * value for highest_user_slotid <= target_highest_slotid 830 */ 831 if (tbl->highest_used_slotid > tbl->target_highest_slotid) 832 send_new_highest_used_slotid = true; 833 834 if (nfs41_wake_and_assign_slot(tbl, slot)) { 835 send_new_highest_used_slotid = false; 836 goto out_unlock; 837 } 838 nfs4_free_slot(tbl, slot); 839 840 if (tbl->highest_used_slotid != NFS4_NO_SLOT) 841 send_new_highest_used_slotid = false; 842 out_unlock: 843 spin_unlock(&tbl->slot_tbl_lock); 844 if (send_new_highest_used_slotid) 845 nfs41_notify_server(session->clp); 846 if (waitqueue_active(&tbl->slot_waitq)) 847 wake_up_all(&tbl->slot_waitq); 848 } 849 850 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res) 851 { 852 nfs41_release_slot(res->sr_slot); 853 res->sr_slot = NULL; 854 } 855 856 static void nfs4_slot_sequence_record_sent(struct nfs4_slot *slot, 857 u32 seqnr) 858 { 859 if ((s32)(seqnr - slot->seq_nr_highest_sent) > 0) 860 slot->seq_nr_highest_sent = seqnr; 861 } 862 static void nfs4_slot_sequence_acked(struct nfs4_slot *slot, u32 seqnr) 863 { 864 nfs4_slot_sequence_record_sent(slot, seqnr); 865 slot->seq_nr_last_acked = seqnr; 866 } 867 868 static void nfs4_probe_sequence(struct nfs_client *client, const struct cred *cred, 869 struct nfs4_slot *slot) 870 { 871 struct rpc_task *task = 
_nfs41_proc_sequence(client, cred, slot, true); 872 if (!IS_ERR(task)) 873 rpc_put_task_async(task); 874 } 875 876 static int nfs41_sequence_process(struct rpc_task *task, 877 struct nfs4_sequence_res *res) 878 { 879 struct nfs4_session *session; 880 struct nfs4_slot *slot = res->sr_slot; 881 struct nfs_client *clp; 882 int status; 883 int ret = 1; 884 885 if (slot == NULL) 886 goto out_noaction; 887 /* don't increment the sequence number if the task wasn't sent */ 888 if (!RPC_WAS_SENT(task) || slot->seq_done) 889 goto out; 890 891 session = slot->table->session; 892 clp = session->clp; 893 894 trace_nfs4_sequence_done(session, res); 895 896 status = res->sr_status; 897 if (task->tk_status == -NFS4ERR_DEADSESSION) 898 status = -NFS4ERR_DEADSESSION; 899 900 /* Check the SEQUENCE operation status */ 901 switch (status) { 902 case 0: 903 /* Mark this sequence number as having been acked */ 904 nfs4_slot_sequence_acked(slot, slot->seq_nr); 905 /* Update the slot's sequence and clientid lease timer */ 906 slot->seq_done = 1; 907 do_renew_lease(clp, res->sr_timestamp); 908 /* Check sequence flags */ 909 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags, 910 !!slot->privileged); 911 nfs41_update_target_slotid(slot->table, slot, res); 912 break; 913 case 1: 914 /* 915 * sr_status remains 1 if an RPC level error occurred. 916 * The server may or may not have processed the sequence 917 * operation.. 918 */ 919 nfs4_slot_sequence_record_sent(slot, slot->seq_nr); 920 slot->seq_done = 1; 921 goto out; 922 case -NFS4ERR_DELAY: 923 /* The server detected a resend of the RPC call and 924 * returned NFS4ERR_DELAY as per Section 2.10.6.2 925 * of RFC5661. 926 */ 927 dprintk("%s: slot=%u seq=%u: Operation in progress\n", 928 __func__, 929 slot->slot_nr, 930 slot->seq_nr); 931 goto out_retry; 932 case -NFS4ERR_RETRY_UNCACHED_REP: 933 case -NFS4ERR_SEQ_FALSE_RETRY: 934 /* 935 * The server thinks we tried to replay a request. 936 * Retry the call after bumping the sequence ID. 937 */ 938 nfs4_slot_sequence_acked(slot, slot->seq_nr); 939 goto retry_new_seq; 940 case -NFS4ERR_BADSLOT: 941 /* 942 * The slot id we used was probably retired. Try again 943 * using a different slot id. 944 */ 945 if (slot->slot_nr < slot->table->target_highest_slotid) 946 goto session_recover; 947 goto retry_nowait; 948 case -NFS4ERR_SEQ_MISORDERED: 949 nfs4_slot_sequence_record_sent(slot, slot->seq_nr); 950 /* 951 * Were one or more calls using this slot interrupted? 952 * If the server never received the request, then our 953 * transmitted slot sequence number may be too high. However, 954 * if the server did receive the request then it might 955 * accidentally give us a reply with a mismatched operation. 956 * We can sort this out by sending a lone sequence operation 957 * to the server on the same slot. 958 */ 959 if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) { 960 slot->seq_nr--; 961 if (task->tk_msg.rpc_proc != &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE]) { 962 nfs4_probe_sequence(clp, task->tk_msg.rpc_cred, slot); 963 res->sr_slot = NULL; 964 } 965 goto retry_nowait; 966 } 967 /* 968 * RFC5661: 969 * A retry might be sent while the original request is 970 * still in progress on the replier. The replier SHOULD 971 * deal with the issue by returning NFS4ERR_DELAY as the 972 * reply to SEQUENCE or CB_SEQUENCE operation, but 973 * implementations MAY return NFS4ERR_SEQ_MISORDERED. 974 * 975 * Restart the search after a delay. 
976 */ 977 slot->seq_nr = slot->seq_nr_highest_sent; 978 goto out_retry; 979 case -NFS4ERR_BADSESSION: 980 case -NFS4ERR_DEADSESSION: 981 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 982 goto session_recover; 983 default: 984 /* Just update the slot sequence no. */ 985 slot->seq_done = 1; 986 } 987 out: 988 /* The session may be reset by one of the error handlers. */ 989 dprintk("%s: Error %d free the slot \n", __func__, res->sr_status); 990 out_noaction: 991 return ret; 992 session_recover: 993 set_bit(NFS4_SLOT_TBL_DRAINING, &session->fc_slot_table.slot_tbl_state); 994 nfs4_schedule_session_recovery(session, status); 995 dprintk("%s ERROR: %d Reset session\n", __func__, status); 996 nfs41_sequence_free_slot(res); 997 goto out; 998 retry_new_seq: 999 ++slot->seq_nr; 1000 retry_nowait: 1001 if (rpc_restart_call_prepare(task)) { 1002 nfs41_sequence_free_slot(res); 1003 task->tk_status = 0; 1004 ret = 0; 1005 } 1006 goto out; 1007 out_retry: 1008 if (!rpc_restart_call(task)) 1009 goto out; 1010 rpc_delay(task, NFS4_POLL_RETRY_MAX); 1011 return 0; 1012 } 1013 1014 int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) 1015 { 1016 if (!nfs41_sequence_process(task, res)) 1017 return 0; 1018 if (res->sr_slot != NULL) 1019 nfs41_sequence_free_slot(res); 1020 return 1; 1021 1022 } 1023 EXPORT_SYMBOL_GPL(nfs41_sequence_done); 1024 1025 static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res) 1026 { 1027 if (res->sr_slot == NULL) 1028 return 1; 1029 if (res->sr_slot->table->session != NULL) 1030 return nfs41_sequence_process(task, res); 1031 return nfs40_sequence_done(task, res); 1032 } 1033 1034 static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res) 1035 { 1036 if (res->sr_slot != NULL) { 1037 if (res->sr_slot->table->session != NULL) 1038 nfs41_sequence_free_slot(res); 1039 else 1040 nfs40_sequence_free_slot(res); 1041 } 1042 } 1043 1044 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) 1045 { 1046 if (res->sr_slot == NULL) 1047 return 1; 1048 if (!res->sr_slot->table->session) 1049 return nfs40_sequence_done(task, res); 1050 return nfs41_sequence_done(task, res); 1051 } 1052 EXPORT_SYMBOL_GPL(nfs4_sequence_done); 1053 1054 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata) 1055 { 1056 struct nfs4_call_sync_data *data = calldata; 1057 1058 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server); 1059 1060 nfs4_setup_sequence(data->seq_server->nfs_client, 1061 data->seq_args, data->seq_res, task); 1062 } 1063 1064 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata) 1065 { 1066 struct nfs4_call_sync_data *data = calldata; 1067 1068 nfs41_sequence_done(task, data->seq_res); 1069 } 1070 1071 static const struct rpc_call_ops nfs41_call_sync_ops = { 1072 .rpc_call_prepare = nfs41_call_sync_prepare, 1073 .rpc_call_done = nfs41_call_sync_done, 1074 }; 1075 1076 #else /* !CONFIG_NFS_V4_1 */ 1077 1078 static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res) 1079 { 1080 return nfs40_sequence_done(task, res); 1081 } 1082 1083 static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res) 1084 { 1085 if (res->sr_slot != NULL) 1086 nfs40_sequence_free_slot(res); 1087 } 1088 1089 int nfs4_sequence_done(struct rpc_task *task, 1090 struct nfs4_sequence_res *res) 1091 { 1092 return nfs40_sequence_done(task, res); 1093 } 1094 EXPORT_SYMBOL_GPL(nfs4_sequence_done); 1095 1096 #endif /* !CONFIG_NFS_V4_1 */ 1097 1098 static void 
nfs41_sequence_res_init(struct nfs4_sequence_res *res) 1099 { 1100 res->sr_timestamp = jiffies; 1101 res->sr_status_flags = 0; 1102 res->sr_status = 1; 1103 } 1104 1105 static 1106 void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args, 1107 struct nfs4_sequence_res *res, 1108 struct nfs4_slot *slot) 1109 { 1110 if (!slot) 1111 return; 1112 slot->privileged = args->sa_privileged ? 1 : 0; 1113 args->sa_slot = slot; 1114 1115 res->sr_slot = slot; 1116 } 1117 1118 int nfs4_setup_sequence(struct nfs_client *client, 1119 struct nfs4_sequence_args *args, 1120 struct nfs4_sequence_res *res, 1121 struct rpc_task *task) 1122 { 1123 struct nfs4_session *session = nfs4_get_session(client); 1124 struct nfs4_slot_table *tbl = client->cl_slot_tbl; 1125 struct nfs4_slot *slot; 1126 1127 /* slot already allocated? */ 1128 if (res->sr_slot != NULL) 1129 goto out_start; 1130 1131 if (session) 1132 tbl = &session->fc_slot_table; 1133 1134 spin_lock(&tbl->slot_tbl_lock); 1135 /* The state manager will wait until the slot table is empty */ 1136 if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged) 1137 goto out_sleep; 1138 1139 slot = nfs4_alloc_slot(tbl); 1140 if (IS_ERR(slot)) { 1141 if (slot == ERR_PTR(-ENOMEM)) 1142 goto out_sleep_timeout; 1143 goto out_sleep; 1144 } 1145 spin_unlock(&tbl->slot_tbl_lock); 1146 1147 nfs4_sequence_attach_slot(args, res, slot); 1148 1149 trace_nfs4_setup_sequence(session, args); 1150 out_start: 1151 nfs41_sequence_res_init(res); 1152 rpc_call_start(task); 1153 return 0; 1154 out_sleep_timeout: 1155 /* Try again in 1/4 second */ 1156 if (args->sa_privileged) 1157 rpc_sleep_on_priority_timeout(&tbl->slot_tbl_waitq, task, 1158 jiffies + (HZ >> 2), RPC_PRIORITY_PRIVILEGED); 1159 else 1160 rpc_sleep_on_timeout(&tbl->slot_tbl_waitq, task, 1161 NULL, jiffies + (HZ >> 2)); 1162 spin_unlock(&tbl->slot_tbl_lock); 1163 return -EAGAIN; 1164 out_sleep: 1165 if (args->sa_privileged) 1166 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task, 1167 RPC_PRIORITY_PRIVILEGED); 1168 else 1169 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); 1170 spin_unlock(&tbl->slot_tbl_lock); 1171 return -EAGAIN; 1172 } 1173 EXPORT_SYMBOL_GPL(nfs4_setup_sequence); 1174 1175 static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata) 1176 { 1177 struct nfs4_call_sync_data *data = calldata; 1178 nfs4_setup_sequence(data->seq_server->nfs_client, 1179 data->seq_args, data->seq_res, task); 1180 } 1181 1182 static void nfs40_call_sync_done(struct rpc_task *task, void *calldata) 1183 { 1184 struct nfs4_call_sync_data *data = calldata; 1185 nfs4_sequence_done(task, data->seq_res); 1186 } 1187 1188 static const struct rpc_call_ops nfs40_call_sync_ops = { 1189 .rpc_call_prepare = nfs40_call_sync_prepare, 1190 .rpc_call_done = nfs40_call_sync_done, 1191 }; 1192 1193 static int nfs4_call_sync_custom(struct rpc_task_setup *task_setup) 1194 { 1195 int ret; 1196 struct rpc_task *task; 1197 1198 task = rpc_run_task(task_setup); 1199 if (IS_ERR(task)) 1200 return PTR_ERR(task); 1201 1202 ret = task->tk_status; 1203 rpc_put_task(task); 1204 return ret; 1205 } 1206 1207 static int nfs4_do_call_sync(struct rpc_clnt *clnt, 1208 struct nfs_server *server, 1209 struct rpc_message *msg, 1210 struct nfs4_sequence_args *args, 1211 struct nfs4_sequence_res *res, 1212 unsigned short task_flags) 1213 { 1214 struct nfs_client *clp = server->nfs_client; 1215 struct nfs4_call_sync_data data = { 1216 .seq_server = server, 1217 .seq_args = args, 1218 .seq_res = res, 1219 }; 1220 struct rpc_task_setup task_setup = { 1221 
.rpc_client = clnt, 1222 .rpc_message = msg, 1223 .callback_ops = clp->cl_mvops->call_sync_ops, 1224 .callback_data = &data, 1225 .flags = task_flags, 1226 }; 1227 1228 return nfs4_call_sync_custom(&task_setup); 1229 } 1230 1231 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt, 1232 struct nfs_server *server, 1233 struct rpc_message *msg, 1234 struct nfs4_sequence_args *args, 1235 struct nfs4_sequence_res *res) 1236 { 1237 unsigned short task_flags = 0; 1238 1239 if (server->caps & NFS_CAP_MOVEABLE) 1240 task_flags = RPC_TASK_MOVEABLE; 1241 return nfs4_do_call_sync(clnt, server, msg, args, res, task_flags); 1242 } 1243 1244 1245 int nfs4_call_sync(struct rpc_clnt *clnt, 1246 struct nfs_server *server, 1247 struct rpc_message *msg, 1248 struct nfs4_sequence_args *args, 1249 struct nfs4_sequence_res *res, 1250 int cache_reply) 1251 { 1252 nfs4_init_sequence(args, res, cache_reply, 0); 1253 return nfs4_call_sync_sequence(clnt, server, msg, args, res); 1254 } 1255 1256 static void 1257 nfs4_inc_nlink_locked(struct inode *inode) 1258 { 1259 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE | 1260 NFS_INO_INVALID_CTIME | 1261 NFS_INO_INVALID_NLINK); 1262 inc_nlink(inode); 1263 } 1264 1265 static void 1266 nfs4_inc_nlink(struct inode *inode) 1267 { 1268 spin_lock(&inode->i_lock); 1269 nfs4_inc_nlink_locked(inode); 1270 spin_unlock(&inode->i_lock); 1271 } 1272 1273 static void 1274 nfs4_dec_nlink_locked(struct inode *inode) 1275 { 1276 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE | 1277 NFS_INO_INVALID_CTIME | 1278 NFS_INO_INVALID_NLINK); 1279 drop_nlink(inode); 1280 } 1281 1282 static void 1283 nfs4_update_changeattr_locked(struct inode *inode, 1284 struct nfs4_change_info *cinfo, 1285 unsigned long timestamp, unsigned long cache_validity) 1286 { 1287 struct nfs_inode *nfsi = NFS_I(inode); 1288 u64 change_attr = inode_peek_iversion_raw(inode); 1289 1290 if (!nfs_have_delegated_mtime(inode)) 1291 cache_validity |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME; 1292 if (S_ISDIR(inode->i_mode)) 1293 cache_validity |= NFS_INO_INVALID_DATA; 1294 1295 switch (NFS_SERVER(inode)->change_attr_type) { 1296 case NFS4_CHANGE_TYPE_IS_UNDEFINED: 1297 if (cinfo->after == change_attr) 1298 goto out; 1299 break; 1300 default: 1301 if ((s64)(change_attr - cinfo->after) >= 0) 1302 goto out; 1303 } 1304 1305 inode_set_iversion_raw(inode, cinfo->after); 1306 if (!cinfo->atomic || cinfo->before != change_attr) { 1307 if (S_ISDIR(inode->i_mode)) 1308 nfs_force_lookup_revalidate(inode); 1309 1310 if (!nfs_have_delegated_attributes(inode)) 1311 cache_validity |= 1312 NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL | 1313 NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER | 1314 NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK | 1315 NFS_INO_INVALID_MODE | NFS_INO_INVALID_BTIME | 1316 NFS_INO_INVALID_XATTR; 1317 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode); 1318 } 1319 nfsi->attrtimeo_timestamp = jiffies; 1320 nfsi->read_cache_jiffies = timestamp; 1321 nfsi->attr_gencount = nfs_inc_attr_generation_counter(); 1322 nfsi->cache_validity &= ~NFS_INO_INVALID_CHANGE; 1323 out: 1324 nfs_set_cache_invalid(inode, cache_validity); 1325 } 1326 1327 void 1328 nfs4_update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo, 1329 unsigned long timestamp, unsigned long cache_validity) 1330 { 1331 spin_lock(&dir->i_lock); 1332 nfs4_update_changeattr_locked(dir, cinfo, timestamp, cache_validity); 1333 spin_unlock(&dir->i_lock); 1334 } 1335 1336 struct nfs4_open_createattrs { 1337 struct nfs4_label *label; 1338 struct iattr 
*sattr; 1339 const __u32 verf[2]; 1340 }; 1341 1342 static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server, 1343 int err, struct nfs4_exception *exception) 1344 { 1345 if (err != -EINVAL) 1346 return false; 1347 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1)) 1348 return false; 1349 server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1; 1350 exception->retry = 1; 1351 return true; 1352 } 1353 1354 static fmode_t _nfs4_ctx_to_accessmode(const struct nfs_open_context *ctx) 1355 { 1356 return ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC); 1357 } 1358 1359 static fmode_t _nfs4_ctx_to_openmode(const struct nfs_open_context *ctx) 1360 { 1361 fmode_t ret = ctx->mode & (FMODE_READ|FMODE_WRITE); 1362 1363 return (ctx->mode & FMODE_EXEC) ? FMODE_READ | ret : ret; 1364 } 1365 1366 static u32 1367 nfs4_fmode_to_share_access(fmode_t fmode) 1368 { 1369 u32 res = 0; 1370 1371 switch (fmode & (FMODE_READ | FMODE_WRITE)) { 1372 case FMODE_READ: 1373 res = NFS4_SHARE_ACCESS_READ; 1374 break; 1375 case FMODE_WRITE: 1376 res = NFS4_SHARE_ACCESS_WRITE; 1377 break; 1378 case FMODE_READ|FMODE_WRITE: 1379 res = NFS4_SHARE_ACCESS_BOTH; 1380 } 1381 return res; 1382 } 1383 1384 static u32 1385 nfs4_map_atomic_open_share(struct nfs_server *server, 1386 fmode_t fmode, int openflags) 1387 { 1388 u32 res = nfs4_fmode_to_share_access(fmode); 1389 1390 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1)) 1391 goto out; 1392 /* Want no delegation if we're using O_DIRECT */ 1393 if (openflags & O_DIRECT) { 1394 res |= NFS4_SHARE_WANT_NO_DELEG; 1395 goto out; 1396 } 1397 /* res |= NFS4_SHARE_WANT_NO_PREFERENCE; */ 1398 if (server->caps & NFS_CAP_DELEGTIME) 1399 res |= NFS4_SHARE_WANT_DELEG_TIMESTAMPS; 1400 if (server->caps & NFS_CAP_OPEN_XOR) 1401 res |= NFS4_SHARE_WANT_OPEN_XOR_DELEGATION; 1402 out: 1403 return res; 1404 } 1405 1406 static enum open_claim_type4 1407 nfs4_map_atomic_open_claim(struct nfs_server *server, 1408 enum open_claim_type4 claim) 1409 { 1410 if (server->caps & NFS_CAP_ATOMIC_OPEN_V1) 1411 return claim; 1412 switch (claim) { 1413 default: 1414 return claim; 1415 case NFS4_OPEN_CLAIM_FH: 1416 return NFS4_OPEN_CLAIM_NULL; 1417 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 1418 return NFS4_OPEN_CLAIM_DELEGATE_CUR; 1419 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 1420 return NFS4_OPEN_CLAIM_DELEGATE_PREV; 1421 } 1422 } 1423 1424 static void nfs4_init_opendata_res(struct nfs4_opendata *p) 1425 { 1426 p->o_res.f_attr = &p->f_attr; 1427 p->o_res.seqid = p->o_arg.seqid; 1428 p->c_res.seqid = p->c_arg.seqid; 1429 p->o_res.server = p->o_arg.server; 1430 p->o_res.access_request = p->o_arg.access; 1431 nfs_fattr_init(&p->f_attr); 1432 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name); 1433 } 1434 1435 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, 1436 struct nfs4_state_owner *sp, fmode_t fmode, int flags, 1437 const struct nfs4_open_createattrs *c, 1438 enum open_claim_type4 claim, 1439 gfp_t gfp_mask) 1440 { 1441 struct dentry *parent = dget_parent(dentry); 1442 struct inode *dir = d_inode(parent); 1443 struct nfs_server *server = NFS_SERVER(dir); 1444 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 1445 struct nfs4_label *label = (c != NULL) ? 
c->label : NULL; 1446 struct nfs4_opendata *p; 1447 1448 p = kzalloc(sizeof(*p), gfp_mask); 1449 if (p == NULL) 1450 goto err; 1451 1452 p->f_attr.label = nfs4_label_alloc(server, gfp_mask); 1453 if (IS_ERR(p->f_attr.label)) 1454 goto err_free_p; 1455 1456 p->a_label = nfs4_label_alloc(server, gfp_mask); 1457 if (IS_ERR(p->a_label)) 1458 goto err_free_f; 1459 1460 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 1461 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask); 1462 if (IS_ERR(p->o_arg.seqid)) 1463 goto err_free_label; 1464 nfs_sb_active(dentry->d_sb); 1465 p->dentry = dget(dentry); 1466 p->dir = parent; 1467 p->owner = sp; 1468 atomic_inc(&sp->so_count); 1469 p->o_arg.open_flags = flags; 1470 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE); 1471 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim); 1472 p->o_arg.share_access = nfs4_map_atomic_open_share(server, 1473 fmode, flags); 1474 if (flags & O_CREAT) { 1475 p->o_arg.umask = current_umask(); 1476 p->o_arg.label = nfs4_label_copy(p->a_label, label); 1477 if (c->sattr != NULL && c->sattr->ia_valid != 0) { 1478 p->o_arg.u.attrs = &p->attrs; 1479 memcpy(&p->attrs, c->sattr, sizeof(p->attrs)); 1480 1481 memcpy(p->o_arg.u.verifier.data, c->verf, 1482 sizeof(p->o_arg.u.verifier.data)); 1483 } 1484 } 1485 /* ask server to check for all possible rights as results 1486 * are cached */ 1487 switch (p->o_arg.claim) { 1488 default: 1489 break; 1490 case NFS4_OPEN_CLAIM_NULL: 1491 case NFS4_OPEN_CLAIM_FH: 1492 p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY | 1493 NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE | 1494 NFS4_ACCESS_EXECUTE | 1495 nfs_access_xattr_mask(server); 1496 } 1497 p->o_arg.clientid = server->nfs_client->cl_clientid; 1498 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time); 1499 p->o_arg.id.uniquifier = sp->so_seqid.owner_id; 1500 p->o_arg.name = &dentry->d_name; 1501 p->o_arg.server = server; 1502 p->o_arg.bitmask = nfs4_bitmask(server, label); 1503 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0]; 1504 switch (p->o_arg.claim) { 1505 case NFS4_OPEN_CLAIM_NULL: 1506 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 1507 case NFS4_OPEN_CLAIM_DELEGATE_PREV: 1508 p->o_arg.fh = NFS_FH(dir); 1509 break; 1510 case NFS4_OPEN_CLAIM_PREVIOUS: 1511 case NFS4_OPEN_CLAIM_FH: 1512 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 1513 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 1514 p->o_arg.fh = NFS_FH(d_inode(dentry)); 1515 } 1516 p->c_arg.fh = &p->o_res.fh; 1517 p->c_arg.stateid = &p->o_res.stateid; 1518 p->c_arg.seqid = p->o_arg.seqid; 1519 nfs4_init_opendata_res(p); 1520 kref_init(&p->kref); 1521 return p; 1522 1523 err_free_label: 1524 nfs4_label_free(p->a_label); 1525 err_free_f: 1526 nfs4_label_free(p->f_attr.label); 1527 err_free_p: 1528 kfree(p); 1529 err: 1530 dput(parent); 1531 return NULL; 1532 } 1533 1534 static void nfs4_opendata_free(struct kref *kref) 1535 { 1536 struct nfs4_opendata *p = container_of(kref, 1537 struct nfs4_opendata, kref); 1538 struct super_block *sb = p->dentry->d_sb; 1539 1540 nfs4_lgopen_release(p->lgp); 1541 nfs_free_seqid(p->o_arg.seqid); 1542 nfs4_sequence_free_slot(&p->o_res.seq_res); 1543 if (p->state != NULL) 1544 nfs4_put_open_state(p->state); 1545 nfs4_put_state_owner(p->owner); 1546 1547 nfs4_label_free(p->a_label); 1548 nfs4_label_free(p->f_attr.label); 1549 1550 dput(p->dir); 1551 dput(p->dentry); 1552 nfs_sb_deactive(sb); 1553 nfs_fattr_free_names(&p->f_attr); 1554 kfree(p->f_attr.mdsthreshold); 1555 kfree(p); 1556 } 1557 1558 static void nfs4_opendata_put(struct nfs4_opendata *p) 1559 { 1560 
if (p != NULL) 1561 kref_put(&p->kref, nfs4_opendata_free); 1562 } 1563 1564 static bool nfs4_mode_match_open_stateid(struct nfs4_state *state, 1565 fmode_t fmode) 1566 { 1567 switch(fmode & (FMODE_READ|FMODE_WRITE)) { 1568 case FMODE_READ|FMODE_WRITE: 1569 return state->n_rdwr != 0; 1570 case FMODE_WRITE: 1571 return state->n_wronly != 0; 1572 case FMODE_READ: 1573 return state->n_rdonly != 0; 1574 } 1575 WARN_ON_ONCE(1); 1576 return false; 1577 } 1578 1579 static int can_open_cached(struct nfs4_state *state, fmode_t mode, 1580 int open_mode, enum open_claim_type4 claim) 1581 { 1582 int ret = 0; 1583 1584 if (open_mode & (O_EXCL|O_TRUNC)) 1585 goto out; 1586 switch (claim) { 1587 case NFS4_OPEN_CLAIM_NULL: 1588 case NFS4_OPEN_CLAIM_FH: 1589 goto out; 1590 default: 1591 break; 1592 } 1593 switch (mode & (FMODE_READ|FMODE_WRITE)) { 1594 case FMODE_READ: 1595 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0 1596 && state->n_rdonly != 0; 1597 break; 1598 case FMODE_WRITE: 1599 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0 1600 && state->n_wronly != 0; 1601 break; 1602 case FMODE_READ|FMODE_WRITE: 1603 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0 1604 && state->n_rdwr != 0; 1605 } 1606 out: 1607 return ret; 1608 } 1609 1610 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode, 1611 enum open_claim_type4 claim) 1612 { 1613 if (delegation == NULL) 1614 return 0; 1615 if ((delegation->type & fmode) != fmode) 1616 return 0; 1617 switch (claim) { 1618 case NFS4_OPEN_CLAIM_NULL: 1619 case NFS4_OPEN_CLAIM_FH: 1620 break; 1621 case NFS4_OPEN_CLAIM_PREVIOUS: 1622 if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags)) 1623 break; 1624 fallthrough; 1625 default: 1626 return 0; 1627 } 1628 nfs_mark_delegation_referenced(delegation); 1629 return 1; 1630 } 1631 1632 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode) 1633 { 1634 switch (fmode) { 1635 case FMODE_WRITE: 1636 state->n_wronly++; 1637 break; 1638 case FMODE_READ: 1639 state->n_rdonly++; 1640 break; 1641 case FMODE_READ|FMODE_WRITE: 1642 state->n_rdwr++; 1643 } 1644 nfs4_state_set_mode_locked(state, state->state | fmode); 1645 } 1646 1647 #ifdef CONFIG_NFS_V4_1 1648 static bool nfs_open_stateid_recover_openmode(struct nfs4_state *state) 1649 { 1650 if (state->n_rdonly && !test_bit(NFS_O_RDONLY_STATE, &state->flags)) 1651 return true; 1652 if (state->n_wronly && !test_bit(NFS_O_WRONLY_STATE, &state->flags)) 1653 return true; 1654 if (state->n_rdwr && !test_bit(NFS_O_RDWR_STATE, &state->flags)) 1655 return true; 1656 return false; 1657 } 1658 #endif /* CONFIG_NFS_V4_1 */ 1659 1660 static void nfs_state_log_update_open_stateid(struct nfs4_state *state) 1661 { 1662 if (test_and_clear_bit(NFS_STATE_CHANGE_WAIT, &state->flags)) 1663 wake_up_all(&state->waitq); 1664 } 1665 1666 static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state) 1667 { 1668 struct nfs_client *clp = state->owner->so_server->nfs_client; 1669 bool need_recover = false; 1670 1671 if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly) 1672 need_recover = true; 1673 if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly) 1674 need_recover = true; 1675 if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr) 1676 need_recover = true; 1677 if (need_recover) 1678 nfs4_state_mark_reclaim_nograce(clp, state); 1679 } 1680 1681 /* 1682 * Check for whether or not the caller may update the open stateid 1683 * to the value passed in by 
stateid. 1684 * 1685 * Note: This function relies heavily on the server implementing 1686 * RFC7530 Section 9.1.4.2, and RFC5661 Section 8.2.2 1687 * correctly. 1688 * i.e. The stateid seqids have to be initialised to 1, and 1689 * are then incremented on every state transition. 1690 */ 1691 static bool nfs_stateid_is_sequential(struct nfs4_state *state, 1692 const nfs4_stateid *stateid) 1693 { 1694 if (test_bit(NFS_OPEN_STATE, &state->flags)) { 1695 /* The common case - we're updating to a new sequence number */ 1696 if (nfs4_stateid_match_other(stateid, &state->open_stateid)) { 1697 if (nfs4_stateid_is_next(&state->open_stateid, stateid)) 1698 return true; 1699 return false; 1700 } 1701 /* The server returned a new stateid */ 1702 } 1703 /* This is the first OPEN in this generation */ 1704 if (stateid->seqid == cpu_to_be32(1)) 1705 return true; 1706 return false; 1707 } 1708 1709 static void nfs_resync_open_stateid_locked(struct nfs4_state *state) 1710 { 1711 if (!(state->n_wronly || state->n_rdonly || state->n_rdwr)) 1712 return; 1713 if (state->n_wronly) 1714 set_bit(NFS_O_WRONLY_STATE, &state->flags); 1715 if (state->n_rdonly) 1716 set_bit(NFS_O_RDONLY_STATE, &state->flags); 1717 if (state->n_rdwr) 1718 set_bit(NFS_O_RDWR_STATE, &state->flags); 1719 set_bit(NFS_OPEN_STATE, &state->flags); 1720 } 1721 1722 static void nfs_clear_open_stateid_locked(struct nfs4_state *state, 1723 nfs4_stateid *stateid, fmode_t fmode) 1724 { 1725 clear_bit(NFS_O_RDWR_STATE, &state->flags); 1726 switch (fmode & (FMODE_READ|FMODE_WRITE)) { 1727 case FMODE_WRITE: 1728 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1729 break; 1730 case FMODE_READ: 1731 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 1732 break; 1733 case 0: 1734 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1735 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 1736 clear_bit(NFS_OPEN_STATE, &state->flags); 1737 } 1738 if (stateid == NULL) 1739 return; 1740 /* Handle OPEN+OPEN_DOWNGRADE races */ 1741 if (nfs4_stateid_match_other(stateid, &state->open_stateid) && 1742 !nfs4_stateid_is_newer(stateid, &state->open_stateid)) { 1743 nfs_resync_open_stateid_locked(state); 1744 goto out; 1745 } 1746 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 1747 nfs4_stateid_copy(&state->stateid, stateid); 1748 nfs4_stateid_copy(&state->open_stateid, stateid); 1749 trace_nfs4_open_stateid_update(state->inode, stateid, 0); 1750 out: 1751 nfs_state_log_update_open_stateid(state); 1752 } 1753 1754 static void nfs_clear_open_stateid(struct nfs4_state *state, 1755 nfs4_stateid *arg_stateid, 1756 nfs4_stateid *stateid, fmode_t fmode) 1757 { 1758 write_seqlock(&state->seqlock); 1759 /* Ignore, if the CLOSE argment doesn't match the current stateid */ 1760 if (nfs4_state_match_open_stateid_other(state, arg_stateid)) 1761 nfs_clear_open_stateid_locked(state, stateid, fmode); 1762 write_sequnlock(&state->seqlock); 1763 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) 1764 nfs4_schedule_state_manager(state->owner->so_server->nfs_client); 1765 } 1766 1767 static void nfs_set_open_stateid_locked(struct nfs4_state *state, 1768 const nfs4_stateid *stateid, nfs4_stateid *freeme) 1769 __must_hold(&state->owner->so_lock) 1770 __must_hold(&state->seqlock) 1771 __must_hold(RCU) 1772 1773 { 1774 DEFINE_WAIT(wait); 1775 int status = 0; 1776 for (;;) { 1777 1778 if (nfs_stateid_is_sequential(state, stateid)) 1779 break; 1780 1781 if (status) 1782 break; 1783 /* Rely on seqids for serialisation with NFSv4.0 */ 1784 if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client)) 
1785 break; 1786 1787 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags); 1788 prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE); 1789 /* 1790 * Ensure we process the state changes in the same order 1791 * in which the server processed them by delaying the 1792 * update of the stateid until we are in sequence. 1793 */ 1794 write_sequnlock(&state->seqlock); 1795 spin_unlock(&state->owner->so_lock); 1796 rcu_read_unlock(); 1797 trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0); 1798 1799 if (!fatal_signal_pending(current) && 1800 !nfs_current_task_exiting()) { 1801 if (schedule_timeout(5*HZ) == 0) 1802 status = -EAGAIN; 1803 else 1804 status = 0; 1805 } else 1806 status = -EINTR; 1807 finish_wait(&state->waitq, &wait); 1808 rcu_read_lock(); 1809 spin_lock(&state->owner->so_lock); 1810 write_seqlock(&state->seqlock); 1811 } 1812 1813 if (test_bit(NFS_OPEN_STATE, &state->flags) && 1814 !nfs4_stateid_match_other(stateid, &state->open_stateid)) { 1815 nfs4_stateid_copy(freeme, &state->open_stateid); 1816 nfs_test_and_clear_all_open_stateid(state); 1817 } 1818 1819 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 1820 nfs4_stateid_copy(&state->stateid, stateid); 1821 nfs4_stateid_copy(&state->open_stateid, stateid); 1822 trace_nfs4_open_stateid_update(state->inode, stateid, status); 1823 nfs_state_log_update_open_stateid(state); 1824 } 1825 1826 static void nfs_state_set_open_stateid(struct nfs4_state *state, 1827 const nfs4_stateid *open_stateid, 1828 fmode_t fmode, 1829 nfs4_stateid *freeme) 1830 { 1831 /* 1832 * Protect the call to nfs4_state_set_mode_locked and 1833 * serialise the stateid update 1834 */ 1835 write_seqlock(&state->seqlock); 1836 nfs_set_open_stateid_locked(state, open_stateid, freeme); 1837 switch (fmode) { 1838 case FMODE_READ: 1839 set_bit(NFS_O_RDONLY_STATE, &state->flags); 1840 break; 1841 case FMODE_WRITE: 1842 set_bit(NFS_O_WRONLY_STATE, &state->flags); 1843 break; 1844 case FMODE_READ|FMODE_WRITE: 1845 set_bit(NFS_O_RDWR_STATE, &state->flags); 1846 } 1847 set_bit(NFS_OPEN_STATE, &state->flags); 1848 write_sequnlock(&state->seqlock); 1849 } 1850 1851 static void nfs_state_clear_open_state_flags(struct nfs4_state *state) 1852 { 1853 clear_bit(NFS_O_RDWR_STATE, &state->flags); 1854 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 1855 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1856 clear_bit(NFS_OPEN_STATE, &state->flags); 1857 } 1858 1859 static void nfs_state_set_delegation(struct nfs4_state *state, 1860 const nfs4_stateid *deleg_stateid, 1861 fmode_t fmode) 1862 { 1863 /* 1864 * Protect the call to nfs4_state_set_mode_locked and 1865 * serialise the stateid update 1866 */ 1867 write_seqlock(&state->seqlock); 1868 nfs4_stateid_copy(&state->stateid, deleg_stateid); 1869 set_bit(NFS_DELEGATED_STATE, &state->flags); 1870 write_sequnlock(&state->seqlock); 1871 } 1872 1873 static void nfs_state_clear_delegation(struct nfs4_state *state) 1874 { 1875 write_seqlock(&state->seqlock); 1876 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 1877 clear_bit(NFS_DELEGATED_STATE, &state->flags); 1878 write_sequnlock(&state->seqlock); 1879 } 1880 1881 int update_open_stateid(struct nfs4_state *state, 1882 const nfs4_stateid *open_stateid, 1883 const nfs4_stateid *delegation, 1884 fmode_t fmode) 1885 { 1886 struct nfs_server *server = NFS_SERVER(state->inode); 1887 struct nfs_client *clp = server->nfs_client; 1888 struct nfs_inode *nfsi = NFS_I(state->inode); 1889 struct nfs_delegation *deleg_cur; 1890 nfs4_stateid freeme = { }; 1891 int ret = 0; 1892 1893 fmode 
&= (FMODE_READ|FMODE_WRITE); 1894 1895 rcu_read_lock(); 1896 spin_lock(&state->owner->so_lock); 1897 if (open_stateid != NULL) { 1898 nfs_state_set_open_stateid(state, open_stateid, fmode, &freeme); 1899 ret = 1; 1900 } 1901 1902 deleg_cur = nfs4_get_valid_delegation(state->inode); 1903 if (deleg_cur == NULL) 1904 goto no_delegation; 1905 1906 spin_lock(&deleg_cur->lock); 1907 if (rcu_dereference(nfsi->delegation) != deleg_cur || 1908 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) || 1909 (deleg_cur->type & fmode) != fmode) 1910 goto no_delegation_unlock; 1911 1912 if (delegation == NULL) 1913 delegation = &deleg_cur->stateid; 1914 else if (!nfs4_stateid_match_other(&deleg_cur->stateid, delegation)) 1915 goto no_delegation_unlock; 1916 1917 nfs_mark_delegation_referenced(deleg_cur); 1918 nfs_state_set_delegation(state, &deleg_cur->stateid, fmode); 1919 ret = 1; 1920 no_delegation_unlock: 1921 spin_unlock(&deleg_cur->lock); 1922 no_delegation: 1923 if (ret) 1924 update_open_stateflags(state, fmode); 1925 spin_unlock(&state->owner->so_lock); 1926 rcu_read_unlock(); 1927 1928 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) 1929 nfs4_schedule_state_manager(clp); 1930 if (freeme.type != 0) 1931 nfs4_test_and_free_stateid(server, &freeme, 1932 state->owner->so_cred); 1933 1934 return ret; 1935 } 1936 1937 static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp, 1938 const nfs4_stateid *stateid) 1939 { 1940 struct nfs4_state *state = lsp->ls_state; 1941 bool ret = false; 1942 1943 spin_lock(&state->state_lock); 1944 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid)) 1945 goto out_noupdate; 1946 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid)) 1947 goto out_noupdate; 1948 nfs4_stateid_copy(&lsp->ls_stateid, stateid); 1949 ret = true; 1950 out_noupdate: 1951 spin_unlock(&state->state_lock); 1952 return ret; 1953 } 1954 1955 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode) 1956 { 1957 struct nfs_delegation *delegation; 1958 1959 fmode &= FMODE_READ|FMODE_WRITE; 1960 rcu_read_lock(); 1961 delegation = nfs4_get_valid_delegation(inode); 1962 if (delegation == NULL || (delegation->type & fmode) == fmode) { 1963 rcu_read_unlock(); 1964 return; 1965 } 1966 rcu_read_unlock(); 1967 nfs4_inode_return_delegation(inode); 1968 } 1969 1970 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) 1971 { 1972 struct nfs4_state *state = opendata->state; 1973 struct nfs_delegation *delegation; 1974 int open_mode = opendata->o_arg.open_flags; 1975 fmode_t fmode = opendata->o_arg.fmode; 1976 enum open_claim_type4 claim = opendata->o_arg.claim; 1977 nfs4_stateid stateid; 1978 int ret = -EAGAIN; 1979 1980 for (;;) { 1981 spin_lock(&state->owner->so_lock); 1982 if (can_open_cached(state, fmode, open_mode, claim)) { 1983 update_open_stateflags(state, fmode); 1984 spin_unlock(&state->owner->so_lock); 1985 goto out_return_state; 1986 } 1987 spin_unlock(&state->owner->so_lock); 1988 rcu_read_lock(); 1989 delegation = nfs4_get_valid_delegation(state->inode); 1990 if (!can_open_delegated(delegation, fmode, claim)) { 1991 rcu_read_unlock(); 1992 break; 1993 } 1994 /* Save the delegation */ 1995 nfs4_stateid_copy(&stateid, &delegation->stateid); 1996 rcu_read_unlock(); 1997 nfs_release_seqid(opendata->o_arg.seqid); 1998 if (!opendata->is_recover) { 1999 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode); 2000 if (ret != 0) 2001 goto out; 2002 } 2003 ret = -EAGAIN; 2004 2005 /* Try to update the stateid using the 
delegation */ 2006 if (update_open_stateid(state, NULL, &stateid, fmode)) 2007 goto out_return_state; 2008 } 2009 out: 2010 return ERR_PTR(ret); 2011 out_return_state: 2012 refcount_inc(&state->count); 2013 return state; 2014 } 2015 2016 static void 2017 nfs4_process_delegation(struct inode *inode, const struct cred *cred, 2018 enum open_claim_type4 claim, 2019 const struct nfs4_open_delegation *delegation) 2020 { 2021 switch (delegation->open_delegation_type) { 2022 case NFS4_OPEN_DELEGATE_READ: 2023 case NFS4_OPEN_DELEGATE_WRITE: 2024 case NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG: 2025 case NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG: 2026 break; 2027 default: 2028 return; 2029 } 2030 switch (claim) { 2031 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 2032 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 2033 pr_err_ratelimited("NFS: Broken NFSv4 server %s is " 2034 "returning a delegation for " 2035 "OPEN(CLAIM_DELEGATE_CUR)\n", 2036 NFS_SERVER(inode)->nfs_client->cl_hostname); 2037 break; 2038 case NFS4_OPEN_CLAIM_PREVIOUS: 2039 nfs_inode_reclaim_delegation(inode, cred, delegation->type, 2040 &delegation->stateid, 2041 delegation->pagemod_limit, 2042 delegation->open_delegation_type); 2043 break; 2044 default: 2045 nfs_inode_set_delegation(inode, cred, delegation->type, 2046 &delegation->stateid, 2047 delegation->pagemod_limit, 2048 delegation->open_delegation_type); 2049 } 2050 if (delegation->do_recall) 2051 nfs_async_inode_return_delegation(inode, &delegation->stateid); 2052 } 2053 2054 /* 2055 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes 2056 * and update the nfs4_state. 2057 */ 2058 static struct nfs4_state * 2059 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data) 2060 { 2061 struct inode *inode = data->state->inode; 2062 struct nfs4_state *state = data->state; 2063 int ret; 2064 2065 if (!data->rpc_done) { 2066 if (data->rpc_status) 2067 return ERR_PTR(data->rpc_status); 2068 return nfs4_try_open_cached(data); 2069 } 2070 2071 ret = nfs_refresh_inode(inode, &data->f_attr); 2072 if (ret) 2073 return ERR_PTR(ret); 2074 2075 nfs4_process_delegation(state->inode, 2076 data->owner->so_cred, 2077 data->o_arg.claim, 2078 &data->o_res.delegation); 2079 2080 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_NO_OPEN_STATEID)) { 2081 if (!update_open_stateid(state, &data->o_res.stateid, 2082 NULL, data->o_arg.fmode)) 2083 return ERR_PTR(-EAGAIN); 2084 } else if (!update_open_stateid(state, NULL, NULL, data->o_arg.fmode)) 2085 return ERR_PTR(-EAGAIN); 2086 refcount_inc(&state->count); 2087 2088 return state; 2089 } 2090 2091 static struct inode * 2092 nfs4_opendata_get_inode(struct nfs4_opendata *data) 2093 { 2094 struct inode *inode; 2095 2096 switch (data->o_arg.claim) { 2097 case NFS4_OPEN_CLAIM_NULL: 2098 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 2099 case NFS4_OPEN_CLAIM_DELEGATE_PREV: 2100 if (!(data->f_attr.valid & NFS_ATTR_FATTR)) 2101 return ERR_PTR(-EAGAIN); 2102 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, 2103 &data->f_attr); 2104 break; 2105 default: 2106 inode = d_inode(data->dentry); 2107 ihold(inode); 2108 nfs_refresh_inode(inode, &data->f_attr); 2109 } 2110 return inode; 2111 } 2112 2113 static struct nfs4_state * 2114 nfs4_opendata_find_nfs4_state(struct nfs4_opendata *data) 2115 { 2116 struct nfs4_state *state; 2117 struct inode *inode; 2118 2119 inode = nfs4_opendata_get_inode(data); 2120 if (IS_ERR(inode)) 2121 return ERR_CAST(inode); 2122 if (data->state != NULL && data->state->inode == inode) { 2123 state = data->state; 2124 refcount_inc(&state->count); 2125 } else 
2126 state = nfs4_get_open_state(inode, data->owner); 2127 iput(inode); 2128 if (state == NULL) 2129 state = ERR_PTR(-ENOMEM); 2130 return state; 2131 } 2132 2133 static struct nfs4_state * 2134 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 2135 { 2136 struct nfs4_state *state; 2137 2138 if (!data->rpc_done) { 2139 state = nfs4_try_open_cached(data); 2140 trace_nfs4_cached_open(data->state); 2141 goto out; 2142 } 2143 2144 state = nfs4_opendata_find_nfs4_state(data); 2145 if (IS_ERR(state)) 2146 goto out; 2147 2148 nfs4_process_delegation(state->inode, 2149 data->owner->so_cred, 2150 data->o_arg.claim, 2151 &data->o_res.delegation); 2152 2153 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_NO_OPEN_STATEID)) { 2154 if (!update_open_stateid(state, &data->o_res.stateid, 2155 NULL, data->o_arg.fmode)) { 2156 nfs4_put_open_state(state); 2157 state = ERR_PTR(-EAGAIN); 2158 } 2159 } else if (!update_open_stateid(state, NULL, NULL, data->o_arg.fmode)) { 2160 nfs4_put_open_state(state); 2161 state = ERR_PTR(-EAGAIN); 2162 } 2163 out: 2164 nfs_release_seqid(data->o_arg.seqid); 2165 return state; 2166 } 2167 2168 static struct nfs4_state * 2169 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 2170 { 2171 struct nfs4_state *ret; 2172 2173 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) 2174 ret =_nfs4_opendata_reclaim_to_nfs4_state(data); 2175 else 2176 ret = _nfs4_opendata_to_nfs4_state(data); 2177 nfs4_sequence_free_slot(&data->o_res.seq_res); 2178 return ret; 2179 } 2180 2181 static struct nfs_open_context * 2182 nfs4_state_find_open_context_mode(struct nfs4_state *state, fmode_t mode) 2183 { 2184 struct nfs_inode *nfsi = NFS_I(state->inode); 2185 struct nfs_open_context *ctx; 2186 2187 rcu_read_lock(); 2188 list_for_each_entry_rcu(ctx, &nfsi->open_files, list) { 2189 if (ctx->state != state) 2190 continue; 2191 if ((ctx->mode & mode) != mode) 2192 continue; 2193 if (!get_nfs_open_context(ctx)) 2194 continue; 2195 rcu_read_unlock(); 2196 return ctx; 2197 } 2198 rcu_read_unlock(); 2199 return ERR_PTR(-ENOENT); 2200 } 2201 2202 static struct nfs_open_context * 2203 nfs4_state_find_open_context(struct nfs4_state *state) 2204 { 2205 struct nfs_open_context *ctx; 2206 2207 ctx = nfs4_state_find_open_context_mode(state, FMODE_READ|FMODE_WRITE); 2208 if (!IS_ERR(ctx)) 2209 return ctx; 2210 ctx = nfs4_state_find_open_context_mode(state, FMODE_WRITE); 2211 if (!IS_ERR(ctx)) 2212 return ctx; 2213 return nfs4_state_find_open_context_mode(state, FMODE_READ); 2214 } 2215 2216 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, 2217 struct nfs4_state *state, enum open_claim_type4 claim) 2218 { 2219 struct nfs4_opendata *opendata; 2220 2221 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, 2222 NULL, claim, GFP_NOFS); 2223 if (opendata == NULL) 2224 return ERR_PTR(-ENOMEM); 2225 opendata->state = state; 2226 refcount_inc(&state->count); 2227 return opendata; 2228 } 2229 2230 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, 2231 fmode_t fmode) 2232 { 2233 struct nfs4_state *newstate; 2234 struct nfs_server *server = NFS_SB(opendata->dentry->d_sb); 2235 int openflags = opendata->o_arg.open_flags; 2236 int ret; 2237 2238 if (!nfs4_mode_match_open_stateid(opendata->state, fmode)) 2239 return 0; 2240 opendata->o_arg.fmode = fmode; 2241 opendata->o_arg.share_access = 2242 nfs4_map_atomic_open_share(server, fmode, openflags); 2243 memset(&opendata->o_res, 0, sizeof(opendata->o_res)); 2244 memset(&opendata->c_res, 0, 
sizeof(opendata->c_res)); 2245 nfs4_init_opendata_res(opendata); 2246 ret = _nfs4_recover_proc_open(opendata); 2247 if (ret != 0) 2248 return ret; 2249 newstate = nfs4_opendata_to_nfs4_state(opendata); 2250 if (IS_ERR(newstate)) 2251 return PTR_ERR(newstate); 2252 if (newstate != opendata->state) 2253 ret = -ESTALE; 2254 nfs4_close_state(newstate, fmode); 2255 return ret; 2256 } 2257 2258 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state) 2259 { 2260 int ret; 2261 2262 /* memory barrier prior to reading state->n_* */ 2263 smp_rmb(); 2264 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); 2265 if (ret != 0) 2266 return ret; 2267 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE); 2268 if (ret != 0) 2269 return ret; 2270 ret = nfs4_open_recover_helper(opendata, FMODE_READ); 2271 if (ret != 0) 2272 return ret; 2273 /* 2274 * We may have performed cached opens for all three recoveries. 2275 * Check if we need to update the current stateid. 2276 */ 2277 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 && 2278 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) { 2279 write_seqlock(&state->seqlock); 2280 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 2281 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 2282 write_sequnlock(&state->seqlock); 2283 } 2284 return 0; 2285 } 2286 2287 /* 2288 * OPEN_RECLAIM: 2289 * reclaim state on the server after a reboot. 2290 */ 2291 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 2292 { 2293 struct nfs_delegation *delegation; 2294 struct nfs4_opendata *opendata; 2295 u32 delegation_type = NFS4_OPEN_DELEGATE_NONE; 2296 int status; 2297 2298 opendata = nfs4_open_recoverdata_alloc(ctx, state, 2299 NFS4_OPEN_CLAIM_PREVIOUS); 2300 if (IS_ERR(opendata)) 2301 return PTR_ERR(opendata); 2302 rcu_read_lock(); 2303 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2304 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0) { 2305 switch(delegation->type) { 2306 case FMODE_READ: 2307 delegation_type = NFS4_OPEN_DELEGATE_READ; 2308 if (test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) 2309 delegation_type = NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG; 2310 break; 2311 case FMODE_WRITE: 2312 case FMODE_READ|FMODE_WRITE: 2313 delegation_type = NFS4_OPEN_DELEGATE_WRITE; 2314 if (test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) 2315 delegation_type = NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG; 2316 } 2317 } 2318 rcu_read_unlock(); 2319 opendata->o_arg.u.delegation_type = delegation_type; 2320 status = nfs4_open_recover(opendata, state); 2321 nfs4_opendata_put(opendata); 2322 return status; 2323 } 2324 2325 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 2326 { 2327 struct nfs_server *server = NFS_SERVER(state->inode); 2328 struct nfs4_exception exception = { }; 2329 int err; 2330 do { 2331 err = _nfs4_do_open_reclaim(ctx, state); 2332 trace_nfs4_open_reclaim(ctx, 0, err); 2333 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 2334 continue; 2335 if (err != -NFS4ERR_DELAY) 2336 break; 2337 nfs4_handle_exception(server, err, &exception); 2338 } while (exception.retry); 2339 return err; 2340 } 2341 2342 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state) 2343 { 2344 struct nfs_open_context *ctx; 2345 int ret; 2346 2347 ctx = nfs4_state_find_open_context(state); 2348 if (IS_ERR(ctx)) 2349 return -EAGAIN; 2350 
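/*
 * Reclaim starts from a clean slate: the cached delegation flag and the
 * per-mode open flags are dropped here so that the CLAIM_PREVIOUS opens
 * issued by nfs4_do_open_reclaim() can re-establish them from the
 * server's replies.
 */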
clear_bit(NFS_DELEGATED_STATE, &state->flags); 2351 nfs_state_clear_open_state_flags(state); 2352 ret = nfs4_do_open_reclaim(ctx, state); 2353 put_nfs_open_context(ctx); 2354 return ret; 2355 } 2356 2357 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err) 2358 { 2359 switch (err) { 2360 default: 2361 printk(KERN_ERR "NFS: %s: unhandled error " 2362 "%d.\n", __func__, err); 2363 fallthrough; 2364 case 0: 2365 case -ENOENT: 2366 case -EAGAIN: 2367 case -ESTALE: 2368 case -ETIMEDOUT: 2369 break; 2370 case -NFS4ERR_BADSESSION: 2371 case -NFS4ERR_BADSLOT: 2372 case -NFS4ERR_BAD_HIGH_SLOT: 2373 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 2374 case -NFS4ERR_DEADSESSION: 2375 return -EAGAIN; 2376 case -NFS4ERR_STALE_CLIENTID: 2377 case -NFS4ERR_STALE_STATEID: 2378 /* Don't recall a delegation if it was lost */ 2379 nfs4_schedule_lease_recovery(server->nfs_client); 2380 return -EAGAIN; 2381 case -NFS4ERR_MOVED: 2382 nfs4_schedule_migration_recovery(server); 2383 return -EAGAIN; 2384 case -NFS4ERR_LEASE_MOVED: 2385 nfs4_schedule_lease_moved_recovery(server->nfs_client); 2386 return -EAGAIN; 2387 case -NFS4ERR_DELEG_REVOKED: 2388 case -NFS4ERR_ADMIN_REVOKED: 2389 case -NFS4ERR_EXPIRED: 2390 case -NFS4ERR_BAD_STATEID: 2391 case -NFS4ERR_OPENMODE: 2392 nfs_inode_find_state_and_recover(state->inode, 2393 stateid); 2394 nfs4_schedule_stateid_recovery(server, state); 2395 return -EAGAIN; 2396 case -NFS4ERR_DELAY: 2397 case -NFS4ERR_GRACE: 2398 ssleep(1); 2399 return -EAGAIN; 2400 case -ENOMEM: 2401 case -NFS4ERR_DENIED: 2402 if (fl) { 2403 struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner; 2404 if (lsp) 2405 set_bit(NFS_LOCK_LOST, &lsp->ls_flags); 2406 } 2407 return 0; 2408 } 2409 return err; 2410 } 2411 2412 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, 2413 struct nfs4_state *state, const nfs4_stateid *stateid) 2414 { 2415 struct nfs_server *server = NFS_SERVER(state->inode); 2416 struct nfs4_opendata *opendata; 2417 int err = 0; 2418 2419 opendata = nfs4_open_recoverdata_alloc(ctx, state, 2420 NFS4_OPEN_CLAIM_DELEG_CUR_FH); 2421 if (IS_ERR(opendata)) 2422 return PTR_ERR(opendata); 2423 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); 2424 if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) { 2425 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); 2426 if (err) 2427 goto out; 2428 } 2429 if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) { 2430 err = nfs4_open_recover_helper(opendata, FMODE_WRITE); 2431 if (err) 2432 goto out; 2433 } 2434 if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) { 2435 err = nfs4_open_recover_helper(opendata, FMODE_READ); 2436 if (err) 2437 goto out; 2438 } 2439 nfs_state_clear_delegation(state); 2440 out: 2441 nfs4_opendata_put(opendata); 2442 return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err); 2443 } 2444 2445 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata) 2446 { 2447 struct nfs4_opendata *data = calldata; 2448 2449 nfs4_setup_sequence(data->o_arg.server->nfs_client, 2450 &data->c_arg.seq_args, &data->c_res.seq_res, task); 2451 } 2452 2453 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata) 2454 { 2455 struct nfs4_opendata *data = calldata; 2456 2457 nfs40_sequence_done(task, &data->c_res.seq_res); 2458 2459 data->rpc_status = task->tk_status; 2460 if (data->rpc_status == 0) { 2461 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid); 2462 
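/*
 * The server has confirmed the open owner: record that on the seqid
 * counter and use the successful reply to renew the client lease.
 */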
nfs_confirm_seqid(&data->owner->so_seqid, 0); 2463 renew_lease(data->o_res.server, data->timestamp); 2464 data->rpc_done = true; 2465 } 2466 } 2467 2468 static void nfs4_open_confirm_release(void *calldata) 2469 { 2470 struct nfs4_opendata *data = calldata; 2471 struct nfs4_state *state = NULL; 2472 2473 /* If this request hasn't been cancelled, do nothing */ 2474 if (!data->cancelled) 2475 goto out_free; 2476 /* In case of error, no cleanup! */ 2477 if (!data->rpc_done) 2478 goto out_free; 2479 state = nfs4_opendata_to_nfs4_state(data); 2480 if (!IS_ERR(state)) 2481 nfs4_close_state(state, data->o_arg.fmode); 2482 out_free: 2483 nfs4_opendata_put(data); 2484 } 2485 2486 static const struct rpc_call_ops nfs4_open_confirm_ops = { 2487 .rpc_call_prepare = nfs4_open_confirm_prepare, 2488 .rpc_call_done = nfs4_open_confirm_done, 2489 .rpc_release = nfs4_open_confirm_release, 2490 }; 2491 2492 /* 2493 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata 2494 */ 2495 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) 2496 { 2497 struct nfs_server *server = NFS_SERVER(d_inode(data->dir)); 2498 struct rpc_task *task; 2499 struct rpc_message msg = { 2500 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM], 2501 .rpc_argp = &data->c_arg, 2502 .rpc_resp = &data->c_res, 2503 .rpc_cred = data->owner->so_cred, 2504 }; 2505 struct rpc_task_setup task_setup_data = { 2506 .rpc_client = server->client, 2507 .rpc_message = &msg, 2508 .callback_ops = &nfs4_open_confirm_ops, 2509 .callback_data = data, 2510 .workqueue = nfsiod_workqueue, 2511 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 2512 }; 2513 int status; 2514 2515 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1, 2516 data->is_recover); 2517 kref_get(&data->kref); 2518 data->rpc_done = false; 2519 data->rpc_status = 0; 2520 data->timestamp = jiffies; 2521 task = rpc_run_task(&task_setup_data); 2522 if (IS_ERR(task)) 2523 return PTR_ERR(task); 2524 status = rpc_wait_for_completion_task(task); 2525 if (status != 0) { 2526 data->cancelled = true; 2527 smp_wmb(); 2528 } else 2529 status = data->rpc_status; 2530 rpc_put_task(task); 2531 return status; 2532 } 2533 2534 static void nfs4_open_prepare(struct rpc_task *task, void *calldata) 2535 { 2536 struct nfs4_opendata *data = calldata; 2537 struct nfs4_state_owner *sp = data->owner; 2538 struct nfs_client *clp = sp->so_server->nfs_client; 2539 enum open_claim_type4 claim = data->o_arg.claim; 2540 2541 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) 2542 goto out_wait; 2543 /* 2544 * Check if we still need to send an OPEN call, or if we can use 2545 * a delegation instead. 2546 */ 2547 if (data->state != NULL) { 2548 struct nfs_delegation *delegation; 2549 2550 if (can_open_cached(data->state, data->o_arg.fmode, 2551 data->o_arg.open_flags, claim)) 2552 goto out_no_action; 2553 rcu_read_lock(); 2554 delegation = nfs4_get_valid_delegation(data->state->inode); 2555 if (can_open_delegated(delegation, data->o_arg.fmode, claim)) 2556 goto unlock_no_action; 2557 rcu_read_unlock(); 2558 } 2559 /* Update client id. 
*/ 2560 data->o_arg.clientid = clp->cl_clientid; 2561 switch (claim) { 2562 default: 2563 break; 2564 case NFS4_OPEN_CLAIM_PREVIOUS: 2565 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 2566 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 2567 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0]; 2568 fallthrough; 2569 case NFS4_OPEN_CLAIM_FH: 2570 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; 2571 } 2572 data->timestamp = jiffies; 2573 if (nfs4_setup_sequence(data->o_arg.server->nfs_client, 2574 &data->o_arg.seq_args, 2575 &data->o_res.seq_res, 2576 task) != 0) 2577 nfs_release_seqid(data->o_arg.seqid); 2578 2579 /* Set the create mode (note dependency on the session type) */ 2580 data->o_arg.createmode = NFS4_CREATE_UNCHECKED; 2581 if (data->o_arg.open_flags & O_EXCL) { 2582 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1; 2583 if (clp->cl_mvops->minor_version == 0) { 2584 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE; 2585 /* don't put an ACCESS op in OPEN compound if O_EXCL, 2586 * because ACCESS will return permission denied for 2587 * all bits until close */ 2588 data->o_res.access_request = data->o_arg.access = 0; 2589 } else if (nfs4_has_persistent_session(clp)) 2590 data->o_arg.createmode = NFS4_CREATE_GUARDED; 2591 } 2592 return; 2593 unlock_no_action: 2594 trace_nfs4_cached_open(data->state); 2595 rcu_read_unlock(); 2596 out_no_action: 2597 task->tk_action = NULL; 2598 out_wait: 2599 nfs4_sequence_done(task, &data->o_res.seq_res); 2600 } 2601 2602 static void nfs4_open_done(struct rpc_task *task, void *calldata) 2603 { 2604 struct nfs4_opendata *data = calldata; 2605 2606 data->rpc_status = task->tk_status; 2607 2608 if (!nfs4_sequence_process(task, &data->o_res.seq_res)) 2609 return; 2610 2611 if (task->tk_status == 0) { 2612 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) { 2613 switch (data->o_res.f_attr->mode & S_IFMT) { 2614 case S_IFREG: 2615 break; 2616 case S_IFLNK: 2617 data->rpc_status = -ELOOP; 2618 break; 2619 case S_IFDIR: 2620 data->rpc_status = -EISDIR; 2621 break; 2622 default: 2623 data->rpc_status = -ENOTDIR; 2624 } 2625 } 2626 renew_lease(data->o_res.server, data->timestamp); 2627 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)) 2628 nfs_confirm_seqid(&data->owner->so_seqid, 0); 2629 } 2630 data->rpc_done = true; 2631 } 2632 2633 static void nfs4_open_release(void *calldata) 2634 { 2635 struct nfs4_opendata *data = calldata; 2636 struct nfs4_state *state = NULL; 2637 2638 /* In case of error, no cleanup! */ 2639 if (data->rpc_status != 0 || !data->rpc_done) { 2640 nfs_release_seqid(data->o_arg.seqid); 2641 goto out_free; 2642 } 2643 /* If this request hasn't been cancelled, do nothing */ 2644 if (!data->cancelled) 2645 goto out_free; 2646 /* In case we need an open_confirm, no cleanup! 
*/ 2647 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) 2648 goto out_free; 2649 state = nfs4_opendata_to_nfs4_state(data); 2650 if (!IS_ERR(state)) 2651 nfs4_close_state(state, data->o_arg.fmode); 2652 out_free: 2653 nfs4_opendata_put(data); 2654 } 2655 2656 static const struct rpc_call_ops nfs4_open_ops = { 2657 .rpc_call_prepare = nfs4_open_prepare, 2658 .rpc_call_done = nfs4_open_done, 2659 .rpc_release = nfs4_open_release, 2660 }; 2661 2662 static int nfs4_run_open_task(struct nfs4_opendata *data, 2663 struct nfs_open_context *ctx) 2664 { 2665 struct inode *dir = d_inode(data->dir); 2666 struct nfs_server *server = NFS_SERVER(dir); 2667 struct nfs_openargs *o_arg = &data->o_arg; 2668 struct nfs_openres *o_res = &data->o_res; 2669 struct rpc_task *task; 2670 struct rpc_message msg = { 2671 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN], 2672 .rpc_argp = o_arg, 2673 .rpc_resp = o_res, 2674 .rpc_cred = data->owner->so_cred, 2675 }; 2676 struct rpc_task_setup task_setup_data = { 2677 .rpc_client = server->client, 2678 .rpc_message = &msg, 2679 .callback_ops = &nfs4_open_ops, 2680 .callback_data = data, 2681 .workqueue = nfsiod_workqueue, 2682 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 2683 }; 2684 int status; 2685 2686 if (nfs_server_capable(dir, NFS_CAP_MOVEABLE)) 2687 task_setup_data.flags |= RPC_TASK_MOVEABLE; 2688 2689 kref_get(&data->kref); 2690 data->rpc_done = false; 2691 data->rpc_status = 0; 2692 data->cancelled = false; 2693 data->is_recover = false; 2694 if (!ctx) { 2695 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 1); 2696 data->is_recover = true; 2697 task_setup_data.flags |= RPC_TASK_TIMEOUT; 2698 } else { 2699 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 0); 2700 pnfs_lgopen_prepare(data, ctx); 2701 } 2702 task = rpc_run_task(&task_setup_data); 2703 if (IS_ERR(task)) 2704 return PTR_ERR(task); 2705 status = rpc_wait_for_completion_task(task); 2706 if (status != 0) { 2707 data->cancelled = true; 2708 smp_wmb(); 2709 } else 2710 status = data->rpc_status; 2711 rpc_put_task(task); 2712 2713 return status; 2714 } 2715 2716 static int _nfs4_recover_proc_open(struct nfs4_opendata *data) 2717 { 2718 struct inode *dir = d_inode(data->dir); 2719 struct nfs_openres *o_res = &data->o_res; 2720 int status; 2721 2722 status = nfs4_run_open_task(data, NULL); 2723 if (status != 0 || !data->rpc_done) 2724 return status; 2725 2726 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr); 2727 2728 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) 2729 status = _nfs4_proc_open_confirm(data); 2730 2731 return status; 2732 } 2733 2734 /* 2735 * Additional permission checks in order to distinguish between an 2736 * open for read, and an open for execute. This works around the 2737 * fact that NFSv4 OPEN treats read and execute permissions as being 2738 * the same. 2739 * Note that in the non-execute case, we want to turn off permission 2740 * checking if we just created a new file (POSIX open() semantics). 
2741 */ 2742 static int nfs4_opendata_access(const struct cred *cred, 2743 struct nfs4_opendata *opendata, 2744 struct nfs4_state *state, fmode_t fmode) 2745 { 2746 struct nfs_access_entry cache; 2747 u32 mask, flags; 2748 2749 /* access call failed or for some reason the server doesn't 2750 * support any access modes -- defer access call until later */ 2751 if (opendata->o_res.access_supported == 0) 2752 return 0; 2753 2754 mask = 0; 2755 if (fmode & FMODE_EXEC) { 2756 /* ONLY check for exec rights */ 2757 if (S_ISDIR(state->inode->i_mode)) 2758 mask = NFS4_ACCESS_LOOKUP; 2759 else 2760 mask = NFS4_ACCESS_EXECUTE; 2761 } else if ((fmode & FMODE_READ) && !opendata->file_created) 2762 mask = NFS4_ACCESS_READ; 2763 2764 nfs_access_set_mask(&cache, opendata->o_res.access_result); 2765 nfs_access_add_cache(state->inode, &cache, cred); 2766 2767 flags = NFS4_ACCESS_READ | NFS4_ACCESS_EXECUTE | NFS4_ACCESS_LOOKUP; 2768 if ((mask & ~cache.mask & flags) == 0) 2769 return 0; 2770 2771 return -EACCES; 2772 } 2773 2774 /* 2775 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata 2776 */ 2777 static int _nfs4_proc_open(struct nfs4_opendata *data, 2778 struct nfs_open_context *ctx) 2779 { 2780 struct inode *dir = d_inode(data->dir); 2781 struct nfs_server *server = NFS_SERVER(dir); 2782 struct nfs_openargs *o_arg = &data->o_arg; 2783 struct nfs_openres *o_res = &data->o_res; 2784 int status; 2785 2786 status = nfs4_run_open_task(data, ctx); 2787 if (!data->rpc_done) 2788 return status; 2789 if (status != 0) { 2790 if (status == -NFS4ERR_BADNAME && 2791 !(o_arg->open_flags & O_CREAT)) 2792 return -ENOENT; 2793 return status; 2794 } 2795 2796 nfs_fattr_map_and_free_names(server, &data->f_attr); 2797 2798 if (o_arg->open_flags & O_CREAT) { 2799 if (o_arg->open_flags & O_EXCL) 2800 data->file_created = true; 2801 else if (o_res->cinfo.before != o_res->cinfo.after) 2802 data->file_created = true; 2803 if (data->file_created || 2804 inode_peek_iversion_raw(dir) != o_res->cinfo.after) 2805 nfs4_update_changeattr(dir, &o_res->cinfo, 2806 o_res->f_attr->time_start, 2807 NFS_INO_INVALID_DATA); 2808 } 2809 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) 2810 server->caps &= ~NFS_CAP_POSIX_LOCK; 2811 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 2812 status = _nfs4_proc_open_confirm(data); 2813 if (status != 0) 2814 return status; 2815 } 2816 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) { 2817 struct nfs_fh *fh = &o_res->fh; 2818 2819 nfs4_sequence_free_slot(&o_res->seq_res); 2820 if (o_arg->claim == NFS4_OPEN_CLAIM_FH) 2821 fh = NFS_FH(d_inode(data->dentry)); 2822 nfs4_proc_getattr(server, fh, o_res->f_attr, NULL); 2823 } 2824 return 0; 2825 } 2826 2827 /* 2828 * OPEN_EXPIRED: 2829 * reclaim state on the server after a network partition. 2830 * Assumes caller holds the appropriate lock 2831 */ 2832 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2833 { 2834 struct nfs4_opendata *opendata; 2835 int ret; 2836 2837 opendata = nfs4_open_recoverdata_alloc(ctx, state, NFS4_OPEN_CLAIM_FH); 2838 if (IS_ERR(opendata)) 2839 return PTR_ERR(opendata); 2840 /* 2841 * We're not recovering a delegation, so ask for no delegation. 2842 * Otherwise the recovery thread could deadlock with an outstanding 2843 * delegation return. 
2844 */ 2845 opendata->o_arg.open_flags = O_DIRECT; 2846 ret = nfs4_open_recover(opendata, state); 2847 if (ret == -ESTALE) 2848 d_drop(ctx->dentry); 2849 nfs4_opendata_put(opendata); 2850 return ret; 2851 } 2852 2853 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2854 { 2855 struct nfs_server *server = NFS_SERVER(state->inode); 2856 struct nfs4_exception exception = { }; 2857 int err; 2858 2859 do { 2860 err = _nfs4_open_expired(ctx, state); 2861 trace_nfs4_open_expired(ctx, 0, err); 2862 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 2863 continue; 2864 switch (err) { 2865 default: 2866 goto out; 2867 case -NFS4ERR_GRACE: 2868 case -NFS4ERR_DELAY: 2869 nfs4_handle_exception(server, err, &exception); 2870 err = 0; 2871 } 2872 } while (exception.retry); 2873 out: 2874 return err; 2875 } 2876 2877 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2878 { 2879 struct nfs_open_context *ctx; 2880 int ret; 2881 2882 ctx = nfs4_state_find_open_context(state); 2883 if (IS_ERR(ctx)) 2884 return -EAGAIN; 2885 ret = nfs4_do_open_expired(ctx, state); 2886 put_nfs_open_context(ctx); 2887 return ret; 2888 } 2889 2890 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state, 2891 const nfs4_stateid *stateid) 2892 { 2893 nfs_remove_bad_delegation(state->inode, stateid); 2894 nfs_state_clear_delegation(state); 2895 } 2896 2897 static void nfs40_clear_delegation_stateid(struct nfs4_state *state) 2898 { 2899 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL) 2900 nfs_finish_clear_delegation_stateid(state, NULL); 2901 } 2902 2903 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2904 { 2905 /* NFSv4.0 doesn't allow for delegation recovery on open expire */ 2906 nfs40_clear_delegation_stateid(state); 2907 nfs_state_clear_open_state_flags(state); 2908 return nfs4_open_expired(sp, state); 2909 } 2910 2911 static int nfs40_test_and_free_expired_stateid(struct nfs_server *server, 2912 nfs4_stateid *stateid, const struct cred *cred) 2913 { 2914 return -NFS4ERR_BAD_STATEID; 2915 } 2916 2917 #if defined(CONFIG_NFS_V4_1) 2918 static int nfs41_test_and_free_expired_stateid(struct nfs_server *server, 2919 nfs4_stateid *stateid, const struct cred *cred) 2920 { 2921 int status; 2922 2923 switch (stateid->type) { 2924 default: 2925 break; 2926 case NFS4_INVALID_STATEID_TYPE: 2927 case NFS4_SPECIAL_STATEID_TYPE: 2928 case NFS4_FREED_STATEID_TYPE: 2929 return -NFS4ERR_BAD_STATEID; 2930 case NFS4_REVOKED_STATEID_TYPE: 2931 goto out_free; 2932 } 2933 2934 status = nfs41_test_stateid(server, stateid, cred); 2935 switch (status) { 2936 case -NFS4ERR_EXPIRED: 2937 case -NFS4ERR_ADMIN_REVOKED: 2938 case -NFS4ERR_DELEG_REVOKED: 2939 break; 2940 default: 2941 return status; 2942 } 2943 out_free: 2944 /* Ack the revoked state to the server */ 2945 nfs41_free_stateid(server, stateid, cred, true); 2946 return -NFS4ERR_EXPIRED; 2947 } 2948 2949 static int nfs41_check_delegation_stateid(struct nfs4_state *state) 2950 { 2951 struct nfs_server *server = NFS_SERVER(state->inode); 2952 nfs4_stateid stateid; 2953 struct nfs_delegation *delegation; 2954 const struct cred *cred = NULL; 2955 int status, ret = NFS_OK; 2956 2957 /* Get the delegation credential for use by test/free_stateid */ 2958 rcu_read_lock(); 2959 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2960 if (delegation == NULL) { 2961 rcu_read_unlock(); 2962 nfs_state_clear_delegation(state); 2963 return NFS_OK; 
2964 } 2965 2966 spin_lock(&delegation->lock); 2967 nfs4_stateid_copy(&stateid, &delegation->stateid); 2968 2969 if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, 2970 &delegation->flags)) { 2971 spin_unlock(&delegation->lock); 2972 rcu_read_unlock(); 2973 return NFS_OK; 2974 } 2975 2976 if (delegation->cred) 2977 cred = get_cred(delegation->cred); 2978 spin_unlock(&delegation->lock); 2979 rcu_read_unlock(); 2980 status = nfs41_test_and_free_expired_stateid(server, &stateid, cred); 2981 trace_nfs4_test_delegation_stateid(state, NULL, status); 2982 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) 2983 nfs_finish_clear_delegation_stateid(state, &stateid); 2984 else 2985 ret = status; 2986 2987 put_cred(cred); 2988 return ret; 2989 } 2990 2991 static void nfs41_delegation_recover_stateid(struct nfs4_state *state) 2992 { 2993 nfs4_stateid tmp; 2994 2995 if (test_bit(NFS_DELEGATED_STATE, &state->flags) && 2996 nfs4_copy_delegation_stateid(state->inode, state->state, 2997 &tmp, NULL) && 2998 nfs4_stateid_match_other(&state->stateid, &tmp)) 2999 nfs_state_set_delegation(state, &tmp, state->state); 3000 else 3001 nfs_state_clear_delegation(state); 3002 } 3003 3004 /** 3005 * nfs41_check_expired_locks - possibly free a lock stateid 3006 * 3007 * @state: NFSv4 state for an inode 3008 * 3009 * Returns NFS_OK if recovery for this stateid is now finished. 3010 * Otherwise a negative NFS4ERR value is returned. 3011 */ 3012 static int nfs41_check_expired_locks(struct nfs4_state *state) 3013 { 3014 int status, ret = NFS_OK; 3015 struct nfs4_lock_state *lsp, *prev = NULL; 3016 struct nfs_server *server = NFS_SERVER(state->inode); 3017 3018 if (!test_bit(LK_STATE_IN_USE, &state->flags)) 3019 goto out; 3020 3021 spin_lock(&state->state_lock); 3022 list_for_each_entry(lsp, &state->lock_states, ls_locks) { 3023 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { 3024 const struct cred *cred = lsp->ls_state->owner->so_cred; 3025 3026 refcount_inc(&lsp->ls_count); 3027 spin_unlock(&state->state_lock); 3028 3029 nfs4_put_lock_state(prev); 3030 prev = lsp; 3031 3032 status = nfs41_test_and_free_expired_stateid(server, 3033 &lsp->ls_stateid, 3034 cred); 3035 trace_nfs4_test_lock_stateid(state, lsp, status); 3036 if (status == -NFS4ERR_EXPIRED || 3037 status == -NFS4ERR_BAD_STATEID) { 3038 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 3039 lsp->ls_stateid.type = NFS4_INVALID_STATEID_TYPE; 3040 if (!recover_lost_locks) 3041 set_bit(NFS_LOCK_LOST, &lsp->ls_flags); 3042 } else if (status != NFS_OK) { 3043 ret = status; 3044 nfs4_put_lock_state(prev); 3045 goto out; 3046 } 3047 spin_lock(&state->state_lock); 3048 } 3049 } 3050 spin_unlock(&state->state_lock); 3051 nfs4_put_lock_state(prev); 3052 out: 3053 return ret; 3054 } 3055 3056 /** 3057 * nfs41_check_open_stateid - possibly free an open stateid 3058 * 3059 * @state: NFSv4 state for an inode 3060 * 3061 * Returns NFS_OK if recovery for this stateid is now finished. 3062 * Otherwise a negative NFS4ERR value is returned. 
3063 */ 3064 static int nfs41_check_open_stateid(struct nfs4_state *state) 3065 { 3066 struct nfs_server *server = NFS_SERVER(state->inode); 3067 nfs4_stateid *stateid = &state->open_stateid; 3068 const struct cred *cred = state->owner->so_cred; 3069 int status; 3070 3071 if (test_bit(NFS_OPEN_STATE, &state->flags) == 0) 3072 return -NFS4ERR_BAD_STATEID; 3073 status = nfs41_test_and_free_expired_stateid(server, stateid, cred); 3074 trace_nfs4_test_open_stateid(state, NULL, status); 3075 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) { 3076 nfs_state_clear_open_state_flags(state); 3077 stateid->type = NFS4_INVALID_STATEID_TYPE; 3078 return status; 3079 } 3080 if (nfs_open_stateid_recover_openmode(state)) 3081 return -NFS4ERR_OPENMODE; 3082 return NFS_OK; 3083 } 3084 3085 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 3086 { 3087 int status; 3088 3089 status = nfs41_check_delegation_stateid(state); 3090 if (status != NFS_OK) 3091 return status; 3092 nfs41_delegation_recover_stateid(state); 3093 3094 status = nfs41_check_expired_locks(state); 3095 if (status != NFS_OK) 3096 return status; 3097 status = nfs41_check_open_stateid(state); 3098 if (status != NFS_OK) 3099 status = nfs4_open_expired(sp, state); 3100 return status; 3101 } 3102 #endif 3103 3104 /* 3105 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-* 3106 * fields corresponding to attributes that were used to store the verifier. 3107 * Make sure we clobber those fields in the later setattr call 3108 */ 3109 static unsigned nfs4_exclusive_attrset(struct nfs4_opendata *opendata, 3110 struct iattr *sattr, struct nfs4_label **label) 3111 { 3112 const __u32 *bitmask = opendata->o_arg.server->exclcreat_bitmask; 3113 __u32 attrset[3]; 3114 unsigned ret; 3115 unsigned i; 3116 3117 for (i = 0; i < ARRAY_SIZE(attrset); i++) { 3118 attrset[i] = opendata->o_res.attrset[i]; 3119 if (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE4_1) 3120 attrset[i] &= ~bitmask[i]; 3121 } 3122 3123 ret = (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE) ? 
3124 sattr->ia_valid : 0; 3125 3126 if ((attrset[1] & (FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET))) { 3127 if (sattr->ia_valid & ATTR_ATIME_SET) 3128 ret |= ATTR_ATIME_SET; 3129 else 3130 ret |= ATTR_ATIME; 3131 } 3132 3133 if ((attrset[1] & (FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET))) { 3134 if (sattr->ia_valid & ATTR_MTIME_SET) 3135 ret |= ATTR_MTIME_SET; 3136 else 3137 ret |= ATTR_MTIME; 3138 } 3139 3140 if (!(attrset[2] & FATTR4_WORD2_SECURITY_LABEL)) 3141 *label = NULL; 3142 return ret; 3143 } 3144 3145 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, 3146 struct nfs_open_context *ctx) 3147 { 3148 struct nfs4_state_owner *sp = opendata->owner; 3149 struct nfs_server *server = sp->so_server; 3150 struct dentry *dentry; 3151 struct nfs4_state *state; 3152 fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx); 3153 struct inode *dir = d_inode(opendata->dir); 3154 unsigned long dir_verifier; 3155 int ret; 3156 3157 dir_verifier = nfs_save_change_attribute(dir); 3158 3159 ret = _nfs4_proc_open(opendata, ctx); 3160 if (ret != 0) 3161 goto out; 3162 3163 state = _nfs4_opendata_to_nfs4_state(opendata); 3164 ret = PTR_ERR(state); 3165 if (IS_ERR(state)) 3166 goto out; 3167 ctx->state = state; 3168 if (server->caps & NFS_CAP_POSIX_LOCK) 3169 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); 3170 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK) 3171 set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags); 3172 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_PRESERVE_UNLINKED) 3173 set_bit(NFS_INO_PRESERVE_UNLINKED, &NFS_I(state->inode)->flags); 3174 3175 dentry = opendata->dentry; 3176 if (d_really_is_negative(dentry)) { 3177 struct dentry *alias; 3178 d_drop(dentry); 3179 alias = d_splice_alias(igrab(state->inode), dentry); 3180 /* d_splice_alias() can't fail here - it's a non-directory */ 3181 if (alias) { 3182 dput(ctx->dentry); 3183 ctx->dentry = dentry = alias; 3184 } 3185 } 3186 3187 switch(opendata->o_arg.claim) { 3188 default: 3189 break; 3190 case NFS4_OPEN_CLAIM_NULL: 3191 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 3192 case NFS4_OPEN_CLAIM_DELEGATE_PREV: 3193 if (!opendata->rpc_done) 3194 break; 3195 if (opendata->o_res.delegation.type != 0) 3196 dir_verifier = nfs_save_change_attribute(dir); 3197 nfs_set_verifier(dentry, dir_verifier); 3198 } 3199 3200 /* Parse layoutget results before we check for access */ 3201 pnfs_parse_lgopen(state->inode, opendata->lgp, ctx); 3202 3203 ret = nfs4_opendata_access(sp->so_cred, opendata, state, acc_mode); 3204 if (ret != 0) 3205 goto out; 3206 3207 if (d_inode(dentry) == state->inode) 3208 nfs_inode_attach_open_context(ctx); 3209 3210 out: 3211 if (!opendata->cancelled) { 3212 if (opendata->lgp) { 3213 nfs4_lgopen_release(opendata->lgp); 3214 opendata->lgp = NULL; 3215 } 3216 nfs4_sequence_free_slot(&opendata->o_res.seq_res); 3217 } 3218 return ret; 3219 } 3220 3221 /* 3222 * Returns a referenced nfs4_state 3223 */ 3224 static int _nfs4_do_open(struct inode *dir, 3225 struct nfs_open_context *ctx, 3226 int flags, 3227 const struct nfs4_open_createattrs *c, 3228 int *opened) 3229 { 3230 struct nfs4_state_owner *sp; 3231 struct nfs4_state *state = NULL; 3232 struct nfs_server *server = NFS_SERVER(dir); 3233 struct nfs4_opendata *opendata; 3234 struct dentry *dentry = ctx->dentry; 3235 const struct cred *cred = ctx->cred; 3236 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold; 3237 fmode_t fmode = _nfs4_ctx_to_openmode(ctx); 3238 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL; 3239 struct iattr *sattr = 
c->sattr; 3240 struct nfs4_label *label = c->label; 3241 int status; 3242 3243 /* Protect against reboot recovery conflicts */ 3244 status = -ENOMEM; 3245 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL); 3246 if (sp == NULL) { 3247 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n"); 3248 goto out_err; 3249 } 3250 status = nfs4_client_recover_expired_lease(server->nfs_client); 3251 if (status != 0) 3252 goto err_put_state_owner; 3253 if (d_really_is_positive(dentry)) 3254 nfs4_return_incompatible_delegation(d_inode(dentry), fmode); 3255 status = -ENOMEM; 3256 if (d_really_is_positive(dentry)) 3257 claim = NFS4_OPEN_CLAIM_FH; 3258 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, 3259 c, claim, GFP_KERNEL); 3260 if (opendata == NULL) 3261 goto err_put_state_owner; 3262 3263 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) { 3264 if (!opendata->f_attr.mdsthreshold) { 3265 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); 3266 if (!opendata->f_attr.mdsthreshold) 3267 goto err_opendata_put; 3268 } 3269 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; 3270 } 3271 if (d_really_is_positive(dentry)) 3272 opendata->state = nfs4_get_open_state(d_inode(dentry), sp); 3273 3274 status = _nfs4_open_and_get_state(opendata, ctx); 3275 if (status != 0) 3276 goto err_opendata_put; 3277 state = ctx->state; 3278 3279 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) && 3280 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) { 3281 unsigned attrs = nfs4_exclusive_attrset(opendata, sattr, &label); 3282 /* 3283 * send create attributes which were not set by open 3284 * with an extra setattr. 3285 */ 3286 if (attrs || label) { 3287 unsigned ia_old = sattr->ia_valid; 3288 3289 sattr->ia_valid = attrs; 3290 nfs_fattr_init(opendata->o_res.f_attr); 3291 status = nfs4_do_setattr(state->inode, cred, 3292 opendata->o_res.f_attr, sattr, 3293 ctx, label); 3294 if (status == 0) { 3295 nfs_setattr_update_inode(state->inode, sattr, 3296 opendata->o_res.f_attr); 3297 nfs_setsecurity(state->inode, opendata->o_res.f_attr); 3298 } 3299 sattr->ia_valid = ia_old; 3300 } 3301 } 3302 if (opened && opendata->file_created) 3303 *opened = 1; 3304 3305 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) { 3306 *ctx_th = opendata->f_attr.mdsthreshold; 3307 opendata->f_attr.mdsthreshold = NULL; 3308 } 3309 3310 nfs4_opendata_put(opendata); 3311 nfs4_put_state_owner(sp); 3312 return 0; 3313 err_opendata_put: 3314 nfs4_opendata_put(opendata); 3315 err_put_state_owner: 3316 nfs4_put_state_owner(sp); 3317 out_err: 3318 return status; 3319 } 3320 3321 3322 static struct nfs4_state *nfs4_do_open(struct inode *dir, 3323 struct nfs_open_context *ctx, 3324 int flags, 3325 struct iattr *sattr, 3326 struct nfs4_label *label, 3327 int *opened) 3328 { 3329 struct nfs_server *server = NFS_SERVER(dir); 3330 struct nfs4_exception exception = { 3331 .interruptible = true, 3332 }; 3333 struct nfs4_state *res; 3334 struct nfs4_open_createattrs c = { 3335 .label = label, 3336 .sattr = sattr, 3337 .verf = { 3338 [0] = (__u32)jiffies, 3339 [1] = (__u32)current->pid, 3340 }, 3341 }; 3342 int status; 3343 3344 do { 3345 status = _nfs4_do_open(dir, ctx, flags, &c, opened); 3346 res = ctx->state; 3347 trace_nfs4_open_file(ctx, flags, status); 3348 if (status == 0) 3349 break; 3350 /* NOTE: BAD_SEQID means the server and client disagree about the 3351 * book-keeping w.r.t. state-changing operations 3352 * (OPEN/CLOSE/LOCK/LOCKU...)
3353 * It is actually a sign of a bug on the client or on the server. 3354 * 3355 * If we receive a BAD_SEQID error in the particular case of 3356 * doing an OPEN, we assume that nfs_increment_open_seqid() will 3357 * have unhashed the old state_owner for us, and that we can 3358 * therefore safely retry using a new one. We should still warn 3359 * the user though... 3360 */ 3361 if (status == -NFS4ERR_BAD_SEQID) { 3362 pr_warn_ratelimited("NFS: v4 server %s " 3363 " returned a bad sequence-id error!\n", 3364 NFS_SERVER(dir)->nfs_client->cl_hostname); 3365 exception.retry = 1; 3366 continue; 3367 } 3368 /* 3369 * BAD_STATEID on OPEN means that the server cancelled our 3370 * state before it received the OPEN_CONFIRM. 3371 * Recover by retrying the request as per the discussion 3372 * on Page 181 of RFC3530. 3373 */ 3374 if (status == -NFS4ERR_BAD_STATEID) { 3375 exception.retry = 1; 3376 continue; 3377 } 3378 if (status == -NFS4ERR_EXPIRED) { 3379 nfs4_schedule_lease_recovery(server->nfs_client); 3380 exception.retry = 1; 3381 continue; 3382 } 3383 if (status == -EAGAIN) { 3384 /* We must have found a delegation */ 3385 exception.retry = 1; 3386 continue; 3387 } 3388 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception)) 3389 continue; 3390 res = ERR_PTR(nfs4_handle_exception(server, 3391 status, &exception)); 3392 } while (exception.retry); 3393 return res; 3394 } 3395 3396 static int _nfs4_do_setattr(struct inode *inode, 3397 struct nfs_setattrargs *arg, 3398 struct nfs_setattrres *res, 3399 const struct cred *cred, 3400 struct nfs_open_context *ctx) 3401 { 3402 struct nfs_server *server = NFS_SERVER(inode); 3403 struct rpc_message msg = { 3404 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 3405 .rpc_argp = arg, 3406 .rpc_resp = res, 3407 .rpc_cred = cred, 3408 }; 3409 const struct cred *delegation_cred = NULL; 3410 unsigned long timestamp = jiffies; 3411 bool truncate; 3412 int status; 3413 3414 nfs_fattr_init(res->fattr); 3415 3416 /* Servers should only apply open mode checks for file size changes */ 3417 truncate = (arg->iap->ia_valid & ATTR_SIZE) ? 
true : false; 3418 if (!truncate) { 3419 nfs4_inode_make_writeable(inode); 3420 goto zero_stateid; 3421 } 3422 3423 if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) { 3424 /* Use that stateid */ 3425 } else if (ctx != NULL && ctx->state) { 3426 struct nfs_lock_context *l_ctx; 3427 if (!nfs4_valid_open_stateid(ctx->state)) 3428 return -EBADF; 3429 l_ctx = nfs_get_lock_context(ctx); 3430 if (IS_ERR(l_ctx)) 3431 return PTR_ERR(l_ctx); 3432 status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx, 3433 &arg->stateid, &delegation_cred); 3434 nfs_put_lock_context(l_ctx); 3435 if (status == -EIO) 3436 return -EBADF; 3437 else if (status == -EAGAIN) 3438 goto zero_stateid; 3439 } else { 3440 zero_stateid: 3441 nfs4_stateid_copy(&arg->stateid, &zero_stateid); 3442 } 3443 if (delegation_cred) 3444 msg.rpc_cred = delegation_cred; 3445 3446 status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1); 3447 3448 put_cred(delegation_cred); 3449 if (status == 0 && ctx != NULL) 3450 renew_lease(server, timestamp); 3451 trace_nfs4_setattr(inode, &arg->stateid, status); 3452 return status; 3453 } 3454 3455 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred, 3456 struct nfs_fattr *fattr, struct iattr *sattr, 3457 struct nfs_open_context *ctx, struct nfs4_label *ilabel) 3458 { 3459 struct nfs_server *server = NFS_SERVER(inode); 3460 __u32 bitmask[NFS4_BITMASK_SZ]; 3461 struct nfs4_state *state = ctx ? ctx->state : NULL; 3462 struct nfs_setattrargs arg = { 3463 .fh = NFS_FH(inode), 3464 .iap = sattr, 3465 .server = server, 3466 .bitmask = bitmask, 3467 .label = ilabel, 3468 }; 3469 struct nfs_setattrres res = { 3470 .fattr = fattr, 3471 .server = server, 3472 }; 3473 struct nfs4_exception exception = { 3474 .state = state, 3475 .inode = inode, 3476 .stateid = &arg.stateid, 3477 }; 3478 unsigned long adjust_flags = NFS_INO_INVALID_CHANGE | 3479 NFS_INO_INVALID_CTIME; 3480 int err; 3481 3482 if (sattr->ia_valid & (ATTR_MODE | ATTR_KILL_SUID | ATTR_KILL_SGID)) 3483 adjust_flags |= NFS_INO_INVALID_MODE; 3484 if (sattr->ia_valid & (ATTR_UID | ATTR_GID)) 3485 adjust_flags |= NFS_INO_INVALID_OTHER; 3486 if (sattr->ia_valid & ATTR_ATIME) 3487 adjust_flags |= NFS_INO_INVALID_ATIME; 3488 if (sattr->ia_valid & ATTR_MTIME) 3489 adjust_flags |= NFS_INO_INVALID_MTIME; 3490 3491 do { 3492 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label), 3493 inode, adjust_flags); 3494 3495 err = _nfs4_do_setattr(inode, &arg, &res, cred, ctx); 3496 switch (err) { 3497 case -NFS4ERR_OPENMODE: 3498 if (!(sattr->ia_valid & ATTR_SIZE)) { 3499 pr_warn_once("NFSv4: server %s is incorrectly " 3500 "applying open mode checks to " 3501 "a SETATTR that is not " 3502 "changing file size.\n", 3503 server->nfs_client->cl_hostname); 3504 } 3505 if (state && !(state->state & FMODE_WRITE)) { 3506 err = -EBADF; 3507 if (sattr->ia_valid & ATTR_OPEN) 3508 err = -EACCES; 3509 goto out; 3510 } 3511 } 3512 err = nfs4_handle_exception(server, err, &exception); 3513 } while (exception.retry); 3514 out: 3515 return err; 3516 } 3517 3518 static bool 3519 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task) 3520 { 3521 if (inode == NULL || !nfs_have_layout(inode)) 3522 return false; 3523 3524 return pnfs_wait_on_layoutreturn(inode, task); 3525 } 3526 3527 /* 3528 * Update the seqid of an open stateid 3529 */ 3530 static void nfs4_sync_open_stateid(nfs4_stateid *dst, 3531 struct nfs4_state *state) 3532 { 3533 __be32 seqid_open; 3534 u32 
dst_seqid; 3535 int seq; 3536 3537 for (;;) { 3538 if (!nfs4_valid_open_stateid(state)) 3539 break; 3540 seq = read_seqbegin(&state->seqlock); 3541 if (!nfs4_state_match_open_stateid_other(state, dst)) { 3542 nfs4_stateid_copy(dst, &state->open_stateid); 3543 if (read_seqretry(&state->seqlock, seq)) 3544 continue; 3545 break; 3546 } 3547 seqid_open = state->open_stateid.seqid; 3548 if (read_seqretry(&state->seqlock, seq)) 3549 continue; 3550 3551 dst_seqid = be32_to_cpu(dst->seqid); 3552 if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) < 0) 3553 dst->seqid = seqid_open; 3554 break; 3555 } 3556 } 3557 3558 /* 3559 * Update the seqid of an open stateid after receiving 3560 * NFS4ERR_OLD_STATEID 3561 */ 3562 static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst, 3563 struct nfs4_state *state) 3564 { 3565 __be32 seqid_open; 3566 u32 dst_seqid; 3567 bool ret; 3568 int seq, status = -EAGAIN; 3569 DEFINE_WAIT(wait); 3570 3571 for (;;) { 3572 ret = false; 3573 if (!nfs4_valid_open_stateid(state)) 3574 break; 3575 seq = read_seqbegin(&state->seqlock); 3576 if (!nfs4_state_match_open_stateid_other(state, dst)) { 3577 if (read_seqretry(&state->seqlock, seq)) 3578 continue; 3579 break; 3580 } 3581 3582 write_seqlock(&state->seqlock); 3583 seqid_open = state->open_stateid.seqid; 3584 3585 dst_seqid = be32_to_cpu(dst->seqid); 3586 3587 /* Did another OPEN bump the state's seqid? try again: */ 3588 if ((s32)(be32_to_cpu(seqid_open) - dst_seqid) > 0) { 3589 dst->seqid = seqid_open; 3590 write_sequnlock(&state->seqlock); 3591 ret = true; 3592 break; 3593 } 3594 3595 /* server says we're behind but we haven't seen the update yet */ 3596 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags); 3597 prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE); 3598 write_sequnlock(&state->seqlock); 3599 trace_nfs4_close_stateid_update_wait(state->inode, dst, 0); 3600 3601 if (fatal_signal_pending(current) || nfs_current_task_exiting()) 3602 status = -EINTR; 3603 else 3604 if (schedule_timeout(5*HZ) != 0) 3605 status = 0; 3606 3607 finish_wait(&state->waitq, &wait); 3608 3609 if (!status) 3610 continue; 3611 if (status == -EINTR) 3612 break; 3613 3614 /* we slept the whole 5 seconds, we must have lost a seqid */ 3615 dst->seqid = cpu_to_be32(dst_seqid + 1); 3616 ret = true; 3617 break; 3618 } 3619 3620 return ret; 3621 } 3622 3623 struct nfs4_closedata { 3624 struct inode *inode; 3625 struct nfs4_state *state; 3626 struct nfs_closeargs arg; 3627 struct nfs_closeres res; 3628 struct { 3629 struct nfs4_layoutreturn_args arg; 3630 struct nfs4_layoutreturn_res res; 3631 struct nfs4_xdr_opaque_data ld_private; 3632 u32 roc_barrier; 3633 bool roc; 3634 } lr; 3635 struct nfs_fattr fattr; 3636 unsigned long timestamp; 3637 }; 3638 3639 static void nfs4_free_closedata(void *data) 3640 { 3641 struct nfs4_closedata *calldata = data; 3642 struct nfs4_state_owner *sp = calldata->state->owner; 3643 struct super_block *sb = calldata->state->inode->i_sb; 3644 3645 if (calldata->lr.roc) 3646 pnfs_roc_release(&calldata->lr.arg, &calldata->lr.res, 3647 calldata->res.lr_ret); 3648 nfs4_put_open_state(calldata->state); 3649 nfs_free_seqid(calldata->arg.seqid); 3650 nfs4_put_state_owner(sp); 3651 nfs_sb_deactive(sb); 3652 kfree(calldata); 3653 } 3654 3655 static void nfs4_close_done(struct rpc_task *task, void *data) 3656 { 3657 struct nfs4_closedata *calldata = data; 3658 struct nfs4_state *state = calldata->state; 3659 struct nfs_server *server = NFS_SERVER(calldata->inode); 3660 nfs4_stateid *res_stateid = NULL; 3661 struct nfs4_exception 
exception = { 3662 .state = state, 3663 .inode = calldata->inode, 3664 .stateid = &calldata->arg.stateid, 3665 }; 3666 3667 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 3668 return; 3669 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status); 3670 3671 /* Handle Layoutreturn errors */ 3672 if (pnfs_roc_done(task, &calldata->arg.lr_args, &calldata->res.lr_res, 3673 &calldata->res.lr_ret) == -EAGAIN) 3674 goto out_restart; 3675 3676 /* hmm. we are done with the inode, and in the process of freeing 3677 * the state_owner. we keep this around to process errors 3678 */ 3679 switch (task->tk_status) { 3680 case 0: 3681 res_stateid = &calldata->res.stateid; 3682 renew_lease(server, calldata->timestamp); 3683 break; 3684 case -NFS4ERR_ACCESS: 3685 if (calldata->arg.bitmask != NULL) { 3686 calldata->arg.bitmask = NULL; 3687 calldata->res.fattr = NULL; 3688 goto out_restart; 3689 3690 } 3691 break; 3692 case -NFS4ERR_OLD_STATEID: 3693 /* Did we race with OPEN? */ 3694 if (nfs4_refresh_open_old_stateid(&calldata->arg.stateid, 3695 state)) 3696 goto out_restart; 3697 goto out_release; 3698 case -NFS4ERR_ADMIN_REVOKED: 3699 case -NFS4ERR_STALE_STATEID: 3700 case -NFS4ERR_EXPIRED: 3701 nfs4_free_revoked_stateid(server, 3702 &calldata->arg.stateid, 3703 task->tk_msg.rpc_cred); 3704 fallthrough; 3705 case -NFS4ERR_BAD_STATEID: 3706 if (calldata->arg.fmode == 0) 3707 break; 3708 fallthrough; 3709 default: 3710 task->tk_status = nfs4_async_handle_exception(task, 3711 server, task->tk_status, &exception); 3712 if (exception.retry) 3713 goto out_restart; 3714 } 3715 nfs_clear_open_stateid(state, &calldata->arg.stateid, 3716 res_stateid, calldata->arg.fmode); 3717 out_release: 3718 task->tk_status = 0; 3719 nfs_release_seqid(calldata->arg.seqid); 3720 nfs_refresh_inode(calldata->inode, &calldata->fattr); 3721 dprintk("%s: ret = %d\n", __func__, task->tk_status); 3722 return; 3723 out_restart: 3724 task->tk_status = 0; 3725 rpc_restart_call_prepare(task); 3726 goto out_release; 3727 } 3728 3729 static void nfs4_close_prepare(struct rpc_task *task, void *data) 3730 { 3731 struct nfs4_closedata *calldata = data; 3732 struct nfs4_state *state = calldata->state; 3733 struct inode *inode = calldata->inode; 3734 struct nfs_server *server = NFS_SERVER(inode); 3735 struct pnfs_layout_hdr *lo; 3736 bool is_rdonly, is_wronly, is_rdwr; 3737 int call_close = 0; 3738 3739 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 3740 goto out_wait; 3741 3742 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; 3743 spin_lock(&state->owner->so_lock); 3744 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags); 3745 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags); 3746 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags); 3747 /* Calculate the change in open mode */ 3748 calldata->arg.fmode = 0; 3749 if (state->n_rdwr == 0) { 3750 if (state->n_rdonly == 0) 3751 call_close |= is_rdonly; 3752 else if (is_rdonly) 3753 calldata->arg.fmode |= FMODE_READ; 3754 if (state->n_wronly == 0) 3755 call_close |= is_wronly; 3756 else if (is_wronly) 3757 calldata->arg.fmode |= FMODE_WRITE; 3758 if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE)) 3759 call_close |= is_rdwr; 3760 } else if (is_rdwr) 3761 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE; 3762 3763 nfs4_sync_open_stateid(&calldata->arg.stateid, state); 3764 if (!nfs4_valid_open_stateid(state)) 3765 call_close = 0; 3766 spin_unlock(&state->owner->so_lock); 3767 3768 if (!call_close) { 3769 /* Note: exit _without_ calling 
nfs4_close_done */ 3770 goto out_no_action; 3771 } 3772 3773 if (!calldata->lr.roc && nfs4_wait_on_layoutreturn(inode, task)) { 3774 nfs_release_seqid(calldata->arg.seqid); 3775 goto out_wait; 3776 } 3777 3778 lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL; 3779 if (lo && !pnfs_layout_is_valid(lo)) { 3780 calldata->arg.lr_args = NULL; 3781 calldata->res.lr_res = NULL; 3782 } 3783 3784 if (calldata->arg.fmode == 0) 3785 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; 3786 3787 if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) { 3788 /* Close-to-open cache consistency revalidation */ 3789 if (!nfs4_have_delegation(inode, FMODE_READ, 0)) { 3790 nfs4_bitmask_set(calldata->arg.bitmask_store, 3791 server->cache_consistency_bitmask, 3792 inode, 0); 3793 calldata->arg.bitmask = calldata->arg.bitmask_store; 3794 } else 3795 calldata->arg.bitmask = NULL; 3796 } 3797 3798 calldata->arg.share_access = 3799 nfs4_fmode_to_share_access(calldata->arg.fmode); 3800 3801 if (calldata->res.fattr == NULL) 3802 calldata->arg.bitmask = NULL; 3803 else if (calldata->arg.bitmask == NULL) 3804 calldata->res.fattr = NULL; 3805 calldata->timestamp = jiffies; 3806 if (nfs4_setup_sequence(NFS_SERVER(inode)->nfs_client, 3807 &calldata->arg.seq_args, 3808 &calldata->res.seq_res, 3809 task) != 0) 3810 nfs_release_seqid(calldata->arg.seqid); 3811 return; 3812 out_no_action: 3813 task->tk_action = NULL; 3814 out_wait: 3815 nfs4_sequence_done(task, &calldata->res.seq_res); 3816 } 3817 3818 static const struct rpc_call_ops nfs4_close_ops = { 3819 .rpc_call_prepare = nfs4_close_prepare, 3820 .rpc_call_done = nfs4_close_done, 3821 .rpc_release = nfs4_free_closedata, 3822 }; 3823 3824 /* 3825 * It is possible for data to be read/written from a mem-mapped file 3826 * after the sys_close call (which hits the vfs layer as a flush). 3827 * This means that we can't safely call nfsv4 close on a file until 3828 * the inode is cleared. This in turn means that we are not good 3829 * NFSv4 citizens - we do not indicate to the server to update the file's 3830 * share state even when we are done with one of the three share 3831 * stateid's in the inode. 3832 * 3833 * NOTE: Caller must be holding the sp->so_owner semaphore! 
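 *
 * Worked example of the mode calculation in nfs4_close_prepare() above
 * (an informal sketch derived from that code, not a normative rule):
 * suppose NFS_O_RDONLY_STATE and NFS_O_RDWR_STATE are set and the last
 * read/write opener goes away, leaving n_rdwr == 0, n_rdonly == 1 and
 * n_wronly == 0.  Then arg.fmode ends up as FMODE_READ and call_close
 * is set, so the client sends an OPEN_DOWNGRADE to read-only share
 * access.  Only when arg.fmode works out to 0 is a full CLOSE sent.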
3834 */ 3835 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) 3836 { 3837 struct nfs_server *server = NFS_SERVER(state->inode); 3838 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 3839 struct nfs4_closedata *calldata; 3840 struct nfs4_state_owner *sp = state->owner; 3841 struct rpc_task *task; 3842 struct rpc_message msg = { 3843 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE], 3844 .rpc_cred = state->owner->so_cred, 3845 }; 3846 struct rpc_task_setup task_setup_data = { 3847 .rpc_client = server->client, 3848 .rpc_message = &msg, 3849 .callback_ops = &nfs4_close_ops, 3850 .workqueue = nfsiod_workqueue, 3851 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 3852 }; 3853 int status = -ENOMEM; 3854 3855 if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE)) 3856 task_setup_data.flags |= RPC_TASK_MOVEABLE; 3857 3858 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP, 3859 &task_setup_data.rpc_client, &msg); 3860 3861 calldata = kzalloc(sizeof(*calldata), gfp_mask); 3862 if (calldata == NULL) 3863 goto out; 3864 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1, 0); 3865 calldata->inode = state->inode; 3866 calldata->state = state; 3867 calldata->arg.fh = NFS_FH(state->inode); 3868 if (!nfs4_copy_open_stateid(&calldata->arg.stateid, state)) 3869 goto out_free_calldata; 3870 /* Serialization for the sequence id */ 3871 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 3872 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask); 3873 if (IS_ERR(calldata->arg.seqid)) 3874 goto out_free_calldata; 3875 nfs_fattr_init(&calldata->fattr); 3876 calldata->arg.fmode = 0; 3877 calldata->lr.arg.ld_private = &calldata->lr.ld_private; 3878 calldata->res.fattr = &calldata->fattr; 3879 calldata->res.seqid = calldata->arg.seqid; 3880 calldata->res.server = server; 3881 calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT; 3882 calldata->lr.roc = pnfs_roc(state->inode, 3883 &calldata->lr.arg, &calldata->lr.res, msg.rpc_cred); 3884 if (calldata->lr.roc) { 3885 calldata->arg.lr_args = &calldata->lr.arg; 3886 calldata->res.lr_res = &calldata->lr.res; 3887 } 3888 nfs_sb_active(calldata->inode->i_sb); 3889 3890 msg.rpc_argp = &calldata->arg; 3891 msg.rpc_resp = &calldata->res; 3892 task_setup_data.callback_data = calldata; 3893 task = rpc_run_task(&task_setup_data); 3894 if (IS_ERR(task)) 3895 return PTR_ERR(task); 3896 status = 0; 3897 if (wait) 3898 status = rpc_wait_for_completion_task(task); 3899 rpc_put_task(task); 3900 return status; 3901 out_free_calldata: 3902 kfree(calldata); 3903 out: 3904 nfs4_put_open_state(state); 3905 nfs4_put_state_owner(sp); 3906 return status; 3907 } 3908 3909 static struct inode * 3910 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, 3911 int open_flags, struct iattr *attr, int *opened) 3912 { 3913 struct nfs4_state *state; 3914 struct nfs4_label l, *label; 3915 3916 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l); 3917 3918 /* Protect against concurrent sillydeletes */ 3919 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened); 3920 3921 nfs4_label_release_security(label); 3922 3923 if (IS_ERR(state)) 3924 return ERR_CAST(state); 3925 return state->inode; 3926 } 3927 3928 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) 3929 { 3930 struct dentry *dentry = ctx->dentry; 3931 if (ctx->state == NULL) 3932 return; 3933 if (dentry->d_flags & DCACHE_NFSFS_RENAMED) 3934 nfs4_inode_set_return_delegation_on_close(d_inode(dentry)); 
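	/*
	 * A sillyrenamed dentry has its delegation flagged for return once
	 * this open state goes away (see the DCACHE_NFSFS_RENAMED check
	 * above).  The open state itself is then torn down synchronously or
	 * via an asynchronous CLOSE, depending on is_sync.
	 */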
3935 if (is_sync) 3936 nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx)); 3937 else 3938 nfs4_close_state(ctx->state, _nfs4_ctx_to_openmode(ctx)); 3939 } 3940 3941 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL) 3942 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL) 3943 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_OPEN_ARGUMENTS - 1UL) 3944 3945 #define FATTR4_WORD2_NFS42_TIME_DELEG_MASK \ 3946 (FATTR4_WORD2_TIME_DELEG_MODIFY|FATTR4_WORD2_TIME_DELEG_ACCESS) 3947 static bool nfs4_server_delegtime_capable(struct nfs4_server_caps_res *res) 3948 { 3949 u32 share_access_want = res->open_caps.oa_share_access_want[0]; 3950 u32 attr_bitmask = res->attr_bitmask[2]; 3951 3952 return (share_access_want & NFS4_SHARE_WANT_DELEG_TIMESTAMPS) && 3953 ((attr_bitmask & FATTR4_WORD2_NFS42_TIME_DELEG_MASK) == 3954 FATTR4_WORD2_NFS42_TIME_DELEG_MASK); 3955 } 3956 3957 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 3958 { 3959 u32 minorversion = server->nfs_client->cl_minorversion; 3960 u32 bitmask[3] = { 3961 [0] = FATTR4_WORD0_SUPPORTED_ATTRS, 3962 }; 3963 struct nfs4_server_caps_arg args = { 3964 .fhandle = fhandle, 3965 .bitmask = bitmask, 3966 }; 3967 struct nfs4_server_caps_res res = {}; 3968 struct rpc_message msg = { 3969 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS], 3970 .rpc_argp = &args, 3971 .rpc_resp = &res, 3972 }; 3973 int status; 3974 int i; 3975 3976 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS | 3977 FATTR4_WORD0_FH_EXPIRE_TYPE | 3978 FATTR4_WORD0_LINK_SUPPORT | 3979 FATTR4_WORD0_SYMLINK_SUPPORT | 3980 FATTR4_WORD0_ACLSUPPORT | 3981 FATTR4_WORD0_CASE_INSENSITIVE | 3982 FATTR4_WORD0_CASE_PRESERVING; 3983 if (minorversion) 3984 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT; 3985 if (minorversion > 1) 3986 bitmask[2] |= FATTR4_WORD2_OPEN_ARGUMENTS; 3987 3988 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3989 if (status == 0) { 3990 bitmask[0] = (FATTR4_WORD0_SUPPORTED_ATTRS | 3991 FATTR4_WORD0_FH_EXPIRE_TYPE | 3992 FATTR4_WORD0_LINK_SUPPORT | 3993 FATTR4_WORD0_SYMLINK_SUPPORT | 3994 FATTR4_WORD0_ACLSUPPORT | 3995 FATTR4_WORD0_CASE_INSENSITIVE | 3996 FATTR4_WORD0_CASE_PRESERVING) & 3997 res.attr_bitmask[0]; 3998 /* Sanity check the server answers */ 3999 switch (minorversion) { 4000 case 0: 4001 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK; 4002 res.attr_bitmask[2] = 0; 4003 break; 4004 case 1: 4005 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK; 4006 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT & 4007 res.attr_bitmask[2]; 4008 break; 4009 case 2: 4010 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK; 4011 bitmask[2] = (FATTR4_WORD2_SUPPATTR_EXCLCREAT | 4012 FATTR4_WORD2_OPEN_ARGUMENTS) & 4013 res.attr_bitmask[2]; 4014 } 4015 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); 4016 server->caps &= 4017 ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS | NFS_CAP_SYMLINKS | 4018 NFS_CAP_SECURITY_LABEL | NFS_CAP_FS_LOCATIONS | 4019 NFS_CAP_OPEN_XOR | NFS_CAP_DELEGTIME); 4020 server->fattr_valid = NFS_ATTR_FATTR_V4; 4021 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL && 4022 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) 4023 server->caps |= NFS_CAP_ACLS; 4024 if (res.has_links != 0) 4025 server->caps |= NFS_CAP_HARDLINKS; 4026 if (res.has_symlinks != 0) 4027 server->caps |= NFS_CAP_SYMLINKS; 4028 if (res.case_insensitive) 4029 server->caps |= NFS_CAP_CASE_INSENSITIVE; 4030 if (res.case_preserving) 4031 server->caps |= NFS_CAP_CASE_PRESERVING; 4032 
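		/*
		 * The remaining adjustments are driven by the attribute words
		 * the server advertised: res.attr_bitmask[0] carries the
		 * FATTR4_WORD0_* bits, [1] the FATTR4_WORD1_* bits and [2]
		 * the FATTR4_WORD2_* bits.  Attributes the server does not
		 * support simply clear the corresponding NFS_ATTR_FATTR_*
		 * flags in server->fattr_valid; security labels are only
		 * considered when CONFIG_NFS_V4_SECURITY_LABEL is enabled.
		 */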
#ifdef CONFIG_NFS_V4_SECURITY_LABEL 4033 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL) 4034 server->caps |= NFS_CAP_SECURITY_LABEL; 4035 #endif 4036 if (res.attr_bitmask[0] & FATTR4_WORD0_FS_LOCATIONS) 4037 server->caps |= NFS_CAP_FS_LOCATIONS; 4038 if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID)) 4039 server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID; 4040 if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE)) 4041 server->fattr_valid &= ~NFS_ATTR_FATTR_MODE; 4042 if (!(res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)) 4043 server->fattr_valid &= ~NFS_ATTR_FATTR_NLINK; 4044 if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER)) 4045 server->fattr_valid &= ~(NFS_ATTR_FATTR_OWNER | 4046 NFS_ATTR_FATTR_OWNER_NAME); 4047 if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)) 4048 server->fattr_valid &= ~(NFS_ATTR_FATTR_GROUP | 4049 NFS_ATTR_FATTR_GROUP_NAME); 4050 if (!(res.attr_bitmask[1] & FATTR4_WORD1_SPACE_USED)) 4051 server->fattr_valid &= ~NFS_ATTR_FATTR_SPACE_USED; 4052 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)) 4053 server->fattr_valid &= ~NFS_ATTR_FATTR_ATIME; 4054 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)) 4055 server->fattr_valid &= ~NFS_ATTR_FATTR_CTIME; 4056 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)) 4057 server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME; 4060 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_CREATE)) 4061 server->fattr_valid &= ~NFS_ATTR_FATTR_BTIME; 4062 memcpy(server->attr_bitmask_nl, res.attr_bitmask, 4063 sizeof(server->attr_bitmask)); 4064 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL; 4065 4066 if (res.open_caps.oa_share_access_want[0] & 4067 NFS4_SHARE_WANT_OPEN_XOR_DELEGATION) 4068 server->caps |= NFS_CAP_OPEN_XOR; 4069 if (nfs4_server_delegtime_capable(&res)) 4070 server->caps |= NFS_CAP_DELEGTIME; 4071 4072 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask)); 4073 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE; 4074 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; 4075 server->cache_consistency_bitmask[2] = 0; 4076 4077 /* Avoid a regression due to buggy server */ 4078 for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++) 4079 res.exclcreat_bitmask[i] &= res.attr_bitmask[i]; 4080 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask, 4081 sizeof(server->exclcreat_bitmask)); 4082 4083 server->acl_bitmask = res.acl_bitmask; 4084 server->fh_expire_type = res.fh_expire_type; 4085 } 4086 4087 return status; 4088 } 4089 4090 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 4091 { 4092 struct nfs4_exception exception = { 4093 .interruptible = true, 4094 }; 4095 int err; 4096 4097 do { 4098 err = nfs4_handle_exception(server, 4099 _nfs4_server_capabilities(server, fhandle), 4100 &exception); 4101 } while (exception.retry); 4102 return err; 4103 } 4104 4105 static void test_fs_location_for_trunking(struct nfs4_fs_location *location, 4106 struct nfs_client *clp, 4107 struct nfs_server *server) 4108 { 4109 int i; 4110 4111 for (i = 0; i < location->nservers; i++) { 4112 struct nfs4_string *srv_loc = &location->servers[i]; 4113 struct sockaddr_storage addr; 4114 size_t addrlen; 4115 struct xprt_create xprt_args = { 4116 .ident = 0, 4117 .net = clp->cl_net, 4118 }; 4119 struct nfs4_add_xprt_data xprtdata = { 4120 .clp = clp, 4121 }; 4122 struct rpc_add_xprt_test
rpcdata = { 4123 .add_xprt_test = clp->cl_mvops->session_trunk, 4124 .data = &xprtdata, 4125 }; 4126 char *servername = NULL; 4127 4128 if (!srv_loc->len) 4129 continue; 4130 4131 addrlen = nfs_parse_server_name(srv_loc->data, srv_loc->len, 4132 &addr, sizeof(addr), 4133 clp->cl_net, server->port); 4134 if (!addrlen) 4135 return; 4136 xprt_args.dstaddr = (struct sockaddr *)&addr; 4137 xprt_args.addrlen = addrlen; 4138 servername = kmalloc(srv_loc->len + 1, GFP_KERNEL); 4139 if (!servername) 4140 return; 4141 memcpy(servername, srv_loc->data, srv_loc->len); 4142 servername[srv_loc->len] = '\0'; 4143 xprt_args.servername = servername; 4144 4145 xprtdata.cred = nfs4_get_clid_cred(clp); 4146 rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args, 4147 rpc_clnt_setup_test_and_add_xprt, 4148 &rpcdata); 4149 if (xprtdata.cred) 4150 put_cred(xprtdata.cred); 4151 kfree(servername); 4152 } 4153 } 4154 4155 static bool _is_same_nfs4_pathname(struct nfs4_pathname *path1, 4156 struct nfs4_pathname *path2) 4157 { 4158 int i; 4159 4160 if (path1->ncomponents != path2->ncomponents) 4161 return false; 4162 for (i = 0; i < path1->ncomponents; i++) { 4163 if (path1->components[i].len != path2->components[i].len) 4164 return false; 4165 if (memcmp(path1->components[i].data, path2->components[i].data, 4166 path1->components[i].len)) 4167 return false; 4168 } 4169 return true; 4170 } 4171 4172 static int _nfs4_discover_trunking(struct nfs_server *server, 4173 struct nfs_fh *fhandle) 4174 { 4175 struct nfs4_fs_locations *locations = NULL; 4176 struct page *page; 4177 const struct cred *cred; 4178 struct nfs_client *clp = server->nfs_client; 4179 const struct nfs4_state_maintenance_ops *ops = 4180 clp->cl_mvops->state_renewal_ops; 4181 int status = -ENOMEM, i; 4182 4183 cred = ops->get_state_renewal_cred(clp); 4184 if (cred == NULL) { 4185 cred = nfs4_get_clid_cred(clp); 4186 if (cred == NULL) 4187 return -ENOKEY; 4188 } 4189 4190 page = alloc_page(GFP_KERNEL); 4191 if (!page) 4192 goto out_put_cred; 4193 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 4194 if (!locations) 4195 goto out_free; 4196 locations->fattr = nfs_alloc_fattr(); 4197 if (!locations->fattr) 4198 goto out_free_2; 4199 4200 status = nfs4_proc_get_locations(server, fhandle, locations, page, 4201 cred); 4202 if (status) 4203 goto out_free_3; 4204 4205 for (i = 0; i < locations->nlocations; i++) { 4206 if (!_is_same_nfs4_pathname(&locations->fs_path, 4207 &locations->locations[i].rootpath)) 4208 continue; 4209 test_fs_location_for_trunking(&locations->locations[i], clp, 4210 server); 4211 } 4212 out_free_3: 4213 kfree(locations->fattr); 4214 out_free_2: 4215 kfree(locations); 4216 out_free: 4217 __free_page(page); 4218 out_put_cred: 4219 put_cred(cred); 4220 return status; 4221 } 4222 4223 static int nfs4_discover_trunking(struct nfs_server *server, 4224 struct nfs_fh *fhandle) 4225 { 4226 struct nfs4_exception exception = { 4227 .interruptible = true, 4228 }; 4229 struct nfs_client *clp = server->nfs_client; 4230 int err = 0; 4231 4232 if (!nfs4_has_session(clp)) 4233 goto out; 4234 do { 4235 err = nfs4_handle_exception(server, 4236 _nfs4_discover_trunking(server, fhandle), 4237 &exception); 4238 } while (exception.retry); 4239 out: 4240 return err; 4241 } 4242 4243 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 4244 struct nfs_fattr *fattr) 4245 { 4246 u32 bitmask[3] = { 4247 [0] = FATTR4_WORD0_TYPE | FATTR4_WORD0_CHANGE | 4248 FATTR4_WORD0_SIZE | FATTR4_WORD0_FSID, 4249 }; 4250 struct 
nfs4_lookup_root_arg args = { 4251 .bitmask = bitmask, 4252 }; 4253 struct nfs4_lookup_res res = { 4254 .server = server, 4255 .fattr = fattr, 4256 .fh = fhandle, 4257 }; 4258 struct rpc_message msg = { 4259 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT], 4260 .rpc_argp = &args, 4261 .rpc_resp = &res, 4262 }; 4263 4264 nfs_fattr_init(fattr); 4265 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4266 } 4267 4268 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 4269 struct nfs_fattr *fattr) 4270 { 4271 struct nfs4_exception exception = { 4272 .interruptible = true, 4273 }; 4274 int err; 4275 do { 4276 err = _nfs4_lookup_root(server, fhandle, fattr); 4277 trace_nfs4_lookup_root(server, fhandle, fattr, err); 4278 switch (err) { 4279 case 0: 4280 case -NFS4ERR_WRONGSEC: 4281 goto out; 4282 default: 4283 err = nfs4_handle_exception(server, err, &exception); 4284 } 4285 } while (exception.retry); 4286 out: 4287 return err; 4288 } 4289 4290 static int nfs4_lookup_root_sec(struct nfs_server *server, 4291 struct nfs_fh *fhandle, struct nfs_fattr *fattr, 4292 rpc_authflavor_t flavor) 4293 { 4294 struct rpc_auth_create_args auth_args = { 4295 .pseudoflavor = flavor, 4296 }; 4297 struct rpc_auth *auth; 4298 4299 auth = rpcauth_create(&auth_args, server->client); 4300 if (IS_ERR(auth)) 4301 return -EACCES; 4302 return nfs4_lookup_root(server, fhandle, fattr); 4303 } 4304 4305 /* 4306 * Retry pseudoroot lookup with various security flavors. We do this when: 4307 * 4308 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC 4309 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation 4310 * 4311 * Returns zero on success, or a negative NFS4ERR value, or a 4312 * negative errno value. 4313 */ 4314 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 4315 struct nfs_fattr *fattr) 4316 { 4317 /* Per 3530bis 15.33.5 */ 4318 static const rpc_authflavor_t flav_array[] = { 4319 RPC_AUTH_GSS_KRB5P, 4320 RPC_AUTH_GSS_KRB5I, 4321 RPC_AUTH_GSS_KRB5, 4322 RPC_AUTH_UNIX, /* courtesy */ 4323 RPC_AUTH_NULL, 4324 }; 4325 int status = -EPERM; 4326 size_t i; 4327 4328 if (server->auth_info.flavor_len > 0) { 4329 /* try each flavor specified by user */ 4330 for (i = 0; i < server->auth_info.flavor_len; i++) { 4331 status = nfs4_lookup_root_sec( 4332 server, fhandle, fattr, 4333 server->auth_info.flavors[i]); 4334 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 4335 continue; 4336 break; 4337 } 4338 } else { 4339 /* no flavors specified by user, try default list */ 4340 for (i = 0; i < ARRAY_SIZE(flav_array); i++) { 4341 status = nfs4_lookup_root_sec(server, fhandle, fattr, 4342 flav_array[i]); 4343 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 4344 continue; 4345 break; 4346 } 4347 } 4348 4349 /* 4350 * -EACCES could mean that the user doesn't have correct permissions 4351 * to access the mount. It could also mean that we tried to mount 4352 * with a gss auth flavor, but rpc.gssd isn't running. Either way, 4353 * existing mount programs don't handle -EACCES very well so it should 4354 * be mapped to -EPERM instead. 
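 * A concrete illustration (hypothetical setup): mounting with sec=krb5
 * while rpc.gssd is not running tends to surface as -EACCES from the
 * GSS auth flavor above even though the user's permissions were never
 * checked; mapping that to -EPERM keeps the failure mode consistent
 * for mount programs.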
4355 */ 4356 if (status == -EACCES) 4357 status = -EPERM; 4358 return status; 4359 } 4360 4361 /** 4362 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot 4363 * @server: initialized nfs_server handle 4364 * @fhandle: we fill in the pseudo-fs root file handle 4365 * @fattr: we fill in a bare bones struct fattr 4366 * @auth_probe: probe the auth flavours 4367 * 4368 * Returns zero on success, or a negative errno. 4369 */ 4370 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle, 4371 struct nfs_fattr *fattr, bool auth_probe) 4372 { 4373 int status = 0; 4374 4375 if (!auth_probe) 4376 status = nfs4_lookup_root(server, fhandle, fattr); 4377 4378 if (auth_probe || status == NFS4ERR_WRONGSEC) 4379 status = server->nfs_client->cl_mvops->find_root_sec( 4380 server, fhandle, fattr); 4381 4382 return nfs4_map_errors(status); 4383 } 4384 4385 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh, 4386 struct nfs_fsinfo *info) 4387 { 4388 int error; 4389 struct nfs_fattr *fattr = info->fattr; 4390 4391 error = nfs4_server_capabilities(server, mntfh); 4392 if (error < 0) { 4393 dprintk("nfs4_get_root: getcaps error = %d\n", -error); 4394 return error; 4395 } 4396 4397 error = nfs4_proc_getattr(server, mntfh, fattr, NULL); 4398 if (error < 0) { 4399 dprintk("nfs4_get_root: getattr error = %d\n", -error); 4400 goto out; 4401 } 4402 4403 if (fattr->valid & NFS_ATTR_FATTR_FSID && 4404 !nfs_fsid_equal(&server->fsid, &fattr->fsid)) 4405 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid)); 4406 4407 out: 4408 return error; 4409 } 4410 4411 /* 4412 * Get locations and (maybe) other attributes of a referral. 4413 * Note that we'll actually follow the referral later when 4414 * we detect fsid mismatch in inode revalidation 4415 */ 4416 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir, 4417 const struct qstr *name, struct nfs_fattr *fattr, 4418 struct nfs_fh *fhandle) 4419 { 4420 int status = -ENOMEM; 4421 struct page *page = NULL; 4422 struct nfs4_fs_locations *locations = NULL; 4423 4424 page = alloc_page(GFP_KERNEL); 4425 if (page == NULL) 4426 goto out; 4427 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 4428 if (locations == NULL) 4429 goto out; 4430 4431 locations->fattr = fattr; 4432 4433 status = nfs4_proc_fs_locations(client, dir, name, locations, page); 4434 if (status != 0) 4435 goto out; 4436 4437 /* 4438 * If the fsid didn't change, this is a migration event, not a 4439 * referral. Cause us to drop into the exception handler, which 4440 * will kick off migration recovery. 
4441 */ 4442 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &fattr->fsid)) { 4443 dprintk("%s: server did not return a different fsid for" 4444 " a referral at %s\n", __func__, name->name); 4445 status = -NFS4ERR_MOVED; 4446 goto out; 4447 } 4448 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */ 4449 nfs_fixup_referral_attributes(fattr); 4450 memset(fhandle, 0, sizeof(struct nfs_fh)); 4451 out: 4452 if (page) 4453 __free_page(page); 4454 kfree(locations); 4455 return status; 4456 } 4457 4458 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, 4459 struct nfs_fattr *fattr, struct inode *inode) 4460 { 4461 __u32 bitmask[NFS4_BITMASK_SZ]; 4462 struct nfs4_getattr_arg args = { 4463 .fh = fhandle, 4464 .bitmask = bitmask, 4465 }; 4466 struct nfs4_getattr_res res = { 4467 .fattr = fattr, 4468 .server = server, 4469 }; 4470 struct rpc_message msg = { 4471 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 4472 .rpc_argp = &args, 4473 .rpc_resp = &res, 4474 }; 4475 unsigned short task_flags = 0; 4476 4477 if (nfs4_has_session(server->nfs_client)) 4478 task_flags = RPC_TASK_MOVEABLE; 4479 4480 /* Is this an attribute revalidation, subject to softreval? */ 4481 if (inode && (server->flags & NFS_MOUNT_SOFTREVAL)) 4482 task_flags |= RPC_TASK_TIMEOUT; 4483 4484 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label), inode, 0); 4485 nfs_fattr_init(fattr); 4486 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 4487 return nfs4_do_call_sync(server->client, server, &msg, 4488 &args.seq_args, &res.seq_res, task_flags); 4489 } 4490 4491 int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, 4492 struct nfs_fattr *fattr, struct inode *inode) 4493 { 4494 struct nfs4_exception exception = { 4495 .interruptible = true, 4496 }; 4497 int err; 4498 do { 4499 err = _nfs4_proc_getattr(server, fhandle, fattr, inode); 4500 trace_nfs4_getattr(server, fhandle, fattr, err); 4501 err = nfs4_handle_exception(server, err, 4502 &exception); 4503 } while (exception.retry); 4504 return err; 4505 } 4506 4507 /* 4508 * The file is not closed if it is opened due to a request to change 4509 * the size of the file. The open call will not be needed once the 4510 * VFS layer lookup-intents are implemented. 4511 * 4512 * Close is called when the inode is destroyed. 4513 * If we haven't opened the file for O_WRONLY, we 4514 * need to do so in the size_change case to obtain a stateid. 4515 * 4516 * Got race? 4517 * Because OPEN is always done by name in nfsv4, it is 4518 * possible that we opened a different file by the same 4519 * name. We can recognize this race condition, but we 4520 * can't do anything about it besides returning an error. 4521 * 4522 * This will be fixed with VFS changes (lookup-intent).
4523 */ 4524 static int 4525 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, 4526 struct iattr *sattr) 4527 { 4528 struct inode *inode = d_inode(dentry); 4529 const struct cred *cred = NULL; 4530 struct nfs_open_context *ctx = NULL; 4531 int status; 4532 4533 if (pnfs_ld_layoutret_on_setattr(inode) && 4534 sattr->ia_valid & ATTR_SIZE && 4535 sattr->ia_size < i_size_read(inode)) 4536 pnfs_commit_and_return_layout(inode); 4537 4538 nfs_fattr_init(fattr); 4539 4540 /* Deal with open(O_TRUNC) */ 4541 if (sattr->ia_valid & ATTR_OPEN) 4542 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME); 4543 4544 /* Optimization: if the end result is no change, don't RPC */ 4545 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0) 4546 return 0; 4547 4548 /* Search for an existing open(O_WRITE) file */ 4549 if (sattr->ia_valid & ATTR_FILE) { 4550 4551 ctx = nfs_file_open_context(sattr->ia_file); 4552 if (ctx) 4553 cred = ctx->cred; 4554 } 4555 4556 /* Return any delegations if we're going to change ACLs */ 4557 if ((sattr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0) 4558 nfs4_inode_make_writeable(inode); 4559 4560 status = nfs4_do_setattr(inode, cred, fattr, sattr, ctx, NULL); 4561 if (status == 0) { 4562 nfs_setattr_update_inode(inode, sattr, fattr); 4563 nfs_setsecurity(inode, fattr); 4564 } 4565 return status; 4566 } 4567 4568 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, 4569 struct dentry *dentry, const struct qstr *name, 4570 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4571 { 4572 struct nfs_server *server = NFS_SERVER(dir); 4573 int status; 4574 struct nfs4_lookup_arg args = { 4575 .bitmask = server->attr_bitmask, 4576 .dir_fh = NFS_FH(dir), 4577 .name = name, 4578 }; 4579 struct nfs4_lookup_res res = { 4580 .server = server, 4581 .fattr = fattr, 4582 .fh = fhandle, 4583 }; 4584 struct rpc_message msg = { 4585 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP], 4586 .rpc_argp = &args, 4587 .rpc_resp = &res, 4588 }; 4589 unsigned short task_flags = 0; 4590 4591 if (nfs_server_capable(dir, NFS_CAP_MOVEABLE)) 4592 task_flags = RPC_TASK_MOVEABLE; 4593 4594 /* Is this an attribute revalidation, subject to softreval?
*/ 4595 if (nfs_lookup_is_soft_revalidate(dentry)) 4596 task_flags |= RPC_TASK_TIMEOUT; 4597 4598 args.bitmask = nfs4_bitmask(server, fattr->label); 4599 4600 nfs_fattr_init(fattr); 4601 4602 dprintk("NFS call lookup %pd2\n", dentry); 4603 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 4604 status = nfs4_do_call_sync(clnt, server, &msg, 4605 &args.seq_args, &res.seq_res, task_flags); 4606 dprintk("NFS reply lookup: %d\n", status); 4607 return status; 4608 } 4609 4610 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr) 4611 { 4612 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 4613 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT; 4614 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 4615 fattr->nlink = 2; 4616 } 4617 4618 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir, 4619 struct dentry *dentry, const struct qstr *name, 4620 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4621 { 4622 struct nfs4_exception exception = { 4623 .interruptible = true, 4624 }; 4625 struct rpc_clnt *client = *clnt; 4626 int err; 4627 do { 4628 err = _nfs4_proc_lookup(client, dir, dentry, name, fhandle, fattr); 4629 trace_nfs4_lookup(dir, name, err); 4630 switch (err) { 4631 case -NFS4ERR_BADNAME: 4632 err = -ENOENT; 4633 goto out; 4634 case -NFS4ERR_MOVED: 4635 err = nfs4_get_referral(client, dir, name, fattr, fhandle); 4636 if (err == -NFS4ERR_MOVED) 4637 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 4638 goto out; 4639 case -NFS4ERR_WRONGSEC: 4640 err = -EPERM; 4641 if (client != *clnt) 4642 goto out; 4643 client = nfs4_negotiate_security(client, dir, name); 4644 if (IS_ERR(client)) 4645 return PTR_ERR(client); 4646 4647 exception.retry = 1; 4648 break; 4649 default: 4650 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 4651 } 4652 } while (exception.retry); 4653 4654 out: 4655 if (err == 0) 4656 *clnt = client; 4657 else if (client != *clnt) 4658 rpc_shutdown_client(client); 4659 4660 return err; 4661 } 4662 4663 static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry, const struct qstr *name, 4664 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4665 { 4666 int status; 4667 struct rpc_clnt *client = NFS_CLIENT(dir); 4668 4669 status = nfs4_proc_lookup_common(&client, dir, dentry, name, fhandle, fattr); 4670 if (client != NFS_CLIENT(dir)) { 4671 rpc_shutdown_client(client); 4672 nfs_fixup_secinfo_attributes(fattr); 4673 } 4674 return status; 4675 } 4676 4677 struct rpc_clnt * 4678 nfs4_proc_lookup_mountpoint(struct inode *dir, struct dentry *dentry, 4679 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4680 { 4681 struct rpc_clnt *client = NFS_CLIENT(dir); 4682 int status; 4683 4684 status = nfs4_proc_lookup_common(&client, dir, dentry, &dentry->d_name, 4685 fhandle, fattr); 4686 if (status < 0) 4687 return ERR_PTR(status); 4688 return (client == NFS_CLIENT(dir)) ? 
rpc_clone_client(client) : client; 4689 } 4690 4691 static int _nfs4_proc_lookupp(struct inode *inode, 4692 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4693 { 4694 struct rpc_clnt *clnt = NFS_CLIENT(inode); 4695 struct nfs_server *server = NFS_SERVER(inode); 4696 int status; 4697 struct nfs4_lookupp_arg args = { 4698 .bitmask = server->attr_bitmask, 4699 .fh = NFS_FH(inode), 4700 }; 4701 struct nfs4_lookupp_res res = { 4702 .server = server, 4703 .fattr = fattr, 4704 .fh = fhandle, 4705 }; 4706 struct rpc_message msg = { 4707 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUPP], 4708 .rpc_argp = &args, 4709 .rpc_resp = &res, 4710 }; 4711 unsigned short task_flags = 0; 4712 4713 if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL) 4714 task_flags |= RPC_TASK_TIMEOUT; 4715 4716 args.bitmask = nfs4_bitmask(server, fattr->label); 4717 4718 nfs_fattr_init(fattr); 4719 4720 dprintk("NFS call lookupp ino=0x%lx\n", inode->i_ino); 4721 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, 4722 &res.seq_res, task_flags); 4723 dprintk("NFS reply lookupp: %d\n", status); 4724 return status; 4725 } 4726 4727 static int nfs4_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle, 4728 struct nfs_fattr *fattr) 4729 { 4730 struct nfs4_exception exception = { 4731 .interruptible = true, 4732 }; 4733 int err; 4734 do { 4735 err = _nfs4_proc_lookupp(inode, fhandle, fattr); 4736 trace_nfs4_lookupp(inode, err); 4737 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4738 &exception); 4739 } while (exception.retry); 4740 return err; 4741 } 4742 4743 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry, 4744 const struct cred *cred) 4745 { 4746 struct nfs_server *server = NFS_SERVER(inode); 4747 struct nfs4_accessargs args = { 4748 .fh = NFS_FH(inode), 4749 .access = entry->mask, 4750 }; 4751 struct nfs4_accessres res = { 4752 .server = server, 4753 }; 4754 struct rpc_message msg = { 4755 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], 4756 .rpc_argp = &args, 4757 .rpc_resp = &res, 4758 .rpc_cred = cred, 4759 }; 4760 int status = 0; 4761 4762 if (!nfs4_have_delegation(inode, FMODE_READ, 0)) { 4763 res.fattr = nfs_alloc_fattr(); 4764 if (res.fattr == NULL) 4765 return -ENOMEM; 4766 args.bitmask = server->cache_consistency_bitmask; 4767 } 4768 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4769 if (!status) { 4770 nfs_access_set_mask(entry, res.access); 4771 if (res.fattr) 4772 nfs_refresh_inode(inode, res.fattr); 4773 } 4774 nfs_free_fattr(res.fattr); 4775 return status; 4776 } 4777 4778 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry, 4779 const struct cred *cred) 4780 { 4781 struct nfs4_exception exception = { 4782 .interruptible = true, 4783 }; 4784 int err; 4785 do { 4786 err = _nfs4_proc_access(inode, entry, cred); 4787 trace_nfs4_access(inode, err); 4788 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4789 &exception); 4790 } while (exception.retry); 4791 return err; 4792 } 4793 4794 /* 4795 * TODO: For the time being, we don't try to get any attributes 4796 * along with any of the zero-copy operations READ, READDIR, 4797 * READLINK, WRITE. 4798 * 4799 * In the case of the first three, we want to put the GETATTR 4800 * after the read-type operation -- this is because it is hard 4801 * to predict the length of a GETATTR response in v4, and thus 4802 * align the READ data correctly. 
This means that the GETATTR 4803 * may end up partially falling into the page cache, and we should 4804 * shift it into the 'tail' of the xdr_buf before processing. 4805 * To do this efficiently, we need to know the total length 4806 * of data received, which doesn't seem to be available outside 4807 * of the RPC layer. 4808 * 4809 * In the case of WRITE, we also want to put the GETATTR after 4810 * the operation -- in this case because we want to make sure 4811 * we get the post-operation mtime and size. 4812 * 4813 * Both of these changes to the XDR layer would in fact be quite 4814 * minor, but I decided to leave them for a subsequent patch. 4815 */ 4816 static int _nfs4_proc_readlink(struct inode *inode, struct page *page, 4817 unsigned int pgbase, unsigned int pglen) 4818 { 4819 struct nfs4_readlink args = { 4820 .fh = NFS_FH(inode), 4821 .pgbase = pgbase, 4822 .pglen = pglen, 4823 .pages = &page, 4824 }; 4825 struct nfs4_readlink_res res; 4826 struct rpc_message msg = { 4827 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK], 4828 .rpc_argp = &args, 4829 .rpc_resp = &res, 4830 }; 4831 4832 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0); 4833 } 4834 4835 static int nfs4_proc_readlink(struct inode *inode, struct page *page, 4836 unsigned int pgbase, unsigned int pglen) 4837 { 4838 struct nfs4_exception exception = { 4839 .interruptible = true, 4840 }; 4841 int err; 4842 do { 4843 err = _nfs4_proc_readlink(inode, page, pgbase, pglen); 4844 trace_nfs4_readlink(inode, err); 4845 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4846 &exception); 4847 } while (exception.retry); 4848 return err; 4849 } 4850 4851 /* 4852 * This is just for mknod. open(O_CREAT) will always do ->open_context(). 
4853 */ 4854 static int 4855 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, 4856 int flags) 4857 { 4858 struct nfs_server *server = NFS_SERVER(dir); 4859 struct nfs4_label l, *ilabel; 4860 struct nfs_open_context *ctx; 4861 struct nfs4_state *state; 4862 int status = 0; 4863 4864 ctx = alloc_nfs_open_context(dentry, FMODE_READ, NULL); 4865 if (IS_ERR(ctx)) 4866 return PTR_ERR(ctx); 4867 4868 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l); 4869 4870 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 4871 sattr->ia_mode &= ~current_umask(); 4872 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL); 4873 if (IS_ERR(state)) { 4874 status = PTR_ERR(state); 4875 goto out; 4876 } 4877 out: 4878 nfs4_label_release_security(ilabel); 4879 put_nfs_open_context(ctx); 4880 return status; 4881 } 4882 4883 static int 4884 _nfs4_proc_remove(struct inode *dir, const struct qstr *name, u32 ftype) 4885 { 4886 struct nfs_server *server = NFS_SERVER(dir); 4887 struct nfs_removeargs args = { 4888 .fh = NFS_FH(dir), 4889 .name = *name, 4890 }; 4891 struct nfs_removeres res = { 4892 .server = server, 4893 }; 4894 struct rpc_message msg = { 4895 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE], 4896 .rpc_argp = &args, 4897 .rpc_resp = &res, 4898 }; 4899 unsigned long timestamp = jiffies; 4900 int status; 4901 4902 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 4903 if (status == 0) { 4904 spin_lock(&dir->i_lock); 4905 /* Removing a directory decrements nlink in the parent */ 4906 if (ftype == NF4DIR && dir->i_nlink > 2) 4907 nfs4_dec_nlink_locked(dir); 4908 nfs4_update_changeattr_locked(dir, &res.cinfo, timestamp, 4909 NFS_INO_INVALID_DATA); 4910 spin_unlock(&dir->i_lock); 4911 } 4912 return status; 4913 } 4914 4915 static int nfs4_proc_remove(struct inode *dir, struct dentry *dentry) 4916 { 4917 struct nfs4_exception exception = { 4918 .interruptible = true, 4919 }; 4920 struct inode *inode = d_inode(dentry); 4921 int err; 4922 4923 if (inode) { 4924 if (inode->i_nlink == 1) 4925 nfs4_inode_return_delegation(inode); 4926 else 4927 nfs4_inode_make_writeable(inode); 4928 } 4929 do { 4930 err = _nfs4_proc_remove(dir, &dentry->d_name, NF4REG); 4931 trace_nfs4_remove(dir, &dentry->d_name, err); 4932 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4933 &exception); 4934 } while (exception.retry); 4935 return err; 4936 } 4937 4938 static int nfs4_proc_rmdir(struct inode *dir, const struct qstr *name) 4939 { 4940 struct nfs4_exception exception = { 4941 .interruptible = true, 4942 }; 4943 int err; 4944 4945 do { 4946 err = _nfs4_proc_remove(dir, name, NF4DIR); 4947 trace_nfs4_remove(dir, name, err); 4948 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4949 &exception); 4950 } while (exception.retry); 4951 return err; 4952 } 4953 4954 static void nfs4_proc_unlink_setup(struct rpc_message *msg, 4955 struct dentry *dentry, 4956 struct inode *inode) 4957 { 4958 struct nfs_removeargs *args = msg->rpc_argp; 4959 struct nfs_removeres *res = msg->rpc_resp; 4960 4961 res->server = NFS_SB(dentry->d_sb); 4962 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE]; 4963 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1, 0); 4964 4965 nfs_fattr_init(res->dir_attr); 4966 4967 if (inode) { 4968 nfs4_inode_return_delegation(inode); 4969 nfs_d_prune_case_insensitive_aliases(inode); 4970 } 4971 } 4972 4973 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) 4974 { 4975 
nfs4_setup_sequence(NFS_SB(data->dentry->d_sb)->nfs_client, 4976 &data->args.seq_args, 4977 &data->res.seq_res, 4978 task); 4979 } 4980 4981 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) 4982 { 4983 struct nfs_unlinkdata *data = task->tk_calldata; 4984 struct nfs_removeres *res = &data->res; 4985 4986 if (!nfs4_sequence_done(task, &res->seq_res)) 4987 return 0; 4988 if (nfs4_async_handle_error(task, res->server, NULL, 4989 &data->timeout) == -EAGAIN) 4990 return 0; 4991 if (task->tk_status == 0) 4992 nfs4_update_changeattr(dir, &res->cinfo, 4993 res->dir_attr->time_start, 4994 NFS_INO_INVALID_DATA); 4995 return 1; 4996 } 4997 4998 static void nfs4_proc_rename_setup(struct rpc_message *msg, 4999 struct dentry *old_dentry, 5000 struct dentry *new_dentry) 5001 { 5002 struct nfs_renameargs *arg = msg->rpc_argp; 5003 struct nfs_renameres *res = msg->rpc_resp; 5004 struct inode *old_inode = d_inode(old_dentry); 5005 struct inode *new_inode = d_inode(new_dentry); 5006 5007 if (old_inode) 5008 nfs4_inode_make_writeable(old_inode); 5009 if (new_inode) 5010 nfs4_inode_return_delegation(new_inode); 5011 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; 5012 res->server = NFS_SB(old_dentry->d_sb); 5013 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1, 0); 5014 } 5015 5016 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) 5017 { 5018 nfs4_setup_sequence(NFS_SERVER(data->old_dir)->nfs_client, 5019 &data->args.seq_args, 5020 &data->res.seq_res, 5021 task); 5022 } 5023 5024 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, 5025 struct inode *new_dir) 5026 { 5027 struct nfs_renamedata *data = task->tk_calldata; 5028 struct nfs_renameres *res = &data->res; 5029 5030 if (!nfs4_sequence_done(task, &res->seq_res)) 5031 return 0; 5032 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN) 5033 return 0; 5034 5035 if (task->tk_status == 0) { 5036 nfs_d_prune_case_insensitive_aliases(d_inode(data->old_dentry)); 5037 if (new_dir != old_dir) { 5038 /* Note: If we moved a directory, nlink will change */ 5039 nfs4_update_changeattr(old_dir, &res->old_cinfo, 5040 res->old_fattr->time_start, 5041 NFS_INO_INVALID_NLINK | 5042 NFS_INO_INVALID_DATA); 5043 nfs4_update_changeattr(new_dir, &res->new_cinfo, 5044 res->new_fattr->time_start, 5045 NFS_INO_INVALID_NLINK | 5046 NFS_INO_INVALID_DATA); 5047 } else 5048 nfs4_update_changeattr(old_dir, &res->old_cinfo, 5049 res->old_fattr->time_start, 5050 NFS_INO_INVALID_DATA); 5051 } 5052 return 1; 5053 } 5054 5055 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name) 5056 { 5057 struct nfs_server *server = NFS_SERVER(inode); 5058 __u32 bitmask[NFS4_BITMASK_SZ]; 5059 struct nfs4_link_arg arg = { 5060 .fh = NFS_FH(inode), 5061 .dir_fh = NFS_FH(dir), 5062 .name = name, 5063 .bitmask = bitmask, 5064 }; 5065 struct nfs4_link_res res = { 5066 .server = server, 5067 }; 5068 struct rpc_message msg = { 5069 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK], 5070 .rpc_argp = &arg, 5071 .rpc_resp = &res, 5072 }; 5073 int status = -ENOMEM; 5074 5075 res.fattr = nfs_alloc_fattr_with_label(server); 5076 if (res.fattr == NULL) 5077 goto out; 5078 5079 nfs4_inode_make_writeable(inode); 5080 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, res.fattr->label), 5081 inode, 5082 NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME); 5083 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 5084 if 
(!status) { 5085 nfs4_update_changeattr(dir, &res.cinfo, res.fattr->time_start, 5086 NFS_INO_INVALID_DATA); 5087 nfs4_inc_nlink(inode); 5088 status = nfs_post_op_update_inode(inode, res.fattr); 5089 if (!status) 5090 nfs_setsecurity(inode, res.fattr); 5091 } 5092 5093 out: 5094 nfs_free_fattr(res.fattr); 5095 return status; 5096 } 5097 5098 static int nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name) 5099 { 5100 struct nfs4_exception exception = { 5101 .interruptible = true, 5102 }; 5103 int err; 5104 do { 5105 err = nfs4_handle_exception(NFS_SERVER(inode), 5106 _nfs4_proc_link(inode, dir, name), 5107 &exception); 5108 } while (exception.retry); 5109 return err; 5110 } 5111 5112 struct nfs4_createdata { 5113 struct rpc_message msg; 5114 struct nfs4_create_arg arg; 5115 struct nfs4_create_res res; 5116 struct nfs_fh fh; 5117 struct nfs_fattr fattr; 5118 }; 5119 5120 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir, 5121 const struct qstr *name, struct iattr *sattr, u32 ftype) 5122 { 5123 struct nfs4_createdata *data; 5124 5125 data = kzalloc(sizeof(*data), GFP_KERNEL); 5126 if (data != NULL) { 5127 struct nfs_server *server = NFS_SERVER(dir); 5128 5129 data->fattr.label = nfs4_label_alloc(server, GFP_KERNEL); 5130 if (IS_ERR(data->fattr.label)) 5131 goto out_free; 5132 5133 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE]; 5134 data->msg.rpc_argp = &data->arg; 5135 data->msg.rpc_resp = &data->res; 5136 data->arg.dir_fh = NFS_FH(dir); 5137 data->arg.server = server; 5138 data->arg.name = name; 5139 data->arg.attrs = sattr; 5140 data->arg.ftype = ftype; 5141 data->arg.bitmask = nfs4_bitmask(server, data->fattr.label); 5142 data->arg.umask = current_umask(); 5143 data->res.server = server; 5144 data->res.fh = &data->fh; 5145 data->res.fattr = &data->fattr; 5146 nfs_fattr_init(data->res.fattr); 5147 } 5148 return data; 5149 out_free: 5150 kfree(data); 5151 return NULL; 5152 } 5153 5154 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data) 5155 { 5156 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 5157 &data->arg.seq_args, &data->res.seq_res, 1); 5158 if (status == 0) { 5159 spin_lock(&dir->i_lock); 5160 nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo, 5161 data->res.fattr->time_start, 5162 NFS_INO_INVALID_DATA); 5163 spin_unlock(&dir->i_lock); 5164 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr); 5165 } 5166 return status; 5167 } 5168 5169 static struct dentry *nfs4_do_mkdir(struct inode *dir, struct dentry *dentry, 5170 struct nfs4_createdata *data, int *statusp) 5171 { 5172 struct dentry *ret; 5173 5174 *statusp = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 5175 &data->arg.seq_args, &data->res.seq_res, 1); 5176 5177 if (*statusp) 5178 return NULL; 5179 5180 spin_lock(&dir->i_lock); 5181 /* Creating a directory bumps nlink in the parent */ 5182 nfs4_inc_nlink_locked(dir); 5183 nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo, 5184 data->res.fattr->time_start, 5185 NFS_INO_INVALID_DATA); 5186 spin_unlock(&dir->i_lock); 5187 ret = nfs_add_or_obtain(dentry, data->res.fh, data->res.fattr); 5188 if (!IS_ERR(ret)) 5189 return ret; 5190 *statusp = PTR_ERR(ret); 5191 return NULL; 5192 } 5193 5194 static void nfs4_free_createdata(struct nfs4_createdata *data) 5195 { 5196 nfs4_label_free(data->fattr.label); 5197 kfree(data); 5198 } 5199 5200 static int _nfs4_proc_symlink(struct inode *dir, struct dentry 
*dentry, 5201 struct folio *folio, unsigned int len, struct iattr *sattr, 5202 struct nfs4_label *label) 5203 { 5204 struct page *page = &folio->page; 5205 struct nfs4_createdata *data; 5206 int status = -ENAMETOOLONG; 5207 5208 if (len > NFS4_MAXPATHLEN) 5209 goto out; 5210 5211 status = -ENOMEM; 5212 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK); 5213 if (data == NULL) 5214 goto out; 5215 5216 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK]; 5217 data->arg.u.symlink.pages = &page; 5218 data->arg.u.symlink.len = len; 5219 data->arg.label = label; 5220 5221 status = nfs4_do_create(dir, dentry, data); 5222 5223 nfs4_free_createdata(data); 5224 out: 5225 return status; 5226 } 5227 5228 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 5229 struct folio *folio, unsigned int len, struct iattr *sattr) 5230 { 5231 struct nfs4_exception exception = { 5232 .interruptible = true, 5233 }; 5234 struct nfs4_label l, *label; 5235 int err; 5236 5237 label = nfs4_label_init_security(dir, dentry, sattr, &l); 5238 5239 do { 5240 err = _nfs4_proc_symlink(dir, dentry, folio, len, sattr, label); 5241 trace_nfs4_symlink(dir, &dentry->d_name, err); 5242 err = nfs4_handle_exception(NFS_SERVER(dir), err, 5243 &exception); 5244 } while (exception.retry); 5245 5246 nfs4_label_release_security(label); 5247 return err; 5248 } 5249 5250 static struct dentry *_nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 5251 struct iattr *sattr, 5252 struct nfs4_label *label, int *statusp) 5253 { 5254 struct nfs4_createdata *data; 5255 struct dentry *ret = NULL; 5256 5257 *statusp = -ENOMEM; 5258 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR); 5259 if (data == NULL) 5260 goto out; 5261 5262 data->arg.label = label; 5263 ret = nfs4_do_mkdir(dir, dentry, data, statusp); 5264 5265 nfs4_free_createdata(data); 5266 out: 5267 return ret; 5268 } 5269 5270 static struct dentry *nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 5271 struct iattr *sattr) 5272 { 5273 struct nfs_server *server = NFS_SERVER(dir); 5274 struct nfs4_exception exception = { 5275 .interruptible = true, 5276 }; 5277 struct nfs4_label l, *label; 5278 struct dentry *alias; 5279 int err; 5280 5281 label = nfs4_label_init_security(dir, dentry, sattr, &l); 5282 5283 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 5284 sattr->ia_mode &= ~current_umask(); 5285 do { 5286 alias = _nfs4_proc_mkdir(dir, dentry, sattr, label, &err); 5287 trace_nfs4_mkdir(dir, &dentry->d_name, err); 5288 if (err) 5289 alias = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir), 5290 err, 5291 &exception)); 5292 } while (exception.retry); 5293 nfs4_label_release_security(label); 5294 5295 return alias; 5296 } 5297 5298 static int _nfs4_proc_readdir(struct nfs_readdir_arg *nr_arg, 5299 struct nfs_readdir_res *nr_res) 5300 { 5301 struct inode *dir = d_inode(nr_arg->dentry); 5302 struct nfs_server *server = NFS_SERVER(dir); 5303 struct nfs4_readdir_arg args = { 5304 .fh = NFS_FH(dir), 5305 .pages = nr_arg->pages, 5306 .pgbase = 0, 5307 .count = nr_arg->page_len, 5308 .plus = nr_arg->plus, 5309 }; 5310 struct nfs4_readdir_res res; 5311 struct rpc_message msg = { 5312 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR], 5313 .rpc_argp = &args, 5314 .rpc_resp = &res, 5315 .rpc_cred = nr_arg->cred, 5316 }; 5317 int status; 5318 5319 dprintk("%s: dentry = %pd2, cookie = %llu\n", __func__, 5320 nr_arg->dentry, (unsigned long long)nr_arg->cookie); 5321 if (!(server->caps & NFS_CAP_SECURITY_LABEL)) 5322 
args.bitmask = server->attr_bitmask_nl; 5323 else 5324 args.bitmask = server->attr_bitmask; 5325 5326 nfs4_setup_readdir(nr_arg->cookie, nr_arg->verf, nr_arg->dentry, &args); 5327 res.pgbase = args.pgbase; 5328 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, 5329 &res.seq_res, 0); 5330 if (status >= 0) { 5331 memcpy(nr_res->verf, res.verifier.data, NFS4_VERIFIER_SIZE); 5332 status += args.pgbase; 5333 } 5334 5335 nfs_invalidate_atime(dir); 5336 5337 dprintk("%s: returns %d\n", __func__, status); 5338 return status; 5339 } 5340 5341 static int nfs4_proc_readdir(struct nfs_readdir_arg *arg, 5342 struct nfs_readdir_res *res) 5343 { 5344 struct nfs4_exception exception = { 5345 .interruptible = true, 5346 }; 5347 int err; 5348 do { 5349 err = _nfs4_proc_readdir(arg, res); 5350 trace_nfs4_readdir(d_inode(arg->dentry), err); 5351 err = nfs4_handle_exception(NFS_SERVER(d_inode(arg->dentry)), 5352 err, &exception); 5353 } while (exception.retry); 5354 return err; 5355 } 5356 5357 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 5358 struct iattr *sattr, struct nfs4_label *label, dev_t rdev) 5359 { 5360 struct nfs4_createdata *data; 5361 int mode = sattr->ia_mode; 5362 int status = -ENOMEM; 5363 5364 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK); 5365 if (data == NULL) 5366 goto out; 5367 5368 if (S_ISFIFO(mode)) 5369 data->arg.ftype = NF4FIFO; 5370 else if (S_ISBLK(mode)) { 5371 data->arg.ftype = NF4BLK; 5372 data->arg.u.device.specdata1 = MAJOR(rdev); 5373 data->arg.u.device.specdata2 = MINOR(rdev); 5374 } 5375 else if (S_ISCHR(mode)) { 5376 data->arg.ftype = NF4CHR; 5377 data->arg.u.device.specdata1 = MAJOR(rdev); 5378 data->arg.u.device.specdata2 = MINOR(rdev); 5379 } else if (!S_ISSOCK(mode)) { 5380 status = -EINVAL; 5381 goto out_free; 5382 } 5383 5384 data->arg.label = label; 5385 status = nfs4_do_create(dir, dentry, data); 5386 out_free: 5387 nfs4_free_createdata(data); 5388 out: 5389 return status; 5390 } 5391 5392 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 5393 struct iattr *sattr, dev_t rdev) 5394 { 5395 struct nfs_server *server = NFS_SERVER(dir); 5396 struct nfs4_exception exception = { 5397 .interruptible = true, 5398 }; 5399 struct nfs4_label l, *label; 5400 int err; 5401 5402 label = nfs4_label_init_security(dir, dentry, sattr, &l); 5403 5404 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 5405 sattr->ia_mode &= ~current_umask(); 5406 do { 5407 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev); 5408 trace_nfs4_mknod(dir, &dentry->d_name, err); 5409 err = nfs4_handle_exception(NFS_SERVER(dir), err, 5410 &exception); 5411 } while (exception.retry); 5412 5413 nfs4_label_release_security(label); 5414 5415 return err; 5416 } 5417 5418 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, 5419 struct nfs_fsstat *fsstat) 5420 { 5421 struct nfs4_statfs_arg args = { 5422 .fh = fhandle, 5423 .bitmask = server->attr_bitmask, 5424 }; 5425 struct nfs4_statfs_res res = { 5426 .fsstat = fsstat, 5427 }; 5428 struct rpc_message msg = { 5429 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS], 5430 .rpc_argp = &args, 5431 .rpc_resp = &res, 5432 }; 5433 5434 nfs_fattr_init(fsstat->fattr); 5435 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5436 } 5437 5438 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) 5439 { 5440 struct nfs4_exception exception = { 5441 .interruptible = true, 
5442 }; 5443 int err; 5444 do { 5445 err = nfs4_handle_exception(server, 5446 _nfs4_proc_statfs(server, fhandle, fsstat), 5447 &exception); 5448 } while (exception.retry); 5449 return err; 5450 } 5451 5452 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, 5453 struct nfs_fsinfo *fsinfo) 5454 { 5455 struct nfs4_fsinfo_arg args = { 5456 .fh = fhandle, 5457 .bitmask = server->attr_bitmask, 5458 }; 5459 struct nfs4_fsinfo_res res = { 5460 .fsinfo = fsinfo, 5461 }; 5462 struct rpc_message msg = { 5463 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO], 5464 .rpc_argp = &args, 5465 .rpc_resp = &res, 5466 }; 5467 5468 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5469 } 5470 5471 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 5472 { 5473 struct nfs4_exception exception = { 5474 .interruptible = true, 5475 }; 5476 int err; 5477 5478 do { 5479 err = _nfs4_do_fsinfo(server, fhandle, fsinfo); 5480 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err); 5481 if (err == 0) { 5482 nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time * HZ); 5483 break; 5484 } 5485 err = nfs4_handle_exception(server, err, &exception); 5486 } while (exception.retry); 5487 return err; 5488 } 5489 5490 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 5491 { 5492 int error; 5493 5494 nfs_fattr_init(fsinfo->fattr); 5495 error = nfs4_do_fsinfo(server, fhandle, fsinfo); 5496 if (error == 0) { 5497 /* block layout checks this! */ 5498 server->pnfs_blksize = fsinfo->blksize; 5499 set_pnfs_layoutdriver(server, fhandle, fsinfo); 5500 } 5501 5502 return error; 5503 } 5504 5505 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 5506 struct nfs_pathconf *pathconf) 5507 { 5508 struct nfs4_pathconf_arg args = { 5509 .fh = fhandle, 5510 .bitmask = server->attr_bitmask, 5511 }; 5512 struct nfs4_pathconf_res res = { 5513 .pathconf = pathconf, 5514 }; 5515 struct rpc_message msg = { 5516 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF], 5517 .rpc_argp = &args, 5518 .rpc_resp = &res, 5519 }; 5520 5521 /* None of the pathconf attributes are mandatory to implement */ 5522 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) { 5523 memset(pathconf, 0, sizeof(*pathconf)); 5524 return 0; 5525 } 5526 5527 nfs_fattr_init(pathconf->fattr); 5528 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5529 } 5530 5531 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 5532 struct nfs_pathconf *pathconf) 5533 { 5534 struct nfs4_exception exception = { 5535 .interruptible = true, 5536 }; 5537 int err; 5538 5539 do { 5540 err = nfs4_handle_exception(server, 5541 _nfs4_proc_pathconf(server, fhandle, pathconf), 5542 &exception); 5543 } while (exception.retry); 5544 return err; 5545 } 5546 5547 int nfs4_set_rw_stateid(nfs4_stateid *stateid, 5548 const struct nfs_open_context *ctx, 5549 const struct nfs_lock_context *l_ctx, 5550 fmode_t fmode) 5551 { 5552 return nfs4_select_rw_stateid(ctx->state, fmode, l_ctx, stateid, NULL); 5553 } 5554 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid); 5555 5556 static bool nfs4_stateid_is_current(nfs4_stateid *stateid, 5557 const struct nfs_open_context *ctx, 5558 const struct nfs_lock_context *l_ctx, 5559 fmode_t fmode) 5560 { 5561 nfs4_stateid _current_stateid; 5562 5563 /* If the current stateid represents a lost lock, then exit */ 5564 if 
(nfs4_set_rw_stateid(&_current_stateid, ctx, l_ctx, fmode) == -EIO) 5565 return true; 5566 return nfs4_stateid_match(stateid, &_current_stateid); 5567 } 5568 5569 static bool nfs4_error_stateid_expired(int err) 5570 { 5571 switch (err) { 5572 case -NFS4ERR_DELEG_REVOKED: 5573 case -NFS4ERR_ADMIN_REVOKED: 5574 case -NFS4ERR_BAD_STATEID: 5575 case -NFS4ERR_STALE_STATEID: 5576 case -NFS4ERR_OLD_STATEID: 5577 case -NFS4ERR_OPENMODE: 5578 case -NFS4ERR_EXPIRED: 5579 return true; 5580 } 5581 return false; 5582 } 5583 5584 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr) 5585 { 5586 struct nfs_server *server = NFS_SERVER(hdr->inode); 5587 5588 trace_nfs4_read(hdr, task->tk_status); 5589 if (task->tk_status < 0) { 5590 struct nfs4_exception exception = { 5591 .inode = hdr->inode, 5592 .state = hdr->args.context->state, 5593 .stateid = &hdr->args.stateid, 5594 }; 5595 task->tk_status = nfs4_async_handle_exception(task, 5596 server, task->tk_status, &exception); 5597 if (exception.retry) { 5598 rpc_restart_call_prepare(task); 5599 return -EAGAIN; 5600 } 5601 } 5602 5603 if (task->tk_status > 0) 5604 renew_lease(server, hdr->timestamp); 5605 return 0; 5606 } 5607 5608 static bool nfs4_read_stateid_changed(struct rpc_task *task, 5609 struct nfs_pgio_args *args) 5610 { 5611 5612 if (!nfs4_error_stateid_expired(task->tk_status) || 5613 nfs4_stateid_is_current(&args->stateid, 5614 args->context, 5615 args->lock_context, 5616 FMODE_READ)) 5617 return false; 5618 rpc_restart_call_prepare(task); 5619 return true; 5620 } 5621 5622 static bool nfs4_read_plus_not_supported(struct rpc_task *task, 5623 struct nfs_pgio_header *hdr) 5624 { 5625 struct nfs_server *server = NFS_SERVER(hdr->inode); 5626 struct rpc_message *msg = &task->tk_msg; 5627 5628 if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS] && 5629 task->tk_status == -ENOTSUPP) { 5630 server->caps &= ~NFS_CAP_READ_PLUS; 5631 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 5632 rpc_restart_call_prepare(task); 5633 return true; 5634 } 5635 return false; 5636 } 5637 5638 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 5639 { 5640 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 5641 return -EAGAIN; 5642 if (nfs4_read_stateid_changed(task, &hdr->args)) 5643 return -EAGAIN; 5644 if (nfs4_read_plus_not_supported(task, hdr)) 5645 return -EAGAIN; 5646 if (task->tk_status > 0) 5647 nfs_invalidate_atime(hdr->inode); 5648 return hdr->pgio_done_cb ? 
hdr->pgio_done_cb(task, hdr) : 5649 nfs4_read_done_cb(task, hdr); 5650 } 5651 5652 #if defined CONFIG_NFS_V4_2 && defined CONFIG_NFS_V4_2_READ_PLUS 5653 static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr, 5654 struct rpc_message *msg) 5655 { 5656 /* Note: We don't use READ_PLUS with pNFS yet */ 5657 if (nfs_server_capable(hdr->inode, NFS_CAP_READ_PLUS) && !hdr->ds_clp) { 5658 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS]; 5659 return nfs_read_alloc_scratch(hdr, READ_PLUS_SCRATCH_SIZE); 5660 } 5661 return false; 5662 } 5663 #else 5664 static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr, 5665 struct rpc_message *msg) 5666 { 5667 return false; 5668 } 5669 #endif /* CONFIG_NFS_V4_2 */ 5670 5671 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr, 5672 struct rpc_message *msg) 5673 { 5674 hdr->timestamp = jiffies; 5675 if (!hdr->pgio_done_cb) 5676 hdr->pgio_done_cb = nfs4_read_done_cb; 5677 if (!nfs42_read_plus_support(hdr, msg)) 5678 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 5679 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0); 5680 } 5681 5682 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task, 5683 struct nfs_pgio_header *hdr) 5684 { 5685 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode)->nfs_client, 5686 &hdr->args.seq_args, 5687 &hdr->res.seq_res, 5688 task)) 5689 return 0; 5690 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context, 5691 hdr->args.lock_context, 5692 hdr->rw_mode) == -EIO) 5693 return -EIO; 5694 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) 5695 return -EIO; 5696 return 0; 5697 } 5698 5699 static int nfs4_write_done_cb(struct rpc_task *task, 5700 struct nfs_pgio_header *hdr) 5701 { 5702 struct inode *inode = hdr->inode; 5703 5704 trace_nfs4_write(hdr, task->tk_status); 5705 if (task->tk_status < 0) { 5706 struct nfs4_exception exception = { 5707 .inode = hdr->inode, 5708 .state = hdr->args.context->state, 5709 .stateid = &hdr->args.stateid, 5710 }; 5711 task->tk_status = nfs4_async_handle_exception(task, 5712 NFS_SERVER(inode), task->tk_status, 5713 &exception); 5714 if (exception.retry) { 5715 rpc_restart_call_prepare(task); 5716 return -EAGAIN; 5717 } 5718 } 5719 if (task->tk_status >= 0) { 5720 renew_lease(NFS_SERVER(inode), hdr->timestamp); 5721 nfs_writeback_update_inode(hdr); 5722 } 5723 return 0; 5724 } 5725 5726 static bool nfs4_write_stateid_changed(struct rpc_task *task, 5727 struct nfs_pgio_args *args) 5728 { 5729 5730 if (!nfs4_error_stateid_expired(task->tk_status) || 5731 nfs4_stateid_is_current(&args->stateid, 5732 args->context, 5733 args->lock_context, 5734 FMODE_WRITE)) 5735 return false; 5736 rpc_restart_call_prepare(task); 5737 return true; 5738 } 5739 5740 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 5741 { 5742 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 5743 return -EAGAIN; 5744 if (nfs4_write_stateid_changed(task, &hdr->args)) 5745 return -EAGAIN; 5746 return hdr->pgio_done_cb ? 
hdr->pgio_done_cb(task, hdr) : 5747 nfs4_write_done_cb(task, hdr); 5748 } 5749 5750 static 5751 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr) 5752 { 5753 /* Don't request attributes for pNFS or O_DIRECT writes */ 5754 if (hdr->ds_clp != NULL || hdr->dreq != NULL) 5755 return false; 5756 /* Otherwise, request attributes if and only if we don't hold 5757 * a delegation 5758 */ 5759 return nfs4_have_delegation(hdr->inode, FMODE_READ, 0) == 0; 5760 } 5761 5762 void nfs4_bitmask_set(__u32 bitmask[], const __u32 src[], 5763 struct inode *inode, unsigned long cache_validity) 5764 { 5765 struct nfs_server *server = NFS_SERVER(inode); 5766 unsigned int i; 5767 5768 memcpy(bitmask, src, sizeof(*bitmask) * NFS4_BITMASK_SZ); 5769 cache_validity |= READ_ONCE(NFS_I(inode)->cache_validity); 5770 5771 if (cache_validity & NFS_INO_INVALID_CHANGE) 5772 bitmask[0] |= FATTR4_WORD0_CHANGE; 5773 if (cache_validity & NFS_INO_INVALID_ATIME) 5774 bitmask[1] |= FATTR4_WORD1_TIME_ACCESS; 5775 if (cache_validity & NFS_INO_INVALID_MODE) 5776 bitmask[1] |= FATTR4_WORD1_MODE; 5777 if (cache_validity & NFS_INO_INVALID_OTHER) 5778 bitmask[1] |= FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP; 5779 if (cache_validity & NFS_INO_INVALID_NLINK) 5780 bitmask[1] |= FATTR4_WORD1_NUMLINKS; 5781 if (cache_validity & NFS_INO_INVALID_CTIME) 5782 bitmask[1] |= FATTR4_WORD1_TIME_METADATA; 5783 if (cache_validity & NFS_INO_INVALID_MTIME) 5784 bitmask[1] |= FATTR4_WORD1_TIME_MODIFY; 5785 if (cache_validity & NFS_INO_INVALID_BLOCKS) 5786 bitmask[1] |= FATTR4_WORD1_SPACE_USED; 5787 if (cache_validity & NFS_INO_INVALID_BTIME) 5788 bitmask[1] |= FATTR4_WORD1_TIME_CREATE; 5789 5790 if (cache_validity & NFS_INO_INVALID_SIZE) 5791 bitmask[0] |= FATTR4_WORD0_SIZE; 5792 5793 for (i = 0; i < NFS4_BITMASK_SZ; i++) 5794 bitmask[i] &= server->attr_bitmask[i]; 5795 } 5796 5797 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr, 5798 struct rpc_message *msg, 5799 struct rpc_clnt **clnt) 5800 { 5801 struct nfs_server *server = NFS_SERVER(hdr->inode); 5802 5803 if (!nfs4_write_need_cache_consistency_data(hdr)) { 5804 hdr->args.bitmask = NULL; 5805 hdr->res.fattr = NULL; 5806 } else { 5807 nfs4_bitmask_set(hdr->args.bitmask_store, 5808 server->cache_consistency_bitmask, 5809 hdr->inode, NFS_INO_INVALID_BLOCKS); 5810 hdr->args.bitmask = hdr->args.bitmask_store; 5811 } 5812 5813 if (!hdr->pgio_done_cb) 5814 hdr->pgio_done_cb = nfs4_write_done_cb; 5815 hdr->res.server = server; 5816 hdr->timestamp = jiffies; 5817 5818 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; 5819 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0); 5820 nfs4_state_protect_write(hdr->ds_clp ? 
hdr->ds_clp : server->nfs_client, clnt, msg, hdr); 5821 } 5822 5823 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) 5824 { 5825 nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client, 5826 &data->args.seq_args, 5827 &data->res.seq_res, 5828 task); 5829 } 5830 5831 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data) 5832 { 5833 struct inode *inode = data->inode; 5834 5835 trace_nfs4_commit(data, task->tk_status); 5836 if (nfs4_async_handle_error(task, NFS_SERVER(inode), 5837 NULL, NULL) == -EAGAIN) { 5838 rpc_restart_call_prepare(task); 5839 return -EAGAIN; 5840 } 5841 return 0; 5842 } 5843 5844 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data) 5845 { 5846 if (!nfs4_sequence_done(task, &data->res.seq_res)) 5847 return -EAGAIN; 5848 return data->commit_done_cb(task, data); 5849 } 5850 5851 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg, 5852 struct rpc_clnt **clnt) 5853 { 5854 struct nfs_server *server = NFS_SERVER(data->inode); 5855 5856 if (data->commit_done_cb == NULL) 5857 data->commit_done_cb = nfs4_commit_done_cb; 5858 data->res.server = server; 5859 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT]; 5860 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0); 5861 nfs4_state_protect(data->ds_clp ? data->ds_clp : server->nfs_client, 5862 NFS_SP4_MACH_CRED_COMMIT, clnt, msg); 5863 } 5864 5865 static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args, 5866 struct nfs_commitres *res) 5867 { 5868 struct inode *dst_inode = file_inode(dst); 5869 struct nfs_server *server = NFS_SERVER(dst_inode); 5870 struct rpc_message msg = { 5871 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT], 5872 .rpc_argp = args, 5873 .rpc_resp = res, 5874 }; 5875 5876 args->fh = NFS_FH(dst_inode); 5877 return nfs4_call_sync(server->client, server, &msg, 5878 &args->seq_args, &res->seq_res, 1); 5879 } 5880 5881 int nfs4_proc_commit(struct file *dst, __u64 offset, __u32 count, struct nfs_commitres *res) 5882 { 5883 struct nfs_commitargs args = { 5884 .offset = offset, 5885 .count = count, 5886 }; 5887 struct nfs_server *dst_server = NFS_SERVER(file_inode(dst)); 5888 struct nfs4_exception exception = { }; 5889 int status; 5890 5891 do { 5892 status = _nfs4_proc_commit(dst, &args, res); 5893 status = nfs4_handle_exception(dst_server, status, &exception); 5894 } while (exception.retry); 5895 5896 return status; 5897 } 5898 5899 struct nfs4_renewdata { 5900 struct nfs_client *client; 5901 unsigned long timestamp; 5902 }; 5903 5904 /* 5905 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special 5906 * standalone procedure for queueing an asynchronous RENEW. 
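 * Completion is handled by nfs4_renew_done(); nfs4_renew_release() then
 * reschedules the renewal and drops the nfs_client reference.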
5907 */ 5908 static void nfs4_renew_release(void *calldata) 5909 { 5910 struct nfs4_renewdata *data = calldata; 5911 struct nfs_client *clp = data->client; 5912 5913 if (refcount_read(&clp->cl_count) > 1) 5914 nfs4_schedule_state_renewal(clp); 5915 nfs_put_client(clp); 5916 kfree(data); 5917 } 5918 5919 static void nfs4_renew_done(struct rpc_task *task, void *calldata) 5920 { 5921 struct nfs4_renewdata *data = calldata; 5922 struct nfs_client *clp = data->client; 5923 unsigned long timestamp = data->timestamp; 5924 5925 trace_nfs4_renew_async(clp, task->tk_status); 5926 switch (task->tk_status) { 5927 case 0: 5928 break; 5929 case -NFS4ERR_LEASE_MOVED: 5930 nfs4_schedule_lease_moved_recovery(clp); 5931 break; 5932 default: 5933 /* Unless we're shutting down, schedule state recovery! */ 5934 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) 5935 return; 5936 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { 5937 nfs4_schedule_lease_recovery(clp); 5938 return; 5939 } 5940 nfs4_schedule_path_down_recovery(clp); 5941 } 5942 do_renew_lease(clp, timestamp); 5943 } 5944 5945 static const struct rpc_call_ops nfs4_renew_ops = { 5946 .rpc_call_done = nfs4_renew_done, 5947 .rpc_release = nfs4_renew_release, 5948 }; 5949 5950 static int nfs4_proc_async_renew(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags) 5951 { 5952 struct rpc_message msg = { 5953 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 5954 .rpc_argp = clp, 5955 .rpc_cred = cred, 5956 }; 5957 struct nfs4_renewdata *data; 5958 5959 if (renew_flags == 0) 5960 return 0; 5961 if (!refcount_inc_not_zero(&clp->cl_count)) 5962 return -EIO; 5963 data = kmalloc(sizeof(*data), GFP_NOFS); 5964 if (data == NULL) { 5965 nfs_put_client(clp); 5966 return -ENOMEM; 5967 } 5968 data->client = clp; 5969 data->timestamp = jiffies; 5970 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT, 5971 &nfs4_renew_ops, data); 5972 } 5973 5974 static int nfs4_proc_renew(struct nfs_client *clp, const struct cred *cred) 5975 { 5976 struct rpc_message msg = { 5977 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 5978 .rpc_argp = clp, 5979 .rpc_cred = cred, 5980 }; 5981 unsigned long now = jiffies; 5982 int status; 5983 5984 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5985 if (status < 0) 5986 return status; 5987 do_renew_lease(clp, now); 5988 return 0; 5989 } 5990 5991 static bool nfs4_server_supports_acls(const struct nfs_server *server, 5992 enum nfs4_acl_type type) 5993 { 5994 switch (type) { 5995 default: 5996 return server->attr_bitmask[0] & FATTR4_WORD0_ACL; 5997 case NFS4ACL_DACL: 5998 return server->attr_bitmask[1] & FATTR4_WORD1_DACL; 5999 case NFS4ACL_SACL: 6000 return server->attr_bitmask[1] & FATTR4_WORD1_SACL; 6001 } 6002 } 6003 6004 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that 6005 * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on 6006 * the stack. 
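 * (With 4 KiB pages that is XATTR_SIZE_MAX/PAGE_SIZE = 16 page pointers,
 * i.e. roughly 128 bytes on a 64-bit build.)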
6007 */ 6008 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE) 6009 6010 int nfs4_buf_to_pages_noslab(const void *buf, size_t buflen, 6011 struct page **pages) 6012 { 6013 struct page *newpage, **spages; 6014 int rc = 0; 6015 size_t len; 6016 spages = pages; 6017 6018 do { 6019 len = min_t(size_t, PAGE_SIZE, buflen); 6020 newpage = alloc_page(GFP_KERNEL); 6021 6022 if (newpage == NULL) 6023 goto unwind; 6024 memcpy(page_address(newpage), buf, len); 6025 buf += len; 6026 buflen -= len; 6027 *pages++ = newpage; 6028 rc++; 6029 } while (buflen != 0); 6030 6031 return rc; 6032 6033 unwind: 6034 for(; rc > 0; rc--) 6035 __free_page(spages[rc-1]); 6036 return -ENOMEM; 6037 } 6038 6039 struct nfs4_cached_acl { 6040 enum nfs4_acl_type type; 6041 int cached; 6042 size_t len; 6043 char data[]; 6044 }; 6045 6046 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl) 6047 { 6048 struct nfs_inode *nfsi = NFS_I(inode); 6049 6050 spin_lock(&inode->i_lock); 6051 kfree(nfsi->nfs4_acl); 6052 nfsi->nfs4_acl = acl; 6053 spin_unlock(&inode->i_lock); 6054 } 6055 6056 static void nfs4_zap_acl_attr(struct inode *inode) 6057 { 6058 nfs4_set_cached_acl(inode, NULL); 6059 } 6060 6061 static ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, 6062 size_t buflen, enum nfs4_acl_type type) 6063 { 6064 struct nfs_inode *nfsi = NFS_I(inode); 6065 struct nfs4_cached_acl *acl; 6066 int ret = -ENOENT; 6067 6068 spin_lock(&inode->i_lock); 6069 acl = nfsi->nfs4_acl; 6070 if (acl == NULL) 6071 goto out; 6072 if (acl->type != type) 6073 goto out; 6074 if (buf == NULL) /* user is just asking for length */ 6075 goto out_len; 6076 if (acl->cached == 0) 6077 goto out; 6078 ret = -ERANGE; /* see getxattr(2) man page */ 6079 if (acl->len > buflen) 6080 goto out; 6081 memcpy(buf, acl->data, acl->len); 6082 out_len: 6083 ret = acl->len; 6084 out: 6085 spin_unlock(&inode->i_lock); 6086 return ret; 6087 } 6088 6089 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, 6090 size_t pgbase, size_t acl_len, 6091 enum nfs4_acl_type type) 6092 { 6093 struct nfs4_cached_acl *acl; 6094 size_t buflen = sizeof(*acl) + acl_len; 6095 6096 if (buflen <= PAGE_SIZE) { 6097 acl = kmalloc(buflen, GFP_KERNEL); 6098 if (acl == NULL) 6099 goto out; 6100 acl->cached = 1; 6101 _copy_from_pages(acl->data, pages, pgbase, acl_len); 6102 } else { 6103 acl = kmalloc(sizeof(*acl), GFP_KERNEL); 6104 if (acl == NULL) 6105 goto out; 6106 acl->cached = 0; 6107 } 6108 acl->type = type; 6109 acl->len = acl_len; 6110 out: 6111 nfs4_set_cached_acl(inode, acl); 6112 } 6113 6114 /* 6115 * The getxattr API returns the required buffer length when called with a 6116 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating 6117 * the required buf. On a NULL buf, we send a page of data to the server 6118 * guessing that the ACL request can be serviced by a page. If so, we cache 6119 * up to the page of ACL data, and the 2nd call to getxattr is serviced by 6120 * the cache. If not so, we throw away the page, and cache the required 6121 * length. The next getxattr call will then produce another round trip to 6122 * the server, this time with the input buf of the required size. 
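 *
 * Illustrative userspace sequence (not part of this file) that drives the
 * two-pass behaviour described above:
 *
 *	len = getxattr(path, "system.nfs4_acl", NULL, 0);   (probe length)
 *	buf = malloc(len);
 *	len = getxattr(path, "system.nfs4_acl", buf, len);   (fetch data)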
6123 */ 6124 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, 6125 size_t buflen, enum nfs4_acl_type type) 6126 { 6127 struct page **pages; 6128 struct nfs_getaclargs args = { 6129 .fh = NFS_FH(inode), 6130 .acl_type = type, 6131 .acl_len = buflen, 6132 }; 6133 struct nfs_getaclres res = { 6134 .acl_type = type, 6135 .acl_len = buflen, 6136 }; 6137 struct rpc_message msg = { 6138 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL], 6139 .rpc_argp = &args, 6140 .rpc_resp = &res, 6141 }; 6142 unsigned int npages; 6143 int ret = -ENOMEM, i; 6144 struct nfs_server *server = NFS_SERVER(inode); 6145 6146 if (buflen == 0) 6147 buflen = server->rsize; 6148 6149 npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1; 6150 pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); 6151 if (!pages) 6152 return -ENOMEM; 6153 6154 args.acl_pages = pages; 6155 6156 for (i = 0; i < npages; i++) { 6157 pages[i] = alloc_page(GFP_KERNEL); 6158 if (!pages[i]) 6159 goto out_free; 6160 } 6161 6162 /* for decoding across pages */ 6163 res.acl_scratch = alloc_page(GFP_KERNEL); 6164 if (!res.acl_scratch) 6165 goto out_free; 6166 6167 args.acl_len = npages * PAGE_SIZE; 6168 6169 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", 6170 __func__, buf, buflen, npages, args.acl_len); 6171 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), 6172 &msg, &args.seq_args, &res.seq_res, 0); 6173 if (ret) 6174 goto out_free; 6175 6176 /* Handle the case where the passed-in buffer is too short */ 6177 if (res.acl_flags & NFS4_ACL_TRUNC) { 6178 /* Did the user only issue a request for the acl length? */ 6179 if (buf == NULL) 6180 goto out_ok; 6181 ret = -ERANGE; 6182 goto out_free; 6183 } 6184 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len, 6185 type); 6186 if (buf) { 6187 if (res.acl_len > buflen) { 6188 ret = -ERANGE; 6189 goto out_free; 6190 } 6191 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len); 6192 } 6193 out_ok: 6194 ret = res.acl_len; 6195 out_free: 6196 while (--i >= 0) 6197 __free_page(pages[i]); 6198 if (res.acl_scratch) 6199 __free_page(res.acl_scratch); 6200 kfree(pages); 6201 return ret; 6202 } 6203 6204 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, 6205 size_t buflen, enum nfs4_acl_type type) 6206 { 6207 struct nfs4_exception exception = { 6208 .interruptible = true, 6209 }; 6210 ssize_t ret; 6211 do { 6212 ret = __nfs4_get_acl_uncached(inode, buf, buflen, type); 6213 trace_nfs4_get_acl(inode, ret); 6214 if (ret >= 0) 6215 break; 6216 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); 6217 } while (exception.retry); 6218 return ret; 6219 } 6220 6221 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen, 6222 enum nfs4_acl_type type) 6223 { 6224 struct nfs_server *server = NFS_SERVER(inode); 6225 int ret; 6226 6227 if (unlikely(NFS_FH(inode)->size == 0)) 6228 return -ENODATA; 6229 if (!nfs4_server_supports_acls(server, type)) 6230 return -EOPNOTSUPP; 6231 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 6232 if (ret < 0) 6233 return ret; 6234 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) 6235 nfs_zap_acl_cache(inode); 6236 ret = nfs4_read_cached_acl(inode, buf, buflen, type); 6237 if (ret != -ENOENT) 6238 /* -ENOENT is returned if there is no ACL or if there is an ACL 6239 * but no cached acl data, just the acl length */ 6240 return ret; 6241 return nfs4_get_acl_uncached(inode, buf, buflen, type); 6242 } 6243 6244 static int __nfs4_proc_set_acl(struct 
inode *inode, const void *buf, 6245 size_t buflen, enum nfs4_acl_type type) 6246 { 6247 struct nfs_server *server = NFS_SERVER(inode); 6248 struct page *pages[NFS4ACL_MAXPAGES]; 6249 struct nfs_setaclargs arg = { 6250 .fh = NFS_FH(inode), 6251 .acl_type = type, 6252 .acl_len = buflen, 6253 .acl_pages = pages, 6254 }; 6255 struct nfs_setaclres res; 6256 struct rpc_message msg = { 6257 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL], 6258 .rpc_argp = &arg, 6259 .rpc_resp = &res, 6260 }; 6261 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 6262 int ret, i; 6263 6264 /* You can't remove system.nfs4_acl: */ 6265 if (buflen == 0) 6266 return -EINVAL; 6267 if (!nfs4_server_supports_acls(server, type)) 6268 return -EOPNOTSUPP; 6269 if (npages > ARRAY_SIZE(pages)) 6270 return -ERANGE; 6271 i = nfs4_buf_to_pages_noslab(buf, buflen, arg.acl_pages); 6272 if (i < 0) 6273 return i; 6274 nfs4_inode_make_writeable(inode); 6275 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 6276 6277 /* 6278 * Free each page after tx, so the only ref left is 6279 * held by the network stack 6280 */ 6281 for (; i > 0; i--) 6282 put_page(pages[i-1]); 6283 6284 /* 6285 * Acl update can result in inode attribute update. 6286 * so mark the attribute cache invalid. 6287 */ 6288 spin_lock(&inode->i_lock); 6289 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE | 6290 NFS_INO_INVALID_CTIME | 6291 NFS_INO_REVAL_FORCED); 6292 spin_unlock(&inode->i_lock); 6293 nfs_access_zap_cache(inode); 6294 nfs_zap_acl_cache(inode); 6295 return ret; 6296 } 6297 6298 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, 6299 size_t buflen, enum nfs4_acl_type type) 6300 { 6301 struct nfs4_exception exception = { }; 6302 int err; 6303 6304 if (unlikely(NFS_FH(inode)->size == 0)) 6305 return -ENODATA; 6306 do { 6307 err = __nfs4_proc_set_acl(inode, buf, buflen, type); 6308 trace_nfs4_set_acl(inode, err); 6309 if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) { 6310 /* 6311 * no need to retry since the kernel 6312 * isn't involved in encoding the ACEs. 
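			 * Retrying would resend the identical, caller-supplied
			 * ACL data, so report -EINVAL instead.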
6313 */ 6314 err = -EINVAL; 6315 break; 6316 } 6317 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6318 &exception); 6319 } while (exception.retry); 6320 return err; 6321 } 6322 6323 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 6324 static int _nfs4_get_security_label(struct inode *inode, void *buf, 6325 size_t buflen) 6326 { 6327 struct nfs_server *server = NFS_SERVER(inode); 6328 struct nfs4_label label = {0, 0, 0, buflen, buf}; 6329 6330 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 6331 struct nfs_fattr fattr = { 6332 .label = &label, 6333 }; 6334 struct nfs4_getattr_arg arg = { 6335 .fh = NFS_FH(inode), 6336 .bitmask = bitmask, 6337 }; 6338 struct nfs4_getattr_res res = { 6339 .fattr = &fattr, 6340 .server = server, 6341 }; 6342 struct rpc_message msg = { 6343 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 6344 .rpc_argp = &arg, 6345 .rpc_resp = &res, 6346 }; 6347 int ret; 6348 6349 nfs_fattr_init(&fattr); 6350 6351 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0); 6352 if (ret) 6353 return ret; 6354 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL)) 6355 return -ENOENT; 6356 return label.len; 6357 } 6358 6359 static int nfs4_get_security_label(struct inode *inode, void *buf, 6360 size_t buflen) 6361 { 6362 struct nfs4_exception exception = { 6363 .interruptible = true, 6364 }; 6365 int err; 6366 6367 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 6368 return -EOPNOTSUPP; 6369 6370 do { 6371 err = _nfs4_get_security_label(inode, buf, buflen); 6372 trace_nfs4_get_security_label(inode, err); 6373 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6374 &exception); 6375 } while (exception.retry); 6376 return err; 6377 } 6378 6379 static int _nfs4_do_set_security_label(struct inode *inode, 6380 struct nfs4_label *ilabel, 6381 struct nfs_fattr *fattr) 6382 { 6383 6384 struct iattr sattr = {0}; 6385 struct nfs_server *server = NFS_SERVER(inode); 6386 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 6387 struct nfs_setattrargs arg = { 6388 .fh = NFS_FH(inode), 6389 .iap = &sattr, 6390 .server = server, 6391 .bitmask = bitmask, 6392 .label = ilabel, 6393 }; 6394 struct nfs_setattrres res = { 6395 .fattr = fattr, 6396 .server = server, 6397 }; 6398 struct rpc_message msg = { 6399 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 6400 .rpc_argp = &arg, 6401 .rpc_resp = &res, 6402 }; 6403 int status; 6404 6405 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 6406 6407 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 6408 if (status) 6409 dprintk("%s failed: %d\n", __func__, status); 6410 6411 return status; 6412 } 6413 6414 static int nfs4_do_set_security_label(struct inode *inode, 6415 struct nfs4_label *ilabel, 6416 struct nfs_fattr *fattr) 6417 { 6418 struct nfs4_exception exception = { }; 6419 int err; 6420 6421 do { 6422 err = _nfs4_do_set_security_label(inode, ilabel, fattr); 6423 trace_nfs4_set_security_label(inode, err); 6424 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6425 &exception); 6426 } while (exception.retry); 6427 return err; 6428 } 6429 6430 static int 6431 nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen) 6432 { 6433 struct nfs4_label ilabel = {0, 0, 0, buflen, (char *)buf }; 6434 struct nfs_fattr *fattr; 6435 int status; 6436 6437 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 6438 return -EOPNOTSUPP; 6439 6440 fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode)); 6441 if (fattr == NULL) 6442 return -ENOMEM; 6443 6444 
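	/*
	 * Send the label to the server via SETATTR; on success, refresh
	 * the in-core security context from the returned attributes.
	 */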
status = nfs4_do_set_security_label(inode, &ilabel, fattr); 6445 if (status == 0) 6446 nfs_setsecurity(inode, fattr); 6447 6448 nfs_free_fattr(fattr); 6449 return status; 6450 } 6451 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */ 6452 6453 6454 static void nfs4_init_boot_verifier(const struct nfs_client *clp, 6455 nfs4_verifier *bootverf) 6456 { 6457 __be32 verf[2]; 6458 6459 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) { 6460 /* An impossible timestamp guarantees this value 6461 * will never match a generated boot time. */ 6462 verf[0] = cpu_to_be32(U32_MAX); 6463 verf[1] = cpu_to_be32(U32_MAX); 6464 } else { 6465 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 6466 u64 ns = ktime_to_ns(nn->boot_time); 6467 6468 verf[0] = cpu_to_be32(ns >> 32); 6469 verf[1] = cpu_to_be32(ns); 6470 } 6471 memcpy(bootverf->data, verf, sizeof(bootverf->data)); 6472 } 6473 6474 static size_t 6475 nfs4_get_uniquifier(struct nfs_client *clp, char *buf, size_t buflen) 6476 { 6477 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 6478 struct nfs_netns_client *nn_clp = nn->nfs_client; 6479 const char *id; 6480 6481 buf[0] = '\0'; 6482 6483 if (nn_clp) { 6484 rcu_read_lock(); 6485 id = rcu_dereference(nn_clp->identifier); 6486 if (id) 6487 strscpy(buf, id, buflen); 6488 rcu_read_unlock(); 6489 } 6490 6491 if (nfs4_client_id_uniquifier[0] != '\0' && buf[0] == '\0') 6492 strscpy(buf, nfs4_client_id_uniquifier, buflen); 6493 6494 return strlen(buf); 6495 } 6496 6497 static int 6498 nfs4_init_nonuniform_client_string(struct nfs_client *clp) 6499 { 6500 char buf[NFS4_CLIENT_ID_UNIQ_LEN]; 6501 size_t buflen; 6502 size_t len; 6503 char *str; 6504 6505 if (clp->cl_owner_id != NULL) 6506 return 0; 6507 6508 rcu_read_lock(); 6509 len = 14 + 6510 strlen(clp->cl_rpcclient->cl_nodename) + 6511 1 + 6512 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) + 6513 1; 6514 rcu_read_unlock(); 6515 6516 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf)); 6517 if (buflen) 6518 len += buflen + 1; 6519 6520 if (len > NFS4_OPAQUE_LIMIT + 1) 6521 return -EINVAL; 6522 6523 /* 6524 * Since this string is allocated at mount time, and held until the 6525 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 6526 * about a memory-reclaim deadlock. 6527 */ 6528 str = kmalloc(len, GFP_KERNEL); 6529 if (!str) 6530 return -ENOMEM; 6531 6532 rcu_read_lock(); 6533 if (buflen) 6534 scnprintf(str, len, "Linux NFSv4.0 %s/%s/%s", 6535 clp->cl_rpcclient->cl_nodename, buf, 6536 rpc_peeraddr2str(clp->cl_rpcclient, 6537 RPC_DISPLAY_ADDR)); 6538 else 6539 scnprintf(str, len, "Linux NFSv4.0 %s/%s", 6540 clp->cl_rpcclient->cl_nodename, 6541 rpc_peeraddr2str(clp->cl_rpcclient, 6542 RPC_DISPLAY_ADDR)); 6543 rcu_read_unlock(); 6544 6545 clp->cl_owner_id = str; 6546 return 0; 6547 } 6548 6549 static int 6550 nfs4_init_uniform_client_string(struct nfs_client *clp) 6551 { 6552 char buf[NFS4_CLIENT_ID_UNIQ_LEN]; 6553 size_t buflen; 6554 size_t len; 6555 char *str; 6556 6557 if (clp->cl_owner_id != NULL) 6558 return 0; 6559 6560 len = 10 + 10 + 1 + 10 + 1 + 6561 strlen(clp->cl_rpcclient->cl_nodename) + 1; 6562 6563 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf)); 6564 if (buflen) 6565 len += buflen + 1; 6566 6567 if (len > NFS4_OPAQUE_LIMIT + 1) 6568 return -EINVAL; 6569 6570 /* 6571 * Since this string is allocated at mount time, and held until the 6572 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 6573 * about a memory-reclaim deadlock. 
6574 */ 6575 str = kmalloc(len, GFP_KERNEL); 6576 if (!str) 6577 return -ENOMEM; 6578 6579 if (buflen) 6580 scnprintf(str, len, "Linux NFSv%u.%u %s/%s", 6581 clp->rpc_ops->version, clp->cl_minorversion, 6582 buf, clp->cl_rpcclient->cl_nodename); 6583 else 6584 scnprintf(str, len, "Linux NFSv%u.%u %s", 6585 clp->rpc_ops->version, clp->cl_minorversion, 6586 clp->cl_rpcclient->cl_nodename); 6587 clp->cl_owner_id = str; 6588 return 0; 6589 } 6590 6591 /* 6592 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback 6593 * services. Advertise one based on the address family of the 6594 * clientaddr. 6595 */ 6596 static unsigned int 6597 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len) 6598 { 6599 if (strchr(clp->cl_ipaddr, ':') != NULL) 6600 return scnprintf(buf, len, "tcp6"); 6601 else 6602 return scnprintf(buf, len, "tcp"); 6603 } 6604 6605 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata) 6606 { 6607 struct nfs4_setclientid *sc = calldata; 6608 6609 if (task->tk_status == 0) 6610 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred); 6611 } 6612 6613 static const struct rpc_call_ops nfs4_setclientid_ops = { 6614 .rpc_call_done = nfs4_setclientid_done, 6615 }; 6616 6617 /** 6618 * nfs4_proc_setclientid - Negotiate client ID 6619 * @clp: state data structure 6620 * @program: RPC program for NFSv4 callback service 6621 * @port: IP port number for NFS4 callback service 6622 * @cred: credential to use for this call 6623 * @res: where to place the result 6624 * 6625 * Returns zero, a negative errno, or a negative NFS4ERR status code. 6626 */ 6627 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, 6628 unsigned short port, const struct cred *cred, 6629 struct nfs4_setclientid_res *res) 6630 { 6631 nfs4_verifier sc_verifier; 6632 struct nfs4_setclientid setclientid = { 6633 .sc_verifier = &sc_verifier, 6634 .sc_prog = program, 6635 .sc_clnt = clp, 6636 }; 6637 struct rpc_message msg = { 6638 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID], 6639 .rpc_argp = &setclientid, 6640 .rpc_resp = res, 6641 .rpc_cred = cred, 6642 }; 6643 struct rpc_task_setup task_setup_data = { 6644 .rpc_client = clp->cl_rpcclient, 6645 .rpc_message = &msg, 6646 .callback_ops = &nfs4_setclientid_ops, 6647 .callback_data = &setclientid, 6648 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN, 6649 }; 6650 unsigned long now = jiffies; 6651 int status; 6652 6653 /* nfs_client_id4 */ 6654 nfs4_init_boot_verifier(clp, &sc_verifier); 6655 6656 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags)) 6657 status = nfs4_init_uniform_client_string(clp); 6658 else 6659 status = nfs4_init_nonuniform_client_string(clp); 6660 6661 if (status) 6662 goto out; 6663 6664 /* cb_client4 */ 6665 setclientid.sc_netid_len = 6666 nfs4_init_callback_netid(clp, 6667 setclientid.sc_netid, 6668 sizeof(setclientid.sc_netid)); 6669 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr, 6670 sizeof(setclientid.sc_uaddr), "%s.%u.%u", 6671 clp->cl_ipaddr, port >> 8, port & 255); 6672 6673 dprintk("NFS call setclientid auth=%s, '%s'\n", 6674 clp->cl_rpcclient->cl_auth->au_ops->au_name, 6675 clp->cl_owner_id); 6676 6677 status = nfs4_call_sync_custom(&task_setup_data); 6678 if (setclientid.sc_cred) { 6679 kfree(clp->cl_acceptor); 6680 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred); 6681 put_rpccred(setclientid.sc_cred); 6682 } 6683 6684 if (status == 0) 6685 do_renew_lease(clp, now); 6686 out: 6687 trace_nfs4_setclientid(clp, status); 6688 dprintk("NFS reply 
setclientid: %d\n", status); 6689 return status; 6690 } 6691 6692 /** 6693 * nfs4_proc_setclientid_confirm - Confirm client ID 6694 * @clp: state data structure 6695 * @arg: result of a previous SETCLIENTID 6696 * @cred: credential to use for this call 6697 * 6698 * Returns zero, a negative errno, or a negative NFS4ERR status code. 6699 */ 6700 int nfs4_proc_setclientid_confirm(struct nfs_client *clp, 6701 struct nfs4_setclientid_res *arg, 6702 const struct cred *cred) 6703 { 6704 struct rpc_message msg = { 6705 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM], 6706 .rpc_argp = arg, 6707 .rpc_cred = cred, 6708 }; 6709 int status; 6710 6711 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n", 6712 clp->cl_rpcclient->cl_auth->au_ops->au_name, 6713 clp->cl_clientid); 6714 status = rpc_call_sync(clp->cl_rpcclient, &msg, 6715 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 6716 trace_nfs4_setclientid_confirm(clp, status); 6717 dprintk("NFS reply setclientid_confirm: %d\n", status); 6718 return status; 6719 } 6720 6721 struct nfs4_delegreturndata { 6722 struct nfs4_delegreturnargs args; 6723 struct nfs4_delegreturnres res; 6724 struct nfs_fh fh; 6725 nfs4_stateid stateid; 6726 unsigned long timestamp; 6727 struct { 6728 struct nfs4_layoutreturn_args arg; 6729 struct nfs4_layoutreturn_res res; 6730 struct nfs4_xdr_opaque_data ld_private; 6731 u32 roc_barrier; 6732 bool roc; 6733 } lr; 6734 struct nfs4_delegattr sattr; 6735 struct nfs_fattr fattr; 6736 int rpc_status; 6737 struct inode *inode; 6738 }; 6739 6740 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) 6741 { 6742 struct nfs4_delegreturndata *data = calldata; 6743 struct nfs4_exception exception = { 6744 .inode = data->inode, 6745 .stateid = &data->stateid, 6746 .task_is_privileged = data->args.seq_args.sa_privileged, 6747 }; 6748 6749 if (!nfs4_sequence_done(task, &data->res.seq_res)) 6750 return; 6751 6752 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status); 6753 6754 /* Handle Layoutreturn errors */ 6755 if (pnfs_roc_done(task, &data->args.lr_args, &data->res.lr_res, 6756 &data->res.lr_ret) == -EAGAIN) 6757 goto out_restart; 6758 6759 if (data->args.sattr_args && task->tk_status != 0) { 6760 switch(data->res.sattr_ret) { 6761 case 0: 6762 data->args.sattr_args = NULL; 6763 data->res.sattr_res = false; 6764 break; 6765 case -NFS4ERR_ADMIN_REVOKED: 6766 case -NFS4ERR_DELEG_REVOKED: 6767 case -NFS4ERR_EXPIRED: 6768 case -NFS4ERR_BAD_STATEID: 6769 /* Let the main handler below do stateid recovery */ 6770 break; 6771 case -NFS4ERR_OLD_STATEID: 6772 if (nfs4_refresh_delegation_stateid(&data->stateid, 6773 data->inode)) 6774 goto out_restart; 6775 fallthrough; 6776 default: 6777 data->args.sattr_args = NULL; 6778 data->res.sattr_res = false; 6779 goto out_restart; 6780 } 6781 } 6782 6783 switch (task->tk_status) { 6784 case 0: 6785 renew_lease(data->res.server, data->timestamp); 6786 break; 6787 case -NFS4ERR_ADMIN_REVOKED: 6788 case -NFS4ERR_DELEG_REVOKED: 6789 case -NFS4ERR_EXPIRED: 6790 nfs4_free_revoked_stateid(data->res.server, 6791 data->args.stateid, 6792 task->tk_msg.rpc_cred); 6793 fallthrough; 6794 case -NFS4ERR_BAD_STATEID: 6795 case -NFS4ERR_STALE_STATEID: 6796 case -ETIMEDOUT: 6797 task->tk_status = 0; 6798 break; 6799 case -NFS4ERR_OLD_STATEID: 6800 if (!nfs4_refresh_delegation_stateid(&data->stateid, data->inode)) 6801 nfs4_stateid_seqid_inc(&data->stateid); 6802 if (data->args.bitmask) { 6803 data->args.bitmask = NULL; 6804 data->res.fattr = NULL; 6805 } 6806 
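		/*
		 * Retry the DELEGRETURN with the updated stateid; the
		 * attribute request was dropped above, so the retry is a
		 * bare delegreturn.
		 */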
goto out_restart; 6807 case -NFS4ERR_ACCESS: 6808 if (data->args.bitmask) { 6809 data->args.bitmask = NULL; 6810 data->res.fattr = NULL; 6811 goto out_restart; 6812 } 6813 fallthrough; 6814 default: 6815 task->tk_status = nfs4_async_handle_exception(task, 6816 data->res.server, task->tk_status, 6817 &exception); 6818 if (exception.retry) 6819 goto out_restart; 6820 } 6821 nfs_delegation_mark_returned(data->inode, data->args.stateid); 6822 data->rpc_status = task->tk_status; 6823 return; 6824 out_restart: 6825 task->tk_status = 0; 6826 rpc_restart_call_prepare(task); 6827 } 6828 6829 static void nfs4_delegreturn_release(void *calldata) 6830 { 6831 struct nfs4_delegreturndata *data = calldata; 6832 struct inode *inode = data->inode; 6833 6834 if (data->lr.roc) 6835 pnfs_roc_release(&data->lr.arg, &data->lr.res, 6836 data->res.lr_ret); 6837 if (inode) { 6838 nfs4_fattr_set_prechange(&data->fattr, 6839 inode_peek_iversion_raw(inode)); 6840 nfs_refresh_inode(inode, &data->fattr); 6841 nfs_iput_and_deactive(inode); 6842 } 6843 kfree(calldata); 6844 } 6845 6846 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) 6847 { 6848 struct nfs4_delegreturndata *d_data; 6849 struct pnfs_layout_hdr *lo; 6850 6851 d_data = data; 6852 6853 if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) { 6854 nfs4_sequence_done(task, &d_data->res.seq_res); 6855 return; 6856 } 6857 6858 lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL; 6859 if (lo && !pnfs_layout_is_valid(lo)) { 6860 d_data->args.lr_args = NULL; 6861 d_data->res.lr_res = NULL; 6862 } 6863 6864 nfs4_setup_sequence(d_data->res.server->nfs_client, 6865 &d_data->args.seq_args, 6866 &d_data->res.seq_res, 6867 task); 6868 } 6869 6870 static const struct rpc_call_ops nfs4_delegreturn_ops = { 6871 .rpc_call_prepare = nfs4_delegreturn_prepare, 6872 .rpc_call_done = nfs4_delegreturn_done, 6873 .rpc_release = nfs4_delegreturn_release, 6874 }; 6875 6876 static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, 6877 const nfs4_stateid *stateid, 6878 struct nfs_delegation *delegation, 6879 int issync) 6880 { 6881 struct nfs4_delegreturndata *data; 6882 struct nfs_server *server = NFS_SERVER(inode); 6883 struct rpc_task *task; 6884 struct rpc_message msg = { 6885 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN], 6886 .rpc_cred = cred, 6887 }; 6888 struct rpc_task_setup task_setup_data = { 6889 .rpc_client = server->client, 6890 .rpc_message = &msg, 6891 .callback_ops = &nfs4_delegreturn_ops, 6892 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT, 6893 }; 6894 int status = 0; 6895 6896 if (nfs_server_capable(inode, NFS_CAP_MOVEABLE)) 6897 task_setup_data.flags |= RPC_TASK_MOVEABLE; 6898 6899 data = kzalloc(sizeof(*data), GFP_KERNEL); 6900 if (data == NULL) 6901 return -ENOMEM; 6902 6903 nfs4_state_protect(server->nfs_client, 6904 NFS_SP4_MACH_CRED_CLEANUP, 6905 &task_setup_data.rpc_client, &msg); 6906 6907 data->args.fhandle = &data->fh; 6908 data->args.stateid = &data->stateid; 6909 nfs4_bitmask_set(data->args.bitmask_store, 6910 server->cache_consistency_bitmask, inode, 0); 6911 data->args.bitmask = data->args.bitmask_store; 6912 nfs_copy_fh(&data->fh, NFS_FH(inode)); 6913 nfs4_stateid_copy(&data->stateid, stateid); 6914 data->res.fattr = &data->fattr; 6915 data->res.server = server; 6916 data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT; 6917 data->lr.arg.ld_private = &data->lr.ld_private; 6918 nfs_fattr_init(data->res.fattr); 6919 data->timestamp = jiffies; 6920 data->rpc_status = 0; 6921 
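	/*
	 * Pin the inode (if possible) so that a layoutreturn-on-close can
	 * be attached to the compound below, and so that the post-return
	 * attributes can be applied in nfs4_delegreturn_release().
	 */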
data->inode = nfs_igrab_and_active(inode); 6922 if (data->inode || issync) { 6923 data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res, 6924 cred); 6925 if (data->lr.roc) { 6926 data->args.lr_args = &data->lr.arg; 6927 data->res.lr_res = &data->lr.res; 6928 } 6929 } 6930 6931 if (delegation && 6932 test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) { 6933 if (delegation->type & FMODE_READ) { 6934 data->sattr.atime = inode_get_atime(inode); 6935 data->sattr.atime_set = true; 6936 } 6937 if (delegation->type & FMODE_WRITE) { 6938 data->sattr.mtime = inode_get_mtime(inode); 6939 data->sattr.mtime_set = true; 6940 } 6941 data->args.sattr_args = &data->sattr; 6942 data->res.sattr_res = true; 6943 } 6944 6945 if (!data->inode) 6946 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 6947 1); 6948 else 6949 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 6950 0); 6951 6952 task_setup_data.callback_data = data; 6953 msg.rpc_argp = &data->args; 6954 msg.rpc_resp = &data->res; 6955 task = rpc_run_task(&task_setup_data); 6956 if (IS_ERR(task)) 6957 return PTR_ERR(task); 6958 if (!issync) 6959 goto out; 6960 status = rpc_wait_for_completion_task(task); 6961 if (status != 0) 6962 goto out; 6963 status = data->rpc_status; 6964 out: 6965 rpc_put_task(task); 6966 return status; 6967 } 6968 6969 int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, 6970 const nfs4_stateid *stateid, 6971 struct nfs_delegation *delegation, int issync) 6972 { 6973 struct nfs_server *server = NFS_SERVER(inode); 6974 struct nfs4_exception exception = { }; 6975 int err; 6976 do { 6977 err = _nfs4_proc_delegreturn(inode, cred, stateid, 6978 delegation, issync); 6979 trace_nfs4_delegreturn(inode, stateid, err); 6980 switch (err) { 6981 case -NFS4ERR_STALE_STATEID: 6982 case -NFS4ERR_EXPIRED: 6983 case 0: 6984 return 0; 6985 } 6986 err = nfs4_handle_exception(server, err, &exception); 6987 } while (exception.retry); 6988 return err; 6989 } 6990 6991 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 6992 { 6993 struct inode *inode = state->inode; 6994 struct nfs_server *server = NFS_SERVER(inode); 6995 struct nfs_client *clp = server->nfs_client; 6996 struct nfs_lockt_args arg = { 6997 .fh = NFS_FH(inode), 6998 .fl = request, 6999 }; 7000 struct nfs_lockt_res res = { 7001 .denied = request, 7002 }; 7003 struct rpc_message msg = { 7004 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT], 7005 .rpc_argp = &arg, 7006 .rpc_resp = &res, 7007 .rpc_cred = state->owner->so_cred, 7008 }; 7009 struct nfs4_lock_state *lsp; 7010 int status; 7011 7012 arg.lock_owner.clientid = clp->cl_clientid; 7013 status = nfs4_set_lock_state(state, request); 7014 if (status != 0) 7015 goto out; 7016 lsp = request->fl_u.nfs4_fl.owner; 7017 arg.lock_owner.id = lsp->ls_seqid.owner_id; 7018 arg.lock_owner.s_dev = server->s_dev; 7019 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 7020 switch (status) { 7021 case 0: 7022 request->c.flc_type = F_UNLCK; 7023 break; 7024 case -NFS4ERR_DENIED: 7025 status = 0; 7026 } 7027 request->fl_ops->fl_release_private(request); 7028 request->fl_ops = NULL; 7029 out: 7030 return status; 7031 } 7032 7033 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7034 { 7035 struct nfs4_exception exception = { 7036 .interruptible = true, 7037 }; 7038 int err; 7039 7040 do { 7041 err = _nfs4_proc_getlk(state, cmd, request); 7042 trace_nfs4_get_lock(request, state, cmd, 
err); 7043 err = nfs4_handle_exception(NFS_SERVER(state->inode), err, 7044 &exception); 7045 } while (exception.retry); 7046 return err; 7047 } 7048 7049 /* 7050 * Update the seqid of a lock stateid after receiving 7051 * NFS4ERR_OLD_STATEID 7052 */ 7053 static bool nfs4_refresh_lock_old_stateid(nfs4_stateid *dst, 7054 struct nfs4_lock_state *lsp) 7055 { 7056 struct nfs4_state *state = lsp->ls_state; 7057 bool ret = false; 7058 7059 spin_lock(&state->state_lock); 7060 if (!nfs4_stateid_match_other(dst, &lsp->ls_stateid)) 7061 goto out; 7062 if (!nfs4_stateid_is_newer(&lsp->ls_stateid, dst)) 7063 nfs4_stateid_seqid_inc(dst); 7064 else 7065 dst->seqid = lsp->ls_stateid.seqid; 7066 ret = true; 7067 out: 7068 spin_unlock(&state->state_lock); 7069 return ret; 7070 } 7071 7072 static bool nfs4_sync_lock_stateid(nfs4_stateid *dst, 7073 struct nfs4_lock_state *lsp) 7074 { 7075 struct nfs4_state *state = lsp->ls_state; 7076 bool ret; 7077 7078 spin_lock(&state->state_lock); 7079 ret = !nfs4_stateid_match_other(dst, &lsp->ls_stateid); 7080 nfs4_stateid_copy(dst, &lsp->ls_stateid); 7081 spin_unlock(&state->state_lock); 7082 return ret; 7083 } 7084 7085 struct nfs4_unlockdata { 7086 struct nfs_locku_args arg; 7087 struct nfs_locku_res res; 7088 struct nfs4_lock_state *lsp; 7089 struct nfs_open_context *ctx; 7090 struct nfs_lock_context *l_ctx; 7091 struct file_lock fl; 7092 struct nfs_server *server; 7093 unsigned long timestamp; 7094 }; 7095 7096 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, 7097 struct nfs_open_context *ctx, 7098 struct nfs4_lock_state *lsp, 7099 struct nfs_seqid *seqid) 7100 { 7101 struct nfs4_unlockdata *p; 7102 struct nfs4_state *state = lsp->ls_state; 7103 struct inode *inode = state->inode; 7104 struct nfs_lock_context *l_ctx; 7105 7106 p = kzalloc(sizeof(*p), GFP_KERNEL); 7107 if (p == NULL) 7108 return NULL; 7109 l_ctx = nfs_get_lock_context(ctx); 7110 if (!IS_ERR(l_ctx)) { 7111 p->l_ctx = l_ctx; 7112 } else { 7113 kfree(p); 7114 return NULL; 7115 } 7116 p->arg.fh = NFS_FH(inode); 7117 p->arg.fl = &p->fl; 7118 p->arg.seqid = seqid; 7119 p->res.seqid = seqid; 7120 p->lsp = lsp; 7121 /* Ensure we don't close file until we're done freeing locks! 
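	 * The open context reference taken here is dropped again in
	 * nfs4_locku_release_calldata().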
*/ 7122 p->ctx = get_nfs_open_context(ctx); 7123 locks_init_lock(&p->fl); 7124 locks_copy_lock(&p->fl, fl); 7125 p->server = NFS_SERVER(inode); 7126 spin_lock(&state->state_lock); 7127 nfs4_stateid_copy(&p->arg.stateid, &lsp->ls_stateid); 7128 spin_unlock(&state->state_lock); 7129 return p; 7130 } 7131 7132 static void nfs4_locku_release_calldata(void *data) 7133 { 7134 struct nfs4_unlockdata *calldata = data; 7135 nfs_free_seqid(calldata->arg.seqid); 7136 nfs4_put_lock_state(calldata->lsp); 7137 nfs_put_lock_context(calldata->l_ctx); 7138 put_nfs_open_context(calldata->ctx); 7139 kfree(calldata); 7140 } 7141 7142 static void nfs4_locku_done(struct rpc_task *task, void *data) 7143 { 7144 struct nfs4_unlockdata *calldata = data; 7145 struct nfs4_exception exception = { 7146 .inode = calldata->lsp->ls_state->inode, 7147 .stateid = &calldata->arg.stateid, 7148 }; 7149 7150 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 7151 return; 7152 switch (task->tk_status) { 7153 case 0: 7154 renew_lease(calldata->server, calldata->timestamp); 7155 locks_lock_inode_wait(calldata->lsp->ls_state->inode, &calldata->fl); 7156 if (nfs4_update_lock_stateid(calldata->lsp, 7157 &calldata->res.stateid)) 7158 break; 7159 fallthrough; 7160 case -NFS4ERR_ADMIN_REVOKED: 7161 case -NFS4ERR_EXPIRED: 7162 nfs4_free_revoked_stateid(calldata->server, 7163 &calldata->arg.stateid, 7164 task->tk_msg.rpc_cred); 7165 fallthrough; 7166 case -NFS4ERR_BAD_STATEID: 7167 case -NFS4ERR_STALE_STATEID: 7168 if (nfs4_sync_lock_stateid(&calldata->arg.stateid, 7169 calldata->lsp)) 7170 rpc_restart_call_prepare(task); 7171 break; 7172 case -NFS4ERR_OLD_STATEID: 7173 if (nfs4_refresh_lock_old_stateid(&calldata->arg.stateid, 7174 calldata->lsp)) 7175 rpc_restart_call_prepare(task); 7176 break; 7177 default: 7178 task->tk_status = nfs4_async_handle_exception(task, 7179 calldata->server, task->tk_status, 7180 &exception); 7181 if (exception.retry) 7182 rpc_restart_call_prepare(task); 7183 } 7184 nfs_release_seqid(calldata->arg.seqid); 7185 } 7186 7187 static void nfs4_locku_prepare(struct rpc_task *task, void *data) 7188 { 7189 struct nfs4_unlockdata *calldata = data; 7190 7191 if (test_bit(NFS_CONTEXT_UNLOCK, &calldata->l_ctx->open_context->flags) && 7192 nfs_async_iocounter_wait(task, calldata->l_ctx)) 7193 return; 7194 7195 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 7196 goto out_wait; 7197 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) { 7198 /* Note: exit _without_ running nfs4_locku_done */ 7199 goto out_no_action; 7200 } 7201 calldata->timestamp = jiffies; 7202 if (nfs4_setup_sequence(calldata->server->nfs_client, 7203 &calldata->arg.seq_args, 7204 &calldata->res.seq_res, 7205 task) != 0) 7206 nfs_release_seqid(calldata->arg.seqid); 7207 return; 7208 out_no_action: 7209 task->tk_action = NULL; 7210 out_wait: 7211 nfs4_sequence_done(task, &calldata->res.seq_res); 7212 } 7213 7214 static const struct rpc_call_ops nfs4_locku_ops = { 7215 .rpc_call_prepare = nfs4_locku_prepare, 7216 .rpc_call_done = nfs4_locku_done, 7217 .rpc_release = nfs4_locku_release_calldata, 7218 }; 7219 7220 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, 7221 struct nfs_open_context *ctx, 7222 struct nfs4_lock_state *lsp, 7223 struct nfs_seqid *seqid) 7224 { 7225 struct nfs4_unlockdata *data; 7226 struct rpc_message msg = { 7227 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU], 7228 .rpc_cred = ctx->cred, 7229 }; 7230 struct rpc_task_setup task_setup_data = { 7231 .rpc_client = 
NFS_CLIENT(lsp->ls_state->inode), 7232 .rpc_message = &msg, 7233 .callback_ops = &nfs4_locku_ops, 7234 .workqueue = nfsiod_workqueue, 7235 .flags = RPC_TASK_ASYNC, 7236 }; 7237 7238 if (nfs_server_capable(lsp->ls_state->inode, NFS_CAP_MOVEABLE)) 7239 task_setup_data.flags |= RPC_TASK_MOVEABLE; 7240 7241 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client, 7242 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg); 7243 7244 /* Ensure this is an unlock - when canceling a lock, the 7245 * canceled lock is passed in, and it won't be an unlock. 7246 */ 7247 fl->c.flc_type = F_UNLCK; 7248 if (fl->c.flc_flags & FL_CLOSE) 7249 set_bit(NFS_CONTEXT_UNLOCK, &ctx->flags); 7250 7251 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); 7252 if (data == NULL) { 7253 nfs_free_seqid(seqid); 7254 return ERR_PTR(-ENOMEM); 7255 } 7256 7257 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 0); 7258 msg.rpc_argp = &data->arg; 7259 msg.rpc_resp = &data->res; 7260 task_setup_data.callback_data = data; 7261 return rpc_run_task(&task_setup_data); 7262 } 7263 7264 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) 7265 { 7266 struct inode *inode = state->inode; 7267 struct nfs4_state_owner *sp = state->owner; 7268 struct nfs_inode *nfsi = NFS_I(inode); 7269 struct nfs_seqid *seqid; 7270 struct nfs4_lock_state *lsp; 7271 struct rpc_task *task; 7272 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 7273 int status = 0; 7274 unsigned char saved_flags = request->c.flc_flags; 7275 7276 status = nfs4_set_lock_state(state, request); 7277 /* Unlock _before_ we do the RPC call */ 7278 request->c.flc_flags |= FL_EXISTS; 7279 /* Exclude nfs_delegation_claim_locks() */ 7280 mutex_lock(&sp->so_delegreturn_mutex); 7281 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */ 7282 down_read(&nfsi->rwsem); 7283 if (locks_lock_inode_wait(inode, request) == -ENOENT) { 7284 up_read(&nfsi->rwsem); 7285 mutex_unlock(&sp->so_delegreturn_mutex); 7286 goto out; 7287 } 7288 lsp = request->fl_u.nfs4_fl.owner; 7289 set_bit(NFS_LOCK_UNLOCKING, &lsp->ls_flags); 7290 up_read(&nfsi->rwsem); 7291 mutex_unlock(&sp->so_delegreturn_mutex); 7292 if (status != 0) 7293 goto out; 7294 /* Is this a delegated lock? 
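	 * If the lock was never sent to the server (NFS_LOCK_INITIALIZED
	 * is clear), there is nothing to unlock on the wire.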
*/ 7295 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0) 7296 goto out; 7297 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid; 7298 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); 7299 status = -ENOMEM; 7300 if (IS_ERR(seqid)) 7301 goto out; 7302 task = nfs4_do_unlck(request, 7303 nfs_file_open_context(request->c.flc_file), 7304 lsp, seqid); 7305 status = PTR_ERR(task); 7306 if (IS_ERR(task)) 7307 goto out; 7308 status = rpc_wait_for_completion_task(task); 7309 rpc_put_task(task); 7310 out: 7311 request->c.flc_flags = saved_flags; 7312 trace_nfs4_unlock(request, state, F_SETLK, status); 7313 return status; 7314 } 7315 7316 struct nfs4_lockdata { 7317 struct nfs_lock_args arg; 7318 struct nfs_lock_res res; 7319 struct nfs4_lock_state *lsp; 7320 struct nfs_open_context *ctx; 7321 struct file_lock fl; 7322 unsigned long timestamp; 7323 int rpc_status; 7324 int cancelled; 7325 struct nfs_server *server; 7326 }; 7327 7328 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, 7329 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp, 7330 gfp_t gfp_mask) 7331 { 7332 struct nfs4_lockdata *p; 7333 struct inode *inode = lsp->ls_state->inode; 7334 struct nfs_server *server = NFS_SERVER(inode); 7335 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 7336 7337 p = kzalloc(sizeof(*p), gfp_mask); 7338 if (p == NULL) 7339 return NULL; 7340 7341 p->arg.fh = NFS_FH(inode); 7342 p->arg.fl = &p->fl; 7343 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask); 7344 if (IS_ERR(p->arg.open_seqid)) 7345 goto out_free; 7346 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 7347 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask); 7348 if (IS_ERR(p->arg.lock_seqid)) 7349 goto out_free_seqid; 7350 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; 7351 p->arg.lock_owner.id = lsp->ls_seqid.owner_id; 7352 p->arg.lock_owner.s_dev = server->s_dev; 7353 p->res.lock_seqid = p->arg.lock_seqid; 7354 p->lsp = lsp; 7355 p->server = server; 7356 p->ctx = get_nfs_open_context(ctx); 7357 locks_init_lock(&p->fl); 7358 locks_copy_lock(&p->fl, fl); 7359 return p; 7360 out_free_seqid: 7361 nfs_free_seqid(p->arg.open_seqid); 7362 out_free: 7363 kfree(p); 7364 return NULL; 7365 } 7366 7367 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) 7368 { 7369 struct nfs4_lockdata *data = calldata; 7370 struct nfs4_state *state = data->lsp->ls_state; 7371 7372 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) 7373 goto out_wait; 7374 /* Do we need to do an open_to_lock_owner? 
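	 * That is, is this the first LOCK for this lock owner, so that the
	 * open stateid and open seqid must be presented?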
*/ 7375 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) { 7376 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) { 7377 goto out_release_lock_seqid; 7378 } 7379 nfs4_stateid_copy(&data->arg.open_stateid, 7380 &state->open_stateid); 7381 data->arg.new_lock_owner = 1; 7382 data->res.open_seqid = data->arg.open_seqid; 7383 } else { 7384 data->arg.new_lock_owner = 0; 7385 nfs4_stateid_copy(&data->arg.lock_stateid, 7386 &data->lsp->ls_stateid); 7387 } 7388 if (!nfs4_valid_open_stateid(state)) { 7389 data->rpc_status = -EBADF; 7390 task->tk_action = NULL; 7391 goto out_release_open_seqid; 7392 } 7393 data->timestamp = jiffies; 7394 if (nfs4_setup_sequence(data->server->nfs_client, 7395 &data->arg.seq_args, 7396 &data->res.seq_res, 7397 task) == 0) 7398 return; 7399 out_release_open_seqid: 7400 nfs_release_seqid(data->arg.open_seqid); 7401 out_release_lock_seqid: 7402 nfs_release_seqid(data->arg.lock_seqid); 7403 out_wait: 7404 nfs4_sequence_done(task, &data->res.seq_res); 7405 dprintk("%s: ret = %d\n", __func__, data->rpc_status); 7406 } 7407 7408 static void nfs4_lock_done(struct rpc_task *task, void *calldata) 7409 { 7410 struct nfs4_lockdata *data = calldata; 7411 struct nfs4_lock_state *lsp = data->lsp; 7412 7413 if (!nfs4_sequence_done(task, &data->res.seq_res)) 7414 return; 7415 7416 data->rpc_status = task->tk_status; 7417 switch (task->tk_status) { 7418 case 0: 7419 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)), 7420 data->timestamp); 7421 if (data->arg.new_lock && !data->cancelled) { 7422 data->fl.c.flc_flags &= ~(FL_SLEEP | FL_ACCESS); 7423 if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0) 7424 goto out_restart; 7425 } 7426 if (data->arg.new_lock_owner != 0) { 7427 nfs_confirm_seqid(&lsp->ls_seqid, 0); 7428 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid); 7429 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 7430 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid)) 7431 goto out_restart; 7432 break; 7433 case -NFS4ERR_OLD_STATEID: 7434 if (data->arg.new_lock_owner != 0 && 7435 nfs4_refresh_open_old_stateid(&data->arg.open_stateid, 7436 lsp->ls_state)) 7437 goto out_restart; 7438 if (nfs4_refresh_lock_old_stateid(&data->arg.lock_stateid, lsp)) 7439 goto out_restart; 7440 fallthrough; 7441 case -NFS4ERR_BAD_STATEID: 7442 case -NFS4ERR_STALE_STATEID: 7443 case -NFS4ERR_EXPIRED: 7444 if (data->arg.new_lock_owner != 0) { 7445 if (!nfs4_stateid_match(&data->arg.open_stateid, 7446 &lsp->ls_state->open_stateid)) 7447 goto out_restart; 7448 } else if (!nfs4_stateid_match(&data->arg.lock_stateid, 7449 &lsp->ls_stateid)) 7450 goto out_restart; 7451 } 7452 out_done: 7453 dprintk("%s: ret = %d!\n", __func__, data->rpc_status); 7454 return; 7455 out_restart: 7456 if (!data->cancelled) 7457 rpc_restart_call_prepare(task); 7458 goto out_done; 7459 } 7460 7461 static void nfs4_lock_release(void *calldata) 7462 { 7463 struct nfs4_lockdata *data = calldata; 7464 7465 nfs_free_seqid(data->arg.open_seqid); 7466 if (data->cancelled && data->rpc_status == 0) { 7467 struct rpc_task *task; 7468 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, 7469 data->arg.lock_seqid); 7470 if (!IS_ERR(task)) 7471 rpc_put_task_async(task); 7472 dprintk("%s: cancelling lock!\n", __func__); 7473 } else 7474 nfs_free_seqid(data->arg.lock_seqid); 7475 nfs4_put_lock_state(data->lsp); 7476 put_nfs_open_context(data->ctx); 7477 kfree(data); 7478 } 7479 7480 static const struct rpc_call_ops nfs4_lock_ops = { 7481 .rpc_call_prepare = nfs4_lock_prepare, 7482 .rpc_call_done = 
nfs4_lock_done, 7483 .rpc_release = nfs4_lock_release, 7484 }; 7485 7486 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) 7487 { 7488 switch (error) { 7489 case -NFS4ERR_ADMIN_REVOKED: 7490 case -NFS4ERR_EXPIRED: 7491 case -NFS4ERR_BAD_STATEID: 7492 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 7493 if (new_lock_owner != 0 || 7494 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) 7495 nfs4_schedule_stateid_recovery(server, lsp->ls_state); 7496 break; 7497 case -NFS4ERR_STALE_STATEID: 7498 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 7499 nfs4_schedule_lease_recovery(server->nfs_client); 7500 } 7501 } 7502 7503 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) 7504 { 7505 struct nfs4_lockdata *data; 7506 struct rpc_task *task; 7507 struct rpc_message msg = { 7508 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK], 7509 .rpc_cred = state->owner->so_cred, 7510 }; 7511 struct rpc_task_setup task_setup_data = { 7512 .rpc_client = NFS_CLIENT(state->inode), 7513 .rpc_message = &msg, 7514 .callback_ops = &nfs4_lock_ops, 7515 .workqueue = nfsiod_workqueue, 7516 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 7517 }; 7518 int ret; 7519 7520 if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE)) 7521 task_setup_data.flags |= RPC_TASK_MOVEABLE; 7522 7523 data = nfs4_alloc_lockdata(fl, 7524 nfs_file_open_context(fl->c.flc_file), 7525 fl->fl_u.nfs4_fl.owner, GFP_KERNEL); 7526 if (data == NULL) 7527 return -ENOMEM; 7528 if (IS_SETLKW(cmd)) 7529 data->arg.block = 1; 7530 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 7531 recovery_type > NFS_LOCK_NEW); 7532 msg.rpc_argp = &data->arg; 7533 msg.rpc_resp = &data->res; 7534 task_setup_data.callback_data = data; 7535 if (recovery_type > NFS_LOCK_NEW) { 7536 if (recovery_type == NFS_LOCK_RECLAIM) 7537 data->arg.reclaim = NFS_LOCK_RECLAIM; 7538 } else 7539 data->arg.new_lock = 1; 7540 task = rpc_run_task(&task_setup_data); 7541 if (IS_ERR(task)) 7542 return PTR_ERR(task); 7543 ret = rpc_wait_for_completion_task(task); 7544 if (ret == 0) { 7545 ret = data->rpc_status; 7546 if (ret) 7547 nfs4_handle_setlk_error(data->server, data->lsp, 7548 data->arg.new_lock_owner, ret); 7549 } else 7550 data->cancelled = true; 7551 trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret); 7552 rpc_put_task(task); 7553 dprintk("%s: ret = %d\n", __func__, ret); 7554 return ret; 7555 } 7556 7557 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) 7558 { 7559 struct nfs_server *server = NFS_SERVER(state->inode); 7560 struct nfs4_exception exception = { 7561 .inode = state->inode, 7562 }; 7563 int err; 7564 7565 do { 7566 /* Cache the lock if possible... 
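		 * While we still hold a delegation, the reclaimed lock can
		 * remain purely local and need not be re-established on the
		 * server.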
*/ 7567 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 7568 return 0; 7569 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM); 7570 if (err != -NFS4ERR_DELAY) 7571 break; 7572 nfs4_handle_exception(server, err, &exception); 7573 } while (exception.retry); 7574 return err; 7575 } 7576 7577 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) 7578 { 7579 struct nfs_server *server = NFS_SERVER(state->inode); 7580 struct nfs4_exception exception = { 7581 .inode = state->inode, 7582 }; 7583 int err; 7584 7585 err = nfs4_set_lock_state(state, request); 7586 if (err != 0) 7587 return err; 7588 if (!recover_lost_locks) { 7589 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags); 7590 return 0; 7591 } 7592 do { 7593 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 7594 return 0; 7595 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED); 7596 switch (err) { 7597 default: 7598 goto out; 7599 case -NFS4ERR_GRACE: 7600 case -NFS4ERR_DELAY: 7601 nfs4_handle_exception(server, err, &exception); 7602 err = 0; 7603 } 7604 } while (exception.retry); 7605 out: 7606 return err; 7607 } 7608 7609 #if defined(CONFIG_NFS_V4_1) 7610 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request) 7611 { 7612 struct nfs4_lock_state *lsp; 7613 int status; 7614 7615 status = nfs4_set_lock_state(state, request); 7616 if (status != 0) 7617 return status; 7618 lsp = request->fl_u.nfs4_fl.owner; 7619 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) || 7620 test_bit(NFS_LOCK_LOST, &lsp->ls_flags)) 7621 return 0; 7622 return nfs4_lock_expired(state, request); 7623 } 7624 #endif 7625 7626 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7627 { 7628 struct nfs_inode *nfsi = NFS_I(state->inode); 7629 struct nfs4_state_owner *sp = state->owner; 7630 unsigned char flags = request->c.flc_flags; 7631 int status; 7632 7633 request->c.flc_flags |= FL_ACCESS; 7634 status = locks_lock_inode_wait(state->inode, request); 7635 if (status < 0) 7636 goto out; 7637 mutex_lock(&sp->so_delegreturn_mutex); 7638 down_read(&nfsi->rwsem); 7639 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { 7640 /* Yes: cache locks! */ 7641 /* ...but avoid races with delegation recall... 
*/ 7642 request->c.flc_flags = flags & ~FL_SLEEP; 7643 status = locks_lock_inode_wait(state->inode, request); 7644 up_read(&nfsi->rwsem); 7645 mutex_unlock(&sp->so_delegreturn_mutex); 7646 goto out; 7647 } 7648 up_read(&nfsi->rwsem); 7649 mutex_unlock(&sp->so_delegreturn_mutex); 7650 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); 7651 out: 7652 request->c.flc_flags = flags; 7653 return status; 7654 } 7655 7656 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7657 { 7658 struct nfs4_exception exception = { 7659 .state = state, 7660 .inode = state->inode, 7661 .interruptible = true, 7662 }; 7663 int err; 7664 7665 do { 7666 err = _nfs4_proc_setlk(state, cmd, request); 7667 if (err == -NFS4ERR_DENIED) 7668 err = -EAGAIN; 7669 err = nfs4_handle_exception(NFS_SERVER(state->inode), 7670 err, &exception); 7671 } while (exception.retry); 7672 return err; 7673 } 7674 7675 #define NFS4_LOCK_MINTIMEOUT (1 * HZ) 7676 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) 7677 7678 static int 7679 nfs4_retry_setlk_simple(struct nfs4_state *state, int cmd, 7680 struct file_lock *request) 7681 { 7682 int status = -ERESTARTSYS; 7683 unsigned long timeout = NFS4_LOCK_MINTIMEOUT; 7684 7685 while(!signalled()) { 7686 status = nfs4_proc_setlk(state, cmd, request); 7687 if ((status != -EAGAIN) || IS_SETLK(cmd)) 7688 break; 7689 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE); 7690 schedule_timeout(timeout); 7691 timeout *= 2; 7692 timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout); 7693 status = -ERESTARTSYS; 7694 } 7695 return status; 7696 } 7697 7698 #ifdef CONFIG_NFS_V4_1 7699 struct nfs4_lock_waiter { 7700 struct inode *inode; 7701 struct nfs_lowner owner; 7702 wait_queue_entry_t wait; 7703 }; 7704 7705 static int 7706 nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key) 7707 { 7708 struct nfs4_lock_waiter *waiter = 7709 container_of(wait, struct nfs4_lock_waiter, wait); 7710 7711 /* NULL key means to wake up everyone */ 7712 if (key) { 7713 struct cb_notify_lock_args *cbnl = key; 7714 struct nfs_lowner *lowner = &cbnl->cbnl_owner, 7715 *wowner = &waiter->owner; 7716 7717 /* Only wake if the callback was for the same owner. 
*/ 7718 if (lowner->id != wowner->id || lowner->s_dev != wowner->s_dev) 7719 return 0; 7720 7721 /* Make sure it's for the right inode */ 7722 if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh)) 7723 return 0; 7724 } 7725 7726 return woken_wake_function(wait, mode, flags, key); 7727 } 7728 7729 static int 7730 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7731 { 7732 struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner; 7733 struct nfs_server *server = NFS_SERVER(state->inode); 7734 struct nfs_client *clp = server->nfs_client; 7735 wait_queue_head_t *q = &clp->cl_lock_waitq; 7736 struct nfs4_lock_waiter waiter = { 7737 .inode = state->inode, 7738 .owner = { .clientid = clp->cl_clientid, 7739 .id = lsp->ls_seqid.owner_id, 7740 .s_dev = server->s_dev }, 7741 }; 7742 int status; 7743 7744 /* Don't bother with waitqueue if we don't expect a callback */ 7745 if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags)) 7746 return nfs4_retry_setlk_simple(state, cmd, request); 7747 7748 init_wait(&waiter.wait); 7749 waiter.wait.func = nfs4_wake_lock_waiter; 7750 add_wait_queue(q, &waiter.wait); 7751 7752 do { 7753 status = nfs4_proc_setlk(state, cmd, request); 7754 if (status != -EAGAIN || IS_SETLK(cmd)) 7755 break; 7756 7757 status = -ERESTARTSYS; 7758 wait_woken(&waiter.wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE, 7759 NFS4_LOCK_MAXTIMEOUT); 7760 } while (!signalled()); 7761 7762 remove_wait_queue(q, &waiter.wait); 7763 7764 return status; 7765 } 7766 #else /* !CONFIG_NFS_V4_1 */ 7767 static inline int 7768 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7769 { 7770 return nfs4_retry_setlk_simple(state, cmd, request); 7771 } 7772 #endif 7773 7774 static int 7775 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) 7776 { 7777 struct nfs_open_context *ctx; 7778 struct nfs4_state *state; 7779 int status; 7780 7781 /* verify open state */ 7782 ctx = nfs_file_open_context(filp); 7783 state = ctx->state; 7784 7785 if (IS_GETLK(cmd)) { 7786 if (state != NULL) 7787 return nfs4_proc_getlk(state, F_GETLK, request); 7788 return 0; 7789 } 7790 7791 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd))) 7792 return -EINVAL; 7793 7794 if (lock_is_unlock(request)) { 7795 if (state != NULL) 7796 return nfs4_proc_unlck(state, cmd, request); 7797 return 0; 7798 } 7799 7800 if (state == NULL) 7801 return -ENOLCK; 7802 7803 if ((request->c.flc_flags & FL_POSIX) && 7804 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) 7805 return -ENOLCK; 7806 7807 /* 7808 * Don't rely on the VFS having checked the file open mode, 7809 * since it won't do this for flock() locks. 7810 */ 7811 switch (request->c.flc_type) { 7812 case F_RDLCK: 7813 if (!(filp->f_mode & FMODE_READ)) 7814 return -EBADF; 7815 break; 7816 case F_WRLCK: 7817 if (!(filp->f_mode & FMODE_WRITE)) 7818 return -EBADF; 7819 } 7820 7821 status = nfs4_set_lock_state(state, request); 7822 if (status != 0) 7823 return status; 7824 7825 return nfs4_retry_setlk(state, cmd, request); 7826 } 7827 7828 static int nfs4_delete_lease(struct file *file, void **priv) 7829 { 7830 return generic_setlease(file, F_UNLCK, NULL, priv); 7831 } 7832 7833 static int nfs4_add_lease(struct file *file, int arg, struct file_lease **lease, 7834 void **priv) 7835 { 7836 struct inode *inode = file_inode(file); 7837 fmode_t type = arg == F_RDLCK ? 
FMODE_READ : FMODE_WRITE; 7838 int ret; 7839 7840 /* No delegation, no lease */ 7841 if (!nfs4_have_delegation(inode, type, 0)) 7842 return -EAGAIN; 7843 ret = generic_setlease(file, arg, lease, priv); 7844 if (ret || nfs4_have_delegation(inode, type, 0)) 7845 return ret; 7846 /* We raced with a delegation return */ 7847 nfs4_delete_lease(file, priv); 7848 return -EAGAIN; 7849 } 7850 7851 int nfs4_proc_setlease(struct file *file, int arg, struct file_lease **lease, 7852 void **priv) 7853 { 7854 switch (arg) { 7855 case F_RDLCK: 7856 case F_WRLCK: 7857 return nfs4_add_lease(file, arg, lease, priv); 7858 case F_UNLCK: 7859 return nfs4_delete_lease(file, priv); 7860 default: 7861 return -EINVAL; 7862 } 7863 } 7864 7865 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid) 7866 { 7867 struct nfs_server *server = NFS_SERVER(state->inode); 7868 int err; 7869 7870 err = nfs4_set_lock_state(state, fl); 7871 if (err != 0) 7872 return err; 7873 do { 7874 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); 7875 if (err != -NFS4ERR_DELAY) 7876 break; 7877 ssleep(1); 7878 } while (err == -NFS4ERR_DELAY); 7879 return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err); 7880 } 7881 7882 struct nfs_release_lockowner_data { 7883 struct nfs4_lock_state *lsp; 7884 struct nfs_server *server; 7885 struct nfs_release_lockowner_args args; 7886 struct nfs_release_lockowner_res res; 7887 unsigned long timestamp; 7888 }; 7889 7890 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata) 7891 { 7892 struct nfs_release_lockowner_data *data = calldata; 7893 struct nfs_server *server = data->server; 7894 nfs4_setup_sequence(server->nfs_client, &data->args.seq_args, 7895 &data->res.seq_res, task); 7896 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 7897 data->timestamp = jiffies; 7898 } 7899 7900 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata) 7901 { 7902 struct nfs_release_lockowner_data *data = calldata; 7903 struct nfs_server *server = data->server; 7904 7905 nfs40_sequence_done(task, &data->res.seq_res); 7906 7907 switch (task->tk_status) { 7908 case 0: 7909 renew_lease(server, data->timestamp); 7910 break; 7911 case -NFS4ERR_STALE_CLIENTID: 7912 case -NFS4ERR_EXPIRED: 7913 nfs4_schedule_lease_recovery(server->nfs_client); 7914 break; 7915 case -NFS4ERR_LEASE_MOVED: 7916 case -NFS4ERR_DELAY: 7917 if (nfs4_async_handle_error(task, server, 7918 NULL, NULL) == -EAGAIN) 7919 rpc_restart_call_prepare(task); 7920 } 7921 } 7922 7923 static void nfs4_release_lockowner_release(void *calldata) 7924 { 7925 struct nfs_release_lockowner_data *data = calldata; 7926 nfs4_free_lock_state(data->server, data->lsp); 7927 kfree(calldata); 7928 } 7929 7930 static const struct rpc_call_ops nfs4_release_lockowner_ops = { 7931 .rpc_call_prepare = nfs4_release_lockowner_prepare, 7932 .rpc_call_done = nfs4_release_lockowner_done, 7933 .rpc_release = nfs4_release_lockowner_release, 7934 }; 7935 7936 static void 7937 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp) 7938 { 7939 struct nfs_release_lockowner_data *data; 7940 struct rpc_message msg = { 7941 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER], 7942 }; 7943 7944 if (server->nfs_client->cl_mvops->minor_version != 0) 7945 return; 7946 7947 data = kmalloc(sizeof(*data), GFP_KERNEL); 7948 if (!data) 7949 return; 7950 data->lsp = lsp; 7951 data->server = server; 7952 data->args.lock_owner.clientid = 
server->nfs_client->cl_clientid; 7953 data->args.lock_owner.id = lsp->ls_seqid.owner_id; 7954 data->args.lock_owner.s_dev = server->s_dev; 7955 7956 msg.rpc_argp = &data->args; 7957 msg.rpc_resp = &data->res; 7958 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0); 7959 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); 7960 } 7961 7962 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" 7963 7964 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler, 7965 struct mnt_idmap *idmap, 7966 struct dentry *unused, struct inode *inode, 7967 const char *key, const void *buf, 7968 size_t buflen, int flags) 7969 { 7970 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_ACL); 7971 } 7972 7973 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler, 7974 struct dentry *unused, struct inode *inode, 7975 const char *key, void *buf, size_t buflen) 7976 { 7977 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_ACL); 7978 } 7979 7980 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry) 7981 { 7982 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_ACL); 7983 } 7984 7985 #if defined(CONFIG_NFS_V4_1) 7986 #define XATTR_NAME_NFSV4_DACL "system.nfs4_dacl" 7987 7988 static int nfs4_xattr_set_nfs4_dacl(const struct xattr_handler *handler, 7989 struct mnt_idmap *idmap, 7990 struct dentry *unused, struct inode *inode, 7991 const char *key, const void *buf, 7992 size_t buflen, int flags) 7993 { 7994 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_DACL); 7995 } 7996 7997 static int nfs4_xattr_get_nfs4_dacl(const struct xattr_handler *handler, 7998 struct dentry *unused, struct inode *inode, 7999 const char *key, void *buf, size_t buflen) 8000 { 8001 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_DACL); 8002 } 8003 8004 static bool nfs4_xattr_list_nfs4_dacl(struct dentry *dentry) 8005 { 8006 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_DACL); 8007 } 8008 8009 #define XATTR_NAME_NFSV4_SACL "system.nfs4_sacl" 8010 8011 static int nfs4_xattr_set_nfs4_sacl(const struct xattr_handler *handler, 8012 struct mnt_idmap *idmap, 8013 struct dentry *unused, struct inode *inode, 8014 const char *key, const void *buf, 8015 size_t buflen, int flags) 8016 { 8017 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_SACL); 8018 } 8019 8020 static int nfs4_xattr_get_nfs4_sacl(const struct xattr_handler *handler, 8021 struct dentry *unused, struct inode *inode, 8022 const char *key, void *buf, size_t buflen) 8023 { 8024 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_SACL); 8025 } 8026 8027 static bool nfs4_xattr_list_nfs4_sacl(struct dentry *dentry) 8028 { 8029 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_SACL); 8030 } 8031 8032 #endif 8033 8034 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 8035 8036 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler, 8037 struct mnt_idmap *idmap, 8038 struct dentry *unused, struct inode *inode, 8039 const char *key, const void *buf, 8040 size_t buflen, int flags) 8041 { 8042 if (security_ismaclabel(key)) 8043 return nfs4_set_security_label(inode, buf, buflen); 8044 8045 return -EOPNOTSUPP; 8046 } 8047 8048 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler, 8049 struct dentry *unused, struct inode *inode, 8050 const char *key, void *buf, size_t buflen) 8051 { 8052 if (security_ismaclabel(key)) 8053 return nfs4_get_security_label(inode, buf, buflen); 8054 return -EOPNOTSUPP; 8055 } 8056 8057 static ssize_t 8058 
nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len) 8059 { 8060 int len = 0; 8061 8062 if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) { 8063 len = security_inode_listsecurity(inode, list, list_len); 8064 if (len >= 0 && list_len && len > list_len) 8065 return -ERANGE; 8066 } 8067 return len; 8068 } 8069 8070 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = { 8071 .prefix = XATTR_SECURITY_PREFIX, 8072 .get = nfs4_xattr_get_nfs4_label, 8073 .set = nfs4_xattr_set_nfs4_label, 8074 }; 8075 8076 #else 8077 8078 static ssize_t 8079 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len) 8080 { 8081 return 0; 8082 } 8083 8084 #endif 8085 8086 #ifdef CONFIG_NFS_V4_2 8087 static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler, 8088 struct mnt_idmap *idmap, 8089 struct dentry *unused, struct inode *inode, 8090 const char *key, const void *buf, 8091 size_t buflen, int flags) 8092 { 8093 u32 mask; 8094 int ret; 8095 8096 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 8097 return -EOPNOTSUPP; 8098 8099 /* 8100 * There is no mapping from the MAY_* flags to the NFS_ACCESS_XA* 8101 * flags right now. Handling of xattr operations use the normal 8102 * file read/write permissions. 8103 * 8104 * Just in case the server has other ideas (which RFC 8276 allows), 8105 * do a cached access check for the XA* flags to possibly avoid 8106 * doing an RPC and getting EACCES back. 8107 */ 8108 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { 8109 if (!(mask & NFS_ACCESS_XAWRITE)) 8110 return -EACCES; 8111 } 8112 8113 if (buf == NULL) { 8114 ret = nfs42_proc_removexattr(inode, key); 8115 if (!ret) 8116 nfs4_xattr_cache_remove(inode, key); 8117 } else { 8118 ret = nfs42_proc_setxattr(inode, key, buf, buflen, flags); 8119 if (!ret) 8120 nfs4_xattr_cache_add(inode, key, buf, NULL, buflen); 8121 } 8122 8123 return ret; 8124 } 8125 8126 static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler, 8127 struct dentry *unused, struct inode *inode, 8128 const char *key, void *buf, size_t buflen) 8129 { 8130 u32 mask; 8131 ssize_t ret; 8132 8133 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 8134 return -EOPNOTSUPP; 8135 8136 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { 8137 if (!(mask & NFS_ACCESS_XAREAD)) 8138 return -EACCES; 8139 } 8140 8141 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 8142 if (ret) 8143 return ret; 8144 8145 ret = nfs4_xattr_cache_get(inode, key, buf, buflen); 8146 if (ret >= 0 || (ret < 0 && ret != -ENOENT)) 8147 return ret; 8148 8149 ret = nfs42_proc_getxattr(inode, key, buf, buflen); 8150 8151 return ret; 8152 } 8153 8154 static ssize_t 8155 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) 8156 { 8157 u64 cookie; 8158 bool eof; 8159 ssize_t ret, size; 8160 char *buf; 8161 size_t buflen; 8162 u32 mask; 8163 8164 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 8165 return 0; 8166 8167 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { 8168 if (!(mask & NFS_ACCESS_XALIST)) 8169 return 0; 8170 } 8171 8172 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 8173 if (ret) 8174 return ret; 8175 8176 ret = nfs4_xattr_cache_list(inode, list, list_len); 8177 if (ret >= 0 || (ret < 0 && ret != -ENOENT)) 8178 return ret; 8179 8180 cookie = 0; 8181 eof = false; 8182 buflen = list_len ? list_len : XATTR_LIST_MAX; 8183 buf = list_len ? 
list : NULL; 8184 size = 0; 8185 8186 while (!eof) { 8187 ret = nfs42_proc_listxattrs(inode, buf, buflen, 8188 &cookie, &eof); 8189 if (ret < 0) 8190 return ret; 8191 8192 if (list_len) { 8193 buf += ret; 8194 buflen -= ret; 8195 } 8196 size += ret; 8197 } 8198 8199 if (list_len) 8200 nfs4_xattr_cache_set_list(inode, list, size); 8201 8202 return size; 8203 } 8204 8205 #else 8206 8207 static ssize_t 8208 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) 8209 { 8210 return 0; 8211 } 8212 #endif /* CONFIG_NFS_V4_2 */ 8213 8214 /* 8215 * nfs_fhget will use either the mounted_on_fileid or the fileid 8216 */ 8217 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) 8218 { 8219 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || 8220 (fattr->valid & NFS_ATTR_FATTR_FILEID)) && 8221 (fattr->valid & NFS_ATTR_FATTR_FSID) && 8222 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS))) 8223 return; 8224 8225 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 8226 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL; 8227 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 8228 fattr->nlink = 2; 8229 } 8230 8231 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 8232 const struct qstr *name, 8233 struct nfs4_fs_locations *fs_locations, 8234 struct page *page) 8235 { 8236 struct nfs_server *server = NFS_SERVER(dir); 8237 u32 bitmask[3]; 8238 struct nfs4_fs_locations_arg args = { 8239 .dir_fh = NFS_FH(dir), 8240 .name = name, 8241 .page = page, 8242 .bitmask = bitmask, 8243 }; 8244 struct nfs4_fs_locations_res res = { 8245 .fs_locations = fs_locations, 8246 }; 8247 struct rpc_message msg = { 8248 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 8249 .rpc_argp = &args, 8250 .rpc_resp = &res, 8251 }; 8252 int status; 8253 8254 dprintk("%s: start\n", __func__); 8255 8256 bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS; 8257 bitmask[1] = nfs4_fattr_bitmap[1]; 8258 8259 /* Ask for the fileid of the absent filesystem if mounted_on_fileid 8260 * is not supported */ 8261 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) 8262 bitmask[0] &= ~FATTR4_WORD0_FILEID; 8263 else 8264 bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID; 8265 8266 nfs_fattr_init(fs_locations->fattr); 8267 fs_locations->server = server; 8268 fs_locations->nlocations = 0; 8269 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0); 8270 dprintk("%s: returned status = %d\n", __func__, status); 8271 return status; 8272 } 8273 8274 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 8275 const struct qstr *name, 8276 struct nfs4_fs_locations *fs_locations, 8277 struct page *page) 8278 { 8279 struct nfs4_exception exception = { 8280 .interruptible = true, 8281 }; 8282 int err; 8283 do { 8284 err = _nfs4_proc_fs_locations(client, dir, name, 8285 fs_locations, page); 8286 trace_nfs4_get_fs_locations(dir, name, err); 8287 err = nfs4_handle_exception(NFS_SERVER(dir), err, 8288 &exception); 8289 } while (exception.retry); 8290 return err; 8291 } 8292 8293 /* 8294 * This operation also signals the server that this client is 8295 * performing migration recovery. The server can stop returning 8296 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is 8297 * appended to this compound to identify the client ID which is 8298 * performing recovery. 
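 *
 * Note that this is the NFSv4.0 variant: the lease can only be renewed
 * here by the RENEW operation explicitly appended to the compound. The
 * NFSv4.1 variant below does not need that, because the SEQUENCE
 * operation carried by every v4.1 compound renews the lease implicitly.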
8299 */ 8300 static int _nfs40_proc_get_locations(struct nfs_server *server, 8301 struct nfs_fh *fhandle, 8302 struct nfs4_fs_locations *locations, 8303 struct page *page, const struct cred *cred) 8304 { 8305 struct rpc_clnt *clnt = server->client; 8306 u32 bitmask[2] = { 8307 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 8308 }; 8309 struct nfs4_fs_locations_arg args = { 8310 .clientid = server->nfs_client->cl_clientid, 8311 .fh = fhandle, 8312 .page = page, 8313 .bitmask = bitmask, 8314 .migration = 1, /* skip LOOKUP */ 8315 .renew = 1, /* append RENEW */ 8316 }; 8317 struct nfs4_fs_locations_res res = { 8318 .fs_locations = locations, 8319 .migration = 1, 8320 .renew = 1, 8321 }; 8322 struct rpc_message msg = { 8323 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 8324 .rpc_argp = &args, 8325 .rpc_resp = &res, 8326 .rpc_cred = cred, 8327 }; 8328 unsigned long now = jiffies; 8329 int status; 8330 8331 nfs_fattr_init(locations->fattr); 8332 locations->server = server; 8333 locations->nlocations = 0; 8334 8335 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8336 status = nfs4_call_sync_sequence(clnt, server, &msg, 8337 &args.seq_args, &res.seq_res); 8338 if (status) 8339 return status; 8340 8341 renew_lease(server, now); 8342 return 0; 8343 } 8344 8345 #ifdef CONFIG_NFS_V4_1 8346 8347 /* 8348 * This operation also signals the server that this client is 8349 * performing migration recovery. The server can stop asserting 8350 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID 8351 * performing this operation is identified in the SEQUENCE 8352 * operation in this compound. 8353 * 8354 * When the client supports GETATTR(fs_locations_info), it can 8355 * be plumbed in here. 8356 */ 8357 static int _nfs41_proc_get_locations(struct nfs_server *server, 8358 struct nfs_fh *fhandle, 8359 struct nfs4_fs_locations *locations, 8360 struct page *page, const struct cred *cred) 8361 { 8362 struct rpc_clnt *clnt = server->client; 8363 u32 bitmask[2] = { 8364 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 8365 }; 8366 struct nfs4_fs_locations_arg args = { 8367 .fh = fhandle, 8368 .page = page, 8369 .bitmask = bitmask, 8370 .migration = 1, /* skip LOOKUP */ 8371 }; 8372 struct nfs4_fs_locations_res res = { 8373 .fs_locations = locations, 8374 .migration = 1, 8375 }; 8376 struct rpc_message msg = { 8377 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 8378 .rpc_argp = &args, 8379 .rpc_resp = &res, 8380 .rpc_cred = cred, 8381 }; 8382 struct nfs4_call_sync_data data = { 8383 .seq_server = server, 8384 .seq_args = &args.seq_args, 8385 .seq_res = &res.seq_res, 8386 }; 8387 struct rpc_task_setup task_setup_data = { 8388 .rpc_client = clnt, 8389 .rpc_message = &msg, 8390 .callback_ops = server->nfs_client->cl_mvops->call_sync_ops, 8391 .callback_data = &data, 8392 .flags = RPC_TASK_NO_ROUND_ROBIN, 8393 }; 8394 int status; 8395 8396 nfs_fattr_init(locations->fattr); 8397 locations->server = server; 8398 locations->nlocations = 0; 8399 8400 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8401 status = nfs4_call_sync_custom(&task_setup_data); 8402 if (status == NFS4_OK && 8403 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 8404 status = -NFS4ERR_LEASE_MOVED; 8405 return status; 8406 } 8407 8408 #endif /* CONFIG_NFS_V4_1 */ 8409 8410 /** 8411 * nfs4_proc_get_locations - discover locations for a migrated FSID 8412 * @server: pointer to nfs_server to process 8413 * @fhandle: pointer to the kernel NFS client file handle 8414 * @locations: result of query 8415 * 
@page: buffer 8416 * @cred: credential to use for this operation 8417 * 8418 * Returns NFS4_OK on success, a negative NFS4ERR status code if the 8419 * operation failed, or a negative errno if a local error occurred. 8420 * 8421 * On success, "locations" is filled in, but if the server has 8422 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not 8423 * asserted. 8424 * 8425 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases 8426 * from this client that require migration recovery. 8427 */ 8428 int nfs4_proc_get_locations(struct nfs_server *server, 8429 struct nfs_fh *fhandle, 8430 struct nfs4_fs_locations *locations, 8431 struct page *page, const struct cred *cred) 8432 { 8433 struct nfs_client *clp = server->nfs_client; 8434 const struct nfs4_mig_recovery_ops *ops = 8435 clp->cl_mvops->mig_recovery_ops; 8436 struct nfs4_exception exception = { 8437 .interruptible = true, 8438 }; 8439 int status; 8440 8441 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 8442 (unsigned long long)server->fsid.major, 8443 (unsigned long long)server->fsid.minor, 8444 clp->cl_hostname); 8445 nfs_display_fhandle(fhandle, __func__); 8446 8447 do { 8448 status = ops->get_locations(server, fhandle, locations, page, 8449 cred); 8450 if (status != -NFS4ERR_DELAY) 8451 break; 8452 nfs4_handle_exception(server, status, &exception); 8453 } while (exception.retry); 8454 return status; 8455 } 8456 8457 /* 8458 * This operation also signals the server that this client is 8459 * performing "lease moved" recovery. The server can stop 8460 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation 8461 * is appended to this compound to identify the client ID which is 8462 * performing recovery. 8463 */ 8464 static int _nfs40_proc_fsid_present(struct inode *inode, const struct cred *cred) 8465 { 8466 struct nfs_server *server = NFS_SERVER(inode); 8467 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 8468 struct rpc_clnt *clnt = server->client; 8469 struct nfs4_fsid_present_arg args = { 8470 .fh = NFS_FH(inode), 8471 .clientid = clp->cl_clientid, 8472 .renew = 1, /* append RENEW */ 8473 }; 8474 struct nfs4_fsid_present_res res = { 8475 .renew = 1, 8476 }; 8477 struct rpc_message msg = { 8478 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 8479 .rpc_argp = &args, 8480 .rpc_resp = &res, 8481 .rpc_cred = cred, 8482 }; 8483 unsigned long now = jiffies; 8484 int status; 8485 8486 res.fh = nfs_alloc_fhandle(); 8487 if (res.fh == NULL) 8488 return -ENOMEM; 8489 8490 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8491 status = nfs4_call_sync_sequence(clnt, server, &msg, 8492 &args.seq_args, &res.seq_res); 8493 nfs_free_fhandle(res.fh); 8494 if (status) 8495 return status; 8496 8497 do_renew_lease(clp, now); 8498 return 0; 8499 } 8500 8501 #ifdef CONFIG_NFS_V4_1 8502 8503 /* 8504 * This operation also signals the server that this client is 8505 * performing "lease moved" recovery. The server can stop asserting 8506 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing 8507 * this operation is identified in the SEQUENCE operation in this 8508 * compound. 
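 *
 * Unlike the NFSv4.0 variant above, nothing needs to be appended to
 * renew the lease: the SEQUENCE operation in this v4.1 compound renews
 * it implicitly, and SEQ4_STATUS_LEASE_MOVED in the SEQUENCE result is
 * what tells the caller whether further lease-moved recovery is needed.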
 */
static int _nfs41_proc_fsid_present(struct inode *inode, const struct cred *cred)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct rpc_clnt *clnt = server->client;
	struct nfs4_fsid_present_arg args = {
		.fh = NFS_FH(inode),
	};
	struct nfs4_fsid_present_res res = {
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
		.rpc_argp = &args,
		.rpc_resp = &res,
		.rpc_cred = cred,
	};
	int status;

	res.fh = nfs_alloc_fhandle();
	if (res.fh == NULL)
		return -ENOMEM;

	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
	status = nfs4_call_sync_sequence(clnt, server, &msg,
					&args.seq_args, &res.seq_res);
	nfs_free_fhandle(res.fh);
	if (status == NFS4_OK &&
	    res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
		status = -NFS4ERR_LEASE_MOVED;
	return status;
}

#endif /* CONFIG_NFS_V4_1 */

/**
 * nfs4_proc_fsid_present - Is this FSID present or absent on server?
 * @inode: inode on FSID to check
 * @cred: credential to use for this operation
 *
 * Server indicates whether the FSID is present, moved, or not
 * recognized. This operation is necessary to clear a LEASE_MOVED
 * condition for this client ID.
 *
 * Returns NFS4_OK if the FSID is present on this server,
 * -NFS4ERR_MOVED if the FSID is no longer present, a negative
 * NFS4ERR code if some error occurred on the server, or a
 * negative errno if a local failure occurred.
 */
int nfs4_proc_fsid_present(struct inode *inode, const struct cred *cred)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_client *clp = server->nfs_client;
	const struct nfs4_mig_recovery_ops *ops =
		clp->cl_mvops->mig_recovery_ops;
	struct nfs4_exception exception = {
		.interruptible = true,
	};
	int status;

	dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
		(unsigned long long)server->fsid.major,
		(unsigned long long)server->fsid.minor,
		clp->cl_hostname);
	nfs_display_fhandle(NFS_FH(inode), __func__);

	do {
		status = ops->fsid_present(inode, cred);
		if (status != -NFS4ERR_DELAY)
			break;
		nfs4_handle_exception(server, status, &exception);
	} while (exception.retry);
	return status;
}

/*
 * If 'use_integrity' is true and the state management nfs_client
 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
 * and the machine credential as per RFC3530bis and RFC5661 Security
 * Considerations sections. Otherwise, just use the user cred with the
 * filesystem's rpc_client.
8589 */ 8590 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity) 8591 { 8592 int status; 8593 struct rpc_clnt *clnt = NFS_SERVER(dir)->client; 8594 struct nfs_client *clp = NFS_SERVER(dir)->nfs_client; 8595 struct nfs4_secinfo_arg args = { 8596 .dir_fh = NFS_FH(dir), 8597 .name = name, 8598 }; 8599 struct nfs4_secinfo_res res = { 8600 .flavors = flavors, 8601 }; 8602 struct rpc_message msg = { 8603 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO], 8604 .rpc_argp = &args, 8605 .rpc_resp = &res, 8606 }; 8607 struct nfs4_call_sync_data data = { 8608 .seq_server = NFS_SERVER(dir), 8609 .seq_args = &args.seq_args, 8610 .seq_res = &res.seq_res, 8611 }; 8612 struct rpc_task_setup task_setup = { 8613 .rpc_client = clnt, 8614 .rpc_message = &msg, 8615 .callback_ops = clp->cl_mvops->call_sync_ops, 8616 .callback_data = &data, 8617 .flags = RPC_TASK_NO_ROUND_ROBIN, 8618 }; 8619 const struct cred *cred = NULL; 8620 8621 if (use_integrity) { 8622 clnt = clp->cl_rpcclient; 8623 task_setup.rpc_client = clnt; 8624 8625 cred = nfs4_get_clid_cred(clp); 8626 msg.rpc_cred = cred; 8627 } 8628 8629 dprintk("NFS call secinfo %s\n", name->name); 8630 8631 nfs4_state_protect(clp, NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg); 8632 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 8633 status = nfs4_call_sync_custom(&task_setup); 8634 8635 dprintk("NFS reply secinfo: %d\n", status); 8636 8637 put_cred(cred); 8638 return status; 8639 } 8640 8641 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, 8642 struct nfs4_secinfo_flavors *flavors) 8643 { 8644 struct nfs4_exception exception = { 8645 .interruptible = true, 8646 }; 8647 int err; 8648 do { 8649 err = -NFS4ERR_WRONGSEC; 8650 8651 /* try to use integrity protection with machine cred */ 8652 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client)) 8653 err = _nfs4_proc_secinfo(dir, name, flavors, true); 8654 8655 /* 8656 * if unable to use integrity protection, or SECINFO with 8657 * integrity protection returns NFS4ERR_WRONGSEC (which is 8658 * disallowed by spec, but exists in deployed servers) use 8659 * the current filesystem's rpc_client and the user cred. 8660 */ 8661 if (err == -NFS4ERR_WRONGSEC) 8662 err = _nfs4_proc_secinfo(dir, name, flavors, false); 8663 8664 trace_nfs4_secinfo(dir, name, err); 8665 err = nfs4_handle_exception(NFS_SERVER(dir), err, 8666 &exception); 8667 } while (exception.retry); 8668 return err; 8669 } 8670 8671 #ifdef CONFIG_NFS_V4_1 8672 /* 8673 * Check the exchange flags returned by the server for invalid flags, having 8674 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or 8675 * DS flags set. 
8676 */ 8677 static int nfs4_check_cl_exchange_flags(u32 flags, u32 version) 8678 { 8679 if (version >= 2 && (flags & ~EXCHGID4_2_FLAG_MASK_R)) 8680 goto out_inval; 8681 else if (version < 2 && (flags & ~EXCHGID4_FLAG_MASK_R)) 8682 goto out_inval; 8683 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && 8684 (flags & EXCHGID4_FLAG_USE_NON_PNFS)) 8685 goto out_inval; 8686 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) 8687 goto out_inval; 8688 return NFS_OK; 8689 out_inval: 8690 return -NFS4ERR_INVAL; 8691 } 8692 8693 static bool 8694 nfs41_same_server_scope(struct nfs41_server_scope *a, 8695 struct nfs41_server_scope *b) 8696 { 8697 if (a->server_scope_sz != b->server_scope_sz) 8698 return false; 8699 return memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0; 8700 } 8701 8702 static void 8703 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) 8704 { 8705 struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp; 8706 struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp; 8707 struct nfs_client *clp = args->client; 8708 8709 switch (task->tk_status) { 8710 case -NFS4ERR_BADSESSION: 8711 case -NFS4ERR_DEADSESSION: 8712 nfs4_schedule_session_recovery(clp->cl_session, 8713 task->tk_status); 8714 return; 8715 } 8716 if (args->dir == NFS4_CDFC4_FORE_OR_BOTH && 8717 res->dir != NFS4_CDFS4_BOTH) { 8718 rpc_task_close_connection(task); 8719 if (args->retries++ < MAX_BIND_CONN_TO_SESSION_RETRIES) 8720 rpc_restart_call(task); 8721 } 8722 } 8723 8724 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = { 8725 .rpc_call_done = nfs4_bind_one_conn_to_session_done, 8726 }; 8727 8728 /* 8729 * nfs4_proc_bind_one_conn_to_session() 8730 * 8731 * The 4.1 client currently uses the same TCP connection for the 8732 * fore and backchannel. 
8733 */ 8734 static 8735 int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt, 8736 struct rpc_xprt *xprt, 8737 struct nfs_client *clp, 8738 const struct cred *cred) 8739 { 8740 int status; 8741 struct nfs41_bind_conn_to_session_args args = { 8742 .client = clp, 8743 .dir = NFS4_CDFC4_FORE_OR_BOTH, 8744 .retries = 0, 8745 }; 8746 struct nfs41_bind_conn_to_session_res res; 8747 struct rpc_message msg = { 8748 .rpc_proc = 8749 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION], 8750 .rpc_argp = &args, 8751 .rpc_resp = &res, 8752 .rpc_cred = cred, 8753 }; 8754 struct rpc_task_setup task_setup_data = { 8755 .rpc_client = clnt, 8756 .rpc_xprt = xprt, 8757 .callback_ops = &nfs4_bind_one_conn_to_session_ops, 8758 .rpc_message = &msg, 8759 .flags = RPC_TASK_TIMEOUT, 8760 }; 8761 struct rpc_task *task; 8762 8763 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id); 8764 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN)) 8765 args.dir = NFS4_CDFC4_FORE; 8766 8767 /* Do not set the backchannel flag unless this is clnt->cl_xprt */ 8768 if (xprt != rcu_access_pointer(clnt->cl_xprt)) 8769 args.dir = NFS4_CDFC4_FORE; 8770 8771 task = rpc_run_task(&task_setup_data); 8772 if (!IS_ERR(task)) { 8773 status = task->tk_status; 8774 rpc_put_task(task); 8775 } else 8776 status = PTR_ERR(task); 8777 trace_nfs4_bind_conn_to_session(clp, status); 8778 if (status == 0) { 8779 if (memcmp(res.sessionid.data, 8780 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { 8781 dprintk("NFS: %s: Session ID mismatch\n", __func__); 8782 return -EIO; 8783 } 8784 if ((res.dir & args.dir) != res.dir || res.dir == 0) { 8785 dprintk("NFS: %s: Unexpected direction from server\n", 8786 __func__); 8787 return -EIO; 8788 } 8789 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) { 8790 dprintk("NFS: %s: Server returned RDMA mode = true\n", 8791 __func__); 8792 return -EIO; 8793 } 8794 } 8795 8796 return status; 8797 } 8798 8799 struct rpc_bind_conn_calldata { 8800 struct nfs_client *clp; 8801 const struct cred *cred; 8802 }; 8803 8804 static int 8805 nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt, 8806 struct rpc_xprt *xprt, 8807 void *calldata) 8808 { 8809 struct rpc_bind_conn_calldata *p = calldata; 8810 8811 return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred); 8812 } 8813 8814 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, const struct cred *cred) 8815 { 8816 struct rpc_bind_conn_calldata data = { 8817 .clp = clp, 8818 .cred = cred, 8819 }; 8820 return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient, 8821 nfs4_proc_bind_conn_to_session_callback, &data); 8822 } 8823 8824 /* 8825 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map 8826 * and operations we'd like to see to enable certain features in the allow map 8827 */ 8828 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = { 8829 .how = SP4_MACH_CRED, 8830 .enforce.u.words = { 8831 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 8832 1 << (OP_EXCHANGE_ID - 32) | 8833 1 << (OP_CREATE_SESSION - 32) | 8834 1 << (OP_DESTROY_SESSION - 32) | 8835 1 << (OP_DESTROY_CLIENTID - 32) 8836 }, 8837 .allow.u.words = { 8838 [0] = 1 << (OP_CLOSE) | 8839 1 << (OP_OPEN_DOWNGRADE) | 8840 1 << (OP_LOCKU) | 8841 1 << (OP_DELEGRETURN) | 8842 1 << (OP_COMMIT), 8843 [1] = 1 << (OP_SECINFO - 32) | 8844 1 << (OP_SECINFO_NO_NAME - 32) | 8845 1 << (OP_LAYOUTRETURN - 32) | 8846 1 << (OP_TEST_STATEID - 32) | 8847 1 << (OP_FREE_STATEID - 32) | 8848 1 << (OP_WRITE - 32) 8849 } 8850 }; 8851 8852 
/* 8853 * Select the state protection mode for client `clp' given the server results 8854 * from exchange_id in `sp'. 8855 * 8856 * Returns 0 on success, negative errno otherwise. 8857 */ 8858 static int nfs4_sp4_select_mode(struct nfs_client *clp, 8859 struct nfs41_state_protection *sp) 8860 { 8861 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = { 8862 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 8863 1 << (OP_EXCHANGE_ID - 32) | 8864 1 << (OP_CREATE_SESSION - 32) | 8865 1 << (OP_DESTROY_SESSION - 32) | 8866 1 << (OP_DESTROY_CLIENTID - 32) 8867 }; 8868 unsigned long flags = 0; 8869 unsigned int i; 8870 int ret = 0; 8871 8872 if (sp->how == SP4_MACH_CRED) { 8873 /* Print state protect result */ 8874 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n"); 8875 for (i = 0; i <= LAST_NFS4_OP; i++) { 8876 if (test_bit(i, sp->enforce.u.longs)) 8877 dfprintk(MOUNT, " enforce op %d\n", i); 8878 if (test_bit(i, sp->allow.u.longs)) 8879 dfprintk(MOUNT, " allow op %d\n", i); 8880 } 8881 8882 /* make sure nothing is on enforce list that isn't supported */ 8883 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) { 8884 if (sp->enforce.u.words[i] & ~supported_enforce[i]) { 8885 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 8886 ret = -EINVAL; 8887 goto out; 8888 } 8889 } 8890 8891 /* 8892 * Minimal mode - state operations are allowed to use machine 8893 * credential. Note this already happens by default, so the 8894 * client doesn't have to do anything more than the negotiation. 8895 * 8896 * NOTE: we don't care if EXCHANGE_ID is in the list - 8897 * we're already using the machine cred for exchange_id 8898 * and will never use a different cred. 8899 */ 8900 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) && 8901 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) && 8902 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) && 8903 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) { 8904 dfprintk(MOUNT, "sp4_mach_cred:\n"); 8905 dfprintk(MOUNT, " minimal mode enabled\n"); 8906 __set_bit(NFS_SP4_MACH_CRED_MINIMAL, &flags); 8907 } else { 8908 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 8909 ret = -EINVAL; 8910 goto out; 8911 } 8912 8913 if (test_bit(OP_CLOSE, sp->allow.u.longs) && 8914 test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) && 8915 test_bit(OP_DELEGRETURN, sp->allow.u.longs) && 8916 test_bit(OP_LOCKU, sp->allow.u.longs)) { 8917 dfprintk(MOUNT, " cleanup mode enabled\n"); 8918 __set_bit(NFS_SP4_MACH_CRED_CLEANUP, &flags); 8919 } 8920 8921 if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) { 8922 dfprintk(MOUNT, " pnfs cleanup mode enabled\n"); 8923 __set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP, &flags); 8924 } 8925 8926 if (test_bit(OP_SECINFO, sp->allow.u.longs) && 8927 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) { 8928 dfprintk(MOUNT, " secinfo mode enabled\n"); 8929 __set_bit(NFS_SP4_MACH_CRED_SECINFO, &flags); 8930 } 8931 8932 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) && 8933 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) { 8934 dfprintk(MOUNT, " stateid mode enabled\n"); 8935 __set_bit(NFS_SP4_MACH_CRED_STATEID, &flags); 8936 } 8937 8938 if (test_bit(OP_WRITE, sp->allow.u.longs)) { 8939 dfprintk(MOUNT, " write mode enabled\n"); 8940 __set_bit(NFS_SP4_MACH_CRED_WRITE, &flags); 8941 } 8942 8943 if (test_bit(OP_COMMIT, sp->allow.u.longs)) { 8944 dfprintk(MOUNT, " commit mode enabled\n"); 8945 __set_bit(NFS_SP4_MACH_CRED_COMMIT, &flags); 8946 } 8947 } 8948 out: 8949 clp->cl_sp4_flags = flags; 8950 return ret; 8951 } 8952 8953 struct nfs41_exchange_id_data { 8954 struct 
nfs41_exchange_id_res res; 8955 struct nfs41_exchange_id_args args; 8956 }; 8957 8958 static void nfs4_exchange_id_release(void *data) 8959 { 8960 struct nfs41_exchange_id_data *cdata = 8961 (struct nfs41_exchange_id_data *)data; 8962 8963 nfs_put_client(cdata->args.client); 8964 kfree(cdata->res.impl_id); 8965 kfree(cdata->res.server_scope); 8966 kfree(cdata->res.server_owner); 8967 kfree(cdata); 8968 } 8969 8970 static const struct rpc_call_ops nfs4_exchange_id_call_ops = { 8971 .rpc_release = nfs4_exchange_id_release, 8972 }; 8973 8974 /* 8975 * _nfs4_proc_exchange_id() 8976 * 8977 * Wrapper for EXCHANGE_ID operation. 8978 */ 8979 static struct rpc_task * 8980 nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred, 8981 u32 sp4_how, struct rpc_xprt *xprt) 8982 { 8983 struct rpc_message msg = { 8984 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], 8985 .rpc_cred = cred, 8986 }; 8987 struct rpc_task_setup task_setup_data = { 8988 .rpc_client = clp->cl_rpcclient, 8989 .callback_ops = &nfs4_exchange_id_call_ops, 8990 .rpc_message = &msg, 8991 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN, 8992 }; 8993 struct nfs41_exchange_id_data *calldata; 8994 int status; 8995 8996 if (!refcount_inc_not_zero(&clp->cl_count)) 8997 return ERR_PTR(-EIO); 8998 8999 status = -ENOMEM; 9000 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 9001 if (!calldata) 9002 goto out; 9003 9004 nfs4_init_boot_verifier(clp, &calldata->args.verifier); 9005 9006 status = nfs4_init_uniform_client_string(clp); 9007 if (status) 9008 goto out_calldata; 9009 9010 calldata->res.server_owner = kzalloc(sizeof(struct nfs41_server_owner), 9011 GFP_NOFS); 9012 status = -ENOMEM; 9013 if (unlikely(calldata->res.server_owner == NULL)) 9014 goto out_calldata; 9015 9016 calldata->res.server_scope = kzalloc(sizeof(struct nfs41_server_scope), 9017 GFP_NOFS); 9018 if (unlikely(calldata->res.server_scope == NULL)) 9019 goto out_server_owner; 9020 9021 calldata->res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS); 9022 if (unlikely(calldata->res.impl_id == NULL)) 9023 goto out_server_scope; 9024 9025 switch (sp4_how) { 9026 case SP4_NONE: 9027 calldata->args.state_protect.how = SP4_NONE; 9028 break; 9029 9030 case SP4_MACH_CRED: 9031 calldata->args.state_protect = nfs4_sp4_mach_cred_request; 9032 break; 9033 9034 default: 9035 /* unsupported! 
*/ 9036 WARN_ON_ONCE(1); 9037 status = -EINVAL; 9038 goto out_impl_id; 9039 } 9040 if (xprt) { 9041 task_setup_data.rpc_xprt = xprt; 9042 task_setup_data.flags |= RPC_TASK_SOFTCONN; 9043 memcpy(calldata->args.verifier.data, clp->cl_confirm.data, 9044 sizeof(calldata->args.verifier.data)); 9045 } 9046 calldata->args.client = clp; 9047 calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 9048 EXCHGID4_FLAG_BIND_PRINC_STATEID; 9049 #ifdef CONFIG_NFS_V4_1_MIGRATION 9050 calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR; 9051 #endif 9052 if (test_bit(NFS_CS_PNFS, &clp->cl_flags)) 9053 calldata->args.flags |= EXCHGID4_FLAG_USE_PNFS_DS; 9054 msg.rpc_argp = &calldata->args; 9055 msg.rpc_resp = &calldata->res; 9056 task_setup_data.callback_data = calldata; 9057 9058 return rpc_run_task(&task_setup_data); 9059 9060 out_impl_id: 9061 kfree(calldata->res.impl_id); 9062 out_server_scope: 9063 kfree(calldata->res.server_scope); 9064 out_server_owner: 9065 kfree(calldata->res.server_owner); 9066 out_calldata: 9067 kfree(calldata); 9068 out: 9069 nfs_put_client(clp); 9070 return ERR_PTR(status); 9071 } 9072 9073 /* 9074 * _nfs4_proc_exchange_id() 9075 * 9076 * Wrapper for EXCHANGE_ID operation. 9077 */ 9078 static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred, 9079 u32 sp4_how) 9080 { 9081 struct rpc_task *task; 9082 struct nfs41_exchange_id_args *argp; 9083 struct nfs41_exchange_id_res *resp; 9084 unsigned long now = jiffies; 9085 int status; 9086 9087 task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL); 9088 if (IS_ERR(task)) 9089 return PTR_ERR(task); 9090 9091 argp = task->tk_msg.rpc_argp; 9092 resp = task->tk_msg.rpc_resp; 9093 status = task->tk_status; 9094 if (status != 0) 9095 goto out; 9096 9097 status = nfs4_check_cl_exchange_flags(resp->flags, 9098 clp->cl_mvops->minor_version); 9099 if (status != 0) 9100 goto out; 9101 9102 status = nfs4_sp4_select_mode(clp, &resp->state_protect); 9103 if (status != 0) 9104 goto out; 9105 9106 do_renew_lease(clp, now); 9107 9108 clp->cl_clientid = resp->clientid; 9109 clp->cl_exchange_flags = resp->flags; 9110 clp->cl_seqid = resp->seqid; 9111 /* Client ID is not confirmed */ 9112 if (!(resp->flags & EXCHGID4_FLAG_CONFIRMED_R)) 9113 clear_bit(NFS4_SESSION_ESTABLISHED, 9114 &clp->cl_session->session_state); 9115 9116 if (clp->cl_serverscope != NULL && 9117 !nfs41_same_server_scope(clp->cl_serverscope, 9118 resp->server_scope)) { 9119 dprintk("%s: server_scope mismatch detected\n", 9120 __func__); 9121 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); 9122 } 9123 9124 swap(clp->cl_serverowner, resp->server_owner); 9125 swap(clp->cl_serverscope, resp->server_scope); 9126 swap(clp->cl_implid, resp->impl_id); 9127 9128 /* Save the EXCHANGE_ID verifier session trunk tests */ 9129 memcpy(clp->cl_confirm.data, argp->verifier.data, 9130 sizeof(clp->cl_confirm.data)); 9131 out: 9132 trace_nfs4_exchange_id(clp, status); 9133 rpc_put_task(task); 9134 return status; 9135 } 9136 9137 /* 9138 * nfs4_proc_exchange_id() 9139 * 9140 * Returns zero, a negative errno, or a negative NFS4ERR status code. 9141 * 9142 * Since the clientid has expired, all compounds using sessions 9143 * associated with the stale clientid will be returning 9144 * NFS4ERR_BADSESSION in the sequence operation, and will therefore 9145 * be in some phase of session reset. 9146 * 9147 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used. 
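 *
 * If the SP4_MACH_CRED negotiation fails for any reason, the client
 * falls back to a plain SP4_NONE EXCHANGE_ID rather than failing
 * immediately.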
 */
int nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred)
{
	rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
	int status;

	/* try SP4_MACH_CRED if krb5i/p */
	if (authflavor == RPC_AUTH_GSS_KRB5I ||
	    authflavor == RPC_AUTH_GSS_KRB5P) {
		status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
		if (!status)
			return 0;
	}

	/* try SP4_NONE */
	return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
}

/**
 * nfs4_test_session_trunk - test an rpc_xprt for session trunking
 * @clnt: struct rpc_clnt to get new transport
 * @xprt: the rpc_xprt to test
 * @data: call data for _nfs4_proc_exchange_id.
 *
 * This is an add_xprt_test() test function called from
 * rpc_clnt_setup_test_and_add_xprt.
 *
 * The rpc_xprt_switch is referenced by rpc_clnt_setup_test_and_add_xprt
 * and is dereferenced in nfs4_exchange_id_release.
 *
 * Upon success, add the new transport to the rpc_clnt.
 */
void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
			     void *data)
{
	struct nfs4_add_xprt_data *adata = data;
	struct rpc_task *task;
	int status;

	u32 sp4_how;

	dprintk("--> %s try %s\n", __func__,
		xprt->address_strings[RPC_DISPLAY_ADDR]);

	sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED);

try_again:
	/* Test connection for session trunking. Async exchange_id call */
	task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt);
	if (IS_ERR(task))
		return;

	status = task->tk_status;
	if (status == 0) {
		status = nfs4_detect_session_trunking(adata->clp,
				task->tk_msg.rpc_resp, xprt);
		trace_nfs4_trunked_exchange_id(adata->clp,
			xprt->address_strings[RPC_DISPLAY_ADDR], status);
	}
	if (status == 0)
		rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
	else if (status != -NFS4ERR_DELAY && rpc_clnt_xprt_switch_has_addr(clnt,
				(struct sockaddr *)&xprt->addr))
		rpc_clnt_xprt_switch_remove_xprt(clnt, xprt);

	rpc_put_task(task);
	if (status == -NFS4ERR_DELAY) {
		ssleep(1);
		goto try_again;
	}
}
EXPORT_SYMBOL_GPL(nfs4_test_session_trunk);

static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
		const struct cred *cred)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
		.rpc_argp = clp,
		.rpc_cred = cred,
	};
	int status;

	status = rpc_call_sync(clp->cl_rpcclient, &msg,
			       RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
	trace_nfs4_destroy_clientid(clp, status);
	if (status)
		dprintk("NFS: Got error %d from the server %s on "
			"DESTROY_CLIENTID.", status, clp->cl_hostname);
	return status;
}

static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
		const struct cred *cred)
{
	unsigned int loop;
	int ret;

	for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
		ret = _nfs4_proc_destroy_clientid(clp, cred);
		switch (ret) {
		case -NFS4ERR_DELAY:
		case -NFS4ERR_CLIENTID_BUSY:
			ssleep(1);
			break;
		default:
			return ret;
		}
	}
	return 0;
}

int nfs4_destroy_clientid(struct nfs_client *clp)
{
	const struct cred *cred;
	int ret = 0;

	if (clp->cl_mvops->minor_version < 1)
		goto out;
	if (clp->cl_exchange_flags == 0)
		goto out;
	if
(clp->cl_preserve_clid) 9271 goto out; 9272 cred = nfs4_get_clid_cred(clp); 9273 ret = nfs4_proc_destroy_clientid(clp, cred); 9274 put_cred(cred); 9275 switch (ret) { 9276 case 0: 9277 case -NFS4ERR_STALE_CLIENTID: 9278 clp->cl_exchange_flags = 0; 9279 } 9280 out: 9281 return ret; 9282 } 9283 9284 #endif /* CONFIG_NFS_V4_1 */ 9285 9286 struct nfs4_get_lease_time_data { 9287 struct nfs4_get_lease_time_args *args; 9288 struct nfs4_get_lease_time_res *res; 9289 struct nfs_client *clp; 9290 }; 9291 9292 static void nfs4_get_lease_time_prepare(struct rpc_task *task, 9293 void *calldata) 9294 { 9295 struct nfs4_get_lease_time_data *data = 9296 (struct nfs4_get_lease_time_data *)calldata; 9297 9298 /* just setup sequence, do not trigger session recovery 9299 since we're invoked within one */ 9300 nfs4_setup_sequence(data->clp, 9301 &data->args->la_seq_args, 9302 &data->res->lr_seq_res, 9303 task); 9304 } 9305 9306 /* 9307 * Called from nfs4_state_manager thread for session setup, so don't recover 9308 * from sequence operation or clientid errors. 9309 */ 9310 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) 9311 { 9312 struct nfs4_get_lease_time_data *data = 9313 (struct nfs4_get_lease_time_data *)calldata; 9314 9315 if (!nfs4_sequence_done(task, &data->res->lr_seq_res)) 9316 return; 9317 switch (task->tk_status) { 9318 case -NFS4ERR_DELAY: 9319 case -NFS4ERR_GRACE: 9320 rpc_delay(task, NFS4_POLL_RETRY_MIN); 9321 task->tk_status = 0; 9322 fallthrough; 9323 case -NFS4ERR_RETRY_UNCACHED_REP: 9324 rpc_restart_call_prepare(task); 9325 return; 9326 } 9327 } 9328 9329 static const struct rpc_call_ops nfs4_get_lease_time_ops = { 9330 .rpc_call_prepare = nfs4_get_lease_time_prepare, 9331 .rpc_call_done = nfs4_get_lease_time_done, 9332 }; 9333 9334 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) 9335 { 9336 struct nfs4_get_lease_time_args args; 9337 struct nfs4_get_lease_time_res res = { 9338 .lr_fsinfo = fsinfo, 9339 }; 9340 struct nfs4_get_lease_time_data data = { 9341 .args = &args, 9342 .res = &res, 9343 .clp = clp, 9344 }; 9345 struct rpc_message msg = { 9346 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME], 9347 .rpc_argp = &args, 9348 .rpc_resp = &res, 9349 }; 9350 struct rpc_task_setup task_setup = { 9351 .rpc_client = clp->cl_rpcclient, 9352 .rpc_message = &msg, 9353 .callback_ops = &nfs4_get_lease_time_ops, 9354 .callback_data = &data, 9355 .flags = RPC_TASK_TIMEOUT, 9356 }; 9357 9358 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0, 1); 9359 return nfs4_call_sync_custom(&task_setup); 9360 } 9361 9362 #ifdef CONFIG_NFS_V4_1 9363 9364 /* 9365 * Initialize the values to be used by the client in CREATE_SESSION 9366 * If nfs4_init_session set the fore channel request and response sizes, 9367 * use them. 9368 * 9369 * Set the back channel max_resp_sz_cached to zero to force the client to 9370 * always set csa_cachethis to FALSE because the current implementation 9371 * of the back channel DRC only supports caching the CB_SEQUENCE operation. 
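 *
 * As a rough illustration (the exact values depend on the build and on
 * the max_session_slots / max_session_cb_slots module parameters): the
 * fore channel asks for NFS_MAX_FILE_IO_SIZE plus the worst-case
 * READ/WRITE encoding overhead in each direction, while the back
 * channel is limited to rpc_max_bc_payload() bytes per message and to
 * no more than rpc_num_bc_slots() slots.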
 */
static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args,
				    struct rpc_clnt *clnt)
{
	unsigned int max_rqst_sz, max_resp_sz;
	unsigned int max_bc_payload = rpc_max_bc_payload(clnt);
	unsigned int max_bc_slots = rpc_num_bc_slots(clnt);

	max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
	max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;

	/* Fore channel attributes */
	args->fc_attrs.max_rqst_sz = max_rqst_sz;
	args->fc_attrs.max_resp_sz = max_resp_sz;
	args->fc_attrs.max_ops = NFS4_MAX_OPS;
	args->fc_attrs.max_reqs = max_session_slots;

	dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
		"max_ops=%u max_reqs=%u\n",
		__func__,
		args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
		args->fc_attrs.max_ops, args->fc_attrs.max_reqs);

	/* Back channel attributes */
	args->bc_attrs.max_rqst_sz = max_bc_payload;
	args->bc_attrs.max_resp_sz = max_bc_payload;
	args->bc_attrs.max_resp_sz_cached = 0;
	args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
	args->bc_attrs.max_reqs = max_t(unsigned short, max_session_cb_slots, 1);
	if (args->bc_attrs.max_reqs > max_bc_slots)
		args->bc_attrs.max_reqs = max_bc_slots;

	dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
		"max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
		__func__,
		args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
		args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
		args->bc_attrs.max_reqs);
}

static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
		struct nfs41_create_session_res *res)
{
	struct nfs4_channel_attrs *sent = &args->fc_attrs;
	struct nfs4_channel_attrs *rcvd = &res->fc_attrs;

	if (rcvd->max_resp_sz > sent->max_resp_sz)
		return -EINVAL;
	/*
	 * Our requested max_ops is the minimum we need; we're not
	 * prepared to break up compounds into smaller pieces than that.
9423 * So, no point even trying to continue if the server won't 9424 * cooperate: 9425 */ 9426 if (rcvd->max_ops < sent->max_ops) 9427 return -EINVAL; 9428 if (rcvd->max_reqs == 0) 9429 return -EINVAL; 9430 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE) 9431 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE; 9432 return 0; 9433 } 9434 9435 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, 9436 struct nfs41_create_session_res *res) 9437 { 9438 struct nfs4_channel_attrs *sent = &args->bc_attrs; 9439 struct nfs4_channel_attrs *rcvd = &res->bc_attrs; 9440 9441 if (!(res->flags & SESSION4_BACK_CHAN)) 9442 goto out; 9443 if (rcvd->max_rqst_sz > sent->max_rqst_sz) 9444 return -EINVAL; 9445 if (rcvd->max_resp_sz < sent->max_resp_sz) 9446 return -EINVAL; 9447 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) 9448 return -EINVAL; 9449 if (rcvd->max_ops > sent->max_ops) 9450 return -EINVAL; 9451 if (rcvd->max_reqs > sent->max_reqs) 9452 return -EINVAL; 9453 out: 9454 return 0; 9455 } 9456 9457 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args, 9458 struct nfs41_create_session_res *res) 9459 { 9460 int ret; 9461 9462 ret = nfs4_verify_fore_channel_attrs(args, res); 9463 if (ret) 9464 return ret; 9465 return nfs4_verify_back_channel_attrs(args, res); 9466 } 9467 9468 static void nfs4_update_session(struct nfs4_session *session, 9469 struct nfs41_create_session_res *res) 9470 { 9471 nfs4_copy_sessionid(&session->sess_id, &res->sessionid); 9472 /* Mark client id and session as being confirmed */ 9473 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; 9474 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state); 9475 session->flags = res->flags; 9476 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs)); 9477 if (res->flags & SESSION4_BACK_CHAN) 9478 memcpy(&session->bc_attrs, &res->bc_attrs, 9479 sizeof(session->bc_attrs)); 9480 } 9481 9482 static int _nfs4_proc_create_session(struct nfs_client *clp, 9483 const struct cred *cred) 9484 { 9485 struct nfs4_session *session = clp->cl_session; 9486 struct nfs41_create_session_args args = { 9487 .client = clp, 9488 .clientid = clp->cl_clientid, 9489 .seqid = clp->cl_seqid, 9490 .cb_program = NFS4_CALLBACK, 9491 }; 9492 struct nfs41_create_session_res res; 9493 9494 struct rpc_message msg = { 9495 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION], 9496 .rpc_argp = &args, 9497 .rpc_resp = &res, 9498 .rpc_cred = cred, 9499 }; 9500 int status; 9501 9502 nfs4_init_channel_attrs(&args, clp->cl_rpcclient); 9503 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN); 9504 9505 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 9506 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 9507 trace_nfs4_create_session(clp, status); 9508 9509 switch (status) { 9510 case -NFS4ERR_STALE_CLIENTID: 9511 case -NFS4ERR_DELAY: 9512 case -ETIMEDOUT: 9513 case -EACCES: 9514 case -EAGAIN: 9515 goto out; 9516 } 9517 9518 clp->cl_seqid++; 9519 if (!status) { 9520 /* Verify the session's negotiated channel_attrs values */ 9521 status = nfs4_verify_channel_attrs(&args, &res); 9522 /* Increment the clientid slot sequence id */ 9523 if (status) 9524 goto out; 9525 nfs4_update_session(session, &res); 9526 } 9527 out: 9528 return status; 9529 } 9530 9531 /* 9532 * Issues a CREATE_SESSION operation to the server. 9533 * It is the responsibility of the caller to verify the session is 9534 * expired before calling this routine. 
9535 */ 9536 int nfs4_proc_create_session(struct nfs_client *clp, const struct cred *cred) 9537 { 9538 int status; 9539 unsigned *ptr; 9540 struct nfs4_session *session = clp->cl_session; 9541 struct nfs4_add_xprt_data xprtdata = { 9542 .clp = clp, 9543 }; 9544 struct rpc_add_xprt_test rpcdata = { 9545 .add_xprt_test = clp->cl_mvops->session_trunk, 9546 .data = &xprtdata, 9547 }; 9548 9549 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); 9550 9551 status = _nfs4_proc_create_session(clp, cred); 9552 if (status) 9553 goto out; 9554 9555 /* Init or reset the session slot tables */ 9556 status = nfs4_setup_session_slot_tables(session); 9557 dprintk("slot table setup returned %d\n", status); 9558 if (status) 9559 goto out; 9560 9561 ptr = (unsigned *)&session->sess_id.data[0]; 9562 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__, 9563 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]); 9564 rpc_clnt_probe_trunked_xprts(clp->cl_rpcclient, &rpcdata); 9565 out: 9566 return status; 9567 } 9568 9569 /* 9570 * Issue the over-the-wire RPC DESTROY_SESSION. 9571 * The caller must serialize access to this routine. 9572 */ 9573 int nfs4_proc_destroy_session(struct nfs4_session *session, 9574 const struct cred *cred) 9575 { 9576 struct rpc_message msg = { 9577 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION], 9578 .rpc_argp = session, 9579 .rpc_cred = cred, 9580 }; 9581 int status = 0; 9582 9583 /* session is still being setup */ 9584 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state)) 9585 return 0; 9586 9587 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 9588 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 9589 trace_nfs4_destroy_session(session->clp, status); 9590 9591 if (status) 9592 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. " 9593 "Session has been destroyed regardless...\n", status); 9594 rpc_clnt_manage_trunked_xprts(session->clp->cl_rpcclient); 9595 return status; 9596 } 9597 9598 /* 9599 * Renew the cl_session lease. 
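 * NFSv4.1 has no RENEW operation: the lease is renewed implicitly by any
 * SEQUENCE operation, which is what the helpers below send.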
9600 */ 9601 struct nfs4_sequence_data { 9602 struct nfs_client *clp; 9603 struct nfs4_sequence_args args; 9604 struct nfs4_sequence_res res; 9605 }; 9606 9607 static void nfs41_sequence_release(void *data) 9608 { 9609 struct nfs4_sequence_data *calldata = data; 9610 struct nfs_client *clp = calldata->clp; 9611 9612 if (refcount_read(&clp->cl_count) > 1) 9613 nfs4_schedule_state_renewal(clp); 9614 nfs_put_client(clp); 9615 kfree(calldata); 9616 } 9617 9618 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp) 9619 { 9620 switch(task->tk_status) { 9621 case -NFS4ERR_DELAY: 9622 rpc_delay(task, NFS4_POLL_RETRY_MAX); 9623 return -EAGAIN; 9624 default: 9625 nfs4_schedule_lease_recovery(clp); 9626 } 9627 return 0; 9628 } 9629 9630 static void nfs41_sequence_call_done(struct rpc_task *task, void *data) 9631 { 9632 struct nfs4_sequence_data *calldata = data; 9633 struct nfs_client *clp = calldata->clp; 9634 9635 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp)) 9636 return; 9637 9638 trace_nfs4_sequence(clp, task->tk_status); 9639 if (task->tk_status < 0 && clp->cl_cons_state >= 0) { 9640 dprintk("%s ERROR %d\n", __func__, task->tk_status); 9641 if (refcount_read(&clp->cl_count) == 1) 9642 return; 9643 9644 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) { 9645 rpc_restart_call_prepare(task); 9646 return; 9647 } 9648 } 9649 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); 9650 } 9651 9652 static void nfs41_sequence_prepare(struct rpc_task *task, void *data) 9653 { 9654 struct nfs4_sequence_data *calldata = data; 9655 struct nfs_client *clp = calldata->clp; 9656 struct nfs4_sequence_args *args; 9657 struct nfs4_sequence_res *res; 9658 9659 args = task->tk_msg.rpc_argp; 9660 res = task->tk_msg.rpc_resp; 9661 9662 nfs4_setup_sequence(clp, args, res, task); 9663 } 9664 9665 static const struct rpc_call_ops nfs41_sequence_ops = { 9666 .rpc_call_done = nfs41_sequence_call_done, 9667 .rpc_call_prepare = nfs41_sequence_prepare, 9668 .rpc_release = nfs41_sequence_release, 9669 }; 9670 9671 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, 9672 const struct cred *cred, 9673 struct nfs4_slot *slot, 9674 bool is_privileged) 9675 { 9676 struct nfs4_sequence_data *calldata; 9677 struct rpc_message msg = { 9678 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE], 9679 .rpc_cred = cred, 9680 }; 9681 struct rpc_task_setup task_setup_data = { 9682 .rpc_client = clp->cl_rpcclient, 9683 .rpc_message = &msg, 9684 .callback_ops = &nfs41_sequence_ops, 9685 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT | RPC_TASK_MOVEABLE, 9686 }; 9687 struct rpc_task *ret; 9688 9689 ret = ERR_PTR(-EIO); 9690 if (!refcount_inc_not_zero(&clp->cl_count)) 9691 goto out_err; 9692 9693 ret = ERR_PTR(-ENOMEM); 9694 calldata = kzalloc(sizeof(*calldata), GFP_KERNEL); 9695 if (calldata == NULL) 9696 goto out_put_clp; 9697 nfs4_init_sequence(&calldata->args, &calldata->res, 0, is_privileged); 9698 nfs4_sequence_attach_slot(&calldata->args, &calldata->res, slot); 9699 msg.rpc_argp = &calldata->args; 9700 msg.rpc_resp = &calldata->res; 9701 calldata->clp = clp; 9702 task_setup_data.callback_data = calldata; 9703 9704 ret = rpc_run_task(&task_setup_data); 9705 if (IS_ERR(ret)) 9706 goto out_err; 9707 return ret; 9708 out_put_clp: 9709 nfs_put_client(clp); 9710 out_err: 9711 nfs41_release_slot(slot); 9712 return ret; 9713 } 9714 9715 static int nfs41_proc_async_sequence(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags) 9716 { 9717 struct rpc_task *task; 
9718 int ret = 0; 9719 9720 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) 9721 return -EAGAIN; 9722 task = _nfs41_proc_sequence(clp, cred, NULL, false); 9723 if (IS_ERR(task)) 9724 ret = PTR_ERR(task); 9725 else 9726 rpc_put_task_async(task); 9727 dprintk("<-- %s status=%d\n", __func__, ret); 9728 return ret; 9729 } 9730 9731 static int nfs4_proc_sequence(struct nfs_client *clp, const struct cred *cred) 9732 { 9733 struct rpc_task *task; 9734 int ret; 9735 9736 task = _nfs41_proc_sequence(clp, cred, NULL, true); 9737 if (IS_ERR(task)) { 9738 ret = PTR_ERR(task); 9739 goto out; 9740 } 9741 ret = rpc_wait_for_completion_task(task); 9742 if (!ret) 9743 ret = task->tk_status; 9744 rpc_put_task(task); 9745 out: 9746 dprintk("<-- %s status=%d\n", __func__, ret); 9747 return ret; 9748 } 9749 9750 struct nfs4_reclaim_complete_data { 9751 struct nfs_client *clp; 9752 struct nfs41_reclaim_complete_args arg; 9753 struct nfs41_reclaim_complete_res res; 9754 }; 9755 9756 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data) 9757 { 9758 struct nfs4_reclaim_complete_data *calldata = data; 9759 9760 nfs4_setup_sequence(calldata->clp, 9761 &calldata->arg.seq_args, 9762 &calldata->res.seq_res, 9763 task); 9764 } 9765 9766 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp) 9767 { 9768 switch(task->tk_status) { 9769 case 0: 9770 wake_up_all(&clp->cl_lock_waitq); 9771 fallthrough; 9772 case -NFS4ERR_COMPLETE_ALREADY: 9773 case -NFS4ERR_WRONG_CRED: /* What to do here? */ 9774 break; 9775 case -NFS4ERR_DELAY: 9776 rpc_delay(task, NFS4_POLL_RETRY_MAX); 9777 fallthrough; 9778 case -NFS4ERR_RETRY_UNCACHED_REP: 9779 case -EACCES: 9780 dprintk("%s: failed to reclaim complete error %d for server %s, retrying\n", 9781 __func__, task->tk_status, clp->cl_hostname); 9782 return -EAGAIN; 9783 case -NFS4ERR_BADSESSION: 9784 case -NFS4ERR_DEADSESSION: 9785 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 9786 break; 9787 default: 9788 nfs4_schedule_lease_recovery(clp); 9789 } 9790 return 0; 9791 } 9792 9793 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data) 9794 { 9795 struct nfs4_reclaim_complete_data *calldata = data; 9796 struct nfs_client *clp = calldata->clp; 9797 struct nfs4_sequence_res *res = &calldata->res.seq_res; 9798 9799 if (!nfs41_sequence_done(task, res)) 9800 return; 9801 9802 trace_nfs4_reclaim_complete(clp, task->tk_status); 9803 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) { 9804 rpc_restart_call_prepare(task); 9805 return; 9806 } 9807 } 9808 9809 static void nfs4_free_reclaim_complete_data(void *data) 9810 { 9811 struct nfs4_reclaim_complete_data *calldata = data; 9812 9813 kfree(calldata); 9814 } 9815 9816 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = { 9817 .rpc_call_prepare = nfs4_reclaim_complete_prepare, 9818 .rpc_call_done = nfs4_reclaim_complete_done, 9819 .rpc_release = nfs4_free_reclaim_complete_data, 9820 }; 9821 9822 /* 9823 * Issue a global reclaim complete. 
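 * RECLAIM_COMPLETE with rca_one_fs set to zero (arg.one_fs below) tells the
 * server that reclaim is finished for every filesystem on this client, not
 * just the one named by the current filehandle (RFC 5661, section 18.51).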
9824 */ 9825 static int nfs41_proc_reclaim_complete(struct nfs_client *clp, 9826 const struct cred *cred) 9827 { 9828 struct nfs4_reclaim_complete_data *calldata; 9829 struct rpc_message msg = { 9830 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE], 9831 .rpc_cred = cred, 9832 }; 9833 struct rpc_task_setup task_setup_data = { 9834 .rpc_client = clp->cl_rpcclient, 9835 .rpc_message = &msg, 9836 .callback_ops = &nfs4_reclaim_complete_call_ops, 9837 .flags = RPC_TASK_NO_ROUND_ROBIN, 9838 }; 9839 int status = -ENOMEM; 9840 9841 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 9842 if (calldata == NULL) 9843 goto out; 9844 calldata->clp = clp; 9845 calldata->arg.one_fs = 0; 9846 9847 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0, 1); 9848 msg.rpc_argp = &calldata->arg; 9849 msg.rpc_resp = &calldata->res; 9850 task_setup_data.callback_data = calldata; 9851 status = nfs4_call_sync_custom(&task_setup_data); 9852 out: 9853 dprintk("<-- %s status=%d\n", __func__, status); 9854 return status; 9855 } 9856 9857 static void 9858 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata) 9859 { 9860 struct nfs4_layoutget *lgp = calldata; 9861 struct nfs_server *server = NFS_SERVER(lgp->args.inode); 9862 9863 nfs4_setup_sequence(server->nfs_client, &lgp->args.seq_args, 9864 &lgp->res.seq_res, task); 9865 } 9866 9867 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) 9868 { 9869 struct nfs4_layoutget *lgp = calldata; 9870 9871 nfs41_sequence_process(task, &lgp->res.seq_res); 9872 } 9873 9874 static int 9875 nfs4_layoutget_handle_exception(struct rpc_task *task, 9876 struct nfs4_layoutget *lgp, struct nfs4_exception *exception) 9877 { 9878 struct inode *inode = lgp->args.inode; 9879 struct nfs_server *server = NFS_SERVER(inode); 9880 struct pnfs_layout_hdr *lo = lgp->lo; 9881 int nfs4err = task->tk_status; 9882 int err, status = 0; 9883 LIST_HEAD(head); 9884 9885 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status); 9886 9887 nfs4_sequence_free_slot(&lgp->res.seq_res); 9888 9889 exception->state = NULL; 9890 exception->stateid = NULL; 9891 9892 switch (nfs4err) { 9893 case 0: 9894 goto out; 9895 9896 /* 9897 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs 9898 * on the file. set tk_status to -ENODATA to tell upper layer to 9899 * retry go inband. 9900 */ 9901 case -NFS4ERR_LAYOUTUNAVAILABLE: 9902 status = -ENODATA; 9903 goto out; 9904 /* 9905 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of 9906 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3). 9907 */ 9908 case -NFS4ERR_BADLAYOUT: 9909 status = -EOVERFLOW; 9910 goto out; 9911 /* 9912 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client 9913 * (or clients) writing to the same RAID stripe except when 9914 * the minlength argument is 0 (see RFC5661 section 18.43.3). 9915 * 9916 * Treat it like we would RECALLCONFLICT -- we retry for a little 9917 * while, and then eventually give up. 9918 */ 9919 case -NFS4ERR_LAYOUTTRYLATER: 9920 if (lgp->args.minlength == 0) { 9921 status = -EOVERFLOW; 9922 goto out; 9923 } 9924 status = -EBUSY; 9925 break; 9926 case -NFS4ERR_RECALLCONFLICT: 9927 case -NFS4ERR_RETURNCONFLICT: 9928 status = -ERECALLCONFLICT; 9929 break; 9930 case -NFS4ERR_DELEG_REVOKED: 9931 case -NFS4ERR_ADMIN_REVOKED: 9932 case -NFS4ERR_EXPIRED: 9933 case -NFS4ERR_BAD_STATEID: 9934 exception->timeout = 0; 9935 spin_lock(&inode->i_lock); 9936 /* If the open stateid was bad, then recover it. 
*/ 9937 if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) || 9938 !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) { 9939 spin_unlock(&inode->i_lock); 9940 exception->state = lgp->args.ctx->state; 9941 exception->stateid = &lgp->args.stateid; 9942 break; 9943 } 9944 9945 /* 9946 * Mark the bad layout state as invalid, then retry 9947 */ 9948 pnfs_mark_layout_stateid_invalid(lo, &head); 9949 spin_unlock(&inode->i_lock); 9950 nfs_commit_inode(inode, 0); 9951 pnfs_free_lseg_list(&head); 9952 status = -EAGAIN; 9953 goto out; 9954 } 9955 9956 err = nfs4_handle_exception(server, nfs4err, exception); 9957 if (!status) { 9958 if (exception->retry) 9959 status = -EAGAIN; 9960 else 9961 status = err; 9962 } 9963 out: 9964 return status; 9965 } 9966 9967 size_t max_response_pages(struct nfs_server *server) 9968 { 9969 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 9970 return nfs_page_array_len(0, max_resp_sz); 9971 } 9972 9973 static void nfs4_layoutget_release(void *calldata) 9974 { 9975 struct nfs4_layoutget *lgp = calldata; 9976 9977 nfs4_sequence_free_slot(&lgp->res.seq_res); 9978 pnfs_layoutget_free(lgp); 9979 } 9980 9981 static const struct rpc_call_ops nfs4_layoutget_call_ops = { 9982 .rpc_call_prepare = nfs4_layoutget_prepare, 9983 .rpc_call_done = nfs4_layoutget_done, 9984 .rpc_release = nfs4_layoutget_release, 9985 }; 9986 9987 struct pnfs_layout_segment * 9988 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, 9989 struct nfs4_exception *exception) 9990 { 9991 struct inode *inode = lgp->args.inode; 9992 struct nfs_server *server = NFS_SERVER(inode); 9993 struct rpc_task *task; 9994 struct rpc_message msg = { 9995 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET], 9996 .rpc_argp = &lgp->args, 9997 .rpc_resp = &lgp->res, 9998 .rpc_cred = lgp->cred, 9999 }; 10000 struct rpc_task_setup task_setup_data = { 10001 .rpc_client = server->client, 10002 .rpc_message = &msg, 10003 .callback_ops = &nfs4_layoutget_call_ops, 10004 .callback_data = lgp, 10005 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF | 10006 RPC_TASK_MOVEABLE, 10007 }; 10008 struct pnfs_layout_segment *lseg = NULL; 10009 int status = 0; 10010 10011 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0); 10012 exception->retry = 0; 10013 10014 task = rpc_run_task(&task_setup_data); 10015 if (IS_ERR(task)) 10016 return ERR_CAST(task); 10017 10018 status = rpc_wait_for_completion_task(task); 10019 if (status != 0) 10020 goto out; 10021 10022 if (task->tk_status < 0) { 10023 exception->retry = 1; 10024 status = nfs4_layoutget_handle_exception(task, lgp, exception); 10025 } else if (lgp->res.layoutp->len == 0) { 10026 exception->retry = 1; 10027 status = -EAGAIN; 10028 nfs4_update_delay(&exception->timeout); 10029 } else 10030 lseg = pnfs_layout_process(lgp); 10031 out: 10032 trace_nfs4_layoutget(lgp->args.ctx, 10033 &lgp->args.range, 10034 &lgp->res.range, 10035 &lgp->res.stateid, 10036 status); 10037 10038 rpc_put_task(task); 10039 dprintk("<-- %s status=%d\n", __func__, status); 10040 if (status) 10041 return ERR_PTR(status); 10042 return lseg; 10043 } 10044 10045 static void 10046 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) 10047 { 10048 struct nfs4_layoutreturn *lrp = calldata; 10049 10050 nfs4_setup_sequence(lrp->clp, 10051 &lrp->args.seq_args, 10052 &lrp->res.seq_res, 10053 task); 10054 if (!pnfs_layout_is_valid(lrp->args.layout)) 10055 rpc_exit(task, 0); 10056 } 10057 10058 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) 
10059 { 10060 struct nfs4_layoutreturn *lrp = calldata; 10061 struct nfs_server *server; 10062 10063 if (!nfs41_sequence_process(task, &lrp->res.seq_res)) 10064 return; 10065 10066 if (task->tk_rpc_status == -ETIMEDOUT) { 10067 lrp->rpc_status = -EAGAIN; 10068 lrp->res.lrs_present = 0; 10069 return; 10070 } 10071 /* 10072 * Was there an RPC level error? Assume the call succeeded, 10073 * and that we need to release the layout 10074 */ 10075 if (task->tk_rpc_status != 0 && RPC_WAS_SENT(task)) { 10076 lrp->res.lrs_present = 0; 10077 return; 10078 } 10079 10080 server = NFS_SERVER(lrp->args.inode); 10081 switch (task->tk_status) { 10082 case -NFS4ERR_OLD_STATEID: 10083 if (nfs4_layout_refresh_old_stateid(&lrp->args.stateid, 10084 &lrp->args.range, 10085 lrp->args.inode)) 10086 goto out_restart; 10087 fallthrough; 10088 default: 10089 task->tk_status = 0; 10090 lrp->res.lrs_present = 0; 10091 fallthrough; 10092 case 0: 10093 break; 10094 case -NFS4ERR_BADSESSION: 10095 case -NFS4ERR_DEADSESSION: 10096 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 10097 nfs4_schedule_session_recovery(server->nfs_client->cl_session, 10098 task->tk_status); 10099 lrp->res.lrs_present = 0; 10100 lrp->rpc_status = -EAGAIN; 10101 task->tk_status = 0; 10102 break; 10103 case -NFS4ERR_DELAY: 10104 if (nfs4_async_handle_error(task, server, NULL, NULL) == 10105 -EAGAIN) 10106 goto out_restart; 10107 lrp->res.lrs_present = 0; 10108 break; 10109 } 10110 return; 10111 out_restart: 10112 task->tk_status = 0; 10113 nfs4_sequence_free_slot(&lrp->res.seq_res); 10114 rpc_restart_call_prepare(task); 10115 } 10116 10117 static void nfs4_layoutreturn_release(void *calldata) 10118 { 10119 struct nfs4_layoutreturn *lrp = calldata; 10120 struct pnfs_layout_hdr *lo = lrp->args.layout; 10121 10122 if (lrp->rpc_status == 0 || !lrp->inode) 10123 pnfs_layoutreturn_free_lsegs( 10124 lo, &lrp->args.stateid, &lrp->args.range, 10125 lrp->res.lrs_present ? 
&lrp->res.stateid : NULL); 10126 else 10127 pnfs_layoutreturn_retry_later(lo, &lrp->args.stateid, 10128 &lrp->args.range); 10129 nfs4_sequence_free_slot(&lrp->res.seq_res); 10130 if (lrp->ld_private.ops && lrp->ld_private.ops->free) 10131 lrp->ld_private.ops->free(&lrp->ld_private); 10132 pnfs_put_layout_hdr(lrp->args.layout); 10133 nfs_iput_and_deactive(lrp->inode); 10134 put_cred(lrp->cred); 10135 kfree(calldata); 10136 } 10137 10138 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = { 10139 .rpc_call_prepare = nfs4_layoutreturn_prepare, 10140 .rpc_call_done = nfs4_layoutreturn_done, 10141 .rpc_release = nfs4_layoutreturn_release, 10142 }; 10143 10144 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, unsigned int flags) 10145 { 10146 struct rpc_task *task; 10147 struct rpc_message msg = { 10148 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN], 10149 .rpc_argp = &lrp->args, 10150 .rpc_resp = &lrp->res, 10151 .rpc_cred = lrp->cred, 10152 }; 10153 struct rpc_task_setup task_setup_data = { 10154 .rpc_client = NFS_SERVER(lrp->args.inode)->client, 10155 .rpc_message = &msg, 10156 .callback_ops = &nfs4_layoutreturn_call_ops, 10157 .callback_data = lrp, 10158 .flags = RPC_TASK_MOVEABLE, 10159 }; 10160 int status = 0; 10161 10162 nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client, 10163 NFS_SP4_MACH_CRED_PNFS_CLEANUP, 10164 &task_setup_data.rpc_client, &msg); 10165 10166 lrp->inode = nfs_igrab_and_active(lrp->args.inode); 10167 if (flags & PNFS_FL_LAYOUTRETURN_ASYNC) { 10168 if (!lrp->inode) { 10169 nfs4_layoutreturn_release(lrp); 10170 return -EAGAIN; 10171 } 10172 task_setup_data.flags |= RPC_TASK_ASYNC; 10173 } 10174 if (!lrp->inode) 10175 flags |= PNFS_FL_LAYOUTRETURN_PRIVILEGED; 10176 if (flags & PNFS_FL_LAYOUTRETURN_PRIVILEGED) 10177 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1, 10178 1); 10179 else 10180 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1, 10181 0); 10182 task = rpc_run_task(&task_setup_data); 10183 if (IS_ERR(task)) 10184 return PTR_ERR(task); 10185 if (!(flags & PNFS_FL_LAYOUTRETURN_ASYNC)) 10186 status = task->tk_status; 10187 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status); 10188 dprintk("<-- %s status=%d\n", __func__, status); 10189 rpc_put_task(task); 10190 return status; 10191 } 10192 10193 static int 10194 _nfs4_proc_getdeviceinfo(struct nfs_server *server, 10195 struct pnfs_device *pdev, 10196 const struct cred *cred) 10197 { 10198 struct nfs4_getdeviceinfo_args args = { 10199 .pdev = pdev, 10200 .notify_types = NOTIFY_DEVICEID4_CHANGE | 10201 NOTIFY_DEVICEID4_DELETE, 10202 }; 10203 struct nfs4_getdeviceinfo_res res = { 10204 .pdev = pdev, 10205 }; 10206 struct rpc_message msg = { 10207 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO], 10208 .rpc_argp = &args, 10209 .rpc_resp = &res, 10210 .rpc_cred = cred, 10211 }; 10212 int status; 10213 10214 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 10215 if (res.notification & ~args.notify_types) 10216 dprintk("%s: unsupported notification\n", __func__); 10217 if (res.notification != args.notify_types) 10218 pdev->nocache = 1; 10219 10220 trace_nfs4_getdeviceinfo(server, &pdev->dev_id, status); 10221 10222 dprintk("<-- %s status=%d\n", __func__, status); 10223 10224 return status; 10225 } 10226 10227 int nfs4_proc_getdeviceinfo(struct nfs_server *server, 10228 struct pnfs_device *pdev, 10229 const struct cred *cred) 10230 { 10231 struct nfs4_exception exception = { }; 10232 int err; 10233 
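	/* Retry GETDEVICEINFO for as long as nfs4_handle_exception() asks us to. */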
10234 do { 10235 err = nfs4_handle_exception(server, 10236 _nfs4_proc_getdeviceinfo(server, pdev, cred), 10237 &exception); 10238 } while (exception.retry); 10239 return err; 10240 } 10241 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo); 10242 10243 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata) 10244 { 10245 struct nfs4_layoutcommit_data *data = calldata; 10246 struct nfs_server *server = NFS_SERVER(data->args.inode); 10247 10248 nfs4_setup_sequence(server->nfs_client, 10249 &data->args.seq_args, 10250 &data->res.seq_res, 10251 task); 10252 } 10253 10254 static void 10255 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata) 10256 { 10257 struct nfs4_layoutcommit_data *data = calldata; 10258 struct nfs_server *server = NFS_SERVER(data->args.inode); 10259 10260 if (!nfs41_sequence_done(task, &data->res.seq_res)) 10261 return; 10262 10263 switch (task->tk_status) { /* Just ignore these failures */ 10264 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */ 10265 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */ 10266 case -NFS4ERR_BADLAYOUT: /* no layout */ 10267 case -NFS4ERR_GRACE: /* loca_reclaim always false */ 10268 task->tk_status = 0; 10269 break; 10270 case 0: 10271 break; 10272 default: 10273 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) { 10274 rpc_restart_call_prepare(task); 10275 return; 10276 } 10277 } 10278 } 10279 10280 static void nfs4_layoutcommit_release(void *calldata) 10281 { 10282 struct nfs4_layoutcommit_data *data = calldata; 10283 10284 pnfs_cleanup_layoutcommit(data); 10285 nfs_post_op_update_inode_force_wcc(data->args.inode, 10286 data->res.fattr); 10287 put_cred(data->cred); 10288 nfs_iput_and_deactive(data->inode); 10289 kfree(data); 10290 } 10291 10292 static const struct rpc_call_ops nfs4_layoutcommit_ops = { 10293 .rpc_call_prepare = nfs4_layoutcommit_prepare, 10294 .rpc_call_done = nfs4_layoutcommit_done, 10295 .rpc_release = nfs4_layoutcommit_release, 10296 }; 10297 10298 int 10299 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync) 10300 { 10301 struct rpc_message msg = { 10302 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT], 10303 .rpc_argp = &data->args, 10304 .rpc_resp = &data->res, 10305 .rpc_cred = data->cred, 10306 }; 10307 struct rpc_task_setup task_setup_data = { 10308 .task = &data->task, 10309 .rpc_client = NFS_CLIENT(data->args.inode), 10310 .rpc_message = &msg, 10311 .callback_ops = &nfs4_layoutcommit_ops, 10312 .callback_data = data, 10313 .flags = RPC_TASK_MOVEABLE, 10314 }; 10315 struct rpc_task *task; 10316 int status = 0; 10317 10318 dprintk("NFS: initiating layoutcommit call.
sync %d " 10319 "lbw: %llu inode %lu\n", sync, 10320 data->args.lastbytewritten, 10321 data->args.inode->i_ino); 10322 10323 if (!sync) { 10324 data->inode = nfs_igrab_and_active(data->args.inode); 10325 if (data->inode == NULL) { 10326 nfs4_layoutcommit_release(data); 10327 return -EAGAIN; 10328 } 10329 task_setup_data.flags = RPC_TASK_ASYNC; 10330 } 10331 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0); 10332 task = rpc_run_task(&task_setup_data); 10333 if (IS_ERR(task)) 10334 return PTR_ERR(task); 10335 if (sync) 10336 status = task->tk_status; 10337 trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status); 10338 dprintk("%s: status %d\n", __func__, status); 10339 rpc_put_task(task); 10340 return status; 10341 } 10342 10343 /* 10344 * Use the state managment nfs_client cl_rpcclient, which uses krb5i (if 10345 * possible) as per RFC3530bis and RFC5661 Security Considerations sections 10346 */ 10347 static int _nfs41_proc_secinfo_no_name(struct nfs_server *server, 10348 struct nfs_fh *fhandle, 10349 struct nfs4_secinfo_flavors *flavors, 10350 bool use_integrity) 10351 { 10352 struct nfs41_secinfo_no_name_args args = { 10353 .style = SECINFO_STYLE_CURRENT_FH, 10354 }; 10355 struct nfs4_secinfo_res res = { 10356 .flavors = flavors, 10357 }; 10358 struct rpc_message msg = { 10359 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME], 10360 .rpc_argp = &args, 10361 .rpc_resp = &res, 10362 }; 10363 struct nfs4_call_sync_data data = { 10364 .seq_server = server, 10365 .seq_args = &args.seq_args, 10366 .seq_res = &res.seq_res, 10367 }; 10368 struct rpc_task_setup task_setup = { 10369 .rpc_client = server->client, 10370 .rpc_message = &msg, 10371 .callback_ops = server->nfs_client->cl_mvops->call_sync_ops, 10372 .callback_data = &data, 10373 .flags = RPC_TASK_NO_ROUND_ROBIN, 10374 }; 10375 const struct cred *cred = NULL; 10376 int status; 10377 10378 if (use_integrity) { 10379 task_setup.rpc_client = server->nfs_client->cl_rpcclient; 10380 10381 cred = nfs4_get_clid_cred(server->nfs_client); 10382 msg.rpc_cred = cred; 10383 } 10384 10385 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 10386 status = nfs4_call_sync_custom(&task_setup); 10387 dprintk("<-- %s status=%d\n", __func__, status); 10388 10389 put_cred(cred); 10390 10391 return status; 10392 } 10393 10394 static int nfs41_proc_secinfo_no_name(struct nfs_server *server, 10395 struct nfs_fh *fhandle, 10396 struct nfs4_secinfo_flavors *flavors) 10397 { 10398 struct nfs4_exception exception = { 10399 .interruptible = true, 10400 }; 10401 int err; 10402 do { 10403 /* first try using integrity protection */ 10404 err = -NFS4ERR_WRONGSEC; 10405 10406 /* try to use integrity protection with machine cred */ 10407 if (_nfs4_is_integrity_protected(server->nfs_client)) 10408 err = _nfs41_proc_secinfo_no_name(server, fhandle, 10409 flavors, true); 10410 10411 /* 10412 * if unable to use integrity protection, or SECINFO with 10413 * integrity protection returns NFS4ERR_WRONGSEC (which is 10414 * disallowed by spec, but exists in deployed servers) use 10415 * the current filesystem's rpc_client and the user cred. 
10416 */ 10417 if (err == -NFS4ERR_WRONGSEC) 10418 err = _nfs41_proc_secinfo_no_name(server, fhandle, 10419 flavors, false); 10420 10421 switch (err) { 10422 case 0: 10423 case -NFS4ERR_WRONGSEC: 10424 case -ENOTSUPP: 10425 goto out; 10426 default: 10427 err = nfs4_handle_exception(server, err, &exception); 10428 } 10429 } while (exception.retry); 10430 out: 10431 return err; 10432 } 10433 10434 static int nfs41_find_root_sec(struct nfs_server *server, 10435 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 10436 { 10437 int err; 10438 struct page *page; 10439 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR; 10440 struct nfs4_secinfo_flavors *flavors; 10441 struct nfs4_secinfo4 *secinfo; 10442 int i; 10443 10444 page = alloc_page(GFP_KERNEL); 10445 if (!page) { 10446 err = -ENOMEM; 10447 goto out; 10448 } 10449 10450 flavors = page_address(page); 10451 err = nfs41_proc_secinfo_no_name(server, fhandle, flavors); 10452 10453 /* 10454 * Fall back on "guess and check" method if 10455 * the server doesn't support SECINFO_NO_NAME 10456 */ 10457 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) { 10458 err = nfs4_find_root_sec(server, fhandle, fattr); 10459 goto out_freepage; 10460 } 10461 if (err) 10462 goto out_freepage; 10463 10464 for (i = 0; i < flavors->num_flavors; i++) { 10465 secinfo = &flavors->flavors[i]; 10466 10467 switch (secinfo->flavor) { 10468 case RPC_AUTH_NULL: 10469 case RPC_AUTH_UNIX: 10470 case RPC_AUTH_GSS: 10471 flavor = rpcauth_get_pseudoflavor(secinfo->flavor, 10472 &secinfo->flavor_info); 10473 break; 10474 default: 10475 flavor = RPC_AUTH_MAXFLAVOR; 10476 break; 10477 } 10478 10479 if (!nfs_auth_info_match(&server->auth_info, flavor)) 10480 flavor = RPC_AUTH_MAXFLAVOR; 10481 10482 if (flavor != RPC_AUTH_MAXFLAVOR) { 10483 err = nfs4_lookup_root_sec(server, fhandle, fattr, 10484 flavor); 10485 if (!err) 10486 break; 10487 } 10488 } 10489 10490 if (flavor == RPC_AUTH_MAXFLAVOR) 10491 err = -EPERM; 10492 10493 out_freepage: 10494 put_page(page); 10495 if (err == -EACCES) 10496 return -EPERM; 10497 out: 10498 return err; 10499 } 10500 10501 static int _nfs41_test_stateid(struct nfs_server *server, 10502 const nfs4_stateid *stateid, 10503 const struct cred *cred) 10504 { 10505 int status; 10506 struct nfs41_test_stateid_args args = { 10507 .stateid = *stateid, 10508 }; 10509 struct nfs41_test_stateid_res res; 10510 struct rpc_message msg = { 10511 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID], 10512 .rpc_argp = &args, 10513 .rpc_resp = &res, 10514 .rpc_cred = cred, 10515 }; 10516 struct rpc_clnt *rpc_client = server->client; 10517 10518 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 10519 &rpc_client, &msg); 10520 10521 dprintk("NFS call test_stateid %p\n", stateid); 10522 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 10523 status = nfs4_call_sync_sequence(rpc_client, server, &msg, 10524 &args.seq_args, &res.seq_res); 10525 if (status != NFS_OK) { 10526 dprintk("NFS reply test_stateid: failed, %d\n", status); 10527 return status; 10528 } 10529 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status); 10530 return -res.status; 10531 } 10532 10533 static void nfs4_handle_delay_or_session_error(struct nfs_server *server, 10534 int err, struct nfs4_exception *exception) 10535 { 10536 exception->retry = 0; 10537 switch(err) { 10538 case -NFS4ERR_DELAY: 10539 case -NFS4ERR_RETRY_UNCACHED_REP: 10540 nfs4_handle_exception(server, err, exception); 10541 break; 10542 case -NFS4ERR_BADSESSION: 10543 case -NFS4ERR_BADSLOT: 10544 case 
-NFS4ERR_BAD_HIGH_SLOT: 10545 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 10546 case -NFS4ERR_DEADSESSION: 10547 nfs4_do_handle_exception(server, err, exception); 10548 } 10549 } 10550 10551 /** 10552 * nfs41_test_stateid - perform a TEST_STATEID operation 10553 * 10554 * @server: server / transport on which to perform the operation 10555 * @stateid: state ID to test 10556 * @cred: credential 10557 * 10558 * Returns NFS_OK if the server recognizes that "stateid" is valid. 10559 * Otherwise a negative NFS4ERR value is returned if the operation 10560 * failed or the state ID is not currently valid. 10561 */ 10562 static int nfs41_test_stateid(struct nfs_server *server, 10563 const nfs4_stateid *stateid, 10564 const struct cred *cred) 10565 { 10566 struct nfs4_exception exception = { 10567 .interruptible = true, 10568 }; 10569 int err; 10570 do { 10571 err = _nfs41_test_stateid(server, stateid, cred); 10572 nfs4_handle_delay_or_session_error(server, err, &exception); 10573 } while (exception.retry); 10574 return err; 10575 } 10576 10577 struct nfs_free_stateid_data { 10578 struct nfs_server *server; 10579 struct nfs41_free_stateid_args args; 10580 struct nfs41_free_stateid_res res; 10581 }; 10582 10583 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata) 10584 { 10585 struct nfs_free_stateid_data *data = calldata; 10586 nfs4_setup_sequence(data->server->nfs_client, 10587 &data->args.seq_args, 10588 &data->res.seq_res, 10589 task); 10590 } 10591 10592 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata) 10593 { 10594 struct nfs_free_stateid_data *data = calldata; 10595 10596 nfs41_sequence_done(task, &data->res.seq_res); 10597 10598 switch (task->tk_status) { 10599 case -NFS4ERR_DELAY: 10600 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN) 10601 rpc_restart_call_prepare(task); 10602 } 10603 } 10604 10605 static void nfs41_free_stateid_release(void *calldata) 10606 { 10607 struct nfs_free_stateid_data *data = calldata; 10608 struct nfs_client *clp = data->server->nfs_client; 10609 10610 nfs_put_client(clp); 10611 kfree(calldata); 10612 } 10613 10614 static const struct rpc_call_ops nfs41_free_stateid_ops = { 10615 .rpc_call_prepare = nfs41_free_stateid_prepare, 10616 .rpc_call_done = nfs41_free_stateid_done, 10617 .rpc_release = nfs41_free_stateid_release, 10618 }; 10619 10620 /** 10621 * nfs41_free_stateid - perform a FREE_STATEID operation 10622 * 10623 * @server: server / transport on which to perform the operation 10624 * @stateid: state ID to release 10625 * @cred: credential 10626 * @privileged: set to true if this call needs to be privileged 10627 * 10628 * Note: this function is always asynchronous. 
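 * The stateid is tagged NFS4_FREED_STATEID_TYPE as soon as the RPC task
 * has been launched, so a zero return only means the call was sent, not
 * that the server has processed it.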
10629 */ 10630 static int nfs41_free_stateid(struct nfs_server *server, 10631 nfs4_stateid *stateid, 10632 const struct cred *cred, 10633 bool privileged) 10634 { 10635 struct rpc_message msg = { 10636 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID], 10637 .rpc_cred = cred, 10638 }; 10639 struct rpc_task_setup task_setup = { 10640 .rpc_client = server->client, 10641 .rpc_message = &msg, 10642 .callback_ops = &nfs41_free_stateid_ops, 10643 .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE, 10644 }; 10645 struct nfs_free_stateid_data *data; 10646 struct rpc_task *task; 10647 struct nfs_client *clp = server->nfs_client; 10648 10649 if (!refcount_inc_not_zero(&clp->cl_count)) 10650 return -EIO; 10651 10652 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 10653 &task_setup.rpc_client, &msg); 10654 10655 dprintk("NFS call free_stateid %p\n", stateid); 10656 data = kmalloc(sizeof(*data), GFP_KERNEL); 10657 if (!data) 10658 return -ENOMEM; 10659 data->server = server; 10660 nfs4_stateid_copy(&data->args.stateid, stateid); 10661 10662 task_setup.callback_data = data; 10663 10664 msg.rpc_argp = &data->args; 10665 msg.rpc_resp = &data->res; 10666 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, privileged); 10667 task = rpc_run_task(&task_setup); 10668 if (IS_ERR(task)) 10669 return PTR_ERR(task); 10670 rpc_put_task(task); 10671 stateid->type = NFS4_FREED_STATEID_TYPE; 10672 return 0; 10673 } 10674 10675 static void 10676 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp) 10677 { 10678 const struct cred *cred = lsp->ls_state->owner->so_cred; 10679 10680 nfs41_free_stateid(server, &lsp->ls_stateid, cred, false); 10681 nfs4_free_lock_state(server, lsp); 10682 } 10683 10684 static bool nfs41_match_stateid(const nfs4_stateid *s1, 10685 const nfs4_stateid *s2) 10686 { 10687 trace_nfs41_match_stateid(s1, s2); 10688 10689 if (s1->type != s2->type) 10690 return false; 10691 10692 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0) 10693 return false; 10694 10695 if (s1->seqid == s2->seqid) 10696 return true; 10697 10698 return s1->seqid == 0 || s2->seqid == 0; 10699 } 10700 10701 #endif /* CONFIG_NFS_V4_1 */ 10702 10703 static bool nfs4_match_stateid(const nfs4_stateid *s1, 10704 const nfs4_stateid *s2) 10705 { 10706 trace_nfs4_match_stateid(s1, s2); 10707 10708 return nfs4_stateid_match(s1, s2); 10709 } 10710 10711 10712 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = { 10713 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 10714 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 10715 .recover_open = nfs4_open_reclaim, 10716 .recover_lock = nfs4_lock_reclaim, 10717 .establish_clid = nfs4_init_clientid, 10718 .detect_trunking = nfs40_discover_server_trunking, 10719 }; 10720 10721 #if defined(CONFIG_NFS_V4_1) 10722 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { 10723 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 10724 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 10725 .recover_open = nfs4_open_reclaim, 10726 .recover_lock = nfs4_lock_reclaim, 10727 .establish_clid = nfs41_init_clientid, 10728 .reclaim_complete = nfs41_proc_reclaim_complete, 10729 .detect_trunking = nfs41_discover_server_trunking, 10730 }; 10731 #endif /* CONFIG_NFS_V4_1 */ 10732 10733 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { 10734 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 10735 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 10736 .recover_open = nfs40_open_expired, 10737 .recover_lock = nfs4_lock_expired, 
10738 .establish_clid = nfs4_init_clientid, 10739 }; 10740 10741 #if defined(CONFIG_NFS_V4_1) 10742 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = { 10743 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 10744 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 10745 .recover_open = nfs41_open_expired, 10746 .recover_lock = nfs41_lock_expired, 10747 .establish_clid = nfs41_init_clientid, 10748 }; 10749 #endif /* CONFIG_NFS_V4_1 */ 10750 10751 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = { 10752 .sched_state_renewal = nfs4_proc_async_renew, 10753 .get_state_renewal_cred = nfs4_get_renew_cred, 10754 .renew_lease = nfs4_proc_renew, 10755 }; 10756 10757 #if defined(CONFIG_NFS_V4_1) 10758 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = { 10759 .sched_state_renewal = nfs41_proc_async_sequence, 10760 .get_state_renewal_cred = nfs4_get_machine_cred, 10761 .renew_lease = nfs4_proc_sequence, 10762 }; 10763 #endif 10764 10765 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = { 10766 .get_locations = _nfs40_proc_get_locations, 10767 .fsid_present = _nfs40_proc_fsid_present, 10768 }; 10769 10770 #if defined(CONFIG_NFS_V4_1) 10771 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = { 10772 .get_locations = _nfs41_proc_get_locations, 10773 .fsid_present = _nfs41_proc_fsid_present, 10774 }; 10775 #endif /* CONFIG_NFS_V4_1 */ 10776 10777 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = { 10778 .minor_version = 0, 10779 .init_caps = NFS_CAP_READDIRPLUS 10780 | NFS_CAP_ATOMIC_OPEN 10781 | NFS_CAP_POSIX_LOCK, 10782 .init_client = nfs40_init_client, 10783 .shutdown_client = nfs40_shutdown_client, 10784 .match_stateid = nfs4_match_stateid, 10785 .find_root_sec = nfs4_find_root_sec, 10786 .free_lock_state = nfs4_release_lockowner, 10787 .test_and_free_expired = nfs40_test_and_free_expired_stateid, 10788 .alloc_seqid = nfs_alloc_seqid, 10789 .call_sync_ops = &nfs40_call_sync_ops, 10790 .reboot_recovery_ops = &nfs40_reboot_recovery_ops, 10791 .nograce_recovery_ops = &nfs40_nograce_recovery_ops, 10792 .state_renewal_ops = &nfs40_state_renewal_ops, 10793 .mig_recovery_ops = &nfs40_mig_recovery_ops, 10794 }; 10795 10796 #if defined(CONFIG_NFS_V4_1) 10797 static struct nfs_seqid * 10798 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2) 10799 { 10800 return NULL; 10801 } 10802 10803 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { 10804 .minor_version = 1, 10805 .init_caps = NFS_CAP_READDIRPLUS 10806 | NFS_CAP_ATOMIC_OPEN 10807 | NFS_CAP_POSIX_LOCK 10808 | NFS_CAP_STATEID_NFSV41 10809 | NFS_CAP_ATOMIC_OPEN_V1 10810 | NFS_CAP_LGOPEN 10811 | NFS_CAP_MOVEABLE, 10812 .init_client = nfs41_init_client, 10813 .shutdown_client = nfs41_shutdown_client, 10814 .match_stateid = nfs41_match_stateid, 10815 .find_root_sec = nfs41_find_root_sec, 10816 .free_lock_state = nfs41_free_lock_state, 10817 .test_and_free_expired = nfs41_test_and_free_expired_stateid, 10818 .alloc_seqid = nfs_alloc_no_seqid, 10819 .session_trunk = nfs4_test_session_trunk, 10820 .call_sync_ops = &nfs41_call_sync_ops, 10821 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 10822 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 10823 .state_renewal_ops = &nfs41_state_renewal_ops, 10824 .mig_recovery_ops = &nfs41_mig_recovery_ops, 10825 }; 10826 #endif 10827 10828 #if defined(CONFIG_NFS_V4_2) 10829 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = { 10830 .minor_version = 2, 10831 .init_caps = 
NFS_CAP_READDIRPLUS 10832 | NFS_CAP_ATOMIC_OPEN 10833 | NFS_CAP_POSIX_LOCK 10834 | NFS_CAP_STATEID_NFSV41 10835 | NFS_CAP_ATOMIC_OPEN_V1 10836 | NFS_CAP_LGOPEN 10837 | NFS_CAP_ALLOCATE 10838 | NFS_CAP_COPY 10839 | NFS_CAP_OFFLOAD_CANCEL 10840 | NFS_CAP_COPY_NOTIFY 10841 | NFS_CAP_DEALLOCATE 10842 | NFS_CAP_ZERO_RANGE 10843 | NFS_CAP_SEEK 10844 | NFS_CAP_LAYOUTSTATS 10845 | NFS_CAP_CLONE 10846 | NFS_CAP_LAYOUTERROR 10847 | NFS_CAP_READ_PLUS 10848 | NFS_CAP_MOVEABLE 10849 | NFS_CAP_OFFLOAD_STATUS, 10850 .init_client = nfs41_init_client, 10851 .shutdown_client = nfs41_shutdown_client, 10852 .match_stateid = nfs41_match_stateid, 10853 .find_root_sec = nfs41_find_root_sec, 10854 .free_lock_state = nfs41_free_lock_state, 10855 .call_sync_ops = &nfs41_call_sync_ops, 10856 .test_and_free_expired = nfs41_test_and_free_expired_stateid, 10857 .alloc_seqid = nfs_alloc_no_seqid, 10858 .session_trunk = nfs4_test_session_trunk, 10859 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 10860 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 10861 .state_renewal_ops = &nfs41_state_renewal_ops, 10862 .mig_recovery_ops = &nfs41_mig_recovery_ops, 10863 }; 10864 #endif 10865 10866 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = { 10867 [0] = &nfs_v4_0_minor_ops, 10868 #if defined(CONFIG_NFS_V4_1) 10869 [1] = &nfs_v4_1_minor_ops, 10870 #endif 10871 #if defined(CONFIG_NFS_V4_2) 10872 [2] = &nfs_v4_2_minor_ops, 10873 #endif 10874 }; 10875 10876 static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size) 10877 { 10878 ssize_t error, error2, error3, error4 = 0; 10879 size_t left = size; 10880 10881 error = generic_listxattr(dentry, list, left); 10882 if (error < 0) 10883 return error; 10884 if (list) { 10885 list += error; 10886 left -= error; 10887 } 10888 10889 error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, left); 10890 if (error2 < 0) 10891 return error2; 10892 10893 if (list) { 10894 list += error2; 10895 left -= error2; 10896 } 10897 10898 error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, left); 10899 if (error3 < 0) 10900 return error3; 10901 if (list) { 10902 list += error3; 10903 left -= error3; 10904 } 10905 10906 if (!nfs_server_capable(d_inode(dentry), NFS_CAP_SECURITY_LABEL)) { 10907 error4 = security_inode_listsecurity(d_inode(dentry), list, left); 10908 if (error4 < 0) 10909 return error4; 10910 } 10911 10912 error += error2 + error3 + error4; 10913 if (size && error > size) 10914 return -ERANGE; 10915 return error; 10916 } 10917 10918 static void nfs4_enable_swap(struct inode *inode) 10919 { 10920 /* The state manager thread must always be running. 10921 * It will notice the client is a swapper, and stay put. 10922 */ 10923 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 10924 10925 nfs4_schedule_state_manager(clp); 10926 } 10927 10928 static void nfs4_disable_swap(struct inode *inode) 10929 { 10930 /* The state manager thread will now exit once it is 10931 * woken. 
10932 */ 10933 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 10934 10935 set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state); 10936 clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state); 10937 wake_up_var(&clp->cl_state); 10938 } 10939 10940 static const struct inode_operations nfs4_dir_inode_operations = { 10941 .create = nfs_create, 10942 .lookup = nfs_lookup, 10943 .atomic_open = nfs_atomic_open, 10944 .link = nfs_link, 10945 .unlink = nfs_unlink, 10946 .symlink = nfs_symlink, 10947 .mkdir = nfs_mkdir, 10948 .rmdir = nfs_rmdir, 10949 .mknod = nfs_mknod, 10950 .rename = nfs_rename, 10951 .permission = nfs_permission, 10952 .getattr = nfs_getattr, 10953 .setattr = nfs_setattr, 10954 .listxattr = nfs4_listxattr, 10955 }; 10956 10957 static const struct inode_operations nfs4_file_inode_operations = { 10958 .permission = nfs_permission, 10959 .getattr = nfs_getattr, 10960 .setattr = nfs_setattr, 10961 .listxattr = nfs4_listxattr, 10962 }; 10963 10964 static struct nfs_server *nfs4_clone_server(struct nfs_server *source, 10965 struct nfs_fh *fh, struct nfs_fattr *fattr, 10966 rpc_authflavor_t flavor) 10967 { 10968 struct nfs_server *server; 10969 int error; 10970 10971 server = nfs_clone_server(source, fh, fattr, flavor); 10972 if (IS_ERR(server)) 10973 return server; 10974 10975 error = nfs4_delegation_hash_alloc(server); 10976 if (error) { 10977 nfs_free_server(server); 10978 return ERR_PTR(error); 10979 } 10980 10981 return server; 10982 } 10983 10984 const struct nfs_rpc_ops nfs_v4_clientops = { 10985 .version = 4, /* protocol version */ 10986 .dentry_ops = &nfs4_dentry_operations, 10987 .dir_inode_ops = &nfs4_dir_inode_operations, 10988 .file_inode_ops = &nfs4_file_inode_operations, 10989 .file_ops = &nfs4_file_operations, 10990 .getroot = nfs4_proc_get_root, 10991 .submount = nfs4_submount, 10992 .try_get_tree = nfs4_try_get_tree, 10993 .getattr = nfs4_proc_getattr, 10994 .setattr = nfs4_proc_setattr, 10995 .lookup = nfs4_proc_lookup, 10996 .lookupp = nfs4_proc_lookupp, 10997 .access = nfs4_proc_access, 10998 .readlink = nfs4_proc_readlink, 10999 .create = nfs4_proc_create, 11000 .remove = nfs4_proc_remove, 11001 .unlink_setup = nfs4_proc_unlink_setup, 11002 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare, 11003 .unlink_done = nfs4_proc_unlink_done, 11004 .rename_setup = nfs4_proc_rename_setup, 11005 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare, 11006 .rename_done = nfs4_proc_rename_done, 11007 .link = nfs4_proc_link, 11008 .symlink = nfs4_proc_symlink, 11009 .mkdir = nfs4_proc_mkdir, 11010 .rmdir = nfs4_proc_rmdir, 11011 .readdir = nfs4_proc_readdir, 11012 .mknod = nfs4_proc_mknod, 11013 .statfs = nfs4_proc_statfs, 11014 .fsinfo = nfs4_proc_fsinfo, 11015 .pathconf = nfs4_proc_pathconf, 11016 .set_capabilities = nfs4_server_capabilities, 11017 .decode_dirent = nfs4_decode_dirent, 11018 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare, 11019 .read_setup = nfs4_proc_read_setup, 11020 .read_done = nfs4_read_done, 11021 .write_setup = nfs4_proc_write_setup, 11022 .write_done = nfs4_write_done, 11023 .commit_setup = nfs4_proc_commit_setup, 11024 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare, 11025 .commit_done = nfs4_commit_done, 11026 .lock = nfs4_proc_lock, 11027 .clear_acl_cache = nfs4_zap_acl_attr, 11028 .close_context = nfs4_close_context, 11029 .open_context = nfs4_atomic_open, 11030 .have_delegation = nfs4_have_delegation, 11031 .return_delegation = nfs4_inode_return_delegation, 11032 .alloc_client = nfs4_alloc_client, 11033 .init_client = nfs4_init_client, 11034 
.free_client = nfs4_free_client, 11035 .create_server = nfs4_create_server, 11036 .clone_server = nfs4_clone_server, 11037 .discover_trunking = nfs4_discover_trunking, 11038 .enable_swap = nfs4_enable_swap, 11039 .disable_swap = nfs4_disable_swap, 11040 }; 11041 11042 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { 11043 .name = XATTR_NAME_NFSV4_ACL, 11044 .list = nfs4_xattr_list_nfs4_acl, 11045 .get = nfs4_xattr_get_nfs4_acl, 11046 .set = nfs4_xattr_set_nfs4_acl, 11047 }; 11048 11049 #if defined(CONFIG_NFS_V4_1) 11050 static const struct xattr_handler nfs4_xattr_nfs4_dacl_handler = { 11051 .name = XATTR_NAME_NFSV4_DACL, 11052 .list = nfs4_xattr_list_nfs4_dacl, 11053 .get = nfs4_xattr_get_nfs4_dacl, 11054 .set = nfs4_xattr_set_nfs4_dacl, 11055 }; 11056 11057 static const struct xattr_handler nfs4_xattr_nfs4_sacl_handler = { 11058 .name = XATTR_NAME_NFSV4_SACL, 11059 .list = nfs4_xattr_list_nfs4_sacl, 11060 .get = nfs4_xattr_get_nfs4_sacl, 11061 .set = nfs4_xattr_set_nfs4_sacl, 11062 }; 11063 #endif 11064 11065 #ifdef CONFIG_NFS_V4_2 11066 static const struct xattr_handler nfs4_xattr_nfs4_user_handler = { 11067 .prefix = XATTR_USER_PREFIX, 11068 .get = nfs4_xattr_get_nfs4_user, 11069 .set = nfs4_xattr_set_nfs4_user, 11070 }; 11071 #endif 11072 11073 const struct xattr_handler * const nfs4_xattr_handlers[] = { 11074 &nfs4_xattr_nfs4_acl_handler, 11075 #if defined(CONFIG_NFS_V4_1) 11076 &nfs4_xattr_nfs4_dacl_handler, 11077 &nfs4_xattr_nfs4_sacl_handler, 11078 #endif 11079 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 11080 &nfs4_xattr_nfs4_label_handler, 11081 #endif 11082 #ifdef CONFIG_NFS_V4_2 11083 &nfs4_xattr_nfs4_user_handler, 11084 #endif 11085 NULL 11086 }; 11087