1 /* 2 * fs/nfs/nfs4proc.c 3 * 4 * Client-side procedure declarations for NFSv4. 5 * 6 * Copyright (c) 2002 The Regents of the University of Michigan. 7 * All rights reserved. 8 * 9 * Kendrick Smith <kmsmith@umich.edu> 10 * Andy Adamson <andros@umich.edu> 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. Neither the name of the University nor the names of its 22 * contributors may be used to endorse or promote products derived 23 * from this software without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38 #include <linux/mm.h> 39 #include <linux/delay.h> 40 #include <linux/errno.h> 41 #include <linux/string.h> 42 #include <linux/ratelimit.h> 43 #include <linux/printk.h> 44 #include <linux/slab.h> 45 #include <linux/sunrpc/clnt.h> 46 #include <linux/nfs.h> 47 #include <linux/nfs4.h> 48 #include <linux/nfs_fs.h> 49 #include <linux/nfs_page.h> 50 #include <linux/nfs_mount.h> 51 #include <linux/namei.h> 52 #include <linux/mount.h> 53 #include <linux/module.h> 54 #include <linux/xattr.h> 55 #include <linux/utsname.h> 56 #include <linux/freezer.h> 57 #include <linux/iversion.h> 58 59 #include "nfs4_fs.h" 60 #include "delegation.h" 61 #include "internal.h" 62 #include "iostat.h" 63 #include "callback.h" 64 #include "pnfs.h" 65 #include "netns.h" 66 #include "sysfs.h" 67 #include "nfs4idmap.h" 68 #include "nfs4session.h" 69 #include "fscache.h" 70 #include "nfs42.h" 71 72 #include "nfs4trace.h" 73 74 #define NFSDBG_FACILITY NFSDBG_PROC 75 76 #define NFS4_BITMASK_SZ 3 77 78 #define NFS4_POLL_RETRY_MIN (HZ/10) 79 #define NFS4_POLL_RETRY_MAX (15*HZ) 80 81 /* file attributes which can be mapped to nfs attributes */ 82 #define NFS4_VALID_ATTRS (ATTR_MODE \ 83 | ATTR_UID \ 84 | ATTR_GID \ 85 | ATTR_SIZE \ 86 | ATTR_ATIME \ 87 | ATTR_MTIME \ 88 | ATTR_CTIME \ 89 | ATTR_ATIME_SET \ 90 | ATTR_MTIME_SET) 91 92 struct nfs4_opendata; 93 static int _nfs4_recover_proc_open(struct nfs4_opendata *data); 94 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); 95 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr); 96 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, 97 struct nfs_fattr *fattr, struct inode *inode); 98 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred, 99 struct nfs_fattr *fattr, struct iattr *sattr, 100 struct nfs_open_context *ctx, struct nfs4_label *ilabel); 101 #ifdef CONFIG_NFS_V4_1 102 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, 103 const struct cred *cred, 104 struct nfs4_slot *slot, 105 bool is_privileged); 106 static int nfs41_test_stateid(struct nfs_server *, const nfs4_stateid *, 107 const struct cred *); 108 static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *, 109 const struct cred *, bool); 110 #endif 111 112 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 113 static inline struct nfs4_label * 114 nfs4_label_init_security(struct inode *dir, struct dentry *dentry, 115 struct iattr *sattr, struct nfs4_label *label) 116 { 117 struct lsm_context shim; 118 int err; 119 120 if (label == NULL) 121 return NULL; 122 123 if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0) 124 return NULL; 125 126 label->lfs = 0; 127 label->pi = 0; 128 label->len = 0; 129 label->label = NULL; 130 131 err = security_dentry_init_security(dentry, sattr->ia_mode, 132 &dentry->d_name, NULL, &shim); 133 if (err) 134 return NULL; 135 136 label->label = shim.context; 137 label->len = shim.len; 138 return label; 139 } 140 static inline void 141 nfs4_label_release_security(struct nfs4_label *label) 142 { 143 struct lsm_context shim; 144 145 if (label) { 146 shim.context = label->label; 147 shim.len = label->len; 148 shim.id = LSM_ID_UNDEF; 149 security_release_secctx(&shim); 150 } 151 } 152 static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label) 153 { 154 if (label) 155 return server->attr_bitmask; 156 157 return server->attr_bitmask_nl; 158 } 159 #else 160 static inline struct nfs4_label * 161 nfs4_label_init_security(struct inode *dir, struct dentry *dentry, 162 struct iattr *sattr, struct nfs4_label *l) 163 { return NULL; } 164 static inline void 165 nfs4_label_release_security(struct nfs4_label *label) 166 { return; } 167 static inline u32 * 168 nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label) 169 { return server->attr_bitmask; } 170 #endif 171 172 /* Prevent leaks of NFSv4 errors into userland */ 173 static int nfs4_map_errors(int err) 174 { 175 if (err >= -1000) 176 return err; 177 switch (err) { 178 case -NFS4ERR_RESOURCE: 179 case -NFS4ERR_LAYOUTTRYLATER: 180 case -NFS4ERR_RECALLCONFLICT: 181 case -NFS4ERR_RETURNCONFLICT: 182 return -EREMOTEIO; 183 case -NFS4ERR_WRONGSEC: 184 case -NFS4ERR_WRONG_CRED: 185 return -EPERM; 186 case -NFS4ERR_BADOWNER: 187 case -NFS4ERR_BADNAME: 188 return -EINVAL; 189 case -NFS4ERR_SHARE_DENIED: 190 return -EACCES; 191 case -NFS4ERR_MINOR_VERS_MISMATCH: 192 return -EPROTONOSUPPORT; 193 case -NFS4ERR_FILE_OPEN: 194 return -EBUSY; 195 case -NFS4ERR_NOT_SAME: 196 return -ENOTSYNC; 197 default: 198 dprintk("%s could not handle NFSv4 error %d\n", 199 __func__, -err); 200 break; 201 } 202 return -EIO; 203 } 204 205 /* 206 * This is our standard bitmap for GETATTR requests. 207 */ 208 const u32 nfs4_fattr_bitmap[3] = { 209 FATTR4_WORD0_TYPE 210 | FATTR4_WORD0_CHANGE 211 | FATTR4_WORD0_SIZE 212 | FATTR4_WORD0_FSID 213 | FATTR4_WORD0_FILEID, 214 FATTR4_WORD1_MODE 215 | FATTR4_WORD1_NUMLINKS 216 | FATTR4_WORD1_OWNER 217 | FATTR4_WORD1_OWNER_GROUP 218 | FATTR4_WORD1_RAWDEV 219 | FATTR4_WORD1_SPACE_USED 220 | FATTR4_WORD1_TIME_ACCESS 221 | FATTR4_WORD1_TIME_METADATA 222 | FATTR4_WORD1_TIME_MODIFY 223 | FATTR4_WORD1_MOUNTED_ON_FILEID, 224 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 225 FATTR4_WORD2_SECURITY_LABEL 226 #endif 227 }; 228 229 static const u32 nfs4_pnfs_open_bitmap[3] = { 230 FATTR4_WORD0_TYPE 231 | FATTR4_WORD0_CHANGE 232 | FATTR4_WORD0_SIZE 233 | FATTR4_WORD0_FSID 234 | FATTR4_WORD0_FILEID, 235 FATTR4_WORD1_MODE 236 | FATTR4_WORD1_NUMLINKS 237 | FATTR4_WORD1_OWNER 238 | FATTR4_WORD1_OWNER_GROUP 239 | FATTR4_WORD1_RAWDEV 240 | FATTR4_WORD1_SPACE_USED 241 | FATTR4_WORD1_TIME_ACCESS 242 | FATTR4_WORD1_TIME_METADATA 243 | FATTR4_WORD1_TIME_MODIFY, 244 FATTR4_WORD2_MDSTHRESHOLD 245 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 246 | FATTR4_WORD2_SECURITY_LABEL 247 #endif 248 }; 249 250 static const u32 nfs4_open_noattr_bitmap[3] = { 251 FATTR4_WORD0_TYPE 252 | FATTR4_WORD0_FILEID, 253 }; 254 255 const u32 nfs4_statfs_bitmap[3] = { 256 FATTR4_WORD0_FILES_AVAIL 257 | FATTR4_WORD0_FILES_FREE 258 | FATTR4_WORD0_FILES_TOTAL, 259 FATTR4_WORD1_SPACE_AVAIL 260 | FATTR4_WORD1_SPACE_FREE 261 | FATTR4_WORD1_SPACE_TOTAL 262 }; 263 264 const u32 nfs4_pathconf_bitmap[3] = { 265 FATTR4_WORD0_MAXLINK 266 | FATTR4_WORD0_MAXNAME, 267 0 268 }; 269 270 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE 271 | FATTR4_WORD0_MAXREAD 272 | FATTR4_WORD0_MAXWRITE 273 | FATTR4_WORD0_LEASE_TIME, 274 FATTR4_WORD1_TIME_DELTA 275 | FATTR4_WORD1_FS_LAYOUT_TYPES, 276 FATTR4_WORD2_LAYOUT_BLKSIZE 277 | FATTR4_WORD2_CLONE_BLKSIZE 278 | FATTR4_WORD2_CHANGE_ATTR_TYPE 279 | FATTR4_WORD2_XATTR_SUPPORT 280 }; 281 282 const u32 nfs4_fs_locations_bitmap[3] = { 283 FATTR4_WORD0_CHANGE 284 | FATTR4_WORD0_SIZE 285 | FATTR4_WORD0_FSID 286 | FATTR4_WORD0_FILEID 287 | FATTR4_WORD0_FS_LOCATIONS, 288 FATTR4_WORD1_OWNER 289 | FATTR4_WORD1_OWNER_GROUP 290 | FATTR4_WORD1_RAWDEV 291 | FATTR4_WORD1_SPACE_USED 292 | FATTR4_WORD1_TIME_ACCESS 293 | FATTR4_WORD1_TIME_METADATA 294 | FATTR4_WORD1_TIME_MODIFY 295 | FATTR4_WORD1_MOUNTED_ON_FILEID, 296 }; 297 298 static void nfs4_bitmap_copy_adjust(__u32 *dst, const __u32 *src, 299 struct inode *inode, unsigned long flags) 300 { 301 unsigned long cache_validity; 302 303 memcpy(dst, src, NFS4_BITMASK_SZ*sizeof(*dst)); 304 if (!inode || !nfs_have_read_or_write_delegation(inode)) 305 return; 306 307 cache_validity = READ_ONCE(NFS_I(inode)->cache_validity) | flags; 308 309 /* Remove the attributes over which we have full control */ 310 dst[1] &= ~FATTR4_WORD1_RAWDEV; 311 if (!(cache_validity & NFS_INO_INVALID_SIZE)) 312 dst[0] &= ~FATTR4_WORD0_SIZE; 313 314 if (!(cache_validity & NFS_INO_INVALID_CHANGE)) 315 dst[0] &= ~FATTR4_WORD0_CHANGE; 316 317 if (!(cache_validity & NFS_INO_INVALID_MODE)) 318 dst[1] &= ~FATTR4_WORD1_MODE; 319 if (!(cache_validity & NFS_INO_INVALID_OTHER)) 320 dst[1] &= ~(FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP); 321 322 if (nfs_have_delegated_mtime(inode)) { 323 if (!(cache_validity & NFS_INO_INVALID_ATIME)) 324 dst[1] &= ~FATTR4_WORD1_TIME_ACCESS; 325 if (!(cache_validity & NFS_INO_INVALID_MTIME)) 326 dst[1] &= ~FATTR4_WORD1_TIME_MODIFY; 327 if (!(cache_validity & NFS_INO_INVALID_CTIME)) 328 dst[1] &= ~FATTR4_WORD1_TIME_METADATA; 329 } else if (nfs_have_delegated_atime(inode)) { 330 if (!(cache_validity & NFS_INO_INVALID_ATIME)) 331 dst[1] &= ~FATTR4_WORD1_TIME_ACCESS; 332 } 333 } 334 335 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry, 336 struct nfs4_readdir_arg *readdir) 337 { 338 unsigned int attrs = FATTR4_WORD0_FILEID | FATTR4_WORD0_TYPE; 339 __be32 *start, *p; 340 341 if (cookie > 2) { 342 readdir->cookie = cookie; 343 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier)); 344 return; 345 } 346 347 readdir->cookie = 0; 348 memset(&readdir->verifier, 0, sizeof(readdir->verifier)); 349 if (cookie == 2) 350 return; 351 352 /* 353 * NFSv4 servers do not return entries for '.' and '..' 354 * Therefore, we fake these entries here. We let '.' 355 * have cookie 0 and '..' have cookie 1. Note that 356 * when talking to the server, we always send cookie 0 357 * instead of 1 or 2. 358 */ 359 start = p = kmap_atomic(*readdir->pages); 360 361 if (cookie == 0) { 362 *p++ = xdr_one; /* next */ 363 *p++ = xdr_zero; /* cookie, first word */ 364 *p++ = xdr_one; /* cookie, second word */ 365 *p++ = xdr_one; /* entry len */ 366 memcpy(p, ".\0\0\0", 4); /* entry */ 367 p++; 368 *p++ = xdr_one; /* bitmap length */ 369 *p++ = htonl(attrs); /* bitmap */ 370 *p++ = htonl(12); /* attribute buffer length */ 371 *p++ = htonl(NF4DIR); 372 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry))); 373 } 374 375 *p++ = xdr_one; /* next */ 376 *p++ = xdr_zero; /* cookie, first word */ 377 *p++ = xdr_two; /* cookie, second word */ 378 *p++ = xdr_two; /* entry len */ 379 memcpy(p, "..\0\0", 4); /* entry */ 380 p++; 381 *p++ = xdr_one; /* bitmap length */ 382 *p++ = htonl(attrs); /* bitmap */ 383 *p++ = htonl(12); /* attribute buffer length */ 384 *p++ = htonl(NF4DIR); 385 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent))); 386 387 readdir->pgbase = (char *)p - (char *)start; 388 readdir->count -= readdir->pgbase; 389 kunmap_atomic(start); 390 } 391 392 static void nfs4_fattr_set_prechange(struct nfs_fattr *fattr, u64 version) 393 { 394 if (!(fattr->valid & NFS_ATTR_FATTR_PRECHANGE)) { 395 fattr->pre_change_attr = version; 396 fattr->valid |= NFS_ATTR_FATTR_PRECHANGE; 397 } 398 } 399 400 static void nfs4_test_and_free_stateid(struct nfs_server *server, 401 nfs4_stateid *stateid, 402 const struct cred *cred) 403 { 404 const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops; 405 406 ops->test_and_free_expired(server, stateid, cred); 407 } 408 409 static void __nfs4_free_revoked_stateid(struct nfs_server *server, 410 nfs4_stateid *stateid, 411 const struct cred *cred) 412 { 413 stateid->type = NFS4_REVOKED_STATEID_TYPE; 414 nfs4_test_and_free_stateid(server, stateid, cred); 415 } 416 417 static void nfs4_free_revoked_stateid(struct nfs_server *server, 418 const nfs4_stateid *stateid, 419 const struct cred *cred) 420 { 421 nfs4_stateid tmp; 422 423 nfs4_stateid_copy(&tmp, stateid); 424 __nfs4_free_revoked_stateid(server, &tmp, cred); 425 } 426 427 static long nfs4_update_delay(long *timeout) 428 { 429 long ret; 430 if (!timeout) 431 return NFS4_POLL_RETRY_MAX; 432 if (*timeout <= 0) 433 *timeout = NFS4_POLL_RETRY_MIN; 434 if (*timeout > NFS4_POLL_RETRY_MAX) 435 *timeout = NFS4_POLL_RETRY_MAX; 436 ret = *timeout; 437 *timeout <<= 1; 438 return ret; 439 } 440 441 static int nfs4_delay_killable(long *timeout) 442 { 443 might_sleep(); 444 445 __set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE); 446 schedule_timeout(nfs4_update_delay(timeout)); 447 if (!__fatal_signal_pending(current)) 448 return 0; 449 return -EINTR; 450 } 451 452 static int nfs4_delay_interruptible(long *timeout) 453 { 454 might_sleep(); 455 456 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE_UNSAFE); 457 schedule_timeout(nfs4_update_delay(timeout)); 458 if (!signal_pending(current)) 459 return 0; 460 return __fatal_signal_pending(current) ? -EINTR :-ERESTARTSYS; 461 } 462 463 static int nfs4_delay(long *timeout, bool interruptible) 464 { 465 if (interruptible) 466 return nfs4_delay_interruptible(timeout); 467 return nfs4_delay_killable(timeout); 468 } 469 470 static const nfs4_stateid * 471 nfs4_recoverable_stateid(const nfs4_stateid *stateid) 472 { 473 if (!stateid) 474 return NULL; 475 switch (stateid->type) { 476 case NFS4_OPEN_STATEID_TYPE: 477 case NFS4_LOCK_STATEID_TYPE: 478 case NFS4_DELEGATION_STATEID_TYPE: 479 return stateid; 480 default: 481 break; 482 } 483 return NULL; 484 } 485 486 /* This is the error handling routine for processes that are allowed 487 * to sleep. 488 */ 489 static int nfs4_do_handle_exception(struct nfs_server *server, 490 int errorcode, struct nfs4_exception *exception) 491 { 492 struct nfs_client *clp = server->nfs_client; 493 struct nfs4_state *state = exception->state; 494 const nfs4_stateid *stateid; 495 struct inode *inode = exception->inode; 496 int ret = errorcode; 497 498 exception->delay = 0; 499 exception->recovering = 0; 500 exception->retry = 0; 501 502 stateid = nfs4_recoverable_stateid(exception->stateid); 503 if (stateid == NULL && state != NULL) 504 stateid = nfs4_recoverable_stateid(&state->stateid); 505 506 switch(errorcode) { 507 case 0: 508 return 0; 509 case -NFS4ERR_BADHANDLE: 510 case -ESTALE: 511 if (inode != NULL && S_ISREG(inode->i_mode)) 512 pnfs_destroy_layout(NFS_I(inode)); 513 break; 514 case -NFS4ERR_DELEG_REVOKED: 515 case -NFS4ERR_ADMIN_REVOKED: 516 case -NFS4ERR_EXPIRED: 517 case -NFS4ERR_BAD_STATEID: 518 case -NFS4ERR_PARTNER_NO_AUTH: 519 if (inode != NULL && stateid != NULL) { 520 nfs_inode_find_state_and_recover(inode, 521 stateid); 522 goto wait_on_recovery; 523 } 524 fallthrough; 525 case -NFS4ERR_OPENMODE: 526 if (inode) { 527 int err; 528 529 err = nfs_async_inode_return_delegation(inode, 530 stateid); 531 if (err == 0) 532 goto wait_on_recovery; 533 if (stateid != NULL && stateid->type == NFS4_DELEGATION_STATEID_TYPE) { 534 exception->retry = 1; 535 break; 536 } 537 } 538 if (state == NULL) 539 break; 540 ret = nfs4_schedule_stateid_recovery(server, state); 541 if (ret < 0) 542 break; 543 goto wait_on_recovery; 544 case -NFS4ERR_STALE_STATEID: 545 case -NFS4ERR_STALE_CLIENTID: 546 nfs4_schedule_lease_recovery(clp); 547 goto wait_on_recovery; 548 case -NFS4ERR_MOVED: 549 ret = nfs4_schedule_migration_recovery(server); 550 if (ret < 0) 551 break; 552 goto wait_on_recovery; 553 case -NFS4ERR_LEASE_MOVED: 554 nfs4_schedule_lease_moved_recovery(clp); 555 goto wait_on_recovery; 556 #if defined(CONFIG_NFS_V4_1) 557 case -NFS4ERR_BADSESSION: 558 case -NFS4ERR_BADSLOT: 559 case -NFS4ERR_BAD_HIGH_SLOT: 560 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 561 case -NFS4ERR_DEADSESSION: 562 case -NFS4ERR_SEQ_FALSE_RETRY: 563 case -NFS4ERR_SEQ_MISORDERED: 564 /* Handled in nfs41_sequence_process() */ 565 goto wait_on_recovery; 566 #endif /* defined(CONFIG_NFS_V4_1) */ 567 case -NFS4ERR_FILE_OPEN: 568 if (exception->timeout > HZ) { 569 /* We have retried a decent amount, time to 570 * fail 571 */ 572 ret = -EBUSY; 573 break; 574 } 575 fallthrough; 576 case -NFS4ERR_DELAY: 577 nfs_inc_server_stats(server, NFSIOS_DELAY); 578 fallthrough; 579 case -NFS4ERR_GRACE: 580 case -NFS4ERR_LAYOUTTRYLATER: 581 case -NFS4ERR_RECALLCONFLICT: 582 case -NFS4ERR_RETURNCONFLICT: 583 exception->delay = 1; 584 return 0; 585 586 case -NFS4ERR_RETRY_UNCACHED_REP: 587 case -NFS4ERR_OLD_STATEID: 588 exception->retry = 1; 589 break; 590 case -NFS4ERR_BADOWNER: 591 /* The following works around a Linux server bug! */ 592 case -NFS4ERR_BADNAME: 593 if (server->caps & NFS_CAP_UIDGID_NOMAP) { 594 server->caps &= ~NFS_CAP_UIDGID_NOMAP; 595 exception->retry = 1; 596 printk(KERN_WARNING "NFS: v4 server %s " 597 "does not accept raw " 598 "uid/gids. " 599 "Reenabling the idmapper.\n", 600 server->nfs_client->cl_hostname); 601 } 602 } 603 /* We failed to handle the error */ 604 return nfs4_map_errors(ret); 605 wait_on_recovery: 606 exception->recovering = 1; 607 return 0; 608 } 609 610 /* 611 * Track the number of NFS4ERR_DELAY related retransmissions and return 612 * EAGAIN if the 'softerr' mount option is set, and we've exceeded the limit 613 * set by 'nfs_delay_retrans'. 614 */ 615 static int nfs4_exception_should_retrans(const struct nfs_server *server, 616 struct nfs4_exception *exception) 617 { 618 if (server->flags & NFS_MOUNT_SOFTERR && nfs_delay_retrans >= 0) { 619 if (exception->retrans++ >= (unsigned short)nfs_delay_retrans) 620 return -EAGAIN; 621 } 622 return 0; 623 } 624 625 /* This is the error handling routine for processes that are allowed 626 * to sleep. 627 */ 628 int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception) 629 { 630 struct nfs_client *clp = server->nfs_client; 631 int ret; 632 633 ret = nfs4_do_handle_exception(server, errorcode, exception); 634 if (exception->delay) { 635 int ret2 = nfs4_exception_should_retrans(server, exception); 636 if (ret2 < 0) { 637 exception->retry = 0; 638 return ret2; 639 } 640 ret = nfs4_delay(&exception->timeout, 641 exception->interruptible); 642 goto out_retry; 643 } 644 if (exception->recovering) { 645 if (exception->task_is_privileged) 646 return -EDEADLOCK; 647 ret = nfs4_wait_clnt_recover(clp); 648 if (test_bit(NFS_MIG_FAILED, &server->mig_status)) 649 return -EIO; 650 goto out_retry; 651 } 652 return ret; 653 out_retry: 654 if (ret == 0) 655 exception->retry = 1; 656 return ret; 657 } 658 659 static int 660 nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server, 661 int errorcode, struct nfs4_exception *exception) 662 { 663 struct nfs_client *clp = server->nfs_client; 664 int ret; 665 666 ret = nfs4_do_handle_exception(server, errorcode, exception); 667 if (exception->delay) { 668 int ret2 = nfs4_exception_should_retrans(server, exception); 669 if (ret2 < 0) { 670 exception->retry = 0; 671 return ret2; 672 } 673 rpc_delay(task, nfs4_update_delay(&exception->timeout)); 674 goto out_retry; 675 } 676 if (exception->recovering) { 677 if (exception->task_is_privileged) 678 return -EDEADLOCK; 679 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); 680 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) 681 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); 682 goto out_retry; 683 } 684 if (test_bit(NFS_MIG_FAILED, &server->mig_status)) 685 ret = -EIO; 686 return ret; 687 out_retry: 688 if (ret == 0) { 689 exception->retry = 1; 690 /* 691 * For NFS4ERR_MOVED, the client transport will need to 692 * be recomputed after migration recovery has completed. 693 */ 694 if (errorcode == -NFS4ERR_MOVED) 695 rpc_task_release_transport(task); 696 } 697 return ret; 698 } 699 700 int 701 nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server, 702 struct nfs4_state *state, long *timeout) 703 { 704 struct nfs4_exception exception = { 705 .state = state, 706 }; 707 708 if (task->tk_status >= 0) 709 return 0; 710 if (timeout) 711 exception.timeout = *timeout; 712 task->tk_status = nfs4_async_handle_exception(task, server, 713 task->tk_status, 714 &exception); 715 if (exception.delay && timeout) 716 *timeout = exception.timeout; 717 if (exception.retry) 718 return -EAGAIN; 719 return 0; 720 } 721 722 /* 723 * Return 'true' if 'clp' is using an rpc_client that is integrity protected 724 * or 'false' otherwise. 725 */ 726 static bool _nfs4_is_integrity_protected(struct nfs_client *clp) 727 { 728 rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor; 729 return (flavor == RPC_AUTH_GSS_KRB5I) || (flavor == RPC_AUTH_GSS_KRB5P); 730 } 731 732 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp) 733 { 734 spin_lock(&clp->cl_lock); 735 if (time_before(clp->cl_last_renewal,timestamp)) 736 clp->cl_last_renewal = timestamp; 737 spin_unlock(&clp->cl_lock); 738 } 739 740 static void renew_lease(const struct nfs_server *server, unsigned long timestamp) 741 { 742 struct nfs_client *clp = server->nfs_client; 743 744 if (!nfs4_has_session(clp)) 745 do_renew_lease(clp, timestamp); 746 } 747 748 struct nfs4_call_sync_data { 749 const struct nfs_server *seq_server; 750 struct nfs4_sequence_args *seq_args; 751 struct nfs4_sequence_res *seq_res; 752 }; 753 754 void nfs4_init_sequence(struct nfs4_sequence_args *args, 755 struct nfs4_sequence_res *res, int cache_reply, 756 int privileged) 757 { 758 args->sa_slot = NULL; 759 args->sa_cache_this = cache_reply; 760 args->sa_privileged = privileged; 761 762 res->sr_slot = NULL; 763 } 764 765 static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res) 766 { 767 struct nfs4_slot *slot = res->sr_slot; 768 struct nfs4_slot_table *tbl; 769 770 tbl = slot->table; 771 spin_lock(&tbl->slot_tbl_lock); 772 if (!nfs41_wake_and_assign_slot(tbl, slot)) 773 nfs4_free_slot(tbl, slot); 774 spin_unlock(&tbl->slot_tbl_lock); 775 776 res->sr_slot = NULL; 777 } 778 779 static int nfs40_sequence_done(struct rpc_task *task, 780 struct nfs4_sequence_res *res) 781 { 782 if (res->sr_slot != NULL) 783 nfs40_sequence_free_slot(res); 784 return 1; 785 } 786 787 #if defined(CONFIG_NFS_V4_1) 788 789 static void nfs41_release_slot(struct nfs4_slot *slot) 790 { 791 struct nfs4_session *session; 792 struct nfs4_slot_table *tbl; 793 bool send_new_highest_used_slotid = false; 794 795 if (!slot) 796 return; 797 tbl = slot->table; 798 session = tbl->session; 799 800 /* Bump the slot sequence number */ 801 if (slot->seq_done) 802 slot->seq_nr++; 803 slot->seq_done = 0; 804 805 spin_lock(&tbl->slot_tbl_lock); 806 /* Be nice to the server: try to ensure that the last transmitted 807 * value for highest_user_slotid <= target_highest_slotid 808 */ 809 if (tbl->highest_used_slotid > tbl->target_highest_slotid) 810 send_new_highest_used_slotid = true; 811 812 if (nfs41_wake_and_assign_slot(tbl, slot)) { 813 send_new_highest_used_slotid = false; 814 goto out_unlock; 815 } 816 nfs4_free_slot(tbl, slot); 817 818 if (tbl->highest_used_slotid != NFS4_NO_SLOT) 819 send_new_highest_used_slotid = false; 820 out_unlock: 821 spin_unlock(&tbl->slot_tbl_lock); 822 if (send_new_highest_used_slotid) 823 nfs41_notify_server(session->clp); 824 if (waitqueue_active(&tbl->slot_waitq)) 825 wake_up_all(&tbl->slot_waitq); 826 } 827 828 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res) 829 { 830 nfs41_release_slot(res->sr_slot); 831 res->sr_slot = NULL; 832 } 833 834 static void nfs4_slot_sequence_record_sent(struct nfs4_slot *slot, 835 u32 seqnr) 836 { 837 if ((s32)(seqnr - slot->seq_nr_highest_sent) > 0) 838 slot->seq_nr_highest_sent = seqnr; 839 } 840 static void nfs4_slot_sequence_acked(struct nfs4_slot *slot, u32 seqnr) 841 { 842 nfs4_slot_sequence_record_sent(slot, seqnr); 843 slot->seq_nr_last_acked = seqnr; 844 } 845 846 static void nfs4_probe_sequence(struct nfs_client *client, const struct cred *cred, 847 struct nfs4_slot *slot) 848 { 849 struct rpc_task *task = _nfs41_proc_sequence(client, cred, slot, true); 850 if (!IS_ERR(task)) 851 rpc_put_task_async(task); 852 } 853 854 static int nfs41_sequence_process(struct rpc_task *task, 855 struct nfs4_sequence_res *res) 856 { 857 struct nfs4_session *session; 858 struct nfs4_slot *slot = res->sr_slot; 859 struct nfs_client *clp; 860 int status; 861 int ret = 1; 862 863 if (slot == NULL) 864 goto out_noaction; 865 /* don't increment the sequence number if the task wasn't sent */ 866 if (!RPC_WAS_SENT(task) || slot->seq_done) 867 goto out; 868 869 session = slot->table->session; 870 clp = session->clp; 871 872 trace_nfs4_sequence_done(session, res); 873 874 status = res->sr_status; 875 if (task->tk_status == -NFS4ERR_DEADSESSION) 876 status = -NFS4ERR_DEADSESSION; 877 878 /* Check the SEQUENCE operation status */ 879 switch (status) { 880 case 0: 881 /* Mark this sequence number as having been acked */ 882 nfs4_slot_sequence_acked(slot, slot->seq_nr); 883 /* Update the slot's sequence and clientid lease timer */ 884 slot->seq_done = 1; 885 do_renew_lease(clp, res->sr_timestamp); 886 /* Check sequence flags */ 887 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags, 888 !!slot->privileged); 889 nfs41_update_target_slotid(slot->table, slot, res); 890 break; 891 case 1: 892 /* 893 * sr_status remains 1 if an RPC level error occurred. 894 * The server may or may not have processed the sequence 895 * operation.. 896 */ 897 nfs4_slot_sequence_record_sent(slot, slot->seq_nr); 898 slot->seq_done = 1; 899 goto out; 900 case -NFS4ERR_DELAY: 901 /* The server detected a resend of the RPC call and 902 * returned NFS4ERR_DELAY as per Section 2.10.6.2 903 * of RFC5661. 904 */ 905 dprintk("%s: slot=%u seq=%u: Operation in progress\n", 906 __func__, 907 slot->slot_nr, 908 slot->seq_nr); 909 goto out_retry; 910 case -NFS4ERR_RETRY_UNCACHED_REP: 911 case -NFS4ERR_SEQ_FALSE_RETRY: 912 /* 913 * The server thinks we tried to replay a request. 914 * Retry the call after bumping the sequence ID. 915 */ 916 nfs4_slot_sequence_acked(slot, slot->seq_nr); 917 goto retry_new_seq; 918 case -NFS4ERR_BADSLOT: 919 /* 920 * The slot id we used was probably retired. Try again 921 * using a different slot id. 922 */ 923 if (slot->slot_nr < slot->table->target_highest_slotid) 924 goto session_recover; 925 goto retry_nowait; 926 case -NFS4ERR_SEQ_MISORDERED: 927 nfs4_slot_sequence_record_sent(slot, slot->seq_nr); 928 /* 929 * Were one or more calls using this slot interrupted? 930 * If the server never received the request, then our 931 * transmitted slot sequence number may be too high. However, 932 * if the server did receive the request then it might 933 * accidentally give us a reply with a mismatched operation. 934 * We can sort this out by sending a lone sequence operation 935 * to the server on the same slot. 936 */ 937 if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) { 938 slot->seq_nr--; 939 if (task->tk_msg.rpc_proc != &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE]) { 940 nfs4_probe_sequence(clp, task->tk_msg.rpc_cred, slot); 941 res->sr_slot = NULL; 942 } 943 goto retry_nowait; 944 } 945 /* 946 * RFC5661: 947 * A retry might be sent while the original request is 948 * still in progress on the replier. The replier SHOULD 949 * deal with the issue by returning NFS4ERR_DELAY as the 950 * reply to SEQUENCE or CB_SEQUENCE operation, but 951 * implementations MAY return NFS4ERR_SEQ_MISORDERED. 952 * 953 * Restart the search after a delay. 954 */ 955 slot->seq_nr = slot->seq_nr_highest_sent; 956 goto out_retry; 957 case -NFS4ERR_BADSESSION: 958 case -NFS4ERR_DEADSESSION: 959 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 960 goto session_recover; 961 default: 962 /* Just update the slot sequence no. */ 963 slot->seq_done = 1; 964 } 965 out: 966 /* The session may be reset by one of the error handlers. */ 967 dprintk("%s: Error %d free the slot \n", __func__, res->sr_status); 968 out_noaction: 969 return ret; 970 session_recover: 971 set_bit(NFS4_SLOT_TBL_DRAINING, &session->fc_slot_table.slot_tbl_state); 972 nfs4_schedule_session_recovery(session, status); 973 dprintk("%s ERROR: %d Reset session\n", __func__, status); 974 nfs41_sequence_free_slot(res); 975 goto out; 976 retry_new_seq: 977 ++slot->seq_nr; 978 retry_nowait: 979 if (rpc_restart_call_prepare(task)) { 980 nfs41_sequence_free_slot(res); 981 task->tk_status = 0; 982 ret = 0; 983 } 984 goto out; 985 out_retry: 986 if (!rpc_restart_call(task)) 987 goto out; 988 rpc_delay(task, NFS4_POLL_RETRY_MAX); 989 return 0; 990 } 991 992 int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) 993 { 994 if (!nfs41_sequence_process(task, res)) 995 return 0; 996 if (res->sr_slot != NULL) 997 nfs41_sequence_free_slot(res); 998 return 1; 999 1000 } 1001 EXPORT_SYMBOL_GPL(nfs41_sequence_done); 1002 1003 static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res) 1004 { 1005 if (res->sr_slot == NULL) 1006 return 1; 1007 if (res->sr_slot->table->session != NULL) 1008 return nfs41_sequence_process(task, res); 1009 return nfs40_sequence_done(task, res); 1010 } 1011 1012 static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res) 1013 { 1014 if (res->sr_slot != NULL) { 1015 if (res->sr_slot->table->session != NULL) 1016 nfs41_sequence_free_slot(res); 1017 else 1018 nfs40_sequence_free_slot(res); 1019 } 1020 } 1021 1022 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) 1023 { 1024 if (res->sr_slot == NULL) 1025 return 1; 1026 if (!res->sr_slot->table->session) 1027 return nfs40_sequence_done(task, res); 1028 return nfs41_sequence_done(task, res); 1029 } 1030 EXPORT_SYMBOL_GPL(nfs4_sequence_done); 1031 1032 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata) 1033 { 1034 struct nfs4_call_sync_data *data = calldata; 1035 1036 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server); 1037 1038 nfs4_setup_sequence(data->seq_server->nfs_client, 1039 data->seq_args, data->seq_res, task); 1040 } 1041 1042 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata) 1043 { 1044 struct nfs4_call_sync_data *data = calldata; 1045 1046 nfs41_sequence_done(task, data->seq_res); 1047 } 1048 1049 static const struct rpc_call_ops nfs41_call_sync_ops = { 1050 .rpc_call_prepare = nfs41_call_sync_prepare, 1051 .rpc_call_done = nfs41_call_sync_done, 1052 }; 1053 1054 #else /* !CONFIG_NFS_V4_1 */ 1055 1056 static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res) 1057 { 1058 return nfs40_sequence_done(task, res); 1059 } 1060 1061 static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res) 1062 { 1063 if (res->sr_slot != NULL) 1064 nfs40_sequence_free_slot(res); 1065 } 1066 1067 int nfs4_sequence_done(struct rpc_task *task, 1068 struct nfs4_sequence_res *res) 1069 { 1070 return nfs40_sequence_done(task, res); 1071 } 1072 EXPORT_SYMBOL_GPL(nfs4_sequence_done); 1073 1074 #endif /* !CONFIG_NFS_V4_1 */ 1075 1076 static void nfs41_sequence_res_init(struct nfs4_sequence_res *res) 1077 { 1078 res->sr_timestamp = jiffies; 1079 res->sr_status_flags = 0; 1080 res->sr_status = 1; 1081 } 1082 1083 static 1084 void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args, 1085 struct nfs4_sequence_res *res, 1086 struct nfs4_slot *slot) 1087 { 1088 if (!slot) 1089 return; 1090 slot->privileged = args->sa_privileged ? 1 : 0; 1091 args->sa_slot = slot; 1092 1093 res->sr_slot = slot; 1094 } 1095 1096 int nfs4_setup_sequence(struct nfs_client *client, 1097 struct nfs4_sequence_args *args, 1098 struct nfs4_sequence_res *res, 1099 struct rpc_task *task) 1100 { 1101 struct nfs4_session *session = nfs4_get_session(client); 1102 struct nfs4_slot_table *tbl = client->cl_slot_tbl; 1103 struct nfs4_slot *slot; 1104 1105 /* slot already allocated? */ 1106 if (res->sr_slot != NULL) 1107 goto out_start; 1108 1109 if (session) 1110 tbl = &session->fc_slot_table; 1111 1112 spin_lock(&tbl->slot_tbl_lock); 1113 /* The state manager will wait until the slot table is empty */ 1114 if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged) 1115 goto out_sleep; 1116 1117 slot = nfs4_alloc_slot(tbl); 1118 if (IS_ERR(slot)) { 1119 if (slot == ERR_PTR(-ENOMEM)) 1120 goto out_sleep_timeout; 1121 goto out_sleep; 1122 } 1123 spin_unlock(&tbl->slot_tbl_lock); 1124 1125 nfs4_sequence_attach_slot(args, res, slot); 1126 1127 trace_nfs4_setup_sequence(session, args); 1128 out_start: 1129 nfs41_sequence_res_init(res); 1130 rpc_call_start(task); 1131 return 0; 1132 out_sleep_timeout: 1133 /* Try again in 1/4 second */ 1134 if (args->sa_privileged) 1135 rpc_sleep_on_priority_timeout(&tbl->slot_tbl_waitq, task, 1136 jiffies + (HZ >> 2), RPC_PRIORITY_PRIVILEGED); 1137 else 1138 rpc_sleep_on_timeout(&tbl->slot_tbl_waitq, task, 1139 NULL, jiffies + (HZ >> 2)); 1140 spin_unlock(&tbl->slot_tbl_lock); 1141 return -EAGAIN; 1142 out_sleep: 1143 if (args->sa_privileged) 1144 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task, 1145 RPC_PRIORITY_PRIVILEGED); 1146 else 1147 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); 1148 spin_unlock(&tbl->slot_tbl_lock); 1149 return -EAGAIN; 1150 } 1151 EXPORT_SYMBOL_GPL(nfs4_setup_sequence); 1152 1153 static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata) 1154 { 1155 struct nfs4_call_sync_data *data = calldata; 1156 nfs4_setup_sequence(data->seq_server->nfs_client, 1157 data->seq_args, data->seq_res, task); 1158 } 1159 1160 static void nfs40_call_sync_done(struct rpc_task *task, void *calldata) 1161 { 1162 struct nfs4_call_sync_data *data = calldata; 1163 nfs4_sequence_done(task, data->seq_res); 1164 } 1165 1166 static const struct rpc_call_ops nfs40_call_sync_ops = { 1167 .rpc_call_prepare = nfs40_call_sync_prepare, 1168 .rpc_call_done = nfs40_call_sync_done, 1169 }; 1170 1171 static int nfs4_call_sync_custom(struct rpc_task_setup *task_setup) 1172 { 1173 int ret; 1174 struct rpc_task *task; 1175 1176 task = rpc_run_task(task_setup); 1177 if (IS_ERR(task)) 1178 return PTR_ERR(task); 1179 1180 ret = task->tk_status; 1181 rpc_put_task(task); 1182 return ret; 1183 } 1184 1185 static int nfs4_do_call_sync(struct rpc_clnt *clnt, 1186 struct nfs_server *server, 1187 struct rpc_message *msg, 1188 struct nfs4_sequence_args *args, 1189 struct nfs4_sequence_res *res, 1190 unsigned short task_flags) 1191 { 1192 struct nfs_client *clp = server->nfs_client; 1193 struct nfs4_call_sync_data data = { 1194 .seq_server = server, 1195 .seq_args = args, 1196 .seq_res = res, 1197 }; 1198 struct rpc_task_setup task_setup = { 1199 .rpc_client = clnt, 1200 .rpc_message = msg, 1201 .callback_ops = clp->cl_mvops->call_sync_ops, 1202 .callback_data = &data, 1203 .flags = task_flags, 1204 }; 1205 1206 return nfs4_call_sync_custom(&task_setup); 1207 } 1208 1209 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt, 1210 struct nfs_server *server, 1211 struct rpc_message *msg, 1212 struct nfs4_sequence_args *args, 1213 struct nfs4_sequence_res *res) 1214 { 1215 unsigned short task_flags = 0; 1216 1217 if (server->caps & NFS_CAP_MOVEABLE) 1218 task_flags = RPC_TASK_MOVEABLE; 1219 return nfs4_do_call_sync(clnt, server, msg, args, res, task_flags); 1220 } 1221 1222 1223 int nfs4_call_sync(struct rpc_clnt *clnt, 1224 struct nfs_server *server, 1225 struct rpc_message *msg, 1226 struct nfs4_sequence_args *args, 1227 struct nfs4_sequence_res *res, 1228 int cache_reply) 1229 { 1230 nfs4_init_sequence(args, res, cache_reply, 0); 1231 return nfs4_call_sync_sequence(clnt, server, msg, args, res); 1232 } 1233 1234 static void 1235 nfs4_inc_nlink_locked(struct inode *inode) 1236 { 1237 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE | 1238 NFS_INO_INVALID_CTIME | 1239 NFS_INO_INVALID_NLINK); 1240 inc_nlink(inode); 1241 } 1242 1243 static void 1244 nfs4_inc_nlink(struct inode *inode) 1245 { 1246 spin_lock(&inode->i_lock); 1247 nfs4_inc_nlink_locked(inode); 1248 spin_unlock(&inode->i_lock); 1249 } 1250 1251 static void 1252 nfs4_dec_nlink_locked(struct inode *inode) 1253 { 1254 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE | 1255 NFS_INO_INVALID_CTIME | 1256 NFS_INO_INVALID_NLINK); 1257 drop_nlink(inode); 1258 } 1259 1260 static void 1261 nfs4_update_changeattr_locked(struct inode *inode, 1262 struct nfs4_change_info *cinfo, 1263 unsigned long timestamp, unsigned long cache_validity) 1264 { 1265 struct nfs_inode *nfsi = NFS_I(inode); 1266 u64 change_attr = inode_peek_iversion_raw(inode); 1267 1268 if (!nfs_have_delegated_mtime(inode)) 1269 cache_validity |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME; 1270 if (S_ISDIR(inode->i_mode)) 1271 cache_validity |= NFS_INO_INVALID_DATA; 1272 1273 switch (NFS_SERVER(inode)->change_attr_type) { 1274 case NFS4_CHANGE_TYPE_IS_UNDEFINED: 1275 if (cinfo->after == change_attr) 1276 goto out; 1277 break; 1278 default: 1279 if ((s64)(change_attr - cinfo->after) >= 0) 1280 goto out; 1281 } 1282 1283 inode_set_iversion_raw(inode, cinfo->after); 1284 if (!cinfo->atomic || cinfo->before != change_attr) { 1285 if (S_ISDIR(inode->i_mode)) 1286 nfs_force_lookup_revalidate(inode); 1287 1288 if (!nfs_have_delegated_attributes(inode)) 1289 cache_validity |= 1290 NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL | 1291 NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER | 1292 NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK | 1293 NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR; 1294 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode); 1295 } 1296 nfsi->attrtimeo_timestamp = jiffies; 1297 nfsi->read_cache_jiffies = timestamp; 1298 nfsi->attr_gencount = nfs_inc_attr_generation_counter(); 1299 nfsi->cache_validity &= ~NFS_INO_INVALID_CHANGE; 1300 out: 1301 nfs_set_cache_invalid(inode, cache_validity); 1302 } 1303 1304 void 1305 nfs4_update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo, 1306 unsigned long timestamp, unsigned long cache_validity) 1307 { 1308 spin_lock(&dir->i_lock); 1309 nfs4_update_changeattr_locked(dir, cinfo, timestamp, cache_validity); 1310 spin_unlock(&dir->i_lock); 1311 } 1312 1313 struct nfs4_open_createattrs { 1314 struct nfs4_label *label; 1315 struct iattr *sattr; 1316 const __u32 verf[2]; 1317 }; 1318 1319 static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server, 1320 int err, struct nfs4_exception *exception) 1321 { 1322 if (err != -EINVAL) 1323 return false; 1324 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1)) 1325 return false; 1326 server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1; 1327 exception->retry = 1; 1328 return true; 1329 } 1330 1331 static fmode_t _nfs4_ctx_to_accessmode(const struct nfs_open_context *ctx) 1332 { 1333 return ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC); 1334 } 1335 1336 static fmode_t _nfs4_ctx_to_openmode(const struct nfs_open_context *ctx) 1337 { 1338 fmode_t ret = ctx->mode & (FMODE_READ|FMODE_WRITE); 1339 1340 return (ctx->mode & FMODE_EXEC) ? FMODE_READ | ret : ret; 1341 } 1342 1343 static u32 1344 nfs4_fmode_to_share_access(fmode_t fmode) 1345 { 1346 u32 res = 0; 1347 1348 switch (fmode & (FMODE_READ | FMODE_WRITE)) { 1349 case FMODE_READ: 1350 res = NFS4_SHARE_ACCESS_READ; 1351 break; 1352 case FMODE_WRITE: 1353 res = NFS4_SHARE_ACCESS_WRITE; 1354 break; 1355 case FMODE_READ|FMODE_WRITE: 1356 res = NFS4_SHARE_ACCESS_BOTH; 1357 } 1358 return res; 1359 } 1360 1361 static u32 1362 nfs4_map_atomic_open_share(struct nfs_server *server, 1363 fmode_t fmode, int openflags) 1364 { 1365 u32 res = nfs4_fmode_to_share_access(fmode); 1366 1367 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1)) 1368 goto out; 1369 /* Want no delegation if we're using O_DIRECT */ 1370 if (openflags & O_DIRECT) { 1371 res |= NFS4_SHARE_WANT_NO_DELEG; 1372 goto out; 1373 } 1374 /* res |= NFS4_SHARE_WANT_NO_PREFERENCE; */ 1375 if (server->caps & NFS_CAP_DELEGTIME) 1376 res |= NFS4_SHARE_WANT_DELEG_TIMESTAMPS; 1377 if (server->caps & NFS_CAP_OPEN_XOR) 1378 res |= NFS4_SHARE_WANT_OPEN_XOR_DELEGATION; 1379 out: 1380 return res; 1381 } 1382 1383 static enum open_claim_type4 1384 nfs4_map_atomic_open_claim(struct nfs_server *server, 1385 enum open_claim_type4 claim) 1386 { 1387 if (server->caps & NFS_CAP_ATOMIC_OPEN_V1) 1388 return claim; 1389 switch (claim) { 1390 default: 1391 return claim; 1392 case NFS4_OPEN_CLAIM_FH: 1393 return NFS4_OPEN_CLAIM_NULL; 1394 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 1395 return NFS4_OPEN_CLAIM_DELEGATE_CUR; 1396 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 1397 return NFS4_OPEN_CLAIM_DELEGATE_PREV; 1398 } 1399 } 1400 1401 static void nfs4_init_opendata_res(struct nfs4_opendata *p) 1402 { 1403 p->o_res.f_attr = &p->f_attr; 1404 p->o_res.seqid = p->o_arg.seqid; 1405 p->c_res.seqid = p->c_arg.seqid; 1406 p->o_res.server = p->o_arg.server; 1407 p->o_res.access_request = p->o_arg.access; 1408 nfs_fattr_init(&p->f_attr); 1409 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name); 1410 } 1411 1412 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, 1413 struct nfs4_state_owner *sp, fmode_t fmode, int flags, 1414 const struct nfs4_open_createattrs *c, 1415 enum open_claim_type4 claim, 1416 gfp_t gfp_mask) 1417 { 1418 struct dentry *parent = dget_parent(dentry); 1419 struct inode *dir = d_inode(parent); 1420 struct nfs_server *server = NFS_SERVER(dir); 1421 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 1422 struct nfs4_label *label = (c != NULL) ? c->label : NULL; 1423 struct nfs4_opendata *p; 1424 1425 p = kzalloc(sizeof(*p), gfp_mask); 1426 if (p == NULL) 1427 goto err; 1428 1429 p->f_attr.label = nfs4_label_alloc(server, gfp_mask); 1430 if (IS_ERR(p->f_attr.label)) 1431 goto err_free_p; 1432 1433 p->a_label = nfs4_label_alloc(server, gfp_mask); 1434 if (IS_ERR(p->a_label)) 1435 goto err_free_f; 1436 1437 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 1438 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask); 1439 if (IS_ERR(p->o_arg.seqid)) 1440 goto err_free_label; 1441 nfs_sb_active(dentry->d_sb); 1442 p->dentry = dget(dentry); 1443 p->dir = parent; 1444 p->owner = sp; 1445 atomic_inc(&sp->so_count); 1446 p->o_arg.open_flags = flags; 1447 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE); 1448 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim); 1449 p->o_arg.share_access = nfs4_map_atomic_open_share(server, 1450 fmode, flags); 1451 if (flags & O_CREAT) { 1452 p->o_arg.umask = current_umask(); 1453 p->o_arg.label = nfs4_label_copy(p->a_label, label); 1454 if (c->sattr != NULL && c->sattr->ia_valid != 0) { 1455 p->o_arg.u.attrs = &p->attrs; 1456 memcpy(&p->attrs, c->sattr, sizeof(p->attrs)); 1457 1458 memcpy(p->o_arg.u.verifier.data, c->verf, 1459 sizeof(p->o_arg.u.verifier.data)); 1460 } 1461 } 1462 /* ask server to check for all possible rights as results 1463 * are cached */ 1464 switch (p->o_arg.claim) { 1465 default: 1466 break; 1467 case NFS4_OPEN_CLAIM_NULL: 1468 case NFS4_OPEN_CLAIM_FH: 1469 p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY | 1470 NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE | 1471 NFS4_ACCESS_EXECUTE | 1472 nfs_access_xattr_mask(server); 1473 } 1474 p->o_arg.clientid = server->nfs_client->cl_clientid; 1475 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time); 1476 p->o_arg.id.uniquifier = sp->so_seqid.owner_id; 1477 p->o_arg.name = &dentry->d_name; 1478 p->o_arg.server = server; 1479 p->o_arg.bitmask = nfs4_bitmask(server, label); 1480 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0]; 1481 switch (p->o_arg.claim) { 1482 case NFS4_OPEN_CLAIM_NULL: 1483 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 1484 case NFS4_OPEN_CLAIM_DELEGATE_PREV: 1485 p->o_arg.fh = NFS_FH(dir); 1486 break; 1487 case NFS4_OPEN_CLAIM_PREVIOUS: 1488 case NFS4_OPEN_CLAIM_FH: 1489 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 1490 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 1491 p->o_arg.fh = NFS_FH(d_inode(dentry)); 1492 } 1493 p->c_arg.fh = &p->o_res.fh; 1494 p->c_arg.stateid = &p->o_res.stateid; 1495 p->c_arg.seqid = p->o_arg.seqid; 1496 nfs4_init_opendata_res(p); 1497 kref_init(&p->kref); 1498 return p; 1499 1500 err_free_label: 1501 nfs4_label_free(p->a_label); 1502 err_free_f: 1503 nfs4_label_free(p->f_attr.label); 1504 err_free_p: 1505 kfree(p); 1506 err: 1507 dput(parent); 1508 return NULL; 1509 } 1510 1511 static void nfs4_opendata_free(struct kref *kref) 1512 { 1513 struct nfs4_opendata *p = container_of(kref, 1514 struct nfs4_opendata, kref); 1515 struct super_block *sb = p->dentry->d_sb; 1516 1517 nfs4_lgopen_release(p->lgp); 1518 nfs_free_seqid(p->o_arg.seqid); 1519 nfs4_sequence_free_slot(&p->o_res.seq_res); 1520 if (p->state != NULL) 1521 nfs4_put_open_state(p->state); 1522 nfs4_put_state_owner(p->owner); 1523 1524 nfs4_label_free(p->a_label); 1525 nfs4_label_free(p->f_attr.label); 1526 1527 dput(p->dir); 1528 dput(p->dentry); 1529 nfs_sb_deactive(sb); 1530 nfs_fattr_free_names(&p->f_attr); 1531 kfree(p->f_attr.mdsthreshold); 1532 kfree(p); 1533 } 1534 1535 static void nfs4_opendata_put(struct nfs4_opendata *p) 1536 { 1537 if (p != NULL) 1538 kref_put(&p->kref, nfs4_opendata_free); 1539 } 1540 1541 static bool nfs4_mode_match_open_stateid(struct nfs4_state *state, 1542 fmode_t fmode) 1543 { 1544 switch(fmode & (FMODE_READ|FMODE_WRITE)) { 1545 case FMODE_READ|FMODE_WRITE: 1546 return state->n_rdwr != 0; 1547 case FMODE_WRITE: 1548 return state->n_wronly != 0; 1549 case FMODE_READ: 1550 return state->n_rdonly != 0; 1551 } 1552 WARN_ON_ONCE(1); 1553 return false; 1554 } 1555 1556 static int can_open_cached(struct nfs4_state *state, fmode_t mode, 1557 int open_mode, enum open_claim_type4 claim) 1558 { 1559 int ret = 0; 1560 1561 if (open_mode & (O_EXCL|O_TRUNC)) 1562 goto out; 1563 switch (claim) { 1564 case NFS4_OPEN_CLAIM_NULL: 1565 case NFS4_OPEN_CLAIM_FH: 1566 goto out; 1567 default: 1568 break; 1569 } 1570 switch (mode & (FMODE_READ|FMODE_WRITE)) { 1571 case FMODE_READ: 1572 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0 1573 && state->n_rdonly != 0; 1574 break; 1575 case FMODE_WRITE: 1576 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0 1577 && state->n_wronly != 0; 1578 break; 1579 case FMODE_READ|FMODE_WRITE: 1580 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0 1581 && state->n_rdwr != 0; 1582 } 1583 out: 1584 return ret; 1585 } 1586 1587 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode, 1588 enum open_claim_type4 claim) 1589 { 1590 if (delegation == NULL) 1591 return 0; 1592 if ((delegation->type & fmode) != fmode) 1593 return 0; 1594 switch (claim) { 1595 case NFS4_OPEN_CLAIM_NULL: 1596 case NFS4_OPEN_CLAIM_FH: 1597 break; 1598 case NFS4_OPEN_CLAIM_PREVIOUS: 1599 if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags)) 1600 break; 1601 fallthrough; 1602 default: 1603 return 0; 1604 } 1605 nfs_mark_delegation_referenced(delegation); 1606 return 1; 1607 } 1608 1609 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode) 1610 { 1611 switch (fmode) { 1612 case FMODE_WRITE: 1613 state->n_wronly++; 1614 break; 1615 case FMODE_READ: 1616 state->n_rdonly++; 1617 break; 1618 case FMODE_READ|FMODE_WRITE: 1619 state->n_rdwr++; 1620 } 1621 nfs4_state_set_mode_locked(state, state->state | fmode); 1622 } 1623 1624 #ifdef CONFIG_NFS_V4_1 1625 static bool nfs_open_stateid_recover_openmode(struct nfs4_state *state) 1626 { 1627 if (state->n_rdonly && !test_bit(NFS_O_RDONLY_STATE, &state->flags)) 1628 return true; 1629 if (state->n_wronly && !test_bit(NFS_O_WRONLY_STATE, &state->flags)) 1630 return true; 1631 if (state->n_rdwr && !test_bit(NFS_O_RDWR_STATE, &state->flags)) 1632 return true; 1633 return false; 1634 } 1635 #endif /* CONFIG_NFS_V4_1 */ 1636 1637 static void nfs_state_log_update_open_stateid(struct nfs4_state *state) 1638 { 1639 if (test_and_clear_bit(NFS_STATE_CHANGE_WAIT, &state->flags)) 1640 wake_up_all(&state->waitq); 1641 } 1642 1643 static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state) 1644 { 1645 struct nfs_client *clp = state->owner->so_server->nfs_client; 1646 bool need_recover = false; 1647 1648 if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly) 1649 need_recover = true; 1650 if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly) 1651 need_recover = true; 1652 if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr) 1653 need_recover = true; 1654 if (need_recover) 1655 nfs4_state_mark_reclaim_nograce(clp, state); 1656 } 1657 1658 /* 1659 * Check for whether or not the caller may update the open stateid 1660 * to the value passed in by stateid. 1661 * 1662 * Note: This function relies heavily on the server implementing 1663 * RFC7530 Section 9.1.4.2, and RFC5661 Section 8.2.2 1664 * correctly. 1665 * i.e. The stateid seqids have to be initialised to 1, and 1666 * are then incremented on every state transition. 1667 */ 1668 static bool nfs_stateid_is_sequential(struct nfs4_state *state, 1669 const nfs4_stateid *stateid) 1670 { 1671 if (test_bit(NFS_OPEN_STATE, &state->flags)) { 1672 /* The common case - we're updating to a new sequence number */ 1673 if (nfs4_stateid_match_other(stateid, &state->open_stateid)) { 1674 if (nfs4_stateid_is_next(&state->open_stateid, stateid)) 1675 return true; 1676 return false; 1677 } 1678 /* The server returned a new stateid */ 1679 } 1680 /* This is the first OPEN in this generation */ 1681 if (stateid->seqid == cpu_to_be32(1)) 1682 return true; 1683 return false; 1684 } 1685 1686 static void nfs_resync_open_stateid_locked(struct nfs4_state *state) 1687 { 1688 if (!(state->n_wronly || state->n_rdonly || state->n_rdwr)) 1689 return; 1690 if (state->n_wronly) 1691 set_bit(NFS_O_WRONLY_STATE, &state->flags); 1692 if (state->n_rdonly) 1693 set_bit(NFS_O_RDONLY_STATE, &state->flags); 1694 if (state->n_rdwr) 1695 set_bit(NFS_O_RDWR_STATE, &state->flags); 1696 set_bit(NFS_OPEN_STATE, &state->flags); 1697 } 1698 1699 static void nfs_clear_open_stateid_locked(struct nfs4_state *state, 1700 nfs4_stateid *stateid, fmode_t fmode) 1701 { 1702 clear_bit(NFS_O_RDWR_STATE, &state->flags); 1703 switch (fmode & (FMODE_READ|FMODE_WRITE)) { 1704 case FMODE_WRITE: 1705 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1706 break; 1707 case FMODE_READ: 1708 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 1709 break; 1710 case 0: 1711 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1712 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 1713 clear_bit(NFS_OPEN_STATE, &state->flags); 1714 } 1715 if (stateid == NULL) 1716 return; 1717 /* Handle OPEN+OPEN_DOWNGRADE races */ 1718 if (nfs4_stateid_match_other(stateid, &state->open_stateid) && 1719 !nfs4_stateid_is_newer(stateid, &state->open_stateid)) { 1720 nfs_resync_open_stateid_locked(state); 1721 goto out; 1722 } 1723 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 1724 nfs4_stateid_copy(&state->stateid, stateid); 1725 nfs4_stateid_copy(&state->open_stateid, stateid); 1726 trace_nfs4_open_stateid_update(state->inode, stateid, 0); 1727 out: 1728 nfs_state_log_update_open_stateid(state); 1729 } 1730 1731 static void nfs_clear_open_stateid(struct nfs4_state *state, 1732 nfs4_stateid *arg_stateid, 1733 nfs4_stateid *stateid, fmode_t fmode) 1734 { 1735 write_seqlock(&state->seqlock); 1736 /* Ignore, if the CLOSE argment doesn't match the current stateid */ 1737 if (nfs4_state_match_open_stateid_other(state, arg_stateid)) 1738 nfs_clear_open_stateid_locked(state, stateid, fmode); 1739 write_sequnlock(&state->seqlock); 1740 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) 1741 nfs4_schedule_state_manager(state->owner->so_server->nfs_client); 1742 } 1743 1744 static void nfs_set_open_stateid_locked(struct nfs4_state *state, 1745 const nfs4_stateid *stateid, nfs4_stateid *freeme) 1746 __must_hold(&state->owner->so_lock) 1747 __must_hold(&state->seqlock) 1748 __must_hold(RCU) 1749 1750 { 1751 DEFINE_WAIT(wait); 1752 int status = 0; 1753 for (;;) { 1754 1755 if (nfs_stateid_is_sequential(state, stateid)) 1756 break; 1757 1758 if (status) 1759 break; 1760 /* Rely on seqids for serialisation with NFSv4.0 */ 1761 if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client)) 1762 break; 1763 1764 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags); 1765 prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE); 1766 /* 1767 * Ensure we process the state changes in the same order 1768 * in which the server processed them by delaying the 1769 * update of the stateid until we are in sequence. 1770 */ 1771 write_sequnlock(&state->seqlock); 1772 spin_unlock(&state->owner->so_lock); 1773 rcu_read_unlock(); 1774 trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0); 1775 1776 if (!fatal_signal_pending(current)) { 1777 if (schedule_timeout(5*HZ) == 0) 1778 status = -EAGAIN; 1779 else 1780 status = 0; 1781 } else 1782 status = -EINTR; 1783 finish_wait(&state->waitq, &wait); 1784 rcu_read_lock(); 1785 spin_lock(&state->owner->so_lock); 1786 write_seqlock(&state->seqlock); 1787 } 1788 1789 if (test_bit(NFS_OPEN_STATE, &state->flags) && 1790 !nfs4_stateid_match_other(stateid, &state->open_stateid)) { 1791 nfs4_stateid_copy(freeme, &state->open_stateid); 1792 nfs_test_and_clear_all_open_stateid(state); 1793 } 1794 1795 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 1796 nfs4_stateid_copy(&state->stateid, stateid); 1797 nfs4_stateid_copy(&state->open_stateid, stateid); 1798 trace_nfs4_open_stateid_update(state->inode, stateid, status); 1799 nfs_state_log_update_open_stateid(state); 1800 } 1801 1802 static void nfs_state_set_open_stateid(struct nfs4_state *state, 1803 const nfs4_stateid *open_stateid, 1804 fmode_t fmode, 1805 nfs4_stateid *freeme) 1806 { 1807 /* 1808 * Protect the call to nfs4_state_set_mode_locked and 1809 * serialise the stateid update 1810 */ 1811 write_seqlock(&state->seqlock); 1812 nfs_set_open_stateid_locked(state, open_stateid, freeme); 1813 switch (fmode) { 1814 case FMODE_READ: 1815 set_bit(NFS_O_RDONLY_STATE, &state->flags); 1816 break; 1817 case FMODE_WRITE: 1818 set_bit(NFS_O_WRONLY_STATE, &state->flags); 1819 break; 1820 case FMODE_READ|FMODE_WRITE: 1821 set_bit(NFS_O_RDWR_STATE, &state->flags); 1822 } 1823 set_bit(NFS_OPEN_STATE, &state->flags); 1824 write_sequnlock(&state->seqlock); 1825 } 1826 1827 static void nfs_state_clear_open_state_flags(struct nfs4_state *state) 1828 { 1829 clear_bit(NFS_O_RDWR_STATE, &state->flags); 1830 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 1831 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1832 clear_bit(NFS_OPEN_STATE, &state->flags); 1833 } 1834 1835 static void nfs_state_set_delegation(struct nfs4_state *state, 1836 const nfs4_stateid *deleg_stateid, 1837 fmode_t fmode) 1838 { 1839 /* 1840 * Protect the call to nfs4_state_set_mode_locked and 1841 * serialise the stateid update 1842 */ 1843 write_seqlock(&state->seqlock); 1844 nfs4_stateid_copy(&state->stateid, deleg_stateid); 1845 set_bit(NFS_DELEGATED_STATE, &state->flags); 1846 write_sequnlock(&state->seqlock); 1847 } 1848 1849 static void nfs_state_clear_delegation(struct nfs4_state *state) 1850 { 1851 write_seqlock(&state->seqlock); 1852 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 1853 clear_bit(NFS_DELEGATED_STATE, &state->flags); 1854 write_sequnlock(&state->seqlock); 1855 } 1856 1857 int update_open_stateid(struct nfs4_state *state, 1858 const nfs4_stateid *open_stateid, 1859 const nfs4_stateid *delegation, 1860 fmode_t fmode) 1861 { 1862 struct nfs_server *server = NFS_SERVER(state->inode); 1863 struct nfs_client *clp = server->nfs_client; 1864 struct nfs_inode *nfsi = NFS_I(state->inode); 1865 struct nfs_delegation *deleg_cur; 1866 nfs4_stateid freeme = { }; 1867 int ret = 0; 1868 1869 fmode &= (FMODE_READ|FMODE_WRITE); 1870 1871 rcu_read_lock(); 1872 spin_lock(&state->owner->so_lock); 1873 if (open_stateid != NULL) { 1874 nfs_state_set_open_stateid(state, open_stateid, fmode, &freeme); 1875 ret = 1; 1876 } 1877 1878 deleg_cur = nfs4_get_valid_delegation(state->inode); 1879 if (deleg_cur == NULL) 1880 goto no_delegation; 1881 1882 spin_lock(&deleg_cur->lock); 1883 if (rcu_dereference(nfsi->delegation) != deleg_cur || 1884 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) || 1885 (deleg_cur->type & fmode) != fmode) 1886 goto no_delegation_unlock; 1887 1888 if (delegation == NULL) 1889 delegation = &deleg_cur->stateid; 1890 else if (!nfs4_stateid_match_other(&deleg_cur->stateid, delegation)) 1891 goto no_delegation_unlock; 1892 1893 nfs_mark_delegation_referenced(deleg_cur); 1894 nfs_state_set_delegation(state, &deleg_cur->stateid, fmode); 1895 ret = 1; 1896 no_delegation_unlock: 1897 spin_unlock(&deleg_cur->lock); 1898 no_delegation: 1899 if (ret) 1900 update_open_stateflags(state, fmode); 1901 spin_unlock(&state->owner->so_lock); 1902 rcu_read_unlock(); 1903 1904 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) 1905 nfs4_schedule_state_manager(clp); 1906 if (freeme.type != 0) 1907 nfs4_test_and_free_stateid(server, &freeme, 1908 state->owner->so_cred); 1909 1910 return ret; 1911 } 1912 1913 static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp, 1914 const nfs4_stateid *stateid) 1915 { 1916 struct nfs4_state *state = lsp->ls_state; 1917 bool ret = false; 1918 1919 spin_lock(&state->state_lock); 1920 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid)) 1921 goto out_noupdate; 1922 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid)) 1923 goto out_noupdate; 1924 nfs4_stateid_copy(&lsp->ls_stateid, stateid); 1925 ret = true; 1926 out_noupdate: 1927 spin_unlock(&state->state_lock); 1928 return ret; 1929 } 1930 1931 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode) 1932 { 1933 struct nfs_delegation *delegation; 1934 1935 fmode &= FMODE_READ|FMODE_WRITE; 1936 rcu_read_lock(); 1937 delegation = nfs4_get_valid_delegation(inode); 1938 if (delegation == NULL || (delegation->type & fmode) == fmode) { 1939 rcu_read_unlock(); 1940 return; 1941 } 1942 rcu_read_unlock(); 1943 nfs4_inode_return_delegation(inode); 1944 } 1945 1946 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) 1947 { 1948 struct nfs4_state *state = opendata->state; 1949 struct nfs_delegation *delegation; 1950 int open_mode = opendata->o_arg.open_flags; 1951 fmode_t fmode = opendata->o_arg.fmode; 1952 enum open_claim_type4 claim = opendata->o_arg.claim; 1953 nfs4_stateid stateid; 1954 int ret = -EAGAIN; 1955 1956 for (;;) { 1957 spin_lock(&state->owner->so_lock); 1958 if (can_open_cached(state, fmode, open_mode, claim)) { 1959 update_open_stateflags(state, fmode); 1960 spin_unlock(&state->owner->so_lock); 1961 goto out_return_state; 1962 } 1963 spin_unlock(&state->owner->so_lock); 1964 rcu_read_lock(); 1965 delegation = nfs4_get_valid_delegation(state->inode); 1966 if (!can_open_delegated(delegation, fmode, claim)) { 1967 rcu_read_unlock(); 1968 break; 1969 } 1970 /* Save the delegation */ 1971 nfs4_stateid_copy(&stateid, &delegation->stateid); 1972 rcu_read_unlock(); 1973 nfs_release_seqid(opendata->o_arg.seqid); 1974 if (!opendata->is_recover) { 1975 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode); 1976 if (ret != 0) 1977 goto out; 1978 } 1979 ret = -EAGAIN; 1980 1981 /* Try to update the stateid using the delegation */ 1982 if (update_open_stateid(state, NULL, &stateid, fmode)) 1983 goto out_return_state; 1984 } 1985 out: 1986 return ERR_PTR(ret); 1987 out_return_state: 1988 refcount_inc(&state->count); 1989 return state; 1990 } 1991 1992 static void 1993 nfs4_process_delegation(struct inode *inode, const struct cred *cred, 1994 enum open_claim_type4 claim, 1995 const struct nfs4_open_delegation *delegation) 1996 { 1997 switch (delegation->open_delegation_type) { 1998 case NFS4_OPEN_DELEGATE_READ: 1999 case NFS4_OPEN_DELEGATE_WRITE: 2000 case NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG: 2001 case NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG: 2002 break; 2003 default: 2004 return; 2005 } 2006 switch (claim) { 2007 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 2008 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 2009 pr_err_ratelimited("NFS: Broken NFSv4 server %s is " 2010 "returning a delegation for " 2011 "OPEN(CLAIM_DELEGATE_CUR)\n", 2012 NFS_SERVER(inode)->nfs_client->cl_hostname); 2013 break; 2014 case NFS4_OPEN_CLAIM_PREVIOUS: 2015 nfs_inode_reclaim_delegation(inode, cred, delegation->type, 2016 &delegation->stateid, 2017 delegation->pagemod_limit, 2018 delegation->open_delegation_type); 2019 break; 2020 default: 2021 nfs_inode_set_delegation(inode, cred, delegation->type, 2022 &delegation->stateid, 2023 delegation->pagemod_limit, 2024 delegation->open_delegation_type); 2025 } 2026 if (delegation->do_recall) 2027 nfs_async_inode_return_delegation(inode, &delegation->stateid); 2028 } 2029 2030 /* 2031 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes 2032 * and update the nfs4_state. 2033 */ 2034 static struct nfs4_state * 2035 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data) 2036 { 2037 struct inode *inode = data->state->inode; 2038 struct nfs4_state *state = data->state; 2039 int ret; 2040 2041 if (!data->rpc_done) { 2042 if (data->rpc_status) 2043 return ERR_PTR(data->rpc_status); 2044 return nfs4_try_open_cached(data); 2045 } 2046 2047 ret = nfs_refresh_inode(inode, &data->f_attr); 2048 if (ret) 2049 return ERR_PTR(ret); 2050 2051 nfs4_process_delegation(state->inode, 2052 data->owner->so_cred, 2053 data->o_arg.claim, 2054 &data->o_res.delegation); 2055 2056 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_NO_OPEN_STATEID)) { 2057 if (!update_open_stateid(state, &data->o_res.stateid, 2058 NULL, data->o_arg.fmode)) 2059 return ERR_PTR(-EAGAIN); 2060 } else if (!update_open_stateid(state, NULL, NULL, data->o_arg.fmode)) 2061 return ERR_PTR(-EAGAIN); 2062 refcount_inc(&state->count); 2063 2064 return state; 2065 } 2066 2067 static struct inode * 2068 nfs4_opendata_get_inode(struct nfs4_opendata *data) 2069 { 2070 struct inode *inode; 2071 2072 switch (data->o_arg.claim) { 2073 case NFS4_OPEN_CLAIM_NULL: 2074 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 2075 case NFS4_OPEN_CLAIM_DELEGATE_PREV: 2076 if (!(data->f_attr.valid & NFS_ATTR_FATTR)) 2077 return ERR_PTR(-EAGAIN); 2078 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, 2079 &data->f_attr); 2080 break; 2081 default: 2082 inode = d_inode(data->dentry); 2083 ihold(inode); 2084 nfs_refresh_inode(inode, &data->f_attr); 2085 } 2086 return inode; 2087 } 2088 2089 static struct nfs4_state * 2090 nfs4_opendata_find_nfs4_state(struct nfs4_opendata *data) 2091 { 2092 struct nfs4_state *state; 2093 struct inode *inode; 2094 2095 inode = nfs4_opendata_get_inode(data); 2096 if (IS_ERR(inode)) 2097 return ERR_CAST(inode); 2098 if (data->state != NULL && data->state->inode == inode) { 2099 state = data->state; 2100 refcount_inc(&state->count); 2101 } else 2102 state = nfs4_get_open_state(inode, data->owner); 2103 iput(inode); 2104 if (state == NULL) 2105 state = ERR_PTR(-ENOMEM); 2106 return state; 2107 } 2108 2109 static struct nfs4_state * 2110 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 2111 { 2112 struct nfs4_state *state; 2113 2114 if (!data->rpc_done) { 2115 state = nfs4_try_open_cached(data); 2116 trace_nfs4_cached_open(data->state); 2117 goto out; 2118 } 2119 2120 state = nfs4_opendata_find_nfs4_state(data); 2121 if (IS_ERR(state)) 2122 goto out; 2123 2124 nfs4_process_delegation(state->inode, 2125 data->owner->so_cred, 2126 data->o_arg.claim, 2127 &data->o_res.delegation); 2128 2129 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_NO_OPEN_STATEID)) { 2130 if (!update_open_stateid(state, &data->o_res.stateid, 2131 NULL, data->o_arg.fmode)) { 2132 nfs4_put_open_state(state); 2133 state = ERR_PTR(-EAGAIN); 2134 } 2135 } else if (!update_open_stateid(state, NULL, NULL, data->o_arg.fmode)) { 2136 nfs4_put_open_state(state); 2137 state = ERR_PTR(-EAGAIN); 2138 } 2139 out: 2140 nfs_release_seqid(data->o_arg.seqid); 2141 return state; 2142 } 2143 2144 static struct nfs4_state * 2145 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 2146 { 2147 struct nfs4_state *ret; 2148 2149 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) 2150 ret =_nfs4_opendata_reclaim_to_nfs4_state(data); 2151 else 2152 ret = _nfs4_opendata_to_nfs4_state(data); 2153 nfs4_sequence_free_slot(&data->o_res.seq_res); 2154 return ret; 2155 } 2156 2157 static struct nfs_open_context * 2158 nfs4_state_find_open_context_mode(struct nfs4_state *state, fmode_t mode) 2159 { 2160 struct nfs_inode *nfsi = NFS_I(state->inode); 2161 struct nfs_open_context *ctx; 2162 2163 rcu_read_lock(); 2164 list_for_each_entry_rcu(ctx, &nfsi->open_files, list) { 2165 if (ctx->state != state) 2166 continue; 2167 if ((ctx->mode & mode) != mode) 2168 continue; 2169 if (!get_nfs_open_context(ctx)) 2170 continue; 2171 rcu_read_unlock(); 2172 return ctx; 2173 } 2174 rcu_read_unlock(); 2175 return ERR_PTR(-ENOENT); 2176 } 2177 2178 static struct nfs_open_context * 2179 nfs4_state_find_open_context(struct nfs4_state *state) 2180 { 2181 struct nfs_open_context *ctx; 2182 2183 ctx = nfs4_state_find_open_context_mode(state, FMODE_READ|FMODE_WRITE); 2184 if (!IS_ERR(ctx)) 2185 return ctx; 2186 ctx = nfs4_state_find_open_context_mode(state, FMODE_WRITE); 2187 if (!IS_ERR(ctx)) 2188 return ctx; 2189 return nfs4_state_find_open_context_mode(state, FMODE_READ); 2190 } 2191 2192 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, 2193 struct nfs4_state *state, enum open_claim_type4 claim) 2194 { 2195 struct nfs4_opendata *opendata; 2196 2197 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, 2198 NULL, claim, GFP_NOFS); 2199 if (opendata == NULL) 2200 return ERR_PTR(-ENOMEM); 2201 opendata->state = state; 2202 refcount_inc(&state->count); 2203 return opendata; 2204 } 2205 2206 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, 2207 fmode_t fmode) 2208 { 2209 struct nfs4_state *newstate; 2210 struct nfs_server *server = NFS_SB(opendata->dentry->d_sb); 2211 int openflags = opendata->o_arg.open_flags; 2212 int ret; 2213 2214 if (!nfs4_mode_match_open_stateid(opendata->state, fmode)) 2215 return 0; 2216 opendata->o_arg.fmode = fmode; 2217 opendata->o_arg.share_access = 2218 nfs4_map_atomic_open_share(server, fmode, openflags); 2219 memset(&opendata->o_res, 0, sizeof(opendata->o_res)); 2220 memset(&opendata->c_res, 0, sizeof(opendata->c_res)); 2221 nfs4_init_opendata_res(opendata); 2222 ret = _nfs4_recover_proc_open(opendata); 2223 if (ret != 0) 2224 return ret; 2225 newstate = nfs4_opendata_to_nfs4_state(opendata); 2226 if (IS_ERR(newstate)) 2227 return PTR_ERR(newstate); 2228 if (newstate != opendata->state) 2229 ret = -ESTALE; 2230 nfs4_close_state(newstate, fmode); 2231 return ret; 2232 } 2233 2234 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state) 2235 { 2236 int ret; 2237 2238 /* memory barrier prior to reading state->n_* */ 2239 smp_rmb(); 2240 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); 2241 if (ret != 0) 2242 return ret; 2243 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE); 2244 if (ret != 0) 2245 return ret; 2246 ret = nfs4_open_recover_helper(opendata, FMODE_READ); 2247 if (ret != 0) 2248 return ret; 2249 /* 2250 * We may have performed cached opens for all three recoveries. 2251 * Check if we need to update the current stateid. 2252 */ 2253 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 && 2254 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) { 2255 write_seqlock(&state->seqlock); 2256 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 2257 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 2258 write_sequnlock(&state->seqlock); 2259 } 2260 return 0; 2261 } 2262 2263 /* 2264 * OPEN_RECLAIM: 2265 * reclaim state on the server after a reboot. 2266 */ 2267 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 2268 { 2269 struct nfs_delegation *delegation; 2270 struct nfs4_opendata *opendata; 2271 u32 delegation_type = NFS4_OPEN_DELEGATE_NONE; 2272 int status; 2273 2274 opendata = nfs4_open_recoverdata_alloc(ctx, state, 2275 NFS4_OPEN_CLAIM_PREVIOUS); 2276 if (IS_ERR(opendata)) 2277 return PTR_ERR(opendata); 2278 rcu_read_lock(); 2279 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2280 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0) { 2281 switch(delegation->type) { 2282 case FMODE_READ: 2283 delegation_type = NFS4_OPEN_DELEGATE_READ; 2284 if (test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) 2285 delegation_type = NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG; 2286 break; 2287 case FMODE_WRITE: 2288 case FMODE_READ|FMODE_WRITE: 2289 delegation_type = NFS4_OPEN_DELEGATE_WRITE; 2290 if (test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) 2291 delegation_type = NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG; 2292 } 2293 } 2294 rcu_read_unlock(); 2295 opendata->o_arg.u.delegation_type = delegation_type; 2296 status = nfs4_open_recover(opendata, state); 2297 nfs4_opendata_put(opendata); 2298 return status; 2299 } 2300 2301 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 2302 { 2303 struct nfs_server *server = NFS_SERVER(state->inode); 2304 struct nfs4_exception exception = { }; 2305 int err; 2306 do { 2307 err = _nfs4_do_open_reclaim(ctx, state); 2308 trace_nfs4_open_reclaim(ctx, 0, err); 2309 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 2310 continue; 2311 if (err != -NFS4ERR_DELAY) 2312 break; 2313 nfs4_handle_exception(server, err, &exception); 2314 } while (exception.retry); 2315 return err; 2316 } 2317 2318 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state) 2319 { 2320 struct nfs_open_context *ctx; 2321 int ret; 2322 2323 ctx = nfs4_state_find_open_context(state); 2324 if (IS_ERR(ctx)) 2325 return -EAGAIN; 2326 clear_bit(NFS_DELEGATED_STATE, &state->flags); 2327 nfs_state_clear_open_state_flags(state); 2328 ret = nfs4_do_open_reclaim(ctx, state); 2329 put_nfs_open_context(ctx); 2330 return ret; 2331 } 2332 2333 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err) 2334 { 2335 switch (err) { 2336 default: 2337 printk(KERN_ERR "NFS: %s: unhandled error " 2338 "%d.\n", __func__, err); 2339 fallthrough; 2340 case 0: 2341 case -ENOENT: 2342 case -EAGAIN: 2343 case -ESTALE: 2344 case -ETIMEDOUT: 2345 break; 2346 case -NFS4ERR_BADSESSION: 2347 case -NFS4ERR_BADSLOT: 2348 case -NFS4ERR_BAD_HIGH_SLOT: 2349 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 2350 case -NFS4ERR_DEADSESSION: 2351 return -EAGAIN; 2352 case -NFS4ERR_STALE_CLIENTID: 2353 case -NFS4ERR_STALE_STATEID: 2354 /* Don't recall a delegation if it was lost */ 2355 nfs4_schedule_lease_recovery(server->nfs_client); 2356 return -EAGAIN; 2357 case -NFS4ERR_MOVED: 2358 nfs4_schedule_migration_recovery(server); 2359 return -EAGAIN; 2360 case -NFS4ERR_LEASE_MOVED: 2361 nfs4_schedule_lease_moved_recovery(server->nfs_client); 2362 return -EAGAIN; 2363 case -NFS4ERR_DELEG_REVOKED: 2364 case -NFS4ERR_ADMIN_REVOKED: 2365 case -NFS4ERR_EXPIRED: 2366 case -NFS4ERR_BAD_STATEID: 2367 case -NFS4ERR_OPENMODE: 2368 nfs_inode_find_state_and_recover(state->inode, 2369 stateid); 2370 nfs4_schedule_stateid_recovery(server, state); 2371 return -EAGAIN; 2372 case -NFS4ERR_DELAY: 2373 case -NFS4ERR_GRACE: 2374 ssleep(1); 2375 return -EAGAIN; 2376 case -ENOMEM: 2377 case -NFS4ERR_DENIED: 2378 if (fl) { 2379 struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner; 2380 if (lsp) 2381 set_bit(NFS_LOCK_LOST, &lsp->ls_flags); 2382 } 2383 return 0; 2384 } 2385 return err; 2386 } 2387 2388 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, 2389 struct nfs4_state *state, const nfs4_stateid *stateid) 2390 { 2391 struct nfs_server *server = NFS_SERVER(state->inode); 2392 struct nfs4_opendata *opendata; 2393 int err = 0; 2394 2395 opendata = nfs4_open_recoverdata_alloc(ctx, state, 2396 NFS4_OPEN_CLAIM_DELEG_CUR_FH); 2397 if (IS_ERR(opendata)) 2398 return PTR_ERR(opendata); 2399 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); 2400 if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) { 2401 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); 2402 if (err) 2403 goto out; 2404 } 2405 if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) { 2406 err = nfs4_open_recover_helper(opendata, FMODE_WRITE); 2407 if (err) 2408 goto out; 2409 } 2410 if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) { 2411 err = nfs4_open_recover_helper(opendata, FMODE_READ); 2412 if (err) 2413 goto out; 2414 } 2415 nfs_state_clear_delegation(state); 2416 out: 2417 nfs4_opendata_put(opendata); 2418 return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err); 2419 } 2420 2421 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata) 2422 { 2423 struct nfs4_opendata *data = calldata; 2424 2425 nfs4_setup_sequence(data->o_arg.server->nfs_client, 2426 &data->c_arg.seq_args, &data->c_res.seq_res, task); 2427 } 2428 2429 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata) 2430 { 2431 struct nfs4_opendata *data = calldata; 2432 2433 nfs40_sequence_done(task, &data->c_res.seq_res); 2434 2435 data->rpc_status = task->tk_status; 2436 if (data->rpc_status == 0) { 2437 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid); 2438 nfs_confirm_seqid(&data->owner->so_seqid, 0); 2439 renew_lease(data->o_res.server, data->timestamp); 2440 data->rpc_done = true; 2441 } 2442 } 2443 2444 static void nfs4_open_confirm_release(void *calldata) 2445 { 2446 struct nfs4_opendata *data = calldata; 2447 struct nfs4_state *state = NULL; 2448 2449 /* If this request hasn't been cancelled, do nothing */ 2450 if (!data->cancelled) 2451 goto out_free; 2452 /* In case of error, no cleanup! */ 2453 if (!data->rpc_done) 2454 goto out_free; 2455 state = nfs4_opendata_to_nfs4_state(data); 2456 if (!IS_ERR(state)) 2457 nfs4_close_state(state, data->o_arg.fmode); 2458 out_free: 2459 nfs4_opendata_put(data); 2460 } 2461 2462 static const struct rpc_call_ops nfs4_open_confirm_ops = { 2463 .rpc_call_prepare = nfs4_open_confirm_prepare, 2464 .rpc_call_done = nfs4_open_confirm_done, 2465 .rpc_release = nfs4_open_confirm_release, 2466 }; 2467 2468 /* 2469 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata 2470 */ 2471 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) 2472 { 2473 struct nfs_server *server = NFS_SERVER(d_inode(data->dir)); 2474 struct rpc_task *task; 2475 struct rpc_message msg = { 2476 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM], 2477 .rpc_argp = &data->c_arg, 2478 .rpc_resp = &data->c_res, 2479 .rpc_cred = data->owner->so_cred, 2480 }; 2481 struct rpc_task_setup task_setup_data = { 2482 .rpc_client = server->client, 2483 .rpc_message = &msg, 2484 .callback_ops = &nfs4_open_confirm_ops, 2485 .callback_data = data, 2486 .workqueue = nfsiod_workqueue, 2487 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 2488 }; 2489 int status; 2490 2491 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1, 2492 data->is_recover); 2493 kref_get(&data->kref); 2494 data->rpc_done = false; 2495 data->rpc_status = 0; 2496 data->timestamp = jiffies; 2497 task = rpc_run_task(&task_setup_data); 2498 if (IS_ERR(task)) 2499 return PTR_ERR(task); 2500 status = rpc_wait_for_completion_task(task); 2501 if (status != 0) { 2502 data->cancelled = true; 2503 smp_wmb(); 2504 } else 2505 status = data->rpc_status; 2506 rpc_put_task(task); 2507 return status; 2508 } 2509 2510 static void nfs4_open_prepare(struct rpc_task *task, void *calldata) 2511 { 2512 struct nfs4_opendata *data = calldata; 2513 struct nfs4_state_owner *sp = data->owner; 2514 struct nfs_client *clp = sp->so_server->nfs_client; 2515 enum open_claim_type4 claim = data->o_arg.claim; 2516 2517 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) 2518 goto out_wait; 2519 /* 2520 * Check if we still need to send an OPEN call, or if we can use 2521 * a delegation instead. 2522 */ 2523 if (data->state != NULL) { 2524 struct nfs_delegation *delegation; 2525 2526 if (can_open_cached(data->state, data->o_arg.fmode, 2527 data->o_arg.open_flags, claim)) 2528 goto out_no_action; 2529 rcu_read_lock(); 2530 delegation = nfs4_get_valid_delegation(data->state->inode); 2531 if (can_open_delegated(delegation, data->o_arg.fmode, claim)) 2532 goto unlock_no_action; 2533 rcu_read_unlock(); 2534 } 2535 /* Update client id. */ 2536 data->o_arg.clientid = clp->cl_clientid; 2537 switch (claim) { 2538 default: 2539 break; 2540 case NFS4_OPEN_CLAIM_PREVIOUS: 2541 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 2542 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 2543 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0]; 2544 fallthrough; 2545 case NFS4_OPEN_CLAIM_FH: 2546 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; 2547 } 2548 data->timestamp = jiffies; 2549 if (nfs4_setup_sequence(data->o_arg.server->nfs_client, 2550 &data->o_arg.seq_args, 2551 &data->o_res.seq_res, 2552 task) != 0) 2553 nfs_release_seqid(data->o_arg.seqid); 2554 2555 /* Set the create mode (note dependency on the session type) */ 2556 data->o_arg.createmode = NFS4_CREATE_UNCHECKED; 2557 if (data->o_arg.open_flags & O_EXCL) { 2558 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1; 2559 if (clp->cl_mvops->minor_version == 0) { 2560 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE; 2561 /* don't put an ACCESS op in OPEN compound if O_EXCL, 2562 * because ACCESS will return permission denied for 2563 * all bits until close */ 2564 data->o_res.access_request = data->o_arg.access = 0; 2565 } else if (nfs4_has_persistent_session(clp)) 2566 data->o_arg.createmode = NFS4_CREATE_GUARDED; 2567 } 2568 return; 2569 unlock_no_action: 2570 trace_nfs4_cached_open(data->state); 2571 rcu_read_unlock(); 2572 out_no_action: 2573 task->tk_action = NULL; 2574 out_wait: 2575 nfs4_sequence_done(task, &data->o_res.seq_res); 2576 } 2577 2578 static void nfs4_open_done(struct rpc_task *task, void *calldata) 2579 { 2580 struct nfs4_opendata *data = calldata; 2581 2582 data->rpc_status = task->tk_status; 2583 2584 if (!nfs4_sequence_process(task, &data->o_res.seq_res)) 2585 return; 2586 2587 if (task->tk_status == 0) { 2588 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) { 2589 switch (data->o_res.f_attr->mode & S_IFMT) { 2590 case S_IFREG: 2591 break; 2592 case S_IFLNK: 2593 data->rpc_status = -ELOOP; 2594 break; 2595 case S_IFDIR: 2596 data->rpc_status = -EISDIR; 2597 break; 2598 default: 2599 data->rpc_status = -ENOTDIR; 2600 } 2601 } 2602 renew_lease(data->o_res.server, data->timestamp); 2603 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)) 2604 nfs_confirm_seqid(&data->owner->so_seqid, 0); 2605 } 2606 data->rpc_done = true; 2607 } 2608 2609 static void nfs4_open_release(void *calldata) 2610 { 2611 struct nfs4_opendata *data = calldata; 2612 struct nfs4_state *state = NULL; 2613 2614 /* In case of error, no cleanup! */ 2615 if (data->rpc_status != 0 || !data->rpc_done) { 2616 nfs_release_seqid(data->o_arg.seqid); 2617 goto out_free; 2618 } 2619 /* If this request hasn't been cancelled, do nothing */ 2620 if (!data->cancelled) 2621 goto out_free; 2622 /* In case we need an open_confirm, no cleanup! */ 2623 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) 2624 goto out_free; 2625 state = nfs4_opendata_to_nfs4_state(data); 2626 if (!IS_ERR(state)) 2627 nfs4_close_state(state, data->o_arg.fmode); 2628 out_free: 2629 nfs4_opendata_put(data); 2630 } 2631 2632 static const struct rpc_call_ops nfs4_open_ops = { 2633 .rpc_call_prepare = nfs4_open_prepare, 2634 .rpc_call_done = nfs4_open_done, 2635 .rpc_release = nfs4_open_release, 2636 }; 2637 2638 static int nfs4_run_open_task(struct nfs4_opendata *data, 2639 struct nfs_open_context *ctx) 2640 { 2641 struct inode *dir = d_inode(data->dir); 2642 struct nfs_server *server = NFS_SERVER(dir); 2643 struct nfs_openargs *o_arg = &data->o_arg; 2644 struct nfs_openres *o_res = &data->o_res; 2645 struct rpc_task *task; 2646 struct rpc_message msg = { 2647 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN], 2648 .rpc_argp = o_arg, 2649 .rpc_resp = o_res, 2650 .rpc_cred = data->owner->so_cred, 2651 }; 2652 struct rpc_task_setup task_setup_data = { 2653 .rpc_client = server->client, 2654 .rpc_message = &msg, 2655 .callback_ops = &nfs4_open_ops, 2656 .callback_data = data, 2657 .workqueue = nfsiod_workqueue, 2658 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 2659 }; 2660 int status; 2661 2662 if (nfs_server_capable(dir, NFS_CAP_MOVEABLE)) 2663 task_setup_data.flags |= RPC_TASK_MOVEABLE; 2664 2665 kref_get(&data->kref); 2666 data->rpc_done = false; 2667 data->rpc_status = 0; 2668 data->cancelled = false; 2669 data->is_recover = false; 2670 if (!ctx) { 2671 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 1); 2672 data->is_recover = true; 2673 task_setup_data.flags |= RPC_TASK_TIMEOUT; 2674 } else { 2675 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 0); 2676 pnfs_lgopen_prepare(data, ctx); 2677 } 2678 task = rpc_run_task(&task_setup_data); 2679 if (IS_ERR(task)) 2680 return PTR_ERR(task); 2681 status = rpc_wait_for_completion_task(task); 2682 if (status != 0) { 2683 data->cancelled = true; 2684 smp_wmb(); 2685 } else 2686 status = data->rpc_status; 2687 rpc_put_task(task); 2688 2689 return status; 2690 } 2691 2692 static int _nfs4_recover_proc_open(struct nfs4_opendata *data) 2693 { 2694 struct inode *dir = d_inode(data->dir); 2695 struct nfs_openres *o_res = &data->o_res; 2696 int status; 2697 2698 status = nfs4_run_open_task(data, NULL); 2699 if (status != 0 || !data->rpc_done) 2700 return status; 2701 2702 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr); 2703 2704 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) 2705 status = _nfs4_proc_open_confirm(data); 2706 2707 return status; 2708 } 2709 2710 /* 2711 * Additional permission checks in order to distinguish between an 2712 * open for read, and an open for execute. This works around the 2713 * fact that NFSv4 OPEN treats read and execute permissions as being 2714 * the same. 2715 * Note that in the non-execute case, we want to turn off permission 2716 * checking if we just created a new file (POSIX open() semantics). 2717 */ 2718 static int nfs4_opendata_access(const struct cred *cred, 2719 struct nfs4_opendata *opendata, 2720 struct nfs4_state *state, fmode_t fmode) 2721 { 2722 struct nfs_access_entry cache; 2723 u32 mask, flags; 2724 2725 /* access call failed or for some reason the server doesn't 2726 * support any access modes -- defer access call until later */ 2727 if (opendata->o_res.access_supported == 0) 2728 return 0; 2729 2730 mask = 0; 2731 if (fmode & FMODE_EXEC) { 2732 /* ONLY check for exec rights */ 2733 if (S_ISDIR(state->inode->i_mode)) 2734 mask = NFS4_ACCESS_LOOKUP; 2735 else 2736 mask = NFS4_ACCESS_EXECUTE; 2737 } else if ((fmode & FMODE_READ) && !opendata->file_created) 2738 mask = NFS4_ACCESS_READ; 2739 2740 nfs_access_set_mask(&cache, opendata->o_res.access_result); 2741 nfs_access_add_cache(state->inode, &cache, cred); 2742 2743 flags = NFS4_ACCESS_READ | NFS4_ACCESS_EXECUTE | NFS4_ACCESS_LOOKUP; 2744 if ((mask & ~cache.mask & flags) == 0) 2745 return 0; 2746 2747 return -EACCES; 2748 } 2749 2750 /* 2751 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata 2752 */ 2753 static int _nfs4_proc_open(struct nfs4_opendata *data, 2754 struct nfs_open_context *ctx) 2755 { 2756 struct inode *dir = d_inode(data->dir); 2757 struct nfs_server *server = NFS_SERVER(dir); 2758 struct nfs_openargs *o_arg = &data->o_arg; 2759 struct nfs_openres *o_res = &data->o_res; 2760 int status; 2761 2762 status = nfs4_run_open_task(data, ctx); 2763 if (!data->rpc_done) 2764 return status; 2765 if (status != 0) { 2766 if (status == -NFS4ERR_BADNAME && 2767 !(o_arg->open_flags & O_CREAT)) 2768 return -ENOENT; 2769 return status; 2770 } 2771 2772 nfs_fattr_map_and_free_names(server, &data->f_attr); 2773 2774 if (o_arg->open_flags & O_CREAT) { 2775 if (o_arg->open_flags & O_EXCL) 2776 data->file_created = true; 2777 else if (o_res->cinfo.before != o_res->cinfo.after) 2778 data->file_created = true; 2779 if (data->file_created || 2780 inode_peek_iversion_raw(dir) != o_res->cinfo.after) 2781 nfs4_update_changeattr(dir, &o_res->cinfo, 2782 o_res->f_attr->time_start, 2783 NFS_INO_INVALID_DATA); 2784 } 2785 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) 2786 server->caps &= ~NFS_CAP_POSIX_LOCK; 2787 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 2788 status = _nfs4_proc_open_confirm(data); 2789 if (status != 0) 2790 return status; 2791 } 2792 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) { 2793 struct nfs_fh *fh = &o_res->fh; 2794 2795 nfs4_sequence_free_slot(&o_res->seq_res); 2796 if (o_arg->claim == NFS4_OPEN_CLAIM_FH) 2797 fh = NFS_FH(d_inode(data->dentry)); 2798 nfs4_proc_getattr(server, fh, o_res->f_attr, NULL); 2799 } 2800 return 0; 2801 } 2802 2803 /* 2804 * OPEN_EXPIRED: 2805 * reclaim state on the server after a network partition. 2806 * Assumes caller holds the appropriate lock 2807 */ 2808 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2809 { 2810 struct nfs4_opendata *opendata; 2811 int ret; 2812 2813 opendata = nfs4_open_recoverdata_alloc(ctx, state, NFS4_OPEN_CLAIM_FH); 2814 if (IS_ERR(opendata)) 2815 return PTR_ERR(opendata); 2816 /* 2817 * We're not recovering a delegation, so ask for no delegation. 2818 * Otherwise the recovery thread could deadlock with an outstanding 2819 * delegation return. 2820 */ 2821 opendata->o_arg.open_flags = O_DIRECT; 2822 ret = nfs4_open_recover(opendata, state); 2823 if (ret == -ESTALE) 2824 d_drop(ctx->dentry); 2825 nfs4_opendata_put(opendata); 2826 return ret; 2827 } 2828 2829 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2830 { 2831 struct nfs_server *server = NFS_SERVER(state->inode); 2832 struct nfs4_exception exception = { }; 2833 int err; 2834 2835 do { 2836 err = _nfs4_open_expired(ctx, state); 2837 trace_nfs4_open_expired(ctx, 0, err); 2838 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 2839 continue; 2840 switch (err) { 2841 default: 2842 goto out; 2843 case -NFS4ERR_GRACE: 2844 case -NFS4ERR_DELAY: 2845 nfs4_handle_exception(server, err, &exception); 2846 err = 0; 2847 } 2848 } while (exception.retry); 2849 out: 2850 return err; 2851 } 2852 2853 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2854 { 2855 struct nfs_open_context *ctx; 2856 int ret; 2857 2858 ctx = nfs4_state_find_open_context(state); 2859 if (IS_ERR(ctx)) 2860 return -EAGAIN; 2861 ret = nfs4_do_open_expired(ctx, state); 2862 put_nfs_open_context(ctx); 2863 return ret; 2864 } 2865 2866 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state, 2867 const nfs4_stateid *stateid) 2868 { 2869 nfs_remove_bad_delegation(state->inode, stateid); 2870 nfs_state_clear_delegation(state); 2871 } 2872 2873 static void nfs40_clear_delegation_stateid(struct nfs4_state *state) 2874 { 2875 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL) 2876 nfs_finish_clear_delegation_stateid(state, NULL); 2877 } 2878 2879 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2880 { 2881 /* NFSv4.0 doesn't allow for delegation recovery on open expire */ 2882 nfs40_clear_delegation_stateid(state); 2883 nfs_state_clear_open_state_flags(state); 2884 return nfs4_open_expired(sp, state); 2885 } 2886 2887 static int nfs40_test_and_free_expired_stateid(struct nfs_server *server, 2888 const nfs4_stateid *stateid, 2889 const struct cred *cred) 2890 { 2891 return -NFS4ERR_BAD_STATEID; 2892 } 2893 2894 #if defined(CONFIG_NFS_V4_1) 2895 static int nfs41_test_and_free_expired_stateid(struct nfs_server *server, 2896 const nfs4_stateid *stateid, 2897 const struct cred *cred) 2898 { 2899 int status; 2900 2901 switch (stateid->type) { 2902 default: 2903 break; 2904 case NFS4_INVALID_STATEID_TYPE: 2905 case NFS4_SPECIAL_STATEID_TYPE: 2906 return -NFS4ERR_BAD_STATEID; 2907 case NFS4_REVOKED_STATEID_TYPE: 2908 goto out_free; 2909 } 2910 2911 status = nfs41_test_stateid(server, stateid, cred); 2912 switch (status) { 2913 case -NFS4ERR_EXPIRED: 2914 case -NFS4ERR_ADMIN_REVOKED: 2915 case -NFS4ERR_DELEG_REVOKED: 2916 break; 2917 default: 2918 return status; 2919 } 2920 out_free: 2921 /* Ack the revoked state to the server */ 2922 nfs41_free_stateid(server, stateid, cred, true); 2923 return -NFS4ERR_EXPIRED; 2924 } 2925 2926 static int nfs41_check_delegation_stateid(struct nfs4_state *state) 2927 { 2928 struct nfs_server *server = NFS_SERVER(state->inode); 2929 nfs4_stateid stateid; 2930 struct nfs_delegation *delegation; 2931 const struct cred *cred = NULL; 2932 int status, ret = NFS_OK; 2933 2934 /* Get the delegation credential for use by test/free_stateid */ 2935 rcu_read_lock(); 2936 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2937 if (delegation == NULL) { 2938 rcu_read_unlock(); 2939 nfs_state_clear_delegation(state); 2940 return NFS_OK; 2941 } 2942 2943 spin_lock(&delegation->lock); 2944 nfs4_stateid_copy(&stateid, &delegation->stateid); 2945 2946 if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, 2947 &delegation->flags)) { 2948 spin_unlock(&delegation->lock); 2949 rcu_read_unlock(); 2950 return NFS_OK; 2951 } 2952 2953 if (delegation->cred) 2954 cred = get_cred(delegation->cred); 2955 spin_unlock(&delegation->lock); 2956 rcu_read_unlock(); 2957 status = nfs41_test_and_free_expired_stateid(server, &stateid, cred); 2958 trace_nfs4_test_delegation_stateid(state, NULL, status); 2959 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) 2960 nfs_finish_clear_delegation_stateid(state, &stateid); 2961 else 2962 ret = status; 2963 2964 put_cred(cred); 2965 return ret; 2966 } 2967 2968 static void nfs41_delegation_recover_stateid(struct nfs4_state *state) 2969 { 2970 nfs4_stateid tmp; 2971 2972 if (test_bit(NFS_DELEGATED_STATE, &state->flags) && 2973 nfs4_copy_delegation_stateid(state->inode, state->state, 2974 &tmp, NULL) && 2975 nfs4_stateid_match_other(&state->stateid, &tmp)) 2976 nfs_state_set_delegation(state, &tmp, state->state); 2977 else 2978 nfs_state_clear_delegation(state); 2979 } 2980 2981 /** 2982 * nfs41_check_expired_locks - possibly free a lock stateid 2983 * 2984 * @state: NFSv4 state for an inode 2985 * 2986 * Returns NFS_OK if recovery for this stateid is now finished. 2987 * Otherwise a negative NFS4ERR value is returned. 2988 */ 2989 static int nfs41_check_expired_locks(struct nfs4_state *state) 2990 { 2991 int status, ret = NFS_OK; 2992 struct nfs4_lock_state *lsp, *prev = NULL; 2993 struct nfs_server *server = NFS_SERVER(state->inode); 2994 2995 if (!test_bit(LK_STATE_IN_USE, &state->flags)) 2996 goto out; 2997 2998 spin_lock(&state->state_lock); 2999 list_for_each_entry(lsp, &state->lock_states, ls_locks) { 3000 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { 3001 const struct cred *cred = lsp->ls_state->owner->so_cred; 3002 3003 refcount_inc(&lsp->ls_count); 3004 spin_unlock(&state->state_lock); 3005 3006 nfs4_put_lock_state(prev); 3007 prev = lsp; 3008 3009 status = nfs41_test_and_free_expired_stateid(server, 3010 &lsp->ls_stateid, 3011 cred); 3012 trace_nfs4_test_lock_stateid(state, lsp, status); 3013 if (status == -NFS4ERR_EXPIRED || 3014 status == -NFS4ERR_BAD_STATEID) { 3015 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 3016 lsp->ls_stateid.type = NFS4_INVALID_STATEID_TYPE; 3017 if (!recover_lost_locks) 3018 set_bit(NFS_LOCK_LOST, &lsp->ls_flags); 3019 } else if (status != NFS_OK) { 3020 ret = status; 3021 nfs4_put_lock_state(prev); 3022 goto out; 3023 } 3024 spin_lock(&state->state_lock); 3025 } 3026 } 3027 spin_unlock(&state->state_lock); 3028 nfs4_put_lock_state(prev); 3029 out: 3030 return ret; 3031 } 3032 3033 /** 3034 * nfs41_check_open_stateid - possibly free an open stateid 3035 * 3036 * @state: NFSv4 state for an inode 3037 * 3038 * Returns NFS_OK if recovery for this stateid is now finished. 3039 * Otherwise a negative NFS4ERR value is returned. 3040 */ 3041 static int nfs41_check_open_stateid(struct nfs4_state *state) 3042 { 3043 struct nfs_server *server = NFS_SERVER(state->inode); 3044 nfs4_stateid *stateid = &state->open_stateid; 3045 const struct cred *cred = state->owner->so_cred; 3046 int status; 3047 3048 if (test_bit(NFS_OPEN_STATE, &state->flags) == 0) 3049 return -NFS4ERR_BAD_STATEID; 3050 status = nfs41_test_and_free_expired_stateid(server, stateid, cred); 3051 trace_nfs4_test_open_stateid(state, NULL, status); 3052 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) { 3053 nfs_state_clear_open_state_flags(state); 3054 stateid->type = NFS4_INVALID_STATEID_TYPE; 3055 return status; 3056 } 3057 if (nfs_open_stateid_recover_openmode(state)) 3058 return -NFS4ERR_OPENMODE; 3059 return NFS_OK; 3060 } 3061 3062 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 3063 { 3064 int status; 3065 3066 status = nfs41_check_delegation_stateid(state); 3067 if (status != NFS_OK) 3068 return status; 3069 nfs41_delegation_recover_stateid(state); 3070 3071 status = nfs41_check_expired_locks(state); 3072 if (status != NFS_OK) 3073 return status; 3074 status = nfs41_check_open_stateid(state); 3075 if (status != NFS_OK) 3076 status = nfs4_open_expired(sp, state); 3077 return status; 3078 } 3079 #endif 3080 3081 /* 3082 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-* 3083 * fields corresponding to attributes that were used to store the verifier. 3084 * Make sure we clobber those fields in the later setattr call 3085 */ 3086 static unsigned nfs4_exclusive_attrset(struct nfs4_opendata *opendata, 3087 struct iattr *sattr, struct nfs4_label **label) 3088 { 3089 const __u32 *bitmask = opendata->o_arg.server->exclcreat_bitmask; 3090 __u32 attrset[3]; 3091 unsigned ret; 3092 unsigned i; 3093 3094 for (i = 0; i < ARRAY_SIZE(attrset); i++) { 3095 attrset[i] = opendata->o_res.attrset[i]; 3096 if (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE4_1) 3097 attrset[i] &= ~bitmask[i]; 3098 } 3099 3100 ret = (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE) ? 3101 sattr->ia_valid : 0; 3102 3103 if ((attrset[1] & (FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET))) { 3104 if (sattr->ia_valid & ATTR_ATIME_SET) 3105 ret |= ATTR_ATIME_SET; 3106 else 3107 ret |= ATTR_ATIME; 3108 } 3109 3110 if ((attrset[1] & (FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET))) { 3111 if (sattr->ia_valid & ATTR_MTIME_SET) 3112 ret |= ATTR_MTIME_SET; 3113 else 3114 ret |= ATTR_MTIME; 3115 } 3116 3117 if (!(attrset[2] & FATTR4_WORD2_SECURITY_LABEL)) 3118 *label = NULL; 3119 return ret; 3120 } 3121 3122 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, 3123 struct nfs_open_context *ctx) 3124 { 3125 struct nfs4_state_owner *sp = opendata->owner; 3126 struct nfs_server *server = sp->so_server; 3127 struct dentry *dentry; 3128 struct nfs4_state *state; 3129 fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx); 3130 struct inode *dir = d_inode(opendata->dir); 3131 unsigned long dir_verifier; 3132 int ret; 3133 3134 dir_verifier = nfs_save_change_attribute(dir); 3135 3136 ret = _nfs4_proc_open(opendata, ctx); 3137 if (ret != 0) 3138 goto out; 3139 3140 state = _nfs4_opendata_to_nfs4_state(opendata); 3141 ret = PTR_ERR(state); 3142 if (IS_ERR(state)) 3143 goto out; 3144 ctx->state = state; 3145 if (server->caps & NFS_CAP_POSIX_LOCK) 3146 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); 3147 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK) 3148 set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags); 3149 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_PRESERVE_UNLINKED) 3150 set_bit(NFS_INO_PRESERVE_UNLINKED, &NFS_I(state->inode)->flags); 3151 3152 dentry = opendata->dentry; 3153 if (d_really_is_negative(dentry)) { 3154 struct dentry *alias; 3155 d_drop(dentry); 3156 alias = d_exact_alias(dentry, state->inode); 3157 if (!alias) 3158 alias = d_splice_alias(igrab(state->inode), dentry); 3159 /* d_splice_alias() can't fail here - it's a non-directory */ 3160 if (alias) { 3161 dput(ctx->dentry); 3162 ctx->dentry = dentry = alias; 3163 } 3164 } 3165 3166 switch(opendata->o_arg.claim) { 3167 default: 3168 break; 3169 case NFS4_OPEN_CLAIM_NULL: 3170 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 3171 case NFS4_OPEN_CLAIM_DELEGATE_PREV: 3172 if (!opendata->rpc_done) 3173 break; 3174 if (opendata->o_res.delegation.type != 0) 3175 dir_verifier = nfs_save_change_attribute(dir); 3176 nfs_set_verifier(dentry, dir_verifier); 3177 } 3178 3179 /* Parse layoutget results before we check for access */ 3180 pnfs_parse_lgopen(state->inode, opendata->lgp, ctx); 3181 3182 ret = nfs4_opendata_access(sp->so_cred, opendata, state, acc_mode); 3183 if (ret != 0) 3184 goto out; 3185 3186 if (d_inode(dentry) == state->inode) 3187 nfs_inode_attach_open_context(ctx); 3188 3189 out: 3190 if (!opendata->cancelled) { 3191 if (opendata->lgp) { 3192 nfs4_lgopen_release(opendata->lgp); 3193 opendata->lgp = NULL; 3194 } 3195 nfs4_sequence_free_slot(&opendata->o_res.seq_res); 3196 } 3197 return ret; 3198 } 3199 3200 /* 3201 * Returns a referenced nfs4_state 3202 */ 3203 static int _nfs4_do_open(struct inode *dir, 3204 struct nfs_open_context *ctx, 3205 int flags, 3206 const struct nfs4_open_createattrs *c, 3207 int *opened) 3208 { 3209 struct nfs4_state_owner *sp; 3210 struct nfs4_state *state = NULL; 3211 struct nfs_server *server = NFS_SERVER(dir); 3212 struct nfs4_opendata *opendata; 3213 struct dentry *dentry = ctx->dentry; 3214 const struct cred *cred = ctx->cred; 3215 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold; 3216 fmode_t fmode = _nfs4_ctx_to_openmode(ctx); 3217 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL; 3218 struct iattr *sattr = c->sattr; 3219 struct nfs4_label *label = c->label; 3220 int status; 3221 3222 /* Protect against reboot recovery conflicts */ 3223 status = -ENOMEM; 3224 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL); 3225 if (sp == NULL) { 3226 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n"); 3227 goto out_err; 3228 } 3229 status = nfs4_client_recover_expired_lease(server->nfs_client); 3230 if (status != 0) 3231 goto err_put_state_owner; 3232 if (d_really_is_positive(dentry)) 3233 nfs4_return_incompatible_delegation(d_inode(dentry), fmode); 3234 status = -ENOMEM; 3235 if (d_really_is_positive(dentry)) 3236 claim = NFS4_OPEN_CLAIM_FH; 3237 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, 3238 c, claim, GFP_KERNEL); 3239 if (opendata == NULL) 3240 goto err_put_state_owner; 3241 3242 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) { 3243 if (!opendata->f_attr.mdsthreshold) { 3244 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); 3245 if (!opendata->f_attr.mdsthreshold) 3246 goto err_opendata_put; 3247 } 3248 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; 3249 } 3250 if (d_really_is_positive(dentry)) 3251 opendata->state = nfs4_get_open_state(d_inode(dentry), sp); 3252 3253 status = _nfs4_open_and_get_state(opendata, ctx); 3254 if (status != 0) 3255 goto err_opendata_put; 3256 state = ctx->state; 3257 3258 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) && 3259 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) { 3260 unsigned attrs = nfs4_exclusive_attrset(opendata, sattr, &label); 3261 /* 3262 * send create attributes which was not set by open 3263 * with an extra setattr. 3264 */ 3265 if (attrs || label) { 3266 unsigned ia_old = sattr->ia_valid; 3267 3268 sattr->ia_valid = attrs; 3269 nfs_fattr_init(opendata->o_res.f_attr); 3270 status = nfs4_do_setattr(state->inode, cred, 3271 opendata->o_res.f_attr, sattr, 3272 ctx, label); 3273 if (status == 0) { 3274 nfs_setattr_update_inode(state->inode, sattr, 3275 opendata->o_res.f_attr); 3276 nfs_setsecurity(state->inode, opendata->o_res.f_attr); 3277 } 3278 sattr->ia_valid = ia_old; 3279 } 3280 } 3281 if (opened && opendata->file_created) 3282 *opened = 1; 3283 3284 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) { 3285 *ctx_th = opendata->f_attr.mdsthreshold; 3286 opendata->f_attr.mdsthreshold = NULL; 3287 } 3288 3289 nfs4_opendata_put(opendata); 3290 nfs4_put_state_owner(sp); 3291 return 0; 3292 err_opendata_put: 3293 nfs4_opendata_put(opendata); 3294 err_put_state_owner: 3295 nfs4_put_state_owner(sp); 3296 out_err: 3297 return status; 3298 } 3299 3300 3301 static struct nfs4_state *nfs4_do_open(struct inode *dir, 3302 struct nfs_open_context *ctx, 3303 int flags, 3304 struct iattr *sattr, 3305 struct nfs4_label *label, 3306 int *opened) 3307 { 3308 struct nfs_server *server = NFS_SERVER(dir); 3309 struct nfs4_exception exception = { 3310 .interruptible = true, 3311 }; 3312 struct nfs4_state *res; 3313 struct nfs4_open_createattrs c = { 3314 .label = label, 3315 .sattr = sattr, 3316 .verf = { 3317 [0] = (__u32)jiffies, 3318 [1] = (__u32)current->pid, 3319 }, 3320 }; 3321 int status; 3322 3323 do { 3324 status = _nfs4_do_open(dir, ctx, flags, &c, opened); 3325 res = ctx->state; 3326 trace_nfs4_open_file(ctx, flags, status); 3327 if (status == 0) 3328 break; 3329 /* NOTE: BAD_SEQID means the server and client disagree about the 3330 * book-keeping w.r.t. state-changing operations 3331 * (OPEN/CLOSE/LOCK/LOCKU...) 3332 * It is actually a sign of a bug on the client or on the server. 3333 * 3334 * If we receive a BAD_SEQID error in the particular case of 3335 * doing an OPEN, we assume that nfs_increment_open_seqid() will 3336 * have unhashed the old state_owner for us, and that we can 3337 * therefore safely retry using a new one. We should still warn 3338 * the user though... 3339 */ 3340 if (status == -NFS4ERR_BAD_SEQID) { 3341 pr_warn_ratelimited("NFS: v4 server %s " 3342 " returned a bad sequence-id error!\n", 3343 NFS_SERVER(dir)->nfs_client->cl_hostname); 3344 exception.retry = 1; 3345 continue; 3346 } 3347 /* 3348 * BAD_STATEID on OPEN means that the server cancelled our 3349 * state before it received the OPEN_CONFIRM. 3350 * Recover by retrying the request as per the discussion 3351 * on Page 181 of RFC3530. 3352 */ 3353 if (status == -NFS4ERR_BAD_STATEID) { 3354 exception.retry = 1; 3355 continue; 3356 } 3357 if (status == -NFS4ERR_EXPIRED) { 3358 nfs4_schedule_lease_recovery(server->nfs_client); 3359 exception.retry = 1; 3360 continue; 3361 } 3362 if (status == -EAGAIN) { 3363 /* We must have found a delegation */ 3364 exception.retry = 1; 3365 continue; 3366 } 3367 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception)) 3368 continue; 3369 res = ERR_PTR(nfs4_handle_exception(server, 3370 status, &exception)); 3371 } while (exception.retry); 3372 return res; 3373 } 3374 3375 static int _nfs4_do_setattr(struct inode *inode, 3376 struct nfs_setattrargs *arg, 3377 struct nfs_setattrres *res, 3378 const struct cred *cred, 3379 struct nfs_open_context *ctx) 3380 { 3381 struct nfs_server *server = NFS_SERVER(inode); 3382 struct rpc_message msg = { 3383 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 3384 .rpc_argp = arg, 3385 .rpc_resp = res, 3386 .rpc_cred = cred, 3387 }; 3388 const struct cred *delegation_cred = NULL; 3389 unsigned long timestamp = jiffies; 3390 bool truncate; 3391 int status; 3392 3393 nfs_fattr_init(res->fattr); 3394 3395 /* Servers should only apply open mode checks for file size changes */ 3396 truncate = (arg->iap->ia_valid & ATTR_SIZE) ? true : false; 3397 if (!truncate) { 3398 nfs4_inode_make_writeable(inode); 3399 goto zero_stateid; 3400 } 3401 3402 if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) { 3403 /* Use that stateid */ 3404 } else if (ctx != NULL && ctx->state) { 3405 struct nfs_lock_context *l_ctx; 3406 if (!nfs4_valid_open_stateid(ctx->state)) 3407 return -EBADF; 3408 l_ctx = nfs_get_lock_context(ctx); 3409 if (IS_ERR(l_ctx)) 3410 return PTR_ERR(l_ctx); 3411 status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx, 3412 &arg->stateid, &delegation_cred); 3413 nfs_put_lock_context(l_ctx); 3414 if (status == -EIO) 3415 return -EBADF; 3416 else if (status == -EAGAIN) 3417 goto zero_stateid; 3418 } else { 3419 zero_stateid: 3420 nfs4_stateid_copy(&arg->stateid, &zero_stateid); 3421 } 3422 if (delegation_cred) 3423 msg.rpc_cred = delegation_cred; 3424 3425 status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1); 3426 3427 put_cred(delegation_cred); 3428 if (status == 0 && ctx != NULL) 3429 renew_lease(server, timestamp); 3430 trace_nfs4_setattr(inode, &arg->stateid, status); 3431 return status; 3432 } 3433 3434 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred, 3435 struct nfs_fattr *fattr, struct iattr *sattr, 3436 struct nfs_open_context *ctx, struct nfs4_label *ilabel) 3437 { 3438 struct nfs_server *server = NFS_SERVER(inode); 3439 __u32 bitmask[NFS4_BITMASK_SZ]; 3440 struct nfs4_state *state = ctx ? ctx->state : NULL; 3441 struct nfs_setattrargs arg = { 3442 .fh = NFS_FH(inode), 3443 .iap = sattr, 3444 .server = server, 3445 .bitmask = bitmask, 3446 .label = ilabel, 3447 }; 3448 struct nfs_setattrres res = { 3449 .fattr = fattr, 3450 .server = server, 3451 }; 3452 struct nfs4_exception exception = { 3453 .state = state, 3454 .inode = inode, 3455 .stateid = &arg.stateid, 3456 }; 3457 unsigned long adjust_flags = NFS_INO_INVALID_CHANGE | 3458 NFS_INO_INVALID_CTIME; 3459 int err; 3460 3461 if (sattr->ia_valid & (ATTR_MODE | ATTR_KILL_SUID | ATTR_KILL_SGID)) 3462 adjust_flags |= NFS_INO_INVALID_MODE; 3463 if (sattr->ia_valid & (ATTR_UID | ATTR_GID)) 3464 adjust_flags |= NFS_INO_INVALID_OTHER; 3465 if (sattr->ia_valid & ATTR_ATIME) 3466 adjust_flags |= NFS_INO_INVALID_ATIME; 3467 if (sattr->ia_valid & ATTR_MTIME) 3468 adjust_flags |= NFS_INO_INVALID_MTIME; 3469 3470 do { 3471 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label), 3472 inode, adjust_flags); 3473 3474 err = _nfs4_do_setattr(inode, &arg, &res, cred, ctx); 3475 switch (err) { 3476 case -NFS4ERR_OPENMODE: 3477 if (!(sattr->ia_valid & ATTR_SIZE)) { 3478 pr_warn_once("NFSv4: server %s is incorrectly " 3479 "applying open mode checks to " 3480 "a SETATTR that is not " 3481 "changing file size.\n", 3482 server->nfs_client->cl_hostname); 3483 } 3484 if (state && !(state->state & FMODE_WRITE)) { 3485 err = -EBADF; 3486 if (sattr->ia_valid & ATTR_OPEN) 3487 err = -EACCES; 3488 goto out; 3489 } 3490 } 3491 err = nfs4_handle_exception(server, err, &exception); 3492 } while (exception.retry); 3493 out: 3494 return err; 3495 } 3496 3497 static bool 3498 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task) 3499 { 3500 if (inode == NULL || !nfs_have_layout(inode)) 3501 return false; 3502 3503 return pnfs_wait_on_layoutreturn(inode, task); 3504 } 3505 3506 /* 3507 * Update the seqid of an open stateid 3508 */ 3509 static void nfs4_sync_open_stateid(nfs4_stateid *dst, 3510 struct nfs4_state *state) 3511 { 3512 __be32 seqid_open; 3513 u32 dst_seqid; 3514 int seq; 3515 3516 for (;;) { 3517 if (!nfs4_valid_open_stateid(state)) 3518 break; 3519 seq = read_seqbegin(&state->seqlock); 3520 if (!nfs4_state_match_open_stateid_other(state, dst)) { 3521 nfs4_stateid_copy(dst, &state->open_stateid); 3522 if (read_seqretry(&state->seqlock, seq)) 3523 continue; 3524 break; 3525 } 3526 seqid_open = state->open_stateid.seqid; 3527 if (read_seqretry(&state->seqlock, seq)) 3528 continue; 3529 3530 dst_seqid = be32_to_cpu(dst->seqid); 3531 if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) < 0) 3532 dst->seqid = seqid_open; 3533 break; 3534 } 3535 } 3536 3537 /* 3538 * Update the seqid of an open stateid after receiving 3539 * NFS4ERR_OLD_STATEID 3540 */ 3541 static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst, 3542 struct nfs4_state *state) 3543 { 3544 __be32 seqid_open; 3545 u32 dst_seqid; 3546 bool ret; 3547 int seq, status = -EAGAIN; 3548 DEFINE_WAIT(wait); 3549 3550 for (;;) { 3551 ret = false; 3552 if (!nfs4_valid_open_stateid(state)) 3553 break; 3554 seq = read_seqbegin(&state->seqlock); 3555 if (!nfs4_state_match_open_stateid_other(state, dst)) { 3556 if (read_seqretry(&state->seqlock, seq)) 3557 continue; 3558 break; 3559 } 3560 3561 write_seqlock(&state->seqlock); 3562 seqid_open = state->open_stateid.seqid; 3563 3564 dst_seqid = be32_to_cpu(dst->seqid); 3565 3566 /* Did another OPEN bump the state's seqid? try again: */ 3567 if ((s32)(be32_to_cpu(seqid_open) - dst_seqid) > 0) { 3568 dst->seqid = seqid_open; 3569 write_sequnlock(&state->seqlock); 3570 ret = true; 3571 break; 3572 } 3573 3574 /* server says we're behind but we haven't seen the update yet */ 3575 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags); 3576 prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE); 3577 write_sequnlock(&state->seqlock); 3578 trace_nfs4_close_stateid_update_wait(state->inode, dst, 0); 3579 3580 if (fatal_signal_pending(current)) 3581 status = -EINTR; 3582 else 3583 if (schedule_timeout(5*HZ) != 0) 3584 status = 0; 3585 3586 finish_wait(&state->waitq, &wait); 3587 3588 if (!status) 3589 continue; 3590 if (status == -EINTR) 3591 break; 3592 3593 /* we slept the whole 5 seconds, we must have lost a seqid */ 3594 dst->seqid = cpu_to_be32(dst_seqid + 1); 3595 ret = true; 3596 break; 3597 } 3598 3599 return ret; 3600 } 3601 3602 struct nfs4_closedata { 3603 struct inode *inode; 3604 struct nfs4_state *state; 3605 struct nfs_closeargs arg; 3606 struct nfs_closeres res; 3607 struct { 3608 struct nfs4_layoutreturn_args arg; 3609 struct nfs4_layoutreturn_res res; 3610 struct nfs4_xdr_opaque_data ld_private; 3611 u32 roc_barrier; 3612 bool roc; 3613 } lr; 3614 struct nfs_fattr fattr; 3615 unsigned long timestamp; 3616 }; 3617 3618 static void nfs4_free_closedata(void *data) 3619 { 3620 struct nfs4_closedata *calldata = data; 3621 struct nfs4_state_owner *sp = calldata->state->owner; 3622 struct super_block *sb = calldata->state->inode->i_sb; 3623 3624 if (calldata->lr.roc) 3625 pnfs_roc_release(&calldata->lr.arg, &calldata->lr.res, 3626 calldata->res.lr_ret); 3627 nfs4_put_open_state(calldata->state); 3628 nfs_free_seqid(calldata->arg.seqid); 3629 nfs4_put_state_owner(sp); 3630 nfs_sb_deactive(sb); 3631 kfree(calldata); 3632 } 3633 3634 static void nfs4_close_done(struct rpc_task *task, void *data) 3635 { 3636 struct nfs4_closedata *calldata = data; 3637 struct nfs4_state *state = calldata->state; 3638 struct nfs_server *server = NFS_SERVER(calldata->inode); 3639 nfs4_stateid *res_stateid = NULL; 3640 struct nfs4_exception exception = { 3641 .state = state, 3642 .inode = calldata->inode, 3643 .stateid = &calldata->arg.stateid, 3644 }; 3645 3646 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 3647 return; 3648 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status); 3649 3650 /* Handle Layoutreturn errors */ 3651 if (pnfs_roc_done(task, &calldata->arg.lr_args, &calldata->res.lr_res, 3652 &calldata->res.lr_ret) == -EAGAIN) 3653 goto out_restart; 3654 3655 /* hmm. we are done with the inode, and in the process of freeing 3656 * the state_owner. we keep this around to process errors 3657 */ 3658 switch (task->tk_status) { 3659 case 0: 3660 res_stateid = &calldata->res.stateid; 3661 renew_lease(server, calldata->timestamp); 3662 break; 3663 case -NFS4ERR_ACCESS: 3664 if (calldata->arg.bitmask != NULL) { 3665 calldata->arg.bitmask = NULL; 3666 calldata->res.fattr = NULL; 3667 goto out_restart; 3668 3669 } 3670 break; 3671 case -NFS4ERR_OLD_STATEID: 3672 /* Did we race with OPEN? */ 3673 if (nfs4_refresh_open_old_stateid(&calldata->arg.stateid, 3674 state)) 3675 goto out_restart; 3676 goto out_release; 3677 case -NFS4ERR_ADMIN_REVOKED: 3678 case -NFS4ERR_STALE_STATEID: 3679 case -NFS4ERR_EXPIRED: 3680 nfs4_free_revoked_stateid(server, 3681 &calldata->arg.stateid, 3682 task->tk_msg.rpc_cred); 3683 fallthrough; 3684 case -NFS4ERR_BAD_STATEID: 3685 if (calldata->arg.fmode == 0) 3686 break; 3687 fallthrough; 3688 default: 3689 task->tk_status = nfs4_async_handle_exception(task, 3690 server, task->tk_status, &exception); 3691 if (exception.retry) 3692 goto out_restart; 3693 } 3694 nfs_clear_open_stateid(state, &calldata->arg.stateid, 3695 res_stateid, calldata->arg.fmode); 3696 out_release: 3697 task->tk_status = 0; 3698 nfs_release_seqid(calldata->arg.seqid); 3699 nfs_refresh_inode(calldata->inode, &calldata->fattr); 3700 dprintk("%s: ret = %d\n", __func__, task->tk_status); 3701 return; 3702 out_restart: 3703 task->tk_status = 0; 3704 rpc_restart_call_prepare(task); 3705 goto out_release; 3706 } 3707 3708 static void nfs4_close_prepare(struct rpc_task *task, void *data) 3709 { 3710 struct nfs4_closedata *calldata = data; 3711 struct nfs4_state *state = calldata->state; 3712 struct inode *inode = calldata->inode; 3713 struct nfs_server *server = NFS_SERVER(inode); 3714 struct pnfs_layout_hdr *lo; 3715 bool is_rdonly, is_wronly, is_rdwr; 3716 int call_close = 0; 3717 3718 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 3719 goto out_wait; 3720 3721 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; 3722 spin_lock(&state->owner->so_lock); 3723 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags); 3724 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags); 3725 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags); 3726 /* Calculate the change in open mode */ 3727 calldata->arg.fmode = 0; 3728 if (state->n_rdwr == 0) { 3729 if (state->n_rdonly == 0) 3730 call_close |= is_rdonly; 3731 else if (is_rdonly) 3732 calldata->arg.fmode |= FMODE_READ; 3733 if (state->n_wronly == 0) 3734 call_close |= is_wronly; 3735 else if (is_wronly) 3736 calldata->arg.fmode |= FMODE_WRITE; 3737 if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE)) 3738 call_close |= is_rdwr; 3739 } else if (is_rdwr) 3740 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE; 3741 3742 nfs4_sync_open_stateid(&calldata->arg.stateid, state); 3743 if (!nfs4_valid_open_stateid(state)) 3744 call_close = 0; 3745 spin_unlock(&state->owner->so_lock); 3746 3747 if (!call_close) { 3748 /* Note: exit _without_ calling nfs4_close_done */ 3749 goto out_no_action; 3750 } 3751 3752 if (!calldata->lr.roc && nfs4_wait_on_layoutreturn(inode, task)) { 3753 nfs_release_seqid(calldata->arg.seqid); 3754 goto out_wait; 3755 } 3756 3757 lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL; 3758 if (lo && !pnfs_layout_is_valid(lo)) { 3759 calldata->arg.lr_args = NULL; 3760 calldata->res.lr_res = NULL; 3761 } 3762 3763 if (calldata->arg.fmode == 0) 3764 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; 3765 3766 if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) { 3767 /* Close-to-open cache consistency revalidation */ 3768 if (!nfs4_have_delegation(inode, FMODE_READ, 0)) { 3769 nfs4_bitmask_set(calldata->arg.bitmask_store, 3770 server->cache_consistency_bitmask, 3771 inode, 0); 3772 calldata->arg.bitmask = calldata->arg.bitmask_store; 3773 } else 3774 calldata->arg.bitmask = NULL; 3775 } 3776 3777 calldata->arg.share_access = 3778 nfs4_fmode_to_share_access(calldata->arg.fmode); 3779 3780 if (calldata->res.fattr == NULL) 3781 calldata->arg.bitmask = NULL; 3782 else if (calldata->arg.bitmask == NULL) 3783 calldata->res.fattr = NULL; 3784 calldata->timestamp = jiffies; 3785 if (nfs4_setup_sequence(NFS_SERVER(inode)->nfs_client, 3786 &calldata->arg.seq_args, 3787 &calldata->res.seq_res, 3788 task) != 0) 3789 nfs_release_seqid(calldata->arg.seqid); 3790 return; 3791 out_no_action: 3792 task->tk_action = NULL; 3793 out_wait: 3794 nfs4_sequence_done(task, &calldata->res.seq_res); 3795 } 3796 3797 static const struct rpc_call_ops nfs4_close_ops = { 3798 .rpc_call_prepare = nfs4_close_prepare, 3799 .rpc_call_done = nfs4_close_done, 3800 .rpc_release = nfs4_free_closedata, 3801 }; 3802 3803 /* 3804 * It is possible for data to be read/written from a mem-mapped file 3805 * after the sys_close call (which hits the vfs layer as a flush). 3806 * This means that we can't safely call nfsv4 close on a file until 3807 * the inode is cleared. This in turn means that we are not good 3808 * NFSv4 citizens - we do not indicate to the server to update the file's 3809 * share state even when we are done with one of the three share 3810 * stateid's in the inode. 3811 * 3812 * NOTE: Caller must be holding the sp->so_owner semaphore! 3813 */ 3814 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) 3815 { 3816 struct nfs_server *server = NFS_SERVER(state->inode); 3817 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 3818 struct nfs4_closedata *calldata; 3819 struct nfs4_state_owner *sp = state->owner; 3820 struct rpc_task *task; 3821 struct rpc_message msg = { 3822 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE], 3823 .rpc_cred = state->owner->so_cred, 3824 }; 3825 struct rpc_task_setup task_setup_data = { 3826 .rpc_client = server->client, 3827 .rpc_message = &msg, 3828 .callback_ops = &nfs4_close_ops, 3829 .workqueue = nfsiod_workqueue, 3830 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 3831 }; 3832 int status = -ENOMEM; 3833 3834 if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE)) 3835 task_setup_data.flags |= RPC_TASK_MOVEABLE; 3836 3837 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP, 3838 &task_setup_data.rpc_client, &msg); 3839 3840 calldata = kzalloc(sizeof(*calldata), gfp_mask); 3841 if (calldata == NULL) 3842 goto out; 3843 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1, 0); 3844 calldata->inode = state->inode; 3845 calldata->state = state; 3846 calldata->arg.fh = NFS_FH(state->inode); 3847 if (!nfs4_copy_open_stateid(&calldata->arg.stateid, state)) 3848 goto out_free_calldata; 3849 /* Serialization for the sequence id */ 3850 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 3851 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask); 3852 if (IS_ERR(calldata->arg.seqid)) 3853 goto out_free_calldata; 3854 nfs_fattr_init(&calldata->fattr); 3855 calldata->arg.fmode = 0; 3856 calldata->lr.arg.ld_private = &calldata->lr.ld_private; 3857 calldata->res.fattr = &calldata->fattr; 3858 calldata->res.seqid = calldata->arg.seqid; 3859 calldata->res.server = server; 3860 calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT; 3861 calldata->lr.roc = pnfs_roc(state->inode, 3862 &calldata->lr.arg, &calldata->lr.res, msg.rpc_cred); 3863 if (calldata->lr.roc) { 3864 calldata->arg.lr_args = &calldata->lr.arg; 3865 calldata->res.lr_res = &calldata->lr.res; 3866 } 3867 nfs_sb_active(calldata->inode->i_sb); 3868 3869 msg.rpc_argp = &calldata->arg; 3870 msg.rpc_resp = &calldata->res; 3871 task_setup_data.callback_data = calldata; 3872 task = rpc_run_task(&task_setup_data); 3873 if (IS_ERR(task)) 3874 return PTR_ERR(task); 3875 status = 0; 3876 if (wait) 3877 status = rpc_wait_for_completion_task(task); 3878 rpc_put_task(task); 3879 return status; 3880 out_free_calldata: 3881 kfree(calldata); 3882 out: 3883 nfs4_put_open_state(state); 3884 nfs4_put_state_owner(sp); 3885 return status; 3886 } 3887 3888 static struct inode * 3889 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, 3890 int open_flags, struct iattr *attr, int *opened) 3891 { 3892 struct nfs4_state *state; 3893 struct nfs4_label l, *label; 3894 3895 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l); 3896 3897 /* Protect against concurrent sillydeletes */ 3898 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened); 3899 3900 nfs4_label_release_security(label); 3901 3902 if (IS_ERR(state)) 3903 return ERR_CAST(state); 3904 return state->inode; 3905 } 3906 3907 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) 3908 { 3909 if (ctx->state == NULL) 3910 return; 3911 if (is_sync) 3912 nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx)); 3913 else 3914 nfs4_close_state(ctx->state, _nfs4_ctx_to_openmode(ctx)); 3915 } 3916 3917 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL) 3918 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL) 3919 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_OPEN_ARGUMENTS - 1UL) 3920 3921 #define FATTR4_WORD2_NFS42_TIME_DELEG_MASK \ 3922 (FATTR4_WORD2_TIME_DELEG_MODIFY|FATTR4_WORD2_TIME_DELEG_ACCESS) 3923 static bool nfs4_server_delegtime_capable(struct nfs4_server_caps_res *res) 3924 { 3925 u32 share_access_want = res->open_caps.oa_share_access_want[0]; 3926 u32 attr_bitmask = res->attr_bitmask[2]; 3927 3928 return (share_access_want & NFS4_SHARE_WANT_DELEG_TIMESTAMPS) && 3929 ((attr_bitmask & FATTR4_WORD2_NFS42_TIME_DELEG_MASK) == 3930 FATTR4_WORD2_NFS42_TIME_DELEG_MASK); 3931 } 3932 3933 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 3934 { 3935 u32 minorversion = server->nfs_client->cl_minorversion; 3936 u32 bitmask[3] = { 3937 [0] = FATTR4_WORD0_SUPPORTED_ATTRS, 3938 }; 3939 struct nfs4_server_caps_arg args = { 3940 .fhandle = fhandle, 3941 .bitmask = bitmask, 3942 }; 3943 struct nfs4_server_caps_res res = {}; 3944 struct rpc_message msg = { 3945 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS], 3946 .rpc_argp = &args, 3947 .rpc_resp = &res, 3948 }; 3949 int status; 3950 int i; 3951 3952 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS | 3953 FATTR4_WORD0_FH_EXPIRE_TYPE | 3954 FATTR4_WORD0_LINK_SUPPORT | 3955 FATTR4_WORD0_SYMLINK_SUPPORT | 3956 FATTR4_WORD0_ACLSUPPORT | 3957 FATTR4_WORD0_CASE_INSENSITIVE | 3958 FATTR4_WORD0_CASE_PRESERVING; 3959 if (minorversion) 3960 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT | 3961 FATTR4_WORD2_OPEN_ARGUMENTS; 3962 3963 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3964 if (status == 0) { 3965 bitmask[0] = (FATTR4_WORD0_SUPPORTED_ATTRS | 3966 FATTR4_WORD0_FH_EXPIRE_TYPE | 3967 FATTR4_WORD0_LINK_SUPPORT | 3968 FATTR4_WORD0_SYMLINK_SUPPORT | 3969 FATTR4_WORD0_ACLSUPPORT | 3970 FATTR4_WORD0_CASE_INSENSITIVE | 3971 FATTR4_WORD0_CASE_PRESERVING) & 3972 res.attr_bitmask[0]; 3973 /* Sanity check the server answers */ 3974 switch (minorversion) { 3975 case 0: 3976 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK; 3977 res.attr_bitmask[2] = 0; 3978 break; 3979 case 1: 3980 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK; 3981 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT & 3982 res.attr_bitmask[2]; 3983 break; 3984 case 2: 3985 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK; 3986 bitmask[2] = (FATTR4_WORD2_SUPPATTR_EXCLCREAT | 3987 FATTR4_WORD2_OPEN_ARGUMENTS) & 3988 res.attr_bitmask[2]; 3989 } 3990 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); 3991 server->caps &= ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS | 3992 NFS_CAP_SYMLINKS| NFS_CAP_SECURITY_LABEL); 3993 server->fattr_valid = NFS_ATTR_FATTR_V4; 3994 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL && 3995 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) 3996 server->caps |= NFS_CAP_ACLS; 3997 if (res.has_links != 0) 3998 server->caps |= NFS_CAP_HARDLINKS; 3999 if (res.has_symlinks != 0) 4000 server->caps |= NFS_CAP_SYMLINKS; 4001 if (res.case_insensitive) 4002 server->caps |= NFS_CAP_CASE_INSENSITIVE; 4003 if (res.case_preserving) 4004 server->caps |= NFS_CAP_CASE_PRESERVING; 4005 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 4006 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL) 4007 server->caps |= NFS_CAP_SECURITY_LABEL; 4008 #endif 4009 if (res.attr_bitmask[0] & FATTR4_WORD0_FS_LOCATIONS) 4010 server->caps |= NFS_CAP_FS_LOCATIONS; 4011 if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID)) 4012 server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID; 4013 if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE)) 4014 server->fattr_valid &= ~NFS_ATTR_FATTR_MODE; 4015 if (!(res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)) 4016 server->fattr_valid &= ~NFS_ATTR_FATTR_NLINK; 4017 if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER)) 4018 server->fattr_valid &= ~(NFS_ATTR_FATTR_OWNER | 4019 NFS_ATTR_FATTR_OWNER_NAME); 4020 if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)) 4021 server->fattr_valid &= ~(NFS_ATTR_FATTR_GROUP | 4022 NFS_ATTR_FATTR_GROUP_NAME); 4023 if (!(res.attr_bitmask[1] & FATTR4_WORD1_SPACE_USED)) 4024 server->fattr_valid &= ~NFS_ATTR_FATTR_SPACE_USED; 4025 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)) 4026 server->fattr_valid &= ~NFS_ATTR_FATTR_ATIME; 4027 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)) 4028 server->fattr_valid &= ~NFS_ATTR_FATTR_CTIME; 4029 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)) 4030 server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME; 4031 memcpy(server->attr_bitmask_nl, res.attr_bitmask, 4032 sizeof(server->attr_bitmask)); 4033 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL; 4034 4035 if (res.open_caps.oa_share_access_want[0] & 4036 NFS4_SHARE_WANT_OPEN_XOR_DELEGATION) 4037 server->caps |= NFS_CAP_OPEN_XOR; 4038 if (nfs4_server_delegtime_capable(&res)) 4039 server->caps |= NFS_CAP_DELEGTIME; 4040 4041 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask)); 4042 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE; 4043 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; 4044 server->cache_consistency_bitmask[2] = 0; 4045 4046 /* Avoid a regression due to buggy server */ 4047 for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++) 4048 res.exclcreat_bitmask[i] &= res.attr_bitmask[i]; 4049 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask, 4050 sizeof(server->exclcreat_bitmask)); 4051 4052 server->acl_bitmask = res.acl_bitmask; 4053 server->fh_expire_type = res.fh_expire_type; 4054 } 4055 4056 return status; 4057 } 4058 4059 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 4060 { 4061 struct nfs4_exception exception = { 4062 .interruptible = true, 4063 }; 4064 int err; 4065 4066 nfs4_server_set_init_caps(server); 4067 do { 4068 err = nfs4_handle_exception(server, 4069 _nfs4_server_capabilities(server, fhandle), 4070 &exception); 4071 } while (exception.retry); 4072 return err; 4073 } 4074 4075 static void test_fs_location_for_trunking(struct nfs4_fs_location *location, 4076 struct nfs_client *clp, 4077 struct nfs_server *server) 4078 { 4079 int i; 4080 4081 for (i = 0; i < location->nservers; i++) { 4082 struct nfs4_string *srv_loc = &location->servers[i]; 4083 struct sockaddr_storage addr; 4084 size_t addrlen; 4085 struct xprt_create xprt_args = { 4086 .ident = 0, 4087 .net = clp->cl_net, 4088 }; 4089 struct nfs4_add_xprt_data xprtdata = { 4090 .clp = clp, 4091 }; 4092 struct rpc_add_xprt_test rpcdata = { 4093 .add_xprt_test = clp->cl_mvops->session_trunk, 4094 .data = &xprtdata, 4095 }; 4096 char *servername = NULL; 4097 4098 if (!srv_loc->len) 4099 continue; 4100 4101 addrlen = nfs_parse_server_name(srv_loc->data, srv_loc->len, 4102 &addr, sizeof(addr), 4103 clp->cl_net, server->port); 4104 if (!addrlen) 4105 return; 4106 xprt_args.dstaddr = (struct sockaddr *)&addr; 4107 xprt_args.addrlen = addrlen; 4108 servername = kmalloc(srv_loc->len + 1, GFP_KERNEL); 4109 if (!servername) 4110 return; 4111 memcpy(servername, srv_loc->data, srv_loc->len); 4112 servername[srv_loc->len] = '\0'; 4113 xprt_args.servername = servername; 4114 4115 xprtdata.cred = nfs4_get_clid_cred(clp); 4116 rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args, 4117 rpc_clnt_setup_test_and_add_xprt, 4118 &rpcdata); 4119 if (xprtdata.cred) 4120 put_cred(xprtdata.cred); 4121 kfree(servername); 4122 } 4123 } 4124 4125 static bool _is_same_nfs4_pathname(struct nfs4_pathname *path1, 4126 struct nfs4_pathname *path2) 4127 { 4128 int i; 4129 4130 if (path1->ncomponents != path2->ncomponents) 4131 return false; 4132 for (i = 0; i < path1->ncomponents; i++) { 4133 if (path1->components[i].len != path2->components[i].len) 4134 return false; 4135 if (memcmp(path1->components[i].data, path2->components[i].data, 4136 path1->components[i].len)) 4137 return false; 4138 } 4139 return true; 4140 } 4141 4142 static int _nfs4_discover_trunking(struct nfs_server *server, 4143 struct nfs_fh *fhandle) 4144 { 4145 struct nfs4_fs_locations *locations = NULL; 4146 struct page *page; 4147 const struct cred *cred; 4148 struct nfs_client *clp = server->nfs_client; 4149 const struct nfs4_state_maintenance_ops *ops = 4150 clp->cl_mvops->state_renewal_ops; 4151 int status = -ENOMEM, i; 4152 4153 cred = ops->get_state_renewal_cred(clp); 4154 if (cred == NULL) { 4155 cred = nfs4_get_clid_cred(clp); 4156 if (cred == NULL) 4157 return -ENOKEY; 4158 } 4159 4160 page = alloc_page(GFP_KERNEL); 4161 if (!page) 4162 goto out_put_cred; 4163 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 4164 if (!locations) 4165 goto out_free; 4166 locations->fattr = nfs_alloc_fattr(); 4167 if (!locations->fattr) 4168 goto out_free_2; 4169 4170 status = nfs4_proc_get_locations(server, fhandle, locations, page, 4171 cred); 4172 if (status) 4173 goto out_free_3; 4174 4175 for (i = 0; i < locations->nlocations; i++) { 4176 if (!_is_same_nfs4_pathname(&locations->fs_path, 4177 &locations->locations[i].rootpath)) 4178 continue; 4179 test_fs_location_for_trunking(&locations->locations[i], clp, 4180 server); 4181 } 4182 out_free_3: 4183 kfree(locations->fattr); 4184 out_free_2: 4185 kfree(locations); 4186 out_free: 4187 __free_page(page); 4188 out_put_cred: 4189 put_cred(cred); 4190 return status; 4191 } 4192 4193 static int nfs4_discover_trunking(struct nfs_server *server, 4194 struct nfs_fh *fhandle) 4195 { 4196 struct nfs4_exception exception = { 4197 .interruptible = true, 4198 }; 4199 struct nfs_client *clp = server->nfs_client; 4200 int err = 0; 4201 4202 if (!nfs4_has_session(clp)) 4203 goto out; 4204 do { 4205 err = nfs4_handle_exception(server, 4206 _nfs4_discover_trunking(server, fhandle), 4207 &exception); 4208 } while (exception.retry); 4209 out: 4210 return err; 4211 } 4212 4213 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 4214 struct nfs_fsinfo *info) 4215 { 4216 u32 bitmask[3]; 4217 struct nfs4_lookup_root_arg args = { 4218 .bitmask = bitmask, 4219 }; 4220 struct nfs4_lookup_res res = { 4221 .server = server, 4222 .fattr = info->fattr, 4223 .fh = fhandle, 4224 }; 4225 struct rpc_message msg = { 4226 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT], 4227 .rpc_argp = &args, 4228 .rpc_resp = &res, 4229 }; 4230 4231 bitmask[0] = nfs4_fattr_bitmap[0]; 4232 bitmask[1] = nfs4_fattr_bitmap[1]; 4233 /* 4234 * Process the label in the upcoming getfattr 4235 */ 4236 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL; 4237 4238 nfs_fattr_init(info->fattr); 4239 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4240 } 4241 4242 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 4243 struct nfs_fsinfo *info) 4244 { 4245 struct nfs4_exception exception = { 4246 .interruptible = true, 4247 }; 4248 int err; 4249 do { 4250 err = _nfs4_lookup_root(server, fhandle, info); 4251 trace_nfs4_lookup_root(server, fhandle, info->fattr, err); 4252 switch (err) { 4253 case 0: 4254 case -NFS4ERR_WRONGSEC: 4255 goto out; 4256 default: 4257 err = nfs4_handle_exception(server, err, &exception); 4258 } 4259 } while (exception.retry); 4260 out: 4261 return err; 4262 } 4263 4264 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 4265 struct nfs_fsinfo *info, rpc_authflavor_t flavor) 4266 { 4267 struct rpc_auth_create_args auth_args = { 4268 .pseudoflavor = flavor, 4269 }; 4270 struct rpc_auth *auth; 4271 4272 auth = rpcauth_create(&auth_args, server->client); 4273 if (IS_ERR(auth)) 4274 return -EACCES; 4275 return nfs4_lookup_root(server, fhandle, info); 4276 } 4277 4278 /* 4279 * Retry pseudoroot lookup with various security flavors. We do this when: 4280 * 4281 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC 4282 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation 4283 * 4284 * Returns zero on success, or a negative NFS4ERR value, or a 4285 * negative errno value. 4286 */ 4287 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 4288 struct nfs_fsinfo *info) 4289 { 4290 /* Per 3530bis 15.33.5 */ 4291 static const rpc_authflavor_t flav_array[] = { 4292 RPC_AUTH_GSS_KRB5P, 4293 RPC_AUTH_GSS_KRB5I, 4294 RPC_AUTH_GSS_KRB5, 4295 RPC_AUTH_UNIX, /* courtesy */ 4296 RPC_AUTH_NULL, 4297 }; 4298 int status = -EPERM; 4299 size_t i; 4300 4301 if (server->auth_info.flavor_len > 0) { 4302 /* try each flavor specified by user */ 4303 for (i = 0; i < server->auth_info.flavor_len; i++) { 4304 status = nfs4_lookup_root_sec(server, fhandle, info, 4305 server->auth_info.flavors[i]); 4306 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 4307 continue; 4308 break; 4309 } 4310 } else { 4311 /* no flavors specified by user, try default list */ 4312 for (i = 0; i < ARRAY_SIZE(flav_array); i++) { 4313 status = nfs4_lookup_root_sec(server, fhandle, info, 4314 flav_array[i]); 4315 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 4316 continue; 4317 break; 4318 } 4319 } 4320 4321 /* 4322 * -EACCES could mean that the user doesn't have correct permissions 4323 * to access the mount. It could also mean that we tried to mount 4324 * with a gss auth flavor, but rpc.gssd isn't running. Either way, 4325 * existing mount programs don't handle -EACCES very well so it should 4326 * be mapped to -EPERM instead. 4327 */ 4328 if (status == -EACCES) 4329 status = -EPERM; 4330 return status; 4331 } 4332 4333 /** 4334 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot 4335 * @server: initialized nfs_server handle 4336 * @fhandle: we fill in the pseudo-fs root file handle 4337 * @info: we fill in an FSINFO struct 4338 * @auth_probe: probe the auth flavours 4339 * 4340 * Returns zero on success, or a negative errno. 4341 */ 4342 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle, 4343 struct nfs_fsinfo *info, 4344 bool auth_probe) 4345 { 4346 int status = 0; 4347 4348 if (!auth_probe) 4349 status = nfs4_lookup_root(server, fhandle, info); 4350 4351 if (auth_probe || status == NFS4ERR_WRONGSEC) 4352 status = server->nfs_client->cl_mvops->find_root_sec(server, 4353 fhandle, info); 4354 4355 if (status == 0) 4356 status = nfs4_server_capabilities(server, fhandle); 4357 if (status == 0) 4358 status = nfs4_do_fsinfo(server, fhandle, info); 4359 4360 return nfs4_map_errors(status); 4361 } 4362 4363 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh, 4364 struct nfs_fsinfo *info) 4365 { 4366 int error; 4367 struct nfs_fattr *fattr = info->fattr; 4368 4369 error = nfs4_server_capabilities(server, mntfh); 4370 if (error < 0) { 4371 dprintk("nfs4_get_root: getcaps error = %d\n", -error); 4372 return error; 4373 } 4374 4375 error = nfs4_proc_getattr(server, mntfh, fattr, NULL); 4376 if (error < 0) { 4377 dprintk("nfs4_get_root: getattr error = %d\n", -error); 4378 goto out; 4379 } 4380 4381 if (fattr->valid & NFS_ATTR_FATTR_FSID && 4382 !nfs_fsid_equal(&server->fsid, &fattr->fsid)) 4383 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid)); 4384 4385 out: 4386 return error; 4387 } 4388 4389 /* 4390 * Get locations and (maybe) other attributes of a referral. 4391 * Note that we'll actually follow the referral later when 4392 * we detect fsid mismatch in inode revalidation 4393 */ 4394 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir, 4395 const struct qstr *name, struct nfs_fattr *fattr, 4396 struct nfs_fh *fhandle) 4397 { 4398 int status = -ENOMEM; 4399 struct page *page = NULL; 4400 struct nfs4_fs_locations *locations = NULL; 4401 4402 page = alloc_page(GFP_KERNEL); 4403 if (page == NULL) 4404 goto out; 4405 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 4406 if (locations == NULL) 4407 goto out; 4408 4409 locations->fattr = fattr; 4410 4411 status = nfs4_proc_fs_locations(client, dir, name, locations, page); 4412 if (status != 0) 4413 goto out; 4414 4415 /* 4416 * If the fsid didn't change, this is a migration event, not a 4417 * referral. Cause us to drop into the exception handler, which 4418 * will kick off migration recovery. 4419 */ 4420 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &fattr->fsid)) { 4421 dprintk("%s: server did not return a different fsid for" 4422 " a referral at %s\n", __func__, name->name); 4423 status = -NFS4ERR_MOVED; 4424 goto out; 4425 } 4426 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */ 4427 nfs_fixup_referral_attributes(fattr); 4428 memset(fhandle, 0, sizeof(struct nfs_fh)); 4429 out: 4430 if (page) 4431 __free_page(page); 4432 kfree(locations); 4433 return status; 4434 } 4435 4436 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, 4437 struct nfs_fattr *fattr, struct inode *inode) 4438 { 4439 __u32 bitmask[NFS4_BITMASK_SZ]; 4440 struct nfs4_getattr_arg args = { 4441 .fh = fhandle, 4442 .bitmask = bitmask, 4443 }; 4444 struct nfs4_getattr_res res = { 4445 .fattr = fattr, 4446 .server = server, 4447 }; 4448 struct rpc_message msg = { 4449 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 4450 .rpc_argp = &args, 4451 .rpc_resp = &res, 4452 }; 4453 unsigned short task_flags = 0; 4454 4455 if (nfs4_has_session(server->nfs_client)) 4456 task_flags = RPC_TASK_MOVEABLE; 4457 4458 /* Is this is an attribute revalidation, subject to softreval? */ 4459 if (inode && (server->flags & NFS_MOUNT_SOFTREVAL)) 4460 task_flags |= RPC_TASK_TIMEOUT; 4461 4462 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label), inode, 0); 4463 nfs_fattr_init(fattr); 4464 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 4465 return nfs4_do_call_sync(server->client, server, &msg, 4466 &args.seq_args, &res.seq_res, task_flags); 4467 } 4468 4469 int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, 4470 struct nfs_fattr *fattr, struct inode *inode) 4471 { 4472 struct nfs4_exception exception = { 4473 .interruptible = true, 4474 }; 4475 int err; 4476 do { 4477 err = _nfs4_proc_getattr(server, fhandle, fattr, inode); 4478 trace_nfs4_getattr(server, fhandle, fattr, err); 4479 err = nfs4_handle_exception(server, err, 4480 &exception); 4481 } while (exception.retry); 4482 return err; 4483 } 4484 4485 /* 4486 * The file is not closed if it is opened due to the a request to change 4487 * the size of the file. The open call will not be needed once the 4488 * VFS layer lookup-intents are implemented. 4489 * 4490 * Close is called when the inode is destroyed. 4491 * If we haven't opened the file for O_WRONLY, we 4492 * need to in the size_change case to obtain a stateid. 4493 * 4494 * Got race? 4495 * Because OPEN is always done by name in nfsv4, it is 4496 * possible that we opened a different file by the same 4497 * name. We can recognize this race condition, but we 4498 * can't do anything about it besides returning an error. 4499 * 4500 * This will be fixed with VFS changes (lookup-intent). 4501 */ 4502 static int 4503 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, 4504 struct iattr *sattr) 4505 { 4506 struct inode *inode = d_inode(dentry); 4507 const struct cred *cred = NULL; 4508 struct nfs_open_context *ctx = NULL; 4509 int status; 4510 4511 if (pnfs_ld_layoutret_on_setattr(inode) && 4512 sattr->ia_valid & ATTR_SIZE && 4513 sattr->ia_size < i_size_read(inode)) 4514 pnfs_commit_and_return_layout(inode); 4515 4516 nfs_fattr_init(fattr); 4517 4518 /* Deal with open(O_TRUNC) */ 4519 if (sattr->ia_valid & ATTR_OPEN) 4520 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME); 4521 4522 /* Optimization: if the end result is no change, don't RPC */ 4523 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0) 4524 return 0; 4525 4526 /* Search for an existing open(O_WRITE) file */ 4527 if (sattr->ia_valid & ATTR_FILE) { 4528 4529 ctx = nfs_file_open_context(sattr->ia_file); 4530 if (ctx) 4531 cred = ctx->cred; 4532 } 4533 4534 /* Return any delegations if we're going to change ACLs */ 4535 if ((sattr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0) 4536 nfs4_inode_make_writeable(inode); 4537 4538 status = nfs4_do_setattr(inode, cred, fattr, sattr, ctx, NULL); 4539 if (status == 0) { 4540 nfs_setattr_update_inode(inode, sattr, fattr); 4541 nfs_setsecurity(inode, fattr); 4542 } 4543 return status; 4544 } 4545 4546 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, 4547 struct dentry *dentry, struct nfs_fh *fhandle, 4548 struct nfs_fattr *fattr) 4549 { 4550 struct nfs_server *server = NFS_SERVER(dir); 4551 int status; 4552 struct nfs4_lookup_arg args = { 4553 .bitmask = server->attr_bitmask, 4554 .dir_fh = NFS_FH(dir), 4555 .name = &dentry->d_name, 4556 }; 4557 struct nfs4_lookup_res res = { 4558 .server = server, 4559 .fattr = fattr, 4560 .fh = fhandle, 4561 }; 4562 struct rpc_message msg = { 4563 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP], 4564 .rpc_argp = &args, 4565 .rpc_resp = &res, 4566 }; 4567 unsigned short task_flags = 0; 4568 4569 if (nfs_server_capable(dir, NFS_CAP_MOVEABLE)) 4570 task_flags = RPC_TASK_MOVEABLE; 4571 4572 /* Is this is an attribute revalidation, subject to softreval? */ 4573 if (nfs_lookup_is_soft_revalidate(dentry)) 4574 task_flags |= RPC_TASK_TIMEOUT; 4575 4576 args.bitmask = nfs4_bitmask(server, fattr->label); 4577 4578 nfs_fattr_init(fattr); 4579 4580 dprintk("NFS call lookup %pd2\n", dentry); 4581 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 4582 status = nfs4_do_call_sync(clnt, server, &msg, 4583 &args.seq_args, &res.seq_res, task_flags); 4584 dprintk("NFS reply lookup: %d\n", status); 4585 return status; 4586 } 4587 4588 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr) 4589 { 4590 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 4591 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT; 4592 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 4593 fattr->nlink = 2; 4594 } 4595 4596 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir, 4597 struct dentry *dentry, struct nfs_fh *fhandle, 4598 struct nfs_fattr *fattr) 4599 { 4600 struct nfs4_exception exception = { 4601 .interruptible = true, 4602 }; 4603 struct rpc_clnt *client = *clnt; 4604 const struct qstr *name = &dentry->d_name; 4605 int err; 4606 do { 4607 err = _nfs4_proc_lookup(client, dir, dentry, fhandle, fattr); 4608 trace_nfs4_lookup(dir, name, err); 4609 switch (err) { 4610 case -NFS4ERR_BADNAME: 4611 err = -ENOENT; 4612 goto out; 4613 case -NFS4ERR_MOVED: 4614 err = nfs4_get_referral(client, dir, name, fattr, fhandle); 4615 if (err == -NFS4ERR_MOVED) 4616 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 4617 goto out; 4618 case -NFS4ERR_WRONGSEC: 4619 err = -EPERM; 4620 if (client != *clnt) 4621 goto out; 4622 client = nfs4_negotiate_security(client, dir, name); 4623 if (IS_ERR(client)) 4624 return PTR_ERR(client); 4625 4626 exception.retry = 1; 4627 break; 4628 default: 4629 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 4630 } 4631 } while (exception.retry); 4632 4633 out: 4634 if (err == 0) 4635 *clnt = client; 4636 else if (client != *clnt) 4637 rpc_shutdown_client(client); 4638 4639 return err; 4640 } 4641 4642 static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry, 4643 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4644 { 4645 int status; 4646 struct rpc_clnt *client = NFS_CLIENT(dir); 4647 4648 status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr); 4649 if (client != NFS_CLIENT(dir)) { 4650 rpc_shutdown_client(client); 4651 nfs_fixup_secinfo_attributes(fattr); 4652 } 4653 return status; 4654 } 4655 4656 struct rpc_clnt * 4657 nfs4_proc_lookup_mountpoint(struct inode *dir, struct dentry *dentry, 4658 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4659 { 4660 struct rpc_clnt *client = NFS_CLIENT(dir); 4661 int status; 4662 4663 status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr); 4664 if (status < 0) 4665 return ERR_PTR(status); 4666 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client; 4667 } 4668 4669 static int _nfs4_proc_lookupp(struct inode *inode, 4670 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4671 { 4672 struct rpc_clnt *clnt = NFS_CLIENT(inode); 4673 struct nfs_server *server = NFS_SERVER(inode); 4674 int status; 4675 struct nfs4_lookupp_arg args = { 4676 .bitmask = server->attr_bitmask, 4677 .fh = NFS_FH(inode), 4678 }; 4679 struct nfs4_lookupp_res res = { 4680 .server = server, 4681 .fattr = fattr, 4682 .fh = fhandle, 4683 }; 4684 struct rpc_message msg = { 4685 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUPP], 4686 .rpc_argp = &args, 4687 .rpc_resp = &res, 4688 }; 4689 unsigned short task_flags = 0; 4690 4691 if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL) 4692 task_flags |= RPC_TASK_TIMEOUT; 4693 4694 args.bitmask = nfs4_bitmask(server, fattr->label); 4695 4696 nfs_fattr_init(fattr); 4697 4698 dprintk("NFS call lookupp ino=0x%lx\n", inode->i_ino); 4699 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, 4700 &res.seq_res, task_flags); 4701 dprintk("NFS reply lookupp: %d\n", status); 4702 return status; 4703 } 4704 4705 static int nfs4_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle, 4706 struct nfs_fattr *fattr) 4707 { 4708 struct nfs4_exception exception = { 4709 .interruptible = true, 4710 }; 4711 int err; 4712 do { 4713 err = _nfs4_proc_lookupp(inode, fhandle, fattr); 4714 trace_nfs4_lookupp(inode, err); 4715 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4716 &exception); 4717 } while (exception.retry); 4718 return err; 4719 } 4720 4721 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry, 4722 const struct cred *cred) 4723 { 4724 struct nfs_server *server = NFS_SERVER(inode); 4725 struct nfs4_accessargs args = { 4726 .fh = NFS_FH(inode), 4727 .access = entry->mask, 4728 }; 4729 struct nfs4_accessres res = { 4730 .server = server, 4731 }; 4732 struct rpc_message msg = { 4733 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], 4734 .rpc_argp = &args, 4735 .rpc_resp = &res, 4736 .rpc_cred = cred, 4737 }; 4738 int status = 0; 4739 4740 if (!nfs4_have_delegation(inode, FMODE_READ, 0)) { 4741 res.fattr = nfs_alloc_fattr(); 4742 if (res.fattr == NULL) 4743 return -ENOMEM; 4744 args.bitmask = server->cache_consistency_bitmask; 4745 } 4746 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4747 if (!status) { 4748 nfs_access_set_mask(entry, res.access); 4749 if (res.fattr) 4750 nfs_refresh_inode(inode, res.fattr); 4751 } 4752 nfs_free_fattr(res.fattr); 4753 return status; 4754 } 4755 4756 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry, 4757 const struct cred *cred) 4758 { 4759 struct nfs4_exception exception = { 4760 .interruptible = true, 4761 }; 4762 int err; 4763 do { 4764 err = _nfs4_proc_access(inode, entry, cred); 4765 trace_nfs4_access(inode, err); 4766 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4767 &exception); 4768 } while (exception.retry); 4769 return err; 4770 } 4771 4772 /* 4773 * TODO: For the time being, we don't try to get any attributes 4774 * along with any of the zero-copy operations READ, READDIR, 4775 * READLINK, WRITE. 4776 * 4777 * In the case of the first three, we want to put the GETATTR 4778 * after the read-type operation -- this is because it is hard 4779 * to predict the length of a GETATTR response in v4, and thus 4780 * align the READ data correctly. This means that the GETATTR 4781 * may end up partially falling into the page cache, and we should 4782 * shift it into the 'tail' of the xdr_buf before processing. 4783 * To do this efficiently, we need to know the total length 4784 * of data received, which doesn't seem to be available outside 4785 * of the RPC layer. 4786 * 4787 * In the case of WRITE, we also want to put the GETATTR after 4788 * the operation -- in this case because we want to make sure 4789 * we get the post-operation mtime and size. 4790 * 4791 * Both of these changes to the XDR layer would in fact be quite 4792 * minor, but I decided to leave them for a subsequent patch. 4793 */ 4794 static int _nfs4_proc_readlink(struct inode *inode, struct page *page, 4795 unsigned int pgbase, unsigned int pglen) 4796 { 4797 struct nfs4_readlink args = { 4798 .fh = NFS_FH(inode), 4799 .pgbase = pgbase, 4800 .pglen = pglen, 4801 .pages = &page, 4802 }; 4803 struct nfs4_readlink_res res; 4804 struct rpc_message msg = { 4805 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK], 4806 .rpc_argp = &args, 4807 .rpc_resp = &res, 4808 }; 4809 4810 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0); 4811 } 4812 4813 static int nfs4_proc_readlink(struct inode *inode, struct page *page, 4814 unsigned int pgbase, unsigned int pglen) 4815 { 4816 struct nfs4_exception exception = { 4817 .interruptible = true, 4818 }; 4819 int err; 4820 do { 4821 err = _nfs4_proc_readlink(inode, page, pgbase, pglen); 4822 trace_nfs4_readlink(inode, err); 4823 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4824 &exception); 4825 } while (exception.retry); 4826 return err; 4827 } 4828 4829 /* 4830 * This is just for mknod. open(O_CREAT) will always do ->open_context(). 4831 */ 4832 static int 4833 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, 4834 int flags) 4835 { 4836 struct nfs_server *server = NFS_SERVER(dir); 4837 struct nfs4_label l, *ilabel; 4838 struct nfs_open_context *ctx; 4839 struct nfs4_state *state; 4840 int status = 0; 4841 4842 ctx = alloc_nfs_open_context(dentry, FMODE_READ, NULL); 4843 if (IS_ERR(ctx)) 4844 return PTR_ERR(ctx); 4845 4846 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l); 4847 4848 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 4849 sattr->ia_mode &= ~current_umask(); 4850 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL); 4851 if (IS_ERR(state)) { 4852 status = PTR_ERR(state); 4853 goto out; 4854 } 4855 out: 4856 nfs4_label_release_security(ilabel); 4857 put_nfs_open_context(ctx); 4858 return status; 4859 } 4860 4861 static int 4862 _nfs4_proc_remove(struct inode *dir, const struct qstr *name, u32 ftype) 4863 { 4864 struct nfs_server *server = NFS_SERVER(dir); 4865 struct nfs_removeargs args = { 4866 .fh = NFS_FH(dir), 4867 .name = *name, 4868 }; 4869 struct nfs_removeres res = { 4870 .server = server, 4871 }; 4872 struct rpc_message msg = { 4873 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE], 4874 .rpc_argp = &args, 4875 .rpc_resp = &res, 4876 }; 4877 unsigned long timestamp = jiffies; 4878 int status; 4879 4880 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 4881 if (status == 0) { 4882 spin_lock(&dir->i_lock); 4883 /* Removing a directory decrements nlink in the parent */ 4884 if (ftype == NF4DIR && dir->i_nlink > 2) 4885 nfs4_dec_nlink_locked(dir); 4886 nfs4_update_changeattr_locked(dir, &res.cinfo, timestamp, 4887 NFS_INO_INVALID_DATA); 4888 spin_unlock(&dir->i_lock); 4889 } 4890 return status; 4891 } 4892 4893 static int nfs4_proc_remove(struct inode *dir, struct dentry *dentry) 4894 { 4895 struct nfs4_exception exception = { 4896 .interruptible = true, 4897 }; 4898 struct inode *inode = d_inode(dentry); 4899 int err; 4900 4901 if (inode) { 4902 if (inode->i_nlink == 1) 4903 nfs4_inode_return_delegation(inode); 4904 else 4905 nfs4_inode_make_writeable(inode); 4906 } 4907 do { 4908 err = _nfs4_proc_remove(dir, &dentry->d_name, NF4REG); 4909 trace_nfs4_remove(dir, &dentry->d_name, err); 4910 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4911 &exception); 4912 } while (exception.retry); 4913 return err; 4914 } 4915 4916 static int nfs4_proc_rmdir(struct inode *dir, const struct qstr *name) 4917 { 4918 struct nfs4_exception exception = { 4919 .interruptible = true, 4920 }; 4921 int err; 4922 4923 do { 4924 err = _nfs4_proc_remove(dir, name, NF4DIR); 4925 trace_nfs4_remove(dir, name, err); 4926 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4927 &exception); 4928 } while (exception.retry); 4929 return err; 4930 } 4931 4932 static void nfs4_proc_unlink_setup(struct rpc_message *msg, 4933 struct dentry *dentry, 4934 struct inode *inode) 4935 { 4936 struct nfs_removeargs *args = msg->rpc_argp; 4937 struct nfs_removeres *res = msg->rpc_resp; 4938 4939 res->server = NFS_SB(dentry->d_sb); 4940 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE]; 4941 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1, 0); 4942 4943 nfs_fattr_init(res->dir_attr); 4944 4945 if (inode) { 4946 nfs4_inode_return_delegation(inode); 4947 nfs_d_prune_case_insensitive_aliases(inode); 4948 } 4949 } 4950 4951 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) 4952 { 4953 nfs4_setup_sequence(NFS_SB(data->dentry->d_sb)->nfs_client, 4954 &data->args.seq_args, 4955 &data->res.seq_res, 4956 task); 4957 } 4958 4959 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) 4960 { 4961 struct nfs_unlinkdata *data = task->tk_calldata; 4962 struct nfs_removeres *res = &data->res; 4963 4964 if (!nfs4_sequence_done(task, &res->seq_res)) 4965 return 0; 4966 if (nfs4_async_handle_error(task, res->server, NULL, 4967 &data->timeout) == -EAGAIN) 4968 return 0; 4969 if (task->tk_status == 0) 4970 nfs4_update_changeattr(dir, &res->cinfo, 4971 res->dir_attr->time_start, 4972 NFS_INO_INVALID_DATA); 4973 return 1; 4974 } 4975 4976 static void nfs4_proc_rename_setup(struct rpc_message *msg, 4977 struct dentry *old_dentry, 4978 struct dentry *new_dentry) 4979 { 4980 struct nfs_renameargs *arg = msg->rpc_argp; 4981 struct nfs_renameres *res = msg->rpc_resp; 4982 struct inode *old_inode = d_inode(old_dentry); 4983 struct inode *new_inode = d_inode(new_dentry); 4984 4985 if (old_inode) 4986 nfs4_inode_make_writeable(old_inode); 4987 if (new_inode) 4988 nfs4_inode_return_delegation(new_inode); 4989 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; 4990 res->server = NFS_SB(old_dentry->d_sb); 4991 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1, 0); 4992 } 4993 4994 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) 4995 { 4996 nfs4_setup_sequence(NFS_SERVER(data->old_dir)->nfs_client, 4997 &data->args.seq_args, 4998 &data->res.seq_res, 4999 task); 5000 } 5001 5002 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, 5003 struct inode *new_dir) 5004 { 5005 struct nfs_renamedata *data = task->tk_calldata; 5006 struct nfs_renameres *res = &data->res; 5007 5008 if (!nfs4_sequence_done(task, &res->seq_res)) 5009 return 0; 5010 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN) 5011 return 0; 5012 5013 if (task->tk_status == 0) { 5014 nfs_d_prune_case_insensitive_aliases(d_inode(data->old_dentry)); 5015 if (new_dir != old_dir) { 5016 /* Note: If we moved a directory, nlink will change */ 5017 nfs4_update_changeattr(old_dir, &res->old_cinfo, 5018 res->old_fattr->time_start, 5019 NFS_INO_INVALID_NLINK | 5020 NFS_INO_INVALID_DATA); 5021 nfs4_update_changeattr(new_dir, &res->new_cinfo, 5022 res->new_fattr->time_start, 5023 NFS_INO_INVALID_NLINK | 5024 NFS_INO_INVALID_DATA); 5025 } else 5026 nfs4_update_changeattr(old_dir, &res->old_cinfo, 5027 res->old_fattr->time_start, 5028 NFS_INO_INVALID_DATA); 5029 } 5030 return 1; 5031 } 5032 5033 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name) 5034 { 5035 struct nfs_server *server = NFS_SERVER(inode); 5036 __u32 bitmask[NFS4_BITMASK_SZ]; 5037 struct nfs4_link_arg arg = { 5038 .fh = NFS_FH(inode), 5039 .dir_fh = NFS_FH(dir), 5040 .name = name, 5041 .bitmask = bitmask, 5042 }; 5043 struct nfs4_link_res res = { 5044 .server = server, 5045 }; 5046 struct rpc_message msg = { 5047 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK], 5048 .rpc_argp = &arg, 5049 .rpc_resp = &res, 5050 }; 5051 int status = -ENOMEM; 5052 5053 res.fattr = nfs_alloc_fattr_with_label(server); 5054 if (res.fattr == NULL) 5055 goto out; 5056 5057 nfs4_inode_make_writeable(inode); 5058 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, res.fattr->label), 5059 inode, 5060 NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME); 5061 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 5062 if (!status) { 5063 nfs4_update_changeattr(dir, &res.cinfo, res.fattr->time_start, 5064 NFS_INO_INVALID_DATA); 5065 nfs4_inc_nlink(inode); 5066 status = nfs_post_op_update_inode(inode, res.fattr); 5067 if (!status) 5068 nfs_setsecurity(inode, res.fattr); 5069 } 5070 5071 out: 5072 nfs_free_fattr(res.fattr); 5073 return status; 5074 } 5075 5076 static int nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name) 5077 { 5078 struct nfs4_exception exception = { 5079 .interruptible = true, 5080 }; 5081 int err; 5082 do { 5083 err = nfs4_handle_exception(NFS_SERVER(inode), 5084 _nfs4_proc_link(inode, dir, name), 5085 &exception); 5086 } while (exception.retry); 5087 return err; 5088 } 5089 5090 struct nfs4_createdata { 5091 struct rpc_message msg; 5092 struct nfs4_create_arg arg; 5093 struct nfs4_create_res res; 5094 struct nfs_fh fh; 5095 struct nfs_fattr fattr; 5096 }; 5097 5098 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir, 5099 const struct qstr *name, struct iattr *sattr, u32 ftype) 5100 { 5101 struct nfs4_createdata *data; 5102 5103 data = kzalloc(sizeof(*data), GFP_KERNEL); 5104 if (data != NULL) { 5105 struct nfs_server *server = NFS_SERVER(dir); 5106 5107 data->fattr.label = nfs4_label_alloc(server, GFP_KERNEL); 5108 if (IS_ERR(data->fattr.label)) 5109 goto out_free; 5110 5111 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE]; 5112 data->msg.rpc_argp = &data->arg; 5113 data->msg.rpc_resp = &data->res; 5114 data->arg.dir_fh = NFS_FH(dir); 5115 data->arg.server = server; 5116 data->arg.name = name; 5117 data->arg.attrs = sattr; 5118 data->arg.ftype = ftype; 5119 data->arg.bitmask = nfs4_bitmask(server, data->fattr.label); 5120 data->arg.umask = current_umask(); 5121 data->res.server = server; 5122 data->res.fh = &data->fh; 5123 data->res.fattr = &data->fattr; 5124 nfs_fattr_init(data->res.fattr); 5125 } 5126 return data; 5127 out_free: 5128 kfree(data); 5129 return NULL; 5130 } 5131 5132 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data) 5133 { 5134 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 5135 &data->arg.seq_args, &data->res.seq_res, 1); 5136 if (status == 0) { 5137 spin_lock(&dir->i_lock); 5138 /* Creating a directory bumps nlink in the parent */ 5139 if (data->arg.ftype == NF4DIR) 5140 nfs4_inc_nlink_locked(dir); 5141 nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo, 5142 data->res.fattr->time_start, 5143 NFS_INO_INVALID_DATA); 5144 spin_unlock(&dir->i_lock); 5145 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr); 5146 } 5147 return status; 5148 } 5149 5150 static void nfs4_free_createdata(struct nfs4_createdata *data) 5151 { 5152 nfs4_label_free(data->fattr.label); 5153 kfree(data); 5154 } 5155 5156 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 5157 struct folio *folio, unsigned int len, struct iattr *sattr, 5158 struct nfs4_label *label) 5159 { 5160 struct page *page = &folio->page; 5161 struct nfs4_createdata *data; 5162 int status = -ENAMETOOLONG; 5163 5164 if (len > NFS4_MAXPATHLEN) 5165 goto out; 5166 5167 status = -ENOMEM; 5168 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK); 5169 if (data == NULL) 5170 goto out; 5171 5172 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK]; 5173 data->arg.u.symlink.pages = &page; 5174 data->arg.u.symlink.len = len; 5175 data->arg.label = label; 5176 5177 status = nfs4_do_create(dir, dentry, data); 5178 5179 nfs4_free_createdata(data); 5180 out: 5181 return status; 5182 } 5183 5184 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 5185 struct folio *folio, unsigned int len, struct iattr *sattr) 5186 { 5187 struct nfs4_exception exception = { 5188 .interruptible = true, 5189 }; 5190 struct nfs4_label l, *label; 5191 int err; 5192 5193 label = nfs4_label_init_security(dir, dentry, sattr, &l); 5194 5195 do { 5196 err = _nfs4_proc_symlink(dir, dentry, folio, len, sattr, label); 5197 trace_nfs4_symlink(dir, &dentry->d_name, err); 5198 err = nfs4_handle_exception(NFS_SERVER(dir), err, 5199 &exception); 5200 } while (exception.retry); 5201 5202 nfs4_label_release_security(label); 5203 return err; 5204 } 5205 5206 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 5207 struct iattr *sattr, struct nfs4_label *label) 5208 { 5209 struct nfs4_createdata *data; 5210 int status = -ENOMEM; 5211 5212 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR); 5213 if (data == NULL) 5214 goto out; 5215 5216 data->arg.label = label; 5217 status = nfs4_do_create(dir, dentry, data); 5218 5219 nfs4_free_createdata(data); 5220 out: 5221 return status; 5222 } 5223 5224 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 5225 struct iattr *sattr) 5226 { 5227 struct nfs_server *server = NFS_SERVER(dir); 5228 struct nfs4_exception exception = { 5229 .interruptible = true, 5230 }; 5231 struct nfs4_label l, *label; 5232 int err; 5233 5234 label = nfs4_label_init_security(dir, dentry, sattr, &l); 5235 5236 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 5237 sattr->ia_mode &= ~current_umask(); 5238 do { 5239 err = _nfs4_proc_mkdir(dir, dentry, sattr, label); 5240 trace_nfs4_mkdir(dir, &dentry->d_name, err); 5241 err = nfs4_handle_exception(NFS_SERVER(dir), err, 5242 &exception); 5243 } while (exception.retry); 5244 nfs4_label_release_security(label); 5245 5246 return err; 5247 } 5248 5249 static int _nfs4_proc_readdir(struct nfs_readdir_arg *nr_arg, 5250 struct nfs_readdir_res *nr_res) 5251 { 5252 struct inode *dir = d_inode(nr_arg->dentry); 5253 struct nfs_server *server = NFS_SERVER(dir); 5254 struct nfs4_readdir_arg args = { 5255 .fh = NFS_FH(dir), 5256 .pages = nr_arg->pages, 5257 .pgbase = 0, 5258 .count = nr_arg->page_len, 5259 .plus = nr_arg->plus, 5260 }; 5261 struct nfs4_readdir_res res; 5262 struct rpc_message msg = { 5263 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR], 5264 .rpc_argp = &args, 5265 .rpc_resp = &res, 5266 .rpc_cred = nr_arg->cred, 5267 }; 5268 int status; 5269 5270 dprintk("%s: dentry = %pd2, cookie = %llu\n", __func__, 5271 nr_arg->dentry, (unsigned long long)nr_arg->cookie); 5272 if (!(server->caps & NFS_CAP_SECURITY_LABEL)) 5273 args.bitmask = server->attr_bitmask_nl; 5274 else 5275 args.bitmask = server->attr_bitmask; 5276 5277 nfs4_setup_readdir(nr_arg->cookie, nr_arg->verf, nr_arg->dentry, &args); 5278 res.pgbase = args.pgbase; 5279 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, 5280 &res.seq_res, 0); 5281 if (status >= 0) { 5282 memcpy(nr_res->verf, res.verifier.data, NFS4_VERIFIER_SIZE); 5283 status += args.pgbase; 5284 } 5285 5286 nfs_invalidate_atime(dir); 5287 5288 dprintk("%s: returns %d\n", __func__, status); 5289 return status; 5290 } 5291 5292 static int nfs4_proc_readdir(struct nfs_readdir_arg *arg, 5293 struct nfs_readdir_res *res) 5294 { 5295 struct nfs4_exception exception = { 5296 .interruptible = true, 5297 }; 5298 int err; 5299 do { 5300 err = _nfs4_proc_readdir(arg, res); 5301 trace_nfs4_readdir(d_inode(arg->dentry), err); 5302 err = nfs4_handle_exception(NFS_SERVER(d_inode(arg->dentry)), 5303 err, &exception); 5304 } while (exception.retry); 5305 return err; 5306 } 5307 5308 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 5309 struct iattr *sattr, struct nfs4_label *label, dev_t rdev) 5310 { 5311 struct nfs4_createdata *data; 5312 int mode = sattr->ia_mode; 5313 int status = -ENOMEM; 5314 5315 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK); 5316 if (data == NULL) 5317 goto out; 5318 5319 if (S_ISFIFO(mode)) 5320 data->arg.ftype = NF4FIFO; 5321 else if (S_ISBLK(mode)) { 5322 data->arg.ftype = NF4BLK; 5323 data->arg.u.device.specdata1 = MAJOR(rdev); 5324 data->arg.u.device.specdata2 = MINOR(rdev); 5325 } 5326 else if (S_ISCHR(mode)) { 5327 data->arg.ftype = NF4CHR; 5328 data->arg.u.device.specdata1 = MAJOR(rdev); 5329 data->arg.u.device.specdata2 = MINOR(rdev); 5330 } else if (!S_ISSOCK(mode)) { 5331 status = -EINVAL; 5332 goto out_free; 5333 } 5334 5335 data->arg.label = label; 5336 status = nfs4_do_create(dir, dentry, data); 5337 out_free: 5338 nfs4_free_createdata(data); 5339 out: 5340 return status; 5341 } 5342 5343 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 5344 struct iattr *sattr, dev_t rdev) 5345 { 5346 struct nfs_server *server = NFS_SERVER(dir); 5347 struct nfs4_exception exception = { 5348 .interruptible = true, 5349 }; 5350 struct nfs4_label l, *label; 5351 int err; 5352 5353 label = nfs4_label_init_security(dir, dentry, sattr, &l); 5354 5355 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 5356 sattr->ia_mode &= ~current_umask(); 5357 do { 5358 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev); 5359 trace_nfs4_mknod(dir, &dentry->d_name, err); 5360 err = nfs4_handle_exception(NFS_SERVER(dir), err, 5361 &exception); 5362 } while (exception.retry); 5363 5364 nfs4_label_release_security(label); 5365 5366 return err; 5367 } 5368 5369 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, 5370 struct nfs_fsstat *fsstat) 5371 { 5372 struct nfs4_statfs_arg args = { 5373 .fh = fhandle, 5374 .bitmask = server->attr_bitmask, 5375 }; 5376 struct nfs4_statfs_res res = { 5377 .fsstat = fsstat, 5378 }; 5379 struct rpc_message msg = { 5380 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS], 5381 .rpc_argp = &args, 5382 .rpc_resp = &res, 5383 }; 5384 5385 nfs_fattr_init(fsstat->fattr); 5386 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5387 } 5388 5389 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) 5390 { 5391 struct nfs4_exception exception = { 5392 .interruptible = true, 5393 }; 5394 int err; 5395 do { 5396 err = nfs4_handle_exception(server, 5397 _nfs4_proc_statfs(server, fhandle, fsstat), 5398 &exception); 5399 } while (exception.retry); 5400 return err; 5401 } 5402 5403 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, 5404 struct nfs_fsinfo *fsinfo) 5405 { 5406 struct nfs4_fsinfo_arg args = { 5407 .fh = fhandle, 5408 .bitmask = server->attr_bitmask, 5409 }; 5410 struct nfs4_fsinfo_res res = { 5411 .fsinfo = fsinfo, 5412 }; 5413 struct rpc_message msg = { 5414 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO], 5415 .rpc_argp = &args, 5416 .rpc_resp = &res, 5417 }; 5418 5419 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5420 } 5421 5422 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 5423 { 5424 struct nfs4_exception exception = { 5425 .interruptible = true, 5426 }; 5427 int err; 5428 5429 do { 5430 err = _nfs4_do_fsinfo(server, fhandle, fsinfo); 5431 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err); 5432 if (err == 0) { 5433 nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time * HZ); 5434 break; 5435 } 5436 err = nfs4_handle_exception(server, err, &exception); 5437 } while (exception.retry); 5438 return err; 5439 } 5440 5441 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 5442 { 5443 int error; 5444 5445 nfs_fattr_init(fsinfo->fattr); 5446 error = nfs4_do_fsinfo(server, fhandle, fsinfo); 5447 if (error == 0) { 5448 /* block layout checks this! */ 5449 server->pnfs_blksize = fsinfo->blksize; 5450 set_pnfs_layoutdriver(server, fhandle, fsinfo); 5451 } 5452 5453 return error; 5454 } 5455 5456 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 5457 struct nfs_pathconf *pathconf) 5458 { 5459 struct nfs4_pathconf_arg args = { 5460 .fh = fhandle, 5461 .bitmask = server->attr_bitmask, 5462 }; 5463 struct nfs4_pathconf_res res = { 5464 .pathconf = pathconf, 5465 }; 5466 struct rpc_message msg = { 5467 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF], 5468 .rpc_argp = &args, 5469 .rpc_resp = &res, 5470 }; 5471 5472 /* None of the pathconf attributes are mandatory to implement */ 5473 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) { 5474 memset(pathconf, 0, sizeof(*pathconf)); 5475 return 0; 5476 } 5477 5478 nfs_fattr_init(pathconf->fattr); 5479 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5480 } 5481 5482 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 5483 struct nfs_pathconf *pathconf) 5484 { 5485 struct nfs4_exception exception = { 5486 .interruptible = true, 5487 }; 5488 int err; 5489 5490 do { 5491 err = nfs4_handle_exception(server, 5492 _nfs4_proc_pathconf(server, fhandle, pathconf), 5493 &exception); 5494 } while (exception.retry); 5495 return err; 5496 } 5497 5498 int nfs4_set_rw_stateid(nfs4_stateid *stateid, 5499 const struct nfs_open_context *ctx, 5500 const struct nfs_lock_context *l_ctx, 5501 fmode_t fmode) 5502 { 5503 return nfs4_select_rw_stateid(ctx->state, fmode, l_ctx, stateid, NULL); 5504 } 5505 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid); 5506 5507 static bool nfs4_stateid_is_current(nfs4_stateid *stateid, 5508 const struct nfs_open_context *ctx, 5509 const struct nfs_lock_context *l_ctx, 5510 fmode_t fmode) 5511 { 5512 nfs4_stateid _current_stateid; 5513 5514 /* If the current stateid represents a lost lock, then exit */ 5515 if (nfs4_set_rw_stateid(&_current_stateid, ctx, l_ctx, fmode) == -EIO) 5516 return true; 5517 return nfs4_stateid_match(stateid, &_current_stateid); 5518 } 5519 5520 static bool nfs4_error_stateid_expired(int err) 5521 { 5522 switch (err) { 5523 case -NFS4ERR_DELEG_REVOKED: 5524 case -NFS4ERR_ADMIN_REVOKED: 5525 case -NFS4ERR_BAD_STATEID: 5526 case -NFS4ERR_STALE_STATEID: 5527 case -NFS4ERR_OLD_STATEID: 5528 case -NFS4ERR_OPENMODE: 5529 case -NFS4ERR_EXPIRED: 5530 return true; 5531 } 5532 return false; 5533 } 5534 5535 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr) 5536 { 5537 struct nfs_server *server = NFS_SERVER(hdr->inode); 5538 5539 trace_nfs4_read(hdr, task->tk_status); 5540 if (task->tk_status < 0) { 5541 struct nfs4_exception exception = { 5542 .inode = hdr->inode, 5543 .state = hdr->args.context->state, 5544 .stateid = &hdr->args.stateid, 5545 }; 5546 task->tk_status = nfs4_async_handle_exception(task, 5547 server, task->tk_status, &exception); 5548 if (exception.retry) { 5549 rpc_restart_call_prepare(task); 5550 return -EAGAIN; 5551 } 5552 } 5553 5554 if (task->tk_status > 0) 5555 renew_lease(server, hdr->timestamp); 5556 return 0; 5557 } 5558 5559 static bool nfs4_read_stateid_changed(struct rpc_task *task, 5560 struct nfs_pgio_args *args) 5561 { 5562 5563 if (!nfs4_error_stateid_expired(task->tk_status) || 5564 nfs4_stateid_is_current(&args->stateid, 5565 args->context, 5566 args->lock_context, 5567 FMODE_READ)) 5568 return false; 5569 rpc_restart_call_prepare(task); 5570 return true; 5571 } 5572 5573 static bool nfs4_read_plus_not_supported(struct rpc_task *task, 5574 struct nfs_pgio_header *hdr) 5575 { 5576 struct nfs_server *server = NFS_SERVER(hdr->inode); 5577 struct rpc_message *msg = &task->tk_msg; 5578 5579 if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS] && 5580 task->tk_status == -ENOTSUPP) { 5581 server->caps &= ~NFS_CAP_READ_PLUS; 5582 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 5583 rpc_restart_call_prepare(task); 5584 return true; 5585 } 5586 return false; 5587 } 5588 5589 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 5590 { 5591 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 5592 return -EAGAIN; 5593 if (nfs4_read_stateid_changed(task, &hdr->args)) 5594 return -EAGAIN; 5595 if (nfs4_read_plus_not_supported(task, hdr)) 5596 return -EAGAIN; 5597 if (task->tk_status > 0) 5598 nfs_invalidate_atime(hdr->inode); 5599 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) : 5600 nfs4_read_done_cb(task, hdr); 5601 } 5602 5603 #if defined CONFIG_NFS_V4_2 && defined CONFIG_NFS_V4_2_READ_PLUS 5604 static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr, 5605 struct rpc_message *msg) 5606 { 5607 /* Note: We don't use READ_PLUS with pNFS yet */ 5608 if (nfs_server_capable(hdr->inode, NFS_CAP_READ_PLUS) && !hdr->ds_clp) { 5609 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS]; 5610 return nfs_read_alloc_scratch(hdr, READ_PLUS_SCRATCH_SIZE); 5611 } 5612 return false; 5613 } 5614 #else 5615 static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr, 5616 struct rpc_message *msg) 5617 { 5618 return false; 5619 } 5620 #endif /* CONFIG_NFS_V4_2 */ 5621 5622 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr, 5623 struct rpc_message *msg) 5624 { 5625 hdr->timestamp = jiffies; 5626 if (!hdr->pgio_done_cb) 5627 hdr->pgio_done_cb = nfs4_read_done_cb; 5628 if (!nfs42_read_plus_support(hdr, msg)) 5629 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 5630 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0); 5631 } 5632 5633 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task, 5634 struct nfs_pgio_header *hdr) 5635 { 5636 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode)->nfs_client, 5637 &hdr->args.seq_args, 5638 &hdr->res.seq_res, 5639 task)) 5640 return 0; 5641 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context, 5642 hdr->args.lock_context, 5643 hdr->rw_mode) == -EIO) 5644 return -EIO; 5645 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) 5646 return -EIO; 5647 return 0; 5648 } 5649 5650 static int nfs4_write_done_cb(struct rpc_task *task, 5651 struct nfs_pgio_header *hdr) 5652 { 5653 struct inode *inode = hdr->inode; 5654 5655 trace_nfs4_write(hdr, task->tk_status); 5656 if (task->tk_status < 0) { 5657 struct nfs4_exception exception = { 5658 .inode = hdr->inode, 5659 .state = hdr->args.context->state, 5660 .stateid = &hdr->args.stateid, 5661 }; 5662 task->tk_status = nfs4_async_handle_exception(task, 5663 NFS_SERVER(inode), task->tk_status, 5664 &exception); 5665 if (exception.retry) { 5666 rpc_restart_call_prepare(task); 5667 return -EAGAIN; 5668 } 5669 } 5670 if (task->tk_status >= 0) { 5671 renew_lease(NFS_SERVER(inode), hdr->timestamp); 5672 nfs_writeback_update_inode(hdr); 5673 } 5674 return 0; 5675 } 5676 5677 static bool nfs4_write_stateid_changed(struct rpc_task *task, 5678 struct nfs_pgio_args *args) 5679 { 5680 5681 if (!nfs4_error_stateid_expired(task->tk_status) || 5682 nfs4_stateid_is_current(&args->stateid, 5683 args->context, 5684 args->lock_context, 5685 FMODE_WRITE)) 5686 return false; 5687 rpc_restart_call_prepare(task); 5688 return true; 5689 } 5690 5691 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 5692 { 5693 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 5694 return -EAGAIN; 5695 if (nfs4_write_stateid_changed(task, &hdr->args)) 5696 return -EAGAIN; 5697 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) : 5698 nfs4_write_done_cb(task, hdr); 5699 } 5700 5701 static 5702 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr) 5703 { 5704 /* Don't request attributes for pNFS or O_DIRECT writes */ 5705 if (hdr->ds_clp != NULL || hdr->dreq != NULL) 5706 return false; 5707 /* Otherwise, request attributes if and only if we don't hold 5708 * a delegation 5709 */ 5710 return nfs4_have_delegation(hdr->inode, FMODE_READ, 0) == 0; 5711 } 5712 5713 void nfs4_bitmask_set(__u32 bitmask[], const __u32 src[], 5714 struct inode *inode, unsigned long cache_validity) 5715 { 5716 struct nfs_server *server = NFS_SERVER(inode); 5717 unsigned int i; 5718 5719 memcpy(bitmask, src, sizeof(*bitmask) * NFS4_BITMASK_SZ); 5720 cache_validity |= READ_ONCE(NFS_I(inode)->cache_validity); 5721 5722 if (cache_validity & NFS_INO_INVALID_CHANGE) 5723 bitmask[0] |= FATTR4_WORD0_CHANGE; 5724 if (cache_validity & NFS_INO_INVALID_ATIME) 5725 bitmask[1] |= FATTR4_WORD1_TIME_ACCESS; 5726 if (cache_validity & NFS_INO_INVALID_MODE) 5727 bitmask[1] |= FATTR4_WORD1_MODE; 5728 if (cache_validity & NFS_INO_INVALID_OTHER) 5729 bitmask[1] |= FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP; 5730 if (cache_validity & NFS_INO_INVALID_NLINK) 5731 bitmask[1] |= FATTR4_WORD1_NUMLINKS; 5732 if (cache_validity & NFS_INO_INVALID_CTIME) 5733 bitmask[1] |= FATTR4_WORD1_TIME_METADATA; 5734 if (cache_validity & NFS_INO_INVALID_MTIME) 5735 bitmask[1] |= FATTR4_WORD1_TIME_MODIFY; 5736 if (cache_validity & NFS_INO_INVALID_BLOCKS) 5737 bitmask[1] |= FATTR4_WORD1_SPACE_USED; 5738 5739 if (cache_validity & NFS_INO_INVALID_SIZE) 5740 bitmask[0] |= FATTR4_WORD0_SIZE; 5741 5742 for (i = 0; i < NFS4_BITMASK_SZ; i++) 5743 bitmask[i] &= server->attr_bitmask[i]; 5744 } 5745 5746 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr, 5747 struct rpc_message *msg, 5748 struct rpc_clnt **clnt) 5749 { 5750 struct nfs_server *server = NFS_SERVER(hdr->inode); 5751 5752 if (!nfs4_write_need_cache_consistency_data(hdr)) { 5753 hdr->args.bitmask = NULL; 5754 hdr->res.fattr = NULL; 5755 } else { 5756 nfs4_bitmask_set(hdr->args.bitmask_store, 5757 server->cache_consistency_bitmask, 5758 hdr->inode, NFS_INO_INVALID_BLOCKS); 5759 hdr->args.bitmask = hdr->args.bitmask_store; 5760 } 5761 5762 if (!hdr->pgio_done_cb) 5763 hdr->pgio_done_cb = nfs4_write_done_cb; 5764 hdr->res.server = server; 5765 hdr->timestamp = jiffies; 5766 5767 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; 5768 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0); 5769 nfs4_state_protect_write(hdr->ds_clp ? hdr->ds_clp : server->nfs_client, clnt, msg, hdr); 5770 } 5771 5772 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) 5773 { 5774 nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client, 5775 &data->args.seq_args, 5776 &data->res.seq_res, 5777 task); 5778 } 5779 5780 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data) 5781 { 5782 struct inode *inode = data->inode; 5783 5784 trace_nfs4_commit(data, task->tk_status); 5785 if (nfs4_async_handle_error(task, NFS_SERVER(inode), 5786 NULL, NULL) == -EAGAIN) { 5787 rpc_restart_call_prepare(task); 5788 return -EAGAIN; 5789 } 5790 return 0; 5791 } 5792 5793 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data) 5794 { 5795 if (!nfs4_sequence_done(task, &data->res.seq_res)) 5796 return -EAGAIN; 5797 return data->commit_done_cb(task, data); 5798 } 5799 5800 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg, 5801 struct rpc_clnt **clnt) 5802 { 5803 struct nfs_server *server = NFS_SERVER(data->inode); 5804 5805 if (data->commit_done_cb == NULL) 5806 data->commit_done_cb = nfs4_commit_done_cb; 5807 data->res.server = server; 5808 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT]; 5809 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0); 5810 nfs4_state_protect(data->ds_clp ? data->ds_clp : server->nfs_client, 5811 NFS_SP4_MACH_CRED_COMMIT, clnt, msg); 5812 } 5813 5814 static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args, 5815 struct nfs_commitres *res) 5816 { 5817 struct inode *dst_inode = file_inode(dst); 5818 struct nfs_server *server = NFS_SERVER(dst_inode); 5819 struct rpc_message msg = { 5820 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT], 5821 .rpc_argp = args, 5822 .rpc_resp = res, 5823 }; 5824 5825 args->fh = NFS_FH(dst_inode); 5826 return nfs4_call_sync(server->client, server, &msg, 5827 &args->seq_args, &res->seq_res, 1); 5828 } 5829 5830 int nfs4_proc_commit(struct file *dst, __u64 offset, __u32 count, struct nfs_commitres *res) 5831 { 5832 struct nfs_commitargs args = { 5833 .offset = offset, 5834 .count = count, 5835 }; 5836 struct nfs_server *dst_server = NFS_SERVER(file_inode(dst)); 5837 struct nfs4_exception exception = { }; 5838 int status; 5839 5840 do { 5841 status = _nfs4_proc_commit(dst, &args, res); 5842 status = nfs4_handle_exception(dst_server, status, &exception); 5843 } while (exception.retry); 5844 5845 return status; 5846 } 5847 5848 struct nfs4_renewdata { 5849 struct nfs_client *client; 5850 unsigned long timestamp; 5851 }; 5852 5853 /* 5854 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special 5855 * standalone procedure for queueing an asynchronous RENEW. 5856 */ 5857 static void nfs4_renew_release(void *calldata) 5858 { 5859 struct nfs4_renewdata *data = calldata; 5860 struct nfs_client *clp = data->client; 5861 5862 if (refcount_read(&clp->cl_count) > 1) 5863 nfs4_schedule_state_renewal(clp); 5864 nfs_put_client(clp); 5865 kfree(data); 5866 } 5867 5868 static void nfs4_renew_done(struct rpc_task *task, void *calldata) 5869 { 5870 struct nfs4_renewdata *data = calldata; 5871 struct nfs_client *clp = data->client; 5872 unsigned long timestamp = data->timestamp; 5873 5874 trace_nfs4_renew_async(clp, task->tk_status); 5875 switch (task->tk_status) { 5876 case 0: 5877 break; 5878 case -NFS4ERR_LEASE_MOVED: 5879 nfs4_schedule_lease_moved_recovery(clp); 5880 break; 5881 default: 5882 /* Unless we're shutting down, schedule state recovery! */ 5883 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) 5884 return; 5885 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { 5886 nfs4_schedule_lease_recovery(clp); 5887 return; 5888 } 5889 nfs4_schedule_path_down_recovery(clp); 5890 } 5891 do_renew_lease(clp, timestamp); 5892 } 5893 5894 static const struct rpc_call_ops nfs4_renew_ops = { 5895 .rpc_call_done = nfs4_renew_done, 5896 .rpc_release = nfs4_renew_release, 5897 }; 5898 5899 static int nfs4_proc_async_renew(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags) 5900 { 5901 struct rpc_message msg = { 5902 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 5903 .rpc_argp = clp, 5904 .rpc_cred = cred, 5905 }; 5906 struct nfs4_renewdata *data; 5907 5908 if (renew_flags == 0) 5909 return 0; 5910 if (!refcount_inc_not_zero(&clp->cl_count)) 5911 return -EIO; 5912 data = kmalloc(sizeof(*data), GFP_NOFS); 5913 if (data == NULL) { 5914 nfs_put_client(clp); 5915 return -ENOMEM; 5916 } 5917 data->client = clp; 5918 data->timestamp = jiffies; 5919 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT, 5920 &nfs4_renew_ops, data); 5921 } 5922 5923 static int nfs4_proc_renew(struct nfs_client *clp, const struct cred *cred) 5924 { 5925 struct rpc_message msg = { 5926 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 5927 .rpc_argp = clp, 5928 .rpc_cred = cred, 5929 }; 5930 unsigned long now = jiffies; 5931 int status; 5932 5933 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5934 if (status < 0) 5935 return status; 5936 do_renew_lease(clp, now); 5937 return 0; 5938 } 5939 5940 static bool nfs4_server_supports_acls(const struct nfs_server *server, 5941 enum nfs4_acl_type type) 5942 { 5943 switch (type) { 5944 default: 5945 return server->attr_bitmask[0] & FATTR4_WORD0_ACL; 5946 case NFS4ACL_DACL: 5947 return server->attr_bitmask[1] & FATTR4_WORD1_DACL; 5948 case NFS4ACL_SACL: 5949 return server->attr_bitmask[1] & FATTR4_WORD1_SACL; 5950 } 5951 } 5952 5953 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that 5954 * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on 5955 * the stack. 5956 */ 5957 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE) 5958 5959 int nfs4_buf_to_pages_noslab(const void *buf, size_t buflen, 5960 struct page **pages) 5961 { 5962 struct page *newpage, **spages; 5963 int rc = 0; 5964 size_t len; 5965 spages = pages; 5966 5967 do { 5968 len = min_t(size_t, PAGE_SIZE, buflen); 5969 newpage = alloc_page(GFP_KERNEL); 5970 5971 if (newpage == NULL) 5972 goto unwind; 5973 memcpy(page_address(newpage), buf, len); 5974 buf += len; 5975 buflen -= len; 5976 *pages++ = newpage; 5977 rc++; 5978 } while (buflen != 0); 5979 5980 return rc; 5981 5982 unwind: 5983 for(; rc > 0; rc--) 5984 __free_page(spages[rc-1]); 5985 return -ENOMEM; 5986 } 5987 5988 struct nfs4_cached_acl { 5989 enum nfs4_acl_type type; 5990 int cached; 5991 size_t len; 5992 char data[]; 5993 }; 5994 5995 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl) 5996 { 5997 struct nfs_inode *nfsi = NFS_I(inode); 5998 5999 spin_lock(&inode->i_lock); 6000 kfree(nfsi->nfs4_acl); 6001 nfsi->nfs4_acl = acl; 6002 spin_unlock(&inode->i_lock); 6003 } 6004 6005 static void nfs4_zap_acl_attr(struct inode *inode) 6006 { 6007 nfs4_set_cached_acl(inode, NULL); 6008 } 6009 6010 static ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, 6011 size_t buflen, enum nfs4_acl_type type) 6012 { 6013 struct nfs_inode *nfsi = NFS_I(inode); 6014 struct nfs4_cached_acl *acl; 6015 int ret = -ENOENT; 6016 6017 spin_lock(&inode->i_lock); 6018 acl = nfsi->nfs4_acl; 6019 if (acl == NULL) 6020 goto out; 6021 if (acl->type != type) 6022 goto out; 6023 if (buf == NULL) /* user is just asking for length */ 6024 goto out_len; 6025 if (acl->cached == 0) 6026 goto out; 6027 ret = -ERANGE; /* see getxattr(2) man page */ 6028 if (acl->len > buflen) 6029 goto out; 6030 memcpy(buf, acl->data, acl->len); 6031 out_len: 6032 ret = acl->len; 6033 out: 6034 spin_unlock(&inode->i_lock); 6035 return ret; 6036 } 6037 6038 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, 6039 size_t pgbase, size_t acl_len, 6040 enum nfs4_acl_type type) 6041 { 6042 struct nfs4_cached_acl *acl; 6043 size_t buflen = sizeof(*acl) + acl_len; 6044 6045 if (buflen <= PAGE_SIZE) { 6046 acl = kmalloc(buflen, GFP_KERNEL); 6047 if (acl == NULL) 6048 goto out; 6049 acl->cached = 1; 6050 _copy_from_pages(acl->data, pages, pgbase, acl_len); 6051 } else { 6052 acl = kmalloc(sizeof(*acl), GFP_KERNEL); 6053 if (acl == NULL) 6054 goto out; 6055 acl->cached = 0; 6056 } 6057 acl->type = type; 6058 acl->len = acl_len; 6059 out: 6060 nfs4_set_cached_acl(inode, acl); 6061 } 6062 6063 /* 6064 * The getxattr API returns the required buffer length when called with a 6065 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating 6066 * the required buf. On a NULL buf, we send a page of data to the server 6067 * guessing that the ACL request can be serviced by a page. If so, we cache 6068 * up to the page of ACL data, and the 2nd call to getxattr is serviced by 6069 * the cache. If not so, we throw away the page, and cache the required 6070 * length. The next getxattr call will then produce another round trip to 6071 * the server, this time with the input buf of the required size. 6072 */ 6073 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, 6074 size_t buflen, enum nfs4_acl_type type) 6075 { 6076 struct page **pages; 6077 struct nfs_getaclargs args = { 6078 .fh = NFS_FH(inode), 6079 .acl_type = type, 6080 .acl_len = buflen, 6081 }; 6082 struct nfs_getaclres res = { 6083 .acl_type = type, 6084 .acl_len = buflen, 6085 }; 6086 struct rpc_message msg = { 6087 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL], 6088 .rpc_argp = &args, 6089 .rpc_resp = &res, 6090 }; 6091 unsigned int npages; 6092 int ret = -ENOMEM, i; 6093 struct nfs_server *server = NFS_SERVER(inode); 6094 6095 if (buflen == 0) 6096 buflen = server->rsize; 6097 6098 npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1; 6099 pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); 6100 if (!pages) 6101 return -ENOMEM; 6102 6103 args.acl_pages = pages; 6104 6105 for (i = 0; i < npages; i++) { 6106 pages[i] = alloc_page(GFP_KERNEL); 6107 if (!pages[i]) 6108 goto out_free; 6109 } 6110 6111 /* for decoding across pages */ 6112 res.acl_scratch = alloc_page(GFP_KERNEL); 6113 if (!res.acl_scratch) 6114 goto out_free; 6115 6116 args.acl_len = npages * PAGE_SIZE; 6117 6118 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", 6119 __func__, buf, buflen, npages, args.acl_len); 6120 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), 6121 &msg, &args.seq_args, &res.seq_res, 0); 6122 if (ret) 6123 goto out_free; 6124 6125 /* Handle the case where the passed-in buffer is too short */ 6126 if (res.acl_flags & NFS4_ACL_TRUNC) { 6127 /* Did the user only issue a request for the acl length? */ 6128 if (buf == NULL) 6129 goto out_ok; 6130 ret = -ERANGE; 6131 goto out_free; 6132 } 6133 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len, 6134 type); 6135 if (buf) { 6136 if (res.acl_len > buflen) { 6137 ret = -ERANGE; 6138 goto out_free; 6139 } 6140 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len); 6141 } 6142 out_ok: 6143 ret = res.acl_len; 6144 out_free: 6145 while (--i >= 0) 6146 __free_page(pages[i]); 6147 if (res.acl_scratch) 6148 __free_page(res.acl_scratch); 6149 kfree(pages); 6150 return ret; 6151 } 6152 6153 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, 6154 size_t buflen, enum nfs4_acl_type type) 6155 { 6156 struct nfs4_exception exception = { 6157 .interruptible = true, 6158 }; 6159 ssize_t ret; 6160 do { 6161 ret = __nfs4_get_acl_uncached(inode, buf, buflen, type); 6162 trace_nfs4_get_acl(inode, ret); 6163 if (ret >= 0) 6164 break; 6165 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); 6166 } while (exception.retry); 6167 return ret; 6168 } 6169 6170 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen, 6171 enum nfs4_acl_type type) 6172 { 6173 struct nfs_server *server = NFS_SERVER(inode); 6174 int ret; 6175 6176 if (!nfs4_server_supports_acls(server, type)) 6177 return -EOPNOTSUPP; 6178 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 6179 if (ret < 0) 6180 return ret; 6181 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) 6182 nfs_zap_acl_cache(inode); 6183 ret = nfs4_read_cached_acl(inode, buf, buflen, type); 6184 if (ret != -ENOENT) 6185 /* -ENOENT is returned if there is no ACL or if there is an ACL 6186 * but no cached acl data, just the acl length */ 6187 return ret; 6188 return nfs4_get_acl_uncached(inode, buf, buflen, type); 6189 } 6190 6191 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, 6192 size_t buflen, enum nfs4_acl_type type) 6193 { 6194 struct nfs_server *server = NFS_SERVER(inode); 6195 struct page *pages[NFS4ACL_MAXPAGES]; 6196 struct nfs_setaclargs arg = { 6197 .fh = NFS_FH(inode), 6198 .acl_type = type, 6199 .acl_len = buflen, 6200 .acl_pages = pages, 6201 }; 6202 struct nfs_setaclres res; 6203 struct rpc_message msg = { 6204 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL], 6205 .rpc_argp = &arg, 6206 .rpc_resp = &res, 6207 }; 6208 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 6209 int ret, i; 6210 6211 /* You can't remove system.nfs4_acl: */ 6212 if (buflen == 0) 6213 return -EINVAL; 6214 if (!nfs4_server_supports_acls(server, type)) 6215 return -EOPNOTSUPP; 6216 if (npages > ARRAY_SIZE(pages)) 6217 return -ERANGE; 6218 i = nfs4_buf_to_pages_noslab(buf, buflen, arg.acl_pages); 6219 if (i < 0) 6220 return i; 6221 nfs4_inode_make_writeable(inode); 6222 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 6223 6224 /* 6225 * Free each page after tx, so the only ref left is 6226 * held by the network stack 6227 */ 6228 for (; i > 0; i--) 6229 put_page(pages[i-1]); 6230 6231 /* 6232 * Acl update can result in inode attribute update. 6233 * so mark the attribute cache invalid. 6234 */ 6235 spin_lock(&inode->i_lock); 6236 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE | 6237 NFS_INO_INVALID_CTIME | 6238 NFS_INO_REVAL_FORCED); 6239 spin_unlock(&inode->i_lock); 6240 nfs_access_zap_cache(inode); 6241 nfs_zap_acl_cache(inode); 6242 return ret; 6243 } 6244 6245 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, 6246 size_t buflen, enum nfs4_acl_type type) 6247 { 6248 struct nfs4_exception exception = { }; 6249 int err; 6250 do { 6251 err = __nfs4_proc_set_acl(inode, buf, buflen, type); 6252 trace_nfs4_set_acl(inode, err); 6253 if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) { 6254 /* 6255 * no need to retry since the kernel 6256 * isn't involved in encoding the ACEs. 6257 */ 6258 err = -EINVAL; 6259 break; 6260 } 6261 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6262 &exception); 6263 } while (exception.retry); 6264 return err; 6265 } 6266 6267 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 6268 static int _nfs4_get_security_label(struct inode *inode, void *buf, 6269 size_t buflen) 6270 { 6271 struct nfs_server *server = NFS_SERVER(inode); 6272 struct nfs4_label label = {0, 0, buflen, buf}; 6273 6274 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 6275 struct nfs_fattr fattr = { 6276 .label = &label, 6277 }; 6278 struct nfs4_getattr_arg arg = { 6279 .fh = NFS_FH(inode), 6280 .bitmask = bitmask, 6281 }; 6282 struct nfs4_getattr_res res = { 6283 .fattr = &fattr, 6284 .server = server, 6285 }; 6286 struct rpc_message msg = { 6287 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 6288 .rpc_argp = &arg, 6289 .rpc_resp = &res, 6290 }; 6291 int ret; 6292 6293 nfs_fattr_init(&fattr); 6294 6295 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0); 6296 if (ret) 6297 return ret; 6298 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL)) 6299 return -ENOENT; 6300 return label.len; 6301 } 6302 6303 static int nfs4_get_security_label(struct inode *inode, void *buf, 6304 size_t buflen) 6305 { 6306 struct nfs4_exception exception = { 6307 .interruptible = true, 6308 }; 6309 int err; 6310 6311 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 6312 return -EOPNOTSUPP; 6313 6314 do { 6315 err = _nfs4_get_security_label(inode, buf, buflen); 6316 trace_nfs4_get_security_label(inode, err); 6317 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6318 &exception); 6319 } while (exception.retry); 6320 return err; 6321 } 6322 6323 static int _nfs4_do_set_security_label(struct inode *inode, 6324 struct nfs4_label *ilabel, 6325 struct nfs_fattr *fattr) 6326 { 6327 6328 struct iattr sattr = {0}; 6329 struct nfs_server *server = NFS_SERVER(inode); 6330 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 6331 struct nfs_setattrargs arg = { 6332 .fh = NFS_FH(inode), 6333 .iap = &sattr, 6334 .server = server, 6335 .bitmask = bitmask, 6336 .label = ilabel, 6337 }; 6338 struct nfs_setattrres res = { 6339 .fattr = fattr, 6340 .server = server, 6341 }; 6342 struct rpc_message msg = { 6343 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 6344 .rpc_argp = &arg, 6345 .rpc_resp = &res, 6346 }; 6347 int status; 6348 6349 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 6350 6351 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 6352 if (status) 6353 dprintk("%s failed: %d\n", __func__, status); 6354 6355 return status; 6356 } 6357 6358 static int nfs4_do_set_security_label(struct inode *inode, 6359 struct nfs4_label *ilabel, 6360 struct nfs_fattr *fattr) 6361 { 6362 struct nfs4_exception exception = { }; 6363 int err; 6364 6365 do { 6366 err = _nfs4_do_set_security_label(inode, ilabel, fattr); 6367 trace_nfs4_set_security_label(inode, err); 6368 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6369 &exception); 6370 } while (exception.retry); 6371 return err; 6372 } 6373 6374 static int 6375 nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen) 6376 { 6377 struct nfs4_label ilabel = {0, 0, buflen, (char *)buf }; 6378 struct nfs_fattr *fattr; 6379 int status; 6380 6381 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 6382 return -EOPNOTSUPP; 6383 6384 fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode)); 6385 if (fattr == NULL) 6386 return -ENOMEM; 6387 6388 status = nfs4_do_set_security_label(inode, &ilabel, fattr); 6389 if (status == 0) 6390 nfs_setsecurity(inode, fattr); 6391 6392 nfs_free_fattr(fattr); 6393 return status; 6394 } 6395 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */ 6396 6397 6398 static void nfs4_init_boot_verifier(const struct nfs_client *clp, 6399 nfs4_verifier *bootverf) 6400 { 6401 __be32 verf[2]; 6402 6403 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) { 6404 /* An impossible timestamp guarantees this value 6405 * will never match a generated boot time. */ 6406 verf[0] = cpu_to_be32(U32_MAX); 6407 verf[1] = cpu_to_be32(U32_MAX); 6408 } else { 6409 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 6410 u64 ns = ktime_to_ns(nn->boot_time); 6411 6412 verf[0] = cpu_to_be32(ns >> 32); 6413 verf[1] = cpu_to_be32(ns); 6414 } 6415 memcpy(bootverf->data, verf, sizeof(bootverf->data)); 6416 } 6417 6418 static size_t 6419 nfs4_get_uniquifier(struct nfs_client *clp, char *buf, size_t buflen) 6420 { 6421 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 6422 struct nfs_netns_client *nn_clp = nn->nfs_client; 6423 const char *id; 6424 6425 buf[0] = '\0'; 6426 6427 if (nn_clp) { 6428 rcu_read_lock(); 6429 id = rcu_dereference(nn_clp->identifier); 6430 if (id) 6431 strscpy(buf, id, buflen); 6432 rcu_read_unlock(); 6433 } 6434 6435 if (nfs4_client_id_uniquifier[0] != '\0' && buf[0] == '\0') 6436 strscpy(buf, nfs4_client_id_uniquifier, buflen); 6437 6438 return strlen(buf); 6439 } 6440 6441 static int 6442 nfs4_init_nonuniform_client_string(struct nfs_client *clp) 6443 { 6444 char buf[NFS4_CLIENT_ID_UNIQ_LEN]; 6445 size_t buflen; 6446 size_t len; 6447 char *str; 6448 6449 if (clp->cl_owner_id != NULL) 6450 return 0; 6451 6452 rcu_read_lock(); 6453 len = 14 + 6454 strlen(clp->cl_rpcclient->cl_nodename) + 6455 1 + 6456 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) + 6457 1; 6458 rcu_read_unlock(); 6459 6460 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf)); 6461 if (buflen) 6462 len += buflen + 1; 6463 6464 if (len > NFS4_OPAQUE_LIMIT + 1) 6465 return -EINVAL; 6466 6467 /* 6468 * Since this string is allocated at mount time, and held until the 6469 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 6470 * about a memory-reclaim deadlock. 6471 */ 6472 str = kmalloc(len, GFP_KERNEL); 6473 if (!str) 6474 return -ENOMEM; 6475 6476 rcu_read_lock(); 6477 if (buflen) 6478 scnprintf(str, len, "Linux NFSv4.0 %s/%s/%s", 6479 clp->cl_rpcclient->cl_nodename, buf, 6480 rpc_peeraddr2str(clp->cl_rpcclient, 6481 RPC_DISPLAY_ADDR)); 6482 else 6483 scnprintf(str, len, "Linux NFSv4.0 %s/%s", 6484 clp->cl_rpcclient->cl_nodename, 6485 rpc_peeraddr2str(clp->cl_rpcclient, 6486 RPC_DISPLAY_ADDR)); 6487 rcu_read_unlock(); 6488 6489 clp->cl_owner_id = str; 6490 return 0; 6491 } 6492 6493 static int 6494 nfs4_init_uniform_client_string(struct nfs_client *clp) 6495 { 6496 char buf[NFS4_CLIENT_ID_UNIQ_LEN]; 6497 size_t buflen; 6498 size_t len; 6499 char *str; 6500 6501 if (clp->cl_owner_id != NULL) 6502 return 0; 6503 6504 len = 10 + 10 + 1 + 10 + 1 + 6505 strlen(clp->cl_rpcclient->cl_nodename) + 1; 6506 6507 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf)); 6508 if (buflen) 6509 len += buflen + 1; 6510 6511 if (len > NFS4_OPAQUE_LIMIT + 1) 6512 return -EINVAL; 6513 6514 /* 6515 * Since this string is allocated at mount time, and held until the 6516 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 6517 * about a memory-reclaim deadlock. 6518 */ 6519 str = kmalloc(len, GFP_KERNEL); 6520 if (!str) 6521 return -ENOMEM; 6522 6523 if (buflen) 6524 scnprintf(str, len, "Linux NFSv%u.%u %s/%s", 6525 clp->rpc_ops->version, clp->cl_minorversion, 6526 buf, clp->cl_rpcclient->cl_nodename); 6527 else 6528 scnprintf(str, len, "Linux NFSv%u.%u %s", 6529 clp->rpc_ops->version, clp->cl_minorversion, 6530 clp->cl_rpcclient->cl_nodename); 6531 clp->cl_owner_id = str; 6532 return 0; 6533 } 6534 6535 /* 6536 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback 6537 * services. Advertise one based on the address family of the 6538 * clientaddr. 6539 */ 6540 static unsigned int 6541 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len) 6542 { 6543 if (strchr(clp->cl_ipaddr, ':') != NULL) 6544 return scnprintf(buf, len, "tcp6"); 6545 else 6546 return scnprintf(buf, len, "tcp"); 6547 } 6548 6549 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata) 6550 { 6551 struct nfs4_setclientid *sc = calldata; 6552 6553 if (task->tk_status == 0) 6554 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred); 6555 } 6556 6557 static const struct rpc_call_ops nfs4_setclientid_ops = { 6558 .rpc_call_done = nfs4_setclientid_done, 6559 }; 6560 6561 /** 6562 * nfs4_proc_setclientid - Negotiate client ID 6563 * @clp: state data structure 6564 * @program: RPC program for NFSv4 callback service 6565 * @port: IP port number for NFS4 callback service 6566 * @cred: credential to use for this call 6567 * @res: where to place the result 6568 * 6569 * Returns zero, a negative errno, or a negative NFS4ERR status code. 6570 */ 6571 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, 6572 unsigned short port, const struct cred *cred, 6573 struct nfs4_setclientid_res *res) 6574 { 6575 nfs4_verifier sc_verifier; 6576 struct nfs4_setclientid setclientid = { 6577 .sc_verifier = &sc_verifier, 6578 .sc_prog = program, 6579 .sc_clnt = clp, 6580 }; 6581 struct rpc_message msg = { 6582 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID], 6583 .rpc_argp = &setclientid, 6584 .rpc_resp = res, 6585 .rpc_cred = cred, 6586 }; 6587 struct rpc_task_setup task_setup_data = { 6588 .rpc_client = clp->cl_rpcclient, 6589 .rpc_message = &msg, 6590 .callback_ops = &nfs4_setclientid_ops, 6591 .callback_data = &setclientid, 6592 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN, 6593 }; 6594 unsigned long now = jiffies; 6595 int status; 6596 6597 /* nfs_client_id4 */ 6598 nfs4_init_boot_verifier(clp, &sc_verifier); 6599 6600 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags)) 6601 status = nfs4_init_uniform_client_string(clp); 6602 else 6603 status = nfs4_init_nonuniform_client_string(clp); 6604 6605 if (status) 6606 goto out; 6607 6608 /* cb_client4 */ 6609 setclientid.sc_netid_len = 6610 nfs4_init_callback_netid(clp, 6611 setclientid.sc_netid, 6612 sizeof(setclientid.sc_netid)); 6613 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr, 6614 sizeof(setclientid.sc_uaddr), "%s.%u.%u", 6615 clp->cl_ipaddr, port >> 8, port & 255); 6616 6617 dprintk("NFS call setclientid auth=%s, '%s'\n", 6618 clp->cl_rpcclient->cl_auth->au_ops->au_name, 6619 clp->cl_owner_id); 6620 6621 status = nfs4_call_sync_custom(&task_setup_data); 6622 if (setclientid.sc_cred) { 6623 kfree(clp->cl_acceptor); 6624 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred); 6625 put_rpccred(setclientid.sc_cred); 6626 } 6627 6628 if (status == 0) 6629 do_renew_lease(clp, now); 6630 out: 6631 trace_nfs4_setclientid(clp, status); 6632 dprintk("NFS reply setclientid: %d\n", status); 6633 return status; 6634 } 6635 6636 /** 6637 * nfs4_proc_setclientid_confirm - Confirm client ID 6638 * @clp: state data structure 6639 * @arg: result of a previous SETCLIENTID 6640 * @cred: credential to use for this call 6641 * 6642 * Returns zero, a negative errno, or a negative NFS4ERR status code. 6643 */ 6644 int nfs4_proc_setclientid_confirm(struct nfs_client *clp, 6645 struct nfs4_setclientid_res *arg, 6646 const struct cred *cred) 6647 { 6648 struct rpc_message msg = { 6649 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM], 6650 .rpc_argp = arg, 6651 .rpc_cred = cred, 6652 }; 6653 int status; 6654 6655 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n", 6656 clp->cl_rpcclient->cl_auth->au_ops->au_name, 6657 clp->cl_clientid); 6658 status = rpc_call_sync(clp->cl_rpcclient, &msg, 6659 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 6660 trace_nfs4_setclientid_confirm(clp, status); 6661 dprintk("NFS reply setclientid_confirm: %d\n", status); 6662 return status; 6663 } 6664 6665 struct nfs4_delegreturndata { 6666 struct nfs4_delegreturnargs args; 6667 struct nfs4_delegreturnres res; 6668 struct nfs_fh fh; 6669 nfs4_stateid stateid; 6670 unsigned long timestamp; 6671 struct { 6672 struct nfs4_layoutreturn_args arg; 6673 struct nfs4_layoutreturn_res res; 6674 struct nfs4_xdr_opaque_data ld_private; 6675 u32 roc_barrier; 6676 bool roc; 6677 } lr; 6678 struct nfs4_delegattr sattr; 6679 struct nfs_fattr fattr; 6680 int rpc_status; 6681 struct inode *inode; 6682 }; 6683 6684 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) 6685 { 6686 struct nfs4_delegreturndata *data = calldata; 6687 struct nfs4_exception exception = { 6688 .inode = data->inode, 6689 .stateid = &data->stateid, 6690 .task_is_privileged = data->args.seq_args.sa_privileged, 6691 }; 6692 6693 if (!nfs4_sequence_done(task, &data->res.seq_res)) 6694 return; 6695 6696 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status); 6697 6698 /* Handle Layoutreturn errors */ 6699 if (pnfs_roc_done(task, &data->args.lr_args, &data->res.lr_res, 6700 &data->res.lr_ret) == -EAGAIN) 6701 goto out_restart; 6702 6703 if (data->args.sattr_args && task->tk_status != 0) { 6704 switch(data->res.sattr_ret) { 6705 case 0: 6706 data->args.sattr_args = NULL; 6707 data->res.sattr_res = false; 6708 break; 6709 case -NFS4ERR_ADMIN_REVOKED: 6710 case -NFS4ERR_DELEG_REVOKED: 6711 case -NFS4ERR_EXPIRED: 6712 case -NFS4ERR_BAD_STATEID: 6713 /* Let the main handler below do stateid recovery */ 6714 break; 6715 case -NFS4ERR_OLD_STATEID: 6716 if (nfs4_refresh_delegation_stateid(&data->stateid, 6717 data->inode)) 6718 goto out_restart; 6719 fallthrough; 6720 default: 6721 data->args.sattr_args = NULL; 6722 data->res.sattr_res = false; 6723 goto out_restart; 6724 } 6725 } 6726 6727 switch (task->tk_status) { 6728 case 0: 6729 renew_lease(data->res.server, data->timestamp); 6730 break; 6731 case -NFS4ERR_ADMIN_REVOKED: 6732 case -NFS4ERR_DELEG_REVOKED: 6733 case -NFS4ERR_EXPIRED: 6734 nfs4_free_revoked_stateid(data->res.server, 6735 data->args.stateid, 6736 task->tk_msg.rpc_cred); 6737 fallthrough; 6738 case -NFS4ERR_BAD_STATEID: 6739 case -NFS4ERR_STALE_STATEID: 6740 case -ETIMEDOUT: 6741 task->tk_status = 0; 6742 break; 6743 case -NFS4ERR_OLD_STATEID: 6744 if (!nfs4_refresh_delegation_stateid(&data->stateid, data->inode)) 6745 nfs4_stateid_seqid_inc(&data->stateid); 6746 if (data->args.bitmask) { 6747 data->args.bitmask = NULL; 6748 data->res.fattr = NULL; 6749 } 6750 goto out_restart; 6751 case -NFS4ERR_ACCESS: 6752 if (data->args.bitmask) { 6753 data->args.bitmask = NULL; 6754 data->res.fattr = NULL; 6755 goto out_restart; 6756 } 6757 fallthrough; 6758 default: 6759 task->tk_status = nfs4_async_handle_exception(task, 6760 data->res.server, task->tk_status, 6761 &exception); 6762 if (exception.retry) 6763 goto out_restart; 6764 } 6765 nfs_delegation_mark_returned(data->inode, data->args.stateid); 6766 data->rpc_status = task->tk_status; 6767 return; 6768 out_restart: 6769 task->tk_status = 0; 6770 rpc_restart_call_prepare(task); 6771 } 6772 6773 static void nfs4_delegreturn_release(void *calldata) 6774 { 6775 struct nfs4_delegreturndata *data = calldata; 6776 struct inode *inode = data->inode; 6777 6778 if (data->lr.roc) 6779 pnfs_roc_release(&data->lr.arg, &data->lr.res, 6780 data->res.lr_ret); 6781 if (inode) { 6782 nfs4_fattr_set_prechange(&data->fattr, 6783 inode_peek_iversion_raw(inode)); 6784 nfs_refresh_inode(inode, &data->fattr); 6785 nfs_iput_and_deactive(inode); 6786 } 6787 kfree(calldata); 6788 } 6789 6790 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) 6791 { 6792 struct nfs4_delegreturndata *d_data; 6793 struct pnfs_layout_hdr *lo; 6794 6795 d_data = data; 6796 6797 if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) { 6798 nfs4_sequence_done(task, &d_data->res.seq_res); 6799 return; 6800 } 6801 6802 lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL; 6803 if (lo && !pnfs_layout_is_valid(lo)) { 6804 d_data->args.lr_args = NULL; 6805 d_data->res.lr_res = NULL; 6806 } 6807 6808 nfs4_setup_sequence(d_data->res.server->nfs_client, 6809 &d_data->args.seq_args, 6810 &d_data->res.seq_res, 6811 task); 6812 } 6813 6814 static const struct rpc_call_ops nfs4_delegreturn_ops = { 6815 .rpc_call_prepare = nfs4_delegreturn_prepare, 6816 .rpc_call_done = nfs4_delegreturn_done, 6817 .rpc_release = nfs4_delegreturn_release, 6818 }; 6819 6820 static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, 6821 const nfs4_stateid *stateid, 6822 struct nfs_delegation *delegation, 6823 int issync) 6824 { 6825 struct nfs4_delegreturndata *data; 6826 struct nfs_server *server = NFS_SERVER(inode); 6827 struct rpc_task *task; 6828 struct rpc_message msg = { 6829 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN], 6830 .rpc_cred = cred, 6831 }; 6832 struct rpc_task_setup task_setup_data = { 6833 .rpc_client = server->client, 6834 .rpc_message = &msg, 6835 .callback_ops = &nfs4_delegreturn_ops, 6836 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT, 6837 }; 6838 int status = 0; 6839 6840 if (nfs_server_capable(inode, NFS_CAP_MOVEABLE)) 6841 task_setup_data.flags |= RPC_TASK_MOVEABLE; 6842 6843 data = kzalloc(sizeof(*data), GFP_KERNEL); 6844 if (data == NULL) 6845 return -ENOMEM; 6846 6847 nfs4_state_protect(server->nfs_client, 6848 NFS_SP4_MACH_CRED_CLEANUP, 6849 &task_setup_data.rpc_client, &msg); 6850 6851 data->args.fhandle = &data->fh; 6852 data->args.stateid = &data->stateid; 6853 nfs4_bitmask_set(data->args.bitmask_store, 6854 server->cache_consistency_bitmask, inode, 0); 6855 data->args.bitmask = data->args.bitmask_store; 6856 nfs_copy_fh(&data->fh, NFS_FH(inode)); 6857 nfs4_stateid_copy(&data->stateid, stateid); 6858 data->res.fattr = &data->fattr; 6859 data->res.server = server; 6860 data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT; 6861 data->lr.arg.ld_private = &data->lr.ld_private; 6862 nfs_fattr_init(data->res.fattr); 6863 data->timestamp = jiffies; 6864 data->rpc_status = 0; 6865 data->inode = nfs_igrab_and_active(inode); 6866 if (data->inode || issync) { 6867 data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res, 6868 cred); 6869 if (data->lr.roc) { 6870 data->args.lr_args = &data->lr.arg; 6871 data->res.lr_res = &data->lr.res; 6872 } 6873 } 6874 6875 if (delegation && 6876 test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) { 6877 if (delegation->type & FMODE_READ) { 6878 data->sattr.atime = inode_get_atime(inode); 6879 data->sattr.atime_set = true; 6880 } 6881 if (delegation->type & FMODE_WRITE) { 6882 data->sattr.mtime = inode_get_mtime(inode); 6883 data->sattr.mtime_set = true; 6884 } 6885 data->args.sattr_args = &data->sattr; 6886 data->res.sattr_res = true; 6887 } 6888 6889 if (!data->inode) 6890 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 6891 1); 6892 else 6893 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 6894 0); 6895 6896 task_setup_data.callback_data = data; 6897 msg.rpc_argp = &data->args; 6898 msg.rpc_resp = &data->res; 6899 task = rpc_run_task(&task_setup_data); 6900 if (IS_ERR(task)) 6901 return PTR_ERR(task); 6902 if (!issync) 6903 goto out; 6904 status = rpc_wait_for_completion_task(task); 6905 if (status != 0) 6906 goto out; 6907 status = data->rpc_status; 6908 out: 6909 rpc_put_task(task); 6910 return status; 6911 } 6912 6913 int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, 6914 const nfs4_stateid *stateid, 6915 struct nfs_delegation *delegation, int issync) 6916 { 6917 struct nfs_server *server = NFS_SERVER(inode); 6918 struct nfs4_exception exception = { }; 6919 int err; 6920 do { 6921 err = _nfs4_proc_delegreturn(inode, cred, stateid, 6922 delegation, issync); 6923 trace_nfs4_delegreturn(inode, stateid, err); 6924 switch (err) { 6925 case -NFS4ERR_STALE_STATEID: 6926 case -NFS4ERR_EXPIRED: 6927 case 0: 6928 return 0; 6929 } 6930 err = nfs4_handle_exception(server, err, &exception); 6931 } while (exception.retry); 6932 return err; 6933 } 6934 6935 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 6936 { 6937 struct inode *inode = state->inode; 6938 struct nfs_server *server = NFS_SERVER(inode); 6939 struct nfs_client *clp = server->nfs_client; 6940 struct nfs_lockt_args arg = { 6941 .fh = NFS_FH(inode), 6942 .fl = request, 6943 }; 6944 struct nfs_lockt_res res = { 6945 .denied = request, 6946 }; 6947 struct rpc_message msg = { 6948 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT], 6949 .rpc_argp = &arg, 6950 .rpc_resp = &res, 6951 .rpc_cred = state->owner->so_cred, 6952 }; 6953 struct nfs4_lock_state *lsp; 6954 int status; 6955 6956 arg.lock_owner.clientid = clp->cl_clientid; 6957 status = nfs4_set_lock_state(state, request); 6958 if (status != 0) 6959 goto out; 6960 lsp = request->fl_u.nfs4_fl.owner; 6961 arg.lock_owner.id = lsp->ls_seqid.owner_id; 6962 arg.lock_owner.s_dev = server->s_dev; 6963 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 6964 switch (status) { 6965 case 0: 6966 request->c.flc_type = F_UNLCK; 6967 break; 6968 case -NFS4ERR_DENIED: 6969 status = 0; 6970 } 6971 request->fl_ops->fl_release_private(request); 6972 request->fl_ops = NULL; 6973 out: 6974 return status; 6975 } 6976 6977 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 6978 { 6979 struct nfs4_exception exception = { 6980 .interruptible = true, 6981 }; 6982 int err; 6983 6984 do { 6985 err = _nfs4_proc_getlk(state, cmd, request); 6986 trace_nfs4_get_lock(request, state, cmd, err); 6987 err = nfs4_handle_exception(NFS_SERVER(state->inode), err, 6988 &exception); 6989 } while (exception.retry); 6990 return err; 6991 } 6992 6993 /* 6994 * Update the seqid of a lock stateid after receiving 6995 * NFS4ERR_OLD_STATEID 6996 */ 6997 static bool nfs4_refresh_lock_old_stateid(nfs4_stateid *dst, 6998 struct nfs4_lock_state *lsp) 6999 { 7000 struct nfs4_state *state = lsp->ls_state; 7001 bool ret = false; 7002 7003 spin_lock(&state->state_lock); 7004 if (!nfs4_stateid_match_other(dst, &lsp->ls_stateid)) 7005 goto out; 7006 if (!nfs4_stateid_is_newer(&lsp->ls_stateid, dst)) 7007 nfs4_stateid_seqid_inc(dst); 7008 else 7009 dst->seqid = lsp->ls_stateid.seqid; 7010 ret = true; 7011 out: 7012 spin_unlock(&state->state_lock); 7013 return ret; 7014 } 7015 7016 static bool nfs4_sync_lock_stateid(nfs4_stateid *dst, 7017 struct nfs4_lock_state *lsp) 7018 { 7019 struct nfs4_state *state = lsp->ls_state; 7020 bool ret; 7021 7022 spin_lock(&state->state_lock); 7023 ret = !nfs4_stateid_match_other(dst, &lsp->ls_stateid); 7024 nfs4_stateid_copy(dst, &lsp->ls_stateid); 7025 spin_unlock(&state->state_lock); 7026 return ret; 7027 } 7028 7029 struct nfs4_unlockdata { 7030 struct nfs_locku_args arg; 7031 struct nfs_locku_res res; 7032 struct nfs4_lock_state *lsp; 7033 struct nfs_open_context *ctx; 7034 struct nfs_lock_context *l_ctx; 7035 struct file_lock fl; 7036 struct nfs_server *server; 7037 unsigned long timestamp; 7038 }; 7039 7040 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, 7041 struct nfs_open_context *ctx, 7042 struct nfs4_lock_state *lsp, 7043 struct nfs_seqid *seqid) 7044 { 7045 struct nfs4_unlockdata *p; 7046 struct nfs4_state *state = lsp->ls_state; 7047 struct inode *inode = state->inode; 7048 7049 p = kzalloc(sizeof(*p), GFP_KERNEL); 7050 if (p == NULL) 7051 return NULL; 7052 p->arg.fh = NFS_FH(inode); 7053 p->arg.fl = &p->fl; 7054 p->arg.seqid = seqid; 7055 p->res.seqid = seqid; 7056 p->lsp = lsp; 7057 /* Ensure we don't close file until we're done freeing locks! */ 7058 p->ctx = get_nfs_open_context(ctx); 7059 p->l_ctx = nfs_get_lock_context(ctx); 7060 locks_init_lock(&p->fl); 7061 locks_copy_lock(&p->fl, fl); 7062 p->server = NFS_SERVER(inode); 7063 spin_lock(&state->state_lock); 7064 nfs4_stateid_copy(&p->arg.stateid, &lsp->ls_stateid); 7065 spin_unlock(&state->state_lock); 7066 return p; 7067 } 7068 7069 static void nfs4_locku_release_calldata(void *data) 7070 { 7071 struct nfs4_unlockdata *calldata = data; 7072 nfs_free_seqid(calldata->arg.seqid); 7073 nfs4_put_lock_state(calldata->lsp); 7074 nfs_put_lock_context(calldata->l_ctx); 7075 put_nfs_open_context(calldata->ctx); 7076 kfree(calldata); 7077 } 7078 7079 static void nfs4_locku_done(struct rpc_task *task, void *data) 7080 { 7081 struct nfs4_unlockdata *calldata = data; 7082 struct nfs4_exception exception = { 7083 .inode = calldata->lsp->ls_state->inode, 7084 .stateid = &calldata->arg.stateid, 7085 }; 7086 7087 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 7088 return; 7089 switch (task->tk_status) { 7090 case 0: 7091 renew_lease(calldata->server, calldata->timestamp); 7092 locks_lock_inode_wait(calldata->lsp->ls_state->inode, &calldata->fl); 7093 if (nfs4_update_lock_stateid(calldata->lsp, 7094 &calldata->res.stateid)) 7095 break; 7096 fallthrough; 7097 case -NFS4ERR_ADMIN_REVOKED: 7098 case -NFS4ERR_EXPIRED: 7099 nfs4_free_revoked_stateid(calldata->server, 7100 &calldata->arg.stateid, 7101 task->tk_msg.rpc_cred); 7102 fallthrough; 7103 case -NFS4ERR_BAD_STATEID: 7104 case -NFS4ERR_STALE_STATEID: 7105 if (nfs4_sync_lock_stateid(&calldata->arg.stateid, 7106 calldata->lsp)) 7107 rpc_restart_call_prepare(task); 7108 break; 7109 case -NFS4ERR_OLD_STATEID: 7110 if (nfs4_refresh_lock_old_stateid(&calldata->arg.stateid, 7111 calldata->lsp)) 7112 rpc_restart_call_prepare(task); 7113 break; 7114 default: 7115 task->tk_status = nfs4_async_handle_exception(task, 7116 calldata->server, task->tk_status, 7117 &exception); 7118 if (exception.retry) 7119 rpc_restart_call_prepare(task); 7120 } 7121 nfs_release_seqid(calldata->arg.seqid); 7122 } 7123 7124 static void nfs4_locku_prepare(struct rpc_task *task, void *data) 7125 { 7126 struct nfs4_unlockdata *calldata = data; 7127 7128 if (test_bit(NFS_CONTEXT_UNLOCK, &calldata->l_ctx->open_context->flags) && 7129 nfs_async_iocounter_wait(task, calldata->l_ctx)) 7130 return; 7131 7132 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 7133 goto out_wait; 7134 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) { 7135 /* Note: exit _without_ running nfs4_locku_done */ 7136 goto out_no_action; 7137 } 7138 calldata->timestamp = jiffies; 7139 if (nfs4_setup_sequence(calldata->server->nfs_client, 7140 &calldata->arg.seq_args, 7141 &calldata->res.seq_res, 7142 task) != 0) 7143 nfs_release_seqid(calldata->arg.seqid); 7144 return; 7145 out_no_action: 7146 task->tk_action = NULL; 7147 out_wait: 7148 nfs4_sequence_done(task, &calldata->res.seq_res); 7149 } 7150 7151 static const struct rpc_call_ops nfs4_locku_ops = { 7152 .rpc_call_prepare = nfs4_locku_prepare, 7153 .rpc_call_done = nfs4_locku_done, 7154 .rpc_release = nfs4_locku_release_calldata, 7155 }; 7156 7157 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, 7158 struct nfs_open_context *ctx, 7159 struct nfs4_lock_state *lsp, 7160 struct nfs_seqid *seqid) 7161 { 7162 struct nfs4_unlockdata *data; 7163 struct rpc_message msg = { 7164 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU], 7165 .rpc_cred = ctx->cred, 7166 }; 7167 struct rpc_task_setup task_setup_data = { 7168 .rpc_client = NFS_CLIENT(lsp->ls_state->inode), 7169 .rpc_message = &msg, 7170 .callback_ops = &nfs4_locku_ops, 7171 .workqueue = nfsiod_workqueue, 7172 .flags = RPC_TASK_ASYNC, 7173 }; 7174 7175 if (nfs_server_capable(lsp->ls_state->inode, NFS_CAP_MOVEABLE)) 7176 task_setup_data.flags |= RPC_TASK_MOVEABLE; 7177 7178 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client, 7179 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg); 7180 7181 /* Ensure this is an unlock - when canceling a lock, the 7182 * canceled lock is passed in, and it won't be an unlock. 7183 */ 7184 fl->c.flc_type = F_UNLCK; 7185 if (fl->c.flc_flags & FL_CLOSE) 7186 set_bit(NFS_CONTEXT_UNLOCK, &ctx->flags); 7187 7188 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); 7189 if (data == NULL) { 7190 nfs_free_seqid(seqid); 7191 return ERR_PTR(-ENOMEM); 7192 } 7193 7194 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 0); 7195 msg.rpc_argp = &data->arg; 7196 msg.rpc_resp = &data->res; 7197 task_setup_data.callback_data = data; 7198 return rpc_run_task(&task_setup_data); 7199 } 7200 7201 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) 7202 { 7203 struct inode *inode = state->inode; 7204 struct nfs4_state_owner *sp = state->owner; 7205 struct nfs_inode *nfsi = NFS_I(inode); 7206 struct nfs_seqid *seqid; 7207 struct nfs4_lock_state *lsp; 7208 struct rpc_task *task; 7209 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 7210 int status = 0; 7211 unsigned char saved_flags = request->c.flc_flags; 7212 7213 status = nfs4_set_lock_state(state, request); 7214 /* Unlock _before_ we do the RPC call */ 7215 request->c.flc_flags |= FL_EXISTS; 7216 /* Exclude nfs_delegation_claim_locks() */ 7217 mutex_lock(&sp->so_delegreturn_mutex); 7218 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */ 7219 down_read(&nfsi->rwsem); 7220 if (locks_lock_inode_wait(inode, request) == -ENOENT) { 7221 up_read(&nfsi->rwsem); 7222 mutex_unlock(&sp->so_delegreturn_mutex); 7223 goto out; 7224 } 7225 lsp = request->fl_u.nfs4_fl.owner; 7226 set_bit(NFS_LOCK_UNLOCKING, &lsp->ls_flags); 7227 up_read(&nfsi->rwsem); 7228 mutex_unlock(&sp->so_delegreturn_mutex); 7229 if (status != 0) 7230 goto out; 7231 /* Is this a delegated lock? */ 7232 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0) 7233 goto out; 7234 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid; 7235 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); 7236 status = -ENOMEM; 7237 if (IS_ERR(seqid)) 7238 goto out; 7239 task = nfs4_do_unlck(request, 7240 nfs_file_open_context(request->c.flc_file), 7241 lsp, seqid); 7242 status = PTR_ERR(task); 7243 if (IS_ERR(task)) 7244 goto out; 7245 status = rpc_wait_for_completion_task(task); 7246 rpc_put_task(task); 7247 out: 7248 request->c.flc_flags = saved_flags; 7249 trace_nfs4_unlock(request, state, F_SETLK, status); 7250 return status; 7251 } 7252 7253 struct nfs4_lockdata { 7254 struct nfs_lock_args arg; 7255 struct nfs_lock_res res; 7256 struct nfs4_lock_state *lsp; 7257 struct nfs_open_context *ctx; 7258 struct file_lock fl; 7259 unsigned long timestamp; 7260 int rpc_status; 7261 int cancelled; 7262 struct nfs_server *server; 7263 }; 7264 7265 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, 7266 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp, 7267 gfp_t gfp_mask) 7268 { 7269 struct nfs4_lockdata *p; 7270 struct inode *inode = lsp->ls_state->inode; 7271 struct nfs_server *server = NFS_SERVER(inode); 7272 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 7273 7274 p = kzalloc(sizeof(*p), gfp_mask); 7275 if (p == NULL) 7276 return NULL; 7277 7278 p->arg.fh = NFS_FH(inode); 7279 p->arg.fl = &p->fl; 7280 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask); 7281 if (IS_ERR(p->arg.open_seqid)) 7282 goto out_free; 7283 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 7284 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask); 7285 if (IS_ERR(p->arg.lock_seqid)) 7286 goto out_free_seqid; 7287 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; 7288 p->arg.lock_owner.id = lsp->ls_seqid.owner_id; 7289 p->arg.lock_owner.s_dev = server->s_dev; 7290 p->res.lock_seqid = p->arg.lock_seqid; 7291 p->lsp = lsp; 7292 p->server = server; 7293 p->ctx = get_nfs_open_context(ctx); 7294 locks_init_lock(&p->fl); 7295 locks_copy_lock(&p->fl, fl); 7296 return p; 7297 out_free_seqid: 7298 nfs_free_seqid(p->arg.open_seqid); 7299 out_free: 7300 kfree(p); 7301 return NULL; 7302 } 7303 7304 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) 7305 { 7306 struct nfs4_lockdata *data = calldata; 7307 struct nfs4_state *state = data->lsp->ls_state; 7308 7309 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) 7310 goto out_wait; 7311 /* Do we need to do an open_to_lock_owner? */ 7312 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) { 7313 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) { 7314 goto out_release_lock_seqid; 7315 } 7316 nfs4_stateid_copy(&data->arg.open_stateid, 7317 &state->open_stateid); 7318 data->arg.new_lock_owner = 1; 7319 data->res.open_seqid = data->arg.open_seqid; 7320 } else { 7321 data->arg.new_lock_owner = 0; 7322 nfs4_stateid_copy(&data->arg.lock_stateid, 7323 &data->lsp->ls_stateid); 7324 } 7325 if (!nfs4_valid_open_stateid(state)) { 7326 data->rpc_status = -EBADF; 7327 task->tk_action = NULL; 7328 goto out_release_open_seqid; 7329 } 7330 data->timestamp = jiffies; 7331 if (nfs4_setup_sequence(data->server->nfs_client, 7332 &data->arg.seq_args, 7333 &data->res.seq_res, 7334 task) == 0) 7335 return; 7336 out_release_open_seqid: 7337 nfs_release_seqid(data->arg.open_seqid); 7338 out_release_lock_seqid: 7339 nfs_release_seqid(data->arg.lock_seqid); 7340 out_wait: 7341 nfs4_sequence_done(task, &data->res.seq_res); 7342 dprintk("%s: ret = %d\n", __func__, data->rpc_status); 7343 } 7344 7345 static void nfs4_lock_done(struct rpc_task *task, void *calldata) 7346 { 7347 struct nfs4_lockdata *data = calldata; 7348 struct nfs4_lock_state *lsp = data->lsp; 7349 7350 if (!nfs4_sequence_done(task, &data->res.seq_res)) 7351 return; 7352 7353 data->rpc_status = task->tk_status; 7354 switch (task->tk_status) { 7355 case 0: 7356 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)), 7357 data->timestamp); 7358 if (data->arg.new_lock && !data->cancelled) { 7359 data->fl.c.flc_flags &= ~(FL_SLEEP | FL_ACCESS); 7360 if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0) 7361 goto out_restart; 7362 } 7363 if (data->arg.new_lock_owner != 0) { 7364 nfs_confirm_seqid(&lsp->ls_seqid, 0); 7365 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid); 7366 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 7367 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid)) 7368 goto out_restart; 7369 break; 7370 case -NFS4ERR_OLD_STATEID: 7371 if (data->arg.new_lock_owner != 0 && 7372 nfs4_refresh_open_old_stateid(&data->arg.open_stateid, 7373 lsp->ls_state)) 7374 goto out_restart; 7375 if (nfs4_refresh_lock_old_stateid(&data->arg.lock_stateid, lsp)) 7376 goto out_restart; 7377 fallthrough; 7378 case -NFS4ERR_BAD_STATEID: 7379 case -NFS4ERR_STALE_STATEID: 7380 case -NFS4ERR_EXPIRED: 7381 if (data->arg.new_lock_owner != 0) { 7382 if (!nfs4_stateid_match(&data->arg.open_stateid, 7383 &lsp->ls_state->open_stateid)) 7384 goto out_restart; 7385 } else if (!nfs4_stateid_match(&data->arg.lock_stateid, 7386 &lsp->ls_stateid)) 7387 goto out_restart; 7388 } 7389 out_done: 7390 dprintk("%s: ret = %d!\n", __func__, data->rpc_status); 7391 return; 7392 out_restart: 7393 if (!data->cancelled) 7394 rpc_restart_call_prepare(task); 7395 goto out_done; 7396 } 7397 7398 static void nfs4_lock_release(void *calldata) 7399 { 7400 struct nfs4_lockdata *data = calldata; 7401 7402 nfs_free_seqid(data->arg.open_seqid); 7403 if (data->cancelled && data->rpc_status == 0) { 7404 struct rpc_task *task; 7405 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, 7406 data->arg.lock_seqid); 7407 if (!IS_ERR(task)) 7408 rpc_put_task_async(task); 7409 dprintk("%s: cancelling lock!\n", __func__); 7410 } else 7411 nfs_free_seqid(data->arg.lock_seqid); 7412 nfs4_put_lock_state(data->lsp); 7413 put_nfs_open_context(data->ctx); 7414 kfree(data); 7415 } 7416 7417 static const struct rpc_call_ops nfs4_lock_ops = { 7418 .rpc_call_prepare = nfs4_lock_prepare, 7419 .rpc_call_done = nfs4_lock_done, 7420 .rpc_release = nfs4_lock_release, 7421 }; 7422 7423 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) 7424 { 7425 switch (error) { 7426 case -NFS4ERR_ADMIN_REVOKED: 7427 case -NFS4ERR_EXPIRED: 7428 case -NFS4ERR_BAD_STATEID: 7429 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 7430 if (new_lock_owner != 0 || 7431 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) 7432 nfs4_schedule_stateid_recovery(server, lsp->ls_state); 7433 break; 7434 case -NFS4ERR_STALE_STATEID: 7435 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 7436 nfs4_schedule_lease_recovery(server->nfs_client); 7437 } 7438 } 7439 7440 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) 7441 { 7442 struct nfs4_lockdata *data; 7443 struct rpc_task *task; 7444 struct rpc_message msg = { 7445 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK], 7446 .rpc_cred = state->owner->so_cred, 7447 }; 7448 struct rpc_task_setup task_setup_data = { 7449 .rpc_client = NFS_CLIENT(state->inode), 7450 .rpc_message = &msg, 7451 .callback_ops = &nfs4_lock_ops, 7452 .workqueue = nfsiod_workqueue, 7453 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 7454 }; 7455 int ret; 7456 7457 if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE)) 7458 task_setup_data.flags |= RPC_TASK_MOVEABLE; 7459 7460 data = nfs4_alloc_lockdata(fl, 7461 nfs_file_open_context(fl->c.flc_file), 7462 fl->fl_u.nfs4_fl.owner, GFP_KERNEL); 7463 if (data == NULL) 7464 return -ENOMEM; 7465 if (IS_SETLKW(cmd)) 7466 data->arg.block = 1; 7467 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 7468 recovery_type > NFS_LOCK_NEW); 7469 msg.rpc_argp = &data->arg; 7470 msg.rpc_resp = &data->res; 7471 task_setup_data.callback_data = data; 7472 if (recovery_type > NFS_LOCK_NEW) { 7473 if (recovery_type == NFS_LOCK_RECLAIM) 7474 data->arg.reclaim = NFS_LOCK_RECLAIM; 7475 } else 7476 data->arg.new_lock = 1; 7477 task = rpc_run_task(&task_setup_data); 7478 if (IS_ERR(task)) 7479 return PTR_ERR(task); 7480 ret = rpc_wait_for_completion_task(task); 7481 if (ret == 0) { 7482 ret = data->rpc_status; 7483 if (ret) 7484 nfs4_handle_setlk_error(data->server, data->lsp, 7485 data->arg.new_lock_owner, ret); 7486 } else 7487 data->cancelled = true; 7488 trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret); 7489 rpc_put_task(task); 7490 dprintk("%s: ret = %d\n", __func__, ret); 7491 return ret; 7492 } 7493 7494 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) 7495 { 7496 struct nfs_server *server = NFS_SERVER(state->inode); 7497 struct nfs4_exception exception = { 7498 .inode = state->inode, 7499 }; 7500 int err; 7501 7502 do { 7503 /* Cache the lock if possible... */ 7504 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 7505 return 0; 7506 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM); 7507 if (err != -NFS4ERR_DELAY) 7508 break; 7509 nfs4_handle_exception(server, err, &exception); 7510 } while (exception.retry); 7511 return err; 7512 } 7513 7514 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) 7515 { 7516 struct nfs_server *server = NFS_SERVER(state->inode); 7517 struct nfs4_exception exception = { 7518 .inode = state->inode, 7519 }; 7520 int err; 7521 7522 err = nfs4_set_lock_state(state, request); 7523 if (err != 0) 7524 return err; 7525 if (!recover_lost_locks) { 7526 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags); 7527 return 0; 7528 } 7529 do { 7530 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 7531 return 0; 7532 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED); 7533 switch (err) { 7534 default: 7535 goto out; 7536 case -NFS4ERR_GRACE: 7537 case -NFS4ERR_DELAY: 7538 nfs4_handle_exception(server, err, &exception); 7539 err = 0; 7540 } 7541 } while (exception.retry); 7542 out: 7543 return err; 7544 } 7545 7546 #if defined(CONFIG_NFS_V4_1) 7547 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request) 7548 { 7549 struct nfs4_lock_state *lsp; 7550 int status; 7551 7552 status = nfs4_set_lock_state(state, request); 7553 if (status != 0) 7554 return status; 7555 lsp = request->fl_u.nfs4_fl.owner; 7556 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) || 7557 test_bit(NFS_LOCK_LOST, &lsp->ls_flags)) 7558 return 0; 7559 return nfs4_lock_expired(state, request); 7560 } 7561 #endif 7562 7563 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7564 { 7565 struct nfs_inode *nfsi = NFS_I(state->inode); 7566 struct nfs4_state_owner *sp = state->owner; 7567 unsigned char flags = request->c.flc_flags; 7568 int status; 7569 7570 request->c.flc_flags |= FL_ACCESS; 7571 status = locks_lock_inode_wait(state->inode, request); 7572 if (status < 0) 7573 goto out; 7574 mutex_lock(&sp->so_delegreturn_mutex); 7575 down_read(&nfsi->rwsem); 7576 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { 7577 /* Yes: cache locks! */ 7578 /* ...but avoid races with delegation recall... */ 7579 request->c.flc_flags = flags & ~FL_SLEEP; 7580 status = locks_lock_inode_wait(state->inode, request); 7581 up_read(&nfsi->rwsem); 7582 mutex_unlock(&sp->so_delegreturn_mutex); 7583 goto out; 7584 } 7585 up_read(&nfsi->rwsem); 7586 mutex_unlock(&sp->so_delegreturn_mutex); 7587 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); 7588 out: 7589 request->c.flc_flags = flags; 7590 return status; 7591 } 7592 7593 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7594 { 7595 struct nfs4_exception exception = { 7596 .state = state, 7597 .inode = state->inode, 7598 .interruptible = true, 7599 }; 7600 int err; 7601 7602 do { 7603 err = _nfs4_proc_setlk(state, cmd, request); 7604 if (err == -NFS4ERR_DENIED) 7605 err = -EAGAIN; 7606 err = nfs4_handle_exception(NFS_SERVER(state->inode), 7607 err, &exception); 7608 } while (exception.retry); 7609 return err; 7610 } 7611 7612 #define NFS4_LOCK_MINTIMEOUT (1 * HZ) 7613 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) 7614 7615 static int 7616 nfs4_retry_setlk_simple(struct nfs4_state *state, int cmd, 7617 struct file_lock *request) 7618 { 7619 int status = -ERESTARTSYS; 7620 unsigned long timeout = NFS4_LOCK_MINTIMEOUT; 7621 7622 while(!signalled()) { 7623 status = nfs4_proc_setlk(state, cmd, request); 7624 if ((status != -EAGAIN) || IS_SETLK(cmd)) 7625 break; 7626 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE); 7627 schedule_timeout(timeout); 7628 timeout *= 2; 7629 timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout); 7630 status = -ERESTARTSYS; 7631 } 7632 return status; 7633 } 7634 7635 #ifdef CONFIG_NFS_V4_1 7636 struct nfs4_lock_waiter { 7637 struct inode *inode; 7638 struct nfs_lowner owner; 7639 wait_queue_entry_t wait; 7640 }; 7641 7642 static int 7643 nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key) 7644 { 7645 struct nfs4_lock_waiter *waiter = 7646 container_of(wait, struct nfs4_lock_waiter, wait); 7647 7648 /* NULL key means to wake up everyone */ 7649 if (key) { 7650 struct cb_notify_lock_args *cbnl = key; 7651 struct nfs_lowner *lowner = &cbnl->cbnl_owner, 7652 *wowner = &waiter->owner; 7653 7654 /* Only wake if the callback was for the same owner. */ 7655 if (lowner->id != wowner->id || lowner->s_dev != wowner->s_dev) 7656 return 0; 7657 7658 /* Make sure it's for the right inode */ 7659 if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh)) 7660 return 0; 7661 } 7662 7663 return woken_wake_function(wait, mode, flags, key); 7664 } 7665 7666 static int 7667 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7668 { 7669 struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner; 7670 struct nfs_server *server = NFS_SERVER(state->inode); 7671 struct nfs_client *clp = server->nfs_client; 7672 wait_queue_head_t *q = &clp->cl_lock_waitq; 7673 struct nfs4_lock_waiter waiter = { 7674 .inode = state->inode, 7675 .owner = { .clientid = clp->cl_clientid, 7676 .id = lsp->ls_seqid.owner_id, 7677 .s_dev = server->s_dev }, 7678 }; 7679 int status; 7680 7681 /* Don't bother with waitqueue if we don't expect a callback */ 7682 if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags)) 7683 return nfs4_retry_setlk_simple(state, cmd, request); 7684 7685 init_wait(&waiter.wait); 7686 waiter.wait.func = nfs4_wake_lock_waiter; 7687 add_wait_queue(q, &waiter.wait); 7688 7689 do { 7690 status = nfs4_proc_setlk(state, cmd, request); 7691 if (status != -EAGAIN || IS_SETLK(cmd)) 7692 break; 7693 7694 status = -ERESTARTSYS; 7695 wait_woken(&waiter.wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE, 7696 NFS4_LOCK_MAXTIMEOUT); 7697 } while (!signalled()); 7698 7699 remove_wait_queue(q, &waiter.wait); 7700 7701 return status; 7702 } 7703 #else /* !CONFIG_NFS_V4_1 */ 7704 static inline int 7705 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7706 { 7707 return nfs4_retry_setlk_simple(state, cmd, request); 7708 } 7709 #endif 7710 7711 static int 7712 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) 7713 { 7714 struct nfs_open_context *ctx; 7715 struct nfs4_state *state; 7716 int status; 7717 7718 /* verify open state */ 7719 ctx = nfs_file_open_context(filp); 7720 state = ctx->state; 7721 7722 if (IS_GETLK(cmd)) { 7723 if (state != NULL) 7724 return nfs4_proc_getlk(state, F_GETLK, request); 7725 return 0; 7726 } 7727 7728 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd))) 7729 return -EINVAL; 7730 7731 if (lock_is_unlock(request)) { 7732 if (state != NULL) 7733 return nfs4_proc_unlck(state, cmd, request); 7734 return 0; 7735 } 7736 7737 if (state == NULL) 7738 return -ENOLCK; 7739 7740 if ((request->c.flc_flags & FL_POSIX) && 7741 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) 7742 return -ENOLCK; 7743 7744 /* 7745 * Don't rely on the VFS having checked the file open mode, 7746 * since it won't do this for flock() locks. 7747 */ 7748 switch (request->c.flc_type) { 7749 case F_RDLCK: 7750 if (!(filp->f_mode & FMODE_READ)) 7751 return -EBADF; 7752 break; 7753 case F_WRLCK: 7754 if (!(filp->f_mode & FMODE_WRITE)) 7755 return -EBADF; 7756 } 7757 7758 status = nfs4_set_lock_state(state, request); 7759 if (status != 0) 7760 return status; 7761 7762 return nfs4_retry_setlk(state, cmd, request); 7763 } 7764 7765 static int nfs4_delete_lease(struct file *file, void **priv) 7766 { 7767 return generic_setlease(file, F_UNLCK, NULL, priv); 7768 } 7769 7770 static int nfs4_add_lease(struct file *file, int arg, struct file_lease **lease, 7771 void **priv) 7772 { 7773 struct inode *inode = file_inode(file); 7774 fmode_t type = arg == F_RDLCK ? FMODE_READ : FMODE_WRITE; 7775 int ret; 7776 7777 /* No delegation, no lease */ 7778 if (!nfs4_have_delegation(inode, type, 0)) 7779 return -EAGAIN; 7780 ret = generic_setlease(file, arg, lease, priv); 7781 if (ret || nfs4_have_delegation(inode, type, 0)) 7782 return ret; 7783 /* We raced with a delegation return */ 7784 nfs4_delete_lease(file, priv); 7785 return -EAGAIN; 7786 } 7787 7788 int nfs4_proc_setlease(struct file *file, int arg, struct file_lease **lease, 7789 void **priv) 7790 { 7791 switch (arg) { 7792 case F_RDLCK: 7793 case F_WRLCK: 7794 return nfs4_add_lease(file, arg, lease, priv); 7795 case F_UNLCK: 7796 return nfs4_delete_lease(file, priv); 7797 default: 7798 return -EINVAL; 7799 } 7800 } 7801 7802 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid) 7803 { 7804 struct nfs_server *server = NFS_SERVER(state->inode); 7805 int err; 7806 7807 err = nfs4_set_lock_state(state, fl); 7808 if (err != 0) 7809 return err; 7810 do { 7811 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); 7812 if (err != -NFS4ERR_DELAY) 7813 break; 7814 ssleep(1); 7815 } while (err == -NFS4ERR_DELAY); 7816 return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err); 7817 } 7818 7819 struct nfs_release_lockowner_data { 7820 struct nfs4_lock_state *lsp; 7821 struct nfs_server *server; 7822 struct nfs_release_lockowner_args args; 7823 struct nfs_release_lockowner_res res; 7824 unsigned long timestamp; 7825 }; 7826 7827 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata) 7828 { 7829 struct nfs_release_lockowner_data *data = calldata; 7830 struct nfs_server *server = data->server; 7831 nfs4_setup_sequence(server->nfs_client, &data->args.seq_args, 7832 &data->res.seq_res, task); 7833 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 7834 data->timestamp = jiffies; 7835 } 7836 7837 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata) 7838 { 7839 struct nfs_release_lockowner_data *data = calldata; 7840 struct nfs_server *server = data->server; 7841 7842 nfs40_sequence_done(task, &data->res.seq_res); 7843 7844 switch (task->tk_status) { 7845 case 0: 7846 renew_lease(server, data->timestamp); 7847 break; 7848 case -NFS4ERR_STALE_CLIENTID: 7849 case -NFS4ERR_EXPIRED: 7850 nfs4_schedule_lease_recovery(server->nfs_client); 7851 break; 7852 case -NFS4ERR_LEASE_MOVED: 7853 case -NFS4ERR_DELAY: 7854 if (nfs4_async_handle_error(task, server, 7855 NULL, NULL) == -EAGAIN) 7856 rpc_restart_call_prepare(task); 7857 } 7858 } 7859 7860 static void nfs4_release_lockowner_release(void *calldata) 7861 { 7862 struct nfs_release_lockowner_data *data = calldata; 7863 nfs4_free_lock_state(data->server, data->lsp); 7864 kfree(calldata); 7865 } 7866 7867 static const struct rpc_call_ops nfs4_release_lockowner_ops = { 7868 .rpc_call_prepare = nfs4_release_lockowner_prepare, 7869 .rpc_call_done = nfs4_release_lockowner_done, 7870 .rpc_release = nfs4_release_lockowner_release, 7871 }; 7872 7873 static void 7874 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp) 7875 { 7876 struct nfs_release_lockowner_data *data; 7877 struct rpc_message msg = { 7878 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER], 7879 }; 7880 7881 if (server->nfs_client->cl_mvops->minor_version != 0) 7882 return; 7883 7884 data = kmalloc(sizeof(*data), GFP_KERNEL); 7885 if (!data) 7886 return; 7887 data->lsp = lsp; 7888 data->server = server; 7889 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 7890 data->args.lock_owner.id = lsp->ls_seqid.owner_id; 7891 data->args.lock_owner.s_dev = server->s_dev; 7892 7893 msg.rpc_argp = &data->args; 7894 msg.rpc_resp = &data->res; 7895 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0); 7896 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); 7897 } 7898 7899 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" 7900 7901 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler, 7902 struct mnt_idmap *idmap, 7903 struct dentry *unused, struct inode *inode, 7904 const char *key, const void *buf, 7905 size_t buflen, int flags) 7906 { 7907 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_ACL); 7908 } 7909 7910 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler, 7911 struct dentry *unused, struct inode *inode, 7912 const char *key, void *buf, size_t buflen) 7913 { 7914 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_ACL); 7915 } 7916 7917 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry) 7918 { 7919 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_ACL); 7920 } 7921 7922 #if defined(CONFIG_NFS_V4_1) 7923 #define XATTR_NAME_NFSV4_DACL "system.nfs4_dacl" 7924 7925 static int nfs4_xattr_set_nfs4_dacl(const struct xattr_handler *handler, 7926 struct mnt_idmap *idmap, 7927 struct dentry *unused, struct inode *inode, 7928 const char *key, const void *buf, 7929 size_t buflen, int flags) 7930 { 7931 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_DACL); 7932 } 7933 7934 static int nfs4_xattr_get_nfs4_dacl(const struct xattr_handler *handler, 7935 struct dentry *unused, struct inode *inode, 7936 const char *key, void *buf, size_t buflen) 7937 { 7938 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_DACL); 7939 } 7940 7941 static bool nfs4_xattr_list_nfs4_dacl(struct dentry *dentry) 7942 { 7943 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_DACL); 7944 } 7945 7946 #define XATTR_NAME_NFSV4_SACL "system.nfs4_sacl" 7947 7948 static int nfs4_xattr_set_nfs4_sacl(const struct xattr_handler *handler, 7949 struct mnt_idmap *idmap, 7950 struct dentry *unused, struct inode *inode, 7951 const char *key, const void *buf, 7952 size_t buflen, int flags) 7953 { 7954 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_SACL); 7955 } 7956 7957 static int nfs4_xattr_get_nfs4_sacl(const struct xattr_handler *handler, 7958 struct dentry *unused, struct inode *inode, 7959 const char *key, void *buf, size_t buflen) 7960 { 7961 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_SACL); 7962 } 7963 7964 static bool nfs4_xattr_list_nfs4_sacl(struct dentry *dentry) 7965 { 7966 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_SACL); 7967 } 7968 7969 #endif 7970 7971 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 7972 7973 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler, 7974 struct mnt_idmap *idmap, 7975 struct dentry *unused, struct inode *inode, 7976 const char *key, const void *buf, 7977 size_t buflen, int flags) 7978 { 7979 if (security_ismaclabel(key)) 7980 return nfs4_set_security_label(inode, buf, buflen); 7981 7982 return -EOPNOTSUPP; 7983 } 7984 7985 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler, 7986 struct dentry *unused, struct inode *inode, 7987 const char *key, void *buf, size_t buflen) 7988 { 7989 if (security_ismaclabel(key)) 7990 return nfs4_get_security_label(inode, buf, buflen); 7991 return -EOPNOTSUPP; 7992 } 7993 7994 static ssize_t 7995 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len) 7996 { 7997 int len = 0; 7998 7999 if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) { 8000 len = security_inode_listsecurity(inode, list, list_len); 8001 if (len >= 0 && list_len && len > list_len) 8002 return -ERANGE; 8003 } 8004 return len; 8005 } 8006 8007 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = { 8008 .prefix = XATTR_SECURITY_PREFIX, 8009 .get = nfs4_xattr_get_nfs4_label, 8010 .set = nfs4_xattr_set_nfs4_label, 8011 }; 8012 8013 #else 8014 8015 static ssize_t 8016 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len) 8017 { 8018 return 0; 8019 } 8020 8021 #endif 8022 8023 #ifdef CONFIG_NFS_V4_2 8024 static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler, 8025 struct mnt_idmap *idmap, 8026 struct dentry *unused, struct inode *inode, 8027 const char *key, const void *buf, 8028 size_t buflen, int flags) 8029 { 8030 u32 mask; 8031 int ret; 8032 8033 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 8034 return -EOPNOTSUPP; 8035 8036 /* 8037 * There is no mapping from the MAY_* flags to the NFS_ACCESS_XA* 8038 * flags right now. Handling of xattr operations use the normal 8039 * file read/write permissions. 8040 * 8041 * Just in case the server has other ideas (which RFC 8276 allows), 8042 * do a cached access check for the XA* flags to possibly avoid 8043 * doing an RPC and getting EACCES back. 8044 */ 8045 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { 8046 if (!(mask & NFS_ACCESS_XAWRITE)) 8047 return -EACCES; 8048 } 8049 8050 if (buf == NULL) { 8051 ret = nfs42_proc_removexattr(inode, key); 8052 if (!ret) 8053 nfs4_xattr_cache_remove(inode, key); 8054 } else { 8055 ret = nfs42_proc_setxattr(inode, key, buf, buflen, flags); 8056 if (!ret) 8057 nfs4_xattr_cache_add(inode, key, buf, NULL, buflen); 8058 } 8059 8060 return ret; 8061 } 8062 8063 static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler, 8064 struct dentry *unused, struct inode *inode, 8065 const char *key, void *buf, size_t buflen) 8066 { 8067 u32 mask; 8068 ssize_t ret; 8069 8070 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 8071 return -EOPNOTSUPP; 8072 8073 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { 8074 if (!(mask & NFS_ACCESS_XAREAD)) 8075 return -EACCES; 8076 } 8077 8078 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 8079 if (ret) 8080 return ret; 8081 8082 ret = nfs4_xattr_cache_get(inode, key, buf, buflen); 8083 if (ret >= 0 || (ret < 0 && ret != -ENOENT)) 8084 return ret; 8085 8086 ret = nfs42_proc_getxattr(inode, key, buf, buflen); 8087 8088 return ret; 8089 } 8090 8091 static ssize_t 8092 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) 8093 { 8094 u64 cookie; 8095 bool eof; 8096 ssize_t ret, size; 8097 char *buf; 8098 size_t buflen; 8099 u32 mask; 8100 8101 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 8102 return 0; 8103 8104 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { 8105 if (!(mask & NFS_ACCESS_XALIST)) 8106 return 0; 8107 } 8108 8109 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 8110 if (ret) 8111 return ret; 8112 8113 ret = nfs4_xattr_cache_list(inode, list, list_len); 8114 if (ret >= 0 || (ret < 0 && ret != -ENOENT)) 8115 return ret; 8116 8117 cookie = 0; 8118 eof = false; 8119 buflen = list_len ? list_len : XATTR_LIST_MAX; 8120 buf = list_len ? list : NULL; 8121 size = 0; 8122 8123 while (!eof) { 8124 ret = nfs42_proc_listxattrs(inode, buf, buflen, 8125 &cookie, &eof); 8126 if (ret < 0) 8127 return ret; 8128 8129 if (list_len) { 8130 buf += ret; 8131 buflen -= ret; 8132 } 8133 size += ret; 8134 } 8135 8136 if (list_len) 8137 nfs4_xattr_cache_set_list(inode, list, size); 8138 8139 return size; 8140 } 8141 8142 #else 8143 8144 static ssize_t 8145 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) 8146 { 8147 return 0; 8148 } 8149 #endif /* CONFIG_NFS_V4_2 */ 8150 8151 /* 8152 * nfs_fhget will use either the mounted_on_fileid or the fileid 8153 */ 8154 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) 8155 { 8156 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || 8157 (fattr->valid & NFS_ATTR_FATTR_FILEID)) && 8158 (fattr->valid & NFS_ATTR_FATTR_FSID) && 8159 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS))) 8160 return; 8161 8162 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 8163 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL; 8164 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 8165 fattr->nlink = 2; 8166 } 8167 8168 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 8169 const struct qstr *name, 8170 struct nfs4_fs_locations *fs_locations, 8171 struct page *page) 8172 { 8173 struct nfs_server *server = NFS_SERVER(dir); 8174 u32 bitmask[3]; 8175 struct nfs4_fs_locations_arg args = { 8176 .dir_fh = NFS_FH(dir), 8177 .name = name, 8178 .page = page, 8179 .bitmask = bitmask, 8180 }; 8181 struct nfs4_fs_locations_res res = { 8182 .fs_locations = fs_locations, 8183 }; 8184 struct rpc_message msg = { 8185 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 8186 .rpc_argp = &args, 8187 .rpc_resp = &res, 8188 }; 8189 int status; 8190 8191 dprintk("%s: start\n", __func__); 8192 8193 bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS; 8194 bitmask[1] = nfs4_fattr_bitmap[1]; 8195 8196 /* Ask for the fileid of the absent filesystem if mounted_on_fileid 8197 * is not supported */ 8198 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) 8199 bitmask[0] &= ~FATTR4_WORD0_FILEID; 8200 else 8201 bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID; 8202 8203 nfs_fattr_init(fs_locations->fattr); 8204 fs_locations->server = server; 8205 fs_locations->nlocations = 0; 8206 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0); 8207 dprintk("%s: returned status = %d\n", __func__, status); 8208 return status; 8209 } 8210 8211 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 8212 const struct qstr *name, 8213 struct nfs4_fs_locations *fs_locations, 8214 struct page *page) 8215 { 8216 struct nfs4_exception exception = { 8217 .interruptible = true, 8218 }; 8219 int err; 8220 do { 8221 err = _nfs4_proc_fs_locations(client, dir, name, 8222 fs_locations, page); 8223 trace_nfs4_get_fs_locations(dir, name, err); 8224 err = nfs4_handle_exception(NFS_SERVER(dir), err, 8225 &exception); 8226 } while (exception.retry); 8227 return err; 8228 } 8229 8230 /* 8231 * This operation also signals the server that this client is 8232 * performing migration recovery. The server can stop returning 8233 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is 8234 * appended to this compound to identify the client ID which is 8235 * performing recovery. 8236 */ 8237 static int _nfs40_proc_get_locations(struct nfs_server *server, 8238 struct nfs_fh *fhandle, 8239 struct nfs4_fs_locations *locations, 8240 struct page *page, const struct cred *cred) 8241 { 8242 struct rpc_clnt *clnt = server->client; 8243 u32 bitmask[2] = { 8244 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 8245 }; 8246 struct nfs4_fs_locations_arg args = { 8247 .clientid = server->nfs_client->cl_clientid, 8248 .fh = fhandle, 8249 .page = page, 8250 .bitmask = bitmask, 8251 .migration = 1, /* skip LOOKUP */ 8252 .renew = 1, /* append RENEW */ 8253 }; 8254 struct nfs4_fs_locations_res res = { 8255 .fs_locations = locations, 8256 .migration = 1, 8257 .renew = 1, 8258 }; 8259 struct rpc_message msg = { 8260 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 8261 .rpc_argp = &args, 8262 .rpc_resp = &res, 8263 .rpc_cred = cred, 8264 }; 8265 unsigned long now = jiffies; 8266 int status; 8267 8268 nfs_fattr_init(locations->fattr); 8269 locations->server = server; 8270 locations->nlocations = 0; 8271 8272 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8273 status = nfs4_call_sync_sequence(clnt, server, &msg, 8274 &args.seq_args, &res.seq_res); 8275 if (status) 8276 return status; 8277 8278 renew_lease(server, now); 8279 return 0; 8280 } 8281 8282 #ifdef CONFIG_NFS_V4_1 8283 8284 /* 8285 * This operation also signals the server that this client is 8286 * performing migration recovery. The server can stop asserting 8287 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID 8288 * performing this operation is identified in the SEQUENCE 8289 * operation in this compound. 8290 * 8291 * When the client supports GETATTR(fs_locations_info), it can 8292 * be plumbed in here. 8293 */ 8294 static int _nfs41_proc_get_locations(struct nfs_server *server, 8295 struct nfs_fh *fhandle, 8296 struct nfs4_fs_locations *locations, 8297 struct page *page, const struct cred *cred) 8298 { 8299 struct rpc_clnt *clnt = server->client; 8300 u32 bitmask[2] = { 8301 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 8302 }; 8303 struct nfs4_fs_locations_arg args = { 8304 .fh = fhandle, 8305 .page = page, 8306 .bitmask = bitmask, 8307 .migration = 1, /* skip LOOKUP */ 8308 }; 8309 struct nfs4_fs_locations_res res = { 8310 .fs_locations = locations, 8311 .migration = 1, 8312 }; 8313 struct rpc_message msg = { 8314 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 8315 .rpc_argp = &args, 8316 .rpc_resp = &res, 8317 .rpc_cred = cred, 8318 }; 8319 struct nfs4_call_sync_data data = { 8320 .seq_server = server, 8321 .seq_args = &args.seq_args, 8322 .seq_res = &res.seq_res, 8323 }; 8324 struct rpc_task_setup task_setup_data = { 8325 .rpc_client = clnt, 8326 .rpc_message = &msg, 8327 .callback_ops = server->nfs_client->cl_mvops->call_sync_ops, 8328 .callback_data = &data, 8329 .flags = RPC_TASK_NO_ROUND_ROBIN, 8330 }; 8331 int status; 8332 8333 nfs_fattr_init(locations->fattr); 8334 locations->server = server; 8335 locations->nlocations = 0; 8336 8337 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8338 status = nfs4_call_sync_custom(&task_setup_data); 8339 if (status == NFS4_OK && 8340 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 8341 status = -NFS4ERR_LEASE_MOVED; 8342 return status; 8343 } 8344 8345 #endif /* CONFIG_NFS_V4_1 */ 8346 8347 /** 8348 * nfs4_proc_get_locations - discover locations for a migrated FSID 8349 * @server: pointer to nfs_server to process 8350 * @fhandle: pointer to the kernel NFS client file handle 8351 * @locations: result of query 8352 * @page: buffer 8353 * @cred: credential to use for this operation 8354 * 8355 * Returns NFS4_OK on success, a negative NFS4ERR status code if the 8356 * operation failed, or a negative errno if a local error occurred. 8357 * 8358 * On success, "locations" is filled in, but if the server has 8359 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not 8360 * asserted. 8361 * 8362 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases 8363 * from this client that require migration recovery. 8364 */ 8365 int nfs4_proc_get_locations(struct nfs_server *server, 8366 struct nfs_fh *fhandle, 8367 struct nfs4_fs_locations *locations, 8368 struct page *page, const struct cred *cred) 8369 { 8370 struct nfs_client *clp = server->nfs_client; 8371 const struct nfs4_mig_recovery_ops *ops = 8372 clp->cl_mvops->mig_recovery_ops; 8373 struct nfs4_exception exception = { 8374 .interruptible = true, 8375 }; 8376 int status; 8377 8378 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 8379 (unsigned long long)server->fsid.major, 8380 (unsigned long long)server->fsid.minor, 8381 clp->cl_hostname); 8382 nfs_display_fhandle(fhandle, __func__); 8383 8384 do { 8385 status = ops->get_locations(server, fhandle, locations, page, 8386 cred); 8387 if (status != -NFS4ERR_DELAY) 8388 break; 8389 nfs4_handle_exception(server, status, &exception); 8390 } while (exception.retry); 8391 return status; 8392 } 8393 8394 /* 8395 * This operation also signals the server that this client is 8396 * performing "lease moved" recovery. The server can stop 8397 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation 8398 * is appended to this compound to identify the client ID which is 8399 * performing recovery. 8400 */ 8401 static int _nfs40_proc_fsid_present(struct inode *inode, const struct cred *cred) 8402 { 8403 struct nfs_server *server = NFS_SERVER(inode); 8404 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 8405 struct rpc_clnt *clnt = server->client; 8406 struct nfs4_fsid_present_arg args = { 8407 .fh = NFS_FH(inode), 8408 .clientid = clp->cl_clientid, 8409 .renew = 1, /* append RENEW */ 8410 }; 8411 struct nfs4_fsid_present_res res = { 8412 .renew = 1, 8413 }; 8414 struct rpc_message msg = { 8415 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 8416 .rpc_argp = &args, 8417 .rpc_resp = &res, 8418 .rpc_cred = cred, 8419 }; 8420 unsigned long now = jiffies; 8421 int status; 8422 8423 res.fh = nfs_alloc_fhandle(); 8424 if (res.fh == NULL) 8425 return -ENOMEM; 8426 8427 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8428 status = nfs4_call_sync_sequence(clnt, server, &msg, 8429 &args.seq_args, &res.seq_res); 8430 nfs_free_fhandle(res.fh); 8431 if (status) 8432 return status; 8433 8434 do_renew_lease(clp, now); 8435 return 0; 8436 } 8437 8438 #ifdef CONFIG_NFS_V4_1 8439 8440 /* 8441 * This operation also signals the server that this client is 8442 * performing "lease moved" recovery. The server can stop asserting 8443 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing 8444 * this operation is identified in the SEQUENCE operation in this 8445 * compound. 8446 */ 8447 static int _nfs41_proc_fsid_present(struct inode *inode, const struct cred *cred) 8448 { 8449 struct nfs_server *server = NFS_SERVER(inode); 8450 struct rpc_clnt *clnt = server->client; 8451 struct nfs4_fsid_present_arg args = { 8452 .fh = NFS_FH(inode), 8453 }; 8454 struct nfs4_fsid_present_res res = { 8455 }; 8456 struct rpc_message msg = { 8457 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 8458 .rpc_argp = &args, 8459 .rpc_resp = &res, 8460 .rpc_cred = cred, 8461 }; 8462 int status; 8463 8464 res.fh = nfs_alloc_fhandle(); 8465 if (res.fh == NULL) 8466 return -ENOMEM; 8467 8468 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8469 status = nfs4_call_sync_sequence(clnt, server, &msg, 8470 &args.seq_args, &res.seq_res); 8471 nfs_free_fhandle(res.fh); 8472 if (status == NFS4_OK && 8473 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 8474 status = -NFS4ERR_LEASE_MOVED; 8475 return status; 8476 } 8477 8478 #endif /* CONFIG_NFS_V4_1 */ 8479 8480 /** 8481 * nfs4_proc_fsid_present - Is this FSID present or absent on server? 8482 * @inode: inode on FSID to check 8483 * @cred: credential to use for this operation 8484 * 8485 * Server indicates whether the FSID is present, moved, or not 8486 * recognized. This operation is necessary to clear a LEASE_MOVED 8487 * condition for this client ID. 8488 * 8489 * Returns NFS4_OK if the FSID is present on this server, 8490 * -NFS4ERR_MOVED if the FSID is no longer present, a negative 8491 * NFS4ERR code if some error occurred on the server, or a 8492 * negative errno if a local failure occurred. 8493 */ 8494 int nfs4_proc_fsid_present(struct inode *inode, const struct cred *cred) 8495 { 8496 struct nfs_server *server = NFS_SERVER(inode); 8497 struct nfs_client *clp = server->nfs_client; 8498 const struct nfs4_mig_recovery_ops *ops = 8499 clp->cl_mvops->mig_recovery_ops; 8500 struct nfs4_exception exception = { 8501 .interruptible = true, 8502 }; 8503 int status; 8504 8505 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 8506 (unsigned long long)server->fsid.major, 8507 (unsigned long long)server->fsid.minor, 8508 clp->cl_hostname); 8509 nfs_display_fhandle(NFS_FH(inode), __func__); 8510 8511 do { 8512 status = ops->fsid_present(inode, cred); 8513 if (status != -NFS4ERR_DELAY) 8514 break; 8515 nfs4_handle_exception(server, status, &exception); 8516 } while (exception.retry); 8517 return status; 8518 } 8519 8520 /* 8521 * If 'use_integrity' is true and the state managment nfs_client 8522 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient 8523 * and the machine credential as per RFC3530bis and RFC5661 Security 8524 * Considerations sections. Otherwise, just use the user cred with the 8525 * filesystem's rpc_client. 8526 */ 8527 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity) 8528 { 8529 int status; 8530 struct rpc_clnt *clnt = NFS_SERVER(dir)->client; 8531 struct nfs_client *clp = NFS_SERVER(dir)->nfs_client; 8532 struct nfs4_secinfo_arg args = { 8533 .dir_fh = NFS_FH(dir), 8534 .name = name, 8535 }; 8536 struct nfs4_secinfo_res res = { 8537 .flavors = flavors, 8538 }; 8539 struct rpc_message msg = { 8540 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO], 8541 .rpc_argp = &args, 8542 .rpc_resp = &res, 8543 }; 8544 struct nfs4_call_sync_data data = { 8545 .seq_server = NFS_SERVER(dir), 8546 .seq_args = &args.seq_args, 8547 .seq_res = &res.seq_res, 8548 }; 8549 struct rpc_task_setup task_setup = { 8550 .rpc_client = clnt, 8551 .rpc_message = &msg, 8552 .callback_ops = clp->cl_mvops->call_sync_ops, 8553 .callback_data = &data, 8554 .flags = RPC_TASK_NO_ROUND_ROBIN, 8555 }; 8556 const struct cred *cred = NULL; 8557 8558 if (use_integrity) { 8559 clnt = clp->cl_rpcclient; 8560 task_setup.rpc_client = clnt; 8561 8562 cred = nfs4_get_clid_cred(clp); 8563 msg.rpc_cred = cred; 8564 } 8565 8566 dprintk("NFS call secinfo %s\n", name->name); 8567 8568 nfs4_state_protect(clp, NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg); 8569 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 8570 status = nfs4_call_sync_custom(&task_setup); 8571 8572 dprintk("NFS reply secinfo: %d\n", status); 8573 8574 put_cred(cred); 8575 return status; 8576 } 8577 8578 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, 8579 struct nfs4_secinfo_flavors *flavors) 8580 { 8581 struct nfs4_exception exception = { 8582 .interruptible = true, 8583 }; 8584 int err; 8585 do { 8586 err = -NFS4ERR_WRONGSEC; 8587 8588 /* try to use integrity protection with machine cred */ 8589 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client)) 8590 err = _nfs4_proc_secinfo(dir, name, flavors, true); 8591 8592 /* 8593 * if unable to use integrity protection, or SECINFO with 8594 * integrity protection returns NFS4ERR_WRONGSEC (which is 8595 * disallowed by spec, but exists in deployed servers) use 8596 * the current filesystem's rpc_client and the user cred. 8597 */ 8598 if (err == -NFS4ERR_WRONGSEC) 8599 err = _nfs4_proc_secinfo(dir, name, flavors, false); 8600 8601 trace_nfs4_secinfo(dir, name, err); 8602 err = nfs4_handle_exception(NFS_SERVER(dir), err, 8603 &exception); 8604 } while (exception.retry); 8605 return err; 8606 } 8607 8608 #ifdef CONFIG_NFS_V4_1 8609 /* 8610 * Check the exchange flags returned by the server for invalid flags, having 8611 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or 8612 * DS flags set. 8613 */ 8614 static int nfs4_check_cl_exchange_flags(u32 flags, u32 version) 8615 { 8616 if (version >= 2 && (flags & ~EXCHGID4_2_FLAG_MASK_R)) 8617 goto out_inval; 8618 else if (version < 2 && (flags & ~EXCHGID4_FLAG_MASK_R)) 8619 goto out_inval; 8620 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && 8621 (flags & EXCHGID4_FLAG_USE_NON_PNFS)) 8622 goto out_inval; 8623 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) 8624 goto out_inval; 8625 return NFS_OK; 8626 out_inval: 8627 return -NFS4ERR_INVAL; 8628 } 8629 8630 static bool 8631 nfs41_same_server_scope(struct nfs41_server_scope *a, 8632 struct nfs41_server_scope *b) 8633 { 8634 if (a->server_scope_sz != b->server_scope_sz) 8635 return false; 8636 return memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0; 8637 } 8638 8639 static void 8640 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) 8641 { 8642 struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp; 8643 struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp; 8644 struct nfs_client *clp = args->client; 8645 8646 switch (task->tk_status) { 8647 case -NFS4ERR_BADSESSION: 8648 case -NFS4ERR_DEADSESSION: 8649 nfs4_schedule_session_recovery(clp->cl_session, 8650 task->tk_status); 8651 return; 8652 } 8653 if (args->dir == NFS4_CDFC4_FORE_OR_BOTH && 8654 res->dir != NFS4_CDFS4_BOTH) { 8655 rpc_task_close_connection(task); 8656 if (args->retries++ < MAX_BIND_CONN_TO_SESSION_RETRIES) 8657 rpc_restart_call(task); 8658 } 8659 } 8660 8661 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = { 8662 .rpc_call_done = nfs4_bind_one_conn_to_session_done, 8663 }; 8664 8665 /* 8666 * nfs4_proc_bind_one_conn_to_session() 8667 * 8668 * The 4.1 client currently uses the same TCP connection for the 8669 * fore and backchannel. 8670 */ 8671 static 8672 int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt, 8673 struct rpc_xprt *xprt, 8674 struct nfs_client *clp, 8675 const struct cred *cred) 8676 { 8677 int status; 8678 struct nfs41_bind_conn_to_session_args args = { 8679 .client = clp, 8680 .dir = NFS4_CDFC4_FORE_OR_BOTH, 8681 .retries = 0, 8682 }; 8683 struct nfs41_bind_conn_to_session_res res; 8684 struct rpc_message msg = { 8685 .rpc_proc = 8686 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION], 8687 .rpc_argp = &args, 8688 .rpc_resp = &res, 8689 .rpc_cred = cred, 8690 }; 8691 struct rpc_task_setup task_setup_data = { 8692 .rpc_client = clnt, 8693 .rpc_xprt = xprt, 8694 .callback_ops = &nfs4_bind_one_conn_to_session_ops, 8695 .rpc_message = &msg, 8696 .flags = RPC_TASK_TIMEOUT, 8697 }; 8698 struct rpc_task *task; 8699 8700 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id); 8701 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN)) 8702 args.dir = NFS4_CDFC4_FORE; 8703 8704 /* Do not set the backchannel flag unless this is clnt->cl_xprt */ 8705 if (xprt != rcu_access_pointer(clnt->cl_xprt)) 8706 args.dir = NFS4_CDFC4_FORE; 8707 8708 task = rpc_run_task(&task_setup_data); 8709 if (!IS_ERR(task)) { 8710 status = task->tk_status; 8711 rpc_put_task(task); 8712 } else 8713 status = PTR_ERR(task); 8714 trace_nfs4_bind_conn_to_session(clp, status); 8715 if (status == 0) { 8716 if (memcmp(res.sessionid.data, 8717 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { 8718 dprintk("NFS: %s: Session ID mismatch\n", __func__); 8719 return -EIO; 8720 } 8721 if ((res.dir & args.dir) != res.dir || res.dir == 0) { 8722 dprintk("NFS: %s: Unexpected direction from server\n", 8723 __func__); 8724 return -EIO; 8725 } 8726 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) { 8727 dprintk("NFS: %s: Server returned RDMA mode = true\n", 8728 __func__); 8729 return -EIO; 8730 } 8731 } 8732 8733 return status; 8734 } 8735 8736 struct rpc_bind_conn_calldata { 8737 struct nfs_client *clp; 8738 const struct cred *cred; 8739 }; 8740 8741 static int 8742 nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt, 8743 struct rpc_xprt *xprt, 8744 void *calldata) 8745 { 8746 struct rpc_bind_conn_calldata *p = calldata; 8747 8748 return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred); 8749 } 8750 8751 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, const struct cred *cred) 8752 { 8753 struct rpc_bind_conn_calldata data = { 8754 .clp = clp, 8755 .cred = cred, 8756 }; 8757 return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient, 8758 nfs4_proc_bind_conn_to_session_callback, &data); 8759 } 8760 8761 /* 8762 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map 8763 * and operations we'd like to see to enable certain features in the allow map 8764 */ 8765 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = { 8766 .how = SP4_MACH_CRED, 8767 .enforce.u.words = { 8768 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 8769 1 << (OP_EXCHANGE_ID - 32) | 8770 1 << (OP_CREATE_SESSION - 32) | 8771 1 << (OP_DESTROY_SESSION - 32) | 8772 1 << (OP_DESTROY_CLIENTID - 32) 8773 }, 8774 .allow.u.words = { 8775 [0] = 1 << (OP_CLOSE) | 8776 1 << (OP_OPEN_DOWNGRADE) | 8777 1 << (OP_LOCKU) | 8778 1 << (OP_DELEGRETURN) | 8779 1 << (OP_COMMIT), 8780 [1] = 1 << (OP_SECINFO - 32) | 8781 1 << (OP_SECINFO_NO_NAME - 32) | 8782 1 << (OP_LAYOUTRETURN - 32) | 8783 1 << (OP_TEST_STATEID - 32) | 8784 1 << (OP_FREE_STATEID - 32) | 8785 1 << (OP_WRITE - 32) 8786 } 8787 }; 8788 8789 /* 8790 * Select the state protection mode for client `clp' given the server results 8791 * from exchange_id in `sp'. 8792 * 8793 * Returns 0 on success, negative errno otherwise. 8794 */ 8795 static int nfs4_sp4_select_mode(struct nfs_client *clp, 8796 struct nfs41_state_protection *sp) 8797 { 8798 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = { 8799 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 8800 1 << (OP_EXCHANGE_ID - 32) | 8801 1 << (OP_CREATE_SESSION - 32) | 8802 1 << (OP_DESTROY_SESSION - 32) | 8803 1 << (OP_DESTROY_CLIENTID - 32) 8804 }; 8805 unsigned long flags = 0; 8806 unsigned int i; 8807 int ret = 0; 8808 8809 if (sp->how == SP4_MACH_CRED) { 8810 /* Print state protect result */ 8811 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n"); 8812 for (i = 0; i <= LAST_NFS4_OP; i++) { 8813 if (test_bit(i, sp->enforce.u.longs)) 8814 dfprintk(MOUNT, " enforce op %d\n", i); 8815 if (test_bit(i, sp->allow.u.longs)) 8816 dfprintk(MOUNT, " allow op %d\n", i); 8817 } 8818 8819 /* make sure nothing is on enforce list that isn't supported */ 8820 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) { 8821 if (sp->enforce.u.words[i] & ~supported_enforce[i]) { 8822 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 8823 ret = -EINVAL; 8824 goto out; 8825 } 8826 } 8827 8828 /* 8829 * Minimal mode - state operations are allowed to use machine 8830 * credential. Note this already happens by default, so the 8831 * client doesn't have to do anything more than the negotiation. 8832 * 8833 * NOTE: we don't care if EXCHANGE_ID is in the list - 8834 * we're already using the machine cred for exchange_id 8835 * and will never use a different cred. 8836 */ 8837 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) && 8838 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) && 8839 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) && 8840 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) { 8841 dfprintk(MOUNT, "sp4_mach_cred:\n"); 8842 dfprintk(MOUNT, " minimal mode enabled\n"); 8843 __set_bit(NFS_SP4_MACH_CRED_MINIMAL, &flags); 8844 } else { 8845 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 8846 ret = -EINVAL; 8847 goto out; 8848 } 8849 8850 if (test_bit(OP_CLOSE, sp->allow.u.longs) && 8851 test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) && 8852 test_bit(OP_DELEGRETURN, sp->allow.u.longs) && 8853 test_bit(OP_LOCKU, sp->allow.u.longs)) { 8854 dfprintk(MOUNT, " cleanup mode enabled\n"); 8855 __set_bit(NFS_SP4_MACH_CRED_CLEANUP, &flags); 8856 } 8857 8858 if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) { 8859 dfprintk(MOUNT, " pnfs cleanup mode enabled\n"); 8860 __set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP, &flags); 8861 } 8862 8863 if (test_bit(OP_SECINFO, sp->allow.u.longs) && 8864 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) { 8865 dfprintk(MOUNT, " secinfo mode enabled\n"); 8866 __set_bit(NFS_SP4_MACH_CRED_SECINFO, &flags); 8867 } 8868 8869 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) && 8870 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) { 8871 dfprintk(MOUNT, " stateid mode enabled\n"); 8872 __set_bit(NFS_SP4_MACH_CRED_STATEID, &flags); 8873 } 8874 8875 if (test_bit(OP_WRITE, sp->allow.u.longs)) { 8876 dfprintk(MOUNT, " write mode enabled\n"); 8877 __set_bit(NFS_SP4_MACH_CRED_WRITE, &flags); 8878 } 8879 8880 if (test_bit(OP_COMMIT, sp->allow.u.longs)) { 8881 dfprintk(MOUNT, " commit mode enabled\n"); 8882 __set_bit(NFS_SP4_MACH_CRED_COMMIT, &flags); 8883 } 8884 } 8885 out: 8886 clp->cl_sp4_flags = flags; 8887 return ret; 8888 } 8889 8890 struct nfs41_exchange_id_data { 8891 struct nfs41_exchange_id_res res; 8892 struct nfs41_exchange_id_args args; 8893 }; 8894 8895 static void nfs4_exchange_id_release(void *data) 8896 { 8897 struct nfs41_exchange_id_data *cdata = 8898 (struct nfs41_exchange_id_data *)data; 8899 8900 nfs_put_client(cdata->args.client); 8901 kfree(cdata->res.impl_id); 8902 kfree(cdata->res.server_scope); 8903 kfree(cdata->res.server_owner); 8904 kfree(cdata); 8905 } 8906 8907 static const struct rpc_call_ops nfs4_exchange_id_call_ops = { 8908 .rpc_release = nfs4_exchange_id_release, 8909 }; 8910 8911 /* 8912 * _nfs4_proc_exchange_id() 8913 * 8914 * Wrapper for EXCHANGE_ID operation. 8915 */ 8916 static struct rpc_task * 8917 nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred, 8918 u32 sp4_how, struct rpc_xprt *xprt) 8919 { 8920 struct rpc_message msg = { 8921 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], 8922 .rpc_cred = cred, 8923 }; 8924 struct rpc_task_setup task_setup_data = { 8925 .rpc_client = clp->cl_rpcclient, 8926 .callback_ops = &nfs4_exchange_id_call_ops, 8927 .rpc_message = &msg, 8928 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN, 8929 }; 8930 struct nfs41_exchange_id_data *calldata; 8931 int status; 8932 8933 if (!refcount_inc_not_zero(&clp->cl_count)) 8934 return ERR_PTR(-EIO); 8935 8936 status = -ENOMEM; 8937 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 8938 if (!calldata) 8939 goto out; 8940 8941 nfs4_init_boot_verifier(clp, &calldata->args.verifier); 8942 8943 status = nfs4_init_uniform_client_string(clp); 8944 if (status) 8945 goto out_calldata; 8946 8947 calldata->res.server_owner = kzalloc(sizeof(struct nfs41_server_owner), 8948 GFP_NOFS); 8949 status = -ENOMEM; 8950 if (unlikely(calldata->res.server_owner == NULL)) 8951 goto out_calldata; 8952 8953 calldata->res.server_scope = kzalloc(sizeof(struct nfs41_server_scope), 8954 GFP_NOFS); 8955 if (unlikely(calldata->res.server_scope == NULL)) 8956 goto out_server_owner; 8957 8958 calldata->res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS); 8959 if (unlikely(calldata->res.impl_id == NULL)) 8960 goto out_server_scope; 8961 8962 switch (sp4_how) { 8963 case SP4_NONE: 8964 calldata->args.state_protect.how = SP4_NONE; 8965 break; 8966 8967 case SP4_MACH_CRED: 8968 calldata->args.state_protect = nfs4_sp4_mach_cred_request; 8969 break; 8970 8971 default: 8972 /* unsupported! */ 8973 WARN_ON_ONCE(1); 8974 status = -EINVAL; 8975 goto out_impl_id; 8976 } 8977 if (xprt) { 8978 task_setup_data.rpc_xprt = xprt; 8979 task_setup_data.flags |= RPC_TASK_SOFTCONN; 8980 memcpy(calldata->args.verifier.data, clp->cl_confirm.data, 8981 sizeof(calldata->args.verifier.data)); 8982 } 8983 calldata->args.client = clp; 8984 calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 8985 EXCHGID4_FLAG_BIND_PRINC_STATEID; 8986 #ifdef CONFIG_NFS_V4_1_MIGRATION 8987 calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR; 8988 #endif 8989 if (test_bit(NFS_CS_PNFS, &clp->cl_flags)) 8990 calldata->args.flags |= EXCHGID4_FLAG_USE_PNFS_DS; 8991 msg.rpc_argp = &calldata->args; 8992 msg.rpc_resp = &calldata->res; 8993 task_setup_data.callback_data = calldata; 8994 8995 return rpc_run_task(&task_setup_data); 8996 8997 out_impl_id: 8998 kfree(calldata->res.impl_id); 8999 out_server_scope: 9000 kfree(calldata->res.server_scope); 9001 out_server_owner: 9002 kfree(calldata->res.server_owner); 9003 out_calldata: 9004 kfree(calldata); 9005 out: 9006 nfs_put_client(clp); 9007 return ERR_PTR(status); 9008 } 9009 9010 /* 9011 * _nfs4_proc_exchange_id() 9012 * 9013 * Wrapper for EXCHANGE_ID operation. 9014 */ 9015 static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred, 9016 u32 sp4_how) 9017 { 9018 struct rpc_task *task; 9019 struct nfs41_exchange_id_args *argp; 9020 struct nfs41_exchange_id_res *resp; 9021 unsigned long now = jiffies; 9022 int status; 9023 9024 task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL); 9025 if (IS_ERR(task)) 9026 return PTR_ERR(task); 9027 9028 argp = task->tk_msg.rpc_argp; 9029 resp = task->tk_msg.rpc_resp; 9030 status = task->tk_status; 9031 if (status != 0) 9032 goto out; 9033 9034 status = nfs4_check_cl_exchange_flags(resp->flags, 9035 clp->cl_mvops->minor_version); 9036 if (status != 0) 9037 goto out; 9038 9039 status = nfs4_sp4_select_mode(clp, &resp->state_protect); 9040 if (status != 0) 9041 goto out; 9042 9043 do_renew_lease(clp, now); 9044 9045 clp->cl_clientid = resp->clientid; 9046 clp->cl_exchange_flags = resp->flags; 9047 clp->cl_seqid = resp->seqid; 9048 /* Client ID is not confirmed */ 9049 if (!(resp->flags & EXCHGID4_FLAG_CONFIRMED_R)) 9050 clear_bit(NFS4_SESSION_ESTABLISHED, 9051 &clp->cl_session->session_state); 9052 9053 if (clp->cl_serverscope != NULL && 9054 !nfs41_same_server_scope(clp->cl_serverscope, 9055 resp->server_scope)) { 9056 dprintk("%s: server_scope mismatch detected\n", 9057 __func__); 9058 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); 9059 } 9060 9061 swap(clp->cl_serverowner, resp->server_owner); 9062 swap(clp->cl_serverscope, resp->server_scope); 9063 swap(clp->cl_implid, resp->impl_id); 9064 9065 /* Save the EXCHANGE_ID verifier session trunk tests */ 9066 memcpy(clp->cl_confirm.data, argp->verifier.data, 9067 sizeof(clp->cl_confirm.data)); 9068 out: 9069 trace_nfs4_exchange_id(clp, status); 9070 rpc_put_task(task); 9071 return status; 9072 } 9073 9074 /* 9075 * nfs4_proc_exchange_id() 9076 * 9077 * Returns zero, a negative errno, or a negative NFS4ERR status code. 9078 * 9079 * Since the clientid has expired, all compounds using sessions 9080 * associated with the stale clientid will be returning 9081 * NFS4ERR_BADSESSION in the sequence operation, and will therefore 9082 * be in some phase of session reset. 9083 * 9084 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used. 9085 */ 9086 int nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred) 9087 { 9088 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor; 9089 int status; 9090 9091 /* try SP4_MACH_CRED if krb5i/p */ 9092 if (authflavor == RPC_AUTH_GSS_KRB5I || 9093 authflavor == RPC_AUTH_GSS_KRB5P) { 9094 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED); 9095 if (!status) 9096 return 0; 9097 } 9098 9099 /* try SP4_NONE */ 9100 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE); 9101 } 9102 9103 /** 9104 * nfs4_test_session_trunk 9105 * 9106 * This is an add_xprt_test() test function called from 9107 * rpc_clnt_setup_test_and_add_xprt. 9108 * 9109 * The rpc_xprt_switch is referrenced by rpc_clnt_setup_test_and_add_xprt 9110 * and is dereferrenced in nfs4_exchange_id_release 9111 * 9112 * Upon success, add the new transport to the rpc_clnt 9113 * 9114 * @clnt: struct rpc_clnt to get new transport 9115 * @xprt: the rpc_xprt to test 9116 * @data: call data for _nfs4_proc_exchange_id. 9117 */ 9118 void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt, 9119 void *data) 9120 { 9121 struct nfs4_add_xprt_data *adata = data; 9122 struct rpc_task *task; 9123 int status; 9124 9125 u32 sp4_how; 9126 9127 dprintk("--> %s try %s\n", __func__, 9128 xprt->address_strings[RPC_DISPLAY_ADDR]); 9129 9130 sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED); 9131 9132 try_again: 9133 /* Test connection for session trunking. Async exchange_id call */ 9134 task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt); 9135 if (IS_ERR(task)) 9136 return; 9137 9138 status = task->tk_status; 9139 if (status == 0) { 9140 status = nfs4_detect_session_trunking(adata->clp, 9141 task->tk_msg.rpc_resp, xprt); 9142 trace_nfs4_trunked_exchange_id(adata->clp, 9143 xprt->address_strings[RPC_DISPLAY_ADDR], status); 9144 } 9145 if (status == 0) 9146 rpc_clnt_xprt_switch_add_xprt(clnt, xprt); 9147 else if (status != -NFS4ERR_DELAY && rpc_clnt_xprt_switch_has_addr(clnt, 9148 (struct sockaddr *)&xprt->addr)) 9149 rpc_clnt_xprt_switch_remove_xprt(clnt, xprt); 9150 9151 rpc_put_task(task); 9152 if (status == -NFS4ERR_DELAY) { 9153 ssleep(1); 9154 goto try_again; 9155 } 9156 } 9157 EXPORT_SYMBOL_GPL(nfs4_test_session_trunk); 9158 9159 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp, 9160 const struct cred *cred) 9161 { 9162 struct rpc_message msg = { 9163 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID], 9164 .rpc_argp = clp, 9165 .rpc_cred = cred, 9166 }; 9167 int status; 9168 9169 status = rpc_call_sync(clp->cl_rpcclient, &msg, 9170 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 9171 trace_nfs4_destroy_clientid(clp, status); 9172 if (status) 9173 dprintk("NFS: Got error %d from the server %s on " 9174 "DESTROY_CLIENTID.", status, clp->cl_hostname); 9175 return status; 9176 } 9177 9178 static int nfs4_proc_destroy_clientid(struct nfs_client *clp, 9179 const struct cred *cred) 9180 { 9181 unsigned int loop; 9182 int ret; 9183 9184 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) { 9185 ret = _nfs4_proc_destroy_clientid(clp, cred); 9186 switch (ret) { 9187 case -NFS4ERR_DELAY: 9188 case -NFS4ERR_CLIENTID_BUSY: 9189 ssleep(1); 9190 break; 9191 default: 9192 return ret; 9193 } 9194 } 9195 return 0; 9196 } 9197 9198 int nfs4_destroy_clientid(struct nfs_client *clp) 9199 { 9200 const struct cred *cred; 9201 int ret = 0; 9202 9203 if (clp->cl_mvops->minor_version < 1) 9204 goto out; 9205 if (clp->cl_exchange_flags == 0) 9206 goto out; 9207 if (clp->cl_preserve_clid) 9208 goto out; 9209 cred = nfs4_get_clid_cred(clp); 9210 ret = nfs4_proc_destroy_clientid(clp, cred); 9211 put_cred(cred); 9212 switch (ret) { 9213 case 0: 9214 case -NFS4ERR_STALE_CLIENTID: 9215 clp->cl_exchange_flags = 0; 9216 } 9217 out: 9218 return ret; 9219 } 9220 9221 #endif /* CONFIG_NFS_V4_1 */ 9222 9223 struct nfs4_get_lease_time_data { 9224 struct nfs4_get_lease_time_args *args; 9225 struct nfs4_get_lease_time_res *res; 9226 struct nfs_client *clp; 9227 }; 9228 9229 static void nfs4_get_lease_time_prepare(struct rpc_task *task, 9230 void *calldata) 9231 { 9232 struct nfs4_get_lease_time_data *data = 9233 (struct nfs4_get_lease_time_data *)calldata; 9234 9235 /* just setup sequence, do not trigger session recovery 9236 since we're invoked within one */ 9237 nfs4_setup_sequence(data->clp, 9238 &data->args->la_seq_args, 9239 &data->res->lr_seq_res, 9240 task); 9241 } 9242 9243 /* 9244 * Called from nfs4_state_manager thread for session setup, so don't recover 9245 * from sequence operation or clientid errors. 9246 */ 9247 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) 9248 { 9249 struct nfs4_get_lease_time_data *data = 9250 (struct nfs4_get_lease_time_data *)calldata; 9251 9252 if (!nfs4_sequence_done(task, &data->res->lr_seq_res)) 9253 return; 9254 switch (task->tk_status) { 9255 case -NFS4ERR_DELAY: 9256 case -NFS4ERR_GRACE: 9257 rpc_delay(task, NFS4_POLL_RETRY_MIN); 9258 task->tk_status = 0; 9259 fallthrough; 9260 case -NFS4ERR_RETRY_UNCACHED_REP: 9261 rpc_restart_call_prepare(task); 9262 return; 9263 } 9264 } 9265 9266 static const struct rpc_call_ops nfs4_get_lease_time_ops = { 9267 .rpc_call_prepare = nfs4_get_lease_time_prepare, 9268 .rpc_call_done = nfs4_get_lease_time_done, 9269 }; 9270 9271 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) 9272 { 9273 struct nfs4_get_lease_time_args args; 9274 struct nfs4_get_lease_time_res res = { 9275 .lr_fsinfo = fsinfo, 9276 }; 9277 struct nfs4_get_lease_time_data data = { 9278 .args = &args, 9279 .res = &res, 9280 .clp = clp, 9281 }; 9282 struct rpc_message msg = { 9283 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME], 9284 .rpc_argp = &args, 9285 .rpc_resp = &res, 9286 }; 9287 struct rpc_task_setup task_setup = { 9288 .rpc_client = clp->cl_rpcclient, 9289 .rpc_message = &msg, 9290 .callback_ops = &nfs4_get_lease_time_ops, 9291 .callback_data = &data, 9292 .flags = RPC_TASK_TIMEOUT, 9293 }; 9294 9295 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0, 1); 9296 return nfs4_call_sync_custom(&task_setup); 9297 } 9298 9299 #ifdef CONFIG_NFS_V4_1 9300 9301 /* 9302 * Initialize the values to be used by the client in CREATE_SESSION 9303 * If nfs4_init_session set the fore channel request and response sizes, 9304 * use them. 9305 * 9306 * Set the back channel max_resp_sz_cached to zero to force the client to 9307 * always set csa_cachethis to FALSE because the current implementation 9308 * of the back channel DRC only supports caching the CB_SEQUENCE operation. 9309 */ 9310 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args, 9311 struct rpc_clnt *clnt) 9312 { 9313 unsigned int max_rqst_sz, max_resp_sz; 9314 unsigned int max_bc_payload = rpc_max_bc_payload(clnt); 9315 unsigned int max_bc_slots = rpc_num_bc_slots(clnt); 9316 9317 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead; 9318 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead; 9319 9320 /* Fore channel attributes */ 9321 args->fc_attrs.max_rqst_sz = max_rqst_sz; 9322 args->fc_attrs.max_resp_sz = max_resp_sz; 9323 args->fc_attrs.max_ops = NFS4_MAX_OPS; 9324 args->fc_attrs.max_reqs = max_session_slots; 9325 9326 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u " 9327 "max_ops=%u max_reqs=%u\n", 9328 __func__, 9329 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz, 9330 args->fc_attrs.max_ops, args->fc_attrs.max_reqs); 9331 9332 /* Back channel attributes */ 9333 args->bc_attrs.max_rqst_sz = max_bc_payload; 9334 args->bc_attrs.max_resp_sz = max_bc_payload; 9335 args->bc_attrs.max_resp_sz_cached = 0; 9336 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS; 9337 args->bc_attrs.max_reqs = max_t(unsigned short, max_session_cb_slots, 1); 9338 if (args->bc_attrs.max_reqs > max_bc_slots) 9339 args->bc_attrs.max_reqs = max_bc_slots; 9340 9341 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u " 9342 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", 9343 __func__, 9344 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz, 9345 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops, 9346 args->bc_attrs.max_reqs); 9347 } 9348 9349 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, 9350 struct nfs41_create_session_res *res) 9351 { 9352 struct nfs4_channel_attrs *sent = &args->fc_attrs; 9353 struct nfs4_channel_attrs *rcvd = &res->fc_attrs; 9354 9355 if (rcvd->max_resp_sz > sent->max_resp_sz) 9356 return -EINVAL; 9357 /* 9358 * Our requested max_ops is the minimum we need; we're not 9359 * prepared to break up compounds into smaller pieces than that. 9360 * So, no point even trying to continue if the server won't 9361 * cooperate: 9362 */ 9363 if (rcvd->max_ops < sent->max_ops) 9364 return -EINVAL; 9365 if (rcvd->max_reqs == 0) 9366 return -EINVAL; 9367 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE) 9368 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE; 9369 return 0; 9370 } 9371 9372 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, 9373 struct nfs41_create_session_res *res) 9374 { 9375 struct nfs4_channel_attrs *sent = &args->bc_attrs; 9376 struct nfs4_channel_attrs *rcvd = &res->bc_attrs; 9377 9378 if (!(res->flags & SESSION4_BACK_CHAN)) 9379 goto out; 9380 if (rcvd->max_rqst_sz > sent->max_rqst_sz) 9381 return -EINVAL; 9382 if (rcvd->max_resp_sz < sent->max_resp_sz) 9383 return -EINVAL; 9384 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) 9385 return -EINVAL; 9386 if (rcvd->max_ops > sent->max_ops) 9387 return -EINVAL; 9388 if (rcvd->max_reqs > sent->max_reqs) 9389 return -EINVAL; 9390 out: 9391 return 0; 9392 } 9393 9394 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args, 9395 struct nfs41_create_session_res *res) 9396 { 9397 int ret; 9398 9399 ret = nfs4_verify_fore_channel_attrs(args, res); 9400 if (ret) 9401 return ret; 9402 return nfs4_verify_back_channel_attrs(args, res); 9403 } 9404 9405 static void nfs4_update_session(struct nfs4_session *session, 9406 struct nfs41_create_session_res *res) 9407 { 9408 nfs4_copy_sessionid(&session->sess_id, &res->sessionid); 9409 /* Mark client id and session as being confirmed */ 9410 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; 9411 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state); 9412 session->flags = res->flags; 9413 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs)); 9414 if (res->flags & SESSION4_BACK_CHAN) 9415 memcpy(&session->bc_attrs, &res->bc_attrs, 9416 sizeof(session->bc_attrs)); 9417 } 9418 9419 static int _nfs4_proc_create_session(struct nfs_client *clp, 9420 const struct cred *cred) 9421 { 9422 struct nfs4_session *session = clp->cl_session; 9423 struct nfs41_create_session_args args = { 9424 .client = clp, 9425 .clientid = clp->cl_clientid, 9426 .seqid = clp->cl_seqid, 9427 .cb_program = NFS4_CALLBACK, 9428 }; 9429 struct nfs41_create_session_res res; 9430 9431 struct rpc_message msg = { 9432 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION], 9433 .rpc_argp = &args, 9434 .rpc_resp = &res, 9435 .rpc_cred = cred, 9436 }; 9437 int status; 9438 9439 nfs4_init_channel_attrs(&args, clp->cl_rpcclient); 9440 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN); 9441 9442 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 9443 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 9444 trace_nfs4_create_session(clp, status); 9445 9446 switch (status) { 9447 case -NFS4ERR_STALE_CLIENTID: 9448 case -NFS4ERR_DELAY: 9449 case -ETIMEDOUT: 9450 case -EACCES: 9451 case -EAGAIN: 9452 goto out; 9453 } 9454 9455 clp->cl_seqid++; 9456 if (!status) { 9457 /* Verify the session's negotiated channel_attrs values */ 9458 status = nfs4_verify_channel_attrs(&args, &res); 9459 /* Increment the clientid slot sequence id */ 9460 if (status) 9461 goto out; 9462 nfs4_update_session(session, &res); 9463 } 9464 out: 9465 return status; 9466 } 9467 9468 /* 9469 * Issues a CREATE_SESSION operation to the server. 9470 * It is the responsibility of the caller to verify the session is 9471 * expired before calling this routine. 9472 */ 9473 int nfs4_proc_create_session(struct nfs_client *clp, const struct cred *cred) 9474 { 9475 int status; 9476 unsigned *ptr; 9477 struct nfs4_session *session = clp->cl_session; 9478 struct nfs4_add_xprt_data xprtdata = { 9479 .clp = clp, 9480 }; 9481 struct rpc_add_xprt_test rpcdata = { 9482 .add_xprt_test = clp->cl_mvops->session_trunk, 9483 .data = &xprtdata, 9484 }; 9485 9486 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); 9487 9488 status = _nfs4_proc_create_session(clp, cred); 9489 if (status) 9490 goto out; 9491 9492 /* Init or reset the session slot tables */ 9493 status = nfs4_setup_session_slot_tables(session); 9494 dprintk("slot table setup returned %d\n", status); 9495 if (status) 9496 goto out; 9497 9498 ptr = (unsigned *)&session->sess_id.data[0]; 9499 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__, 9500 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]); 9501 rpc_clnt_probe_trunked_xprts(clp->cl_rpcclient, &rpcdata); 9502 out: 9503 return status; 9504 } 9505 9506 /* 9507 * Issue the over-the-wire RPC DESTROY_SESSION. 9508 * The caller must serialize access to this routine. 9509 */ 9510 int nfs4_proc_destroy_session(struct nfs4_session *session, 9511 const struct cred *cred) 9512 { 9513 struct rpc_message msg = { 9514 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION], 9515 .rpc_argp = session, 9516 .rpc_cred = cred, 9517 }; 9518 int status = 0; 9519 9520 /* session is still being setup */ 9521 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state)) 9522 return 0; 9523 9524 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 9525 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 9526 trace_nfs4_destroy_session(session->clp, status); 9527 9528 if (status) 9529 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. " 9530 "Session has been destroyed regardless...\n", status); 9531 rpc_clnt_manage_trunked_xprts(session->clp->cl_rpcclient); 9532 return status; 9533 } 9534 9535 /* 9536 * Renew the cl_session lease. 9537 */ 9538 struct nfs4_sequence_data { 9539 struct nfs_client *clp; 9540 struct nfs4_sequence_args args; 9541 struct nfs4_sequence_res res; 9542 }; 9543 9544 static void nfs41_sequence_release(void *data) 9545 { 9546 struct nfs4_sequence_data *calldata = data; 9547 struct nfs_client *clp = calldata->clp; 9548 9549 if (refcount_read(&clp->cl_count) > 1) 9550 nfs4_schedule_state_renewal(clp); 9551 nfs_put_client(clp); 9552 kfree(calldata); 9553 } 9554 9555 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp) 9556 { 9557 switch(task->tk_status) { 9558 case -NFS4ERR_DELAY: 9559 rpc_delay(task, NFS4_POLL_RETRY_MAX); 9560 return -EAGAIN; 9561 default: 9562 nfs4_schedule_lease_recovery(clp); 9563 } 9564 return 0; 9565 } 9566 9567 static void nfs41_sequence_call_done(struct rpc_task *task, void *data) 9568 { 9569 struct nfs4_sequence_data *calldata = data; 9570 struct nfs_client *clp = calldata->clp; 9571 9572 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp)) 9573 return; 9574 9575 trace_nfs4_sequence(clp, task->tk_status); 9576 if (task->tk_status < 0 && !task->tk_client->cl_shutdown) { 9577 dprintk("%s ERROR %d\n", __func__, task->tk_status); 9578 if (refcount_read(&clp->cl_count) == 1) 9579 return; 9580 9581 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) { 9582 rpc_restart_call_prepare(task); 9583 return; 9584 } 9585 } 9586 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); 9587 } 9588 9589 static void nfs41_sequence_prepare(struct rpc_task *task, void *data) 9590 { 9591 struct nfs4_sequence_data *calldata = data; 9592 struct nfs_client *clp = calldata->clp; 9593 struct nfs4_sequence_args *args; 9594 struct nfs4_sequence_res *res; 9595 9596 args = task->tk_msg.rpc_argp; 9597 res = task->tk_msg.rpc_resp; 9598 9599 nfs4_setup_sequence(clp, args, res, task); 9600 } 9601 9602 static const struct rpc_call_ops nfs41_sequence_ops = { 9603 .rpc_call_done = nfs41_sequence_call_done, 9604 .rpc_call_prepare = nfs41_sequence_prepare, 9605 .rpc_release = nfs41_sequence_release, 9606 }; 9607 9608 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, 9609 const struct cred *cred, 9610 struct nfs4_slot *slot, 9611 bool is_privileged) 9612 { 9613 struct nfs4_sequence_data *calldata; 9614 struct rpc_message msg = { 9615 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE], 9616 .rpc_cred = cred, 9617 }; 9618 struct rpc_task_setup task_setup_data = { 9619 .rpc_client = clp->cl_rpcclient, 9620 .rpc_message = &msg, 9621 .callback_ops = &nfs41_sequence_ops, 9622 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT | RPC_TASK_MOVEABLE, 9623 }; 9624 struct rpc_task *ret; 9625 9626 ret = ERR_PTR(-EIO); 9627 if (!refcount_inc_not_zero(&clp->cl_count)) 9628 goto out_err; 9629 9630 ret = ERR_PTR(-ENOMEM); 9631 calldata = kzalloc(sizeof(*calldata), GFP_KERNEL); 9632 if (calldata == NULL) 9633 goto out_put_clp; 9634 nfs4_init_sequence(&calldata->args, &calldata->res, 0, is_privileged); 9635 nfs4_sequence_attach_slot(&calldata->args, &calldata->res, slot); 9636 msg.rpc_argp = &calldata->args; 9637 msg.rpc_resp = &calldata->res; 9638 calldata->clp = clp; 9639 task_setup_data.callback_data = calldata; 9640 9641 ret = rpc_run_task(&task_setup_data); 9642 if (IS_ERR(ret)) 9643 goto out_err; 9644 return ret; 9645 out_put_clp: 9646 nfs_put_client(clp); 9647 out_err: 9648 nfs41_release_slot(slot); 9649 return ret; 9650 } 9651 9652 static int nfs41_proc_async_sequence(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags) 9653 { 9654 struct rpc_task *task; 9655 int ret = 0; 9656 9657 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) 9658 return -EAGAIN; 9659 task = _nfs41_proc_sequence(clp, cred, NULL, false); 9660 if (IS_ERR(task)) 9661 ret = PTR_ERR(task); 9662 else 9663 rpc_put_task_async(task); 9664 dprintk("<-- %s status=%d\n", __func__, ret); 9665 return ret; 9666 } 9667 9668 static int nfs4_proc_sequence(struct nfs_client *clp, const struct cred *cred) 9669 { 9670 struct rpc_task *task; 9671 int ret; 9672 9673 task = _nfs41_proc_sequence(clp, cred, NULL, true); 9674 if (IS_ERR(task)) { 9675 ret = PTR_ERR(task); 9676 goto out; 9677 } 9678 ret = rpc_wait_for_completion_task(task); 9679 if (!ret) 9680 ret = task->tk_status; 9681 rpc_put_task(task); 9682 out: 9683 dprintk("<-- %s status=%d\n", __func__, ret); 9684 return ret; 9685 } 9686 9687 struct nfs4_reclaim_complete_data { 9688 struct nfs_client *clp; 9689 struct nfs41_reclaim_complete_args arg; 9690 struct nfs41_reclaim_complete_res res; 9691 }; 9692 9693 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data) 9694 { 9695 struct nfs4_reclaim_complete_data *calldata = data; 9696 9697 nfs4_setup_sequence(calldata->clp, 9698 &calldata->arg.seq_args, 9699 &calldata->res.seq_res, 9700 task); 9701 } 9702 9703 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp) 9704 { 9705 switch(task->tk_status) { 9706 case 0: 9707 wake_up_all(&clp->cl_lock_waitq); 9708 fallthrough; 9709 case -NFS4ERR_COMPLETE_ALREADY: 9710 case -NFS4ERR_WRONG_CRED: /* What to do here? */ 9711 break; 9712 case -NFS4ERR_DELAY: 9713 rpc_delay(task, NFS4_POLL_RETRY_MAX); 9714 fallthrough; 9715 case -NFS4ERR_RETRY_UNCACHED_REP: 9716 case -EACCES: 9717 dprintk("%s: failed to reclaim complete error %d for server %s, retrying\n", 9718 __func__, task->tk_status, clp->cl_hostname); 9719 return -EAGAIN; 9720 case -NFS4ERR_BADSESSION: 9721 case -NFS4ERR_DEADSESSION: 9722 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 9723 break; 9724 default: 9725 nfs4_schedule_lease_recovery(clp); 9726 } 9727 return 0; 9728 } 9729 9730 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data) 9731 { 9732 struct nfs4_reclaim_complete_data *calldata = data; 9733 struct nfs_client *clp = calldata->clp; 9734 struct nfs4_sequence_res *res = &calldata->res.seq_res; 9735 9736 if (!nfs41_sequence_done(task, res)) 9737 return; 9738 9739 trace_nfs4_reclaim_complete(clp, task->tk_status); 9740 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) { 9741 rpc_restart_call_prepare(task); 9742 return; 9743 } 9744 } 9745 9746 static void nfs4_free_reclaim_complete_data(void *data) 9747 { 9748 struct nfs4_reclaim_complete_data *calldata = data; 9749 9750 kfree(calldata); 9751 } 9752 9753 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = { 9754 .rpc_call_prepare = nfs4_reclaim_complete_prepare, 9755 .rpc_call_done = nfs4_reclaim_complete_done, 9756 .rpc_release = nfs4_free_reclaim_complete_data, 9757 }; 9758 9759 /* 9760 * Issue a global reclaim complete. 9761 */ 9762 static int nfs41_proc_reclaim_complete(struct nfs_client *clp, 9763 const struct cred *cred) 9764 { 9765 struct nfs4_reclaim_complete_data *calldata; 9766 struct rpc_message msg = { 9767 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE], 9768 .rpc_cred = cred, 9769 }; 9770 struct rpc_task_setup task_setup_data = { 9771 .rpc_client = clp->cl_rpcclient, 9772 .rpc_message = &msg, 9773 .callback_ops = &nfs4_reclaim_complete_call_ops, 9774 .flags = RPC_TASK_NO_ROUND_ROBIN, 9775 }; 9776 int status = -ENOMEM; 9777 9778 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 9779 if (calldata == NULL) 9780 goto out; 9781 calldata->clp = clp; 9782 calldata->arg.one_fs = 0; 9783 9784 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0, 1); 9785 msg.rpc_argp = &calldata->arg; 9786 msg.rpc_resp = &calldata->res; 9787 task_setup_data.callback_data = calldata; 9788 status = nfs4_call_sync_custom(&task_setup_data); 9789 out: 9790 dprintk("<-- %s status=%d\n", __func__, status); 9791 return status; 9792 } 9793 9794 static void 9795 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata) 9796 { 9797 struct nfs4_layoutget *lgp = calldata; 9798 struct nfs_server *server = NFS_SERVER(lgp->args.inode); 9799 9800 nfs4_setup_sequence(server->nfs_client, &lgp->args.seq_args, 9801 &lgp->res.seq_res, task); 9802 } 9803 9804 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) 9805 { 9806 struct nfs4_layoutget *lgp = calldata; 9807 9808 nfs41_sequence_process(task, &lgp->res.seq_res); 9809 } 9810 9811 static int 9812 nfs4_layoutget_handle_exception(struct rpc_task *task, 9813 struct nfs4_layoutget *lgp, struct nfs4_exception *exception) 9814 { 9815 struct inode *inode = lgp->args.inode; 9816 struct nfs_server *server = NFS_SERVER(inode); 9817 struct pnfs_layout_hdr *lo = lgp->lo; 9818 int nfs4err = task->tk_status; 9819 int err, status = 0; 9820 LIST_HEAD(head); 9821 9822 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status); 9823 9824 nfs4_sequence_free_slot(&lgp->res.seq_res); 9825 9826 exception->state = NULL; 9827 exception->stateid = NULL; 9828 9829 switch (nfs4err) { 9830 case 0: 9831 goto out; 9832 9833 /* 9834 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs 9835 * on the file. set tk_status to -ENODATA to tell upper layer to 9836 * retry go inband. 9837 */ 9838 case -NFS4ERR_LAYOUTUNAVAILABLE: 9839 status = -ENODATA; 9840 goto out; 9841 /* 9842 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of 9843 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3). 9844 */ 9845 case -NFS4ERR_BADLAYOUT: 9846 status = -EOVERFLOW; 9847 goto out; 9848 /* 9849 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client 9850 * (or clients) writing to the same RAID stripe except when 9851 * the minlength argument is 0 (see RFC5661 section 18.43.3). 9852 * 9853 * Treat it like we would RECALLCONFLICT -- we retry for a little 9854 * while, and then eventually give up. 9855 */ 9856 case -NFS4ERR_LAYOUTTRYLATER: 9857 if (lgp->args.minlength == 0) { 9858 status = -EOVERFLOW; 9859 goto out; 9860 } 9861 status = -EBUSY; 9862 break; 9863 case -NFS4ERR_RECALLCONFLICT: 9864 case -NFS4ERR_RETURNCONFLICT: 9865 status = -ERECALLCONFLICT; 9866 break; 9867 case -NFS4ERR_DELEG_REVOKED: 9868 case -NFS4ERR_ADMIN_REVOKED: 9869 case -NFS4ERR_EXPIRED: 9870 case -NFS4ERR_BAD_STATEID: 9871 exception->timeout = 0; 9872 spin_lock(&inode->i_lock); 9873 /* If the open stateid was bad, then recover it. */ 9874 if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) || 9875 !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) { 9876 spin_unlock(&inode->i_lock); 9877 exception->state = lgp->args.ctx->state; 9878 exception->stateid = &lgp->args.stateid; 9879 break; 9880 } 9881 9882 /* 9883 * Mark the bad layout state as invalid, then retry 9884 */ 9885 pnfs_mark_layout_stateid_invalid(lo, &head); 9886 spin_unlock(&inode->i_lock); 9887 nfs_commit_inode(inode, 0); 9888 pnfs_free_lseg_list(&head); 9889 status = -EAGAIN; 9890 goto out; 9891 } 9892 9893 err = nfs4_handle_exception(server, nfs4err, exception); 9894 if (!status) { 9895 if (exception->retry) 9896 status = -EAGAIN; 9897 else 9898 status = err; 9899 } 9900 out: 9901 return status; 9902 } 9903 9904 size_t max_response_pages(struct nfs_server *server) 9905 { 9906 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 9907 return nfs_page_array_len(0, max_resp_sz); 9908 } 9909 9910 static void nfs4_layoutget_release(void *calldata) 9911 { 9912 struct nfs4_layoutget *lgp = calldata; 9913 9914 nfs4_sequence_free_slot(&lgp->res.seq_res); 9915 pnfs_layoutget_free(lgp); 9916 } 9917 9918 static const struct rpc_call_ops nfs4_layoutget_call_ops = { 9919 .rpc_call_prepare = nfs4_layoutget_prepare, 9920 .rpc_call_done = nfs4_layoutget_done, 9921 .rpc_release = nfs4_layoutget_release, 9922 }; 9923 9924 struct pnfs_layout_segment * 9925 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, 9926 struct nfs4_exception *exception) 9927 { 9928 struct inode *inode = lgp->args.inode; 9929 struct nfs_server *server = NFS_SERVER(inode); 9930 struct rpc_task *task; 9931 struct rpc_message msg = { 9932 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET], 9933 .rpc_argp = &lgp->args, 9934 .rpc_resp = &lgp->res, 9935 .rpc_cred = lgp->cred, 9936 }; 9937 struct rpc_task_setup task_setup_data = { 9938 .rpc_client = server->client, 9939 .rpc_message = &msg, 9940 .callback_ops = &nfs4_layoutget_call_ops, 9941 .callback_data = lgp, 9942 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF | 9943 RPC_TASK_MOVEABLE, 9944 }; 9945 struct pnfs_layout_segment *lseg = NULL; 9946 int status = 0; 9947 9948 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0); 9949 exception->retry = 0; 9950 9951 task = rpc_run_task(&task_setup_data); 9952 if (IS_ERR(task)) 9953 return ERR_CAST(task); 9954 9955 status = rpc_wait_for_completion_task(task); 9956 if (status != 0) 9957 goto out; 9958 9959 if (task->tk_status < 0) { 9960 exception->retry = 1; 9961 status = nfs4_layoutget_handle_exception(task, lgp, exception); 9962 } else if (lgp->res.layoutp->len == 0) { 9963 exception->retry = 1; 9964 status = -EAGAIN; 9965 nfs4_update_delay(&exception->timeout); 9966 } else 9967 lseg = pnfs_layout_process(lgp); 9968 out: 9969 trace_nfs4_layoutget(lgp->args.ctx, 9970 &lgp->args.range, 9971 &lgp->res.range, 9972 &lgp->res.stateid, 9973 status); 9974 9975 rpc_put_task(task); 9976 dprintk("<-- %s status=%d\n", __func__, status); 9977 if (status) 9978 return ERR_PTR(status); 9979 return lseg; 9980 } 9981 9982 static void 9983 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) 9984 { 9985 struct nfs4_layoutreturn *lrp = calldata; 9986 9987 nfs4_setup_sequence(lrp->clp, 9988 &lrp->args.seq_args, 9989 &lrp->res.seq_res, 9990 task); 9991 if (!pnfs_layout_is_valid(lrp->args.layout)) 9992 rpc_exit(task, 0); 9993 } 9994 9995 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) 9996 { 9997 struct nfs4_layoutreturn *lrp = calldata; 9998 struct nfs_server *server; 9999 10000 if (!nfs41_sequence_process(task, &lrp->res.seq_res)) 10001 return; 10002 10003 if (task->tk_rpc_status == -ETIMEDOUT) { 10004 lrp->rpc_status = -EAGAIN; 10005 lrp->res.lrs_present = 0; 10006 return; 10007 } 10008 /* 10009 * Was there an RPC level error? Assume the call succeeded, 10010 * and that we need to release the layout 10011 */ 10012 if (task->tk_rpc_status != 0 && RPC_WAS_SENT(task)) { 10013 lrp->res.lrs_present = 0; 10014 return; 10015 } 10016 10017 server = NFS_SERVER(lrp->args.inode); 10018 switch (task->tk_status) { 10019 case -NFS4ERR_OLD_STATEID: 10020 if (nfs4_layout_refresh_old_stateid(&lrp->args.stateid, 10021 &lrp->args.range, 10022 lrp->args.inode)) 10023 goto out_restart; 10024 fallthrough; 10025 default: 10026 task->tk_status = 0; 10027 lrp->res.lrs_present = 0; 10028 fallthrough; 10029 case 0: 10030 break; 10031 case -NFS4ERR_BADSESSION: 10032 case -NFS4ERR_DEADSESSION: 10033 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 10034 nfs4_schedule_session_recovery(server->nfs_client->cl_session, 10035 task->tk_status); 10036 lrp->res.lrs_present = 0; 10037 lrp->rpc_status = -EAGAIN; 10038 task->tk_status = 0; 10039 break; 10040 case -NFS4ERR_DELAY: 10041 if (nfs4_async_handle_error(task, server, NULL, NULL) == 10042 -EAGAIN) 10043 goto out_restart; 10044 lrp->res.lrs_present = 0; 10045 break; 10046 } 10047 return; 10048 out_restart: 10049 task->tk_status = 0; 10050 nfs4_sequence_free_slot(&lrp->res.seq_res); 10051 rpc_restart_call_prepare(task); 10052 } 10053 10054 static void nfs4_layoutreturn_release(void *calldata) 10055 { 10056 struct nfs4_layoutreturn *lrp = calldata; 10057 struct pnfs_layout_hdr *lo = lrp->args.layout; 10058 10059 if (lrp->rpc_status == 0 || !lrp->inode) 10060 pnfs_layoutreturn_free_lsegs( 10061 lo, &lrp->args.stateid, &lrp->args.range, 10062 lrp->res.lrs_present ? &lrp->res.stateid : NULL); 10063 else 10064 pnfs_layoutreturn_retry_later(lo, &lrp->args.stateid, 10065 &lrp->args.range); 10066 nfs4_sequence_free_slot(&lrp->res.seq_res); 10067 if (lrp->ld_private.ops && lrp->ld_private.ops->free) 10068 lrp->ld_private.ops->free(&lrp->ld_private); 10069 pnfs_put_layout_hdr(lrp->args.layout); 10070 nfs_iput_and_deactive(lrp->inode); 10071 put_cred(lrp->cred); 10072 kfree(calldata); 10073 } 10074 10075 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = { 10076 .rpc_call_prepare = nfs4_layoutreturn_prepare, 10077 .rpc_call_done = nfs4_layoutreturn_done, 10078 .rpc_release = nfs4_layoutreturn_release, 10079 }; 10080 10081 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, unsigned int flags) 10082 { 10083 struct rpc_task *task; 10084 struct rpc_message msg = { 10085 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN], 10086 .rpc_argp = &lrp->args, 10087 .rpc_resp = &lrp->res, 10088 .rpc_cred = lrp->cred, 10089 }; 10090 struct rpc_task_setup task_setup_data = { 10091 .rpc_client = NFS_SERVER(lrp->args.inode)->client, 10092 .rpc_message = &msg, 10093 .callback_ops = &nfs4_layoutreturn_call_ops, 10094 .callback_data = lrp, 10095 .flags = RPC_TASK_MOVEABLE, 10096 }; 10097 int status = 0; 10098 10099 nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client, 10100 NFS_SP4_MACH_CRED_PNFS_CLEANUP, 10101 &task_setup_data.rpc_client, &msg); 10102 10103 lrp->inode = nfs_igrab_and_active(lrp->args.inode); 10104 if (flags & PNFS_FL_LAYOUTRETURN_ASYNC) { 10105 if (!lrp->inode) { 10106 nfs4_layoutreturn_release(lrp); 10107 return -EAGAIN; 10108 } 10109 task_setup_data.flags |= RPC_TASK_ASYNC; 10110 } 10111 if (!lrp->inode) 10112 flags |= PNFS_FL_LAYOUTRETURN_PRIVILEGED; 10113 if (flags & PNFS_FL_LAYOUTRETURN_PRIVILEGED) 10114 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1, 10115 1); 10116 else 10117 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1, 10118 0); 10119 task = rpc_run_task(&task_setup_data); 10120 if (IS_ERR(task)) 10121 return PTR_ERR(task); 10122 if (!(flags & PNFS_FL_LAYOUTRETURN_ASYNC)) 10123 status = task->tk_status; 10124 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status); 10125 dprintk("<-- %s status=%d\n", __func__, status); 10126 rpc_put_task(task); 10127 return status; 10128 } 10129 10130 static int 10131 _nfs4_proc_getdeviceinfo(struct nfs_server *server, 10132 struct pnfs_device *pdev, 10133 const struct cred *cred) 10134 { 10135 struct nfs4_getdeviceinfo_args args = { 10136 .pdev = pdev, 10137 .notify_types = NOTIFY_DEVICEID4_CHANGE | 10138 NOTIFY_DEVICEID4_DELETE, 10139 }; 10140 struct nfs4_getdeviceinfo_res res = { 10141 .pdev = pdev, 10142 }; 10143 struct rpc_message msg = { 10144 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO], 10145 .rpc_argp = &args, 10146 .rpc_resp = &res, 10147 .rpc_cred = cred, 10148 }; 10149 int status; 10150 10151 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 10152 if (res.notification & ~args.notify_types) 10153 dprintk("%s: unsupported notification\n", __func__); 10154 if (res.notification != args.notify_types) 10155 pdev->nocache = 1; 10156 10157 trace_nfs4_getdeviceinfo(server, &pdev->dev_id, status); 10158 10159 dprintk("<-- %s status=%d\n", __func__, status); 10160 10161 return status; 10162 } 10163 10164 int nfs4_proc_getdeviceinfo(struct nfs_server *server, 10165 struct pnfs_device *pdev, 10166 const struct cred *cred) 10167 { 10168 struct nfs4_exception exception = { }; 10169 int err; 10170 10171 do { 10172 err = nfs4_handle_exception(server, 10173 _nfs4_proc_getdeviceinfo(server, pdev, cred), 10174 &exception); 10175 } while (exception.retry); 10176 return err; 10177 } 10178 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo); 10179 10180 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata) 10181 { 10182 struct nfs4_layoutcommit_data *data = calldata; 10183 struct nfs_server *server = NFS_SERVER(data->args.inode); 10184 10185 nfs4_setup_sequence(server->nfs_client, 10186 &data->args.seq_args, 10187 &data->res.seq_res, 10188 task); 10189 } 10190 10191 static void 10192 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata) 10193 { 10194 struct nfs4_layoutcommit_data *data = calldata; 10195 struct nfs_server *server = NFS_SERVER(data->args.inode); 10196 10197 if (!nfs41_sequence_done(task, &data->res.seq_res)) 10198 return; 10199 10200 switch (task->tk_status) { /* Just ignore these failures */ 10201 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */ 10202 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */ 10203 case -NFS4ERR_BADLAYOUT: /* no layout */ 10204 case -NFS4ERR_GRACE: /* loca_recalim always false */ 10205 task->tk_status = 0; 10206 break; 10207 case 0: 10208 break; 10209 default: 10210 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) { 10211 rpc_restart_call_prepare(task); 10212 return; 10213 } 10214 } 10215 } 10216 10217 static void nfs4_layoutcommit_release(void *calldata) 10218 { 10219 struct nfs4_layoutcommit_data *data = calldata; 10220 10221 pnfs_cleanup_layoutcommit(data); 10222 nfs_post_op_update_inode_force_wcc(data->args.inode, 10223 data->res.fattr); 10224 put_cred(data->cred); 10225 nfs_iput_and_deactive(data->inode); 10226 kfree(data); 10227 } 10228 10229 static const struct rpc_call_ops nfs4_layoutcommit_ops = { 10230 .rpc_call_prepare = nfs4_layoutcommit_prepare, 10231 .rpc_call_done = nfs4_layoutcommit_done, 10232 .rpc_release = nfs4_layoutcommit_release, 10233 }; 10234 10235 int 10236 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync) 10237 { 10238 struct rpc_message msg = { 10239 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT], 10240 .rpc_argp = &data->args, 10241 .rpc_resp = &data->res, 10242 .rpc_cred = data->cred, 10243 }; 10244 struct rpc_task_setup task_setup_data = { 10245 .task = &data->task, 10246 .rpc_client = NFS_CLIENT(data->args.inode), 10247 .rpc_message = &msg, 10248 .callback_ops = &nfs4_layoutcommit_ops, 10249 .callback_data = data, 10250 .flags = RPC_TASK_MOVEABLE, 10251 }; 10252 struct rpc_task *task; 10253 int status = 0; 10254 10255 dprintk("NFS: initiating layoutcommit call. sync %d " 10256 "lbw: %llu inode %lu\n", sync, 10257 data->args.lastbytewritten, 10258 data->args.inode->i_ino); 10259 10260 if (!sync) { 10261 data->inode = nfs_igrab_and_active(data->args.inode); 10262 if (data->inode == NULL) { 10263 nfs4_layoutcommit_release(data); 10264 return -EAGAIN; 10265 } 10266 task_setup_data.flags = RPC_TASK_ASYNC; 10267 } 10268 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0); 10269 task = rpc_run_task(&task_setup_data); 10270 if (IS_ERR(task)) 10271 return PTR_ERR(task); 10272 if (sync) 10273 status = task->tk_status; 10274 trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status); 10275 dprintk("%s: status %d\n", __func__, status); 10276 rpc_put_task(task); 10277 return status; 10278 } 10279 10280 /* 10281 * Use the state managment nfs_client cl_rpcclient, which uses krb5i (if 10282 * possible) as per RFC3530bis and RFC5661 Security Considerations sections 10283 */ 10284 static int 10285 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, 10286 struct nfs_fsinfo *info, 10287 struct nfs4_secinfo_flavors *flavors, bool use_integrity) 10288 { 10289 struct nfs41_secinfo_no_name_args args = { 10290 .style = SECINFO_STYLE_CURRENT_FH, 10291 }; 10292 struct nfs4_secinfo_res res = { 10293 .flavors = flavors, 10294 }; 10295 struct rpc_message msg = { 10296 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME], 10297 .rpc_argp = &args, 10298 .rpc_resp = &res, 10299 }; 10300 struct nfs4_call_sync_data data = { 10301 .seq_server = server, 10302 .seq_args = &args.seq_args, 10303 .seq_res = &res.seq_res, 10304 }; 10305 struct rpc_task_setup task_setup = { 10306 .rpc_client = server->client, 10307 .rpc_message = &msg, 10308 .callback_ops = server->nfs_client->cl_mvops->call_sync_ops, 10309 .callback_data = &data, 10310 .flags = RPC_TASK_NO_ROUND_ROBIN, 10311 }; 10312 const struct cred *cred = NULL; 10313 int status; 10314 10315 if (use_integrity) { 10316 task_setup.rpc_client = server->nfs_client->cl_rpcclient; 10317 10318 cred = nfs4_get_clid_cred(server->nfs_client); 10319 msg.rpc_cred = cred; 10320 } 10321 10322 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 10323 status = nfs4_call_sync_custom(&task_setup); 10324 dprintk("<-- %s status=%d\n", __func__, status); 10325 10326 put_cred(cred); 10327 10328 return status; 10329 } 10330 10331 static int 10332 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, 10333 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors) 10334 { 10335 struct nfs4_exception exception = { 10336 .interruptible = true, 10337 }; 10338 int err; 10339 do { 10340 /* first try using integrity protection */ 10341 err = -NFS4ERR_WRONGSEC; 10342 10343 /* try to use integrity protection with machine cred */ 10344 if (_nfs4_is_integrity_protected(server->nfs_client)) 10345 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, 10346 flavors, true); 10347 10348 /* 10349 * if unable to use integrity protection, or SECINFO with 10350 * integrity protection returns NFS4ERR_WRONGSEC (which is 10351 * disallowed by spec, but exists in deployed servers) use 10352 * the current filesystem's rpc_client and the user cred. 10353 */ 10354 if (err == -NFS4ERR_WRONGSEC) 10355 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, 10356 flavors, false); 10357 10358 switch (err) { 10359 case 0: 10360 case -NFS4ERR_WRONGSEC: 10361 case -ENOTSUPP: 10362 goto out; 10363 default: 10364 err = nfs4_handle_exception(server, err, &exception); 10365 } 10366 } while (exception.retry); 10367 out: 10368 return err; 10369 } 10370 10371 static int 10372 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 10373 struct nfs_fsinfo *info) 10374 { 10375 int err; 10376 struct page *page; 10377 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR; 10378 struct nfs4_secinfo_flavors *flavors; 10379 struct nfs4_secinfo4 *secinfo; 10380 int i; 10381 10382 page = alloc_page(GFP_KERNEL); 10383 if (!page) { 10384 err = -ENOMEM; 10385 goto out; 10386 } 10387 10388 flavors = page_address(page); 10389 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors); 10390 10391 /* 10392 * Fall back on "guess and check" method if 10393 * the server doesn't support SECINFO_NO_NAME 10394 */ 10395 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) { 10396 err = nfs4_find_root_sec(server, fhandle, info); 10397 goto out_freepage; 10398 } 10399 if (err) 10400 goto out_freepage; 10401 10402 for (i = 0; i < flavors->num_flavors; i++) { 10403 secinfo = &flavors->flavors[i]; 10404 10405 switch (secinfo->flavor) { 10406 case RPC_AUTH_NULL: 10407 case RPC_AUTH_UNIX: 10408 case RPC_AUTH_GSS: 10409 flavor = rpcauth_get_pseudoflavor(secinfo->flavor, 10410 &secinfo->flavor_info); 10411 break; 10412 default: 10413 flavor = RPC_AUTH_MAXFLAVOR; 10414 break; 10415 } 10416 10417 if (!nfs_auth_info_match(&server->auth_info, flavor)) 10418 flavor = RPC_AUTH_MAXFLAVOR; 10419 10420 if (flavor != RPC_AUTH_MAXFLAVOR) { 10421 err = nfs4_lookup_root_sec(server, fhandle, 10422 info, flavor); 10423 if (!err) 10424 break; 10425 } 10426 } 10427 10428 if (flavor == RPC_AUTH_MAXFLAVOR) 10429 err = -EPERM; 10430 10431 out_freepage: 10432 put_page(page); 10433 if (err == -EACCES) 10434 return -EPERM; 10435 out: 10436 return err; 10437 } 10438 10439 static int _nfs41_test_stateid(struct nfs_server *server, 10440 const nfs4_stateid *stateid, 10441 const struct cred *cred) 10442 { 10443 int status; 10444 struct nfs41_test_stateid_args args = { 10445 .stateid = *stateid, 10446 }; 10447 struct nfs41_test_stateid_res res; 10448 struct rpc_message msg = { 10449 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID], 10450 .rpc_argp = &args, 10451 .rpc_resp = &res, 10452 .rpc_cred = cred, 10453 }; 10454 struct rpc_clnt *rpc_client = server->client; 10455 10456 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 10457 &rpc_client, &msg); 10458 10459 dprintk("NFS call test_stateid %p\n", stateid); 10460 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 10461 status = nfs4_call_sync_sequence(rpc_client, server, &msg, 10462 &args.seq_args, &res.seq_res); 10463 if (status != NFS_OK) { 10464 dprintk("NFS reply test_stateid: failed, %d\n", status); 10465 return status; 10466 } 10467 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status); 10468 return -res.status; 10469 } 10470 10471 static void nfs4_handle_delay_or_session_error(struct nfs_server *server, 10472 int err, struct nfs4_exception *exception) 10473 { 10474 exception->retry = 0; 10475 switch(err) { 10476 case -NFS4ERR_DELAY: 10477 case -NFS4ERR_RETRY_UNCACHED_REP: 10478 nfs4_handle_exception(server, err, exception); 10479 break; 10480 case -NFS4ERR_BADSESSION: 10481 case -NFS4ERR_BADSLOT: 10482 case -NFS4ERR_BAD_HIGH_SLOT: 10483 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 10484 case -NFS4ERR_DEADSESSION: 10485 nfs4_do_handle_exception(server, err, exception); 10486 } 10487 } 10488 10489 /** 10490 * nfs41_test_stateid - perform a TEST_STATEID operation 10491 * 10492 * @server: server / transport on which to perform the operation 10493 * @stateid: state ID to test 10494 * @cred: credential 10495 * 10496 * Returns NFS_OK if the server recognizes that "stateid" is valid. 10497 * Otherwise a negative NFS4ERR value is returned if the operation 10498 * failed or the state ID is not currently valid. 10499 */ 10500 static int nfs41_test_stateid(struct nfs_server *server, 10501 const nfs4_stateid *stateid, 10502 const struct cred *cred) 10503 { 10504 struct nfs4_exception exception = { 10505 .interruptible = true, 10506 }; 10507 int err; 10508 do { 10509 err = _nfs41_test_stateid(server, stateid, cred); 10510 nfs4_handle_delay_or_session_error(server, err, &exception); 10511 } while (exception.retry); 10512 return err; 10513 } 10514 10515 struct nfs_free_stateid_data { 10516 struct nfs_server *server; 10517 struct nfs41_free_stateid_args args; 10518 struct nfs41_free_stateid_res res; 10519 }; 10520 10521 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata) 10522 { 10523 struct nfs_free_stateid_data *data = calldata; 10524 nfs4_setup_sequence(data->server->nfs_client, 10525 &data->args.seq_args, 10526 &data->res.seq_res, 10527 task); 10528 } 10529 10530 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata) 10531 { 10532 struct nfs_free_stateid_data *data = calldata; 10533 10534 nfs41_sequence_done(task, &data->res.seq_res); 10535 10536 switch (task->tk_status) { 10537 case -NFS4ERR_DELAY: 10538 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN) 10539 rpc_restart_call_prepare(task); 10540 } 10541 } 10542 10543 static void nfs41_free_stateid_release(void *calldata) 10544 { 10545 struct nfs_free_stateid_data *data = calldata; 10546 struct nfs_client *clp = data->server->nfs_client; 10547 10548 nfs_put_client(clp); 10549 kfree(calldata); 10550 } 10551 10552 static const struct rpc_call_ops nfs41_free_stateid_ops = { 10553 .rpc_call_prepare = nfs41_free_stateid_prepare, 10554 .rpc_call_done = nfs41_free_stateid_done, 10555 .rpc_release = nfs41_free_stateid_release, 10556 }; 10557 10558 /** 10559 * nfs41_free_stateid - perform a FREE_STATEID operation 10560 * 10561 * @server: server / transport on which to perform the operation 10562 * @stateid: state ID to release 10563 * @cred: credential 10564 * @privileged: set to true if this call needs to be privileged 10565 * 10566 * Note: this function is always asynchronous. 10567 */ 10568 static int nfs41_free_stateid(struct nfs_server *server, 10569 const nfs4_stateid *stateid, 10570 const struct cred *cred, 10571 bool privileged) 10572 { 10573 struct rpc_message msg = { 10574 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID], 10575 .rpc_cred = cred, 10576 }; 10577 struct rpc_task_setup task_setup = { 10578 .rpc_client = server->client, 10579 .rpc_message = &msg, 10580 .callback_ops = &nfs41_free_stateid_ops, 10581 .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE, 10582 }; 10583 struct nfs_free_stateid_data *data; 10584 struct rpc_task *task; 10585 struct nfs_client *clp = server->nfs_client; 10586 10587 if (!refcount_inc_not_zero(&clp->cl_count)) 10588 return -EIO; 10589 10590 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 10591 &task_setup.rpc_client, &msg); 10592 10593 dprintk("NFS call free_stateid %p\n", stateid); 10594 data = kmalloc(sizeof(*data), GFP_KERNEL); 10595 if (!data) 10596 return -ENOMEM; 10597 data->server = server; 10598 nfs4_stateid_copy(&data->args.stateid, stateid); 10599 10600 task_setup.callback_data = data; 10601 10602 msg.rpc_argp = &data->args; 10603 msg.rpc_resp = &data->res; 10604 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, privileged); 10605 task = rpc_run_task(&task_setup); 10606 if (IS_ERR(task)) 10607 return PTR_ERR(task); 10608 rpc_put_task(task); 10609 return 0; 10610 } 10611 10612 static void 10613 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp) 10614 { 10615 const struct cred *cred = lsp->ls_state->owner->so_cred; 10616 10617 nfs41_free_stateid(server, &lsp->ls_stateid, cred, false); 10618 nfs4_free_lock_state(server, lsp); 10619 } 10620 10621 static bool nfs41_match_stateid(const nfs4_stateid *s1, 10622 const nfs4_stateid *s2) 10623 { 10624 if (s1->type != s2->type) 10625 return false; 10626 10627 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0) 10628 return false; 10629 10630 if (s1->seqid == s2->seqid) 10631 return true; 10632 10633 return s1->seqid == 0 || s2->seqid == 0; 10634 } 10635 10636 #endif /* CONFIG_NFS_V4_1 */ 10637 10638 static bool nfs4_match_stateid(const nfs4_stateid *s1, 10639 const nfs4_stateid *s2) 10640 { 10641 return nfs4_stateid_match(s1, s2); 10642 } 10643 10644 10645 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = { 10646 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 10647 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 10648 .recover_open = nfs4_open_reclaim, 10649 .recover_lock = nfs4_lock_reclaim, 10650 .establish_clid = nfs4_init_clientid, 10651 .detect_trunking = nfs40_discover_server_trunking, 10652 }; 10653 10654 #if defined(CONFIG_NFS_V4_1) 10655 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { 10656 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 10657 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 10658 .recover_open = nfs4_open_reclaim, 10659 .recover_lock = nfs4_lock_reclaim, 10660 .establish_clid = nfs41_init_clientid, 10661 .reclaim_complete = nfs41_proc_reclaim_complete, 10662 .detect_trunking = nfs41_discover_server_trunking, 10663 }; 10664 #endif /* CONFIG_NFS_V4_1 */ 10665 10666 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { 10667 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 10668 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 10669 .recover_open = nfs40_open_expired, 10670 .recover_lock = nfs4_lock_expired, 10671 .establish_clid = nfs4_init_clientid, 10672 }; 10673 10674 #if defined(CONFIG_NFS_V4_1) 10675 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = { 10676 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 10677 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 10678 .recover_open = nfs41_open_expired, 10679 .recover_lock = nfs41_lock_expired, 10680 .establish_clid = nfs41_init_clientid, 10681 }; 10682 #endif /* CONFIG_NFS_V4_1 */ 10683 10684 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = { 10685 .sched_state_renewal = nfs4_proc_async_renew, 10686 .get_state_renewal_cred = nfs4_get_renew_cred, 10687 .renew_lease = nfs4_proc_renew, 10688 }; 10689 10690 #if defined(CONFIG_NFS_V4_1) 10691 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = { 10692 .sched_state_renewal = nfs41_proc_async_sequence, 10693 .get_state_renewal_cred = nfs4_get_machine_cred, 10694 .renew_lease = nfs4_proc_sequence, 10695 }; 10696 #endif 10697 10698 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = { 10699 .get_locations = _nfs40_proc_get_locations, 10700 .fsid_present = _nfs40_proc_fsid_present, 10701 }; 10702 10703 #if defined(CONFIG_NFS_V4_1) 10704 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = { 10705 .get_locations = _nfs41_proc_get_locations, 10706 .fsid_present = _nfs41_proc_fsid_present, 10707 }; 10708 #endif /* CONFIG_NFS_V4_1 */ 10709 10710 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = { 10711 .minor_version = 0, 10712 .init_caps = NFS_CAP_READDIRPLUS 10713 | NFS_CAP_ATOMIC_OPEN 10714 | NFS_CAP_POSIX_LOCK, 10715 .init_client = nfs40_init_client, 10716 .shutdown_client = nfs40_shutdown_client, 10717 .match_stateid = nfs4_match_stateid, 10718 .find_root_sec = nfs4_find_root_sec, 10719 .free_lock_state = nfs4_release_lockowner, 10720 .test_and_free_expired = nfs40_test_and_free_expired_stateid, 10721 .alloc_seqid = nfs_alloc_seqid, 10722 .call_sync_ops = &nfs40_call_sync_ops, 10723 .reboot_recovery_ops = &nfs40_reboot_recovery_ops, 10724 .nograce_recovery_ops = &nfs40_nograce_recovery_ops, 10725 .state_renewal_ops = &nfs40_state_renewal_ops, 10726 .mig_recovery_ops = &nfs40_mig_recovery_ops, 10727 }; 10728 10729 #if defined(CONFIG_NFS_V4_1) 10730 static struct nfs_seqid * 10731 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2) 10732 { 10733 return NULL; 10734 } 10735 10736 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { 10737 .minor_version = 1, 10738 .init_caps = NFS_CAP_READDIRPLUS 10739 | NFS_CAP_ATOMIC_OPEN 10740 | NFS_CAP_POSIX_LOCK 10741 | NFS_CAP_STATEID_NFSV41 10742 | NFS_CAP_ATOMIC_OPEN_V1 10743 | NFS_CAP_LGOPEN 10744 | NFS_CAP_MOVEABLE, 10745 .init_client = nfs41_init_client, 10746 .shutdown_client = nfs41_shutdown_client, 10747 .match_stateid = nfs41_match_stateid, 10748 .find_root_sec = nfs41_find_root_sec, 10749 .free_lock_state = nfs41_free_lock_state, 10750 .test_and_free_expired = nfs41_test_and_free_expired_stateid, 10751 .alloc_seqid = nfs_alloc_no_seqid, 10752 .session_trunk = nfs4_test_session_trunk, 10753 .call_sync_ops = &nfs41_call_sync_ops, 10754 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 10755 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 10756 .state_renewal_ops = &nfs41_state_renewal_ops, 10757 .mig_recovery_ops = &nfs41_mig_recovery_ops, 10758 }; 10759 #endif 10760 10761 #if defined(CONFIG_NFS_V4_2) 10762 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = { 10763 .minor_version = 2, 10764 .init_caps = NFS_CAP_READDIRPLUS 10765 | NFS_CAP_ATOMIC_OPEN 10766 | NFS_CAP_POSIX_LOCK 10767 | NFS_CAP_STATEID_NFSV41 10768 | NFS_CAP_ATOMIC_OPEN_V1 10769 | NFS_CAP_LGOPEN 10770 | NFS_CAP_ALLOCATE 10771 | NFS_CAP_COPY 10772 | NFS_CAP_OFFLOAD_CANCEL 10773 | NFS_CAP_COPY_NOTIFY 10774 | NFS_CAP_DEALLOCATE 10775 | NFS_CAP_SEEK 10776 | NFS_CAP_LAYOUTSTATS 10777 | NFS_CAP_CLONE 10778 | NFS_CAP_LAYOUTERROR 10779 | NFS_CAP_READ_PLUS 10780 | NFS_CAP_MOVEABLE, 10781 .init_client = nfs41_init_client, 10782 .shutdown_client = nfs41_shutdown_client, 10783 .match_stateid = nfs41_match_stateid, 10784 .find_root_sec = nfs41_find_root_sec, 10785 .free_lock_state = nfs41_free_lock_state, 10786 .call_sync_ops = &nfs41_call_sync_ops, 10787 .test_and_free_expired = nfs41_test_and_free_expired_stateid, 10788 .alloc_seqid = nfs_alloc_no_seqid, 10789 .session_trunk = nfs4_test_session_trunk, 10790 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 10791 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 10792 .state_renewal_ops = &nfs41_state_renewal_ops, 10793 .mig_recovery_ops = &nfs41_mig_recovery_ops, 10794 }; 10795 #endif 10796 10797 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = { 10798 [0] = &nfs_v4_0_minor_ops, 10799 #if defined(CONFIG_NFS_V4_1) 10800 [1] = &nfs_v4_1_minor_ops, 10801 #endif 10802 #if defined(CONFIG_NFS_V4_2) 10803 [2] = &nfs_v4_2_minor_ops, 10804 #endif 10805 }; 10806 10807 static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size) 10808 { 10809 ssize_t error, error2, error3; 10810 size_t left = size; 10811 10812 error = generic_listxattr(dentry, list, left); 10813 if (error < 0) 10814 return error; 10815 if (list) { 10816 list += error; 10817 left -= error; 10818 } 10819 10820 error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, left); 10821 if (error2 < 0) 10822 return error2; 10823 10824 if (list) { 10825 list += error2; 10826 left -= error2; 10827 } 10828 10829 error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, left); 10830 if (error3 < 0) 10831 return error3; 10832 10833 error += error2 + error3; 10834 if (size && error > size) 10835 return -ERANGE; 10836 return error; 10837 } 10838 10839 static void nfs4_enable_swap(struct inode *inode) 10840 { 10841 /* The state manager thread must always be running. 10842 * It will notice the client is a swapper, and stay put. 10843 */ 10844 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 10845 10846 nfs4_schedule_state_manager(clp); 10847 } 10848 10849 static void nfs4_disable_swap(struct inode *inode) 10850 { 10851 /* The state manager thread will now exit once it is 10852 * woken. 10853 */ 10854 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 10855 10856 set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state); 10857 clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state); 10858 wake_up_var(&clp->cl_state); 10859 } 10860 10861 static const struct inode_operations nfs4_dir_inode_operations = { 10862 .create = nfs_create, 10863 .lookup = nfs_lookup, 10864 .atomic_open = nfs_atomic_open, 10865 .link = nfs_link, 10866 .unlink = nfs_unlink, 10867 .symlink = nfs_symlink, 10868 .mkdir = nfs_mkdir, 10869 .rmdir = nfs_rmdir, 10870 .mknod = nfs_mknod, 10871 .rename = nfs_rename, 10872 .permission = nfs_permission, 10873 .getattr = nfs_getattr, 10874 .setattr = nfs_setattr, 10875 .listxattr = nfs4_listxattr, 10876 }; 10877 10878 static const struct inode_operations nfs4_file_inode_operations = { 10879 .permission = nfs_permission, 10880 .getattr = nfs_getattr, 10881 .setattr = nfs_setattr, 10882 .listxattr = nfs4_listxattr, 10883 }; 10884 10885 const struct nfs_rpc_ops nfs_v4_clientops = { 10886 .version = 4, /* protocol version */ 10887 .dentry_ops = &nfs4_dentry_operations, 10888 .dir_inode_ops = &nfs4_dir_inode_operations, 10889 .file_inode_ops = &nfs4_file_inode_operations, 10890 .file_ops = &nfs4_file_operations, 10891 .getroot = nfs4_proc_get_root, 10892 .submount = nfs4_submount, 10893 .try_get_tree = nfs4_try_get_tree, 10894 .getattr = nfs4_proc_getattr, 10895 .setattr = nfs4_proc_setattr, 10896 .lookup = nfs4_proc_lookup, 10897 .lookupp = nfs4_proc_lookupp, 10898 .access = nfs4_proc_access, 10899 .readlink = nfs4_proc_readlink, 10900 .create = nfs4_proc_create, 10901 .remove = nfs4_proc_remove, 10902 .unlink_setup = nfs4_proc_unlink_setup, 10903 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare, 10904 .unlink_done = nfs4_proc_unlink_done, 10905 .rename_setup = nfs4_proc_rename_setup, 10906 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare, 10907 .rename_done = nfs4_proc_rename_done, 10908 .link = nfs4_proc_link, 10909 .symlink = nfs4_proc_symlink, 10910 .mkdir = nfs4_proc_mkdir, 10911 .rmdir = nfs4_proc_rmdir, 10912 .readdir = nfs4_proc_readdir, 10913 .mknod = nfs4_proc_mknod, 10914 .statfs = nfs4_proc_statfs, 10915 .fsinfo = nfs4_proc_fsinfo, 10916 .pathconf = nfs4_proc_pathconf, 10917 .set_capabilities = nfs4_server_capabilities, 10918 .decode_dirent = nfs4_decode_dirent, 10919 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare, 10920 .read_setup = nfs4_proc_read_setup, 10921 .read_done = nfs4_read_done, 10922 .write_setup = nfs4_proc_write_setup, 10923 .write_done = nfs4_write_done, 10924 .commit_setup = nfs4_proc_commit_setup, 10925 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare, 10926 .commit_done = nfs4_commit_done, 10927 .lock = nfs4_proc_lock, 10928 .clear_acl_cache = nfs4_zap_acl_attr, 10929 .close_context = nfs4_close_context, 10930 .open_context = nfs4_atomic_open, 10931 .have_delegation = nfs4_have_delegation, 10932 .return_delegation = nfs4_inode_return_delegation, 10933 .alloc_client = nfs4_alloc_client, 10934 .init_client = nfs4_init_client, 10935 .free_client = nfs4_free_client, 10936 .create_server = nfs4_create_server, 10937 .clone_server = nfs_clone_server, 10938 .discover_trunking = nfs4_discover_trunking, 10939 .enable_swap = nfs4_enable_swap, 10940 .disable_swap = nfs4_disable_swap, 10941 }; 10942 10943 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { 10944 .name = XATTR_NAME_NFSV4_ACL, 10945 .list = nfs4_xattr_list_nfs4_acl, 10946 .get = nfs4_xattr_get_nfs4_acl, 10947 .set = nfs4_xattr_set_nfs4_acl, 10948 }; 10949 10950 #if defined(CONFIG_NFS_V4_1) 10951 static const struct xattr_handler nfs4_xattr_nfs4_dacl_handler = { 10952 .name = XATTR_NAME_NFSV4_DACL, 10953 .list = nfs4_xattr_list_nfs4_dacl, 10954 .get = nfs4_xattr_get_nfs4_dacl, 10955 .set = nfs4_xattr_set_nfs4_dacl, 10956 }; 10957 10958 static const struct xattr_handler nfs4_xattr_nfs4_sacl_handler = { 10959 .name = XATTR_NAME_NFSV4_SACL, 10960 .list = nfs4_xattr_list_nfs4_sacl, 10961 .get = nfs4_xattr_get_nfs4_sacl, 10962 .set = nfs4_xattr_set_nfs4_sacl, 10963 }; 10964 #endif 10965 10966 #ifdef CONFIG_NFS_V4_2 10967 static const struct xattr_handler nfs4_xattr_nfs4_user_handler = { 10968 .prefix = XATTR_USER_PREFIX, 10969 .get = nfs4_xattr_get_nfs4_user, 10970 .set = nfs4_xattr_set_nfs4_user, 10971 }; 10972 #endif 10973 10974 const struct xattr_handler * const nfs4_xattr_handlers[] = { 10975 &nfs4_xattr_nfs4_acl_handler, 10976 #if defined(CONFIG_NFS_V4_1) 10977 &nfs4_xattr_nfs4_dacl_handler, 10978 &nfs4_xattr_nfs4_sacl_handler, 10979 #endif 10980 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 10981 &nfs4_xattr_nfs4_label_handler, 10982 #endif 10983 #ifdef CONFIG_NFS_V4_2 10984 &nfs4_xattr_nfs4_user_handler, 10985 #endif 10986 NULL 10987 }; 10988