/*
 * fs/nfs/nfs4proc.c
 *
 * Client-side procedure declarations for NFSv4.
 *
 * Copyright (c) 2002 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/iversion.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "callback.h"
#include "pnfs.h"
#include "netns.h"
#include "sysfs.h"
#include "nfs4idmap.h"
#include "nfs4session.h"
#include "fscache.h"
#include "nfs42.h"

#include "nfs4trace.h"

#define NFSDBG_FACILITY		NFSDBG_PROC

#define NFS4_BITMASK_SZ		3

#define NFS4_POLL_RETRY_MIN	(HZ/10)
#define NFS4_POLL_RETRY_MAX	(15*HZ)

/* file attributes which can be mapped to nfs attributes */
#define NFS4_VALID_ATTRS (ATTR_MODE \
	| ATTR_UID \
	| ATTR_GID \
	| ATTR_SIZE \
	| ATTR_ATIME \
	| ATTR_MTIME \
	| ATTR_CTIME \
	| ATTR_ATIME_SET \
	| ATTR_MTIME_SET)

struct nfs4_opendata;
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
			      struct nfs_fattr *fattr, struct inode *inode);
static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
			   struct nfs_fattr *fattr, struct iattr *sattr,
			   struct nfs_open_context *ctx, struct nfs4_label *ilabel);
#ifdef CONFIG_NFS_V4_1
static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
		const struct cred *cred,
		struct nfs4_slot *slot,
		bool is_privileged);
static int nfs41_test_stateid(struct nfs_server *, const nfs4_stateid *,
		const struct cred *);
static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *,
		const struct cred *, bool);
#endif

#ifdef CONFIG_NFS_V4_SECURITY_LABEL
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
	struct iattr *sattr, struct nfs4_label *label)
{
	struct lsm_context shim;
	int err;

	if (label == NULL)
		return NULL;

	if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
		return NULL;

	label->lfs = 0;
	label->pi = 0;
	label->len = 0;
	label->label = NULL;

	err = security_dentry_init_security(dentry, sattr->ia_mode,
					&dentry->d_name, NULL, &shim);
	if (err)
		return NULL;

	label->lsmid = shim.id;
	label->label = shim.context;
	label->len = shim.len;
	return label;
}
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{
	struct lsm_context shim;

	if (label) {
		shim.context = label->label;
		shim.len = label->len;
		shim.id = label->lsmid;
		security_release_secctx(&shim);
	}
}
static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{
	if (label)
		return server->attr_bitmask;

	return server->attr_bitmask_nl;
}
#else
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
	struct iattr *sattr, struct nfs4_label *l)
{ return NULL; }
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{ return; }
static inline u32 *
nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{ return server->attr_bitmask; }
#endif

/* Prevent leaks of NFSv4 errors into userland */
static int nfs4_map_errors(int err)
{
	if (err >= -1000)
		return err;
	switch (err) {
	case -NFS4ERR_RESOURCE:
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
	case -NFS4ERR_RETURNCONFLICT:
		return -EREMOTEIO;
	case -NFS4ERR_WRONGSEC:
	case -NFS4ERR_WRONG_CRED:
		return -EPERM;
	case -NFS4ERR_BADOWNER:
	case -NFS4ERR_BADNAME:
		return -EINVAL;
	case -NFS4ERR_SHARE_DENIED:
		return -EACCES;
	case -NFS4ERR_MINOR_VERS_MISMATCH:
		return -EPROTONOSUPPORT;
	case -NFS4ERR_FILE_OPEN:
		return -EBUSY;
	case -NFS4ERR_NOT_SAME:
		return -ENOTSYNC;
	default:
		dprintk("%s could not handle NFSv4 error %d\n",
				__func__, -err);
		break;
	}
	return -EIO;
}

/*
 * This is our standard bitmap for GETATTR requests.
 */
const u32 nfs4_fattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	FATTR4_WORD2_SECURITY_LABEL
#endif
};

static const u32 nfs4_pnfs_open_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY,
	FATTR4_WORD2_MDSTHRESHOLD
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	| FATTR4_WORD2_SECURITY_LABEL
#endif
};

static const u32 nfs4_open_noattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_FILEID,
};

const u32 nfs4_statfs_bitmap[3] = {
	FATTR4_WORD0_FILES_AVAIL
	| FATTR4_WORD0_FILES_FREE
	| FATTR4_WORD0_FILES_TOTAL,
	FATTR4_WORD1_SPACE_AVAIL
	| FATTR4_WORD1_SPACE_FREE
	| FATTR4_WORD1_SPACE_TOTAL
};

const u32 nfs4_pathconf_bitmap[3] = {
	FATTR4_WORD0_MAXLINK
	| FATTR4_WORD0_MAXNAME,
	0
};

const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
	| FATTR4_WORD0_MAXREAD
	| FATTR4_WORD0_MAXWRITE
	| FATTR4_WORD0_LEASE_TIME,
	FATTR4_WORD1_TIME_DELTA
	| FATTR4_WORD1_FS_LAYOUT_TYPES,
	FATTR4_WORD2_LAYOUT_BLKSIZE
	| FATTR4_WORD2_CLONE_BLKSIZE
	| FATTR4_WORD2_CHANGE_ATTR_TYPE
	| FATTR4_WORD2_XATTR_SUPPORT
};

const u32 nfs4_fs_locations_bitmap[3] = {
	FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID
	| FATTR4_WORD0_FS_LOCATIONS,
	FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
};

static void nfs4_bitmap_copy_adjust(__u32 *dst, const __u32 *src,
				    struct inode *inode, unsigned long flags)
{
	unsigned long cache_validity;

	memcpy(dst, src, NFS4_BITMASK_SZ*sizeof(*dst));
	if (!inode || !nfs_have_read_or_write_delegation(inode))
		return;

	cache_validity = READ_ONCE(NFS_I(inode)->cache_validity) | flags;

	/* Remove the attributes over which we have full control */
	dst[1] &= ~FATTR4_WORD1_RAWDEV;
	if (!(cache_validity & NFS_INO_INVALID_SIZE))
		dst[0] &= ~FATTR4_WORD0_SIZE;

	if (!(cache_validity & NFS_INO_INVALID_CHANGE))
		dst[0] &= ~FATTR4_WORD0_CHANGE;

	if (!(cache_validity & NFS_INO_INVALID_MODE))
		dst[1] &= ~FATTR4_WORD1_MODE;
	if (!(cache_validity & NFS_INO_INVALID_OTHER))
		dst[1] &= ~(FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP);

	if (nfs_have_delegated_mtime(inode)) {
		if (!(cache_validity & NFS_INO_INVALID_ATIME))
			dst[1] &= ~FATTR4_WORD1_TIME_ACCESS;
		if (!(cache_validity & NFS_INO_INVALID_MTIME))
			dst[1] &= ~FATTR4_WORD1_TIME_MODIFY;
		if (!(cache_validity & NFS_INO_INVALID_CTIME))
			dst[1] &= ~FATTR4_WORD1_TIME_METADATA;
	} else if (nfs_have_delegated_atime(inode)) {
		if (!(cache_validity & NFS_INO_INVALID_ATIME))
			dst[1] &= ~FATTR4_WORD1_TIME_ACCESS;
	}
}

static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
		struct nfs4_readdir_arg *readdir)
{
	unsigned int attrs = FATTR4_WORD0_FILEID | FATTR4_WORD0_TYPE;
	__be32 *start, *p;

	if (cookie > 2) {
		readdir->cookie = cookie;
		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
		return;
	}

	readdir->cookie = 0;
	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
	if (cookie == 2)
		return;

	/*
	 * NFSv4 servers do not return entries for '.' and '..'
	 * Therefore, we fake these entries here.  We let '.'
	 * have cookie 0 and '..' have cookie 1.  Note that
	 * when talking to the server, we always send cookie 0
	 * instead of 1 or 2.
	 */
	start = p = kmap_atomic(*readdir->pages);

	if (cookie == 0) {
		*p++ = xdr_one;			/* next */
		*p++ = xdr_zero;		/* cookie, first word */
		*p++ = xdr_one;			/* cookie, second word */
		*p++ = xdr_one;			/* entry len */
		memcpy(p, ".\0\0\0", 4);	/* entry */
		p++;
		*p++ = xdr_one;			/* bitmap length */
		*p++ = htonl(attrs);		/* bitmap */
		*p++ = htonl(12);		/* attribute buffer length */
		*p++ = htonl(NF4DIR);
		p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
	}

	*p++ = xdr_one;				/* next */
	*p++ = xdr_zero;			/* cookie, first word */
	*p++ = xdr_two;				/* cookie, second word */
	*p++ = xdr_two;				/* entry len */
	memcpy(p, "..\0\0", 4);			/* entry */
	p++;
	*p++ = xdr_one;				/* bitmap length */
	*p++ = htonl(attrs);			/* bitmap */
	*p++ = htonl(12);			/* attribute buffer length */
	*p++ = htonl(NF4DIR);
	p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));

	readdir->pgbase = (char *)p - (char *)start;
	readdir->count -= readdir->pgbase;
	kunmap_atomic(start);
}

static void nfs4_fattr_set_prechange(struct nfs_fattr *fattr, u64 version)
{
	if (!(fattr->valid & NFS_ATTR_FATTR_PRECHANGE)) {
		fattr->pre_change_attr = version;
		fattr->valid |= NFS_ATTR_FATTR_PRECHANGE;
	}
}

static void nfs4_test_and_free_stateid(struct nfs_server *server,
		nfs4_stateid *stateid,
		const struct cred *cred)
{
	const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;

	ops->test_and_free_expired(server, stateid, cred);
}

static void __nfs4_free_revoked_stateid(struct nfs_server *server,
		nfs4_stateid *stateid,
		const struct cred *cred)
{
	stateid->type = NFS4_REVOKED_STATEID_TYPE;
	nfs4_test_and_free_stateid(server, stateid, cred);
}

static void nfs4_free_revoked_stateid(struct nfs_server *server,
		const nfs4_stateid *stateid,
		const struct cred *cred)
{
	nfs4_stateid tmp;

	nfs4_stateid_copy(&tmp, stateid);
	__nfs4_free_revoked_stateid(server, &tmp, cred);
}

static long nfs4_update_delay(long *timeout)
{
	long ret;
	if (!timeout)
		return NFS4_POLL_RETRY_MAX;
	if (*timeout <= 0)
		*timeout = NFS4_POLL_RETRY_MIN;
	if (*timeout > NFS4_POLL_RETRY_MAX)
		*timeout = NFS4_POLL_RETRY_MAX;
	ret = *timeout;
	*timeout <<= 1;
	return ret;
}

static int nfs4_delay_killable(long *timeout)
{
	might_sleep();

	__set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
	schedule_timeout(nfs4_update_delay(timeout));
	if (!__fatal_signal_pending(current))
		return 0;
	return -EINTR;
}

static int nfs4_delay_interruptible(long *timeout)
{
	might_sleep();

	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE_UNSAFE);
	schedule_timeout(nfs4_update_delay(timeout));
	if (!signal_pending(current))
		return 0;
	return __fatal_signal_pending(current) ? -EINTR : -ERESTARTSYS;
}

static int nfs4_delay(long *timeout, bool interruptible)
{
	if (interruptible)
		return nfs4_delay_interruptible(timeout);
	return nfs4_delay_killable(timeout);
}

static const nfs4_stateid *
nfs4_recoverable_stateid(const nfs4_stateid *stateid)
{
	if (!stateid)
		return NULL;
	switch (stateid->type) {
	case NFS4_OPEN_STATEID_TYPE:
	case NFS4_LOCK_STATEID_TYPE:
	case NFS4_DELEGATION_STATEID_TYPE:
		return stateid;
	default:
		break;
	}
	return NULL;
}

/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
static int nfs4_do_handle_exception(struct nfs_server *server,
		int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state *state = exception->state;
	const nfs4_stateid *stateid;
	struct inode *inode = exception->inode;
	int ret = errorcode;

	exception->delay = 0;
	exception->recovering = 0;
	exception->retry = 0;

	stateid = nfs4_recoverable_stateid(exception->stateid);
	if (stateid == NULL && state != NULL)
		stateid = nfs4_recoverable_stateid(&state->stateid);

	switch (errorcode) {
	case 0:
		return 0;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		if (inode != NULL && S_ISREG(inode->i_mode))
			pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_BAD_STATEID:
	case -NFS4ERR_PARTNER_NO_AUTH:
		if (inode != NULL && stateid != NULL) {
			nfs_inode_find_state_and_recover(inode,
					stateid);
			goto wait_on_recovery;
		}
		fallthrough;
	case -NFS4ERR_OPENMODE:
		if (inode) {
			int err;

			err = nfs_async_inode_return_delegation(inode,
					stateid);
			if (err == 0)
				goto wait_on_recovery;
			if (stateid != NULL && stateid->type == NFS4_DELEGATION_STATEID_TYPE) {
				exception->retry = 1;
				break;
			}
		}
		if (state == NULL)
			break;
		ret = nfs4_schedule_stateid_recovery(server, state);
		if (ret < 0)
			break;
		goto wait_on_recovery;
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_STALE_CLIENTID:
		nfs4_schedule_lease_recovery(clp);
		goto wait_on_recovery;
	case -NFS4ERR_MOVED:
		ret = nfs4_schedule_migration_recovery(server);
		if (ret < 0)
			break;
		goto wait_on_recovery;
	case -NFS4ERR_LEASE_MOVED:
		nfs4_schedule_lease_moved_recovery(clp);
		goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		/* Handled in nfs41_sequence_process() */
		goto wait_on_recovery;
#endif /* defined(CONFIG_NFS_V4_1) */
	case -NFS4ERR_FILE_OPEN:
		if (exception->timeout > HZ) {
			/* We have retried a decent amount, time to
			 * fail
			 */
			ret = -EBUSY;
			break;
		}
		fallthrough;
	case -NFS4ERR_DELAY:
		nfs_inc_server_stats(server, NFSIOS_DELAY);
		fallthrough;
	case -NFS4ERR_GRACE:
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
	case -NFS4ERR_RETURNCONFLICT:
		exception->delay = 1;
		return 0;

	case -NFS4ERR_RETRY_UNCACHED_REP:
	case -NFS4ERR_OLD_STATEID:
		exception->retry = 1;
		break;
	case -NFS4ERR_BADOWNER:
		/* The following works around a Linux server bug! */
	case -NFS4ERR_BADNAME:
		if (server->caps & NFS_CAP_UIDGID_NOMAP) {
			server->caps &= ~NFS_CAP_UIDGID_NOMAP;
			exception->retry = 1;
			printk(KERN_WARNING "NFS: v4 server %s "
					"does not accept raw "
					"uid/gids. "
					"Reenabling the idmapper.\n",
					server->nfs_client->cl_hostname);
		}
	}
	/* We failed to handle the error */
	return nfs4_map_errors(ret);
wait_on_recovery:
	exception->recovering = 1;
	return 0;
}

/*
 * Track the number of NFS4ERR_DELAY related retransmissions and return
 * EAGAIN if the 'softerr' mount option is set, and we've exceeded the limit
 * set by 'nfs_delay_retrans'.
 */
static int nfs4_exception_should_retrans(const struct nfs_server *server,
					 struct nfs4_exception *exception)
{
	if (server->flags & NFS_MOUNT_SOFTERR && nfs_delay_retrans >= 0) {
		if (exception->retrans++ >= (unsigned short)nfs_delay_retrans)
			return -EAGAIN;
	}
	return 0;
}

/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	int ret;

	ret = nfs4_do_handle_exception(server, errorcode, exception);
	if (exception->delay) {
		int ret2 = nfs4_exception_should_retrans(server, exception);
		if (ret2 < 0) {
			exception->retry = 0;
			return ret2;
		}
		ret = nfs4_delay(&exception->timeout,
				exception->interruptible);
		goto out_retry;
	}
	if (exception->recovering) {
		if (exception->task_is_privileged)
			return -EDEADLOCK;
		ret = nfs4_wait_clnt_recover(clp);
		if (test_bit(NFS_MIG_FAILED, &server->mig_status))
			return -EIO;
		goto out_retry;
	}
	return ret;
out_retry:
	if (ret == 0)
		exception->retry = 1;
	return ret;
}

static int
nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
		int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	int ret;

	ret = nfs4_do_handle_exception(server, errorcode, exception);
	if (exception->delay) {
		int ret2 = nfs4_exception_should_retrans(server, exception);
		if (ret2 < 0) {
			exception->retry = 0;
			return ret2;
		}
		rpc_delay(task, nfs4_update_delay(&exception->timeout));
		goto out_retry;
	}
	if (exception->recovering) {
		if (exception->task_is_privileged)
			return -EDEADLOCK;
		rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
		if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
			rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
		goto out_retry;
	}
	if (test_bit(NFS_MIG_FAILED, &server->mig_status))
		ret = -EIO;
	return ret;
out_retry:
	if (ret == 0) {
		exception->retry = 1;
		/*
		 * For NFS4ERR_MOVED, the client transport will need to
		 * be recomputed after migration recovery has completed.
		 */
		if (errorcode == -NFS4ERR_MOVED)
			rpc_task_release_transport(task);
	}
	return ret;
}

int
nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
			struct nfs4_state *state, long *timeout)
{
	struct nfs4_exception exception = {
		.state = state,
	};

	if (task->tk_status >= 0)
		return 0;
	if (timeout)
		exception.timeout = *timeout;
	task->tk_status = nfs4_async_handle_exception(task, server,
			task->tk_status,
			&exception);
	if (exception.delay && timeout)
		*timeout = exception.timeout;
	if (exception.retry)
		return -EAGAIN;
	return 0;
}

/*
 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
 * or 'false' otherwise.
 */
static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
{
	rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
	return (flavor == RPC_AUTH_GSS_KRB5I) || (flavor == RPC_AUTH_GSS_KRB5P);
}

static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
{
	spin_lock(&clp->cl_lock);
	if (time_before(clp->cl_last_renewal, timestamp))
		clp->cl_last_renewal = timestamp;
	spin_unlock(&clp->cl_lock);
}

static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
{
	struct nfs_client *clp = server->nfs_client;

	if (!nfs4_has_session(clp))
		do_renew_lease(clp, timestamp);
}

struct nfs4_call_sync_data {
	const struct nfs_server *seq_server;
	struct nfs4_sequence_args *seq_args;
	struct nfs4_sequence_res *seq_res;
};

void nfs4_init_sequence(struct nfs4_sequence_args *args,
			struct nfs4_sequence_res *res, int cache_reply,
			int privileged)
{
	args->sa_slot = NULL;
	args->sa_cache_this = cache_reply;
	args->sa_privileged = privileged;

	res->sr_slot = NULL;
}

static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res)
{
	struct nfs4_slot *slot = res->sr_slot;
	struct nfs4_slot_table *tbl;

	tbl = slot->table;
	spin_lock(&tbl->slot_tbl_lock);
	if (!nfs41_wake_and_assign_slot(tbl, slot))
		nfs4_free_slot(tbl, slot);
	spin_unlock(&tbl->slot_tbl_lock);

	res->sr_slot = NULL;
}

static int nfs40_sequence_done(struct rpc_task *task,
			       struct nfs4_sequence_res *res)
{
	if (res->sr_slot != NULL)
		nfs40_sequence_free_slot(res);
	return 1;
}

#if defined(CONFIG_NFS_V4_1)

static void nfs41_release_slot(struct nfs4_slot *slot)
{
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	bool send_new_highest_used_slotid = false;

	if (!slot)
		return;
	tbl = slot->table;
	session = tbl->session;

	/* Bump the slot sequence number */
	if (slot->seq_done)
		slot->seq_nr++;
	slot->seq_done = 0;

	spin_lock(&tbl->slot_tbl_lock);
	/* Be nice to the server: try to ensure that the last transmitted
	 * value for highest_used_slotid <= target_highest_slotid
	 */
	if (tbl->highest_used_slotid > tbl->target_highest_slotid)
		send_new_highest_used_slotid = true;

	if (nfs41_wake_and_assign_slot(tbl, slot)) {
		send_new_highest_used_slotid = false;
		goto out_unlock;
	}
	nfs4_free_slot(tbl, slot);

	if (tbl->highest_used_slotid != NFS4_NO_SLOT)
		send_new_highest_used_slotid = false;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);
	if (send_new_highest_used_slotid)
		nfs41_notify_server(session->clp);
	if (waitqueue_active(&tbl->slot_waitq))
		wake_up_all(&tbl->slot_waitq);
}

static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
{
	nfs41_release_slot(res->sr_slot);
	res->sr_slot = NULL;
}

static void nfs4_slot_sequence_record_sent(struct nfs4_slot *slot,
		u32 seqnr)
{
	if ((s32)(seqnr - slot->seq_nr_highest_sent) > 0)
		slot->seq_nr_highest_sent = seqnr;
}
static void nfs4_slot_sequence_acked(struct nfs4_slot *slot, u32 seqnr)
{
	nfs4_slot_sequence_record_sent(slot, seqnr);
	slot->seq_nr_last_acked = seqnr;
}

static void nfs4_probe_sequence(struct nfs_client *client, const struct cred *cred,
				struct nfs4_slot *slot)
{
	struct rpc_task *task = _nfs41_proc_sequence(client, cred, slot, true);
	if (!IS_ERR(task))
		rpc_put_task_async(task);
}

static int nfs41_sequence_process(struct rpc_task *task,
		struct nfs4_sequence_res *res)
{
	struct nfs4_session *session;
	struct nfs4_slot *slot = res->sr_slot;
	struct nfs_client *clp;
	int status;
	int ret = 1;

	if (slot == NULL)
		goto out_noaction;
	/* don't increment the sequence number if the task wasn't sent */
	if (!RPC_WAS_SENT(task) || slot->seq_done)
		goto out;

	session = slot->table->session;
	clp = session->clp;

	trace_nfs4_sequence_done(session, res);

	status = res->sr_status;
	if (task->tk_status == -NFS4ERR_DEADSESSION)
		status = -NFS4ERR_DEADSESSION;

	/* Check the SEQUENCE operation status */
	switch (status) {
	case 0:
		/* Mark this sequence number as having been acked */
		nfs4_slot_sequence_acked(slot, slot->seq_nr);
		/* Update the slot's sequence and clientid lease timer */
		slot->seq_done = 1;
		do_renew_lease(clp, res->sr_timestamp);
		/* Check sequence flags */
		nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags,
				!!slot->privileged);
		nfs41_update_target_slotid(slot->table, slot, res);
		break;
	case 1:
		/*
		 * sr_status remains 1 if an RPC level error occurred.
		 * The server may or may not have processed the sequence
		 * operation.
		 */
		nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
		slot->seq_done = 1;
		goto out;
	case -NFS4ERR_DELAY:
		/* The server detected a resend of the RPC call and
		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
		 * of RFC5661.
		 */
		dprintk("%s: slot=%u seq=%u: Operation in progress\n",
			__func__,
			slot->slot_nr,
			slot->seq_nr);
		goto out_retry;
	case -NFS4ERR_RETRY_UNCACHED_REP:
	case -NFS4ERR_SEQ_FALSE_RETRY:
		/*
		 * The server thinks we tried to replay a request.
		 * Retry the call after bumping the sequence ID.
		 */
		nfs4_slot_sequence_acked(slot, slot->seq_nr);
		goto retry_new_seq;
	case -NFS4ERR_BADSLOT:
		/*
		 * The slot id we used was probably retired. Try again
		 * using a different slot id.
		 */
		if (slot->slot_nr < slot->table->target_highest_slotid)
			goto session_recover;
		goto retry_nowait;
	case -NFS4ERR_SEQ_MISORDERED:
		nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
		/*
		 * Were one or more calls using this slot interrupted?
		 * If the server never received the request, then our
		 * transmitted slot sequence number may be too high. However,
		 * if the server did receive the request then it might
		 * accidentally give us a reply with a mismatched operation.
		 * We can sort this out by sending a lone sequence operation
		 * to the server on the same slot.
		 */
		if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) {
			slot->seq_nr--;
			if (task->tk_msg.rpc_proc != &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE]) {
				nfs4_probe_sequence(clp, task->tk_msg.rpc_cred, slot);
				res->sr_slot = NULL;
			}
			goto retry_nowait;
		}
		/*
		 * RFC5661:
		 * A retry might be sent while the original request is
		 * still in progress on the replier. The replier SHOULD
		 * deal with the issue by returning NFS4ERR_DELAY as the
		 * reply to SEQUENCE or CB_SEQUENCE operation, but
		 * implementations MAY return NFS4ERR_SEQ_MISORDERED.
		 *
		 * Restart the search after a delay.
		 */
		slot->seq_nr = slot->seq_nr_highest_sent;
		goto out_retry;
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		goto session_recover;
	default:
		/* Just update the slot sequence no. */
		slot->seq_done = 1;
	}
out:
	/* The session may be reset by one of the error handlers. */
	dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
out_noaction:
	return ret;
session_recover:
	set_bit(NFS4_SLOT_TBL_DRAINING, &session->fc_slot_table.slot_tbl_state);
	nfs4_schedule_session_recovery(session, status);
	dprintk("%s ERROR: %d Reset session\n", __func__, status);
	nfs41_sequence_free_slot(res);
	goto out;
retry_new_seq:
	++slot->seq_nr;
retry_nowait:
	if (rpc_restart_call_prepare(task)) {
		nfs41_sequence_free_slot(res);
		task->tk_status = 0;
		ret = 0;
	}
	goto out;
out_retry:
	if (!rpc_restart_call(task))
		goto out;
	rpc_delay(task, NFS4_POLL_RETRY_MAX);
	return 0;
}

int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (!nfs41_sequence_process(task, res))
		return 0;
	if (res->sr_slot != NULL)
		nfs41_sequence_free_slot(res);
	return 1;
}
EXPORT_SYMBOL_GPL(nfs41_sequence_done);

static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (res->sr_slot == NULL)
		return 1;
	if (res->sr_slot->table->session != NULL)
		return nfs41_sequence_process(task, res);
	return nfs40_sequence_done(task, res);
}

static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
{
	if (res->sr_slot != NULL) {
		if (res->sr_slot->table->session != NULL)
			nfs41_sequence_free_slot(res);
		else
			nfs40_sequence_free_slot(res);
	}
}

int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (res->sr_slot == NULL)
		return 1;
	if (!res->sr_slot->table->session)
		return nfs40_sequence_done(task, res);
	return nfs41_sequence_done(task, res);
}
EXPORT_SYMBOL_GPL(nfs4_sequence_done);

static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;

	dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);

	nfs4_setup_sequence(data->seq_server->nfs_client,
			data->seq_args, data->seq_res, task);
}

static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;

	nfs41_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs41_call_sync_ops = {
	.rpc_call_prepare = nfs41_call_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
};

#else	/* !CONFIG_NFS_V4_1 */

static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	return nfs40_sequence_done(task, res);
}

static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
{
	if (res->sr_slot != NULL)
		nfs40_sequence_free_slot(res);
}

int nfs4_sequence_done(struct rpc_task *task,
		       struct nfs4_sequence_res *res)
{
	return nfs40_sequence_done(task, res);
}
EXPORT_SYMBOL_GPL(nfs4_sequence_done);

#endif	/* !CONFIG_NFS_V4_1 */

static void nfs41_sequence_res_init(struct nfs4_sequence_res *res)
{
	res->sr_timestamp = jiffies;
	res->sr_status_flags = 0;
	res->sr_status = 1;
}

static
void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
		struct nfs4_sequence_res *res,
		struct nfs4_slot *slot)
{
	if (!slot)
		return;
	slot->privileged = args->sa_privileged ? 1 : 0;
	args->sa_slot = slot;

	res->sr_slot = slot;
}

int nfs4_setup_sequence(struct nfs_client *client,
			struct nfs4_sequence_args *args,
			struct nfs4_sequence_res *res,
			struct rpc_task *task)
{
	struct nfs4_session *session = nfs4_get_session(client);
	struct nfs4_slot_table *tbl = client->cl_slot_tbl;
	struct nfs4_slot *slot;

	/* slot already allocated? */
	if (res->sr_slot != NULL)
		goto out_start;

	if (session)
		tbl = &session->fc_slot_table;

	spin_lock(&tbl->slot_tbl_lock);
	/* The state manager will wait until the slot table is empty */
	if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
		goto out_sleep;

	slot = nfs4_alloc_slot(tbl);
	if (IS_ERR(slot)) {
		if (slot == ERR_PTR(-ENOMEM))
			goto out_sleep_timeout;
		goto out_sleep;
	}
	spin_unlock(&tbl->slot_tbl_lock);

	nfs4_sequence_attach_slot(args, res, slot);

	trace_nfs4_setup_sequence(session, args);
out_start:
	nfs41_sequence_res_init(res);
	rpc_call_start(task);
	return 0;
out_sleep_timeout:
	/* Try again in 1/4 second */
	if (args->sa_privileged)
		rpc_sleep_on_priority_timeout(&tbl->slot_tbl_waitq, task,
				jiffies + (HZ >> 2), RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on_timeout(&tbl->slot_tbl_waitq, task,
				NULL, jiffies + (HZ >> 2));
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
out_sleep:
	if (args->sa_privileged)
		rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
				RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
}
EXPORT_SYMBOL_GPL(nfs4_setup_sequence);

static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	nfs4_setup_sequence(data->seq_server->nfs_client,
			data->seq_args, data->seq_res, task);
}

static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	nfs4_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs40_call_sync_ops = {
	.rpc_call_prepare = nfs40_call_sync_prepare,
	.rpc_call_done = nfs40_call_sync_done,
};

static int nfs4_call_sync_custom(struct rpc_task_setup *task_setup)
{
	int ret;
	struct rpc_task *task;

	task = rpc_run_task(task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);

	ret = task->tk_status;
	rpc_put_task(task);
	return ret;
}

static int nfs4_do_call_sync(struct rpc_clnt *clnt,
			     struct nfs_server *server,
			     struct rpc_message *msg,
			     struct nfs4_sequence_args *args,
			     struct nfs4_sequence_res *res,
			     unsigned short task_flags)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_call_sync_data data = {
		.seq_server = server,
		.seq_args = args,
		.seq_res = res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = clp->cl_mvops->call_sync_ops,
		.callback_data = &data,
		.flags = task_flags,
	};

	return nfs4_call_sync_custom(&task_setup);
}

static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
				   struct nfs_server *server,
				   struct rpc_message *msg,
				   struct nfs4_sequence_args *args,
				   struct nfs4_sequence_res *res)
{
	unsigned short task_flags = 0;

	if (server->caps & NFS_CAP_MOVEABLE)
		task_flags = RPC_TASK_MOVEABLE;
	return nfs4_do_call_sync(clnt, server, msg, args, res, task_flags);
}

int nfs4_call_sync(struct rpc_clnt *clnt,
		   struct nfs_server *server,
		   struct rpc_message *msg,
		   struct nfs4_sequence_args *args,
		   struct nfs4_sequence_res *res,
		   int cache_reply)
{
	nfs4_init_sequence(args, res, cache_reply, 0);
	return nfs4_call_sync_sequence(clnt, server, msg, args, res);
}

static void
nfs4_inc_nlink_locked(struct inode *inode)
{
	nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
				     NFS_INO_INVALID_CTIME |
				     NFS_INO_INVALID_NLINK);
	inc_nlink(inode);
}

static void
nfs4_inc_nlink(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	nfs4_inc_nlink_locked(inode);
	spin_unlock(&inode->i_lock);
}

static void
nfs4_dec_nlink_locked(struct inode *inode)
{
	nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
				     NFS_INO_INVALID_CTIME |
				     NFS_INO_INVALID_NLINK);
	drop_nlink(inode);
}

static void
nfs4_update_changeattr_locked(struct inode *inode,
		struct nfs4_change_info *cinfo,
		unsigned long timestamp, unsigned long cache_validity)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	u64 change_attr = inode_peek_iversion_raw(inode);

	if (!nfs_have_delegated_mtime(inode))
		cache_validity |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME;
	if (S_ISDIR(inode->i_mode))
		cache_validity |= NFS_INO_INVALID_DATA;

	switch (NFS_SERVER(inode)->change_attr_type) {
	case NFS4_CHANGE_TYPE_IS_UNDEFINED:
		if (cinfo->after == change_attr)
			goto out;
		break;
	default:
		if ((s64)(change_attr - cinfo->after) >= 0)
			goto out;
	}

	inode_set_iversion_raw(inode, cinfo->after);
	if (!cinfo->atomic || cinfo->before != change_attr) {
		if (S_ISDIR(inode->i_mode))
			nfs_force_lookup_revalidate(inode);

		if (!nfs_have_delegated_attributes(inode))
			cache_validity |=
				NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
				NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER |
				NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
				NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR;
		nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
	}
	nfsi->attrtimeo_timestamp = jiffies;
	nfsi->read_cache_jiffies = timestamp;
	nfsi->attr_gencount = nfs_inc_attr_generation_counter();
	nfsi->cache_validity &= ~NFS_INO_INVALID_CHANGE;
out:
	nfs_set_cache_invalid(inode, cache_validity);
}

void
nfs4_update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
		unsigned long timestamp, unsigned long cache_validity)
{
	spin_lock(&dir->i_lock);
	nfs4_update_changeattr_locked(dir, cinfo, timestamp, cache_validity);
	spin_unlock(&dir->i_lock);
}

struct nfs4_open_createattrs {
	struct nfs4_label *label;
	struct iattr *sattr;
	const __u32 verf[2];
};

static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
		int err, struct nfs4_exception *exception)
{
	if (err != -EINVAL)
		return false;
	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
		return false;
	server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
	exception->retry = 1;
	return true;
}

static fmode_t _nfs4_ctx_to_accessmode(const struct nfs_open_context *ctx)
{
	return ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
}

static fmode_t _nfs4_ctx_to_openmode(const struct nfs_open_context *ctx)
{
	fmode_t ret = ctx->mode & (FMODE_READ|FMODE_WRITE);

	return (ctx->mode & FMODE_EXEC) ? FMODE_READ | ret : ret;
}

static u32
nfs4_fmode_to_share_access(fmode_t fmode)
{
	u32 res = 0;

	switch (fmode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
		res = NFS4_SHARE_ACCESS_READ;
		break;
	case FMODE_WRITE:
		res = NFS4_SHARE_ACCESS_WRITE;
		break;
	case FMODE_READ|FMODE_WRITE:
		res = NFS4_SHARE_ACCESS_BOTH;
	}
	return res;
}

static u32
nfs4_map_atomic_open_share(struct nfs_server *server,
		fmode_t fmode, int openflags)
{
	u32 res = nfs4_fmode_to_share_access(fmode);

	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
		goto out;
	/* Want no delegation if we're using O_DIRECT */
	if (openflags & O_DIRECT) {
		res |= NFS4_SHARE_WANT_NO_DELEG;
		goto out;
	}
	/* res |= NFS4_SHARE_WANT_NO_PREFERENCE; */
	if (server->caps & NFS_CAP_DELEGTIME)
		res |= NFS4_SHARE_WANT_DELEG_TIMESTAMPS;
	if (server->caps & NFS_CAP_OPEN_XOR)
		res |= NFS4_SHARE_WANT_OPEN_XOR_DELEGATION;
out:
	return res;
}

static enum open_claim_type4
nfs4_map_atomic_open_claim(struct nfs_server *server,
		enum open_claim_type4 claim)
{
	if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
		return claim;
	switch (claim) {
	default:
		return claim;
	case NFS4_OPEN_CLAIM_FH:
		return NFS4_OPEN_CLAIM_NULL;
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
		return NFS4_OPEN_CLAIM_DELEGATE_CUR;
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		return NFS4_OPEN_CLAIM_DELEGATE_PREV;
	}
}

static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
	p->o_res.f_attr = &p->f_attr;
	p->o_res.seqid = p->o_arg.seqid;
	p->c_res.seqid = p->c_arg.seqid;
	p->o_res.server = p->o_arg.server;
	p->o_res.access_request = p->o_arg.access;
	nfs_fattr_init(&p->f_attr);
	nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}

static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
		const struct nfs4_open_createattrs *c,
		enum open_claim_type4 claim,
		gfp_t gfp_mask)
{
	struct dentry *parent = dget_parent(dentry);
	struct inode *dir = d_inode(parent);
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
	struct nfs4_label *label = (c != NULL) ? c->label : NULL;
	struct nfs4_opendata *p;

	p = kzalloc(sizeof(*p), gfp_mask);
	if (p == NULL)
		goto err;

	p->f_attr.label = nfs4_label_alloc(server, gfp_mask);
	if (IS_ERR(p->f_attr.label))
		goto err_free_p;

	p->a_label = nfs4_label_alloc(server, gfp_mask);
	if (IS_ERR(p->a_label))
		goto err_free_f;

	alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
	p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
	if (IS_ERR(p->o_arg.seqid))
		goto err_free_label;
	nfs_sb_active(dentry->d_sb);
	p->dentry = dget(dentry);
	p->dir = parent;
	p->owner = sp;
	atomic_inc(&sp->so_count);
	p->o_arg.open_flags = flags;
	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
	p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
	p->o_arg.share_access = nfs4_map_atomic_open_share(server,
							   fmode, flags);
	if (flags & O_CREAT) {
		p->o_arg.umask = current_umask();
		p->o_arg.label = nfs4_label_copy(p->a_label, label);
		if (c->sattr != NULL && c->sattr->ia_valid != 0) {
			p->o_arg.u.attrs = &p->attrs;
			memcpy(&p->attrs, c->sattr, sizeof(p->attrs));

			memcpy(p->o_arg.u.verifier.data, c->verf,
			       sizeof(p->o_arg.u.verifier.data));
		}
	}
	/* ask server to check for all possible rights as results
	 * are cached */
	switch (p->o_arg.claim) {
	default:
		break;
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
				  NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE |
				  NFS4_ACCESS_EXECUTE |
				  nfs_access_xattr_mask(server);
	}
	p->o_arg.clientid = server->nfs_client->cl_clientid;
	p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
	p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
	p->o_arg.name = &dentry->d_name;
	p->o_arg.server = server;
	p->o_arg.bitmask = nfs4_bitmask(server, label);
	p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
	switch (p->o_arg.claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
	case NFS4_OPEN_CLAIM_DELEGATE_PREV:
		p->o_arg.fh = NFS_FH(dir);
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
	case NFS4_OPEN_CLAIM_FH:
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		p->o_arg.fh = NFS_FH(d_inode(dentry));
	}
	p->c_arg.fh = &p->o_res.fh;
	p->c_arg.stateid = &p->o_res.stateid;
	p->c_arg.seqid = p->o_arg.seqid;
	nfs4_init_opendata_res(p);
	kref_init(&p->kref);
	return p;

err_free_label:
	nfs4_label_free(p->a_label);
err_free_f:
	nfs4_label_free(p->f_attr.label);
err_free_p:
	kfree(p);
err:
	dput(parent);
	return NULL;
}

static void nfs4_opendata_free(struct kref *kref)
{
	struct nfs4_opendata *p = container_of(kref,
			struct nfs4_opendata, kref);
	struct super_block *sb = p->dentry->d_sb;

	nfs4_lgopen_release(p->lgp);
	nfs_free_seqid(p->o_arg.seqid);
	nfs4_sequence_free_slot(&p->o_res.seq_res);
	if (p->state != NULL)
		nfs4_put_open_state(p->state);
	nfs4_put_state_owner(p->owner);

	nfs4_label_free(p->a_label);
	nfs4_label_free(p->f_attr.label);

	dput(p->dir);
	dput(p->dentry);
	nfs_sb_deactive(sb);
	nfs_fattr_free_names(&p->f_attr);
	kfree(p->f_attr.mdsthreshold);
	kfree(p);
}

static void nfs4_opendata_put(struct nfs4_opendata *p)
{
	if (p != NULL)
		kref_put(&p->kref, nfs4_opendata_free);
}

static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
		fmode_t fmode)
{
	switch (fmode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_READ|FMODE_WRITE:
		return state->n_rdwr != 0;
	case FMODE_WRITE:
		return state->n_wronly != 0;
	case FMODE_READ:
		return state->n_rdonly != 0;
	}
	WARN_ON_ONCE(1);
	return false;
}

static int can_open_cached(struct nfs4_state *state, fmode_t mode,
		int open_mode, enum open_claim_type4 claim)
{
	int ret = 0;

	if (open_mode & (O_EXCL|O_TRUNC))
		goto out;
	switch (claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		goto out;
	default:
		break;
	}
	switch (mode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_READ:
		ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
			&& state->n_rdonly != 0;
		break;
	case FMODE_WRITE:
		ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
			&& state->n_wronly != 0;
		break;
	case FMODE_READ|FMODE_WRITE:
		ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
			&& state->n_rdwr != 0;
	}
out:
	return ret;
}

static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
		enum open_claim_type4 claim)
{
	if (delegation == NULL)
		return 0;
	if ((delegation->type & fmode) != fmode)
		return 0;
	switch (claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
		if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
			break;
		fallthrough;
	default:
		return 0;
	}
	nfs_mark_delegation_referenced(delegation);
	return 1;
}

static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
{
	switch (fmode) {
	case FMODE_WRITE:
		state->n_wronly++;
		break;
	case FMODE_READ:
		state->n_rdonly++;
		break;
	case FMODE_READ|FMODE_WRITE:
		state->n_rdwr++;
	}
	nfs4_state_set_mode_locked(state, state->state | fmode);
}

#ifdef CONFIG_NFS_V4_1
static bool nfs_open_stateid_recover_openmode(struct nfs4_state *state)
{
	if (state->n_rdonly && !test_bit(NFS_O_RDONLY_STATE, &state->flags))
		return true;
	if (state->n_wronly && !test_bit(NFS_O_WRONLY_STATE, &state->flags))
		return true;
	if (state->n_rdwr && !test_bit(NFS_O_RDWR_STATE, &state->flags))
		return true;
	return false;
}
#endif /* CONFIG_NFS_V4_1 */

static void nfs_state_log_update_open_stateid(struct nfs4_state *state)
{
	if (test_and_clear_bit(NFS_STATE_CHANGE_WAIT, &state->flags))
		wake_up_all(&state->waitq);
}

static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
{
	struct nfs_client *clp = state->owner->so_server->nfs_client;
	bool need_recover = false;

	if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
		need_recover = true;
	if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
		need_recover = true;
	if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
		need_recover = true;
	if (need_recover)
		nfs4_state_mark_reclaim_nograce(clp, state);
}

/*
 * Check for whether or not the caller may update the open stateid
 * to the value passed in by stateid.
 *
 * Note: This function relies heavily on the server implementing
 * RFC7530 Section 9.1.4.2, and RFC5661 Section 8.2.2
 * correctly.
 * i.e. The stateid seqids have to be initialised to 1, and
 * are then incremented on every state transition.
 */
static bool nfs_stateid_is_sequential(struct nfs4_state *state,
		const nfs4_stateid *stateid)
{
	if (test_bit(NFS_OPEN_STATE, &state->flags)) {
		/* The common case - we're updating to a new sequence number */
		if (nfs4_stateid_match_other(stateid, &state->open_stateid)) {
			if (nfs4_stateid_is_next(&state->open_stateid, stateid))
				return true;
			return false;
		}
		/* The server returned a new stateid */
	}
	/* This is the first OPEN in this generation */
	if (stateid->seqid == cpu_to_be32(1))
		return true;
	return false;
}

static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
{
	if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
		return;
	if (state->n_wronly)
		set_bit(NFS_O_WRONLY_STATE, &state->flags);
	if (state->n_rdonly)
		set_bit(NFS_O_RDONLY_STATE, &state->flags);
	if (state->n_rdwr)
		set_bit(NFS_O_RDWR_STATE, &state->flags);
	set_bit(NFS_OPEN_STATE, &state->flags);
}

static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
		nfs4_stateid *stateid, fmode_t fmode)
{
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	switch (fmode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_WRITE:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		break;
	case FMODE_READ:
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		break;
	case 0:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		clear_bit(NFS_OPEN_STATE, &state->flags);
	}
	if (stateid == NULL)
		return;
	/* Handle OPEN+OPEN_DOWNGRADE races */
	if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
	    !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
		nfs_resync_open_stateid_locked(state);
		goto out;
	}
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
	trace_nfs4_open_stateid_update(state->inode, stateid, 0);
out:
	nfs_state_log_update_open_stateid(state);
}

static void nfs_clear_open_stateid(struct nfs4_state *state,
	nfs4_stateid *arg_stateid,
	nfs4_stateid *stateid, fmode_t fmode)
{
	write_seqlock(&state->seqlock);
	/* Ignore, if the CLOSE argument doesn't match the current stateid */
	if (nfs4_state_match_open_stateid_other(state, arg_stateid))
		nfs_clear_open_stateid_locked(state, stateid, fmode);
	write_sequnlock(&state->seqlock);
	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
}

static void nfs_set_open_stateid_locked(struct nfs4_state *state,
		const nfs4_stateid *stateid, nfs4_stateid *freeme)
	__must_hold(&state->owner->so_lock)
	__must_hold(&state->seqlock)
	__must_hold(RCU)

{
	DEFINE_WAIT(wait);
	int status = 0;
	for (;;) {

		if (nfs_stateid_is_sequential(state, stateid))
			break;

		if (status)
			break;
		/* Rely on seqids for serialisation with NFSv4.0 */
		if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client))
			break;

		set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
		prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
		/*
		 * Ensure we process the state changes in the same order
		 * in which the server processed them by delaying the
		 * update of the stateid until we are in sequence.
		 */
		write_sequnlock(&state->seqlock);
		spin_unlock(&state->owner->so_lock);
		rcu_read_unlock();
		trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);

		if (!fatal_signal_pending(current)) {
			if (schedule_timeout(5*HZ) == 0)
				status = -EAGAIN;
			else
				status = 0;
		} else
			status = -EINTR;
		finish_wait(&state->waitq, &wait);
		rcu_read_lock();
		spin_lock(&state->owner->so_lock);
		write_seqlock(&state->seqlock);
	}

	if (test_bit(NFS_OPEN_STATE, &state->flags) &&
	    !nfs4_stateid_match_other(stateid, &state->open_stateid)) {
		nfs4_stateid_copy(freeme, &state->open_stateid);
		nfs_test_and_clear_all_open_stateid(state);
	}

	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
	trace_nfs4_open_stateid_update(state->inode, stateid, status);
	nfs_state_log_update_open_stateid(state);
}

static void nfs_state_set_open_stateid(struct nfs4_state *state,
		const nfs4_stateid *open_stateid,
		fmode_t fmode,
		nfs4_stateid *freeme)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	write_seqlock(&state->seqlock);
	nfs_set_open_stateid_locked(state, open_stateid, freeme);
	switch (fmode) {
	case FMODE_READ:
		set_bit(NFS_O_RDONLY_STATE, &state->flags);
		break;
	case FMODE_WRITE:
		set_bit(NFS_O_WRONLY_STATE, &state->flags);
		break;
	case FMODE_READ|FMODE_WRITE:
		set_bit(NFS_O_RDWR_STATE, &state->flags);
	}
	set_bit(NFS_OPEN_STATE, &state->flags);
	write_sequnlock(&state->seqlock);
}

static void nfs_state_clear_open_state_flags(struct nfs4_state *state)
{
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	clear_bit(NFS_O_WRONLY_STATE, &state->flags);
	clear_bit(NFS_O_RDONLY_STATE, &state->flags);
	clear_bit(NFS_OPEN_STATE, &state->flags);
}

static void nfs_state_set_delegation(struct nfs4_state *state,
		const nfs4_stateid *deleg_stateid,
		fmode_t fmode)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	write_seqlock(&state->seqlock);
	nfs4_stateid_copy(&state->stateid, deleg_stateid);
	set_bit(NFS_DELEGATED_STATE, &state->flags);
	write_sequnlock(&state->seqlock);
}

static void nfs_state_clear_delegation(struct nfs4_state *state)
{
	write_seqlock(&state->seqlock);
	nfs4_stateid_copy(&state->stateid, &state->open_stateid);
	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	write_sequnlock(&state->seqlock);
}

int update_open_stateid(struct nfs4_state *state,
		const nfs4_stateid *open_stateid,
		const nfs4_stateid *delegation,
		fmode_t fmode)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs_client *clp = server->nfs_client;
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *deleg_cur;
	nfs4_stateid freeme = { };
	int ret = 0;

	fmode &= (FMODE_READ|FMODE_WRITE);

	rcu_read_lock();
	spin_lock(&state->owner->so_lock);
	if (open_stateid != NULL) {
		nfs_state_set_open_stateid(state, open_stateid, fmode, &freeme);
		ret = 1;
	}

	deleg_cur = nfs4_get_valid_delegation(state->inode);
	if (deleg_cur == NULL)
		goto no_delegation;

	spin_lock(&deleg_cur->lock);
	if (rcu_dereference(nfsi->delegation) != deleg_cur ||
	    test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
	    (deleg_cur->type & fmode) != fmode)
		goto no_delegation_unlock;

	if (delegation == NULL)
		delegation = &deleg_cur->stateid;
	else if (!nfs4_stateid_match_other(&deleg_cur->stateid, delegation))
		goto no_delegation_unlock;

	nfs_mark_delegation_referenced(deleg_cur);
	nfs_state_set_delegation(state, &deleg_cur->stateid, fmode);
	ret = 1;
no_delegation_unlock:
	spin_unlock(&deleg_cur->lock);
no_delegation:
	if (ret)
		update_open_stateflags(state, fmode);
	spin_unlock(&state->owner->so_lock);
	rcu_read_unlock();

	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
		nfs4_schedule_state_manager(clp);
	if (freeme.type != 0)
		nfs4_test_and_free_stateid(server, &freeme,
				state->owner->so_cred);

	return ret;
}

static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
		const nfs4_stateid *stateid)
{
	struct nfs4_state *state = lsp->ls_state;
	bool ret = false;

	spin_lock(&state->state_lock);
	if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
		goto out_noupdate;
	if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
		goto out_noupdate;
	nfs4_stateid_copy(&lsp->ls_stateid, stateid);
	ret = true;
out_noupdate:
	spin_unlock(&state->state_lock);
	return ret;
}

static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
{
	struct nfs_delegation *delegation;

	fmode &= FMODE_READ|FMODE_WRITE;
	rcu_read_lock();
	delegation = nfs4_get_valid_delegation(inode);
	if (delegation == NULL || (delegation->type & fmode) == fmode) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	nfs4_inode_return_delegation(inode);
}

static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
{
	struct nfs4_state *state = opendata->state;
	struct nfs_delegation *delegation;
	int open_mode = opendata->o_arg.open_flags;
	fmode_t fmode = opendata->o_arg.fmode;
	enum open_claim_type4 claim = opendata->o_arg.claim;
	nfs4_stateid stateid;
	int ret = -EAGAIN;

	for (;;) {
		spin_lock(&state->owner->so_lock);
		if (can_open_cached(state, fmode, open_mode, claim)) {
			update_open_stateflags(state, fmode);
			spin_unlock(&state->owner->so_lock);
			goto out_return_state;
		}
		spin_unlock(&state->owner->so_lock);
		rcu_read_lock();
		delegation = nfs4_get_valid_delegation(state->inode);
		if (!can_open_delegated(delegation, fmode, claim)) {
			rcu_read_unlock();
			break;
		}
delegation */
		nfs4_stateid_copy(&stateid, &delegation->stateid);
		rcu_read_unlock();
		nfs_release_seqid(opendata->o_arg.seqid);
		if (!opendata->is_recover) {
			ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
			if (ret != 0)
				goto out;
		}
		ret = -EAGAIN;

		/* Try to update the stateid using the delegation */
		if (update_open_stateid(state, NULL, &stateid, fmode))
			goto out_return_state;
	}
out:
	return ERR_PTR(ret);
out_return_state:
	refcount_inc(&state->count);
	return state;
}

static void
nfs4_process_delegation(struct inode *inode, const struct cred *cred,
			enum open_claim_type4 claim,
			const struct nfs4_open_delegation *delegation)
{
	switch (delegation->open_delegation_type) {
	case NFS4_OPEN_DELEGATE_READ:
	case NFS4_OPEN_DELEGATE_WRITE:
	case NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG:
	case NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG:
		break;
	default:
		return;
	}
	switch (claim) {
	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
		pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
				"returning a delegation for "
				"OPEN(CLAIM_DELEGATE_CUR)\n",
				NFS_SERVER(inode)->nfs_client->cl_hostname);
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
		nfs_inode_reclaim_delegation(inode, cred, delegation->type,
				&delegation->stateid,
				delegation->pagemod_limit,
				delegation->open_delegation_type);
		break;
	default:
		nfs_inode_set_delegation(inode, cred, delegation->type,
				&delegation->stateid,
				delegation->pagemod_limit,
				delegation->open_delegation_type);
	}
	if (delegation->do_recall)
		nfs_async_inode_return_delegation(inode, &delegation->stateid);
}

/*
 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
 * and update the nfs4_state.
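 * Returns a referenced nfs4_state on success, otherwise an ERR_PTR().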
2034 */ 2035 static struct nfs4_state * 2036 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data) 2037 { 2038 struct inode *inode = data->state->inode; 2039 struct nfs4_state *state = data->state; 2040 int ret; 2041 2042 if (!data->rpc_done) { 2043 if (data->rpc_status) 2044 return ERR_PTR(data->rpc_status); 2045 return nfs4_try_open_cached(data); 2046 } 2047 2048 ret = nfs_refresh_inode(inode, &data->f_attr); 2049 if (ret) 2050 return ERR_PTR(ret); 2051 2052 nfs4_process_delegation(state->inode, 2053 data->owner->so_cred, 2054 data->o_arg.claim, 2055 &data->o_res.delegation); 2056 2057 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_NO_OPEN_STATEID)) { 2058 if (!update_open_stateid(state, &data->o_res.stateid, 2059 NULL, data->o_arg.fmode)) 2060 return ERR_PTR(-EAGAIN); 2061 } else if (!update_open_stateid(state, NULL, NULL, data->o_arg.fmode)) 2062 return ERR_PTR(-EAGAIN); 2063 refcount_inc(&state->count); 2064 2065 return state; 2066 } 2067 2068 static struct inode * 2069 nfs4_opendata_get_inode(struct nfs4_opendata *data) 2070 { 2071 struct inode *inode; 2072 2073 switch (data->o_arg.claim) { 2074 case NFS4_OPEN_CLAIM_NULL: 2075 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 2076 case NFS4_OPEN_CLAIM_DELEGATE_PREV: 2077 if (!(data->f_attr.valid & NFS_ATTR_FATTR)) 2078 return ERR_PTR(-EAGAIN); 2079 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, 2080 &data->f_attr); 2081 break; 2082 default: 2083 inode = d_inode(data->dentry); 2084 ihold(inode); 2085 nfs_refresh_inode(inode, &data->f_attr); 2086 } 2087 return inode; 2088 } 2089 2090 static struct nfs4_state * 2091 nfs4_opendata_find_nfs4_state(struct nfs4_opendata *data) 2092 { 2093 struct nfs4_state *state; 2094 struct inode *inode; 2095 2096 inode = nfs4_opendata_get_inode(data); 2097 if (IS_ERR(inode)) 2098 return ERR_CAST(inode); 2099 if (data->state != NULL && data->state->inode == inode) { 2100 state = data->state; 2101 refcount_inc(&state->count); 2102 } else 2103 state = nfs4_get_open_state(inode, data->owner); 2104 iput(inode); 2105 if (state == NULL) 2106 state = ERR_PTR(-ENOMEM); 2107 return state; 2108 } 2109 2110 static struct nfs4_state * 2111 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 2112 { 2113 struct nfs4_state *state; 2114 2115 if (!data->rpc_done) { 2116 state = nfs4_try_open_cached(data); 2117 trace_nfs4_cached_open(data->state); 2118 goto out; 2119 } 2120 2121 state = nfs4_opendata_find_nfs4_state(data); 2122 if (IS_ERR(state)) 2123 goto out; 2124 2125 nfs4_process_delegation(state->inode, 2126 data->owner->so_cred, 2127 data->o_arg.claim, 2128 &data->o_res.delegation); 2129 2130 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_NO_OPEN_STATEID)) { 2131 if (!update_open_stateid(state, &data->o_res.stateid, 2132 NULL, data->o_arg.fmode)) { 2133 nfs4_put_open_state(state); 2134 state = ERR_PTR(-EAGAIN); 2135 } 2136 } else if (!update_open_stateid(state, NULL, NULL, data->o_arg.fmode)) { 2137 nfs4_put_open_state(state); 2138 state = ERR_PTR(-EAGAIN); 2139 } 2140 out: 2141 nfs_release_seqid(data->o_arg.seqid); 2142 return state; 2143 } 2144 2145 static struct nfs4_state * 2146 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 2147 { 2148 struct nfs4_state *ret; 2149 2150 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) 2151 ret =_nfs4_opendata_reclaim_to_nfs4_state(data); 2152 else 2153 ret = _nfs4_opendata_to_nfs4_state(data); 2154 nfs4_sequence_free_slot(&data->o_res.seq_res); 2155 return ret; 2156 } 2157 2158 static struct nfs_open_context * 2159 nfs4_state_find_open_context_mode(struct nfs4_state 
*state, fmode_t mode) 2160 { 2161 struct nfs_inode *nfsi = NFS_I(state->inode); 2162 struct nfs_open_context *ctx; 2163 2164 rcu_read_lock(); 2165 list_for_each_entry_rcu(ctx, &nfsi->open_files, list) { 2166 if (ctx->state != state) 2167 continue; 2168 if ((ctx->mode & mode) != mode) 2169 continue; 2170 if (!get_nfs_open_context(ctx)) 2171 continue; 2172 rcu_read_unlock(); 2173 return ctx; 2174 } 2175 rcu_read_unlock(); 2176 return ERR_PTR(-ENOENT); 2177 } 2178 2179 static struct nfs_open_context * 2180 nfs4_state_find_open_context(struct nfs4_state *state) 2181 { 2182 struct nfs_open_context *ctx; 2183 2184 ctx = nfs4_state_find_open_context_mode(state, FMODE_READ|FMODE_WRITE); 2185 if (!IS_ERR(ctx)) 2186 return ctx; 2187 ctx = nfs4_state_find_open_context_mode(state, FMODE_WRITE); 2188 if (!IS_ERR(ctx)) 2189 return ctx; 2190 return nfs4_state_find_open_context_mode(state, FMODE_READ); 2191 } 2192 2193 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, 2194 struct nfs4_state *state, enum open_claim_type4 claim) 2195 { 2196 struct nfs4_opendata *opendata; 2197 2198 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, 2199 NULL, claim, GFP_NOFS); 2200 if (opendata == NULL) 2201 return ERR_PTR(-ENOMEM); 2202 opendata->state = state; 2203 refcount_inc(&state->count); 2204 return opendata; 2205 } 2206 2207 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, 2208 fmode_t fmode) 2209 { 2210 struct nfs4_state *newstate; 2211 struct nfs_server *server = NFS_SB(opendata->dentry->d_sb); 2212 int openflags = opendata->o_arg.open_flags; 2213 int ret; 2214 2215 if (!nfs4_mode_match_open_stateid(opendata->state, fmode)) 2216 return 0; 2217 opendata->o_arg.fmode = fmode; 2218 opendata->o_arg.share_access = 2219 nfs4_map_atomic_open_share(server, fmode, openflags); 2220 memset(&opendata->o_res, 0, sizeof(opendata->o_res)); 2221 memset(&opendata->c_res, 0, sizeof(opendata->c_res)); 2222 nfs4_init_opendata_res(opendata); 2223 ret = _nfs4_recover_proc_open(opendata); 2224 if (ret != 0) 2225 return ret; 2226 newstate = nfs4_opendata_to_nfs4_state(opendata); 2227 if (IS_ERR(newstate)) 2228 return PTR_ERR(newstate); 2229 if (newstate != opendata->state) 2230 ret = -ESTALE; 2231 nfs4_close_state(newstate, fmode); 2232 return ret; 2233 } 2234 2235 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state) 2236 { 2237 int ret; 2238 2239 /* memory barrier prior to reading state->n_* */ 2240 smp_rmb(); 2241 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); 2242 if (ret != 0) 2243 return ret; 2244 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE); 2245 if (ret != 0) 2246 return ret; 2247 ret = nfs4_open_recover_helper(opendata, FMODE_READ); 2248 if (ret != 0) 2249 return ret; 2250 /* 2251 * We may have performed cached opens for all three recoveries. 2252 * Check if we need to update the current stateid. 2253 */ 2254 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 && 2255 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) { 2256 write_seqlock(&state->seqlock); 2257 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 2258 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 2259 write_sequnlock(&state->seqlock); 2260 } 2261 return 0; 2262 } 2263 2264 /* 2265 * OPEN_RECLAIM: 2266 * reclaim state on the server after a reboot. 
2267 */ 2268 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 2269 { 2270 struct nfs_delegation *delegation; 2271 struct nfs4_opendata *opendata; 2272 u32 delegation_type = NFS4_OPEN_DELEGATE_NONE; 2273 int status; 2274 2275 opendata = nfs4_open_recoverdata_alloc(ctx, state, 2276 NFS4_OPEN_CLAIM_PREVIOUS); 2277 if (IS_ERR(opendata)) 2278 return PTR_ERR(opendata); 2279 rcu_read_lock(); 2280 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2281 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0) { 2282 switch(delegation->type) { 2283 case FMODE_READ: 2284 delegation_type = NFS4_OPEN_DELEGATE_READ; 2285 if (test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) 2286 delegation_type = NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG; 2287 break; 2288 case FMODE_WRITE: 2289 case FMODE_READ|FMODE_WRITE: 2290 delegation_type = NFS4_OPEN_DELEGATE_WRITE; 2291 if (test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) 2292 delegation_type = NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG; 2293 } 2294 } 2295 rcu_read_unlock(); 2296 opendata->o_arg.u.delegation_type = delegation_type; 2297 status = nfs4_open_recover(opendata, state); 2298 nfs4_opendata_put(opendata); 2299 return status; 2300 } 2301 2302 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 2303 { 2304 struct nfs_server *server = NFS_SERVER(state->inode); 2305 struct nfs4_exception exception = { }; 2306 int err; 2307 do { 2308 err = _nfs4_do_open_reclaim(ctx, state); 2309 trace_nfs4_open_reclaim(ctx, 0, err); 2310 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 2311 continue; 2312 if (err != -NFS4ERR_DELAY) 2313 break; 2314 nfs4_handle_exception(server, err, &exception); 2315 } while (exception.retry); 2316 return err; 2317 } 2318 2319 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state) 2320 { 2321 struct nfs_open_context *ctx; 2322 int ret; 2323 2324 ctx = nfs4_state_find_open_context(state); 2325 if (IS_ERR(ctx)) 2326 return -EAGAIN; 2327 clear_bit(NFS_DELEGATED_STATE, &state->flags); 2328 nfs_state_clear_open_state_flags(state); 2329 ret = nfs4_do_open_reclaim(ctx, state); 2330 put_nfs_open_context(ctx); 2331 return ret; 2332 } 2333 2334 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err) 2335 { 2336 switch (err) { 2337 default: 2338 printk(KERN_ERR "NFS: %s: unhandled error " 2339 "%d.\n", __func__, err); 2340 fallthrough; 2341 case 0: 2342 case -ENOENT: 2343 case -EAGAIN: 2344 case -ESTALE: 2345 case -ETIMEDOUT: 2346 break; 2347 case -NFS4ERR_BADSESSION: 2348 case -NFS4ERR_BADSLOT: 2349 case -NFS4ERR_BAD_HIGH_SLOT: 2350 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 2351 case -NFS4ERR_DEADSESSION: 2352 return -EAGAIN; 2353 case -NFS4ERR_STALE_CLIENTID: 2354 case -NFS4ERR_STALE_STATEID: 2355 /* Don't recall a delegation if it was lost */ 2356 nfs4_schedule_lease_recovery(server->nfs_client); 2357 return -EAGAIN; 2358 case -NFS4ERR_MOVED: 2359 nfs4_schedule_migration_recovery(server); 2360 return -EAGAIN; 2361 case -NFS4ERR_LEASE_MOVED: 2362 nfs4_schedule_lease_moved_recovery(server->nfs_client); 2363 return -EAGAIN; 2364 case -NFS4ERR_DELEG_REVOKED: 2365 case -NFS4ERR_ADMIN_REVOKED: 2366 case -NFS4ERR_EXPIRED: 2367 case -NFS4ERR_BAD_STATEID: 2368 case -NFS4ERR_OPENMODE: 2369 nfs_inode_find_state_and_recover(state->inode, 2370 stateid); 2371 nfs4_schedule_stateid_recovery(server, state); 
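		/*
		 * Recovery for the affected state has been scheduled above;
		 * report -EAGAIN so the caller retries once it has run.
		 */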
		return -EAGAIN;
	case -NFS4ERR_DELAY:
	case -NFS4ERR_GRACE:
		ssleep(1);
		return -EAGAIN;
	case -ENOMEM:
	case -NFS4ERR_DENIED:
		if (fl) {
			struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner;
			if (lsp)
				set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
		}
		return 0;
	}
	return err;
}

int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
		struct nfs4_state *state, const nfs4_stateid *stateid)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_opendata *opendata;
	int err = 0;

	opendata = nfs4_open_recoverdata_alloc(ctx, state,
			NFS4_OPEN_CLAIM_DELEG_CUR_FH);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
	if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) {
		err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
		if (err)
			goto out;
	}
	if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) {
		err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
		if (err)
			goto out;
	}
	if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) {
		err = nfs4_open_recover_helper(opendata, FMODE_READ);
		if (err)
			goto out;
	}
	nfs_state_clear_delegation(state);
out:
	nfs4_opendata_put(opendata);
	return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
}

static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;

	nfs4_setup_sequence(data->o_arg.server->nfs_client,
			&data->c_arg.seq_args, &data->c_res.seq_res, task);
}

static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;

	nfs40_sequence_done(task, &data->c_res.seq_res);

	data->rpc_status = task->tk_status;
	if (data->rpc_status == 0) {
		nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
		nfs_confirm_seqid(&data->owner->so_seqid, 0);
		renew_lease(data->o_res.server, data->timestamp);
		data->rpc_done = true;
	}
}

static void nfs4_open_confirm_release(void *calldata)
{
	struct nfs4_opendata *data = calldata;
	struct nfs4_state *state = NULL;

	/* If this request hasn't been cancelled, do nothing */
	if (!data->cancelled)
		goto out_free;
	/* In case of error, no cleanup!
*/ 2454 if (!data->rpc_done) 2455 goto out_free; 2456 state = nfs4_opendata_to_nfs4_state(data); 2457 if (!IS_ERR(state)) 2458 nfs4_close_state(state, data->o_arg.fmode); 2459 out_free: 2460 nfs4_opendata_put(data); 2461 } 2462 2463 static const struct rpc_call_ops nfs4_open_confirm_ops = { 2464 .rpc_call_prepare = nfs4_open_confirm_prepare, 2465 .rpc_call_done = nfs4_open_confirm_done, 2466 .rpc_release = nfs4_open_confirm_release, 2467 }; 2468 2469 /* 2470 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata 2471 */ 2472 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) 2473 { 2474 struct nfs_server *server = NFS_SERVER(d_inode(data->dir)); 2475 struct rpc_task *task; 2476 struct rpc_message msg = { 2477 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM], 2478 .rpc_argp = &data->c_arg, 2479 .rpc_resp = &data->c_res, 2480 .rpc_cred = data->owner->so_cred, 2481 }; 2482 struct rpc_task_setup task_setup_data = { 2483 .rpc_client = server->client, 2484 .rpc_message = &msg, 2485 .callback_ops = &nfs4_open_confirm_ops, 2486 .callback_data = data, 2487 .workqueue = nfsiod_workqueue, 2488 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 2489 }; 2490 int status; 2491 2492 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1, 2493 data->is_recover); 2494 kref_get(&data->kref); 2495 data->rpc_done = false; 2496 data->rpc_status = 0; 2497 data->timestamp = jiffies; 2498 task = rpc_run_task(&task_setup_data); 2499 if (IS_ERR(task)) 2500 return PTR_ERR(task); 2501 status = rpc_wait_for_completion_task(task); 2502 if (status != 0) { 2503 data->cancelled = true; 2504 smp_wmb(); 2505 } else 2506 status = data->rpc_status; 2507 rpc_put_task(task); 2508 return status; 2509 } 2510 2511 static void nfs4_open_prepare(struct rpc_task *task, void *calldata) 2512 { 2513 struct nfs4_opendata *data = calldata; 2514 struct nfs4_state_owner *sp = data->owner; 2515 struct nfs_client *clp = sp->so_server->nfs_client; 2516 enum open_claim_type4 claim = data->o_arg.claim; 2517 2518 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) 2519 goto out_wait; 2520 /* 2521 * Check if we still need to send an OPEN call, or if we can use 2522 * a delegation instead. 2523 */ 2524 if (data->state != NULL) { 2525 struct nfs_delegation *delegation; 2526 2527 if (can_open_cached(data->state, data->o_arg.fmode, 2528 data->o_arg.open_flags, claim)) 2529 goto out_no_action; 2530 rcu_read_lock(); 2531 delegation = nfs4_get_valid_delegation(data->state->inode); 2532 if (can_open_delegated(delegation, data->o_arg.fmode, claim)) 2533 goto unlock_no_action; 2534 rcu_read_unlock(); 2535 } 2536 /* Update client id. 
*/ 2537 data->o_arg.clientid = clp->cl_clientid; 2538 switch (claim) { 2539 default: 2540 break; 2541 case NFS4_OPEN_CLAIM_PREVIOUS: 2542 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 2543 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 2544 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0]; 2545 fallthrough; 2546 case NFS4_OPEN_CLAIM_FH: 2547 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; 2548 } 2549 data->timestamp = jiffies; 2550 if (nfs4_setup_sequence(data->o_arg.server->nfs_client, 2551 &data->o_arg.seq_args, 2552 &data->o_res.seq_res, 2553 task) != 0) 2554 nfs_release_seqid(data->o_arg.seqid); 2555 2556 /* Set the create mode (note dependency on the session type) */ 2557 data->o_arg.createmode = NFS4_CREATE_UNCHECKED; 2558 if (data->o_arg.open_flags & O_EXCL) { 2559 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1; 2560 if (clp->cl_mvops->minor_version == 0) { 2561 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE; 2562 /* don't put an ACCESS op in OPEN compound if O_EXCL, 2563 * because ACCESS will return permission denied for 2564 * all bits until close */ 2565 data->o_res.access_request = data->o_arg.access = 0; 2566 } else if (nfs4_has_persistent_session(clp)) 2567 data->o_arg.createmode = NFS4_CREATE_GUARDED; 2568 } 2569 return; 2570 unlock_no_action: 2571 trace_nfs4_cached_open(data->state); 2572 rcu_read_unlock(); 2573 out_no_action: 2574 task->tk_action = NULL; 2575 out_wait: 2576 nfs4_sequence_done(task, &data->o_res.seq_res); 2577 } 2578 2579 static void nfs4_open_done(struct rpc_task *task, void *calldata) 2580 { 2581 struct nfs4_opendata *data = calldata; 2582 2583 data->rpc_status = task->tk_status; 2584 2585 if (!nfs4_sequence_process(task, &data->o_res.seq_res)) 2586 return; 2587 2588 if (task->tk_status == 0) { 2589 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) { 2590 switch (data->o_res.f_attr->mode & S_IFMT) { 2591 case S_IFREG: 2592 break; 2593 case S_IFLNK: 2594 data->rpc_status = -ELOOP; 2595 break; 2596 case S_IFDIR: 2597 data->rpc_status = -EISDIR; 2598 break; 2599 default: 2600 data->rpc_status = -ENOTDIR; 2601 } 2602 } 2603 renew_lease(data->o_res.server, data->timestamp); 2604 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)) 2605 nfs_confirm_seqid(&data->owner->so_seqid, 0); 2606 } 2607 data->rpc_done = true; 2608 } 2609 2610 static void nfs4_open_release(void *calldata) 2611 { 2612 struct nfs4_opendata *data = calldata; 2613 struct nfs4_state *state = NULL; 2614 2615 /* In case of error, no cleanup! */ 2616 if (data->rpc_status != 0 || !data->rpc_done) { 2617 nfs_release_seqid(data->o_arg.seqid); 2618 goto out_free; 2619 } 2620 /* If this request hasn't been cancelled, do nothing */ 2621 if (!data->cancelled) 2622 goto out_free; 2623 /* In case we need an open_confirm, no cleanup! 
*/ 2624 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) 2625 goto out_free; 2626 state = nfs4_opendata_to_nfs4_state(data); 2627 if (!IS_ERR(state)) 2628 nfs4_close_state(state, data->o_arg.fmode); 2629 out_free: 2630 nfs4_opendata_put(data); 2631 } 2632 2633 static const struct rpc_call_ops nfs4_open_ops = { 2634 .rpc_call_prepare = nfs4_open_prepare, 2635 .rpc_call_done = nfs4_open_done, 2636 .rpc_release = nfs4_open_release, 2637 }; 2638 2639 static int nfs4_run_open_task(struct nfs4_opendata *data, 2640 struct nfs_open_context *ctx) 2641 { 2642 struct inode *dir = d_inode(data->dir); 2643 struct nfs_server *server = NFS_SERVER(dir); 2644 struct nfs_openargs *o_arg = &data->o_arg; 2645 struct nfs_openres *o_res = &data->o_res; 2646 struct rpc_task *task; 2647 struct rpc_message msg = { 2648 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN], 2649 .rpc_argp = o_arg, 2650 .rpc_resp = o_res, 2651 .rpc_cred = data->owner->so_cred, 2652 }; 2653 struct rpc_task_setup task_setup_data = { 2654 .rpc_client = server->client, 2655 .rpc_message = &msg, 2656 .callback_ops = &nfs4_open_ops, 2657 .callback_data = data, 2658 .workqueue = nfsiod_workqueue, 2659 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 2660 }; 2661 int status; 2662 2663 if (nfs_server_capable(dir, NFS_CAP_MOVEABLE)) 2664 task_setup_data.flags |= RPC_TASK_MOVEABLE; 2665 2666 kref_get(&data->kref); 2667 data->rpc_done = false; 2668 data->rpc_status = 0; 2669 data->cancelled = false; 2670 data->is_recover = false; 2671 if (!ctx) { 2672 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 1); 2673 data->is_recover = true; 2674 task_setup_data.flags |= RPC_TASK_TIMEOUT; 2675 } else { 2676 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 0); 2677 pnfs_lgopen_prepare(data, ctx); 2678 } 2679 task = rpc_run_task(&task_setup_data); 2680 if (IS_ERR(task)) 2681 return PTR_ERR(task); 2682 status = rpc_wait_for_completion_task(task); 2683 if (status != 0) { 2684 data->cancelled = true; 2685 smp_wmb(); 2686 } else 2687 status = data->rpc_status; 2688 rpc_put_task(task); 2689 2690 return status; 2691 } 2692 2693 static int _nfs4_recover_proc_open(struct nfs4_opendata *data) 2694 { 2695 struct inode *dir = d_inode(data->dir); 2696 struct nfs_openres *o_res = &data->o_res; 2697 int status; 2698 2699 status = nfs4_run_open_task(data, NULL); 2700 if (status != 0 || !data->rpc_done) 2701 return status; 2702 2703 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr); 2704 2705 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) 2706 status = _nfs4_proc_open_confirm(data); 2707 2708 return status; 2709 } 2710 2711 /* 2712 * Additional permission checks in order to distinguish between an 2713 * open for read, and an open for execute. This works around the 2714 * fact that NFSv4 OPEN treats read and execute permissions as being 2715 * the same. 2716 * Note that in the non-execute case, we want to turn off permission 2717 * checking if we just created a new file (POSIX open() semantics). 
2718 */ 2719 static int nfs4_opendata_access(const struct cred *cred, 2720 struct nfs4_opendata *opendata, 2721 struct nfs4_state *state, fmode_t fmode) 2722 { 2723 struct nfs_access_entry cache; 2724 u32 mask, flags; 2725 2726 /* access call failed or for some reason the server doesn't 2727 * support any access modes -- defer access call until later */ 2728 if (opendata->o_res.access_supported == 0) 2729 return 0; 2730 2731 mask = 0; 2732 if (fmode & FMODE_EXEC) { 2733 /* ONLY check for exec rights */ 2734 if (S_ISDIR(state->inode->i_mode)) 2735 mask = NFS4_ACCESS_LOOKUP; 2736 else 2737 mask = NFS4_ACCESS_EXECUTE; 2738 } else if ((fmode & FMODE_READ) && !opendata->file_created) 2739 mask = NFS4_ACCESS_READ; 2740 2741 nfs_access_set_mask(&cache, opendata->o_res.access_result); 2742 nfs_access_add_cache(state->inode, &cache, cred); 2743 2744 flags = NFS4_ACCESS_READ | NFS4_ACCESS_EXECUTE | NFS4_ACCESS_LOOKUP; 2745 if ((mask & ~cache.mask & flags) == 0) 2746 return 0; 2747 2748 return -EACCES; 2749 } 2750 2751 /* 2752 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata 2753 */ 2754 static int _nfs4_proc_open(struct nfs4_opendata *data, 2755 struct nfs_open_context *ctx) 2756 { 2757 struct inode *dir = d_inode(data->dir); 2758 struct nfs_server *server = NFS_SERVER(dir); 2759 struct nfs_openargs *o_arg = &data->o_arg; 2760 struct nfs_openres *o_res = &data->o_res; 2761 int status; 2762 2763 status = nfs4_run_open_task(data, ctx); 2764 if (!data->rpc_done) 2765 return status; 2766 if (status != 0) { 2767 if (status == -NFS4ERR_BADNAME && 2768 !(o_arg->open_flags & O_CREAT)) 2769 return -ENOENT; 2770 return status; 2771 } 2772 2773 nfs_fattr_map_and_free_names(server, &data->f_attr); 2774 2775 if (o_arg->open_flags & O_CREAT) { 2776 if (o_arg->open_flags & O_EXCL) 2777 data->file_created = true; 2778 else if (o_res->cinfo.before != o_res->cinfo.after) 2779 data->file_created = true; 2780 if (data->file_created || 2781 inode_peek_iversion_raw(dir) != o_res->cinfo.after) 2782 nfs4_update_changeattr(dir, &o_res->cinfo, 2783 o_res->f_attr->time_start, 2784 NFS_INO_INVALID_DATA); 2785 } 2786 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) 2787 server->caps &= ~NFS_CAP_POSIX_LOCK; 2788 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 2789 status = _nfs4_proc_open_confirm(data); 2790 if (status != 0) 2791 return status; 2792 } 2793 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) { 2794 struct nfs_fh *fh = &o_res->fh; 2795 2796 nfs4_sequence_free_slot(&o_res->seq_res); 2797 if (o_arg->claim == NFS4_OPEN_CLAIM_FH) 2798 fh = NFS_FH(d_inode(data->dentry)); 2799 nfs4_proc_getattr(server, fh, o_res->f_attr, NULL); 2800 } 2801 return 0; 2802 } 2803 2804 /* 2805 * OPEN_EXPIRED: 2806 * reclaim state on the server after a network partition. 2807 * Assumes caller holds the appropriate lock 2808 */ 2809 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2810 { 2811 struct nfs4_opendata *opendata; 2812 int ret; 2813 2814 opendata = nfs4_open_recoverdata_alloc(ctx, state, NFS4_OPEN_CLAIM_FH); 2815 if (IS_ERR(opendata)) 2816 return PTR_ERR(opendata); 2817 /* 2818 * We're not recovering a delegation, so ask for no delegation. 2819 * Otherwise the recovery thread could deadlock with an outstanding 2820 * delegation return. 
2821 */ 2822 opendata->o_arg.open_flags = O_DIRECT; 2823 ret = nfs4_open_recover(opendata, state); 2824 if (ret == -ESTALE) 2825 d_drop(ctx->dentry); 2826 nfs4_opendata_put(opendata); 2827 return ret; 2828 } 2829 2830 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2831 { 2832 struct nfs_server *server = NFS_SERVER(state->inode); 2833 struct nfs4_exception exception = { }; 2834 int err; 2835 2836 do { 2837 err = _nfs4_open_expired(ctx, state); 2838 trace_nfs4_open_expired(ctx, 0, err); 2839 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 2840 continue; 2841 switch (err) { 2842 default: 2843 goto out; 2844 case -NFS4ERR_GRACE: 2845 case -NFS4ERR_DELAY: 2846 nfs4_handle_exception(server, err, &exception); 2847 err = 0; 2848 } 2849 } while (exception.retry); 2850 out: 2851 return err; 2852 } 2853 2854 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2855 { 2856 struct nfs_open_context *ctx; 2857 int ret; 2858 2859 ctx = nfs4_state_find_open_context(state); 2860 if (IS_ERR(ctx)) 2861 return -EAGAIN; 2862 ret = nfs4_do_open_expired(ctx, state); 2863 put_nfs_open_context(ctx); 2864 return ret; 2865 } 2866 2867 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state, 2868 const nfs4_stateid *stateid) 2869 { 2870 nfs_remove_bad_delegation(state->inode, stateid); 2871 nfs_state_clear_delegation(state); 2872 } 2873 2874 static void nfs40_clear_delegation_stateid(struct nfs4_state *state) 2875 { 2876 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL) 2877 nfs_finish_clear_delegation_stateid(state, NULL); 2878 } 2879 2880 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2881 { 2882 /* NFSv4.0 doesn't allow for delegation recovery on open expire */ 2883 nfs40_clear_delegation_stateid(state); 2884 nfs_state_clear_open_state_flags(state); 2885 return nfs4_open_expired(sp, state); 2886 } 2887 2888 static int nfs40_test_and_free_expired_stateid(struct nfs_server *server, 2889 const nfs4_stateid *stateid, 2890 const struct cred *cred) 2891 { 2892 return -NFS4ERR_BAD_STATEID; 2893 } 2894 2895 #if defined(CONFIG_NFS_V4_1) 2896 static int nfs41_test_and_free_expired_stateid(struct nfs_server *server, 2897 const nfs4_stateid *stateid, 2898 const struct cred *cred) 2899 { 2900 int status; 2901 2902 switch (stateid->type) { 2903 default: 2904 break; 2905 case NFS4_INVALID_STATEID_TYPE: 2906 case NFS4_SPECIAL_STATEID_TYPE: 2907 return -NFS4ERR_BAD_STATEID; 2908 case NFS4_REVOKED_STATEID_TYPE: 2909 goto out_free; 2910 } 2911 2912 status = nfs41_test_stateid(server, stateid, cred); 2913 switch (status) { 2914 case -NFS4ERR_EXPIRED: 2915 case -NFS4ERR_ADMIN_REVOKED: 2916 case -NFS4ERR_DELEG_REVOKED: 2917 break; 2918 default: 2919 return status; 2920 } 2921 out_free: 2922 /* Ack the revoked state to the server */ 2923 nfs41_free_stateid(server, stateid, cred, true); 2924 return -NFS4ERR_EXPIRED; 2925 } 2926 2927 static int nfs41_check_delegation_stateid(struct nfs4_state *state) 2928 { 2929 struct nfs_server *server = NFS_SERVER(state->inode); 2930 nfs4_stateid stateid; 2931 struct nfs_delegation *delegation; 2932 const struct cred *cred = NULL; 2933 int status, ret = NFS_OK; 2934 2935 /* Get the delegation credential for use by test/free_stateid */ 2936 rcu_read_lock(); 2937 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2938 if (delegation == NULL) { 2939 rcu_read_unlock(); 2940 nfs_state_clear_delegation(state); 2941 return NFS_OK; 2942 } 2943 2944 
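	/*
	 * Sample the delegation stateid and credential under the delegation
	 * lock so that the TEST_STATEID/FREE_STATEID calls below operate on
	 * a consistent snapshot.
	 */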
spin_lock(&delegation->lock); 2945 nfs4_stateid_copy(&stateid, &delegation->stateid); 2946 2947 if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, 2948 &delegation->flags)) { 2949 spin_unlock(&delegation->lock); 2950 rcu_read_unlock(); 2951 return NFS_OK; 2952 } 2953 2954 if (delegation->cred) 2955 cred = get_cred(delegation->cred); 2956 spin_unlock(&delegation->lock); 2957 rcu_read_unlock(); 2958 status = nfs41_test_and_free_expired_stateid(server, &stateid, cred); 2959 trace_nfs4_test_delegation_stateid(state, NULL, status); 2960 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) 2961 nfs_finish_clear_delegation_stateid(state, &stateid); 2962 else 2963 ret = status; 2964 2965 put_cred(cred); 2966 return ret; 2967 } 2968 2969 static void nfs41_delegation_recover_stateid(struct nfs4_state *state) 2970 { 2971 nfs4_stateid tmp; 2972 2973 if (test_bit(NFS_DELEGATED_STATE, &state->flags) && 2974 nfs4_copy_delegation_stateid(state->inode, state->state, 2975 &tmp, NULL) && 2976 nfs4_stateid_match_other(&state->stateid, &tmp)) 2977 nfs_state_set_delegation(state, &tmp, state->state); 2978 else 2979 nfs_state_clear_delegation(state); 2980 } 2981 2982 /** 2983 * nfs41_check_expired_locks - possibly free a lock stateid 2984 * 2985 * @state: NFSv4 state for an inode 2986 * 2987 * Returns NFS_OK if recovery for this stateid is now finished. 2988 * Otherwise a negative NFS4ERR value is returned. 2989 */ 2990 static int nfs41_check_expired_locks(struct nfs4_state *state) 2991 { 2992 int status, ret = NFS_OK; 2993 struct nfs4_lock_state *lsp, *prev = NULL; 2994 struct nfs_server *server = NFS_SERVER(state->inode); 2995 2996 if (!test_bit(LK_STATE_IN_USE, &state->flags)) 2997 goto out; 2998 2999 spin_lock(&state->state_lock); 3000 list_for_each_entry(lsp, &state->lock_states, ls_locks) { 3001 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { 3002 const struct cred *cred = lsp->ls_state->owner->so_cred; 3003 3004 refcount_inc(&lsp->ls_count); 3005 spin_unlock(&state->state_lock); 3006 3007 nfs4_put_lock_state(prev); 3008 prev = lsp; 3009 3010 status = nfs41_test_and_free_expired_stateid(server, 3011 &lsp->ls_stateid, 3012 cred); 3013 trace_nfs4_test_lock_stateid(state, lsp, status); 3014 if (status == -NFS4ERR_EXPIRED || 3015 status == -NFS4ERR_BAD_STATEID) { 3016 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 3017 lsp->ls_stateid.type = NFS4_INVALID_STATEID_TYPE; 3018 if (!recover_lost_locks) 3019 set_bit(NFS_LOCK_LOST, &lsp->ls_flags); 3020 } else if (status != NFS_OK) { 3021 ret = status; 3022 nfs4_put_lock_state(prev); 3023 goto out; 3024 } 3025 spin_lock(&state->state_lock); 3026 } 3027 } 3028 spin_unlock(&state->state_lock); 3029 nfs4_put_lock_state(prev); 3030 out: 3031 return ret; 3032 } 3033 3034 /** 3035 * nfs41_check_open_stateid - possibly free an open stateid 3036 * 3037 * @state: NFSv4 state for an inode 3038 * 3039 * Returns NFS_OK if recovery for this stateid is now finished. 3040 * Otherwise a negative NFS4ERR value is returned. 
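 *
 * The open stateid is probed with TEST_STATEID and, if the server reports
 * it as expired or revoked, acknowledged and released with FREE_STATEID.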
 */
static int nfs41_check_open_stateid(struct nfs4_state *state)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	nfs4_stateid *stateid = &state->open_stateid;
	const struct cred *cred = state->owner->so_cred;
	int status;

	if (test_bit(NFS_OPEN_STATE, &state->flags) == 0)
		return -NFS4ERR_BAD_STATEID;
	status = nfs41_test_and_free_expired_stateid(server, stateid, cred);
	trace_nfs4_test_open_stateid(state, NULL, status);
	if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) {
		nfs_state_clear_open_state_flags(state);
		stateid->type = NFS4_INVALID_STATEID_TYPE;
		return status;
	}
	if (nfs_open_stateid_recover_openmode(state))
		return -NFS4ERR_OPENMODE;
	return NFS_OK;
}

static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
	int status;

	status = nfs41_check_delegation_stateid(state);
	if (status != NFS_OK)
		return status;
	nfs41_delegation_recover_stateid(state);

	status = nfs41_check_expired_locks(state);
	if (status != NFS_OK)
		return status;
	status = nfs41_check_open_stateid(state);
	if (status != NFS_OK)
		status = nfs4_open_expired(sp, state);
	return status;
}
#endif

/*
 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-*
 * fields corresponding to attributes that were used to store the verifier.
 * Make sure we clobber those fields in the later setattr call
 */
static unsigned nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
		struct iattr *sattr, struct nfs4_label **label)
{
	const __u32 *bitmask = opendata->o_arg.server->exclcreat_bitmask;
	__u32 attrset[3];
	unsigned ret;
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(attrset); i++) {
		attrset[i] = opendata->o_res.attrset[i];
		if (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE4_1)
			attrset[i] &= ~bitmask[i];
	}

	ret = (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE) ?
3102 sattr->ia_valid : 0; 3103 3104 if ((attrset[1] & (FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET))) { 3105 if (sattr->ia_valid & ATTR_ATIME_SET) 3106 ret |= ATTR_ATIME_SET; 3107 else 3108 ret |= ATTR_ATIME; 3109 } 3110 3111 if ((attrset[1] & (FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET))) { 3112 if (sattr->ia_valid & ATTR_MTIME_SET) 3113 ret |= ATTR_MTIME_SET; 3114 else 3115 ret |= ATTR_MTIME; 3116 } 3117 3118 if (!(attrset[2] & FATTR4_WORD2_SECURITY_LABEL)) 3119 *label = NULL; 3120 return ret; 3121 } 3122 3123 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, 3124 struct nfs_open_context *ctx) 3125 { 3126 struct nfs4_state_owner *sp = opendata->owner; 3127 struct nfs_server *server = sp->so_server; 3128 struct dentry *dentry; 3129 struct nfs4_state *state; 3130 fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx); 3131 struct inode *dir = d_inode(opendata->dir); 3132 unsigned long dir_verifier; 3133 int ret; 3134 3135 dir_verifier = nfs_save_change_attribute(dir); 3136 3137 ret = _nfs4_proc_open(opendata, ctx); 3138 if (ret != 0) 3139 goto out; 3140 3141 state = _nfs4_opendata_to_nfs4_state(opendata); 3142 ret = PTR_ERR(state); 3143 if (IS_ERR(state)) 3144 goto out; 3145 ctx->state = state; 3146 if (server->caps & NFS_CAP_POSIX_LOCK) 3147 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); 3148 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK) 3149 set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags); 3150 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_PRESERVE_UNLINKED) 3151 set_bit(NFS_INO_PRESERVE_UNLINKED, &NFS_I(state->inode)->flags); 3152 3153 dentry = opendata->dentry; 3154 if (d_really_is_negative(dentry)) { 3155 struct dentry *alias; 3156 d_drop(dentry); 3157 alias = d_splice_alias(igrab(state->inode), dentry); 3158 /* d_splice_alias() can't fail here - it's a non-directory */ 3159 if (alias) { 3160 dput(ctx->dentry); 3161 ctx->dentry = dentry = alias; 3162 } 3163 } 3164 3165 switch(opendata->o_arg.claim) { 3166 default: 3167 break; 3168 case NFS4_OPEN_CLAIM_NULL: 3169 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 3170 case NFS4_OPEN_CLAIM_DELEGATE_PREV: 3171 if (!opendata->rpc_done) 3172 break; 3173 if (opendata->o_res.delegation.type != 0) 3174 dir_verifier = nfs_save_change_attribute(dir); 3175 nfs_set_verifier(dentry, dir_verifier); 3176 } 3177 3178 /* Parse layoutget results before we check for access */ 3179 pnfs_parse_lgopen(state->inode, opendata->lgp, ctx); 3180 3181 ret = nfs4_opendata_access(sp->so_cred, opendata, state, acc_mode); 3182 if (ret != 0) 3183 goto out; 3184 3185 if (d_inode(dentry) == state->inode) 3186 nfs_inode_attach_open_context(ctx); 3187 3188 out: 3189 if (!opendata->cancelled) { 3190 if (opendata->lgp) { 3191 nfs4_lgopen_release(opendata->lgp); 3192 opendata->lgp = NULL; 3193 } 3194 nfs4_sequence_free_slot(&opendata->o_res.seq_res); 3195 } 3196 return ret; 3197 } 3198 3199 /* 3200 * Returns a referenced nfs4_state 3201 */ 3202 static int _nfs4_do_open(struct inode *dir, 3203 struct nfs_open_context *ctx, 3204 int flags, 3205 const struct nfs4_open_createattrs *c, 3206 int *opened) 3207 { 3208 struct nfs4_state_owner *sp; 3209 struct nfs4_state *state = NULL; 3210 struct nfs_server *server = NFS_SERVER(dir); 3211 struct nfs4_opendata *opendata; 3212 struct dentry *dentry = ctx->dentry; 3213 const struct cred *cred = ctx->cred; 3214 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold; 3215 fmode_t fmode = _nfs4_ctx_to_openmode(ctx); 3216 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL; 3217 struct iattr *sattr = 
c->sattr; 3218 struct nfs4_label *label = c->label; 3219 int status; 3220 3221 /* Protect against reboot recovery conflicts */ 3222 status = -ENOMEM; 3223 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL); 3224 if (sp == NULL) { 3225 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n"); 3226 goto out_err; 3227 } 3228 status = nfs4_client_recover_expired_lease(server->nfs_client); 3229 if (status != 0) 3230 goto err_put_state_owner; 3231 if (d_really_is_positive(dentry)) 3232 nfs4_return_incompatible_delegation(d_inode(dentry), fmode); 3233 status = -ENOMEM; 3234 if (d_really_is_positive(dentry)) 3235 claim = NFS4_OPEN_CLAIM_FH; 3236 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, 3237 c, claim, GFP_KERNEL); 3238 if (opendata == NULL) 3239 goto err_put_state_owner; 3240 3241 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) { 3242 if (!opendata->f_attr.mdsthreshold) { 3243 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); 3244 if (!opendata->f_attr.mdsthreshold) 3245 goto err_opendata_put; 3246 } 3247 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; 3248 } 3249 if (d_really_is_positive(dentry)) 3250 opendata->state = nfs4_get_open_state(d_inode(dentry), sp); 3251 3252 status = _nfs4_open_and_get_state(opendata, ctx); 3253 if (status != 0) 3254 goto err_opendata_put; 3255 state = ctx->state; 3256 3257 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) && 3258 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) { 3259 unsigned attrs = nfs4_exclusive_attrset(opendata, sattr, &label); 3260 /* 3261 * send create attributes which was not set by open 3262 * with an extra setattr. 3263 */ 3264 if (attrs || label) { 3265 unsigned ia_old = sattr->ia_valid; 3266 3267 sattr->ia_valid = attrs; 3268 nfs_fattr_init(opendata->o_res.f_attr); 3269 status = nfs4_do_setattr(state->inode, cred, 3270 opendata->o_res.f_attr, sattr, 3271 ctx, label); 3272 if (status == 0) { 3273 nfs_setattr_update_inode(state->inode, sattr, 3274 opendata->o_res.f_attr); 3275 nfs_setsecurity(state->inode, opendata->o_res.f_attr); 3276 } 3277 sattr->ia_valid = ia_old; 3278 } 3279 } 3280 if (opened && opendata->file_created) 3281 *opened = 1; 3282 3283 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) { 3284 *ctx_th = opendata->f_attr.mdsthreshold; 3285 opendata->f_attr.mdsthreshold = NULL; 3286 } 3287 3288 nfs4_opendata_put(opendata); 3289 nfs4_put_state_owner(sp); 3290 return 0; 3291 err_opendata_put: 3292 nfs4_opendata_put(opendata); 3293 err_put_state_owner: 3294 nfs4_put_state_owner(sp); 3295 out_err: 3296 return status; 3297 } 3298 3299 3300 static struct nfs4_state *nfs4_do_open(struct inode *dir, 3301 struct nfs_open_context *ctx, 3302 int flags, 3303 struct iattr *sattr, 3304 struct nfs4_label *label, 3305 int *opened) 3306 { 3307 struct nfs_server *server = NFS_SERVER(dir); 3308 struct nfs4_exception exception = { 3309 .interruptible = true, 3310 }; 3311 struct nfs4_state *res; 3312 struct nfs4_open_createattrs c = { 3313 .label = label, 3314 .sattr = sattr, 3315 .verf = { 3316 [0] = (__u32)jiffies, 3317 [1] = (__u32)current->pid, 3318 }, 3319 }; 3320 int status; 3321 3322 do { 3323 status = _nfs4_do_open(dir, ctx, flags, &c, opened); 3324 res = ctx->state; 3325 trace_nfs4_open_file(ctx, flags, status); 3326 if (status == 0) 3327 break; 3328 /* NOTE: BAD_SEQID means the server and client disagree about the 3329 * book-keeping w.r.t. state-changing operations 3330 * (OPEN/CLOSE/LOCK/LOCKU...) 
3331 * It is actually a sign of a bug on the client or on the server. 3332 * 3333 * If we receive a BAD_SEQID error in the particular case of 3334 * doing an OPEN, we assume that nfs_increment_open_seqid() will 3335 * have unhashed the old state_owner for us, and that we can 3336 * therefore safely retry using a new one. We should still warn 3337 * the user though... 3338 */ 3339 if (status == -NFS4ERR_BAD_SEQID) { 3340 pr_warn_ratelimited("NFS: v4 server %s " 3341 " returned a bad sequence-id error!\n", 3342 NFS_SERVER(dir)->nfs_client->cl_hostname); 3343 exception.retry = 1; 3344 continue; 3345 } 3346 /* 3347 * BAD_STATEID on OPEN means that the server cancelled our 3348 * state before it received the OPEN_CONFIRM. 3349 * Recover by retrying the request as per the discussion 3350 * on Page 181 of RFC3530. 3351 */ 3352 if (status == -NFS4ERR_BAD_STATEID) { 3353 exception.retry = 1; 3354 continue; 3355 } 3356 if (status == -NFS4ERR_EXPIRED) { 3357 nfs4_schedule_lease_recovery(server->nfs_client); 3358 exception.retry = 1; 3359 continue; 3360 } 3361 if (status == -EAGAIN) { 3362 /* We must have found a delegation */ 3363 exception.retry = 1; 3364 continue; 3365 } 3366 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception)) 3367 continue; 3368 res = ERR_PTR(nfs4_handle_exception(server, 3369 status, &exception)); 3370 } while (exception.retry); 3371 return res; 3372 } 3373 3374 static int _nfs4_do_setattr(struct inode *inode, 3375 struct nfs_setattrargs *arg, 3376 struct nfs_setattrres *res, 3377 const struct cred *cred, 3378 struct nfs_open_context *ctx) 3379 { 3380 struct nfs_server *server = NFS_SERVER(inode); 3381 struct rpc_message msg = { 3382 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 3383 .rpc_argp = arg, 3384 .rpc_resp = res, 3385 .rpc_cred = cred, 3386 }; 3387 const struct cred *delegation_cred = NULL; 3388 unsigned long timestamp = jiffies; 3389 bool truncate; 3390 int status; 3391 3392 nfs_fattr_init(res->fattr); 3393 3394 /* Servers should only apply open mode checks for file size changes */ 3395 truncate = (arg->iap->ia_valid & ATTR_SIZE) ? 
true : false; 3396 if (!truncate) { 3397 nfs4_inode_make_writeable(inode); 3398 goto zero_stateid; 3399 } 3400 3401 if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) { 3402 /* Use that stateid */ 3403 } else if (ctx != NULL && ctx->state) { 3404 struct nfs_lock_context *l_ctx; 3405 if (!nfs4_valid_open_stateid(ctx->state)) 3406 return -EBADF; 3407 l_ctx = nfs_get_lock_context(ctx); 3408 if (IS_ERR(l_ctx)) 3409 return PTR_ERR(l_ctx); 3410 status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx, 3411 &arg->stateid, &delegation_cred); 3412 nfs_put_lock_context(l_ctx); 3413 if (status == -EIO) 3414 return -EBADF; 3415 else if (status == -EAGAIN) 3416 goto zero_stateid; 3417 } else { 3418 zero_stateid: 3419 nfs4_stateid_copy(&arg->stateid, &zero_stateid); 3420 } 3421 if (delegation_cred) 3422 msg.rpc_cred = delegation_cred; 3423 3424 status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1); 3425 3426 put_cred(delegation_cred); 3427 if (status == 0 && ctx != NULL) 3428 renew_lease(server, timestamp); 3429 trace_nfs4_setattr(inode, &arg->stateid, status); 3430 return status; 3431 } 3432 3433 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred, 3434 struct nfs_fattr *fattr, struct iattr *sattr, 3435 struct nfs_open_context *ctx, struct nfs4_label *ilabel) 3436 { 3437 struct nfs_server *server = NFS_SERVER(inode); 3438 __u32 bitmask[NFS4_BITMASK_SZ]; 3439 struct nfs4_state *state = ctx ? ctx->state : NULL; 3440 struct nfs_setattrargs arg = { 3441 .fh = NFS_FH(inode), 3442 .iap = sattr, 3443 .server = server, 3444 .bitmask = bitmask, 3445 .label = ilabel, 3446 }; 3447 struct nfs_setattrres res = { 3448 .fattr = fattr, 3449 .server = server, 3450 }; 3451 struct nfs4_exception exception = { 3452 .state = state, 3453 .inode = inode, 3454 .stateid = &arg.stateid, 3455 }; 3456 unsigned long adjust_flags = NFS_INO_INVALID_CHANGE | 3457 NFS_INO_INVALID_CTIME; 3458 int err; 3459 3460 if (sattr->ia_valid & (ATTR_MODE | ATTR_KILL_SUID | ATTR_KILL_SGID)) 3461 adjust_flags |= NFS_INO_INVALID_MODE; 3462 if (sattr->ia_valid & (ATTR_UID | ATTR_GID)) 3463 adjust_flags |= NFS_INO_INVALID_OTHER; 3464 if (sattr->ia_valid & ATTR_ATIME) 3465 adjust_flags |= NFS_INO_INVALID_ATIME; 3466 if (sattr->ia_valid & ATTR_MTIME) 3467 adjust_flags |= NFS_INO_INVALID_MTIME; 3468 3469 do { 3470 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label), 3471 inode, adjust_flags); 3472 3473 err = _nfs4_do_setattr(inode, &arg, &res, cred, ctx); 3474 switch (err) { 3475 case -NFS4ERR_OPENMODE: 3476 if (!(sattr->ia_valid & ATTR_SIZE)) { 3477 pr_warn_once("NFSv4: server %s is incorrectly " 3478 "applying open mode checks to " 3479 "a SETATTR that is not " 3480 "changing file size.\n", 3481 server->nfs_client->cl_hostname); 3482 } 3483 if (state && !(state->state & FMODE_WRITE)) { 3484 err = -EBADF; 3485 if (sattr->ia_valid & ATTR_OPEN) 3486 err = -EACCES; 3487 goto out; 3488 } 3489 } 3490 err = nfs4_handle_exception(server, err, &exception); 3491 } while (exception.retry); 3492 out: 3493 return err; 3494 } 3495 3496 static bool 3497 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task) 3498 { 3499 if (inode == NULL || !nfs_have_layout(inode)) 3500 return false; 3501 3502 return pnfs_wait_on_layoutreturn(inode, task); 3503 } 3504 3505 /* 3506 * Update the seqid of an open stateid 3507 */ 3508 static void nfs4_sync_open_stateid(nfs4_stateid *dst, 3509 struct nfs4_state *state) 3510 { 3511 __be32 seqid_open; 3512 u32 
dst_seqid; 3513 int seq; 3514 3515 for (;;) { 3516 if (!nfs4_valid_open_stateid(state)) 3517 break; 3518 seq = read_seqbegin(&state->seqlock); 3519 if (!nfs4_state_match_open_stateid_other(state, dst)) { 3520 nfs4_stateid_copy(dst, &state->open_stateid); 3521 if (read_seqretry(&state->seqlock, seq)) 3522 continue; 3523 break; 3524 } 3525 seqid_open = state->open_stateid.seqid; 3526 if (read_seqretry(&state->seqlock, seq)) 3527 continue; 3528 3529 dst_seqid = be32_to_cpu(dst->seqid); 3530 if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) < 0) 3531 dst->seqid = seqid_open; 3532 break; 3533 } 3534 } 3535 3536 /* 3537 * Update the seqid of an open stateid after receiving 3538 * NFS4ERR_OLD_STATEID 3539 */ 3540 static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst, 3541 struct nfs4_state *state) 3542 { 3543 __be32 seqid_open; 3544 u32 dst_seqid; 3545 bool ret; 3546 int seq, status = -EAGAIN; 3547 DEFINE_WAIT(wait); 3548 3549 for (;;) { 3550 ret = false; 3551 if (!nfs4_valid_open_stateid(state)) 3552 break; 3553 seq = read_seqbegin(&state->seqlock); 3554 if (!nfs4_state_match_open_stateid_other(state, dst)) { 3555 if (read_seqretry(&state->seqlock, seq)) 3556 continue; 3557 break; 3558 } 3559 3560 write_seqlock(&state->seqlock); 3561 seqid_open = state->open_stateid.seqid; 3562 3563 dst_seqid = be32_to_cpu(dst->seqid); 3564 3565 /* Did another OPEN bump the state's seqid? try again: */ 3566 if ((s32)(be32_to_cpu(seqid_open) - dst_seqid) > 0) { 3567 dst->seqid = seqid_open; 3568 write_sequnlock(&state->seqlock); 3569 ret = true; 3570 break; 3571 } 3572 3573 /* server says we're behind but we haven't seen the update yet */ 3574 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags); 3575 prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE); 3576 write_sequnlock(&state->seqlock); 3577 trace_nfs4_close_stateid_update_wait(state->inode, dst, 0); 3578 3579 if (fatal_signal_pending(current)) 3580 status = -EINTR; 3581 else 3582 if (schedule_timeout(5*HZ) != 0) 3583 status = 0; 3584 3585 finish_wait(&state->waitq, &wait); 3586 3587 if (!status) 3588 continue; 3589 if (status == -EINTR) 3590 break; 3591 3592 /* we slept the whole 5 seconds, we must have lost a seqid */ 3593 dst->seqid = cpu_to_be32(dst_seqid + 1); 3594 ret = true; 3595 break; 3596 } 3597 3598 return ret; 3599 } 3600 3601 struct nfs4_closedata { 3602 struct inode *inode; 3603 struct nfs4_state *state; 3604 struct nfs_closeargs arg; 3605 struct nfs_closeres res; 3606 struct { 3607 struct nfs4_layoutreturn_args arg; 3608 struct nfs4_layoutreturn_res res; 3609 struct nfs4_xdr_opaque_data ld_private; 3610 u32 roc_barrier; 3611 bool roc; 3612 } lr; 3613 struct nfs_fattr fattr; 3614 unsigned long timestamp; 3615 }; 3616 3617 static void nfs4_free_closedata(void *data) 3618 { 3619 struct nfs4_closedata *calldata = data; 3620 struct nfs4_state_owner *sp = calldata->state->owner; 3621 struct super_block *sb = calldata->state->inode->i_sb; 3622 3623 if (calldata->lr.roc) 3624 pnfs_roc_release(&calldata->lr.arg, &calldata->lr.res, 3625 calldata->res.lr_ret); 3626 nfs4_put_open_state(calldata->state); 3627 nfs_free_seqid(calldata->arg.seqid); 3628 nfs4_put_state_owner(sp); 3629 nfs_sb_deactive(sb); 3630 kfree(calldata); 3631 } 3632 3633 static void nfs4_close_done(struct rpc_task *task, void *data) 3634 { 3635 struct nfs4_closedata *calldata = data; 3636 struct nfs4_state *state = calldata->state; 3637 struct nfs_server *server = NFS_SERVER(calldata->inode); 3638 nfs4_stateid *res_stateid = NULL; 3639 struct nfs4_exception exception = { 3640 .state = 
state, 3641 .inode = calldata->inode, 3642 .stateid = &calldata->arg.stateid, 3643 }; 3644 3645 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 3646 return; 3647 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status); 3648 3649 /* Handle Layoutreturn errors */ 3650 if (pnfs_roc_done(task, &calldata->arg.lr_args, &calldata->res.lr_res, 3651 &calldata->res.lr_ret) == -EAGAIN) 3652 goto out_restart; 3653 3654 /* hmm. we are done with the inode, and in the process of freeing 3655 * the state_owner. we keep this around to process errors 3656 */ 3657 switch (task->tk_status) { 3658 case 0: 3659 res_stateid = &calldata->res.stateid; 3660 renew_lease(server, calldata->timestamp); 3661 break; 3662 case -NFS4ERR_ACCESS: 3663 if (calldata->arg.bitmask != NULL) { 3664 calldata->arg.bitmask = NULL; 3665 calldata->res.fattr = NULL; 3666 goto out_restart; 3667 3668 } 3669 break; 3670 case -NFS4ERR_OLD_STATEID: 3671 /* Did we race with OPEN? */ 3672 if (nfs4_refresh_open_old_stateid(&calldata->arg.stateid, 3673 state)) 3674 goto out_restart; 3675 goto out_release; 3676 case -NFS4ERR_ADMIN_REVOKED: 3677 case -NFS4ERR_STALE_STATEID: 3678 case -NFS4ERR_EXPIRED: 3679 nfs4_free_revoked_stateid(server, 3680 &calldata->arg.stateid, 3681 task->tk_msg.rpc_cred); 3682 fallthrough; 3683 case -NFS4ERR_BAD_STATEID: 3684 if (calldata->arg.fmode == 0) 3685 break; 3686 fallthrough; 3687 default: 3688 task->tk_status = nfs4_async_handle_exception(task, 3689 server, task->tk_status, &exception); 3690 if (exception.retry) 3691 goto out_restart; 3692 } 3693 nfs_clear_open_stateid(state, &calldata->arg.stateid, 3694 res_stateid, calldata->arg.fmode); 3695 out_release: 3696 task->tk_status = 0; 3697 nfs_release_seqid(calldata->arg.seqid); 3698 nfs_refresh_inode(calldata->inode, &calldata->fattr); 3699 dprintk("%s: ret = %d\n", __func__, task->tk_status); 3700 return; 3701 out_restart: 3702 task->tk_status = 0; 3703 rpc_restart_call_prepare(task); 3704 goto out_release; 3705 } 3706 3707 static void nfs4_close_prepare(struct rpc_task *task, void *data) 3708 { 3709 struct nfs4_closedata *calldata = data; 3710 struct nfs4_state *state = calldata->state; 3711 struct inode *inode = calldata->inode; 3712 struct nfs_server *server = NFS_SERVER(inode); 3713 struct pnfs_layout_hdr *lo; 3714 bool is_rdonly, is_wronly, is_rdwr; 3715 int call_close = 0; 3716 3717 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 3718 goto out_wait; 3719 3720 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; 3721 spin_lock(&state->owner->so_lock); 3722 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags); 3723 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags); 3724 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags); 3725 /* Calculate the change in open mode */ 3726 calldata->arg.fmode = 0; 3727 if (state->n_rdwr == 0) { 3728 if (state->n_rdonly == 0) 3729 call_close |= is_rdonly; 3730 else if (is_rdonly) 3731 calldata->arg.fmode |= FMODE_READ; 3732 if (state->n_wronly == 0) 3733 call_close |= is_wronly; 3734 else if (is_wronly) 3735 calldata->arg.fmode |= FMODE_WRITE; 3736 if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE)) 3737 call_close |= is_rdwr; 3738 } else if (is_rdwr) 3739 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE; 3740 3741 nfs4_sync_open_stateid(&calldata->arg.stateid, state); 3742 if (!nfs4_valid_open_stateid(state)) 3743 call_close = 0; 3744 spin_unlock(&state->owner->so_lock); 3745 3746 if (!call_close) { 3747 /* Note: exit _without_ calling nfs4_close_done */ 3748 goto 
out_no_action;
	}

	if (!calldata->lr.roc && nfs4_wait_on_layoutreturn(inode, task)) {
		nfs_release_seqid(calldata->arg.seqid);
		goto out_wait;
	}

	lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL;
	if (lo && !pnfs_layout_is_valid(lo)) {
		calldata->arg.lr_args = NULL;
		calldata->res.lr_res = NULL;
	}

	if (calldata->arg.fmode == 0)
		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];

	if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) {
		/* Close-to-open cache consistency revalidation */
		if (!nfs4_have_delegation(inode, FMODE_READ, 0)) {
			nfs4_bitmask_set(calldata->arg.bitmask_store,
					server->cache_consistency_bitmask,
					inode, 0);
			calldata->arg.bitmask = calldata->arg.bitmask_store;
		} else
			calldata->arg.bitmask = NULL;
	}

	calldata->arg.share_access =
		nfs4_fmode_to_share_access(calldata->arg.fmode);

	if (calldata->res.fattr == NULL)
		calldata->arg.bitmask = NULL;
	else if (calldata->arg.bitmask == NULL)
		calldata->res.fattr = NULL;
	calldata->timestamp = jiffies;
	if (nfs4_setup_sequence(NFS_SERVER(inode)->nfs_client,
				&calldata->arg.seq_args,
				&calldata->res.seq_res,
				task) != 0)
		nfs_release_seqid(calldata->arg.seqid);
	return;
out_no_action:
	task->tk_action = NULL;
out_wait:
	nfs4_sequence_done(task, &calldata->res.seq_res);
}

static const struct rpc_call_ops nfs4_close_ops = {
	.rpc_call_prepare = nfs4_close_prepare,
	.rpc_call_done = nfs4_close_done,
	.rpc_release = nfs4_free_closedata,
};

/*
 * It is possible for data to be read/written from a mem-mapped file
 * after the sys_close call (which hits the vfs layer as a flush).
 * This means that we can't safely call NFSv4 close on a file until
 * the inode is cleared. This in turn means that we are not good
 * NFSv4 citizens - we do not tell the server to update the file's
 * share state even when we are done with one of the three share
 * stateids in the inode.
 *
 * NOTE: Caller must be holding the sp->so_owner semaphore!
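 *
 * The 'wait' argument selects between waiting for the CLOSE RPC to
 * complete and letting it complete asynchronously in the background.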
3812 */ 3813 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) 3814 { 3815 struct nfs_server *server = NFS_SERVER(state->inode); 3816 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 3817 struct nfs4_closedata *calldata; 3818 struct nfs4_state_owner *sp = state->owner; 3819 struct rpc_task *task; 3820 struct rpc_message msg = { 3821 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE], 3822 .rpc_cred = state->owner->so_cred, 3823 }; 3824 struct rpc_task_setup task_setup_data = { 3825 .rpc_client = server->client, 3826 .rpc_message = &msg, 3827 .callback_ops = &nfs4_close_ops, 3828 .workqueue = nfsiod_workqueue, 3829 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 3830 }; 3831 int status = -ENOMEM; 3832 3833 if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE)) 3834 task_setup_data.flags |= RPC_TASK_MOVEABLE; 3835 3836 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP, 3837 &task_setup_data.rpc_client, &msg); 3838 3839 calldata = kzalloc(sizeof(*calldata), gfp_mask); 3840 if (calldata == NULL) 3841 goto out; 3842 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1, 0); 3843 calldata->inode = state->inode; 3844 calldata->state = state; 3845 calldata->arg.fh = NFS_FH(state->inode); 3846 if (!nfs4_copy_open_stateid(&calldata->arg.stateid, state)) 3847 goto out_free_calldata; 3848 /* Serialization for the sequence id */ 3849 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 3850 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask); 3851 if (IS_ERR(calldata->arg.seqid)) 3852 goto out_free_calldata; 3853 nfs_fattr_init(&calldata->fattr); 3854 calldata->arg.fmode = 0; 3855 calldata->lr.arg.ld_private = &calldata->lr.ld_private; 3856 calldata->res.fattr = &calldata->fattr; 3857 calldata->res.seqid = calldata->arg.seqid; 3858 calldata->res.server = server; 3859 calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT; 3860 calldata->lr.roc = pnfs_roc(state->inode, 3861 &calldata->lr.arg, &calldata->lr.res, msg.rpc_cred); 3862 if (calldata->lr.roc) { 3863 calldata->arg.lr_args = &calldata->lr.arg; 3864 calldata->res.lr_res = &calldata->lr.res; 3865 } 3866 nfs_sb_active(calldata->inode->i_sb); 3867 3868 msg.rpc_argp = &calldata->arg; 3869 msg.rpc_resp = &calldata->res; 3870 task_setup_data.callback_data = calldata; 3871 task = rpc_run_task(&task_setup_data); 3872 if (IS_ERR(task)) 3873 return PTR_ERR(task); 3874 status = 0; 3875 if (wait) 3876 status = rpc_wait_for_completion_task(task); 3877 rpc_put_task(task); 3878 return status; 3879 out_free_calldata: 3880 kfree(calldata); 3881 out: 3882 nfs4_put_open_state(state); 3883 nfs4_put_state_owner(sp); 3884 return status; 3885 } 3886 3887 static struct inode * 3888 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, 3889 int open_flags, struct iattr *attr, int *opened) 3890 { 3891 struct nfs4_state *state; 3892 struct nfs4_label l, *label; 3893 3894 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l); 3895 3896 /* Protect against concurrent sillydeletes */ 3897 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened); 3898 3899 nfs4_label_release_security(label); 3900 3901 if (IS_ERR(state)) 3902 return ERR_CAST(state); 3903 return state->inode; 3904 } 3905 3906 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) 3907 { 3908 struct dentry *dentry = ctx->dentry; 3909 if (ctx->state == NULL) 3910 return; 3911 if (dentry->d_flags & DCACHE_NFSFS_RENAMED) 3912 nfs4_inode_set_return_delegation_on_close(d_inode(dentry)); 
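	/* Synchronous callers wait for the CLOSE to complete; otherwise it
	 * runs asynchronously on the nfsiod workqueue.
	 */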
3913 if (is_sync) 3914 nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx)); 3915 else 3916 nfs4_close_state(ctx->state, _nfs4_ctx_to_openmode(ctx)); 3917 } 3918 3919 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL) 3920 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL) 3921 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_OPEN_ARGUMENTS - 1UL) 3922 3923 #define FATTR4_WORD2_NFS42_TIME_DELEG_MASK \ 3924 (FATTR4_WORD2_TIME_DELEG_MODIFY|FATTR4_WORD2_TIME_DELEG_ACCESS) 3925 static bool nfs4_server_delegtime_capable(struct nfs4_server_caps_res *res) 3926 { 3927 u32 share_access_want = res->open_caps.oa_share_access_want[0]; 3928 u32 attr_bitmask = res->attr_bitmask[2]; 3929 3930 return (share_access_want & NFS4_SHARE_WANT_DELEG_TIMESTAMPS) && 3931 ((attr_bitmask & FATTR4_WORD2_NFS42_TIME_DELEG_MASK) == 3932 FATTR4_WORD2_NFS42_TIME_DELEG_MASK); 3933 } 3934 3935 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 3936 { 3937 u32 minorversion = server->nfs_client->cl_minorversion; 3938 u32 bitmask[3] = { 3939 [0] = FATTR4_WORD0_SUPPORTED_ATTRS, 3940 }; 3941 struct nfs4_server_caps_arg args = { 3942 .fhandle = fhandle, 3943 .bitmask = bitmask, 3944 }; 3945 struct nfs4_server_caps_res res = {}; 3946 struct rpc_message msg = { 3947 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS], 3948 .rpc_argp = &args, 3949 .rpc_resp = &res, 3950 }; 3951 int status; 3952 int i; 3953 3954 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS | 3955 FATTR4_WORD0_FH_EXPIRE_TYPE | 3956 FATTR4_WORD0_LINK_SUPPORT | 3957 FATTR4_WORD0_SYMLINK_SUPPORT | 3958 FATTR4_WORD0_ACLSUPPORT | 3959 FATTR4_WORD0_CASE_INSENSITIVE | 3960 FATTR4_WORD0_CASE_PRESERVING; 3961 if (minorversion) 3962 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT | 3963 FATTR4_WORD2_OPEN_ARGUMENTS; 3964 3965 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3966 if (status == 0) { 3967 bitmask[0] = (FATTR4_WORD0_SUPPORTED_ATTRS | 3968 FATTR4_WORD0_FH_EXPIRE_TYPE | 3969 FATTR4_WORD0_LINK_SUPPORT | 3970 FATTR4_WORD0_SYMLINK_SUPPORT | 3971 FATTR4_WORD0_ACLSUPPORT | 3972 FATTR4_WORD0_CASE_INSENSITIVE | 3973 FATTR4_WORD0_CASE_PRESERVING) & 3974 res.attr_bitmask[0]; 3975 /* Sanity check the server answers */ 3976 switch (minorversion) { 3977 case 0: 3978 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK; 3979 res.attr_bitmask[2] = 0; 3980 break; 3981 case 1: 3982 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK; 3983 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT & 3984 res.attr_bitmask[2]; 3985 break; 3986 case 2: 3987 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK; 3988 bitmask[2] = (FATTR4_WORD2_SUPPATTR_EXCLCREAT | 3989 FATTR4_WORD2_OPEN_ARGUMENTS) & 3990 res.attr_bitmask[2]; 3991 } 3992 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); 3993 server->caps &= ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS | 3994 NFS_CAP_SYMLINKS| NFS_CAP_SECURITY_LABEL); 3995 server->fattr_valid = NFS_ATTR_FATTR_V4; 3996 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL && 3997 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) 3998 server->caps |= NFS_CAP_ACLS; 3999 if (res.has_links != 0) 4000 server->caps |= NFS_CAP_HARDLINKS; 4001 if (res.has_symlinks != 0) 4002 server->caps |= NFS_CAP_SYMLINKS; 4003 if (res.case_insensitive) 4004 server->caps |= NFS_CAP_CASE_INSENSITIVE; 4005 if (res.case_preserving) 4006 server->caps |= NFS_CAP_CASE_PRESERVING; 4007 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 4008 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL) 4009 server->caps |= 
NFS_CAP_SECURITY_LABEL; 4010 #endif 4011 if (res.attr_bitmask[0] & FATTR4_WORD0_FS_LOCATIONS) 4012 server->caps |= NFS_CAP_FS_LOCATIONS; 4013 if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID)) 4014 server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID; 4015 if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE)) 4016 server->fattr_valid &= ~NFS_ATTR_FATTR_MODE; 4017 if (!(res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)) 4018 server->fattr_valid &= ~NFS_ATTR_FATTR_NLINK; 4019 if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER)) 4020 server->fattr_valid &= ~(NFS_ATTR_FATTR_OWNER | 4021 NFS_ATTR_FATTR_OWNER_NAME); 4022 if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)) 4023 server->fattr_valid &= ~(NFS_ATTR_FATTR_GROUP | 4024 NFS_ATTR_FATTR_GROUP_NAME); 4025 if (!(res.attr_bitmask[1] & FATTR4_WORD1_SPACE_USED)) 4026 server->fattr_valid &= ~NFS_ATTR_FATTR_SPACE_USED; 4027 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)) 4028 server->fattr_valid &= ~NFS_ATTR_FATTR_ATIME; 4029 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)) 4030 server->fattr_valid &= ~NFS_ATTR_FATTR_CTIME; 4031 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)) 4032 server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME; 4033 memcpy(server->attr_bitmask_nl, res.attr_bitmask, 4034 sizeof(server->attr_bitmask)); 4035 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL; 4036 4037 if (res.open_caps.oa_share_access_want[0] & 4038 NFS4_SHARE_WANT_OPEN_XOR_DELEGATION) 4039 server->caps |= NFS_CAP_OPEN_XOR; 4040 if (nfs4_server_delegtime_capable(&res)) 4041 server->caps |= NFS_CAP_DELEGTIME; 4042 4043 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask)); 4044 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE; 4045 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; 4046 server->cache_consistency_bitmask[2] = 0; 4047 4048 /* Avoid a regression due to buggy server */ 4049 for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++) 4050 res.exclcreat_bitmask[i] &= res.attr_bitmask[i]; 4051 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask, 4052 sizeof(server->exclcreat_bitmask)); 4053 4054 server->acl_bitmask = res.acl_bitmask; 4055 server->fh_expire_type = res.fh_expire_type; 4056 } 4057 4058 return status; 4059 } 4060 4061 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 4062 { 4063 struct nfs4_exception exception = { 4064 .interruptible = true, 4065 }; 4066 int err; 4067 4068 nfs4_server_set_init_caps(server); 4069 do { 4070 err = nfs4_handle_exception(server, 4071 _nfs4_server_capabilities(server, fhandle), 4072 &exception); 4073 } while (exception.retry); 4074 return err; 4075 } 4076 4077 static void test_fs_location_for_trunking(struct nfs4_fs_location *location, 4078 struct nfs_client *clp, 4079 struct nfs_server *server) 4080 { 4081 int i; 4082 4083 for (i = 0; i < location->nservers; i++) { 4084 struct nfs4_string *srv_loc = &location->servers[i]; 4085 struct sockaddr_storage addr; 4086 size_t addrlen; 4087 struct xprt_create xprt_args = { 4088 .ident = 0, 4089 .net = clp->cl_net, 4090 }; 4091 struct nfs4_add_xprt_data xprtdata = { 4092 .clp = clp, 4093 }; 4094 struct rpc_add_xprt_test rpcdata = { 4095 .add_xprt_test = clp->cl_mvops->session_trunk, 4096 .data = &xprtdata, 4097 }; 4098 char *servername = NULL; 4099 4100 if (!srv_loc->len) 4101 continue; 4102 4103 addrlen = nfs_parse_server_name(srv_loc->data, srv_loc->len, 4104 &addr, sizeof(addr), 4105 clp->cl_net, server->port); 4106 
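		/* Bail out if the location string could not be parsed into a
		 * usable socket address.
		 */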
if (!addrlen) 4107 return; 4108 xprt_args.dstaddr = (struct sockaddr *)&addr; 4109 xprt_args.addrlen = addrlen; 4110 servername = kmalloc(srv_loc->len + 1, GFP_KERNEL); 4111 if (!servername) 4112 return; 4113 memcpy(servername, srv_loc->data, srv_loc->len); 4114 servername[srv_loc->len] = '\0'; 4115 xprt_args.servername = servername; 4116 4117 xprtdata.cred = nfs4_get_clid_cred(clp); 4118 rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args, 4119 rpc_clnt_setup_test_and_add_xprt, 4120 &rpcdata); 4121 if (xprtdata.cred) 4122 put_cred(xprtdata.cred); 4123 kfree(servername); 4124 } 4125 } 4126 4127 static bool _is_same_nfs4_pathname(struct nfs4_pathname *path1, 4128 struct nfs4_pathname *path2) 4129 { 4130 int i; 4131 4132 if (path1->ncomponents != path2->ncomponents) 4133 return false; 4134 for (i = 0; i < path1->ncomponents; i++) { 4135 if (path1->components[i].len != path2->components[i].len) 4136 return false; 4137 if (memcmp(path1->components[i].data, path2->components[i].data, 4138 path1->components[i].len)) 4139 return false; 4140 } 4141 return true; 4142 } 4143 4144 static int _nfs4_discover_trunking(struct nfs_server *server, 4145 struct nfs_fh *fhandle) 4146 { 4147 struct nfs4_fs_locations *locations = NULL; 4148 struct page *page; 4149 const struct cred *cred; 4150 struct nfs_client *clp = server->nfs_client; 4151 const struct nfs4_state_maintenance_ops *ops = 4152 clp->cl_mvops->state_renewal_ops; 4153 int status = -ENOMEM, i; 4154 4155 cred = ops->get_state_renewal_cred(clp); 4156 if (cred == NULL) { 4157 cred = nfs4_get_clid_cred(clp); 4158 if (cred == NULL) 4159 return -ENOKEY; 4160 } 4161 4162 page = alloc_page(GFP_KERNEL); 4163 if (!page) 4164 goto out_put_cred; 4165 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 4166 if (!locations) 4167 goto out_free; 4168 locations->fattr = nfs_alloc_fattr(); 4169 if (!locations->fattr) 4170 goto out_free_2; 4171 4172 status = nfs4_proc_get_locations(server, fhandle, locations, page, 4173 cred); 4174 if (status) 4175 goto out_free_3; 4176 4177 for (i = 0; i < locations->nlocations; i++) { 4178 if (!_is_same_nfs4_pathname(&locations->fs_path, 4179 &locations->locations[i].rootpath)) 4180 continue; 4181 test_fs_location_for_trunking(&locations->locations[i], clp, 4182 server); 4183 } 4184 out_free_3: 4185 kfree(locations->fattr); 4186 out_free_2: 4187 kfree(locations); 4188 out_free: 4189 __free_page(page); 4190 out_put_cred: 4191 put_cred(cred); 4192 return status; 4193 } 4194 4195 static int nfs4_discover_trunking(struct nfs_server *server, 4196 struct nfs_fh *fhandle) 4197 { 4198 struct nfs4_exception exception = { 4199 .interruptible = true, 4200 }; 4201 struct nfs_client *clp = server->nfs_client; 4202 int err = 0; 4203 4204 if (!nfs4_has_session(clp)) 4205 goto out; 4206 do { 4207 err = nfs4_handle_exception(server, 4208 _nfs4_discover_trunking(server, fhandle), 4209 &exception); 4210 } while (exception.retry); 4211 out: 4212 return err; 4213 } 4214 4215 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 4216 struct nfs_fsinfo *info) 4217 { 4218 u32 bitmask[3]; 4219 struct nfs4_lookup_root_arg args = { 4220 .bitmask = bitmask, 4221 }; 4222 struct nfs4_lookup_res res = { 4223 .server = server, 4224 .fattr = info->fattr, 4225 .fh = fhandle, 4226 }; 4227 struct rpc_message msg = { 4228 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT], 4229 .rpc_argp = &args, 4230 .rpc_resp = &res, 4231 }; 4232 4233 bitmask[0] = nfs4_fattr_bitmap[0]; 4234 bitmask[1] = nfs4_fattr_bitmap[1]; 4235 /* 4236 * 
Process the label in the upcoming getfattr 4237 */ 4238 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL; 4239 4240 nfs_fattr_init(info->fattr); 4241 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4242 } 4243 4244 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 4245 struct nfs_fsinfo *info) 4246 { 4247 struct nfs4_exception exception = { 4248 .interruptible = true, 4249 }; 4250 int err; 4251 do { 4252 err = _nfs4_lookup_root(server, fhandle, info); 4253 trace_nfs4_lookup_root(server, fhandle, info->fattr, err); 4254 switch (err) { 4255 case 0: 4256 case -NFS4ERR_WRONGSEC: 4257 goto out; 4258 default: 4259 err = nfs4_handle_exception(server, err, &exception); 4260 } 4261 } while (exception.retry); 4262 out: 4263 return err; 4264 } 4265 4266 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 4267 struct nfs_fsinfo *info, rpc_authflavor_t flavor) 4268 { 4269 struct rpc_auth_create_args auth_args = { 4270 .pseudoflavor = flavor, 4271 }; 4272 struct rpc_auth *auth; 4273 4274 auth = rpcauth_create(&auth_args, server->client); 4275 if (IS_ERR(auth)) 4276 return -EACCES; 4277 return nfs4_lookup_root(server, fhandle, info); 4278 } 4279 4280 /* 4281 * Retry pseudoroot lookup with various security flavors. We do this when: 4282 * 4283 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC 4284 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation 4285 * 4286 * Returns zero on success, or a negative NFS4ERR value, or a 4287 * negative errno value. 4288 */ 4289 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 4290 struct nfs_fsinfo *info) 4291 { 4292 /* Per 3530bis 15.33.5 */ 4293 static const rpc_authflavor_t flav_array[] = { 4294 RPC_AUTH_GSS_KRB5P, 4295 RPC_AUTH_GSS_KRB5I, 4296 RPC_AUTH_GSS_KRB5, 4297 RPC_AUTH_UNIX, /* courtesy */ 4298 RPC_AUTH_NULL, 4299 }; 4300 int status = -EPERM; 4301 size_t i; 4302 4303 if (server->auth_info.flavor_len > 0) { 4304 /* try each flavor specified by user */ 4305 for (i = 0; i < server->auth_info.flavor_len; i++) { 4306 status = nfs4_lookup_root_sec(server, fhandle, info, 4307 server->auth_info.flavors[i]); 4308 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 4309 continue; 4310 break; 4311 } 4312 } else { 4313 /* no flavors specified by user, try default list */ 4314 for (i = 0; i < ARRAY_SIZE(flav_array); i++) { 4315 status = nfs4_lookup_root_sec(server, fhandle, info, 4316 flav_array[i]); 4317 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 4318 continue; 4319 break; 4320 } 4321 } 4322 4323 /* 4324 * -EACCES could mean that the user doesn't have correct permissions 4325 * to access the mount. It could also mean that we tried to mount 4326 * with a gss auth flavor, but rpc.gssd isn't running. Either way, 4327 * existing mount programs don't handle -EACCES very well so it should 4328 * be mapped to -EPERM instead. 4329 */ 4330 if (status == -EACCES) 4331 status = -EPERM; 4332 return status; 4333 } 4334 4335 /** 4336 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot 4337 * @server: initialized nfs_server handle 4338 * @fhandle: we fill in the pseudo-fs root file handle 4339 * @info: we fill in an FSINFO struct 4340 * @auth_probe: probe the auth flavours 4341 * 4342 * Returns zero on success, or a negative errno. 
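 * NFS4ERR status codes are converted to errno values via nfs4_map_errors().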
 */
int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
			 struct nfs_fsinfo *info,
			 bool auth_probe)
{
	int status = 0;

	if (!auth_probe)
		status = nfs4_lookup_root(server, fhandle, info);

	if (auth_probe || status == NFS4ERR_WRONGSEC)
		status = server->nfs_client->cl_mvops->find_root_sec(server,
				fhandle, info);

	if (status == 0)
		status = nfs4_server_capabilities(server, fhandle);
	if (status == 0)
		status = nfs4_do_fsinfo(server, fhandle, info);

	return nfs4_map_errors(status);
}

static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
			      struct nfs_fsinfo *info)
{
	int error;
	struct nfs_fattr *fattr = info->fattr;

	error = nfs4_server_capabilities(server, mntfh);
	if (error < 0) {
		dprintk("nfs4_get_root: getcaps error = %d\n", -error);
		return error;
	}

	error = nfs4_proc_getattr(server, mntfh, fattr, NULL);
	if (error < 0) {
		dprintk("nfs4_get_root: getattr error = %d\n", -error);
		goto out;
	}

	if (fattr->valid & NFS_ATTR_FATTR_FSID &&
	    !nfs_fsid_equal(&server->fsid, &fattr->fsid))
		memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));

out:
	return error;
}

/*
 * Get the locations and (maybe) other attributes of a referral.
 * Note that we'll actually follow the referral later, when we
 * detect an fsid mismatch during inode revalidation.
 */
static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
			     const struct qstr *name, struct nfs_fattr *fattr,
			     struct nfs_fh *fhandle)
{
	int status = -ENOMEM;
	struct page *page = NULL;
	struct nfs4_fs_locations *locations = NULL;

	page = alloc_page(GFP_KERNEL);
	if (page == NULL)
		goto out;
	locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
	if (locations == NULL)
		goto out;

	locations->fattr = fattr;

	status = nfs4_proc_fs_locations(client, dir, name, locations, page);
	if (status != 0)
		goto out;

	/*
	 * If the fsid didn't change, this is a migration event, not a
	 * referral. Cause us to drop into the exception handler, which
	 * will kick off migration recovery.
	 */
	if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &fattr->fsid)) {
		dprintk("%s: server did not return a different fsid for"
			" a referral at %s\n", __func__, name->name);
		status = -NFS4ERR_MOVED;
		goto out;
	}
	/* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
	nfs_fixup_referral_attributes(fattr);
	memset(fhandle, 0, sizeof(struct nfs_fh));
out:
	if (page)
		__free_page(page);
	kfree(locations);
	return status;
}

static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
			      struct nfs_fattr *fattr, struct inode *inode)
{
	__u32 bitmask[NFS4_BITMASK_SZ];
	struct nfs4_getattr_arg args = {
		.fh = fhandle,
		.bitmask = bitmask,
	};
	struct nfs4_getattr_res res = {
		.fattr = fattr,
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	unsigned short task_flags = 0;

	if (nfs4_has_session(server->nfs_client))
		task_flags = RPC_TASK_MOVEABLE;

	/* Is this an attribute revalidation, subject to softreval?
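	 * If so, allow the GETATTR to time out rather than retry forever,
	 * so that revalidation can fall back to cached attributes.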
	 */
	if (inode && (server->flags & NFS_MOUNT_SOFTREVAL))
		task_flags |= RPC_TASK_TIMEOUT;

	nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label), inode, 0);
	nfs_fattr_init(fattr);
	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
	return nfs4_do_call_sync(server->client, server, &msg,
			&args.seq_args, &res.seq_res, task_flags);
}

int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
		      struct nfs_fattr *fattr, struct inode *inode)
{
	struct nfs4_exception exception = {
		.interruptible = true,
	};
	int err;
	do {
		err = _nfs4_proc_getattr(server, fhandle, fattr, inode);
		trace_nfs4_getattr(server, fhandle, fattr, err);
		err = nfs4_handle_exception(server, err,
				&exception);
	} while (exception.retry);
	return err;
}

/*
 * The file is not closed if it is opened due to a request to change
 * the size of the file. The open call will not be needed once the
 * VFS layer lookup-intents are implemented.
 *
 * Close is called when the inode is destroyed.
 * If we haven't opened the file for O_WRONLY, we need to do so in the
 * size_change case in order to obtain a stateid.
 *
 * Got race?
 * Because OPEN is always done by name in nfsv4, it is
 * possible that we opened a different file by the same
 * name. We can recognize this race condition, but we
 * can't do anything about it besides returning an error.
 *
 * This will be fixed with VFS changes (lookup-intent).
 */
static int
nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
		  struct iattr *sattr)
{
	struct inode *inode = d_inode(dentry);
	const struct cred *cred = NULL;
	struct nfs_open_context *ctx = NULL;
	int status;

	if (pnfs_ld_layoutret_on_setattr(inode) &&
	    sattr->ia_valid & ATTR_SIZE &&
	    sattr->ia_size < i_size_read(inode))
		pnfs_commit_and_return_layout(inode);

	nfs_fattr_init(fattr);

	/* Deal with open(O_TRUNC) */
	if (sattr->ia_valid & ATTR_OPEN)
		sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME);

	/* Optimization: if the end result is no change, don't RPC */
	if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
		return 0;

	/* Search for an existing open(O_WRITE) file */
	if (sattr->ia_valid & ATTR_FILE) {
		ctx = nfs_file_open_context(sattr->ia_file);
		if (ctx)
			cred = ctx->cred;
	}

	/* Return any delegations if we're going to change ACLs */
	if ((sattr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
		nfs4_inode_make_writeable(inode);

	status = nfs4_do_setattr(inode, cred, fattr, sattr, ctx, NULL);
	if (status == 0) {
		nfs_setattr_update_inode(inode, sattr, fattr);
		nfs_setsecurity(inode, fattr);
	}
	return status;
}

static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
			     struct dentry *dentry, const struct qstr *name,
			     struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
	struct nfs_server *server = NFS_SERVER(dir);
	int status;
	struct nfs4_lookup_arg args = {
		.bitmask = server->attr_bitmask,
		.dir_fh = NFS_FH(dir),
		.name = name,
	};
	struct nfs4_lookup_res res = {
		.server = server,
		.fattr = fattr,
		.fh = fhandle,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	unsigned short task_flags = 0;

	if (nfs_server_capable(dir, NFS_CAP_MOVEABLE))
		task_flags = RPC_TASK_MOVEABLE;

	/* Is this an attribute revalidation, subject to softreval? */
	if (nfs_lookup_is_soft_revalidate(dentry))
		task_flags |= RPC_TASK_TIMEOUT;

	args.bitmask = nfs4_bitmask(server, fattr->label);

	nfs_fattr_init(fattr);

	dprintk("NFS call lookup %pd2\n", dentry);
	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
	status = nfs4_do_call_sync(clnt, server, &msg,
			&args.seq_args, &res.seq_res, task_flags);
	dprintk("NFS reply lookup: %d\n", status);
	return status;
}

static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
{
	fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
		NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
	fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
	fattr->nlink = 2;
}

static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
				   struct dentry *dentry, const struct qstr *name,
				   struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
	struct nfs4_exception exception = {
		.interruptible = true,
	};
	struct rpc_clnt *client = *clnt;
	int err;
	do {
		err = _nfs4_proc_lookup(client, dir, dentry, name, fhandle, fattr);
		trace_nfs4_lookup(dir, name, err);
		switch (err) {
		case -NFS4ERR_BADNAME:
			err = -ENOENT;
			goto out;
		case -NFS4ERR_MOVED:
			err = nfs4_get_referral(client, dir, name, fattr, fhandle);
			if (err == -NFS4ERR_MOVED)
				err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
			goto out;
		case -NFS4ERR_WRONGSEC:
			err = -EPERM;
			if (client != *clnt)
				goto out;
			client = nfs4_negotiate_security(client, dir, name);
			if (IS_ERR(client))
				return PTR_ERR(client);

			exception.retry = 1;
			break;
		default:
			err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
		}
	} while (exception.retry);

out:
	if (err == 0)
		*clnt = client;
	else if (client != *clnt)
		rpc_shutdown_client(client);

	return err;
}

static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry, const struct qstr *name,
			    struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
	int status;
	struct rpc_clnt *client = NFS_CLIENT(dir);

	status = nfs4_proc_lookup_common(&client, dir, dentry, name, fhandle, fattr);
	if (client != NFS_CLIENT(dir)) {
		rpc_shutdown_client(client);
		nfs_fixup_secinfo_attributes(fattr);
	}
	return status;
}

struct rpc_clnt *
nfs4_proc_lookup_mountpoint(struct inode *dir, struct dentry *dentry,
			    struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
	struct rpc_clnt *client = NFS_CLIENT(dir);
	int status;

	status = nfs4_proc_lookup_common(&client, dir, dentry, &dentry->d_name,
					 fhandle, fattr);
	if (status < 0)
		return ERR_PTR(status);
	return (client == NFS_CLIENT(dir)) ?
rpc_clone_client(client) : client; 4669 } 4670 4671 static int _nfs4_proc_lookupp(struct inode *inode, 4672 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4673 { 4674 struct rpc_clnt *clnt = NFS_CLIENT(inode); 4675 struct nfs_server *server = NFS_SERVER(inode); 4676 int status; 4677 struct nfs4_lookupp_arg args = { 4678 .bitmask = server->attr_bitmask, 4679 .fh = NFS_FH(inode), 4680 }; 4681 struct nfs4_lookupp_res res = { 4682 .server = server, 4683 .fattr = fattr, 4684 .fh = fhandle, 4685 }; 4686 struct rpc_message msg = { 4687 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUPP], 4688 .rpc_argp = &args, 4689 .rpc_resp = &res, 4690 }; 4691 unsigned short task_flags = 0; 4692 4693 if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL) 4694 task_flags |= RPC_TASK_TIMEOUT; 4695 4696 args.bitmask = nfs4_bitmask(server, fattr->label); 4697 4698 nfs_fattr_init(fattr); 4699 4700 dprintk("NFS call lookupp ino=0x%lx\n", inode->i_ino); 4701 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, 4702 &res.seq_res, task_flags); 4703 dprintk("NFS reply lookupp: %d\n", status); 4704 return status; 4705 } 4706 4707 static int nfs4_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle, 4708 struct nfs_fattr *fattr) 4709 { 4710 struct nfs4_exception exception = { 4711 .interruptible = true, 4712 }; 4713 int err; 4714 do { 4715 err = _nfs4_proc_lookupp(inode, fhandle, fattr); 4716 trace_nfs4_lookupp(inode, err); 4717 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4718 &exception); 4719 } while (exception.retry); 4720 return err; 4721 } 4722 4723 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry, 4724 const struct cred *cred) 4725 { 4726 struct nfs_server *server = NFS_SERVER(inode); 4727 struct nfs4_accessargs args = { 4728 .fh = NFS_FH(inode), 4729 .access = entry->mask, 4730 }; 4731 struct nfs4_accessres res = { 4732 .server = server, 4733 }; 4734 struct rpc_message msg = { 4735 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], 4736 .rpc_argp = &args, 4737 .rpc_resp = &res, 4738 .rpc_cred = cred, 4739 }; 4740 int status = 0; 4741 4742 if (!nfs4_have_delegation(inode, FMODE_READ, 0)) { 4743 res.fattr = nfs_alloc_fattr(); 4744 if (res.fattr == NULL) 4745 return -ENOMEM; 4746 args.bitmask = server->cache_consistency_bitmask; 4747 } 4748 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4749 if (!status) { 4750 nfs_access_set_mask(entry, res.access); 4751 if (res.fattr) 4752 nfs_refresh_inode(inode, res.fattr); 4753 } 4754 nfs_free_fattr(res.fattr); 4755 return status; 4756 } 4757 4758 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry, 4759 const struct cred *cred) 4760 { 4761 struct nfs4_exception exception = { 4762 .interruptible = true, 4763 }; 4764 int err; 4765 do { 4766 err = _nfs4_proc_access(inode, entry, cred); 4767 trace_nfs4_access(inode, err); 4768 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4769 &exception); 4770 } while (exception.retry); 4771 return err; 4772 } 4773 4774 /* 4775 * TODO: For the time being, we don't try to get any attributes 4776 * along with any of the zero-copy operations READ, READDIR, 4777 * READLINK, WRITE. 4778 * 4779 * In the case of the first three, we want to put the GETATTR 4780 * after the read-type operation -- this is because it is hard 4781 * to predict the length of a GETATTR response in v4, and thus 4782 * align the READ data correctly. 
This means that the GETATTR 4783 * may end up partially falling into the page cache, and we should 4784 * shift it into the 'tail' of the xdr_buf before processing. 4785 * To do this efficiently, we need to know the total length 4786 * of data received, which doesn't seem to be available outside 4787 * of the RPC layer. 4788 * 4789 * In the case of WRITE, we also want to put the GETATTR after 4790 * the operation -- in this case because we want to make sure 4791 * we get the post-operation mtime and size. 4792 * 4793 * Both of these changes to the XDR layer would in fact be quite 4794 * minor, but I decided to leave them for a subsequent patch. 4795 */ 4796 static int _nfs4_proc_readlink(struct inode *inode, struct page *page, 4797 unsigned int pgbase, unsigned int pglen) 4798 { 4799 struct nfs4_readlink args = { 4800 .fh = NFS_FH(inode), 4801 .pgbase = pgbase, 4802 .pglen = pglen, 4803 .pages = &page, 4804 }; 4805 struct nfs4_readlink_res res; 4806 struct rpc_message msg = { 4807 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK], 4808 .rpc_argp = &args, 4809 .rpc_resp = &res, 4810 }; 4811 4812 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0); 4813 } 4814 4815 static int nfs4_proc_readlink(struct inode *inode, struct page *page, 4816 unsigned int pgbase, unsigned int pglen) 4817 { 4818 struct nfs4_exception exception = { 4819 .interruptible = true, 4820 }; 4821 int err; 4822 do { 4823 err = _nfs4_proc_readlink(inode, page, pgbase, pglen); 4824 trace_nfs4_readlink(inode, err); 4825 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4826 &exception); 4827 } while (exception.retry); 4828 return err; 4829 } 4830 4831 /* 4832 * This is just for mknod. open(O_CREAT) will always do ->open_context(). 
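 * The open context allocated below is used only to drive the OPEN
 * compound and is released again before this function returns.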
4833 */ 4834 static int 4835 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, 4836 int flags) 4837 { 4838 struct nfs_server *server = NFS_SERVER(dir); 4839 struct nfs4_label l, *ilabel; 4840 struct nfs_open_context *ctx; 4841 struct nfs4_state *state; 4842 int status = 0; 4843 4844 ctx = alloc_nfs_open_context(dentry, FMODE_READ, NULL); 4845 if (IS_ERR(ctx)) 4846 return PTR_ERR(ctx); 4847 4848 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l); 4849 4850 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 4851 sattr->ia_mode &= ~current_umask(); 4852 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL); 4853 if (IS_ERR(state)) { 4854 status = PTR_ERR(state); 4855 goto out; 4856 } 4857 out: 4858 nfs4_label_release_security(ilabel); 4859 put_nfs_open_context(ctx); 4860 return status; 4861 } 4862 4863 static int 4864 _nfs4_proc_remove(struct inode *dir, const struct qstr *name, u32 ftype) 4865 { 4866 struct nfs_server *server = NFS_SERVER(dir); 4867 struct nfs_removeargs args = { 4868 .fh = NFS_FH(dir), 4869 .name = *name, 4870 }; 4871 struct nfs_removeres res = { 4872 .server = server, 4873 }; 4874 struct rpc_message msg = { 4875 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE], 4876 .rpc_argp = &args, 4877 .rpc_resp = &res, 4878 }; 4879 unsigned long timestamp = jiffies; 4880 int status; 4881 4882 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 4883 if (status == 0) { 4884 spin_lock(&dir->i_lock); 4885 /* Removing a directory decrements nlink in the parent */ 4886 if (ftype == NF4DIR && dir->i_nlink > 2) 4887 nfs4_dec_nlink_locked(dir); 4888 nfs4_update_changeattr_locked(dir, &res.cinfo, timestamp, 4889 NFS_INO_INVALID_DATA); 4890 spin_unlock(&dir->i_lock); 4891 } 4892 return status; 4893 } 4894 4895 static int nfs4_proc_remove(struct inode *dir, struct dentry *dentry) 4896 { 4897 struct nfs4_exception exception = { 4898 .interruptible = true, 4899 }; 4900 struct inode *inode = d_inode(dentry); 4901 int err; 4902 4903 if (inode) { 4904 if (inode->i_nlink == 1) 4905 nfs4_inode_return_delegation(inode); 4906 else 4907 nfs4_inode_make_writeable(inode); 4908 } 4909 do { 4910 err = _nfs4_proc_remove(dir, &dentry->d_name, NF4REG); 4911 trace_nfs4_remove(dir, &dentry->d_name, err); 4912 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4913 &exception); 4914 } while (exception.retry); 4915 return err; 4916 } 4917 4918 static int nfs4_proc_rmdir(struct inode *dir, const struct qstr *name) 4919 { 4920 struct nfs4_exception exception = { 4921 .interruptible = true, 4922 }; 4923 int err; 4924 4925 do { 4926 err = _nfs4_proc_remove(dir, name, NF4DIR); 4927 trace_nfs4_remove(dir, name, err); 4928 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4929 &exception); 4930 } while (exception.retry); 4931 return err; 4932 } 4933 4934 static void nfs4_proc_unlink_setup(struct rpc_message *msg, 4935 struct dentry *dentry, 4936 struct inode *inode) 4937 { 4938 struct nfs_removeargs *args = msg->rpc_argp; 4939 struct nfs_removeres *res = msg->rpc_resp; 4940 4941 res->server = NFS_SB(dentry->d_sb); 4942 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE]; 4943 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1, 0); 4944 4945 nfs_fattr_init(res->dir_attr); 4946 4947 if (inode) { 4948 nfs4_inode_return_delegation(inode); 4949 nfs_d_prune_case_insensitive_aliases(inode); 4950 } 4951 } 4952 4953 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) 4954 { 4955 
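	/* Set up the SEQUENCE op (and, where sessions are in use, reserve a
	 * session slot) before the UNLINK is transmitted.
	 */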
nfs4_setup_sequence(NFS_SB(data->dentry->d_sb)->nfs_client, 4956 &data->args.seq_args, 4957 &data->res.seq_res, 4958 task); 4959 } 4960 4961 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) 4962 { 4963 struct nfs_unlinkdata *data = task->tk_calldata; 4964 struct nfs_removeres *res = &data->res; 4965 4966 if (!nfs4_sequence_done(task, &res->seq_res)) 4967 return 0; 4968 if (nfs4_async_handle_error(task, res->server, NULL, 4969 &data->timeout) == -EAGAIN) 4970 return 0; 4971 if (task->tk_status == 0) 4972 nfs4_update_changeattr(dir, &res->cinfo, 4973 res->dir_attr->time_start, 4974 NFS_INO_INVALID_DATA); 4975 return 1; 4976 } 4977 4978 static void nfs4_proc_rename_setup(struct rpc_message *msg, 4979 struct dentry *old_dentry, 4980 struct dentry *new_dentry) 4981 { 4982 struct nfs_renameargs *arg = msg->rpc_argp; 4983 struct nfs_renameres *res = msg->rpc_resp; 4984 struct inode *old_inode = d_inode(old_dentry); 4985 struct inode *new_inode = d_inode(new_dentry); 4986 4987 if (old_inode) 4988 nfs4_inode_make_writeable(old_inode); 4989 if (new_inode) 4990 nfs4_inode_return_delegation(new_inode); 4991 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; 4992 res->server = NFS_SB(old_dentry->d_sb); 4993 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1, 0); 4994 } 4995 4996 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) 4997 { 4998 nfs4_setup_sequence(NFS_SERVER(data->old_dir)->nfs_client, 4999 &data->args.seq_args, 5000 &data->res.seq_res, 5001 task); 5002 } 5003 5004 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, 5005 struct inode *new_dir) 5006 { 5007 struct nfs_renamedata *data = task->tk_calldata; 5008 struct nfs_renameres *res = &data->res; 5009 5010 if (!nfs4_sequence_done(task, &res->seq_res)) 5011 return 0; 5012 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN) 5013 return 0; 5014 5015 if (task->tk_status == 0) { 5016 nfs_d_prune_case_insensitive_aliases(d_inode(data->old_dentry)); 5017 if (new_dir != old_dir) { 5018 /* Note: If we moved a directory, nlink will change */ 5019 nfs4_update_changeattr(old_dir, &res->old_cinfo, 5020 res->old_fattr->time_start, 5021 NFS_INO_INVALID_NLINK | 5022 NFS_INO_INVALID_DATA); 5023 nfs4_update_changeattr(new_dir, &res->new_cinfo, 5024 res->new_fattr->time_start, 5025 NFS_INO_INVALID_NLINK | 5026 NFS_INO_INVALID_DATA); 5027 } else 5028 nfs4_update_changeattr(old_dir, &res->old_cinfo, 5029 res->old_fattr->time_start, 5030 NFS_INO_INVALID_DATA); 5031 } 5032 return 1; 5033 } 5034 5035 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name) 5036 { 5037 struct nfs_server *server = NFS_SERVER(inode); 5038 __u32 bitmask[NFS4_BITMASK_SZ]; 5039 struct nfs4_link_arg arg = { 5040 .fh = NFS_FH(inode), 5041 .dir_fh = NFS_FH(dir), 5042 .name = name, 5043 .bitmask = bitmask, 5044 }; 5045 struct nfs4_link_res res = { 5046 .server = server, 5047 }; 5048 struct rpc_message msg = { 5049 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK], 5050 .rpc_argp = &arg, 5051 .rpc_resp = &res, 5052 }; 5053 int status = -ENOMEM; 5054 5055 res.fattr = nfs_alloc_fattr_with_label(server); 5056 if (res.fattr == NULL) 5057 goto out; 5058 5059 nfs4_inode_make_writeable(inode); 5060 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, res.fattr->label), 5061 inode, 5062 NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME); 5063 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 5064 if 
(!status) { 5065 nfs4_update_changeattr(dir, &res.cinfo, res.fattr->time_start, 5066 NFS_INO_INVALID_DATA); 5067 nfs4_inc_nlink(inode); 5068 status = nfs_post_op_update_inode(inode, res.fattr); 5069 if (!status) 5070 nfs_setsecurity(inode, res.fattr); 5071 } 5072 5073 out: 5074 nfs_free_fattr(res.fattr); 5075 return status; 5076 } 5077 5078 static int nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name) 5079 { 5080 struct nfs4_exception exception = { 5081 .interruptible = true, 5082 }; 5083 int err; 5084 do { 5085 err = nfs4_handle_exception(NFS_SERVER(inode), 5086 _nfs4_proc_link(inode, dir, name), 5087 &exception); 5088 } while (exception.retry); 5089 return err; 5090 } 5091 5092 struct nfs4_createdata { 5093 struct rpc_message msg; 5094 struct nfs4_create_arg arg; 5095 struct nfs4_create_res res; 5096 struct nfs_fh fh; 5097 struct nfs_fattr fattr; 5098 }; 5099 5100 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir, 5101 const struct qstr *name, struct iattr *sattr, u32 ftype) 5102 { 5103 struct nfs4_createdata *data; 5104 5105 data = kzalloc(sizeof(*data), GFP_KERNEL); 5106 if (data != NULL) { 5107 struct nfs_server *server = NFS_SERVER(dir); 5108 5109 data->fattr.label = nfs4_label_alloc(server, GFP_KERNEL); 5110 if (IS_ERR(data->fattr.label)) 5111 goto out_free; 5112 5113 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE]; 5114 data->msg.rpc_argp = &data->arg; 5115 data->msg.rpc_resp = &data->res; 5116 data->arg.dir_fh = NFS_FH(dir); 5117 data->arg.server = server; 5118 data->arg.name = name; 5119 data->arg.attrs = sattr; 5120 data->arg.ftype = ftype; 5121 data->arg.bitmask = nfs4_bitmask(server, data->fattr.label); 5122 data->arg.umask = current_umask(); 5123 data->res.server = server; 5124 data->res.fh = &data->fh; 5125 data->res.fattr = &data->fattr; 5126 nfs_fattr_init(data->res.fattr); 5127 } 5128 return data; 5129 out_free: 5130 kfree(data); 5131 return NULL; 5132 } 5133 5134 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data) 5135 { 5136 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 5137 &data->arg.seq_args, &data->res.seq_res, 1); 5138 if (status == 0) { 5139 spin_lock(&dir->i_lock); 5140 nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo, 5141 data->res.fattr->time_start, 5142 NFS_INO_INVALID_DATA); 5143 spin_unlock(&dir->i_lock); 5144 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr); 5145 } 5146 return status; 5147 } 5148 5149 static struct dentry *nfs4_do_mkdir(struct inode *dir, struct dentry *dentry, 5150 struct nfs4_createdata *data) 5151 { 5152 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 5153 &data->arg.seq_args, &data->res.seq_res, 1); 5154 5155 if (status) 5156 return ERR_PTR(status); 5157 5158 spin_lock(&dir->i_lock); 5159 /* Creating a directory bumps nlink in the parent */ 5160 nfs4_inc_nlink_locked(dir); 5161 nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo, 5162 data->res.fattr->time_start, 5163 NFS_INO_INVALID_DATA); 5164 spin_unlock(&dir->i_lock); 5165 return nfs_add_or_obtain(dentry, data->res.fh, data->res.fattr); 5166 } 5167 5168 static void nfs4_free_createdata(struct nfs4_createdata *data) 5169 { 5170 nfs4_label_free(data->fattr.label); 5171 kfree(data); 5172 } 5173 5174 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 5175 struct folio *folio, unsigned int len, struct iattr *sattr, 5176 struct nfs4_label *label) 5177 { 5178 struct 
page *page = &folio->page; 5179 struct nfs4_createdata *data; 5180 int status = -ENAMETOOLONG; 5181 5182 if (len > NFS4_MAXPATHLEN) 5183 goto out; 5184 5185 status = -ENOMEM; 5186 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK); 5187 if (data == NULL) 5188 goto out; 5189 5190 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK]; 5191 data->arg.u.symlink.pages = &page; 5192 data->arg.u.symlink.len = len; 5193 data->arg.label = label; 5194 5195 status = nfs4_do_create(dir, dentry, data); 5196 5197 nfs4_free_createdata(data); 5198 out: 5199 return status; 5200 } 5201 5202 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 5203 struct folio *folio, unsigned int len, struct iattr *sattr) 5204 { 5205 struct nfs4_exception exception = { 5206 .interruptible = true, 5207 }; 5208 struct nfs4_label l, *label; 5209 int err; 5210 5211 label = nfs4_label_init_security(dir, dentry, sattr, &l); 5212 5213 do { 5214 err = _nfs4_proc_symlink(dir, dentry, folio, len, sattr, label); 5215 trace_nfs4_symlink(dir, &dentry->d_name, err); 5216 err = nfs4_handle_exception(NFS_SERVER(dir), err, 5217 &exception); 5218 } while (exception.retry); 5219 5220 nfs4_label_release_security(label); 5221 return err; 5222 } 5223 5224 static struct dentry *_nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 5225 struct iattr *sattr, 5226 struct nfs4_label *label) 5227 { 5228 struct nfs4_createdata *data; 5229 struct dentry *ret = ERR_PTR(-ENOMEM); 5230 5231 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR); 5232 if (data == NULL) 5233 goto out; 5234 5235 data->arg.label = label; 5236 ret = nfs4_do_mkdir(dir, dentry, data); 5237 5238 nfs4_free_createdata(data); 5239 out: 5240 return ret; 5241 } 5242 5243 static struct dentry *nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 5244 struct iattr *sattr) 5245 { 5246 struct nfs_server *server = NFS_SERVER(dir); 5247 struct nfs4_exception exception = { 5248 .interruptible = true, 5249 }; 5250 struct nfs4_label l, *label; 5251 struct dentry *alias; 5252 int err; 5253 5254 label = nfs4_label_init_security(dir, dentry, sattr, &l); 5255 5256 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 5257 sattr->ia_mode &= ~current_umask(); 5258 do { 5259 alias = _nfs4_proc_mkdir(dir, dentry, sattr, label); 5260 err = PTR_ERR_OR_ZERO(alias); 5261 trace_nfs4_mkdir(dir, &dentry->d_name, err); 5262 err = nfs4_handle_exception(NFS_SERVER(dir), err, 5263 &exception); 5264 } while (exception.retry); 5265 nfs4_label_release_security(label); 5266 5267 return alias; 5268 } 5269 5270 static int _nfs4_proc_readdir(struct nfs_readdir_arg *nr_arg, 5271 struct nfs_readdir_res *nr_res) 5272 { 5273 struct inode *dir = d_inode(nr_arg->dentry); 5274 struct nfs_server *server = NFS_SERVER(dir); 5275 struct nfs4_readdir_arg args = { 5276 .fh = NFS_FH(dir), 5277 .pages = nr_arg->pages, 5278 .pgbase = 0, 5279 .count = nr_arg->page_len, 5280 .plus = nr_arg->plus, 5281 }; 5282 struct nfs4_readdir_res res; 5283 struct rpc_message msg = { 5284 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR], 5285 .rpc_argp = &args, 5286 .rpc_resp = &res, 5287 .rpc_cred = nr_arg->cred, 5288 }; 5289 int status; 5290 5291 dprintk("%s: dentry = %pd2, cookie = %llu\n", __func__, 5292 nr_arg->dentry, (unsigned long long)nr_arg->cookie); 5293 if (!(server->caps & NFS_CAP_SECURITY_LABEL)) 5294 args.bitmask = server->attr_bitmask_nl; 5295 else 5296 args.bitmask = server->attr_bitmask; 5297 5298 nfs4_setup_readdir(nr_arg->cookie, nr_arg->verf, nr_arg->dentry, 
&args); 5299 res.pgbase = args.pgbase; 5300 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, 5301 &res.seq_res, 0); 5302 if (status >= 0) { 5303 memcpy(nr_res->verf, res.verifier.data, NFS4_VERIFIER_SIZE); 5304 status += args.pgbase; 5305 } 5306 5307 nfs_invalidate_atime(dir); 5308 5309 dprintk("%s: returns %d\n", __func__, status); 5310 return status; 5311 } 5312 5313 static int nfs4_proc_readdir(struct nfs_readdir_arg *arg, 5314 struct nfs_readdir_res *res) 5315 { 5316 struct nfs4_exception exception = { 5317 .interruptible = true, 5318 }; 5319 int err; 5320 do { 5321 err = _nfs4_proc_readdir(arg, res); 5322 trace_nfs4_readdir(d_inode(arg->dentry), err); 5323 err = nfs4_handle_exception(NFS_SERVER(d_inode(arg->dentry)), 5324 err, &exception); 5325 } while (exception.retry); 5326 return err; 5327 } 5328 5329 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 5330 struct iattr *sattr, struct nfs4_label *label, dev_t rdev) 5331 { 5332 struct nfs4_createdata *data; 5333 int mode = sattr->ia_mode; 5334 int status = -ENOMEM; 5335 5336 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK); 5337 if (data == NULL) 5338 goto out; 5339 5340 if (S_ISFIFO(mode)) 5341 data->arg.ftype = NF4FIFO; 5342 else if (S_ISBLK(mode)) { 5343 data->arg.ftype = NF4BLK; 5344 data->arg.u.device.specdata1 = MAJOR(rdev); 5345 data->arg.u.device.specdata2 = MINOR(rdev); 5346 } 5347 else if (S_ISCHR(mode)) { 5348 data->arg.ftype = NF4CHR; 5349 data->arg.u.device.specdata1 = MAJOR(rdev); 5350 data->arg.u.device.specdata2 = MINOR(rdev); 5351 } else if (!S_ISSOCK(mode)) { 5352 status = -EINVAL; 5353 goto out_free; 5354 } 5355 5356 data->arg.label = label; 5357 status = nfs4_do_create(dir, dentry, data); 5358 out_free: 5359 nfs4_free_createdata(data); 5360 out: 5361 return status; 5362 } 5363 5364 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 5365 struct iattr *sattr, dev_t rdev) 5366 { 5367 struct nfs_server *server = NFS_SERVER(dir); 5368 struct nfs4_exception exception = { 5369 .interruptible = true, 5370 }; 5371 struct nfs4_label l, *label; 5372 int err; 5373 5374 label = nfs4_label_init_security(dir, dentry, sattr, &l); 5375 5376 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 5377 sattr->ia_mode &= ~current_umask(); 5378 do { 5379 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev); 5380 trace_nfs4_mknod(dir, &dentry->d_name, err); 5381 err = nfs4_handle_exception(NFS_SERVER(dir), err, 5382 &exception); 5383 } while (exception.retry); 5384 5385 nfs4_label_release_security(label); 5386 5387 return err; 5388 } 5389 5390 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, 5391 struct nfs_fsstat *fsstat) 5392 { 5393 struct nfs4_statfs_arg args = { 5394 .fh = fhandle, 5395 .bitmask = server->attr_bitmask, 5396 }; 5397 struct nfs4_statfs_res res = { 5398 .fsstat = fsstat, 5399 }; 5400 struct rpc_message msg = { 5401 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS], 5402 .rpc_argp = &args, 5403 .rpc_resp = &res, 5404 }; 5405 5406 nfs_fattr_init(fsstat->fattr); 5407 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5408 } 5409 5410 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) 5411 { 5412 struct nfs4_exception exception = { 5413 .interruptible = true, 5414 }; 5415 int err; 5416 do { 5417 err = nfs4_handle_exception(server, 5418 _nfs4_proc_statfs(server, fhandle, fsstat), 5419 &exception); 5420 } while 
(exception.retry); 5421 return err; 5422 } 5423 5424 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, 5425 struct nfs_fsinfo *fsinfo) 5426 { 5427 struct nfs4_fsinfo_arg args = { 5428 .fh = fhandle, 5429 .bitmask = server->attr_bitmask, 5430 }; 5431 struct nfs4_fsinfo_res res = { 5432 .fsinfo = fsinfo, 5433 }; 5434 struct rpc_message msg = { 5435 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO], 5436 .rpc_argp = &args, 5437 .rpc_resp = &res, 5438 }; 5439 5440 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5441 } 5442 5443 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 5444 { 5445 struct nfs4_exception exception = { 5446 .interruptible = true, 5447 }; 5448 int err; 5449 5450 do { 5451 err = _nfs4_do_fsinfo(server, fhandle, fsinfo); 5452 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err); 5453 if (err == 0) { 5454 nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time * HZ); 5455 break; 5456 } 5457 err = nfs4_handle_exception(server, err, &exception); 5458 } while (exception.retry); 5459 return err; 5460 } 5461 5462 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 5463 { 5464 int error; 5465 5466 nfs_fattr_init(fsinfo->fattr); 5467 error = nfs4_do_fsinfo(server, fhandle, fsinfo); 5468 if (error == 0) { 5469 /* block layout checks this! */ 5470 server->pnfs_blksize = fsinfo->blksize; 5471 set_pnfs_layoutdriver(server, fhandle, fsinfo); 5472 } 5473 5474 return error; 5475 } 5476 5477 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 5478 struct nfs_pathconf *pathconf) 5479 { 5480 struct nfs4_pathconf_arg args = { 5481 .fh = fhandle, 5482 .bitmask = server->attr_bitmask, 5483 }; 5484 struct nfs4_pathconf_res res = { 5485 .pathconf = pathconf, 5486 }; 5487 struct rpc_message msg = { 5488 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF], 5489 .rpc_argp = &args, 5490 .rpc_resp = &res, 5491 }; 5492 5493 /* None of the pathconf attributes are mandatory to implement */ 5494 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) { 5495 memset(pathconf, 0, sizeof(*pathconf)); 5496 return 0; 5497 } 5498 5499 nfs_fattr_init(pathconf->fattr); 5500 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5501 } 5502 5503 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 5504 struct nfs_pathconf *pathconf) 5505 { 5506 struct nfs4_exception exception = { 5507 .interruptible = true, 5508 }; 5509 int err; 5510 5511 do { 5512 err = nfs4_handle_exception(server, 5513 _nfs4_proc_pathconf(server, fhandle, pathconf), 5514 &exception); 5515 } while (exception.retry); 5516 return err; 5517 } 5518 5519 int nfs4_set_rw_stateid(nfs4_stateid *stateid, 5520 const struct nfs_open_context *ctx, 5521 const struct nfs_lock_context *l_ctx, 5522 fmode_t fmode) 5523 { 5524 return nfs4_select_rw_stateid(ctx->state, fmode, l_ctx, stateid, NULL); 5525 } 5526 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid); 5527 5528 static bool nfs4_stateid_is_current(nfs4_stateid *stateid, 5529 const struct nfs_open_context *ctx, 5530 const struct nfs_lock_context *l_ctx, 5531 fmode_t fmode) 5532 { 5533 nfs4_stateid _current_stateid; 5534 5535 /* If the current stateid represents a lost lock, then exit */ 5536 if (nfs4_set_rw_stateid(&_current_stateid, ctx, l_ctx, fmode) == -EIO) 5537 return true; 5538 return nfs4_stateid_match(stateid, &_current_stateid); 5539 } 5540 5541 
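/*
 * Errors that indicate the stateid sent with a READ or WRITE is no longer
 * valid on the server, so the request may need to be reissued with a more
 * recent stateid.
 */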
static bool nfs4_error_stateid_expired(int err) 5542 { 5543 switch (err) { 5544 case -NFS4ERR_DELEG_REVOKED: 5545 case -NFS4ERR_ADMIN_REVOKED: 5546 case -NFS4ERR_BAD_STATEID: 5547 case -NFS4ERR_STALE_STATEID: 5548 case -NFS4ERR_OLD_STATEID: 5549 case -NFS4ERR_OPENMODE: 5550 case -NFS4ERR_EXPIRED: 5551 return true; 5552 } 5553 return false; 5554 } 5555 5556 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr) 5557 { 5558 struct nfs_server *server = NFS_SERVER(hdr->inode); 5559 5560 trace_nfs4_read(hdr, task->tk_status); 5561 if (task->tk_status < 0) { 5562 struct nfs4_exception exception = { 5563 .inode = hdr->inode, 5564 .state = hdr->args.context->state, 5565 .stateid = &hdr->args.stateid, 5566 }; 5567 task->tk_status = nfs4_async_handle_exception(task, 5568 server, task->tk_status, &exception); 5569 if (exception.retry) { 5570 rpc_restart_call_prepare(task); 5571 return -EAGAIN; 5572 } 5573 } 5574 5575 if (task->tk_status > 0) 5576 renew_lease(server, hdr->timestamp); 5577 return 0; 5578 } 5579 5580 static bool nfs4_read_stateid_changed(struct rpc_task *task, 5581 struct nfs_pgio_args *args) 5582 { 5583 5584 if (!nfs4_error_stateid_expired(task->tk_status) || 5585 nfs4_stateid_is_current(&args->stateid, 5586 args->context, 5587 args->lock_context, 5588 FMODE_READ)) 5589 return false; 5590 rpc_restart_call_prepare(task); 5591 return true; 5592 } 5593 5594 static bool nfs4_read_plus_not_supported(struct rpc_task *task, 5595 struct nfs_pgio_header *hdr) 5596 { 5597 struct nfs_server *server = NFS_SERVER(hdr->inode); 5598 struct rpc_message *msg = &task->tk_msg; 5599 5600 if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS] && 5601 task->tk_status == -ENOTSUPP) { 5602 server->caps &= ~NFS_CAP_READ_PLUS; 5603 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 5604 rpc_restart_call_prepare(task); 5605 return true; 5606 } 5607 return false; 5608 } 5609 5610 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 5611 { 5612 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 5613 return -EAGAIN; 5614 if (nfs4_read_stateid_changed(task, &hdr->args)) 5615 return -EAGAIN; 5616 if (nfs4_read_plus_not_supported(task, hdr)) 5617 return -EAGAIN; 5618 if (task->tk_status > 0) 5619 nfs_invalidate_atime(hdr->inode); 5620 return hdr->pgio_done_cb ? 
hdr->pgio_done_cb(task, hdr) : 5621 nfs4_read_done_cb(task, hdr); 5622 } 5623 5624 #if defined CONFIG_NFS_V4_2 && defined CONFIG_NFS_V4_2_READ_PLUS 5625 static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr, 5626 struct rpc_message *msg) 5627 { 5628 /* Note: We don't use READ_PLUS with pNFS yet */ 5629 if (nfs_server_capable(hdr->inode, NFS_CAP_READ_PLUS) && !hdr->ds_clp) { 5630 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS]; 5631 return nfs_read_alloc_scratch(hdr, READ_PLUS_SCRATCH_SIZE); 5632 } 5633 return false; 5634 } 5635 #else 5636 static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr, 5637 struct rpc_message *msg) 5638 { 5639 return false; 5640 } 5641 #endif /* CONFIG_NFS_V4_2 */ 5642 5643 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr, 5644 struct rpc_message *msg) 5645 { 5646 hdr->timestamp = jiffies; 5647 if (!hdr->pgio_done_cb) 5648 hdr->pgio_done_cb = nfs4_read_done_cb; 5649 if (!nfs42_read_plus_support(hdr, msg)) 5650 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 5651 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0); 5652 } 5653 5654 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task, 5655 struct nfs_pgio_header *hdr) 5656 { 5657 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode)->nfs_client, 5658 &hdr->args.seq_args, 5659 &hdr->res.seq_res, 5660 task)) 5661 return 0; 5662 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context, 5663 hdr->args.lock_context, 5664 hdr->rw_mode) == -EIO) 5665 return -EIO; 5666 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) 5667 return -EIO; 5668 return 0; 5669 } 5670 5671 static int nfs4_write_done_cb(struct rpc_task *task, 5672 struct nfs_pgio_header *hdr) 5673 { 5674 struct inode *inode = hdr->inode; 5675 5676 trace_nfs4_write(hdr, task->tk_status); 5677 if (task->tk_status < 0) { 5678 struct nfs4_exception exception = { 5679 .inode = hdr->inode, 5680 .state = hdr->args.context->state, 5681 .stateid = &hdr->args.stateid, 5682 }; 5683 task->tk_status = nfs4_async_handle_exception(task, 5684 NFS_SERVER(inode), task->tk_status, 5685 &exception); 5686 if (exception.retry) { 5687 rpc_restart_call_prepare(task); 5688 return -EAGAIN; 5689 } 5690 } 5691 if (task->tk_status >= 0) { 5692 renew_lease(NFS_SERVER(inode), hdr->timestamp); 5693 nfs_writeback_update_inode(hdr); 5694 } 5695 return 0; 5696 } 5697 5698 static bool nfs4_write_stateid_changed(struct rpc_task *task, 5699 struct nfs_pgio_args *args) 5700 { 5701 5702 if (!nfs4_error_stateid_expired(task->tk_status) || 5703 nfs4_stateid_is_current(&args->stateid, 5704 args->context, 5705 args->lock_context, 5706 FMODE_WRITE)) 5707 return false; 5708 rpc_restart_call_prepare(task); 5709 return true; 5710 } 5711 5712 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 5713 { 5714 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 5715 return -EAGAIN; 5716 if (nfs4_write_stateid_changed(task, &hdr->args)) 5717 return -EAGAIN; 5718 return hdr->pgio_done_cb ? 
hdr->pgio_done_cb(task, hdr) : 5719 nfs4_write_done_cb(task, hdr); 5720 } 5721 5722 static 5723 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr) 5724 { 5725 /* Don't request attributes for pNFS or O_DIRECT writes */ 5726 if (hdr->ds_clp != NULL || hdr->dreq != NULL) 5727 return false; 5728 /* Otherwise, request attributes if and only if we don't hold 5729 * a delegation 5730 */ 5731 return nfs4_have_delegation(hdr->inode, FMODE_READ, 0) == 0; 5732 } 5733 5734 void nfs4_bitmask_set(__u32 bitmask[], const __u32 src[], 5735 struct inode *inode, unsigned long cache_validity) 5736 { 5737 struct nfs_server *server = NFS_SERVER(inode); 5738 unsigned int i; 5739 5740 memcpy(bitmask, src, sizeof(*bitmask) * NFS4_BITMASK_SZ); 5741 cache_validity |= READ_ONCE(NFS_I(inode)->cache_validity); 5742 5743 if (cache_validity & NFS_INO_INVALID_CHANGE) 5744 bitmask[0] |= FATTR4_WORD0_CHANGE; 5745 if (cache_validity & NFS_INO_INVALID_ATIME) 5746 bitmask[1] |= FATTR4_WORD1_TIME_ACCESS; 5747 if (cache_validity & NFS_INO_INVALID_MODE) 5748 bitmask[1] |= FATTR4_WORD1_MODE; 5749 if (cache_validity & NFS_INO_INVALID_OTHER) 5750 bitmask[1] |= FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP; 5751 if (cache_validity & NFS_INO_INVALID_NLINK) 5752 bitmask[1] |= FATTR4_WORD1_NUMLINKS; 5753 if (cache_validity & NFS_INO_INVALID_CTIME) 5754 bitmask[1] |= FATTR4_WORD1_TIME_METADATA; 5755 if (cache_validity & NFS_INO_INVALID_MTIME) 5756 bitmask[1] |= FATTR4_WORD1_TIME_MODIFY; 5757 if (cache_validity & NFS_INO_INVALID_BLOCKS) 5758 bitmask[1] |= FATTR4_WORD1_SPACE_USED; 5759 5760 if (cache_validity & NFS_INO_INVALID_SIZE) 5761 bitmask[0] |= FATTR4_WORD0_SIZE; 5762 5763 for (i = 0; i < NFS4_BITMASK_SZ; i++) 5764 bitmask[i] &= server->attr_bitmask[i]; 5765 } 5766 5767 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr, 5768 struct rpc_message *msg, 5769 struct rpc_clnt **clnt) 5770 { 5771 struct nfs_server *server = NFS_SERVER(hdr->inode); 5772 5773 if (!nfs4_write_need_cache_consistency_data(hdr)) { 5774 hdr->args.bitmask = NULL; 5775 hdr->res.fattr = NULL; 5776 } else { 5777 nfs4_bitmask_set(hdr->args.bitmask_store, 5778 server->cache_consistency_bitmask, 5779 hdr->inode, NFS_INO_INVALID_BLOCKS); 5780 hdr->args.bitmask = hdr->args.bitmask_store; 5781 } 5782 5783 if (!hdr->pgio_done_cb) 5784 hdr->pgio_done_cb = nfs4_write_done_cb; 5785 hdr->res.server = server; 5786 hdr->timestamp = jiffies; 5787 5788 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; 5789 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0); 5790 nfs4_state_protect_write(hdr->ds_clp ? 
hdr->ds_clp : server->nfs_client, clnt, msg, hdr); 5791 } 5792 5793 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) 5794 { 5795 nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client, 5796 &data->args.seq_args, 5797 &data->res.seq_res, 5798 task); 5799 } 5800 5801 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data) 5802 { 5803 struct inode *inode = data->inode; 5804 5805 trace_nfs4_commit(data, task->tk_status); 5806 if (nfs4_async_handle_error(task, NFS_SERVER(inode), 5807 NULL, NULL) == -EAGAIN) { 5808 rpc_restart_call_prepare(task); 5809 return -EAGAIN; 5810 } 5811 return 0; 5812 } 5813 5814 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data) 5815 { 5816 if (!nfs4_sequence_done(task, &data->res.seq_res)) 5817 return -EAGAIN; 5818 return data->commit_done_cb(task, data); 5819 } 5820 5821 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg, 5822 struct rpc_clnt **clnt) 5823 { 5824 struct nfs_server *server = NFS_SERVER(data->inode); 5825 5826 if (data->commit_done_cb == NULL) 5827 data->commit_done_cb = nfs4_commit_done_cb; 5828 data->res.server = server; 5829 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT]; 5830 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0); 5831 nfs4_state_protect(data->ds_clp ? data->ds_clp : server->nfs_client, 5832 NFS_SP4_MACH_CRED_COMMIT, clnt, msg); 5833 } 5834 5835 static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args, 5836 struct nfs_commitres *res) 5837 { 5838 struct inode *dst_inode = file_inode(dst); 5839 struct nfs_server *server = NFS_SERVER(dst_inode); 5840 struct rpc_message msg = { 5841 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT], 5842 .rpc_argp = args, 5843 .rpc_resp = res, 5844 }; 5845 5846 args->fh = NFS_FH(dst_inode); 5847 return nfs4_call_sync(server->client, server, &msg, 5848 &args->seq_args, &res->seq_res, 1); 5849 } 5850 5851 int nfs4_proc_commit(struct file *dst, __u64 offset, __u32 count, struct nfs_commitres *res) 5852 { 5853 struct nfs_commitargs args = { 5854 .offset = offset, 5855 .count = count, 5856 }; 5857 struct nfs_server *dst_server = NFS_SERVER(file_inode(dst)); 5858 struct nfs4_exception exception = { }; 5859 int status; 5860 5861 do { 5862 status = _nfs4_proc_commit(dst, &args, res); 5863 status = nfs4_handle_exception(dst_server, status, &exception); 5864 } while (exception.retry); 5865 5866 return status; 5867 } 5868 5869 struct nfs4_renewdata { 5870 struct nfs_client *client; 5871 unsigned long timestamp; 5872 }; 5873 5874 /* 5875 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special 5876 * standalone procedure for queueing an asynchronous RENEW. 
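 * RENEW is only used by NFSv4.0; NFSv4.1 and later renew the lease
 * implicitly as a side effect of the SEQUENCE operation.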
5877 */ 5878 static void nfs4_renew_release(void *calldata) 5879 { 5880 struct nfs4_renewdata *data = calldata; 5881 struct nfs_client *clp = data->client; 5882 5883 if (refcount_read(&clp->cl_count) > 1) 5884 nfs4_schedule_state_renewal(clp); 5885 nfs_put_client(clp); 5886 kfree(data); 5887 } 5888 5889 static void nfs4_renew_done(struct rpc_task *task, void *calldata) 5890 { 5891 struct nfs4_renewdata *data = calldata; 5892 struct nfs_client *clp = data->client; 5893 unsigned long timestamp = data->timestamp; 5894 5895 trace_nfs4_renew_async(clp, task->tk_status); 5896 switch (task->tk_status) { 5897 case 0: 5898 break; 5899 case -NFS4ERR_LEASE_MOVED: 5900 nfs4_schedule_lease_moved_recovery(clp); 5901 break; 5902 default: 5903 /* Unless we're shutting down, schedule state recovery! */ 5904 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) 5905 return; 5906 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { 5907 nfs4_schedule_lease_recovery(clp); 5908 return; 5909 } 5910 nfs4_schedule_path_down_recovery(clp); 5911 } 5912 do_renew_lease(clp, timestamp); 5913 } 5914 5915 static const struct rpc_call_ops nfs4_renew_ops = { 5916 .rpc_call_done = nfs4_renew_done, 5917 .rpc_release = nfs4_renew_release, 5918 }; 5919 5920 static int nfs4_proc_async_renew(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags) 5921 { 5922 struct rpc_message msg = { 5923 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 5924 .rpc_argp = clp, 5925 .rpc_cred = cred, 5926 }; 5927 struct nfs4_renewdata *data; 5928 5929 if (renew_flags == 0) 5930 return 0; 5931 if (!refcount_inc_not_zero(&clp->cl_count)) 5932 return -EIO; 5933 data = kmalloc(sizeof(*data), GFP_NOFS); 5934 if (data == NULL) { 5935 nfs_put_client(clp); 5936 return -ENOMEM; 5937 } 5938 data->client = clp; 5939 data->timestamp = jiffies; 5940 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT, 5941 &nfs4_renew_ops, data); 5942 } 5943 5944 static int nfs4_proc_renew(struct nfs_client *clp, const struct cred *cred) 5945 { 5946 struct rpc_message msg = { 5947 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 5948 .rpc_argp = clp, 5949 .rpc_cred = cred, 5950 }; 5951 unsigned long now = jiffies; 5952 int status; 5953 5954 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5955 if (status < 0) 5956 return status; 5957 do_renew_lease(clp, now); 5958 return 0; 5959 } 5960 5961 static bool nfs4_server_supports_acls(const struct nfs_server *server, 5962 enum nfs4_acl_type type) 5963 { 5964 switch (type) { 5965 default: 5966 return server->attr_bitmask[0] & FATTR4_WORD0_ACL; 5967 case NFS4ACL_DACL: 5968 return server->attr_bitmask[1] & FATTR4_WORD1_DACL; 5969 case NFS4ACL_SACL: 5970 return server->attr_bitmask[1] & FATTR4_WORD1_SACL; 5971 } 5972 } 5973 5974 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that 5975 * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on 5976 * the stack. 
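 * (With XATTR_SIZE_MAX = 64KiB and 4KiB pages that is 16 page pointers,
 * i.e. 128 bytes of stack on a 64-bit build.)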
5977 */ 5978 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE) 5979 5980 int nfs4_buf_to_pages_noslab(const void *buf, size_t buflen, 5981 struct page **pages) 5982 { 5983 struct page *newpage, **spages; 5984 int rc = 0; 5985 size_t len; 5986 spages = pages; 5987 5988 do { 5989 len = min_t(size_t, PAGE_SIZE, buflen); 5990 newpage = alloc_page(GFP_KERNEL); 5991 5992 if (newpage == NULL) 5993 goto unwind; 5994 memcpy(page_address(newpage), buf, len); 5995 buf += len; 5996 buflen -= len; 5997 *pages++ = newpage; 5998 rc++; 5999 } while (buflen != 0); 6000 6001 return rc; 6002 6003 unwind: 6004 for(; rc > 0; rc--) 6005 __free_page(spages[rc-1]); 6006 return -ENOMEM; 6007 } 6008 6009 struct nfs4_cached_acl { 6010 enum nfs4_acl_type type; 6011 int cached; 6012 size_t len; 6013 char data[]; 6014 }; 6015 6016 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl) 6017 { 6018 struct nfs_inode *nfsi = NFS_I(inode); 6019 6020 spin_lock(&inode->i_lock); 6021 kfree(nfsi->nfs4_acl); 6022 nfsi->nfs4_acl = acl; 6023 spin_unlock(&inode->i_lock); 6024 } 6025 6026 static void nfs4_zap_acl_attr(struct inode *inode) 6027 { 6028 nfs4_set_cached_acl(inode, NULL); 6029 } 6030 6031 static ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, 6032 size_t buflen, enum nfs4_acl_type type) 6033 { 6034 struct nfs_inode *nfsi = NFS_I(inode); 6035 struct nfs4_cached_acl *acl; 6036 int ret = -ENOENT; 6037 6038 spin_lock(&inode->i_lock); 6039 acl = nfsi->nfs4_acl; 6040 if (acl == NULL) 6041 goto out; 6042 if (acl->type != type) 6043 goto out; 6044 if (buf == NULL) /* user is just asking for length */ 6045 goto out_len; 6046 if (acl->cached == 0) 6047 goto out; 6048 ret = -ERANGE; /* see getxattr(2) man page */ 6049 if (acl->len > buflen) 6050 goto out; 6051 memcpy(buf, acl->data, acl->len); 6052 out_len: 6053 ret = acl->len; 6054 out: 6055 spin_unlock(&inode->i_lock); 6056 return ret; 6057 } 6058 6059 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, 6060 size_t pgbase, size_t acl_len, 6061 enum nfs4_acl_type type) 6062 { 6063 struct nfs4_cached_acl *acl; 6064 size_t buflen = sizeof(*acl) + acl_len; 6065 6066 if (buflen <= PAGE_SIZE) { 6067 acl = kmalloc(buflen, GFP_KERNEL); 6068 if (acl == NULL) 6069 goto out; 6070 acl->cached = 1; 6071 _copy_from_pages(acl->data, pages, pgbase, acl_len); 6072 } else { 6073 acl = kmalloc(sizeof(*acl), GFP_KERNEL); 6074 if (acl == NULL) 6075 goto out; 6076 acl->cached = 0; 6077 } 6078 acl->type = type; 6079 acl->len = acl_len; 6080 out: 6081 nfs4_set_cached_acl(inode, acl); 6082 } 6083 6084 /* 6085 * The getxattr API returns the required buffer length when called with a 6086 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating 6087 * the required buf. On a NULL buf, we send a page of data to the server 6088 * guessing that the ACL request can be serviced by a page. If so, we cache 6089 * up to the page of ACL data, and the 2nd call to getxattr is serviced by 6090 * the cache. If not so, we throw away the page, and cache the required 6091 * length. The next getxattr call will then produce another round trip to 6092 * the server, this time with the input buf of the required size. 
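 *
 * An illustrative user-space sequence (the usual pattern for the
 * "system.nfs4_acl" attribute; error and buffer handling elided):
 *
 *	len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	buf = malloc(len);
 *	getxattr(path, "system.nfs4_acl", buf, len);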
6093 */ 6094 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, 6095 size_t buflen, enum nfs4_acl_type type) 6096 { 6097 struct page **pages; 6098 struct nfs_getaclargs args = { 6099 .fh = NFS_FH(inode), 6100 .acl_type = type, 6101 .acl_len = buflen, 6102 }; 6103 struct nfs_getaclres res = { 6104 .acl_type = type, 6105 .acl_len = buflen, 6106 }; 6107 struct rpc_message msg = { 6108 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL], 6109 .rpc_argp = &args, 6110 .rpc_resp = &res, 6111 }; 6112 unsigned int npages; 6113 int ret = -ENOMEM, i; 6114 struct nfs_server *server = NFS_SERVER(inode); 6115 6116 if (buflen == 0) 6117 buflen = server->rsize; 6118 6119 npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1; 6120 pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); 6121 if (!pages) 6122 return -ENOMEM; 6123 6124 args.acl_pages = pages; 6125 6126 for (i = 0; i < npages; i++) { 6127 pages[i] = alloc_page(GFP_KERNEL); 6128 if (!pages[i]) 6129 goto out_free; 6130 } 6131 6132 /* for decoding across pages */ 6133 res.acl_scratch = alloc_page(GFP_KERNEL); 6134 if (!res.acl_scratch) 6135 goto out_free; 6136 6137 args.acl_len = npages * PAGE_SIZE; 6138 6139 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", 6140 __func__, buf, buflen, npages, args.acl_len); 6141 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), 6142 &msg, &args.seq_args, &res.seq_res, 0); 6143 if (ret) 6144 goto out_free; 6145 6146 /* Handle the case where the passed-in buffer is too short */ 6147 if (res.acl_flags & NFS4_ACL_TRUNC) { 6148 /* Did the user only issue a request for the acl length? */ 6149 if (buf == NULL) 6150 goto out_ok; 6151 ret = -ERANGE; 6152 goto out_free; 6153 } 6154 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len, 6155 type); 6156 if (buf) { 6157 if (res.acl_len > buflen) { 6158 ret = -ERANGE; 6159 goto out_free; 6160 } 6161 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len); 6162 } 6163 out_ok: 6164 ret = res.acl_len; 6165 out_free: 6166 while (--i >= 0) 6167 __free_page(pages[i]); 6168 if (res.acl_scratch) 6169 __free_page(res.acl_scratch); 6170 kfree(pages); 6171 return ret; 6172 } 6173 6174 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, 6175 size_t buflen, enum nfs4_acl_type type) 6176 { 6177 struct nfs4_exception exception = { 6178 .interruptible = true, 6179 }; 6180 ssize_t ret; 6181 do { 6182 ret = __nfs4_get_acl_uncached(inode, buf, buflen, type); 6183 trace_nfs4_get_acl(inode, ret); 6184 if (ret >= 0) 6185 break; 6186 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); 6187 } while (exception.retry); 6188 return ret; 6189 } 6190 6191 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen, 6192 enum nfs4_acl_type type) 6193 { 6194 struct nfs_server *server = NFS_SERVER(inode); 6195 int ret; 6196 6197 if (!nfs4_server_supports_acls(server, type)) 6198 return -EOPNOTSUPP; 6199 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 6200 if (ret < 0) 6201 return ret; 6202 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) 6203 nfs_zap_acl_cache(inode); 6204 ret = nfs4_read_cached_acl(inode, buf, buflen, type); 6205 if (ret != -ENOENT) 6206 /* -ENOENT is returned if there is no ACL or if there is an ACL 6207 * but no cached acl data, just the acl length */ 6208 return ret; 6209 return nfs4_get_acl_uncached(inode, buf, buflen, type); 6210 } 6211 6212 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, 6213 size_t buflen, enum nfs4_acl_type 
type) 6214 { 6215 struct nfs_server *server = NFS_SERVER(inode); 6216 struct page *pages[NFS4ACL_MAXPAGES]; 6217 struct nfs_setaclargs arg = { 6218 .fh = NFS_FH(inode), 6219 .acl_type = type, 6220 .acl_len = buflen, 6221 .acl_pages = pages, 6222 }; 6223 struct nfs_setaclres res; 6224 struct rpc_message msg = { 6225 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL], 6226 .rpc_argp = &arg, 6227 .rpc_resp = &res, 6228 }; 6229 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 6230 int ret, i; 6231 6232 /* You can't remove system.nfs4_acl: */ 6233 if (buflen == 0) 6234 return -EINVAL; 6235 if (!nfs4_server_supports_acls(server, type)) 6236 return -EOPNOTSUPP; 6237 if (npages > ARRAY_SIZE(pages)) 6238 return -ERANGE; 6239 i = nfs4_buf_to_pages_noslab(buf, buflen, arg.acl_pages); 6240 if (i < 0) 6241 return i; 6242 nfs4_inode_make_writeable(inode); 6243 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 6244 6245 /* 6246 * Free each page after tx, so the only ref left is 6247 * held by the network stack 6248 */ 6249 for (; i > 0; i--) 6250 put_page(pages[i-1]); 6251 6252 /* 6253 * Acl update can result in inode attribute update. 6254 * so mark the attribute cache invalid. 6255 */ 6256 spin_lock(&inode->i_lock); 6257 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE | 6258 NFS_INO_INVALID_CTIME | 6259 NFS_INO_REVAL_FORCED); 6260 spin_unlock(&inode->i_lock); 6261 nfs_access_zap_cache(inode); 6262 nfs_zap_acl_cache(inode); 6263 return ret; 6264 } 6265 6266 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, 6267 size_t buflen, enum nfs4_acl_type type) 6268 { 6269 struct nfs4_exception exception = { }; 6270 int err; 6271 do { 6272 err = __nfs4_proc_set_acl(inode, buf, buflen, type); 6273 trace_nfs4_set_acl(inode, err); 6274 if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) { 6275 /* 6276 * no need to retry since the kernel 6277 * isn't involved in encoding the ACEs. 
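 * The XDR-encoded ACL comes straight from user space (e.g. from the
 * nfs4-acl-tools utilities), so a bad owner or group name can only be
 * corrected by the caller.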
6278 */ 6279 err = -EINVAL; 6280 break; 6281 } 6282 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6283 &exception); 6284 } while (exception.retry); 6285 return err; 6286 } 6287 6288 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 6289 static int _nfs4_get_security_label(struct inode *inode, void *buf, 6290 size_t buflen) 6291 { 6292 struct nfs_server *server = NFS_SERVER(inode); 6293 struct nfs4_label label = {0, 0, 0, buflen, buf}; 6294 6295 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 6296 struct nfs_fattr fattr = { 6297 .label = &label, 6298 }; 6299 struct nfs4_getattr_arg arg = { 6300 .fh = NFS_FH(inode), 6301 .bitmask = bitmask, 6302 }; 6303 struct nfs4_getattr_res res = { 6304 .fattr = &fattr, 6305 .server = server, 6306 }; 6307 struct rpc_message msg = { 6308 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 6309 .rpc_argp = &arg, 6310 .rpc_resp = &res, 6311 }; 6312 int ret; 6313 6314 nfs_fattr_init(&fattr); 6315 6316 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0); 6317 if (ret) 6318 return ret; 6319 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL)) 6320 return -ENOENT; 6321 return label.len; 6322 } 6323 6324 static int nfs4_get_security_label(struct inode *inode, void *buf, 6325 size_t buflen) 6326 { 6327 struct nfs4_exception exception = { 6328 .interruptible = true, 6329 }; 6330 int err; 6331 6332 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 6333 return -EOPNOTSUPP; 6334 6335 do { 6336 err = _nfs4_get_security_label(inode, buf, buflen); 6337 trace_nfs4_get_security_label(inode, err); 6338 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6339 &exception); 6340 } while (exception.retry); 6341 return err; 6342 } 6343 6344 static int _nfs4_do_set_security_label(struct inode *inode, 6345 struct nfs4_label *ilabel, 6346 struct nfs_fattr *fattr) 6347 { 6348 6349 struct iattr sattr = {0}; 6350 struct nfs_server *server = NFS_SERVER(inode); 6351 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 6352 struct nfs_setattrargs arg = { 6353 .fh = NFS_FH(inode), 6354 .iap = &sattr, 6355 .server = server, 6356 .bitmask = bitmask, 6357 .label = ilabel, 6358 }; 6359 struct nfs_setattrres res = { 6360 .fattr = fattr, 6361 .server = server, 6362 }; 6363 struct rpc_message msg = { 6364 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 6365 .rpc_argp = &arg, 6366 .rpc_resp = &res, 6367 }; 6368 int status; 6369 6370 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 6371 6372 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 6373 if (status) 6374 dprintk("%s failed: %d\n", __func__, status); 6375 6376 return status; 6377 } 6378 6379 static int nfs4_do_set_security_label(struct inode *inode, 6380 struct nfs4_label *ilabel, 6381 struct nfs_fattr *fattr) 6382 { 6383 struct nfs4_exception exception = { }; 6384 int err; 6385 6386 do { 6387 err = _nfs4_do_set_security_label(inode, ilabel, fattr); 6388 trace_nfs4_set_security_label(inode, err); 6389 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6390 &exception); 6391 } while (exception.retry); 6392 return err; 6393 } 6394 6395 static int 6396 nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen) 6397 { 6398 struct nfs4_label ilabel = {0, 0, 0, buflen, (char *)buf }; 6399 struct nfs_fattr *fattr; 6400 int status; 6401 6402 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 6403 return -EOPNOTSUPP; 6404 6405 fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode)); 6406 if (fattr == NULL) 6407 return -ENOMEM; 6408 6409 
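	/*
	 * Push the new label to the server via SETATTR; on success, update
	 * the inode's cached security attributes from the returned fattr.
	 */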
status = nfs4_do_set_security_label(inode, &ilabel, fattr); 6410 if (status == 0) 6411 nfs_setsecurity(inode, fattr); 6412 6413 nfs_free_fattr(fattr); 6414 return status; 6415 } 6416 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */ 6417 6418 6419 static void nfs4_init_boot_verifier(const struct nfs_client *clp, 6420 nfs4_verifier *bootverf) 6421 { 6422 __be32 verf[2]; 6423 6424 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) { 6425 /* An impossible timestamp guarantees this value 6426 * will never match a generated boot time. */ 6427 verf[0] = cpu_to_be32(U32_MAX); 6428 verf[1] = cpu_to_be32(U32_MAX); 6429 } else { 6430 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 6431 u64 ns = ktime_to_ns(nn->boot_time); 6432 6433 verf[0] = cpu_to_be32(ns >> 32); 6434 verf[1] = cpu_to_be32(ns); 6435 } 6436 memcpy(bootverf->data, verf, sizeof(bootverf->data)); 6437 } 6438 6439 static size_t 6440 nfs4_get_uniquifier(struct nfs_client *clp, char *buf, size_t buflen) 6441 { 6442 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 6443 struct nfs_netns_client *nn_clp = nn->nfs_client; 6444 const char *id; 6445 6446 buf[0] = '\0'; 6447 6448 if (nn_clp) { 6449 rcu_read_lock(); 6450 id = rcu_dereference(nn_clp->identifier); 6451 if (id) 6452 strscpy(buf, id, buflen); 6453 rcu_read_unlock(); 6454 } 6455 6456 if (nfs4_client_id_uniquifier[0] != '\0' && buf[0] == '\0') 6457 strscpy(buf, nfs4_client_id_uniquifier, buflen); 6458 6459 return strlen(buf); 6460 } 6461 6462 static int 6463 nfs4_init_nonuniform_client_string(struct nfs_client *clp) 6464 { 6465 char buf[NFS4_CLIENT_ID_UNIQ_LEN]; 6466 size_t buflen; 6467 size_t len; 6468 char *str; 6469 6470 if (clp->cl_owner_id != NULL) 6471 return 0; 6472 6473 rcu_read_lock(); 6474 len = 14 + 6475 strlen(clp->cl_rpcclient->cl_nodename) + 6476 1 + 6477 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) + 6478 1; 6479 rcu_read_unlock(); 6480 6481 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf)); 6482 if (buflen) 6483 len += buflen + 1; 6484 6485 if (len > NFS4_OPAQUE_LIMIT + 1) 6486 return -EINVAL; 6487 6488 /* 6489 * Since this string is allocated at mount time, and held until the 6490 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 6491 * about a memory-reclaim deadlock. 6492 */ 6493 str = kmalloc(len, GFP_KERNEL); 6494 if (!str) 6495 return -ENOMEM; 6496 6497 rcu_read_lock(); 6498 if (buflen) 6499 scnprintf(str, len, "Linux NFSv4.0 %s/%s/%s", 6500 clp->cl_rpcclient->cl_nodename, buf, 6501 rpc_peeraddr2str(clp->cl_rpcclient, 6502 RPC_DISPLAY_ADDR)); 6503 else 6504 scnprintf(str, len, "Linux NFSv4.0 %s/%s", 6505 clp->cl_rpcclient->cl_nodename, 6506 rpc_peeraddr2str(clp->cl_rpcclient, 6507 RPC_DISPLAY_ADDR)); 6508 rcu_read_unlock(); 6509 6510 clp->cl_owner_id = str; 6511 return 0; 6512 } 6513 6514 static int 6515 nfs4_init_uniform_client_string(struct nfs_client *clp) 6516 { 6517 char buf[NFS4_CLIENT_ID_UNIQ_LEN]; 6518 size_t buflen; 6519 size_t len; 6520 char *str; 6521 6522 if (clp->cl_owner_id != NULL) 6523 return 0; 6524 6525 len = 10 + 10 + 1 + 10 + 1 + 6526 strlen(clp->cl_rpcclient->cl_nodename) + 1; 6527 6528 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf)); 6529 if (buflen) 6530 len += buflen + 1; 6531 6532 if (len > NFS4_OPAQUE_LIMIT + 1) 6533 return -EINVAL; 6534 6535 /* 6536 * Since this string is allocated at mount time, and held until the 6537 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 6538 * about a memory-reclaim deadlock. 
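 *
 * The resulting client identifier looks like, e.g.,
 * "Linux NFSv4.1 <uniquifier>/<hostname>", or "Linux NFSv4.1 <hostname>"
 * when no uniquifier has been configured.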
6539 */ 6540 str = kmalloc(len, GFP_KERNEL); 6541 if (!str) 6542 return -ENOMEM; 6543 6544 if (buflen) 6545 scnprintf(str, len, "Linux NFSv%u.%u %s/%s", 6546 clp->rpc_ops->version, clp->cl_minorversion, 6547 buf, clp->cl_rpcclient->cl_nodename); 6548 else 6549 scnprintf(str, len, "Linux NFSv%u.%u %s", 6550 clp->rpc_ops->version, clp->cl_minorversion, 6551 clp->cl_rpcclient->cl_nodename); 6552 clp->cl_owner_id = str; 6553 return 0; 6554 } 6555 6556 /* 6557 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback 6558 * services. Advertise one based on the address family of the 6559 * clientaddr. 6560 */ 6561 static unsigned int 6562 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len) 6563 { 6564 if (strchr(clp->cl_ipaddr, ':') != NULL) 6565 return scnprintf(buf, len, "tcp6"); 6566 else 6567 return scnprintf(buf, len, "tcp"); 6568 } 6569 6570 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata) 6571 { 6572 struct nfs4_setclientid *sc = calldata; 6573 6574 if (task->tk_status == 0) 6575 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred); 6576 } 6577 6578 static const struct rpc_call_ops nfs4_setclientid_ops = { 6579 .rpc_call_done = nfs4_setclientid_done, 6580 }; 6581 6582 /** 6583 * nfs4_proc_setclientid - Negotiate client ID 6584 * @clp: state data structure 6585 * @program: RPC program for NFSv4 callback service 6586 * @port: IP port number for NFS4 callback service 6587 * @cred: credential to use for this call 6588 * @res: where to place the result 6589 * 6590 * Returns zero, a negative errno, or a negative NFS4ERR status code. 6591 */ 6592 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, 6593 unsigned short port, const struct cred *cred, 6594 struct nfs4_setclientid_res *res) 6595 { 6596 nfs4_verifier sc_verifier; 6597 struct nfs4_setclientid setclientid = { 6598 .sc_verifier = &sc_verifier, 6599 .sc_prog = program, 6600 .sc_clnt = clp, 6601 }; 6602 struct rpc_message msg = { 6603 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID], 6604 .rpc_argp = &setclientid, 6605 .rpc_resp = res, 6606 .rpc_cred = cred, 6607 }; 6608 struct rpc_task_setup task_setup_data = { 6609 .rpc_client = clp->cl_rpcclient, 6610 .rpc_message = &msg, 6611 .callback_ops = &nfs4_setclientid_ops, 6612 .callback_data = &setclientid, 6613 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN, 6614 }; 6615 unsigned long now = jiffies; 6616 int status; 6617 6618 /* nfs_client_id4 */ 6619 nfs4_init_boot_verifier(clp, &sc_verifier); 6620 6621 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags)) 6622 status = nfs4_init_uniform_client_string(clp); 6623 else 6624 status = nfs4_init_nonuniform_client_string(clp); 6625 6626 if (status) 6627 goto out; 6628 6629 /* cb_client4 */ 6630 setclientid.sc_netid_len = 6631 nfs4_init_callback_netid(clp, 6632 setclientid.sc_netid, 6633 sizeof(setclientid.sc_netid)); 6634 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr, 6635 sizeof(setclientid.sc_uaddr), "%s.%u.%u", 6636 clp->cl_ipaddr, port >> 8, port & 255); 6637 6638 dprintk("NFS call setclientid auth=%s, '%s'\n", 6639 clp->cl_rpcclient->cl_auth->au_ops->au_name, 6640 clp->cl_owner_id); 6641 6642 status = nfs4_call_sync_custom(&task_setup_data); 6643 if (setclientid.sc_cred) { 6644 kfree(clp->cl_acceptor); 6645 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred); 6646 put_rpccred(setclientid.sc_cred); 6647 } 6648 6649 if (status == 0) 6650 do_renew_lease(clp, now); 6651 out: 6652 trace_nfs4_setclientid(clp, status); 6653 dprintk("NFS reply 
setclientid: %d\n", status); 6654 return status; 6655 } 6656 6657 /** 6658 * nfs4_proc_setclientid_confirm - Confirm client ID 6659 * @clp: state data structure 6660 * @arg: result of a previous SETCLIENTID 6661 * @cred: credential to use for this call 6662 * 6663 * Returns zero, a negative errno, or a negative NFS4ERR status code. 6664 */ 6665 int nfs4_proc_setclientid_confirm(struct nfs_client *clp, 6666 struct nfs4_setclientid_res *arg, 6667 const struct cred *cred) 6668 { 6669 struct rpc_message msg = { 6670 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM], 6671 .rpc_argp = arg, 6672 .rpc_cred = cred, 6673 }; 6674 int status; 6675 6676 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n", 6677 clp->cl_rpcclient->cl_auth->au_ops->au_name, 6678 clp->cl_clientid); 6679 status = rpc_call_sync(clp->cl_rpcclient, &msg, 6680 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 6681 trace_nfs4_setclientid_confirm(clp, status); 6682 dprintk("NFS reply setclientid_confirm: %d\n", status); 6683 return status; 6684 } 6685 6686 struct nfs4_delegreturndata { 6687 struct nfs4_delegreturnargs args; 6688 struct nfs4_delegreturnres res; 6689 struct nfs_fh fh; 6690 nfs4_stateid stateid; 6691 unsigned long timestamp; 6692 struct { 6693 struct nfs4_layoutreturn_args arg; 6694 struct nfs4_layoutreturn_res res; 6695 struct nfs4_xdr_opaque_data ld_private; 6696 u32 roc_barrier; 6697 bool roc; 6698 } lr; 6699 struct nfs4_delegattr sattr; 6700 struct nfs_fattr fattr; 6701 int rpc_status; 6702 struct inode *inode; 6703 }; 6704 6705 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) 6706 { 6707 struct nfs4_delegreturndata *data = calldata; 6708 struct nfs4_exception exception = { 6709 .inode = data->inode, 6710 .stateid = &data->stateid, 6711 .task_is_privileged = data->args.seq_args.sa_privileged, 6712 }; 6713 6714 if (!nfs4_sequence_done(task, &data->res.seq_res)) 6715 return; 6716 6717 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status); 6718 6719 /* Handle Layoutreturn errors */ 6720 if (pnfs_roc_done(task, &data->args.lr_args, &data->res.lr_res, 6721 &data->res.lr_ret) == -EAGAIN) 6722 goto out_restart; 6723 6724 if (data->args.sattr_args && task->tk_status != 0) { 6725 switch(data->res.sattr_ret) { 6726 case 0: 6727 data->args.sattr_args = NULL; 6728 data->res.sattr_res = false; 6729 break; 6730 case -NFS4ERR_ADMIN_REVOKED: 6731 case -NFS4ERR_DELEG_REVOKED: 6732 case -NFS4ERR_EXPIRED: 6733 case -NFS4ERR_BAD_STATEID: 6734 /* Let the main handler below do stateid recovery */ 6735 break; 6736 case -NFS4ERR_OLD_STATEID: 6737 if (nfs4_refresh_delegation_stateid(&data->stateid, 6738 data->inode)) 6739 goto out_restart; 6740 fallthrough; 6741 default: 6742 data->args.sattr_args = NULL; 6743 data->res.sattr_res = false; 6744 goto out_restart; 6745 } 6746 } 6747 6748 switch (task->tk_status) { 6749 case 0: 6750 renew_lease(data->res.server, data->timestamp); 6751 break; 6752 case -NFS4ERR_ADMIN_REVOKED: 6753 case -NFS4ERR_DELEG_REVOKED: 6754 case -NFS4ERR_EXPIRED: 6755 nfs4_free_revoked_stateid(data->res.server, 6756 data->args.stateid, 6757 task->tk_msg.rpc_cred); 6758 fallthrough; 6759 case -NFS4ERR_BAD_STATEID: 6760 case -NFS4ERR_STALE_STATEID: 6761 case -ETIMEDOUT: 6762 task->tk_status = 0; 6763 break; 6764 case -NFS4ERR_OLD_STATEID: 6765 if (!nfs4_refresh_delegation_stateid(&data->stateid, data->inode)) 6766 nfs4_stateid_seqid_inc(&data->stateid); 6767 if (data->args.bitmask) { 6768 data->args.bitmask = NULL; 6769 data->res.fattr = NULL; 6770 } 6771 
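		/* Retry the DELEGRETURN with the updated stateid */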
goto out_restart; 6772 case -NFS4ERR_ACCESS: 6773 if (data->args.bitmask) { 6774 data->args.bitmask = NULL; 6775 data->res.fattr = NULL; 6776 goto out_restart; 6777 } 6778 fallthrough; 6779 default: 6780 task->tk_status = nfs4_async_handle_exception(task, 6781 data->res.server, task->tk_status, 6782 &exception); 6783 if (exception.retry) 6784 goto out_restart; 6785 } 6786 nfs_delegation_mark_returned(data->inode, data->args.stateid); 6787 data->rpc_status = task->tk_status; 6788 return; 6789 out_restart: 6790 task->tk_status = 0; 6791 rpc_restart_call_prepare(task); 6792 } 6793 6794 static void nfs4_delegreturn_release(void *calldata) 6795 { 6796 struct nfs4_delegreturndata *data = calldata; 6797 struct inode *inode = data->inode; 6798 6799 if (data->lr.roc) 6800 pnfs_roc_release(&data->lr.arg, &data->lr.res, 6801 data->res.lr_ret); 6802 if (inode) { 6803 nfs4_fattr_set_prechange(&data->fattr, 6804 inode_peek_iversion_raw(inode)); 6805 nfs_refresh_inode(inode, &data->fattr); 6806 nfs_iput_and_deactive(inode); 6807 } 6808 kfree(calldata); 6809 } 6810 6811 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) 6812 { 6813 struct nfs4_delegreturndata *d_data; 6814 struct pnfs_layout_hdr *lo; 6815 6816 d_data = data; 6817 6818 if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) { 6819 nfs4_sequence_done(task, &d_data->res.seq_res); 6820 return; 6821 } 6822 6823 lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL; 6824 if (lo && !pnfs_layout_is_valid(lo)) { 6825 d_data->args.lr_args = NULL; 6826 d_data->res.lr_res = NULL; 6827 } 6828 6829 nfs4_setup_sequence(d_data->res.server->nfs_client, 6830 &d_data->args.seq_args, 6831 &d_data->res.seq_res, 6832 task); 6833 } 6834 6835 static const struct rpc_call_ops nfs4_delegreturn_ops = { 6836 .rpc_call_prepare = nfs4_delegreturn_prepare, 6837 .rpc_call_done = nfs4_delegreturn_done, 6838 .rpc_release = nfs4_delegreturn_release, 6839 }; 6840 6841 static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, 6842 const nfs4_stateid *stateid, 6843 struct nfs_delegation *delegation, 6844 int issync) 6845 { 6846 struct nfs4_delegreturndata *data; 6847 struct nfs_server *server = NFS_SERVER(inode); 6848 struct rpc_task *task; 6849 struct rpc_message msg = { 6850 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN], 6851 .rpc_cred = cred, 6852 }; 6853 struct rpc_task_setup task_setup_data = { 6854 .rpc_client = server->client, 6855 .rpc_message = &msg, 6856 .callback_ops = &nfs4_delegreturn_ops, 6857 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT, 6858 }; 6859 int status = 0; 6860 6861 if (nfs_server_capable(inode, NFS_CAP_MOVEABLE)) 6862 task_setup_data.flags |= RPC_TASK_MOVEABLE; 6863 6864 data = kzalloc(sizeof(*data), GFP_KERNEL); 6865 if (data == NULL) 6866 return -ENOMEM; 6867 6868 nfs4_state_protect(server->nfs_client, 6869 NFS_SP4_MACH_CRED_CLEANUP, 6870 &task_setup_data.rpc_client, &msg); 6871 6872 data->args.fhandle = &data->fh; 6873 data->args.stateid = &data->stateid; 6874 nfs4_bitmask_set(data->args.bitmask_store, 6875 server->cache_consistency_bitmask, inode, 0); 6876 data->args.bitmask = data->args.bitmask_store; 6877 nfs_copy_fh(&data->fh, NFS_FH(inode)); 6878 nfs4_stateid_copy(&data->stateid, stateid); 6879 data->res.fattr = &data->fattr; 6880 data->res.server = server; 6881 data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT; 6882 data->lr.arg.ld_private = &data->lr.ld_private; 6883 nfs_fattr_init(data->res.fattr); 6884 data->timestamp = jiffies; 6885 data->rpc_status = 0; 6886 
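	/*
	 * Pin the inode and, where pNFS return-on-close applies, piggyback
	 * a LAYOUTRETURN onto this DELEGRETURN compound.
	 */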
data->inode = nfs_igrab_and_active(inode); 6887 if (data->inode || issync) { 6888 data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res, 6889 cred); 6890 if (data->lr.roc) { 6891 data->args.lr_args = &data->lr.arg; 6892 data->res.lr_res = &data->lr.res; 6893 } 6894 } 6895 6896 if (delegation && 6897 test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) { 6898 if (delegation->type & FMODE_READ) { 6899 data->sattr.atime = inode_get_atime(inode); 6900 data->sattr.atime_set = true; 6901 } 6902 if (delegation->type & FMODE_WRITE) { 6903 data->sattr.mtime = inode_get_mtime(inode); 6904 data->sattr.mtime_set = true; 6905 } 6906 data->args.sattr_args = &data->sattr; 6907 data->res.sattr_res = true; 6908 } 6909 6910 if (!data->inode) 6911 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 6912 1); 6913 else 6914 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 6915 0); 6916 6917 task_setup_data.callback_data = data; 6918 msg.rpc_argp = &data->args; 6919 msg.rpc_resp = &data->res; 6920 task = rpc_run_task(&task_setup_data); 6921 if (IS_ERR(task)) 6922 return PTR_ERR(task); 6923 if (!issync) 6924 goto out; 6925 status = rpc_wait_for_completion_task(task); 6926 if (status != 0) 6927 goto out; 6928 status = data->rpc_status; 6929 out: 6930 rpc_put_task(task); 6931 return status; 6932 } 6933 6934 int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, 6935 const nfs4_stateid *stateid, 6936 struct nfs_delegation *delegation, int issync) 6937 { 6938 struct nfs_server *server = NFS_SERVER(inode); 6939 struct nfs4_exception exception = { }; 6940 int err; 6941 do { 6942 err = _nfs4_proc_delegreturn(inode, cred, stateid, 6943 delegation, issync); 6944 trace_nfs4_delegreturn(inode, stateid, err); 6945 switch (err) { 6946 case -NFS4ERR_STALE_STATEID: 6947 case -NFS4ERR_EXPIRED: 6948 case 0: 6949 return 0; 6950 } 6951 err = nfs4_handle_exception(server, err, &exception); 6952 } while (exception.retry); 6953 return err; 6954 } 6955 6956 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 6957 { 6958 struct inode *inode = state->inode; 6959 struct nfs_server *server = NFS_SERVER(inode); 6960 struct nfs_client *clp = server->nfs_client; 6961 struct nfs_lockt_args arg = { 6962 .fh = NFS_FH(inode), 6963 .fl = request, 6964 }; 6965 struct nfs_lockt_res res = { 6966 .denied = request, 6967 }; 6968 struct rpc_message msg = { 6969 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT], 6970 .rpc_argp = &arg, 6971 .rpc_resp = &res, 6972 .rpc_cred = state->owner->so_cred, 6973 }; 6974 struct nfs4_lock_state *lsp; 6975 int status; 6976 6977 arg.lock_owner.clientid = clp->cl_clientid; 6978 status = nfs4_set_lock_state(state, request); 6979 if (status != 0) 6980 goto out; 6981 lsp = request->fl_u.nfs4_fl.owner; 6982 arg.lock_owner.id = lsp->ls_seqid.owner_id; 6983 arg.lock_owner.s_dev = server->s_dev; 6984 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 6985 switch (status) { 6986 case 0: 6987 request->c.flc_type = F_UNLCK; 6988 break; 6989 case -NFS4ERR_DENIED: 6990 status = 0; 6991 } 6992 request->fl_ops->fl_release_private(request); 6993 request->fl_ops = NULL; 6994 out: 6995 return status; 6996 } 6997 6998 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 6999 { 7000 struct nfs4_exception exception = { 7001 .interruptible = true, 7002 }; 7003 int err; 7004 7005 do { 7006 err = _nfs4_proc_getlk(state, cmd, request); 7007 trace_nfs4_get_lock(request, state, cmd, 
err); 7008 err = nfs4_handle_exception(NFS_SERVER(state->inode), err, 7009 &exception); 7010 } while (exception.retry); 7011 return err; 7012 } 7013 7014 /* 7015 * Update the seqid of a lock stateid after receiving 7016 * NFS4ERR_OLD_STATEID 7017 */ 7018 static bool nfs4_refresh_lock_old_stateid(nfs4_stateid *dst, 7019 struct nfs4_lock_state *lsp) 7020 { 7021 struct nfs4_state *state = lsp->ls_state; 7022 bool ret = false; 7023 7024 spin_lock(&state->state_lock); 7025 if (!nfs4_stateid_match_other(dst, &lsp->ls_stateid)) 7026 goto out; 7027 if (!nfs4_stateid_is_newer(&lsp->ls_stateid, dst)) 7028 nfs4_stateid_seqid_inc(dst); 7029 else 7030 dst->seqid = lsp->ls_stateid.seqid; 7031 ret = true; 7032 out: 7033 spin_unlock(&state->state_lock); 7034 return ret; 7035 } 7036 7037 static bool nfs4_sync_lock_stateid(nfs4_stateid *dst, 7038 struct nfs4_lock_state *lsp) 7039 { 7040 struct nfs4_state *state = lsp->ls_state; 7041 bool ret; 7042 7043 spin_lock(&state->state_lock); 7044 ret = !nfs4_stateid_match_other(dst, &lsp->ls_stateid); 7045 nfs4_stateid_copy(dst, &lsp->ls_stateid); 7046 spin_unlock(&state->state_lock); 7047 return ret; 7048 } 7049 7050 struct nfs4_unlockdata { 7051 struct nfs_locku_args arg; 7052 struct nfs_locku_res res; 7053 struct nfs4_lock_state *lsp; 7054 struct nfs_open_context *ctx; 7055 struct nfs_lock_context *l_ctx; 7056 struct file_lock fl; 7057 struct nfs_server *server; 7058 unsigned long timestamp; 7059 }; 7060 7061 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, 7062 struct nfs_open_context *ctx, 7063 struct nfs4_lock_state *lsp, 7064 struct nfs_seqid *seqid) 7065 { 7066 struct nfs4_unlockdata *p; 7067 struct nfs4_state *state = lsp->ls_state; 7068 struct inode *inode = state->inode; 7069 7070 p = kzalloc(sizeof(*p), GFP_KERNEL); 7071 if (p == NULL) 7072 return NULL; 7073 p->arg.fh = NFS_FH(inode); 7074 p->arg.fl = &p->fl; 7075 p->arg.seqid = seqid; 7076 p->res.seqid = seqid; 7077 p->lsp = lsp; 7078 /* Ensure we don't close file until we're done freeing locks! 
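 * We hold references to the open context and lock context until
 * nfs4_locku_release_calldata() drops them.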
*/ 7079 p->ctx = get_nfs_open_context(ctx); 7080 p->l_ctx = nfs_get_lock_context(ctx); 7081 locks_init_lock(&p->fl); 7082 locks_copy_lock(&p->fl, fl); 7083 p->server = NFS_SERVER(inode); 7084 spin_lock(&state->state_lock); 7085 nfs4_stateid_copy(&p->arg.stateid, &lsp->ls_stateid); 7086 spin_unlock(&state->state_lock); 7087 return p; 7088 } 7089 7090 static void nfs4_locku_release_calldata(void *data) 7091 { 7092 struct nfs4_unlockdata *calldata = data; 7093 nfs_free_seqid(calldata->arg.seqid); 7094 nfs4_put_lock_state(calldata->lsp); 7095 nfs_put_lock_context(calldata->l_ctx); 7096 put_nfs_open_context(calldata->ctx); 7097 kfree(calldata); 7098 } 7099 7100 static void nfs4_locku_done(struct rpc_task *task, void *data) 7101 { 7102 struct nfs4_unlockdata *calldata = data; 7103 struct nfs4_exception exception = { 7104 .inode = calldata->lsp->ls_state->inode, 7105 .stateid = &calldata->arg.stateid, 7106 }; 7107 7108 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 7109 return; 7110 switch (task->tk_status) { 7111 case 0: 7112 renew_lease(calldata->server, calldata->timestamp); 7113 locks_lock_inode_wait(calldata->lsp->ls_state->inode, &calldata->fl); 7114 if (nfs4_update_lock_stateid(calldata->lsp, 7115 &calldata->res.stateid)) 7116 break; 7117 fallthrough; 7118 case -NFS4ERR_ADMIN_REVOKED: 7119 case -NFS4ERR_EXPIRED: 7120 nfs4_free_revoked_stateid(calldata->server, 7121 &calldata->arg.stateid, 7122 task->tk_msg.rpc_cred); 7123 fallthrough; 7124 case -NFS4ERR_BAD_STATEID: 7125 case -NFS4ERR_STALE_STATEID: 7126 if (nfs4_sync_lock_stateid(&calldata->arg.stateid, 7127 calldata->lsp)) 7128 rpc_restart_call_prepare(task); 7129 break; 7130 case -NFS4ERR_OLD_STATEID: 7131 if (nfs4_refresh_lock_old_stateid(&calldata->arg.stateid, 7132 calldata->lsp)) 7133 rpc_restart_call_prepare(task); 7134 break; 7135 default: 7136 task->tk_status = nfs4_async_handle_exception(task, 7137 calldata->server, task->tk_status, 7138 &exception); 7139 if (exception.retry) 7140 rpc_restart_call_prepare(task); 7141 } 7142 nfs_release_seqid(calldata->arg.seqid); 7143 } 7144 7145 static void nfs4_locku_prepare(struct rpc_task *task, void *data) 7146 { 7147 struct nfs4_unlockdata *calldata = data; 7148 7149 if (test_bit(NFS_CONTEXT_UNLOCK, &calldata->l_ctx->open_context->flags) && 7150 nfs_async_iocounter_wait(task, calldata->l_ctx)) 7151 return; 7152 7153 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 7154 goto out_wait; 7155 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) { 7156 /* Note: exit _without_ running nfs4_locku_done */ 7157 goto out_no_action; 7158 } 7159 calldata->timestamp = jiffies; 7160 if (nfs4_setup_sequence(calldata->server->nfs_client, 7161 &calldata->arg.seq_args, 7162 &calldata->res.seq_res, 7163 task) != 0) 7164 nfs_release_seqid(calldata->arg.seqid); 7165 return; 7166 out_no_action: 7167 task->tk_action = NULL; 7168 out_wait: 7169 nfs4_sequence_done(task, &calldata->res.seq_res); 7170 } 7171 7172 static const struct rpc_call_ops nfs4_locku_ops = { 7173 .rpc_call_prepare = nfs4_locku_prepare, 7174 .rpc_call_done = nfs4_locku_done, 7175 .rpc_release = nfs4_locku_release_calldata, 7176 }; 7177 7178 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, 7179 struct nfs_open_context *ctx, 7180 struct nfs4_lock_state *lsp, 7181 struct nfs_seqid *seqid) 7182 { 7183 struct nfs4_unlockdata *data; 7184 struct rpc_message msg = { 7185 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU], 7186 .rpc_cred = ctx->cred, 7187 }; 7188 struct rpc_task_setup task_setup_data = { 7189 
.rpc_client = NFS_CLIENT(lsp->ls_state->inode), 7190 .rpc_message = &msg, 7191 .callback_ops = &nfs4_locku_ops, 7192 .workqueue = nfsiod_workqueue, 7193 .flags = RPC_TASK_ASYNC, 7194 }; 7195 7196 if (nfs_server_capable(lsp->ls_state->inode, NFS_CAP_MOVEABLE)) 7197 task_setup_data.flags |= RPC_TASK_MOVEABLE; 7198 7199 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client, 7200 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg); 7201 7202 /* Ensure this is an unlock - when canceling a lock, the 7203 * canceled lock is passed in, and it won't be an unlock. 7204 */ 7205 fl->c.flc_type = F_UNLCK; 7206 if (fl->c.flc_flags & FL_CLOSE) 7207 set_bit(NFS_CONTEXT_UNLOCK, &ctx->flags); 7208 7209 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); 7210 if (data == NULL) { 7211 nfs_free_seqid(seqid); 7212 return ERR_PTR(-ENOMEM); 7213 } 7214 7215 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 0); 7216 msg.rpc_argp = &data->arg; 7217 msg.rpc_resp = &data->res; 7218 task_setup_data.callback_data = data; 7219 return rpc_run_task(&task_setup_data); 7220 } 7221 7222 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) 7223 { 7224 struct inode *inode = state->inode; 7225 struct nfs4_state_owner *sp = state->owner; 7226 struct nfs_inode *nfsi = NFS_I(inode); 7227 struct nfs_seqid *seqid; 7228 struct nfs4_lock_state *lsp; 7229 struct rpc_task *task; 7230 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 7231 int status = 0; 7232 unsigned char saved_flags = request->c.flc_flags; 7233 7234 status = nfs4_set_lock_state(state, request); 7235 /* Unlock _before_ we do the RPC call */ 7236 request->c.flc_flags |= FL_EXISTS; 7237 /* Exclude nfs_delegation_claim_locks() */ 7238 mutex_lock(&sp->so_delegreturn_mutex); 7239 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */ 7240 down_read(&nfsi->rwsem); 7241 if (locks_lock_inode_wait(inode, request) == -ENOENT) { 7242 up_read(&nfsi->rwsem); 7243 mutex_unlock(&sp->so_delegreturn_mutex); 7244 goto out; 7245 } 7246 lsp = request->fl_u.nfs4_fl.owner; 7247 set_bit(NFS_LOCK_UNLOCKING, &lsp->ls_flags); 7248 up_read(&nfsi->rwsem); 7249 mutex_unlock(&sp->so_delegreturn_mutex); 7250 if (status != 0) 7251 goto out; 7252 /* Is this a delegated lock? 
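 * If it was never sent to the server (we held a delegation when it was
 * taken), there is nothing to LOCKU on the wire.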
*/ 7253 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0) 7254 goto out; 7255 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid; 7256 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); 7257 status = -ENOMEM; 7258 if (IS_ERR(seqid)) 7259 goto out; 7260 task = nfs4_do_unlck(request, 7261 nfs_file_open_context(request->c.flc_file), 7262 lsp, seqid); 7263 status = PTR_ERR(task); 7264 if (IS_ERR(task)) 7265 goto out; 7266 status = rpc_wait_for_completion_task(task); 7267 rpc_put_task(task); 7268 out: 7269 request->c.flc_flags = saved_flags; 7270 trace_nfs4_unlock(request, state, F_SETLK, status); 7271 return status; 7272 } 7273 7274 struct nfs4_lockdata { 7275 struct nfs_lock_args arg; 7276 struct nfs_lock_res res; 7277 struct nfs4_lock_state *lsp; 7278 struct nfs_open_context *ctx; 7279 struct file_lock fl; 7280 unsigned long timestamp; 7281 int rpc_status; 7282 int cancelled; 7283 struct nfs_server *server; 7284 }; 7285 7286 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, 7287 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp, 7288 gfp_t gfp_mask) 7289 { 7290 struct nfs4_lockdata *p; 7291 struct inode *inode = lsp->ls_state->inode; 7292 struct nfs_server *server = NFS_SERVER(inode); 7293 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 7294 7295 p = kzalloc(sizeof(*p), gfp_mask); 7296 if (p == NULL) 7297 return NULL; 7298 7299 p->arg.fh = NFS_FH(inode); 7300 p->arg.fl = &p->fl; 7301 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask); 7302 if (IS_ERR(p->arg.open_seqid)) 7303 goto out_free; 7304 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 7305 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask); 7306 if (IS_ERR(p->arg.lock_seqid)) 7307 goto out_free_seqid; 7308 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; 7309 p->arg.lock_owner.id = lsp->ls_seqid.owner_id; 7310 p->arg.lock_owner.s_dev = server->s_dev; 7311 p->res.lock_seqid = p->arg.lock_seqid; 7312 p->lsp = lsp; 7313 p->server = server; 7314 p->ctx = get_nfs_open_context(ctx); 7315 locks_init_lock(&p->fl); 7316 locks_copy_lock(&p->fl, fl); 7317 return p; 7318 out_free_seqid: 7319 nfs_free_seqid(p->arg.open_seqid); 7320 out_free: 7321 kfree(p); 7322 return NULL; 7323 } 7324 7325 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) 7326 { 7327 struct nfs4_lockdata *data = calldata; 7328 struct nfs4_state *state = data->lsp->ls_state; 7329 7330 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) 7331 goto out_wait; 7332 /* Do we need to do an open_to_lock_owner? 
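 * A lock owner the server has not seen yet must be established via the
 * open stateid and open seqid; an already-known lock owner reuses its
 * existing lock stateid.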
*/ 7333 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) { 7334 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) { 7335 goto out_release_lock_seqid; 7336 } 7337 nfs4_stateid_copy(&data->arg.open_stateid, 7338 &state->open_stateid); 7339 data->arg.new_lock_owner = 1; 7340 data->res.open_seqid = data->arg.open_seqid; 7341 } else { 7342 data->arg.new_lock_owner = 0; 7343 nfs4_stateid_copy(&data->arg.lock_stateid, 7344 &data->lsp->ls_stateid); 7345 } 7346 if (!nfs4_valid_open_stateid(state)) { 7347 data->rpc_status = -EBADF; 7348 task->tk_action = NULL; 7349 goto out_release_open_seqid; 7350 } 7351 data->timestamp = jiffies; 7352 if (nfs4_setup_sequence(data->server->nfs_client, 7353 &data->arg.seq_args, 7354 &data->res.seq_res, 7355 task) == 0) 7356 return; 7357 out_release_open_seqid: 7358 nfs_release_seqid(data->arg.open_seqid); 7359 out_release_lock_seqid: 7360 nfs_release_seqid(data->arg.lock_seqid); 7361 out_wait: 7362 nfs4_sequence_done(task, &data->res.seq_res); 7363 dprintk("%s: ret = %d\n", __func__, data->rpc_status); 7364 } 7365 7366 static void nfs4_lock_done(struct rpc_task *task, void *calldata) 7367 { 7368 struct nfs4_lockdata *data = calldata; 7369 struct nfs4_lock_state *lsp = data->lsp; 7370 7371 if (!nfs4_sequence_done(task, &data->res.seq_res)) 7372 return; 7373 7374 data->rpc_status = task->tk_status; 7375 switch (task->tk_status) { 7376 case 0: 7377 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)), 7378 data->timestamp); 7379 if (data->arg.new_lock && !data->cancelled) { 7380 data->fl.c.flc_flags &= ~(FL_SLEEP | FL_ACCESS); 7381 if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0) 7382 goto out_restart; 7383 } 7384 if (data->arg.new_lock_owner != 0) { 7385 nfs_confirm_seqid(&lsp->ls_seqid, 0); 7386 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid); 7387 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 7388 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid)) 7389 goto out_restart; 7390 break; 7391 case -NFS4ERR_OLD_STATEID: 7392 if (data->arg.new_lock_owner != 0 && 7393 nfs4_refresh_open_old_stateid(&data->arg.open_stateid, 7394 lsp->ls_state)) 7395 goto out_restart; 7396 if (nfs4_refresh_lock_old_stateid(&data->arg.lock_stateid, lsp)) 7397 goto out_restart; 7398 fallthrough; 7399 case -NFS4ERR_BAD_STATEID: 7400 case -NFS4ERR_STALE_STATEID: 7401 case -NFS4ERR_EXPIRED: 7402 if (data->arg.new_lock_owner != 0) { 7403 if (!nfs4_stateid_match(&data->arg.open_stateid, 7404 &lsp->ls_state->open_stateid)) 7405 goto out_restart; 7406 } else if (!nfs4_stateid_match(&data->arg.lock_stateid, 7407 &lsp->ls_stateid)) 7408 goto out_restart; 7409 } 7410 out_done: 7411 dprintk("%s: ret = %d!\n", __func__, data->rpc_status); 7412 return; 7413 out_restart: 7414 if (!data->cancelled) 7415 rpc_restart_call_prepare(task); 7416 goto out_done; 7417 } 7418 7419 static void nfs4_lock_release(void *calldata) 7420 { 7421 struct nfs4_lockdata *data = calldata; 7422 7423 nfs_free_seqid(data->arg.open_seqid); 7424 if (data->cancelled && data->rpc_status == 0) { 7425 struct rpc_task *task; 7426 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, 7427 data->arg.lock_seqid); 7428 if (!IS_ERR(task)) 7429 rpc_put_task_async(task); 7430 dprintk("%s: cancelling lock!\n", __func__); 7431 } else 7432 nfs_free_seqid(data->arg.lock_seqid); 7433 nfs4_put_lock_state(data->lsp); 7434 put_nfs_open_context(data->ctx); 7435 kfree(data); 7436 } 7437 7438 static const struct rpc_call_ops nfs4_lock_ops = { 7439 .rpc_call_prepare = nfs4_lock_prepare, 7440 .rpc_call_done = 
nfs4_lock_done, 7441 .rpc_release = nfs4_lock_release, 7442 }; 7443 7444 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) 7445 { 7446 switch (error) { 7447 case -NFS4ERR_ADMIN_REVOKED: 7448 case -NFS4ERR_EXPIRED: 7449 case -NFS4ERR_BAD_STATEID: 7450 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 7451 if (new_lock_owner != 0 || 7452 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) 7453 nfs4_schedule_stateid_recovery(server, lsp->ls_state); 7454 break; 7455 case -NFS4ERR_STALE_STATEID: 7456 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 7457 nfs4_schedule_lease_recovery(server->nfs_client); 7458 } 7459 } 7460 7461 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) 7462 { 7463 struct nfs4_lockdata *data; 7464 struct rpc_task *task; 7465 struct rpc_message msg = { 7466 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK], 7467 .rpc_cred = state->owner->so_cred, 7468 }; 7469 struct rpc_task_setup task_setup_data = { 7470 .rpc_client = NFS_CLIENT(state->inode), 7471 .rpc_message = &msg, 7472 .callback_ops = &nfs4_lock_ops, 7473 .workqueue = nfsiod_workqueue, 7474 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 7475 }; 7476 int ret; 7477 7478 if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE)) 7479 task_setup_data.flags |= RPC_TASK_MOVEABLE; 7480 7481 data = nfs4_alloc_lockdata(fl, 7482 nfs_file_open_context(fl->c.flc_file), 7483 fl->fl_u.nfs4_fl.owner, GFP_KERNEL); 7484 if (data == NULL) 7485 return -ENOMEM; 7486 if (IS_SETLKW(cmd)) 7487 data->arg.block = 1; 7488 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 7489 recovery_type > NFS_LOCK_NEW); 7490 msg.rpc_argp = &data->arg; 7491 msg.rpc_resp = &data->res; 7492 task_setup_data.callback_data = data; 7493 if (recovery_type > NFS_LOCK_NEW) { 7494 if (recovery_type == NFS_LOCK_RECLAIM) 7495 data->arg.reclaim = NFS_LOCK_RECLAIM; 7496 } else 7497 data->arg.new_lock = 1; 7498 task = rpc_run_task(&task_setup_data); 7499 if (IS_ERR(task)) 7500 return PTR_ERR(task); 7501 ret = rpc_wait_for_completion_task(task); 7502 if (ret == 0) { 7503 ret = data->rpc_status; 7504 if (ret) 7505 nfs4_handle_setlk_error(data->server, data->lsp, 7506 data->arg.new_lock_owner, ret); 7507 } else 7508 data->cancelled = true; 7509 trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret); 7510 rpc_put_task(task); 7511 dprintk("%s: ret = %d\n", __func__, ret); 7512 return ret; 7513 } 7514 7515 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) 7516 { 7517 struct nfs_server *server = NFS_SERVER(state->inode); 7518 struct nfs4_exception exception = { 7519 .inode = state->inode, 7520 }; 7521 int err; 7522 7523 do { 7524 /* Cache the lock if possible... 
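 * While we hold a delegation, locks are cached locally and do not need
 * to be reclaimed from the server.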
*/ 7525 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 7526 return 0; 7527 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM); 7528 if (err != -NFS4ERR_DELAY) 7529 break; 7530 nfs4_handle_exception(server, err, &exception); 7531 } while (exception.retry); 7532 return err; 7533 } 7534 7535 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) 7536 { 7537 struct nfs_server *server = NFS_SERVER(state->inode); 7538 struct nfs4_exception exception = { 7539 .inode = state->inode, 7540 }; 7541 int err; 7542 7543 err = nfs4_set_lock_state(state, request); 7544 if (err != 0) 7545 return err; 7546 if (!recover_lost_locks) { 7547 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags); 7548 return 0; 7549 } 7550 do { 7551 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 7552 return 0; 7553 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED); 7554 switch (err) { 7555 default: 7556 goto out; 7557 case -NFS4ERR_GRACE: 7558 case -NFS4ERR_DELAY: 7559 nfs4_handle_exception(server, err, &exception); 7560 err = 0; 7561 } 7562 } while (exception.retry); 7563 out: 7564 return err; 7565 } 7566 7567 #if defined(CONFIG_NFS_V4_1) 7568 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request) 7569 { 7570 struct nfs4_lock_state *lsp; 7571 int status; 7572 7573 status = nfs4_set_lock_state(state, request); 7574 if (status != 0) 7575 return status; 7576 lsp = request->fl_u.nfs4_fl.owner; 7577 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) || 7578 test_bit(NFS_LOCK_LOST, &lsp->ls_flags)) 7579 return 0; 7580 return nfs4_lock_expired(state, request); 7581 } 7582 #endif 7583 7584 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7585 { 7586 struct nfs_inode *nfsi = NFS_I(state->inode); 7587 struct nfs4_state_owner *sp = state->owner; 7588 unsigned char flags = request->c.flc_flags; 7589 int status; 7590 7591 request->c.flc_flags |= FL_ACCESS; 7592 status = locks_lock_inode_wait(state->inode, request); 7593 if (status < 0) 7594 goto out; 7595 mutex_lock(&sp->so_delegreturn_mutex); 7596 down_read(&nfsi->rwsem); 7597 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { 7598 /* Yes: cache locks! */ 7599 /* ...but avoid races with delegation recall... 
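 * by taking the local lock without sleeping (FL_SLEEP cleared) while
 * so_delegreturn_mutex and nfsi->rwsem are held.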
*/ 7600 request->c.flc_flags = flags & ~FL_SLEEP; 7601 status = locks_lock_inode_wait(state->inode, request); 7602 up_read(&nfsi->rwsem); 7603 mutex_unlock(&sp->so_delegreturn_mutex); 7604 goto out; 7605 } 7606 up_read(&nfsi->rwsem); 7607 mutex_unlock(&sp->so_delegreturn_mutex); 7608 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); 7609 out: 7610 request->c.flc_flags = flags; 7611 return status; 7612 } 7613 7614 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7615 { 7616 struct nfs4_exception exception = { 7617 .state = state, 7618 .inode = state->inode, 7619 .interruptible = true, 7620 }; 7621 int err; 7622 7623 do { 7624 err = _nfs4_proc_setlk(state, cmd, request); 7625 if (err == -NFS4ERR_DENIED) 7626 err = -EAGAIN; 7627 err = nfs4_handle_exception(NFS_SERVER(state->inode), 7628 err, &exception); 7629 } while (exception.retry); 7630 return err; 7631 } 7632 7633 #define NFS4_LOCK_MINTIMEOUT (1 * HZ) 7634 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) 7635 7636 static int 7637 nfs4_retry_setlk_simple(struct nfs4_state *state, int cmd, 7638 struct file_lock *request) 7639 { 7640 int status = -ERESTARTSYS; 7641 unsigned long timeout = NFS4_LOCK_MINTIMEOUT; 7642 7643 while(!signalled()) { 7644 status = nfs4_proc_setlk(state, cmd, request); 7645 if ((status != -EAGAIN) || IS_SETLK(cmd)) 7646 break; 7647 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE); 7648 schedule_timeout(timeout); 7649 timeout *= 2; 7650 timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout); 7651 status = -ERESTARTSYS; 7652 } 7653 return status; 7654 } 7655 7656 #ifdef CONFIG_NFS_V4_1 7657 struct nfs4_lock_waiter { 7658 struct inode *inode; 7659 struct nfs_lowner owner; 7660 wait_queue_entry_t wait; 7661 }; 7662 7663 static int 7664 nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key) 7665 { 7666 struct nfs4_lock_waiter *waiter = 7667 container_of(wait, struct nfs4_lock_waiter, wait); 7668 7669 /* NULL key means to wake up everyone */ 7670 if (key) { 7671 struct cb_notify_lock_args *cbnl = key; 7672 struct nfs_lowner *lowner = &cbnl->cbnl_owner, 7673 *wowner = &waiter->owner; 7674 7675 /* Only wake if the callback was for the same owner. 
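		 * The CB_NOTIFY_LOCK arguments carry the lock owner's id and
		 * s_dev, which are compared against this waiter below.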
*/ 7676 if (lowner->id != wowner->id || lowner->s_dev != wowner->s_dev) 7677 return 0; 7678 7679 /* Make sure it's for the right inode */ 7680 if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh)) 7681 return 0; 7682 } 7683 7684 return woken_wake_function(wait, mode, flags, key); 7685 } 7686 7687 static int 7688 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7689 { 7690 struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner; 7691 struct nfs_server *server = NFS_SERVER(state->inode); 7692 struct nfs_client *clp = server->nfs_client; 7693 wait_queue_head_t *q = &clp->cl_lock_waitq; 7694 struct nfs4_lock_waiter waiter = { 7695 .inode = state->inode, 7696 .owner = { .clientid = clp->cl_clientid, 7697 .id = lsp->ls_seqid.owner_id, 7698 .s_dev = server->s_dev }, 7699 }; 7700 int status; 7701 7702 /* Don't bother with waitqueue if we don't expect a callback */ 7703 if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags)) 7704 return nfs4_retry_setlk_simple(state, cmd, request); 7705 7706 init_wait(&waiter.wait); 7707 waiter.wait.func = nfs4_wake_lock_waiter; 7708 add_wait_queue(q, &waiter.wait); 7709 7710 do { 7711 status = nfs4_proc_setlk(state, cmd, request); 7712 if (status != -EAGAIN || IS_SETLK(cmd)) 7713 break; 7714 7715 status = -ERESTARTSYS; 7716 wait_woken(&waiter.wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE, 7717 NFS4_LOCK_MAXTIMEOUT); 7718 } while (!signalled()); 7719 7720 remove_wait_queue(q, &waiter.wait); 7721 7722 return status; 7723 } 7724 #else /* !CONFIG_NFS_V4_1 */ 7725 static inline int 7726 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7727 { 7728 return nfs4_retry_setlk_simple(state, cmd, request); 7729 } 7730 #endif 7731 7732 static int 7733 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) 7734 { 7735 struct nfs_open_context *ctx; 7736 struct nfs4_state *state; 7737 int status; 7738 7739 /* verify open state */ 7740 ctx = nfs_file_open_context(filp); 7741 state = ctx->state; 7742 7743 if (IS_GETLK(cmd)) { 7744 if (state != NULL) 7745 return nfs4_proc_getlk(state, F_GETLK, request); 7746 return 0; 7747 } 7748 7749 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd))) 7750 return -EINVAL; 7751 7752 if (lock_is_unlock(request)) { 7753 if (state != NULL) 7754 return nfs4_proc_unlck(state, cmd, request); 7755 return 0; 7756 } 7757 7758 if (state == NULL) 7759 return -ENOLCK; 7760 7761 if ((request->c.flc_flags & FL_POSIX) && 7762 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) 7763 return -ENOLCK; 7764 7765 /* 7766 * Don't rely on the VFS having checked the file open mode, 7767 * since it won't do this for flock() locks. 7768 */ 7769 switch (request->c.flc_type) { 7770 case F_RDLCK: 7771 if (!(filp->f_mode & FMODE_READ)) 7772 return -EBADF; 7773 break; 7774 case F_WRLCK: 7775 if (!(filp->f_mode & FMODE_WRITE)) 7776 return -EBADF; 7777 } 7778 7779 status = nfs4_set_lock_state(state, request); 7780 if (status != 0) 7781 return status; 7782 7783 return nfs4_retry_setlk(state, cmd, request); 7784 } 7785 7786 static int nfs4_delete_lease(struct file *file, void **priv) 7787 { 7788 return generic_setlease(file, F_UNLCK, NULL, priv); 7789 } 7790 7791 static int nfs4_add_lease(struct file *file, int arg, struct file_lease **lease, 7792 void **priv) 7793 { 7794 struct inode *inode = file_inode(file); 7795 fmode_t type = arg == F_RDLCK ? 
FMODE_READ : FMODE_WRITE; 7796 int ret; 7797 7798 /* No delegation, no lease */ 7799 if (!nfs4_have_delegation(inode, type, 0)) 7800 return -EAGAIN; 7801 ret = generic_setlease(file, arg, lease, priv); 7802 if (ret || nfs4_have_delegation(inode, type, 0)) 7803 return ret; 7804 /* We raced with a delegation return */ 7805 nfs4_delete_lease(file, priv); 7806 return -EAGAIN; 7807 } 7808 7809 int nfs4_proc_setlease(struct file *file, int arg, struct file_lease **lease, 7810 void **priv) 7811 { 7812 switch (arg) { 7813 case F_RDLCK: 7814 case F_WRLCK: 7815 return nfs4_add_lease(file, arg, lease, priv); 7816 case F_UNLCK: 7817 return nfs4_delete_lease(file, priv); 7818 default: 7819 return -EINVAL; 7820 } 7821 } 7822 7823 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid) 7824 { 7825 struct nfs_server *server = NFS_SERVER(state->inode); 7826 int err; 7827 7828 err = nfs4_set_lock_state(state, fl); 7829 if (err != 0) 7830 return err; 7831 do { 7832 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); 7833 if (err != -NFS4ERR_DELAY) 7834 break; 7835 ssleep(1); 7836 } while (err == -NFS4ERR_DELAY); 7837 return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err); 7838 } 7839 7840 struct nfs_release_lockowner_data { 7841 struct nfs4_lock_state *lsp; 7842 struct nfs_server *server; 7843 struct nfs_release_lockowner_args args; 7844 struct nfs_release_lockowner_res res; 7845 unsigned long timestamp; 7846 }; 7847 7848 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata) 7849 { 7850 struct nfs_release_lockowner_data *data = calldata; 7851 struct nfs_server *server = data->server; 7852 nfs4_setup_sequence(server->nfs_client, &data->args.seq_args, 7853 &data->res.seq_res, task); 7854 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 7855 data->timestamp = jiffies; 7856 } 7857 7858 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata) 7859 { 7860 struct nfs_release_lockowner_data *data = calldata; 7861 struct nfs_server *server = data->server; 7862 7863 nfs40_sequence_done(task, &data->res.seq_res); 7864 7865 switch (task->tk_status) { 7866 case 0: 7867 renew_lease(server, data->timestamp); 7868 break; 7869 case -NFS4ERR_STALE_CLIENTID: 7870 case -NFS4ERR_EXPIRED: 7871 nfs4_schedule_lease_recovery(server->nfs_client); 7872 break; 7873 case -NFS4ERR_LEASE_MOVED: 7874 case -NFS4ERR_DELAY: 7875 if (nfs4_async_handle_error(task, server, 7876 NULL, NULL) == -EAGAIN) 7877 rpc_restart_call_prepare(task); 7878 } 7879 } 7880 7881 static void nfs4_release_lockowner_release(void *calldata) 7882 { 7883 struct nfs_release_lockowner_data *data = calldata; 7884 nfs4_free_lock_state(data->server, data->lsp); 7885 kfree(calldata); 7886 } 7887 7888 static const struct rpc_call_ops nfs4_release_lockowner_ops = { 7889 .rpc_call_prepare = nfs4_release_lockowner_prepare, 7890 .rpc_call_done = nfs4_release_lockowner_done, 7891 .rpc_release = nfs4_release_lockowner_release, 7892 }; 7893 7894 static void 7895 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp) 7896 { 7897 struct nfs_release_lockowner_data *data; 7898 struct rpc_message msg = { 7899 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER], 7900 }; 7901 7902 if (server->nfs_client->cl_mvops->minor_version != 0) 7903 return; 7904 7905 data = kmalloc(sizeof(*data), GFP_KERNEL); 7906 if (!data) 7907 return; 7908 data->lsp = lsp; 7909 data->server = server; 7910 data->args.lock_owner.clientid = 
server->nfs_client->cl_clientid; 7911 data->args.lock_owner.id = lsp->ls_seqid.owner_id; 7912 data->args.lock_owner.s_dev = server->s_dev; 7913 7914 msg.rpc_argp = &data->args; 7915 msg.rpc_resp = &data->res; 7916 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0); 7917 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); 7918 } 7919 7920 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" 7921 7922 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler, 7923 struct mnt_idmap *idmap, 7924 struct dentry *unused, struct inode *inode, 7925 const char *key, const void *buf, 7926 size_t buflen, int flags) 7927 { 7928 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_ACL); 7929 } 7930 7931 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler, 7932 struct dentry *unused, struct inode *inode, 7933 const char *key, void *buf, size_t buflen) 7934 { 7935 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_ACL); 7936 } 7937 7938 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry) 7939 { 7940 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_ACL); 7941 } 7942 7943 #if defined(CONFIG_NFS_V4_1) 7944 #define XATTR_NAME_NFSV4_DACL "system.nfs4_dacl" 7945 7946 static int nfs4_xattr_set_nfs4_dacl(const struct xattr_handler *handler, 7947 struct mnt_idmap *idmap, 7948 struct dentry *unused, struct inode *inode, 7949 const char *key, const void *buf, 7950 size_t buflen, int flags) 7951 { 7952 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_DACL); 7953 } 7954 7955 static int nfs4_xattr_get_nfs4_dacl(const struct xattr_handler *handler, 7956 struct dentry *unused, struct inode *inode, 7957 const char *key, void *buf, size_t buflen) 7958 { 7959 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_DACL); 7960 } 7961 7962 static bool nfs4_xattr_list_nfs4_dacl(struct dentry *dentry) 7963 { 7964 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_DACL); 7965 } 7966 7967 #define XATTR_NAME_NFSV4_SACL "system.nfs4_sacl" 7968 7969 static int nfs4_xattr_set_nfs4_sacl(const struct xattr_handler *handler, 7970 struct mnt_idmap *idmap, 7971 struct dentry *unused, struct inode *inode, 7972 const char *key, const void *buf, 7973 size_t buflen, int flags) 7974 { 7975 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_SACL); 7976 } 7977 7978 static int nfs4_xattr_get_nfs4_sacl(const struct xattr_handler *handler, 7979 struct dentry *unused, struct inode *inode, 7980 const char *key, void *buf, size_t buflen) 7981 { 7982 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_SACL); 7983 } 7984 7985 static bool nfs4_xattr_list_nfs4_sacl(struct dentry *dentry) 7986 { 7987 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_SACL); 7988 } 7989 7990 #endif 7991 7992 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 7993 7994 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler, 7995 struct mnt_idmap *idmap, 7996 struct dentry *unused, struct inode *inode, 7997 const char *key, const void *buf, 7998 size_t buflen, int flags) 7999 { 8000 if (security_ismaclabel(key)) 8001 return nfs4_set_security_label(inode, buf, buflen); 8002 8003 return -EOPNOTSUPP; 8004 } 8005 8006 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler, 8007 struct dentry *unused, struct inode *inode, 8008 const char *key, void *buf, size_t buflen) 8009 { 8010 if (security_ismaclabel(key)) 8011 return nfs4_get_security_label(inode, buf, buflen); 8012 return -EOPNOTSUPP; 8013 } 8014 8015 static ssize_t 8016 
nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len) 8017 { 8018 int len = 0; 8019 8020 if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) { 8021 len = security_inode_listsecurity(inode, list, list_len); 8022 if (len >= 0 && list_len && len > list_len) 8023 return -ERANGE; 8024 } 8025 return len; 8026 } 8027 8028 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = { 8029 .prefix = XATTR_SECURITY_PREFIX, 8030 .get = nfs4_xattr_get_nfs4_label, 8031 .set = nfs4_xattr_set_nfs4_label, 8032 }; 8033 8034 #else 8035 8036 static ssize_t 8037 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len) 8038 { 8039 return 0; 8040 } 8041 8042 #endif 8043 8044 #ifdef CONFIG_NFS_V4_2 8045 static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler, 8046 struct mnt_idmap *idmap, 8047 struct dentry *unused, struct inode *inode, 8048 const char *key, const void *buf, 8049 size_t buflen, int flags) 8050 { 8051 u32 mask; 8052 int ret; 8053 8054 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 8055 return -EOPNOTSUPP; 8056 8057 /* 8058 * There is no mapping from the MAY_* flags to the NFS_ACCESS_XA* 8059 * flags right now. Handling of xattr operations use the normal 8060 * file read/write permissions. 8061 * 8062 * Just in case the server has other ideas (which RFC 8276 allows), 8063 * do a cached access check for the XA* flags to possibly avoid 8064 * doing an RPC and getting EACCES back. 8065 */ 8066 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { 8067 if (!(mask & NFS_ACCESS_XAWRITE)) 8068 return -EACCES; 8069 } 8070 8071 if (buf == NULL) { 8072 ret = nfs42_proc_removexattr(inode, key); 8073 if (!ret) 8074 nfs4_xattr_cache_remove(inode, key); 8075 } else { 8076 ret = nfs42_proc_setxattr(inode, key, buf, buflen, flags); 8077 if (!ret) 8078 nfs4_xattr_cache_add(inode, key, buf, NULL, buflen); 8079 } 8080 8081 return ret; 8082 } 8083 8084 static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler, 8085 struct dentry *unused, struct inode *inode, 8086 const char *key, void *buf, size_t buflen) 8087 { 8088 u32 mask; 8089 ssize_t ret; 8090 8091 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 8092 return -EOPNOTSUPP; 8093 8094 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { 8095 if (!(mask & NFS_ACCESS_XAREAD)) 8096 return -EACCES; 8097 } 8098 8099 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 8100 if (ret) 8101 return ret; 8102 8103 ret = nfs4_xattr_cache_get(inode, key, buf, buflen); 8104 if (ret >= 0 || (ret < 0 && ret != -ENOENT)) 8105 return ret; 8106 8107 ret = nfs42_proc_getxattr(inode, key, buf, buflen); 8108 8109 return ret; 8110 } 8111 8112 static ssize_t 8113 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) 8114 { 8115 u64 cookie; 8116 bool eof; 8117 ssize_t ret, size; 8118 char *buf; 8119 size_t buflen; 8120 u32 mask; 8121 8122 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 8123 return 0; 8124 8125 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { 8126 if (!(mask & NFS_ACCESS_XALIST)) 8127 return 0; 8128 } 8129 8130 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 8131 if (ret) 8132 return ret; 8133 8134 ret = nfs4_xattr_cache_list(inode, list, list_len); 8135 if (ret >= 0 || (ret < 0 && ret != -ENOENT)) 8136 return ret; 8137 8138 cookie = 0; 8139 eof = false; 8140 buflen = list_len ? list_len : XATTR_LIST_MAX; 8141 buf = list_len ? 
list : NULL; 8142 size = 0; 8143 8144 while (!eof) { 8145 ret = nfs42_proc_listxattrs(inode, buf, buflen, 8146 &cookie, &eof); 8147 if (ret < 0) 8148 return ret; 8149 8150 if (list_len) { 8151 buf += ret; 8152 buflen -= ret; 8153 } 8154 size += ret; 8155 } 8156 8157 if (list_len) 8158 nfs4_xattr_cache_set_list(inode, list, size); 8159 8160 return size; 8161 } 8162 8163 #else 8164 8165 static ssize_t 8166 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) 8167 { 8168 return 0; 8169 } 8170 #endif /* CONFIG_NFS_V4_2 */ 8171 8172 /* 8173 * nfs_fhget will use either the mounted_on_fileid or the fileid 8174 */ 8175 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) 8176 { 8177 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || 8178 (fattr->valid & NFS_ATTR_FATTR_FILEID)) && 8179 (fattr->valid & NFS_ATTR_FATTR_FSID) && 8180 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS))) 8181 return; 8182 8183 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 8184 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL; 8185 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 8186 fattr->nlink = 2; 8187 } 8188 8189 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 8190 const struct qstr *name, 8191 struct nfs4_fs_locations *fs_locations, 8192 struct page *page) 8193 { 8194 struct nfs_server *server = NFS_SERVER(dir); 8195 u32 bitmask[3]; 8196 struct nfs4_fs_locations_arg args = { 8197 .dir_fh = NFS_FH(dir), 8198 .name = name, 8199 .page = page, 8200 .bitmask = bitmask, 8201 }; 8202 struct nfs4_fs_locations_res res = { 8203 .fs_locations = fs_locations, 8204 }; 8205 struct rpc_message msg = { 8206 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 8207 .rpc_argp = &args, 8208 .rpc_resp = &res, 8209 }; 8210 int status; 8211 8212 dprintk("%s: start\n", __func__); 8213 8214 bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS; 8215 bitmask[1] = nfs4_fattr_bitmap[1]; 8216 8217 /* Ask for the fileid of the absent filesystem if mounted_on_fileid 8218 * is not supported */ 8219 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) 8220 bitmask[0] &= ~FATTR4_WORD0_FILEID; 8221 else 8222 bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID; 8223 8224 nfs_fattr_init(fs_locations->fattr); 8225 fs_locations->server = server; 8226 fs_locations->nlocations = 0; 8227 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0); 8228 dprintk("%s: returned status = %d\n", __func__, status); 8229 return status; 8230 } 8231 8232 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 8233 const struct qstr *name, 8234 struct nfs4_fs_locations *fs_locations, 8235 struct page *page) 8236 { 8237 struct nfs4_exception exception = { 8238 .interruptible = true, 8239 }; 8240 int err; 8241 do { 8242 err = _nfs4_proc_fs_locations(client, dir, name, 8243 fs_locations, page); 8244 trace_nfs4_get_fs_locations(dir, name, err); 8245 err = nfs4_handle_exception(NFS_SERVER(dir), err, 8246 &exception); 8247 } while (exception.retry); 8248 return err; 8249 } 8250 8251 /* 8252 * This operation also signals the server that this client is 8253 * performing migration recovery. The server can stop returning 8254 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is 8255 * appended to this compound to identify the client ID which is 8256 * performing recovery. 
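 * On success the local lease timer is refreshed as well, via renew_lease().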
8257 */ 8258 static int _nfs40_proc_get_locations(struct nfs_server *server, 8259 struct nfs_fh *fhandle, 8260 struct nfs4_fs_locations *locations, 8261 struct page *page, const struct cred *cred) 8262 { 8263 struct rpc_clnt *clnt = server->client; 8264 u32 bitmask[2] = { 8265 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 8266 }; 8267 struct nfs4_fs_locations_arg args = { 8268 .clientid = server->nfs_client->cl_clientid, 8269 .fh = fhandle, 8270 .page = page, 8271 .bitmask = bitmask, 8272 .migration = 1, /* skip LOOKUP */ 8273 .renew = 1, /* append RENEW */ 8274 }; 8275 struct nfs4_fs_locations_res res = { 8276 .fs_locations = locations, 8277 .migration = 1, 8278 .renew = 1, 8279 }; 8280 struct rpc_message msg = { 8281 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 8282 .rpc_argp = &args, 8283 .rpc_resp = &res, 8284 .rpc_cred = cred, 8285 }; 8286 unsigned long now = jiffies; 8287 int status; 8288 8289 nfs_fattr_init(locations->fattr); 8290 locations->server = server; 8291 locations->nlocations = 0; 8292 8293 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8294 status = nfs4_call_sync_sequence(clnt, server, &msg, 8295 &args.seq_args, &res.seq_res); 8296 if (status) 8297 return status; 8298 8299 renew_lease(server, now); 8300 return 0; 8301 } 8302 8303 #ifdef CONFIG_NFS_V4_1 8304 8305 /* 8306 * This operation also signals the server that this client is 8307 * performing migration recovery. The server can stop asserting 8308 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID 8309 * performing this operation is identified in the SEQUENCE 8310 * operation in this compound. 8311 * 8312 * When the client supports GETATTR(fs_locations_info), it can 8313 * be plumbed in here. 8314 */ 8315 static int _nfs41_proc_get_locations(struct nfs_server *server, 8316 struct nfs_fh *fhandle, 8317 struct nfs4_fs_locations *locations, 8318 struct page *page, const struct cred *cred) 8319 { 8320 struct rpc_clnt *clnt = server->client; 8321 u32 bitmask[2] = { 8322 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 8323 }; 8324 struct nfs4_fs_locations_arg args = { 8325 .fh = fhandle, 8326 .page = page, 8327 .bitmask = bitmask, 8328 .migration = 1, /* skip LOOKUP */ 8329 }; 8330 struct nfs4_fs_locations_res res = { 8331 .fs_locations = locations, 8332 .migration = 1, 8333 }; 8334 struct rpc_message msg = { 8335 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 8336 .rpc_argp = &args, 8337 .rpc_resp = &res, 8338 .rpc_cred = cred, 8339 }; 8340 struct nfs4_call_sync_data data = { 8341 .seq_server = server, 8342 .seq_args = &args.seq_args, 8343 .seq_res = &res.seq_res, 8344 }; 8345 struct rpc_task_setup task_setup_data = { 8346 .rpc_client = clnt, 8347 .rpc_message = &msg, 8348 .callback_ops = server->nfs_client->cl_mvops->call_sync_ops, 8349 .callback_data = &data, 8350 .flags = RPC_TASK_NO_ROUND_ROBIN, 8351 }; 8352 int status; 8353 8354 nfs_fattr_init(locations->fattr); 8355 locations->server = server; 8356 locations->nlocations = 0; 8357 8358 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8359 status = nfs4_call_sync_custom(&task_setup_data); 8360 if (status == NFS4_OK && 8361 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 8362 status = -NFS4ERR_LEASE_MOVED; 8363 return status; 8364 } 8365 8366 #endif /* CONFIG_NFS_V4_1 */ 8367 8368 /** 8369 * nfs4_proc_get_locations - discover locations for a migrated FSID 8370 * @server: pointer to nfs_server to process 8371 * @fhandle: pointer to the kernel NFS client file handle 8372 * @locations: result of query 8373 * 
@page: buffer 8374 * @cred: credential to use for this operation 8375 * 8376 * Returns NFS4_OK on success, a negative NFS4ERR status code if the 8377 * operation failed, or a negative errno if a local error occurred. 8378 * 8379 * On success, "locations" is filled in, but if the server has 8380 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not 8381 * asserted. 8382 * 8383 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases 8384 * from this client that require migration recovery. 8385 */ 8386 int nfs4_proc_get_locations(struct nfs_server *server, 8387 struct nfs_fh *fhandle, 8388 struct nfs4_fs_locations *locations, 8389 struct page *page, const struct cred *cred) 8390 { 8391 struct nfs_client *clp = server->nfs_client; 8392 const struct nfs4_mig_recovery_ops *ops = 8393 clp->cl_mvops->mig_recovery_ops; 8394 struct nfs4_exception exception = { 8395 .interruptible = true, 8396 }; 8397 int status; 8398 8399 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 8400 (unsigned long long)server->fsid.major, 8401 (unsigned long long)server->fsid.minor, 8402 clp->cl_hostname); 8403 nfs_display_fhandle(fhandle, __func__); 8404 8405 do { 8406 status = ops->get_locations(server, fhandle, locations, page, 8407 cred); 8408 if (status != -NFS4ERR_DELAY) 8409 break; 8410 nfs4_handle_exception(server, status, &exception); 8411 } while (exception.retry); 8412 return status; 8413 } 8414 8415 /* 8416 * This operation also signals the server that this client is 8417 * performing "lease moved" recovery. The server can stop 8418 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation 8419 * is appended to this compound to identify the client ID which is 8420 * performing recovery. 8421 */ 8422 static int _nfs40_proc_fsid_present(struct inode *inode, const struct cred *cred) 8423 { 8424 struct nfs_server *server = NFS_SERVER(inode); 8425 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 8426 struct rpc_clnt *clnt = server->client; 8427 struct nfs4_fsid_present_arg args = { 8428 .fh = NFS_FH(inode), 8429 .clientid = clp->cl_clientid, 8430 .renew = 1, /* append RENEW */ 8431 }; 8432 struct nfs4_fsid_present_res res = { 8433 .renew = 1, 8434 }; 8435 struct rpc_message msg = { 8436 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 8437 .rpc_argp = &args, 8438 .rpc_resp = &res, 8439 .rpc_cred = cred, 8440 }; 8441 unsigned long now = jiffies; 8442 int status; 8443 8444 res.fh = nfs_alloc_fhandle(); 8445 if (res.fh == NULL) 8446 return -ENOMEM; 8447 8448 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8449 status = nfs4_call_sync_sequence(clnt, server, &msg, 8450 &args.seq_args, &res.seq_res); 8451 nfs_free_fhandle(res.fh); 8452 if (status) 8453 return status; 8454 8455 do_renew_lease(clp, now); 8456 return 0; 8457 } 8458 8459 #ifdef CONFIG_NFS_V4_1 8460 8461 /* 8462 * This operation also signals the server that this client is 8463 * performing "lease moved" recovery. The server can stop asserting 8464 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing 8465 * this operation is identified in the SEQUENCE operation in this 8466 * compound. 
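 * If the SEQUENCE reply still carries SEQ4_STATUS_LEASE_MOVED, that is
 * reported back to the caller as -NFS4ERR_LEASE_MOVED.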
 */
static int _nfs41_proc_fsid_present(struct inode *inode, const struct cred *cred)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct rpc_clnt *clnt = server->client;
	struct nfs4_fsid_present_arg args = {
		.fh = NFS_FH(inode),
	};
	struct nfs4_fsid_present_res res = {
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
		.rpc_argp = &args,
		.rpc_resp = &res,
		.rpc_cred = cred,
	};
	int status;

	res.fh = nfs_alloc_fhandle();
	if (res.fh == NULL)
		return -ENOMEM;

	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
	status = nfs4_call_sync_sequence(clnt, server, &msg,
					 &args.seq_args, &res.seq_res);
	nfs_free_fhandle(res.fh);
	if (status == NFS4_OK &&
	    res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
		status = -NFS4ERR_LEASE_MOVED;
	return status;
}

#endif /* CONFIG_NFS_V4_1 */

/**
 * nfs4_proc_fsid_present - Is this FSID present or absent on server?
 * @inode: inode on FSID to check
 * @cred: credential to use for this operation
 *
 * Server indicates whether the FSID is present, moved, or not
 * recognized. This operation is necessary to clear a LEASE_MOVED
 * condition for this client ID.
 *
 * Returns NFS4_OK if the FSID is present on this server,
 * -NFS4ERR_MOVED if the FSID is no longer present, a negative
 * NFS4ERR code if some error occurred on the server, or a
 * negative errno if a local failure occurred.
 */
int nfs4_proc_fsid_present(struct inode *inode, const struct cred *cred)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_client *clp = server->nfs_client;
	const struct nfs4_mig_recovery_ops *ops =
		clp->cl_mvops->mig_recovery_ops;
	struct nfs4_exception exception = {
		.interruptible = true,
	};
	int status;

	dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
		(unsigned long long)server->fsid.major,
		(unsigned long long)server->fsid.minor,
		clp->cl_hostname);
	nfs_display_fhandle(NFS_FH(inode), __func__);

	do {
		status = ops->fsid_present(inode, cred);
		if (status != -NFS4ERR_DELAY)
			break;
		nfs4_handle_exception(server, status, &exception);
	} while (exception.retry);
	return status;
}

/*
 * If 'use_integrity' is true and the state management nfs_client
 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
 * and the machine credential as per RFC3530bis and RFC5661 Security
 * Considerations sections. Otherwise, just use the user cred with the
 * filesystem's rpc_client.
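 * (nfs4_state_protect() below may additionally switch the rpc_clnt and
 * message to the machine credential when NFS_SP4_MACH_CRED_SECINFO was
 * negotiated at EXCHANGE_ID time.)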
8547 */ 8548 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity) 8549 { 8550 int status; 8551 struct rpc_clnt *clnt = NFS_SERVER(dir)->client; 8552 struct nfs_client *clp = NFS_SERVER(dir)->nfs_client; 8553 struct nfs4_secinfo_arg args = { 8554 .dir_fh = NFS_FH(dir), 8555 .name = name, 8556 }; 8557 struct nfs4_secinfo_res res = { 8558 .flavors = flavors, 8559 }; 8560 struct rpc_message msg = { 8561 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO], 8562 .rpc_argp = &args, 8563 .rpc_resp = &res, 8564 }; 8565 struct nfs4_call_sync_data data = { 8566 .seq_server = NFS_SERVER(dir), 8567 .seq_args = &args.seq_args, 8568 .seq_res = &res.seq_res, 8569 }; 8570 struct rpc_task_setup task_setup = { 8571 .rpc_client = clnt, 8572 .rpc_message = &msg, 8573 .callback_ops = clp->cl_mvops->call_sync_ops, 8574 .callback_data = &data, 8575 .flags = RPC_TASK_NO_ROUND_ROBIN, 8576 }; 8577 const struct cred *cred = NULL; 8578 8579 if (use_integrity) { 8580 clnt = clp->cl_rpcclient; 8581 task_setup.rpc_client = clnt; 8582 8583 cred = nfs4_get_clid_cred(clp); 8584 msg.rpc_cred = cred; 8585 } 8586 8587 dprintk("NFS call secinfo %s\n", name->name); 8588 8589 nfs4_state_protect(clp, NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg); 8590 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 8591 status = nfs4_call_sync_custom(&task_setup); 8592 8593 dprintk("NFS reply secinfo: %d\n", status); 8594 8595 put_cred(cred); 8596 return status; 8597 } 8598 8599 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, 8600 struct nfs4_secinfo_flavors *flavors) 8601 { 8602 struct nfs4_exception exception = { 8603 .interruptible = true, 8604 }; 8605 int err; 8606 do { 8607 err = -NFS4ERR_WRONGSEC; 8608 8609 /* try to use integrity protection with machine cred */ 8610 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client)) 8611 err = _nfs4_proc_secinfo(dir, name, flavors, true); 8612 8613 /* 8614 * if unable to use integrity protection, or SECINFO with 8615 * integrity protection returns NFS4ERR_WRONGSEC (which is 8616 * disallowed by spec, but exists in deployed servers) use 8617 * the current filesystem's rpc_client and the user cred. 8618 */ 8619 if (err == -NFS4ERR_WRONGSEC) 8620 err = _nfs4_proc_secinfo(dir, name, flavors, false); 8621 8622 trace_nfs4_secinfo(dir, name, err); 8623 err = nfs4_handle_exception(NFS_SERVER(dir), err, 8624 &exception); 8625 } while (exception.retry); 8626 return err; 8627 } 8628 8629 #ifdef CONFIG_NFS_V4_1 8630 /* 8631 * Check the exchange flags returned by the server for invalid flags, having 8632 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or 8633 * DS flags set. 
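 * Returns NFS_OK when the flag word is acceptable, -NFS4ERR_INVAL otherwise.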
8634 */ 8635 static int nfs4_check_cl_exchange_flags(u32 flags, u32 version) 8636 { 8637 if (version >= 2 && (flags & ~EXCHGID4_2_FLAG_MASK_R)) 8638 goto out_inval; 8639 else if (version < 2 && (flags & ~EXCHGID4_FLAG_MASK_R)) 8640 goto out_inval; 8641 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && 8642 (flags & EXCHGID4_FLAG_USE_NON_PNFS)) 8643 goto out_inval; 8644 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) 8645 goto out_inval; 8646 return NFS_OK; 8647 out_inval: 8648 return -NFS4ERR_INVAL; 8649 } 8650 8651 static bool 8652 nfs41_same_server_scope(struct nfs41_server_scope *a, 8653 struct nfs41_server_scope *b) 8654 { 8655 if (a->server_scope_sz != b->server_scope_sz) 8656 return false; 8657 return memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0; 8658 } 8659 8660 static void 8661 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) 8662 { 8663 struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp; 8664 struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp; 8665 struct nfs_client *clp = args->client; 8666 8667 switch (task->tk_status) { 8668 case -NFS4ERR_BADSESSION: 8669 case -NFS4ERR_DEADSESSION: 8670 nfs4_schedule_session_recovery(clp->cl_session, 8671 task->tk_status); 8672 return; 8673 } 8674 if (args->dir == NFS4_CDFC4_FORE_OR_BOTH && 8675 res->dir != NFS4_CDFS4_BOTH) { 8676 rpc_task_close_connection(task); 8677 if (args->retries++ < MAX_BIND_CONN_TO_SESSION_RETRIES) 8678 rpc_restart_call(task); 8679 } 8680 } 8681 8682 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = { 8683 .rpc_call_done = nfs4_bind_one_conn_to_session_done, 8684 }; 8685 8686 /* 8687 * nfs4_proc_bind_one_conn_to_session() 8688 * 8689 * The 4.1 client currently uses the same TCP connection for the 8690 * fore and backchannel. 
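 * The backchannel direction is only requested when the session advertises
 * SESSION4_BACK_CHAN and the transport being bound is the client's main
 * xprt; otherwise NFS4_CDFC4_FORE is used.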
8691 */ 8692 static 8693 int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt, 8694 struct rpc_xprt *xprt, 8695 struct nfs_client *clp, 8696 const struct cred *cred) 8697 { 8698 int status; 8699 struct nfs41_bind_conn_to_session_args args = { 8700 .client = clp, 8701 .dir = NFS4_CDFC4_FORE_OR_BOTH, 8702 .retries = 0, 8703 }; 8704 struct nfs41_bind_conn_to_session_res res; 8705 struct rpc_message msg = { 8706 .rpc_proc = 8707 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION], 8708 .rpc_argp = &args, 8709 .rpc_resp = &res, 8710 .rpc_cred = cred, 8711 }; 8712 struct rpc_task_setup task_setup_data = { 8713 .rpc_client = clnt, 8714 .rpc_xprt = xprt, 8715 .callback_ops = &nfs4_bind_one_conn_to_session_ops, 8716 .rpc_message = &msg, 8717 .flags = RPC_TASK_TIMEOUT, 8718 }; 8719 struct rpc_task *task; 8720 8721 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id); 8722 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN)) 8723 args.dir = NFS4_CDFC4_FORE; 8724 8725 /* Do not set the backchannel flag unless this is clnt->cl_xprt */ 8726 if (xprt != rcu_access_pointer(clnt->cl_xprt)) 8727 args.dir = NFS4_CDFC4_FORE; 8728 8729 task = rpc_run_task(&task_setup_data); 8730 if (!IS_ERR(task)) { 8731 status = task->tk_status; 8732 rpc_put_task(task); 8733 } else 8734 status = PTR_ERR(task); 8735 trace_nfs4_bind_conn_to_session(clp, status); 8736 if (status == 0) { 8737 if (memcmp(res.sessionid.data, 8738 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { 8739 dprintk("NFS: %s: Session ID mismatch\n", __func__); 8740 return -EIO; 8741 } 8742 if ((res.dir & args.dir) != res.dir || res.dir == 0) { 8743 dprintk("NFS: %s: Unexpected direction from server\n", 8744 __func__); 8745 return -EIO; 8746 } 8747 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) { 8748 dprintk("NFS: %s: Server returned RDMA mode = true\n", 8749 __func__); 8750 return -EIO; 8751 } 8752 } 8753 8754 return status; 8755 } 8756 8757 struct rpc_bind_conn_calldata { 8758 struct nfs_client *clp; 8759 const struct cred *cred; 8760 }; 8761 8762 static int 8763 nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt, 8764 struct rpc_xprt *xprt, 8765 void *calldata) 8766 { 8767 struct rpc_bind_conn_calldata *p = calldata; 8768 8769 return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred); 8770 } 8771 8772 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, const struct cred *cred) 8773 { 8774 struct rpc_bind_conn_calldata data = { 8775 .clp = clp, 8776 .cred = cred, 8777 }; 8778 return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient, 8779 nfs4_proc_bind_conn_to_session_callback, &data); 8780 } 8781 8782 /* 8783 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map 8784 * and operations we'd like to see to enable certain features in the allow map 8785 */ 8786 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = { 8787 .how = SP4_MACH_CRED, 8788 .enforce.u.words = { 8789 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 8790 1 << (OP_EXCHANGE_ID - 32) | 8791 1 << (OP_CREATE_SESSION - 32) | 8792 1 << (OP_DESTROY_SESSION - 32) | 8793 1 << (OP_DESTROY_CLIENTID - 32) 8794 }, 8795 .allow.u.words = { 8796 [0] = 1 << (OP_CLOSE) | 8797 1 << (OP_OPEN_DOWNGRADE) | 8798 1 << (OP_LOCKU) | 8799 1 << (OP_DELEGRETURN) | 8800 1 << (OP_COMMIT), 8801 [1] = 1 << (OP_SECINFO - 32) | 8802 1 << (OP_SECINFO_NO_NAME - 32) | 8803 1 << (OP_LAYOUTRETURN - 32) | 8804 1 << (OP_TEST_STATEID - 32) | 8805 1 << (OP_FREE_STATEID - 32) | 8806 1 << (OP_WRITE - 32) 8807 } 8808 }; 8809 8810 
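/*
 * Note on the op map encoding above: operations numbered 32 and higher
 * (e.g. OP_EXCHANGE_ID) are recorded in word [1] of the bitmap, shifted
 * down by 32, while operations below 32 (e.g. OP_CLOSE) live in word [0].
 */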
/* 8811 * Select the state protection mode for client `clp' given the server results 8812 * from exchange_id in `sp'. 8813 * 8814 * Returns 0 on success, negative errno otherwise. 8815 */ 8816 static int nfs4_sp4_select_mode(struct nfs_client *clp, 8817 struct nfs41_state_protection *sp) 8818 { 8819 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = { 8820 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 8821 1 << (OP_EXCHANGE_ID - 32) | 8822 1 << (OP_CREATE_SESSION - 32) | 8823 1 << (OP_DESTROY_SESSION - 32) | 8824 1 << (OP_DESTROY_CLIENTID - 32) 8825 }; 8826 unsigned long flags = 0; 8827 unsigned int i; 8828 int ret = 0; 8829 8830 if (sp->how == SP4_MACH_CRED) { 8831 /* Print state protect result */ 8832 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n"); 8833 for (i = 0; i <= LAST_NFS4_OP; i++) { 8834 if (test_bit(i, sp->enforce.u.longs)) 8835 dfprintk(MOUNT, " enforce op %d\n", i); 8836 if (test_bit(i, sp->allow.u.longs)) 8837 dfprintk(MOUNT, " allow op %d\n", i); 8838 } 8839 8840 /* make sure nothing is on enforce list that isn't supported */ 8841 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) { 8842 if (sp->enforce.u.words[i] & ~supported_enforce[i]) { 8843 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 8844 ret = -EINVAL; 8845 goto out; 8846 } 8847 } 8848 8849 /* 8850 * Minimal mode - state operations are allowed to use machine 8851 * credential. Note this already happens by default, so the 8852 * client doesn't have to do anything more than the negotiation. 8853 * 8854 * NOTE: we don't care if EXCHANGE_ID is in the list - 8855 * we're already using the machine cred for exchange_id 8856 * and will never use a different cred. 8857 */ 8858 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) && 8859 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) && 8860 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) && 8861 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) { 8862 dfprintk(MOUNT, "sp4_mach_cred:\n"); 8863 dfprintk(MOUNT, " minimal mode enabled\n"); 8864 __set_bit(NFS_SP4_MACH_CRED_MINIMAL, &flags); 8865 } else { 8866 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 8867 ret = -EINVAL; 8868 goto out; 8869 } 8870 8871 if (test_bit(OP_CLOSE, sp->allow.u.longs) && 8872 test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) && 8873 test_bit(OP_DELEGRETURN, sp->allow.u.longs) && 8874 test_bit(OP_LOCKU, sp->allow.u.longs)) { 8875 dfprintk(MOUNT, " cleanup mode enabled\n"); 8876 __set_bit(NFS_SP4_MACH_CRED_CLEANUP, &flags); 8877 } 8878 8879 if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) { 8880 dfprintk(MOUNT, " pnfs cleanup mode enabled\n"); 8881 __set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP, &flags); 8882 } 8883 8884 if (test_bit(OP_SECINFO, sp->allow.u.longs) && 8885 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) { 8886 dfprintk(MOUNT, " secinfo mode enabled\n"); 8887 __set_bit(NFS_SP4_MACH_CRED_SECINFO, &flags); 8888 } 8889 8890 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) && 8891 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) { 8892 dfprintk(MOUNT, " stateid mode enabled\n"); 8893 __set_bit(NFS_SP4_MACH_CRED_STATEID, &flags); 8894 } 8895 8896 if (test_bit(OP_WRITE, sp->allow.u.longs)) { 8897 dfprintk(MOUNT, " write mode enabled\n"); 8898 __set_bit(NFS_SP4_MACH_CRED_WRITE, &flags); 8899 } 8900 8901 if (test_bit(OP_COMMIT, sp->allow.u.longs)) { 8902 dfprintk(MOUNT, " commit mode enabled\n"); 8903 __set_bit(NFS_SP4_MACH_CRED_COMMIT, &flags); 8904 } 8905 } 8906 out: 8907 clp->cl_sp4_flags = flags; 8908 return ret; 8909 } 8910 8911 struct nfs41_exchange_id_data { 8912 struct 
nfs41_exchange_id_res res; 8913 struct nfs41_exchange_id_args args; 8914 }; 8915 8916 static void nfs4_exchange_id_release(void *data) 8917 { 8918 struct nfs41_exchange_id_data *cdata = 8919 (struct nfs41_exchange_id_data *)data; 8920 8921 nfs_put_client(cdata->args.client); 8922 kfree(cdata->res.impl_id); 8923 kfree(cdata->res.server_scope); 8924 kfree(cdata->res.server_owner); 8925 kfree(cdata); 8926 } 8927 8928 static const struct rpc_call_ops nfs4_exchange_id_call_ops = { 8929 .rpc_release = nfs4_exchange_id_release, 8930 }; 8931 8932 /* 8933 * _nfs4_proc_exchange_id() 8934 * 8935 * Wrapper for EXCHANGE_ID operation. 8936 */ 8937 static struct rpc_task * 8938 nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred, 8939 u32 sp4_how, struct rpc_xprt *xprt) 8940 { 8941 struct rpc_message msg = { 8942 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], 8943 .rpc_cred = cred, 8944 }; 8945 struct rpc_task_setup task_setup_data = { 8946 .rpc_client = clp->cl_rpcclient, 8947 .callback_ops = &nfs4_exchange_id_call_ops, 8948 .rpc_message = &msg, 8949 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN, 8950 }; 8951 struct nfs41_exchange_id_data *calldata; 8952 int status; 8953 8954 if (!refcount_inc_not_zero(&clp->cl_count)) 8955 return ERR_PTR(-EIO); 8956 8957 status = -ENOMEM; 8958 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 8959 if (!calldata) 8960 goto out; 8961 8962 nfs4_init_boot_verifier(clp, &calldata->args.verifier); 8963 8964 status = nfs4_init_uniform_client_string(clp); 8965 if (status) 8966 goto out_calldata; 8967 8968 calldata->res.server_owner = kzalloc(sizeof(struct nfs41_server_owner), 8969 GFP_NOFS); 8970 status = -ENOMEM; 8971 if (unlikely(calldata->res.server_owner == NULL)) 8972 goto out_calldata; 8973 8974 calldata->res.server_scope = kzalloc(sizeof(struct nfs41_server_scope), 8975 GFP_NOFS); 8976 if (unlikely(calldata->res.server_scope == NULL)) 8977 goto out_server_owner; 8978 8979 calldata->res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS); 8980 if (unlikely(calldata->res.impl_id == NULL)) 8981 goto out_server_scope; 8982 8983 switch (sp4_how) { 8984 case SP4_NONE: 8985 calldata->args.state_protect.how = SP4_NONE; 8986 break; 8987 8988 case SP4_MACH_CRED: 8989 calldata->args.state_protect = nfs4_sp4_mach_cred_request; 8990 break; 8991 8992 default: 8993 /* unsupported! 
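		 * state protection mode: warn once and fail the EXCHANGE_ID
		 * setup with -EINVAL.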
*/ 8994 WARN_ON_ONCE(1); 8995 status = -EINVAL; 8996 goto out_impl_id; 8997 } 8998 if (xprt) { 8999 task_setup_data.rpc_xprt = xprt; 9000 task_setup_data.flags |= RPC_TASK_SOFTCONN; 9001 memcpy(calldata->args.verifier.data, clp->cl_confirm.data, 9002 sizeof(calldata->args.verifier.data)); 9003 } 9004 calldata->args.client = clp; 9005 calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 9006 EXCHGID4_FLAG_BIND_PRINC_STATEID; 9007 #ifdef CONFIG_NFS_V4_1_MIGRATION 9008 calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR; 9009 #endif 9010 if (test_bit(NFS_CS_PNFS, &clp->cl_flags)) 9011 calldata->args.flags |= EXCHGID4_FLAG_USE_PNFS_DS; 9012 msg.rpc_argp = &calldata->args; 9013 msg.rpc_resp = &calldata->res; 9014 task_setup_data.callback_data = calldata; 9015 9016 return rpc_run_task(&task_setup_data); 9017 9018 out_impl_id: 9019 kfree(calldata->res.impl_id); 9020 out_server_scope: 9021 kfree(calldata->res.server_scope); 9022 out_server_owner: 9023 kfree(calldata->res.server_owner); 9024 out_calldata: 9025 kfree(calldata); 9026 out: 9027 nfs_put_client(clp); 9028 return ERR_PTR(status); 9029 } 9030 9031 /* 9032 * _nfs4_proc_exchange_id() 9033 * 9034 * Wrapper for EXCHANGE_ID operation. 9035 */ 9036 static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred, 9037 u32 sp4_how) 9038 { 9039 struct rpc_task *task; 9040 struct nfs41_exchange_id_args *argp; 9041 struct nfs41_exchange_id_res *resp; 9042 unsigned long now = jiffies; 9043 int status; 9044 9045 task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL); 9046 if (IS_ERR(task)) 9047 return PTR_ERR(task); 9048 9049 argp = task->tk_msg.rpc_argp; 9050 resp = task->tk_msg.rpc_resp; 9051 status = task->tk_status; 9052 if (status != 0) 9053 goto out; 9054 9055 status = nfs4_check_cl_exchange_flags(resp->flags, 9056 clp->cl_mvops->minor_version); 9057 if (status != 0) 9058 goto out; 9059 9060 status = nfs4_sp4_select_mode(clp, &resp->state_protect); 9061 if (status != 0) 9062 goto out; 9063 9064 do_renew_lease(clp, now); 9065 9066 clp->cl_clientid = resp->clientid; 9067 clp->cl_exchange_flags = resp->flags; 9068 clp->cl_seqid = resp->seqid; 9069 /* Client ID is not confirmed */ 9070 if (!(resp->flags & EXCHGID4_FLAG_CONFIRMED_R)) 9071 clear_bit(NFS4_SESSION_ESTABLISHED, 9072 &clp->cl_session->session_state); 9073 9074 if (clp->cl_serverscope != NULL && 9075 !nfs41_same_server_scope(clp->cl_serverscope, 9076 resp->server_scope)) { 9077 dprintk("%s: server_scope mismatch detected\n", 9078 __func__); 9079 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); 9080 } 9081 9082 swap(clp->cl_serverowner, resp->server_owner); 9083 swap(clp->cl_serverscope, resp->server_scope); 9084 swap(clp->cl_implid, resp->impl_id); 9085 9086 /* Save the EXCHANGE_ID verifier session trunk tests */ 9087 memcpy(clp->cl_confirm.data, argp->verifier.data, 9088 sizeof(clp->cl_confirm.data)); 9089 out: 9090 trace_nfs4_exchange_id(clp, status); 9091 rpc_put_task(task); 9092 return status; 9093 } 9094 9095 /* 9096 * nfs4_proc_exchange_id() 9097 * 9098 * Returns zero, a negative errno, or a negative NFS4ERR status code. 9099 * 9100 * Since the clientid has expired, all compounds using sessions 9101 * associated with the stale clientid will be returning 9102 * NFS4ERR_BADSESSION in the sequence operation, and will therefore 9103 * be in some phase of session reset. 9104 * 9105 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used. 
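 * If that negotiation fails, the EXCHANGE_ID is retried with SP4_NONE.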
9106 */ 9107 int nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred) 9108 { 9109 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor; 9110 int status; 9111 9112 /* try SP4_MACH_CRED if krb5i/p */ 9113 if (authflavor == RPC_AUTH_GSS_KRB5I || 9114 authflavor == RPC_AUTH_GSS_KRB5P) { 9115 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED); 9116 if (!status) 9117 return 0; 9118 } 9119 9120 /* try SP4_NONE */ 9121 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE); 9122 } 9123 9124 /** 9125 * nfs4_test_session_trunk 9126 * 9127 * This is an add_xprt_test() test function called from 9128 * rpc_clnt_setup_test_and_add_xprt. 9129 * 9130 * The rpc_xprt_switch is referrenced by rpc_clnt_setup_test_and_add_xprt 9131 * and is dereferrenced in nfs4_exchange_id_release 9132 * 9133 * Upon success, add the new transport to the rpc_clnt 9134 * 9135 * @clnt: struct rpc_clnt to get new transport 9136 * @xprt: the rpc_xprt to test 9137 * @data: call data for _nfs4_proc_exchange_id. 9138 */ 9139 void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt, 9140 void *data) 9141 { 9142 struct nfs4_add_xprt_data *adata = data; 9143 struct rpc_task *task; 9144 int status; 9145 9146 u32 sp4_how; 9147 9148 dprintk("--> %s try %s\n", __func__, 9149 xprt->address_strings[RPC_DISPLAY_ADDR]); 9150 9151 sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED); 9152 9153 try_again: 9154 /* Test connection for session trunking. Async exchange_id call */ 9155 task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt); 9156 if (IS_ERR(task)) 9157 return; 9158 9159 status = task->tk_status; 9160 if (status == 0) { 9161 status = nfs4_detect_session_trunking(adata->clp, 9162 task->tk_msg.rpc_resp, xprt); 9163 trace_nfs4_trunked_exchange_id(adata->clp, 9164 xprt->address_strings[RPC_DISPLAY_ADDR], status); 9165 } 9166 if (status == 0) 9167 rpc_clnt_xprt_switch_add_xprt(clnt, xprt); 9168 else if (status != -NFS4ERR_DELAY && rpc_clnt_xprt_switch_has_addr(clnt, 9169 (struct sockaddr *)&xprt->addr)) 9170 rpc_clnt_xprt_switch_remove_xprt(clnt, xprt); 9171 9172 rpc_put_task(task); 9173 if (status == -NFS4ERR_DELAY) { 9174 ssleep(1); 9175 goto try_again; 9176 } 9177 } 9178 EXPORT_SYMBOL_GPL(nfs4_test_session_trunk); 9179 9180 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp, 9181 const struct cred *cred) 9182 { 9183 struct rpc_message msg = { 9184 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID], 9185 .rpc_argp = clp, 9186 .rpc_cred = cred, 9187 }; 9188 int status; 9189 9190 status = rpc_call_sync(clp->cl_rpcclient, &msg, 9191 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 9192 trace_nfs4_destroy_clientid(clp, status); 9193 if (status) 9194 dprintk("NFS: Got error %d from the server %s on " 9195 "DESTROY_CLIENTID.", status, clp->cl_hostname); 9196 return status; 9197 } 9198 9199 static int nfs4_proc_destroy_clientid(struct nfs_client *clp, 9200 const struct cred *cred) 9201 { 9202 unsigned int loop; 9203 int ret; 9204 9205 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) { 9206 ret = _nfs4_proc_destroy_clientid(clp, cred); 9207 switch (ret) { 9208 case -NFS4ERR_DELAY: 9209 case -NFS4ERR_CLIENTID_BUSY: 9210 ssleep(1); 9211 break; 9212 default: 9213 return ret; 9214 } 9215 } 9216 return 0; 9217 } 9218 9219 int nfs4_destroy_clientid(struct nfs_client *clp) 9220 { 9221 const struct cred *cred; 9222 int ret = 0; 9223 9224 if (clp->cl_mvops->minor_version < 1) 9225 goto out; 9226 if (clp->cl_exchange_flags == 0) 9227 goto out; 9228 if 
(clp->cl_preserve_clid) 9229 goto out; 9230 cred = nfs4_get_clid_cred(clp); 9231 ret = nfs4_proc_destroy_clientid(clp, cred); 9232 put_cred(cred); 9233 switch (ret) { 9234 case 0: 9235 case -NFS4ERR_STALE_CLIENTID: 9236 clp->cl_exchange_flags = 0; 9237 } 9238 out: 9239 return ret; 9240 } 9241 9242 #endif /* CONFIG_NFS_V4_1 */ 9243 9244 struct nfs4_get_lease_time_data { 9245 struct nfs4_get_lease_time_args *args; 9246 struct nfs4_get_lease_time_res *res; 9247 struct nfs_client *clp; 9248 }; 9249 9250 static void nfs4_get_lease_time_prepare(struct rpc_task *task, 9251 void *calldata) 9252 { 9253 struct nfs4_get_lease_time_data *data = 9254 (struct nfs4_get_lease_time_data *)calldata; 9255 9256 /* just setup sequence, do not trigger session recovery 9257 since we're invoked within one */ 9258 nfs4_setup_sequence(data->clp, 9259 &data->args->la_seq_args, 9260 &data->res->lr_seq_res, 9261 task); 9262 } 9263 9264 /* 9265 * Called from nfs4_state_manager thread for session setup, so don't recover 9266 * from sequence operation or clientid errors. 9267 */ 9268 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) 9269 { 9270 struct nfs4_get_lease_time_data *data = 9271 (struct nfs4_get_lease_time_data *)calldata; 9272 9273 if (!nfs4_sequence_done(task, &data->res->lr_seq_res)) 9274 return; 9275 switch (task->tk_status) { 9276 case -NFS4ERR_DELAY: 9277 case -NFS4ERR_GRACE: 9278 rpc_delay(task, NFS4_POLL_RETRY_MIN); 9279 task->tk_status = 0; 9280 fallthrough; 9281 case -NFS4ERR_RETRY_UNCACHED_REP: 9282 rpc_restart_call_prepare(task); 9283 return; 9284 } 9285 } 9286 9287 static const struct rpc_call_ops nfs4_get_lease_time_ops = { 9288 .rpc_call_prepare = nfs4_get_lease_time_prepare, 9289 .rpc_call_done = nfs4_get_lease_time_done, 9290 }; 9291 9292 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) 9293 { 9294 struct nfs4_get_lease_time_args args; 9295 struct nfs4_get_lease_time_res res = { 9296 .lr_fsinfo = fsinfo, 9297 }; 9298 struct nfs4_get_lease_time_data data = { 9299 .args = &args, 9300 .res = &res, 9301 .clp = clp, 9302 }; 9303 struct rpc_message msg = { 9304 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME], 9305 .rpc_argp = &args, 9306 .rpc_resp = &res, 9307 }; 9308 struct rpc_task_setup task_setup = { 9309 .rpc_client = clp->cl_rpcclient, 9310 .rpc_message = &msg, 9311 .callback_ops = &nfs4_get_lease_time_ops, 9312 .callback_data = &data, 9313 .flags = RPC_TASK_TIMEOUT, 9314 }; 9315 9316 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0, 1); 9317 return nfs4_call_sync_custom(&task_setup); 9318 } 9319 9320 #ifdef CONFIG_NFS_V4_1 9321 9322 /* 9323 * Initialize the values to be used by the client in CREATE_SESSION 9324 * If nfs4_init_session set the fore channel request and response sizes, 9325 * use them. 9326 * 9327 * Set the back channel max_resp_sz_cached to zero to force the client to 9328 * always set csa_cachethis to FALSE because the current implementation 9329 * of the back channel DRC only supports caching the CB_SEQUENCE operation. 
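 * The back channel request and response sizes are limited to what the RPC
 * transport can carry, as reported by rpc_max_bc_payload().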
9330 */ 9331 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args, 9332 struct rpc_clnt *clnt) 9333 { 9334 unsigned int max_rqst_sz, max_resp_sz; 9335 unsigned int max_bc_payload = rpc_max_bc_payload(clnt); 9336 unsigned int max_bc_slots = rpc_num_bc_slots(clnt); 9337 9338 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead; 9339 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead; 9340 9341 /* Fore channel attributes */ 9342 args->fc_attrs.max_rqst_sz = max_rqst_sz; 9343 args->fc_attrs.max_resp_sz = max_resp_sz; 9344 args->fc_attrs.max_ops = NFS4_MAX_OPS; 9345 args->fc_attrs.max_reqs = max_session_slots; 9346 9347 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u " 9348 "max_ops=%u max_reqs=%u\n", 9349 __func__, 9350 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz, 9351 args->fc_attrs.max_ops, args->fc_attrs.max_reqs); 9352 9353 /* Back channel attributes */ 9354 args->bc_attrs.max_rqst_sz = max_bc_payload; 9355 args->bc_attrs.max_resp_sz = max_bc_payload; 9356 args->bc_attrs.max_resp_sz_cached = 0; 9357 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS; 9358 args->bc_attrs.max_reqs = max_t(unsigned short, max_session_cb_slots, 1); 9359 if (args->bc_attrs.max_reqs > max_bc_slots) 9360 args->bc_attrs.max_reqs = max_bc_slots; 9361 9362 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u " 9363 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", 9364 __func__, 9365 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz, 9366 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops, 9367 args->bc_attrs.max_reqs); 9368 } 9369 9370 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, 9371 struct nfs41_create_session_res *res) 9372 { 9373 struct nfs4_channel_attrs *sent = &args->fc_attrs; 9374 struct nfs4_channel_attrs *rcvd = &res->fc_attrs; 9375 9376 if (rcvd->max_resp_sz > sent->max_resp_sz) 9377 return -EINVAL; 9378 /* 9379 * Our requested max_ops is the minimum we need; we're not 9380 * prepared to break up compounds into smaller pieces than that. 
9381 * So, no point even trying to continue if the server won't 9382 * cooperate: 9383 */ 9384 if (rcvd->max_ops < sent->max_ops) 9385 return -EINVAL; 9386 if (rcvd->max_reqs == 0) 9387 return -EINVAL; 9388 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE) 9389 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE; 9390 return 0; 9391 } 9392 9393 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, 9394 struct nfs41_create_session_res *res) 9395 { 9396 struct nfs4_channel_attrs *sent = &args->bc_attrs; 9397 struct nfs4_channel_attrs *rcvd = &res->bc_attrs; 9398 9399 if (!(res->flags & SESSION4_BACK_CHAN)) 9400 goto out; 9401 if (rcvd->max_rqst_sz > sent->max_rqst_sz) 9402 return -EINVAL; 9403 if (rcvd->max_resp_sz < sent->max_resp_sz) 9404 return -EINVAL; 9405 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) 9406 return -EINVAL; 9407 if (rcvd->max_ops > sent->max_ops) 9408 return -EINVAL; 9409 if (rcvd->max_reqs > sent->max_reqs) 9410 return -EINVAL; 9411 out: 9412 return 0; 9413 } 9414 9415 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args, 9416 struct nfs41_create_session_res *res) 9417 { 9418 int ret; 9419 9420 ret = nfs4_verify_fore_channel_attrs(args, res); 9421 if (ret) 9422 return ret; 9423 return nfs4_verify_back_channel_attrs(args, res); 9424 } 9425 9426 static void nfs4_update_session(struct nfs4_session *session, 9427 struct nfs41_create_session_res *res) 9428 { 9429 nfs4_copy_sessionid(&session->sess_id, &res->sessionid); 9430 /* Mark client id and session as being confirmed */ 9431 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; 9432 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state); 9433 session->flags = res->flags; 9434 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs)); 9435 if (res->flags & SESSION4_BACK_CHAN) 9436 memcpy(&session->bc_attrs, &res->bc_attrs, 9437 sizeof(session->bc_attrs)); 9438 } 9439 9440 static int _nfs4_proc_create_session(struct nfs_client *clp, 9441 const struct cred *cred) 9442 { 9443 struct nfs4_session *session = clp->cl_session; 9444 struct nfs41_create_session_args args = { 9445 .client = clp, 9446 .clientid = clp->cl_clientid, 9447 .seqid = clp->cl_seqid, 9448 .cb_program = NFS4_CALLBACK, 9449 }; 9450 struct nfs41_create_session_res res; 9451 9452 struct rpc_message msg = { 9453 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION], 9454 .rpc_argp = &args, 9455 .rpc_resp = &res, 9456 .rpc_cred = cred, 9457 }; 9458 int status; 9459 9460 nfs4_init_channel_attrs(&args, clp->cl_rpcclient); 9461 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN); 9462 9463 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 9464 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 9465 trace_nfs4_create_session(clp, status); 9466 9467 switch (status) { 9468 case -NFS4ERR_STALE_CLIENTID: 9469 case -NFS4ERR_DELAY: 9470 case -ETIMEDOUT: 9471 case -EACCES: 9472 case -EAGAIN: 9473 goto out; 9474 } 9475 9476 clp->cl_seqid++; 9477 if (!status) { 9478 /* Verify the session's negotiated channel_attrs values */ 9479 status = nfs4_verify_channel_attrs(&args, &res); 9480 /* Increment the clientid slot sequence id */ 9481 if (status) 9482 goto out; 9483 nfs4_update_session(session, &res); 9484 } 9485 out: 9486 return status; 9487 } 9488 9489 /* 9490 * Issues a CREATE_SESSION operation to the server. 9491 * It is the responsibility of the caller to verify the session is 9492 * expired before calling this routine. 
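 * On success the session slot tables are (re)initialized and the client's
 * transports are probed for session trunking.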
9493 */ 9494 int nfs4_proc_create_session(struct nfs_client *clp, const struct cred *cred) 9495 { 9496 int status; 9497 unsigned *ptr; 9498 struct nfs4_session *session = clp->cl_session; 9499 struct nfs4_add_xprt_data xprtdata = { 9500 .clp = clp, 9501 }; 9502 struct rpc_add_xprt_test rpcdata = { 9503 .add_xprt_test = clp->cl_mvops->session_trunk, 9504 .data = &xprtdata, 9505 }; 9506 9507 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); 9508 9509 status = _nfs4_proc_create_session(clp, cred); 9510 if (status) 9511 goto out; 9512 9513 /* Init or reset the session slot tables */ 9514 status = nfs4_setup_session_slot_tables(session); 9515 dprintk("slot table setup returned %d\n", status); 9516 if (status) 9517 goto out; 9518 9519 ptr = (unsigned *)&session->sess_id.data[0]; 9520 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__, 9521 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]); 9522 rpc_clnt_probe_trunked_xprts(clp->cl_rpcclient, &rpcdata); 9523 out: 9524 return status; 9525 } 9526 9527 /* 9528 * Issue the over-the-wire RPC DESTROY_SESSION. 9529 * The caller must serialize access to this routine. 9530 */ 9531 int nfs4_proc_destroy_session(struct nfs4_session *session, 9532 const struct cred *cred) 9533 { 9534 struct rpc_message msg = { 9535 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION], 9536 .rpc_argp = session, 9537 .rpc_cred = cred, 9538 }; 9539 int status = 0; 9540 9541 /* session is still being setup */ 9542 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state)) 9543 return 0; 9544 9545 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 9546 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 9547 trace_nfs4_destroy_session(session->clp, status); 9548 9549 if (status) 9550 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. " 9551 "Session has been destroyed regardless...\n", status); 9552 rpc_clnt_manage_trunked_xprts(session->clp->cl_rpcclient); 9553 return status; 9554 } 9555 9556 /* 9557 * Renew the cl_session lease. 
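 *
 * On NFSv4.1 the lease is kept alive by issuing SEQUENCE operations.  The
 * helpers below are wired into nfs41_state_renewal_ops further down in this
 * file: nfs41_proc_async_sequence() is the ->sched_state_renewal callback
 * (fire-and-forget, and only when NFS4_RENEW_TIMEOUT is set in renew_flags),
 * while nfs4_proc_sequence() is the synchronous ->renew_lease variant.
 * Minimal sketch of the asynchronous path, using only the helpers defined
 * below:
 *
 *	task = _nfs41_proc_sequence(clp, cred, NULL, false);
 *	if (!IS_ERR(task))
 *		rpc_put_task_async(task);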
9558 */ 9559 struct nfs4_sequence_data { 9560 struct nfs_client *clp; 9561 struct nfs4_sequence_args args; 9562 struct nfs4_sequence_res res; 9563 }; 9564 9565 static void nfs41_sequence_release(void *data) 9566 { 9567 struct nfs4_sequence_data *calldata = data; 9568 struct nfs_client *clp = calldata->clp; 9569 9570 if (refcount_read(&clp->cl_count) > 1) 9571 nfs4_schedule_state_renewal(clp); 9572 nfs_put_client(clp); 9573 kfree(calldata); 9574 } 9575 9576 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp) 9577 { 9578 switch(task->tk_status) { 9579 case -NFS4ERR_DELAY: 9580 rpc_delay(task, NFS4_POLL_RETRY_MAX); 9581 return -EAGAIN; 9582 default: 9583 nfs4_schedule_lease_recovery(clp); 9584 } 9585 return 0; 9586 } 9587 9588 static void nfs41_sequence_call_done(struct rpc_task *task, void *data) 9589 { 9590 struct nfs4_sequence_data *calldata = data; 9591 struct nfs_client *clp = calldata->clp; 9592 9593 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp)) 9594 return; 9595 9596 trace_nfs4_sequence(clp, task->tk_status); 9597 if (task->tk_status < 0 && !task->tk_client->cl_shutdown) { 9598 dprintk("%s ERROR %d\n", __func__, task->tk_status); 9599 if (refcount_read(&clp->cl_count) == 1) 9600 return; 9601 9602 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) { 9603 rpc_restart_call_prepare(task); 9604 return; 9605 } 9606 } 9607 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); 9608 } 9609 9610 static void nfs41_sequence_prepare(struct rpc_task *task, void *data) 9611 { 9612 struct nfs4_sequence_data *calldata = data; 9613 struct nfs_client *clp = calldata->clp; 9614 struct nfs4_sequence_args *args; 9615 struct nfs4_sequence_res *res; 9616 9617 args = task->tk_msg.rpc_argp; 9618 res = task->tk_msg.rpc_resp; 9619 9620 nfs4_setup_sequence(clp, args, res, task); 9621 } 9622 9623 static const struct rpc_call_ops nfs41_sequence_ops = { 9624 .rpc_call_done = nfs41_sequence_call_done, 9625 .rpc_call_prepare = nfs41_sequence_prepare, 9626 .rpc_release = nfs41_sequence_release, 9627 }; 9628 9629 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, 9630 const struct cred *cred, 9631 struct nfs4_slot *slot, 9632 bool is_privileged) 9633 { 9634 struct nfs4_sequence_data *calldata; 9635 struct rpc_message msg = { 9636 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE], 9637 .rpc_cred = cred, 9638 }; 9639 struct rpc_task_setup task_setup_data = { 9640 .rpc_client = clp->cl_rpcclient, 9641 .rpc_message = &msg, 9642 .callback_ops = &nfs41_sequence_ops, 9643 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT | RPC_TASK_MOVEABLE, 9644 }; 9645 struct rpc_task *ret; 9646 9647 ret = ERR_PTR(-EIO); 9648 if (!refcount_inc_not_zero(&clp->cl_count)) 9649 goto out_err; 9650 9651 ret = ERR_PTR(-ENOMEM); 9652 calldata = kzalloc(sizeof(*calldata), GFP_KERNEL); 9653 if (calldata == NULL) 9654 goto out_put_clp; 9655 nfs4_init_sequence(&calldata->args, &calldata->res, 0, is_privileged); 9656 nfs4_sequence_attach_slot(&calldata->args, &calldata->res, slot); 9657 msg.rpc_argp = &calldata->args; 9658 msg.rpc_resp = &calldata->res; 9659 calldata->clp = clp; 9660 task_setup_data.callback_data = calldata; 9661 9662 ret = rpc_run_task(&task_setup_data); 9663 if (IS_ERR(ret)) 9664 goto out_err; 9665 return ret; 9666 out_put_clp: 9667 nfs_put_client(clp); 9668 out_err: 9669 nfs41_release_slot(slot); 9670 return ret; 9671 } 9672 9673 static int nfs41_proc_async_sequence(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags) 9674 { 9675 struct rpc_task 
*task; 9676 int ret = 0; 9677 9678 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) 9679 return -EAGAIN; 9680 task = _nfs41_proc_sequence(clp, cred, NULL, false); 9681 if (IS_ERR(task)) 9682 ret = PTR_ERR(task); 9683 else 9684 rpc_put_task_async(task); 9685 dprintk("<-- %s status=%d\n", __func__, ret); 9686 return ret; 9687 } 9688 9689 static int nfs4_proc_sequence(struct nfs_client *clp, const struct cred *cred) 9690 { 9691 struct rpc_task *task; 9692 int ret; 9693 9694 task = _nfs41_proc_sequence(clp, cred, NULL, true); 9695 if (IS_ERR(task)) { 9696 ret = PTR_ERR(task); 9697 goto out; 9698 } 9699 ret = rpc_wait_for_completion_task(task); 9700 if (!ret) 9701 ret = task->tk_status; 9702 rpc_put_task(task); 9703 out: 9704 dprintk("<-- %s status=%d\n", __func__, ret); 9705 return ret; 9706 } 9707 9708 struct nfs4_reclaim_complete_data { 9709 struct nfs_client *clp; 9710 struct nfs41_reclaim_complete_args arg; 9711 struct nfs41_reclaim_complete_res res; 9712 }; 9713 9714 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data) 9715 { 9716 struct nfs4_reclaim_complete_data *calldata = data; 9717 9718 nfs4_setup_sequence(calldata->clp, 9719 &calldata->arg.seq_args, 9720 &calldata->res.seq_res, 9721 task); 9722 } 9723 9724 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp) 9725 { 9726 switch(task->tk_status) { 9727 case 0: 9728 wake_up_all(&clp->cl_lock_waitq); 9729 fallthrough; 9730 case -NFS4ERR_COMPLETE_ALREADY: 9731 case -NFS4ERR_WRONG_CRED: /* What to do here? */ 9732 break; 9733 case -NFS4ERR_DELAY: 9734 rpc_delay(task, NFS4_POLL_RETRY_MAX); 9735 fallthrough; 9736 case -NFS4ERR_RETRY_UNCACHED_REP: 9737 case -EACCES: 9738 dprintk("%s: failed to reclaim complete error %d for server %s, retrying\n", 9739 __func__, task->tk_status, clp->cl_hostname); 9740 return -EAGAIN; 9741 case -NFS4ERR_BADSESSION: 9742 case -NFS4ERR_DEADSESSION: 9743 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 9744 break; 9745 default: 9746 nfs4_schedule_lease_recovery(clp); 9747 } 9748 return 0; 9749 } 9750 9751 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data) 9752 { 9753 struct nfs4_reclaim_complete_data *calldata = data; 9754 struct nfs_client *clp = calldata->clp; 9755 struct nfs4_sequence_res *res = &calldata->res.seq_res; 9756 9757 if (!nfs41_sequence_done(task, res)) 9758 return; 9759 9760 trace_nfs4_reclaim_complete(clp, task->tk_status); 9761 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) { 9762 rpc_restart_call_prepare(task); 9763 return; 9764 } 9765 } 9766 9767 static void nfs4_free_reclaim_complete_data(void *data) 9768 { 9769 struct nfs4_reclaim_complete_data *calldata = data; 9770 9771 kfree(calldata); 9772 } 9773 9774 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = { 9775 .rpc_call_prepare = nfs4_reclaim_complete_prepare, 9776 .rpc_call_done = nfs4_reclaim_complete_done, 9777 .rpc_release = nfs4_free_reclaim_complete_data, 9778 }; 9779 9780 /* 9781 * Issue a global reclaim complete. 
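 *
 * RECLAIM_COMPLETE tells the server that this client has finished reclaiming
 * state after a server reboot; arg.one_fs == 0 below makes it a global
 * (whole-client) rather than per-filesystem completion.  Sketch of how the
 * recovery code reaches this routine (simplified; it is published as the
 * ->reclaim_complete method of nfs41_reboot_recovery_ops at the end of this
 * file):
 *
 *	const struct nfs4_state_recovery_ops *ops =
 *			clp->cl_mvops->reboot_recovery_ops;
 *	if (ops->reclaim_complete)
 *		ops->reclaim_complete(clp, cred);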
 */
static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
		const struct cred *cred)
{
	struct nfs4_reclaim_complete_data *calldata;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clp->cl_rpcclient,
		.rpc_message = &msg,
		.callback_ops = &nfs4_reclaim_complete_call_ops,
		.flags = RPC_TASK_NO_ROUND_ROBIN,
	};
	int status = -ENOMEM;

	calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
	if (calldata == NULL)
		goto out;
	calldata->clp = clp;
	calldata->arg.one_fs = 0;

	nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0, 1);
	msg.rpc_argp = &calldata->arg;
	msg.rpc_resp = &calldata->res;
	task_setup_data.callback_data = calldata;
	status = nfs4_call_sync_custom(&task_setup_data);
out:
	dprintk("<-- %s status=%d\n", __func__, status);
	return status;
}

static void
nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutget *lgp = calldata;
	struct nfs_server *server = NFS_SERVER(lgp->args.inode);

	nfs4_setup_sequence(server->nfs_client, &lgp->args.seq_args,
			&lgp->res.seq_res, task);
}

static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutget *lgp = calldata;

	nfs41_sequence_process(task, &lgp->res.seq_res);
}

static int
nfs4_layoutget_handle_exception(struct rpc_task *task,
		struct nfs4_layoutget *lgp, struct nfs4_exception *exception)
{
	struct inode *inode = lgp->args.inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layout_hdr *lo = lgp->lo;
	int nfs4err = task->tk_status;
	int err, status = 0;
	LIST_HEAD(head);

	dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);

	nfs4_sequence_free_slot(&lgp->res.seq_res);

	exception->state = NULL;
	exception->stateid = NULL;

	switch (nfs4err) {
	case 0:
		goto out;

	/*
	 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs
	 * on the file. Set tk_status to -ENODATA to tell the upper layer
	 * to retry the I/O in-band through the MDS.
	 */
	case -NFS4ERR_LAYOUTUNAVAILABLE:
		status = -ENODATA;
		goto out;
	/*
	 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout that
	 * satisfies lgp->args.minlength != 0 (see RFC5661 section 18.43.3).
	 */
	case -NFS4ERR_BADLAYOUT:
		status = -EOVERFLOW;
		goto out;
	/*
	 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
	 * (or clients) writing to the same RAID stripe except when
	 * the minlength argument is 0 (see RFC5661 section 18.43.3).
	 *
	 * Treat it like we would RECALLCONFLICT -- we retry for a little
	 * while, and then eventually give up.
	 */
	case -NFS4ERR_LAYOUTTRYLATER:
		if (lgp->args.minlength == 0) {
			status = -EOVERFLOW;
			goto out;
		}
		status = -EBUSY;
		break;
	case -NFS4ERR_RECALLCONFLICT:
	case -NFS4ERR_RETURNCONFLICT:
		status = -ERECALLCONFLICT;
		break;
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_BAD_STATEID:
		exception->timeout = 0;
		spin_lock(&inode->i_lock);
		/* If the open stateid was bad, then recover it.
*/ 9895 if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) || 9896 !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) { 9897 spin_unlock(&inode->i_lock); 9898 exception->state = lgp->args.ctx->state; 9899 exception->stateid = &lgp->args.stateid; 9900 break; 9901 } 9902 9903 /* 9904 * Mark the bad layout state as invalid, then retry 9905 */ 9906 pnfs_mark_layout_stateid_invalid(lo, &head); 9907 spin_unlock(&inode->i_lock); 9908 nfs_commit_inode(inode, 0); 9909 pnfs_free_lseg_list(&head); 9910 status = -EAGAIN; 9911 goto out; 9912 } 9913 9914 err = nfs4_handle_exception(server, nfs4err, exception); 9915 if (!status) { 9916 if (exception->retry) 9917 status = -EAGAIN; 9918 else 9919 status = err; 9920 } 9921 out: 9922 return status; 9923 } 9924 9925 size_t max_response_pages(struct nfs_server *server) 9926 { 9927 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 9928 return nfs_page_array_len(0, max_resp_sz); 9929 } 9930 9931 static void nfs4_layoutget_release(void *calldata) 9932 { 9933 struct nfs4_layoutget *lgp = calldata; 9934 9935 nfs4_sequence_free_slot(&lgp->res.seq_res); 9936 pnfs_layoutget_free(lgp); 9937 } 9938 9939 static const struct rpc_call_ops nfs4_layoutget_call_ops = { 9940 .rpc_call_prepare = nfs4_layoutget_prepare, 9941 .rpc_call_done = nfs4_layoutget_done, 9942 .rpc_release = nfs4_layoutget_release, 9943 }; 9944 9945 struct pnfs_layout_segment * 9946 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, 9947 struct nfs4_exception *exception) 9948 { 9949 struct inode *inode = lgp->args.inode; 9950 struct nfs_server *server = NFS_SERVER(inode); 9951 struct rpc_task *task; 9952 struct rpc_message msg = { 9953 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET], 9954 .rpc_argp = &lgp->args, 9955 .rpc_resp = &lgp->res, 9956 .rpc_cred = lgp->cred, 9957 }; 9958 struct rpc_task_setup task_setup_data = { 9959 .rpc_client = server->client, 9960 .rpc_message = &msg, 9961 .callback_ops = &nfs4_layoutget_call_ops, 9962 .callback_data = lgp, 9963 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF | 9964 RPC_TASK_MOVEABLE, 9965 }; 9966 struct pnfs_layout_segment *lseg = NULL; 9967 int status = 0; 9968 9969 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0); 9970 exception->retry = 0; 9971 9972 task = rpc_run_task(&task_setup_data); 9973 if (IS_ERR(task)) 9974 return ERR_CAST(task); 9975 9976 status = rpc_wait_for_completion_task(task); 9977 if (status != 0) 9978 goto out; 9979 9980 if (task->tk_status < 0) { 9981 exception->retry = 1; 9982 status = nfs4_layoutget_handle_exception(task, lgp, exception); 9983 } else if (lgp->res.layoutp->len == 0) { 9984 exception->retry = 1; 9985 status = -EAGAIN; 9986 nfs4_update_delay(&exception->timeout); 9987 } else 9988 lseg = pnfs_layout_process(lgp); 9989 out: 9990 trace_nfs4_layoutget(lgp->args.ctx, 9991 &lgp->args.range, 9992 &lgp->res.range, 9993 &lgp->res.stateid, 9994 status); 9995 9996 rpc_put_task(task); 9997 dprintk("<-- %s status=%d\n", __func__, status); 9998 if (status) 9999 return ERR_PTR(status); 10000 return lseg; 10001 } 10002 10003 static void 10004 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) 10005 { 10006 struct nfs4_layoutreturn *lrp = calldata; 10007 10008 nfs4_setup_sequence(lrp->clp, 10009 &lrp->args.seq_args, 10010 &lrp->res.seq_res, 10011 task); 10012 if (!pnfs_layout_is_valid(lrp->args.layout)) 10013 rpc_exit(task, 0); 10014 } 10015 10016 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) 10017 { 10018 struct nfs4_layoutreturn *lrp 
= calldata; 10019 struct nfs_server *server; 10020 10021 if (!nfs41_sequence_process(task, &lrp->res.seq_res)) 10022 return; 10023 10024 if (task->tk_rpc_status == -ETIMEDOUT) { 10025 lrp->rpc_status = -EAGAIN; 10026 lrp->res.lrs_present = 0; 10027 return; 10028 } 10029 /* 10030 * Was there an RPC level error? Assume the call succeeded, 10031 * and that we need to release the layout 10032 */ 10033 if (task->tk_rpc_status != 0 && RPC_WAS_SENT(task)) { 10034 lrp->res.lrs_present = 0; 10035 return; 10036 } 10037 10038 server = NFS_SERVER(lrp->args.inode); 10039 switch (task->tk_status) { 10040 case -NFS4ERR_OLD_STATEID: 10041 if (nfs4_layout_refresh_old_stateid(&lrp->args.stateid, 10042 &lrp->args.range, 10043 lrp->args.inode)) 10044 goto out_restart; 10045 fallthrough; 10046 default: 10047 task->tk_status = 0; 10048 lrp->res.lrs_present = 0; 10049 fallthrough; 10050 case 0: 10051 break; 10052 case -NFS4ERR_BADSESSION: 10053 case -NFS4ERR_DEADSESSION: 10054 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 10055 nfs4_schedule_session_recovery(server->nfs_client->cl_session, 10056 task->tk_status); 10057 lrp->res.lrs_present = 0; 10058 lrp->rpc_status = -EAGAIN; 10059 task->tk_status = 0; 10060 break; 10061 case -NFS4ERR_DELAY: 10062 if (nfs4_async_handle_error(task, server, NULL, NULL) == 10063 -EAGAIN) 10064 goto out_restart; 10065 lrp->res.lrs_present = 0; 10066 break; 10067 } 10068 return; 10069 out_restart: 10070 task->tk_status = 0; 10071 nfs4_sequence_free_slot(&lrp->res.seq_res); 10072 rpc_restart_call_prepare(task); 10073 } 10074 10075 static void nfs4_layoutreturn_release(void *calldata) 10076 { 10077 struct nfs4_layoutreturn *lrp = calldata; 10078 struct pnfs_layout_hdr *lo = lrp->args.layout; 10079 10080 if (lrp->rpc_status == 0 || !lrp->inode) 10081 pnfs_layoutreturn_free_lsegs( 10082 lo, &lrp->args.stateid, &lrp->args.range, 10083 lrp->res.lrs_present ? 
&lrp->res.stateid : NULL); 10084 else 10085 pnfs_layoutreturn_retry_later(lo, &lrp->args.stateid, 10086 &lrp->args.range); 10087 nfs4_sequence_free_slot(&lrp->res.seq_res); 10088 if (lrp->ld_private.ops && lrp->ld_private.ops->free) 10089 lrp->ld_private.ops->free(&lrp->ld_private); 10090 pnfs_put_layout_hdr(lrp->args.layout); 10091 nfs_iput_and_deactive(lrp->inode); 10092 put_cred(lrp->cred); 10093 kfree(calldata); 10094 } 10095 10096 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = { 10097 .rpc_call_prepare = nfs4_layoutreturn_prepare, 10098 .rpc_call_done = nfs4_layoutreturn_done, 10099 .rpc_release = nfs4_layoutreturn_release, 10100 }; 10101 10102 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, unsigned int flags) 10103 { 10104 struct rpc_task *task; 10105 struct rpc_message msg = { 10106 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN], 10107 .rpc_argp = &lrp->args, 10108 .rpc_resp = &lrp->res, 10109 .rpc_cred = lrp->cred, 10110 }; 10111 struct rpc_task_setup task_setup_data = { 10112 .rpc_client = NFS_SERVER(lrp->args.inode)->client, 10113 .rpc_message = &msg, 10114 .callback_ops = &nfs4_layoutreturn_call_ops, 10115 .callback_data = lrp, 10116 .flags = RPC_TASK_MOVEABLE, 10117 }; 10118 int status = 0; 10119 10120 nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client, 10121 NFS_SP4_MACH_CRED_PNFS_CLEANUP, 10122 &task_setup_data.rpc_client, &msg); 10123 10124 lrp->inode = nfs_igrab_and_active(lrp->args.inode); 10125 if (flags & PNFS_FL_LAYOUTRETURN_ASYNC) { 10126 if (!lrp->inode) { 10127 nfs4_layoutreturn_release(lrp); 10128 return -EAGAIN; 10129 } 10130 task_setup_data.flags |= RPC_TASK_ASYNC; 10131 } 10132 if (!lrp->inode) 10133 flags |= PNFS_FL_LAYOUTRETURN_PRIVILEGED; 10134 if (flags & PNFS_FL_LAYOUTRETURN_PRIVILEGED) 10135 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1, 10136 1); 10137 else 10138 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1, 10139 0); 10140 task = rpc_run_task(&task_setup_data); 10141 if (IS_ERR(task)) 10142 return PTR_ERR(task); 10143 if (!(flags & PNFS_FL_LAYOUTRETURN_ASYNC)) 10144 status = task->tk_status; 10145 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status); 10146 dprintk("<-- %s status=%d\n", __func__, status); 10147 rpc_put_task(task); 10148 return status; 10149 } 10150 10151 static int 10152 _nfs4_proc_getdeviceinfo(struct nfs_server *server, 10153 struct pnfs_device *pdev, 10154 const struct cred *cred) 10155 { 10156 struct nfs4_getdeviceinfo_args args = { 10157 .pdev = pdev, 10158 .notify_types = NOTIFY_DEVICEID4_CHANGE | 10159 NOTIFY_DEVICEID4_DELETE, 10160 }; 10161 struct nfs4_getdeviceinfo_res res = { 10162 .pdev = pdev, 10163 }; 10164 struct rpc_message msg = { 10165 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO], 10166 .rpc_argp = &args, 10167 .rpc_resp = &res, 10168 .rpc_cred = cred, 10169 }; 10170 int status; 10171 10172 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 10173 if (res.notification & ~args.notify_types) 10174 dprintk("%s: unsupported notification\n", __func__); 10175 if (res.notification != args.notify_types) 10176 pdev->nocache = 1; 10177 10178 trace_nfs4_getdeviceinfo(server, &pdev->dev_id, status); 10179 10180 dprintk("<-- %s status=%d\n", __func__, status); 10181 10182 return status; 10183 } 10184 10185 int nfs4_proc_getdeviceinfo(struct nfs_server *server, 10186 struct pnfs_device *pdev, 10187 const struct cred *cred) 10188 { 10189 struct nfs4_exception exception = { }; 10190 int err; 10191 
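	/*
	 * Standard NFSv4 retry loop: re-issue GETDEVICEINFO for as long as
	 * nfs4_handle_exception() classifies the error as retriable (for
	 * example NFS4ERR_DELAY) and sets exception.retry; the final status
	 * is returned once the exception handler leaves retry cleared.
	 */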
	do {
		err = nfs4_handle_exception(server,
				_nfs4_proc_getdeviceinfo(server, pdev, cred),
				&exception);
	} while (exception.retry);
	return err;
}
EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);

static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutcommit_data *data = calldata;
	struct nfs_server *server = NFS_SERVER(data->args.inode);

	nfs4_setup_sequence(server->nfs_client,
			&data->args.seq_args,
			&data->res.seq_res,
			task);
}

static void
nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutcommit_data *data = calldata;
	struct nfs_server *server = NFS_SERVER(data->args.inode);

	if (!nfs41_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) { /* Just ignore these failures */
	case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
	case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
	case -NFS4ERR_BADLAYOUT: /* no layout */
	case -NFS4ERR_GRACE: /* loca_reclaim is always false */
		task->tk_status = 0;
		break;
	case 0:
		break;
	default:
		if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
			rpc_restart_call_prepare(task);
			return;
		}
	}
}

static void nfs4_layoutcommit_release(void *calldata)
{
	struct nfs4_layoutcommit_data *data = calldata;

	pnfs_cleanup_layoutcommit(data);
	nfs_post_op_update_inode_force_wcc(data->args.inode,
			data->res.fattr);
	put_cred(data->cred);
	nfs_iput_and_deactive(data->inode);
	kfree(data);
}

static const struct rpc_call_ops nfs4_layoutcommit_ops = {
	.rpc_call_prepare = nfs4_layoutcommit_prepare,
	.rpc_call_done = nfs4_layoutcommit_done,
	.rpc_release = nfs4_layoutcommit_release,
};

int
nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = NFS_CLIENT(data->args.inode),
		.rpc_message = &msg,
		.callback_ops = &nfs4_layoutcommit_ops,
		.callback_data = data,
		.flags = RPC_TASK_MOVEABLE,
	};
	struct rpc_task *task;
	int status = 0;

	dprintk("NFS: initiating layoutcommit call. sync %d "
		"lbw: %llu inode %lu\n", sync,
		data->args.lastbytewritten,
		data->args.inode->i_ino);

	if (!sync) {
		data->inode = nfs_igrab_and_active(data->args.inode);
		if (data->inode == NULL) {
			nfs4_layoutcommit_release(data);
			return -EAGAIN;
		}
		task_setup_data.flags = RPC_TASK_ASYNC;
	}
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	if (sync)
		status = task->tk_status;
	trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status);
	dprintk("%s: status %d\n", __func__, status);
	rpc_put_task(task);
	return status;
}

/*
 * Use the state management nfs_client cl_rpcclient, which uses krb5i (if
 * possible) as per the RFC3530bis and RFC5661 Security Considerations
 * sections.
 */
static int
_nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *info,
		struct nfs4_secinfo_flavors *flavors, bool use_integrity)
{
	struct nfs41_secinfo_no_name_args args = {
		.style = SECINFO_STYLE_CURRENT_FH,
	};
	struct nfs4_secinfo_res res = {
		.flavors = flavors,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	struct nfs4_call_sync_data data = {
		.seq_server = server,
		.seq_args = &args.seq_args,
		.seq_res = &res.seq_res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = server->nfs_client->cl_mvops->call_sync_ops,
		.callback_data = &data,
		.flags = RPC_TASK_NO_ROUND_ROBIN,
	};
	const struct cred *cred = NULL;
	int status;

	if (use_integrity) {
		task_setup.rpc_client = server->nfs_client->cl_rpcclient;

		cred = nfs4_get_clid_cred(server->nfs_client);
		msg.rpc_cred = cred;
	}

	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
	status = nfs4_call_sync_custom(&task_setup);
	dprintk("<-- %s status=%d\n", __func__, status);

	put_cred(cred);

	return status;
}

static int
nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
{
	struct nfs4_exception exception = {
		.interruptible = true,
	};
	int err;
	do {
		/* first try using integrity protection */
		err = -NFS4ERR_WRONGSEC;

		/* try to use integrity protection with machine cred */
		if (_nfs4_is_integrity_protected(server->nfs_client))
			err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
							  flavors, true);

		/*
		 * if unable to use integrity protection, or SECINFO with
		 * integrity protection returns NFS4ERR_WRONGSEC (which is
		 * disallowed by spec, but exists in deployed servers) use
		 * the current filesystem's rpc_client and the user cred.
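		 * The resulting probe order is therefore: (1) SECINFO_NO_NAME
		 * over the integrity-protected state-management rpc_client
		 * with the machine credential, then (2) a retry over the
		 * filesystem's own rpc_client with the user credential; if
		 * both end in NFS4ERR_WRONGSEC or ENOTSUPP, a caller such as
		 * nfs41_find_root_sec() below falls back to the
		 * "guess and check" nfs4_find_root_sec() probing.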
10374 */ 10375 if (err == -NFS4ERR_WRONGSEC) 10376 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, 10377 flavors, false); 10378 10379 switch (err) { 10380 case 0: 10381 case -NFS4ERR_WRONGSEC: 10382 case -ENOTSUPP: 10383 goto out; 10384 default: 10385 err = nfs4_handle_exception(server, err, &exception); 10386 } 10387 } while (exception.retry); 10388 out: 10389 return err; 10390 } 10391 10392 static int 10393 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 10394 struct nfs_fsinfo *info) 10395 { 10396 int err; 10397 struct page *page; 10398 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR; 10399 struct nfs4_secinfo_flavors *flavors; 10400 struct nfs4_secinfo4 *secinfo; 10401 int i; 10402 10403 page = alloc_page(GFP_KERNEL); 10404 if (!page) { 10405 err = -ENOMEM; 10406 goto out; 10407 } 10408 10409 flavors = page_address(page); 10410 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors); 10411 10412 /* 10413 * Fall back on "guess and check" method if 10414 * the server doesn't support SECINFO_NO_NAME 10415 */ 10416 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) { 10417 err = nfs4_find_root_sec(server, fhandle, info); 10418 goto out_freepage; 10419 } 10420 if (err) 10421 goto out_freepage; 10422 10423 for (i = 0; i < flavors->num_flavors; i++) { 10424 secinfo = &flavors->flavors[i]; 10425 10426 switch (secinfo->flavor) { 10427 case RPC_AUTH_NULL: 10428 case RPC_AUTH_UNIX: 10429 case RPC_AUTH_GSS: 10430 flavor = rpcauth_get_pseudoflavor(secinfo->flavor, 10431 &secinfo->flavor_info); 10432 break; 10433 default: 10434 flavor = RPC_AUTH_MAXFLAVOR; 10435 break; 10436 } 10437 10438 if (!nfs_auth_info_match(&server->auth_info, flavor)) 10439 flavor = RPC_AUTH_MAXFLAVOR; 10440 10441 if (flavor != RPC_AUTH_MAXFLAVOR) { 10442 err = nfs4_lookup_root_sec(server, fhandle, 10443 info, flavor); 10444 if (!err) 10445 break; 10446 } 10447 } 10448 10449 if (flavor == RPC_AUTH_MAXFLAVOR) 10450 err = -EPERM; 10451 10452 out_freepage: 10453 put_page(page); 10454 if (err == -EACCES) 10455 return -EPERM; 10456 out: 10457 return err; 10458 } 10459 10460 static int _nfs41_test_stateid(struct nfs_server *server, 10461 const nfs4_stateid *stateid, 10462 const struct cred *cred) 10463 { 10464 int status; 10465 struct nfs41_test_stateid_args args = { 10466 .stateid = *stateid, 10467 }; 10468 struct nfs41_test_stateid_res res; 10469 struct rpc_message msg = { 10470 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID], 10471 .rpc_argp = &args, 10472 .rpc_resp = &res, 10473 .rpc_cred = cred, 10474 }; 10475 struct rpc_clnt *rpc_client = server->client; 10476 10477 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 10478 &rpc_client, &msg); 10479 10480 dprintk("NFS call test_stateid %p\n", stateid); 10481 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 10482 status = nfs4_call_sync_sequence(rpc_client, server, &msg, 10483 &args.seq_args, &res.seq_res); 10484 if (status != NFS_OK) { 10485 dprintk("NFS reply test_stateid: failed, %d\n", status); 10486 return status; 10487 } 10488 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status); 10489 return -res.status; 10490 } 10491 10492 static void nfs4_handle_delay_or_session_error(struct nfs_server *server, 10493 int err, struct nfs4_exception *exception) 10494 { 10495 exception->retry = 0; 10496 switch(err) { 10497 case -NFS4ERR_DELAY: 10498 case -NFS4ERR_RETRY_UNCACHED_REP: 10499 nfs4_handle_exception(server, err, exception); 10500 break; 10501 case -NFS4ERR_BADSESSION: 10502 case -NFS4ERR_BADSLOT: 
10503 case -NFS4ERR_BAD_HIGH_SLOT: 10504 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 10505 case -NFS4ERR_DEADSESSION: 10506 nfs4_do_handle_exception(server, err, exception); 10507 } 10508 } 10509 10510 /** 10511 * nfs41_test_stateid - perform a TEST_STATEID operation 10512 * 10513 * @server: server / transport on which to perform the operation 10514 * @stateid: state ID to test 10515 * @cred: credential 10516 * 10517 * Returns NFS_OK if the server recognizes that "stateid" is valid. 10518 * Otherwise a negative NFS4ERR value is returned if the operation 10519 * failed or the state ID is not currently valid. 10520 */ 10521 static int nfs41_test_stateid(struct nfs_server *server, 10522 const nfs4_stateid *stateid, 10523 const struct cred *cred) 10524 { 10525 struct nfs4_exception exception = { 10526 .interruptible = true, 10527 }; 10528 int err; 10529 do { 10530 err = _nfs41_test_stateid(server, stateid, cred); 10531 nfs4_handle_delay_or_session_error(server, err, &exception); 10532 } while (exception.retry); 10533 return err; 10534 } 10535 10536 struct nfs_free_stateid_data { 10537 struct nfs_server *server; 10538 struct nfs41_free_stateid_args args; 10539 struct nfs41_free_stateid_res res; 10540 }; 10541 10542 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata) 10543 { 10544 struct nfs_free_stateid_data *data = calldata; 10545 nfs4_setup_sequence(data->server->nfs_client, 10546 &data->args.seq_args, 10547 &data->res.seq_res, 10548 task); 10549 } 10550 10551 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata) 10552 { 10553 struct nfs_free_stateid_data *data = calldata; 10554 10555 nfs41_sequence_done(task, &data->res.seq_res); 10556 10557 switch (task->tk_status) { 10558 case -NFS4ERR_DELAY: 10559 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN) 10560 rpc_restart_call_prepare(task); 10561 } 10562 } 10563 10564 static void nfs41_free_stateid_release(void *calldata) 10565 { 10566 struct nfs_free_stateid_data *data = calldata; 10567 struct nfs_client *clp = data->server->nfs_client; 10568 10569 nfs_put_client(clp); 10570 kfree(calldata); 10571 } 10572 10573 static const struct rpc_call_ops nfs41_free_stateid_ops = { 10574 .rpc_call_prepare = nfs41_free_stateid_prepare, 10575 .rpc_call_done = nfs41_free_stateid_done, 10576 .rpc_release = nfs41_free_stateid_release, 10577 }; 10578 10579 /** 10580 * nfs41_free_stateid - perform a FREE_STATEID operation 10581 * 10582 * @server: server / transport on which to perform the operation 10583 * @stateid: state ID to release 10584 * @cred: credential 10585 * @privileged: set to true if this call needs to be privileged 10586 * 10587 * Note: this function is always asynchronous. 
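 *
 * Minimal usage sketch, mirroring nfs41_free_lock_state() below (the RPC is
 * fired off asynchronously and its task reference dropped immediately):
 *
 *	nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);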
10588 */ 10589 static int nfs41_free_stateid(struct nfs_server *server, 10590 const nfs4_stateid *stateid, 10591 const struct cred *cred, 10592 bool privileged) 10593 { 10594 struct rpc_message msg = { 10595 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID], 10596 .rpc_cred = cred, 10597 }; 10598 struct rpc_task_setup task_setup = { 10599 .rpc_client = server->client, 10600 .rpc_message = &msg, 10601 .callback_ops = &nfs41_free_stateid_ops, 10602 .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE, 10603 }; 10604 struct nfs_free_stateid_data *data; 10605 struct rpc_task *task; 10606 struct nfs_client *clp = server->nfs_client; 10607 10608 if (!refcount_inc_not_zero(&clp->cl_count)) 10609 return -EIO; 10610 10611 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 10612 &task_setup.rpc_client, &msg); 10613 10614 dprintk("NFS call free_stateid %p\n", stateid); 10615 data = kmalloc(sizeof(*data), GFP_KERNEL); 10616 if (!data) 10617 return -ENOMEM; 10618 data->server = server; 10619 nfs4_stateid_copy(&data->args.stateid, stateid); 10620 10621 task_setup.callback_data = data; 10622 10623 msg.rpc_argp = &data->args; 10624 msg.rpc_resp = &data->res; 10625 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, privileged); 10626 task = rpc_run_task(&task_setup); 10627 if (IS_ERR(task)) 10628 return PTR_ERR(task); 10629 rpc_put_task(task); 10630 return 0; 10631 } 10632 10633 static void 10634 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp) 10635 { 10636 const struct cred *cred = lsp->ls_state->owner->so_cred; 10637 10638 nfs41_free_stateid(server, &lsp->ls_stateid, cred, false); 10639 nfs4_free_lock_state(server, lsp); 10640 } 10641 10642 static bool nfs41_match_stateid(const nfs4_stateid *s1, 10643 const nfs4_stateid *s2) 10644 { 10645 if (s1->type != s2->type) 10646 return false; 10647 10648 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0) 10649 return false; 10650 10651 if (s1->seqid == s2->seqid) 10652 return true; 10653 10654 return s1->seqid == 0 || s2->seqid == 0; 10655 } 10656 10657 #endif /* CONFIG_NFS_V4_1 */ 10658 10659 static bool nfs4_match_stateid(const nfs4_stateid *s1, 10660 const nfs4_stateid *s2) 10661 { 10662 return nfs4_stateid_match(s1, s2); 10663 } 10664 10665 10666 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = { 10667 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 10668 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 10669 .recover_open = nfs4_open_reclaim, 10670 .recover_lock = nfs4_lock_reclaim, 10671 .establish_clid = nfs4_init_clientid, 10672 .detect_trunking = nfs40_discover_server_trunking, 10673 }; 10674 10675 #if defined(CONFIG_NFS_V4_1) 10676 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { 10677 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 10678 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 10679 .recover_open = nfs4_open_reclaim, 10680 .recover_lock = nfs4_lock_reclaim, 10681 .establish_clid = nfs41_init_clientid, 10682 .reclaim_complete = nfs41_proc_reclaim_complete, 10683 .detect_trunking = nfs41_discover_server_trunking, 10684 }; 10685 #endif /* CONFIG_NFS_V4_1 */ 10686 10687 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { 10688 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 10689 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 10690 .recover_open = nfs40_open_expired, 10691 .recover_lock = nfs4_lock_expired, 10692 .establish_clid = nfs4_init_clientid, 10693 }; 10694 10695 #if defined(CONFIG_NFS_V4_1) 10696 static const struct 
nfs4_state_recovery_ops nfs41_nograce_recovery_ops = { 10697 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 10698 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 10699 .recover_open = nfs41_open_expired, 10700 .recover_lock = nfs41_lock_expired, 10701 .establish_clid = nfs41_init_clientid, 10702 }; 10703 #endif /* CONFIG_NFS_V4_1 */ 10704 10705 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = { 10706 .sched_state_renewal = nfs4_proc_async_renew, 10707 .get_state_renewal_cred = nfs4_get_renew_cred, 10708 .renew_lease = nfs4_proc_renew, 10709 }; 10710 10711 #if defined(CONFIG_NFS_V4_1) 10712 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = { 10713 .sched_state_renewal = nfs41_proc_async_sequence, 10714 .get_state_renewal_cred = nfs4_get_machine_cred, 10715 .renew_lease = nfs4_proc_sequence, 10716 }; 10717 #endif 10718 10719 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = { 10720 .get_locations = _nfs40_proc_get_locations, 10721 .fsid_present = _nfs40_proc_fsid_present, 10722 }; 10723 10724 #if defined(CONFIG_NFS_V4_1) 10725 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = { 10726 .get_locations = _nfs41_proc_get_locations, 10727 .fsid_present = _nfs41_proc_fsid_present, 10728 }; 10729 #endif /* CONFIG_NFS_V4_1 */ 10730 10731 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = { 10732 .minor_version = 0, 10733 .init_caps = NFS_CAP_READDIRPLUS 10734 | NFS_CAP_ATOMIC_OPEN 10735 | NFS_CAP_POSIX_LOCK, 10736 .init_client = nfs40_init_client, 10737 .shutdown_client = nfs40_shutdown_client, 10738 .match_stateid = nfs4_match_stateid, 10739 .find_root_sec = nfs4_find_root_sec, 10740 .free_lock_state = nfs4_release_lockowner, 10741 .test_and_free_expired = nfs40_test_and_free_expired_stateid, 10742 .alloc_seqid = nfs_alloc_seqid, 10743 .call_sync_ops = &nfs40_call_sync_ops, 10744 .reboot_recovery_ops = &nfs40_reboot_recovery_ops, 10745 .nograce_recovery_ops = &nfs40_nograce_recovery_ops, 10746 .state_renewal_ops = &nfs40_state_renewal_ops, 10747 .mig_recovery_ops = &nfs40_mig_recovery_ops, 10748 }; 10749 10750 #if defined(CONFIG_NFS_V4_1) 10751 static struct nfs_seqid * 10752 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2) 10753 { 10754 return NULL; 10755 } 10756 10757 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { 10758 .minor_version = 1, 10759 .init_caps = NFS_CAP_READDIRPLUS 10760 | NFS_CAP_ATOMIC_OPEN 10761 | NFS_CAP_POSIX_LOCK 10762 | NFS_CAP_STATEID_NFSV41 10763 | NFS_CAP_ATOMIC_OPEN_V1 10764 | NFS_CAP_LGOPEN 10765 | NFS_CAP_MOVEABLE, 10766 .init_client = nfs41_init_client, 10767 .shutdown_client = nfs41_shutdown_client, 10768 .match_stateid = nfs41_match_stateid, 10769 .find_root_sec = nfs41_find_root_sec, 10770 .free_lock_state = nfs41_free_lock_state, 10771 .test_and_free_expired = nfs41_test_and_free_expired_stateid, 10772 .alloc_seqid = nfs_alloc_no_seqid, 10773 .session_trunk = nfs4_test_session_trunk, 10774 .call_sync_ops = &nfs41_call_sync_ops, 10775 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 10776 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 10777 .state_renewal_ops = &nfs41_state_renewal_ops, 10778 .mig_recovery_ops = &nfs41_mig_recovery_ops, 10779 }; 10780 #endif 10781 10782 #if defined(CONFIG_NFS_V4_2) 10783 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = { 10784 .minor_version = 2, 10785 .init_caps = NFS_CAP_READDIRPLUS 10786 | NFS_CAP_ATOMIC_OPEN 10787 | NFS_CAP_POSIX_LOCK 10788 | NFS_CAP_STATEID_NFSV41 10789 | 
NFS_CAP_ATOMIC_OPEN_V1 10790 | NFS_CAP_LGOPEN 10791 | NFS_CAP_ALLOCATE 10792 | NFS_CAP_COPY 10793 | NFS_CAP_OFFLOAD_CANCEL 10794 | NFS_CAP_COPY_NOTIFY 10795 | NFS_CAP_DEALLOCATE 10796 | NFS_CAP_SEEK 10797 | NFS_CAP_LAYOUTSTATS 10798 | NFS_CAP_CLONE 10799 | NFS_CAP_LAYOUTERROR 10800 | NFS_CAP_READ_PLUS 10801 | NFS_CAP_MOVEABLE, 10802 .init_client = nfs41_init_client, 10803 .shutdown_client = nfs41_shutdown_client, 10804 .match_stateid = nfs41_match_stateid, 10805 .find_root_sec = nfs41_find_root_sec, 10806 .free_lock_state = nfs41_free_lock_state, 10807 .call_sync_ops = &nfs41_call_sync_ops, 10808 .test_and_free_expired = nfs41_test_and_free_expired_stateid, 10809 .alloc_seqid = nfs_alloc_no_seqid, 10810 .session_trunk = nfs4_test_session_trunk, 10811 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 10812 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 10813 .state_renewal_ops = &nfs41_state_renewal_ops, 10814 .mig_recovery_ops = &nfs41_mig_recovery_ops, 10815 }; 10816 #endif 10817 10818 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = { 10819 [0] = &nfs_v4_0_minor_ops, 10820 #if defined(CONFIG_NFS_V4_1) 10821 [1] = &nfs_v4_1_minor_ops, 10822 #endif 10823 #if defined(CONFIG_NFS_V4_2) 10824 [2] = &nfs_v4_2_minor_ops, 10825 #endif 10826 }; 10827 10828 static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size) 10829 { 10830 ssize_t error, error2, error3; 10831 size_t left = size; 10832 10833 error = generic_listxattr(dentry, list, left); 10834 if (error < 0) 10835 return error; 10836 if (list) { 10837 list += error; 10838 left -= error; 10839 } 10840 10841 error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, left); 10842 if (error2 < 0) 10843 return error2; 10844 10845 if (list) { 10846 list += error2; 10847 left -= error2; 10848 } 10849 10850 error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, left); 10851 if (error3 < 0) 10852 return error3; 10853 10854 error += error2 + error3; 10855 if (size && error > size) 10856 return -ERANGE; 10857 return error; 10858 } 10859 10860 static void nfs4_enable_swap(struct inode *inode) 10861 { 10862 /* The state manager thread must always be running. 10863 * It will notice the client is a swapper, and stay put. 10864 */ 10865 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 10866 10867 nfs4_schedule_state_manager(clp); 10868 } 10869 10870 static void nfs4_disable_swap(struct inode *inode) 10871 { 10872 /* The state manager thread will now exit once it is 10873 * woken. 
10874 */ 10875 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 10876 10877 set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state); 10878 clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state); 10879 wake_up_var(&clp->cl_state); 10880 } 10881 10882 static const struct inode_operations nfs4_dir_inode_operations = { 10883 .create = nfs_create, 10884 .lookup = nfs_lookup, 10885 .atomic_open = nfs_atomic_open, 10886 .link = nfs_link, 10887 .unlink = nfs_unlink, 10888 .symlink = nfs_symlink, 10889 .mkdir = nfs_mkdir, 10890 .rmdir = nfs_rmdir, 10891 .mknod = nfs_mknod, 10892 .rename = nfs_rename, 10893 .permission = nfs_permission, 10894 .getattr = nfs_getattr, 10895 .setattr = nfs_setattr, 10896 .listxattr = nfs4_listxattr, 10897 }; 10898 10899 static const struct inode_operations nfs4_file_inode_operations = { 10900 .permission = nfs_permission, 10901 .getattr = nfs_getattr, 10902 .setattr = nfs_setattr, 10903 .listxattr = nfs4_listxattr, 10904 }; 10905 10906 const struct nfs_rpc_ops nfs_v4_clientops = { 10907 .version = 4, /* protocol version */ 10908 .dentry_ops = &nfs4_dentry_operations, 10909 .dir_inode_ops = &nfs4_dir_inode_operations, 10910 .file_inode_ops = &nfs4_file_inode_operations, 10911 .file_ops = &nfs4_file_operations, 10912 .getroot = nfs4_proc_get_root, 10913 .submount = nfs4_submount, 10914 .try_get_tree = nfs4_try_get_tree, 10915 .getattr = nfs4_proc_getattr, 10916 .setattr = nfs4_proc_setattr, 10917 .lookup = nfs4_proc_lookup, 10918 .lookupp = nfs4_proc_lookupp, 10919 .access = nfs4_proc_access, 10920 .readlink = nfs4_proc_readlink, 10921 .create = nfs4_proc_create, 10922 .remove = nfs4_proc_remove, 10923 .unlink_setup = nfs4_proc_unlink_setup, 10924 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare, 10925 .unlink_done = nfs4_proc_unlink_done, 10926 .rename_setup = nfs4_proc_rename_setup, 10927 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare, 10928 .rename_done = nfs4_proc_rename_done, 10929 .link = nfs4_proc_link, 10930 .symlink = nfs4_proc_symlink, 10931 .mkdir = nfs4_proc_mkdir, 10932 .rmdir = nfs4_proc_rmdir, 10933 .readdir = nfs4_proc_readdir, 10934 .mknod = nfs4_proc_mknod, 10935 .statfs = nfs4_proc_statfs, 10936 .fsinfo = nfs4_proc_fsinfo, 10937 .pathconf = nfs4_proc_pathconf, 10938 .set_capabilities = nfs4_server_capabilities, 10939 .decode_dirent = nfs4_decode_dirent, 10940 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare, 10941 .read_setup = nfs4_proc_read_setup, 10942 .read_done = nfs4_read_done, 10943 .write_setup = nfs4_proc_write_setup, 10944 .write_done = nfs4_write_done, 10945 .commit_setup = nfs4_proc_commit_setup, 10946 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare, 10947 .commit_done = nfs4_commit_done, 10948 .lock = nfs4_proc_lock, 10949 .clear_acl_cache = nfs4_zap_acl_attr, 10950 .close_context = nfs4_close_context, 10951 .open_context = nfs4_atomic_open, 10952 .have_delegation = nfs4_have_delegation, 10953 .return_delegation = nfs4_inode_return_delegation, 10954 .alloc_client = nfs4_alloc_client, 10955 .init_client = nfs4_init_client, 10956 .free_client = nfs4_free_client, 10957 .create_server = nfs4_create_server, 10958 .clone_server = nfs_clone_server, 10959 .discover_trunking = nfs4_discover_trunking, 10960 .enable_swap = nfs4_enable_swap, 10961 .disable_swap = nfs4_disable_swap, 10962 }; 10963 10964 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { 10965 .name = XATTR_NAME_NFSV4_ACL, 10966 .list = nfs4_xattr_list_nfs4_acl, 10967 .get = nfs4_xattr_get_nfs4_acl, 10968 .set = nfs4_xattr_set_nfs4_acl, 10969 }; 10970 10971 #if 
defined(CONFIG_NFS_V4_1) 10972 static const struct xattr_handler nfs4_xattr_nfs4_dacl_handler = { 10973 .name = XATTR_NAME_NFSV4_DACL, 10974 .list = nfs4_xattr_list_nfs4_dacl, 10975 .get = nfs4_xattr_get_nfs4_dacl, 10976 .set = nfs4_xattr_set_nfs4_dacl, 10977 }; 10978 10979 static const struct xattr_handler nfs4_xattr_nfs4_sacl_handler = { 10980 .name = XATTR_NAME_NFSV4_SACL, 10981 .list = nfs4_xattr_list_nfs4_sacl, 10982 .get = nfs4_xattr_get_nfs4_sacl, 10983 .set = nfs4_xattr_set_nfs4_sacl, 10984 }; 10985 #endif 10986 10987 #ifdef CONFIG_NFS_V4_2 10988 static const struct xattr_handler nfs4_xattr_nfs4_user_handler = { 10989 .prefix = XATTR_USER_PREFIX, 10990 .get = nfs4_xattr_get_nfs4_user, 10991 .set = nfs4_xattr_set_nfs4_user, 10992 }; 10993 #endif 10994 10995 const struct xattr_handler * const nfs4_xattr_handlers[] = { 10996 &nfs4_xattr_nfs4_acl_handler, 10997 #if defined(CONFIG_NFS_V4_1) 10998 &nfs4_xattr_nfs4_dacl_handler, 10999 &nfs4_xattr_nfs4_sacl_handler, 11000 #endif 11001 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 11002 &nfs4_xattr_nfs4_label_handler, 11003 #endif 11004 #ifdef CONFIG_NFS_V4_2 11005 &nfs4_xattr_nfs4_user_handler, 11006 #endif 11007 NULL 11008 }; 11009
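
/*
 * Illustrative note (simplified sketch of the client-initialisation side,
 * which lives outside this file): nfs_v4_minor_ops[] above is indexed by the
 * mount's requested minor version, and the selected entry becomes
 * clp->cl_mvops, through which the recovery, renewal and migration ops
 * defined in this file are reached:
 *
 *	clp->cl_mvops = nfs_v4_minor_ops[minorversion];
 */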