/*
 * fs/nfs/nfs4proc.c
 *
 * Client-side procedure declarations for NFSv4.
 *
 * Copyright (c) 2002 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/iversion.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "callback.h"
#include "pnfs.h"
#include "netns.h"
#include "sysfs.h"
#include "nfs4idmap.h"
#include "nfs4session.h"
#include "fscache.h"
#include "nfs42.h"

#include "nfs4trace.h"

#define NFSDBG_FACILITY		NFSDBG_PROC

#define NFS4_BITMASK_SZ		3

#define NFS4_POLL_RETRY_MIN	(HZ/10)
#define NFS4_POLL_RETRY_MAX	(15*HZ)

/* file attributes which can be mapped to nfs attributes */
#define NFS4_VALID_ATTRS (ATTR_MODE \
	| ATTR_UID \
	| ATTR_GID \
	| ATTR_SIZE \
	| ATTR_ATIME \
	| ATTR_MTIME \
	| ATTR_CTIME \
	| ATTR_ATIME_SET \
	| ATTR_MTIME_SET)

struct nfs4_opendata;
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
			      struct nfs_fattr *fattr, struct inode *inode);
static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
			   struct nfs_fattr *fattr, struct iattr *sattr,
			   struct nfs_open_context *ctx, struct nfs4_label *ilabel);
#ifdef CONFIG_NFS_V4_1
static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
		const struct cred *cred,
		struct nfs4_slot *slot,
		bool is_privileged);
static int nfs41_test_stateid(struct nfs_server *, const nfs4_stateid *,
		const struct cred *);
static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *,
		const struct cred *, bool);
#endif

#ifdef CONFIG_NFS_V4_SECURITY_LABEL
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
	struct iattr *sattr, struct nfs4_label *label)
{
	struct lsm_context shim;
	int err;

	if (label == NULL)
		return NULL;

	if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
		return NULL;

	label->lfs = 0;
	label->pi = 0;
	label->len = 0;
	label->label = NULL;

	err = security_dentry_init_security(dentry, sattr->ia_mode,
				&dentry->d_name, NULL, &shim);
	if (err)
		return NULL;

	label->lsmid = shim.id;
	label->label = shim.context;
	label->len = shim.len;
	return label;
}
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{
	struct lsm_context shim;

	if (label) {
		shim.context = label->label;
		shim.len = label->len;
		shim.id = label->lsmid;
		security_release_secctx(&shim);
	}
}
static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{
	if (label)
		return server->attr_bitmask;

	return server->attr_bitmask_nl;
}
#else
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
	struct iattr *sattr, struct nfs4_label *l)
{ return NULL; }
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{ return; }
static inline u32 *
nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{ return server->attr_bitmask; }
#endif

/* Prevent leaks of NFSv4 errors into userland */
static int nfs4_map_errors(int err)
{
	if (err >= -1000)
		return err;
	switch (err) {
	case -NFS4ERR_RESOURCE:
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
	case -NFS4ERR_RETURNCONFLICT:
		return -EREMOTEIO;
	case -NFS4ERR_WRONGSEC:
	case -NFS4ERR_WRONG_CRED:
		return -EPERM;
	case -NFS4ERR_BADOWNER:
	case -NFS4ERR_BADNAME:
		return -EINVAL;
	case -NFS4ERR_SHARE_DENIED:
		return -EACCES;
	case -NFS4ERR_MINOR_VERS_MISMATCH:
		return -EPROTONOSUPPORT;
	case -NFS4ERR_FILE_OPEN:
		return -EBUSY;
	case -NFS4ERR_NOT_SAME:
		return -ENOTSYNC;
	case -ENETDOWN:
	case -ENETUNREACH:
		break;
	default:
		dprintk("%s could not handle NFSv4 error %d\n",
				__func__, -err);
		break;
	}
	return -EIO;
}

/*
 * This is our standard bitmap for GETATTR requests.
 */
const u32 nfs4_fattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_CREATE
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	FATTR4_WORD2_SECURITY_LABEL
#endif
};

static const u32 nfs4_pnfs_open_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_CREATE
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY,
	FATTR4_WORD2_MDSTHRESHOLD
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	| FATTR4_WORD2_SECURITY_LABEL
#endif
};

static const u32 nfs4_open_noattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_FILEID,
};

const u32 nfs4_statfs_bitmap[3] = {
	FATTR4_WORD0_FILES_AVAIL
	| FATTR4_WORD0_FILES_FREE
	| FATTR4_WORD0_FILES_TOTAL,
	FATTR4_WORD1_SPACE_AVAIL
	| FATTR4_WORD1_SPACE_FREE
	| FATTR4_WORD1_SPACE_TOTAL
};

const u32 nfs4_pathconf_bitmap[3] = {
	FATTR4_WORD0_MAXLINK
	| FATTR4_WORD0_MAXNAME,
	0
};

const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
			| FATTR4_WORD0_MAXREAD
			| FATTR4_WORD0_MAXWRITE
			| FATTR4_WORD0_LEASE_TIME,
			FATTR4_WORD1_TIME_DELTA
			| FATTR4_WORD1_FS_LAYOUT_TYPES,
			FATTR4_WORD2_LAYOUT_BLKSIZE
			| FATTR4_WORD2_CLONE_BLKSIZE
			| FATTR4_WORD2_CHANGE_ATTR_TYPE
			| FATTR4_WORD2_XATTR_SUPPORT
};

const u32 nfs4_fs_locations_bitmap[3] = {
	FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID
	| FATTR4_WORD0_FS_LOCATIONS,
	FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
};

static void nfs4_bitmap_copy_adjust(__u32 *dst, const __u32 *src,
				    struct inode *inode, unsigned long flags)
{
	unsigned long cache_validity;

	memcpy(dst, src, NFS4_BITMASK_SZ*sizeof(*dst));
	if (!inode || !nfs_have_read_or_write_delegation(inode))
		return;

	cache_validity = READ_ONCE(NFS_I(inode)->cache_validity) | flags;

	/* Remove the attributes over which we have full control */
	dst[1] &= ~FATTR4_WORD1_RAWDEV;
	if (!(cache_validity & NFS_INO_INVALID_SIZE))
		dst[0] &= ~FATTR4_WORD0_SIZE;

	if (!(cache_validity & NFS_INO_INVALID_CHANGE))
		dst[0] &= ~FATTR4_WORD0_CHANGE;

	if (!(cache_validity & NFS_INO_INVALID_MODE))
		dst[1] &= ~FATTR4_WORD1_MODE;
	if (!(cache_validity & NFS_INO_INVALID_OTHER))
		dst[1] &= ~(FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP);

	if (!(cache_validity & NFS_INO_INVALID_BTIME))
		dst[1] &= ~FATTR4_WORD1_TIME_CREATE;

	if (nfs_have_delegated_mtime(inode)) {
		if (!(cache_validity & NFS_INO_INVALID_ATIME))
			dst[1] &= ~(FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET);
		if (!(cache_validity & NFS_INO_INVALID_MTIME))
			dst[1] &= ~(FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET);
		if (!(cache_validity & NFS_INO_INVALID_CTIME))
			dst[1] &= ~(FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY_SET);
	} else if (nfs_have_delegated_atime(inode)) {
		if (!(cache_validity & NFS_INO_INVALID_ATIME))
			dst[1] &= ~(FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET);
	}
}

static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
		struct nfs4_readdir_arg *readdir)
{
	unsigned int attrs = FATTR4_WORD0_FILEID | FATTR4_WORD0_TYPE;
	__be32 *start, *p;

	if (cookie > 2) {
		readdir->cookie = cookie;
		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
		return;
	}

	readdir->cookie = 0;
	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
	if (cookie == 2)
		return;

	/*
	 * NFSv4 servers do not return entries for '.' and '..'
	 * Therefore, we fake these entries here. We let '.'
	 * have cookie 0 and '..' have cookie 1. Note that
	 * when talking to the server, we always send cookie 0
	 * instead of 1 or 2.
	 */
	start = p = kmap_atomic(*readdir->pages);

	if (cookie == 0) {
		*p++ = xdr_one;		/* next */
		*p++ = xdr_zero;	/* cookie, first word */
		*p++ = xdr_one;		/* cookie, second word */
		*p++ = xdr_one;		/* entry len */
		memcpy(p, ".\0\0\0", 4);	/* entry */
		p++;
		*p++ = xdr_one;		/* bitmap length */
		*p++ = htonl(attrs);	/* bitmap */
		*p++ = htonl(12);	/* attribute buffer length */
		*p++ = htonl(NF4DIR);
		p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
	}

	*p++ = xdr_one;		/* next */
	*p++ = xdr_zero;	/* cookie, first word */
	*p++ = xdr_two;		/* cookie, second word */
	*p++ = xdr_two;		/* entry len */
	memcpy(p, "..\0\0", 4);	/* entry */
	p++;
	*p++ = xdr_one;		/* bitmap length */
	*p++ = htonl(attrs);	/* bitmap */
	*p++ = htonl(12);	/* attribute buffer length */
	*p++ = htonl(NF4DIR);
	p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));

	readdir->pgbase = (char *)p - (char *)start;
	readdir->count -= readdir->pgbase;
	kunmap_atomic(start);
}

static void nfs4_fattr_set_prechange(struct nfs_fattr *fattr, u64 version)
{
	if (!(fattr->valid & NFS_ATTR_FATTR_PRECHANGE)) {
		fattr->pre_change_attr = version;
		fattr->valid |= NFS_ATTR_FATTR_PRECHANGE;
	}
}

static void nfs4_test_and_free_stateid(struct nfs_server *server,
		nfs4_stateid *stateid,
		const struct cred *cred)
{
	const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;

	ops->test_and_free_expired(server, stateid, cred);
}

static void __nfs4_free_revoked_stateid(struct nfs_server *server,
		nfs4_stateid *stateid,
		const struct cred *cred)
{
	stateid->type = NFS4_REVOKED_STATEID_TYPE;
	nfs4_test_and_free_stateid(server, stateid, cred);
}

static void nfs4_free_revoked_stateid(struct nfs_server *server,
		const nfs4_stateid *stateid,
		const struct cred *cred)
{
	nfs4_stateid tmp;

	nfs4_stateid_copy(&tmp, stateid);
	__nfs4_free_revoked_stateid(server, &tmp, cred);
}

static long nfs4_update_delay(long *timeout)
{
	long ret;
	if (!timeout)
		return NFS4_POLL_RETRY_MAX;
	if (*timeout <= 0)
		*timeout = NFS4_POLL_RETRY_MIN;
	if (*timeout > NFS4_POLL_RETRY_MAX)
		*timeout = NFS4_POLL_RETRY_MAX;
	ret = *timeout;
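	/* Exponential backoff: double the stored timeout for the next retry;
	 * the clamp above keeps it within NFS4_POLL_RETRY_MAX on reuse. */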
	*timeout <<= 1;
	return ret;
}

static int nfs4_delay_killable(long *timeout)
{
	might_sleep();

	if (unlikely(nfs_current_task_exiting()))
		return -EINTR;
	__set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
	schedule_timeout(nfs4_update_delay(timeout));
	if (!__fatal_signal_pending(current))
		return 0;
	return -EINTR;
}

static int nfs4_delay_interruptible(long *timeout)
{
	might_sleep();

	if (unlikely(nfs_current_task_exiting()))
		return -EINTR;
	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE_UNSAFE);
	schedule_timeout(nfs4_update_delay(timeout));
	if (!signal_pending(current))
		return 0;
	return __fatal_signal_pending(current) ? -EINTR : -ERESTARTSYS;
}

static int nfs4_delay(long *timeout, bool interruptible)
{
	if (interruptible)
		return nfs4_delay_interruptible(timeout);
	return nfs4_delay_killable(timeout);
}

static const nfs4_stateid *
nfs4_recoverable_stateid(const nfs4_stateid *stateid)
{
	if (!stateid)
		return NULL;
	switch (stateid->type) {
	case NFS4_OPEN_STATEID_TYPE:
	case NFS4_LOCK_STATEID_TYPE:
	case NFS4_DELEGATION_STATEID_TYPE:
		return stateid;
	default:
		break;
	}
	return NULL;
}

/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
static int nfs4_do_handle_exception(struct nfs_server *server,
		int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state *state = exception->state;
	const nfs4_stateid *stateid;
	struct inode *inode = exception->inode;
	int ret = errorcode;

	exception->delay = 0;
	exception->recovering = 0;
	exception->retry = 0;

	stateid = nfs4_recoverable_stateid(exception->stateid);
	if (stateid == NULL && state != NULL)
		stateid = nfs4_recoverable_stateid(&state->stateid);

	switch (errorcode) {
	case 0:
		return 0;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		if (inode != NULL && S_ISREG(inode->i_mode))
			pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_BAD_STATEID:
	case -NFS4ERR_PARTNER_NO_AUTH:
		if (inode != NULL && stateid != NULL) {
			nfs_inode_find_state_and_recover(inode,
					stateid);
			goto wait_on_recovery;
		}
		fallthrough;
	case -NFS4ERR_OPENMODE:
		if (inode) {
			int err;

			err = nfs_async_inode_return_delegation(inode,
					stateid);
			if (err == 0)
				goto wait_on_recovery;
			if (stateid != NULL && stateid->type == NFS4_DELEGATION_STATEID_TYPE) {
				exception->retry = 1;
				break;
			}
		}
		if (state == NULL)
			break;
		ret = nfs4_schedule_stateid_recovery(server, state);
		if (ret < 0)
			break;
		goto wait_on_recovery;
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_STALE_CLIENTID:
		nfs4_schedule_lease_recovery(clp);
		goto wait_on_recovery;
	case -NFS4ERR_MOVED:
		ret = nfs4_schedule_migration_recovery(server);
		if (ret < 0)
			break;
		goto wait_on_recovery;
	case -NFS4ERR_LEASE_MOVED:
		nfs4_schedule_lease_moved_recovery(clp);
		goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		/* Handled in nfs41_sequence_process() */
		goto wait_on_recovery;
#endif /* defined(CONFIG_NFS_V4_1) */
	case -NFS4ERR_FILE_OPEN:
		if (exception->timeout > HZ) {
			/* We have retried a decent amount, time to
			 * fail
			 */
			ret = -EBUSY;
			break;
		}
		fallthrough;
	case -NFS4ERR_DELAY:
		nfs_inc_server_stats(server, NFSIOS_DELAY);
		fallthrough;
	case -NFS4ERR_GRACE:
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
	case -NFS4ERR_RETURNCONFLICT:
		exception->delay = 1;
		return 0;

	case -NFS4ERR_RETRY_UNCACHED_REP:
	case -NFS4ERR_OLD_STATEID:
		exception->retry = 1;
		break;
	case -NFS4ERR_BADOWNER:
		/* The following works around a Linux server bug! */
	case -NFS4ERR_BADNAME:
		if (server->caps & NFS_CAP_UIDGID_NOMAP) {
			server->caps &= ~NFS_CAP_UIDGID_NOMAP;
			exception->retry = 1;
			printk(KERN_WARNING "NFS: v4 server %s "
					"does not accept raw "
					"uid/gids. "
					"Reenabling the idmapper.\n",
					server->nfs_client->cl_hostname);
		}
	}
	/* We failed to handle the error */
	return nfs4_map_errors(ret);
wait_on_recovery:
	exception->recovering = 1;
	return 0;
}

/*
 * Track the number of NFS4ERR_DELAY related retransmissions and return
 * EAGAIN if the 'softerr' mount option is set, and we've exceeded the limit
 * set by 'nfs_delay_retrans'.
 */
static int nfs4_exception_should_retrans(const struct nfs_server *server,
					 struct nfs4_exception *exception)
{
	if (server->flags & NFS_MOUNT_SOFTERR && nfs_delay_retrans >= 0) {
		if (exception->retrans++ >= (unsigned short)nfs_delay_retrans)
			return -EAGAIN;
	}
	return 0;
}

/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	int ret;

	ret = nfs4_do_handle_exception(server, errorcode, exception);
	if (exception->delay) {
		int ret2 = nfs4_exception_should_retrans(server, exception);
		if (ret2 < 0) {
			exception->retry = 0;
			return ret2;
		}
		ret = nfs4_delay(&exception->timeout,
				exception->interruptible);
		goto out_retry;
	}
	if (exception->recovering) {
		if (exception->task_is_privileged)
			return -EDEADLOCK;
		ret = nfs4_wait_clnt_recover(clp);
		if (test_bit(NFS_MIG_FAILED, &server->mig_status))
			return -EIO;
		goto out_retry;
	}
	return ret;
out_retry:
	if (ret == 0)
		exception->retry = 1;
	return ret;
}

static int
nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
		int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	int ret;

	if ((task->tk_rpc_status == -ENETDOWN ||
	     task->tk_rpc_status == -ENETUNREACH) &&
	    task->tk_flags & RPC_TASK_NETUNREACH_FATAL) {
		exception->delay = 0;
		exception->recovering = 0;
		exception->retry = 0;
		return -EIO;
	}

	ret = nfs4_do_handle_exception(server, errorcode, exception);
	if (exception->delay) {
		int ret2 = nfs4_exception_should_retrans(server, exception);
		if (ret2 < 0) {
			exception->retry = 0;
			return ret2;
		}
		rpc_delay(task, nfs4_update_delay(&exception->timeout));
		goto out_retry;
	}
	if (exception->recovering) {
		if (exception->task_is_privileged)
			return -EDEADLOCK;
		rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
		if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
			rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
		goto out_retry;
	}
	if (test_bit(NFS_MIG_FAILED, &server->mig_status))
		ret = -EIO;
	return ret;
out_retry:
	if (ret == 0) {
		exception->retry = 1;
		/*
		 * For NFS4ERR_MOVED, the client transport will need to
		 * be recomputed after migration recovery has completed.
		 */
		if (errorcode == -NFS4ERR_MOVED)
			rpc_task_release_transport(task);
	}
	return ret;
}

int
nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
			struct nfs4_state *state, long *timeout)
{
	struct nfs4_exception exception = {
		.state = state,
	};

	if (task->tk_status >= 0)
		return 0;
	if (timeout)
		exception.timeout = *timeout;
	task->tk_status = nfs4_async_handle_exception(task, server,
			task->tk_status,
			&exception);
	if (exception.delay && timeout)
		*timeout = exception.timeout;
	if (exception.retry)
		return -EAGAIN;
	return 0;
}

/*
 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
 * or 'false' otherwise.
 */
static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
{
	rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
	return (flavor == RPC_AUTH_GSS_KRB5I) || (flavor == RPC_AUTH_GSS_KRB5P);
}

static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
{
	spin_lock(&clp->cl_lock);
	if (time_before(clp->cl_last_renewal, timestamp))
		clp->cl_last_renewal = timestamp;
	spin_unlock(&clp->cl_lock);
}

static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
{
	struct nfs_client *clp = server->nfs_client;

	if (!nfs4_has_session(clp))
		do_renew_lease(clp, timestamp);
}

struct nfs4_call_sync_data {
	const struct nfs_server *seq_server;
	struct nfs4_sequence_args *seq_args;
	struct nfs4_sequence_res *seq_res;
};

void nfs4_init_sequence(struct nfs4_sequence_args *args,
			struct nfs4_sequence_res *res, int cache_reply,
			int privileged)
{
	args->sa_slot = NULL;
	args->sa_cache_this = cache_reply;
	args->sa_privileged = privileged;

	res->sr_slot = NULL;
}

static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res)
{
	struct nfs4_slot *slot = res->sr_slot;
	struct nfs4_slot_table *tbl;

	tbl = slot->table;
	spin_lock(&tbl->slot_tbl_lock);
	if (!nfs41_wake_and_assign_slot(tbl, slot))
		nfs4_free_slot(tbl, slot);
	spin_unlock(&tbl->slot_tbl_lock);

	res->sr_slot = NULL;
}

static int nfs40_sequence_done(struct rpc_task *task,
			       struct nfs4_sequence_res *res)
{
	if (res->sr_slot != NULL)
		nfs40_sequence_free_slot(res);
	return 1;
}

#if defined(CONFIG_NFS_V4_1)

static void nfs41_release_slot(struct nfs4_slot *slot)
{
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	bool send_new_highest_used_slotid = false;

	if (!slot)
		return;
	tbl = slot->table;
	session = tbl->session;

	/* Bump the slot sequence number */
	if (slot->seq_done)
		slot->seq_nr++;
	slot->seq_done = 0;

	spin_lock(&tbl->slot_tbl_lock);
	/* Be nice to the server: try to ensure that the last transmitted
	 * value for highest_used_slotid <= target_highest_slotid
	 */
	if (tbl->highest_used_slotid > tbl->target_highest_slotid)
		send_new_highest_used_slotid = true;

	if (nfs41_wake_and_assign_slot(tbl, slot)) {
		send_new_highest_used_slotid = false;
		goto out_unlock;
	}
	nfs4_free_slot(tbl, slot);

	if (tbl->highest_used_slotid != NFS4_NO_SLOT)
		send_new_highest_used_slotid = false;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);
	if (send_new_highest_used_slotid)
		nfs41_notify_server(session->clp);
	if (waitqueue_active(&tbl->slot_waitq))
		wake_up_all(&tbl->slot_waitq);
}

static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
{
	nfs41_release_slot(res->sr_slot);
	res->sr_slot = NULL;
}

static void nfs4_slot_sequence_record_sent(struct nfs4_slot *slot,
		u32 seqnr)
{
	if ((s32)(seqnr - slot->seq_nr_highest_sent) > 0)
		slot->seq_nr_highest_sent = seqnr;
}
static void nfs4_slot_sequence_acked(struct nfs4_slot *slot, u32 seqnr)
{
	nfs4_slot_sequence_record_sent(slot, seqnr);
	slot->seq_nr_last_acked = seqnr;
}

static void nfs4_probe_sequence(struct nfs_client *client, const struct cred *cred,
				struct nfs4_slot *slot)
{
	struct rpc_task *task = _nfs41_proc_sequence(client, cred, slot, true);
	if (!IS_ERR(task))
		rpc_put_task_async(task);
}

static int nfs41_sequence_process(struct rpc_task *task,
		struct nfs4_sequence_res *res)
{
	struct nfs4_session *session;
	struct nfs4_slot *slot = res->sr_slot;
	struct nfs_client *clp;
	int status;
	int ret = 1;

	if (slot == NULL)
		goto out_noaction;
	/* don't increment the sequence number if the task wasn't sent */
	if (!RPC_WAS_SENT(task) || slot->seq_done)
		goto out;

	session = slot->table->session;
	clp = session->clp;

	trace_nfs4_sequence_done(session, res);

	status = res->sr_status;
	if (task->tk_status == -NFS4ERR_DEADSESSION)
		status = -NFS4ERR_DEADSESSION;

	/* Check the SEQUENCE operation status */
	switch (status) {
	case 0:
		/* Mark this sequence number as having been acked */
		nfs4_slot_sequence_acked(slot, slot->seq_nr);
		/* Update the slot's sequence and clientid lease timer */
		slot->seq_done = 1;
		do_renew_lease(clp, res->sr_timestamp);
		/* Check sequence flags */
		nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags,
				!!slot->privileged);
		nfs41_update_target_slotid(slot->table, slot, res);
		break;
	case 1:
		/*
		 * sr_status remains 1 if an RPC level error occurred.
		 * The server may or may not have processed the sequence
		 * operation..
		 */
		nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
		slot->seq_done = 1;
		goto out;
	case -NFS4ERR_DELAY:
		/* The server detected a resend of the RPC call and
		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
		 * of RFC5661.
		 */
		dprintk("%s: slot=%u seq=%u: Operation in progress\n",
				__func__,
				slot->slot_nr,
				slot->seq_nr);
		goto out_retry;
	case -NFS4ERR_RETRY_UNCACHED_REP:
	case -NFS4ERR_SEQ_FALSE_RETRY:
		/*
		 * The server thinks we tried to replay a request.
		 * Retry the call after bumping the sequence ID.
		 */
		nfs4_slot_sequence_acked(slot, slot->seq_nr);
		goto retry_new_seq;
	case -NFS4ERR_BADSLOT:
		/*
		 * The slot id we used was probably retired. Try again
		 * using a different slot id.
		 */
		if (slot->slot_nr < slot->table->target_highest_slotid)
			goto session_recover;
		goto retry_nowait;
	case -NFS4ERR_SEQ_MISORDERED:
		nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
		/*
		 * Were one or more calls using this slot interrupted?
		 * If the server never received the request, then our
		 * transmitted slot sequence number may be too high. However,
		 * if the server did receive the request then it might
		 * accidentally give us a reply with a mismatched operation.
		 * We can sort this out by sending a lone sequence operation
		 * to the server on the same slot.
		 */
		if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) {
			slot->seq_nr--;
			if (task->tk_msg.rpc_proc != &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE]) {
				nfs4_probe_sequence(clp, task->tk_msg.rpc_cred, slot);
				res->sr_slot = NULL;
			}
			goto retry_nowait;
		}
		/*
		 * RFC5661:
		 * A retry might be sent while the original request is
		 * still in progress on the replier. The replier SHOULD
		 * deal with the issue by returning NFS4ERR_DELAY as the
		 * reply to SEQUENCE or CB_SEQUENCE operation, but
		 * implementations MAY return NFS4ERR_SEQ_MISORDERED.
		 *
		 * Restart the search after a delay.
		 */
		slot->seq_nr = slot->seq_nr_highest_sent;
		goto out_retry;
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		goto session_recover;
	default:
		/* Just update the slot sequence no. */
		slot->seq_done = 1;
	}
out:
	/* The session may be reset by one of the error handlers. */
	dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
out_noaction:
	return ret;
session_recover:
	set_bit(NFS4_SLOT_TBL_DRAINING, &session->fc_slot_table.slot_tbl_state);
	nfs4_schedule_session_recovery(session, status);
	dprintk("%s ERROR: %d Reset session\n", __func__, status);
	nfs41_sequence_free_slot(res);
	goto out;
retry_new_seq:
	++slot->seq_nr;
retry_nowait:
	if (rpc_restart_call_prepare(task)) {
		nfs41_sequence_free_slot(res);
		task->tk_status = 0;
		ret = 0;
	}
	goto out;
out_retry:
	if (!rpc_restart_call(task))
		goto out;
	rpc_delay(task, NFS4_POLL_RETRY_MAX);
	return 0;
}

int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (!nfs41_sequence_process(task, res))
		return 0;
	if (res->sr_slot != NULL)
		nfs41_sequence_free_slot(res);
	return 1;
}
EXPORT_SYMBOL_GPL(nfs41_sequence_done);

static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (res->sr_slot == NULL)
		return 1;
	if (res->sr_slot->table->session != NULL)
		return nfs41_sequence_process(task, res);
	return nfs40_sequence_done(task, res);
}

static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
{
	if (res->sr_slot != NULL) {
		if (res->sr_slot->table->session != NULL)
			nfs41_sequence_free_slot(res);
		else
			nfs40_sequence_free_slot(res);
	}
}

int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (res->sr_slot == NULL)
		return 1;
	if (!res->sr_slot->table->session)
		return nfs40_sequence_done(task, res);
	return nfs41_sequence_done(task, res);
}
EXPORT_SYMBOL_GPL(nfs4_sequence_done);

static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;

	dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);

	nfs4_setup_sequence(data->seq_server->nfs_client,
			    data->seq_args, data->seq_res, task);
}

static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;

	nfs41_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs41_call_sync_ops = {
	.rpc_call_prepare = nfs41_call_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
};

#else /* !CONFIG_NFS_V4_1 */

static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	return nfs40_sequence_done(task, res);
}

static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
{
	if (res->sr_slot != NULL)
		nfs40_sequence_free_slot(res);
}

int nfs4_sequence_done(struct rpc_task *task,
		       struct nfs4_sequence_res *res)
{
	return nfs40_sequence_done(task, res);
}
EXPORT_SYMBOL_GPL(nfs4_sequence_done);

#endif /* !CONFIG_NFS_V4_1 */

static void nfs41_sequence_res_init(struct nfs4_sequence_res *res)
{
	res->sr_timestamp = jiffies;
	res->sr_status_flags = 0;
	res->sr_status = 1;
}

static
void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
		struct nfs4_sequence_res *res,
		struct nfs4_slot *slot)
{
	if (!slot)
		return;
	slot->privileged = args->sa_privileged ? 1 : 0;
	args->sa_slot = slot;

	res->sr_slot = slot;
}

int nfs4_setup_sequence(struct nfs_client *client,
			struct nfs4_sequence_args *args,
			struct nfs4_sequence_res *res,
			struct rpc_task *task)
{
	struct nfs4_session *session = nfs4_get_session(client);
	struct nfs4_slot_table *tbl = client->cl_slot_tbl;
	struct nfs4_slot *slot;

	/* slot already allocated? */
	if (res->sr_slot != NULL)
		goto out_start;

	if (session)
		tbl = &session->fc_slot_table;

	spin_lock(&tbl->slot_tbl_lock);
	/* The state manager will wait until the slot table is empty */
	if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
		goto out_sleep;

	slot = nfs4_alloc_slot(tbl);
	if (IS_ERR(slot)) {
		if (slot == ERR_PTR(-ENOMEM))
			goto out_sleep_timeout;
		goto out_sleep;
	}
	spin_unlock(&tbl->slot_tbl_lock);

	nfs4_sequence_attach_slot(args, res, slot);

	trace_nfs4_setup_sequence(session, args);
out_start:
	nfs41_sequence_res_init(res);
	rpc_call_start(task);
	return 0;
out_sleep_timeout:
	/* Try again in 1/4 second */
	if (args->sa_privileged)
		rpc_sleep_on_priority_timeout(&tbl->slot_tbl_waitq, task,
				jiffies + (HZ >> 2), RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on_timeout(&tbl->slot_tbl_waitq, task,
				NULL, jiffies + (HZ >> 2));
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
out_sleep:
	if (args->sa_privileged)
		rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
				RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
}
EXPORT_SYMBOL_GPL(nfs4_setup_sequence);

static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	nfs4_setup_sequence(data->seq_server->nfs_client,
			    data->seq_args, data->seq_res, task);
}

static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	nfs4_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs40_call_sync_ops = {
	.rpc_call_prepare = nfs40_call_sync_prepare,
	.rpc_call_done = nfs40_call_sync_done,
};

static int nfs4_call_sync_custom(struct rpc_task_setup *task_setup)
{
	int ret;
	struct rpc_task *task;

	task = rpc_run_task(task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);

	ret = task->tk_status;
	rpc_put_task(task);
	return ret;
}

static int nfs4_do_call_sync(struct rpc_clnt *clnt,
			     struct nfs_server *server,
			     struct rpc_message *msg,
			     struct nfs4_sequence_args *args,
			     struct nfs4_sequence_res *res,
			     unsigned short task_flags)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_call_sync_data data = {
		.seq_server = server,
		.seq_args = args,
		.seq_res = res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = clp->cl_mvops->call_sync_ops,
		.callback_data = &data,
		.flags = task_flags,
	};

	return nfs4_call_sync_custom(&task_setup);
}

static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
				   struct nfs_server *server,
				   struct rpc_message *msg,
				   struct nfs4_sequence_args *args,
				   struct nfs4_sequence_res *res)
{
	unsigned short task_flags = 0;

	if (server->caps & NFS_CAP_MOVEABLE)
		task_flags = RPC_TASK_MOVEABLE;
	return nfs4_do_call_sync(clnt, server, msg, args, res, task_flags);
}


int nfs4_call_sync(struct rpc_clnt *clnt,
		   struct nfs_server *server,
		   struct rpc_message *msg,
		   struct nfs4_sequence_args *args,
		   struct nfs4_sequence_res *res,
		   int cache_reply)
{
	nfs4_init_sequence(args, res, cache_reply, 0);
	return nfs4_call_sync_sequence(clnt, server, msg, args, res);
}

static void
nfs4_inc_nlink_locked(struct inode *inode)
{
	nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
				     NFS_INO_INVALID_CTIME |
				     NFS_INO_INVALID_NLINK);
	inc_nlink(inode);
}

static void
nfs4_inc_nlink(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	nfs4_inc_nlink_locked(inode);
	spin_unlock(&inode->i_lock);
}

static void
nfs4_dec_nlink_locked(struct inode *inode)
{
	nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
				     NFS_INO_INVALID_CTIME |
				     NFS_INO_INVALID_NLINK);
	drop_nlink(inode);
}

static void
nfs4_update_changeattr_locked(struct inode *inode,
		struct nfs4_change_info *cinfo,
		unsigned long timestamp, unsigned long cache_validity)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	u64 change_attr = inode_peek_iversion_raw(inode);

	if (!nfs_have_delegated_mtime(inode))
		cache_validity |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME;
	if (S_ISDIR(inode->i_mode))
		cache_validity |= NFS_INO_INVALID_DATA;

	switch (NFS_SERVER(inode)->change_attr_type) {
	case NFS4_CHANGE_TYPE_IS_UNDEFINED:
		if (cinfo->after == change_attr)
			goto out;
		break;
	default:
		if ((s64)(change_attr - cinfo->after) >= 0)
			goto out;
	}

	inode_set_iversion_raw(inode, cinfo->after);
	if (!cinfo->atomic || cinfo->before != change_attr) {
		if (S_ISDIR(inode->i_mode))
			nfs_force_lookup_revalidate(inode);

		if (!nfs_have_delegated_attributes(inode))
			cache_validity |=
				NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
				NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER |
				NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
				NFS_INO_INVALID_MODE | NFS_INO_INVALID_BTIME |
				NFS_INO_INVALID_XATTR;
		nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
	}
	nfsi->attrtimeo_timestamp = jiffies;
	nfsi->read_cache_jiffies = timestamp;
	nfsi->attr_gencount = nfs_inc_attr_generation_counter();
	nfsi->cache_validity &= ~NFS_INO_INVALID_CHANGE;
out:
	nfs_set_cache_invalid(inode, cache_validity);
}

void
nfs4_update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
		unsigned long timestamp, unsigned long cache_validity)
{
	spin_lock(&dir->i_lock);
	nfs4_update_changeattr_locked(dir, cinfo, timestamp, cache_validity);
	spin_unlock(&dir->i_lock);
}

struct nfs4_open_createattrs {
	struct nfs4_label *label;
	struct iattr *sattr;
	const __u32 verf[2];
};

static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
		int err, struct nfs4_exception *exception)
{
	if (err != -EINVAL)
		return false;
	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
		return false;
	server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
	exception->retry = 1;
	return true;
}

static fmode_t _nfs4_ctx_to_accessmode(const struct nfs_open_context *ctx)
{
	return ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
}

static fmode_t _nfs4_ctx_to_openmode(const struct nfs_open_context *ctx)
{
	fmode_t ret = ctx->mode & (FMODE_READ|FMODE_WRITE);

	return (ctx->mode & FMODE_EXEC) ? FMODE_READ | ret : ret;
}

static u32
nfs4_fmode_to_share_access(fmode_t fmode)
{
	u32 res = 0;

	switch (fmode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
		res = NFS4_SHARE_ACCESS_READ;
		break;
	case FMODE_WRITE:
		res = NFS4_SHARE_ACCESS_WRITE;
		break;
	case FMODE_READ|FMODE_WRITE:
		res = NFS4_SHARE_ACCESS_BOTH;
	}
	return res;
}

static u32
nfs4_map_atomic_open_share(struct nfs_server *server,
		fmode_t fmode, int openflags)
{
	u32 res = nfs4_fmode_to_share_access(fmode);

	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
		goto out;
	/* Want no delegation if we're using O_DIRECT */
	if (openflags & O_DIRECT) {
		res |= NFS4_SHARE_WANT_NO_DELEG;
		goto out;
	}
	/* res |= NFS4_SHARE_WANT_NO_PREFERENCE; */
	if (server->caps & NFS_CAP_DELEGTIME)
		res |= NFS4_SHARE_WANT_DELEG_TIMESTAMPS;
	if (server->caps & NFS_CAP_OPEN_XOR)
		res |= NFS4_SHARE_WANT_OPEN_XOR_DELEGATION;
out:
	return res;
}

static enum open_claim_type4
nfs4_map_atomic_open_claim(struct nfs_server *server,
		enum open_claim_type4 claim)
{
	if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
		return claim;
	switch (claim) {
	default:
		return claim;
	case NFS4_OPEN_CLAIM_FH:
		return NFS4_OPEN_CLAIM_NULL;
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
		return NFS4_OPEN_CLAIM_DELEGATE_CUR;
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		return NFS4_OPEN_CLAIM_DELEGATE_PREV;
	}
}

static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
	p->o_res.f_attr = &p->f_attr;
	p->o_res.seqid = p->o_arg.seqid;
	p->c_res.seqid = p->c_arg.seqid;
	p->o_res.server = p->o_arg.server;
	p->o_res.access_request = p->o_arg.access;
	nfs_fattr_init(&p->f_attr);
	nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}

static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
		const struct nfs4_open_createattrs *c,
		enum open_claim_type4 claim,
		gfp_t gfp_mask)
{
	struct dentry *parent = dget_parent(dentry);
	struct inode *dir = d_inode(parent);
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
	struct nfs4_label *label = (c != NULL) ? c->label : NULL;
	struct nfs4_opendata *p;

	p = kzalloc(sizeof(*p), gfp_mask);
	if (p == NULL)
		goto err;

	p->f_attr.label = nfs4_label_alloc(server, gfp_mask);
	if (IS_ERR(p->f_attr.label))
		goto err_free_p;

	p->a_label = nfs4_label_alloc(server, gfp_mask);
	if (IS_ERR(p->a_label))
		goto err_free_f;

	alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
	p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
	if (IS_ERR(p->o_arg.seqid))
		goto err_free_label;
	nfs_sb_active(dentry->d_sb);
	p->dentry = dget(dentry);
	p->dir = parent;
	p->owner = sp;
	atomic_inc(&sp->so_count);
	p->o_arg.open_flags = flags;
	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
	p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
	p->o_arg.share_access = nfs4_map_atomic_open_share(server,
			fmode, flags);
	if (flags & O_CREAT) {
		p->o_arg.umask = current_umask();
		p->o_arg.label = nfs4_label_copy(p->a_label, label);
		if (c->sattr != NULL && c->sattr->ia_valid != 0) {
			p->o_arg.u.attrs = &p->attrs;
			memcpy(&p->attrs, c->sattr, sizeof(p->attrs));

			memcpy(p->o_arg.u.verifier.data, c->verf,
			       sizeof(p->o_arg.u.verifier.data));
		}
	}
	/* ask server to check for all possible rights as results
	 * are cached */
	switch (p->o_arg.claim) {
	default:
		break;
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
				  NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE |
				  NFS4_ACCESS_EXECUTE |
				  nfs_access_xattr_mask(server);
	}
	p->o_arg.clientid = server->nfs_client->cl_clientid;
	p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
	p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
	p->o_arg.name = &dentry->d_name;
	p->o_arg.server = server;
	p->o_arg.bitmask = nfs4_bitmask(server, label);
	p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
	switch (p->o_arg.claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
	case NFS4_OPEN_CLAIM_DELEGATE_PREV:
		p->o_arg.fh = NFS_FH(dir);
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
	case NFS4_OPEN_CLAIM_FH:
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		p->o_arg.fh = NFS_FH(d_inode(dentry));
	}
	p->c_arg.fh = &p->o_res.fh;
	p->c_arg.stateid = &p->o_res.stateid;
	p->c_arg.seqid = p->o_arg.seqid;
	nfs4_init_opendata_res(p);
	kref_init(&p->kref);
	return p;

err_free_label:
	nfs4_label_free(p->a_label);
err_free_f:
	nfs4_label_free(p->f_attr.label);
err_free_p:
	kfree(p);
err:
	dput(parent);
	return NULL;
}

static void nfs4_opendata_free(struct kref *kref)
{
	struct nfs4_opendata *p = container_of(kref,
			struct nfs4_opendata, kref);
	struct super_block *sb = p->dentry->d_sb;

	nfs4_lgopen_release(p->lgp);
	nfs_free_seqid(p->o_arg.seqid);
	nfs4_sequence_free_slot(&p->o_res.seq_res);
	if (p->state != NULL)
		nfs4_put_open_state(p->state);
	nfs4_put_state_owner(p->owner);

	nfs4_label_free(p->a_label);
	nfs4_label_free(p->f_attr.label);

	dput(p->dir);
	dput(p->dentry);
	nfs_sb_deactive(sb);
	nfs_fattr_free_names(&p->f_attr);
	kfree(p->f_attr.mdsthreshold);
	kfree(p);
}

static void nfs4_opendata_put(struct nfs4_opendata *p)
{
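	/* Drop a reference; nfs4_opendata_free() runs once the last one is gone. */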
	if (p != NULL)
		kref_put(&p->kref, nfs4_opendata_free);
}

static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
		fmode_t fmode)
{
	switch (fmode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_READ|FMODE_WRITE:
		return state->n_rdwr != 0;
	case FMODE_WRITE:
		return state->n_wronly != 0;
	case FMODE_READ:
		return state->n_rdonly != 0;
	}
	WARN_ON_ONCE(1);
	return false;
}

static int can_open_cached(struct nfs4_state *state, fmode_t mode,
		int open_mode, enum open_claim_type4 claim)
{
	int ret = 0;

	if (open_mode & (O_EXCL|O_TRUNC))
		goto out;
	switch (claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		goto out;
	default:
		break;
	}
	switch (mode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_READ:
		ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
			&& state->n_rdonly != 0;
		break;
	case FMODE_WRITE:
		ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
			&& state->n_wronly != 0;
		break;
	case FMODE_READ|FMODE_WRITE:
		ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
			&& state->n_rdwr != 0;
	}
out:
	return ret;
}

static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
		enum open_claim_type4 claim)
{
	if (delegation == NULL)
		return 0;
	if ((delegation->type & fmode) != fmode)
		return 0;
	switch (claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
		if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
			break;
		fallthrough;
	default:
		return 0;
	}
	nfs_mark_delegation_referenced(delegation);
	return 1;
}

static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
{
	switch (fmode) {
	case FMODE_WRITE:
		state->n_wronly++;
		break;
	case FMODE_READ:
		state->n_rdonly++;
		break;
	case FMODE_READ|FMODE_WRITE:
		state->n_rdwr++;
	}
	nfs4_state_set_mode_locked(state, state->state | fmode);
}

#ifdef CONFIG_NFS_V4_1
static bool nfs_open_stateid_recover_openmode(struct nfs4_state *state)
{
	if (state->n_rdonly && !test_bit(NFS_O_RDONLY_STATE, &state->flags))
		return true;
	if (state->n_wronly && !test_bit(NFS_O_WRONLY_STATE, &state->flags))
		return true;
	if (state->n_rdwr && !test_bit(NFS_O_RDWR_STATE, &state->flags))
		return true;
	return false;
}
#endif /* CONFIG_NFS_V4_1 */

static void nfs_state_log_update_open_stateid(struct nfs4_state *state)
{
	if (test_and_clear_bit(NFS_STATE_CHANGE_WAIT, &state->flags))
		wake_up_all(&state->waitq);
}

static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
{
	struct nfs_client *clp = state->owner->so_server->nfs_client;
	bool need_recover = false;

	if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
		need_recover = true;
	if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
		need_recover = true;
	if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
		need_recover = true;
	if (need_recover)
		nfs4_state_mark_reclaim_nograce(clp, state);
}

/*
 * Check for whether or not the caller may update the open stateid
 * to the value passed in by stateid.
 *
 * Note: This function relies heavily on the server implementing
 * RFC7530 Section 9.1.4.2, and RFC5661 Section 8.2.2
 * correctly.
 * i.e. The stateid seqids have to be initialised to 1, and
 * are then incremented on every state transition.
 */
static bool nfs_stateid_is_sequential(struct nfs4_state *state,
		const nfs4_stateid *stateid)
{
	if (test_bit(NFS_OPEN_STATE, &state->flags)) {
		/* The common case - we're updating to a new sequence number */
		if (nfs4_stateid_match_other(stateid, &state->open_stateid)) {
			if (nfs4_stateid_is_next(&state->open_stateid, stateid))
				return true;
			return false;
		}
		/* The server returned a new stateid */
	}
	/* This is the first OPEN in this generation */
	if (stateid->seqid == cpu_to_be32(1))
		return true;
	return false;
}

static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
{
	if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
		return;
	if (state->n_wronly)
		set_bit(NFS_O_WRONLY_STATE, &state->flags);
	if (state->n_rdonly)
		set_bit(NFS_O_RDONLY_STATE, &state->flags);
	if (state->n_rdwr)
		set_bit(NFS_O_RDWR_STATE, &state->flags);
	set_bit(NFS_OPEN_STATE, &state->flags);
}

static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
		nfs4_stateid *stateid, fmode_t fmode)
{
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	switch (fmode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_WRITE:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		break;
	case FMODE_READ:
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		break;
	case 0:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		clear_bit(NFS_OPEN_STATE, &state->flags);
	}
	if (stateid == NULL)
		return;
	/* Handle OPEN+OPEN_DOWNGRADE races */
	if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
	    !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
		nfs_resync_open_stateid_locked(state);
		goto out;
	}
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
	trace_nfs4_open_stateid_update(state->inode, stateid, 0);
out:
	nfs_state_log_update_open_stateid(state);
}

static void nfs_clear_open_stateid(struct nfs4_state *state,
	nfs4_stateid *arg_stateid,
	nfs4_stateid *stateid, fmode_t fmode)
{
	write_seqlock(&state->seqlock);
	/* Ignore, if the CLOSE argument doesn't match the current stateid */
	if (nfs4_state_match_open_stateid_other(state, arg_stateid))
		nfs_clear_open_stateid_locked(state, stateid, fmode);
	write_sequnlock(&state->seqlock);
	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
}

static void nfs_set_open_stateid_locked(struct nfs4_state *state,
		const nfs4_stateid *stateid, nfs4_stateid *freeme)
	__must_hold(&state->owner->so_lock)
	__must_hold(&state->seqlock)
	__must_hold(RCU)

{
	DEFINE_WAIT(wait);
	int status = 0;
	for (;;) {

		if (nfs_stateid_is_sequential(state, stateid))
			break;

		if (status)
			break;
		/* Rely on seqids for serialisation with NFSv4.0 */
		if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client))
			break;

		set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
		prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
		/*
		 * Ensure we process the state changes in the same order
		 * in which the server processed them by delaying the
		 * update of the stateid until we are in sequence.
		 */
		write_sequnlock(&state->seqlock);
		spin_unlock(&state->owner->so_lock);
		rcu_read_unlock();
		trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);

		if (!fatal_signal_pending(current) &&
		    !nfs_current_task_exiting()) {
			if (schedule_timeout(5*HZ) == 0)
				status = -EAGAIN;
			else
				status = 0;
		} else
			status = -EINTR;
		finish_wait(&state->waitq, &wait);
		rcu_read_lock();
		spin_lock(&state->owner->so_lock);
		write_seqlock(&state->seqlock);
	}

	if (test_bit(NFS_OPEN_STATE, &state->flags) &&
	    !nfs4_stateid_match_other(stateid, &state->open_stateid)) {
		nfs4_stateid_copy(freeme, &state->open_stateid);
		nfs_test_and_clear_all_open_stateid(state);
	}

	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
	trace_nfs4_open_stateid_update(state->inode, stateid, status);
	nfs_state_log_update_open_stateid(state);
}

static void nfs_state_set_open_stateid(struct nfs4_state *state,
		const nfs4_stateid *open_stateid,
		fmode_t fmode,
		nfs4_stateid *freeme)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	write_seqlock(&state->seqlock);
	nfs_set_open_stateid_locked(state, open_stateid, freeme);
	switch (fmode) {
	case FMODE_READ:
		set_bit(NFS_O_RDONLY_STATE, &state->flags);
		break;
	case FMODE_WRITE:
		set_bit(NFS_O_WRONLY_STATE, &state->flags);
		break;
	case FMODE_READ|FMODE_WRITE:
		set_bit(NFS_O_RDWR_STATE, &state->flags);
	}
	set_bit(NFS_OPEN_STATE, &state->flags);
	write_sequnlock(&state->seqlock);
}

static void nfs_state_clear_open_state_flags(struct nfs4_state *state)
{
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	clear_bit(NFS_O_WRONLY_STATE, &state->flags);
	clear_bit(NFS_O_RDONLY_STATE, &state->flags);
	clear_bit(NFS_OPEN_STATE, &state->flags);
}

static void nfs_state_set_delegation(struct nfs4_state *state,
		const nfs4_stateid *deleg_stateid,
		fmode_t fmode)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	write_seqlock(&state->seqlock);
	nfs4_stateid_copy(&state->stateid, deleg_stateid);
	set_bit(NFS_DELEGATED_STATE, &state->flags);
	write_sequnlock(&state->seqlock);
}

static void nfs_state_clear_delegation(struct nfs4_state *state)
{
	write_seqlock(&state->seqlock);
	nfs4_stateid_copy(&state->stateid, &state->open_stateid);
	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	write_sequnlock(&state->seqlock);
}

int update_open_stateid(struct nfs4_state *state,
		const nfs4_stateid *open_stateid,
		const nfs4_stateid *delegation,
		fmode_t fmode)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs_client *clp = server->nfs_client;
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *deleg_cur;
	nfs4_stateid freeme = { };
	int ret = 0;

	fmode &= (FMODE_READ|FMODE_WRITE);

&= (FMODE_READ|FMODE_WRITE); 1894 1895 rcu_read_lock(); 1896 spin_lock(&state->owner->so_lock); 1897 if (open_stateid != NULL) { 1898 nfs_state_set_open_stateid(state, open_stateid, fmode, &freeme); 1899 ret = 1; 1900 } 1901 1902 deleg_cur = nfs4_get_valid_delegation(state->inode); 1903 if (deleg_cur == NULL) 1904 goto no_delegation; 1905 1906 spin_lock(&deleg_cur->lock); 1907 if (rcu_dereference(nfsi->delegation) != deleg_cur || 1908 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) || 1909 (deleg_cur->type & fmode) != fmode) 1910 goto no_delegation_unlock; 1911 1912 if (delegation == NULL) 1913 delegation = &deleg_cur->stateid; 1914 else if (!nfs4_stateid_match_other(&deleg_cur->stateid, delegation)) 1915 goto no_delegation_unlock; 1916 1917 nfs_mark_delegation_referenced(deleg_cur); 1918 nfs_state_set_delegation(state, &deleg_cur->stateid, fmode); 1919 ret = 1; 1920 no_delegation_unlock: 1921 spin_unlock(&deleg_cur->lock); 1922 no_delegation: 1923 if (ret) 1924 update_open_stateflags(state, fmode); 1925 spin_unlock(&state->owner->so_lock); 1926 rcu_read_unlock(); 1927 1928 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) 1929 nfs4_schedule_state_manager(clp); 1930 if (freeme.type != 0) 1931 nfs4_test_and_free_stateid(server, &freeme, 1932 state->owner->so_cred); 1933 1934 return ret; 1935 } 1936 1937 static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp, 1938 const nfs4_stateid *stateid) 1939 { 1940 struct nfs4_state *state = lsp->ls_state; 1941 bool ret = false; 1942 1943 spin_lock(&state->state_lock); 1944 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid)) 1945 goto out_noupdate; 1946 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid)) 1947 goto out_noupdate; 1948 nfs4_stateid_copy(&lsp->ls_stateid, stateid); 1949 ret = true; 1950 out_noupdate: 1951 spin_unlock(&state->state_lock); 1952 return ret; 1953 } 1954 1955 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode) 1956 { 1957 struct nfs_delegation *delegation; 1958 1959 fmode &= FMODE_READ|FMODE_WRITE; 1960 rcu_read_lock(); 1961 delegation = nfs4_get_valid_delegation(inode); 1962 if (delegation == NULL || (delegation->type & fmode) == fmode) { 1963 rcu_read_unlock(); 1964 return; 1965 } 1966 rcu_read_unlock(); 1967 nfs4_inode_return_delegation(inode); 1968 } 1969 1970 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) 1971 { 1972 struct nfs4_state *state = opendata->state; 1973 struct nfs_delegation *delegation; 1974 int open_mode = opendata->o_arg.open_flags; 1975 fmode_t fmode = opendata->o_arg.fmode; 1976 enum open_claim_type4 claim = opendata->o_arg.claim; 1977 nfs4_stateid stateid; 1978 int ret = -EAGAIN; 1979 1980 for (;;) { 1981 spin_lock(&state->owner->so_lock); 1982 if (can_open_cached(state, fmode, open_mode, claim)) { 1983 update_open_stateflags(state, fmode); 1984 spin_unlock(&state->owner->so_lock); 1985 goto out_return_state; 1986 } 1987 spin_unlock(&state->owner->so_lock); 1988 rcu_read_lock(); 1989 delegation = nfs4_get_valid_delegation(state->inode); 1990 if (!can_open_delegated(delegation, fmode, claim)) { 1991 rcu_read_unlock(); 1992 break; 1993 } 1994 /* Save the delegation */ 1995 nfs4_stateid_copy(&stateid, &delegation->stateid); 1996 rcu_read_unlock(); 1997 nfs_release_seqid(opendata->o_arg.seqid); 1998 if (!opendata->is_recover) { 1999 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode); 2000 if (ret != 0) 2001 goto out; 2002 } 2003 ret = -EAGAIN; 2004 2005 /* Try to update the stateid using the 
delegation */ 2006 if (update_open_stateid(state, NULL, &stateid, fmode)) 2007 goto out_return_state; 2008 } 2009 out: 2010 return ERR_PTR(ret); 2011 out_return_state: 2012 refcount_inc(&state->count); 2013 return state; 2014 } 2015 2016 static void 2017 nfs4_process_delegation(struct inode *inode, const struct cred *cred, 2018 enum open_claim_type4 claim, 2019 const struct nfs4_open_delegation *delegation) 2020 { 2021 switch (delegation->open_delegation_type) { 2022 case NFS4_OPEN_DELEGATE_READ: 2023 case NFS4_OPEN_DELEGATE_WRITE: 2024 case NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG: 2025 case NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG: 2026 break; 2027 default: 2028 return; 2029 } 2030 switch (claim) { 2031 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 2032 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 2033 pr_err_ratelimited("NFS: Broken NFSv4 server %s is " 2034 "returning a delegation for " 2035 "OPEN(CLAIM_DELEGATE_CUR)\n", 2036 NFS_SERVER(inode)->nfs_client->cl_hostname); 2037 break; 2038 case NFS4_OPEN_CLAIM_PREVIOUS: 2039 nfs_inode_reclaim_delegation(inode, cred, delegation->type, 2040 &delegation->stateid, 2041 delegation->pagemod_limit, 2042 delegation->open_delegation_type); 2043 break; 2044 default: 2045 nfs_inode_set_delegation(inode, cred, delegation->type, 2046 &delegation->stateid, 2047 delegation->pagemod_limit, 2048 delegation->open_delegation_type); 2049 } 2050 if (delegation->do_recall) 2051 nfs_async_inode_return_delegation(inode, &delegation->stateid); 2052 } 2053 2054 /* 2055 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes 2056 * and update the nfs4_state. 2057 */ 2058 static struct nfs4_state * 2059 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data) 2060 { 2061 struct inode *inode = data->state->inode; 2062 struct nfs4_state *state = data->state; 2063 int ret; 2064 2065 if (!data->rpc_done) { 2066 if (data->rpc_status) 2067 return ERR_PTR(data->rpc_status); 2068 return nfs4_try_open_cached(data); 2069 } 2070 2071 ret = nfs_refresh_inode(inode, &data->f_attr); 2072 if (ret) 2073 return ERR_PTR(ret); 2074 2075 nfs4_process_delegation(state->inode, 2076 data->owner->so_cred, 2077 data->o_arg.claim, 2078 &data->o_res.delegation); 2079 2080 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_NO_OPEN_STATEID)) { 2081 if (!update_open_stateid(state, &data->o_res.stateid, 2082 NULL, data->o_arg.fmode)) 2083 return ERR_PTR(-EAGAIN); 2084 } else if (!update_open_stateid(state, NULL, NULL, data->o_arg.fmode)) 2085 return ERR_PTR(-EAGAIN); 2086 refcount_inc(&state->count); 2087 2088 return state; 2089 } 2090 2091 static struct inode * 2092 nfs4_opendata_get_inode(struct nfs4_opendata *data) 2093 { 2094 struct inode *inode; 2095 2096 switch (data->o_arg.claim) { 2097 case NFS4_OPEN_CLAIM_NULL: 2098 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 2099 case NFS4_OPEN_CLAIM_DELEGATE_PREV: 2100 if (!(data->f_attr.valid & NFS_ATTR_FATTR)) 2101 return ERR_PTR(-EAGAIN); 2102 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, 2103 &data->f_attr); 2104 break; 2105 default: 2106 inode = d_inode(data->dentry); 2107 ihold(inode); 2108 nfs_refresh_inode(inode, &data->f_attr); 2109 } 2110 return inode; 2111 } 2112 2113 static struct nfs4_state * 2114 nfs4_opendata_find_nfs4_state(struct nfs4_opendata *data) 2115 { 2116 struct nfs4_state *state; 2117 struct inode *inode; 2118 2119 inode = nfs4_opendata_get_inode(data); 2120 if (IS_ERR(inode)) 2121 return ERR_CAST(inode); 2122 if (data->state != NULL && data->state->inode == inode) { 2123 state = data->state; 2124 refcount_inc(&state->count); 2125 } else 
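/* No matching cached nfs4_state was passed in for this inode, so look up (or allocate) one for this open owner. */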
2126 state = nfs4_get_open_state(inode, data->owner); 2127 iput(inode); 2128 if (state == NULL) 2129 state = ERR_PTR(-ENOMEM); 2130 return state; 2131 } 2132 2133 static struct nfs4_state * 2134 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 2135 { 2136 struct nfs4_state *state; 2137 2138 if (!data->rpc_done) { 2139 state = nfs4_try_open_cached(data); 2140 trace_nfs4_cached_open(data->state); 2141 goto out; 2142 } 2143 2144 state = nfs4_opendata_find_nfs4_state(data); 2145 if (IS_ERR(state)) 2146 goto out; 2147 2148 nfs4_process_delegation(state->inode, 2149 data->owner->so_cred, 2150 data->o_arg.claim, 2151 &data->o_res.delegation); 2152 2153 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_NO_OPEN_STATEID)) { 2154 if (!update_open_stateid(state, &data->o_res.stateid, 2155 NULL, data->o_arg.fmode)) { 2156 nfs4_put_open_state(state); 2157 state = ERR_PTR(-EAGAIN); 2158 } 2159 } else if (!update_open_stateid(state, NULL, NULL, data->o_arg.fmode)) { 2160 nfs4_put_open_state(state); 2161 state = ERR_PTR(-EAGAIN); 2162 } 2163 out: 2164 nfs_release_seqid(data->o_arg.seqid); 2165 return state; 2166 } 2167 2168 static struct nfs4_state * 2169 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 2170 { 2171 struct nfs4_state *ret; 2172 2173 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) 2174 ret =_nfs4_opendata_reclaim_to_nfs4_state(data); 2175 else 2176 ret = _nfs4_opendata_to_nfs4_state(data); 2177 nfs4_sequence_free_slot(&data->o_res.seq_res); 2178 return ret; 2179 } 2180 2181 static struct nfs_open_context * 2182 nfs4_state_find_open_context_mode(struct nfs4_state *state, fmode_t mode) 2183 { 2184 struct nfs_inode *nfsi = NFS_I(state->inode); 2185 struct nfs_open_context *ctx; 2186 2187 rcu_read_lock(); 2188 list_for_each_entry_rcu(ctx, &nfsi->open_files, list) { 2189 if (ctx->state != state) 2190 continue; 2191 if ((ctx->mode & mode) != mode) 2192 continue; 2193 if (!get_nfs_open_context(ctx)) 2194 continue; 2195 rcu_read_unlock(); 2196 return ctx; 2197 } 2198 rcu_read_unlock(); 2199 return ERR_PTR(-ENOENT); 2200 } 2201 2202 static struct nfs_open_context * 2203 nfs4_state_find_open_context(struct nfs4_state *state) 2204 { 2205 struct nfs_open_context *ctx; 2206 2207 ctx = nfs4_state_find_open_context_mode(state, FMODE_READ|FMODE_WRITE); 2208 if (!IS_ERR(ctx)) 2209 return ctx; 2210 ctx = nfs4_state_find_open_context_mode(state, FMODE_WRITE); 2211 if (!IS_ERR(ctx)) 2212 return ctx; 2213 return nfs4_state_find_open_context_mode(state, FMODE_READ); 2214 } 2215 2216 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, 2217 struct nfs4_state *state, enum open_claim_type4 claim) 2218 { 2219 struct nfs4_opendata *opendata; 2220 2221 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, 2222 NULL, claim, GFP_NOFS); 2223 if (opendata == NULL) 2224 return ERR_PTR(-ENOMEM); 2225 opendata->state = state; 2226 refcount_inc(&state->count); 2227 return opendata; 2228 } 2229 2230 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, 2231 fmode_t fmode) 2232 { 2233 struct nfs4_state *newstate; 2234 struct nfs_server *server = NFS_SB(opendata->dentry->d_sb); 2235 int openflags = opendata->o_arg.open_flags; 2236 int ret; 2237 2238 if (!nfs4_mode_match_open_stateid(opendata->state, fmode)) 2239 return 0; 2240 opendata->o_arg.fmode = fmode; 2241 opendata->o_arg.share_access = 2242 nfs4_map_atomic_open_share(server, fmode, openflags); 2243 memset(&opendata->o_res, 0, sizeof(opendata->o_res)); 2244 memset(&opendata->c_res, 0, 
sizeof(opendata->c_res)); 2245 nfs4_init_opendata_res(opendata); 2246 ret = _nfs4_recover_proc_open(opendata); 2247 if (ret != 0) 2248 return ret; 2249 newstate = nfs4_opendata_to_nfs4_state(opendata); 2250 if (IS_ERR(newstate)) 2251 return PTR_ERR(newstate); 2252 if (newstate != opendata->state) 2253 ret = -ESTALE; 2254 nfs4_close_state(newstate, fmode); 2255 return ret; 2256 } 2257 2258 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state) 2259 { 2260 int ret; 2261 2262 /* memory barrier prior to reading state->n_* */ 2263 smp_rmb(); 2264 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); 2265 if (ret != 0) 2266 return ret; 2267 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE); 2268 if (ret != 0) 2269 return ret; 2270 ret = nfs4_open_recover_helper(opendata, FMODE_READ); 2271 if (ret != 0) 2272 return ret; 2273 /* 2274 * We may have performed cached opens for all three recoveries. 2275 * Check if we need to update the current stateid. 2276 */ 2277 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 && 2278 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) { 2279 write_seqlock(&state->seqlock); 2280 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 2281 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 2282 write_sequnlock(&state->seqlock); 2283 } 2284 return 0; 2285 } 2286 2287 /* 2288 * OPEN_RECLAIM: 2289 * reclaim state on the server after a reboot. 2290 */ 2291 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 2292 { 2293 struct nfs_delegation *delegation; 2294 struct nfs4_opendata *opendata; 2295 u32 delegation_type = NFS4_OPEN_DELEGATE_NONE; 2296 int status; 2297 2298 opendata = nfs4_open_recoverdata_alloc(ctx, state, 2299 NFS4_OPEN_CLAIM_PREVIOUS); 2300 if (IS_ERR(opendata)) 2301 return PTR_ERR(opendata); 2302 rcu_read_lock(); 2303 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2304 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0) { 2305 switch(delegation->type) { 2306 case FMODE_READ: 2307 delegation_type = NFS4_OPEN_DELEGATE_READ; 2308 if (test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) 2309 delegation_type = NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG; 2310 break; 2311 case FMODE_WRITE: 2312 case FMODE_READ|FMODE_WRITE: 2313 delegation_type = NFS4_OPEN_DELEGATE_WRITE; 2314 if (test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) 2315 delegation_type = NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG; 2316 } 2317 } 2318 rcu_read_unlock(); 2319 opendata->o_arg.u.delegation_type = delegation_type; 2320 status = nfs4_open_recover(opendata, state); 2321 nfs4_opendata_put(opendata); 2322 return status; 2323 } 2324 2325 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 2326 { 2327 struct nfs_server *server = NFS_SERVER(state->inode); 2328 struct nfs4_exception exception = { }; 2329 int err; 2330 do { 2331 err = _nfs4_do_open_reclaim(ctx, state); 2332 trace_nfs4_open_reclaim(ctx, 0, err); 2333 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 2334 continue; 2335 if (err != -NFS4ERR_DELAY) 2336 break; 2337 nfs4_handle_exception(server, err, &exception); 2338 } while (exception.retry); 2339 return err; 2340 } 2341 2342 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state) 2343 { 2344 struct nfs_open_context *ctx; 2345 int ret; 2346 2347 ctx = nfs4_state_find_open_context(state); 2348 if (IS_ERR(ctx)) 2349 return -EAGAIN; 2350 
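/* Reboot recovery: forget the cached delegation and open-mode flags so that nfs4_open_recover() re-establishes them with OPEN(CLAIM_PREVIOUS) rather than trusting pre-reboot state. */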
clear_bit(NFS_DELEGATED_STATE, &state->flags); 2351 nfs_state_clear_open_state_flags(state); 2352 ret = nfs4_do_open_reclaim(ctx, state); 2353 put_nfs_open_context(ctx); 2354 return ret; 2355 } 2356 2357 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err) 2358 { 2359 switch (err) { 2360 default: 2361 printk(KERN_ERR "NFS: %s: unhandled error " 2362 "%d.\n", __func__, err); 2363 fallthrough; 2364 case 0: 2365 case -ENOENT: 2366 case -EAGAIN: 2367 case -ESTALE: 2368 case -ETIMEDOUT: 2369 break; 2370 case -NFS4ERR_BADSESSION: 2371 case -NFS4ERR_BADSLOT: 2372 case -NFS4ERR_BAD_HIGH_SLOT: 2373 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 2374 case -NFS4ERR_DEADSESSION: 2375 return -EAGAIN; 2376 case -NFS4ERR_STALE_CLIENTID: 2377 case -NFS4ERR_STALE_STATEID: 2378 /* Don't recall a delegation if it was lost */ 2379 nfs4_schedule_lease_recovery(server->nfs_client); 2380 return -EAGAIN; 2381 case -NFS4ERR_MOVED: 2382 nfs4_schedule_migration_recovery(server); 2383 return -EAGAIN; 2384 case -NFS4ERR_LEASE_MOVED: 2385 nfs4_schedule_lease_moved_recovery(server->nfs_client); 2386 return -EAGAIN; 2387 case -NFS4ERR_DELEG_REVOKED: 2388 case -NFS4ERR_ADMIN_REVOKED: 2389 case -NFS4ERR_EXPIRED: 2390 case -NFS4ERR_BAD_STATEID: 2391 case -NFS4ERR_OPENMODE: 2392 nfs_inode_find_state_and_recover(state->inode, 2393 stateid); 2394 nfs4_schedule_stateid_recovery(server, state); 2395 return -EAGAIN; 2396 case -NFS4ERR_DELAY: 2397 case -NFS4ERR_GRACE: 2398 ssleep(1); 2399 return -EAGAIN; 2400 case -ENOMEM: 2401 case -NFS4ERR_DENIED: 2402 if (fl) { 2403 struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner; 2404 if (lsp) 2405 set_bit(NFS_LOCK_LOST, &lsp->ls_flags); 2406 } 2407 return 0; 2408 } 2409 return err; 2410 } 2411 2412 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, 2413 struct nfs4_state *state, const nfs4_stateid *stateid) 2414 { 2415 struct nfs_server *server = NFS_SERVER(state->inode); 2416 struct nfs4_opendata *opendata; 2417 int err = 0; 2418 2419 opendata = nfs4_open_recoverdata_alloc(ctx, state, 2420 NFS4_OPEN_CLAIM_DELEG_CUR_FH); 2421 if (IS_ERR(opendata)) 2422 return PTR_ERR(opendata); 2423 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); 2424 if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) { 2425 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); 2426 if (err) 2427 goto out; 2428 } 2429 if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) { 2430 err = nfs4_open_recover_helper(opendata, FMODE_WRITE); 2431 if (err) 2432 goto out; 2433 } 2434 if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) { 2435 err = nfs4_open_recover_helper(opendata, FMODE_READ); 2436 if (err) 2437 goto out; 2438 } 2439 nfs_state_clear_delegation(state); 2440 out: 2441 nfs4_opendata_put(opendata); 2442 return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err); 2443 } 2444 2445 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata) 2446 { 2447 struct nfs4_opendata *data = calldata; 2448 2449 nfs4_setup_sequence(data->o_arg.server->nfs_client, 2450 &data->c_arg.seq_args, &data->c_res.seq_res, task); 2451 } 2452 2453 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata) 2454 { 2455 struct nfs4_opendata *data = calldata; 2456 2457 nfs40_sequence_done(task, &data->c_res.seq_res); 2458 2459 data->rpc_status = task->tk_status; 2460 if (data->rpc_status == 0) { 2461 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid); 2462 
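/* The copy above ensures that the confirmed stateid (with its bumped seqid) is the one recorded when the opendata is converted into an nfs4_state. */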
nfs_confirm_seqid(&data->owner->so_seqid, 0); 2463 renew_lease(data->o_res.server, data->timestamp); 2464 data->rpc_done = true; 2465 } 2466 } 2467 2468 static void nfs4_open_confirm_release(void *calldata) 2469 { 2470 struct nfs4_opendata *data = calldata; 2471 struct nfs4_state *state = NULL; 2472 2473 /* If this request hasn't been cancelled, do nothing */ 2474 if (!data->cancelled) 2475 goto out_free; 2476 /* In case of error, no cleanup! */ 2477 if (!data->rpc_done) 2478 goto out_free; 2479 state = nfs4_opendata_to_nfs4_state(data); 2480 if (!IS_ERR(state)) 2481 nfs4_close_state(state, data->o_arg.fmode); 2482 out_free: 2483 nfs4_opendata_put(data); 2484 } 2485 2486 static const struct rpc_call_ops nfs4_open_confirm_ops = { 2487 .rpc_call_prepare = nfs4_open_confirm_prepare, 2488 .rpc_call_done = nfs4_open_confirm_done, 2489 .rpc_release = nfs4_open_confirm_release, 2490 }; 2491 2492 /* 2493 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata 2494 */ 2495 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) 2496 { 2497 struct nfs_server *server = NFS_SERVER(d_inode(data->dir)); 2498 struct rpc_task *task; 2499 struct rpc_message msg = { 2500 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM], 2501 .rpc_argp = &data->c_arg, 2502 .rpc_resp = &data->c_res, 2503 .rpc_cred = data->owner->so_cred, 2504 }; 2505 struct rpc_task_setup task_setup_data = { 2506 .rpc_client = server->client, 2507 .rpc_message = &msg, 2508 .callback_ops = &nfs4_open_confirm_ops, 2509 .callback_data = data, 2510 .workqueue = nfsiod_workqueue, 2511 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 2512 }; 2513 int status; 2514 2515 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1, 2516 data->is_recover); 2517 kref_get(&data->kref); 2518 data->rpc_done = false; 2519 data->rpc_status = 0; 2520 data->timestamp = jiffies; 2521 task = rpc_run_task(&task_setup_data); 2522 if (IS_ERR(task)) 2523 return PTR_ERR(task); 2524 status = rpc_wait_for_completion_task(task); 2525 if (status != 0) { 2526 data->cancelled = true; 2527 smp_wmb(); 2528 } else 2529 status = data->rpc_status; 2530 rpc_put_task(task); 2531 return status; 2532 } 2533 2534 static void nfs4_open_prepare(struct rpc_task *task, void *calldata) 2535 { 2536 struct nfs4_opendata *data = calldata; 2537 struct nfs4_state_owner *sp = data->owner; 2538 struct nfs_client *clp = sp->so_server->nfs_client; 2539 enum open_claim_type4 claim = data->o_arg.claim; 2540 2541 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) 2542 goto out_wait; 2543 /* 2544 * Check if we still need to send an OPEN call, or if we can use 2545 * a delegation instead. 2546 */ 2547 if (data->state != NULL) { 2548 struct nfs_delegation *delegation; 2549 2550 if (can_open_cached(data->state, data->o_arg.fmode, 2551 data->o_arg.open_flags, claim)) 2552 goto out_no_action; 2553 rcu_read_lock(); 2554 delegation = nfs4_get_valid_delegation(data->state->inode); 2555 if (can_open_delegated(delegation, data->o_arg.fmode, claim)) 2556 goto unlock_no_action; 2557 rcu_read_unlock(); 2558 } 2559 /* Update client id. 
*/ 2560 data->o_arg.clientid = clp->cl_clientid; 2561 switch (claim) { 2562 default: 2563 break; 2564 case NFS4_OPEN_CLAIM_PREVIOUS: 2565 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 2566 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 2567 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0]; 2568 fallthrough; 2569 case NFS4_OPEN_CLAIM_FH: 2570 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; 2571 } 2572 data->timestamp = jiffies; 2573 if (nfs4_setup_sequence(data->o_arg.server->nfs_client, 2574 &data->o_arg.seq_args, 2575 &data->o_res.seq_res, 2576 task) != 0) 2577 nfs_release_seqid(data->o_arg.seqid); 2578 2579 /* Set the create mode (note dependency on the session type) */ 2580 data->o_arg.createmode = NFS4_CREATE_UNCHECKED; 2581 if (data->o_arg.open_flags & O_EXCL) { 2582 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1; 2583 if (clp->cl_mvops->minor_version == 0) { 2584 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE; 2585 /* don't put an ACCESS op in OPEN compound if O_EXCL, 2586 * because ACCESS will return permission denied for 2587 * all bits until close */ 2588 data->o_res.access_request = data->o_arg.access = 0; 2589 } else if (nfs4_has_persistent_session(clp)) 2590 data->o_arg.createmode = NFS4_CREATE_GUARDED; 2591 } 2592 return; 2593 unlock_no_action: 2594 trace_nfs4_cached_open(data->state); 2595 rcu_read_unlock(); 2596 out_no_action: 2597 task->tk_action = NULL; 2598 out_wait: 2599 nfs4_sequence_done(task, &data->o_res.seq_res); 2600 } 2601 2602 static void nfs4_open_done(struct rpc_task *task, void *calldata) 2603 { 2604 struct nfs4_opendata *data = calldata; 2605 2606 data->rpc_status = task->tk_status; 2607 2608 if (!nfs4_sequence_process(task, &data->o_res.seq_res)) 2609 return; 2610 2611 if (task->tk_status == 0) { 2612 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) { 2613 switch (data->o_res.f_attr->mode & S_IFMT) { 2614 case S_IFREG: 2615 break; 2616 case S_IFLNK: 2617 data->rpc_status = -ELOOP; 2618 break; 2619 case S_IFDIR: 2620 data->rpc_status = -EISDIR; 2621 break; 2622 default: 2623 data->rpc_status = -ENOTDIR; 2624 } 2625 } 2626 renew_lease(data->o_res.server, data->timestamp); 2627 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)) 2628 nfs_confirm_seqid(&data->owner->so_seqid, 0); 2629 } 2630 data->rpc_done = true; 2631 } 2632 2633 static void nfs4_open_release(void *calldata) 2634 { 2635 struct nfs4_opendata *data = calldata; 2636 struct nfs4_state *state = NULL; 2637 2638 /* In case of error, no cleanup! */ 2639 if (data->rpc_status != 0 || !data->rpc_done) { 2640 nfs_release_seqid(data->o_arg.seqid); 2641 goto out_free; 2642 } 2643 /* If this request hasn't been cancelled, do nothing */ 2644 if (!data->cancelled) 2645 goto out_free; 2646 /* In case we need an open_confirm, no cleanup! 
*/ 2647 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) 2648 goto out_free; 2649 state = nfs4_opendata_to_nfs4_state(data); 2650 if (!IS_ERR(state)) 2651 nfs4_close_state(state, data->o_arg.fmode); 2652 out_free: 2653 nfs4_opendata_put(data); 2654 } 2655 2656 static const struct rpc_call_ops nfs4_open_ops = { 2657 .rpc_call_prepare = nfs4_open_prepare, 2658 .rpc_call_done = nfs4_open_done, 2659 .rpc_release = nfs4_open_release, 2660 }; 2661 2662 static int nfs4_run_open_task(struct nfs4_opendata *data, 2663 struct nfs_open_context *ctx) 2664 { 2665 struct inode *dir = d_inode(data->dir); 2666 struct nfs_server *server = NFS_SERVER(dir); 2667 struct nfs_openargs *o_arg = &data->o_arg; 2668 struct nfs_openres *o_res = &data->o_res; 2669 struct rpc_task *task; 2670 struct rpc_message msg = { 2671 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN], 2672 .rpc_argp = o_arg, 2673 .rpc_resp = o_res, 2674 .rpc_cred = data->owner->so_cred, 2675 }; 2676 struct rpc_task_setup task_setup_data = { 2677 .rpc_client = server->client, 2678 .rpc_message = &msg, 2679 .callback_ops = &nfs4_open_ops, 2680 .callback_data = data, 2681 .workqueue = nfsiod_workqueue, 2682 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 2683 }; 2684 int status; 2685 2686 if (nfs_server_capable(dir, NFS_CAP_MOVEABLE)) 2687 task_setup_data.flags |= RPC_TASK_MOVEABLE; 2688 2689 kref_get(&data->kref); 2690 data->rpc_done = false; 2691 data->rpc_status = 0; 2692 data->cancelled = false; 2693 data->is_recover = false; 2694 if (!ctx) { 2695 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 1); 2696 data->is_recover = true; 2697 task_setup_data.flags |= RPC_TASK_TIMEOUT; 2698 } else { 2699 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 0); 2700 pnfs_lgopen_prepare(data, ctx); 2701 } 2702 task = rpc_run_task(&task_setup_data); 2703 if (IS_ERR(task)) 2704 return PTR_ERR(task); 2705 status = rpc_wait_for_completion_task(task); 2706 if (status != 0) { 2707 data->cancelled = true; 2708 smp_wmb(); 2709 } else 2710 status = data->rpc_status; 2711 rpc_put_task(task); 2712 2713 return status; 2714 } 2715 2716 static int _nfs4_recover_proc_open(struct nfs4_opendata *data) 2717 { 2718 struct inode *dir = d_inode(data->dir); 2719 struct nfs_openres *o_res = &data->o_res; 2720 int status; 2721 2722 status = nfs4_run_open_task(data, NULL); 2723 if (status != 0 || !data->rpc_done) 2724 return status; 2725 2726 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr); 2727 2728 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) 2729 status = _nfs4_proc_open_confirm(data); 2730 2731 return status; 2732 } 2733 2734 /* 2735 * Additional permission checks in order to distinguish between an 2736 * open for read, and an open for execute. This works around the 2737 * fact that NFSv4 OPEN treats read and execute permissions as being 2738 * the same. 2739 * Note that in the non-execute case, we want to turn off permission 2740 * checking if we just created a new file (POSIX open() semantics). 
2741 */ 2742 static int nfs4_opendata_access(const struct cred *cred, 2743 struct nfs4_opendata *opendata, 2744 struct nfs4_state *state, fmode_t fmode) 2745 { 2746 struct nfs_access_entry cache; 2747 u32 mask, flags; 2748 2749 /* access call failed or for some reason the server doesn't 2750 * support any access modes -- defer access call until later */ 2751 if (opendata->o_res.access_supported == 0) 2752 return 0; 2753 2754 mask = 0; 2755 if (fmode & FMODE_EXEC) { 2756 /* ONLY check for exec rights */ 2757 if (S_ISDIR(state->inode->i_mode)) 2758 mask = NFS4_ACCESS_LOOKUP; 2759 else 2760 mask = NFS4_ACCESS_EXECUTE; 2761 } else if ((fmode & FMODE_READ) && !opendata->file_created) 2762 mask = NFS4_ACCESS_READ; 2763 2764 nfs_access_set_mask(&cache, opendata->o_res.access_result); 2765 nfs_access_add_cache(state->inode, &cache, cred); 2766 2767 flags = NFS4_ACCESS_READ | NFS4_ACCESS_EXECUTE | NFS4_ACCESS_LOOKUP; 2768 if ((mask & ~cache.mask & flags) == 0) 2769 return 0; 2770 2771 return -EACCES; 2772 } 2773 2774 /* 2775 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata 2776 */ 2777 static int _nfs4_proc_open(struct nfs4_opendata *data, 2778 struct nfs_open_context *ctx) 2779 { 2780 struct inode *dir = d_inode(data->dir); 2781 struct nfs_server *server = NFS_SERVER(dir); 2782 struct nfs_openargs *o_arg = &data->o_arg; 2783 struct nfs_openres *o_res = &data->o_res; 2784 int status; 2785 2786 status = nfs4_run_open_task(data, ctx); 2787 if (!data->rpc_done) 2788 return status; 2789 if (status != 0) { 2790 if (status == -NFS4ERR_BADNAME && 2791 !(o_arg->open_flags & O_CREAT)) 2792 return -ENOENT; 2793 return status; 2794 } 2795 2796 nfs_fattr_map_and_free_names(server, &data->f_attr); 2797 2798 if (o_arg->open_flags & O_CREAT) { 2799 if (o_arg->open_flags & O_EXCL) 2800 data->file_created = true; 2801 else if (o_res->cinfo.before != o_res->cinfo.after) 2802 data->file_created = true; 2803 if (data->file_created || 2804 inode_peek_iversion_raw(dir) != o_res->cinfo.after) 2805 nfs4_update_changeattr(dir, &o_res->cinfo, 2806 o_res->f_attr->time_start, 2807 NFS_INO_INVALID_DATA); 2808 } 2809 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) 2810 server->caps &= ~NFS_CAP_POSIX_LOCK; 2811 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 2812 status = _nfs4_proc_open_confirm(data); 2813 if (status != 0) 2814 return status; 2815 } 2816 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) { 2817 struct nfs_fh *fh = &o_res->fh; 2818 2819 nfs4_sequence_free_slot(&o_res->seq_res); 2820 if (o_arg->claim == NFS4_OPEN_CLAIM_FH) 2821 fh = NFS_FH(d_inode(data->dentry)); 2822 nfs4_proc_getattr(server, fh, o_res->f_attr, NULL); 2823 } 2824 return 0; 2825 } 2826 2827 /* 2828 * OPEN_EXPIRED: 2829 * reclaim state on the server after a network partition. 2830 * Assumes caller holds the appropriate lock 2831 */ 2832 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2833 { 2834 struct nfs4_opendata *opendata; 2835 int ret; 2836 2837 opendata = nfs4_open_recoverdata_alloc(ctx, state, NFS4_OPEN_CLAIM_FH); 2838 if (IS_ERR(opendata)) 2839 return PTR_ERR(opendata); 2840 /* 2841 * We're not recovering a delegation, so ask for no delegation. 2842 * Otherwise the recovery thread could deadlock with an outstanding 2843 * delegation return. 
2844 */ 2845 opendata->o_arg.open_flags = O_DIRECT; 2846 ret = nfs4_open_recover(opendata, state); 2847 if (ret == -ESTALE) 2848 d_drop(ctx->dentry); 2849 nfs4_opendata_put(opendata); 2850 return ret; 2851 } 2852 2853 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2854 { 2855 struct nfs_server *server = NFS_SERVER(state->inode); 2856 struct nfs4_exception exception = { }; 2857 int err; 2858 2859 do { 2860 err = _nfs4_open_expired(ctx, state); 2861 trace_nfs4_open_expired(ctx, 0, err); 2862 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 2863 continue; 2864 switch (err) { 2865 default: 2866 goto out; 2867 case -NFS4ERR_GRACE: 2868 case -NFS4ERR_DELAY: 2869 nfs4_handle_exception(server, err, &exception); 2870 err = 0; 2871 } 2872 } while (exception.retry); 2873 out: 2874 return err; 2875 } 2876 2877 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2878 { 2879 struct nfs_open_context *ctx; 2880 int ret; 2881 2882 ctx = nfs4_state_find_open_context(state); 2883 if (IS_ERR(ctx)) 2884 return -EAGAIN; 2885 ret = nfs4_do_open_expired(ctx, state); 2886 put_nfs_open_context(ctx); 2887 return ret; 2888 } 2889 2890 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state, 2891 const nfs4_stateid *stateid) 2892 { 2893 nfs_remove_bad_delegation(state->inode, stateid); 2894 nfs_state_clear_delegation(state); 2895 } 2896 2897 static void nfs40_clear_delegation_stateid(struct nfs4_state *state) 2898 { 2899 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL) 2900 nfs_finish_clear_delegation_stateid(state, NULL); 2901 } 2902 2903 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2904 { 2905 /* NFSv4.0 doesn't allow for delegation recovery on open expire */ 2906 nfs40_clear_delegation_stateid(state); 2907 nfs_state_clear_open_state_flags(state); 2908 return nfs4_open_expired(sp, state); 2909 } 2910 2911 static int nfs40_test_and_free_expired_stateid(struct nfs_server *server, 2912 nfs4_stateid *stateid, const struct cred *cred) 2913 { 2914 return -NFS4ERR_BAD_STATEID; 2915 } 2916 2917 #if defined(CONFIG_NFS_V4_1) 2918 static int nfs41_test_and_free_expired_stateid(struct nfs_server *server, 2919 nfs4_stateid *stateid, const struct cred *cred) 2920 { 2921 int status; 2922 2923 switch (stateid->type) { 2924 default: 2925 break; 2926 case NFS4_INVALID_STATEID_TYPE: 2927 case NFS4_SPECIAL_STATEID_TYPE: 2928 case NFS4_FREED_STATEID_TYPE: 2929 return -NFS4ERR_BAD_STATEID; 2930 case NFS4_REVOKED_STATEID_TYPE: 2931 goto out_free; 2932 } 2933 2934 status = nfs41_test_stateid(server, stateid, cred); 2935 switch (status) { 2936 case -NFS4ERR_EXPIRED: 2937 case -NFS4ERR_ADMIN_REVOKED: 2938 case -NFS4ERR_DELEG_REVOKED: 2939 break; 2940 default: 2941 return status; 2942 } 2943 out_free: 2944 /* Ack the revoked state to the server */ 2945 nfs41_free_stateid(server, stateid, cred, true); 2946 return -NFS4ERR_EXPIRED; 2947 } 2948 2949 static int nfs41_check_delegation_stateid(struct nfs4_state *state) 2950 { 2951 struct nfs_server *server = NFS_SERVER(state->inode); 2952 nfs4_stateid stateid; 2953 struct nfs_delegation *delegation; 2954 const struct cred *cred = NULL; 2955 int status, ret = NFS_OK; 2956 2957 /* Get the delegation credential for use by test/free_stateid */ 2958 rcu_read_lock(); 2959 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2960 if (delegation == NULL) { 2961 rcu_read_unlock(); 2962 nfs_state_clear_delegation(state); 2963 return NFS_OK; 
2964 } 2965 2966 spin_lock(&delegation->lock); 2967 nfs4_stateid_copy(&stateid, &delegation->stateid); 2968 2969 if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, 2970 &delegation->flags)) { 2971 spin_unlock(&delegation->lock); 2972 rcu_read_unlock(); 2973 return NFS_OK; 2974 } 2975 2976 if (delegation->cred) 2977 cred = get_cred(delegation->cred); 2978 spin_unlock(&delegation->lock); 2979 rcu_read_unlock(); 2980 status = nfs41_test_and_free_expired_stateid(server, &stateid, cred); 2981 trace_nfs4_test_delegation_stateid(state, NULL, status); 2982 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) 2983 nfs_finish_clear_delegation_stateid(state, &stateid); 2984 else 2985 ret = status; 2986 2987 put_cred(cred); 2988 return ret; 2989 } 2990 2991 static void nfs41_delegation_recover_stateid(struct nfs4_state *state) 2992 { 2993 nfs4_stateid tmp; 2994 2995 if (test_bit(NFS_DELEGATED_STATE, &state->flags) && 2996 nfs4_copy_delegation_stateid(state->inode, state->state, 2997 &tmp, NULL) && 2998 nfs4_stateid_match_other(&state->stateid, &tmp)) 2999 nfs_state_set_delegation(state, &tmp, state->state); 3000 else 3001 nfs_state_clear_delegation(state); 3002 } 3003 3004 /** 3005 * nfs41_check_expired_locks - possibly free a lock stateid 3006 * 3007 * @state: NFSv4 state for an inode 3008 * 3009 * Returns NFS_OK if recovery for this stateid is now finished. 3010 * Otherwise a negative NFS4ERR value is returned. 3011 */ 3012 static int nfs41_check_expired_locks(struct nfs4_state *state) 3013 { 3014 int status, ret = NFS_OK; 3015 struct nfs4_lock_state *lsp, *prev = NULL; 3016 struct nfs_server *server = NFS_SERVER(state->inode); 3017 3018 if (!test_bit(LK_STATE_IN_USE, &state->flags)) 3019 goto out; 3020 3021 spin_lock(&state->state_lock); 3022 list_for_each_entry(lsp, &state->lock_states, ls_locks) { 3023 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { 3024 const struct cred *cred = lsp->ls_state->owner->so_cred; 3025 3026 refcount_inc(&lsp->ls_count); 3027 spin_unlock(&state->state_lock); 3028 3029 nfs4_put_lock_state(prev); 3030 prev = lsp; 3031 3032 status = nfs41_test_and_free_expired_stateid(server, 3033 &lsp->ls_stateid, 3034 cred); 3035 trace_nfs4_test_lock_stateid(state, lsp, status); 3036 if (status == -NFS4ERR_EXPIRED || 3037 status == -NFS4ERR_BAD_STATEID) { 3038 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 3039 lsp->ls_stateid.type = NFS4_INVALID_STATEID_TYPE; 3040 if (!recover_lost_locks) 3041 set_bit(NFS_LOCK_LOST, &lsp->ls_flags); 3042 } else if (status != NFS_OK) { 3043 ret = status; 3044 nfs4_put_lock_state(prev); 3045 goto out; 3046 } 3047 spin_lock(&state->state_lock); 3048 } 3049 } 3050 spin_unlock(&state->state_lock); 3051 nfs4_put_lock_state(prev); 3052 out: 3053 return ret; 3054 } 3055 3056 /** 3057 * nfs41_check_open_stateid - possibly free an open stateid 3058 * 3059 * @state: NFSv4 state for an inode 3060 * 3061 * Returns NFS_OK if recovery for this stateid is now finished. 3062 * Otherwise a negative NFS4ERR value is returned. 
3063 */ 3064 static int nfs41_check_open_stateid(struct nfs4_state *state) 3065 { 3066 struct nfs_server *server = NFS_SERVER(state->inode); 3067 nfs4_stateid *stateid = &state->open_stateid; 3068 const struct cred *cred = state->owner->so_cred; 3069 int status; 3070 3071 if (test_bit(NFS_OPEN_STATE, &state->flags) == 0) 3072 return -NFS4ERR_BAD_STATEID; 3073 status = nfs41_test_and_free_expired_stateid(server, stateid, cred); 3074 trace_nfs4_test_open_stateid(state, NULL, status); 3075 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) { 3076 nfs_state_clear_open_state_flags(state); 3077 stateid->type = NFS4_INVALID_STATEID_TYPE; 3078 return status; 3079 } 3080 if (nfs_open_stateid_recover_openmode(state)) 3081 return -NFS4ERR_OPENMODE; 3082 return NFS_OK; 3083 } 3084 3085 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 3086 { 3087 int status; 3088 3089 status = nfs41_check_delegation_stateid(state); 3090 if (status != NFS_OK) 3091 return status; 3092 nfs41_delegation_recover_stateid(state); 3093 3094 status = nfs41_check_expired_locks(state); 3095 if (status != NFS_OK) 3096 return status; 3097 status = nfs41_check_open_stateid(state); 3098 if (status != NFS_OK) 3099 status = nfs4_open_expired(sp, state); 3100 return status; 3101 } 3102 #endif 3103 3104 /* 3105 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-* 3106 * fields corresponding to attributes that were used to store the verifier. 3107 * Make sure we clobber those fields in the later setattr call 3108 */ 3109 static unsigned nfs4_exclusive_attrset(struct nfs4_opendata *opendata, 3110 struct iattr *sattr, struct nfs4_label **label) 3111 { 3112 const __u32 *bitmask = opendata->o_arg.server->exclcreat_bitmask; 3113 __u32 attrset[3]; 3114 unsigned ret; 3115 unsigned i; 3116 3117 for (i = 0; i < ARRAY_SIZE(attrset); i++) { 3118 attrset[i] = opendata->o_res.attrset[i]; 3119 if (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE4_1) 3120 attrset[i] &= ~bitmask[i]; 3121 } 3122 3123 ret = (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE) ? 
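/* NFSv4.0 EXCLUSIVE creates cannot carry attributes in the OPEN itself, so every requested attribute still has to be applied by the follow-up SETATTR. */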
3124 sattr->ia_valid : 0; 3125 3126 if ((attrset[1] & (FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET))) { 3127 if (sattr->ia_valid & ATTR_ATIME_SET) 3128 ret |= ATTR_ATIME_SET; 3129 else 3130 ret |= ATTR_ATIME; 3131 } 3132 3133 if ((attrset[1] & (FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET))) { 3134 if (sattr->ia_valid & ATTR_MTIME_SET) 3135 ret |= ATTR_MTIME_SET; 3136 else 3137 ret |= ATTR_MTIME; 3138 } 3139 3140 if (!(attrset[2] & FATTR4_WORD2_SECURITY_LABEL)) 3141 *label = NULL; 3142 return ret; 3143 } 3144 3145 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, 3146 struct nfs_open_context *ctx) 3147 { 3148 struct nfs4_state_owner *sp = opendata->owner; 3149 struct nfs_server *server = sp->so_server; 3150 struct dentry *dentry; 3151 struct nfs4_state *state; 3152 fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx); 3153 struct inode *dir = d_inode(opendata->dir); 3154 unsigned long dir_verifier; 3155 int ret; 3156 3157 dir_verifier = nfs_save_change_attribute(dir); 3158 3159 ret = _nfs4_proc_open(opendata, ctx); 3160 if (ret != 0) 3161 goto out; 3162 3163 state = _nfs4_opendata_to_nfs4_state(opendata); 3164 ret = PTR_ERR(state); 3165 if (IS_ERR(state)) 3166 goto out; 3167 ctx->state = state; 3168 if (server->caps & NFS_CAP_POSIX_LOCK) 3169 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); 3170 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK) 3171 set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags); 3172 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_PRESERVE_UNLINKED) 3173 set_bit(NFS_INO_PRESERVE_UNLINKED, &NFS_I(state->inode)->flags); 3174 3175 dentry = opendata->dentry; 3176 if (d_really_is_negative(dentry)) { 3177 struct dentry *alias; 3178 d_drop(dentry); 3179 alias = d_splice_alias(igrab(state->inode), dentry); 3180 /* d_splice_alias() can't fail here - it's a non-directory */ 3181 if (alias) { 3182 dput(ctx->dentry); 3183 ctx->dentry = dentry = alias; 3184 } 3185 } 3186 3187 switch(opendata->o_arg.claim) { 3188 default: 3189 break; 3190 case NFS4_OPEN_CLAIM_NULL: 3191 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 3192 case NFS4_OPEN_CLAIM_DELEGATE_PREV: 3193 if (!opendata->rpc_done) 3194 break; 3195 if (opendata->o_res.delegation.type != 0) 3196 dir_verifier = nfs_save_change_attribute(dir); 3197 nfs_set_verifier(dentry, dir_verifier); 3198 } 3199 3200 /* Parse layoutget results before we check for access */ 3201 pnfs_parse_lgopen(state->inode, opendata->lgp, ctx); 3202 3203 ret = nfs4_opendata_access(sp->so_cred, opendata, state, acc_mode); 3204 if (ret != 0) 3205 goto out; 3206 3207 if (d_inode(dentry) == state->inode) 3208 nfs_inode_attach_open_context(ctx); 3209 3210 out: 3211 if (!opendata->cancelled) { 3212 if (opendata->lgp) { 3213 nfs4_lgopen_release(opendata->lgp); 3214 opendata->lgp = NULL; 3215 } 3216 nfs4_sequence_free_slot(&opendata->o_res.seq_res); 3217 } 3218 return ret; 3219 } 3220 3221 /* 3222 * Returns a referenced nfs4_state 3223 */ 3224 static int _nfs4_do_open(struct inode *dir, 3225 struct nfs_open_context *ctx, 3226 int flags, 3227 const struct nfs4_open_createattrs *c, 3228 int *opened) 3229 { 3230 struct nfs4_state_owner *sp; 3231 struct nfs4_state *state = NULL; 3232 struct nfs_server *server = NFS_SERVER(dir); 3233 struct nfs4_opendata *opendata; 3234 struct dentry *dentry = ctx->dentry; 3235 const struct cred *cred = ctx->cred; 3236 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold; 3237 fmode_t fmode = _nfs4_ctx_to_openmode(ctx); 3238 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL; 3239 struct iattr *sattr = 
c->sattr; 3240 struct nfs4_label *label = c->label; 3241 int status; 3242 3243 /* Protect against reboot recovery conflicts */ 3244 status = -ENOMEM; 3245 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL); 3246 if (sp == NULL) { 3247 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n"); 3248 goto out_err; 3249 } 3250 status = nfs4_client_recover_expired_lease(server->nfs_client); 3251 if (status != 0) 3252 goto err_put_state_owner; 3253 if (d_really_is_positive(dentry)) 3254 nfs4_return_incompatible_delegation(d_inode(dentry), fmode); 3255 status = -ENOMEM; 3256 if (d_really_is_positive(dentry)) 3257 claim = NFS4_OPEN_CLAIM_FH; 3258 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, 3259 c, claim, GFP_KERNEL); 3260 if (opendata == NULL) 3261 goto err_put_state_owner; 3262 3263 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) { 3264 if (!opendata->f_attr.mdsthreshold) { 3265 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); 3266 if (!opendata->f_attr.mdsthreshold) 3267 goto err_opendata_put; 3268 } 3269 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; 3270 } 3271 if (d_really_is_positive(dentry)) 3272 opendata->state = nfs4_get_open_state(d_inode(dentry), sp); 3273 3274 status = _nfs4_open_and_get_state(opendata, ctx); 3275 if (status != 0) 3276 goto err_opendata_put; 3277 state = ctx->state; 3278 3279 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) && 3280 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) { 3281 unsigned attrs = nfs4_exclusive_attrset(opendata, sattr, &label); 3282 /* 3283 * send create attributes which was not set by open 3284 * with an extra setattr. 3285 */ 3286 if (attrs || label) { 3287 unsigned ia_old = sattr->ia_valid; 3288 3289 sattr->ia_valid = attrs; 3290 nfs_fattr_init(opendata->o_res.f_attr); 3291 status = nfs4_do_setattr(state->inode, cred, 3292 opendata->o_res.f_attr, sattr, 3293 ctx, label); 3294 if (status == 0) { 3295 nfs_setattr_update_inode(state->inode, sattr, 3296 opendata->o_res.f_attr); 3297 nfs_setsecurity(state->inode, opendata->o_res.f_attr); 3298 } 3299 sattr->ia_valid = ia_old; 3300 } 3301 } 3302 if (opened && opendata->file_created) 3303 *opened = 1; 3304 3305 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) { 3306 *ctx_th = opendata->f_attr.mdsthreshold; 3307 opendata->f_attr.mdsthreshold = NULL; 3308 } 3309 3310 nfs4_opendata_put(opendata); 3311 nfs4_put_state_owner(sp); 3312 return 0; 3313 err_opendata_put: 3314 nfs4_opendata_put(opendata); 3315 err_put_state_owner: 3316 nfs4_put_state_owner(sp); 3317 out_err: 3318 return status; 3319 } 3320 3321 3322 static struct nfs4_state *nfs4_do_open(struct inode *dir, 3323 struct nfs_open_context *ctx, 3324 int flags, 3325 struct iattr *sattr, 3326 struct nfs4_label *label, 3327 int *opened) 3328 { 3329 struct nfs_server *server = NFS_SERVER(dir); 3330 struct nfs4_exception exception = { 3331 .interruptible = true, 3332 }; 3333 struct nfs4_state *res; 3334 struct nfs4_open_createattrs c = { 3335 .label = label, 3336 .sattr = sattr, 3337 .verf = { 3338 [0] = (__u32)jiffies, 3339 [1] = (__u32)current->pid, 3340 }, 3341 }; 3342 int status; 3343 3344 do { 3345 status = _nfs4_do_open(dir, ctx, flags, &c, opened); 3346 res = ctx->state; 3347 trace_nfs4_open_file(ctx, flags, status); 3348 if (status == 0) 3349 break; 3350 /* NOTE: BAD_SEQID means the server and client disagree about the 3351 * book-keeping w.r.t. state-changing operations 3352 * (OPEN/CLOSE/LOCK/LOCKU...) 
3353 * It is actually a sign of a bug on the client or on the server. 3354 * 3355 * If we receive a BAD_SEQID error in the particular case of 3356 * doing an OPEN, we assume that nfs_increment_open_seqid() will 3357 * have unhashed the old state_owner for us, and that we can 3358 * therefore safely retry using a new one. We should still warn 3359 * the user though... 3360 */ 3361 if (status == -NFS4ERR_BAD_SEQID) { 3362 pr_warn_ratelimited("NFS: v4 server %s" 3363 " returned a bad sequence-id error!\n", 3364 NFS_SERVER(dir)->nfs_client->cl_hostname); 3365 exception.retry = 1; 3366 continue; 3367 } 3368 /* 3369 * BAD_STATEID on OPEN means that the server cancelled our 3370 * state before it received the OPEN_CONFIRM. 3371 * Recover by retrying the request as per the discussion 3372 * on Page 181 of RFC3530. 3373 */ 3374 if (status == -NFS4ERR_BAD_STATEID) { 3375 exception.retry = 1; 3376 continue; 3377 } 3378 if (status == -NFS4ERR_EXPIRED) { 3379 nfs4_schedule_lease_recovery(server->nfs_client); 3380 exception.retry = 1; 3381 continue; 3382 } 3383 if (status == -EAGAIN) { 3384 /* We must have found a delegation */ 3385 exception.retry = 1; 3386 continue; 3387 } 3388 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception)) 3389 continue; 3390 res = ERR_PTR(nfs4_handle_exception(server, 3391 status, &exception)); 3392 } while (exception.retry); 3393 return res; 3394 } 3395 3396 static int _nfs4_do_setattr(struct inode *inode, 3397 struct nfs_setattrargs *arg, 3398 struct nfs_setattrres *res, 3399 const struct cred *cred, 3400 struct nfs_open_context *ctx) 3401 { 3402 struct nfs_server *server = NFS_SERVER(inode); 3403 struct rpc_message msg = { 3404 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 3405 .rpc_argp = arg, 3406 .rpc_resp = res, 3407 .rpc_cred = cred, 3408 }; 3409 const struct cred *delegation_cred = NULL; 3410 unsigned long timestamp = jiffies; 3411 bool truncate; 3412 int status; 3413 3414 nfs_fattr_init(res->fattr); 3415 3416 /* Servers should only apply open mode checks for file size changes */ 3417 truncate = (arg->iap->ia_valid & ATTR_SIZE) ?
true : false; 3418 if (!truncate) { 3419 nfs4_inode_make_writeable(inode); 3420 goto zero_stateid; 3421 } 3422 3423 if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) { 3424 /* Use that stateid */ 3425 } else if (ctx != NULL && ctx->state) { 3426 struct nfs_lock_context *l_ctx; 3427 if (!nfs4_valid_open_stateid(ctx->state)) 3428 return -EBADF; 3429 l_ctx = nfs_get_lock_context(ctx); 3430 if (IS_ERR(l_ctx)) 3431 return PTR_ERR(l_ctx); 3432 status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx, 3433 &arg->stateid, &delegation_cred); 3434 nfs_put_lock_context(l_ctx); 3435 if (status == -EIO) 3436 return -EBADF; 3437 else if (status == -EAGAIN) 3438 goto zero_stateid; 3439 } else { 3440 zero_stateid: 3441 nfs4_stateid_copy(&arg->stateid, &zero_stateid); 3442 } 3443 if (delegation_cred) 3444 msg.rpc_cred = delegation_cred; 3445 3446 status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1); 3447 3448 put_cred(delegation_cred); 3449 if (status == 0 && ctx != NULL) 3450 renew_lease(server, timestamp); 3451 trace_nfs4_setattr(inode, &arg->stateid, status); 3452 return status; 3453 } 3454 3455 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred, 3456 struct nfs_fattr *fattr, struct iattr *sattr, 3457 struct nfs_open_context *ctx, struct nfs4_label *ilabel) 3458 { 3459 struct nfs_server *server = NFS_SERVER(inode); 3460 __u32 bitmask[NFS4_BITMASK_SZ]; 3461 struct nfs4_state *state = ctx ? ctx->state : NULL; 3462 struct nfs_setattrargs arg = { 3463 .fh = NFS_FH(inode), 3464 .iap = sattr, 3465 .server = server, 3466 .bitmask = bitmask, 3467 .label = ilabel, 3468 }; 3469 struct nfs_setattrres res = { 3470 .fattr = fattr, 3471 .server = server, 3472 }; 3473 struct nfs4_exception exception = { 3474 .state = state, 3475 .inode = inode, 3476 .stateid = &arg.stateid, 3477 }; 3478 unsigned long adjust_flags = NFS_INO_INVALID_CHANGE | 3479 NFS_INO_INVALID_CTIME; 3480 int err; 3481 3482 if (sattr->ia_valid & (ATTR_MODE | ATTR_KILL_SUID | ATTR_KILL_SGID)) 3483 adjust_flags |= NFS_INO_INVALID_MODE; 3484 if (sattr->ia_valid & (ATTR_UID | ATTR_GID)) 3485 adjust_flags |= NFS_INO_INVALID_OTHER; 3486 if (sattr->ia_valid & ATTR_ATIME) 3487 adjust_flags |= NFS_INO_INVALID_ATIME; 3488 if (sattr->ia_valid & ATTR_MTIME) 3489 adjust_flags |= NFS_INO_INVALID_MTIME; 3490 3491 do { 3492 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label), 3493 inode, adjust_flags); 3494 3495 err = _nfs4_do_setattr(inode, &arg, &res, cred, ctx); 3496 switch (err) { 3497 case -NFS4ERR_OPENMODE: 3498 if (!(sattr->ia_valid & ATTR_SIZE)) { 3499 pr_warn_once("NFSv4: server %s is incorrectly " 3500 "applying open mode checks to " 3501 "a SETATTR that is not " 3502 "changing file size.\n", 3503 server->nfs_client->cl_hostname); 3504 } 3505 if (state && !(state->state & FMODE_WRITE)) { 3506 err = -EBADF; 3507 if (sattr->ia_valid & ATTR_OPEN) 3508 err = -EACCES; 3509 goto out; 3510 } 3511 } 3512 err = nfs4_handle_exception(server, err, &exception); 3513 } while (exception.retry); 3514 out: 3515 return err; 3516 } 3517 3518 static bool 3519 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task) 3520 { 3521 if (inode == NULL || !nfs_have_layout(inode)) 3522 return false; 3523 3524 return pnfs_wait_on_layoutreturn(inode, task); 3525 } 3526 3527 /* 3528 * Update the seqid of an open stateid 3529 */ 3530 static void nfs4_sync_open_stateid(nfs4_stateid *dst, 3531 struct nfs4_state *state) 3532 { 3533 __be32 seqid_open; 3534 u32 
dst_seqid; 3535 int seq; 3536 3537 for (;;) { 3538 if (!nfs4_valid_open_stateid(state)) 3539 break; 3540 seq = read_seqbegin(&state->seqlock); 3541 if (!nfs4_state_match_open_stateid_other(state, dst)) { 3542 nfs4_stateid_copy(dst, &state->open_stateid); 3543 if (read_seqretry(&state->seqlock, seq)) 3544 continue; 3545 break; 3546 } 3547 seqid_open = state->open_stateid.seqid; 3548 if (read_seqretry(&state->seqlock, seq)) 3549 continue; 3550 3551 dst_seqid = be32_to_cpu(dst->seqid); 3552 if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) < 0) 3553 dst->seqid = seqid_open; 3554 break; 3555 } 3556 } 3557 3558 /* 3559 * Update the seqid of an open stateid after receiving 3560 * NFS4ERR_OLD_STATEID 3561 */ 3562 static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst, 3563 struct nfs4_state *state) 3564 { 3565 __be32 seqid_open; 3566 u32 dst_seqid; 3567 bool ret; 3568 int seq, status = -EAGAIN; 3569 DEFINE_WAIT(wait); 3570 3571 for (;;) { 3572 ret = false; 3573 if (!nfs4_valid_open_stateid(state)) 3574 break; 3575 seq = read_seqbegin(&state->seqlock); 3576 if (!nfs4_state_match_open_stateid_other(state, dst)) { 3577 if (read_seqretry(&state->seqlock, seq)) 3578 continue; 3579 break; 3580 } 3581 3582 write_seqlock(&state->seqlock); 3583 seqid_open = state->open_stateid.seqid; 3584 3585 dst_seqid = be32_to_cpu(dst->seqid); 3586 3587 /* Did another OPEN bump the state's seqid? try again: */ 3588 if ((s32)(be32_to_cpu(seqid_open) - dst_seqid) > 0) { 3589 dst->seqid = seqid_open; 3590 write_sequnlock(&state->seqlock); 3591 ret = true; 3592 break; 3593 } 3594 3595 /* server says we're behind but we haven't seen the update yet */ 3596 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags); 3597 prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE); 3598 write_sequnlock(&state->seqlock); 3599 trace_nfs4_close_stateid_update_wait(state->inode, dst, 0); 3600 3601 if (fatal_signal_pending(current) || nfs_current_task_exiting()) 3602 status = -EINTR; 3603 else 3604 if (schedule_timeout(5*HZ) != 0) 3605 status = 0; 3606 3607 finish_wait(&state->waitq, &wait); 3608 3609 if (!status) 3610 continue; 3611 if (status == -EINTR) 3612 break; 3613 3614 /* we slept the whole 5 seconds, we must have lost a seqid */ 3615 dst->seqid = cpu_to_be32(dst_seqid + 1); 3616 ret = true; 3617 break; 3618 } 3619 3620 return ret; 3621 } 3622 3623 struct nfs4_closedata { 3624 struct inode *inode; 3625 struct nfs4_state *state; 3626 struct nfs_closeargs arg; 3627 struct nfs_closeres res; 3628 struct { 3629 struct nfs4_layoutreturn_args arg; 3630 struct nfs4_layoutreturn_res res; 3631 struct nfs4_xdr_opaque_data ld_private; 3632 u32 roc_barrier; 3633 bool roc; 3634 } lr; 3635 struct nfs_fattr fattr; 3636 unsigned long timestamp; 3637 }; 3638 3639 static void nfs4_free_closedata(void *data) 3640 { 3641 struct nfs4_closedata *calldata = data; 3642 struct nfs4_state_owner *sp = calldata->state->owner; 3643 struct super_block *sb = calldata->state->inode->i_sb; 3644 3645 if (calldata->lr.roc) 3646 pnfs_roc_release(&calldata->lr.arg, &calldata->lr.res, 3647 calldata->res.lr_ret); 3648 nfs4_put_open_state(calldata->state); 3649 nfs_free_seqid(calldata->arg.seqid); 3650 nfs4_put_state_owner(sp); 3651 nfs_sb_deactive(sb); 3652 kfree(calldata); 3653 } 3654 3655 static void nfs4_close_done(struct rpc_task *task, void *data) 3656 { 3657 struct nfs4_closedata *calldata = data; 3658 struct nfs4_state *state = calldata->state; 3659 struct nfs_server *server = NFS_SERVER(calldata->inode); 3660 nfs4_stateid *res_stateid = NULL; 3661 struct nfs4_exception 
exception = { 3662 .state = state, 3663 .inode = calldata->inode, 3664 .stateid = &calldata->arg.stateid, 3665 }; 3666 3667 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 3668 return; 3669 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status); 3670 3671 /* Handle Layoutreturn errors */ 3672 if (pnfs_roc_done(task, &calldata->arg.lr_args, &calldata->res.lr_res, 3673 &calldata->res.lr_ret) == -EAGAIN) 3674 goto out_restart; 3675 3676 /* hmm. we are done with the inode, and in the process of freeing 3677 * the state_owner. we keep this around to process errors 3678 */ 3679 switch (task->tk_status) { 3680 case 0: 3681 res_stateid = &calldata->res.stateid; 3682 renew_lease(server, calldata->timestamp); 3683 break; 3684 case -NFS4ERR_ACCESS: 3685 if (calldata->arg.bitmask != NULL) { 3686 calldata->arg.bitmask = NULL; 3687 calldata->res.fattr = NULL; 3688 goto out_restart; 3689 3690 } 3691 break; 3692 case -NFS4ERR_OLD_STATEID: 3693 /* Did we race with OPEN? */ 3694 if (nfs4_refresh_open_old_stateid(&calldata->arg.stateid, 3695 state)) 3696 goto out_restart; 3697 goto out_release; 3698 case -NFS4ERR_ADMIN_REVOKED: 3699 case -NFS4ERR_STALE_STATEID: 3700 case -NFS4ERR_EXPIRED: 3701 nfs4_free_revoked_stateid(server, 3702 &calldata->arg.stateid, 3703 task->tk_msg.rpc_cred); 3704 fallthrough; 3705 case -NFS4ERR_BAD_STATEID: 3706 if (calldata->arg.fmode == 0) 3707 break; 3708 fallthrough; 3709 default: 3710 task->tk_status = nfs4_async_handle_exception(task, 3711 server, task->tk_status, &exception); 3712 if (exception.retry) 3713 goto out_restart; 3714 } 3715 nfs_clear_open_stateid(state, &calldata->arg.stateid, 3716 res_stateid, calldata->arg.fmode); 3717 out_release: 3718 task->tk_status = 0; 3719 nfs_release_seqid(calldata->arg.seqid); 3720 nfs_refresh_inode(calldata->inode, &calldata->fattr); 3721 dprintk("%s: ret = %d\n", __func__, task->tk_status); 3722 return; 3723 out_restart: 3724 task->tk_status = 0; 3725 rpc_restart_call_prepare(task); 3726 goto out_release; 3727 } 3728 3729 static void nfs4_close_prepare(struct rpc_task *task, void *data) 3730 { 3731 struct nfs4_closedata *calldata = data; 3732 struct nfs4_state *state = calldata->state; 3733 struct inode *inode = calldata->inode; 3734 struct nfs_server *server = NFS_SERVER(inode); 3735 struct pnfs_layout_hdr *lo; 3736 bool is_rdonly, is_wronly, is_rdwr; 3737 int call_close = 0; 3738 3739 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 3740 goto out_wait; 3741 3742 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; 3743 spin_lock(&state->owner->so_lock); 3744 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags); 3745 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags); 3746 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags); 3747 /* Calculate the change in open mode */ 3748 calldata->arg.fmode = 0; 3749 if (state->n_rdwr == 0) { 3750 if (state->n_rdonly == 0) 3751 call_close |= is_rdonly; 3752 else if (is_rdonly) 3753 calldata->arg.fmode |= FMODE_READ; 3754 if (state->n_wronly == 0) 3755 call_close |= is_wronly; 3756 else if (is_wronly) 3757 calldata->arg.fmode |= FMODE_WRITE; 3758 if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE)) 3759 call_close |= is_rdwr; 3760 } else if (is_rdwr) 3761 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE; 3762 3763 nfs4_sync_open_stateid(&calldata->arg.stateid, state); 3764 if (!nfs4_valid_open_stateid(state)) 3765 call_close = 0; 3766 spin_unlock(&state->owner->so_lock); 3767 3768 if (!call_close) { 3769 /* Note: exit _without_ calling 
nfs4_close_done */ 3770 goto out_no_action; 3771 } 3772 3773 if (!calldata->lr.roc && nfs4_wait_on_layoutreturn(inode, task)) { 3774 nfs_release_seqid(calldata->arg.seqid); 3775 goto out_wait; 3776 } 3777 3778 lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL; 3779 if (lo && !pnfs_layout_is_valid(lo)) { 3780 calldata->arg.lr_args = NULL; 3781 calldata->res.lr_res = NULL; 3782 } 3783 3784 if (calldata->arg.fmode == 0) 3785 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; 3786 3787 if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) { 3788 /* Close-to-open cache consistency revalidation */ 3789 if (!nfs4_have_delegation(inode, FMODE_READ, 0)) { 3790 nfs4_bitmask_set(calldata->arg.bitmask_store, 3791 server->cache_consistency_bitmask, 3792 inode, 0); 3793 calldata->arg.bitmask = calldata->arg.bitmask_store; 3794 } else 3795 calldata->arg.bitmask = NULL; 3796 } 3797 3798 calldata->arg.share_access = 3799 nfs4_fmode_to_share_access(calldata->arg.fmode); 3800 3801 if (calldata->res.fattr == NULL) 3802 calldata->arg.bitmask = NULL; 3803 else if (calldata->arg.bitmask == NULL) 3804 calldata->res.fattr = NULL; 3805 calldata->timestamp = jiffies; 3806 if (nfs4_setup_sequence(NFS_SERVER(inode)->nfs_client, 3807 &calldata->arg.seq_args, 3808 &calldata->res.seq_res, 3809 task) != 0) 3810 nfs_release_seqid(calldata->arg.seqid); 3811 return; 3812 out_no_action: 3813 task->tk_action = NULL; 3814 out_wait: 3815 nfs4_sequence_done(task, &calldata->res.seq_res); 3816 } 3817 3818 static const struct rpc_call_ops nfs4_close_ops = { 3819 .rpc_call_prepare = nfs4_close_prepare, 3820 .rpc_call_done = nfs4_close_done, 3821 .rpc_release = nfs4_free_closedata, 3822 }; 3823 3824 /* 3825 * It is possible for data to be read/written from a mem-mapped file 3826 * after the sys_close call (which hits the vfs layer as a flush). 3827 * This means that we can't safely call nfsv4 close on a file until 3828 * the inode is cleared. This in turn means that we are not good 3829 * NFSv4 citizens - we do not indicate to the server to update the file's 3830 * share state even when we are done with one of the three share 3831 * stateid's in the inode. 3832 * 3833 * NOTE: Caller must be holding the sp->so_owner semaphore! 
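 *
 * A minimal, purely illustrative user-space sequence (file name arbitrary)
 * showing why the CLOSE has to wait for the inode:
 *
 *	fd = open("f", O_RDWR);			-> OPEN
 *	p = mmap(NULL, len, PROT_WRITE, MAP_SHARED, fd, 0);
 *	close(fd);				-> no CLOSE on the wire yet
 *	p[0] = 'x';				-> dirty page still needs the open stateid
 *	munmap(p, len);
 *	(inode finally released)		-> CLOSE or OPEN_DOWNGRADE via nfs4_do_close()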
3834 */ 3835 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) 3836 { 3837 struct nfs_server *server = NFS_SERVER(state->inode); 3838 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 3839 struct nfs4_closedata *calldata; 3840 struct nfs4_state_owner *sp = state->owner; 3841 struct rpc_task *task; 3842 struct rpc_message msg = { 3843 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE], 3844 .rpc_cred = state->owner->so_cred, 3845 }; 3846 struct rpc_task_setup task_setup_data = { 3847 .rpc_client = server->client, 3848 .rpc_message = &msg, 3849 .callback_ops = &nfs4_close_ops, 3850 .workqueue = nfsiod_workqueue, 3851 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 3852 }; 3853 int status = -ENOMEM; 3854 3855 if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE)) 3856 task_setup_data.flags |= RPC_TASK_MOVEABLE; 3857 3858 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP, 3859 &task_setup_data.rpc_client, &msg); 3860 3861 calldata = kzalloc(sizeof(*calldata), gfp_mask); 3862 if (calldata == NULL) 3863 goto out; 3864 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1, 0); 3865 calldata->inode = state->inode; 3866 calldata->state = state; 3867 calldata->arg.fh = NFS_FH(state->inode); 3868 if (!nfs4_copy_open_stateid(&calldata->arg.stateid, state)) 3869 goto out_free_calldata; 3870 /* Serialization for the sequence id */ 3871 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 3872 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask); 3873 if (IS_ERR(calldata->arg.seqid)) 3874 goto out_free_calldata; 3875 nfs_fattr_init(&calldata->fattr); 3876 calldata->arg.fmode = 0; 3877 calldata->lr.arg.ld_private = &calldata->lr.ld_private; 3878 calldata->res.fattr = &calldata->fattr; 3879 calldata->res.seqid = calldata->arg.seqid; 3880 calldata->res.server = server; 3881 calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT; 3882 calldata->lr.roc = pnfs_roc(state->inode, 3883 &calldata->lr.arg, &calldata->lr.res, msg.rpc_cred); 3884 if (calldata->lr.roc) { 3885 calldata->arg.lr_args = &calldata->lr.arg; 3886 calldata->res.lr_res = &calldata->lr.res; 3887 } 3888 nfs_sb_active(calldata->inode->i_sb); 3889 3890 msg.rpc_argp = &calldata->arg; 3891 msg.rpc_resp = &calldata->res; 3892 task_setup_data.callback_data = calldata; 3893 task = rpc_run_task(&task_setup_data); 3894 if (IS_ERR(task)) 3895 return PTR_ERR(task); 3896 status = 0; 3897 if (wait) 3898 status = rpc_wait_for_completion_task(task); 3899 rpc_put_task(task); 3900 return status; 3901 out_free_calldata: 3902 kfree(calldata); 3903 out: 3904 nfs4_put_open_state(state); 3905 nfs4_put_state_owner(sp); 3906 return status; 3907 } 3908 3909 static struct inode * 3910 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, 3911 int open_flags, struct iattr *attr, int *opened) 3912 { 3913 struct nfs4_state *state; 3914 struct nfs4_label l, *label; 3915 3916 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l); 3917 3918 /* Protect against concurrent sillydeletes */ 3919 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened); 3920 3921 nfs4_label_release_security(label); 3922 3923 if (IS_ERR(state)) 3924 return ERR_CAST(state); 3925 return state->inode; 3926 } 3927 3928 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) 3929 { 3930 struct dentry *dentry = ctx->dentry; 3931 if (ctx->state == NULL) 3932 return; 3933 if (dentry->d_flags & DCACHE_NFSFS_RENAMED) 3934 nfs4_inode_set_return_delegation_on_close(d_inode(dentry)); 
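	/*
	 * Roughly: the DCACHE_NFSFS_RENAMED check above covers sillyrenamed
	 * files (unlinked while still open) and asks for the delegation to be
	 * returned once the file is finally closed. The sync path below waits
	 * for the CLOSE/OPEN_DOWNGRADE reply when one is sent (wait != 0 in
	 * nfs4_do_close()), while the async path lets the RPC complete on the
	 * nfsiod workqueue.
	 */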
3935 if (is_sync) 3936 nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx)); 3937 else 3938 nfs4_close_state(ctx->state, _nfs4_ctx_to_openmode(ctx)); 3939 } 3940 3941 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL) 3942 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL) 3943 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_OPEN_ARGUMENTS - 1UL) 3944 3945 #define FATTR4_WORD2_NFS42_TIME_DELEG_MASK \ 3946 (FATTR4_WORD2_TIME_DELEG_MODIFY|FATTR4_WORD2_TIME_DELEG_ACCESS) 3947 static bool nfs4_server_delegtime_capable(struct nfs4_server_caps_res *res) 3948 { 3949 u32 share_access_want = res->open_caps.oa_share_access_want[0]; 3950 u32 attr_bitmask = res->attr_bitmask[2]; 3951 3952 return (share_access_want & NFS4_SHARE_WANT_DELEG_TIMESTAMPS) && 3953 ((attr_bitmask & FATTR4_WORD2_NFS42_TIME_DELEG_MASK) == 3954 FATTR4_WORD2_NFS42_TIME_DELEG_MASK); 3955 } 3956 3957 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 3958 { 3959 u32 minorversion = server->nfs_client->cl_minorversion; 3960 u32 bitmask[3] = { 3961 [0] = FATTR4_WORD0_SUPPORTED_ATTRS, 3962 }; 3963 struct nfs4_server_caps_arg args = { 3964 .fhandle = fhandle, 3965 .bitmask = bitmask, 3966 }; 3967 struct nfs4_server_caps_res res = {}; 3968 struct rpc_message msg = { 3969 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS], 3970 .rpc_argp = &args, 3971 .rpc_resp = &res, 3972 }; 3973 int status; 3974 int i; 3975 3976 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS | 3977 FATTR4_WORD0_FH_EXPIRE_TYPE | 3978 FATTR4_WORD0_LINK_SUPPORT | 3979 FATTR4_WORD0_SYMLINK_SUPPORT | 3980 FATTR4_WORD0_ACLSUPPORT | 3981 FATTR4_WORD0_CASE_INSENSITIVE | 3982 FATTR4_WORD0_CASE_PRESERVING; 3983 if (minorversion) 3984 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT; 3985 if (minorversion > 1) 3986 bitmask[2] |= FATTR4_WORD2_OPEN_ARGUMENTS; 3987 3988 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3989 if (status == 0) { 3990 bitmask[0] = (FATTR4_WORD0_SUPPORTED_ATTRS | 3991 FATTR4_WORD0_FH_EXPIRE_TYPE | 3992 FATTR4_WORD0_LINK_SUPPORT | 3993 FATTR4_WORD0_SYMLINK_SUPPORT | 3994 FATTR4_WORD0_ACLSUPPORT | 3995 FATTR4_WORD0_CASE_INSENSITIVE | 3996 FATTR4_WORD0_CASE_PRESERVING) & 3997 res.attr_bitmask[0]; 3998 /* Sanity check the server answers */ 3999 switch (minorversion) { 4000 case 0: 4001 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK; 4002 res.attr_bitmask[2] = 0; 4003 break; 4004 case 1: 4005 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK; 4006 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT & 4007 res.attr_bitmask[2]; 4008 break; 4009 case 2: 4010 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK; 4011 bitmask[2] = (FATTR4_WORD2_SUPPATTR_EXCLCREAT | 4012 FATTR4_WORD2_OPEN_ARGUMENTS) & 4013 res.attr_bitmask[2]; 4014 } 4015 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); 4016 server->caps &= ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS | 4017 NFS_CAP_SYMLINKS| NFS_CAP_SECURITY_LABEL); 4018 server->fattr_valid = NFS_ATTR_FATTR_V4; 4019 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL && 4020 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) 4021 server->caps |= NFS_CAP_ACLS; 4022 if (res.has_links != 0) 4023 server->caps |= NFS_CAP_HARDLINKS; 4024 if (res.has_symlinks != 0) 4025 server->caps |= NFS_CAP_SYMLINKS; 4026 if (res.case_insensitive) 4027 server->caps |= NFS_CAP_CASE_INSENSITIVE; 4028 if (res.case_preserving) 4029 server->caps |= NFS_CAP_CASE_PRESERVING; 4030 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 4031 if (res.attr_bitmask[2] & 
FATTR4_WORD2_SECURITY_LABEL) 4032 server->caps |= NFS_CAP_SECURITY_LABEL; 4033 #endif 4034 if (res.attr_bitmask[0] & FATTR4_WORD0_FS_LOCATIONS) 4035 server->caps |= NFS_CAP_FS_LOCATIONS; 4036 if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID)) 4037 server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID; 4038 if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE)) 4039 server->fattr_valid &= ~NFS_ATTR_FATTR_MODE; 4040 if (!(res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)) 4041 server->fattr_valid &= ~NFS_ATTR_FATTR_NLINK; 4042 if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER)) 4043 server->fattr_valid &= ~(NFS_ATTR_FATTR_OWNER | 4044 NFS_ATTR_FATTR_OWNER_NAME); 4045 if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)) 4046 server->fattr_valid &= ~(NFS_ATTR_FATTR_GROUP | 4047 NFS_ATTR_FATTR_GROUP_NAME); 4048 if (!(res.attr_bitmask[1] & FATTR4_WORD1_SPACE_USED)) 4049 server->fattr_valid &= ~NFS_ATTR_FATTR_SPACE_USED; 4050 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)) 4051 server->fattr_valid &= ~NFS_ATTR_FATTR_ATIME; 4052 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)) 4053 server->fattr_valid &= ~NFS_ATTR_FATTR_CTIME; 4054 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)) 4055 server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME; 4058 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_CREATE)) 4059 server->fattr_valid &= ~NFS_ATTR_FATTR_BTIME; 4060 memcpy(server->attr_bitmask_nl, res.attr_bitmask, 4061 sizeof(server->attr_bitmask)); 4062 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL; 4063 4064 if (res.open_caps.oa_share_access_want[0] & 4065 NFS4_SHARE_WANT_OPEN_XOR_DELEGATION) 4066 server->caps |= NFS_CAP_OPEN_XOR; 4067 if (nfs4_server_delegtime_capable(&res)) 4068 server->caps |= NFS_CAP_DELEGTIME; 4069 4070 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask)); 4071 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE; 4072 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; 4073 server->cache_consistency_bitmask[2] = 0; 4074 4075 /* Avoid a regression due to buggy server */ 4076 for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++) 4077 res.exclcreat_bitmask[i] &= res.attr_bitmask[i]; 4078 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask, 4079 sizeof(server->exclcreat_bitmask)); 4080 4081 server->acl_bitmask = res.acl_bitmask; 4082 server->fh_expire_type = res.fh_expire_type; 4083 } 4084 4085 return status; 4086 } 4087 4088 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 4089 { 4090 struct nfs4_exception exception = { 4091 .interruptible = true, 4092 }; 4093 int err; 4094 4095 nfs_server_set_init_caps(server); 4096 do { 4097 err = nfs4_handle_exception(server, 4098 _nfs4_server_capabilities(server, fhandle), 4099 &exception); 4100 } while (exception.retry); 4101 return err; 4102 } 4103 4104 static void test_fs_location_for_trunking(struct nfs4_fs_location *location, 4105 struct nfs_client *clp, 4106 struct nfs_server *server) 4107 { 4108 int i; 4109 4110 for (i = 0; i < location->nservers; i++) { 4111 struct nfs4_string *srv_loc = &location->servers[i]; 4112 struct sockaddr_storage addr; 4113 size_t addrlen; 4114 struct xprt_create xprt_args = { 4115 .ident = 0, 4116 .net = clp->cl_net, 4117 }; 4118 struct nfs4_add_xprt_data xprtdata = { 4119 .clp = clp, 4120 }; 4121 struct rpc_add_xprt_test rpcdata = { 4122 .add_xprt_test =
clp->cl_mvops->session_trunk, 4123 .data = &xprtdata, 4124 }; 4125 char *servername = NULL; 4126 4127 if (!srv_loc->len) 4128 continue; 4129 4130 addrlen = nfs_parse_server_name(srv_loc->data, srv_loc->len, 4131 &addr, sizeof(addr), 4132 clp->cl_net, server->port); 4133 if (!addrlen) 4134 return; 4135 xprt_args.dstaddr = (struct sockaddr *)&addr; 4136 xprt_args.addrlen = addrlen; 4137 servername = kmalloc(srv_loc->len + 1, GFP_KERNEL); 4138 if (!servername) 4139 return; 4140 memcpy(servername, srv_loc->data, srv_loc->len); 4141 servername[srv_loc->len] = '\0'; 4142 xprt_args.servername = servername; 4143 4144 xprtdata.cred = nfs4_get_clid_cred(clp); 4145 rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args, 4146 rpc_clnt_setup_test_and_add_xprt, 4147 &rpcdata); 4148 if (xprtdata.cred) 4149 put_cred(xprtdata.cred); 4150 kfree(servername); 4151 } 4152 } 4153 4154 static bool _is_same_nfs4_pathname(struct nfs4_pathname *path1, 4155 struct nfs4_pathname *path2) 4156 { 4157 int i; 4158 4159 if (path1->ncomponents != path2->ncomponents) 4160 return false; 4161 for (i = 0; i < path1->ncomponents; i++) { 4162 if (path1->components[i].len != path2->components[i].len) 4163 return false; 4164 if (memcmp(path1->components[i].data, path2->components[i].data, 4165 path1->components[i].len)) 4166 return false; 4167 } 4168 return true; 4169 } 4170 4171 static int _nfs4_discover_trunking(struct nfs_server *server, 4172 struct nfs_fh *fhandle) 4173 { 4174 struct nfs4_fs_locations *locations = NULL; 4175 struct page *page; 4176 const struct cred *cred; 4177 struct nfs_client *clp = server->nfs_client; 4178 const struct nfs4_state_maintenance_ops *ops = 4179 clp->cl_mvops->state_renewal_ops; 4180 int status = -ENOMEM, i; 4181 4182 cred = ops->get_state_renewal_cred(clp); 4183 if (cred == NULL) { 4184 cred = nfs4_get_clid_cred(clp); 4185 if (cred == NULL) 4186 return -ENOKEY; 4187 } 4188 4189 page = alloc_page(GFP_KERNEL); 4190 if (!page) 4191 goto out_put_cred; 4192 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 4193 if (!locations) 4194 goto out_free; 4195 locations->fattr = nfs_alloc_fattr(); 4196 if (!locations->fattr) 4197 goto out_free_2; 4198 4199 status = nfs4_proc_get_locations(server, fhandle, locations, page, 4200 cred); 4201 if (status) 4202 goto out_free_3; 4203 4204 for (i = 0; i < locations->nlocations; i++) { 4205 if (!_is_same_nfs4_pathname(&locations->fs_path, 4206 &locations->locations[i].rootpath)) 4207 continue; 4208 test_fs_location_for_trunking(&locations->locations[i], clp, 4209 server); 4210 } 4211 out_free_3: 4212 kfree(locations->fattr); 4213 out_free_2: 4214 kfree(locations); 4215 out_free: 4216 __free_page(page); 4217 out_put_cred: 4218 put_cred(cred); 4219 return status; 4220 } 4221 4222 static int nfs4_discover_trunking(struct nfs_server *server, 4223 struct nfs_fh *fhandle) 4224 { 4225 struct nfs4_exception exception = { 4226 .interruptible = true, 4227 }; 4228 struct nfs_client *clp = server->nfs_client; 4229 int err = 0; 4230 4231 if (!nfs4_has_session(clp)) 4232 goto out; 4233 do { 4234 err = nfs4_handle_exception(server, 4235 _nfs4_discover_trunking(server, fhandle), 4236 &exception); 4237 } while (exception.retry); 4238 out: 4239 return err; 4240 } 4241 4242 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 4243 struct nfs_fattr *fattr) 4244 { 4245 u32 bitmask[3] = { 4246 [0] = FATTR4_WORD0_TYPE | FATTR4_WORD0_CHANGE | 4247 FATTR4_WORD0_SIZE | FATTR4_WORD0_FSID, 4248 }; 4249 struct nfs4_lookup_root_arg args = { 4250 .bitmask = 
bitmask, 4251 }; 4252 struct nfs4_lookup_res res = { 4253 .server = server, 4254 .fattr = fattr, 4255 .fh = fhandle, 4256 }; 4257 struct rpc_message msg = { 4258 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT], 4259 .rpc_argp = &args, 4260 .rpc_resp = &res, 4261 }; 4262 4263 nfs_fattr_init(fattr); 4264 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4265 } 4266 4267 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 4268 struct nfs_fattr *fattr) 4269 { 4270 struct nfs4_exception exception = { 4271 .interruptible = true, 4272 }; 4273 int err; 4274 do { 4275 err = _nfs4_lookup_root(server, fhandle, fattr); 4276 trace_nfs4_lookup_root(server, fhandle, fattr, err); 4277 switch (err) { 4278 case 0: 4279 case -NFS4ERR_WRONGSEC: 4280 goto out; 4281 default: 4282 err = nfs4_handle_exception(server, err, &exception); 4283 } 4284 } while (exception.retry); 4285 out: 4286 return err; 4287 } 4288 4289 static int nfs4_lookup_root_sec(struct nfs_server *server, 4290 struct nfs_fh *fhandle, struct nfs_fattr *fattr, 4291 rpc_authflavor_t flavor) 4292 { 4293 struct rpc_auth_create_args auth_args = { 4294 .pseudoflavor = flavor, 4295 }; 4296 struct rpc_auth *auth; 4297 4298 auth = rpcauth_create(&auth_args, server->client); 4299 if (IS_ERR(auth)) 4300 return -EACCES; 4301 return nfs4_lookup_root(server, fhandle, fattr); 4302 } 4303 4304 /* 4305 * Retry pseudoroot lookup with various security flavors. We do this when: 4306 * 4307 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC 4308 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation 4309 * 4310 * Returns zero on success, or a negative NFS4ERR value, or a 4311 * negative errno value. 4312 */ 4313 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 4314 struct nfs_fattr *fattr) 4315 { 4316 /* Per 3530bis 15.33.5 */ 4317 static const rpc_authflavor_t flav_array[] = { 4318 RPC_AUTH_GSS_KRB5P, 4319 RPC_AUTH_GSS_KRB5I, 4320 RPC_AUTH_GSS_KRB5, 4321 RPC_AUTH_UNIX, /* courtesy */ 4322 RPC_AUTH_NULL, 4323 }; 4324 int status = -EPERM; 4325 size_t i; 4326 4327 if (server->auth_info.flavor_len > 0) { 4328 /* try each flavor specified by user */ 4329 for (i = 0; i < server->auth_info.flavor_len; i++) { 4330 status = nfs4_lookup_root_sec( 4331 server, fhandle, fattr, 4332 server->auth_info.flavors[i]); 4333 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 4334 continue; 4335 break; 4336 } 4337 } else { 4338 /* no flavors specified by user, try default list */ 4339 for (i = 0; i < ARRAY_SIZE(flav_array); i++) { 4340 status = nfs4_lookup_root_sec(server, fhandle, fattr, 4341 flav_array[i]); 4342 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 4343 continue; 4344 break; 4345 } 4346 } 4347 4348 /* 4349 * -EACCES could mean that the user doesn't have correct permissions 4350 * to access the mount. It could also mean that we tried to mount 4351 * with a gss auth flavor, but rpc.gssd isn't running. Either way, 4352 * existing mount programs don't handle -EACCES very well so it should 4353 * be mapped to -EPERM instead. 
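 *
 * With no sec= option supplied at mount time, the loop above simply walks
 * flav_array in order (krb5p, krb5i, krb5, sys, none) and stops at the
 * first flavor that does not fail with NFS4ERR_WRONGSEC or -EACCES.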
4354 */ 4355 if (status == -EACCES) 4356 status = -EPERM; 4357 return status; 4358 } 4359 4360 /** 4361 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot 4362 * @server: initialized nfs_server handle 4363 * @fhandle: we fill in the pseudo-fs root file handle 4364 * @fattr: we fill in a bare bones struct fattr 4365 * @auth_probe: probe the auth flavours 4366 * 4367 * Returns zero on success, or a negative errno. 4368 */ 4369 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle, 4370 struct nfs_fattr *fattr, bool auth_probe) 4371 { 4372 int status = 0; 4373 4374 if (!auth_probe) 4375 status = nfs4_lookup_root(server, fhandle, fattr); 4376 4377 if (auth_probe || status == NFS4ERR_WRONGSEC) 4378 status = server->nfs_client->cl_mvops->find_root_sec( 4379 server, fhandle, fattr); 4380 4381 return nfs4_map_errors(status); 4382 } 4383 4384 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh, 4385 struct nfs_fsinfo *info) 4386 { 4387 int error; 4388 struct nfs_fattr *fattr = info->fattr; 4389 4390 error = nfs4_server_capabilities(server, mntfh); 4391 if (error < 0) { 4392 dprintk("nfs4_get_root: getcaps error = %d\n", -error); 4393 return error; 4394 } 4395 4396 error = nfs4_proc_getattr(server, mntfh, fattr, NULL); 4397 if (error < 0) { 4398 dprintk("nfs4_get_root: getattr error = %d\n", -error); 4399 goto out; 4400 } 4401 4402 if (fattr->valid & NFS_ATTR_FATTR_FSID && 4403 !nfs_fsid_equal(&server->fsid, &fattr->fsid)) 4404 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid)); 4405 4406 out: 4407 return error; 4408 } 4409 4410 /* 4411 * Get locations and (maybe) other attributes of a referral. 4412 * Note that we'll actually follow the referral later when 4413 * we detect fsid mismatch in inode revalidation 4414 */ 4415 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir, 4416 const struct qstr *name, struct nfs_fattr *fattr, 4417 struct nfs_fh *fhandle) 4418 { 4419 int status = -ENOMEM; 4420 struct page *page = NULL; 4421 struct nfs4_fs_locations *locations = NULL; 4422 4423 page = alloc_page(GFP_KERNEL); 4424 if (page == NULL) 4425 goto out; 4426 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 4427 if (locations == NULL) 4428 goto out; 4429 4430 locations->fattr = fattr; 4431 4432 status = nfs4_proc_fs_locations(client, dir, name, locations, page); 4433 if (status != 0) 4434 goto out; 4435 4436 /* 4437 * If the fsid didn't change, this is a migration event, not a 4438 * referral. Cause us to drop into the exception handler, which 4439 * will kick off migration recovery. 
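 * Returning -NFS4ERR_MOVED from here feeds straight back into the
 * -NFS4ERR_MOVED case of nfs4_proc_lookup_common(), whose call to
 * nfs4_handle_exception() is what actually starts migration recovery.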
4440 */ 4441 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &fattr->fsid)) { 4442 dprintk("%s: server did not return a different fsid for" 4443 " a referral at %s\n", __func__, name->name); 4444 status = -NFS4ERR_MOVED; 4445 goto out; 4446 } 4447 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */ 4448 nfs_fixup_referral_attributes(fattr); 4449 memset(fhandle, 0, sizeof(struct nfs_fh)); 4450 out: 4451 if (page) 4452 __free_page(page); 4453 kfree(locations); 4454 return status; 4455 } 4456 4457 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, 4458 struct nfs_fattr *fattr, struct inode *inode) 4459 { 4460 __u32 bitmask[NFS4_BITMASK_SZ]; 4461 struct nfs4_getattr_arg args = { 4462 .fh = fhandle, 4463 .bitmask = bitmask, 4464 }; 4465 struct nfs4_getattr_res res = { 4466 .fattr = fattr, 4467 .server = server, 4468 }; 4469 struct rpc_message msg = { 4470 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 4471 .rpc_argp = &args, 4472 .rpc_resp = &res, 4473 }; 4474 unsigned short task_flags = 0; 4475 4476 if (nfs4_has_session(server->nfs_client)) 4477 task_flags = RPC_TASK_MOVEABLE; 4478 4479 /* Is this an attribute revalidation, subject to softreval? */ 4480 if (inode && (server->flags & NFS_MOUNT_SOFTREVAL)) 4481 task_flags |= RPC_TASK_TIMEOUT; 4482 4483 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label), inode, 0); 4484 nfs_fattr_init(fattr); 4485 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 4486 return nfs4_do_call_sync(server->client, server, &msg, 4487 &args.seq_args, &res.seq_res, task_flags); 4488 } 4489 4490 int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, 4491 struct nfs_fattr *fattr, struct inode *inode) 4492 { 4493 struct nfs4_exception exception = { 4494 .interruptible = true, 4495 }; 4496 int err; 4497 do { 4498 err = _nfs4_proc_getattr(server, fhandle, fattr, inode); 4499 trace_nfs4_getattr(server, fhandle, fattr, err); 4500 err = nfs4_handle_exception(server, err, 4501 &exception); 4502 } while (exception.retry); 4503 return err; 4504 } 4505 4506 /* 4507 * The file is not closed if it is opened due to a request to change 4508 * the size of the file. The open call will not be needed once the 4509 * VFS layer lookup-intents are implemented. 4510 * 4511 * Close is called when the inode is destroyed. 4512 * If we haven't opened the file for O_WRONLY, we 4513 * need to in the size_change case to obtain a stateid. 4514 * 4515 * Got race? 4516 * Because OPEN is always done by name in nfsv4, it is 4517 * possible that we opened a different file by the same 4518 * name. We can recognize this race condition, but we 4519 * can't do anything about it besides returning an error. 4520 * 4521 * This will be fixed with VFS changes (lookup-intent).
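 *
 * Purely illustrative instance of the race (hypothetical clients A and B):
 *
 *	client A: truncate("foo", 0)	-> LOOKUP "foo", then OPEN "foo" by name
 *	client B: rename("bar", "foo")	   lands between A's LOOKUP and A's OPEN
 *
 * A's OPEN (and the SETATTR that follows) now applies to what used to be
 * "bar"; we can spot the mismatch afterwards, but all we can do is fail
 * the original request.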
4522 */ 4523 static int 4524 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, 4525 struct iattr *sattr) 4526 { 4527 struct inode *inode = d_inode(dentry); 4528 const struct cred *cred = NULL; 4529 struct nfs_open_context *ctx = NULL; 4530 int status; 4531 4532 if (pnfs_ld_layoutret_on_setattr(inode) && 4533 sattr->ia_valid & ATTR_SIZE && 4534 sattr->ia_size < i_size_read(inode)) 4535 pnfs_commit_and_return_layout(inode); 4536 4537 nfs_fattr_init(fattr); 4538 4539 /* Deal with open(O_TRUNC) */ 4540 if (sattr->ia_valid & ATTR_OPEN) 4541 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME); 4542 4543 /* Optimization: if the end result is no change, don't RPC */ 4544 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0) 4545 return 0; 4546 4547 /* Search for an existing open(O_WRITE) file */ 4548 if (sattr->ia_valid & ATTR_FILE) { 4549 4550 ctx = nfs_file_open_context(sattr->ia_file); 4551 if (ctx) 4552 cred = ctx->cred; 4553 } 4554 4555 /* Return any delegations if we're going to change ACLs */ 4556 if ((sattr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0) 4557 nfs4_inode_make_writeable(inode); 4558 4559 status = nfs4_do_setattr(inode, cred, fattr, sattr, ctx, NULL); 4560 if (status == 0) { 4561 nfs_setattr_update_inode(inode, sattr, fattr); 4562 nfs_setsecurity(inode, fattr); 4563 } 4564 return status; 4565 } 4566 4567 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, 4568 struct dentry *dentry, const struct qstr *name, 4569 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4570 { 4571 struct nfs_server *server = NFS_SERVER(dir); 4572 int status; 4573 struct nfs4_lookup_arg args = { 4574 .bitmask = server->attr_bitmask, 4575 .dir_fh = NFS_FH(dir), 4576 .name = name, 4577 }; 4578 struct nfs4_lookup_res res = { 4579 .server = server, 4580 .fattr = fattr, 4581 .fh = fhandle, 4582 }; 4583 struct rpc_message msg = { 4584 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP], 4585 .rpc_argp = &args, 4586 .rpc_resp = &res, 4587 }; 4588 unsigned short task_flags = 0; 4589 4590 if (nfs_server_capable(dir, NFS_CAP_MOVEABLE)) 4591 task_flags = RPC_TASK_MOVEABLE; 4592 4593 /* Is this an attribute revalidation, subject to softreval?
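 * (Roughly: NFS_MOUNT_SOFTREVAL and RPC_TASK_TIMEOUT make the revalidation
 * LOOKUP give up at the RPC timeout instead of retrying forever, so a
 * cached dentry can still be used when the server is slow or unreachable.)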
*/ 4594 if (nfs_lookup_is_soft_revalidate(dentry)) 4595 task_flags |= RPC_TASK_TIMEOUT; 4596 4597 args.bitmask = nfs4_bitmask(server, fattr->label); 4598 4599 nfs_fattr_init(fattr); 4600 4601 dprintk("NFS call lookup %pd2\n", dentry); 4602 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 4603 status = nfs4_do_call_sync(clnt, server, &msg, 4604 &args.seq_args, &res.seq_res, task_flags); 4605 dprintk("NFS reply lookup: %d\n", status); 4606 return status; 4607 } 4608 4609 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr) 4610 { 4611 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 4612 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT; 4613 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 4614 fattr->nlink = 2; 4615 } 4616 4617 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir, 4618 struct dentry *dentry, const struct qstr *name, 4619 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4620 { 4621 struct nfs4_exception exception = { 4622 .interruptible = true, 4623 }; 4624 struct rpc_clnt *client = *clnt; 4625 int err; 4626 do { 4627 err = _nfs4_proc_lookup(client, dir, dentry, name, fhandle, fattr); 4628 trace_nfs4_lookup(dir, name, err); 4629 switch (err) { 4630 case -NFS4ERR_BADNAME: 4631 err = -ENOENT; 4632 goto out; 4633 case -NFS4ERR_MOVED: 4634 err = nfs4_get_referral(client, dir, name, fattr, fhandle); 4635 if (err == -NFS4ERR_MOVED) 4636 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 4637 goto out; 4638 case -NFS4ERR_WRONGSEC: 4639 err = -EPERM; 4640 if (client != *clnt) 4641 goto out; 4642 client = nfs4_negotiate_security(client, dir, name); 4643 if (IS_ERR(client)) 4644 return PTR_ERR(client); 4645 4646 exception.retry = 1; 4647 break; 4648 default: 4649 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 4650 } 4651 } while (exception.retry); 4652 4653 out: 4654 if (err == 0) 4655 *clnt = client; 4656 else if (client != *clnt) 4657 rpc_shutdown_client(client); 4658 4659 return err; 4660 } 4661 4662 static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry, const struct qstr *name, 4663 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4664 { 4665 int status; 4666 struct rpc_clnt *client = NFS_CLIENT(dir); 4667 4668 status = nfs4_proc_lookup_common(&client, dir, dentry, name, fhandle, fattr); 4669 if (client != NFS_CLIENT(dir)) { 4670 rpc_shutdown_client(client); 4671 nfs_fixup_secinfo_attributes(fattr); 4672 } 4673 return status; 4674 } 4675 4676 struct rpc_clnt * 4677 nfs4_proc_lookup_mountpoint(struct inode *dir, struct dentry *dentry, 4678 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4679 { 4680 struct rpc_clnt *client = NFS_CLIENT(dir); 4681 int status; 4682 4683 status = nfs4_proc_lookup_common(&client, dir, dentry, &dentry->d_name, 4684 fhandle, fattr); 4685 if (status < 0) 4686 return ERR_PTR(status); 4687 return (client == NFS_CLIENT(dir)) ? 
rpc_clone_client(client) : client; 4688 } 4689 4690 static int _nfs4_proc_lookupp(struct inode *inode, 4691 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4692 { 4693 struct rpc_clnt *clnt = NFS_CLIENT(inode); 4694 struct nfs_server *server = NFS_SERVER(inode); 4695 int status; 4696 struct nfs4_lookupp_arg args = { 4697 .bitmask = server->attr_bitmask, 4698 .fh = NFS_FH(inode), 4699 }; 4700 struct nfs4_lookupp_res res = { 4701 .server = server, 4702 .fattr = fattr, 4703 .fh = fhandle, 4704 }; 4705 struct rpc_message msg = { 4706 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUPP], 4707 .rpc_argp = &args, 4708 .rpc_resp = &res, 4709 }; 4710 unsigned short task_flags = 0; 4711 4712 if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL) 4713 task_flags |= RPC_TASK_TIMEOUT; 4714 4715 args.bitmask = nfs4_bitmask(server, fattr->label); 4716 4717 nfs_fattr_init(fattr); 4718 4719 dprintk("NFS call lookupp ino=0x%lx\n", inode->i_ino); 4720 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, 4721 &res.seq_res, task_flags); 4722 dprintk("NFS reply lookupp: %d\n", status); 4723 return status; 4724 } 4725 4726 static int nfs4_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle, 4727 struct nfs_fattr *fattr) 4728 { 4729 struct nfs4_exception exception = { 4730 .interruptible = true, 4731 }; 4732 int err; 4733 do { 4734 err = _nfs4_proc_lookupp(inode, fhandle, fattr); 4735 trace_nfs4_lookupp(inode, err); 4736 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4737 &exception); 4738 } while (exception.retry); 4739 return err; 4740 } 4741 4742 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry, 4743 const struct cred *cred) 4744 { 4745 struct nfs_server *server = NFS_SERVER(inode); 4746 struct nfs4_accessargs args = { 4747 .fh = NFS_FH(inode), 4748 .access = entry->mask, 4749 }; 4750 struct nfs4_accessres res = { 4751 .server = server, 4752 }; 4753 struct rpc_message msg = { 4754 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], 4755 .rpc_argp = &args, 4756 .rpc_resp = &res, 4757 .rpc_cred = cred, 4758 }; 4759 int status = 0; 4760 4761 if (!nfs4_have_delegation(inode, FMODE_READ, 0)) { 4762 res.fattr = nfs_alloc_fattr(); 4763 if (res.fattr == NULL) 4764 return -ENOMEM; 4765 args.bitmask = server->cache_consistency_bitmask; 4766 } 4767 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4768 if (!status) { 4769 nfs_access_set_mask(entry, res.access); 4770 if (res.fattr) 4771 nfs_refresh_inode(inode, res.fattr); 4772 } 4773 nfs_free_fattr(res.fattr); 4774 return status; 4775 } 4776 4777 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry, 4778 const struct cred *cred) 4779 { 4780 struct nfs4_exception exception = { 4781 .interruptible = true, 4782 }; 4783 int err; 4784 do { 4785 err = _nfs4_proc_access(inode, entry, cred); 4786 trace_nfs4_access(inode, err); 4787 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4788 &exception); 4789 } while (exception.retry); 4790 return err; 4791 } 4792 4793 /* 4794 * TODO: For the time being, we don't try to get any attributes 4795 * along with any of the zero-copy operations READ, READDIR, 4796 * READLINK, WRITE. 4797 * 4798 * In the case of the first three, we want to put the GETATTR 4799 * after the read-type operation -- this is because it is hard 4800 * to predict the length of a GETATTR response in v4, and thus 4801 * align the READ data correctly. 
This means that the GETATTR 4802 * may end up partially falling into the page cache, and we should 4803 * shift it into the 'tail' of the xdr_buf before processing. 4804 * To do this efficiently, we need to know the total length 4805 * of data received, which doesn't seem to be available outside 4806 * of the RPC layer. 4807 * 4808 * In the case of WRITE, we also want to put the GETATTR after 4809 * the operation -- in this case because we want to make sure 4810 * we get the post-operation mtime and size. 4811 * 4812 * Both of these changes to the XDR layer would in fact be quite 4813 * minor, but I decided to leave them for a subsequent patch. 4814 */ 4815 static int _nfs4_proc_readlink(struct inode *inode, struct page *page, 4816 unsigned int pgbase, unsigned int pglen) 4817 { 4818 struct nfs4_readlink args = { 4819 .fh = NFS_FH(inode), 4820 .pgbase = pgbase, 4821 .pglen = pglen, 4822 .pages = &page, 4823 }; 4824 struct nfs4_readlink_res res; 4825 struct rpc_message msg = { 4826 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK], 4827 .rpc_argp = &args, 4828 .rpc_resp = &res, 4829 }; 4830 4831 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0); 4832 } 4833 4834 static int nfs4_proc_readlink(struct inode *inode, struct page *page, 4835 unsigned int pgbase, unsigned int pglen) 4836 { 4837 struct nfs4_exception exception = { 4838 .interruptible = true, 4839 }; 4840 int err; 4841 do { 4842 err = _nfs4_proc_readlink(inode, page, pgbase, pglen); 4843 trace_nfs4_readlink(inode, err); 4844 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4845 &exception); 4846 } while (exception.retry); 4847 return err; 4848 } 4849 4850 /* 4851 * This is just for mknod. open(O_CREAT) will always do ->open_context(). 
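 * (Roughly: a regular file created by mknod(2)/mknodat(2) with S_IFREG
 * arrives here through ->create with no open context to piggy-back on;
 * FIFOs, sockets and device nodes go through nfs4_proc_mknod() further
 * down instead.)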
4852 */ 4853 static int 4854 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, 4855 int flags) 4856 { 4857 struct nfs_server *server = NFS_SERVER(dir); 4858 struct nfs4_label l, *ilabel; 4859 struct nfs_open_context *ctx; 4860 struct nfs4_state *state; 4861 int status = 0; 4862 4863 ctx = alloc_nfs_open_context(dentry, FMODE_READ, NULL); 4864 if (IS_ERR(ctx)) 4865 return PTR_ERR(ctx); 4866 4867 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l); 4868 4869 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 4870 sattr->ia_mode &= ~current_umask(); 4871 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL); 4872 if (IS_ERR(state)) { 4873 status = PTR_ERR(state); 4874 goto out; 4875 } 4876 out: 4877 nfs4_label_release_security(ilabel); 4878 put_nfs_open_context(ctx); 4879 return status; 4880 } 4881 4882 static int 4883 _nfs4_proc_remove(struct inode *dir, const struct qstr *name, u32 ftype) 4884 { 4885 struct nfs_server *server = NFS_SERVER(dir); 4886 struct nfs_removeargs args = { 4887 .fh = NFS_FH(dir), 4888 .name = *name, 4889 }; 4890 struct nfs_removeres res = { 4891 .server = server, 4892 }; 4893 struct rpc_message msg = { 4894 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE], 4895 .rpc_argp = &args, 4896 .rpc_resp = &res, 4897 }; 4898 unsigned long timestamp = jiffies; 4899 int status; 4900 4901 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 4902 if (status == 0) { 4903 spin_lock(&dir->i_lock); 4904 /* Removing a directory decrements nlink in the parent */ 4905 if (ftype == NF4DIR && dir->i_nlink > 2) 4906 nfs4_dec_nlink_locked(dir); 4907 nfs4_update_changeattr_locked(dir, &res.cinfo, timestamp, 4908 NFS_INO_INVALID_DATA); 4909 spin_unlock(&dir->i_lock); 4910 } 4911 return status; 4912 } 4913 4914 static int nfs4_proc_remove(struct inode *dir, struct dentry *dentry) 4915 { 4916 struct nfs4_exception exception = { 4917 .interruptible = true, 4918 }; 4919 struct inode *inode = d_inode(dentry); 4920 int err; 4921 4922 if (inode) { 4923 if (inode->i_nlink == 1) 4924 nfs4_inode_return_delegation(inode); 4925 else 4926 nfs4_inode_make_writeable(inode); 4927 } 4928 do { 4929 err = _nfs4_proc_remove(dir, &dentry->d_name, NF4REG); 4930 trace_nfs4_remove(dir, &dentry->d_name, err); 4931 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4932 &exception); 4933 } while (exception.retry); 4934 return err; 4935 } 4936 4937 static int nfs4_proc_rmdir(struct inode *dir, const struct qstr *name) 4938 { 4939 struct nfs4_exception exception = { 4940 .interruptible = true, 4941 }; 4942 int err; 4943 4944 do { 4945 err = _nfs4_proc_remove(dir, name, NF4DIR); 4946 trace_nfs4_remove(dir, name, err); 4947 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4948 &exception); 4949 } while (exception.retry); 4950 return err; 4951 } 4952 4953 static void nfs4_proc_unlink_setup(struct rpc_message *msg, 4954 struct dentry *dentry, 4955 struct inode *inode) 4956 { 4957 struct nfs_removeargs *args = msg->rpc_argp; 4958 struct nfs_removeres *res = msg->rpc_resp; 4959 4960 res->server = NFS_SB(dentry->d_sb); 4961 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE]; 4962 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1, 0); 4963 4964 nfs_fattr_init(res->dir_attr); 4965 4966 if (inode) { 4967 nfs4_inode_return_delegation(inode); 4968 nfs_d_prune_case_insensitive_aliases(inode); 4969 } 4970 } 4971 4972 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) 4973 { 4974 
{
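	/*
	 * Roughly: reserve the NFSv4.1 session slot (or the NFSv4.0 call
	 * sequencing) for this unlink before the request goes out on the wire.
	 */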
nfs4_setup_sequence(NFS_SB(data->dentry->d_sb)->nfs_client, 4975 &data->args.seq_args, 4976 &data->res.seq_res, 4977 task); 4978 } 4979 4980 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) 4981 { 4982 struct nfs_unlinkdata *data = task->tk_calldata; 4983 struct nfs_removeres *res = &data->res; 4984 4985 if (!nfs4_sequence_done(task, &res->seq_res)) 4986 return 0; 4987 if (nfs4_async_handle_error(task, res->server, NULL, 4988 &data->timeout) == -EAGAIN) 4989 return 0; 4990 if (task->tk_status == 0) 4991 nfs4_update_changeattr(dir, &res->cinfo, 4992 res->dir_attr->time_start, 4993 NFS_INO_INVALID_DATA); 4994 return 1; 4995 } 4996 4997 static void nfs4_proc_rename_setup(struct rpc_message *msg, 4998 struct dentry *old_dentry, 4999 struct dentry *new_dentry) 5000 { 5001 struct nfs_renameargs *arg = msg->rpc_argp; 5002 struct nfs_renameres *res = msg->rpc_resp; 5003 struct inode *old_inode = d_inode(old_dentry); 5004 struct inode *new_inode = d_inode(new_dentry); 5005 5006 if (old_inode) 5007 nfs4_inode_make_writeable(old_inode); 5008 if (new_inode) 5009 nfs4_inode_return_delegation(new_inode); 5010 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; 5011 res->server = NFS_SB(old_dentry->d_sb); 5012 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1, 0); 5013 } 5014 5015 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) 5016 { 5017 nfs4_setup_sequence(NFS_SERVER(data->old_dir)->nfs_client, 5018 &data->args.seq_args, 5019 &data->res.seq_res, 5020 task); 5021 } 5022 5023 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, 5024 struct inode *new_dir) 5025 { 5026 struct nfs_renamedata *data = task->tk_calldata; 5027 struct nfs_renameres *res = &data->res; 5028 5029 if (!nfs4_sequence_done(task, &res->seq_res)) 5030 return 0; 5031 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN) 5032 return 0; 5033 5034 if (task->tk_status == 0) { 5035 nfs_d_prune_case_insensitive_aliases(d_inode(data->old_dentry)); 5036 if (new_dir != old_dir) { 5037 /* Note: If we moved a directory, nlink will change */ 5038 nfs4_update_changeattr(old_dir, &res->old_cinfo, 5039 res->old_fattr->time_start, 5040 NFS_INO_INVALID_NLINK | 5041 NFS_INO_INVALID_DATA); 5042 nfs4_update_changeattr(new_dir, &res->new_cinfo, 5043 res->new_fattr->time_start, 5044 NFS_INO_INVALID_NLINK | 5045 NFS_INO_INVALID_DATA); 5046 } else 5047 nfs4_update_changeattr(old_dir, &res->old_cinfo, 5048 res->old_fattr->time_start, 5049 NFS_INO_INVALID_DATA); 5050 } 5051 return 1; 5052 } 5053 5054 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name) 5055 { 5056 struct nfs_server *server = NFS_SERVER(inode); 5057 __u32 bitmask[NFS4_BITMASK_SZ]; 5058 struct nfs4_link_arg arg = { 5059 .fh = NFS_FH(inode), 5060 .dir_fh = NFS_FH(dir), 5061 .name = name, 5062 .bitmask = bitmask, 5063 }; 5064 struct nfs4_link_res res = { 5065 .server = server, 5066 }; 5067 struct rpc_message msg = { 5068 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK], 5069 .rpc_argp = &arg, 5070 .rpc_resp = &res, 5071 }; 5072 int status = -ENOMEM; 5073 5074 res.fattr = nfs_alloc_fattr_with_label(server); 5075 if (res.fattr == NULL) 5076 goto out; 5077 5078 nfs4_inode_make_writeable(inode); 5079 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, res.fattr->label), 5080 inode, 5081 NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME); 5082 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 5083 if 
(!status) { 5084 nfs4_update_changeattr(dir, &res.cinfo, res.fattr->time_start, 5085 NFS_INO_INVALID_DATA); 5086 nfs4_inc_nlink(inode); 5087 status = nfs_post_op_update_inode(inode, res.fattr); 5088 if (!status) 5089 nfs_setsecurity(inode, res.fattr); 5090 } 5091 5092 out: 5093 nfs_free_fattr(res.fattr); 5094 return status; 5095 } 5096 5097 static int nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name) 5098 { 5099 struct nfs4_exception exception = { 5100 .interruptible = true, 5101 }; 5102 int err; 5103 do { 5104 err = nfs4_handle_exception(NFS_SERVER(inode), 5105 _nfs4_proc_link(inode, dir, name), 5106 &exception); 5107 } while (exception.retry); 5108 return err; 5109 } 5110 5111 struct nfs4_createdata { 5112 struct rpc_message msg; 5113 struct nfs4_create_arg arg; 5114 struct nfs4_create_res res; 5115 struct nfs_fh fh; 5116 struct nfs_fattr fattr; 5117 }; 5118 5119 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir, 5120 const struct qstr *name, struct iattr *sattr, u32 ftype) 5121 { 5122 struct nfs4_createdata *data; 5123 5124 data = kzalloc(sizeof(*data), GFP_KERNEL); 5125 if (data != NULL) { 5126 struct nfs_server *server = NFS_SERVER(dir); 5127 5128 data->fattr.label = nfs4_label_alloc(server, GFP_KERNEL); 5129 if (IS_ERR(data->fattr.label)) 5130 goto out_free; 5131 5132 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE]; 5133 data->msg.rpc_argp = &data->arg; 5134 data->msg.rpc_resp = &data->res; 5135 data->arg.dir_fh = NFS_FH(dir); 5136 data->arg.server = server; 5137 data->arg.name = name; 5138 data->arg.attrs = sattr; 5139 data->arg.ftype = ftype; 5140 data->arg.bitmask = nfs4_bitmask(server, data->fattr.label); 5141 data->arg.umask = current_umask(); 5142 data->res.server = server; 5143 data->res.fh = &data->fh; 5144 data->res.fattr = &data->fattr; 5145 nfs_fattr_init(data->res.fattr); 5146 } 5147 return data; 5148 out_free: 5149 kfree(data); 5150 return NULL; 5151 } 5152 5153 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data) 5154 { 5155 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 5156 &data->arg.seq_args, &data->res.seq_res, 1); 5157 if (status == 0) { 5158 spin_lock(&dir->i_lock); 5159 nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo, 5160 data->res.fattr->time_start, 5161 NFS_INO_INVALID_DATA); 5162 spin_unlock(&dir->i_lock); 5163 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr); 5164 } 5165 return status; 5166 } 5167 5168 static struct dentry *nfs4_do_mkdir(struct inode *dir, struct dentry *dentry, 5169 struct nfs4_createdata *data, int *statusp) 5170 { 5171 struct dentry *ret; 5172 5173 *statusp = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 5174 &data->arg.seq_args, &data->res.seq_res, 1); 5175 5176 if (*statusp) 5177 return NULL; 5178 5179 spin_lock(&dir->i_lock); 5180 /* Creating a directory bumps nlink in the parent */ 5181 nfs4_inc_nlink_locked(dir); 5182 nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo, 5183 data->res.fattr->time_start, 5184 NFS_INO_INVALID_DATA); 5185 spin_unlock(&dir->i_lock); 5186 ret = nfs_add_or_obtain(dentry, data->res.fh, data->res.fattr); 5187 if (!IS_ERR(ret)) 5188 return ret; 5189 *statusp = PTR_ERR(ret); 5190 return NULL; 5191 } 5192 5193 static void nfs4_free_createdata(struct nfs4_createdata *data) 5194 { 5195 nfs4_label_free(data->fattr.label); 5196 kfree(data); 5197 } 5198 5199 static int _nfs4_proc_symlink(struct inode *dir, struct dentry 
*dentry, 5200 struct folio *folio, unsigned int len, struct iattr *sattr, 5201 struct nfs4_label *label) 5202 { 5203 struct page *page = &folio->page; 5204 struct nfs4_createdata *data; 5205 int status = -ENAMETOOLONG; 5206 5207 if (len > NFS4_MAXPATHLEN) 5208 goto out; 5209 5210 status = -ENOMEM; 5211 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK); 5212 if (data == NULL) 5213 goto out; 5214 5215 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK]; 5216 data->arg.u.symlink.pages = &page; 5217 data->arg.u.symlink.len = len; 5218 data->arg.label = label; 5219 5220 status = nfs4_do_create(dir, dentry, data); 5221 5222 nfs4_free_createdata(data); 5223 out: 5224 return status; 5225 } 5226 5227 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 5228 struct folio *folio, unsigned int len, struct iattr *sattr) 5229 { 5230 struct nfs4_exception exception = { 5231 .interruptible = true, 5232 }; 5233 struct nfs4_label l, *label; 5234 int err; 5235 5236 label = nfs4_label_init_security(dir, dentry, sattr, &l); 5237 5238 do { 5239 err = _nfs4_proc_symlink(dir, dentry, folio, len, sattr, label); 5240 trace_nfs4_symlink(dir, &dentry->d_name, err); 5241 err = nfs4_handle_exception(NFS_SERVER(dir), err, 5242 &exception); 5243 } while (exception.retry); 5244 5245 nfs4_label_release_security(label); 5246 return err; 5247 } 5248 5249 static struct dentry *_nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 5250 struct iattr *sattr, 5251 struct nfs4_label *label, int *statusp) 5252 { 5253 struct nfs4_createdata *data; 5254 struct dentry *ret = NULL; 5255 5256 *statusp = -ENOMEM; 5257 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR); 5258 if (data == NULL) 5259 goto out; 5260 5261 data->arg.label = label; 5262 ret = nfs4_do_mkdir(dir, dentry, data, statusp); 5263 5264 nfs4_free_createdata(data); 5265 out: 5266 return ret; 5267 } 5268 5269 static struct dentry *nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 5270 struct iattr *sattr) 5271 { 5272 struct nfs_server *server = NFS_SERVER(dir); 5273 struct nfs4_exception exception = { 5274 .interruptible = true, 5275 }; 5276 struct nfs4_label l, *label; 5277 struct dentry *alias; 5278 int err; 5279 5280 label = nfs4_label_init_security(dir, dentry, sattr, &l); 5281 5282 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 5283 sattr->ia_mode &= ~current_umask(); 5284 do { 5285 alias = _nfs4_proc_mkdir(dir, dentry, sattr, label, &err); 5286 trace_nfs4_mkdir(dir, &dentry->d_name, err); 5287 if (err) 5288 alias = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir), 5289 err, 5290 &exception)); 5291 } while (exception.retry); 5292 nfs4_label_release_security(label); 5293 5294 return alias; 5295 } 5296 5297 static int _nfs4_proc_readdir(struct nfs_readdir_arg *nr_arg, 5298 struct nfs_readdir_res *nr_res) 5299 { 5300 struct inode *dir = d_inode(nr_arg->dentry); 5301 struct nfs_server *server = NFS_SERVER(dir); 5302 struct nfs4_readdir_arg args = { 5303 .fh = NFS_FH(dir), 5304 .pages = nr_arg->pages, 5305 .pgbase = 0, 5306 .count = nr_arg->page_len, 5307 .plus = nr_arg->plus, 5308 }; 5309 struct nfs4_readdir_res res; 5310 struct rpc_message msg = { 5311 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR], 5312 .rpc_argp = &args, 5313 .rpc_resp = &res, 5314 .rpc_cred = nr_arg->cred, 5315 }; 5316 int status; 5317 5318 dprintk("%s: dentry = %pd2, cookie = %llu\n", __func__, 5319 nr_arg->dentry, (unsigned long long)nr_arg->cookie); 5320 if (!(server->caps & NFS_CAP_SECURITY_LABEL)) 5321 
args.bitmask = server->attr_bitmask_nl; 5322 else 5323 args.bitmask = server->attr_bitmask; 5324 5325 nfs4_setup_readdir(nr_arg->cookie, nr_arg->verf, nr_arg->dentry, &args); 5326 res.pgbase = args.pgbase; 5327 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, 5328 &res.seq_res, 0); 5329 if (status >= 0) { 5330 memcpy(nr_res->verf, res.verifier.data, NFS4_VERIFIER_SIZE); 5331 status += args.pgbase; 5332 } 5333 5334 nfs_invalidate_atime(dir); 5335 5336 dprintk("%s: returns %d\n", __func__, status); 5337 return status; 5338 } 5339 5340 static int nfs4_proc_readdir(struct nfs_readdir_arg *arg, 5341 struct nfs_readdir_res *res) 5342 { 5343 struct nfs4_exception exception = { 5344 .interruptible = true, 5345 }; 5346 int err; 5347 do { 5348 err = _nfs4_proc_readdir(arg, res); 5349 trace_nfs4_readdir(d_inode(arg->dentry), err); 5350 err = nfs4_handle_exception(NFS_SERVER(d_inode(arg->dentry)), 5351 err, &exception); 5352 } while (exception.retry); 5353 return err; 5354 } 5355 5356 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 5357 struct iattr *sattr, struct nfs4_label *label, dev_t rdev) 5358 { 5359 struct nfs4_createdata *data; 5360 int mode = sattr->ia_mode; 5361 int status = -ENOMEM; 5362 5363 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK); 5364 if (data == NULL) 5365 goto out; 5366 5367 if (S_ISFIFO(mode)) 5368 data->arg.ftype = NF4FIFO; 5369 else if (S_ISBLK(mode)) { 5370 data->arg.ftype = NF4BLK; 5371 data->arg.u.device.specdata1 = MAJOR(rdev); 5372 data->arg.u.device.specdata2 = MINOR(rdev); 5373 } 5374 else if (S_ISCHR(mode)) { 5375 data->arg.ftype = NF4CHR; 5376 data->arg.u.device.specdata1 = MAJOR(rdev); 5377 data->arg.u.device.specdata2 = MINOR(rdev); 5378 } else if (!S_ISSOCK(mode)) { 5379 status = -EINVAL; 5380 goto out_free; 5381 } 5382 5383 data->arg.label = label; 5384 status = nfs4_do_create(dir, dentry, data); 5385 out_free: 5386 nfs4_free_createdata(data); 5387 out: 5388 return status; 5389 } 5390 5391 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 5392 struct iattr *sattr, dev_t rdev) 5393 { 5394 struct nfs_server *server = NFS_SERVER(dir); 5395 struct nfs4_exception exception = { 5396 .interruptible = true, 5397 }; 5398 struct nfs4_label l, *label; 5399 int err; 5400 5401 label = nfs4_label_init_security(dir, dentry, sattr, &l); 5402 5403 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 5404 sattr->ia_mode &= ~current_umask(); 5405 do { 5406 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev); 5407 trace_nfs4_mknod(dir, &dentry->d_name, err); 5408 err = nfs4_handle_exception(NFS_SERVER(dir), err, 5409 &exception); 5410 } while (exception.retry); 5411 5412 nfs4_label_release_security(label); 5413 5414 return err; 5415 } 5416 5417 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, 5418 struct nfs_fsstat *fsstat) 5419 { 5420 struct nfs4_statfs_arg args = { 5421 .fh = fhandle, 5422 .bitmask = server->attr_bitmask, 5423 }; 5424 struct nfs4_statfs_res res = { 5425 .fsstat = fsstat, 5426 }; 5427 struct rpc_message msg = { 5428 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS], 5429 .rpc_argp = &args, 5430 .rpc_resp = &res, 5431 }; 5432 5433 nfs_fattr_init(fsstat->fattr); 5434 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5435 } 5436 5437 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) 5438 { 5439 struct nfs4_exception exception = { 5440 .interruptible = true, 
5441 }; 5442 int err; 5443 do { 5444 err = nfs4_handle_exception(server, 5445 _nfs4_proc_statfs(server, fhandle, fsstat), 5446 &exception); 5447 } while (exception.retry); 5448 return err; 5449 } 5450 5451 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, 5452 struct nfs_fsinfo *fsinfo) 5453 { 5454 struct nfs4_fsinfo_arg args = { 5455 .fh = fhandle, 5456 .bitmask = server->attr_bitmask, 5457 }; 5458 struct nfs4_fsinfo_res res = { 5459 .fsinfo = fsinfo, 5460 }; 5461 struct rpc_message msg = { 5462 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO], 5463 .rpc_argp = &args, 5464 .rpc_resp = &res, 5465 }; 5466 5467 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5468 } 5469 5470 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 5471 { 5472 struct nfs4_exception exception = { 5473 .interruptible = true, 5474 }; 5475 int err; 5476 5477 do { 5478 err = _nfs4_do_fsinfo(server, fhandle, fsinfo); 5479 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err); 5480 if (err == 0) { 5481 nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time * HZ); 5482 break; 5483 } 5484 err = nfs4_handle_exception(server, err, &exception); 5485 } while (exception.retry); 5486 return err; 5487 } 5488 5489 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 5490 { 5491 int error; 5492 5493 nfs_fattr_init(fsinfo->fattr); 5494 error = nfs4_do_fsinfo(server, fhandle, fsinfo); 5495 if (error == 0) { 5496 /* block layout checks this! */ 5497 server->pnfs_blksize = fsinfo->blksize; 5498 set_pnfs_layoutdriver(server, fhandle, fsinfo); 5499 } 5500 5501 return error; 5502 } 5503 5504 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 5505 struct nfs_pathconf *pathconf) 5506 { 5507 struct nfs4_pathconf_arg args = { 5508 .fh = fhandle, 5509 .bitmask = server->attr_bitmask, 5510 }; 5511 struct nfs4_pathconf_res res = { 5512 .pathconf = pathconf, 5513 }; 5514 struct rpc_message msg = { 5515 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF], 5516 .rpc_argp = &args, 5517 .rpc_resp = &res, 5518 }; 5519 5520 /* None of the pathconf attributes are mandatory to implement */ 5521 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) { 5522 memset(pathconf, 0, sizeof(*pathconf)); 5523 return 0; 5524 } 5525 5526 nfs_fattr_init(pathconf->fattr); 5527 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5528 } 5529 5530 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 5531 struct nfs_pathconf *pathconf) 5532 { 5533 struct nfs4_exception exception = { 5534 .interruptible = true, 5535 }; 5536 int err; 5537 5538 do { 5539 err = nfs4_handle_exception(server, 5540 _nfs4_proc_pathconf(server, fhandle, pathconf), 5541 &exception); 5542 } while (exception.retry); 5543 return err; 5544 } 5545 5546 int nfs4_set_rw_stateid(nfs4_stateid *stateid, 5547 const struct nfs_open_context *ctx, 5548 const struct nfs_lock_context *l_ctx, 5549 fmode_t fmode) 5550 { 5551 return nfs4_select_rw_stateid(ctx->state, fmode, l_ctx, stateid, NULL); 5552 } 5553 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid); 5554 5555 static bool nfs4_stateid_is_current(nfs4_stateid *stateid, 5556 const struct nfs_open_context *ctx, 5557 const struct nfs_lock_context *l_ctx, 5558 fmode_t fmode) 5559 { 5560 nfs4_stateid _current_stateid; 5561 5562 /* If the current stateid represents a lost lock, then exit */ 5563 if 
(nfs4_set_rw_stateid(&_current_stateid, ctx, l_ctx, fmode) == -EIO) 5564 return true; 5565 return nfs4_stateid_match(stateid, &_current_stateid); 5566 } 5567 5568 static bool nfs4_error_stateid_expired(int err) 5569 { 5570 switch (err) { 5571 case -NFS4ERR_DELEG_REVOKED: 5572 case -NFS4ERR_ADMIN_REVOKED: 5573 case -NFS4ERR_BAD_STATEID: 5574 case -NFS4ERR_STALE_STATEID: 5575 case -NFS4ERR_OLD_STATEID: 5576 case -NFS4ERR_OPENMODE: 5577 case -NFS4ERR_EXPIRED: 5578 return true; 5579 } 5580 return false; 5581 } 5582 5583 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr) 5584 { 5585 struct nfs_server *server = NFS_SERVER(hdr->inode); 5586 5587 trace_nfs4_read(hdr, task->tk_status); 5588 if (task->tk_status < 0) { 5589 struct nfs4_exception exception = { 5590 .inode = hdr->inode, 5591 .state = hdr->args.context->state, 5592 .stateid = &hdr->args.stateid, 5593 }; 5594 task->tk_status = nfs4_async_handle_exception(task, 5595 server, task->tk_status, &exception); 5596 if (exception.retry) { 5597 rpc_restart_call_prepare(task); 5598 return -EAGAIN; 5599 } 5600 } 5601 5602 if (task->tk_status > 0) 5603 renew_lease(server, hdr->timestamp); 5604 return 0; 5605 } 5606 5607 static bool nfs4_read_stateid_changed(struct rpc_task *task, 5608 struct nfs_pgio_args *args) 5609 { 5610 5611 if (!nfs4_error_stateid_expired(task->tk_status) || 5612 nfs4_stateid_is_current(&args->stateid, 5613 args->context, 5614 args->lock_context, 5615 FMODE_READ)) 5616 return false; 5617 rpc_restart_call_prepare(task); 5618 return true; 5619 } 5620 5621 static bool nfs4_read_plus_not_supported(struct rpc_task *task, 5622 struct nfs_pgio_header *hdr) 5623 { 5624 struct nfs_server *server = NFS_SERVER(hdr->inode); 5625 struct rpc_message *msg = &task->tk_msg; 5626 5627 if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS] && 5628 task->tk_status == -ENOTSUPP) { 5629 server->caps &= ~NFS_CAP_READ_PLUS; 5630 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 5631 rpc_restart_call_prepare(task); 5632 return true; 5633 } 5634 return false; 5635 } 5636 5637 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 5638 { 5639 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 5640 return -EAGAIN; 5641 if (nfs4_read_stateid_changed(task, &hdr->args)) 5642 return -EAGAIN; 5643 if (nfs4_read_plus_not_supported(task, hdr)) 5644 return -EAGAIN; 5645 if (task->tk_status > 0) 5646 nfs_invalidate_atime(hdr->inode); 5647 return hdr->pgio_done_cb ? 
hdr->pgio_done_cb(task, hdr) : 5648 nfs4_read_done_cb(task, hdr); 5649 } 5650 5651 #if defined CONFIG_NFS_V4_2 && defined CONFIG_NFS_V4_2_READ_PLUS 5652 static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr, 5653 struct rpc_message *msg) 5654 { 5655 /* Note: We don't use READ_PLUS with pNFS yet */ 5656 if (nfs_server_capable(hdr->inode, NFS_CAP_READ_PLUS) && !hdr->ds_clp) { 5657 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS]; 5658 return nfs_read_alloc_scratch(hdr, READ_PLUS_SCRATCH_SIZE); 5659 } 5660 return false; 5661 } 5662 #else 5663 static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr, 5664 struct rpc_message *msg) 5665 { 5666 return false; 5667 } 5668 #endif /* CONFIG_NFS_V4_2 */ 5669 5670 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr, 5671 struct rpc_message *msg) 5672 { 5673 hdr->timestamp = jiffies; 5674 if (!hdr->pgio_done_cb) 5675 hdr->pgio_done_cb = nfs4_read_done_cb; 5676 if (!nfs42_read_plus_support(hdr, msg)) 5677 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 5678 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0); 5679 } 5680 5681 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task, 5682 struct nfs_pgio_header *hdr) 5683 { 5684 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode)->nfs_client, 5685 &hdr->args.seq_args, 5686 &hdr->res.seq_res, 5687 task)) 5688 return 0; 5689 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context, 5690 hdr->args.lock_context, 5691 hdr->rw_mode) == -EIO) 5692 return -EIO; 5693 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) 5694 return -EIO; 5695 return 0; 5696 } 5697 5698 static int nfs4_write_done_cb(struct rpc_task *task, 5699 struct nfs_pgio_header *hdr) 5700 { 5701 struct inode *inode = hdr->inode; 5702 5703 trace_nfs4_write(hdr, task->tk_status); 5704 if (task->tk_status < 0) { 5705 struct nfs4_exception exception = { 5706 .inode = hdr->inode, 5707 .state = hdr->args.context->state, 5708 .stateid = &hdr->args.stateid, 5709 }; 5710 task->tk_status = nfs4_async_handle_exception(task, 5711 NFS_SERVER(inode), task->tk_status, 5712 &exception); 5713 if (exception.retry) { 5714 rpc_restart_call_prepare(task); 5715 return -EAGAIN; 5716 } 5717 } 5718 if (task->tk_status >= 0) { 5719 renew_lease(NFS_SERVER(inode), hdr->timestamp); 5720 nfs_writeback_update_inode(hdr); 5721 } 5722 return 0; 5723 } 5724 5725 static bool nfs4_write_stateid_changed(struct rpc_task *task, 5726 struct nfs_pgio_args *args) 5727 { 5728 5729 if (!nfs4_error_stateid_expired(task->tk_status) || 5730 nfs4_stateid_is_current(&args->stateid, 5731 args->context, 5732 args->lock_context, 5733 FMODE_WRITE)) 5734 return false; 5735 rpc_restart_call_prepare(task); 5736 return true; 5737 } 5738 5739 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 5740 { 5741 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 5742 return -EAGAIN; 5743 if (nfs4_write_stateid_changed(task, &hdr->args)) 5744 return -EAGAIN; 5745 return hdr->pgio_done_cb ? 
hdr->pgio_done_cb(task, hdr) : 5746 nfs4_write_done_cb(task, hdr); 5747 } 5748 5749 static 5750 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr) 5751 { 5752 /* Don't request attributes for pNFS or O_DIRECT writes */ 5753 if (hdr->ds_clp != NULL || hdr->dreq != NULL) 5754 return false; 5755 /* Otherwise, request attributes if and only if we don't hold 5756 * a delegation 5757 */ 5758 return nfs4_have_delegation(hdr->inode, FMODE_READ, 0) == 0; 5759 } 5760 5761 void nfs4_bitmask_set(__u32 bitmask[], const __u32 src[], 5762 struct inode *inode, unsigned long cache_validity) 5763 { 5764 struct nfs_server *server = NFS_SERVER(inode); 5765 unsigned int i; 5766 5767 memcpy(bitmask, src, sizeof(*bitmask) * NFS4_BITMASK_SZ); 5768 cache_validity |= READ_ONCE(NFS_I(inode)->cache_validity); 5769 5770 if (cache_validity & NFS_INO_INVALID_CHANGE) 5771 bitmask[0] |= FATTR4_WORD0_CHANGE; 5772 if (cache_validity & NFS_INO_INVALID_ATIME) 5773 bitmask[1] |= FATTR4_WORD1_TIME_ACCESS; 5774 if (cache_validity & NFS_INO_INVALID_MODE) 5775 bitmask[1] |= FATTR4_WORD1_MODE; 5776 if (cache_validity & NFS_INO_INVALID_OTHER) 5777 bitmask[1] |= FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP; 5778 if (cache_validity & NFS_INO_INVALID_NLINK) 5779 bitmask[1] |= FATTR4_WORD1_NUMLINKS; 5780 if (cache_validity & NFS_INO_INVALID_CTIME) 5781 bitmask[1] |= FATTR4_WORD1_TIME_METADATA; 5782 if (cache_validity & NFS_INO_INVALID_MTIME) 5783 bitmask[1] |= FATTR4_WORD1_TIME_MODIFY; 5784 if (cache_validity & NFS_INO_INVALID_BLOCKS) 5785 bitmask[1] |= FATTR4_WORD1_SPACE_USED; 5786 if (cache_validity & NFS_INO_INVALID_BTIME) 5787 bitmask[1] |= FATTR4_WORD1_TIME_CREATE; 5788 5789 if (cache_validity & NFS_INO_INVALID_SIZE) 5790 bitmask[0] |= FATTR4_WORD0_SIZE; 5791 5792 for (i = 0; i < NFS4_BITMASK_SZ; i++) 5793 bitmask[i] &= server->attr_bitmask[i]; 5794 } 5795 5796 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr, 5797 struct rpc_message *msg, 5798 struct rpc_clnt **clnt) 5799 { 5800 struct nfs_server *server = NFS_SERVER(hdr->inode); 5801 5802 if (!nfs4_write_need_cache_consistency_data(hdr)) { 5803 hdr->args.bitmask = NULL; 5804 hdr->res.fattr = NULL; 5805 } else { 5806 nfs4_bitmask_set(hdr->args.bitmask_store, 5807 server->cache_consistency_bitmask, 5808 hdr->inode, NFS_INO_INVALID_BLOCKS); 5809 hdr->args.bitmask = hdr->args.bitmask_store; 5810 } 5811 5812 if (!hdr->pgio_done_cb) 5813 hdr->pgio_done_cb = nfs4_write_done_cb; 5814 hdr->res.server = server; 5815 hdr->timestamp = jiffies; 5816 5817 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; 5818 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0); 5819 nfs4_state_protect_write(hdr->ds_clp ? 
hdr->ds_clp : server->nfs_client, clnt, msg, hdr); 5820 } 5821 5822 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) 5823 { 5824 nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client, 5825 &data->args.seq_args, 5826 &data->res.seq_res, 5827 task); 5828 } 5829 5830 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data) 5831 { 5832 struct inode *inode = data->inode; 5833 5834 trace_nfs4_commit(data, task->tk_status); 5835 if (nfs4_async_handle_error(task, NFS_SERVER(inode), 5836 NULL, NULL) == -EAGAIN) { 5837 rpc_restart_call_prepare(task); 5838 return -EAGAIN; 5839 } 5840 return 0; 5841 } 5842 5843 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data) 5844 { 5845 if (!nfs4_sequence_done(task, &data->res.seq_res)) 5846 return -EAGAIN; 5847 return data->commit_done_cb(task, data); 5848 } 5849 5850 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg, 5851 struct rpc_clnt **clnt) 5852 { 5853 struct nfs_server *server = NFS_SERVER(data->inode); 5854 5855 if (data->commit_done_cb == NULL) 5856 data->commit_done_cb = nfs4_commit_done_cb; 5857 data->res.server = server; 5858 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT]; 5859 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0); 5860 nfs4_state_protect(data->ds_clp ? data->ds_clp : server->nfs_client, 5861 NFS_SP4_MACH_CRED_COMMIT, clnt, msg); 5862 } 5863 5864 static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args, 5865 struct nfs_commitres *res) 5866 { 5867 struct inode *dst_inode = file_inode(dst); 5868 struct nfs_server *server = NFS_SERVER(dst_inode); 5869 struct rpc_message msg = { 5870 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT], 5871 .rpc_argp = args, 5872 .rpc_resp = res, 5873 }; 5874 5875 args->fh = NFS_FH(dst_inode); 5876 return nfs4_call_sync(server->client, server, &msg, 5877 &args->seq_args, &res->seq_res, 1); 5878 } 5879 5880 int nfs4_proc_commit(struct file *dst, __u64 offset, __u32 count, struct nfs_commitres *res) 5881 { 5882 struct nfs_commitargs args = { 5883 .offset = offset, 5884 .count = count, 5885 }; 5886 struct nfs_server *dst_server = NFS_SERVER(file_inode(dst)); 5887 struct nfs4_exception exception = { }; 5888 int status; 5889 5890 do { 5891 status = _nfs4_proc_commit(dst, &args, res); 5892 status = nfs4_handle_exception(dst_server, status, &exception); 5893 } while (exception.retry); 5894 5895 return status; 5896 } 5897 5898 struct nfs4_renewdata { 5899 struct nfs_client *client; 5900 unsigned long timestamp; 5901 }; 5902 5903 /* 5904 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special 5905 * standalone procedure for queueing an asynchronous RENEW. 
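 *
 * A rough sketch of how this is meant to be driven (the real caller is the
 * lease renewal machinery, which supplies the credential; only names that
 * appear in this file are used):
 *
 *	err = nfs4_proc_async_renew(clp, cred, renew_flags);
 *
 * renew_flags must be non-zero (zero is a no-op below), and the reference
 * taken on clp via refcount_inc_not_zero() is dropped in
 * nfs4_renew_release(), which also queues the next renewal through
 * nfs4_schedule_state_renewal().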
5906 */ 5907 static void nfs4_renew_release(void *calldata) 5908 { 5909 struct nfs4_renewdata *data = calldata; 5910 struct nfs_client *clp = data->client; 5911 5912 if (refcount_read(&clp->cl_count) > 1) 5913 nfs4_schedule_state_renewal(clp); 5914 nfs_put_client(clp); 5915 kfree(data); 5916 } 5917 5918 static void nfs4_renew_done(struct rpc_task *task, void *calldata) 5919 { 5920 struct nfs4_renewdata *data = calldata; 5921 struct nfs_client *clp = data->client; 5922 unsigned long timestamp = data->timestamp; 5923 5924 trace_nfs4_renew_async(clp, task->tk_status); 5925 switch (task->tk_status) { 5926 case 0: 5927 break; 5928 case -NFS4ERR_LEASE_MOVED: 5929 nfs4_schedule_lease_moved_recovery(clp); 5930 break; 5931 default: 5932 /* Unless we're shutting down, schedule state recovery! */ 5933 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) 5934 return; 5935 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { 5936 nfs4_schedule_lease_recovery(clp); 5937 return; 5938 } 5939 nfs4_schedule_path_down_recovery(clp); 5940 } 5941 do_renew_lease(clp, timestamp); 5942 } 5943 5944 static const struct rpc_call_ops nfs4_renew_ops = { 5945 .rpc_call_done = nfs4_renew_done, 5946 .rpc_release = nfs4_renew_release, 5947 }; 5948 5949 static int nfs4_proc_async_renew(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags) 5950 { 5951 struct rpc_message msg = { 5952 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 5953 .rpc_argp = clp, 5954 .rpc_cred = cred, 5955 }; 5956 struct nfs4_renewdata *data; 5957 5958 if (renew_flags == 0) 5959 return 0; 5960 if (!refcount_inc_not_zero(&clp->cl_count)) 5961 return -EIO; 5962 data = kmalloc(sizeof(*data), GFP_NOFS); 5963 if (data == NULL) { 5964 nfs_put_client(clp); 5965 return -ENOMEM; 5966 } 5967 data->client = clp; 5968 data->timestamp = jiffies; 5969 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT, 5970 &nfs4_renew_ops, data); 5971 } 5972 5973 static int nfs4_proc_renew(struct nfs_client *clp, const struct cred *cred) 5974 { 5975 struct rpc_message msg = { 5976 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 5977 .rpc_argp = clp, 5978 .rpc_cred = cred, 5979 }; 5980 unsigned long now = jiffies; 5981 int status; 5982 5983 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5984 if (status < 0) 5985 return status; 5986 do_renew_lease(clp, now); 5987 return 0; 5988 } 5989 5990 static bool nfs4_server_supports_acls(const struct nfs_server *server, 5991 enum nfs4_acl_type type) 5992 { 5993 switch (type) { 5994 default: 5995 return server->attr_bitmask[0] & FATTR4_WORD0_ACL; 5996 case NFS4ACL_DACL: 5997 return server->attr_bitmask[1] & FATTR4_WORD1_DACL; 5998 case NFS4ACL_SACL: 5999 return server->attr_bitmask[1] & FATTR4_WORD1_SACL; 6000 } 6001 } 6002 6003 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that 6004 * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on 6005 * the stack. 
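 *
 * Worked example, assuming the common values XATTR_SIZE_MAX == 65536 and
 * PAGE_SIZE == 4096: NFS4ACL_MAXPAGES = DIV_ROUND_UP(65536, 4096) = 16,
 * so the on-stack array used by __nfs4_proc_set_acl() is
 *
 *	struct page *pages[16];
 *
 * i.e. 16 pointers, 128 bytes on a 64-bit build, which is what makes the
 * "on the stack" assumption above reasonable.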
6006 */ 6007 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE) 6008 6009 int nfs4_buf_to_pages_noslab(const void *buf, size_t buflen, 6010 struct page **pages) 6011 { 6012 struct page *newpage, **spages; 6013 int rc = 0; 6014 size_t len; 6015 spages = pages; 6016 6017 do { 6018 len = min_t(size_t, PAGE_SIZE, buflen); 6019 newpage = alloc_page(GFP_KERNEL); 6020 6021 if (newpage == NULL) 6022 goto unwind; 6023 memcpy(page_address(newpage), buf, len); 6024 buf += len; 6025 buflen -= len; 6026 *pages++ = newpage; 6027 rc++; 6028 } while (buflen != 0); 6029 6030 return rc; 6031 6032 unwind: 6033 for(; rc > 0; rc--) 6034 __free_page(spages[rc-1]); 6035 return -ENOMEM; 6036 } 6037 6038 struct nfs4_cached_acl { 6039 enum nfs4_acl_type type; 6040 int cached; 6041 size_t len; 6042 char data[]; 6043 }; 6044 6045 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl) 6046 { 6047 struct nfs_inode *nfsi = NFS_I(inode); 6048 6049 spin_lock(&inode->i_lock); 6050 kfree(nfsi->nfs4_acl); 6051 nfsi->nfs4_acl = acl; 6052 spin_unlock(&inode->i_lock); 6053 } 6054 6055 static void nfs4_zap_acl_attr(struct inode *inode) 6056 { 6057 nfs4_set_cached_acl(inode, NULL); 6058 } 6059 6060 static ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, 6061 size_t buflen, enum nfs4_acl_type type) 6062 { 6063 struct nfs_inode *nfsi = NFS_I(inode); 6064 struct nfs4_cached_acl *acl; 6065 int ret = -ENOENT; 6066 6067 spin_lock(&inode->i_lock); 6068 acl = nfsi->nfs4_acl; 6069 if (acl == NULL) 6070 goto out; 6071 if (acl->type != type) 6072 goto out; 6073 if (buf == NULL) /* user is just asking for length */ 6074 goto out_len; 6075 if (acl->cached == 0) 6076 goto out; 6077 ret = -ERANGE; /* see getxattr(2) man page */ 6078 if (acl->len > buflen) 6079 goto out; 6080 memcpy(buf, acl->data, acl->len); 6081 out_len: 6082 ret = acl->len; 6083 out: 6084 spin_unlock(&inode->i_lock); 6085 return ret; 6086 } 6087 6088 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, 6089 size_t pgbase, size_t acl_len, 6090 enum nfs4_acl_type type) 6091 { 6092 struct nfs4_cached_acl *acl; 6093 size_t buflen = sizeof(*acl) + acl_len; 6094 6095 if (buflen <= PAGE_SIZE) { 6096 acl = kmalloc(buflen, GFP_KERNEL); 6097 if (acl == NULL) 6098 goto out; 6099 acl->cached = 1; 6100 _copy_from_pages(acl->data, pages, pgbase, acl_len); 6101 } else { 6102 acl = kmalloc(sizeof(*acl), GFP_KERNEL); 6103 if (acl == NULL) 6104 goto out; 6105 acl->cached = 0; 6106 } 6107 acl->type = type; 6108 acl->len = acl_len; 6109 out: 6110 nfs4_set_cached_acl(inode, acl); 6111 } 6112 6113 /* 6114 * The getxattr API returns the required buffer length when called with a 6115 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating 6116 * the required buf. On a NULL buf, we send a page of data to the server 6117 * guessing that the ACL request can be serviced by a page. If so, we cache 6118 * up to the page of ACL data, and the 2nd call to getxattr is serviced by 6119 * the cache. If not so, we throw away the page, and cache the required 6120 * length. The next getxattr call will then produce another round trip to 6121 * the server, this time with the input buf of the required size. 
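 *
 * As an illustration, the userspace pattern this is tuned for is roughly
 * (hypothetical caller, error handling omitted):
 *
 *	len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	buf = malloc(len);
 *	getxattr(path, "system.nfs4_acl", buf, len);
 *
 * If the ACL fitted in the speculatively requested page, the second call
 * is answered from the cache filled by nfs4_write_cached_acl(); otherwise
 * only the length was cached and the second call goes back to the server
 * with a correctly sized buffer.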
6122 */ 6123 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, 6124 size_t buflen, enum nfs4_acl_type type) 6125 { 6126 struct page **pages; 6127 struct nfs_getaclargs args = { 6128 .fh = NFS_FH(inode), 6129 .acl_type = type, 6130 .acl_len = buflen, 6131 }; 6132 struct nfs_getaclres res = { 6133 .acl_type = type, 6134 .acl_len = buflen, 6135 }; 6136 struct rpc_message msg = { 6137 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL], 6138 .rpc_argp = &args, 6139 .rpc_resp = &res, 6140 }; 6141 unsigned int npages; 6142 int ret = -ENOMEM, i; 6143 struct nfs_server *server = NFS_SERVER(inode); 6144 6145 if (buflen == 0) 6146 buflen = server->rsize; 6147 6148 npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1; 6149 pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); 6150 if (!pages) 6151 return -ENOMEM; 6152 6153 args.acl_pages = pages; 6154 6155 for (i = 0; i < npages; i++) { 6156 pages[i] = alloc_page(GFP_KERNEL); 6157 if (!pages[i]) 6158 goto out_free; 6159 } 6160 6161 /* for decoding across pages */ 6162 res.acl_scratch = alloc_page(GFP_KERNEL); 6163 if (!res.acl_scratch) 6164 goto out_free; 6165 6166 args.acl_len = npages * PAGE_SIZE; 6167 6168 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", 6169 __func__, buf, buflen, npages, args.acl_len); 6170 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), 6171 &msg, &args.seq_args, &res.seq_res, 0); 6172 if (ret) 6173 goto out_free; 6174 6175 /* Handle the case where the passed-in buffer is too short */ 6176 if (res.acl_flags & NFS4_ACL_TRUNC) { 6177 /* Did the user only issue a request for the acl length? */ 6178 if (buf == NULL) 6179 goto out_ok; 6180 ret = -ERANGE; 6181 goto out_free; 6182 } 6183 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len, 6184 type); 6185 if (buf) { 6186 if (res.acl_len > buflen) { 6187 ret = -ERANGE; 6188 goto out_free; 6189 } 6190 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len); 6191 } 6192 out_ok: 6193 ret = res.acl_len; 6194 out_free: 6195 while (--i >= 0) 6196 __free_page(pages[i]); 6197 if (res.acl_scratch) 6198 __free_page(res.acl_scratch); 6199 kfree(pages); 6200 return ret; 6201 } 6202 6203 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, 6204 size_t buflen, enum nfs4_acl_type type) 6205 { 6206 struct nfs4_exception exception = { 6207 .interruptible = true, 6208 }; 6209 ssize_t ret; 6210 do { 6211 ret = __nfs4_get_acl_uncached(inode, buf, buflen, type); 6212 trace_nfs4_get_acl(inode, ret); 6213 if (ret >= 0) 6214 break; 6215 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); 6216 } while (exception.retry); 6217 return ret; 6218 } 6219 6220 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen, 6221 enum nfs4_acl_type type) 6222 { 6223 struct nfs_server *server = NFS_SERVER(inode); 6224 int ret; 6225 6226 if (unlikely(NFS_FH(inode)->size == 0)) 6227 return -ENODATA; 6228 if (!nfs4_server_supports_acls(server, type)) 6229 return -EOPNOTSUPP; 6230 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 6231 if (ret < 0) 6232 return ret; 6233 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) 6234 nfs_zap_acl_cache(inode); 6235 ret = nfs4_read_cached_acl(inode, buf, buflen, type); 6236 if (ret != -ENOENT) 6237 /* -ENOENT is returned if there is no ACL or if there is an ACL 6238 * but no cached acl data, just the acl length */ 6239 return ret; 6240 return nfs4_get_acl_uncached(inode, buf, buflen, type); 6241 } 6242 6243 static int __nfs4_proc_set_acl(struct 
inode *inode, const void *buf, 6244 size_t buflen, enum nfs4_acl_type type) 6245 { 6246 struct nfs_server *server = NFS_SERVER(inode); 6247 struct page *pages[NFS4ACL_MAXPAGES]; 6248 struct nfs_setaclargs arg = { 6249 .fh = NFS_FH(inode), 6250 .acl_type = type, 6251 .acl_len = buflen, 6252 .acl_pages = pages, 6253 }; 6254 struct nfs_setaclres res; 6255 struct rpc_message msg = { 6256 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL], 6257 .rpc_argp = &arg, 6258 .rpc_resp = &res, 6259 }; 6260 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 6261 int ret, i; 6262 6263 /* You can't remove system.nfs4_acl: */ 6264 if (buflen == 0) 6265 return -EINVAL; 6266 if (!nfs4_server_supports_acls(server, type)) 6267 return -EOPNOTSUPP; 6268 if (npages > ARRAY_SIZE(pages)) 6269 return -ERANGE; 6270 i = nfs4_buf_to_pages_noslab(buf, buflen, arg.acl_pages); 6271 if (i < 0) 6272 return i; 6273 nfs4_inode_make_writeable(inode); 6274 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 6275 6276 /* 6277 * Free each page after tx, so the only ref left is 6278 * held by the network stack 6279 */ 6280 for (; i > 0; i--) 6281 put_page(pages[i-1]); 6282 6283 /* 6284 * Acl update can result in inode attribute update. 6285 * so mark the attribute cache invalid. 6286 */ 6287 spin_lock(&inode->i_lock); 6288 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE | 6289 NFS_INO_INVALID_CTIME | 6290 NFS_INO_REVAL_FORCED); 6291 spin_unlock(&inode->i_lock); 6292 nfs_access_zap_cache(inode); 6293 nfs_zap_acl_cache(inode); 6294 return ret; 6295 } 6296 6297 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, 6298 size_t buflen, enum nfs4_acl_type type) 6299 { 6300 struct nfs4_exception exception = { }; 6301 int err; 6302 6303 if (unlikely(NFS_FH(inode)->size == 0)) 6304 return -ENODATA; 6305 do { 6306 err = __nfs4_proc_set_acl(inode, buf, buflen, type); 6307 trace_nfs4_set_acl(inode, err); 6308 if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) { 6309 /* 6310 * no need to retry since the kernel 6311 * isn't involved in encoding the ACEs. 
6312 */ 6313 err = -EINVAL; 6314 break; 6315 } 6316 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6317 &exception); 6318 } while (exception.retry); 6319 return err; 6320 } 6321 6322 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 6323 static int _nfs4_get_security_label(struct inode *inode, void *buf, 6324 size_t buflen) 6325 { 6326 struct nfs_server *server = NFS_SERVER(inode); 6327 struct nfs4_label label = {0, 0, 0, buflen, buf}; 6328 6329 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 6330 struct nfs_fattr fattr = { 6331 .label = &label, 6332 }; 6333 struct nfs4_getattr_arg arg = { 6334 .fh = NFS_FH(inode), 6335 .bitmask = bitmask, 6336 }; 6337 struct nfs4_getattr_res res = { 6338 .fattr = &fattr, 6339 .server = server, 6340 }; 6341 struct rpc_message msg = { 6342 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 6343 .rpc_argp = &arg, 6344 .rpc_resp = &res, 6345 }; 6346 int ret; 6347 6348 nfs_fattr_init(&fattr); 6349 6350 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0); 6351 if (ret) 6352 return ret; 6353 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL)) 6354 return -ENOENT; 6355 return label.len; 6356 } 6357 6358 static int nfs4_get_security_label(struct inode *inode, void *buf, 6359 size_t buflen) 6360 { 6361 struct nfs4_exception exception = { 6362 .interruptible = true, 6363 }; 6364 int err; 6365 6366 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 6367 return -EOPNOTSUPP; 6368 6369 do { 6370 err = _nfs4_get_security_label(inode, buf, buflen); 6371 trace_nfs4_get_security_label(inode, err); 6372 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6373 &exception); 6374 } while (exception.retry); 6375 return err; 6376 } 6377 6378 static int _nfs4_do_set_security_label(struct inode *inode, 6379 struct nfs4_label *ilabel, 6380 struct nfs_fattr *fattr) 6381 { 6382 6383 struct iattr sattr = {0}; 6384 struct nfs_server *server = NFS_SERVER(inode); 6385 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 6386 struct nfs_setattrargs arg = { 6387 .fh = NFS_FH(inode), 6388 .iap = &sattr, 6389 .server = server, 6390 .bitmask = bitmask, 6391 .label = ilabel, 6392 }; 6393 struct nfs_setattrres res = { 6394 .fattr = fattr, 6395 .server = server, 6396 }; 6397 struct rpc_message msg = { 6398 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 6399 .rpc_argp = &arg, 6400 .rpc_resp = &res, 6401 }; 6402 int status; 6403 6404 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 6405 6406 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 6407 if (status) 6408 dprintk("%s failed: %d\n", __func__, status); 6409 6410 return status; 6411 } 6412 6413 static int nfs4_do_set_security_label(struct inode *inode, 6414 struct nfs4_label *ilabel, 6415 struct nfs_fattr *fattr) 6416 { 6417 struct nfs4_exception exception = { }; 6418 int err; 6419 6420 do { 6421 err = _nfs4_do_set_security_label(inode, ilabel, fattr); 6422 trace_nfs4_set_security_label(inode, err); 6423 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6424 &exception); 6425 } while (exception.retry); 6426 return err; 6427 } 6428 6429 static int 6430 nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen) 6431 { 6432 struct nfs4_label ilabel = {0, 0, 0, buflen, (char *)buf }; 6433 struct nfs_fattr *fattr; 6434 int status; 6435 6436 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 6437 return -EOPNOTSUPP; 6438 6439 fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode)); 6440 if (fattr == NULL) 6441 return -ENOMEM; 6442 6443 
status = nfs4_do_set_security_label(inode, &ilabel, fattr); 6444 if (status == 0) 6445 nfs_setsecurity(inode, fattr); 6446 6447 nfs_free_fattr(fattr); 6448 return status; 6449 } 6450 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */ 6451 6452 6453 static void nfs4_init_boot_verifier(const struct nfs_client *clp, 6454 nfs4_verifier *bootverf) 6455 { 6456 __be32 verf[2]; 6457 6458 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) { 6459 /* An impossible timestamp guarantees this value 6460 * will never match a generated boot time. */ 6461 verf[0] = cpu_to_be32(U32_MAX); 6462 verf[1] = cpu_to_be32(U32_MAX); 6463 } else { 6464 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 6465 u64 ns = ktime_to_ns(nn->boot_time); 6466 6467 verf[0] = cpu_to_be32(ns >> 32); 6468 verf[1] = cpu_to_be32(ns); 6469 } 6470 memcpy(bootverf->data, verf, sizeof(bootverf->data)); 6471 } 6472 6473 static size_t 6474 nfs4_get_uniquifier(struct nfs_client *clp, char *buf, size_t buflen) 6475 { 6476 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 6477 struct nfs_netns_client *nn_clp = nn->nfs_client; 6478 const char *id; 6479 6480 buf[0] = '\0'; 6481 6482 if (nn_clp) { 6483 rcu_read_lock(); 6484 id = rcu_dereference(nn_clp->identifier); 6485 if (id) 6486 strscpy(buf, id, buflen); 6487 rcu_read_unlock(); 6488 } 6489 6490 if (nfs4_client_id_uniquifier[0] != '\0' && buf[0] == '\0') 6491 strscpy(buf, nfs4_client_id_uniquifier, buflen); 6492 6493 return strlen(buf); 6494 } 6495 6496 static int 6497 nfs4_init_nonuniform_client_string(struct nfs_client *clp) 6498 { 6499 char buf[NFS4_CLIENT_ID_UNIQ_LEN]; 6500 size_t buflen; 6501 size_t len; 6502 char *str; 6503 6504 if (clp->cl_owner_id != NULL) 6505 return 0; 6506 6507 rcu_read_lock(); 6508 len = 14 + 6509 strlen(clp->cl_rpcclient->cl_nodename) + 6510 1 + 6511 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) + 6512 1; 6513 rcu_read_unlock(); 6514 6515 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf)); 6516 if (buflen) 6517 len += buflen + 1; 6518 6519 if (len > NFS4_OPAQUE_LIMIT + 1) 6520 return -EINVAL; 6521 6522 /* 6523 * Since this string is allocated at mount time, and held until the 6524 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 6525 * about a memory-reclaim deadlock. 6526 */ 6527 str = kmalloc(len, GFP_KERNEL); 6528 if (!str) 6529 return -ENOMEM; 6530 6531 rcu_read_lock(); 6532 if (buflen) 6533 scnprintf(str, len, "Linux NFSv4.0 %s/%s/%s", 6534 clp->cl_rpcclient->cl_nodename, buf, 6535 rpc_peeraddr2str(clp->cl_rpcclient, 6536 RPC_DISPLAY_ADDR)); 6537 else 6538 scnprintf(str, len, "Linux NFSv4.0 %s/%s", 6539 clp->cl_rpcclient->cl_nodename, 6540 rpc_peeraddr2str(clp->cl_rpcclient, 6541 RPC_DISPLAY_ADDR)); 6542 rcu_read_unlock(); 6543 6544 clp->cl_owner_id = str; 6545 return 0; 6546 } 6547 6548 static int 6549 nfs4_init_uniform_client_string(struct nfs_client *clp) 6550 { 6551 char buf[NFS4_CLIENT_ID_UNIQ_LEN]; 6552 size_t buflen; 6553 size_t len; 6554 char *str; 6555 6556 if (clp->cl_owner_id != NULL) 6557 return 0; 6558 6559 len = 10 + 10 + 1 + 10 + 1 + 6560 strlen(clp->cl_rpcclient->cl_nodename) + 1; 6561 6562 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf)); 6563 if (buflen) 6564 len += buflen + 1; 6565 6566 if (len > NFS4_OPAQUE_LIMIT + 1) 6567 return -EINVAL; 6568 6569 /* 6570 * Since this string is allocated at mount time, and held until the 6571 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 6572 * about a memory-reclaim deadlock. 
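 *
 * For illustration only (uniquifier and nodename are made-up values), the
 * resulting clp->cl_owner_id produced by the scnprintf() calls below looks
 * like
 *
 *	"Linux NFSv4.1 example-uniq/client.example.org"
 *
 * or, when no uniquifier has been configured,
 *
 *	"Linux NFSv4.1 client.example.org"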
6573 */ 6574 str = kmalloc(len, GFP_KERNEL); 6575 if (!str) 6576 return -ENOMEM; 6577 6578 if (buflen) 6579 scnprintf(str, len, "Linux NFSv%u.%u %s/%s", 6580 clp->rpc_ops->version, clp->cl_minorversion, 6581 buf, clp->cl_rpcclient->cl_nodename); 6582 else 6583 scnprintf(str, len, "Linux NFSv%u.%u %s", 6584 clp->rpc_ops->version, clp->cl_minorversion, 6585 clp->cl_rpcclient->cl_nodename); 6586 clp->cl_owner_id = str; 6587 return 0; 6588 } 6589 6590 /* 6591 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback 6592 * services. Advertise one based on the address family of the 6593 * clientaddr. 6594 */ 6595 static unsigned int 6596 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len) 6597 { 6598 if (strchr(clp->cl_ipaddr, ':') != NULL) 6599 return scnprintf(buf, len, "tcp6"); 6600 else 6601 return scnprintf(buf, len, "tcp"); 6602 } 6603 6604 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata) 6605 { 6606 struct nfs4_setclientid *sc = calldata; 6607 6608 if (task->tk_status == 0) 6609 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred); 6610 } 6611 6612 static const struct rpc_call_ops nfs4_setclientid_ops = { 6613 .rpc_call_done = nfs4_setclientid_done, 6614 }; 6615 6616 /** 6617 * nfs4_proc_setclientid - Negotiate client ID 6618 * @clp: state data structure 6619 * @program: RPC program for NFSv4 callback service 6620 * @port: IP port number for NFS4 callback service 6621 * @cred: credential to use for this call 6622 * @res: where to place the result 6623 * 6624 * Returns zero, a negative errno, or a negative NFS4ERR status code. 6625 */ 6626 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, 6627 unsigned short port, const struct cred *cred, 6628 struct nfs4_setclientid_res *res) 6629 { 6630 nfs4_verifier sc_verifier; 6631 struct nfs4_setclientid setclientid = { 6632 .sc_verifier = &sc_verifier, 6633 .sc_prog = program, 6634 .sc_clnt = clp, 6635 }; 6636 struct rpc_message msg = { 6637 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID], 6638 .rpc_argp = &setclientid, 6639 .rpc_resp = res, 6640 .rpc_cred = cred, 6641 }; 6642 struct rpc_task_setup task_setup_data = { 6643 .rpc_client = clp->cl_rpcclient, 6644 .rpc_message = &msg, 6645 .callback_ops = &nfs4_setclientid_ops, 6646 .callback_data = &setclientid, 6647 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN, 6648 }; 6649 unsigned long now = jiffies; 6650 int status; 6651 6652 /* nfs_client_id4 */ 6653 nfs4_init_boot_verifier(clp, &sc_verifier); 6654 6655 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags)) 6656 status = nfs4_init_uniform_client_string(clp); 6657 else 6658 status = nfs4_init_nonuniform_client_string(clp); 6659 6660 if (status) 6661 goto out; 6662 6663 /* cb_client4 */ 6664 setclientid.sc_netid_len = 6665 nfs4_init_callback_netid(clp, 6666 setclientid.sc_netid, 6667 sizeof(setclientid.sc_netid)); 6668 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr, 6669 sizeof(setclientid.sc_uaddr), "%s.%u.%u", 6670 clp->cl_ipaddr, port >> 8, port & 255); 6671 6672 dprintk("NFS call setclientid auth=%s, '%s'\n", 6673 clp->cl_rpcclient->cl_auth->au_ops->au_name, 6674 clp->cl_owner_id); 6675 6676 status = nfs4_call_sync_custom(&task_setup_data); 6677 if (setclientid.sc_cred) { 6678 kfree(clp->cl_acceptor); 6679 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred); 6680 put_rpccred(setclientid.sc_cred); 6681 } 6682 6683 if (status == 0) 6684 do_renew_lease(clp, now); 6685 out: 6686 trace_nfs4_setclientid(clp, status); 6687 dprintk("NFS reply 
setclientid: %d\n", status); 6688 return status; 6689 } 6690 6691 /** 6692 * nfs4_proc_setclientid_confirm - Confirm client ID 6693 * @clp: state data structure 6694 * @arg: result of a previous SETCLIENTID 6695 * @cred: credential to use for this call 6696 * 6697 * Returns zero, a negative errno, or a negative NFS4ERR status code. 6698 */ 6699 int nfs4_proc_setclientid_confirm(struct nfs_client *clp, 6700 struct nfs4_setclientid_res *arg, 6701 const struct cred *cred) 6702 { 6703 struct rpc_message msg = { 6704 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM], 6705 .rpc_argp = arg, 6706 .rpc_cred = cred, 6707 }; 6708 int status; 6709 6710 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n", 6711 clp->cl_rpcclient->cl_auth->au_ops->au_name, 6712 clp->cl_clientid); 6713 status = rpc_call_sync(clp->cl_rpcclient, &msg, 6714 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 6715 trace_nfs4_setclientid_confirm(clp, status); 6716 dprintk("NFS reply setclientid_confirm: %d\n", status); 6717 return status; 6718 } 6719 6720 struct nfs4_delegreturndata { 6721 struct nfs4_delegreturnargs args; 6722 struct nfs4_delegreturnres res; 6723 struct nfs_fh fh; 6724 nfs4_stateid stateid; 6725 unsigned long timestamp; 6726 struct { 6727 struct nfs4_layoutreturn_args arg; 6728 struct nfs4_layoutreturn_res res; 6729 struct nfs4_xdr_opaque_data ld_private; 6730 u32 roc_barrier; 6731 bool roc; 6732 } lr; 6733 struct nfs4_delegattr sattr; 6734 struct nfs_fattr fattr; 6735 int rpc_status; 6736 struct inode *inode; 6737 }; 6738 6739 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) 6740 { 6741 struct nfs4_delegreturndata *data = calldata; 6742 struct nfs4_exception exception = { 6743 .inode = data->inode, 6744 .stateid = &data->stateid, 6745 .task_is_privileged = data->args.seq_args.sa_privileged, 6746 }; 6747 6748 if (!nfs4_sequence_done(task, &data->res.seq_res)) 6749 return; 6750 6751 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status); 6752 6753 /* Handle Layoutreturn errors */ 6754 if (pnfs_roc_done(task, &data->args.lr_args, &data->res.lr_res, 6755 &data->res.lr_ret) == -EAGAIN) 6756 goto out_restart; 6757 6758 if (data->args.sattr_args && task->tk_status != 0) { 6759 switch(data->res.sattr_ret) { 6760 case 0: 6761 data->args.sattr_args = NULL; 6762 data->res.sattr_res = false; 6763 break; 6764 case -NFS4ERR_ADMIN_REVOKED: 6765 case -NFS4ERR_DELEG_REVOKED: 6766 case -NFS4ERR_EXPIRED: 6767 case -NFS4ERR_BAD_STATEID: 6768 /* Let the main handler below do stateid recovery */ 6769 break; 6770 case -NFS4ERR_OLD_STATEID: 6771 if (nfs4_refresh_delegation_stateid(&data->stateid, 6772 data->inode)) 6773 goto out_restart; 6774 fallthrough; 6775 default: 6776 data->args.sattr_args = NULL; 6777 data->res.sattr_res = false; 6778 goto out_restart; 6779 } 6780 } 6781 6782 switch (task->tk_status) { 6783 case 0: 6784 renew_lease(data->res.server, data->timestamp); 6785 break; 6786 case -NFS4ERR_ADMIN_REVOKED: 6787 case -NFS4ERR_DELEG_REVOKED: 6788 case -NFS4ERR_EXPIRED: 6789 nfs4_free_revoked_stateid(data->res.server, 6790 data->args.stateid, 6791 task->tk_msg.rpc_cred); 6792 fallthrough; 6793 case -NFS4ERR_BAD_STATEID: 6794 case -NFS4ERR_STALE_STATEID: 6795 case -ETIMEDOUT: 6796 task->tk_status = 0; 6797 break; 6798 case -NFS4ERR_OLD_STATEID: 6799 if (!nfs4_refresh_delegation_stateid(&data->stateid, data->inode)) 6800 nfs4_stateid_seqid_inc(&data->stateid); 6801 if (data->args.bitmask) { 6802 data->args.bitmask = NULL; 6803 data->res.fattr = NULL; 6804 } 6805 
goto out_restart; 6806 case -NFS4ERR_ACCESS: 6807 if (data->args.bitmask) { 6808 data->args.bitmask = NULL; 6809 data->res.fattr = NULL; 6810 goto out_restart; 6811 } 6812 fallthrough; 6813 default: 6814 task->tk_status = nfs4_async_handle_exception(task, 6815 data->res.server, task->tk_status, 6816 &exception); 6817 if (exception.retry) 6818 goto out_restart; 6819 } 6820 nfs_delegation_mark_returned(data->inode, data->args.stateid); 6821 data->rpc_status = task->tk_status; 6822 return; 6823 out_restart: 6824 task->tk_status = 0; 6825 rpc_restart_call_prepare(task); 6826 } 6827 6828 static void nfs4_delegreturn_release(void *calldata) 6829 { 6830 struct nfs4_delegreturndata *data = calldata; 6831 struct inode *inode = data->inode; 6832 6833 if (data->lr.roc) 6834 pnfs_roc_release(&data->lr.arg, &data->lr.res, 6835 data->res.lr_ret); 6836 if (inode) { 6837 nfs4_fattr_set_prechange(&data->fattr, 6838 inode_peek_iversion_raw(inode)); 6839 nfs_refresh_inode(inode, &data->fattr); 6840 nfs_iput_and_deactive(inode); 6841 } 6842 kfree(calldata); 6843 } 6844 6845 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) 6846 { 6847 struct nfs4_delegreturndata *d_data; 6848 struct pnfs_layout_hdr *lo; 6849 6850 d_data = data; 6851 6852 if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) { 6853 nfs4_sequence_done(task, &d_data->res.seq_res); 6854 return; 6855 } 6856 6857 lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL; 6858 if (lo && !pnfs_layout_is_valid(lo)) { 6859 d_data->args.lr_args = NULL; 6860 d_data->res.lr_res = NULL; 6861 } 6862 6863 nfs4_setup_sequence(d_data->res.server->nfs_client, 6864 &d_data->args.seq_args, 6865 &d_data->res.seq_res, 6866 task); 6867 } 6868 6869 static const struct rpc_call_ops nfs4_delegreturn_ops = { 6870 .rpc_call_prepare = nfs4_delegreturn_prepare, 6871 .rpc_call_done = nfs4_delegreturn_done, 6872 .rpc_release = nfs4_delegreturn_release, 6873 }; 6874 6875 static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, 6876 const nfs4_stateid *stateid, 6877 struct nfs_delegation *delegation, 6878 int issync) 6879 { 6880 struct nfs4_delegreturndata *data; 6881 struct nfs_server *server = NFS_SERVER(inode); 6882 struct rpc_task *task; 6883 struct rpc_message msg = { 6884 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN], 6885 .rpc_cred = cred, 6886 }; 6887 struct rpc_task_setup task_setup_data = { 6888 .rpc_client = server->client, 6889 .rpc_message = &msg, 6890 .callback_ops = &nfs4_delegreturn_ops, 6891 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT, 6892 }; 6893 int status = 0; 6894 6895 if (nfs_server_capable(inode, NFS_CAP_MOVEABLE)) 6896 task_setup_data.flags |= RPC_TASK_MOVEABLE; 6897 6898 data = kzalloc(sizeof(*data), GFP_KERNEL); 6899 if (data == NULL) 6900 return -ENOMEM; 6901 6902 nfs4_state_protect(server->nfs_client, 6903 NFS_SP4_MACH_CRED_CLEANUP, 6904 &task_setup_data.rpc_client, &msg); 6905 6906 data->args.fhandle = &data->fh; 6907 data->args.stateid = &data->stateid; 6908 nfs4_bitmask_set(data->args.bitmask_store, 6909 server->cache_consistency_bitmask, inode, 0); 6910 data->args.bitmask = data->args.bitmask_store; 6911 nfs_copy_fh(&data->fh, NFS_FH(inode)); 6912 nfs4_stateid_copy(&data->stateid, stateid); 6913 data->res.fattr = &data->fattr; 6914 data->res.server = server; 6915 data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT; 6916 data->lr.arg.ld_private = &data->lr.ld_private; 6917 nfs_fattr_init(data->res.fattr); 6918 data->timestamp = jiffies; 6919 data->rpc_status = 0; 6920 
data->inode = nfs_igrab_and_active(inode); 6921 if (data->inode || issync) { 6922 data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res, 6923 cred); 6924 if (data->lr.roc) { 6925 data->args.lr_args = &data->lr.arg; 6926 data->res.lr_res = &data->lr.res; 6927 } 6928 } 6929 6930 if (delegation && 6931 test_bit(NFS_DELEGATION_DELEGTIME, &delegation->flags)) { 6932 if (delegation->type & FMODE_READ) { 6933 data->sattr.atime = inode_get_atime(inode); 6934 data->sattr.atime_set = true; 6935 } 6936 if (delegation->type & FMODE_WRITE) { 6937 data->sattr.mtime = inode_get_mtime(inode); 6938 data->sattr.mtime_set = true; 6939 } 6940 data->args.sattr_args = &data->sattr; 6941 data->res.sattr_res = true; 6942 } 6943 6944 if (!data->inode) 6945 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 6946 1); 6947 else 6948 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 6949 0); 6950 6951 task_setup_data.callback_data = data; 6952 msg.rpc_argp = &data->args; 6953 msg.rpc_resp = &data->res; 6954 task = rpc_run_task(&task_setup_data); 6955 if (IS_ERR(task)) 6956 return PTR_ERR(task); 6957 if (!issync) 6958 goto out; 6959 status = rpc_wait_for_completion_task(task); 6960 if (status != 0) 6961 goto out; 6962 status = data->rpc_status; 6963 out: 6964 rpc_put_task(task); 6965 return status; 6966 } 6967 6968 int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, 6969 const nfs4_stateid *stateid, 6970 struct nfs_delegation *delegation, int issync) 6971 { 6972 struct nfs_server *server = NFS_SERVER(inode); 6973 struct nfs4_exception exception = { }; 6974 int err; 6975 do { 6976 err = _nfs4_proc_delegreturn(inode, cred, stateid, 6977 delegation, issync); 6978 trace_nfs4_delegreturn(inode, stateid, err); 6979 switch (err) { 6980 case -NFS4ERR_STALE_STATEID: 6981 case -NFS4ERR_EXPIRED: 6982 case 0: 6983 return 0; 6984 } 6985 err = nfs4_handle_exception(server, err, &exception); 6986 } while (exception.retry); 6987 return err; 6988 } 6989 6990 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 6991 { 6992 struct inode *inode = state->inode; 6993 struct nfs_server *server = NFS_SERVER(inode); 6994 struct nfs_client *clp = server->nfs_client; 6995 struct nfs_lockt_args arg = { 6996 .fh = NFS_FH(inode), 6997 .fl = request, 6998 }; 6999 struct nfs_lockt_res res = { 7000 .denied = request, 7001 }; 7002 struct rpc_message msg = { 7003 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT], 7004 .rpc_argp = &arg, 7005 .rpc_resp = &res, 7006 .rpc_cred = state->owner->so_cred, 7007 }; 7008 struct nfs4_lock_state *lsp; 7009 int status; 7010 7011 arg.lock_owner.clientid = clp->cl_clientid; 7012 status = nfs4_set_lock_state(state, request); 7013 if (status != 0) 7014 goto out; 7015 lsp = request->fl_u.nfs4_fl.owner; 7016 arg.lock_owner.id = lsp->ls_seqid.owner_id; 7017 arg.lock_owner.s_dev = server->s_dev; 7018 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 7019 switch (status) { 7020 case 0: 7021 request->c.flc_type = F_UNLCK; 7022 break; 7023 case -NFS4ERR_DENIED: 7024 status = 0; 7025 } 7026 request->fl_ops->fl_release_private(request); 7027 request->fl_ops = NULL; 7028 out: 7029 return status; 7030 } 7031 7032 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7033 { 7034 struct nfs4_exception exception = { 7035 .interruptible = true, 7036 }; 7037 int err; 7038 7039 do { 7040 err = _nfs4_proc_getlk(state, cmd, request); 7041 trace_nfs4_get_lock(request, state, cmd, 
err); 7042 err = nfs4_handle_exception(NFS_SERVER(state->inode), err, 7043 &exception); 7044 } while (exception.retry); 7045 return err; 7046 } 7047 7048 /* 7049 * Update the seqid of a lock stateid after receiving 7050 * NFS4ERR_OLD_STATEID 7051 */ 7052 static bool nfs4_refresh_lock_old_stateid(nfs4_stateid *dst, 7053 struct nfs4_lock_state *lsp) 7054 { 7055 struct nfs4_state *state = lsp->ls_state; 7056 bool ret = false; 7057 7058 spin_lock(&state->state_lock); 7059 if (!nfs4_stateid_match_other(dst, &lsp->ls_stateid)) 7060 goto out; 7061 if (!nfs4_stateid_is_newer(&lsp->ls_stateid, dst)) 7062 nfs4_stateid_seqid_inc(dst); 7063 else 7064 dst->seqid = lsp->ls_stateid.seqid; 7065 ret = true; 7066 out: 7067 spin_unlock(&state->state_lock); 7068 return ret; 7069 } 7070 7071 static bool nfs4_sync_lock_stateid(nfs4_stateid *dst, 7072 struct nfs4_lock_state *lsp) 7073 { 7074 struct nfs4_state *state = lsp->ls_state; 7075 bool ret; 7076 7077 spin_lock(&state->state_lock); 7078 ret = !nfs4_stateid_match_other(dst, &lsp->ls_stateid); 7079 nfs4_stateid_copy(dst, &lsp->ls_stateid); 7080 spin_unlock(&state->state_lock); 7081 return ret; 7082 } 7083 7084 struct nfs4_unlockdata { 7085 struct nfs_locku_args arg; 7086 struct nfs_locku_res res; 7087 struct nfs4_lock_state *lsp; 7088 struct nfs_open_context *ctx; 7089 struct nfs_lock_context *l_ctx; 7090 struct file_lock fl; 7091 struct nfs_server *server; 7092 unsigned long timestamp; 7093 }; 7094 7095 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, 7096 struct nfs_open_context *ctx, 7097 struct nfs4_lock_state *lsp, 7098 struct nfs_seqid *seqid) 7099 { 7100 struct nfs4_unlockdata *p; 7101 struct nfs4_state *state = lsp->ls_state; 7102 struct inode *inode = state->inode; 7103 struct nfs_lock_context *l_ctx; 7104 7105 p = kzalloc(sizeof(*p), GFP_KERNEL); 7106 if (p == NULL) 7107 return NULL; 7108 l_ctx = nfs_get_lock_context(ctx); 7109 if (!IS_ERR(l_ctx)) { 7110 p->l_ctx = l_ctx; 7111 } else { 7112 kfree(p); 7113 return NULL; 7114 } 7115 p->arg.fh = NFS_FH(inode); 7116 p->arg.fl = &p->fl; 7117 p->arg.seqid = seqid; 7118 p->res.seqid = seqid; 7119 p->lsp = lsp; 7120 /* Ensure we don't close file until we're done freeing locks! 
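 * The open context reference taken just below via get_nfs_open_context()
 * keeps the open state pinned for the lifetime of the LOCKU request; it
 * is released in nfs4_locku_release_calldata() once the RPC completes.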
*/ 7121 p->ctx = get_nfs_open_context(ctx); 7122 locks_init_lock(&p->fl); 7123 locks_copy_lock(&p->fl, fl); 7124 p->server = NFS_SERVER(inode); 7125 spin_lock(&state->state_lock); 7126 nfs4_stateid_copy(&p->arg.stateid, &lsp->ls_stateid); 7127 spin_unlock(&state->state_lock); 7128 return p; 7129 } 7130 7131 static void nfs4_locku_release_calldata(void *data) 7132 { 7133 struct nfs4_unlockdata *calldata = data; 7134 nfs_free_seqid(calldata->arg.seqid); 7135 nfs4_put_lock_state(calldata->lsp); 7136 nfs_put_lock_context(calldata->l_ctx); 7137 put_nfs_open_context(calldata->ctx); 7138 kfree(calldata); 7139 } 7140 7141 static void nfs4_locku_done(struct rpc_task *task, void *data) 7142 { 7143 struct nfs4_unlockdata *calldata = data; 7144 struct nfs4_exception exception = { 7145 .inode = calldata->lsp->ls_state->inode, 7146 .stateid = &calldata->arg.stateid, 7147 }; 7148 7149 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 7150 return; 7151 switch (task->tk_status) { 7152 case 0: 7153 renew_lease(calldata->server, calldata->timestamp); 7154 locks_lock_inode_wait(calldata->lsp->ls_state->inode, &calldata->fl); 7155 if (nfs4_update_lock_stateid(calldata->lsp, 7156 &calldata->res.stateid)) 7157 break; 7158 fallthrough; 7159 case -NFS4ERR_ADMIN_REVOKED: 7160 case -NFS4ERR_EXPIRED: 7161 nfs4_free_revoked_stateid(calldata->server, 7162 &calldata->arg.stateid, 7163 task->tk_msg.rpc_cred); 7164 fallthrough; 7165 case -NFS4ERR_BAD_STATEID: 7166 case -NFS4ERR_STALE_STATEID: 7167 if (nfs4_sync_lock_stateid(&calldata->arg.stateid, 7168 calldata->lsp)) 7169 rpc_restart_call_prepare(task); 7170 break; 7171 case -NFS4ERR_OLD_STATEID: 7172 if (nfs4_refresh_lock_old_stateid(&calldata->arg.stateid, 7173 calldata->lsp)) 7174 rpc_restart_call_prepare(task); 7175 break; 7176 default: 7177 task->tk_status = nfs4_async_handle_exception(task, 7178 calldata->server, task->tk_status, 7179 &exception); 7180 if (exception.retry) 7181 rpc_restart_call_prepare(task); 7182 } 7183 nfs_release_seqid(calldata->arg.seqid); 7184 } 7185 7186 static void nfs4_locku_prepare(struct rpc_task *task, void *data) 7187 { 7188 struct nfs4_unlockdata *calldata = data; 7189 7190 if (test_bit(NFS_CONTEXT_UNLOCK, &calldata->l_ctx->open_context->flags) && 7191 nfs_async_iocounter_wait(task, calldata->l_ctx)) 7192 return; 7193 7194 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 7195 goto out_wait; 7196 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) { 7197 /* Note: exit _without_ running nfs4_locku_done */ 7198 goto out_no_action; 7199 } 7200 calldata->timestamp = jiffies; 7201 if (nfs4_setup_sequence(calldata->server->nfs_client, 7202 &calldata->arg.seq_args, 7203 &calldata->res.seq_res, 7204 task) != 0) 7205 nfs_release_seqid(calldata->arg.seqid); 7206 return; 7207 out_no_action: 7208 task->tk_action = NULL; 7209 out_wait: 7210 nfs4_sequence_done(task, &calldata->res.seq_res); 7211 } 7212 7213 static const struct rpc_call_ops nfs4_locku_ops = { 7214 .rpc_call_prepare = nfs4_locku_prepare, 7215 .rpc_call_done = nfs4_locku_done, 7216 .rpc_release = nfs4_locku_release_calldata, 7217 }; 7218 7219 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, 7220 struct nfs_open_context *ctx, 7221 struct nfs4_lock_state *lsp, 7222 struct nfs_seqid *seqid) 7223 { 7224 struct nfs4_unlockdata *data; 7225 struct rpc_message msg = { 7226 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU], 7227 .rpc_cred = ctx->cred, 7228 }; 7229 struct rpc_task_setup task_setup_data = { 7230 .rpc_client = 
NFS_CLIENT(lsp->ls_state->inode), 7231 .rpc_message = &msg, 7232 .callback_ops = &nfs4_locku_ops, 7233 .workqueue = nfsiod_workqueue, 7234 .flags = RPC_TASK_ASYNC, 7235 }; 7236 7237 if (nfs_server_capable(lsp->ls_state->inode, NFS_CAP_MOVEABLE)) 7238 task_setup_data.flags |= RPC_TASK_MOVEABLE; 7239 7240 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client, 7241 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg); 7242 7243 /* Ensure this is an unlock - when canceling a lock, the 7244 * canceled lock is passed in, and it won't be an unlock. 7245 */ 7246 fl->c.flc_type = F_UNLCK; 7247 if (fl->c.flc_flags & FL_CLOSE) 7248 set_bit(NFS_CONTEXT_UNLOCK, &ctx->flags); 7249 7250 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); 7251 if (data == NULL) { 7252 nfs_free_seqid(seqid); 7253 return ERR_PTR(-ENOMEM); 7254 } 7255 7256 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 0); 7257 msg.rpc_argp = &data->arg; 7258 msg.rpc_resp = &data->res; 7259 task_setup_data.callback_data = data; 7260 return rpc_run_task(&task_setup_data); 7261 } 7262 7263 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) 7264 { 7265 struct inode *inode = state->inode; 7266 struct nfs4_state_owner *sp = state->owner; 7267 struct nfs_inode *nfsi = NFS_I(inode); 7268 struct nfs_seqid *seqid; 7269 struct nfs4_lock_state *lsp; 7270 struct rpc_task *task; 7271 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 7272 int status = 0; 7273 unsigned char saved_flags = request->c.flc_flags; 7274 7275 status = nfs4_set_lock_state(state, request); 7276 /* Unlock _before_ we do the RPC call */ 7277 request->c.flc_flags |= FL_EXISTS; 7278 /* Exclude nfs_delegation_claim_locks() */ 7279 mutex_lock(&sp->so_delegreturn_mutex); 7280 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */ 7281 down_read(&nfsi->rwsem); 7282 if (locks_lock_inode_wait(inode, request) == -ENOENT) { 7283 up_read(&nfsi->rwsem); 7284 mutex_unlock(&sp->so_delegreturn_mutex); 7285 goto out; 7286 } 7287 lsp = request->fl_u.nfs4_fl.owner; 7288 set_bit(NFS_LOCK_UNLOCKING, &lsp->ls_flags); 7289 up_read(&nfsi->rwsem); 7290 mutex_unlock(&sp->so_delegreturn_mutex); 7291 if (status != 0) 7292 goto out; 7293 /* Is this a delegated lock? 
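 * A lock that was only ever cached locally under a delegation has no lock
 * state on the server (NFS_LOCK_INITIALIZED is clear), so there is no
 * LOCKU to send and we can return immediately.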
*/ 7294 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0) 7295 goto out; 7296 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid; 7297 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); 7298 status = -ENOMEM; 7299 if (IS_ERR(seqid)) 7300 goto out; 7301 task = nfs4_do_unlck(request, 7302 nfs_file_open_context(request->c.flc_file), 7303 lsp, seqid); 7304 status = PTR_ERR(task); 7305 if (IS_ERR(task)) 7306 goto out; 7307 status = rpc_wait_for_completion_task(task); 7308 rpc_put_task(task); 7309 out: 7310 request->c.flc_flags = saved_flags; 7311 trace_nfs4_unlock(request, state, F_SETLK, status); 7312 return status; 7313 } 7314 7315 struct nfs4_lockdata { 7316 struct nfs_lock_args arg; 7317 struct nfs_lock_res res; 7318 struct nfs4_lock_state *lsp; 7319 struct nfs_open_context *ctx; 7320 struct file_lock fl; 7321 unsigned long timestamp; 7322 int rpc_status; 7323 int cancelled; 7324 struct nfs_server *server; 7325 }; 7326 7327 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, 7328 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp, 7329 gfp_t gfp_mask) 7330 { 7331 struct nfs4_lockdata *p; 7332 struct inode *inode = lsp->ls_state->inode; 7333 struct nfs_server *server = NFS_SERVER(inode); 7334 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 7335 7336 p = kzalloc(sizeof(*p), gfp_mask); 7337 if (p == NULL) 7338 return NULL; 7339 7340 p->arg.fh = NFS_FH(inode); 7341 p->arg.fl = &p->fl; 7342 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask); 7343 if (IS_ERR(p->arg.open_seqid)) 7344 goto out_free; 7345 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 7346 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask); 7347 if (IS_ERR(p->arg.lock_seqid)) 7348 goto out_free_seqid; 7349 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; 7350 p->arg.lock_owner.id = lsp->ls_seqid.owner_id; 7351 p->arg.lock_owner.s_dev = server->s_dev; 7352 p->res.lock_seqid = p->arg.lock_seqid; 7353 p->lsp = lsp; 7354 p->server = server; 7355 p->ctx = get_nfs_open_context(ctx); 7356 locks_init_lock(&p->fl); 7357 locks_copy_lock(&p->fl, fl); 7358 return p; 7359 out_free_seqid: 7360 nfs_free_seqid(p->arg.open_seqid); 7361 out_free: 7362 kfree(p); 7363 return NULL; 7364 } 7365 7366 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) 7367 { 7368 struct nfs4_lockdata *data = calldata; 7369 struct nfs4_state *state = data->lsp->ls_state; 7370 7371 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) 7372 goto out_wait; 7373 /* Do we need to do an open_to_lock_owner? 
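 * If this lock owner has no lock stateid yet (NFS_LOCK_INITIALIZED is
 * clear), the LOCK request must carry the open stateid and open seqid so
 * the server can establish one (new_lock_owner = 1); otherwise we present
 * the existing lock stateid and only the lock seqid is consumed.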
*/ 7374 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) { 7375 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) { 7376 goto out_release_lock_seqid; 7377 } 7378 nfs4_stateid_copy(&data->arg.open_stateid, 7379 &state->open_stateid); 7380 data->arg.new_lock_owner = 1; 7381 data->res.open_seqid = data->arg.open_seqid; 7382 } else { 7383 data->arg.new_lock_owner = 0; 7384 nfs4_stateid_copy(&data->arg.lock_stateid, 7385 &data->lsp->ls_stateid); 7386 } 7387 if (!nfs4_valid_open_stateid(state)) { 7388 data->rpc_status = -EBADF; 7389 task->tk_action = NULL; 7390 goto out_release_open_seqid; 7391 } 7392 data->timestamp = jiffies; 7393 if (nfs4_setup_sequence(data->server->nfs_client, 7394 &data->arg.seq_args, 7395 &data->res.seq_res, 7396 task) == 0) 7397 return; 7398 out_release_open_seqid: 7399 nfs_release_seqid(data->arg.open_seqid); 7400 out_release_lock_seqid: 7401 nfs_release_seqid(data->arg.lock_seqid); 7402 out_wait: 7403 nfs4_sequence_done(task, &data->res.seq_res); 7404 dprintk("%s: ret = %d\n", __func__, data->rpc_status); 7405 } 7406 7407 static void nfs4_lock_done(struct rpc_task *task, void *calldata) 7408 { 7409 struct nfs4_lockdata *data = calldata; 7410 struct nfs4_lock_state *lsp = data->lsp; 7411 7412 if (!nfs4_sequence_done(task, &data->res.seq_res)) 7413 return; 7414 7415 data->rpc_status = task->tk_status; 7416 switch (task->tk_status) { 7417 case 0: 7418 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)), 7419 data->timestamp); 7420 if (data->arg.new_lock && !data->cancelled) { 7421 data->fl.c.flc_flags &= ~(FL_SLEEP | FL_ACCESS); 7422 if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0) 7423 goto out_restart; 7424 } 7425 if (data->arg.new_lock_owner != 0) { 7426 nfs_confirm_seqid(&lsp->ls_seqid, 0); 7427 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid); 7428 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 7429 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid)) 7430 goto out_restart; 7431 break; 7432 case -NFS4ERR_OLD_STATEID: 7433 if (data->arg.new_lock_owner != 0 && 7434 nfs4_refresh_open_old_stateid(&data->arg.open_stateid, 7435 lsp->ls_state)) 7436 goto out_restart; 7437 if (nfs4_refresh_lock_old_stateid(&data->arg.lock_stateid, lsp)) 7438 goto out_restart; 7439 fallthrough; 7440 case -NFS4ERR_BAD_STATEID: 7441 case -NFS4ERR_STALE_STATEID: 7442 case -NFS4ERR_EXPIRED: 7443 if (data->arg.new_lock_owner != 0) { 7444 if (!nfs4_stateid_match(&data->arg.open_stateid, 7445 &lsp->ls_state->open_stateid)) 7446 goto out_restart; 7447 } else if (!nfs4_stateid_match(&data->arg.lock_stateid, 7448 &lsp->ls_stateid)) 7449 goto out_restart; 7450 } 7451 out_done: 7452 dprintk("%s: ret = %d!\n", __func__, data->rpc_status); 7453 return; 7454 out_restart: 7455 if (!data->cancelled) 7456 rpc_restart_call_prepare(task); 7457 goto out_done; 7458 } 7459 7460 static void nfs4_lock_release(void *calldata) 7461 { 7462 struct nfs4_lockdata *data = calldata; 7463 7464 nfs_free_seqid(data->arg.open_seqid); 7465 if (data->cancelled && data->rpc_status == 0) { 7466 struct rpc_task *task; 7467 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, 7468 data->arg.lock_seqid); 7469 if (!IS_ERR(task)) 7470 rpc_put_task_async(task); 7471 dprintk("%s: cancelling lock!\n", __func__); 7472 } else 7473 nfs_free_seqid(data->arg.lock_seqid); 7474 nfs4_put_lock_state(data->lsp); 7475 put_nfs_open_context(data->ctx); 7476 kfree(data); 7477 } 7478 7479 static const struct rpc_call_ops nfs4_lock_ops = { 7480 .rpc_call_prepare = nfs4_lock_prepare, 7481 .rpc_call_done = 
nfs4_lock_done, 7482 .rpc_release = nfs4_lock_release, 7483 }; 7484 7485 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) 7486 { 7487 switch (error) { 7488 case -NFS4ERR_ADMIN_REVOKED: 7489 case -NFS4ERR_EXPIRED: 7490 case -NFS4ERR_BAD_STATEID: 7491 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 7492 if (new_lock_owner != 0 || 7493 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) 7494 nfs4_schedule_stateid_recovery(server, lsp->ls_state); 7495 break; 7496 case -NFS4ERR_STALE_STATEID: 7497 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 7498 nfs4_schedule_lease_recovery(server->nfs_client); 7499 } 7500 } 7501 7502 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) 7503 { 7504 struct nfs4_lockdata *data; 7505 struct rpc_task *task; 7506 struct rpc_message msg = { 7507 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK], 7508 .rpc_cred = state->owner->so_cred, 7509 }; 7510 struct rpc_task_setup task_setup_data = { 7511 .rpc_client = NFS_CLIENT(state->inode), 7512 .rpc_message = &msg, 7513 .callback_ops = &nfs4_lock_ops, 7514 .workqueue = nfsiod_workqueue, 7515 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 7516 }; 7517 int ret; 7518 7519 if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE)) 7520 task_setup_data.flags |= RPC_TASK_MOVEABLE; 7521 7522 data = nfs4_alloc_lockdata(fl, 7523 nfs_file_open_context(fl->c.flc_file), 7524 fl->fl_u.nfs4_fl.owner, GFP_KERNEL); 7525 if (data == NULL) 7526 return -ENOMEM; 7527 if (IS_SETLKW(cmd)) 7528 data->arg.block = 1; 7529 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 7530 recovery_type > NFS_LOCK_NEW); 7531 msg.rpc_argp = &data->arg; 7532 msg.rpc_resp = &data->res; 7533 task_setup_data.callback_data = data; 7534 if (recovery_type > NFS_LOCK_NEW) { 7535 if (recovery_type == NFS_LOCK_RECLAIM) 7536 data->arg.reclaim = NFS_LOCK_RECLAIM; 7537 } else 7538 data->arg.new_lock = 1; 7539 task = rpc_run_task(&task_setup_data); 7540 if (IS_ERR(task)) 7541 return PTR_ERR(task); 7542 ret = rpc_wait_for_completion_task(task); 7543 if (ret == 0) { 7544 ret = data->rpc_status; 7545 if (ret) 7546 nfs4_handle_setlk_error(data->server, data->lsp, 7547 data->arg.new_lock_owner, ret); 7548 } else 7549 data->cancelled = true; 7550 trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret); 7551 rpc_put_task(task); 7552 dprintk("%s: ret = %d\n", __func__, ret); 7553 return ret; 7554 } 7555 7556 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) 7557 { 7558 struct nfs_server *server = NFS_SERVER(state->inode); 7559 struct nfs4_exception exception = { 7560 .inode = state->inode, 7561 }; 7562 int err; 7563 7564 do { 7565 /* Cache the lock if possible... 
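 * While the state is still covered by a delegation the lock lives purely
 * in the local locks layer and there is no server-side lock state to
 * reclaim, so this is treated as an immediate success.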
*/ 7566 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 7567 return 0; 7568 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM); 7569 if (err != -NFS4ERR_DELAY) 7570 break; 7571 nfs4_handle_exception(server, err, &exception); 7572 } while (exception.retry); 7573 return err; 7574 } 7575 7576 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) 7577 { 7578 struct nfs_server *server = NFS_SERVER(state->inode); 7579 struct nfs4_exception exception = { 7580 .inode = state->inode, 7581 }; 7582 int err; 7583 7584 err = nfs4_set_lock_state(state, request); 7585 if (err != 0) 7586 return err; 7587 if (!recover_lost_locks) { 7588 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags); 7589 return 0; 7590 } 7591 do { 7592 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 7593 return 0; 7594 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED); 7595 switch (err) { 7596 default: 7597 goto out; 7598 case -NFS4ERR_GRACE: 7599 case -NFS4ERR_DELAY: 7600 nfs4_handle_exception(server, err, &exception); 7601 err = 0; 7602 } 7603 } while (exception.retry); 7604 out: 7605 return err; 7606 } 7607 7608 #if defined(CONFIG_NFS_V4_1) 7609 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request) 7610 { 7611 struct nfs4_lock_state *lsp; 7612 int status; 7613 7614 status = nfs4_set_lock_state(state, request); 7615 if (status != 0) 7616 return status; 7617 lsp = request->fl_u.nfs4_fl.owner; 7618 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) || 7619 test_bit(NFS_LOCK_LOST, &lsp->ls_flags)) 7620 return 0; 7621 return nfs4_lock_expired(state, request); 7622 } 7623 #endif 7624 7625 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7626 { 7627 struct nfs_inode *nfsi = NFS_I(state->inode); 7628 struct nfs4_state_owner *sp = state->owner; 7629 unsigned char flags = request->c.flc_flags; 7630 int status; 7631 7632 request->c.flc_flags |= FL_ACCESS; 7633 status = locks_lock_inode_wait(state->inode, request); 7634 if (status < 0) 7635 goto out; 7636 mutex_lock(&sp->so_delegreturn_mutex); 7637 down_read(&nfsi->rwsem); 7638 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { 7639 /* Yes: cache locks! */ 7640 /* ...but avoid races with delegation recall... 
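		 * by applying the lock locally with FL_SLEEP cleared while
		 * so_delegreturn_mutex and the inode's rwsem are still held,
		 * so the attempt cannot block a concurrent delegation return.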
*/ 7641 request->c.flc_flags = flags & ~FL_SLEEP; 7642 status = locks_lock_inode_wait(state->inode, request); 7643 up_read(&nfsi->rwsem); 7644 mutex_unlock(&sp->so_delegreturn_mutex); 7645 goto out; 7646 } 7647 up_read(&nfsi->rwsem); 7648 mutex_unlock(&sp->so_delegreturn_mutex); 7649 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); 7650 out: 7651 request->c.flc_flags = flags; 7652 return status; 7653 } 7654 7655 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7656 { 7657 struct nfs4_exception exception = { 7658 .state = state, 7659 .inode = state->inode, 7660 .interruptible = true, 7661 }; 7662 int err; 7663 7664 do { 7665 err = _nfs4_proc_setlk(state, cmd, request); 7666 if (err == -NFS4ERR_DENIED) 7667 err = -EAGAIN; 7668 err = nfs4_handle_exception(NFS_SERVER(state->inode), 7669 err, &exception); 7670 } while (exception.retry); 7671 return err; 7672 } 7673 7674 #define NFS4_LOCK_MINTIMEOUT (1 * HZ) 7675 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) 7676 7677 static int 7678 nfs4_retry_setlk_simple(struct nfs4_state *state, int cmd, 7679 struct file_lock *request) 7680 { 7681 int status = -ERESTARTSYS; 7682 unsigned long timeout = NFS4_LOCK_MINTIMEOUT; 7683 7684 while(!signalled()) { 7685 status = nfs4_proc_setlk(state, cmd, request); 7686 if ((status != -EAGAIN) || IS_SETLK(cmd)) 7687 break; 7688 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE); 7689 schedule_timeout(timeout); 7690 timeout *= 2; 7691 timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout); 7692 status = -ERESTARTSYS; 7693 } 7694 return status; 7695 } 7696 7697 #ifdef CONFIG_NFS_V4_1 7698 struct nfs4_lock_waiter { 7699 struct inode *inode; 7700 struct nfs_lowner owner; 7701 wait_queue_entry_t wait; 7702 }; 7703 7704 static int 7705 nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key) 7706 { 7707 struct nfs4_lock_waiter *waiter = 7708 container_of(wait, struct nfs4_lock_waiter, wait); 7709 7710 /* NULL key means to wake up everyone */ 7711 if (key) { 7712 struct cb_notify_lock_args *cbnl = key; 7713 struct nfs_lowner *lowner = &cbnl->cbnl_owner, 7714 *wowner = &waiter->owner; 7715 7716 /* Only wake if the callback was for the same owner. 
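		 * Both the lock owner (id and s_dev) and the file handle
		 * carried in the CB_NOTIFY_LOCK arguments must match this
		 * waiter before it is woken.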
*/ 7717 if (lowner->id != wowner->id || lowner->s_dev != wowner->s_dev) 7718 return 0; 7719 7720 /* Make sure it's for the right inode */ 7721 if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh)) 7722 return 0; 7723 } 7724 7725 return woken_wake_function(wait, mode, flags, key); 7726 } 7727 7728 static int 7729 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7730 { 7731 struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner; 7732 struct nfs_server *server = NFS_SERVER(state->inode); 7733 struct nfs_client *clp = server->nfs_client; 7734 wait_queue_head_t *q = &clp->cl_lock_waitq; 7735 struct nfs4_lock_waiter waiter = { 7736 .inode = state->inode, 7737 .owner = { .clientid = clp->cl_clientid, 7738 .id = lsp->ls_seqid.owner_id, 7739 .s_dev = server->s_dev }, 7740 }; 7741 int status; 7742 7743 /* Don't bother with waitqueue if we don't expect a callback */ 7744 if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags)) 7745 return nfs4_retry_setlk_simple(state, cmd, request); 7746 7747 init_wait(&waiter.wait); 7748 waiter.wait.func = nfs4_wake_lock_waiter; 7749 add_wait_queue(q, &waiter.wait); 7750 7751 do { 7752 status = nfs4_proc_setlk(state, cmd, request); 7753 if (status != -EAGAIN || IS_SETLK(cmd)) 7754 break; 7755 7756 status = -ERESTARTSYS; 7757 wait_woken(&waiter.wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE, 7758 NFS4_LOCK_MAXTIMEOUT); 7759 } while (!signalled()); 7760 7761 remove_wait_queue(q, &waiter.wait); 7762 7763 return status; 7764 } 7765 #else /* !CONFIG_NFS_V4_1 */ 7766 static inline int 7767 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7768 { 7769 return nfs4_retry_setlk_simple(state, cmd, request); 7770 } 7771 #endif 7772 7773 static int 7774 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) 7775 { 7776 struct nfs_open_context *ctx; 7777 struct nfs4_state *state; 7778 int status; 7779 7780 /* verify open state */ 7781 ctx = nfs_file_open_context(filp); 7782 state = ctx->state; 7783 7784 if (IS_GETLK(cmd)) { 7785 if (state != NULL) 7786 return nfs4_proc_getlk(state, F_GETLK, request); 7787 return 0; 7788 } 7789 7790 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd))) 7791 return -EINVAL; 7792 7793 if (lock_is_unlock(request)) { 7794 if (state != NULL) 7795 return nfs4_proc_unlck(state, cmd, request); 7796 return 0; 7797 } 7798 7799 if (state == NULL) 7800 return -ENOLCK; 7801 7802 if ((request->c.flc_flags & FL_POSIX) && 7803 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) 7804 return -ENOLCK; 7805 7806 /* 7807 * Don't rely on the VFS having checked the file open mode, 7808 * since it won't do this for flock() locks. 7809 */ 7810 switch (request->c.flc_type) { 7811 case F_RDLCK: 7812 if (!(filp->f_mode & FMODE_READ)) 7813 return -EBADF; 7814 break; 7815 case F_WRLCK: 7816 if (!(filp->f_mode & FMODE_WRITE)) 7817 return -EBADF; 7818 } 7819 7820 status = nfs4_set_lock_state(state, request); 7821 if (status != 0) 7822 return status; 7823 7824 return nfs4_retry_setlk(state, cmd, request); 7825 } 7826 7827 static int nfs4_delete_lease(struct file *file, void **priv) 7828 { 7829 return generic_setlease(file, F_UNLCK, NULL, priv); 7830 } 7831 7832 static int nfs4_add_lease(struct file *file, int arg, struct file_lease **lease, 7833 void **priv) 7834 { 7835 struct inode *inode = file_inode(file); 7836 fmode_t type = arg == F_RDLCK ? 
FMODE_READ : FMODE_WRITE; 7837 int ret; 7838 7839 /* No delegation, no lease */ 7840 if (!nfs4_have_delegation(inode, type, 0)) 7841 return -EAGAIN; 7842 ret = generic_setlease(file, arg, lease, priv); 7843 if (ret || nfs4_have_delegation(inode, type, 0)) 7844 return ret; 7845 /* We raced with a delegation return */ 7846 nfs4_delete_lease(file, priv); 7847 return -EAGAIN; 7848 } 7849 7850 int nfs4_proc_setlease(struct file *file, int arg, struct file_lease **lease, 7851 void **priv) 7852 { 7853 switch (arg) { 7854 case F_RDLCK: 7855 case F_WRLCK: 7856 return nfs4_add_lease(file, arg, lease, priv); 7857 case F_UNLCK: 7858 return nfs4_delete_lease(file, priv); 7859 default: 7860 return -EINVAL; 7861 } 7862 } 7863 7864 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid) 7865 { 7866 struct nfs_server *server = NFS_SERVER(state->inode); 7867 int err; 7868 7869 err = nfs4_set_lock_state(state, fl); 7870 if (err != 0) 7871 return err; 7872 do { 7873 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); 7874 if (err != -NFS4ERR_DELAY) 7875 break; 7876 ssleep(1); 7877 } while (err == -NFS4ERR_DELAY); 7878 return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err); 7879 } 7880 7881 struct nfs_release_lockowner_data { 7882 struct nfs4_lock_state *lsp; 7883 struct nfs_server *server; 7884 struct nfs_release_lockowner_args args; 7885 struct nfs_release_lockowner_res res; 7886 unsigned long timestamp; 7887 }; 7888 7889 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata) 7890 { 7891 struct nfs_release_lockowner_data *data = calldata; 7892 struct nfs_server *server = data->server; 7893 nfs4_setup_sequence(server->nfs_client, &data->args.seq_args, 7894 &data->res.seq_res, task); 7895 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 7896 data->timestamp = jiffies; 7897 } 7898 7899 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata) 7900 { 7901 struct nfs_release_lockowner_data *data = calldata; 7902 struct nfs_server *server = data->server; 7903 7904 nfs40_sequence_done(task, &data->res.seq_res); 7905 7906 switch (task->tk_status) { 7907 case 0: 7908 renew_lease(server, data->timestamp); 7909 break; 7910 case -NFS4ERR_STALE_CLIENTID: 7911 case -NFS4ERR_EXPIRED: 7912 nfs4_schedule_lease_recovery(server->nfs_client); 7913 break; 7914 case -NFS4ERR_LEASE_MOVED: 7915 case -NFS4ERR_DELAY: 7916 if (nfs4_async_handle_error(task, server, 7917 NULL, NULL) == -EAGAIN) 7918 rpc_restart_call_prepare(task); 7919 } 7920 } 7921 7922 static void nfs4_release_lockowner_release(void *calldata) 7923 { 7924 struct nfs_release_lockowner_data *data = calldata; 7925 nfs4_free_lock_state(data->server, data->lsp); 7926 kfree(calldata); 7927 } 7928 7929 static const struct rpc_call_ops nfs4_release_lockowner_ops = { 7930 .rpc_call_prepare = nfs4_release_lockowner_prepare, 7931 .rpc_call_done = nfs4_release_lockowner_done, 7932 .rpc_release = nfs4_release_lockowner_release, 7933 }; 7934 7935 static void 7936 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp) 7937 { 7938 struct nfs_release_lockowner_data *data; 7939 struct rpc_message msg = { 7940 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER], 7941 }; 7942 7943 if (server->nfs_client->cl_mvops->minor_version != 0) 7944 return; 7945 7946 data = kmalloc(sizeof(*data), GFP_KERNEL); 7947 if (!data) 7948 return; 7949 data->lsp = lsp; 7950 data->server = server; 7951 data->args.lock_owner.clientid = 
server->nfs_client->cl_clientid; 7952 data->args.lock_owner.id = lsp->ls_seqid.owner_id; 7953 data->args.lock_owner.s_dev = server->s_dev; 7954 7955 msg.rpc_argp = &data->args; 7956 msg.rpc_resp = &data->res; 7957 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0); 7958 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); 7959 } 7960 7961 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" 7962 7963 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler, 7964 struct mnt_idmap *idmap, 7965 struct dentry *unused, struct inode *inode, 7966 const char *key, const void *buf, 7967 size_t buflen, int flags) 7968 { 7969 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_ACL); 7970 } 7971 7972 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler, 7973 struct dentry *unused, struct inode *inode, 7974 const char *key, void *buf, size_t buflen) 7975 { 7976 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_ACL); 7977 } 7978 7979 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry) 7980 { 7981 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_ACL); 7982 } 7983 7984 #if defined(CONFIG_NFS_V4_1) 7985 #define XATTR_NAME_NFSV4_DACL "system.nfs4_dacl" 7986 7987 static int nfs4_xattr_set_nfs4_dacl(const struct xattr_handler *handler, 7988 struct mnt_idmap *idmap, 7989 struct dentry *unused, struct inode *inode, 7990 const char *key, const void *buf, 7991 size_t buflen, int flags) 7992 { 7993 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_DACL); 7994 } 7995 7996 static int nfs4_xattr_get_nfs4_dacl(const struct xattr_handler *handler, 7997 struct dentry *unused, struct inode *inode, 7998 const char *key, void *buf, size_t buflen) 7999 { 8000 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_DACL); 8001 } 8002 8003 static bool nfs4_xattr_list_nfs4_dacl(struct dentry *dentry) 8004 { 8005 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_DACL); 8006 } 8007 8008 #define XATTR_NAME_NFSV4_SACL "system.nfs4_sacl" 8009 8010 static int nfs4_xattr_set_nfs4_sacl(const struct xattr_handler *handler, 8011 struct mnt_idmap *idmap, 8012 struct dentry *unused, struct inode *inode, 8013 const char *key, const void *buf, 8014 size_t buflen, int flags) 8015 { 8016 return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_SACL); 8017 } 8018 8019 static int nfs4_xattr_get_nfs4_sacl(const struct xattr_handler *handler, 8020 struct dentry *unused, struct inode *inode, 8021 const char *key, void *buf, size_t buflen) 8022 { 8023 return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_SACL); 8024 } 8025 8026 static bool nfs4_xattr_list_nfs4_sacl(struct dentry *dentry) 8027 { 8028 return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_SACL); 8029 } 8030 8031 #endif 8032 8033 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 8034 8035 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler, 8036 struct mnt_idmap *idmap, 8037 struct dentry *unused, struct inode *inode, 8038 const char *key, const void *buf, 8039 size_t buflen, int flags) 8040 { 8041 if (security_ismaclabel(key)) 8042 return nfs4_set_security_label(inode, buf, buflen); 8043 8044 return -EOPNOTSUPP; 8045 } 8046 8047 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler, 8048 struct dentry *unused, struct inode *inode, 8049 const char *key, void *buf, size_t buflen) 8050 { 8051 if (security_ismaclabel(key)) 8052 return nfs4_get_security_label(inode, buf, buflen); 8053 return -EOPNOTSUPP; 8054 } 8055 8056 static ssize_t 8057 
nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len) 8058 { 8059 int len = 0; 8060 8061 if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) { 8062 len = security_inode_listsecurity(inode, list, list_len); 8063 if (len >= 0 && list_len && len > list_len) 8064 return -ERANGE; 8065 } 8066 return len; 8067 } 8068 8069 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = { 8070 .prefix = XATTR_SECURITY_PREFIX, 8071 .get = nfs4_xattr_get_nfs4_label, 8072 .set = nfs4_xattr_set_nfs4_label, 8073 }; 8074 8075 #else 8076 8077 static ssize_t 8078 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len) 8079 { 8080 return 0; 8081 } 8082 8083 #endif 8084 8085 #ifdef CONFIG_NFS_V4_2 8086 static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler, 8087 struct mnt_idmap *idmap, 8088 struct dentry *unused, struct inode *inode, 8089 const char *key, const void *buf, 8090 size_t buflen, int flags) 8091 { 8092 u32 mask; 8093 int ret; 8094 8095 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 8096 return -EOPNOTSUPP; 8097 8098 /* 8099 * There is no mapping from the MAY_* flags to the NFS_ACCESS_XA* 8100 * flags right now. Handling of xattr operations use the normal 8101 * file read/write permissions. 8102 * 8103 * Just in case the server has other ideas (which RFC 8276 allows), 8104 * do a cached access check for the XA* flags to possibly avoid 8105 * doing an RPC and getting EACCES back. 8106 */ 8107 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { 8108 if (!(mask & NFS_ACCESS_XAWRITE)) 8109 return -EACCES; 8110 } 8111 8112 if (buf == NULL) { 8113 ret = nfs42_proc_removexattr(inode, key); 8114 if (!ret) 8115 nfs4_xattr_cache_remove(inode, key); 8116 } else { 8117 ret = nfs42_proc_setxattr(inode, key, buf, buflen, flags); 8118 if (!ret) 8119 nfs4_xattr_cache_add(inode, key, buf, NULL, buflen); 8120 } 8121 8122 return ret; 8123 } 8124 8125 static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler, 8126 struct dentry *unused, struct inode *inode, 8127 const char *key, void *buf, size_t buflen) 8128 { 8129 u32 mask; 8130 ssize_t ret; 8131 8132 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 8133 return -EOPNOTSUPP; 8134 8135 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { 8136 if (!(mask & NFS_ACCESS_XAREAD)) 8137 return -EACCES; 8138 } 8139 8140 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 8141 if (ret) 8142 return ret; 8143 8144 ret = nfs4_xattr_cache_get(inode, key, buf, buflen); 8145 if (ret >= 0 || (ret < 0 && ret != -ENOENT)) 8146 return ret; 8147 8148 ret = nfs42_proc_getxattr(inode, key, buf, buflen); 8149 8150 return ret; 8151 } 8152 8153 static ssize_t 8154 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) 8155 { 8156 u64 cookie; 8157 bool eof; 8158 ssize_t ret, size; 8159 char *buf; 8160 size_t buflen; 8161 u32 mask; 8162 8163 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 8164 return 0; 8165 8166 if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { 8167 if (!(mask & NFS_ACCESS_XALIST)) 8168 return 0; 8169 } 8170 8171 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 8172 if (ret) 8173 return ret; 8174 8175 ret = nfs4_xattr_cache_list(inode, list, list_len); 8176 if (ret >= 0 || (ret < 0 && ret != -ENOENT)) 8177 return ret; 8178 8179 cookie = 0; 8180 eof = false; 8181 buflen = list_len ? list_len : XATTR_LIST_MAX; 8182 buf = list_len ? 
list : NULL; 8183 size = 0; 8184 8185 while (!eof) { 8186 ret = nfs42_proc_listxattrs(inode, buf, buflen, 8187 &cookie, &eof); 8188 if (ret < 0) 8189 return ret; 8190 8191 if (list_len) { 8192 buf += ret; 8193 buflen -= ret; 8194 } 8195 size += ret; 8196 } 8197 8198 if (list_len) 8199 nfs4_xattr_cache_set_list(inode, list, size); 8200 8201 return size; 8202 } 8203 8204 #else 8205 8206 static ssize_t 8207 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) 8208 { 8209 return 0; 8210 } 8211 #endif /* CONFIG_NFS_V4_2 */ 8212 8213 /* 8214 * nfs_fhget will use either the mounted_on_fileid or the fileid 8215 */ 8216 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) 8217 { 8218 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || 8219 (fattr->valid & NFS_ATTR_FATTR_FILEID)) && 8220 (fattr->valid & NFS_ATTR_FATTR_FSID) && 8221 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS))) 8222 return; 8223 8224 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 8225 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL; 8226 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 8227 fattr->nlink = 2; 8228 } 8229 8230 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 8231 const struct qstr *name, 8232 struct nfs4_fs_locations *fs_locations, 8233 struct page *page) 8234 { 8235 struct nfs_server *server = NFS_SERVER(dir); 8236 u32 bitmask[3]; 8237 struct nfs4_fs_locations_arg args = { 8238 .dir_fh = NFS_FH(dir), 8239 .name = name, 8240 .page = page, 8241 .bitmask = bitmask, 8242 }; 8243 struct nfs4_fs_locations_res res = { 8244 .fs_locations = fs_locations, 8245 }; 8246 struct rpc_message msg = { 8247 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 8248 .rpc_argp = &args, 8249 .rpc_resp = &res, 8250 }; 8251 int status; 8252 8253 dprintk("%s: start\n", __func__); 8254 8255 bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS; 8256 bitmask[1] = nfs4_fattr_bitmap[1]; 8257 8258 /* Ask for the fileid of the absent filesystem if mounted_on_fileid 8259 * is not supported */ 8260 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) 8261 bitmask[0] &= ~FATTR4_WORD0_FILEID; 8262 else 8263 bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID; 8264 8265 nfs_fattr_init(fs_locations->fattr); 8266 fs_locations->server = server; 8267 fs_locations->nlocations = 0; 8268 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0); 8269 dprintk("%s: returned status = %d\n", __func__, status); 8270 return status; 8271 } 8272 8273 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 8274 const struct qstr *name, 8275 struct nfs4_fs_locations *fs_locations, 8276 struct page *page) 8277 { 8278 struct nfs4_exception exception = { 8279 .interruptible = true, 8280 }; 8281 int err; 8282 do { 8283 err = _nfs4_proc_fs_locations(client, dir, name, 8284 fs_locations, page); 8285 trace_nfs4_get_fs_locations(dir, name, err); 8286 err = nfs4_handle_exception(NFS_SERVER(dir), err, 8287 &exception); 8288 } while (exception.retry); 8289 return err; 8290 } 8291 8292 /* 8293 * This operation also signals the server that this client is 8294 * performing migration recovery. The server can stop returning 8295 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is 8296 * appended to this compound to identify the client ID which is 8297 * performing recovery. 
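 *
 * On success the client's lease is renewed locally as well, since the
 * RENEW appended to this compound was executed by the server.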
8298 */ 8299 static int _nfs40_proc_get_locations(struct nfs_server *server, 8300 struct nfs_fh *fhandle, 8301 struct nfs4_fs_locations *locations, 8302 struct page *page, const struct cred *cred) 8303 { 8304 struct rpc_clnt *clnt = server->client; 8305 u32 bitmask[2] = { 8306 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 8307 }; 8308 struct nfs4_fs_locations_arg args = { 8309 .clientid = server->nfs_client->cl_clientid, 8310 .fh = fhandle, 8311 .page = page, 8312 .bitmask = bitmask, 8313 .migration = 1, /* skip LOOKUP */ 8314 .renew = 1, /* append RENEW */ 8315 }; 8316 struct nfs4_fs_locations_res res = { 8317 .fs_locations = locations, 8318 .migration = 1, 8319 .renew = 1, 8320 }; 8321 struct rpc_message msg = { 8322 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 8323 .rpc_argp = &args, 8324 .rpc_resp = &res, 8325 .rpc_cred = cred, 8326 }; 8327 unsigned long now = jiffies; 8328 int status; 8329 8330 nfs_fattr_init(locations->fattr); 8331 locations->server = server; 8332 locations->nlocations = 0; 8333 8334 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8335 status = nfs4_call_sync_sequence(clnt, server, &msg, 8336 &args.seq_args, &res.seq_res); 8337 if (status) 8338 return status; 8339 8340 renew_lease(server, now); 8341 return 0; 8342 } 8343 8344 #ifdef CONFIG_NFS_V4_1 8345 8346 /* 8347 * This operation also signals the server that this client is 8348 * performing migration recovery. The server can stop asserting 8349 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID 8350 * performing this operation is identified in the SEQUENCE 8351 * operation in this compound. 8352 * 8353 * When the client supports GETATTR(fs_locations_info), it can 8354 * be plumbed in here. 8355 */ 8356 static int _nfs41_proc_get_locations(struct nfs_server *server, 8357 struct nfs_fh *fhandle, 8358 struct nfs4_fs_locations *locations, 8359 struct page *page, const struct cred *cred) 8360 { 8361 struct rpc_clnt *clnt = server->client; 8362 u32 bitmask[2] = { 8363 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 8364 }; 8365 struct nfs4_fs_locations_arg args = { 8366 .fh = fhandle, 8367 .page = page, 8368 .bitmask = bitmask, 8369 .migration = 1, /* skip LOOKUP */ 8370 }; 8371 struct nfs4_fs_locations_res res = { 8372 .fs_locations = locations, 8373 .migration = 1, 8374 }; 8375 struct rpc_message msg = { 8376 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 8377 .rpc_argp = &args, 8378 .rpc_resp = &res, 8379 .rpc_cred = cred, 8380 }; 8381 struct nfs4_call_sync_data data = { 8382 .seq_server = server, 8383 .seq_args = &args.seq_args, 8384 .seq_res = &res.seq_res, 8385 }; 8386 struct rpc_task_setup task_setup_data = { 8387 .rpc_client = clnt, 8388 .rpc_message = &msg, 8389 .callback_ops = server->nfs_client->cl_mvops->call_sync_ops, 8390 .callback_data = &data, 8391 .flags = RPC_TASK_NO_ROUND_ROBIN, 8392 }; 8393 int status; 8394 8395 nfs_fattr_init(locations->fattr); 8396 locations->server = server; 8397 locations->nlocations = 0; 8398 8399 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8400 status = nfs4_call_sync_custom(&task_setup_data); 8401 if (status == NFS4_OK && 8402 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 8403 status = -NFS4ERR_LEASE_MOVED; 8404 return status; 8405 } 8406 8407 #endif /* CONFIG_NFS_V4_1 */ 8408 8409 /** 8410 * nfs4_proc_get_locations - discover locations for a migrated FSID 8411 * @server: pointer to nfs_server to process 8412 * @fhandle: pointer to the kernel NFS client file handle 8413 * @locations: result of query 8414 * 
@page: buffer 8415 * @cred: credential to use for this operation 8416 * 8417 * Returns NFS4_OK on success, a negative NFS4ERR status code if the 8418 * operation failed, or a negative errno if a local error occurred. 8419 * 8420 * On success, "locations" is filled in, but if the server has 8421 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not 8422 * asserted. 8423 * 8424 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases 8425 * from this client that require migration recovery. 8426 */ 8427 int nfs4_proc_get_locations(struct nfs_server *server, 8428 struct nfs_fh *fhandle, 8429 struct nfs4_fs_locations *locations, 8430 struct page *page, const struct cred *cred) 8431 { 8432 struct nfs_client *clp = server->nfs_client; 8433 const struct nfs4_mig_recovery_ops *ops = 8434 clp->cl_mvops->mig_recovery_ops; 8435 struct nfs4_exception exception = { 8436 .interruptible = true, 8437 }; 8438 int status; 8439 8440 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 8441 (unsigned long long)server->fsid.major, 8442 (unsigned long long)server->fsid.minor, 8443 clp->cl_hostname); 8444 nfs_display_fhandle(fhandle, __func__); 8445 8446 do { 8447 status = ops->get_locations(server, fhandle, locations, page, 8448 cred); 8449 if (status != -NFS4ERR_DELAY) 8450 break; 8451 nfs4_handle_exception(server, status, &exception); 8452 } while (exception.retry); 8453 return status; 8454 } 8455 8456 /* 8457 * This operation also signals the server that this client is 8458 * performing "lease moved" recovery. The server can stop 8459 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation 8460 * is appended to this compound to identify the client ID which is 8461 * performing recovery. 8462 */ 8463 static int _nfs40_proc_fsid_present(struct inode *inode, const struct cred *cred) 8464 { 8465 struct nfs_server *server = NFS_SERVER(inode); 8466 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 8467 struct rpc_clnt *clnt = server->client; 8468 struct nfs4_fsid_present_arg args = { 8469 .fh = NFS_FH(inode), 8470 .clientid = clp->cl_clientid, 8471 .renew = 1, /* append RENEW */ 8472 }; 8473 struct nfs4_fsid_present_res res = { 8474 .renew = 1, 8475 }; 8476 struct rpc_message msg = { 8477 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 8478 .rpc_argp = &args, 8479 .rpc_resp = &res, 8480 .rpc_cred = cred, 8481 }; 8482 unsigned long now = jiffies; 8483 int status; 8484 8485 res.fh = nfs_alloc_fhandle(); 8486 if (res.fh == NULL) 8487 return -ENOMEM; 8488 8489 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8490 status = nfs4_call_sync_sequence(clnt, server, &msg, 8491 &args.seq_args, &res.seq_res); 8492 nfs_free_fhandle(res.fh); 8493 if (status) 8494 return status; 8495 8496 do_renew_lease(clp, now); 8497 return 0; 8498 } 8499 8500 #ifdef CONFIG_NFS_V4_1 8501 8502 /* 8503 * This operation also signals the server that this client is 8504 * performing "lease moved" recovery. The server can stop asserting 8505 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing 8506 * this operation is identified in the SEQUENCE operation in this 8507 * compound. 
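 *
 * If the reply's SEQUENCE status flags still carry
 * SEQ4_STATUS_LEASE_MOVED, -NFS4ERR_LEASE_MOVED is returned so that the
 * caller continues "lease moved" recovery.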
 */
static int _nfs41_proc_fsid_present(struct inode *inode, const struct cred *cred)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct rpc_clnt *clnt = server->client;
	struct nfs4_fsid_present_arg args = {
		.fh = NFS_FH(inode),
	};
	struct nfs4_fsid_present_res res = {
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
		.rpc_argp = &args,
		.rpc_resp = &res,
		.rpc_cred = cred,
	};
	int status;

	res.fh = nfs_alloc_fhandle();
	if (res.fh == NULL)
		return -ENOMEM;

	nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
	status = nfs4_call_sync_sequence(clnt, server, &msg,
					 &args.seq_args, &res.seq_res);
	nfs_free_fhandle(res.fh);
	if (status == NFS4_OK &&
	    res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
		status = -NFS4ERR_LEASE_MOVED;
	return status;
}

#endif /* CONFIG_NFS_V4_1 */

/**
 * nfs4_proc_fsid_present - Is this FSID present or absent on server?
 * @inode: inode on FSID to check
 * @cred: credential to use for this operation
 *
 * Server indicates whether the FSID is present, moved, or not
 * recognized.  This operation is necessary to clear a LEASE_MOVED
 * condition for this client ID.
 *
 * Returns NFS4_OK if the FSID is present on this server,
 * -NFS4ERR_MOVED if the FSID is no longer present, a negative
 * NFS4ERR code if some error occurred on the server, or a
 * negative errno if a local failure occurred.
 */
int nfs4_proc_fsid_present(struct inode *inode, const struct cred *cred)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_client *clp = server->nfs_client;
	const struct nfs4_mig_recovery_ops *ops =
		clp->cl_mvops->mig_recovery_ops;
	struct nfs4_exception exception = {
		.interruptible = true,
	};
	int status;

	dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
		(unsigned long long)server->fsid.major,
		(unsigned long long)server->fsid.minor,
		clp->cl_hostname);
	nfs_display_fhandle(NFS_FH(inode), __func__);

	do {
		status = ops->fsid_present(inode, cred);
		if (status != -NFS4ERR_DELAY)
			break;
		nfs4_handle_exception(server, status, &exception);
	} while (exception.retry);
	return status;
}

/*
 * If 'use_integrity' is true and the state management nfs_client
 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
 * and the machine credential as per RFC3530bis and RFC5661 Security
 * Considerations sections. Otherwise, just use the user cred with the
 * filesystem's rpc_client.
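 *
 * The machine credential, when one was taken via nfs4_get_clid_cred(),
 * is released again once the call completes.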
8588 */ 8589 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity) 8590 { 8591 int status; 8592 struct rpc_clnt *clnt = NFS_SERVER(dir)->client; 8593 struct nfs_client *clp = NFS_SERVER(dir)->nfs_client; 8594 struct nfs4_secinfo_arg args = { 8595 .dir_fh = NFS_FH(dir), 8596 .name = name, 8597 }; 8598 struct nfs4_secinfo_res res = { 8599 .flavors = flavors, 8600 }; 8601 struct rpc_message msg = { 8602 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO], 8603 .rpc_argp = &args, 8604 .rpc_resp = &res, 8605 }; 8606 struct nfs4_call_sync_data data = { 8607 .seq_server = NFS_SERVER(dir), 8608 .seq_args = &args.seq_args, 8609 .seq_res = &res.seq_res, 8610 }; 8611 struct rpc_task_setup task_setup = { 8612 .rpc_client = clnt, 8613 .rpc_message = &msg, 8614 .callback_ops = clp->cl_mvops->call_sync_ops, 8615 .callback_data = &data, 8616 .flags = RPC_TASK_NO_ROUND_ROBIN, 8617 }; 8618 const struct cred *cred = NULL; 8619 8620 if (use_integrity) { 8621 clnt = clp->cl_rpcclient; 8622 task_setup.rpc_client = clnt; 8623 8624 cred = nfs4_get_clid_cred(clp); 8625 msg.rpc_cred = cred; 8626 } 8627 8628 dprintk("NFS call secinfo %s\n", name->name); 8629 8630 nfs4_state_protect(clp, NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg); 8631 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 8632 status = nfs4_call_sync_custom(&task_setup); 8633 8634 dprintk("NFS reply secinfo: %d\n", status); 8635 8636 put_cred(cred); 8637 return status; 8638 } 8639 8640 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, 8641 struct nfs4_secinfo_flavors *flavors) 8642 { 8643 struct nfs4_exception exception = { 8644 .interruptible = true, 8645 }; 8646 int err; 8647 do { 8648 err = -NFS4ERR_WRONGSEC; 8649 8650 /* try to use integrity protection with machine cred */ 8651 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client)) 8652 err = _nfs4_proc_secinfo(dir, name, flavors, true); 8653 8654 /* 8655 * if unable to use integrity protection, or SECINFO with 8656 * integrity protection returns NFS4ERR_WRONGSEC (which is 8657 * disallowed by spec, but exists in deployed servers) use 8658 * the current filesystem's rpc_client and the user cred. 8659 */ 8660 if (err == -NFS4ERR_WRONGSEC) 8661 err = _nfs4_proc_secinfo(dir, name, flavors, false); 8662 8663 trace_nfs4_secinfo(dir, name, err); 8664 err = nfs4_handle_exception(NFS_SERVER(dir), err, 8665 &exception); 8666 } while (exception.retry); 8667 return err; 8668 } 8669 8670 #ifdef CONFIG_NFS_V4_1 8671 /* 8672 * Check the exchange flags returned by the server for invalid flags, having 8673 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or 8674 * DS flags set. 
8675 */ 8676 static int nfs4_check_cl_exchange_flags(u32 flags, u32 version) 8677 { 8678 if (version >= 2 && (flags & ~EXCHGID4_2_FLAG_MASK_R)) 8679 goto out_inval; 8680 else if (version < 2 && (flags & ~EXCHGID4_FLAG_MASK_R)) 8681 goto out_inval; 8682 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && 8683 (flags & EXCHGID4_FLAG_USE_NON_PNFS)) 8684 goto out_inval; 8685 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) 8686 goto out_inval; 8687 return NFS_OK; 8688 out_inval: 8689 return -NFS4ERR_INVAL; 8690 } 8691 8692 static bool 8693 nfs41_same_server_scope(struct nfs41_server_scope *a, 8694 struct nfs41_server_scope *b) 8695 { 8696 if (a->server_scope_sz != b->server_scope_sz) 8697 return false; 8698 return memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0; 8699 } 8700 8701 static void 8702 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) 8703 { 8704 struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp; 8705 struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp; 8706 struct nfs_client *clp = args->client; 8707 8708 switch (task->tk_status) { 8709 case -NFS4ERR_BADSESSION: 8710 case -NFS4ERR_DEADSESSION: 8711 nfs4_schedule_session_recovery(clp->cl_session, 8712 task->tk_status); 8713 return; 8714 } 8715 if (args->dir == NFS4_CDFC4_FORE_OR_BOTH && 8716 res->dir != NFS4_CDFS4_BOTH) { 8717 rpc_task_close_connection(task); 8718 if (args->retries++ < MAX_BIND_CONN_TO_SESSION_RETRIES) 8719 rpc_restart_call(task); 8720 } 8721 } 8722 8723 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = { 8724 .rpc_call_done = nfs4_bind_one_conn_to_session_done, 8725 }; 8726 8727 /* 8728 * nfs4_proc_bind_one_conn_to_session() 8729 * 8730 * The 4.1 client currently uses the same TCP connection for the 8731 * fore and backchannel. 
8732 */ 8733 static 8734 int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt, 8735 struct rpc_xprt *xprt, 8736 struct nfs_client *clp, 8737 const struct cred *cred) 8738 { 8739 int status; 8740 struct nfs41_bind_conn_to_session_args args = { 8741 .client = clp, 8742 .dir = NFS4_CDFC4_FORE_OR_BOTH, 8743 .retries = 0, 8744 }; 8745 struct nfs41_bind_conn_to_session_res res; 8746 struct rpc_message msg = { 8747 .rpc_proc = 8748 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION], 8749 .rpc_argp = &args, 8750 .rpc_resp = &res, 8751 .rpc_cred = cred, 8752 }; 8753 struct rpc_task_setup task_setup_data = { 8754 .rpc_client = clnt, 8755 .rpc_xprt = xprt, 8756 .callback_ops = &nfs4_bind_one_conn_to_session_ops, 8757 .rpc_message = &msg, 8758 .flags = RPC_TASK_TIMEOUT, 8759 }; 8760 struct rpc_task *task; 8761 8762 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id); 8763 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN)) 8764 args.dir = NFS4_CDFC4_FORE; 8765 8766 /* Do not set the backchannel flag unless this is clnt->cl_xprt */ 8767 if (xprt != rcu_access_pointer(clnt->cl_xprt)) 8768 args.dir = NFS4_CDFC4_FORE; 8769 8770 task = rpc_run_task(&task_setup_data); 8771 if (!IS_ERR(task)) { 8772 status = task->tk_status; 8773 rpc_put_task(task); 8774 } else 8775 status = PTR_ERR(task); 8776 trace_nfs4_bind_conn_to_session(clp, status); 8777 if (status == 0) { 8778 if (memcmp(res.sessionid.data, 8779 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { 8780 dprintk("NFS: %s: Session ID mismatch\n", __func__); 8781 return -EIO; 8782 } 8783 if ((res.dir & args.dir) != res.dir || res.dir == 0) { 8784 dprintk("NFS: %s: Unexpected direction from server\n", 8785 __func__); 8786 return -EIO; 8787 } 8788 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) { 8789 dprintk("NFS: %s: Server returned RDMA mode = true\n", 8790 __func__); 8791 return -EIO; 8792 } 8793 } 8794 8795 return status; 8796 } 8797 8798 struct rpc_bind_conn_calldata { 8799 struct nfs_client *clp; 8800 const struct cred *cred; 8801 }; 8802 8803 static int 8804 nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt, 8805 struct rpc_xprt *xprt, 8806 void *calldata) 8807 { 8808 struct rpc_bind_conn_calldata *p = calldata; 8809 8810 return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred); 8811 } 8812 8813 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, const struct cred *cred) 8814 { 8815 struct rpc_bind_conn_calldata data = { 8816 .clp = clp, 8817 .cred = cred, 8818 }; 8819 return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient, 8820 nfs4_proc_bind_conn_to_session_callback, &data); 8821 } 8822 8823 /* 8824 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map 8825 * and operations we'd like to see to enable certain features in the allow map 8826 */ 8827 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = { 8828 .how = SP4_MACH_CRED, 8829 .enforce.u.words = { 8830 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 8831 1 << (OP_EXCHANGE_ID - 32) | 8832 1 << (OP_CREATE_SESSION - 32) | 8833 1 << (OP_DESTROY_SESSION - 32) | 8834 1 << (OP_DESTROY_CLIENTID - 32) 8835 }, 8836 .allow.u.words = { 8837 [0] = 1 << (OP_CLOSE) | 8838 1 << (OP_OPEN_DOWNGRADE) | 8839 1 << (OP_LOCKU) | 8840 1 << (OP_DELEGRETURN) | 8841 1 << (OP_COMMIT), 8842 [1] = 1 << (OP_SECINFO - 32) | 8843 1 << (OP_SECINFO_NO_NAME - 32) | 8844 1 << (OP_LAYOUTRETURN - 32) | 8845 1 << (OP_TEST_STATEID - 32) | 8846 1 << (OP_FREE_STATEID - 32) | 8847 1 << (OP_WRITE - 32) 8848 } 8849 }; 8850 8851 
/* 8852 * Select the state protection mode for client `clp' given the server results 8853 * from exchange_id in `sp'. 8854 * 8855 * Returns 0 on success, negative errno otherwise. 8856 */ 8857 static int nfs4_sp4_select_mode(struct nfs_client *clp, 8858 struct nfs41_state_protection *sp) 8859 { 8860 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = { 8861 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 8862 1 << (OP_EXCHANGE_ID - 32) | 8863 1 << (OP_CREATE_SESSION - 32) | 8864 1 << (OP_DESTROY_SESSION - 32) | 8865 1 << (OP_DESTROY_CLIENTID - 32) 8866 }; 8867 unsigned long flags = 0; 8868 unsigned int i; 8869 int ret = 0; 8870 8871 if (sp->how == SP4_MACH_CRED) { 8872 /* Print state protect result */ 8873 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n"); 8874 for (i = 0; i <= LAST_NFS4_OP; i++) { 8875 if (test_bit(i, sp->enforce.u.longs)) 8876 dfprintk(MOUNT, " enforce op %d\n", i); 8877 if (test_bit(i, sp->allow.u.longs)) 8878 dfprintk(MOUNT, " allow op %d\n", i); 8879 } 8880 8881 /* make sure nothing is on enforce list that isn't supported */ 8882 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) { 8883 if (sp->enforce.u.words[i] & ~supported_enforce[i]) { 8884 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 8885 ret = -EINVAL; 8886 goto out; 8887 } 8888 } 8889 8890 /* 8891 * Minimal mode - state operations are allowed to use machine 8892 * credential. Note this already happens by default, so the 8893 * client doesn't have to do anything more than the negotiation. 8894 * 8895 * NOTE: we don't care if EXCHANGE_ID is in the list - 8896 * we're already using the machine cred for exchange_id 8897 * and will never use a different cred. 8898 */ 8899 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) && 8900 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) && 8901 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) && 8902 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) { 8903 dfprintk(MOUNT, "sp4_mach_cred:\n"); 8904 dfprintk(MOUNT, " minimal mode enabled\n"); 8905 __set_bit(NFS_SP4_MACH_CRED_MINIMAL, &flags); 8906 } else { 8907 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 8908 ret = -EINVAL; 8909 goto out; 8910 } 8911 8912 if (test_bit(OP_CLOSE, sp->allow.u.longs) && 8913 test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) && 8914 test_bit(OP_DELEGRETURN, sp->allow.u.longs) && 8915 test_bit(OP_LOCKU, sp->allow.u.longs)) { 8916 dfprintk(MOUNT, " cleanup mode enabled\n"); 8917 __set_bit(NFS_SP4_MACH_CRED_CLEANUP, &flags); 8918 } 8919 8920 if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) { 8921 dfprintk(MOUNT, " pnfs cleanup mode enabled\n"); 8922 __set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP, &flags); 8923 } 8924 8925 if (test_bit(OP_SECINFO, sp->allow.u.longs) && 8926 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) { 8927 dfprintk(MOUNT, " secinfo mode enabled\n"); 8928 __set_bit(NFS_SP4_MACH_CRED_SECINFO, &flags); 8929 } 8930 8931 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) && 8932 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) { 8933 dfprintk(MOUNT, " stateid mode enabled\n"); 8934 __set_bit(NFS_SP4_MACH_CRED_STATEID, &flags); 8935 } 8936 8937 if (test_bit(OP_WRITE, sp->allow.u.longs)) { 8938 dfprintk(MOUNT, " write mode enabled\n"); 8939 __set_bit(NFS_SP4_MACH_CRED_WRITE, &flags); 8940 } 8941 8942 if (test_bit(OP_COMMIT, sp->allow.u.longs)) { 8943 dfprintk(MOUNT, " commit mode enabled\n"); 8944 __set_bit(NFS_SP4_MACH_CRED_COMMIT, &flags); 8945 } 8946 } 8947 out: 8948 clp->cl_sp4_flags = flags; 8949 return ret; 8950 } 8951 8952 struct nfs41_exchange_id_data { 8953 struct 
nfs41_exchange_id_res res; 8954 struct nfs41_exchange_id_args args; 8955 }; 8956 8957 static void nfs4_exchange_id_release(void *data) 8958 { 8959 struct nfs41_exchange_id_data *cdata = 8960 (struct nfs41_exchange_id_data *)data; 8961 8962 nfs_put_client(cdata->args.client); 8963 kfree(cdata->res.impl_id); 8964 kfree(cdata->res.server_scope); 8965 kfree(cdata->res.server_owner); 8966 kfree(cdata); 8967 } 8968 8969 static const struct rpc_call_ops nfs4_exchange_id_call_ops = { 8970 .rpc_release = nfs4_exchange_id_release, 8971 }; 8972 8973 /* 8974 * _nfs4_proc_exchange_id() 8975 * 8976 * Wrapper for EXCHANGE_ID operation. 8977 */ 8978 static struct rpc_task * 8979 nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred, 8980 u32 sp4_how, struct rpc_xprt *xprt) 8981 { 8982 struct rpc_message msg = { 8983 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], 8984 .rpc_cred = cred, 8985 }; 8986 struct rpc_task_setup task_setup_data = { 8987 .rpc_client = clp->cl_rpcclient, 8988 .callback_ops = &nfs4_exchange_id_call_ops, 8989 .rpc_message = &msg, 8990 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN, 8991 }; 8992 struct nfs41_exchange_id_data *calldata; 8993 int status; 8994 8995 if (!refcount_inc_not_zero(&clp->cl_count)) 8996 return ERR_PTR(-EIO); 8997 8998 status = -ENOMEM; 8999 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 9000 if (!calldata) 9001 goto out; 9002 9003 nfs4_init_boot_verifier(clp, &calldata->args.verifier); 9004 9005 status = nfs4_init_uniform_client_string(clp); 9006 if (status) 9007 goto out_calldata; 9008 9009 calldata->res.server_owner = kzalloc(sizeof(struct nfs41_server_owner), 9010 GFP_NOFS); 9011 status = -ENOMEM; 9012 if (unlikely(calldata->res.server_owner == NULL)) 9013 goto out_calldata; 9014 9015 calldata->res.server_scope = kzalloc(sizeof(struct nfs41_server_scope), 9016 GFP_NOFS); 9017 if (unlikely(calldata->res.server_scope == NULL)) 9018 goto out_server_owner; 9019 9020 calldata->res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS); 9021 if (unlikely(calldata->res.impl_id == NULL)) 9022 goto out_server_scope; 9023 9024 switch (sp4_how) { 9025 case SP4_NONE: 9026 calldata->args.state_protect.how = SP4_NONE; 9027 break; 9028 9029 case SP4_MACH_CRED: 9030 calldata->args.state_protect = nfs4_sp4_mach_cred_request; 9031 break; 9032 9033 default: 9034 /* unsupported! 
*/ 9035 WARN_ON_ONCE(1); 9036 status = -EINVAL; 9037 goto out_impl_id; 9038 } 9039 if (xprt) { 9040 task_setup_data.rpc_xprt = xprt; 9041 task_setup_data.flags |= RPC_TASK_SOFTCONN; 9042 memcpy(calldata->args.verifier.data, clp->cl_confirm.data, 9043 sizeof(calldata->args.verifier.data)); 9044 } 9045 calldata->args.client = clp; 9046 calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 9047 EXCHGID4_FLAG_BIND_PRINC_STATEID; 9048 #ifdef CONFIG_NFS_V4_1_MIGRATION 9049 calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR; 9050 #endif 9051 if (test_bit(NFS_CS_PNFS, &clp->cl_flags)) 9052 calldata->args.flags |= EXCHGID4_FLAG_USE_PNFS_DS; 9053 msg.rpc_argp = &calldata->args; 9054 msg.rpc_resp = &calldata->res; 9055 task_setup_data.callback_data = calldata; 9056 9057 return rpc_run_task(&task_setup_data); 9058 9059 out_impl_id: 9060 kfree(calldata->res.impl_id); 9061 out_server_scope: 9062 kfree(calldata->res.server_scope); 9063 out_server_owner: 9064 kfree(calldata->res.server_owner); 9065 out_calldata: 9066 kfree(calldata); 9067 out: 9068 nfs_put_client(clp); 9069 return ERR_PTR(status); 9070 } 9071 9072 /* 9073 * _nfs4_proc_exchange_id() 9074 * 9075 * Wrapper for EXCHANGE_ID operation. 9076 */ 9077 static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred, 9078 u32 sp4_how) 9079 { 9080 struct rpc_task *task; 9081 struct nfs41_exchange_id_args *argp; 9082 struct nfs41_exchange_id_res *resp; 9083 unsigned long now = jiffies; 9084 int status; 9085 9086 task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL); 9087 if (IS_ERR(task)) 9088 return PTR_ERR(task); 9089 9090 argp = task->tk_msg.rpc_argp; 9091 resp = task->tk_msg.rpc_resp; 9092 status = task->tk_status; 9093 if (status != 0) 9094 goto out; 9095 9096 status = nfs4_check_cl_exchange_flags(resp->flags, 9097 clp->cl_mvops->minor_version); 9098 if (status != 0) 9099 goto out; 9100 9101 status = nfs4_sp4_select_mode(clp, &resp->state_protect); 9102 if (status != 0) 9103 goto out; 9104 9105 do_renew_lease(clp, now); 9106 9107 clp->cl_clientid = resp->clientid; 9108 clp->cl_exchange_flags = resp->flags; 9109 clp->cl_seqid = resp->seqid; 9110 /* Client ID is not confirmed */ 9111 if (!(resp->flags & EXCHGID4_FLAG_CONFIRMED_R)) 9112 clear_bit(NFS4_SESSION_ESTABLISHED, 9113 &clp->cl_session->session_state); 9114 9115 if (clp->cl_serverscope != NULL && 9116 !nfs41_same_server_scope(clp->cl_serverscope, 9117 resp->server_scope)) { 9118 dprintk("%s: server_scope mismatch detected\n", 9119 __func__); 9120 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); 9121 } 9122 9123 swap(clp->cl_serverowner, resp->server_owner); 9124 swap(clp->cl_serverscope, resp->server_scope); 9125 swap(clp->cl_implid, resp->impl_id); 9126 9127 /* Save the EXCHANGE_ID verifier session trunk tests */ 9128 memcpy(clp->cl_confirm.data, argp->verifier.data, 9129 sizeof(clp->cl_confirm.data)); 9130 out: 9131 trace_nfs4_exchange_id(clp, status); 9132 rpc_put_task(task); 9133 return status; 9134 } 9135 9136 /* 9137 * nfs4_proc_exchange_id() 9138 * 9139 * Returns zero, a negative errno, or a negative NFS4ERR status code. 9140 * 9141 * Since the clientid has expired, all compounds using sessions 9142 * associated with the stale clientid will be returning 9143 * NFS4ERR_BADSESSION in the sequence operation, and will therefore 9144 * be in some phase of session reset. 9145 * 9146 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used. 
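 * If that negotiation fails, the EXCHANGE_ID is retried with SP4_NONE.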
 */
int nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred)
{
	rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
	int status;

	/* try SP4_MACH_CRED if krb5i/p */
	if (authflavor == RPC_AUTH_GSS_KRB5I ||
	    authflavor == RPC_AUTH_GSS_KRB5P) {
		status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
		if (!status)
			return 0;
	}

	/* try SP4_NONE */
	return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
}

/**
 * nfs4_test_session_trunk
 *
 * This is an add_xprt_test() test function called from
 * rpc_clnt_setup_test_and_add_xprt.
 *
 * The rpc_xprt_switch is referenced by rpc_clnt_setup_test_and_add_xprt
 * and is dereferenced in nfs4_exchange_id_release
 *
 * Upon success, add the new transport to the rpc_clnt
 *
 * @clnt: struct rpc_clnt to get new transport
 * @xprt: the rpc_xprt to test
 * @data: call data for _nfs4_proc_exchange_id.
 */
void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
			     void *data)
{
	struct nfs4_add_xprt_data *adata = data;
	struct rpc_task *task;
	int status;

	u32 sp4_how;

	dprintk("--> %s try %s\n", __func__,
		xprt->address_strings[RPC_DISPLAY_ADDR]);

	sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED);

try_again:
	/* Test connection for session trunking. Async exchange_id call */
	task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt);
	if (IS_ERR(task))
		return;

	status = task->tk_status;
	if (status == 0) {
		status = nfs4_detect_session_trunking(adata->clp,
				task->tk_msg.rpc_resp, xprt);
		trace_nfs4_trunked_exchange_id(adata->clp,
			xprt->address_strings[RPC_DISPLAY_ADDR], status);
	}
	if (status == 0)
		rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
	else if (status != -NFS4ERR_DELAY && rpc_clnt_xprt_switch_has_addr(clnt,
				(struct sockaddr *)&xprt->addr))
		rpc_clnt_xprt_switch_remove_xprt(clnt, xprt);

	rpc_put_task(task);
	if (status == -NFS4ERR_DELAY) {
		ssleep(1);
		goto try_again;
	}
}
EXPORT_SYMBOL_GPL(nfs4_test_session_trunk);

static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
				       const struct cred *cred)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
		.rpc_argp = clp,
		.rpc_cred = cred,
	};
	int status;

	status = rpc_call_sync(clp->cl_rpcclient, &msg,
			       RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
	trace_nfs4_destroy_clientid(clp, status);
	if (status)
		dprintk("NFS: Got error %d from the server %s on "
			"DESTROY_CLIENTID.", status, clp->cl_hostname);
	return status;
}

static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
				      const struct cred *cred)
{
	unsigned int loop;
	int ret;

	for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
		ret = _nfs4_proc_destroy_clientid(clp, cred);
		switch (ret) {
		case -NFS4ERR_DELAY:
		case -NFS4ERR_CLIENTID_BUSY:
			ssleep(1);
			break;
		default:
			return ret;
		}
	}
	return 0;
}

int nfs4_destroy_clientid(struct nfs_client *clp)
{
	const struct cred *cred;
	int ret = 0;

	if (clp->cl_mvops->minor_version < 1)
		goto out;
	if (clp->cl_exchange_flags == 0)
		goto out;
	if
(clp->cl_preserve_clid) 9270 goto out; 9271 cred = nfs4_get_clid_cred(clp); 9272 ret = nfs4_proc_destroy_clientid(clp, cred); 9273 put_cred(cred); 9274 switch (ret) { 9275 case 0: 9276 case -NFS4ERR_STALE_CLIENTID: 9277 clp->cl_exchange_flags = 0; 9278 } 9279 out: 9280 return ret; 9281 } 9282 9283 #endif /* CONFIG_NFS_V4_1 */ 9284 9285 struct nfs4_get_lease_time_data { 9286 struct nfs4_get_lease_time_args *args; 9287 struct nfs4_get_lease_time_res *res; 9288 struct nfs_client *clp; 9289 }; 9290 9291 static void nfs4_get_lease_time_prepare(struct rpc_task *task, 9292 void *calldata) 9293 { 9294 struct nfs4_get_lease_time_data *data = 9295 (struct nfs4_get_lease_time_data *)calldata; 9296 9297 /* just setup sequence, do not trigger session recovery 9298 since we're invoked within one */ 9299 nfs4_setup_sequence(data->clp, 9300 &data->args->la_seq_args, 9301 &data->res->lr_seq_res, 9302 task); 9303 } 9304 9305 /* 9306 * Called from nfs4_state_manager thread for session setup, so don't recover 9307 * from sequence operation or clientid errors. 9308 */ 9309 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) 9310 { 9311 struct nfs4_get_lease_time_data *data = 9312 (struct nfs4_get_lease_time_data *)calldata; 9313 9314 if (!nfs4_sequence_done(task, &data->res->lr_seq_res)) 9315 return; 9316 switch (task->tk_status) { 9317 case -NFS4ERR_DELAY: 9318 case -NFS4ERR_GRACE: 9319 rpc_delay(task, NFS4_POLL_RETRY_MIN); 9320 task->tk_status = 0; 9321 fallthrough; 9322 case -NFS4ERR_RETRY_UNCACHED_REP: 9323 rpc_restart_call_prepare(task); 9324 return; 9325 } 9326 } 9327 9328 static const struct rpc_call_ops nfs4_get_lease_time_ops = { 9329 .rpc_call_prepare = nfs4_get_lease_time_prepare, 9330 .rpc_call_done = nfs4_get_lease_time_done, 9331 }; 9332 9333 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) 9334 { 9335 struct nfs4_get_lease_time_args args; 9336 struct nfs4_get_lease_time_res res = { 9337 .lr_fsinfo = fsinfo, 9338 }; 9339 struct nfs4_get_lease_time_data data = { 9340 .args = &args, 9341 .res = &res, 9342 .clp = clp, 9343 }; 9344 struct rpc_message msg = { 9345 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME], 9346 .rpc_argp = &args, 9347 .rpc_resp = &res, 9348 }; 9349 struct rpc_task_setup task_setup = { 9350 .rpc_client = clp->cl_rpcclient, 9351 .rpc_message = &msg, 9352 .callback_ops = &nfs4_get_lease_time_ops, 9353 .callback_data = &data, 9354 .flags = RPC_TASK_TIMEOUT, 9355 }; 9356 9357 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0, 1); 9358 return nfs4_call_sync_custom(&task_setup); 9359 } 9360 9361 #ifdef CONFIG_NFS_V4_1 9362 9363 /* 9364 * Initialize the values to be used by the client in CREATE_SESSION 9365 * If nfs4_init_session set the fore channel request and response sizes, 9366 * use them. 9367 * 9368 * Set the back channel max_resp_sz_cached to zero to force the client to 9369 * always set csa_cachethis to FALSE because the current implementation 9370 * of the back channel DRC only supports caching the CB_SEQUENCE operation. 
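 *
 * The requested number of backchannel slots is additionally clamped to
 * the count the underlying RPC transport reports via rpc_num_bc_slots().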
9371 */ 9372 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args, 9373 struct rpc_clnt *clnt) 9374 { 9375 unsigned int max_rqst_sz, max_resp_sz; 9376 unsigned int max_bc_payload = rpc_max_bc_payload(clnt); 9377 unsigned int max_bc_slots = rpc_num_bc_slots(clnt); 9378 9379 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead; 9380 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead; 9381 9382 /* Fore channel attributes */ 9383 args->fc_attrs.max_rqst_sz = max_rqst_sz; 9384 args->fc_attrs.max_resp_sz = max_resp_sz; 9385 args->fc_attrs.max_ops = NFS4_MAX_OPS; 9386 args->fc_attrs.max_reqs = max_session_slots; 9387 9388 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u " 9389 "max_ops=%u max_reqs=%u\n", 9390 __func__, 9391 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz, 9392 args->fc_attrs.max_ops, args->fc_attrs.max_reqs); 9393 9394 /* Back channel attributes */ 9395 args->bc_attrs.max_rqst_sz = max_bc_payload; 9396 args->bc_attrs.max_resp_sz = max_bc_payload; 9397 args->bc_attrs.max_resp_sz_cached = 0; 9398 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS; 9399 args->bc_attrs.max_reqs = max_t(unsigned short, max_session_cb_slots, 1); 9400 if (args->bc_attrs.max_reqs > max_bc_slots) 9401 args->bc_attrs.max_reqs = max_bc_slots; 9402 9403 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u " 9404 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", 9405 __func__, 9406 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz, 9407 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops, 9408 args->bc_attrs.max_reqs); 9409 } 9410 9411 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, 9412 struct nfs41_create_session_res *res) 9413 { 9414 struct nfs4_channel_attrs *sent = &args->fc_attrs; 9415 struct nfs4_channel_attrs *rcvd = &res->fc_attrs; 9416 9417 if (rcvd->max_resp_sz > sent->max_resp_sz) 9418 return -EINVAL; 9419 /* 9420 * Our requested max_ops is the minimum we need; we're not 9421 * prepared to break up compounds into smaller pieces than that. 
9422 * So, no point even trying to continue if the server won't 9423 * cooperate: 9424 */ 9425 if (rcvd->max_ops < sent->max_ops) 9426 return -EINVAL; 9427 if (rcvd->max_reqs == 0) 9428 return -EINVAL; 9429 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE) 9430 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE; 9431 return 0; 9432 } 9433 9434 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, 9435 struct nfs41_create_session_res *res) 9436 { 9437 struct nfs4_channel_attrs *sent = &args->bc_attrs; 9438 struct nfs4_channel_attrs *rcvd = &res->bc_attrs; 9439 9440 if (!(res->flags & SESSION4_BACK_CHAN)) 9441 goto out; 9442 if (rcvd->max_rqst_sz > sent->max_rqst_sz) 9443 return -EINVAL; 9444 if (rcvd->max_resp_sz < sent->max_resp_sz) 9445 return -EINVAL; 9446 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) 9447 return -EINVAL; 9448 if (rcvd->max_ops > sent->max_ops) 9449 return -EINVAL; 9450 if (rcvd->max_reqs > sent->max_reqs) 9451 return -EINVAL; 9452 out: 9453 return 0; 9454 } 9455 9456 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args, 9457 struct nfs41_create_session_res *res) 9458 { 9459 int ret; 9460 9461 ret = nfs4_verify_fore_channel_attrs(args, res); 9462 if (ret) 9463 return ret; 9464 return nfs4_verify_back_channel_attrs(args, res); 9465 } 9466 9467 static void nfs4_update_session(struct nfs4_session *session, 9468 struct nfs41_create_session_res *res) 9469 { 9470 nfs4_copy_sessionid(&session->sess_id, &res->sessionid); 9471 /* Mark client id and session as being confirmed */ 9472 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; 9473 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state); 9474 session->flags = res->flags; 9475 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs)); 9476 if (res->flags & SESSION4_BACK_CHAN) 9477 memcpy(&session->bc_attrs, &res->bc_attrs, 9478 sizeof(session->bc_attrs)); 9479 } 9480 9481 static int _nfs4_proc_create_session(struct nfs_client *clp, 9482 const struct cred *cred) 9483 { 9484 struct nfs4_session *session = clp->cl_session; 9485 struct nfs41_create_session_args args = { 9486 .client = clp, 9487 .clientid = clp->cl_clientid, 9488 .seqid = clp->cl_seqid, 9489 .cb_program = NFS4_CALLBACK, 9490 }; 9491 struct nfs41_create_session_res res; 9492 9493 struct rpc_message msg = { 9494 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION], 9495 .rpc_argp = &args, 9496 .rpc_resp = &res, 9497 .rpc_cred = cred, 9498 }; 9499 int status; 9500 9501 nfs4_init_channel_attrs(&args, clp->cl_rpcclient); 9502 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN); 9503 9504 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 9505 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 9506 trace_nfs4_create_session(clp, status); 9507 9508 switch (status) { 9509 case -NFS4ERR_STALE_CLIENTID: 9510 case -NFS4ERR_DELAY: 9511 case -ETIMEDOUT: 9512 case -EACCES: 9513 case -EAGAIN: 9514 goto out; 9515 } 9516 9517 clp->cl_seqid++; 9518 if (!status) { 9519 /* Verify the session's negotiated channel_attrs values */ 9520 status = nfs4_verify_channel_attrs(&args, &res); 9521 /* Increment the clientid slot sequence id */ 9522 if (status) 9523 goto out; 9524 nfs4_update_session(session, &res); 9525 } 9526 out: 9527 return status; 9528 } 9529 9530 /* 9531 * Issues a CREATE_SESSION operation to the server. 9532 * It is the responsibility of the caller to verify the session is 9533 * expired before calling this routine. 
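 *
 * Sequencing note (describing the behaviour of the code around this
 * comment, not adding to it): _nfs4_proc_create_session() leaves
 * clp->cl_seqid untouched on NFS4ERR_STALE_CLIENTID, NFS4ERR_DELAY,
 * timeouts and access errors, i.e. cases where the server cannot have
 * advanced its side, and bumps it otherwise, so the next attempt presents
 * the sequence id the server expects.  A caller (the state manager,
 * outside this file) is expected to do little more than
 *
 *	status = nfs4_proc_create_session(clp, cred);
 *
 * and fall back to full clientid recovery if that keeps failing.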
9534 */ 9535 int nfs4_proc_create_session(struct nfs_client *clp, const struct cred *cred) 9536 { 9537 int status; 9538 unsigned *ptr; 9539 struct nfs4_session *session = clp->cl_session; 9540 struct nfs4_add_xprt_data xprtdata = { 9541 .clp = clp, 9542 }; 9543 struct rpc_add_xprt_test rpcdata = { 9544 .add_xprt_test = clp->cl_mvops->session_trunk, 9545 .data = &xprtdata, 9546 }; 9547 9548 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); 9549 9550 status = _nfs4_proc_create_session(clp, cred); 9551 if (status) 9552 goto out; 9553 9554 /* Init or reset the session slot tables */ 9555 status = nfs4_setup_session_slot_tables(session); 9556 dprintk("slot table setup returned %d\n", status); 9557 if (status) 9558 goto out; 9559 9560 ptr = (unsigned *)&session->sess_id.data[0]; 9561 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__, 9562 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]); 9563 rpc_clnt_probe_trunked_xprts(clp->cl_rpcclient, &rpcdata); 9564 out: 9565 return status; 9566 } 9567 9568 /* 9569 * Issue the over-the-wire RPC DESTROY_SESSION. 9570 * The caller must serialize access to this routine. 9571 */ 9572 int nfs4_proc_destroy_session(struct nfs4_session *session, 9573 const struct cred *cred) 9574 { 9575 struct rpc_message msg = { 9576 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION], 9577 .rpc_argp = session, 9578 .rpc_cred = cred, 9579 }; 9580 int status = 0; 9581 9582 /* session is still being setup */ 9583 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state)) 9584 return 0; 9585 9586 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 9587 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 9588 trace_nfs4_destroy_session(session->clp, status); 9589 9590 if (status) 9591 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. " 9592 "Session has been destroyed regardless...\n", status); 9593 rpc_clnt_manage_trunked_xprts(session->clp->cl_rpcclient); 9594 return status; 9595 } 9596 9597 /* 9598 * Renew the cl_session lease. 
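 *
 * NFSv4.1 has no RENEW operation; an otherwise empty SEQUENCE compound is
 * what keeps the lease alive.  The machinery below is wired up through
 * nfs41_state_renewal_ops near the end of this file:
 *
 *	.sched_state_renewal	= nfs41_proc_async_sequence,
 *	.get_state_renewal_cred	= nfs4_get_machine_cred,
 *	.renew_lease		= nfs4_proc_sequence,
 *
 * so periodic renewal fires the asynchronous variant, while the
 * synchronous nfs4_proc_sequence() is used when the caller needs to wait
 * for the reply.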
9599 */ 9600 struct nfs4_sequence_data { 9601 struct nfs_client *clp; 9602 struct nfs4_sequence_args args; 9603 struct nfs4_sequence_res res; 9604 }; 9605 9606 static void nfs41_sequence_release(void *data) 9607 { 9608 struct nfs4_sequence_data *calldata = data; 9609 struct nfs_client *clp = calldata->clp; 9610 9611 if (refcount_read(&clp->cl_count) > 1) 9612 nfs4_schedule_state_renewal(clp); 9613 nfs_put_client(clp); 9614 kfree(calldata); 9615 } 9616 9617 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp) 9618 { 9619 switch(task->tk_status) { 9620 case -NFS4ERR_DELAY: 9621 rpc_delay(task, NFS4_POLL_RETRY_MAX); 9622 return -EAGAIN; 9623 default: 9624 nfs4_schedule_lease_recovery(clp); 9625 } 9626 return 0; 9627 } 9628 9629 static void nfs41_sequence_call_done(struct rpc_task *task, void *data) 9630 { 9631 struct nfs4_sequence_data *calldata = data; 9632 struct nfs_client *clp = calldata->clp; 9633 9634 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp)) 9635 return; 9636 9637 trace_nfs4_sequence(clp, task->tk_status); 9638 if (task->tk_status < 0 && clp->cl_cons_state >= 0) { 9639 dprintk("%s ERROR %d\n", __func__, task->tk_status); 9640 if (refcount_read(&clp->cl_count) == 1) 9641 return; 9642 9643 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) { 9644 rpc_restart_call_prepare(task); 9645 return; 9646 } 9647 } 9648 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); 9649 } 9650 9651 static void nfs41_sequence_prepare(struct rpc_task *task, void *data) 9652 { 9653 struct nfs4_sequence_data *calldata = data; 9654 struct nfs_client *clp = calldata->clp; 9655 struct nfs4_sequence_args *args; 9656 struct nfs4_sequence_res *res; 9657 9658 args = task->tk_msg.rpc_argp; 9659 res = task->tk_msg.rpc_resp; 9660 9661 nfs4_setup_sequence(clp, args, res, task); 9662 } 9663 9664 static const struct rpc_call_ops nfs41_sequence_ops = { 9665 .rpc_call_done = nfs41_sequence_call_done, 9666 .rpc_call_prepare = nfs41_sequence_prepare, 9667 .rpc_release = nfs41_sequence_release, 9668 }; 9669 9670 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, 9671 const struct cred *cred, 9672 struct nfs4_slot *slot, 9673 bool is_privileged) 9674 { 9675 struct nfs4_sequence_data *calldata; 9676 struct rpc_message msg = { 9677 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE], 9678 .rpc_cred = cred, 9679 }; 9680 struct rpc_task_setup task_setup_data = { 9681 .rpc_client = clp->cl_rpcclient, 9682 .rpc_message = &msg, 9683 .callback_ops = &nfs41_sequence_ops, 9684 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT | RPC_TASK_MOVEABLE, 9685 }; 9686 struct rpc_task *ret; 9687 9688 ret = ERR_PTR(-EIO); 9689 if (!refcount_inc_not_zero(&clp->cl_count)) 9690 goto out_err; 9691 9692 ret = ERR_PTR(-ENOMEM); 9693 calldata = kzalloc(sizeof(*calldata), GFP_KERNEL); 9694 if (calldata == NULL) 9695 goto out_put_clp; 9696 nfs4_init_sequence(&calldata->args, &calldata->res, 0, is_privileged); 9697 nfs4_sequence_attach_slot(&calldata->args, &calldata->res, slot); 9698 msg.rpc_argp = &calldata->args; 9699 msg.rpc_resp = &calldata->res; 9700 calldata->clp = clp; 9701 task_setup_data.callback_data = calldata; 9702 9703 ret = rpc_run_task(&task_setup_data); 9704 if (IS_ERR(ret)) 9705 goto out_err; 9706 return ret; 9707 out_put_clp: 9708 nfs_put_client(clp); 9709 out_err: 9710 nfs41_release_slot(slot); 9711 return ret; 9712 } 9713 9714 static int nfs41_proc_async_sequence(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags) 9715 { 9716 struct rpc_task *task; 
9717 int ret = 0; 9718 9719 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) 9720 return -EAGAIN; 9721 task = _nfs41_proc_sequence(clp, cred, NULL, false); 9722 if (IS_ERR(task)) 9723 ret = PTR_ERR(task); 9724 else 9725 rpc_put_task_async(task); 9726 dprintk("<-- %s status=%d\n", __func__, ret); 9727 return ret; 9728 } 9729 9730 static int nfs4_proc_sequence(struct nfs_client *clp, const struct cred *cred) 9731 { 9732 struct rpc_task *task; 9733 int ret; 9734 9735 task = _nfs41_proc_sequence(clp, cred, NULL, true); 9736 if (IS_ERR(task)) { 9737 ret = PTR_ERR(task); 9738 goto out; 9739 } 9740 ret = rpc_wait_for_completion_task(task); 9741 if (!ret) 9742 ret = task->tk_status; 9743 rpc_put_task(task); 9744 out: 9745 dprintk("<-- %s status=%d\n", __func__, ret); 9746 return ret; 9747 } 9748 9749 struct nfs4_reclaim_complete_data { 9750 struct nfs_client *clp; 9751 struct nfs41_reclaim_complete_args arg; 9752 struct nfs41_reclaim_complete_res res; 9753 }; 9754 9755 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data) 9756 { 9757 struct nfs4_reclaim_complete_data *calldata = data; 9758 9759 nfs4_setup_sequence(calldata->clp, 9760 &calldata->arg.seq_args, 9761 &calldata->res.seq_res, 9762 task); 9763 } 9764 9765 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp) 9766 { 9767 switch(task->tk_status) { 9768 case 0: 9769 wake_up_all(&clp->cl_lock_waitq); 9770 fallthrough; 9771 case -NFS4ERR_COMPLETE_ALREADY: 9772 case -NFS4ERR_WRONG_CRED: /* What to do here? */ 9773 break; 9774 case -NFS4ERR_DELAY: 9775 rpc_delay(task, NFS4_POLL_RETRY_MAX); 9776 fallthrough; 9777 case -NFS4ERR_RETRY_UNCACHED_REP: 9778 case -EACCES: 9779 dprintk("%s: failed to reclaim complete error %d for server %s, retrying\n", 9780 __func__, task->tk_status, clp->cl_hostname); 9781 return -EAGAIN; 9782 case -NFS4ERR_BADSESSION: 9783 case -NFS4ERR_DEADSESSION: 9784 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 9785 break; 9786 default: 9787 nfs4_schedule_lease_recovery(clp); 9788 } 9789 return 0; 9790 } 9791 9792 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data) 9793 { 9794 struct nfs4_reclaim_complete_data *calldata = data; 9795 struct nfs_client *clp = calldata->clp; 9796 struct nfs4_sequence_res *res = &calldata->res.seq_res; 9797 9798 if (!nfs41_sequence_done(task, res)) 9799 return; 9800 9801 trace_nfs4_reclaim_complete(clp, task->tk_status); 9802 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) { 9803 rpc_restart_call_prepare(task); 9804 return; 9805 } 9806 } 9807 9808 static void nfs4_free_reclaim_complete_data(void *data) 9809 { 9810 struct nfs4_reclaim_complete_data *calldata = data; 9811 9812 kfree(calldata); 9813 } 9814 9815 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = { 9816 .rpc_call_prepare = nfs4_reclaim_complete_prepare, 9817 .rpc_call_done = nfs4_reclaim_complete_done, 9818 .rpc_release = nfs4_free_reclaim_complete_data, 9819 }; 9820 9821 /* 9822 * Issue a global reclaim complete. 
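 *
 * RECLAIM_COMPLETE with one_fs = 0 tells the server that this client has
 * finished reclaiming state after a server reboot.  It is plugged into
 * reboot recovery via nfs41_reboot_recovery_ops.reclaim_complete further
 * down in this file, so the state manager ends its reclaim phase with
 * roughly (a sketch; the real call site lives outside this file):
 *
 *	if (ops->reclaim_complete)
 *		ops->reclaim_complete(clp, cred);
 *
 * Soft failures such as NFS4ERR_DELAY or -EACCES are simply retried,
 * while NFS4ERR_BADSESSION and friends are not retried here.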
9823 */ 9824 static int nfs41_proc_reclaim_complete(struct nfs_client *clp, 9825 const struct cred *cred) 9826 { 9827 struct nfs4_reclaim_complete_data *calldata; 9828 struct rpc_message msg = { 9829 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE], 9830 .rpc_cred = cred, 9831 }; 9832 struct rpc_task_setup task_setup_data = { 9833 .rpc_client = clp->cl_rpcclient, 9834 .rpc_message = &msg, 9835 .callback_ops = &nfs4_reclaim_complete_call_ops, 9836 .flags = RPC_TASK_NO_ROUND_ROBIN, 9837 }; 9838 int status = -ENOMEM; 9839 9840 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 9841 if (calldata == NULL) 9842 goto out; 9843 calldata->clp = clp; 9844 calldata->arg.one_fs = 0; 9845 9846 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0, 1); 9847 msg.rpc_argp = &calldata->arg; 9848 msg.rpc_resp = &calldata->res; 9849 task_setup_data.callback_data = calldata; 9850 status = nfs4_call_sync_custom(&task_setup_data); 9851 out: 9852 dprintk("<-- %s status=%d\n", __func__, status); 9853 return status; 9854 } 9855 9856 static void 9857 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata) 9858 { 9859 struct nfs4_layoutget *lgp = calldata; 9860 struct nfs_server *server = NFS_SERVER(lgp->args.inode); 9861 9862 nfs4_setup_sequence(server->nfs_client, &lgp->args.seq_args, 9863 &lgp->res.seq_res, task); 9864 } 9865 9866 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) 9867 { 9868 struct nfs4_layoutget *lgp = calldata; 9869 9870 nfs41_sequence_process(task, &lgp->res.seq_res); 9871 } 9872 9873 static int 9874 nfs4_layoutget_handle_exception(struct rpc_task *task, 9875 struct nfs4_layoutget *lgp, struct nfs4_exception *exception) 9876 { 9877 struct inode *inode = lgp->args.inode; 9878 struct nfs_server *server = NFS_SERVER(inode); 9879 struct pnfs_layout_hdr *lo = lgp->lo; 9880 int nfs4err = task->tk_status; 9881 int err, status = 0; 9882 LIST_HEAD(head); 9883 9884 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status); 9885 9886 nfs4_sequence_free_slot(&lgp->res.seq_res); 9887 9888 exception->state = NULL; 9889 exception->stateid = NULL; 9890 9891 switch (nfs4err) { 9892 case 0: 9893 goto out; 9894 9895 /* 9896 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs 9897 * on the file. set tk_status to -ENODATA to tell upper layer to 9898 * retry go inband. 9899 */ 9900 case -NFS4ERR_LAYOUTUNAVAILABLE: 9901 status = -ENODATA; 9902 goto out; 9903 /* 9904 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of 9905 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3). 9906 */ 9907 case -NFS4ERR_BADLAYOUT: 9908 status = -EOVERFLOW; 9909 goto out; 9910 /* 9911 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client 9912 * (or clients) writing to the same RAID stripe except when 9913 * the minlength argument is 0 (see RFC5661 section 18.43.3). 9914 * 9915 * Treat it like we would RECALLCONFLICT -- we retry for a little 9916 * while, and then eventually give up. 9917 */ 9918 case -NFS4ERR_LAYOUTTRYLATER: 9919 if (lgp->args.minlength == 0) { 9920 status = -EOVERFLOW; 9921 goto out; 9922 } 9923 status = -EBUSY; 9924 break; 9925 case -NFS4ERR_RECALLCONFLICT: 9926 case -NFS4ERR_RETURNCONFLICT: 9927 status = -ERECALLCONFLICT; 9928 break; 9929 case -NFS4ERR_DELEG_REVOKED: 9930 case -NFS4ERR_ADMIN_REVOKED: 9931 case -NFS4ERR_EXPIRED: 9932 case -NFS4ERR_BAD_STATEID: 9933 exception->timeout = 0; 9934 spin_lock(&inode->i_lock); 9935 /* If the open stateid was bad, then recover it. 
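	 *
	 * Two cases are distinguished here: if there is no usable layout
	 * header, or the stateid we sent no longer matches lo->plh_stateid,
	 * then it was the open/delegation stateid that the server revoked,
	 * so hand it back through the exception for stateid recovery.
	 * Otherwise the layout stateid itself has gone bad: invalidate the
	 * layout, push out pending commits and ask the caller to retry.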
*/ 9936 if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) || 9937 !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) { 9938 spin_unlock(&inode->i_lock); 9939 exception->state = lgp->args.ctx->state; 9940 exception->stateid = &lgp->args.stateid; 9941 break; 9942 } 9943 9944 /* 9945 * Mark the bad layout state as invalid, then retry 9946 */ 9947 pnfs_mark_layout_stateid_invalid(lo, &head); 9948 spin_unlock(&inode->i_lock); 9949 nfs_commit_inode(inode, 0); 9950 pnfs_free_lseg_list(&head); 9951 status = -EAGAIN; 9952 goto out; 9953 } 9954 9955 err = nfs4_handle_exception(server, nfs4err, exception); 9956 if (!status) { 9957 if (exception->retry) 9958 status = -EAGAIN; 9959 else 9960 status = err; 9961 } 9962 out: 9963 return status; 9964 } 9965 9966 size_t max_response_pages(struct nfs_server *server) 9967 { 9968 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 9969 return nfs_page_array_len(0, max_resp_sz); 9970 } 9971 9972 static void nfs4_layoutget_release(void *calldata) 9973 { 9974 struct nfs4_layoutget *lgp = calldata; 9975 9976 nfs4_sequence_free_slot(&lgp->res.seq_res); 9977 pnfs_layoutget_free(lgp); 9978 } 9979 9980 static const struct rpc_call_ops nfs4_layoutget_call_ops = { 9981 .rpc_call_prepare = nfs4_layoutget_prepare, 9982 .rpc_call_done = nfs4_layoutget_done, 9983 .rpc_release = nfs4_layoutget_release, 9984 }; 9985 9986 struct pnfs_layout_segment * 9987 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, 9988 struct nfs4_exception *exception) 9989 { 9990 struct inode *inode = lgp->args.inode; 9991 struct nfs_server *server = NFS_SERVER(inode); 9992 struct rpc_task *task; 9993 struct rpc_message msg = { 9994 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET], 9995 .rpc_argp = &lgp->args, 9996 .rpc_resp = &lgp->res, 9997 .rpc_cred = lgp->cred, 9998 }; 9999 struct rpc_task_setup task_setup_data = { 10000 .rpc_client = server->client, 10001 .rpc_message = &msg, 10002 .callback_ops = &nfs4_layoutget_call_ops, 10003 .callback_data = lgp, 10004 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF | 10005 RPC_TASK_MOVEABLE, 10006 }; 10007 struct pnfs_layout_segment *lseg = NULL; 10008 int status = 0; 10009 10010 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0); 10011 exception->retry = 0; 10012 10013 task = rpc_run_task(&task_setup_data); 10014 if (IS_ERR(task)) 10015 return ERR_CAST(task); 10016 10017 status = rpc_wait_for_completion_task(task); 10018 if (status != 0) 10019 goto out; 10020 10021 if (task->tk_status < 0) { 10022 exception->retry = 1; 10023 status = nfs4_layoutget_handle_exception(task, lgp, exception); 10024 } else if (lgp->res.layoutp->len == 0) { 10025 exception->retry = 1; 10026 status = -EAGAIN; 10027 nfs4_update_delay(&exception->timeout); 10028 } else 10029 lseg = pnfs_layout_process(lgp); 10030 out: 10031 trace_nfs4_layoutget(lgp->args.ctx, 10032 &lgp->args.range, 10033 &lgp->res.range, 10034 &lgp->res.stateid, 10035 status); 10036 10037 rpc_put_task(task); 10038 dprintk("<-- %s status=%d\n", __func__, status); 10039 if (status) 10040 return ERR_PTR(status); 10041 return lseg; 10042 } 10043 10044 static void 10045 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) 10046 { 10047 struct nfs4_layoutreturn *lrp = calldata; 10048 10049 nfs4_setup_sequence(lrp->clp, 10050 &lrp->args.seq_args, 10051 &lrp->res.seq_res, 10052 task); 10053 if (!pnfs_layout_is_valid(lrp->args.layout)) 10054 rpc_exit(task, 0); 10055 } 10056 10057 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) 
10058 { 10059 struct nfs4_layoutreturn *lrp = calldata; 10060 struct nfs_server *server; 10061 10062 if (!nfs41_sequence_process(task, &lrp->res.seq_res)) 10063 return; 10064 10065 if (task->tk_rpc_status == -ETIMEDOUT) { 10066 lrp->rpc_status = -EAGAIN; 10067 lrp->res.lrs_present = 0; 10068 return; 10069 } 10070 /* 10071 * Was there an RPC level error? Assume the call succeeded, 10072 * and that we need to release the layout 10073 */ 10074 if (task->tk_rpc_status != 0 && RPC_WAS_SENT(task)) { 10075 lrp->res.lrs_present = 0; 10076 return; 10077 } 10078 10079 server = NFS_SERVER(lrp->args.inode); 10080 switch (task->tk_status) { 10081 case -NFS4ERR_OLD_STATEID: 10082 if (nfs4_layout_refresh_old_stateid(&lrp->args.stateid, 10083 &lrp->args.range, 10084 lrp->args.inode)) 10085 goto out_restart; 10086 fallthrough; 10087 default: 10088 task->tk_status = 0; 10089 lrp->res.lrs_present = 0; 10090 fallthrough; 10091 case 0: 10092 break; 10093 case -NFS4ERR_BADSESSION: 10094 case -NFS4ERR_DEADSESSION: 10095 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 10096 nfs4_schedule_session_recovery(server->nfs_client->cl_session, 10097 task->tk_status); 10098 lrp->res.lrs_present = 0; 10099 lrp->rpc_status = -EAGAIN; 10100 task->tk_status = 0; 10101 break; 10102 case -NFS4ERR_DELAY: 10103 if (nfs4_async_handle_error(task, server, NULL, NULL) == 10104 -EAGAIN) 10105 goto out_restart; 10106 lrp->res.lrs_present = 0; 10107 break; 10108 } 10109 return; 10110 out_restart: 10111 task->tk_status = 0; 10112 nfs4_sequence_free_slot(&lrp->res.seq_res); 10113 rpc_restart_call_prepare(task); 10114 } 10115 10116 static void nfs4_layoutreturn_release(void *calldata) 10117 { 10118 struct nfs4_layoutreturn *lrp = calldata; 10119 struct pnfs_layout_hdr *lo = lrp->args.layout; 10120 10121 if (lrp->rpc_status == 0 || !lrp->inode) 10122 pnfs_layoutreturn_free_lsegs( 10123 lo, &lrp->args.stateid, &lrp->args.range, 10124 lrp->res.lrs_present ? 
&lrp->res.stateid : NULL); 10125 else 10126 pnfs_layoutreturn_retry_later(lo, &lrp->args.stateid, 10127 &lrp->args.range); 10128 nfs4_sequence_free_slot(&lrp->res.seq_res); 10129 if (lrp->ld_private.ops && lrp->ld_private.ops->free) 10130 lrp->ld_private.ops->free(&lrp->ld_private); 10131 pnfs_put_layout_hdr(lrp->args.layout); 10132 nfs_iput_and_deactive(lrp->inode); 10133 put_cred(lrp->cred); 10134 kfree(calldata); 10135 } 10136 10137 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = { 10138 .rpc_call_prepare = nfs4_layoutreturn_prepare, 10139 .rpc_call_done = nfs4_layoutreturn_done, 10140 .rpc_release = nfs4_layoutreturn_release, 10141 }; 10142 10143 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, unsigned int flags) 10144 { 10145 struct rpc_task *task; 10146 struct rpc_message msg = { 10147 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN], 10148 .rpc_argp = &lrp->args, 10149 .rpc_resp = &lrp->res, 10150 .rpc_cred = lrp->cred, 10151 }; 10152 struct rpc_task_setup task_setup_data = { 10153 .rpc_client = NFS_SERVER(lrp->args.inode)->client, 10154 .rpc_message = &msg, 10155 .callback_ops = &nfs4_layoutreturn_call_ops, 10156 .callback_data = lrp, 10157 .flags = RPC_TASK_MOVEABLE, 10158 }; 10159 int status = 0; 10160 10161 nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client, 10162 NFS_SP4_MACH_CRED_PNFS_CLEANUP, 10163 &task_setup_data.rpc_client, &msg); 10164 10165 lrp->inode = nfs_igrab_and_active(lrp->args.inode); 10166 if (flags & PNFS_FL_LAYOUTRETURN_ASYNC) { 10167 if (!lrp->inode) { 10168 nfs4_layoutreturn_release(lrp); 10169 return -EAGAIN; 10170 } 10171 task_setup_data.flags |= RPC_TASK_ASYNC; 10172 } 10173 if (!lrp->inode) 10174 flags |= PNFS_FL_LAYOUTRETURN_PRIVILEGED; 10175 if (flags & PNFS_FL_LAYOUTRETURN_PRIVILEGED) 10176 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1, 10177 1); 10178 else 10179 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1, 10180 0); 10181 task = rpc_run_task(&task_setup_data); 10182 if (IS_ERR(task)) 10183 return PTR_ERR(task); 10184 if (!(flags & PNFS_FL_LAYOUTRETURN_ASYNC)) 10185 status = task->tk_status; 10186 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status); 10187 dprintk("<-- %s status=%d\n", __func__, status); 10188 rpc_put_task(task); 10189 return status; 10190 } 10191 10192 static int 10193 _nfs4_proc_getdeviceinfo(struct nfs_server *server, 10194 struct pnfs_device *pdev, 10195 const struct cred *cred) 10196 { 10197 struct nfs4_getdeviceinfo_args args = { 10198 .pdev = pdev, 10199 .notify_types = NOTIFY_DEVICEID4_CHANGE | 10200 NOTIFY_DEVICEID4_DELETE, 10201 }; 10202 struct nfs4_getdeviceinfo_res res = { 10203 .pdev = pdev, 10204 }; 10205 struct rpc_message msg = { 10206 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO], 10207 .rpc_argp = &args, 10208 .rpc_resp = &res, 10209 .rpc_cred = cred, 10210 }; 10211 int status; 10212 10213 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 10214 if (res.notification & ~args.notify_types) 10215 dprintk("%s: unsupported notification\n", __func__); 10216 if (res.notification != args.notify_types) 10217 pdev->nocache = 1; 10218 10219 trace_nfs4_getdeviceinfo(server, &pdev->dev_id, status); 10220 10221 dprintk("<-- %s status=%d\n", __func__, status); 10222 10223 return status; 10224 } 10225 10226 int nfs4_proc_getdeviceinfo(struct nfs_server *server, 10227 struct pnfs_device *pdev, 10228 const struct cred *cred) 10229 { 10230 struct nfs4_exception exception = { }; 10231 int err; 10232 
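	/*
	 * The do/while below is the usual synchronous retry idiom in this
	 * file: the raw operation is handed to nfs4_handle_exception(),
	 * which sleeps, drives state or session recovery as needed, and
	 * sets exception.retry when the request should simply be resent.
	 * Generic shape (a sketch; "_nfs4_proc_something" is a placeholder):
	 *
	 *	do {
	 *		err = nfs4_handle_exception(server,
	 *				_nfs4_proc_something(server, ...),
	 *				&exception);
	 *	} while (exception.retry);
	 */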
10233 do { 10234 err = nfs4_handle_exception(server, 10235 _nfs4_proc_getdeviceinfo(server, pdev, cred), 10236 &exception); 10237 } while (exception.retry); 10238 return err; 10239 } 10240 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo); 10241 10242 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata) 10243 { 10244 struct nfs4_layoutcommit_data *data = calldata; 10245 struct nfs_server *server = NFS_SERVER(data->args.inode); 10246 10247 nfs4_setup_sequence(server->nfs_client, 10248 &data->args.seq_args, 10249 &data->res.seq_res, 10250 task); 10251 } 10252 10253 static void 10254 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata) 10255 { 10256 struct nfs4_layoutcommit_data *data = calldata; 10257 struct nfs_server *server = NFS_SERVER(data->args.inode); 10258 10259 if (!nfs41_sequence_done(task, &data->res.seq_res)) 10260 return; 10261 10262 switch (task->tk_status) { /* Just ignore these failures */ 10263 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */ 10264 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */ 10265 case -NFS4ERR_BADLAYOUT: /* no layout */ 10266 case -NFS4ERR_GRACE: /* loca_recalim always false */ 10267 task->tk_status = 0; 10268 break; 10269 case 0: 10270 break; 10271 default: 10272 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) { 10273 rpc_restart_call_prepare(task); 10274 return; 10275 } 10276 } 10277 } 10278 10279 static void nfs4_layoutcommit_release(void *calldata) 10280 { 10281 struct nfs4_layoutcommit_data *data = calldata; 10282 10283 pnfs_cleanup_layoutcommit(data); 10284 nfs_post_op_update_inode_force_wcc(data->args.inode, 10285 data->res.fattr); 10286 put_cred(data->cred); 10287 nfs_iput_and_deactive(data->inode); 10288 kfree(data); 10289 } 10290 10291 static const struct rpc_call_ops nfs4_layoutcommit_ops = { 10292 .rpc_call_prepare = nfs4_layoutcommit_prepare, 10293 .rpc_call_done = nfs4_layoutcommit_done, 10294 .rpc_release = nfs4_layoutcommit_release, 10295 }; 10296 10297 int 10298 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync) 10299 { 10300 struct rpc_message msg = { 10301 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT], 10302 .rpc_argp = &data->args, 10303 .rpc_resp = &data->res, 10304 .rpc_cred = data->cred, 10305 }; 10306 struct rpc_task_setup task_setup_data = { 10307 .task = &data->task, 10308 .rpc_client = NFS_CLIENT(data->args.inode), 10309 .rpc_message = &msg, 10310 .callback_ops = &nfs4_layoutcommit_ops, 10311 .callback_data = data, 10312 .flags = RPC_TASK_MOVEABLE, 10313 }; 10314 struct rpc_task *task; 10315 int status = 0; 10316 10317 dprintk("NFS: initiating layoutcommit call. 
sync %d " 10318 "lbw: %llu inode %lu\n", sync, 10319 data->args.lastbytewritten, 10320 data->args.inode->i_ino); 10321 10322 if (!sync) { 10323 data->inode = nfs_igrab_and_active(data->args.inode); 10324 if (data->inode == NULL) { 10325 nfs4_layoutcommit_release(data); 10326 return -EAGAIN; 10327 } 10328 task_setup_data.flags = RPC_TASK_ASYNC; 10329 } 10330 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0); 10331 task = rpc_run_task(&task_setup_data); 10332 if (IS_ERR(task)) 10333 return PTR_ERR(task); 10334 if (sync) 10335 status = task->tk_status; 10336 trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status); 10337 dprintk("%s: status %d\n", __func__, status); 10338 rpc_put_task(task); 10339 return status; 10340 } 10341 10342 /* 10343 * Use the state managment nfs_client cl_rpcclient, which uses krb5i (if 10344 * possible) as per RFC3530bis and RFC5661 Security Considerations sections 10345 */ 10346 static int _nfs41_proc_secinfo_no_name(struct nfs_server *server, 10347 struct nfs_fh *fhandle, 10348 struct nfs4_secinfo_flavors *flavors, 10349 bool use_integrity) 10350 { 10351 struct nfs41_secinfo_no_name_args args = { 10352 .style = SECINFO_STYLE_CURRENT_FH, 10353 }; 10354 struct nfs4_secinfo_res res = { 10355 .flavors = flavors, 10356 }; 10357 struct rpc_message msg = { 10358 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME], 10359 .rpc_argp = &args, 10360 .rpc_resp = &res, 10361 }; 10362 struct nfs4_call_sync_data data = { 10363 .seq_server = server, 10364 .seq_args = &args.seq_args, 10365 .seq_res = &res.seq_res, 10366 }; 10367 struct rpc_task_setup task_setup = { 10368 .rpc_client = server->client, 10369 .rpc_message = &msg, 10370 .callback_ops = server->nfs_client->cl_mvops->call_sync_ops, 10371 .callback_data = &data, 10372 .flags = RPC_TASK_NO_ROUND_ROBIN, 10373 }; 10374 const struct cred *cred = NULL; 10375 int status; 10376 10377 if (use_integrity) { 10378 task_setup.rpc_client = server->nfs_client->cl_rpcclient; 10379 10380 cred = nfs4_get_clid_cred(server->nfs_client); 10381 msg.rpc_cred = cred; 10382 } 10383 10384 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 10385 status = nfs4_call_sync_custom(&task_setup); 10386 dprintk("<-- %s status=%d\n", __func__, status); 10387 10388 put_cred(cred); 10389 10390 return status; 10391 } 10392 10393 static int nfs41_proc_secinfo_no_name(struct nfs_server *server, 10394 struct nfs_fh *fhandle, 10395 struct nfs4_secinfo_flavors *flavors) 10396 { 10397 struct nfs4_exception exception = { 10398 .interruptible = true, 10399 }; 10400 int err; 10401 do { 10402 /* first try using integrity protection */ 10403 err = -NFS4ERR_WRONGSEC; 10404 10405 /* try to use integrity protection with machine cred */ 10406 if (_nfs4_is_integrity_protected(server->nfs_client)) 10407 err = _nfs41_proc_secinfo_no_name(server, fhandle, 10408 flavors, true); 10409 10410 /* 10411 * if unable to use integrity protection, or SECINFO with 10412 * integrity protection returns NFS4ERR_WRONGSEC (which is 10413 * disallowed by spec, but exists in deployed servers) use 10414 * the current filesystem's rpc_client and the user cred. 
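 *
 * The flavor buffer itself is supplied by the caller; nfs41_find_root_sec()
 * below simply dedicates one page to it, roughly
 *
 *	page = alloc_page(GFP_KERNEL);
 *	flavors = page_address(page);
 *	err = nfs41_proc_secinfo_no_name(server, fhandle, flavors);
 *
 * and then walks flavors->flavors[] looking for a flavor that matches the
 * mount's auth_info.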
10415 */ 10416 if (err == -NFS4ERR_WRONGSEC) 10417 err = _nfs41_proc_secinfo_no_name(server, fhandle, 10418 flavors, false); 10419 10420 switch (err) { 10421 case 0: 10422 case -NFS4ERR_WRONGSEC: 10423 case -ENOTSUPP: 10424 goto out; 10425 default: 10426 err = nfs4_handle_exception(server, err, &exception); 10427 } 10428 } while (exception.retry); 10429 out: 10430 return err; 10431 } 10432 10433 static int nfs41_find_root_sec(struct nfs_server *server, 10434 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 10435 { 10436 int err; 10437 struct page *page; 10438 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR; 10439 struct nfs4_secinfo_flavors *flavors; 10440 struct nfs4_secinfo4 *secinfo; 10441 int i; 10442 10443 page = alloc_page(GFP_KERNEL); 10444 if (!page) { 10445 err = -ENOMEM; 10446 goto out; 10447 } 10448 10449 flavors = page_address(page); 10450 err = nfs41_proc_secinfo_no_name(server, fhandle, flavors); 10451 10452 /* 10453 * Fall back on "guess and check" method if 10454 * the server doesn't support SECINFO_NO_NAME 10455 */ 10456 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) { 10457 err = nfs4_find_root_sec(server, fhandle, fattr); 10458 goto out_freepage; 10459 } 10460 if (err) 10461 goto out_freepage; 10462 10463 for (i = 0; i < flavors->num_flavors; i++) { 10464 secinfo = &flavors->flavors[i]; 10465 10466 switch (secinfo->flavor) { 10467 case RPC_AUTH_NULL: 10468 case RPC_AUTH_UNIX: 10469 case RPC_AUTH_GSS: 10470 flavor = rpcauth_get_pseudoflavor(secinfo->flavor, 10471 &secinfo->flavor_info); 10472 break; 10473 default: 10474 flavor = RPC_AUTH_MAXFLAVOR; 10475 break; 10476 } 10477 10478 if (!nfs_auth_info_match(&server->auth_info, flavor)) 10479 flavor = RPC_AUTH_MAXFLAVOR; 10480 10481 if (flavor != RPC_AUTH_MAXFLAVOR) { 10482 err = nfs4_lookup_root_sec(server, fhandle, fattr, 10483 flavor); 10484 if (!err) 10485 break; 10486 } 10487 } 10488 10489 if (flavor == RPC_AUTH_MAXFLAVOR) 10490 err = -EPERM; 10491 10492 out_freepage: 10493 put_page(page); 10494 if (err == -EACCES) 10495 return -EPERM; 10496 out: 10497 return err; 10498 } 10499 10500 static int _nfs41_test_stateid(struct nfs_server *server, 10501 const nfs4_stateid *stateid, 10502 const struct cred *cred) 10503 { 10504 int status; 10505 struct nfs41_test_stateid_args args = { 10506 .stateid = *stateid, 10507 }; 10508 struct nfs41_test_stateid_res res; 10509 struct rpc_message msg = { 10510 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID], 10511 .rpc_argp = &args, 10512 .rpc_resp = &res, 10513 .rpc_cred = cred, 10514 }; 10515 struct rpc_clnt *rpc_client = server->client; 10516 10517 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 10518 &rpc_client, &msg); 10519 10520 dprintk("NFS call test_stateid %p\n", stateid); 10521 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 10522 status = nfs4_call_sync_sequence(rpc_client, server, &msg, 10523 &args.seq_args, &res.seq_res); 10524 if (status != NFS_OK) { 10525 dprintk("NFS reply test_stateid: failed, %d\n", status); 10526 return status; 10527 } 10528 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status); 10529 return -res.status; 10530 } 10531 10532 static void nfs4_handle_delay_or_session_error(struct nfs_server *server, 10533 int err, struct nfs4_exception *exception) 10534 { 10535 exception->retry = 0; 10536 switch(err) { 10537 case -NFS4ERR_DELAY: 10538 case -NFS4ERR_RETRY_UNCACHED_REP: 10539 nfs4_handle_exception(server, err, exception); 10540 break; 10541 case -NFS4ERR_BADSESSION: 10542 case -NFS4ERR_BADSLOT: 10543 case 
-NFS4ERR_BAD_HIGH_SLOT: 10544 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 10545 case -NFS4ERR_DEADSESSION: 10546 nfs4_do_handle_exception(server, err, exception); 10547 } 10548 } 10549 10550 /** 10551 * nfs41_test_stateid - perform a TEST_STATEID operation 10552 * 10553 * @server: server / transport on which to perform the operation 10554 * @stateid: state ID to test 10555 * @cred: credential 10556 * 10557 * Returns NFS_OK if the server recognizes that "stateid" is valid. 10558 * Otherwise a negative NFS4ERR value is returned if the operation 10559 * failed or the state ID is not currently valid. 10560 */ 10561 static int nfs41_test_stateid(struct nfs_server *server, 10562 const nfs4_stateid *stateid, 10563 const struct cred *cred) 10564 { 10565 struct nfs4_exception exception = { 10566 .interruptible = true, 10567 }; 10568 int err; 10569 do { 10570 err = _nfs41_test_stateid(server, stateid, cred); 10571 nfs4_handle_delay_or_session_error(server, err, &exception); 10572 } while (exception.retry); 10573 return err; 10574 } 10575 10576 struct nfs_free_stateid_data { 10577 struct nfs_server *server; 10578 struct nfs41_free_stateid_args args; 10579 struct nfs41_free_stateid_res res; 10580 }; 10581 10582 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata) 10583 { 10584 struct nfs_free_stateid_data *data = calldata; 10585 nfs4_setup_sequence(data->server->nfs_client, 10586 &data->args.seq_args, 10587 &data->res.seq_res, 10588 task); 10589 } 10590 10591 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata) 10592 { 10593 struct nfs_free_stateid_data *data = calldata; 10594 10595 nfs41_sequence_done(task, &data->res.seq_res); 10596 10597 switch (task->tk_status) { 10598 case -NFS4ERR_DELAY: 10599 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN) 10600 rpc_restart_call_prepare(task); 10601 } 10602 } 10603 10604 static void nfs41_free_stateid_release(void *calldata) 10605 { 10606 struct nfs_free_stateid_data *data = calldata; 10607 struct nfs_client *clp = data->server->nfs_client; 10608 10609 nfs_put_client(clp); 10610 kfree(calldata); 10611 } 10612 10613 static const struct rpc_call_ops nfs41_free_stateid_ops = { 10614 .rpc_call_prepare = nfs41_free_stateid_prepare, 10615 .rpc_call_done = nfs41_free_stateid_done, 10616 .rpc_release = nfs41_free_stateid_release, 10617 }; 10618 10619 /** 10620 * nfs41_free_stateid - perform a FREE_STATEID operation 10621 * 10622 * @server: server / transport on which to perform the operation 10623 * @stateid: state ID to release 10624 * @cred: credential 10625 * @privileged: set to true if this call needs to be privileged 10626 * 10627 * Note: this function is always asynchronous. 
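 *
 * Fire-and-forget usage (see nfs41_free_lock_state() below for a real
 * caller):
 *
 *	nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
 *
 * Once the RPC has been launched, the local copy is stamped
 * NFS4_FREED_STATEID_TYPE; completion of the task is not waited for.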
10628 */ 10629 static int nfs41_free_stateid(struct nfs_server *server, 10630 nfs4_stateid *stateid, 10631 const struct cred *cred, 10632 bool privileged) 10633 { 10634 struct rpc_message msg = { 10635 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID], 10636 .rpc_cred = cred, 10637 }; 10638 struct rpc_task_setup task_setup = { 10639 .rpc_client = server->client, 10640 .rpc_message = &msg, 10641 .callback_ops = &nfs41_free_stateid_ops, 10642 .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE, 10643 }; 10644 struct nfs_free_stateid_data *data; 10645 struct rpc_task *task; 10646 struct nfs_client *clp = server->nfs_client; 10647 10648 if (!refcount_inc_not_zero(&clp->cl_count)) 10649 return -EIO; 10650 10651 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 10652 &task_setup.rpc_client, &msg); 10653 10654 dprintk("NFS call free_stateid %p\n", stateid); 10655 data = kmalloc(sizeof(*data), GFP_KERNEL); 10656 if (!data) 10657 return -ENOMEM; 10658 data->server = server; 10659 nfs4_stateid_copy(&data->args.stateid, stateid); 10660 10661 task_setup.callback_data = data; 10662 10663 msg.rpc_argp = &data->args; 10664 msg.rpc_resp = &data->res; 10665 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, privileged); 10666 task = rpc_run_task(&task_setup); 10667 if (IS_ERR(task)) 10668 return PTR_ERR(task); 10669 rpc_put_task(task); 10670 stateid->type = NFS4_FREED_STATEID_TYPE; 10671 return 0; 10672 } 10673 10674 static void 10675 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp) 10676 { 10677 const struct cred *cred = lsp->ls_state->owner->so_cred; 10678 10679 nfs41_free_stateid(server, &lsp->ls_stateid, cred, false); 10680 nfs4_free_lock_state(server, lsp); 10681 } 10682 10683 static bool nfs41_match_stateid(const nfs4_stateid *s1, 10684 const nfs4_stateid *s2) 10685 { 10686 trace_nfs41_match_stateid(s1, s2); 10687 10688 if (s1->type != s2->type) 10689 return false; 10690 10691 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0) 10692 return false; 10693 10694 if (s1->seqid == s2->seqid) 10695 return true; 10696 10697 return s1->seqid == 0 || s2->seqid == 0; 10698 } 10699 10700 #endif /* CONFIG_NFS_V4_1 */ 10701 10702 static bool nfs4_match_stateid(const nfs4_stateid *s1, 10703 const nfs4_stateid *s2) 10704 { 10705 trace_nfs4_match_stateid(s1, s2); 10706 10707 return nfs4_stateid_match(s1, s2); 10708 } 10709 10710 10711 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = { 10712 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 10713 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 10714 .recover_open = nfs4_open_reclaim, 10715 .recover_lock = nfs4_lock_reclaim, 10716 .establish_clid = nfs4_init_clientid, 10717 .detect_trunking = nfs40_discover_server_trunking, 10718 }; 10719 10720 #if defined(CONFIG_NFS_V4_1) 10721 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { 10722 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 10723 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 10724 .recover_open = nfs4_open_reclaim, 10725 .recover_lock = nfs4_lock_reclaim, 10726 .establish_clid = nfs41_init_clientid, 10727 .reclaim_complete = nfs41_proc_reclaim_complete, 10728 .detect_trunking = nfs41_discover_server_trunking, 10729 }; 10730 #endif /* CONFIG_NFS_V4_1 */ 10731 10732 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { 10733 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 10734 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 10735 .recover_open = nfs40_open_expired, 10736 .recover_lock = nfs4_lock_expired, 
10737 .establish_clid = nfs4_init_clientid, 10738 }; 10739 10740 #if defined(CONFIG_NFS_V4_1) 10741 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = { 10742 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 10743 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 10744 .recover_open = nfs41_open_expired, 10745 .recover_lock = nfs41_lock_expired, 10746 .establish_clid = nfs41_init_clientid, 10747 }; 10748 #endif /* CONFIG_NFS_V4_1 */ 10749 10750 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = { 10751 .sched_state_renewal = nfs4_proc_async_renew, 10752 .get_state_renewal_cred = nfs4_get_renew_cred, 10753 .renew_lease = nfs4_proc_renew, 10754 }; 10755 10756 #if defined(CONFIG_NFS_V4_1) 10757 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = { 10758 .sched_state_renewal = nfs41_proc_async_sequence, 10759 .get_state_renewal_cred = nfs4_get_machine_cred, 10760 .renew_lease = nfs4_proc_sequence, 10761 }; 10762 #endif 10763 10764 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = { 10765 .get_locations = _nfs40_proc_get_locations, 10766 .fsid_present = _nfs40_proc_fsid_present, 10767 }; 10768 10769 #if defined(CONFIG_NFS_V4_1) 10770 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = { 10771 .get_locations = _nfs41_proc_get_locations, 10772 .fsid_present = _nfs41_proc_fsid_present, 10773 }; 10774 #endif /* CONFIG_NFS_V4_1 */ 10775 10776 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = { 10777 .minor_version = 0, 10778 .init_caps = NFS_CAP_READDIRPLUS 10779 | NFS_CAP_ATOMIC_OPEN 10780 | NFS_CAP_POSIX_LOCK, 10781 .init_client = nfs40_init_client, 10782 .shutdown_client = nfs40_shutdown_client, 10783 .match_stateid = nfs4_match_stateid, 10784 .find_root_sec = nfs4_find_root_sec, 10785 .free_lock_state = nfs4_release_lockowner, 10786 .test_and_free_expired = nfs40_test_and_free_expired_stateid, 10787 .alloc_seqid = nfs_alloc_seqid, 10788 .call_sync_ops = &nfs40_call_sync_ops, 10789 .reboot_recovery_ops = &nfs40_reboot_recovery_ops, 10790 .nograce_recovery_ops = &nfs40_nograce_recovery_ops, 10791 .state_renewal_ops = &nfs40_state_renewal_ops, 10792 .mig_recovery_ops = &nfs40_mig_recovery_ops, 10793 }; 10794 10795 #if defined(CONFIG_NFS_V4_1) 10796 static struct nfs_seqid * 10797 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2) 10798 { 10799 return NULL; 10800 } 10801 10802 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { 10803 .minor_version = 1, 10804 .init_caps = NFS_CAP_READDIRPLUS 10805 | NFS_CAP_ATOMIC_OPEN 10806 | NFS_CAP_POSIX_LOCK 10807 | NFS_CAP_STATEID_NFSV41 10808 | NFS_CAP_ATOMIC_OPEN_V1 10809 | NFS_CAP_LGOPEN 10810 | NFS_CAP_MOVEABLE, 10811 .init_client = nfs41_init_client, 10812 .shutdown_client = nfs41_shutdown_client, 10813 .match_stateid = nfs41_match_stateid, 10814 .find_root_sec = nfs41_find_root_sec, 10815 .free_lock_state = nfs41_free_lock_state, 10816 .test_and_free_expired = nfs41_test_and_free_expired_stateid, 10817 .alloc_seqid = nfs_alloc_no_seqid, 10818 .session_trunk = nfs4_test_session_trunk, 10819 .call_sync_ops = &nfs41_call_sync_ops, 10820 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 10821 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 10822 .state_renewal_ops = &nfs41_state_renewal_ops, 10823 .mig_recovery_ops = &nfs41_mig_recovery_ops, 10824 }; 10825 #endif 10826 10827 #if defined(CONFIG_NFS_V4_2) 10828 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = { 10829 .minor_version = 2, 10830 .init_caps = 
NFS_CAP_READDIRPLUS 10831 | NFS_CAP_ATOMIC_OPEN 10832 | NFS_CAP_POSIX_LOCK 10833 | NFS_CAP_STATEID_NFSV41 10834 | NFS_CAP_ATOMIC_OPEN_V1 10835 | NFS_CAP_LGOPEN 10836 | NFS_CAP_ALLOCATE 10837 | NFS_CAP_COPY 10838 | NFS_CAP_OFFLOAD_CANCEL 10839 | NFS_CAP_COPY_NOTIFY 10840 | NFS_CAP_DEALLOCATE 10841 | NFS_CAP_ZERO_RANGE 10842 | NFS_CAP_SEEK 10843 | NFS_CAP_LAYOUTSTATS 10844 | NFS_CAP_CLONE 10845 | NFS_CAP_LAYOUTERROR 10846 | NFS_CAP_READ_PLUS 10847 | NFS_CAP_MOVEABLE 10848 | NFS_CAP_OFFLOAD_STATUS, 10849 .init_client = nfs41_init_client, 10850 .shutdown_client = nfs41_shutdown_client, 10851 .match_stateid = nfs41_match_stateid, 10852 .find_root_sec = nfs41_find_root_sec, 10853 .free_lock_state = nfs41_free_lock_state, 10854 .call_sync_ops = &nfs41_call_sync_ops, 10855 .test_and_free_expired = nfs41_test_and_free_expired_stateid, 10856 .alloc_seqid = nfs_alloc_no_seqid, 10857 .session_trunk = nfs4_test_session_trunk, 10858 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 10859 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 10860 .state_renewal_ops = &nfs41_state_renewal_ops, 10861 .mig_recovery_ops = &nfs41_mig_recovery_ops, 10862 }; 10863 #endif 10864 10865 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = { 10866 [0] = &nfs_v4_0_minor_ops, 10867 #if defined(CONFIG_NFS_V4_1) 10868 [1] = &nfs_v4_1_minor_ops, 10869 #endif 10870 #if defined(CONFIG_NFS_V4_2) 10871 [2] = &nfs_v4_2_minor_ops, 10872 #endif 10873 }; 10874 10875 static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size) 10876 { 10877 ssize_t error, error2, error3, error4 = 0; 10878 size_t left = size; 10879 10880 error = generic_listxattr(dentry, list, left); 10881 if (error < 0) 10882 return error; 10883 if (list) { 10884 list += error; 10885 left -= error; 10886 } 10887 10888 error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, left); 10889 if (error2 < 0) 10890 return error2; 10891 10892 if (list) { 10893 list += error2; 10894 left -= error2; 10895 } 10896 10897 error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, left); 10898 if (error3 < 0) 10899 return error3; 10900 if (list) { 10901 list += error3; 10902 left -= error3; 10903 } 10904 10905 if (!nfs_server_capable(d_inode(dentry), NFS_CAP_SECURITY_LABEL)) { 10906 error4 = security_inode_listsecurity(d_inode(dentry), list, left); 10907 if (error4 < 0) 10908 return error4; 10909 } 10910 10911 error += error2 + error3 + error4; 10912 if (size && error > size) 10913 return -ERANGE; 10914 return error; 10915 } 10916 10917 static void nfs4_enable_swap(struct inode *inode) 10918 { 10919 /* The state manager thread must always be running. 10920 * It will notice the client is a swapper, and stay put. 10921 */ 10922 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 10923 10924 nfs4_schedule_state_manager(clp); 10925 } 10926 10927 static void nfs4_disable_swap(struct inode *inode) 10928 { 10929 /* The state manager thread will now exit once it is 10930 * woken. 
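	 *
	 * This pairs with nfs4_enable_swap() above: enabling swap schedules
	 * the state manager, which then stays resident for as long as the
	 * client is a swapper; disabling swap re-arms NFS4CLNT_RUN_MANAGER,
	 * clears NFS4CLNT_MANAGER_AVAILABLE and wakes cl_state so the idle
	 * manager thread notices the change and exits.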
10931 */ 10932 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 10933 10934 set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state); 10935 clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state); 10936 wake_up_var(&clp->cl_state); 10937 } 10938 10939 static const struct inode_operations nfs4_dir_inode_operations = { 10940 .create = nfs_create, 10941 .lookup = nfs_lookup, 10942 .atomic_open = nfs_atomic_open, 10943 .link = nfs_link, 10944 .unlink = nfs_unlink, 10945 .symlink = nfs_symlink, 10946 .mkdir = nfs_mkdir, 10947 .rmdir = nfs_rmdir, 10948 .mknod = nfs_mknod, 10949 .rename = nfs_rename, 10950 .permission = nfs_permission, 10951 .getattr = nfs_getattr, 10952 .setattr = nfs_setattr, 10953 .listxattr = nfs4_listxattr, 10954 }; 10955 10956 static const struct inode_operations nfs4_file_inode_operations = { 10957 .permission = nfs_permission, 10958 .getattr = nfs_getattr, 10959 .setattr = nfs_setattr, 10960 .listxattr = nfs4_listxattr, 10961 }; 10962 10963 static struct nfs_server *nfs4_clone_server(struct nfs_server *source, 10964 struct nfs_fh *fh, struct nfs_fattr *fattr, 10965 rpc_authflavor_t flavor) 10966 { 10967 struct nfs_server *server; 10968 int error; 10969 10970 server = nfs_clone_server(source, fh, fattr, flavor); 10971 if (IS_ERR(server)) 10972 return server; 10973 10974 error = nfs4_delegation_hash_alloc(server); 10975 if (error) { 10976 nfs_free_server(server); 10977 return ERR_PTR(error); 10978 } 10979 10980 return server; 10981 } 10982 10983 const struct nfs_rpc_ops nfs_v4_clientops = { 10984 .version = 4, /* protocol version */ 10985 .dentry_ops = &nfs4_dentry_operations, 10986 .dir_inode_ops = &nfs4_dir_inode_operations, 10987 .file_inode_ops = &nfs4_file_inode_operations, 10988 .file_ops = &nfs4_file_operations, 10989 .getroot = nfs4_proc_get_root, 10990 .submount = nfs4_submount, 10991 .try_get_tree = nfs4_try_get_tree, 10992 .getattr = nfs4_proc_getattr, 10993 .setattr = nfs4_proc_setattr, 10994 .lookup = nfs4_proc_lookup, 10995 .lookupp = nfs4_proc_lookupp, 10996 .access = nfs4_proc_access, 10997 .readlink = nfs4_proc_readlink, 10998 .create = nfs4_proc_create, 10999 .remove = nfs4_proc_remove, 11000 .unlink_setup = nfs4_proc_unlink_setup, 11001 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare, 11002 .unlink_done = nfs4_proc_unlink_done, 11003 .rename_setup = nfs4_proc_rename_setup, 11004 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare, 11005 .rename_done = nfs4_proc_rename_done, 11006 .link = nfs4_proc_link, 11007 .symlink = nfs4_proc_symlink, 11008 .mkdir = nfs4_proc_mkdir, 11009 .rmdir = nfs4_proc_rmdir, 11010 .readdir = nfs4_proc_readdir, 11011 .mknod = nfs4_proc_mknod, 11012 .statfs = nfs4_proc_statfs, 11013 .fsinfo = nfs4_proc_fsinfo, 11014 .pathconf = nfs4_proc_pathconf, 11015 .set_capabilities = nfs4_server_capabilities, 11016 .decode_dirent = nfs4_decode_dirent, 11017 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare, 11018 .read_setup = nfs4_proc_read_setup, 11019 .read_done = nfs4_read_done, 11020 .write_setup = nfs4_proc_write_setup, 11021 .write_done = nfs4_write_done, 11022 .commit_setup = nfs4_proc_commit_setup, 11023 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare, 11024 .commit_done = nfs4_commit_done, 11025 .lock = nfs4_proc_lock, 11026 .clear_acl_cache = nfs4_zap_acl_attr, 11027 .close_context = nfs4_close_context, 11028 .open_context = nfs4_atomic_open, 11029 .have_delegation = nfs4_have_delegation, 11030 .return_delegation = nfs4_inode_return_delegation, 11031 .alloc_client = nfs4_alloc_client, 11032 .init_client = nfs4_init_client, 11033 
.free_client = nfs4_free_client, 11034 .create_server = nfs4_create_server, 11035 .clone_server = nfs4_clone_server, 11036 .discover_trunking = nfs4_discover_trunking, 11037 .enable_swap = nfs4_enable_swap, 11038 .disable_swap = nfs4_disable_swap, 11039 }; 11040 11041 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { 11042 .name = XATTR_NAME_NFSV4_ACL, 11043 .list = nfs4_xattr_list_nfs4_acl, 11044 .get = nfs4_xattr_get_nfs4_acl, 11045 .set = nfs4_xattr_set_nfs4_acl, 11046 }; 11047 11048 #if defined(CONFIG_NFS_V4_1) 11049 static const struct xattr_handler nfs4_xattr_nfs4_dacl_handler = { 11050 .name = XATTR_NAME_NFSV4_DACL, 11051 .list = nfs4_xattr_list_nfs4_dacl, 11052 .get = nfs4_xattr_get_nfs4_dacl, 11053 .set = nfs4_xattr_set_nfs4_dacl, 11054 }; 11055 11056 static const struct xattr_handler nfs4_xattr_nfs4_sacl_handler = { 11057 .name = XATTR_NAME_NFSV4_SACL, 11058 .list = nfs4_xattr_list_nfs4_sacl, 11059 .get = nfs4_xattr_get_nfs4_sacl, 11060 .set = nfs4_xattr_set_nfs4_sacl, 11061 }; 11062 #endif 11063 11064 #ifdef CONFIG_NFS_V4_2 11065 static const struct xattr_handler nfs4_xattr_nfs4_user_handler = { 11066 .prefix = XATTR_USER_PREFIX, 11067 .get = nfs4_xattr_get_nfs4_user, 11068 .set = nfs4_xattr_set_nfs4_user, 11069 }; 11070 #endif 11071 11072 const struct xattr_handler * const nfs4_xattr_handlers[] = { 11073 &nfs4_xattr_nfs4_acl_handler, 11074 #if defined(CONFIG_NFS_V4_1) 11075 &nfs4_xattr_nfs4_dacl_handler, 11076 &nfs4_xattr_nfs4_sacl_handler, 11077 #endif 11078 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 11079 &nfs4_xattr_nfs4_label_handler, 11080 #endif 11081 #ifdef CONFIG_NFS_V4_2 11082 &nfs4_xattr_nfs4_user_handler, 11083 #endif 11084 NULL 11085 }; 11086
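
/*
 * Usage note (a sketch; the lookups themselves live in the NFSv4 client
 * setup code outside this file): the minor version negotiated at mount
 * time, e.g. "-o vers=4.2", selects the ops vector roughly as
 *
 *	clp->cl_mvops = nfs_v4_minor_ops[minorversion];
 *
 * after which everything above is reached through cl_mvops and
 * nfs_v4_clientops.  The nfs4_xattr_handlers[] table routes
 * "system.nfs4_acl" (plus the v4.1 DACL/SACL names and, on v4.2, the
 * "user." namespace) to the handlers declared elsewhere in the client.
 */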